From 19fd435042a3191f6a5787a6eaf58e9c47920845 Mon Sep 17 00:00:00 2001 From: MrBubb1es <63935943+MrBubb1es@users.noreply.github.com> Date: Thu, 28 Mar 2024 12:19:51 -0500 Subject: [PATCH 001/104] Improved doctests for some functions (#11334) --- .../binary_tree/binary_tree_traversals.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 2b33cdca4..49c208335 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -97,6 +97,8 @@ def level_order(root: Node | None) -> Generator[int, None, None]: """ Returns a list of nodes value from a whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. + >>> list(level_order(make_tree())) + [1, 2, 3, 4, 5] """ if root is None: @@ -120,6 +122,10 @@ def get_nodes_from_left_to_right( """ Returns a list of nodes value from a particular level: Left to right direction of the binary tree. + >>> list(get_nodes_from_left_to_right(make_tree(), 1)) + [1] + >>> list(get_nodes_from_left_to_right(make_tree(), 2)) + [2, 3] """ def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: @@ -140,10 +146,14 @@ def get_nodes_from_right_to_left( """ Returns a list of nodes value from a particular level: Right to left direction of the binary tree. + >>> list(get_nodes_from_right_to_left(make_tree(), 1)) + [1] + >>> list(get_nodes_from_right_to_left(make_tree(), 2)) + [3, 2] """ def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: - if root is None: + if not root: return if level == 1: yield root.data @@ -158,6 +168,8 @@ def zigzag(root: Node | None) -> Generator[int, None, None]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. 
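For context on the expected value in the doctest added just below: the level-order doctests above pin the sample tree's levels down as 1, then 2 3, then 4 5, and a zigzag traversal alternates direction on every level, giving 1, 3, 2, 4, 5. A standalone sketch of the same idea, independent of this module's implementation and assuming only a node type with data/left/right attributes:

    from collections import deque

    def zigzag_sketch(root):
        # Breadth-first walk that reverses every second level before emitting it.
        output, queue, left_to_right = [], deque([root] if root else []), True
        while queue:
            level = [node.data for node in queue]
            output.extend(level if left_to_right else level[::-1])
            left_to_right = not left_to_right
            # Advance to the next level, keeping left children ahead of right ones.
            queue = deque(
                child for node in queue for child in (node.left, node.right) if child
            )
        return output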
+ >>> list(zigzag(make_tree())) + [1, 3, 2, 4, 5] """ if root is None: return From 516a3028d1f6b6e7e11ae4501fdaee50a0965464 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:25:41 +0300 Subject: [PATCH 002/104] Enable ruff PLR5501 rule (#11332) * Enable ruff PLR5501 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/crossword_puzzle_solver.py | 5 +- cellular_automata/game_of_life.py | 5 +- ciphers/decrypt_caesar_with_chi_squared.py | 21 +++--- data_structures/binary_tree/avl_tree.py | 10 +-- .../binary_tree/binary_search_tree.py | 9 ++- .../binary_search_tree_recursive.py | 22 +++---- data_structures/binary_tree/red_black_tree.py | 66 +++++++++---------- data_structures/binary_tree/treap.py | 29 ++++---- data_structures/heap/max_heap.py | 7 +- .../stacks/infix_to_prefix_conversion.py | 13 ++-- data_structures/trie/radix_tree.py | 43 ++++++------ divide_and_conquer/convex_hull.py | 15 ++--- graphs/graph_list.py | 46 ++++++------- graphs/minimum_spanning_tree_prims.py | 7 +- graphs/multi_heuristic_astar.py | 33 +++++----- machine_learning/forecasting/run.py | 7 +- maths/largest_of_very_large_numbers.py | 9 ++- maths/pollard_rho.py | 13 ++-- matrix/cramers_rule_2x2.py | 15 ++--- project_euler/problem_019/sol1.py | 7 +- pyproject.toml | 1 - searches/hill_climbing.py | 7 +- searches/interpolation_search.py | 35 +++++----- strings/min_cost_string_conversion.py | 23 ++++--- 24 files changed, 210 insertions(+), 238 deletions(-) diff --git a/backtracking/crossword_puzzle_solver.py b/backtracking/crossword_puzzle_solver.py index b9c01c4ef..e702c7e52 100644 --- a/backtracking/crossword_puzzle_solver.py +++ b/backtracking/crossword_puzzle_solver.py @@ -28,9 +28,8 @@ def is_valid( if vertical: if row + i >= len(puzzle) or puzzle[row + i][col] != "": return False - else: - if col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": - return False + elif col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": + return False return True diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 67e647d64..76276b272 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -101,9 +101,8 @@ def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: state = True elif alive > 3: state = False - else: - if alive == 3: - state = True + elif alive == 3: + state = True return state diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 6c3686020..10832203e 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -206,20 +206,19 @@ def decrypt_caesar_with_chi_squared( # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value - else: - if letter.lower() in frequencies: - # Get the amount of times the letter occurs in the message - occurrences = decrypted_with_shift.count(letter) + elif letter.lower() in frequencies: + # Get the amount of times the letter occurs in the message + occurrences = decrypted_with_shift.count(letter) - # Get the excepcted amount of times the letter should appear based - # on letter frequencies - expected = frequencies[letter] * occurrences + # Get the excepcted amount of times the letter should appear based + # on letter frequencies + expected = frequencies[letter] * occurrences - # 
Complete the chi squared statistic formula - chi_letter_value = ((occurrences - expected) ** 2) / expected + # Complete the chi squared statistic formula + chi_letter_value = ((occurrences - expected) ** 2) / expected - # Add the margin of error to the total chi squared statistic - chi_squared_statistic += chi_letter_value + # Add the margin of error to the total chi squared statistic + chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary chi_squared_statistic_values[shift] = ( diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 041ed7e36..9fca72374 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -215,11 +215,11 @@ def del_node(root: MyNode, data: Any) -> MyNode | None: return root else: root.set_left(del_node(left_child, data)) - else: # root.get_data() < data - if right_child is None: - return root - else: - root.set_right(del_node(right_child, data)) + # root.get_data() < data + elif right_child is None: + return root + else: + root.set_right(del_node(right_child, data)) if get_height(right_child) - get_height(left_child) == 2: assert right_child is not None diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 08a60a120..090e3e25f 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -185,12 +185,11 @@ class BinarySearchTree: break else: parent_node = parent_node.left + elif parent_node.right is None: + parent_node.right = new_node + break else: - if parent_node.right is None: - parent_node.right = new_node - break - else: - parent_node = parent_node.right + parent_node = parent_node.right new_node.parent = parent_node def insert(self, *values) -> Self: diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 6af1b053f..d94ac5253 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -74,14 +74,13 @@ class BinarySearchTree: def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Node: if node is None: node = Node(label, parent) + elif label < node.label: + node.left = self._put(node.left, label, node) + elif label > node.label: + node.right = self._put(node.right, label, node) else: - if label < node.label: - node.left = self._put(node.left, label, node) - elif label > node.label: - node.right = self._put(node.right, label, node) - else: - msg = f"Node with label {label} already exists" - raise ValueError(msg) + msg = f"Node with label {label} already exists" + raise ValueError(msg) return node @@ -106,11 +105,10 @@ class BinarySearchTree: if node is None: msg = f"Node with label {label} does not exist" raise ValueError(msg) - else: - if label < node.label: - node = self._search(node.left, label) - elif label > node.label: - node = self._search(node.right, label) + elif label < node.label: + node = self._search(node.left, label) + elif label > node.label: + node = self._search(node.right, label) return node diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 3b5845cd9..bdd808c82 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -107,12 +107,11 @@ class RedBlackTree: else: self.left = 
RedBlackTree(label, 1, self) self.left._insert_repair() + elif self.right: + self.right.insert(label) else: - if self.right: - self.right.insert(label) - else: - self.right = RedBlackTree(label, 1, self) - self.right._insert_repair() + self.right = RedBlackTree(label, 1, self) + self.right._insert_repair() return self.parent or self def _insert_repair(self) -> None: @@ -178,36 +177,34 @@ class RedBlackTree: self.parent.left = None else: self.parent.right = None - else: - # The node is black - if child is None: - # This node and its child are black - if self.parent is None: - # The tree is now empty - return RedBlackTree(None) - else: - self._remove_repair() - if self.is_left(): - self.parent.left = None - else: - self.parent.right = None - self.parent = None + # The node is black + elif child is None: + # This node and its child are black + if self.parent is None: + # The tree is now empty + return RedBlackTree(None) else: - # This node is black and its child is red - # Move the child node here and make it black - self.label = child.label - self.left = child.left - self.right = child.right - if self.left: - self.left.parent = self - if self.right: - self.right.parent = self + self._remove_repair() + if self.is_left(): + self.parent.left = None + else: + self.parent.right = None + self.parent = None + else: + # This node is black and its child is red + # Move the child node here and make it black + self.label = child.label + self.left = child.left + self.right = child.right + if self.left: + self.left.parent = self + if self.right: + self.right.parent = self elif self.label is not None and self.label > label: if self.left: self.left.remove(label) - else: - if self.right: - self.right.remove(label) + elif self.right: + self.right.remove(label) return self.parent or self def _remove_repair(self) -> None: @@ -369,11 +366,10 @@ class RedBlackTree: return None else: return self.right.search(label) + elif self.left is None: + return None else: - if self.left is None: - return None - else: - return self.left.search(label) + return self.left.search(label) def floor(self, label: int) -> int | None: """Returns the largest element in this tree which is at most label. diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index a53ac566e..e7ddf931b 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -43,22 +43,21 @@ def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]: return None, None elif root.value is None: return None, None + elif value < root.value: + """ + Right tree's root will be current node. + Now we split(with the same value) current node's left son + Left tree: left part of that split + Right tree's left son: right part of that split + """ + left, root.left = split(root.left, value) + return left, root else: - if value < root.value: - """ - Right tree's root will be current node. 
- Now we split(with the same value) current node's left son - Left tree: left part of that split - Right tree's left son: right part of that split - """ - left, root.left = split(root.left, value) - return left, root - else: - """ - Just symmetric to previous case - """ - root.right, right = split(root.right, value) - return root, right + """ + Just symmetric to previous case + """ + root.right, right = split(root.right, value) + return root, right def merge(left: Node | None, right: Node | None) -> Node | None: diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index fbc8eed09..5a9f9cf88 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -40,11 +40,10 @@ class BinaryHeap: while self.__size >= 2 * i: if 2 * i + 1 > self.__size: bigger_child = 2 * i + elif self.__heap[2 * i] > self.__heap[2 * i + 1]: + bigger_child = 2 * i else: - if self.__heap[2 * i] > self.__heap[2 * i + 1]: - bigger_child = 2 * i - else: - bigger_child = 2 * i + 1 + bigger_child = 2 * i + 1 temporary = self.__heap[i] if self.__heap[i] < self.__heap[bigger_child]: self.__heap[i] = self.__heap[bigger_child] diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index beff421c0..878473b93 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -95,13 +95,12 @@ def infix_2_postfix(infix: str) -> str: while stack[-1] != "(": post_fix.append(stack.pop()) # Pop stack & add the content to Postfix stack.pop() - else: - if len(stack) == 0: - stack.append(x) # If stack is empty, push x to stack - else: # while priority of x is not > priority of element in the stack - while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: - post_fix.append(stack.pop()) # pop stack & add to Postfix - stack.append(x) # push x to stack + elif len(stack) == 0: + stack.append(x) # If stack is empty, push x to stack + else: # while priority of x is not > priority of element in the stack + while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: + post_fix.append(stack.pop()) # pop stack & add to Postfix + stack.append(x) # push x to stack print( x.center(8), diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index fadc50cb4..caf566a6c 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -153,31 +153,30 @@ class RadixNode: # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(remaining_word) + # If it is not a leaf, we don't have to delete + elif not incoming_node.is_leaf: + return False else: - # If it is not a leaf, we don't have to delete - if not incoming_node.is_leaf: - return False + # We delete the nodes if no edges go from it + if len(incoming_node.nodes) == 0: + del self.nodes[word[0]] + # We merge the current node with its only child + if len(self.nodes) == 1 and not self.is_leaf: + merging_node = next(iter(self.nodes.values())) + self.is_leaf = merging_node.is_leaf + self.prefix += merging_node.prefix + self.nodes = merging_node.nodes + # If there is more than 1 edge, we just mark it as non-leaf + elif len(incoming_node.nodes) > 1: + incoming_node.is_leaf = False + # If there is 1 edge, we merge it with its child else: - # We delete the nodes if no edges go from it - if len(incoming_node.nodes) == 0: - del self.nodes[word[0]] - # We merge the current node with its only child - if 
len(self.nodes) == 1 and not self.is_leaf: - merging_node = next(iter(self.nodes.values())) - self.is_leaf = merging_node.is_leaf - self.prefix += merging_node.prefix - self.nodes = merging_node.nodes - # If there is more than 1 edge, we just mark it as non-leaf - elif len(incoming_node.nodes) > 1: - incoming_node.is_leaf = False - # If there is 1 edge, we merge it with its child - else: - merging_node = next(iter(incoming_node.nodes.values())) - incoming_node.is_leaf = merging_node.is_leaf - incoming_node.prefix += merging_node.prefix - incoming_node.nodes = merging_node.nodes + merging_node = next(iter(incoming_node.nodes.values())) + incoming_node.is_leaf = merging_node.is_leaf + incoming_node.prefix += merging_node.prefix + incoming_node.nodes = merging_node.nodes - return True + return True def print_tree(self, height: int = 0) -> None: """Print the tree diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index a5d8b713b..93f6daf1f 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -274,14 +274,13 @@ def convex_hull_bf(points: list[Point]) -> list[Point]: points_left_of_ij = True elif det_k < 0: points_right_of_ij = True - else: - # point[i], point[j], point[k] all lie on a straight line - # if point[k] is to the left of point[i] or it's to the - # right of point[j], then point[i], point[j] cannot be - # part of the convex hull of A - if points[k] < points[i] or points[k] > points[j]: - ij_part_of_convex_hull = False - break + # point[i], point[j], point[k] all lie on a straight line + # if point[k] is to the left of point[i] or it's to the + # right of point[j], then point[i], point[j] cannot be + # part of the convex hull of A + elif points[k] < points[i] or points[k] > points[j]: + ij_part_of_convex_hull = False + break if points_left_of_ij and points_right_of_ij: ij_part_of_convex_hull = False diff --git a/graphs/graph_list.py b/graphs/graph_list.py index e871f3b8a..6563cbb76 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -120,29 +120,29 @@ class GraphAdjacencyList(Generic[T]): else: self.adj_list[source_vertex] = [destination_vertex] self.adj_list[destination_vertex] = [source_vertex] - else: # For directed graphs - # if both source vertex and destination vertex are present in adjacency - # list, add destination vertex to source vertex list of adjacent vertices. - if source_vertex in self.adj_list and destination_vertex in self.adj_list: - self.adj_list[source_vertex].append(destination_vertex) - # if only source vertex is present in adjacency list, add destination - # vertex to source vertex list of adjacent vertices and create a new vertex - # with destination vertex as key, which has no adjacent vertex - elif source_vertex in self.adj_list: - self.adj_list[source_vertex].append(destination_vertex) - self.adj_list[destination_vertex] = [] - # if only destination vertex is present in adjacency list, create a new - # vertex with source vertex as key and assign a list containing destination - # vertex as first adjacent vertex - elif destination_vertex in self.adj_list: - self.adj_list[source_vertex] = [destination_vertex] - # if both source vertex and destination vertex are not present in adjacency - # list, create a new vertex with source vertex as key and a list containing - # destination vertex as it's first adjacent vertex. 
Then create a new vertex - # with destination vertex as key, which has no adjacent vertex - else: - self.adj_list[source_vertex] = [destination_vertex] - self.adj_list[destination_vertex] = [] + # For directed graphs + # if both source vertex and destination vertex are present in adjacency + # list, add destination vertex to source vertex list of adjacent vertices. + elif source_vertex in self.adj_list and destination_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + # if only source vertex is present in adjacency list, add destination + # vertex to source vertex list of adjacent vertices and create a new vertex + # with destination vertex as key, which has no adjacent vertex + elif source_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + self.adj_list[destination_vertex] = [] + # if only destination vertex is present in adjacency list, create a new + # vertex with source vertex as key and assign a list containing destination + # vertex as first adjacent vertex + elif destination_vertex in self.adj_list: + self.adj_list[source_vertex] = [destination_vertex] + # if both source vertex and destination vertex are not present in adjacency + # list, create a new vertex with source vertex as key and a list containing + # destination vertex as it's first adjacent vertex. Then create a new vertex + # with destination vertex as key, which has no adjacent vertex + else: + self.adj_list[source_vertex] = [destination_vertex] + self.adj_list[destination_vertex] = [] return self diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 5a08ec57f..90c9f4c91 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -18,11 +18,10 @@ class Heap: else: if 2 * start + 2 >= size: smallest_child = 2 * start + 1 + elif heap[2 * start + 1] < heap[2 * start + 2]: + smallest_child = 2 * start + 1 else: - if heap[2 * start + 1] < heap[2 * start + 2]: - smallest_child = 2 * start + 1 - else: - smallest_child = 2 * start + 2 + smallest_child = 2 * start + 2 if heap[smallest_child] < heap[start]: temp, temp1 = heap[smallest_child], positions[smallest_child] heap[smallest_child], positions[smallest_child] = ( diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 0a18ede6e..6af9a187a 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -270,24 +270,23 @@ def multi_a_star(start: TPos, goal: TPos, n_heuristic: int): back_pointer, ) close_list_inad.append(get_s) + elif g_function[goal] <= open_list[0].minkey(): + if g_function[goal] < float("inf"): + do_something(back_pointer, goal, start) else: - if g_function[goal] <= open_list[0].minkey(): - if g_function[goal] < float("inf"): - do_something(back_pointer, goal, start) - else: - get_s = open_list[0].top_show() - visited.add(get_s) - expand_state( - get_s, - 0, - visited, - g_function, - close_list_anchor, - close_list_inad, - open_list, - back_pointer, - ) - close_list_anchor.append(get_s) + get_s = open_list[0].top_show() + visited.add(get_s) + expand_state( + get_s, + 0, + visited, + g_function, + close_list_anchor, + close_list_inad, + open_list, + back_pointer, + ) + close_list_anchor.append(get_s) print("No path found to goal") print() for i in range(n - 1, -1, -1): diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index 64e719daa..dbb86caf8 100644 --- a/machine_learning/forecasting/run.py +++ 
b/machine_learning/forecasting/run.py @@ -113,11 +113,10 @@ def data_safety_checker(list_vote: list, actual_result: float) -> bool: for i in list_vote: if i > actual_result: safe = not_safe + 1 + elif abs(abs(i) - abs(actual_result)) <= 0.1: + safe += 1 else: - if abs(abs(i) - abs(actual_result)) <= 0.1: - safe += 1 - else: - not_safe += 1 + not_safe += 1 return safe > not_safe diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index eb5c121fd..edee50371 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -20,11 +20,10 @@ def res(x, y): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.log10(x) - else: - if x == 0: # 0 raised to any number is 0 - return 0 - elif y == 0: - return 1 # any number raised to 0 is 1 + elif x == 0: # 0 raised to any number is 0 + return 0 + elif y == 0: + return 1 # any number raised to 0 is 1 raise AssertionError("This should never happen") diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py index 5082f54f7..e8bc89cef 100644 --- a/maths/pollard_rho.py +++ b/maths/pollard_rho.py @@ -94,14 +94,13 @@ def pollard_rho( if divisor == 1: # No common divisor yet, just keep searching. continue + # We found a common divisor! + elif divisor == num: + # Unfortunately, the divisor is ``num`` itself and is useless. + break else: - # We found a common divisor! - if divisor == num: - # Unfortunately, the divisor is ``num`` itself and is useless. - break - else: - # The divisor is a nontrivial factor of ``num``! - return divisor + # The divisor is a nontrivial factor of ``num``! + return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py index 4f52dbe64..081035bec 100644 --- a/matrix/cramers_rule_2x2.py +++ b/matrix/cramers_rule_2x2.py @@ -73,12 +73,11 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, raise ValueError("Infinite solutions. (Consistent system)") else: raise ValueError("No solution. 
(Inconsistent system)") + elif determinant_x == determinant_y == 0: + # Trivial solution (Inconsistent system) + return (0.0, 0.0) else: - if determinant_x == determinant_y == 0: - # Trivial solution (Inconsistent system) - return (0.0, 0.0) - else: - x = determinant_x / determinant - y = determinant_y / determinant - # Non-Trivial Solution (Consistent system) - return (x, y) + x = determinant_x / determinant + y = determinant_y / determinant + # Non-Trivial Solution (Consistent system) + return (x, y) diff --git a/project_euler/problem_019/sol1.py b/project_euler/problem_019/sol1.py index 0e38137d4..656f104c3 100644 --- a/project_euler/problem_019/sol1.py +++ b/project_euler/problem_019/sol1.py @@ -46,10 +46,9 @@ def solution(): elif day > 29 and month == 2: month += 1 day = day - 29 - else: - if day > days_per_month[month - 1]: - month += 1 - day = day - days_per_month[month - 2] + elif day > days_per_month[month - 1]: + month += 1 + day = day - days_per_month[month - 2] if month > 12: year += 1 diff --git a/pyproject.toml b/pyproject.toml index 5187491e5..290a6b759 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLR5501", # Consider using `elif` instead of `else` -- FIX ME "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/searches/hill_climbing.py b/searches/hill_climbing.py index 83a3b8b74..689b7e5cc 100644 --- a/searches/hill_climbing.py +++ b/searches/hill_climbing.py @@ -137,11 +137,10 @@ def hill_climbing( if change > max_change and change > 0: max_change = change next_state = neighbor - else: # finding min + elif change < min_change and change < 0: # finding min # to direction with greatest descent - if change < min_change and change < 0: - min_change = change - next_state = neighbor + min_change = change + next_state = neighbor if next_state is not None: # we found at least one neighbor which improved the current state current_state = next_state diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 49194c260..0591788aa 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -33,18 +33,16 @@ def interpolation_search(sorted_collection, item): current_item = sorted_collection[point] if current_item == item: return point + elif point < left: + right = left + left = point + elif point > right: + left = right + right = point + elif item < current_item: + right = point - 1 else: - if point < left: - right = left - left = point - elif point > right: - left = right - right = point - else: - if item < current_item: - right = point - 1 - else: - left = point + 1 + left = point + 1 return None @@ -79,15 +77,14 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right): return interpolation_search_by_recursion(sorted_collection, item, point, left) elif point > right: return interpolation_search_by_recursion(sorted_collection, item, right, left) + elif sorted_collection[point] > item: + return interpolation_search_by_recursion( + sorted_collection, item, left, point - 1 + ) else: - if sorted_collection[point] > item: - return 
interpolation_search_by_recursion( - sorted_collection, item, left, point - 1 - ) - else: - return interpolation_search_by_recursion( - sorted_collection, item, point + 1, right - ) + return interpolation_search_by_recursion( + sorted_collection, item, point + 1, right + ) def __assert_sorted(collection): diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 0fad0b88c..d147a9d79 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -60,19 +60,18 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: if i == 0 and j == 0: return [] + elif ops[i][j][0] in {"C", "R"}: + seq = assemble_transformation(ops, i - 1, j - 1) + seq.append(ops[i][j]) + return seq + elif ops[i][j][0] == "D": + seq = assemble_transformation(ops, i - 1, j) + seq.append(ops[i][j]) + return seq else: - if ops[i][j][0] in {"C", "R"}: - seq = assemble_transformation(ops, i - 1, j - 1) - seq.append(ops[i][j]) - return seq - elif ops[i][j][0] == "D": - seq = assemble_transformation(ops, i - 1, j) - seq.append(ops[i][j]) - return seq - else: - seq = assemble_transformation(ops, i, j - 1) - seq.append(ops[i][j]) - return seq + seq = assemble_transformation(ops, i, j - 1) + seq.append(ops[i][j]) + return seq if __name__ == "__main__": From da47d5c88ccf18e27c5b8f10830376031ad1792a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:26:41 +0300 Subject: [PATCH 003/104] Enable ruff N999 rule (#11331) * Enable ruff N999 rule * updating DIRECTORY.md --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 6 +++--- ...(nlogn).py => longest_increasing_subsequence_o_nlogn.py} | 0 ...)_graph.py => directed_and_undirected_weighted_graph.py} | 0 ...eural_network.py => two_hidden_layers_neural_network.py} | 0 pyproject.toml | 1 - 5 files changed, 3 insertions(+), 4 deletions(-) rename dynamic_programming/{longest_increasing_subsequence_o(nlogn).py => longest_increasing_subsequence_o_nlogn.py} (100%) rename graphs/{directed_and_undirected_(weighted)_graph.py => directed_and_undirected_weighted_graph.py} (100%) rename neural_network/{2_hidden_layers_neural_network.py => two_hidden_layers_neural_network.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 01667c9fe..f6d6cb463 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,7 +351,7 @@ * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) - * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) + * [Longest Increasing Subsequence O Nlogn](dynamic_programming/longest_increasing_subsequence_o_nlogn.py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) @@ -465,7 +465,7 @@ * [Dijkstra Alternate](graphs/dijkstra_alternate.py) * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py) * [Dinic](graphs/dinic.py) - * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) + * [Directed And Undirected Weighted Graph](graphs/directed_and_undirected_weighted_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) * 
[Eulerian Path And Circuit For Undirected Graph](graphs/eulerian_path_and_circuit_for_undirected_graph.py) * [Even Tree](graphs/even_tree.py) @@ -792,7 +792,6 @@ * [Minimum Cut](networking_flow/minimum_cut.py) ## Neural Network - * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * Activation Functions * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) @@ -809,6 +808,7 @@ * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) * [Simple Neural Network](neural_network/simple_neural_network.py) + * [Two Hidden Layers Neural Network](neural_network/two_hidden_layers_neural_network.py) ## Other * [Activity Selection](other/activity_selection.py) diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py similarity index 100% rename from dynamic_programming/longest_increasing_subsequence_o(nlogn).py rename to dynamic_programming/longest_increasing_subsequence_o_nlogn.py diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_weighted_graph.py similarity index 100% rename from graphs/directed_and_undirected_(weighted)_graph.py rename to graphs/directed_and_undirected_weighted_graph.py diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py similarity index 100% rename from neural_network/2_hidden_layers_neural_network.py rename to neural_network/two_hidden_layers_neural_network.py diff --git a/pyproject.toml b/pyproject.toml index 290a6b759..5b2eb07b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "G004", # Logging statement uses f-string "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME - "N999", # Invalid module name -- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey From efb7463cde48305cfebb8a547273c93edbdaaee5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:28:54 +0300 Subject: [PATCH 004/104] Enable ruff PLW0120 rule (#11330) Co-authored-by: Christian Clauss --- pyproject.toml | 1 - searches/fibonacci_search.py | 3 +-- searches/ternary_search.py | 3 +-- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5b2eb07b4..b9f3115df 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception diff --git a/searches/fibonacci_search.py b/searches/fibonacci_search.py index ec3dfa7f3..7b2252a68 100644 --- a/searches/fibonacci_search.py +++ b/searches/fibonacci_search.py @@ -123,8 +123,7 @@ def fibonacci_search(arr: list, val: int) -> int: elif val > item_k_1: offset += fibonacci(fibb_k - 1) fibb_k -= 2 - else: - return -1 + return -1 if __name__ == "__main__": diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 8dcd6b5bd..73e4b1ddc 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -106,8 +106,7 @@ def ite_ternary_search(array: list[int], target: int) -> int: else: left = one_third + 1 right = two_third - 1 - else: - return -1 + return -1 def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int: From f2246ce7fd539d94fd9299bd2fe42469dafab03f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 21:03:23 +0300 Subject: [PATCH 005/104] Enable ruff ICN001 rule (#11329) * Enable ruff ICN001 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/hill_cipher.py | 38 ++++----- fractals/julia_sets.py | 54 ++++++------ fractals/koch_snowflake.py | 34 ++++---- graphics/bezier_curve.py | 2 +- machine_learning/gradient_descent.py | 4 +- neural_network/input_data.py | 32 +++---- .../two_hidden_layers_neural_network.py | 84 +++++++++---------- pyproject.toml | 1 - 8 files changed, 121 insertions(+), 128 deletions(-) diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index ea337a72d..33b2529f0 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -38,7 +38,7 @@ https://www.youtube.com/watch?v=4RhLNDqcjpA import string -import numpy +import numpy as np from maths.greatest_common_divisor import greatest_common_divisor @@ -49,11 +49,11 @@ class HillCipher: # i.e. 
a total of 36 characters # take x and return x % len(key_string) - modulus = numpy.vectorize(lambda x: x % 36) + modulus = np.vectorize(lambda x: x % 36) - to_int = numpy.vectorize(round) + to_int = np.vectorize(round) - def __init__(self, encrypt_key: numpy.ndarray) -> None: + def __init__(self, encrypt_key: np.ndarray) -> None: """ encrypt_key is an NxN numpy array """ @@ -63,7 +63,7 @@ class HillCipher: def replace_letters(self, letter: str) -> int: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_letters('T') 19 >>> hill_cipher.replace_letters('0') @@ -73,7 +73,7 @@ class HillCipher: def replace_digits(self, num: int) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_digits(19) 'T' >>> hill_cipher.replace_digits(26) @@ -83,10 +83,10 @@ class HillCipher: def check_determinant(self) -> None: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.check_determinant() """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -101,7 +101,7 @@ class HillCipher: def process_text(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.process_text('Testing Hill Cipher') 'TESTINGHILLCIPHERR' >>> hill_cipher.process_text('hello') @@ -117,7 +117,7 @@ class HillCipher: def encrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.encrypt('testing hill cipher') 'WHXYJOLM9C6XT085LL' >>> hill_cipher.encrypt('hello') @@ -129,7 +129,7 @@ class HillCipher: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[ 0 ] @@ -140,14 +140,14 @@ class HillCipher: return encrypted - def make_decrypt_key(self) -> numpy.ndarray: + def make_decrypt_key(self) -> np.ndarray: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.make_decrypt_key() array([[ 6, 25], [ 5, 26]]) """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -158,16 +158,14 @@ class HillCipher: break inv_key = ( - det_inv - * numpy.linalg.det(self.encrypt_key) - * numpy.linalg.inv(self.encrypt_key) + det_inv * np.linalg.det(self.encrypt_key) * np.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(inv_key)) def decrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL') 'TESTINGHILLCIPHERR' >>> hill_cipher.decrypt('85FF00') @@ -180,7 +178,7 @@ class HillCipher: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T 
batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0] decrypted_batch = "".join( self.replace_digits(num) for num in batch_decrypted @@ -199,7 +197,7 @@ def main() -> None: row = [int(x) for x in input().split()] hill_matrix.append(row) - hc = HillCipher(numpy.array(hill_matrix)) + hc = HillCipher(np.array(hill_matrix)) print("Would you like to encrypt or decrypt some text? (1 or 2)") option = input("\n1. Encrypt\n2. Decrypt\n") diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 482e1eddf..1eef4573b 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -25,8 +25,8 @@ import warnings from collections.abc import Callable from typing import Any -import numpy -from matplotlib import pyplot +import matplotlib.pyplot as plt +import numpy as np c_cauliflower = 0.25 + 0.0j c_polynomial_1 = -0.4 + 0.6j @@ -37,22 +37,20 @@ window_size = 2.0 nb_pixels = 666 -def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray: +def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. >>> eval_exponential(0, 0) 1.0 - >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15 + >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 True >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 True """ - return numpy.exp(z_values) + c_parameter + return np.exp(z_values) + c_parameter -def eval_quadratic_polynomial( - c_parameter: complex, z_values: numpy.ndarray -) -> numpy.ndarray: +def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ >>> eval_quadratic_polynomial(0, 2) 4 @@ -66,7 +64,7 @@ def eval_quadratic_polynomial( return z_values * z_values + c_parameter -def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: +def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray: """ Create a grid of complex values of size nb_pixels*nb_pixels with real and imaginary parts ranging from -window_size to window_size (inclusive). @@ -77,20 +75,20 @@ def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: [ 0.-1.j, 0.+0.j, 0.+1.j], [ 1.-1.j, 1.+0.j, 1.+1.j]]) """ - x = numpy.linspace(-window_size, window_size, nb_pixels) + x = np.linspace(-window_size, window_size, nb_pixels) x = x.reshape((nb_pixels, 1)) - y = numpy.linspace(-window_size, window_size, nb_pixels) + y = np.linspace(-window_size, window_size, nb_pixels) y = y.reshape((1, nb_pixels)) return x + 1.0j * y def iterate_function( - eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray], + eval_function: Callable[[Any, np.ndarray], np.ndarray], function_params: Any, nb_iterations: int, - z_0: numpy.ndarray, + z_0: np.ndarray, infinity: float | None = None, -) -> numpy.ndarray: +) -> np.ndarray: """ Iterate the function "eval_function" exactly nb_iterations times. The first argument of the function is a parameter which is contained in @@ -98,22 +96,22 @@ def iterate_function( values to iterate from. This function returns the final iterates. - >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape + >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0]) 0j - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[1]) + ... 
np.array([0,1,2]))[1]) (1+0j) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2]) (256+0j) """ @@ -121,8 +119,8 @@ def iterate_function( for _ in range(nb_iterations): z_n = eval_function(function_params, z_n) if infinity is not None: - numpy.nan_to_num(z_n, copy=False, nan=infinity) - z_n[abs(z_n) == numpy.inf] = infinity + np.nan_to_num(z_n, copy=False, nan=infinity) + z_n[abs(z_n) == np.inf] = infinity return z_n @@ -130,21 +128,21 @@ def show_results( function_label: str, function_params: Any, escape_radius: float, - z_final: numpy.ndarray, + z_final: np.ndarray, ) -> None: """ Plots of whether the absolute value of z_final is greater than the value of escape_radius. Adds the function_label and function_params to the title. - >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) + >>> show_results('80', 0, 1, np.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) """ abs_z_final = (abs(z_final)).transpose() abs_z_final[:, :] = abs_z_final[::-1, :] - pyplot.matshow(abs_z_final < escape_radius) - pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$") - pyplot.show() + plt.matshow(abs_z_final < escape_radius) + plt.title(f"Julia set of ${function_label}$, $c={function_params}$") + plt.show() def ignore_overflow_warnings() -> None: diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index 30cd4b39c..724b78f41 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -22,25 +22,25 @@ Requirements (pip): from __future__ import annotations -import matplotlib.pyplot as plt # type: ignore -import numpy +import matplotlib.pyplot as plt +import numpy as np # initial triangle of Koch snowflake -VECTOR_1 = numpy.array([0, 0]) -VECTOR_2 = numpy.array([0.5, 0.8660254]) -VECTOR_3 = numpy.array([1, 0]) +VECTOR_1 = np.array([0, 0]) +VECTOR_2 = np.array([0.5, 0.8660254]) +VECTOR_3 = np.array([1, 0]) INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] # uncomment for simple Koch curve instead of Koch snowflake # INITIAL_VECTORS = [VECTOR_1, VECTOR_3] -def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]: +def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]: """ Go through the number of iterations determined by the argument "steps". Be careful with high values (above 5) since the time to calculate increases exponentially. - >>> iterate([numpy.array([0, 0]), numpy.array([1, 0])], 1) + >>> iterate([np.array([0, 0]), np.array([1, 0])], 1) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. ]), array([1, 0])] """ @@ -50,13 +50,13 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar return vectors -def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: +def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]: """ Loops through each pair of adjacent vectors. Each line between two adjacent vectors is divided into 4 segments by adding 3 additional vectors in-between the original two vectors. The vector in the middle is constructed through a 60 degree rotation so it is bent outwards. - >>> iteration_step([numpy.array([0, 0]), numpy.array([1, 0])]) + >>> iteration_step([np.array([0, 0]), np.array([1, 0])]) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. 
]), array([1, 0])] """ @@ -74,22 +74,22 @@ def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: return new_vectors -def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray: +def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray: """ Standard rotation of a 2D vector with a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix ) - >>> rotate(numpy.array([1, 0]), 60) + >>> rotate(np.array([1, 0]), 60) array([0.5 , 0.8660254]) - >>> rotate(numpy.array([1, 0]), 90) + >>> rotate(np.array([1, 0]), 90) array([6.123234e-17, 1.000000e+00]) """ - theta = numpy.radians(angle_in_degrees) - c, s = numpy.cos(theta), numpy.sin(theta) - rotation_matrix = numpy.array(((c, -s), (s, c))) - return numpy.dot(rotation_matrix, vector) + theta = np.radians(angle_in_degrees) + c, s = np.cos(theta), np.sin(theta) + rotation_matrix = np.array(((c, -s), (s, c))) + return np.dot(rotation_matrix, vector) -def plot(vectors: list[numpy.ndarray]) -> None: +def plot(vectors: list[np.ndarray]) -> None: """ Utility function to plot the vectors using matplotlib.pyplot No doctest was implemented since this function does not have a return value diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 7c22329ad..6eeb89da6 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -78,7 +78,7 @@ class BezierCurve: step_size: defines the step(s) at which to evaluate the Bezier curve. The smaller the step size, the finer the curve produced. """ - from matplotlib import pyplot as plt # type: ignore + from matplotlib import pyplot as plt to_plot_x: list[float] = [] # x coordinates of points to plot to_plot_y: list[float] = [] # y coordinates of points to plot diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index db38b3c95..95463faf5 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -3,7 +3,7 @@ Implementation of gradient descent algorithm for minimizing cost of a linear hyp function. 
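The alias changes in the hunk below leave the algorithm itself untouched: the training loop keeps applying the update theta <- theta - learning_rate * dJ/dtheta and stops once np.allclose finds two successive parameter vectors within tolerance. A minimal one-parameter sketch of that stopping rule, with a made-up cost J(t) = t**2 and learning rate:

    import numpy as np

    theta, learning_rate = np.array([5.0]), 0.1

    def gradient(t: np.ndarray) -> np.ndarray:
        return 2 * t  # derivative of the illustrative cost J(t) = t**2

    while True:
        new_theta = theta - learning_rate * gradient(theta)
        if np.allclose(theta, new_theta, atol=1e-8):
            break  # converged: the parameters stopped moving
        theta = new_theta
    # theta is now near 0, the minimizer of J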
""" -import numpy +import numpy as np # List of input, output pairs train_data = ( @@ -116,7 +116,7 @@ def run_gradient_descent(): temp_parameter_vector[i] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) - if numpy.allclose( + if np.allclose( parameter_vector, temp_parameter_vector, atol=absolute_error_limit, diff --git a/neural_network/input_data.py b/neural_network/input_data.py index f7ae86b48..9d4195487 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -22,7 +22,7 @@ import os import typing import urllib -import numpy +import numpy as np from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -39,8 +39,8 @@ DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/" def _read32(bytestream): - dt = numpy.dtype(numpy.uint32).newbyteorder(">") - return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] + dt = np.dtype(np.uint32).newbyteorder(">") + return np.frombuffer(bytestream.read(4), dtype=dt)[0] @deprecated(None, "Please use tf.data to implement this functionality.") @@ -68,7 +68,7 @@ def _extract_images(f): rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) - data = numpy.frombuffer(buf, dtype=numpy.uint8) + data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_images, rows, cols, 1) return data @@ -77,8 +77,8 @@ def _extract_images(f): def _dense_to_one_hot(labels_dense, num_classes): """Convert class labels from scalars to one-hot vectors.""" num_labels = labels_dense.shape[0] - index_offset = numpy.arange(num_labels) * num_classes - labels_one_hot = numpy.zeros((num_labels, num_classes)) + index_offset = np.arange(num_labels) * num_classes + labels_one_hot = np.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot @@ -107,7 +107,7 @@ def _extract_labels(f, one_hot=False, num_classes=10): ) num_items = _read32(bytestream) buf = bytestream.read(num_items) - labels = numpy.frombuffer(buf, dtype=numpy.uint8) + labels = np.frombuffer(buf, dtype=np.uint8) if one_hot: return _dense_to_one_hot(labels, num_classes) return labels @@ -153,7 +153,7 @@ class _DataSet: """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - numpy.random.seed(seed1 if seed is None else seed2) + np.random.seed(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -175,8 +175,8 @@ class _DataSet: ) if dtype == dtypes.float32: # Convert from [0, 255] -> [0.0, 1.0]. 
- images = images.astype(numpy.float32) - images = numpy.multiply(images, 1.0 / 255.0) + images = images.astype(np.float32) + images = np.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 @@ -210,8 +210,8 @@ class _DataSet: start = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: - perm0 = numpy.arange(self._num_examples) - numpy.random.shuffle(perm0) + perm0 = np.arange(self._num_examples) + np.random.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -224,8 +224,8 @@ class _DataSet: labels_rest_part = self._labels[start : self._num_examples] # Shuffle the data if shuffle: - perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) + perm = np.arange(self._num_examples) + np.random.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch @@ -235,8 +235,8 @@ class _DataSet: images_new_part = self._images[start:end] labels_new_part = self._labels[start:end] return ( - numpy.concatenate((images_rest_part, images_new_part), axis=0), - numpy.concatenate((labels_rest_part, labels_new_part), axis=0), + np.concatenate((images_rest_part, images_new_part), axis=0), + np.concatenate((labels_rest_part, labels_new_part), axis=0), ) else: self._index_in_epoch += batch_size diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index 7b374a93d..dea7e2342 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -5,11 +5,11 @@ References: - https://en.wikipedia.org/wiki/Feedforward_neural_network (Feedforward) """ -import numpy +import numpy as np class TwoHiddenLayerNeuralNetwork: - def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None: + def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: """ This function initializes the TwoHiddenLayerNeuralNetwork class with random weights for every layer and initializes predicted output with zeroes. @@ -28,30 +28,28 @@ class TwoHiddenLayerNeuralNetwork: # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = numpy.random.rand( + self.input_layer_and_first_hidden_layer_weights = np.random.rand( self.input_array.shape[1], 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand( - 4, 3 - ) + self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) # Real output values provided. self.output_array = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. - self.predicted_output = numpy.zeros(output_array.shape) + self.predicted_output = np.zeros(output_array.shape) - def feedforward(self) -> numpy.ndarray: + def feedforward(self) -> np.ndarray: """ The information moves in only one direction i.e. 
forward from the input nodes, through the two
         hidden nodes and to the output nodes.
         There are no cycles or loops in the network.

         Return layer_between_second_hidden_layer_and_output
             (i.e. the last layer of the neural network).

-        >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
-        >>> output_val = numpy.array(([0], [0], [0]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
+        >>> output_val = np.array(([0], [0], [0]), dtype=float)
         >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
         >>> res = nn.feedforward()
-        >>> array_sum = numpy.sum(res)
-        >>> numpy.isnan(array_sum)
+        >>> array_sum = np.sum(res)
+        >>> np.isnan(array_sum)
         False
         """
         # Layer_between_input_and_first_hidden_layer is the layer connecting the
         # input nodes with the first hidden layer nodes.
         self.layer_between_input_and_first_hidden_layer = sigmoid(
-            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
+            np.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
         )

         # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
         # connecting the first hidden set of nodes with the second hidden set of nodes.
         self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
-            numpy.dot(
+            np.dot(
                 self.layer_between_input_and_first_hidden_layer,
                 self.first_hidden_layer_and_second_hidden_layer_weights,
             )
         )

         # layer_between_second_hidden_layer_and_output is the layer connecting
         # second hidden layer with the output node.
         self.layer_between_second_hidden_layer_and_output = sigmoid(
-            numpy.dot(
+            np.dot(
                 self.layer_between_first_hidden_layer_and_second_hidden_layer,
                 self.second_hidden_layer_and_output_layer_weights,
             )
         )

         return self.layer_between_second_hidden_layer_and_output

     def back_propagation(self) -> None:
         """
         Function for fine-tuning the weights of the neural net based on the
         error rate obtained in the previous epoch (i.e., iteration).
         Updating is done using the derivative of the sigmoid activation function.
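For reference on the derivative these weight updates rely on: with s(x) = 1/(1 + e^(-x)), the identity s'(x) = s(x) * (1 - s(x)) holds, which is why the code can form the derivative directly from activations it has already computed instead of re-evaluating the exponential. A small self-contained check of the identity against a central finite difference (the helper and sample points are illustrative):

    import numpy as np

    def sigmoid(x: np.ndarray) -> np.ndarray:
        return 1 / (1 + np.exp(-x))

    x = np.linspace(-3.0, 3.0, 7)
    s = sigmoid(x)
    from_activation = s * (1 - s)  # derivative via the identity s * (1 - s)
    h = 1e-6
    numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)  # finite difference
    assert np.allclose(from_activation, numeric, atol=1e-8)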
-        >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
-        >>> output_val = numpy.array(([0], [0], [0]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
+        >>> output_val = np.array(([0], [0], [0]), dtype=float)
         >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
         >>> res = nn.feedforward()
         >>> nn.back_propagation()
@@ -110,15 +108,15 @@ class TwoHiddenLayerNeuralNetwork:
         False
         """

-        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
+        updated_second_hidden_layer_and_output_layer_weights = np.dot(
             self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
             2
             * (self.output_array - self.predicted_output)
             * sigmoid_derivative(self.predicted_output),
         )
-        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
+        updated_first_hidden_layer_and_second_hidden_layer_weights = np.dot(
             self.layer_between_input_and_first_hidden_layer.T,
-            numpy.dot(
+            np.dot(
                 2
                 * (self.output_array - self.predicted_output)
                 * sigmoid_derivative(self.predicted_output),
                 self.second_hidden_layer_and_output_layer_weights.T,
             )
             * sigmoid_derivative(
                 self.layer_between_first_hidden_layer_and_second_hidden_layer
             ),
         )
-        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
+        updated_input_layer_and_first_hidden_layer_weights = np.dot(
             self.input_array.T,
-            numpy.dot(
-                numpy.dot(
+            np.dot(
+                np.dot(
                     2
                     * (self.output_array - self.predicted_output)
                     * sigmoid_derivative(self.predicted_output),
@@ -155,7 +153,7 @@ class TwoHiddenLayerNeuralNetwork:
             updated_second_hidden_layer_and_output_layer_weights
         )

-    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
+    def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None:
         """
         Performs the feedforward and back propagation process for the
         given number of iterations. Every iteration will update the weights of the
         neural network.

         output : real output values, required for calculating loss.
         iterations : number of iterations to train the neural network.
         give_loss : boolean value, If True then prints loss for each iteration,
                     If False then nothing is printed

-        >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
-        >>> output_val = numpy.array(([0], [1], [1]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
+        >>> output_val = np.array(([0], [1], [1]), dtype=float)
         >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
         >>> first_iteration_weights = nn.feedforward()
         >>> nn.back_propagation()
@@ -179,10 +177,10 @@ class TwoHiddenLayerNeuralNetwork:
             self.output = self.feedforward()
             self.back_propagation()
             if give_loss:
-                loss = numpy.mean(numpy.square(output - self.feedforward()))
+                loss = np.mean(np.square(output - self.feedforward()))
                 print(f"Iteration {iteration} Loss: {loss}")

-    def predict(self, input_arr: numpy.ndarray) -> int:
+    def predict(self, input_arr: np.ndarray) -> int:
         """
         Predicts the output for the given input values using
         the trained neural network.

         The output value given by the model ranges between 0 and 1.
         The predict function returns 1 if the model value is greater
         than the threshold value else returns 0,
         as the real output values are in binary.
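
        The binarization step is a plain threshold; with the 0.6 cut-off this
        class uses, a standalone illustration:

        >>> int(0.73 > 0.6)
        1
        >>> int(0.41 > 0.6)
        0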
- >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) >>> nn.predict([0, 1, 0]) in (0, 1) @@ -204,18 +202,18 @@ class TwoHiddenLayerNeuralNetwork: self.array = input_arr self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.array, self.input_layer_and_first_hidden_layer_weights) ) self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) ) self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -224,26 +222,26 @@ class TwoHiddenLayerNeuralNetwork: return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0]) -def sigmoid(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid(value: np.ndarray) -> np.ndarray: """ Applies sigmoid activation function. return normalized values - >>> sigmoid(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[0.73105858, 0.5 , 0.88079708], [0.73105858, 0.5 , 0.5 ]]) """ - return 1 / (1 + numpy.exp(-value)) + return 1 / (1 + np.exp(-value)) -def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid_derivative(value: np.ndarray) -> np.ndarray: """ Provides the derivative value of the sigmoid function. returns derivative of the sigmoid value - >>> sigmoid_derivative(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid_derivative(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[ 0., 0., -2.], [ 0., 0., 0.]]) """ @@ -264,7 +262,7 @@ def example() -> int: True """ # Input values. - test_input = numpy.array( + test_input = np.array( ( [0, 0, 0], [0, 0, 1], @@ -275,11 +273,11 @@ def example() -> int: [1, 1, 0], [1, 1, 1], ), - dtype=numpy.float64, + dtype=np.float64, ) # True output values for the given input values. - output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) + output = np.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=np.float64) # Calling neural network class. neural_network = TwoHiddenLayerNeuralNetwork( @@ -290,7 +288,7 @@ def example() -> int: # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=output, iterations=10, give_loss=False) - return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64)) + return neural_network.predict(np.array(([1, 1, 1]), dtype=np.float64)) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index b9f3115df..22da7cb77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. 
Add an `__init__.py`. -- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME From c328b000ecdd4ad08d029999144e7ec702022390 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 21:35:37 +0200 Subject: [PATCH 006/104] [pre-commit.ci] pre-commit autoupdate (#11339) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.4 → v0.3.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.4...v0.3.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b101207d..e6b1b0442 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.4 + rev: v0.3.5 hooks: - id: ruff - id: ruff-format From 39daaf8248b37404f69e8459d0378d77b59c6c0f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Apr 2024 22:36:41 +0300 Subject: [PATCH 007/104] Enable ruff RUF100 rule (#11337) --- audio_filters/butterworth_filter.py | 14 +++++++------- data_structures/binary_tree/basic_binary_tree.py | 2 +- .../binary_tree/non_recursive_segment_tree.py | 2 +- data_structures/binary_tree/red_black_tree.py | 2 +- data_structures/binary_tree/segment_tree.py | 6 +++--- data_structures/heap/min_heap.py | 2 +- dynamic_programming/longest_common_subsequence.py | 2 +- .../longest_increasing_subsequence_o_nlogn.py | 4 ++-- graphs/articulation_points.py | 2 +- graphs/dinic.py | 2 +- other/sdes.py | 4 ++-- project_euler/problem_011/sol2.py | 2 +- pyproject.toml | 1 - strings/manacher.py | 2 +- 14 files changed, 23 insertions(+), 24 deletions(-) diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index 6449bc3f3..4e6ea1b18 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -13,7 +13,7 @@ Alternatively you can use scipy.signal.butter, which should yield the same resul def make_lowpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a low-pass filter @@ -43,7 +43,7 @@ def make_lowpass( def make_highpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a high-pass filter @@ -73,7 +73,7 @@ def make_highpass( def make_bandpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a band-pass filter @@ -104,7 +104,7 @@ def make_bandpass( def make_allpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates an all-pass filter @@ -132,7 +132,7 @@ def make_peak( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a peak filter @@ -164,7 +164,7 @@ def make_lowshelf( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a low-shelf filter 
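
# RUF100, enabled by this patch, flags `# noqa` directives that no longer
# suppress any emitted diagnostic, which is why the unused `# noqa: B008`
# markers in this file can simply be deleted. A standalone illustration of
# the rule (assuming E741 is among the enabled rules):
count = 1  # noqa: E741  # unused suppression: RUF100 reports it
l = 1  # noqa: E741  # used: silences a real "ambiguous variable name" hit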
@@ -201,7 +201,7 @@ def make_highshelf( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a high-shelf filter diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 0439413d9..9d4c1bdbb 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -85,7 +85,7 @@ class BinaryTree: """ return self._depth(self.root) - def _depth(self, node: Node | None) -> int: # noqa: UP007 + def _depth(self, node: Node | None) -> int: if not node: return 0 return 1 + max(self._depth(node.left), self._depth(node.right)) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 42c78a3a1..45c476701 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -87,7 +87,7 @@ class SegmentTree(Generic[T]): p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T | None: # noqa: E741 + def query(self, l: int, r: int) -> T | None: """ Get range query value in log(N) time :param l: left element index diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index bdd808c82..e68d8d1e3 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -152,7 +152,7 @@ class RedBlackTree: self.grandparent.color = 1 self.grandparent._insert_repair() - def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912 + def remove(self, label: int) -> RedBlackTree: """Remove label from this tree.""" if self.label == label: if self.left and self.right: diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 3b0b32946..bb9c1ae22 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -35,7 +35,7 @@ class SegmentTree: """ return idx * 2 + 1 - def build(self, idx, l, r): # noqa: E741 + def build(self, idx, l, r): if l == r: self.st[idx] = self.A[l] else: @@ -56,7 +56,7 @@ class SegmentTree: """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 + def update_recursive(self, idx, l, r, a, b, val): """ update(1, 1, N, a, b, v) for update val v to [a,b] """ @@ -83,7 +83,7 @@ class SegmentTree: """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) - def query_recursive(self, idx, l, r, a, b): # noqa: E741 + def query_recursive(self, idx, l, r, a, b): """ query(1, 1, N, a, b) for query max of [a,b] """ diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index ecb187649..39f6d99e8 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,7 +66,7 @@ class MinHeap: # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) # noqa: E741 + l = self.get_left_child_idx(idx) r = self.get_right_child_idx(idx) smallest = idx diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 178b4169b..22f50a166 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,7 +38,7 @@ def 
longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741 + l = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py index 5e11d729f..44e333e97 100644 --- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py +++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py @@ -7,13 +7,13 @@ from __future__ import annotations -def ceil_index(v, l, r, key): # noqa: E741 +def ceil_index(v, l, r, key): while r - l > 1: m = (l + r) // 2 if v[m] >= key: r = m else: - l = m # noqa: E741 + l = m return r diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index d28045282..3fcaffd73 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,5 +1,5 @@ # Finding Articulation Points in Undirected Graph -def compute_ap(l): # noqa: E741 +def compute_ap(l): n = len(l) out_edge_count = 0 low = [0] * n diff --git a/graphs/dinic.py b/graphs/dinic.py index aaf3a1195..4f5e81236 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ class Dinic: # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # noqa: E741 l = 30 maybe faster for random data + for l in range(31): # l = 30 maybe faster for random data while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/other/sdes.py b/other/sdes.py index 31105984b..a69add343 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) # noqa: E741 + l = apply_sbox(s0, temp[:4]) r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l # noqa: E741 + l = "0" * (2 - len(l)) + l r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) temp = xor(left, temp) diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 9ea0db991..295830533 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,7 +35,7 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] # noqa: E741 + l = [] for _ in range(20): l.append([int(x) for x in f.readline().split()]) diff --git a/pyproject.toml b/pyproject.toml index 22da7cb77..c8a8744ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules - "RUF100", # Unused `noqa` directive -- FIX ME "S101", # Use of `assert` detected -- DO NOT FIX "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME diff --git a/strings/manacher.py b/strings/manacher.py index c58c7c19e..ca546e533 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -50,7 +50,7 @@ def palindromic_string(input_string: str) -> str: # does this string is ending after the previously explored end (that is r) ? 
# if yes the update the new r to the last index of this if j + k - 1 > r: - l = j - k + 1 # noqa: E741 + l = j - k + 1 r = j + k - 1 # update max_length and start position From f8a948914b928d9fd3c0e32c034bd90315caa389 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Apr 2024 22:39:31 +0300 Subject: [PATCH 008/104] Enable ruff NPY002 rule (#11336) --- linear_algebra/src/conjugate_gradient.py | 6 ++++-- machine_learning/decision_tree.py | 3 ++- machine_learning/k_means_clust.py | 6 +++--- machine_learning/sequential_minimum_optimization.py | 5 +++-- neural_network/back_propagation_neural_network.py | 8 +++++--- neural_network/convolution_neural_network.py | 13 +++++++------ neural_network/input_data.py | 6 +++--- neural_network/two_hidden_layers_neural_network.py | 9 +++++---- pyproject.toml | 1 - 9 files changed, 32 insertions(+), 25 deletions(-) diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 4c0b58deb..45da35813 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -61,7 +61,8 @@ def _create_spd_matrix(dimension: int) -> Any: >>> _is_matrix_spd(spd_matrix) True """ - random_matrix = np.random.randn(dimension, dimension) + rng = np.random.default_rng() + random_matrix = rng.normal(size=(dimension, dimension)) spd_matrix = np.dot(random_matrix, random_matrix.T) assert _is_matrix_spd(spd_matrix) return spd_matrix @@ -157,7 +158,8 @@ def test_conjugate_gradient() -> None: # Create linear system with SPD matrix and known solution x_true. dimension = 3 spd_matrix = _create_spd_matrix(dimension) - x_true = np.random.randn(dimension, 1) + rng = np.random.default_rng() + x_true = rng.normal(size=(dimension, 1)) b = np.dot(spd_matrix, x_true) # Numpy solution. diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 7f129919a..e48905eea 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -187,7 +187,8 @@ def main(): tree = DecisionTree(depth=10, min_leaf_size=10) tree.train(x, y) - test_cases = (np.random.rand(10) * 2) - 1 + rng = np.random.default_rng() + test_cases = (rng.random(10) * 2) - 1 predictions = np.array([tree.predict(x) for x in test_cases]) avg_error = np.mean((predictions - test_cases) ** 2) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 9f6646944..a926362fc 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -55,12 +55,12 @@ TAG = "K-MEANS-CLUST/ " def get_initial_centroids(data, k, seed=None): """Randomly choose k data points as initial centroids""" - if seed is not None: # useful for obtaining consistent results - np.random.seed(seed) + # useful for obtaining consistent results + rng = np.random.default_rng(seed) n = data.shape[0] # number of data points # Pick K indices from range [0, N). - rand_indices = np.random.randint(0, n, k) + rand_indices = rng.integers(0, n, k) # Keep centroids as dense format, as many entries will be nonzero due to averaging. 
# As long as at least one document in a cluster contains a word, diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index be16baca1..408d59ab5 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -289,12 +289,13 @@ class SmoSVM: if cmd is None: return - for i2 in np.roll(self.unbound, np.random.choice(self.length)): + rng = np.random.default_rng() + for i2 in np.roll(self.unbound, rng.choice(self.length)): cmd = yield i1, i2 if cmd is None: return - for i2 in np.roll(self._all_samples, np.random.choice(self.length)): + for i2 in np.roll(self._all_samples, rng.choice(self.length)): cmd = yield i1, i2 if cmd is None: return diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 7e0bdbbe2..6131a13e9 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -51,8 +51,9 @@ class DenseLayer: self.is_input_layer = is_input_layer def initializer(self, back_units): - self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units))) - self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T + rng = np.random.default_rng() + self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units))) + self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T if self.activation is None: self.activation = sigmoid @@ -174,7 +175,8 @@ class BPNN: def example(): - x = np.random.randn(10, 10) + rng = np.random.default_rng() + x = rng.normal(size=(10, 10)) y = np.asarray( [ [0.8, 0.4], diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 07cc456b7..3c5519244 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -41,15 +41,16 @@ class CNN: self.size_pooling1 = size_p1 self.rate_weight = rate_w self.rate_thre = rate_t + rng = np.random.default_rng() self.w_conv1 = [ - np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) + np.asmatrix(-1 * rng.random((self.conv1[0], self.conv1[0])) + 0.5) for i in range(self.conv1[1]) ] - self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) - self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5) - self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1 - self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1 - self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1 + self.wkj = np.asmatrix(-1 * rng.random((self.num_bp3, self.num_bp2)) + 0.5) + self.vji = np.asmatrix(-1 * rng.random((self.num_bp2, self.num_bp1)) + 0.5) + self.thre_conv1 = -2 * rng.random(self.conv1[1]) + 1 + self.thre_bp2 = -2 * rng.random(self.num_bp2) + 1 + self.thre_bp3 = -2 * rng.random(self.num_bp3) + 1 def save_model(self, save_path): # save model dict with pickle diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 9d4195487..d189e3f9e 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -153,7 +153,7 @@ class _DataSet: """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - np.random.seed(seed1 if seed is None else seed2) + self._rng = np.random.default_rng(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected 
uint8 or float32" % dtype) @@ -211,7 +211,7 @@ class _DataSet: # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: perm0 = np.arange(self._num_examples) - np.random.shuffle(perm0) + self._rng.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -225,7 +225,7 @@ class _DataSet: # Shuffle the data if shuffle: perm = np.arange(self._num_examples) - np.random.shuffle(perm) + self._rng.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index dea7e2342..d488de590 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -28,19 +28,20 @@ class TwoHiddenLayerNeuralNetwork: # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = np.random.rand( - self.input_array.shape[1], 4 + rng = np.random.default_rng() + self.input_layer_and_first_hidden_layer_weights = rng.random( + (self.input_array.shape[1], 4) ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) + self.first_hidden_layer_and_second_hidden_layer_weights = rng.random((4, 3)) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = rng.random((3, 1)) # Real output values provided. self.output_array = output_array diff --git a/pyproject.toml b/pyproject.toml index c8a8744ab..50cd38005 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME - "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From 93fb555e0a97096f62a122e73cfdc6f0579cefbe Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 04:27:56 +0300 Subject: [PATCH 009/104] Enable ruff SIM102 rule (#11341) * Enable ruff SIM102 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/sudoku_solver.py | 7 +++---- .../stacks/balanced_parentheses.py | 7 ++++--- graphs/a_star.py | 20 ++++++++++++------- graphs/bi_directional_dijkstra.py | 8 +++++--- other/davis_putnam_logemann_loveland.py | 7 +++---- project_euler/problem_033/sol1.py | 10 +++++++--- project_euler/problem_037/sol1.py | 7 ++++--- project_euler/problem_107/sol1.py | 9 +++++---- project_euler/problem_207/sol1.py | 8 +++++--- pyproject.toml | 1 - scheduling/shortest_job_first.py | 13 +++++++----- scripts/validate_solutions.py | 11 ++++++---- web_programming/emails_from_url.py | 15 ++++++++------ 13 files changed, 73 insertions(+), 50 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index c9dffcde2..5c1cff06f 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -92,10 +92,9 @@ def eliminate(values, s, d): dplaces = [s for s in u if d in values[s]] if len(dplaces) == 0: return False ## Contradiction: no place for this value - elif len(dplaces) == 1: - # d can only be in one place in unit; assign it there - if not assign(values, dplaces[0], d): - return False + # d can only be in one place in unit; assign it there + elif len(dplaces) == 1 and not assign(values, dplaces[0], d): + return False return values diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py index 3c036c220..928815bb2 100644 --- a/data_structures/stacks/balanced_parentheses.py +++ b/data_structures/stacks/balanced_parentheses.py @@ -19,9 +19,10 @@ def balanced_parentheses(parentheses: str) -> bool: for bracket in parentheses: if bracket in bracket_pairs: stack.push(bracket) - elif bracket in (")", "]", "}"): - if stack.is_empty() or bracket_pairs[stack.pop()] != bracket: - return False + elif bracket in (")", "]", "}") and ( + stack.is_empty() or bracket_pairs[stack.pop()] != bracket + ): + return False return stack.is_empty() diff --git a/graphs/a_star.py b/graphs/a_star.py index 06da3b5cd..1d7063ccc 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -75,13 +75,19 @@ def search( for i in range(len(DIRECTIONS)): # to try out different valid actions x2 = x + DIRECTIONS[i][0] y2 = y + DIRECTIONS[i][1] - if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]): - if closed[x2][y2] == 0 and grid[x2][y2] == 0: - g2 = g + cost - f2 = g2 + heuristic[x2][y2] - cell.append([f2, g2, x2, y2]) - closed[x2][y2] = 1 - action[x2][y2] = i + if ( + x2 >= 0 + and x2 < len(grid) + and y2 >= 0 + and y2 < len(grid[0]) + and closed[x2][y2] == 0 + and grid[x2][y2] == 0 + ): + g2 = g + cost + f2 = g2 + heuristic[x2][y2] + cell.append([f2, 
g2, x2, y2]) + closed[x2][y2] = 1 + action[x2][y2] = i invpath = [] x = goal[0] y = goal[1] diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index 7b9eac6c8..d2c4030b9 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -36,9 +36,11 @@ def pass_and_relaxation( queue.put((new_cost_f, nxt)) cst_fwd[nxt] = new_cost_f parent[nxt] = v - if nxt in visited_backward: - if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: - shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] + if ( + nxt in visited_backward + and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance + ): + shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 5c6e2d9ff..3a76f3dfe 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -64,10 +64,9 @@ class Clause: value = model[symbol] else: continue - if value is not None: - # Complement assignment if literal is in complemented form - if literal.endswith("'"): - value = not value + # Complement assignment if literal is in complemented form + if value is not None and literal.endswith("'"): + value = not value self.literals[literal] = value def evaluate(self, model: dict[str, bool | None]) -> bool | None: diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index 187fd61bd..71790d34f 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -44,9 +44,13 @@ def fraction_list(digit_len: int) -> list[str]: last_digit = int("1" + "0" * digit_len) for num in range(den, last_digit): while den <= 99: - if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): - if is_digit_cancelling(num, den): - solutions.append(f"{num}/{den}") + if ( + (num != den) + and (num % 10 == den // 10) + and (den % 10 != 0) + and is_digit_cancelling(num, den) + ): + solutions.append(f"{num}/{den}") den += 1 num += 1 den = 10 diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index ef7686cbc..9c09065f4 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -85,9 +85,10 @@ def validate(n: int) -> bool: >>> validate(3797) True """ - if len(str(n)) > 3: - if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])): - return False + if len(str(n)) > 3 and ( + not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])) + ): + return False return True diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 3fe75909e..79cdd9370 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -81,10 +81,11 @@ class Graph: while len(subgraph.vertices) < len(self.vertices): min_weight = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): - if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): - if weight < min_weight: - min_edge = edge - min_weight = weight + if (edge[0] in subgraph.vertices) ^ ( + edge[1] in subgraph.vertices + ) and weight < min_weight: + min_edge = edge + min_weight = weight subgraph.add_edge(min_edge, min_weight) diff --git a/project_euler/problem_207/sol1.py b/project_euler/problem_207/sol1.py index 2b3591f51..c83dc1d4a 100644 --- a/project_euler/problem_207/sol1.py +++ b/project_euler/problem_207/sol1.py @@ -88,9 +88,11 @@ def solution(max_proportion: float = 1 / 12345) -> int: total_partitions += 1 if 
check_partition_perfect(partition_candidate): perfect_partitions += 1 - if perfect_partitions > 0: - if perfect_partitions / total_partitions < max_proportion: - return int(partition_candidate) + if ( + perfect_partitions > 0 + and perfect_partitions / total_partitions < max_proportion + ): + return int(partition_candidate) integer += 1 diff --git a/pyproject.toml b/pyproject.toml index 50cd38005..e3cf42c92 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SIM102", # Use a single `if` statement instead of nested `if` statements -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index cfd0417ea..6899ec87c 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -37,11 +37,14 @@ def calculate_waitingtime( # Process until all processes are completed while complete != no_of_processes: for j in range(no_of_processes): - if arrival_time[j] <= increment_time and remaining_time[j] > 0: - if remaining_time[j] < minm: - minm = remaining_time[j] - short = j - check = True + if ( + arrival_time[j] <= increment_time + and remaining_time[j] > 0 + and remaining_time[j] < minm + ): + minm = remaining_time[j] + short = j + check = True if not check: increment_time += 1 diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index ca4af5261..0afbdde31 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -71,10 +71,13 @@ def added_solution_file_path() -> list[pathlib.Path]: def collect_solution_file_paths() -> list[pathlib.Path]: - if os.environ.get("CI") and os.environ.get("GITHUB_EVENT_NAME") == "pull_request": - # Return only if there are any, otherwise default to all solutions - if filepaths := added_solution_file_path(): - return filepaths + # Return only if there are any, otherwise default to all solutions + if ( + os.environ.get("CI") + and os.environ.get("GITHUB_EVENT_NAME") == "pull_request" + and (filepaths := added_solution_file_path()) + ): + return filepaths return all_solution_file_paths() diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 6b4bacfe7..26c88e1b1 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -30,12 +30,15 @@ class Parser(HTMLParser): if tag == "a": # Check the list of defined attributes. for name, value in attrs: - # If href is defined, and not empty nor # print it. - if name == "href" and value != "#" and value != "": - # If not already in urls. - if value not in self.urls: - url = parse.urljoin(self.domain, value) - self.urls.append(url) + # If href is defined, not empty nor # print it and not already in urls. 
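
# SIM102, enabled by this patch, collapses a nested `if` with no `else` into
# a single condition joined with `and` -- the rewrite applied throughout this
# patch, including in the surrounding hunk. A runnable sketch with made-up
# values:
def keep_nested(value: str, urls: list[str]) -> None:
    if value != "#":  # flagged by SIM102
        if value not in urls:
            urls.append(value)


def keep_flat(value: str, urls: list[str]) -> None:
    if value != "#" and value not in urls:  # the suggested fix
        urls.append(value)


links: list[str] = []
keep_nested("/about", links)
keep_flat("/about", links)
assert links == ["/about"]  # both variants behave identically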
+ if ( + name == "href" + and value != "#" + and value != "" + and value not in self.urls + ): + url = parse.urljoin(self.domain, value) + self.urls.append(url) # Get main domain name (example.com) From f8cdb3e9482ddca85cd1bffa96c038afc13f9c85 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 19:44:37 +0300 Subject: [PATCH 010/104] Enable ruff S105 rule (#11343) * Enable ruff S105 rule * Update web_programming/recaptcha_verification.py --------- Co-authored-by: Christian Clauss --- pyproject.toml | 1 - web_programming/recaptcha_verification.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e3cf42c92..65a0754d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules "S101", # Use of `assert` detected -- DO NOT FIX - "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index b03afb28e..c9b691b28 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -43,7 +43,7 @@ except ImportError: def login_using_recaptcha(request): # Enter your recaptcha secret key here - secret_key = "secretKey" + secret_key = "secretKey" # noqa: S105 url = "https://www.google.com/recaptcha/api/siteverify" # when method is not POST, direct user to login page From f437f922792b8c7e3fbb168a1ec6bfdf183a7304 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:13:56 +0300 Subject: [PATCH 011/104] Enable ruff INP001 rule (#11346) * Enable ruff INP001 rule * Fix * Fix * Fix * Fix * Fix --- data_structures/arrays/__init__.py | 0 data_structures/hashing/tests/__init__.py | 0 digital_image_processing/morphological_operations/__init__.py | 0 electronics/__init__.py | 0 electronics/circular_convolution.py | 3 +-- fractals/__init__.py | 0 geometry/__init__.py | 0 greedy_methods/__init__.py | 0 linear_algebra/src/gaussian_elimination_pivoting/__init__.py | 0 linear_programming/__init__.py | 0 maths/numerical_analysis/__init__.py | 0 maths/special_numbers/__init__.py | 0 neural_network/activation_functions/__init__.py | 0 neural_network/activation_functions/mish.py | 3 ++- pyproject.toml | 1 - 15 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 data_structures/arrays/__init__.py create mode 100644 data_structures/hashing/tests/__init__.py create mode 100644 digital_image_processing/morphological_operations/__init__.py create mode 100644 electronics/__init__.py create mode 100644 fractals/__init__.py create mode 100644 geometry/__init__.py create mode 100644 greedy_methods/__init__.py create mode 100644 linear_algebra/src/gaussian_elimination_pivoting/__init__.py create mode 100644 linear_programming/__init__.py create mode 100644 maths/numerical_analysis/__init__.py create mode 100644 maths/special_numbers/__init__.py create mode 100644 neural_network/activation_functions/__init__.py diff --git a/data_structures/arrays/__init__.py b/data_structures/arrays/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/hashing/tests/__init__.py 
b/data_structures/hashing/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/digital_image_processing/morphological_operations/__init__.py b/digital_image_processing/morphological_operations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/electronics/__init__.py b/electronics/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index f2e35742e..768f2ad94 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -37,8 +37,7 @@ class CircularConvolution: using matrix method Usage: - >>> import circular_convolution as cc - >>> convolution = cc.CircularConvolution() + >>> convolution = CircularConvolution() >>> convolution.circular_convolution() [10, 10, 6, 14] diff --git a/fractals/__init__.py b/fractals/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/geometry/__init__.py b/geometry/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/greedy_methods/__init__.py b/greedy_methods/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/linear_algebra/src/gaussian_elimination_pivoting/__init__.py b/linear_algebra/src/gaussian_elimination_pivoting/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/linear_programming/__init__.py b/linear_programming/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/maths/numerical_analysis/__init__.py b/maths/numerical_analysis/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/maths/special_numbers/__init__.py b/maths/special_numbers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neural_network/activation_functions/__init__.py b/neural_network/activation_functions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py index e51655df8..57a91413f 100644 --- a/neural_network/activation_functions/mish.py +++ b/neural_network/activation_functions/mish.py @@ -7,7 +7,8 @@ https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Mish """ import numpy as np -from softplus import softplus + +from .softplus import softplus def mish(vector: np.ndarray) -> np.ndarray: diff --git a/pyproject.toml b/pyproject.toml index 65a0754d6..9689cf2b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From f5bbea3776a5038d0e428ce3c06c25086076e212 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:18:47 +0300 Subject: [PATCH 012/104] Enable ruff RUF005 rule (#11344) --- data_structures/binary_tree/binary_search_tree.py | 2 +- dynamic_programming/subset_generation.py | 2 +- maths/odd_sieve.py | 2 +- pyproject.toml | 5 ++++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 090e3e25f..32194ddc2 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -336,7 +336,7 @@ def inorder(curr_node: Node | None) -> list[Node]: """ node_list = [] if curr_node is not None: - node_list = inorder(curr_node.left) + [curr_node] + inorder(curr_node.right) + node_list = [*inorder(curr_node.left), curr_node, *inorder(curr_node.right)] return node_list diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 1be412b93..d490bca73 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -45,7 +45,7 @@ def subset_combinations(elements: list[int], n: int) -> list: for i in range(1, r + 1): for j in range(i, 0, -1): for prev_combination in dp[j - 1]: - dp[j].append(tuple(prev_combination) + (elements[i - 1],)) + dp[j].append((*prev_combination, elements[i - 1])) try: return sorted(dp[n]) diff --git a/maths/odd_sieve.py b/maths/odd_sieve.py index 60e92921a..06605ca54 100644 --- a/maths/odd_sieve.py +++ b/maths/odd_sieve.py @@ -33,7 +33,7 @@ def odd_sieve(num: int) -> list[int]: 0, ceil((num - i_squared) / (i << 1)) ) - return [2] + list(compress(range(3, num, 2), sieve)) + return [2, *list(compress(range(3, num, 2), sieve))] if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 9689cf2b3..e1d7dc91b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,10 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF00", # Ambiguous unicode character and other rules + "RUF001", # String contains ambiguous {}. Did you mean {}? + "RUF002", # Docstring contains ambiguous {}. Did you mean {}? + "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
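
# RUF005, enabled by this patch, prefers iterable unpacking over sequence
# concatenation -- the rewrite shown in this patch's code hunks, e.g.
# (illustrative): `[2] + list(rest)` becomes `[2, *rest]`, and
# `tuple(prev) + (x,)` becomes `(*prev, x)`.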
+ "RUF007", # Prefer itertools.pairwise() over zip() when iterating over successive pairs "S101", # Use of `assert` detected -- DO NOT FIX "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME From 53b2926704f3ad3ec2134a114be3a338e755e28a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:29:34 +0300 Subject: [PATCH 013/104] Enable ruff PGH003 rule (#11345) * Enable ruff PGH003 rule * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- compression/huffman.py | 4 ++-- data_structures/binary_tree/binary_search_tree.py | 4 ++-- data_structures/linked_list/rotate_to_the_right.py | 2 +- fractals/mandelbrot.py | 2 +- graphics/bezier_curve.py | 2 +- maths/entropy.py | 4 ++-- matrix/spiral_print.py | 4 +++- matrix/tests/test_matrix_operation.py | 2 +- project_euler/problem_092/sol1.py | 2 +- project_euler/problem_104/sol1.py | 2 +- pyproject.toml | 1 - scripts/validate_filenames.py | 2 +- scripts/validate_solutions.py | 6 +++--- web_programming/covid_stats_via_xpath.py | 2 +- 14 files changed, 20 insertions(+), 19 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index 65e5c2f25..44eda6c03 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -40,7 +40,7 @@ def build_tree(letters: list[Letter]) -> Letter | TreeNode: Run through the list of Letters and build the min heap for the Huffman Tree. """ - response: list[Letter | TreeNode] = letters # type: ignore + response: list[Letter | TreeNode] = list(letters) while len(response) > 1: left = response.pop(0) right = response.pop(0) @@ -59,7 +59,7 @@ def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]: if isinstance(root, Letter): root.bitstring[root.letter] = bitstring return [root] - treenode: TreeNode = root # type: ignore + treenode: TreeNode = root letters = [] letters += traverse_tree(treenode.left, bitstring + "0") letters += traverse_tree(treenode.right, bitstring + "1") diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 32194ddc2..3f214d011 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -294,9 +294,9 @@ class BinarySearchTree: predecessor = self.get_max( node.left ) # Gets the max value of the left branch - self.remove(predecessor.value) # type: ignore + self.remove(predecessor.value) # type: ignore[union-attr] node.value = ( - predecessor.value # type: ignore + predecessor.value # type: ignore[union-attr] ) # Assigns the value to the node to delete and keep tree structure def preorder_traverse(self, node: Node | None) -> Iterable: diff --git a/data_structures/linked_list/rotate_to_the_right.py b/data_structures/linked_list/rotate_to_the_right.py index 51b10481c..6b1c54f4b 100644 --- a/data_structures/linked_list/rotate_to_the_right.py +++ b/data_structures/linked_list/rotate_to_the_right.py @@ -63,7 +63,7 @@ def insert_node(head: Node | None, data: int) -> Node: while temp_node.next_node: temp_node = temp_node.next_node - temp_node.next_node = new_node # type: ignore + temp_node.next_node = new_node return head diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 5eb9af0aa..359d965a8 100644 --- a/fractals/mandelbrot.py +++ 
b/fractals/mandelbrot.py @@ -17,7 +17,7 @@ the boundary of the Mandelbrot set a fractal curve. import colorsys -from PIL import Image # type: ignore +from PIL import Image def get_distance(x: float, y: float, max_step: int) -> float: diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 6eeb89da6..9d906f179 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -2,7 +2,7 @@ # https://www.tutorialspoint.com/computer_graphics/computer_graphics_curves.htm from __future__ import annotations -from scipy.special import comb # type: ignore +from scipy.special import comb class BezierCurve: diff --git a/maths/entropy.py b/maths/entropy.py index 76fac4ee7..39ec67bea 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -96,8 +96,8 @@ def analyze_text(text: str) -> tuple[dict, dict]: The first dictionary stores the frequency of single character strings. The second dictionary stores the frequency of two character strings. """ - single_char_strings = Counter() # type: ignore - two_char_strings = Counter() # type: ignore + single_char_strings = Counter() # type: ignore[var-annotated] + two_char_strings = Counter() # type: ignore[var-annotated] single_char_strings[text[-1]] += 1 # first case when we have space at start. diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 7ba0a2751..c16dde69c 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -116,7 +116,9 @@ def spiral_traversal(matrix: list[list]) -> list[int]: [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) """ if matrix: - return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) # type: ignore + return list(matrix.pop(0)) + spiral_traversal( + [list(row) for row in zip(*matrix)][::-1] + ) else: return [] diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index 638f97daa..addc870ca 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -12,7 +12,7 @@ import logging import sys import numpy as np -import pytest # type: ignore +import pytest # Custom/local libraries from matrix import matrix_operation as matop diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index 8d3f0c9dd..3e45e8220 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -68,7 +68,7 @@ def chain(number: int) -> bool: """ if CHAINS[number - 1] is not None: - return CHAINS[number - 1] # type: ignore + return CHAINS[number - 1] # type: ignore[return-value] number_chain = chain(next_number(number)) CHAINS[number - 1] = number_chain diff --git a/project_euler/problem_104/sol1.py b/project_euler/problem_104/sol1.py index 60fd6fe99..d84dbcfc9 100644 --- a/project_euler/problem_104/sol1.py +++ b/project_euler/problem_104/sol1.py @@ -15,7 +15,7 @@ the last nine digits are 1-9 pandigital, find k. 
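
# PGH003, enforced by this patch, flags blanket `# type: ignore` comments:
# each suppression must either name the diagnostic it silences or be dropped,
# as in the hunks above and below. A standalone sketch (hypothetical
# variable, real mypy error code):
port: int = "8080"  # type: ignore[assignment]  # scoped, unlike a bare ignore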
import sys -sys.set_int_max_str_digits(0) # type: ignore +sys.set_int_max_str_digits(0) def check(number: int) -> bool: diff --git a/pyproject.toml b/pyproject.toml index e1d7dc91b..7eac81139 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index ed23f3907..0890024dd 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -4,7 +4,7 @@ import os try: from .build_directory_md import good_file_paths except ImportError: - from build_directory_md import good_file_paths # type: ignore + from build_directory_md import good_file_paths # type: ignore[no-redef] filepaths = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 0afbdde31..68dcd68b3 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -21,8 +21,8 @@ with open(PROJECT_EULER_ANSWERS_PATH) as file_handle: def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: """Converts a file path to a Python module""" spec = importlib.util.spec_from_file_location(file_path.name, str(file_path)) - module = importlib.util.module_from_spec(spec) # type: ignore - spec.loader.exec_module(module) # type: ignore + module = importlib.util.module_from_spec(spec) # type: ignore[arg-type] + spec.loader.exec_module(module) # type: ignore[union-attr] return module @@ -92,7 +92,7 @@ def test_project_euler(solution_path: pathlib.Path) -> None: problem_number: str = solution_path.parent.name[8:].zfill(3) expected: str = PROBLEM_ANSWERS[problem_number] solution_module = convert_path_to_module(solution_path) - answer = str(solution_module.solution()) # type: ignore + answer = str(solution_module.solution()) answer = hashlib.sha256(answer.encode()).hexdigest() assert ( answer == expected diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index a95130bad..7011a02bf 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -7,7 +7,7 @@ more convenient to use in Python web projects (e.g. 
Django or Flask-based) from typing import NamedTuple import requests -from lxml import html # type: ignore +from lxml import html class CovidData(NamedTuple): From cc2f5b13088b8a98181983b5589f48749016d4ce Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 8 Apr 2024 14:22:54 +0300 Subject: [PATCH 014/104] Do not fix ruff EXE001 rule (#11350) * Do not fix ruff EXE001 rule * Fix --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7eac81139..264f06d1f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable" -- FIX ME + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From 9e55c9d9845c07ce6390ab92a2d86be4816d4a69 Mon Sep 17 00:00:00 2001 From: Jiayou Qin <90779499+Jiayoqin@users.noreply.github.com> Date: Mon, 8 Apr 2024 07:35:22 -0400 Subject: [PATCH 015/104] Added documentations (#11352) * Added documentations * Update data_structures/queue/circular_queue.py --------- Co-authored-by: Christian Clauss --- data_structures/queue/circular_queue.py | 7 +++++-- data_structures/queue/circular_queue_linked_list.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/data_structures/queue/circular_queue.py b/data_structures/queue/circular_queue.py index 93a6ef805..f2fb4c01e 100644 --- a/data_structures/queue/circular_queue.py +++ b/data_structures/queue/circular_queue.py @@ -25,6 +25,7 @@ class CircularQueue: def is_empty(self) -> bool: """ + Checks whether the queue is empty or not >>> cq = CircularQueue(5) >>> cq.is_empty() True @@ -35,6 +36,7 @@ class CircularQueue: def first(self): """ + Returns the first element of the queue >>> cq = CircularQueue(5) >>> cq.first() False @@ -45,7 +47,8 @@ class CircularQueue: def enqueue(self, data): """ - This function insert an element in the queue using self.rear value as an index + This function inserts an element at the end of the queue using self.rear value + as an index. 
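
        The "circular" behavior is modular index arithmetic over a fixed-size
        backing list; a standalone sketch (illustrative names, not this
        class's attributes):

        >>> size, rear = 5, 4
        >>> data = [None] * size
        >>> data[rear] = "A"
        >>> rear = (rear + 1) % size  # wraps past the end of the list
        >>> rear
        0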
>>> cq = CircularQueue(5)
        >>> cq.enqueue("A")  # doctest: +ELLIPSIS
        <data_structures.queue.circular_queue.CircularQueue object at ...
        >>> cq = CircularQueue(5)
        >>> cq.dequeue()
        Traceback (most recent call last):
diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py
index 62042c4bc..da8629678 100644
--- a/data_structures/queue/circular_queue_linked_list.py
+++ b/data_structures/queue/circular_queue_linked_list.py
@@ -39,7 +39,7 @@ class CircularQueueLinkedList:

     def is_empty(self) -> bool:
         """
-        Checks where the queue is empty or not
+        Checks whether the queue is empty or not
         >>> cq = CircularQueueLinkedList()
         >>> cq.is_empty()
         True

From 14ca726951473dd1993b6b13993105ea3b077ac3 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 9 Apr 2024 07:23:51 +0200
Subject: [PATCH 016/104] [pre-commit.ci] pre-commit autoupdate (#11355)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/pre-commit/pre-commit-hooks: v4.5.0 → v4.6.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.5.0...v4.6.0)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e6b1b0442..d4b8d1136 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v4.6.0
    hooks:
      - id: check-executables-have-shebangs
      - id: check-toml

From 0a9a860eb1174a513b231db2cf1a3378ff7c5b33 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 15 Apr 2024 22:21:33 +0200
Subject: [PATCH 017/104] [pre-commit.ci] pre-commit autoupdate (#11364)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/MarcoGorelli/auto-walrus: v0.2.2 → 0.3.3](https://github.com/MarcoGorelli/auto-walrus/compare/v0.2.2...0.3.3)
- [github.com/astral-sh/ruff-pre-commit: v0.3.5 → v0.3.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.5...v0.3.7)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d4b8d1136..9472bcfa3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,12 +11,12 @@ repos:
      - id: requirements-txt-fixer

   - repo: https://github.com/MarcoGorelli/auto-walrus
-    rev: v0.2.2
+    rev: 0.3.3
    hooks:
      - id: auto-walrus

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.5
+    rev: v0.3.7
    hooks:
      - id: ruff
      - id: ruff-format

From a42eb357027328085f928a4ab6c7aa770aeb1d6b Mon Sep 17 00:00:00 2001
From: Maxim Smolskiy
Date: Fri, 19 Apr 2024 22:30:22 +0300
Subject: [PATCH 018/104] Enable ruff E741 rule (#11370)

* Enable ruff E741 rule

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .../binary_tree/non_recursive_segment_tree.py | 22 ++++++------
 data_structures/binary_tree/segment_tree.py | 36 +++++++++----------
 data_structures/heap/min_heap.py | 12 +++----
 .../longest_common_subsequence.py | 10 +++---
 .../longest_increasing_subsequence_o_nlogn.py | 14 ++++----
graphs/articulation_points.py | 10 +++--- graphs/dinic.py | 2 +- .../sequential_minimum_optimization.py | 4 +-- maths/pi_generator.py | 10 +++--- other/sdes.py | 10 +++--- project_euler/problem_011/sol2.py | 22 ++++++++---- pyproject.toml | 1 - strings/jaro_winkler.py | 8 ++--- strings/manacher.py | 33 ++++++++--------- 14 files changed, 102 insertions(+), 92 deletions(-) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 45c476701..ca0d5c111 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -87,12 +87,12 @@ class SegmentTree(Generic[T]): p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T | None: + def query(self, left: int, right: int) -> T | None: """ Get range query value in log(N) time - :param l: left element index - :param r: right element index - :return: element combined in the range [l, r] + :param left: left element index + :param right: right element index + :return: element combined in the range [left, right] >>> st = SegmentTree([1, 2, 3, 4], lambda a, b: a + b) >>> st.query(0, 2) @@ -104,15 +104,15 @@ class SegmentTree(Generic[T]): >>> st.query(2, 3) 7 """ - l, r = l + self.N, r + self.N + left, right = left + self.N, right + self.N res: T | None = None - while l <= r: - if l % 2 == 1: - res = self.st[l] if res is None else self.fn(res, self.st[l]) - if r % 2 == 0: - res = self.st[r] if res is None else self.fn(res, self.st[r]) - l, r = (l + 1) // 2, (r - 1) // 2 + while left <= right: + if left % 2 == 1: + res = self.st[left] if res is None else self.fn(res, self.st[left]) + if right % 2 == 0: + res = self.st[right] if res is None else self.fn(res, self.st[right]) + left, right = (left + 1) // 2, (right - 1) // 2 return res diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index bb9c1ae22..c7069b3f6 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -35,13 +35,13 @@ class SegmentTree: """ return idx * 2 + 1 - def build(self, idx, l, r): - if l == r: - self.st[idx] = self.A[l] + def build(self, idx, left, right): + if left == right: + self.st[idx] = self.A[left] else: - mid = (l + r) // 2 - self.build(self.left(idx), l, mid) - self.build(self.right(idx), mid + 1, r) + mid = (left + right) // 2 + self.build(self.left(idx), left, mid) + self.build(self.right(idx), mid + 1, right) self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) def update(self, a, b, val): @@ -56,18 +56,18 @@ class SegmentTree: """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive(self, idx, l, r, a, b, val): + def update_recursive(self, idx, left, right, a, b, val): """ update(1, 1, N, a, b, v) for update val v to [a,b] """ - if r < a or l > b: + if right < a or left > b: return True - if l == r: + if left == right: self.st[idx] = val return True - mid = (l + r) // 2 - self.update_recursive(self.left(idx), l, mid, a, b, val) - self.update_recursive(self.right(idx), mid + 1, r, a, b, val) + mid = (left + right) // 2 + self.update_recursive(self.left(idx), left, mid, a, b, val) + self.update_recursive(self.right(idx), mid + 1, right, a, b, val) self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) return True @@ -83,17 +83,17 @@ class SegmentTree: """ return self.query_recursive(1, 0, self.N - 1, a - 
1, b - 1) - def query_recursive(self, idx, l, r, a, b): + def query_recursive(self, idx, left, right, a, b): """ query(1, 1, N, a, b) for query max of [a,b] """ - if r < a or l > b: + if right < a or left > b: return -math.inf - if l >= a and r <= b: + if left >= a and right <= b: return self.st[idx] - mid = (l + r) // 2 - q1 = self.query_recursive(self.left(idx), l, mid, a, b) - q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b) + mid = (left + right) // 2 + q1 = self.query_recursive(self.left(idx), left, mid, a, b) + q2 = self.query_recursive(self.right(idx), mid + 1, right, a, b) return max(q1, q2) def show_data(self): diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 39f6d99e8..ce7ed570a 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,14 +66,14 @@ class MinHeap: # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) - r = self.get_right_child_idx(idx) + left = self.get_left_child_idx(idx) + right = self.get_right_child_idx(idx) smallest = idx - if l < len(array) and array[l] < array[idx]: - smallest = l - if r < len(array) and array[r] < array[smallest]: - smallest = r + if left < len(array) and array[left] < array[idx]: + smallest = left + if right < len(array) and array[right] < array[smallest]: + smallest = right if smallest != idx: array[idx], array[smallest] = array[smallest], array[idx] diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 22f50a166..9a98b1736 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,30 +38,30 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - l = [[0] * (n + 1) for _ in range(m + 1)] + dp = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): match = 1 if x[i - 1] == y[j - 1] else 0 - l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) + dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match) seq = "" i, j = m, n while i > 0 and j > 0: match = 1 if x[i - 1] == y[j - 1] else 0 - if l[i][j] == l[i - 1][j - 1] + match: + if dp[i][j] == dp[i - 1][j - 1] + match: if match == 1: seq = x[i - 1] + seq i -= 1 j -= 1 - elif l[i][j] == l[i - 1][j]: + elif dp[i][j] == dp[i - 1][j]: i -= 1 else: j -= 1 - return l[m][n], seq + return dp[m][n], seq if __name__ == "__main__": diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py index 44e333e97..bbc7a62b6 100644 --- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py +++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py @@ -7,14 +7,14 @@ from __future__ import annotations -def ceil_index(v, l, r, key): - while r - l > 1: - m = (l + r) // 2 - if v[m] >= key: - r = m +def ceil_index(v, left, right, key): + while right - left > 1: + middle = (left + right) // 2 + if v[middle] >= key: + right = middle else: - l = m - return r + left = middle + return right def longest_increasing_subsequence_length(v: list[int]) -> int: diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 3fcaffd73..0bf16e55b 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,6 +1,6 @@ # Finding Articulation Points in Undirected Graph -def compute_ap(l): - n = len(l) +def 
compute_ap(graph): + n = len(graph) out_edge_count = 0 low = [0] * n visited = [False] * n @@ -12,7 +12,7 @@ def compute_ap(l): visited[at] = True low[at] = at - for to in l[at]: + for to in graph[at]: if to == parent: pass elif not visited[to]: @@ -41,7 +41,7 @@ def compute_ap(l): # Adjacency list of graph -data = { +graph = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], @@ -52,4 +52,4 @@ data = { 7: [6, 8], 8: [5, 7], } -compute_ap(data) +compute_ap(graph) diff --git a/graphs/dinic.py b/graphs/dinic.py index 4f5e81236..7919e6bc0 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ class Dinic: # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # l = 30 maybe faster for random data + for l in range(31): # l = 30 maybe faster for random data # noqa: E741 while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 408d59ab5..3abdd6ccb 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -309,9 +309,9 @@ class SmoSVM: # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: - l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741 else: - l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) # noqa: E741 if l == h: return None, None diff --git a/maths/pi_generator.py b/maths/pi_generator.py index addd92174..97f2c540c 100644 --- a/maths/pi_generator.py +++ b/maths/pi_generator.py @@ -41,7 +41,7 @@ def calculate_pi(limit: int) -> str: t = 1 k = 1 n = 3 - l = 3 + m = 3 decimal = limit counter = 0 @@ -65,11 +65,11 @@ def calculate_pi(limit: int) -> str: q *= 10 r = nr else: - nr = (2 * q + r) * l - nn = (q * (7 * k) + 2 + (r * l)) // (t * l) + nr = (2 * q + r) * m + nn = (q * (7 * k) + 2 + (r * m)) // (t * m) q *= k - t *= l - l += 2 + t *= m + m += 2 k += 1 n = nn r = nr diff --git a/other/sdes.py b/other/sdes.py index a69add343..42186f453 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,11 +44,11 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) - r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l - r = "0" * (2 - len(r)) + r - temp = apply_table(l + r, p4_table) + left_bin_str = apply_sbox(s0, temp[:4]) + right_bin_str = apply_sbox(s1, temp[4:]) + left_bin_str = "0" * (2 - len(left_bin_str)) + left_bin_str + right_bin_str = "0" * (2 - len(right_bin_str)) + right_bin_str + temp = apply_table(left_bin_str + right_bin_str, p4_table) temp = xor(left, temp) return temp + right diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 295830533..09bf31570 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,37 +35,47 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] + grid = [] for _ in range(20): - l.append([int(x) for x in f.readline().split()]) + grid.append([int(x) for x in f.readline().split()]) maximum = 0 # right for i in range(20): for j in range(17): - temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] + temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * 
grid[i][j + 3] if temp > maximum: maximum = temp # down for i in range(17): for j in range(20): - temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] + temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] if temp > maximum: maximum = temp # diagonal 1 for i in range(17): for j in range(17): - temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] + temp = ( + grid[i][j] + * grid[i + 1][j + 1] + * grid[i + 2][j + 2] + * grid[i + 3][j + 3] + ) if temp > maximum: maximum = temp # diagonal 2 for i in range(17): for j in range(3, 20): - temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] + temp = ( + grid[i][j] + * grid[i + 1][j - 1] + * grid[i + 2][j - 2] + * grid[i + 3][j - 3] + ) if temp > maximum: maximum = temp return maximum diff --git a/pyproject.toml b/pyproject.toml index 264f06d1f..1ac70b2fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index f4a8fbad3..c18f0d85d 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -28,12 +28,12 @@ def jaro_winkler(str1: str, str2: str) -> float: def get_matched_characters(_str1: str, _str2: str) -> str: matched = [] limit = min(len(_str1), len(_str2)) // 2 - for i, l in enumerate(_str1): + for i, char in enumerate(_str1): left = int(max(0, i - limit)) right = int(min(i + limit + 1, len(_str2))) - if l in _str2[left:right]: - matched.append(l) - _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}" + if char in _str2[left:right]: + matched.append(char) + _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}" return "".join(matched) diff --git a/strings/manacher.py b/strings/manacher.py index ca546e533..fc8b01cd9 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -9,9 +9,9 @@ def palindromic_string(input_string: str) -> str: 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. - 2. for each character in new_string it find corresponding length and store the - length and l,r to store previously calculated info.(please look the explanation - for details) + 2. for each character in new_string it find corresponding length and + store the length and left,right to store previously calculated info. + (please look the explanation for details) 3. 
return corresponding output_string by removing all "|"
    """
@@ -29,7 +29,7 @@ def palindromic_string(input_string: str) -> str:

     # we will store the starting and ending of previous furthest ending palindromic
     # substring
-    l, r = 0, 0
+    left, right = 0, 0

     # length[i] shows the length of palindromic substring with center i
     length = [1 for i in range(len(new_input_string))]
@@ -37,7 +37,7 @@ def palindromic_string(input_string: str) -> str:
     # for each character in new_string find corresponding palindromic string
     start = 0
     for j in range(len(new_input_string)):
-        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
+        k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1)
         while (
             j - k >= 0
             and j + k < len(new_input_string)
@@ -47,11 +47,11 @@ def palindromic_string(input_string: str) -> str:

         length[j] = 2 * k - 1

-        # does this string is ending after the previously explored end (that is r) ?
-        # if yes the update the new r to the last index of this
-        if j + k - 1 > r:
-            l = j - k + 1
-            r = j + k - 1
+        # does this string is ending after the previously explored end (that is right) ?
+        # if yes the update the new right to the last index of this
+        if j + k - 1 > right:
+            left = j - k + 1
+            right = j + k - 1

         # update max_length and start position
         if max_length < length[j]:
@@ -78,8 +78,9 @@ if __name__ == "__main__":
 consider the string for which we are calculating the longest palindromic substring is
 shown above where ... are some characters in between and right now we are calculating
 the length of palindromic substring with center at a5 with following conditions :
-i) we have stored the length of palindromic substring which has center at a3 (starts at
-   l ends at r) and it is the furthest ending till now, and it has ending after a6
+i) we have stored the length of palindromic substring which has center at a3
+   (starts at left ends at right) and it is the furthest ending till now,
+   and it has ending after a6
 ii) a2 and a4 are equally distant from a3 so char(a2) == char(a4)
 iii) a0 and a6 are equally distant from a3 so char(a0) == char(a6)
 iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember
@@ -98,11 +99,11 @@ so we can say that palindrome at center a5 is at least as long as palindrome at
 a1 but this only holds if a0 and a6 are inside the limits of palindrome centered at a3
 so finally ..
-len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), r-a5)
-where a3 lies from l to r and we have to keep updating that
+len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), right-a5)
+where a3 lies from left to right and we have to keep updating that

-and if the a5 lies outside of l,r boundary we calculate length of palindrome with
-bruteforce and update l,r.
+and if the a5 lies outside of left,right boundary we calculate length of palindrome with
+bruteforce and update left,right.

 it gives the linear time complexity just like z-function
 """
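For readers unfamiliar with ruff's E741, the rule flags single-letter identifiers such as `l`, `O` and `I` because they are easily misread as the digits 1 and 0 in many fonts. A minimal, self-contained sketch of the kind of rename the patch above applies across the repository (the function and its names below are illustrative only, not repository code):

# Before: ruff reports "E741 Ambiguous variable name: `l`" on this signature.
def midpoint_ambiguous(l: int, r: int) -> int:  # noqa: E741
    return (l + r) // 2


# After: identical logic with unambiguous names, so no suppression is needed.
def midpoint(left: int, right: int) -> int:
    return (left + right) // 2


assert midpoint(0, 10) == midpoint_ambiguous(0, 10) == 5

Note how `dinic.py` and `sequential_minimum_optimization.py` above take the `# noqa: E741` route instead, keeping the short names used in the original algorithm descriptions.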
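The doctests added above are only reliable because each one calls `random.seed(...)` first: seeding the global generator makes every subsequent draw reproducible, so exact outputs can be asserted. A self-contained sketch of the same pattern, built around a made-up `roll_dice` function rather than repository code (it deliberately avoids hard-coding seed-specific values by comparing two identically seeded runs):

import random


def roll_dice(n: int) -> list[int]:
    """Roll ``n`` six-sided dice.

    >>> random.seed(42)
    >>> first = roll_dice(3)
    >>> random.seed(42)
    >>> roll_dice(3) == first  # same seed, same "random" rolls
    True
    >>> all(1 <= die <= 6 for die in first)
    True
    """
    return [random.randint(1, 6) for _ in range(n)]


if __name__ == "__main__":
    import doctest

    doctest.testmod()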
From 7b88e15b1cc67c784872b0d16189e516474cf5a5 Mon Sep 17 00:00:00 2001
From: Maxim Smolskiy
Date: Sat, 20 Apr 2024 17:20:27 +0300
Subject: [PATCH 020/104] Enable ruff RUF007 rule (#11349)

* Enable ruff RUF005 rule

* Enable ruff RUF007 rule

* Fix

* Fix

* Fix

* Update sorts/bead_sort.py

Co-authored-by: Christian Clauss

* Update sorts/bead_sort.py

* Revert "Update sorts/bead_sort.py"

This reverts commit b10e5632e4479c2117c8b67113b5aa6545f127aa.

* Revert "Update sorts/bead_sort.py"

This reverts commit 2c1816bf102eeec5aa39cb2f1806afb64b672d14.

* Update sorts/bead_sort.py

---------

Co-authored-by: Christian Clauss
---
 data_structures/linked_list/skip_list.py | 3 ++-
 pyproject.toml | 1 -
 sorts/bead_sort.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py
index 88d3e0dad..13e9a94a8 100644
--- a/data_structures/linked_list/skip_list.py
+++ b/data_structures/linked_list/skip_list.py
@@ -5,6 +5,7 @@ https://epaperpress.com/sortsearch/download/skiplist.pdf

 from __future__ import annotations

+from itertools import pairwise
 from random import random
 from typing import Generic, TypeVar

@@ -389,7 +390,7 @@ def test_delete_doesnt_leave_dead_nodes():

 def test_iter_always_yields_sorted_values():
     def is_sorted(lst):
-        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
+        return all(next_item >= item for item, next_item in pairwise(lst))

     skip_list = SkipList()
     for i in range(10):

diff --git a/pyproject.toml b/pyproject.toml
index 1ac70b2fa..e46293a8d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule
   "RUF001", # String contains ambiguous {}. Did you mean {}?
   "RUF002", # Docstring contains ambiguous {}. Did you mean {}?
   "RUF003", # Comment contains ambiguous {}. Did you mean {}?
-  "RUF007", # Prefer itertools.pairwise() over zip() when iterating over successive pairs
   "S101", # Use of `assert` detected -- DO NOT FIX
   "S113", # Probable use of requests call without timeout -- FIX ME
   "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME

diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py
index e51173643..8ce0619fd 100644
--- a/sorts/bead_sort.py
+++ b/sorts/bead_sort.py
@@ -31,7 +31,7 @@ def bead_sort(sequence: list) -> list:
     if any(not isinstance(x, int) or x < 0 for x in sequence):
         raise TypeError("Sequence must be list of non-negative integers")
     for _ in range(len(sequence)):
-        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
+        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):  # noqa: RUF007
             if rod_upper > rod_lower:
                 sequence[i] -= rod_upper - rod_lower
                 sequence[i + 1] += rod_upper - rod_lower
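The motivation for RUF007, in runnable form: `itertools.pairwise` (Python 3.10+) yields the same successive pairs as the `zip(lst, lst[1:])` idiom without materialising the `lst[1:]` copy. The two are only interchangeable when the list is not mutated during iteration, which is why `bead_sort` above keeps `zip` under a `# noqa: RUF007` while `skip_list` switches over. A small standalone comparison (the variable names are illustrative only):

from itertools import pairwise

data = [3, 1, 4, 1, 5]

# The sliced-zip idiom builds a throwaway copy of the tail...
pairs_from_zip = list(zip(data, data[1:]))
# ...whereas pairwise lazily re-yields each element with its successor.
pairs_from_pairwise = list(pairwise(data))

assert pairs_from_zip == pairs_from_pairwise == [(3, 1), (1, 4), (4, 1), (1, 5)]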
From 2702bf9400faece97a1ebc76d0f91b9cfe9658f6 Mon Sep 17 00:00:00 2001
From: Maxim Smolskiy
Date: Sun, 21 Apr 2024 20:34:18 +0300
Subject: [PATCH 021/104] Enable ruff S113 rule (#11375)

* Enable ruff S113 rule

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 machine_learning/linear_regression.py | 3 ++-
 pyproject.toml | 1 -
 scripts/validate_solutions.py | 2 +-
 web_programming/co2_emission.py | 4 ++--
 web_programming/covid_stats_via_xpath.py | 4 +++-
 web_programming/crawl_google_results.py | 2 +-
 web_programming/crawl_google_scholar_citation.py | 4 +++-
 web_programming/currency_converter.py | 2 +-
 web_programming/current_stock_price.py | 4 +++-
 web_programming/current_weather.py | 4 ++--
 web_programming/daily_horoscope.py | 2 +-
 web_programming/download_images_from_google_query.py | 4 +++-
 web_programming/emails_from_url.py | 4 ++--
 web_programming/fetch_anime_and_play.py | 8 +++++---
 web_programming/fetch_bbc_news.py | 2 +-
 web_programming/fetch_github_info.py | 2 +-
 web_programming/fetch_jobs.py | 4 +++-
 web_programming/fetch_quotes.py | 4 ++--
 web_programming/fetch_well_rx_price.py | 2 +-
 web_programming/get_amazon_product_data.py | 4 +++-
 web_programming/get_imdb_top_250_movies_csv.py | 2 +-
 web_programming/get_ip_geolocation.py | 2 +-
 web_programming/get_top_billionaires.py | 2 +-
 web_programming/get_top_hn_posts.py | 4 ++--
 web_programming/giphy.py | 2 +-
 web_programming/instagram_crawler.py | 2 +-
 web_programming/instagram_pic.py | 4 ++--
 web_programming/instagram_video.py | 4 ++--
 web_programming/nasa_data.py | 6 +++---
 web_programming/open_google_results.py | 1 +
 web_programming/random_anime_character.py | 6 ++++--
 web_programming/recaptcha_verification.py | 4 +++-
 web_programming/reddit.py | 1 +
 web_programming/search_books_by_isbn.py | 2 +-
 web_programming/slack_message.py | 4 +++-
 web_programming/world_covid19_stats.py | 2 +-
 36 files changed, 68 insertions(+), 46 deletions(-)

diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py
index 39bee5712..839a5366d 100644
--- a/machine_learning/linear_regression.py
+++ b/machine_learning/linear_regression.py
@@ -19,7 +19,8 @@ def collect_dataset():
     """
     response = requests.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
-        "master/Week1/ADRvsRating.csv"
+        "master/Week1/ADRvsRating.csv",
+        timeout=10,
     )
     lines = response.text.splitlines()
     data = []

diff --git a/pyproject.toml b/pyproject.toml
index e46293a8d..ff22fba81 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,7 +14,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule
   "RUF002", # Docstring contains ambiguous {}. Did you mean {}?
   "RUF003", # Comment contains ambiguous {}. Did you mean {}?
   "S101", # Use of `assert` detected -- DO NOT FIX
-  "S113", # Probable use of requests call without timeout -- FIX ME
   "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME
   "SLF001", # Private member accessed: `_Iterator` -- FIX ME
   "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX

diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py
index 68dcd68b3..325c245e0 100755
--- a/scripts/validate_solutions.py
+++ b/scripts/validate_solutions.py
@@ -57,7 +57,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers).json()
+    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (

diff --git a/web_programming/co2_emission.py b/web_programming/co2_emission.py
index 88a426cb9..19af70489 100644
--- a/web_programming/co2_emission.py
+++ b/web_programming/co2_emission.py
@@ -11,13 +11,13 @@ BASE_URL = "https://api.carbonintensity.org.uk/intensity"

 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL).json()["data"][0]
+    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]


 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"]
+    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]


 if __name__ == "__main__":

diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py
index 7011a02bf..c27a5d12b 100644
--- a/web_programming/covid_stats_via_xpath.py
+++ b/web_programming/covid_stats_via_xpath.py
@@ -18,7 +18,9 @@ class 
CovidData(NamedTuple): def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData: xpath_str = '//div[@class = "maincounter-number"]/span/text()' - return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str)) + return CovidData( + *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str) + ) fmt = """Total COVID-19 cases in the world: {} diff --git a/web_programming/crawl_google_results.py b/web_programming/crawl_google_results.py index 1f5e6d319..cb75d450f 100644 --- a/web_programming/crawl_google_results.py +++ b/web_programming/crawl_google_results.py @@ -8,7 +8,7 @@ from fake_useragent import UserAgent if __name__ == "__main__": print("Googling.....") url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) - res = requests.get(url, headers={"UserAgent": UserAgent().random}) + res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(10000): diff --git a/web_programming/crawl_google_scholar_citation.py b/web_programming/crawl_google_scholar_citation.py index f92a3d139..5f2ccad5f 100644 --- a/web_programming/crawl_google_scholar_citation.py +++ b/web_programming/crawl_google_scholar_citation.py @@ -11,7 +11,9 @@ def get_citation(base_url: str, params: dict) -> str: """ Return the citation number. """ - soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser") + soup = BeautifulSoup( + requests.get(base_url, params=params, timeout=10).content, "html.parser" + ) div = soup.find("div", attrs={"class": "gs_ri"}) anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a") return anchors[2].get_text() diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 3bbcafa8f..9623504b8 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -176,7 +176,7 @@ def convert_currency( params = locals() # from is a reserved keyword params["from"] = params.pop("from_") - res = requests.get(URL_BASE, params=params).json() + res = requests.get(URL_BASE, params=params, timeout=10).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index 0c06354d8..9567c05b0 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -4,7 +4,9 @@ from bs4 import BeautifulSoup def stock_price(symbol: str = "AAPL") -> str: url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}" - yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text + yahoo_finance_source = requests.get( + url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 + ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"}) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index 3b6cd177c..4a8fa5e3c 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -20,13 +20,13 @@ def current_weather(location: str) -> list[dict]: if OPENWEATHERMAP_API_KEY: params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY} response_openweathermap = requests.get( - OPENWEATHERMAP_URL_BASE, params=params_openweathermap + OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10 ) 
weather_data.append({"OpenWeatherMap": response_openweathermap.json()}) if WEATHERSTACK_API_KEY: params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY} response_weatherstack = requests.get( - WEATHERSTACK_URL_BASE, params=params_weatherstack + WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10 ) weather_data.append({"Weatherstack": response_weatherstack.json()}) if not weather_data: diff --git a/web_programming/daily_horoscope.py b/web_programming/daily_horoscope.py index b0dd1cd65..75e637d8e 100644 --- a/web_programming/daily_horoscope.py +++ b/web_programming/daily_horoscope.py @@ -7,7 +7,7 @@ def horoscope(zodiac_sign: int, day: str) -> str: "https://www.horoscope.com/us/horoscopes/general/" f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}" ) - soup = BeautifulSoup(requests.get(url).content, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser") return soup.find("div", class_="main-horoscope").p.text diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index 441347459..235cd3576 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -39,7 +39,9 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) "ijn": "0", } - html = requests.get("https://www.google.com/search", params=params, headers=headers) + html = requests.get( + "https://www.google.com/search", params=params, headers=headers, timeout=10 + ) soup = BeautifulSoup(html.text, "html.parser") matched_images_data = "".join( re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))) diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 26c88e1b1..43fd78dcf 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -77,7 +77,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]: try: # Open URL - r = requests.get(url) + r = requests.get(url, timeout=10) # pass the raw HTML to the parser to get links parser.feed(r.text) @@ -88,7 +88,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]: # open URL. # read = requests.get(link) try: - read = requests.get(link) + read = requests.get(link, timeout=10) # Get the valid email. emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text) # If not in list then append it. diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index 366807785..fd7c3a3a7 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -28,7 +28,7 @@ def search_scraper(anime_name: str) -> list: search_url = f"{BASE_URL}/search/{anime_name}" response = requests.get( - search_url, headers={"UserAgent": UserAgent().chrome} + search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10 ) # request the url. # Is the response ok? 
@@ -82,7 +82,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list: request_url = f"{BASE_URL}{episode_endpoint}" - response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome}) + response = requests.get( + url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10 + ) response.raise_for_status() soup = BeautifulSoup(response.text, "html.parser") @@ -132,7 +134,7 @@ def get_anime_episode(episode_endpoint: str) -> list: episode_page_url = f"{BASE_URL}{episode_endpoint}" response = requests.get( - url=episode_page_url, headers={"User-Agent": UserAgent().chrome} + url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10 ) response.raise_for_status() diff --git a/web_programming/fetch_bbc_news.py b/web_programming/fetch_bbc_news.py index 7f8bc57b6..e5cd864a9 100644 --- a/web_programming/fetch_bbc_news.py +++ b/web_programming/fetch_bbc_news.py @@ -7,7 +7,7 @@ _NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def fetch_bbc_news(bbc_news_api_key: str) -> None: # fetching a list of articles in json format - bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json() + bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"], 1): print(f"{i}.) {article['title']}") diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index 7a4985b68..25d44245b 100644 --- a/web_programming/fetch_github_info.py +++ b/web_programming/fetch_github_info.py @@ -42,7 +42,7 @@ def fetch_github_info(auth_token: str) -> dict[Any, Any]: "Authorization": f"token {auth_token}", "Accept": "application/vnd.github.v3+json", } - return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json() + return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json() if __name__ == "__main__": # pragma: no cover diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 49abd3c88..0d89bf45d 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -13,7 +13,9 @@ url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l=" def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]: - soup = BeautifulSoup(requests.get(url + location).content, "html.parser") + soup = BeautifulSoup( + requests.get(url + location, timeout=10).content, "html.parser" + ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}): job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip() diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index d557e2d95..cf0add43f 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -14,11 +14,11 @@ API_ENDPOINT_URL = "https://zenquotes.io/api" def quote_of_the_day() -> list: - return requests.get(API_ENDPOINT_URL + "/today").json() + return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json() def random_quotes() -> list: - return requests.get(API_ENDPOINT_URL + "/random").json() + return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json() if __name__ == "__main__": diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py index ee51b9a50..93be2a923 100644 --- a/web_programming/fetch_well_rx_price.py +++ b/web_programming/fetch_well_rx_price.py @@ -42,7 +42,7 @@ def 
fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: return None request_url = BASE_URL.format(drug_name, zip_code) - response = get(request_url) + response = get(request_url, timeout=10) # Is the response ok? response.raise_for_status() diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py index c2f2ac5ab..b98ff2c03 100644 --- a/web_programming/get_amazon_product_data.py +++ b/web_programming/get_amazon_product_data.py @@ -24,7 +24,9 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame: ), "Accept-Language": "en-US, en;q=0.5", } - soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml") + soup = BeautifulSoup( + requests.get(url, headers=header, timeout=10).text, features="lxml" + ) # Initialize a Pandas dataframe with the column titles data_frame = DataFrame( columns=[ diff --git a/web_programming/get_imdb_top_250_movies_csv.py b/web_programming/get_imdb_top_250_movies_csv.py index e54b076eb..c914b29cb 100644 --- a/web_programming/get_imdb_top_250_movies_csv.py +++ b/web_programming/get_imdb_top_250_movies_csv.py @@ -8,7 +8,7 @@ from bs4 import BeautifulSoup def get_imdb_top_250_movies(url: str = "") -> dict[str, float]: url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250" - soup = BeautifulSoup(requests.get(url).text, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser") titles = soup.find_all("td", attrs="titleColumn") ratings = soup.find_all("td", class_="ratingColumn imdbRating") return { diff --git a/web_programming/get_ip_geolocation.py b/web_programming/get_ip_geolocation.py index 62eaeafce..574d287f0 100644 --- a/web_programming/get_ip_geolocation.py +++ b/web_programming/get_ip_geolocation.py @@ -8,7 +8,7 @@ def get_ip_geolocation(ip_address: str) -> str: url = f"https://ipinfo.io/{ip_address}/json" # Send a GET request to the API - response = requests.get(url) + response = requests.get(url, timeout=10) # Check if the HTTP request was successful response.raise_for_status() diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 703b635ee..24828b6d7 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -57,7 +57,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: Returns: List of top 10 realtime billionaires data. 
""" - response_json = requests.get(API_URL).json() + response_json = requests.get(API_URL, timeout=10).json() return [ { "Name": person["personName"], diff --git a/web_programming/get_top_hn_posts.py b/web_programming/get_top_hn_posts.py index fbb7c051a..f5d4f874c 100644 --- a/web_programming/get_top_hn_posts.py +++ b/web_programming/get_top_hn_posts.py @@ -5,7 +5,7 @@ import requests def get_hackernews_story(story_id: str) -> dict: url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty" - return requests.get(url).json() + return requests.get(url, timeout=10).json() def hackernews_top_stories(max_stories: int = 10) -> list[dict]: @@ -13,7 +13,7 @@ def hackernews_top_stories(max_stories: int = 10) -> list[dict]: Get the top max_stories posts from HackerNews - https://news.ycombinator.com/ """ url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" - story_ids = requests.get(url).json()[:max_stories] + story_ids = requests.get(url, timeout=10).json()[:max_stories] return [get_hackernews_story(story_id) for story_id in story_ids] diff --git a/web_programming/giphy.py b/web_programming/giphy.py index a5c3f8f74..2bf3e3ea9 100644 --- a/web_programming/giphy.py +++ b/web_programming/giphy.py @@ -11,7 +11,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list: """ formatted_query = "+".join(query.split()) url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" - gifs = requests.get(url).json()["data"] + gifs = requests.get(url, timeout=10).json()["data"] return [gif["url"] for gif in gifs] diff --git a/web_programming/instagram_crawler.py b/web_programming/instagram_crawler.py index 0816cd181..df62735fb 100644 --- a/web_programming/instagram_crawler.py +++ b/web_programming/instagram_crawler.py @@ -39,7 +39,7 @@ class InstagramUser: """ Return a dict of user information """ - html = requests.get(self.url, headers=headers).text + html = requests.get(self.url, headers=headers, timeout=10).text scripts = BeautifulSoup(html, "html.parser").find_all("script") try: return extract_user_profile(scripts[4]) diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 2d987c176..292cacc16 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -15,7 +15,7 @@ def download_image(url: str) -> str: A message indicating the result of the operation. """ try: - response = requests.get(url) + response = requests.get(url, timeout=10) response.raise_for_status() except requests.exceptions.RequestException as e: return f"An error occurred during the HTTP request to {url}: {e!r}" @@ -30,7 +30,7 @@ def download_image(url: str) -> str: return f"Image URL not found in meta tag {image_meta_tag}." 
try: - image_data = requests.get(image_url).content + image_data = requests.get(image_url, timeout=10).content except requests.exceptions.RequestException as e: return f"An error occurred during the HTTP request to {image_url}: {e!r}" if not image_data: diff --git a/web_programming/instagram_video.py b/web_programming/instagram_video.py index 1f1b0e297..a4cddce25 100644 --- a/web_programming/instagram_video.py +++ b/web_programming/instagram_video.py @@ -5,8 +5,8 @@ import requests def download_video(url: str) -> bytes: base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" - video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"] - return requests.get(video_url).content + video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"] + return requests.get(video_url, timeout=10).content if __name__ == "__main__": diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py index 81125e0a4..33a6406c5 100644 --- a/web_programming/nasa_data.py +++ b/web_programming/nasa_data.py @@ -9,14 +9,14 @@ def get_apod_data(api_key: str) -> dict: Get your API Key from: https://api.nasa.gov/ """ url = "https://api.nasa.gov/planetary/apod" - return requests.get(url, params={"api_key": api_key}).json() + return requests.get(url, params={"api_key": api_key}, timeout=10).json() def save_apod(api_key: str, path: str = ".") -> dict: apod_data = get_apod_data(api_key) img_url = apod_data["url"] img_name = img_url.split("/")[-1] - response = requests.get(img_url, stream=True) + response = requests.get(img_url, stream=True, timeout=10) with open(f"{path}/{img_name}", "wb+") as img_file: shutil.copyfileobj(response.raw, img_file) @@ -29,7 +29,7 @@ def get_archive_data(query: str) -> dict: Get the data of a particular query from NASA archives """ url = "https://images-api.nasa.gov/search" - return requests.get(url, params={"q": query}).json() + return requests.get(url, params={"q": query}, timeout=10).json() if __name__ == "__main__": diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index f61e3666d..52dd37d7b 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -16,6 +16,7 @@ if __name__ == "__main__": res = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, + timeout=10, ) try: diff --git a/web_programming/random_anime_character.py b/web_programming/random_anime_character.py index f15a9c05d..aed932866 100644 --- a/web_programming/random_anime_character.py +++ b/web_programming/random_anime_character.py @@ -12,7 +12,7 @@ def save_image(image_url: str, image_title: str) -> None: """ Saves the image of anime character """ - image = requests.get(image_url, headers=headers) + image = requests.get(image_url, headers=headers, timeout=10) with open(image_title, "wb") as file: file.write(image.content) @@ -21,7 +21,9 @@ def random_anime_character() -> tuple[str, str, str]: """ Returns the Title, Description, and Image Title of a random anime character . 
""" - soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser") + soup = BeautifulSoup( + requests.get(URL, headers=headers, timeout=10).text, "html.parser" + ) title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"] image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"] description = soup.find("p", id="description").get_text() diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index c9b691b28..168862204 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -56,7 +56,9 @@ def login_using_recaptcha(request): client_key = request.POST.get("g-recaptcha-response") # post recaptcha response to Google's recaptcha api - response = requests.post(url, data={"secret": secret_key, "response": client_key}) + response = requests.post( + url, data={"secret": secret_key, "response": client_key}, timeout=10 + ) # if the recaptcha api verified our keys if response.json().get("success", False): # authenticate the user diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 1c165ecc4..6cc1a6b62 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -31,6 +31,7 @@ def get_subreddit_data( response = requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}, + timeout=10, ) if response.status_code == 429: raise requests.HTTPError(response=response) diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index 07429e9a9..6b69018e6 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -25,7 +25,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: if new_olid.count("/") != 1: msg = f"{olid} is not a valid Open Library olid" raise ValueError(msg) - return requests.get(f"https://openlibrary.org/{new_olid}.json").json() + return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json() def summarize_book(ol_book_data: dict) -> dict: diff --git a/web_programming/slack_message.py b/web_programming/slack_message.py index 5e97d6b64..d4d565889 100644 --- a/web_programming/slack_message.py +++ b/web_programming/slack_message.py @@ -5,7 +5,9 @@ import requests def send_slack_message(message_body: str, slack_url: str) -> None: headers = {"Content-Type": "application/json"} - response = requests.post(slack_url, json={"text": message_body}, headers=headers) + response = requests.post( + slack_url, json={"text": message_body}, headers=headers, timeout=10 + ) if response.status_code != 200: msg = ( "Request to slack returned an error " diff --git a/web_programming/world_covid19_stats.py b/web_programming/world_covid19_stats.py index ca81abdc4..4948d8cfd 100644 --- a/web_programming/world_covid19_stats.py +++ b/web_programming/world_covid19_stats.py @@ -13,7 +13,7 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") """ Return a dict of current worldwide COVID-19 statistics """ - soup = BeautifulSoup(requests.get(url).text, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser") keys = soup.findAll("h1") values = soup.findAll("div", {"class": "maincounter-number"}) keys += soup.findAll("span", {"class": "panel-title"}) From dbfa21813ff6fe2d7b439dfd6daa60b14a64d24f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 
21:43:19 +0200 Subject: [PATCH 022/104] [pre-commit.ci] pre-commit autoupdate (#11380) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.7 → v0.4.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.7...v0.4.1) - [github.com/tox-dev/pyproject-fmt: 1.7.0 → 1.8.0](https://github.com/tox-dev/pyproject-fmt/compare/1.7.0...1.8.0) * from keras import layers, models * Update lstm_prediction.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- computer_vision/cnn_classification.py | 2 +- machine_learning/lstm/lstm_prediction.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9472bcfa3..eedf6d939 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.7 + rev: v0.4.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.7.0" + rev: "1.8.0" hooks: - id: pyproject-fmt diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index b813b7103..115333eba 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -25,7 +25,7 @@ import numpy as np # Importing the Keras libraries and packages import tensorflow as tf -from tensorflow.keras import layers, models +from keras import layers, models if __name__ == "__main__": # Initialising the CNN diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index f0fd12c9d..81ac5f01d 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -7,9 +7,9 @@ An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: import numpy as np import pandas as pd +from keras.layers import LSTM, Dense +from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler -from tensorflow.keras.layers import LSTM, Dense -from tensorflow.keras.models import Sequential if __name__ == "__main__": """ From 79dc7c97acc492d657b5f2f50686cee5b0f64b30 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:45:24 +0300 Subject: [PATCH 023/104] Enable ruff RUF001 rule (#11378) * Enable ruff RUF001 rule * Fix * Fix --- fuzzy_logic/fuzzy_operations.py | 6 +++--- physics/basic_orbital_capture.py | 6 +++--- physics/malus_law.py | 2 +- pyproject.toml | 1 - 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py index e41cd2120..c5e4cbde0 100644 --- a/fuzzy_logic/fuzzy_operations.py +++ b/fuzzy_logic/fuzzy_operations.py @@ -57,7 +57,7 @@ class FuzzySet: # Union Operations >>> siya.union(sheru) - FuzzySet(name='Siya ∪ Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) + FuzzySet(name='Siya U Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) """ name: str @@ -147,10 +147,10 @@ class FuzzySet: FuzzySet: A new fuzzy set representing the union. 
>>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6)) - FuzzySet(name='a ∪ b', left_boundary=0.1, peak=0.6, right_boundary=0.35) + FuzzySet(name='a U b', left_boundary=0.1, peak=0.6, right_boundary=0.35) """ return FuzzySet( - f"{self.name} ∪ {other.name}", + f"{self.name} U {other.name}", min(self.left_boundary, other.left_boundary), max(self.right_boundary, other.right_boundary), (self.peak + other.peak) / 2, diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py index eeb45e602..a5434b5cb 100644 --- a/physics/basic_orbital_capture.py +++ b/physics/basic_orbital_capture.py @@ -4,14 +4,14 @@ from scipy.constants import G, c, pi """ These two functions will return the radii of impact for a target object -of mass M and radius R as well as it's effective cross sectional area σ(sigma). -That is to say any projectile with velocity v passing within σ, will impact the +of mass M and radius R as well as it's effective cross sectional area sigma. +That is to say any projectile with velocity v passing within sigma, will impact the target object with mass M. The derivation of which is given at the bottom of this file. The derivation shows that a projectile does not need to aim directly at the target body in order to hit it, as R_capture>R_target. Astronomers refer to the effective -cross section for capture as σ=π*R_capture**2. +cross section for capture as sigma=π*R_capture**2. This algorithm does not account for an N-body problem. diff --git a/physics/malus_law.py b/physics/malus_law.py index ae77d45cf..374b3423f 100644 --- a/physics/malus_law.py +++ b/physics/malus_law.py @@ -31,7 +31,7 @@ This effect is used in polarimetry to measure the optical activity of a sample. Real polarizers are also not perfect blockers of the polarization orthogonal to their polarization axis; the ratio of the transmission of the unwanted component to the wanted component is called the extinction ratio, and varies from around -1:500 for Polaroid to about 1:106 for Glan–Taylor prism polarizers. +1:500 for Polaroid to about 1:106 for Glan-Taylor prism polarizers. Reference : "https://en.wikipedia.org/wiki/Polarizer#Malus's_law_and_other_properties" """ diff --git a/pyproject.toml b/pyproject.toml index ff22fba81..0185f4d7b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX From 4700297b3e332701eed1d0667f3afefc5b9b66be Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:51:47 +0300 Subject: [PATCH 024/104] Enable ruff RUF002 rule (#11377) * Enable ruff RUF002 rule * Fix --------- Co-authored-by: Christian Clauss --- backtracking/sudoku.py | 4 ++-- .../single_bit_manipulation_operations.py | 14 +++++++------- compression/burrows_wheeler.py | 2 +- compression/lempel_ziv.py | 4 ++-- compression/lempel_ziv_decompress.py | 4 ++-- data_structures/binary_tree/red_black_tree.py | 2 +- digital_image_processing/edge_detection/canny.py | 4 ++-- digital_image_processing/index_calculation.py | 2 +- dynamic_programming/combination_sum_iv.py | 2 +- electronics/coulombs_law.py | 4 ++-- hashes/fletcher16.py | 2 +- linear_algebra/lu_decomposition.py | 2 +- linear_algebra/src/schur_complement.py | 2 +- machine_learning/polynomial_regression.py | 4 ++-- maths/chudnovsky_algorithm.py | 2 +- maths/entropy.py | 4 ++-- maths/lucas_lehmer_primality_test.py | 4 ++-- maths/modular_division.py | 2 +- maths/numerical_analysis/bisection_2.py | 2 +- maths/numerical_analysis/nevilles_method.py | 2 +- maths/simultaneous_linear_equation_solver.py | 6 +++--- matrix/largest_square_area_in_matrix.py | 4 ++-- matrix/spiral_print.py | 2 +- neural_network/back_propagation_neural_network.py | 4 ++-- other/davis_putnam_logemann_loveland.py | 2 +- other/fischer_yates_shuffle.py | 2 +- physics/archimedes_principle_of_buoyant_force.py | 2 +- physics/center_of_mass.py | 8 ++++---- physics/centripetal_force.py | 2 +- physics/lorentz_transformation_four_vector.py | 14 +++++++------- physics/reynolds_number.py | 4 ++-- physics/terminal_velocity.py | 4 ++-- project_euler/problem_004/sol1.py | 2 +- project_euler/problem_004/sol2.py | 2 +- project_euler/problem_008/sol1.py | 2 +- project_euler/problem_008/sol2.py | 2 +- project_euler/problem_008/sol3.py | 2 +- project_euler/problem_015/sol1.py | 4 ++-- project_euler/problem_020/sol1.py | 4 ++-- project_euler/problem_020/sol2.py | 4 ++-- project_euler/problem_020/sol3.py | 4 ++-- project_euler/problem_020/sol4.py | 4 ++-- project_euler/problem_022/sol1.py | 2 +- project_euler/problem_022/sol2.py | 2 +- project_euler/problem_025/sol1.py | 2 +- project_euler/problem_025/sol2.py | 2 +- project_euler/problem_025/sol3.py | 2 +- project_euler/problem_027/sol1.py | 8 ++++---- project_euler/problem_031/sol1.py | 10 +++++----- project_euler/problem_031/sol2.py | 12 ++++++------ project_euler/problem_032/sol32.py | 2 +- project_euler/problem_038/sol1.py | 6 +++--- project_euler/problem_040/sol1.py | 2 +- project_euler/problem_044/sol1.py | 6 +++--- project_euler/problem_045/sol1.py | 4 ++-- project_euler/problem_046/sol1.py | 12 ++++++------ project_euler/problem_047/sol1.py | 10 +++++----- project_euler/problem_053/sol1.py | 2 +- project_euler/problem_097/sol1.py | 4 ++-- project_euler/problem_104/sol1.py | 2 +- project_euler/problem_120/sol1.py | 2 +- project_euler/problem_123/sol1.py | 2 +- project_euler/problem_135/sol1.py | 4 ++-- project_euler/problem_144/sol1.py | 4 ++-- project_euler/problem_174/sol1.py | 2 +- pyproject.toml | 1 + strings/jaro_winkler.py | 2 +- strings/manacher.py | 2 +- strings/prefix_function.py | 2 +- 69 files changed, 132 insertions(+), 131 deletions(-) diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 8f5459c76..cabeebb90 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -1,7 +1,7 @@ """ -Given a partially filled 9×9 2D array, the 
objective is to fill a 9×9 +Given a partially filled 9x9 2D array, the objective is to fill a 9x9 square grid with digits numbered 1 to 9, so that every row, column, and -and each of the nine 3×3 sub-grids contains all of the digits. +and each of the nine 3x3 sub-grids contains all of the digits. This can be solved using Backtracking and is similar to n-queens. We check to see if a cell is safe or not and recursively call the diff --git a/bit_manipulation/single_bit_manipulation_operations.py b/bit_manipulation/single_bit_manipulation_operations.py index b43ff07b7..fcbf033cc 100644 --- a/bit_manipulation/single_bit_manipulation_operations.py +++ b/bit_manipulation/single_bit_manipulation_operations.py @@ -8,8 +8,8 @@ def set_bit(number: int, position: int) -> int: Set the bit at position to 1. Details: perform bitwise or for given number and X. - Where X is a number with all the bits – zeroes and bit on given - position – one. + Where X is a number with all the bits - zeroes and bit on given + position - one. >>> set_bit(0b1101, 1) # 0b1111 15 @@ -26,8 +26,8 @@ def clear_bit(number: int, position: int) -> int: Set the bit at position to 0. Details: perform bitwise and for given number and X. - Where X is a number with all the bits – ones and bit on given - position – zero. + Where X is a number with all the bits - ones and bit on given + position - zero. >>> clear_bit(0b10010, 1) # 0b10000 16 @@ -42,8 +42,8 @@ def flip_bit(number: int, position: int) -> int: Flip the bit at position. Details: perform bitwise xor for given number and X. - Where X is a number with all the bits – zeroes and bit on given - position – one. + Where X is a number with all the bits - zeroes and bit on given + position - one. >>> flip_bit(0b101, 1) # 0b111 7 @@ -79,7 +79,7 @@ def get_bit(number: int, position: int) -> int: Get the bit at the given position Details: perform bitwise and for the given number and X, - Where X is a number with all the bits – zeroes and bit on given position – one. + Where X is a number with all the bits - zeroes and bit on given position - one. If the result is not equal to 0, then the bit on the given position is 1, else 0. >>> get_bit(0b1010, 0) diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index ce493a70c..857d677c9 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -1,7 +1,7 @@ """ https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform -The Burrows–Wheeler transform (BWT, also called block-sorting compression) +The Burrows-Wheeler transform (BWT, also called block-sorting compression) rearranges a character string into runs of similar characters. 
This is useful for compression, since it tends to be easy to compress a string that has runs of repeated characters by techniques such as move-to-front transform and diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index ac3f0c6cf..2751a0ebc 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -1,5 +1,5 @@ """ -One of the several implementations of Lempel–Ziv–Welch compression algorithm +One of the several implementations of Lempel-Ziv-Welch compression algorithm https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ @@ -43,7 +43,7 @@ def add_key_to_lexicon( def compress_data(data_bits: str) -> str: """ - Compresses given data_bits using Lempel–Ziv–Welch compression algorithm + Compresses given data_bits using Lempel-Ziv-Welch compression algorithm and returns the result as a string """ lexicon = {"0": "0", "1": "1"} diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index 0e49c83fb..225e96236 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -1,5 +1,5 @@ """ -One of the several implementations of Lempel–Ziv–Welch decompression algorithm +One of the several implementations of Lempel-Ziv-Welch decompression algorithm https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ @@ -26,7 +26,7 @@ def read_file_binary(file_path: str) -> str: def decompress_data(data_bits: str) -> str: """ - Decompresses given data_bits using Lempel–Ziv–Welch compression algorithm + Decompresses given data_bits using Lempel-Ziv-Welch compression algorithm and returns the result as a string """ lexicon = {"0": "0", "1": "1"} diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index e68d8d1e3..a9ecf897c 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -17,7 +17,7 @@ class RedBlackTree: and slower for reading in the average case, though, because they're both balanced binary search trees, both will get the same asymptotic performance. - To read more about them, https://en.wikipedia.org/wiki/Red–black_tree + To read more about them, https://en.wikipedia.org/wiki/Red-black_tree Unless otherwise specified, all asymptotic runtimes are specified in terms of the size of the tree. """ diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index f8cbeedb3..944161c31 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -74,9 +74,9 @@ def detect_high_low_threshold( image_shape, destination, threshold_low, threshold_high, weak, strong ): """ - High-Low threshold detection. If an edge pixel’s gradient value is higher + High-Low threshold detection. If an edge pixel's gradient value is higher than the high threshold value, it is marked as a strong edge pixel. If an - edge pixel’s gradient value is smaller than the high threshold value and + edge pixel's gradient value is smaller than the high threshold value and larger than the low threshold value, it is marked as a weak edge pixel. If an edge pixel's value is smaller than the low threshold value, it will be suppressed. 
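As an aside, the double-threshold rule that this canny.py docstring describes can be sketched in a few lines of NumPy. The helper below is a hypothetical illustration written for this document, not the module's actual code, and the weak/strong marker values are arbitrary assumptions:

import numpy as np


def classify_edges(gradient, threshold_low, threshold_high, weak=50, strong=255):
    # Illustrative sketch: pixels at or above the high threshold become
    # strong edges, pixels between the two thresholds become weak edges,
    # and everything below the low threshold stays suppressed (zero).
    out = np.zeros_like(gradient)
    out[gradient >= threshold_high] = strong
    out[(gradient >= threshold_low) & (gradient < threshold_high)] = weak
    return out


print(classify_edges(np.array([10.0, 60.0, 120.0]), 30, 100))  # [  0.  50. 255.]
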
diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 67830668b..988f8e72b 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -182,7 +182,7 @@ class IndexCalculation: Atmospherically Resistant Vegetation Index 2 https://www.indexdatabase.de/db/i-single.php?id=396 :return: index - −0.18+1.17*(self.nir−self.red)/(self.nir+self.red) + -0.18+1.17*(self.nir-self.red)/(self.nir+self.red) """ return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index 4526729b7..113c06a27 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -18,7 +18,7 @@ Approach: The basic idea is to go over recursively to find the way such that the sum of chosen elements is “tar”. For every element, we have two choices 1. Include the element in our set of chosen elements. - 2. Don’t include the element in our set of chosen elements. + 2. Don't include the element in our set of chosen elements. """ diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py index 18c1a8179..74bbea5ea 100644 --- a/electronics/coulombs_law.py +++ b/electronics/coulombs_law.py @@ -20,8 +20,8 @@ def couloumbs_law( Reference ---------- - Coulomb (1785) "Premier mémoire sur l’électricité et le magnétisme," - Histoire de l’Académie Royale des Sciences, pp. 569–577. + Coulomb (1785) "Premier mémoire sur l'électricité et le magnétisme," + Histoire de l'Académie Royale des Sciences, pp. 569-577. Parameters ---------- diff --git a/hashes/fletcher16.py b/hashes/fletcher16.py index 7c23c98d7..add8e185b 100644 --- a/hashes/fletcher16.py +++ b/hashes/fletcher16.py @@ -1,6 +1,6 @@ """ The Fletcher checksum is an algorithm for computing a position-dependent -checksum devised by John G. Fletcher (1934–2012) at Lawrence Livermore Labs +checksum devised by John G. Fletcher (1934-2012) at Lawrence Livermore Labs in the late 1970s.[1] The objective of the Fletcher checksum was to provide error-detection properties approaching those of a cyclic redundancy check but with the lower computational effort associated diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 1d364163d..362067483 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -1,5 +1,5 @@ """ -Lower–upper (LU) decomposition factors a matrix as a product of a lower +Lower-upper (LU) decomposition factors a matrix as a product of a lower triangular matrix and an upper triangular matrix. A square matrix has an LU decomposition under the following conditions: - If the matrix is invertible, then it has an LU decomposition if and only diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 1cc084043..7c79bb70a 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -18,7 +18,7 @@ def schur_complement( the pseudo_inv argument. 
Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement - See also Convex Optimization – Boyd and Vandenberghe, A.5.5 + See also Convex Optimization - Boyd and Vandenberghe, A.5.5 >>> import numpy as np >>> a = np.array([[1, 2], [2, 1]]) >>> b = np.array([[0, 3], [3, 0]]) diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 5bafea96f..19f7dc994 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -11,7 +11,7 @@ for polynomial regression: β = (XᵀX)⁻¹Xᵀy = X⁺y -where X is the design matrix, y is the response vector, and X⁺ denotes the Moore–Penrose +where X is the design matrix, y is the response vector, and X⁺ denotes the Moore-Penrose pseudoinverse of X. In the case of polynomial regression, the design matrix is |1 x₁ x₁² ⋯ x₁ᵐ| @@ -106,7 +106,7 @@ class PolynomialRegression: β = (XᵀX)⁻¹Xᵀy = X⁺y - where X⁺ denotes the Moore–Penrose pseudoinverse of the design matrix X. This + where X⁺ denotes the Moore-Penrose pseudoinverse of the design matrix X. This function computes X⁺ using singular value decomposition (SVD). References: diff --git a/maths/chudnovsky_algorithm.py b/maths/chudnovsky_algorithm.py index aaee74628..d122bf075 100644 --- a/maths/chudnovsky_algorithm.py +++ b/maths/chudnovsky_algorithm.py @@ -5,7 +5,7 @@ from math import ceil, factorial def pi(precision: int) -> str: """ The Chudnovsky algorithm is a fast method for calculating the digits of PI, - based on Ramanujan’s PI formulae. + based on Ramanujan's PI formulae. https://en.wikipedia.org/wiki/Chudnovsky_algorithm diff --git a/maths/entropy.py b/maths/entropy.py index 39ec67bea..b816f1d19 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -21,10 +21,10 @@ def calculate_prob(text: str) -> None: :return: Prints 1) Entropy of information based on 1 alphabet 2) Entropy of information based on couples of 2 alphabet - 3) print Entropy of H(X n∣Xn−1) + 3) print Entropy of H(X n|Xn-1) Text from random books. Also, random quotes. - >>> text = ("Behind Winston’s back the voice " + >>> text = ("Behind Winston's back the voice " ... "from the telescreen was still " ... "babbling and the overfulfilment") >>> calculate_prob(text) diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 292387414..af5c81133 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -1,12 +1,12 @@ """ -In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne +In mathematics, the Lucas-Lehmer test (LLT) is a primality test for Mersenne numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test A Mersenne number is a number that is one less than a power of two. That is M_p = 2^p - 1 https://en.wikipedia.org/wiki/Mersenne_prime -The Lucas–Lehmer test is the primality test used by the +The Lucas-Lehmer test is the primality test used by the Great Internet Mersenne Prime Search (GIMPS) to locate large primes. """ diff --git a/maths/modular_division.py b/maths/modular_division.py index 260d56837..2f8f4479b 100644 --- a/maths/modular_division.py +++ b/maths/modular_division.py @@ -9,7 +9,7 @@ def modular_division(a: int, b: int, n: int) -> int: GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should - return an integer x such that 0≤x≤n−1, and b/a=x(modn) (that is, b=ax(modn)). 
+ return an integer x such that 0≤x≤n-1, and b/a=x(modn) (that is, b=ax(modn)). Theorem: a has a multiplicative inverse modulo n iff gcd(a,n) = 1 diff --git a/maths/numerical_analysis/bisection_2.py b/maths/numerical_analysis/bisection_2.py index 45f26d8d8..68ba6577c 100644 --- a/maths/numerical_analysis/bisection_2.py +++ b/maths/numerical_analysis/bisection_2.py @@ -1,5 +1,5 @@ """ -Given a function on floating number f(x) and two floating numbers ‘a’ and ‘b’ such that +Given a function on floating number f(x) and two floating numbers `a` and `b` such that f(a) * f(b) < 0 and f(x) is continuous in [a, b]. Here f(x) represents algebraic or transcendental equation. Find root of function in interval [a, b] (Or find a value of x such that f(x) is 0) diff --git a/maths/numerical_analysis/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py index 256b61f5f..25c93ac6c 100644 --- a/maths/numerical_analysis/nevilles_method.py +++ b/maths/numerical_analysis/nevilles_method.py @@ -1,7 +1,7 @@ """ Python program to show how to interpolate and evaluate a polynomial using Neville's method. -Neville’s method evaluates a polynomial that passes through a +Neville's method evaluates a polynomial that passes through a given set of x and y points for a particular x value (x0) using the Newton polynomial form. Reference: diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py index 1287b2002..9685a33e8 100644 --- a/maths/simultaneous_linear_equation_solver.py +++ b/maths/simultaneous_linear_equation_solver.py @@ -2,10 +2,10 @@ https://en.wikipedia.org/wiki/Augmented_matrix This algorithm solves simultaneous linear equations of the form -λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ] -Where λ & γ are individual coefficients, the no. of equations = no. of coefficients - 1 +λa + λb + λc + λd + ... = y as [λ, λ, λ, λ, ..., y] +Where λ & y are individual coefficients, the no. of equations = no. of coefficients - 1 -Note in order to work there must exist 1 equation where all instances of λ and γ != 0 +Note in order to work there must exist 1 equation where all instances of λ and y != 0 """ diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py index a93369c56..16263fb79 100644 --- a/matrix/largest_square_area_in_matrix.py +++ b/matrix/largest_square_area_in_matrix.py @@ -31,7 +31,7 @@ Explanation: There is no 1 in the matrix. Approach: We initialize another matrix (dp) with the same dimensions -as the original one initialized with all 0’s. +as the original one initialized with all 0's. dp_array(i,j) represents the side length of the maximum square whose bottom right corner is the cell with index (i,j) in the original matrix. @@ -39,7 +39,7 @@ bottom right corner is the cell with index (i,j) in the original matrix. Starting from index (0,0), for every 1 found in the original matrix, we update the value of the current element as -dp_array(i,j)=dp_array(dp(i−1,j),dp_array(i−1,j−1),dp_array(i,j−1)) + 1. +dp_array(i,j)=dp_array(dp(i-1,j),dp_array(i-1,j-1),dp_array(i,j-1)) + 1. """ diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index c16dde69c..88bde1db5 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -89,7 +89,7 @@ def spiral_traversal(matrix: list[list]) -> list[int]: Algorithm: Step 1. first pop the 0 index list. (which is [1,2,3,4] and concatenate the output of [step 2]) - Step 2. Now perform matrix’s Transpose operation (Change rows to column + Step 2. 
Now perform matrix's Transpose operation (Change rows to column and vice versa) and reverse the resultant matrix. Step 3. Pass the output of [2nd step], to same recursive function till base case hits. diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 6131a13e9..182f759c5 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -2,10 +2,10 @@ """ -A Framework of Back Propagation Neural Network(BP) model +A Framework of Back Propagation Neural Network (BP) model Easy to use: - * add many layers as you want !!! + * add many layers as you want ! ! ! * clearly see how the loss decreasing Easy to expand: * more activation functions diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 3a76f3dfe..0f3100b1b 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ -Davis–Putnam–Logemann–Loveland (DPLL) algorithm is a complete, backtracking-based +Davis-Putnam-Logemann-Loveland (DPLL) algorithm is a complete, backtracking-based search algorithm for deciding the satisfiability of propositional logic formulae in conjunctive normal form, i.e, for solving the Conjunctive Normal Form SATisfiability (CNF-SAT) problem. diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index 37e11479a..5e90b10ed 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -1,6 +1,6 @@ #!/usr/bin/python """ -The Fisher–Yates shuffle is an algorithm for generating a random permutation of a +The Fisher-Yates shuffle is an algorithm for generating a random permutation of a finite sequence. For more details visit wikipedia/Fischer-Yates-Shuffle. diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py index 71043e0e1..38f1a0a83 100644 --- a/physics/archimedes_principle_of_buoyant_force.py +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -3,7 +3,7 @@ Calculate the buoyant force of any body completely or partially submerged in a s fluid. This principle was discovered by the Greek mathematician Archimedes. Equation for calculating buoyant force: -Fb = ρ * V * g +Fb = p * V * g https://en.wikipedia.org/wiki/Archimedes%27_principle """ diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py index 59c3b807f..7a20e71be 100644 --- a/physics/center_of_mass.py +++ b/physics/center_of_mass.py @@ -16,8 +16,8 @@ assumed to be concentrated to visualize its motion. In other words, the center o is the particle equivalent of a given object for the application of Newton's laws of motion. -In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are -located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center +In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are +located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center of mass corresponds to: R = (Σ(mi * ri) / Σ(mi)) @@ -36,8 +36,8 @@ def center_of_mass(particles: list[Particle]) -> Coord3D: Input Parameters ---------------- particles: list(Particle): - A list of particles where each particle is a tuple with it´s (x, y, z) position and - it´s mass. + A list of particles where each particle is a tuple with it's (x, y, z) position and + it's mass. 
Returns ------- diff --git a/physics/centripetal_force.py b/physics/centripetal_force.py index 04069d256..a4c624582 100644 --- a/physics/centripetal_force.py +++ b/physics/centripetal_force.py @@ -6,7 +6,7 @@ or centre of curvature. The unit of centripetal force is newton. The centripetal force is always directed perpendicular to the -direction of the object’s displacement. Using Newton’s second +direction of the object's displacement. Using Newton's second law of motion, it is found that the centripetal force of an object moving in a circular path always acts towards the centre of the circle. The Centripetal Force Formula is given as the product of mass (in kg) diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index f4fda4dff..3b0fd83d4 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -12,13 +12,13 @@ two inertial reference frames and X' moves in the x direction with velocity v with respect to X, then the Lorentz transformation from X to X' is X' = BX, where - | γ -γβ 0 0| -B = |-γβ γ 0 0| + | y -γβ 0 0| +B = |-γβ y 0 0| | 0 0 1 0| | 0 0 0 1| is the matrix describing the Lorentz boost between X and X', -γ = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as +y = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as a fraction of c. Reference: https://en.wikipedia.org/wiki/Lorentz_transformation @@ -63,7 +63,7 @@ def beta(velocity: float) -> float: def gamma(velocity: float) -> float: """ - Calculate the Lorentz factor γ = 1 / √(1 - v²/c²) for a given velocity + Calculate the Lorentz factor y = 1 / √(1 - v²/c²) for a given velocity >>> gamma(4) 1.0000000000000002 >>> gamma(1e5) @@ -90,12 +90,12 @@ def transformation_matrix(velocity: float) -> np.ndarray: """ Calculate the Lorentz transformation matrix for movement in the x direction: - | γ -γβ 0 0| - |-γβ γ 0 0| + | y -γβ 0 0| + |-γβ y 0 0| | 0 0 1 0| | 0 0 0 1| - where γ is the Lorentz factor and β is the velocity as a fraction of c + where y is the Lorentz factor and β is the velocity as a fraction of c >>> transformation_matrix(29979245) array([[ 1.00503781, -0.10050378, 0. , 0. ], [-0.10050378, 1.00503781, 0. , 0. ], diff --git a/physics/reynolds_number.py b/physics/reynolds_number.py index dffe690f8..c24a9e002 100644 --- a/physics/reynolds_number.py +++ b/physics/reynolds_number.py @@ -8,10 +8,10 @@ pipe. Reynolds number is defined by the ratio of inertial forces to that of viscous forces. R = Inertial Forces / Viscous Forces -R = (ρ * V * D)/μ +R = (p * V * D)/μ where : -ρ = Density of fluid (in Kg/m^3) +p = Density of fluid (in Kg/m^3) D = Diameter of pipe through which fluid flows (in m) V = Velocity of flow of the fluid (in m/s) μ = Viscosity of the fluid (in Ns/m^2) diff --git a/physics/terminal_velocity.py b/physics/terminal_velocity.py index cec54162e..16714bd02 100644 --- a/physics/terminal_velocity.py +++ b/physics/terminal_velocity.py @@ -8,13 +8,13 @@ and buoyancy is equal to the downward gravity force acting on the object. The acceleration of the object is zero as the net force acting on the object is zero. 
-Vt = ((2 * m * g)/(ρ * A * Cd))^0.5 +Vt = ((2 * m * g)/(p * A * Cd))^0.5 where : Vt = Terminal velocity (in m/s) m = Mass of the falling object (in Kg) g = Acceleration due to gravity (value taken : imported from scipy) -ρ = Density of the fluid through which the object is falling (in Kg/m^3) +p = Density of the fluid through which the object is falling (in Kg/m^3) A = Projected area of the object (in m^2) Cd = Drag coefficient (dimensionless) diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index f237afdd9..f80a3253e 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -4,7 +4,7 @@ Project Euler Problem 4: https://projecteuler.net/problem=4 Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made -from the product of two 2-digit numbers is 9009 = 91 × 99. +from the product of two 2-digit numbers is 9009 = 91 x 99. Find the largest palindrome made from the product of two 3-digit numbers. diff --git a/project_euler/problem_004/sol2.py b/project_euler/problem_004/sol2.py index abc880966..1fa75e7d0 100644 --- a/project_euler/problem_004/sol2.py +++ b/project_euler/problem_004/sol2.py @@ -4,7 +4,7 @@ Project Euler Problem 4: https://projecteuler.net/problem=4 Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made -from the product of two 2-digit numbers is 9009 = 91 × 99. +from the product of two 2-digit numbers is 9009 = 91 x 99. Find the largest palindrome made from the product of two 3-digit numbers. diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index 69dd1b473..adbac8d5a 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -4,7 +4,7 @@ Project Euler Problem 8: https://projecteuler.net/problem=8 Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index f83cb1db3..e48231e40 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -4,7 +4,7 @@ Project Euler Problem 8: https://projecteuler.net/problem=8 Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index bf3bcb05b..0d319b968 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -4,7 +4,7 @@ Project Euler Problem 8: https://projecteuler.net/problem=8 Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 
73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_015/sol1.py b/project_euler/problem_015/sol1.py index fd9014a40..3c9dae1ae 100644 --- a/project_euler/problem_015/sol1.py +++ b/project_euler/problem_015/sol1.py @@ -1,9 +1,9 @@ """ Problem 15: https://projecteuler.net/problem=15 -Starting in the top left corner of a 2×2 grid, and only being able to move to +Starting in the top left corner of a 2x2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner. -How many such routes are there through a 20×20 grid? +How many such routes are there through a 20x20 grid? """ from math import factorial diff --git a/project_euler/problem_020/sol1.py b/project_euler/problem_020/sol1.py index b472024e5..1439bdca3 100644 --- a/project_euler/problem_020/sol1.py +++ b/project_euler/problem_020/sol1.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol2.py b/project_euler/problem_020/sol2.py index a1d56ade7..61684cd5e 100644 --- a/project_euler/problem_020/sol2.py +++ b/project_euler/problem_020/sol2.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol3.py b/project_euler/problem_020/sol3.py index 1886e0546..8984def9c 100644 --- a/project_euler/problem_020/sol3.py +++ b/project_euler/problem_020/sol3.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol4.py b/project_euler/problem_020/sol4.py index b32ce309d..511ac81e1 100644 --- a/project_euler/problem_020/sol4.py +++ b/project_euler/problem_020/sol4.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_022/sol1.py b/project_euler/problem_022/sol1.py index b6386186e..c4af5dfa8 100644 --- a/project_euler/problem_022/sol1.py +++ b/project_euler/problem_022/sol1.py @@ -10,7 +10,7 @@ score. For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would -obtain a score of 938 × 53 = 49714. +obtain a score of 938 x 53 = 49714. 
What is the total of all the name scores in the file? """ diff --git a/project_euler/problem_022/sol2.py b/project_euler/problem_022/sol2.py index f7092ea1c..9c22b6bba 100644 --- a/project_euler/problem_022/sol2.py +++ b/project_euler/problem_022/sol2.py @@ -10,7 +10,7 @@ score. For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would -obtain a score of 938 × 53 = 49714. +obtain a score of 938 x 53 = 49714. What is the total of all the name scores in the file? """ diff --git a/project_euler/problem_025/sol1.py b/project_euler/problem_025/sol1.py index 803464b5d..b3bbb56d2 100644 --- a/project_euler/problem_025/sol1.py +++ b/project_euler/problem_025/sol1.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index 9e950b355..a0f056023 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_025/sol3.py b/project_euler/problem_025/sol3.py index 0b9f3a0c8..e33b159ac 100644 --- a/project_euler/problem_025/sol3.py +++ b/project_euler/problem_025/sol3.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_027/sol1.py b/project_euler/problem_027/sol1.py index c93e2b4fa..48755ec19 100644 --- a/project_euler/problem_027/sol1.py +++ b/project_euler/problem_027/sol1.py @@ -9,12 +9,12 @@ n2 + n + 41 It turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39. However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when n = 41, 412 + 41 + 41 is clearly divisible by 41. -The incredible formula n2 − 79n + 1601 was discovered, which produces 80 primes -for the consecutive values n = 0 to 79. The product of the coefficients, −79 and -1601, is −126479. +The incredible formula n2 - 79n + 1601 was discovered, which produces 80 primes +for the consecutive values n = 0 to 79. The product of the coefficients, -79 and +1601, is -126479. Considering quadratics of the form: n² + an + b, where |a| < 1000 and |b| < 1000 -where |n| is the modulus/absolute value of ne.g. |11| = 11 and |−4| = 4 +where |n| is the modulus/absolute value of ne.g. |11| = 11 and |-4| = 4 Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n = 0. diff --git a/project_euler/problem_031/sol1.py b/project_euler/problem_031/sol1.py index ba40cf383..4c9c533ee 100644 --- a/project_euler/problem_031/sol1.py +++ b/project_euler/problem_031/sol1.py @@ -2,14 +2,14 @@ Coin sums Problem 31: https://projecteuler.net/problem=31 -In England the currency is made up of pound, £, and pence, p, and there are +In England the currency is made up of pound, f, and pence, p, and there are eight coins in general circulation: -1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p). 
-It is possible to make £2 in the following way: +1p, 2p, 5p, 10p, 20p, 50p, f1 (100p) and f2 (200p). +It is possible to make f2 in the following way: -1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p -How many different ways can £2 be made using any number of coins? +1xf1 + 1x50p + 2x20p + 1x5p + 1x2p + 3x1p +How many different ways can f2 be made using any number of coins? """ diff --git a/project_euler/problem_031/sol2.py b/project_euler/problem_031/sol2.py index f9e4dc384..574f8d410 100644 --- a/project_euler/problem_031/sol2.py +++ b/project_euler/problem_031/sol2.py @@ -3,17 +3,17 @@ Problem 31: https://projecteuler.net/problem=31 Coin sums -In England the currency is made up of pound, £, and pence, p, and there are +In England the currency is made up of pound, f, and pence, p, and there are eight coins in general circulation: -1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p). -It is possible to make £2 in the following way: +1p, 2p, 5p, 10p, 20p, 50p, f1 (100p) and f2 (200p). +It is possible to make f2 in the following way: -1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p -How many different ways can £2 be made using any number of coins? +1xf1 + 1x50p + 2x20p + 1x5p + 1x2p + 3x1p +How many different ways can f2 be made using any number of coins? Hint: - > There are 100 pence in a pound (£1 = 100p) + > There are 100 pence in a pound (f1 = 100p) > There are coins(in pence) are available: 1, 2, 5, 10, 20, 50, 100 and 200. > how many different ways you can combine these values to create 200 pence. diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index a402b5584..c0ca2ce10 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -3,7 +3,7 @@ We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1 through 5 pandigital. -The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing +The product 7254 is unusual, as the identity, 39 x 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital. Find the sum of all products whose multiplicand/multiplier/product identity can diff --git a/project_euler/problem_038/sol1.py b/project_euler/problem_038/sol1.py index 5bef273ea..382892723 100644 --- a/project_euler/problem_038/sol1.py +++ b/project_euler/problem_038/sol1.py @@ -3,9 +3,9 @@ Project Euler Problem 38: https://projecteuler.net/problem=38 Take the number 192 and multiply it by each of 1, 2, and 3: -192 × 1 = 192 -192 × 2 = 384 -192 × 3 = 576 +192 x 1 = 192 +192 x 2 = 384 +192 x 3 = 576 By concatenating each product we get the 1 to 9 pandigital, 192384576. We will call 192384576 the concatenated product of 192 and (1,2,3) diff --git a/project_euler/problem_040/sol1.py b/project_euler/problem_040/sol1.py index 69be37772..721bd063c 100644 --- a/project_euler/problem_040/sol1.py +++ b/project_euler/problem_040/sol1.py @@ -11,7 +11,7 @@ It can be seen that the 12th digit of the fractional part is 1. If dn represents the nth digit of the fractional part, find the value of the following expression. 
-d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000 +d1 x d10 x d100 x d1000 x d10000 x d100000 x d1000000 """ diff --git a/project_euler/problem_044/sol1.py b/project_euler/problem_044/sol1.py index 3b75b6a56..2613563a4 100644 --- a/project_euler/problem_044/sol1.py +++ b/project_euler/problem_044/sol1.py @@ -1,14 +1,14 @@ """ Problem 44: https://projecteuler.net/problem=44 -Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2. The first ten +Pentagonal numbers are generated by the formula, Pn=n(3n-1)/2. The first ten pentagonal numbers are: 1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ... It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference, -70 − 22 = 48, is not pentagonal. +70 - 22 = 48, is not pentagonal. Find the pair of pentagonal numbers, Pj and Pk, for which their sum and difference -are pentagonal and D = |Pk − Pj| is minimised; what is the value of D? +are pentagonal and D = |Pk - Pj| is minimised; what is the value of D? """ diff --git a/project_euler/problem_045/sol1.py b/project_euler/problem_045/sol1.py index d921b2802..8d016de6e 100644 --- a/project_euler/problem_045/sol1.py +++ b/project_euler/problem_045/sol1.py @@ -3,8 +3,8 @@ Problem 45: https://projecteuler.net/problem=45 Triangle, pentagonal, and hexagonal numbers are generated by the following formulae: Triangle T(n) = (n * (n + 1)) / 2 1, 3, 6, 10, 15, ... -Pentagonal P(n) = (n * (3 * n − 1)) / 2 1, 5, 12, 22, 35, ... -Hexagonal H(n) = n * (2 * n − 1) 1, 6, 15, 28, 45, ... +Pentagonal P(n) = (n * (3 * n - 1)) / 2 1, 5, 12, 22, 35, ... +Hexagonal H(n) = n * (2 * n - 1) 1, 6, 15, 28, 45, ... It can be verified that T(285) = P(165) = H(143) = 40755. Find the next triangle number that is also pentagonal and hexagonal. diff --git a/project_euler/problem_046/sol1.py b/project_euler/problem_046/sol1.py index 07dd9bbf8..f27f658e6 100644 --- a/project_euler/problem_046/sol1.py +++ b/project_euler/problem_046/sol1.py @@ -4,12 +4,12 @@ Problem 46: https://projecteuler.net/problem=46 It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. -9 = 7 + 2 × 12 -15 = 7 + 2 × 22 -21 = 3 + 2 × 32 -25 = 7 + 2 × 32 -27 = 19 + 2 × 22 -33 = 31 + 2 × 12 +9 = 7 + 2 x 12 +15 = 7 + 2 x 22 +21 = 3 + 2 x 32 +25 = 7 + 2 x 32 +27 = 19 + 2 x 22 +33 = 31 + 2 x 12 It turns out that the conjecture was false. diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index 1287e0d9e..c9c44a983 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -5,14 +5,14 @@ Problem 47 The first two consecutive numbers to have two distinct prime factors are: -14 = 2 × 7 -15 = 3 × 5 +14 = 2 x 7 +15 = 3 x 5 The first three consecutive numbers to have three distinct prime factors are: -644 = 2² × 7 × 23 -645 = 3 × 5 × 43 -646 = 2 × 17 × 19. +644 = 2² x 7 x 23 +645 = 3 x 5 x 43 +646 = 2 x 17 x 19. Find the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers? diff --git a/project_euler/problem_053/sol1.py b/project_euler/problem_053/sol1.py index a32b73c54..192cbf25e 100644 --- a/project_euler/problem_053/sol1.py +++ b/project_euler/problem_053/sol1.py @@ -10,7 +10,7 @@ In combinatorics, we use the notation, 5C3 = 10. In general, -nCr = n!/(r!(n−r)!),where r ≤ n, n! = n×(n−1)×...×3×2×1, and 0! = 1. +nCr = n!/(r!(n-r)!),where r ≤ n, n! = nx(n-1)x...x3x2x1, and 0! = 1. It is not until n = 23, that a value exceeds one-million: 23C10 = 1144066. 
How many, not necessarily distinct, values of nCr, for 1 ≤ n ≤ 100, are greater diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index 2807e893d..a349f3a1d 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -1,7 +1,7 @@ """ The first known prime found to exceed one million digits was discovered in 1999, -and is a Mersenne prime of the form 2**6972593 − 1; it contains exactly 2,098,960 -digits. Subsequently other Mersenne primes, of the form 2**p − 1, have been found +and is a Mersenne prime of the form 2**6972593 - 1; it contains exactly 2,098,960 +digits. Subsequently other Mersenne primes, of the form 2**p - 1, have been found which contain more digits. However, in 2004 there was found a massive non-Mersenne prime which contains 2,357,207 digits: (28433 * (2 ** 7830457 + 1)). diff --git a/project_euler/problem_104/sol1.py b/project_euler/problem_104/sol1.py index d84dbcfc9..a0267faa6 100644 --- a/project_euler/problem_104/sol1.py +++ b/project_euler/problem_104/sol1.py @@ -3,7 +3,7 @@ Project Euler Problem 104 : https://projecteuler.net/problem=104 The Fibonacci sequence is defined by the recurrence relation: -Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. +Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. It turns out that F541, which contains 113 digits, is the first Fibonacci number for which the last nine digits are 1-9 pandigital (contain all the digits 1 to 9, but not necessarily in order). And F2749, which contains 575 digits, is the first diff --git a/project_euler/problem_120/sol1.py b/project_euler/problem_120/sol1.py index 0e6821214..2f4039725 100644 --- a/project_euler/problem_120/sol1.py +++ b/project_euler/problem_120/sol1.py @@ -3,7 +3,7 @@ Problem 120 Square remainders: https://projecteuler.net/problem=120 Description: -Let r be the remainder when (a−1)^n + (a+1)^n is divided by a^2. +Let r be the remainder when (a-1)^n + (a+1)^n is divided by a^2. For example, if a = 7 and n = 3, then r = 42: 6^3 + 8^3 = 728 ≡ 42 mod 49. And as n varies, so too will r, but for a = 7 it turns out that r_max = 42. For 3 ≤ a ≤ 1000, find ∑ r_max. diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 7239e13a5..3dd31a2e8 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -4,7 +4,7 @@ Problem 123: https://projecteuler.net/problem=123 Name: Prime square remainders Let pn be the nth prime: 2, 3, 5, 7, 11, ..., and -let r be the remainder when (pn−1)^n + (pn+1)^n is divided by pn^2. +let r be the remainder when (pn-1)^n + (pn+1)^n is divided by pn^2. For example, when n = 3, p3 = 5, and 43 + 63 = 280 ≡ 5 mod 25. The least value of n for which the remainder first exceeds 10^9 is 7037. diff --git a/project_euler/problem_135/sol1.py b/project_euler/problem_135/sol1.py index ac91fa4e2..d57ace489 100644 --- a/project_euler/problem_135/sol1.py +++ b/project_euler/problem_135/sol1.py @@ -3,9 +3,9 @@ Project Euler Problem 135: https://projecteuler.net/problem=135 Given the positive integers, x, y, and z, are consecutive terms of an arithmetic progression, the least value of the positive integer, n, for which the equation, -x2 − y2 − z2 = n, has exactly two solutions is n = 27: +x2 - y2 - z2 = n, has exactly two solutions is n = 27: -342 − 272 − 202 = 122 − 92 − 62 = 27 +342 - 272 - 202 = 122 - 92 - 62 = 27 It turns out that n = 1155 is the least value which has exactly ten solutions. 
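The problem_135 statement above quotes a small identity that is easy to sanity-check; the snippet below is purely illustrative and uses only the values from the problem text:

# Both quoted solutions of x^2 - y^2 - z^2 = 27 use consecutive terms
# of an arithmetic progression, as the problem statement requires.
for x, y, z in [(34, 27, 20), (12, 9, 6)]:
    assert x - y == y - z
    assert x * x - y * y - z * z == 27
print("both quoted solutions give n = 27")
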
diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py index bc16bf985..9070455de 100644 --- a/project_euler/problem_144/sol1.py +++ b/project_euler/problem_144/sol1.py @@ -6,7 +6,7 @@ works its way back out. The specific white cell we will be considering is an ellipse with the equation 4x^2 + y^2 = 100 -The section corresponding to −0.01 ≤ x ≤ +0.01 at the top is missing, allowing the +The section corresponding to -0.01 ≤ x ≤ +0.01 at the top is missing, allowing the light to enter and exit through the hole.  The light beam in this problem starts at the point (0.0,10.1) just outside the white @@ -20,7 +20,7 @@ In the figure on the left, the red line shows the first two points of contact be the laser beam and the wall of the white cell; the blue line shows the line tangent to the ellipse at the point of incidence of the first bounce. -The slope m of the tangent line at any point (x,y) of the given ellipse is: m = −4x/y +The slope m of the tangent line at any point (x,y) of the given ellipse is: m = -4x/y The normal line is perpendicular to this tangent line at the point of incidence. diff --git a/project_euler/problem_174/sol1.py b/project_euler/problem_174/sol1.py index 33c1b158a..9a75e8638 100644 --- a/project_euler/problem_174/sol1.py +++ b/project_euler/problem_174/sol1.py @@ -14,7 +14,7 @@ t = 32 is type L(2). Let N(n) be the number of t ≤ 1000000 such that t is type L(n); for example, N(15) = 832. -What is ∑ N(n) for 1 ≤ n ≤ 10? +What is sum N(n) for 1 ≤ n ≤ 10? """ from collections import defaultdict diff --git a/pyproject.toml b/pyproject.toml index 0185f4d7b..ff22fba81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts + "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? "S101", # Use of `assert` detected -- DO NOT FIX diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index c18f0d85d..cae2068fa 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -3,7 +3,7 @@ def jaro_winkler(str1: str, str2: str) -> float: """ - Jaro–Winkler distance is a string metric measuring an edit distance between two + Jaro-Winkler distance is a string metric measuring an edit distance between two sequences. Output value is between 0.0 and 1.0. diff --git a/strings/manacher.py b/strings/manacher.py index fc8b01cd9..af1b10cf8 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -5,7 +5,7 @@ def palindromic_string(input_string: str) -> str: >>> palindromic_string('ababa') 'ababa' - Manacher’s algorithm which finds Longest palindromic Substring in linear time. + Manacher's algorithm which finds Longest palindromic Substring in linear time. 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. 
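Step 1 of the Manacher docstring above, turning "xyx" into "x|y|x", amounts to a one-line join. The helper below is a hypothetical sketch for illustration, not the repository's implementation:

def interleave(input_string: str) -> str:
    # Separate the characters with '|' so that every palindrome,
    # whether of odd or even length, gets a single center index.
    return "|".join(input_string)


assert interleave("xyx") == "x|y|x"
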
diff --git a/strings/prefix_function.py b/strings/prefix_function.py index 65bbe9100..04987deef 100644 --- a/strings/prefix_function.py +++ b/strings/prefix_function.py @@ -1,7 +1,7 @@ """ https://cp-algorithms.com/string/prefix-function.html -Prefix function Knuth–Morris–Pratt algorithm +Prefix function Knuth-Morris-Pratt algorithm Different algorithm than Knuth-Morris-Pratt pattern finding From d016fda51c08a604738e556a7ccb19e0f9c81dcb Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:56:14 +0300 Subject: [PATCH 025/104] Enable ruff RUF003 rule (#11376) * Enable ruff RUF003 rule * Update pyproject.toml --------- Co-authored-by: Christian Clauss --- dynamic_programming/fast_fibonacci.py | 2 +- graphs/ant_colony_optimization_algorithms.py | 4 ++-- machine_learning/polynomial_regression.py | 2 +- pyproject.toml | 3 --- strings/credit_card_validator.py | 2 +- 5 files changed, 5 insertions(+), 8 deletions(-) diff --git a/dynamic_programming/fast_fibonacci.py b/dynamic_programming/fast_fibonacci.py index 9f956ca2f..d04a5ac82 100644 --- a/dynamic_programming/fast_fibonacci.py +++ b/dynamic_programming/fast_fibonacci.py @@ -26,7 +26,7 @@ def _fib(n: int) -> tuple[int, int]: if n == 0: # (F(0), F(1)) return (0, 1) - # F(2n) = F(n)[2F(n+1) − F(n)] + # F(2n) = F(n)[2F(n+1) - F(n)] # F(2n+1) = F(n+1)^2+F(n)^2 a, b = _fib(n // 2) c = a * (b * 2 - a) diff --git a/graphs/ant_colony_optimization_algorithms.py b/graphs/ant_colony_optimization_algorithms.py index 652ad6144..13637da44 100644 --- a/graphs/ant_colony_optimization_algorithms.py +++ b/graphs/ant_colony_optimization_algorithms.py @@ -33,7 +33,7 @@ def main( pheromone_evaporation: float, alpha: float, beta: float, - q: float, # Pheromone system parameters Q,which is a constant + q: float, # Pheromone system parameters Q, which is a constant ) -> tuple[list[int], float]: """ Ant colony algorithm main function @@ -117,7 +117,7 @@ def pheromone_update( cities: dict[int, list[int]], pheromone_evaporation: float, ants_route: list[list[int]], - q: float, # Pheromone system parameters Q,which is a constant + q: float, # Pheromone system parameters Q, which is a constant best_path: list[int], best_distance: float, ) -> tuple[list[list[float]], list[int], float]: diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 19f7dc994..212f40bea 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -146,7 +146,7 @@ class PolynomialRegression: "Design matrix is not full rank, can't compute coefficients" ) - # np.linalg.pinv() computes the Moore–Penrose pseudoinverse using SVD + # np.linalg.pinv() computes the Moore-Penrose pseudoinverse using SVD self.params = np.linalg.pinv(X) @ y_train def predict(self, data: np.ndarray) -> np.ndarray: diff --git a/pyproject.toml b/pyproject.toml index ff22fba81..1134b7733 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,9 +10,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF001", # String contains ambiguous {}. Did you mean {}? - "RUF002", # Docstring contains ambiguous {}. Did you mean {}? - "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME diff --git a/strings/credit_card_validator.py b/strings/credit_card_validator.py index 78bf45740..b8da1c745 100644 --- a/strings/credit_card_validator.py +++ b/strings/credit_card_validator.py @@ -36,7 +36,7 @@ def luhn_validation(credit_card_number: str) -> bool: digit = int(cc_number[i]) digit *= 2 # If doubling of a number results in a two digit number - # i.e greater than 9(e.g., 6 × 2 = 12), + # i.e greater than 9(e.g., 6 x 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: From 3925b8155bebd84eababfba0f5a12e5129cfaa44 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 24 Apr 2024 07:32:25 +0300 Subject: [PATCH 026/104] Fix ARG005 per file ignore (#11383) --- machine_learning/linear_discriminant_analysis.py | 2 +- pyproject.toml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 606e11f36..86f28aef6 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -256,7 +256,7 @@ def valid_input( input_type: Callable[[object], num], # Usually float or int input_msg: str, err_msg: str, - condition: Callable[[num], bool] = lambda x: True, + condition: Callable[[num], bool] = lambda _: True, default: str | None = None, ) -> num: """ diff --git a/pyproject.toml b/pyproject.toml index 1134b7733..37ebeeb9c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,6 @@ max-complexity = 17 # default: 10 "graphs/minimum_spanning_tree_prims.py" = ["SIM114"] "hashes/enigma_machine.py" = ["BLE001"] "machine_learning/decision_tree.py" = ["SIM114"] -"machine_learning/linear_discriminant_analysis.py" = ["ARG005"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] "matrix/sherman_morrison.py" = ["SIM103", "SIM114"] "other/l*u_cache.py" = ["RUF012"] From 2d6be5fbb0be2b738d2c246138db9ccda9b6a853 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 30 Apr 2024 07:40:26 +0300 Subject: [PATCH 027/104] Enable ruff UP031 rule (#11388) --- data_structures/arrays/sudoku_solver.py | 4 ++-- neural_network/input_data.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 5c1cff06f..a8157a520 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -150,7 +150,7 @@ def solve_all(grids, name="", showif=0.0): display(grid_values(grid)) if values: display(values) - print("(%.5f seconds)\n" % t) + print(f"({t:.5f} seconds)\n") return (t, solved(values)) times, results = zip(*[time_solve(grid) for grid in grids]) @@ -217,4 +217,4 @@ if __name__ == "__main__": start = time.monotonic() solve(puzzle) t = time.monotonic() - start - print("Solved: %.5f sec" % t) + print(f"Solved: {t:.5f} sec") diff --git a/neural_network/input_data.py b/neural_network/input_data.py index d189e3f9e..f90287fe3 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -156,7 +156,8 @@ class _DataSet: self._rng = np.random.default_rng(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): - raise TypeError("Invalid image 
dtype %r, expected uint8 or float32" % dtype) + msg = f"Invalid image dtype {dtype!r}, expected uint8 or float32" + raise TypeError(msg) if fake_data: self._num_examples = 10000 self.one_hot = one_hot From a7e0b141d8eac30e8f9c4f01c3050e6cdb90f7d4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 06:58:03 +0200 Subject: [PATCH 028/104] [pre-commit.ci] pre-commit autoupdate (#11387) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/MarcoGorelli/auto-walrus: 0.3.3 → 0.3.4](https://github.com/MarcoGorelli/auto-walrus/compare/0.3.3...0.3.4) - [github.com/astral-sh/ruff-pre-commit: v0.4.1 → v0.4.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.1...v0.4.2) - [github.com/pre-commit/mirrors-mypy: v1.9.0 → v1.10.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.9.0...v1.10.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eedf6d939..744efc55f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus - rev: 0.3.3 + rev: 0.3.4 hooks: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.1 + rev: v0.4.2 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.9.0 + rev: v1.10.0 hooks: - id: mypy args: From c026b1952f92836c58e63017f4c75e76c43448a1 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 1 May 2024 13:42:54 +0300 Subject: [PATCH 029/104] adding a matrix equalization algorithm (#11360) * adding a matrix equalization algorithm * Adding url for more details * Implementing suggestions --- matrix/matrix_equalization.py | 55 +++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 matrix/matrix_equalization.py diff --git a/matrix/matrix_equalization.py b/matrix/matrix_equalization.py new file mode 100644 index 000000000..e7e76505c --- /dev/null +++ b/matrix/matrix_equalization.py @@ -0,0 +1,55 @@ +from sys import maxsize + + +def array_equalization(vector: list[int], step_size: int) -> int: + """ + This algorithm equalizes all elements of the input vector + to a common value, by making the minimal number of + "updates" under the constraint of a step size (step_size). + + >>> array_equalization([1, 1, 6, 2, 4, 6, 5, 1, 7, 2, 2, 1, 7, 2, 2], 4) + 4 + >>> array_equalization([22, 81, 88, 71, 22, 81, 632, 81, 81, 22, 92], 2) + 5 + >>> array_equalization([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 5) + 0 + >>> array_equalization([22, 22, 22, 33, 33, 33], 2) + 2 + >>> array_equalization([1, 2, 3], 0) + Traceback (most recent call last): + ValueError: Step size must be positive and non-zero. + >>> array_equalization([1, 2, 3], -1) + Traceback (most recent call last): + ValueError: Step size must be positive and non-zero. + >>> array_equalization([1, 2, 3], 0.5) + Traceback (most recent call last): + ValueError: Step size must be an integer. 
+ >>> array_equalization([1, 2, 3], maxsize) + 1 + """ + if step_size <= 0: + raise ValueError("Step size must be positive and non-zero.") + if not isinstance(step_size, int): + raise ValueError("Step size must be an integer.") + + unique_elements = set(vector) + min_updates = maxsize + + for element in unique_elements: + elem_index = 0 + updates = 0 + while elem_index < len(vector): + if vector[elem_index] != element: + updates += 1 + elem_index += step_size + else: + elem_index += 1 + min_updates = min(min_updates, updates) + + return min_updates + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 5131e3145dcec9e232c8e8a807ad387f4f9a3d38 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 1 May 2024 22:27:59 +0300 Subject: [PATCH 030/104] Fix some ARG002 per file ignores (#11382) * Fix some ARG002 per file ignores * Fix * updating DIRECTORY.md * Fix review issue * Fix review issue --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + audio_filters/show_response.py | 3 ++- data_structures/hashing/hash_table.py | 3 +++ data_structures/hashing/quadratic_probing.py | 2 +- pyproject.toml | 3 --- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f6d6cb463..4a053a3f1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -773,6 +773,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) + * [Matrix Equalization](matrix/matrix_equalization.py) * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py index 097b8152b..f9c9537c0 100644 --- a/audio_filters/show_response.py +++ b/audio_filters/show_response.py @@ -1,5 +1,6 @@ from __future__ import annotations +from abc import abstractmethod from math import pi from typing import Protocol @@ -8,6 +9,7 @@ import numpy as np class FilterType(Protocol): + @abstractmethod def process(self, sample: float) -> float: """ Calculate y[n] @@ -15,7 +17,6 @@ class FilterType(Protocol): >>> issubclass(FilterType, Protocol) True """ - return 0.0 def get_bounds( diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 7fe57068f..40fcad9a3 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +from abc import abstractmethod + from .number_theory.prime_numbers import next_prime @@ -173,6 +175,7 @@ class HashTable: self.values[key] = data self._keys[key] = data + @abstractmethod def _collision_resolution(self, key, data=None): """ This method is a type of open addressing which is used for handling collision. diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py index 2f3401ec8..56d4926ee 100644 --- a/data_structures/hashing/quadratic_probing.py +++ b/data_structures/hashing/quadratic_probing.py @@ -11,7 +11,7 @@ class QuadraticProbing(HashTable): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - def _collision_resolution(self, key, data=None): + def _collision_resolution(self, key, data=None): # noqa: ARG002 """ Quadratic probing is an open addressing scheme used for resolving collisions in hash table. 
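The hunks above all apply one pattern: a method that exists only to declare an interface is marked with `@abstractmethod`, so it needs neither a placeholder return value nor an ARG002 (unused argument) suppression. A minimal sketch of that pattern, using illustrative names that are not taken from the repository:

from abc import abstractmethod
from typing import Protocol


class AudioFilter(Protocol):
    @abstractmethod
    def process(self, sample: float) -> float:
        """Interface only: implementers must supply the body."""


class PassThrough:
    def process(self, sample: float) -> float:
        # Concrete implementation; structurally satisfies AudioFilter.
        return sample


def run(filt: AudioFilter, sample: float) -> float:
    return filt.process(sample)


print(run(PassThrough(), 0.5))  # prints 0.5

The remaining `# noqa: ARG002` in `quadratic_probing.py` covers the other case: a concrete override that intentionally ignores one of its parameters.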
diff --git a/pyproject.toml b/pyproject.toml index 37ebeeb9c..4c512ca89 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,11 +76,8 @@ max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] -"audio_filters/show_response.py" = ["ARG002"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] "data_structures/binary_tree/treap.py" = ["SIM114"] -"data_structures/hashing/hash_table.py" = ["ARG002"] -"data_structures/hashing/quadratic_probing.py" = ["ARG002"] "data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] "data_structures/heap/max_heap.py" = ["SIM114"] "graphs/minimum_spanning_tree_prims.py" = ["SIM114"] From ea53051576a9c5e7398ca2ae6a0823ca54ac3947 Mon Sep 17 00:00:00 2001 From: Xuehai Pan Date: Fri, 3 May 2024 00:43:59 +0800 Subject: [PATCH 031/104] Use `spawn` start method in multiprocessing programs (#11391) * Use `spawn` start method in multiprocessing programs * Set `spawn` start method in doctest * Use `with` statement for locks * Pass multiprocessing context explicitly --- sorts/odd_even_transposition_parallel.py | 79 ++++++++++++++++-------- 1 file changed, 53 insertions(+), 26 deletions(-) diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 9d2bcdbd7..5d4e09b21 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -11,11 +11,11 @@ They are synchronized with locks and message passing but other forms of synchronization could be used. """ -from multiprocessing import Lock, Pipe, Process +import multiprocessing as mp # lock used to ensure that two processes do not access a pipe at the same time # NOTE This breaks testing on build runner. May work better locally -# process_lock = Lock() +# process_lock = mp.Lock() """ The function run by the processes that sorts the list @@ -29,8 +29,17 @@ resultPipe = the pipe used to send results back to main """ -def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): - process_lock = Lock() +def oe_process( + position, + value, + l_send, + r_send, + lr_cv, + rr_cv, + result_pipe, + multiprocessing_context, +): + process_lock = multiprocessing_context.Lock() # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to @@ -38,27 +47,23 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): for i in range(10): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor - process_lock.acquire() - r_send[1].send(value) - process_lock.release() + with process_lock: + r_send[1].send(value) # receive your right neighbor's value - process_lock.acquire() - temp = rr_cv[0].recv() - process_lock.release() + with process_lock: + temp = rr_cv[0].recv() # take the lower value since you are on the left value = min(value, temp) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor - process_lock.acquire() - l_send[1].send(value) - process_lock.release() + with process_lock: + l_send[1].send(value) # receive your left neighbor's value - process_lock.acquire() - temp = lr_cv[0].recv() - process_lock.release() + with process_lock: + temp = lr_cv[0].recv() # take the higher value since you are on the right value = max(value, temp) @@ -94,39 +99,60 @@ def odd_even_transposition(arr): >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list + [1]) False """ + # spawn method 
is considered safer than fork + multiprocessing_context = mp.get_context("spawn") + process_array_ = [] result_pipe = [] # initialize the list of pipes where the values will be retrieved for _ in arr: - result_pipe.append(Pipe()) + result_pipe.append(multiprocessing_context.Pipe()) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop - temp_rs = Pipe() - temp_rr = Pipe() + temp_rs = multiprocessing_context.Pipe() + temp_rr = multiprocessing_context.Pipe() process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, - args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), + args=( + 0, + arr[0], + None, + temp_rs, + None, + temp_rr, + result_pipe[0], + multiprocessing_context, + ), ) ) temp_lr = temp_rs temp_ls = temp_rr for i in range(1, len(arr) - 1): - temp_rs = Pipe() - temp_rr = Pipe() + temp_rs = multiprocessing_context.Pipe() + temp_rr = multiprocessing_context.Pipe() process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, - args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), + args=( + i, + arr[i], + temp_ls, + temp_rs, + temp_lr, + temp_rr, + result_pipe[i], + multiprocessing_context, + ), ) ) temp_lr = temp_rs temp_ls = temp_rr process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, args=( len(arr) - 1, @@ -136,6 +162,7 @@ def odd_even_transposition(arr): temp_lr, None, result_pipe[len(arr) - 1], + multiprocessing_context, ), ) ) From 1868c0b6375188a9034478a2711e40c343d00c2e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 May 2024 21:38:58 +0200 Subject: [PATCH 032/104] [pre-commit.ci] pre-commit autoupdate (#11394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.2 → v0.4.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.2...v0.4.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 744efc55f..210b74940 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.2 + rev: v0.4.3 hooks: - id: ruff - id: ruff-format From c599f6c9107a1b09c08ddce17053d7b5d0895a83 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 10 May 2024 22:59:53 +0300 Subject: [PATCH 033/104] Fix some SIM114 per file ignores (#11395) * updating DIRECTORY.md * Fix some SIM114 per file ignores * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix review issue --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/binary_tree/treap.py | 4 +--- data_structures/heap/max_heap.py | 2 +- graphs/minimum_spanning_tree_prims.py | 2 +- machine_learning/decision_tree.py | 2 +- matrix/sherman_morrison.py | 2 +- pyproject.toml | 6 +----- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index e7ddf931b..3114c6fa1 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -39,9 +39,7 @@ def split(root: 
Node | None, value: int) -> tuple[Node | None, Node | None]: Left tree contains all values less than split value. Right tree contains all values greater or equal, than split value """ - if root is None: # None tree is split into 2 Nones - return None, None - elif root.value is None: + if root is None or root.value is None: # None tree is split into 2 Nones return None, None elif value < root.value: """ diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index 5a9f9cf88..589f2595a 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -38,7 +38,7 @@ class BinaryHeap: def __swap_down(self, i: int) -> None: """Swap the element down""" while self.__size >= 2 * i: - if 2 * i + 1 > self.__size: + if 2 * i + 1 > self.__size: # noqa: SIM114 bigger_child = 2 * i elif self.__heap[2 * i] > self.__heap[2 * i + 1]: bigger_child = 2 * i diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 90c9f4c91..d0b45d7ef 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -16,7 +16,7 @@ class Heap: if start > size // 2 - 1: return else: - if 2 * start + 2 >= size: + if 2 * start + 2 >= size: # noqa: SIM114 smallest_child = 2 * start + 1 elif heap[2 * start + 1] < heap[2 * start + 2]: smallest_child = 2 * start + 1 diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index e48905eea..d0bd6ab0b 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -105,7 +105,7 @@ class DecisionTree: the predictor """ for i in range(len(x)): - if len(x[:i]) < self.min_leaf_size: + if len(x[:i]) < self.min_leaf_size: # noqa: SIM114 continue elif len(x[i:]) < self.min_leaf_size: continue diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 7f10ae706..e2a09c1d0 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -65,7 +65,7 @@ class Matrix: >>> a.validate_indices((0, 0)) True """ - if not (isinstance(loc, (list, tuple)) and len(loc) == 2): + if not (isinstance(loc, (list, tuple)) and len(loc) == 2): # noqa: SIM114 return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False diff --git a/pyproject.toml b/pyproject.toml index 4c512ca89..c07bc9c48 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,14 +77,10 @@ max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] -"data_structures/binary_tree/treap.py" = ["SIM114"] "data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] -"data_structures/heap/max_heap.py" = ["SIM114"] -"graphs/minimum_spanning_tree_prims.py" = ["SIM114"] "hashes/enigma_machine.py" = ["BLE001"] -"machine_learning/decision_tree.py" = ["SIM114"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] -"matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"matrix/sherman_morrison.py" = ["SIM103"] "other/l*u_cache.py" = ["RUF012"] "physics/newtons_second_law_of_motion.py" = ["BLE001"] "project_euler/problem_099/sol1.py" = ["SIM115"] From 1f368da06d361e3d1415a2ec7d8857068b746586 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 13:38:55 +0200 Subject: [PATCH 034/104] [pre-commit.ci] pre-commit autoupdate (#11402) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
* [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.3 → v0.4.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.3...v0.4.4) - [github.com/tox-dev/pyproject-fmt: 1.8.0 → 2.0.4](https://github.com/tox-dev/pyproject-fmt/compare/1.8.0...2.0.4) - [github.com/abravalheri/validate-pyproject: v0.16 → v0.17](https://github.com/abravalheri/validate-pyproject/compare/v0.16...v0.17) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +- pyproject.toml | 182 +++++++++++++++++++++++----------------- 2 files changed, 106 insertions(+), 82 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 210b74940..521769096 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.3 + rev: v0.4.4 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.8.0" + rev: "2.0.4" hooks: - id: pyproject-fmt @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.16 + rev: v0.17 hooks: - id: validate-pyproject diff --git a/pyproject.toml b/pyproject.toml index c07bc9c48..89ed22bc6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,61 +1,61 @@ [tool.ruff] -lint.ignore = [ # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +lint.ignore = [ # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -lint.select = [ # https://beta.ruff.rs/docs/rules - "A", # flake8-builtins - "ARG", # flake8-unused-arguments - "ASYNC", # flake8-async - "B", # flake8-bugbear - "BLE", # flake8-blind-except - "C4", # flake8-comprehensions - "C90", # McCabe cyclomatic complexity - "DJ", # flake8-django - "DTZ", # flake8-datetimez - "E", # pycodestyle - "EM", # flake8-errmsg - "EXE", # flake8-executable - "F", # Pyflakes - "FA", # flake8-future-annotations - "FLY", # flynt - "G", # flake8-logging-format - "I", # isort - "ICN", # flake8-import-conventions - "INP", # flake8-no-pep420 - "INT", # flake8-gettext - "ISC", # flake8-implicit-str-concat - "N", # pep8-naming - "NPY", # NumPy-specific rules - "PD", # pandas-vet - "PGH", # pygrep-hooks - "PIE", # flake8-pie - "PL", # Pylint - "PT", # flake8-pytest-style - "PYI", # flake8-pyi - "RSE", # flake8-raise - "RUF", # Ruff-specific rules - "S", # flake8-bandit - "SIM", # flake8-simplify - "SLF", # flake8-self - "T10", # flake8-debugger - "TD", # flake8-todos - "TID", # flake8-tidy-imports - "UP", # pyupgrade - "W", # pycodestyle - "YTT", # flake8-2020 +lint.select = [ # https://beta.ruff.rs/docs/rules + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "ASYNC", # flake8-async + "B", # flake8-bugbear + "BLE", # flake8-blind-except + "C4", # flake8-comprehensions + "C90", # McCabe cyclomatic complexity + "DJ", # flake8-django + "DTZ", # flake8-datetimez + "E", # pycodestyle + "EM", # flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "FA", # flake8-future-annotations + "FLY", # flynt + "G", # flake8-logging-format + "I", # isort + "ICN", # flake8-import-conventions + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "ISC", # flake8-implicit-str-concat + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PD", # pandas-vet + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + "PT", # flake8-pytest-style + "PYI", # flake8-pyi + "RSE", # flake8-raise + "RUF", # Ruff-specific rules + "S", # flake8-bandit + "SIM", # flake8-simplify + "SLF", # flake8-self + "T10", # flake8-debugger + "TD", # flake8-todos + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle + "YTT", # flake8-2020 # "ANN", # flake8-annotations # FIX ME? # "COM", # flake8-commas # "D", # pydocstyle -- FIX ME? 
@@ -71,27 +71,51 @@ lint.select = [ # https://beta.ruff.rs/docs/rules output-format = "full" target-version = "py312" -[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 17 # default: 10 +[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE +max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] -"arithmetic_analysis/newton_raphson.py" = ["PGH001"] -"data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] -"data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] -"hashes/enigma_machine.py" = ["BLE001"] -"machine_learning/sequential_minimum_optimization.py" = ["SIM115"] -"matrix/sherman_morrison.py" = ["SIM103"] -"other/l*u_cache.py" = ["RUF012"] -"physics/newtons_second_law_of_motion.py" = ["BLE001"] -"project_euler/problem_099/sol1.py" = ["SIM115"] -"sorts/external_sort.py" = ["SIM115"] +"arithmetic_analysis/newton_raphson.py" = [ + "PGH001", +] +"data_structures/binary_tree/binary_search_tree_recursive.py" = [ + "BLE001", +] +"data_structures/hashing/tests/test_hash_map.py" = [ + "BLE001", +] +"hashes/enigma_machine.py" = [ + "BLE001", +] +"machine_learning/sequential_minimum_optimization.py" = [ + "SIM115", +] +"matrix/sherman_morrison.py" = [ + "SIM103", +] +"other/l*u_cache.py" = [ + "RUF012", +] +"physics/newtons_second_law_of_motion.py" = [ + "BLE001", +] +"project_euler/problem_099/sol1.py" = [ + "SIM115", +] +"sorts/external_sort.py" = [ + "SIM115", +] -[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES -allow-magic-value-types = ["float", "int", "str"] -max-args = 10 # default: 5 -max-branches = 20 # default: 12 -max-returns = 8 # default: 6 -max-statements = 88 # default: 50 +[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES +allow-magic-value-types = [ + "float", + "int", + "str", +] +max-args = 10 # default: 5 +max-branches = 20 # default: 12 +max-returns = 8 # default: 6 +max-statements = 88 # default: 50 [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" @@ -99,17 +123,17 @@ skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_n [tool.pytest.ini_options] markers = [ - "mat_ops: mark a test as utilizing matrix operations.", + "mat_ops: mark a test as utilizing matrix operations.", ] addopts = [ - "--durations=10", - "--doctest-modules", - "--showlocals", + "--durations=10", + "--doctest-modules", + "--showlocals", ] [tool.coverage.report] omit = [ ".env/*", - "project_euler/*" + "project_euler/*", ] sort = "Cover" From 0139143abb286027bd3954f3862aab4558642019 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 22:44:57 +0200 Subject: [PATCH 035/104] [pre-commit.ci] pre-commit autoupdate (#11408) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 2.0.4 → 2.1.1](https://github.com/tox-dev/pyproject-fmt/compare/2.0.4...2.1.1) - [github.com/abravalheri/validate-pyproject: v0.17 → v0.18](https://github.com/abravalheri/validate-pyproject/compare/v0.17...v0.18) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +-- pyproject.toml | 79 ++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 43 deletions(-) diff 
--git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 521769096..b63457ca8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.0.4" + rev: "2.1.1" hooks: - id: pyproject-fmt @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.17 + rev: v0.18 hooks: - id: validate-pyproject diff --git a/pyproject.toml b/pyproject.toml index 89ed22bc6..5b8ce4e72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,9 @@ [tool.ruff] -lint.ignore = [ # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] -lint.select = [ # https://beta.ruff.rs/docs/rules +target-version = "py312" + +output-format = "full" +lint.select = [ + # https://beta.ruff.rs/docs/rules "A", # flake8-builtins "ARG", # flake8-unused-arguments "ASYNC", # flake8-async @@ -68,54 +56,63 @@ lint.select = [ # https://beta.ruff.rs/docs/rules # "TCH", # flake8-type-checking # "TRY", # tryceratops ] -output-format = "full" -target-version = "py312" - -[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 17 # default: 10 - -[tool.ruff.lint.per-file-ignores] -"arithmetic_analysis/newton_raphson.py" = [ +lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ "PGH001", ] -"data_structures/binary_tree/binary_search_tree_recursive.py" = [ +lint.per-file-ignores."data_structures/binary_tree/binary_search_tree_recursive.py" = [ "BLE001", ] -"data_structures/hashing/tests/test_hash_map.py" = [ +lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ "BLE001", ] -"hashes/enigma_machine.py" = [ +lint.per-file-ignores."hashes/enigma_machine.py" = [ "BLE001", ] -"machine_learning/sequential_minimum_optimization.py" = [ +lint.per-file-ignores."machine_learning/sequential_minimum_optimization.py" = [ "SIM115", ] -"matrix/sherman_morrison.py" = [ +lint.per-file-ignores."matrix/sherman_morrison.py" = [ "SIM103", ] -"other/l*u_cache.py" = [ +lint.per-file-ignores."other/l*u_cache.py" = [ "RUF012", ] -"physics/newtons_second_law_of_motion.py" = [ +lint.per-file-ignores."physics/newtons_second_law_of_motion.py" = [ "BLE001", ] -"project_euler/problem_099/sol1.py" = [ +lint.per-file-ignores."project_euler/problem_099/sol1.py" = [ "SIM115", ] -"sorts/external_sort.py" = [ +lint.per-file-ignores."sorts/external_sort.py" = [ "SIM115", ] - -[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES -allow-magic-value-types = [ 
+lint.mccabe.max-complexity = 17 # default: 10 +lint.pylint.allow-magic-value-types = [ "float", "int", "str", ] -max-args = 10 # default: 5 -max-branches = 20 # default: 12 -max-returns = 8 # default: 6 -max-statements = 88 # default: 50 +lint.pylint.max-args = 10 # default: 5 +lint.pylint.max-branches = 20 # default: 12 +lint.pylint.max-returns = 8 # default: 6 +lint.pylint.max-statements = 88 # default: 50 +lint.ignore = [ + # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" From 82aa909db7736d8022532bee4dc381072d8c5b1f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 May 2024 21:56:48 -0400 Subject: [PATCH 036/104] [pre-commit.ci] pre-commit autoupdate (#11417) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.4 → v0.4.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.4...v0.4.5) - [github.com/codespell-project/codespell: v2.2.6 → v2.3.0](https://github.com/codespell-project/codespell/compare/v2.2.6...v2.3.0) - [github.com/tox-dev/pyproject-fmt: 2.1.1 → 2.1.3](https://github.com/tox-dev/pyproject-fmt/compare/2.1.1...2.1.3) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * iterable * at most --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- graphs/dijkstra_algorithm.py | 2 +- project_euler/problem_047/sol1.py | 2 +- pyproject.toml | 35 ++++++++++++++++--------------- 4 files changed, 23 insertions(+), 22 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b63457ca8..43bf547de 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,20 +16,20 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.4 + rev: v0.4.5 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.3.0 hooks: - id: codespell additional_dependencies: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.1" + rev: "2.1.3" hooks: - id: pyproject-fmt diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 
2efa2cb63..51412b790 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -215,7 +215,7 @@ class PriorityQueue: [(5, 'A'), (15, 'B')] """ idx = self.pos[tup[1]] - # assuming the new_d is atmost old_d + # assuming the new_d is at most old_d self.array[idx] = (new_d, tup[1]) while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]: self.swap(idx, self.par(idx)) diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index c9c44a983..4ecd4f4b4 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -58,7 +58,7 @@ def upf_len(num: int) -> int: def equality(iterable: list) -> bool: """ - Check equality of ALL elements in an interable. + Check equality of ALL elements in an iterable >>> equality([1, 2, 3, 4]) False >>> equality([2, 2, 2, 2]) diff --git a/pyproject.toml b/pyproject.toml index 5b8ce4e72..429f4fab9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,24 @@ lint.select = [ # "TCH", # flake8-type-checking # "TRY", # tryceratops ] +lint.ignore = [ + # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] + lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ "PGH001", ] @@ -96,23 +114,6 @@ lint.pylint.max-args = 10 # default: 5 lint.pylint.max-branches = 20 # default: 12 lint.pylint.max-returns = 8 # default: 6 lint.pylint.max-statements = 88 # default: 50 -lint.ignore = [ - # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" From b8afb214f8c8d185dc42dafb9676becf512ca7fa Mon Sep 17 00:00:00 2001 From: Marco-campione-github <80974790+Marco-campione-github@users.noreply.github.com> Date: Fri, 31 May 2024 10:11:09 +0200 Subject: [PATCH 037/104] Changed the N to self.N in show_data in segment_tree.py (#11276) --- data_structures/binary_tree/segment_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index c7069b3f6..084fcf849 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -98,7 +98,7 @@ class SegmentTree: def show_data(self): show_list = [] - for i in range(1, N + 1): + for i in range(1, self.N + 1): show_list += [self.query(i, i)] print(show_list) From 70bd06db4642a2323ff397b041d40bc95ed6a5bf Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Sat, 1 Jun 2024 05:09:03 -0400 Subject: [PATCH 038/104] add doctest/document to actual_power and document to power (#11187) * Update power.py * Update divide_and_conquer/power.py --------- Co-authored-by: Tianyi Zheng --- divide_and_conquer/power.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/divide_and_conquer/power.py b/divide_and_conquer/power.py index f2e023afd..faf6a3476 100644 --- a/divide_and_conquer/power.py +++ b/divide_and_conquer/power.py @@ -2,6 +2,20 @@ def actual_power(a: int, b: int): """ Function using divide and conquer to calculate a^b. It only works for integer a,b. + + :param a: The base of the power operation, an integer. + :param b: The exponent of the power operation, a non-negative integer. + :return: The result of a^b. + + Examples: + >>> actual_power(3, 2) + 9 + >>> actual_power(5, 3) + 125 + >>> actual_power(2, 5) + 32 + >>> actual_power(7, 0) + 1 """ if b == 0: return 1 @@ -13,6 +27,10 @@ def actual_power(a: int, b: int): def power(a: int, b: int) -> float: """ + :param a: The base (integer). + :param b: The exponent (integer). + :return: The result of a^b, as a float for negative exponents. 
+ >>> power(4,6) 4096 >>> power(2,3) From 723cf9c42839c47e9e6fb83362a7391177355505 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Jun 2024 02:17:07 -0700 Subject: [PATCH 039/104] Remove duplicate implementation of median of two arrays algorithm (#11420) * Remove duplicate implementation of median of two arrays algorithm Remove maths/median_of_two_arrays.py because the repo has two implementations of this algorithm, with data_structures/arrays/median_two_array.py being the other. Even though maths/median_of_two_arrays.py is the older implementation, the newer implementation is better documented, has better error handling, and is already located in a more appropriate directory. * updating DIRECTORY.md --------- Co-authored-by: tianyizheng02 --- DIRECTORY.md | 1 - maths/median_of_two_arrays.py | 33 --------------------------------- 2 files changed, 34 deletions(-) delete mode 100644 maths/median_of_two_arrays.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 4a053a3f1..2094fc3a9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -661,7 +661,6 @@ * [Manhattan Distance](maths/manhattan_distance.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) - * [Median Of Two Arrays](maths/median_of_two_arrays.py) * [Minkowski Distance](maths/minkowski_distance.py) * [Mobius Function](maths/mobius_function.py) * [Modular Division](maths/modular_division.py) diff --git a/maths/median_of_two_arrays.py b/maths/median_of_two_arrays.py deleted file mode 100644 index 55aa587a9..000000000 --- a/maths/median_of_two_arrays.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import annotations - - -def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float: - """ - >>> median_of_two_arrays([1, 2], [3]) - 2 - >>> median_of_two_arrays([0, -1.1], [2.5, 1]) - 0.5 - >>> median_of_two_arrays([], [2.5, 1]) - 1.75 - >>> median_of_two_arrays([], [0]) - 0 - >>> median_of_two_arrays([], []) - Traceback (most recent call last): - ... - IndexError: list index out of range - """ - all_numbers = sorted(nums1 + nums2) - div, mod = divmod(len(all_numbers), 2) - if mod == 1: - return all_numbers[div] - else: - return (all_numbers[div] + all_numbers[div - 1]) / 2 - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - array_1 = [float(x) for x in input("Enter the elements of first array: ").split()] - array_2 = [float(x) for x in input("Enter the elements of second array: ").split()] - print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}") From edee8e644b09a21a1f70d3a59d57feed51c74004 Mon Sep 17 00:00:00 2001 From: Vishal Kumar Gupta Date: Sun, 2 Jun 2024 02:41:40 +0100 Subject: [PATCH 040/104] use format to remove '0b' (#11307) * use format to remove '0b' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: error message for float input --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- bit_manipulation/binary_and_operator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py index 36f6c668d..f33b8b1c0 100644 --- a/bit_manipulation/binary_and_operator.py +++ b/bit_manipulation/binary_and_operator.py @@ -26,7 +26,7 @@ def binary_and(a: int, b: int) -> str: >>> binary_and(0, 1.1) Traceback (most recent call last): ... 
-    TypeError: 'float' object cannot be interpreted as an integer
+    ValueError: Unknown format code 'b' for object of type 'float'
     >>> binary_and("0", "1")
     Traceback (most recent call last):
     ...
@@ -35,8 +35,8 @@ def binary_and(a: int, b: int) -> str:
     if a < 0 or b < 0:
         raise ValueError("the value of both inputs must be positive")

-    a_binary = str(bin(a))[2:]  # remove the leading "0b"
-    b_binary = str(bin(b))[2:]  # remove the leading "0b"
+    a_binary = format(a, "b")
+    b_binary = format(b, "b")

     max_len = max(len(a_binary), len(b_binary))

From 2f1704dae579295ea2f47584ef80b4b321a284d7 Mon Sep 17 00:00:00 2001
From: Mandeep Singh <135956602+MannCode@users.noreply.github.com>
Date: Sun, 2 Jun 2024 18:27:35 -0700
Subject: [PATCH 041/104] issue #11150 Ensure explicit column selection and
 data type setting in data reading process. (#11302)

* issue #11150 Ensure explicit column selection and data type setting in data
  reading process.

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 machine_learning/sequential_minimum_optimization.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py
index 3abdd6ccb..2ebdeb764 100644
--- a/machine_learning/sequential_minimum_optimization.py
+++ b/machine_learning/sequential_minimum_optimization.py
@@ -463,7 +463,11 @@ def test_cancel_data():
     with open(r"cancel_data.csv", "w") as f:
         f.write(content)

-    data = pd.read_csv(r"cancel_data.csv", header=None)
+    data = pd.read_csv(
+        "cancel_data.csv",
+        header=None,
+        dtype={0: str},  # Assuming the first column contains string data
+    )

     # 1: pre-processing data
     del data[data.columns.tolist()[0]]

From ffaa976f6c5a5de30e284ae2fc8122f40cd3fa6a Mon Sep 17 00:00:00 2001
From: Harsh buddhdev
Date: Sun, 2 Jun 2024 23:00:26 -0400
Subject: [PATCH 042/104] Fixes #9943 (#10252)

* added doctest for all_permutations.py

* added doctest for all_subsequences.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

* doctest added

* updated

* Update backtracking/all_subsequences.py

---------

Co-authored-by: Harsh Buddhdev
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Tianyi Zheng
---
 backtracking/all_permutations.py | 36 ++++++++++++++++++++++
 backtracking/all_subsequences.py | 52 +++++++++++++++++++++++++++++++-
 2 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py
index c483cd62c..f376e6fa0 100644
--- a/backtracking/all_permutations.py
+++ b/backtracking/all_permutations.py
@@ -23,6 +23,42 @@ def create_state_space_tree(
     Creates a state space tree to iterate through each branch using DFS.
     We know that each state has exactly len(sequence) - index children.
     It terminates when it reaches the end of the given sequence.
+
+    :param sequence: The input sequence for which permutations are generated.
+    :param current_sequence: The current permutation being built.
+    :param index: The current index in the sequence.
+    :param index_used: List to track which elements are used in the permutation.
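The output order in Example 1 below also matches `itertools.permutations`, which gives an easy cross-check (a sketch written for this note, not part of the patch):

from itertools import permutations

# The same six permutations, in the same order that
# create_state_space_tree prints them for [1, 2, 3].
expected = [list(p) for p in permutations([1, 2, 3])]
print(expected)
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]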
+ + Example 1: + >>> sequence = [1, 2, 3] + >>> current_sequence = [] + >>> index_used = [False, False, False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + [1, 2, 3] + [1, 3, 2] + [2, 1, 3] + [2, 3, 1] + [3, 1, 2] + [3, 2, 1] + + Example 2: + >>> sequence = ["A", "B", "C"] + >>> current_sequence = [] + >>> index_used = [False, False, False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + ['A', 'B', 'C'] + ['A', 'C', 'B'] + ['B', 'A', 'C'] + ['B', 'C', 'A'] + ['C', 'A', 'B'] + ['C', 'B', 'A'] + + Example 3: + >>> sequence = [1] + >>> current_sequence = [] + >>> index_used = [False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + [1] """ if index == len(sequence): diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index 7844a829d..18696054e 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -22,6 +22,56 @@ def create_state_space_tree( Creates a state space tree to iterate through each branch using DFS. We know that each state has exactly two children. It terminates when it reaches the end of the given sequence. + + :param sequence: The input sequence for which subsequences are generated. + :param current_subsequence: The current subsequence being built. + :param index: The current index in the sequence. + + Example: + >>> sequence = [3, 2, 1] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + [1] + [2] + [2, 1] + [3] + [3, 1] + [3, 2] + [3, 2, 1] + + >>> sequence = ["A", "B"] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + ['B'] + ['A'] + ['A', 'B'] + + >>> sequence = [] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + + >>> sequence = [1, 2, 3, 4] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + [4] + [3] + [3, 4] + [2] + [2, 4] + [2, 3] + [2, 3, 4] + [1] + [1, 4] + [1, 3] + [1, 3, 4] + [1, 2] + [1, 2, 4] + [1, 2, 3] + [1, 2, 3, 4] """ if index == len(sequence): @@ -35,7 +85,7 @@ def create_state_space_tree( if __name__ == "__main__": - seq: list[Any] = [3, 1, 2, 4] + seq: list[Any] = [1, 2, 3] generate_all_subsequences(seq) seq.clear() From c919579869ae9f57d6878336af6de6bc9a001c61 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Mon, 3 Jun 2024 11:15:01 +0800 Subject: [PATCH 043/104] Add KL divergence loss algorithm (#11238) * Add KL divergence loss algorithm * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 16e5a3278..150035661 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -629,6 +629,40 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> return np.mean(loss) +def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Kullback-Leibler divergence (KL divergence) loss between true labels + and predicted probabilities. + + KL divergence loss quantifies dissimilarity between true labels and predicted + probabilities. It's often used in training generative models. 
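A quick hand check of the first doctest below, using the definition stated next (the arithmetic is recomputed for this note, not taken from the patch): with y_true = (0.2, 0.3, 0.5) and y_pred = (0.3, 0.3, 0.4), the sum is 0.2*ln(0.2/0.3) + 0.3*ln(0.3/0.3) + 0.5*ln(0.5/0.4) ≈ -0.08109 + 0 + 0.11157 ≈ 0.03048, in agreement with the expected value 0.030478754035472025.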
+
+    KL = Σ(y_true * ln(y_true / y_pred))
+
+    Reference: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
+
+    Parameters:
+    - y_true: True class probabilities
+    - y_pred: Predicted class probabilities
+
+    >>> true_labels = np.array([0.2, 0.3, 0.5])
+    >>> predicted_probs = np.array([0.3, 0.3, 0.4])
+    >>> kullback_leibler_divergence(true_labels, predicted_probs)
+    0.030478754035472025
+    >>> true_labels = np.array([0.2, 0.3, 0.5])
+    >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])
+    >>> kullback_leibler_divergence(true_labels, predicted_probs)
+    Traceback (most recent call last):
+        ...
+    ValueError: Input arrays must have the same length.
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("Input arrays must have the same length.")
+
+    kl_loss = y_true * np.log(y_true / y_pred)
+    return np.sum(kl_loss)
+
+
 if __name__ == "__main__":
     import doctest

From 5827aac79a36f0d43e9bd9f1c9ca11da07b2d623 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 3 Jun 2024 18:21:27 -0300
Subject: [PATCH 044/104] [pre-commit.ci] pre-commit autoupdate (#11430)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.4.5 → v0.4.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.5...v0.4.7)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 43bf547de..a04f4f8b2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,7 +16,7 @@ repos:
       - id: auto-walrus

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.5
+    rev: v0.4.7
     hooks:
       - id: ruff
       - id: ruff-format

From 41a1cdf38d9cb1a14c9149d2d815efa2259679ef Mon Sep 17 00:00:00 2001
From: Yuri Batista Ishizawa
Date: Tue, 11 Jun 2024 06:45:00 -0300
Subject: [PATCH 045/104] Add rainfall intensity calculation function (#11432)

* Add rainfall intensity calculation function

* chore: improve function and coefficient documentation

* Update physics/rainfall_intensity.py

---------

Co-authored-by: Tianyi Zheng
---
 physics/rainfall_intensity.py | 143 ++++++++++++++++++++++++++++++++++
 1 file changed, 143 insertions(+)
 create mode 100644 physics/rainfall_intensity.py

diff --git a/physics/rainfall_intensity.py b/physics/rainfall_intensity.py
new file mode 100644
index 000000000..cee8d50dd
--- /dev/null
+++ b/physics/rainfall_intensity.py
@@ -0,0 +1,143 @@
+"""
+Rainfall Intensity
+==================
+This module contains functions to calculate the intensity of
+a rainfall event for a given duration and return period.
+
+This function uses the Sherman intensity-duration-frequency curve.
+
+References
+----------
+- Aparicio, F. (1997): Fundamentos de Hidrología de Superficie.
+  Balderas, México, Limusa. 303 p.
+- https://en.wikipedia.org/wiki/Intensity-duration-frequency_curve
+"""
+
+
+def rainfall_intensity(
+    coefficient_k: float,
+    coefficient_a: float,
+    coefficient_b: float,
+    coefficient_c: float,
+    return_period: float,
+    duration: float,
+) -> float:
+    """
+    Calculate the intensity of a rainfall event for a given duration and return period.
+    It's based on the Sherman intensity-duration-frequency curve:
+
+    I = k * T^a / (D + b)^c
+
+    where:
+    I = Intensity of the rainfall event [mm/h]
+    k, a, b, c = Coefficients obtained through statistical distribution adjustment
+    T = Return period in years
+    D = Rainfall event duration in minutes
+
+    Parameters
+    ----------
+    coefficient_k : float
+        Coefficient obtained through statistical distribution adjustment.
+    coefficient_a : float
+        Coefficient obtained through statistical distribution adjustment.
+    coefficient_b : float
+        Coefficient obtained through statistical distribution adjustment.
+    coefficient_c : float
+        Coefficient obtained through statistical distribution adjustment.
+    return_period : float
+        Return period in years.
+    duration : float
+        Rainfall event duration in minutes.
+
+    Returns
+    -------
+    intensity : float
+        Intensity of the rainfall event in mm/h.
+
+    Raises
+    ------
+    ValueError
+        If any of the parameters are not positive.
+
+    Examples
+    --------
+
+    >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 60)
+    49.83339231138578
+
+    >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 30)
+    77.36319588106228
+
+    >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 5, 60)
+    43.382487747633625
+
+    >>> rainfall_intensity(0, 0.2, 11.6, 0.81, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
+
+    >>> rainfall_intensity(1000, -0.2, 11.6, 0.81, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
+
+    >>> rainfall_intensity(1000, 0.2, -11.6, 0.81, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
+
+    >>> rainfall_intensity(1000, 0.2, 11.6, -0.81, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
+
+    >>> rainfall_intensity(1000, 0, 11.6, 0.81, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
+
+    >>> rainfall_intensity(1000, 0.2, 0, 0.81, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
+
+    >>> rainfall_intensity(1000, 0.2, 11.6, 0, 10, 60)
+    Traceback (most recent call last):
+        ...
+    ValueError: All parameters must be positive.
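A hand check of the first example above (recomputed for this note, not patch content): I = 1000 * 10^0.2 / (60 + 11.6)^0.81 ≈ 1584.89 / 31.80 ≈ 49.83 mm/h, matching the expected 49.83339231138578.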
+ + """ + if ( + coefficient_k <= 0 + or coefficient_a <= 0 + or coefficient_b <= 0 + or coefficient_c <= 0 + or return_period <= 0 + or duration <= 0 + ): + raise ValueError("All parameters must be positive.") + intensity = (coefficient_k * (return_period**coefficient_a)) / ( + (duration + coefficient_b) ** coefficient_c + ) + return intensity + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 446742387e83f94f3d54ce640cb07004180130ee Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 13 Jun 2024 14:47:29 -0700 Subject: [PATCH 046/104] Fix grammar and spelling mistakes in sequential_minimum_optimization.py (#11427) --- .../sequential_minimum_optimization.py | 135 +++++++++--------- 1 file changed, 66 insertions(+), 69 deletions(-) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 2ebdeb764..625fc28fe 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -1,11 +1,9 @@ """ - Implementation of sequential minimal optimization (SMO) for support vector machines - (SVM). +Sequential minimal optimization (SMO) for support vector machines (SVM) - Sequential minimal optimization (SMO) is an algorithm for solving the quadratic - programming (QP) problem that arises during the training of support vector - machines. - It was invented by John Platt in 1998. +Sequential minimal optimization (SMO) is an algorithm for solving the quadratic +programming (QP) problem that arises during the training of SVMs. It was invented by +John Platt in 1998. Input: 0: type: numpy.ndarray. @@ -124,8 +122,7 @@ class SmoSVM: b_old = self._b self._b = b - # 4: update error value,here we only calculate those non-bound samples' - # error + # 4: update error, here we only calculate the error for non-bound samples self._unbound = [i for i in self._all_samples if self._is_unbound(i)] for s in self.unbound: if s in (i1, i2): @@ -136,7 +133,7 @@ class SmoSVM: + (self._b - b_old) ) - # if i1 or i2 is non-bound,update there error value to zero + # if i1 or i2 is non-bound, update their error value to zero if self._is_unbound(i1): self._error[i1] = 0 if self._is_unbound(i2): @@ -161,7 +158,7 @@ class SmoSVM: results.append(result) return np.array(results) - # Check if alpha violate KKT condition + # Check if alpha violates the KKT condition def _check_obey_kkt(self, index): alphas = self.alphas tol = self._tol @@ -172,20 +169,19 @@ class SmoSVM: # Get value calculated from kernel function def _k(self, i1, i2): - # for test samples,use Kernel function + # for test samples, use kernel function if isinstance(i2, np.ndarray): return self.Kernel(self.samples[i1], i2) - # for train samples,Kernel values have been saved in matrix + # for training samples, kernel values have been saved in matrix else: return self._K_matrix[i1, i2] - # Get sample's error + # Get error for sample def _e(self, index): """ Two cases: - 1:Sample[index] is non-bound,Fetch error from list: _error - 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi - + 1: Sample[index] is non-bound, fetch error from list: _error + 2: sample[index] is bound, use predicted value minus true value: g(xi) - yi """ # get from error data if self._is_unbound(index): @@ -196,7 +192,7 @@ class SmoSVM: yi = self.tags[index] return gx - yi - # Calculate Kernel matrix of all possible i1,i2 ,saving time + # Calculate kernel matrix of all possible i1, i2, saving time def _calculate_k_matrix(self): 
k_matrix = np.zeros([self.length, self.length]) for i in self._all_samples: @@ -206,7 +202,7 @@ class SmoSVM: ) return k_matrix - # Predict test sample's tag + # Predict tag for test sample def _predict(self, sample): k = self._k predicted_value = ( @@ -222,30 +218,31 @@ class SmoSVM: # Choose alpha1 and alpha2 def _choose_alphas(self): - locis = yield from self._choose_a1() - if not locis: + loci = yield from self._choose_a1() + if not loci: return None - return locis + return loci def _choose_a1(self): """ - Choose first alpha ;steps: - 1:First loop over all sample - 2:Second loop over all non-bound samples till all non-bound samples does not - voilate kkt condition. - 3:Repeat this two process endlessly,till all samples does not voilate kkt - condition samples after first loop. + Choose first alpha + Steps: + 1: First loop over all samples + 2: Second loop over all non-bound samples until no non-bound samples violate + the KKT condition. + 3: Repeat these two processes until no samples violate the KKT condition + after the first loop. """ while True: all_not_obey = True # all sample - print("scanning all sample!") + print("Scanning all samples!") for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: all_not_obey = False yield from self._choose_a2(i1) # non-bound sample - print("scanning non-bound sample!") + print("Scanning non-bound samples!") while True: not_obey = True for i1 in [ @@ -256,20 +253,21 @@ class SmoSVM: not_obey = False yield from self._choose_a2(i1) if not_obey: - print("all non-bound samples fit the KKT condition!") + print("All non-bound samples satisfy the KKT condition!") break if all_not_obey: - print("all samples fit the KKT condition! Optimization done!") + print("All samples satisfy the KKT condition!") break return False def _choose_a2(self, i1): """ - Choose the second alpha by using heuristic algorithm ;steps: - 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). - 2: Start in a random point,loop over all non-bound samples till alpha1 and + Choose the second alpha using a heuristic algorithm + Steps: + 1: Choose alpha2 that maximizes the step size (|E1 - E2|). + 2: Start in a random point, loop over all non-bound samples till alpha1 and alpha2 are optimized. - 3: Start in a random point,loop over all samples till alpha1 and alpha2 are + 3: Start in a random point, loop over all samples till alpha1 and alpha2 are optimized. 
""" self._unbound = [i for i in self._all_samples if self._is_unbound(i)] @@ -306,7 +304,7 @@ class SmoSVM: if i1 == i2: return None, None - # calculate L and H which bound the new alpha2 + # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741 @@ -320,7 +318,7 @@ class SmoSVM: k22 = k(i2, i2) k12 = k(i1, i2) - # select the new alpha2 which could get the minimal objectives + # select the new alpha2 which could achieve the minimal objectives if (eta := k11 + k22 - 2.0 * k12) > 0.0: a2_new_unc = a2 + (y2 * (e1 - e2)) / eta # a2_new has a boundary @@ -335,7 +333,7 @@ class SmoSVM: l1 = a1 + s * (a2 - l) h1 = a1 + s * (a2 - h) - # way 1 + # Method 1 f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) ol = ( @@ -353,9 +351,8 @@ class SmoSVM: + s * h * h1 * k(i1, i2) ) """ - # way 2 - Use objective function check which alpha2 new could get the minimal - objectives + Method 2: Use objective function to check which alpha2_new could achieve the + minimal objectives """ if ol < (oh - self._eps): a2_new = l @@ -375,7 +372,7 @@ class SmoSVM: return a1_new, a2_new - # Normalise data using min_max way + # Normalize data using min-max method def _norm(self, data): if self._init: self._min = np.min(data, axis=0) @@ -424,7 +421,7 @@ class Kernel: def _check(self): if self._kernel == self._rbf and self.gamma < 0: - raise ValueError("gamma value must greater than 0") + raise ValueError("gamma value must be non-negative") def _get_kernel(self, kernel_name): maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} @@ -444,27 +441,27 @@ def count_time(func): start_time = time.time() func(*args, **kwargs) end_time = time.time() - print(f"smo algorithm cost {end_time - start_time} seconds") + print(f"SMO algorithm cost {end_time - start_time} seconds") return call_func @count_time -def test_cancel_data(): - print("Hello!\nStart test svm by smo algorithm!") +def test_cancer_data(): + print("Hello!\nStart test SVM using the SMO algorithm!") # 0: download dataset and load into pandas' dataframe - if not os.path.exists(r"cancel_data.csv"): + if not os.path.exists(r"cancer_data.csv"): request = urllib.request.Request( # noqa: S310 CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) response = urllib.request.urlopen(request) # noqa: S310 content = response.read().decode("utf-8") - with open(r"cancel_data.csv", "w") as f: + with open(r"cancer_data.csv", "w") as f: f.write(content) data = pd.read_csv( - "cancel_data.csv", + "cancer_data.csv", header=None, dtype={0: str}, # Assuming the first column contains string data ) @@ -479,14 +476,14 @@ def test_cancel_data(): train_data, test_data = samples[:328, :], samples[328:, :] test_tags, test_samples = test_data[:, 0], test_data[:, 1:] - # 3: choose kernel function,and set initial alphas to zero(optional) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + # 3: choose kernel function, and set initial alphas to zero (optional) + my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) al = np.zeros(train_data.shape[0]) # 4: calculating best alphas using SMO algorithm and predict test_data samples mysvm = SmoSVM( train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, alpha_list=al, cost=0.4, b=0.0, @@ -501,30 +498,30 @@ def test_cancel_data(): for i in range(test_tags.shape[0]): if test_tags[i] == predict[i]: score += 1 - print(f"\nall: 
{test_num}\nright: {score}\nfalse: {test_num - score}")
+    print(f"\nAll: {test_num}\nCorrect: {score}\nIncorrect: {test_num - score}")
     print(f"Rough Accuracy: {score / test_tags.shape[0]}")


 def test_demonstration():
     # change stdout
-    print("\nStart plot,please wait!!!")
+    print("\nStarting plot, please wait!")
     sys.stdout = open(os.devnull, "w")

     ax1 = plt.subplot2grid((2, 2), (0, 0))
     ax2 = plt.subplot2grid((2, 2), (0, 1))
     ax3 = plt.subplot2grid((2, 2), (1, 0))
     ax4 = plt.subplot2grid((2, 2), (1, 1))
-    ax1.set_title("linear svm,cost:0.1")
+    ax1.set_title("Linear SVM, cost = 0.1")
     test_linear_kernel(ax1, cost=0.1)
-    ax2.set_title("linear svm,cost:500")
+    ax2.set_title("Linear SVM, cost = 500")
     test_linear_kernel(ax2, cost=500)
-    ax3.set_title("rbf kernel svm,cost:0.1")
+    ax3.set_title("RBF kernel SVM, cost = 0.1")
     test_rbf_kernel(ax3, cost=0.1)
-    ax4.set_title("rbf kernel svm,cost:500")
+    ax4.set_title("RBF kernel SVM, cost = 500")
     test_rbf_kernel(ax4, cost=500)

     sys.stdout = sys.__stdout__
-    print("Plot done!!!")
+    print("Plot done!")


 def test_linear_kernel(ax, cost):
@@ -535,10 +532,10 @@ def test_linear_kernel(ax, cost):
     scaler = StandardScaler()
     train_x_scaled = scaler.fit_transform(train_x, train_y)
     train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
-    mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
+    my_kernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
     mysvm = SmoSVM(
         train=train_data,
-        kernel_func=mykernel,
+        kernel_func=my_kernel,
         cost=cost,
         tolerance=0.001,
         auto_norm=False,
@@ -555,10 +552,10 @@ def test_rbf_kernel(ax, cost):
     scaler = StandardScaler()
     train_x_scaled = scaler.fit_transform(train_x, train_y)
     train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
-    mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
+    my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
     mysvm = SmoSVM(
         train=train_data,
-        kernel_func=mykernel,
+        kernel_func=my_kernel,
         cost=cost,
         tolerance=0.001,
         auto_norm=False,
@@ -571,11 +568,11 @@ def plot_partition_boundary(
     model, train_data, ax, resolution=100, colors=("b", "k", "r")
 ):
     """
-    We can not get the optimum w of our kernel svm model which is different from linear
-    svm. For this reason, we generate randomly distributed points with high desity and
-    prediced values of these points are calculated by using our trained model. Then we
-    could use this prediced values to draw contour map.
-    And this contour map can represent svm's partition boundary.
+    We cannot get the optimal w of our kernel SVM model, which is different from a
+    linear SVM. For this reason, we generate randomly distributed points with high
+    density, and predicted values of these points are calculated using our trained
+    model. Then we could use these predicted values to draw a contour map, and this
+    contour map represents the SVM's partition boundary.
""" train_data_x = train_data[:, 1] train_data_y = train_data[:, 2] @@ -620,6 +617,6 @@ def plot_partition_boundary( if __name__ == "__main__": - test_cancel_data() + test_cancer_data() test_demonstration() plt.show() From af6a45e982213ef52a2f747dec6b58d668bfce5b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 17 Jun 2024 00:19:32 +0300 Subject: [PATCH 047/104] Remove some per file ignores (#11381) * Remove some per file ignores * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + pyproject.toml | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2094fc3a9..04551fad3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -863,6 +863,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) + * [Rainfall Intensity](physics/rainfall_intensity.py) * [Reynolds Number](physics/reynolds_number.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) diff --git a/pyproject.toml b/pyproject.toml index 429f4fab9..bb8657183 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,12 +74,6 @@ lint.ignore = [ "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ - "PGH001", -] -lint.per-file-ignores."data_structures/binary_tree/binary_search_tree_recursive.py" = [ - "BLE001", -] lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ "BLE001", ] From df94d460ac8d220f97851f358abc0102ae47d3db Mon Sep 17 00:00:00 2001 From: raj <64704676+ra230537@users.noreply.github.com> Date: Sun, 16 Jun 2024 19:17:55 -0300 Subject: [PATCH 048/104] Fix/fixes get top billionaries code (#11466) * fix: modify the depracated code and add new tests * fix: remove test from pr * fix: remove the useless utc import * fix: add explicit tz argument * fix: fixes ruff checking * Remove UP017 #noqa comments from code * Update get_top_billionaires.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_top_billionaires.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/get_top_billionaires.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 24828b6d7..99f6e0be9 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -65,7 +65,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: "Country": person["countryOfCitizenship"], "Gender": person["gender"], "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", - "Age": years_old(person["birthDate"]), + "Age": str(years_old(person["birthDate"] / 1000)), } for person in response_json["personList"]["personsLists"] ] @@ -95,4 +95,7 @@ def display_billionaires(forbes_billionaires: list[dict[str, int | str]]) -> Non if __name__ == "__main__": + from doctest import testmod + + testmod() display_billionaires(get_forbes_real_time_billionaires()) From 31d1cd8402ba48aca26d9f1d2774f929610e7180 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 08:31:32 -0400 Subject: [PATCH 049/104] 
[pre-commit.ci] pre-commit autoupdate (#11435) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.7 → v0.4.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.7...v0.4.8) * Update .pre-commit-config.yaml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a04f4f8b2..fc8545b51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.7 + rev: v0.4.9 hooks: - id: ruff - id: ruff-format From 1cfca52db73ee18b9e9e08febe9e7d42f96e43db Mon Sep 17 00:00:00 2001 From: Snoppy Date: Mon, 17 Jun 2024 21:27:07 +0800 Subject: [PATCH 050/104] chore: fix typos (#11467) * chore: fix typos Signed-off-by: snoppy * Apply suggestions from code review Co-authored-by: Tianyi Zheng --------- Signed-off-by: snoppy Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- computer_vision/haralick_descriptors.py | 2 +- graphs/strongly_connected_components.py | 2 +- maths/points_are_collinear_3d.py | 10 +++++----- neural_network/convolution_neural_network.py | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 712bd4966..634f04957 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -141,7 +141,7 @@ def transform( center_x, center_y = (x // 2 for x in kernel.shape) - # Use padded image when applying convolotion + # Use padded image when applying convolution # to not go out of bounds of the original the image transformed = np.zeros(image.shape, dtype=np.uint8) padded = np.pad(image, 1, "constant", constant_values=constant) diff --git a/graphs/strongly_connected_components.py b/graphs/strongly_connected_components.py index 325e5c1f3..4d4cf8803 100644 --- a/graphs/strongly_connected_components.py +++ b/graphs/strongly_connected_components.py @@ -38,7 +38,7 @@ def find_components( reversed_graph: dict[int, list[int]], vert: int, visited: list[bool] ) -> list[int]: """ - Use depth first search to find strongliy connected + Use depth first search to find strongly connected vertices. Now graph is reversed >>> find_components({0: [1], 1: [2], 2: [0]}, 0, 5 * [False]) [0, 1, 2] diff --git a/maths/points_are_collinear_3d.py b/maths/points_are_collinear_3d.py index 3bc0b3b9e..c7adddda9 100644 --- a/maths/points_are_collinear_3d.py +++ b/maths/points_are_collinear_3d.py @@ -76,9 +76,9 @@ def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d: def is_zero_vector(vector: Vector3d, accuracy: int) -> bool: """ - Check if vector is equal to (0, 0, 0) of not. + Check if vector is equal to (0, 0, 0) or not. - Sine the algorithm is very accurate, we will never get a zero vector, + Since the algorithm is very accurate, we will never get a zero vector, so we need to round the vector axis, because we want a result that is either True or False. In other applications, we can return a float that represents the collinearity ratio. @@ -97,9 +97,9 @@ def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> boo """ Check if three points are collinear or not. 
- 1- Create tow vectors AB and AC. - 2- Get the cross vector of the tow vectors. - 3- Calcolate the length of the cross vector. + 1- Create two vectors AB and AC. + 2- Get the cross vector of the two vectors. + 3- Calculate the length of the cross vector. 4- If the length is zero then the points are collinear, else they are not. The use of the accuracy parameter is explained in is_zero_vector docstring. diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 3c5519244..d4ac360a9 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -1,7 +1,7 @@ """ - - - - - -- - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing -Goal - - Recognize Handing Writing Word Photo +Goal - - Recognize Handwriting Word Photo Detail: Total 5 layers neural network * Convolution layer * Pooling layer @@ -135,7 +135,7 @@ class CNN: ) data_featuremap.append(featuremap) - # expanding the data slice to One dimenssion + # expanding the data slice to one dimension focus1_list = [] for each_focus in data_focus: focus1_list.extend(self.Expand_Mat(each_focus)) @@ -304,7 +304,7 @@ class CNN: plt.grid(True, alpha=0.5) plt.show() - print("------------------Training Complished---------------------") + print("------------------Training Complete---------------------") print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}")) if draw_e: draw_error() @@ -353,5 +353,5 @@ class CNN: if __name__ == "__main__": """ - I will put the example on other file + I will put the example in another file """ From 75b86671879cfbb83d241c3a3487b32c6dac9d91 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Jun 2024 00:00:47 +0200 Subject: [PATCH 051/104] [pre-commit.ci] pre-commit autoupdate (#11472) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.9 → v0.4.10](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.9...v0.4.10) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fc8545b51..1eddff7ab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.9 + rev: v0.4.10 hooks: - id: ruff - id: ruff-format From 6882a8b80806f2dc53d53a0ecc00c2c98bec3fba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Vitor?= <92267577+ShiryuReb@users.noreply.github.com> Date: Wed, 26 Jun 2024 03:06:57 -0300 Subject: [PATCH 052/104] Tests/add new test case weight_conversion (#11468) * add new test * add new test --- conversions/weight_conversion.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index e8326e0b6..0777aead9 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -297,6 +297,12 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 1.660540199e-23 >>> weight_conversion("atomic-mass-unit","atomic-mass-unit",2) 1.999999998903455 + >>> weight_conversion("slug", "kilogram", 1) + Traceback (most recent call last): + ... 
+ ValueError: Invalid 'from_type' or 'to_type' value: 'slug', 'kilogram' + Supported values are: kilogram, gram, milligram, metric-ton, long-ton, short-ton, \ +pound, stone, ounce, carrat, atomic-mass-unit """ if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: msg = ( From 716bdeb68b1e81aafe886e382319c6dab882dacc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 07:02:29 +0200 Subject: [PATCH 053/104] [pre-commit.ci] pre-commit autoupdate (#11473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.10 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.10...v0.5.0) - [github.com/pre-commit/mirrors-mypy: v1.10.0 → v1.10.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.0...v1.10.1) * Fix ruff issues * Fix ruff issues --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 5 ++- backtracking/knight_tour.py | 6 +-- data_structures/binary_tree/is_sorted.py | 6 +-- data_structures/binary_tree/red_black_tree.py | 37 +++++-------------- docs/source/__init__.py | 0 graphs/graph_adjacency_matrix.py | 8 ++-- graphs/multi_heuristic_astar.py | 4 +- graphs/tarjans_scc.py | 2 +- hashes/md5.py | 4 +- maths/radix2_fft.py | 1 - project_euler/problem_034/__init__.py | 1 - project_euler/problem_035/__init__.py | 1 - project_euler/problem_037/__init__.py | 1 - project_euler/problem_037/sol1.py | 9 ++--- project_euler/problem_039/__init__.py | 1 - project_euler/problem_041/__init__.py | 1 - project_euler/problem_043/__init__.py | 1 - project_euler/problem_044/__init__.py | 1 - project_euler/problem_045/__init__.py | 1 - project_euler/problem_046/__init__.py | 1 - project_euler/problem_055/__init__.py | 1 - project_euler/problem_058/__init__.py | 1 - project_euler/problem_063/__init__.py | 1 - project_euler/problem_072/sol1.py | 2 +- project_euler/problem_089/__init__.py | 1 - project_euler/problem_097/__init__.py | 1 - searches/binary_tree_traversal.py | 6 +-- sorts/external_sort.py | 5 +-- source/__init__.py | 0 .../can_string_be_rearranged_as_palindrome.py | 4 +- strings/is_valid_email_address.py | 4 +- strings/text_justification.py | 12 +++--- 32 files changed, 44 insertions(+), 85 deletions(-) create mode 100644 docs/source/__init__.py create mode 100644 source/__init__.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1eddff7ab..a3f5a5e51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.10 + rev: v0.5.0 hooks: - id: ruff - id: ruff-format @@ -47,10 +47,11 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.0 + rev: v1.10.1 hooks: - id: mypy args: + - --explicit-package-bases - --ignore-missing-imports - --install-types # See mirrors-mypy README.md - --non-interactive diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 5f7dee8d9..8906aaa10 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -24,10 +24,10 @@ def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]: ] permissible_positions = [] - for position in positions: - y_test, x_test = position + for inner_position in positions: + 
y_test, x_test = inner_position if 0 <= y_test < n and 0 <= x_test < n: - permissible_positions.append(position) + permissible_positions.append(inner_position) return permissible_positions diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py index 509a42661..91fc8ca82 100644 --- a/data_structures/binary_tree/is_sorted.py +++ b/data_structures/binary_tree/is_sorted.py @@ -80,9 +80,9 @@ class Node: """ if self.left and (self.data < self.left.data or not self.left.is_sorted): return False - if self.right and (self.data > self.right.data or not self.right.is_sorted): - return False - return True + return not ( + self.right and (self.data > self.right.data or not self.right.is_sorted) + ) if __name__ == "__main__": diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index a9ecf897c..752db1e70 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -1,8 +1,3 @@ -""" -psf/black : true -ruff : passed -""" - from __future__ import annotations from collections.abc import Iterator @@ -321,9 +316,7 @@ class RedBlackTree: return False if self.left and not self.left.check_coloring(): return False - if self.right and not self.right.check_coloring(): - return False - return True + return not (self.right and not self.right.check_coloring()) def black_height(self) -> int | None: """Returns the number of black nodes from this node to the @@ -561,9 +554,7 @@ def test_rotations() -> bool: right_rot.right.right = RedBlackTree(10, parent=right_rot.right) right_rot.right.right.left = RedBlackTree(5, parent=right_rot.right.right) right_rot.right.right.right = RedBlackTree(20, parent=right_rot.right.right) - if tree != right_rot: - return False - return True + return tree == right_rot def test_insertion_speed() -> bool: @@ -606,13 +597,11 @@ def test_insert_and_search() -> bool: tree.insert(12) tree.insert(10) tree.insert(11) - if 5 in tree or -6 in tree or -10 in tree or 13 in tree: + if any(i in tree for i in (5, -6, -10, 13)): # Found something not in there return False - if not (11 in tree and 12 in tree and -8 in tree and 0 in tree): - # Didn't find something in there - return False - return True + # Find all these things in there + return all(i in tree for i in (11, 12, -8, 0)) def test_insert_delete() -> bool: @@ -634,9 +623,7 @@ def test_insert_delete() -> bool: tree = tree.remove(9) if not tree.check_color_properties(): return False - if list(tree.inorder_traverse()) != [-8, 0, 4, 8, 10, 11, 12]: - return False - return True + return list(tree.inorder_traverse()) == [-8, 0, 4, 8, 10, 11, 12] def test_floor_ceil() -> bool: @@ -664,9 +651,7 @@ def test_min_max() -> bool: tree.insert(24) tree.insert(20) tree.insert(22) - if tree.get_max() != 22 or tree.get_min() != -16: - return False - return True + return not (tree.get_max() != 22 or tree.get_min() != -16) def test_tree_traversal() -> bool: @@ -682,9 +667,7 @@ def test_tree_traversal() -> bool: return False if list(tree.preorder_traverse()) != [0, -16, 16, 8, 22, 20, 24]: return False - if list(tree.postorder_traverse()) != [-16, 8, 20, 24, 22, 16, 0]: - return False - return True + return list(tree.postorder_traverse()) == [-16, 8, 20, 24, 22, 16, 0] def test_tree_chaining() -> bool: @@ -695,9 +678,7 @@ def test_tree_chaining() -> bool: return False if list(tree.preorder_traverse()) != [0, -16, 16, 8, 22, 20, 24]: return False - if list(tree.postorder_traverse()) != [-16, 8, 20, 24, 22, 16, 0]: - 
return False - return True + return list(tree.postorder_traverse()) == [-16, 8, 20, 24, 22, 16, 0] def print_results(msg: str, passes: bool) -> None: diff --git a/docs/source/__init__.py b/docs/source/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index 059a6aa9f..568c84166 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -156,9 +156,11 @@ class GraphAdjacencyMatrix(Generic[T]): self.vertex_to_index.pop(vertex) # decrement indices for vertices shifted by the deleted vertex in the adj matrix - for vertex in self.vertex_to_index: - if self.vertex_to_index[vertex] >= start_index: - self.vertex_to_index[vertex] = self.vertex_to_index[vertex] - 1 + for inner_vertex in self.vertex_to_index: + if self.vertex_to_index[inner_vertex] >= start_index: + self.vertex_to_index[inner_vertex] = ( + self.vertex_to_index[inner_vertex] - 1 + ) def contains_vertex(self, vertex: T) -> bool: """ diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 6af9a187a..47509beb8 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -123,9 +123,7 @@ def do_something(back_pointer, goal, start): def valid(p: TPos): if p[0] < 0 or p[0] > n - 1: return False - if p[1] < 0 or p[1] > n - 1: - return False - return True + return not (p[1] < 0 or p[1] > n - 1) def expand_state( diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py index a75dc4d2c..b4a3bd5c4 100644 --- a/graphs/tarjans_scc.py +++ b/graphs/tarjans_scc.py @@ -103,4 +103,4 @@ if __name__ == "__main__": edges = list(zip(source, target)) g = create_graph(n_vertices, edges) - assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) + assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]] diff --git a/hashes/md5.py b/hashes/md5.py index 2187006ec..622a50d29 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -82,8 +82,8 @@ def reformat_hex(i: int) -> bytes: hex_rep = format(i, "08x")[-8:] little_endian_hex = b"" - for i in [3, 2, 1, 0]: - little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8") + for j in [3, 2, 1, 0]: + little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8") return little_endian_hex diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 2c5cdc004..d41dc82d5 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -84,7 +84,6 @@ class FFT: # Corner case if len(dft) <= 1: return dft[0] - # next_ncol = self.c_max_length // 2 while next_ncol > 0: new_dft = [[] for i in range(next_ncol)] diff --git a/project_euler/problem_034/__init__.py b/project_euler/problem_034/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_034/__init__.py +++ b/project_euler/problem_034/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_035/__init__.py b/project_euler/problem_035/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_035/__init__.py +++ b/project_euler/problem_035/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_037/__init__.py b/project_euler/problem_037/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_037/__init__.py +++ b/project_euler/problem_037/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index 9c09065f4..c66eb9fb1 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -85,11 +85,10 @@ def validate(n: int) -> bool: >>> validate(3797) True """ - if len(str(n)) 
> 3 and ( - not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])) - ): - return False - return True + return not ( + len(str(n)) > 3 + and (not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3]))) + ) def compute_truncated_primes(count: int = 11) -> list[int]: diff --git a/project_euler/problem_039/__init__.py b/project_euler/problem_039/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_039/__init__.py +++ b/project_euler/problem_039/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_041/__init__.py b/project_euler/problem_041/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_041/__init__.py +++ b/project_euler/problem_041/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_043/__init__.py b/project_euler/problem_043/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_043/__init__.py +++ b/project_euler/problem_043/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_044/__init__.py b/project_euler/problem_044/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_044/__init__.py +++ b/project_euler/problem_044/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_045/__init__.py b/project_euler/problem_045/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_045/__init__.py +++ b/project_euler/problem_045/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_046/__init__.py b/project_euler/problem_046/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_046/__init__.py +++ b/project_euler/problem_046/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_055/__init__.py b/project_euler/problem_055/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_055/__init__.py +++ b/project_euler/problem_055/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_058/__init__.py b/project_euler/problem_058/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_058/__init__.py +++ b/project_euler/problem_058/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_063/__init__.py b/project_euler/problem_063/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_063/__init__.py +++ b/project_euler/problem_063/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_072/sol1.py b/project_euler/problem_072/sol1.py index 5a28be564..f09db0673 100644 --- a/project_euler/problem_072/sol1.py +++ b/project_euler/problem_072/sol1.py @@ -43,7 +43,7 @@ def solution(limit: int = 1_000_000) -> int: ind = np.arange(2 * i, limit + 1, i) # indexes for selection phi[ind] -= phi[ind] // i - return np.sum(phi[2 : limit + 1]) + return int(np.sum(phi[2 : limit + 1])) if __name__ == "__main__": diff --git a/project_euler/problem_089/__init__.py b/project_euler/problem_089/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_089/__init__.py +++ b/project_euler/problem_089/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_097/__init__.py b/project_euler/problem_097/__init__.py index 792d60054..e69de29bb 100644 --- a/project_euler/problem_097/__init__.py +++ b/project_euler/problem_097/__init__.py @@ -1 +0,0 @@ -# diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 4897ef172..47af57f7f 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -36,7 +36,7 @@ def build_tree() -> TreeNode: right_node = TreeNode(int(check)) 
node_found.right = right_node q.put(right_node) - raise + raise ValueError("Something went wrong") def pre_order(node: TreeNode) -> None: @@ -164,8 +164,8 @@ def level_order_actual(node: TreeNode) -> None: if node_dequeued.right: list_.append(node_dequeued.right) print() - for node in list_: - q.put(node) + for inner_node in list_: + q.put(inner_node) # iteration version diff --git a/sorts/external_sort.py b/sorts/external_sort.py index e6b0d47f7..3fa7cacc0 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -77,10 +77,7 @@ class FilesArray: self.empty.add(i) self.files[i].close() - if len(self.empty) == self.num_buffers: - return False - - return True + return len(self.empty) != self.num_buffers def unshift(self, index): value = self.buffers[index] diff --git a/source/__init__.py b/source/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/strings/can_string_be_rearranged_as_palindrome.py b/strings/can_string_be_rearranged_as_palindrome.py index 21d653db1..95cda8b72 100644 --- a/strings/can_string_be_rearranged_as_palindrome.py +++ b/strings/can_string_be_rearranged_as_palindrome.py @@ -72,9 +72,7 @@ def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool: for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 - if odd_char > 1: - return False - return True + return not odd_char > 1 def benchmark(input_str: str = "") -> None: diff --git a/strings/is_valid_email_address.py b/strings/is_valid_email_address.py index 205394f81..c3bf7df73 100644 --- a/strings/is_valid_email_address.py +++ b/strings/is_valid_email_address.py @@ -101,9 +101,7 @@ def is_valid_email_address(email: str) -> bool: return False # (7.) Validate the placement of "." characters - if domain.startswith(".") or domain.endswith(".") or ".." in domain: - return False - return True + return not (domain.startswith(".") or domain.endswith(".") or ".." 
in domain) if __name__ == "__main__": diff --git a/strings/text_justification.py b/strings/text_justification.py index b0ef12231..e025edcfe 100644 --- a/strings/text_justification.py +++ b/strings/text_justification.py @@ -67,19 +67,19 @@ def text_justification(word: str, max_width: int) -> list: answer = [] line: list[str] = [] width = 0 - for word in words: - if width + len(word) + len(line) <= max_width: + for inner_word in words: + if width + len(inner_word) + len(line) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) - # len(word) = length of current word + # len(inner_word) = length of current inner_word # len(line) = number of overall_spaces_count to insert between words - line.append(word) - width += len(word) + line.append(inner_word) + width += len(inner_word) else: # justify the line and add it to result answer.append(justify(line, width, max_width)) # reset new line and new width - line, width = [word], len(word) + line, width = [inner_word], len(inner_word) remaining_spaces = max_width - width - len(line) answer.append(" ".join(line) + (remaining_spaces + 1) * " ") return answer From c1dc8e97f7992c132c671da2da60da9d926d0fca Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Thu, 4 Jul 2024 23:46:24 +0530 Subject: [PATCH 054/104] Create count_vowels.py (#11474) * Create count_vowels.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/count_vowels.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 strings/count_vowels.py diff --git a/strings/count_vowels.py b/strings/count_vowels.py new file mode 100644 index 000000000..8a52b331c --- /dev/null +++ b/strings/count_vowels.py @@ -0,0 +1,34 @@ +def count_vowels(s: str) -> int: + """ + Count the number of vowels in a given string. + + :param s: Input string to count vowels in. + :return: Number of vowels in the input string. 
+ + Examples: + >>> count_vowels("hello world") + 3 + >>> count_vowels("HELLO WORLD") + 3 + >>> count_vowels("123 hello world") + 3 + >>> count_vowels("") + 0 + >>> count_vowels("a quick brown fox") + 5 + >>> count_vowels("the quick BROWN fox") + 5 + >>> count_vowels("PYTHON") + 1 + """ + if not isinstance(s, str): + raise ValueError("Input must be a string") + + vowels = "aeiouAEIOU" + return sum(1 for char in s if char in vowels) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 9190888f89c55d927881c7b08f6df361ab1b0af4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 22:55:30 +0200 Subject: [PATCH 055/104] [pre-commit.ci] pre-commit autoupdate (#11481) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.0 → v0.5.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.0...v0.5.1) - [github.com/tox-dev/pyproject-fmt: 2.1.3 → 2.1.4](https://github.com/tox-dev/pyproject-fmt/compare/2.1.3...2.1.4) * updating DIRECTORY.md * grid = np.char.chararray((n, n)) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + graphs/multi_heuristic_astar.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a3f5a5e51..7fd689adc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.0 + rev: v0.5.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.3" + rev: "2.1.4" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 04551fad3..54bb8f148 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1260,6 +1260,7 @@ * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) + * [Count Vowels](strings/count_vowels.py) * [Credit Card Validator](strings/credit_card_validator.py) * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 47509beb8..38b07e1ca 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -79,7 +79,7 @@ def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): def do_something(back_pointer, goal, start): - grid = np.chararray((n, n)) + grid = np.char.chararray((n, n)) for i in range(n): for j in range(n): grid[i][j] = "*" From 2d8f22ab615085d36c53346283528f33b18a3b6d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 21:52:48 +0200 Subject: [PATCH 056/104] [pre-commit.ci] pre-commit autoupdate (#11489) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.1 → v0.5.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.1...v0.5.2) Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7fd689adc..c72b55fde 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.1 + rev: v0.5.2 hooks: - id: ruff - id: ruff-format From d9ded0727a7a209bfcbf9bd81c5c75183cfd026f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 Jul 2024 10:40:10 +0200 Subject: [PATCH 057/104] [pre-commit.ci] pre-commit autoupdate (#11495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.2 → v0.5.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.2...v0.5.4) - [github.com/pre-commit/mirrors-mypy: v1.10.1 → v1.11.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.1...v1.11.0) * ruff rule PLR1714 Consider merging multiple comparisons * ruff rule RUF005 Consider `[*self.urls, "", "#"]` instead of concatenation * Update emails_from_url.py * Update emails_from_url.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- web_programming/emails_from_url.py | 7 +------ 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c72b55fde..e9f57a7b7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.2 + rev: v0.5.4 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.1 + rev: v1.11.0 hooks: - id: mypy args: diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 43fd78dcf..d41dc4893 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -31,12 +31,7 @@ class Parser(HTMLParser): # Check the list of defined attributes. for name, value in attrs: # If href is defined, not empty nor # print it and not already in urls. 
- if ( - name == "href" - and value != "#" - and value != "" - and value not in self.urls - ): + if name == "href" and value not in (*self.urls, "", "#"): url = parse.urljoin(self.domain, value) self.urls.append(url) From 146800307c5d2a4393d57b7c97c63b89a21abba1 Mon Sep 17 00:00:00 2001 From: Ihor Pryyma <83470037+Ihor-Pryyma@users.noreply.github.com> Date: Thu, 25 Jul 2024 18:56:31 +0300 Subject: [PATCH 058/104] Add doctests to interpolation_search.py (#11492) * Add doctests to interpolation_search.py * update docs * update tests * update tests 2 * clean code --- searches/interpolation_search.py | 139 ++++++++++++++++--------------- 1 file changed, 70 insertions(+), 69 deletions(-) diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 0591788aa..cb3e0011d 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -3,13 +3,41 @@ This is pure Python implementation of interpolation search algorithm """ -def interpolation_search(sorted_collection, item): - """Pure implementation of interpolation search algorithm in Python - Be careful collection must be ascending sorted, otherwise result will be - unpredictable - :param sorted_collection: some ascending sorted collection with comparable items - :param item: item value to search - :return: index of found item or None if item is not found +def interpolation_search(sorted_collection: list[int], item: int) -> int | None: + """ + Searches for an item in a sorted collection by interpolation search algorithm. + + Args: + sorted_collection: sorted list of integers + item: item value to search + + Returns: + int: The index of the found item, or None if the item is not found. + Examples: + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([1, 2, 3, 4, 5], 4) + 3 + >>> interpolation_search([1, 2, 3, 4, 5], 6) is None + True + >>> interpolation_search([], 1) is None + True + >>> interpolation_search([100], 100) + 0 + >>> interpolation_search([1, 2, 3, 4, 5], 0) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 7) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([1, 2, 3, 4, 5], 0) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 7) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([5, 5, 5, 5, 5], 3) is None + True """ left = 0 right = len(sorted_collection) - 1 @@ -19,8 +47,7 @@ def interpolation_search(sorted_collection, item): if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left - else: - return None + return None point = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] @@ -33,7 +60,7 @@ def interpolation_search(sorted_collection, item): current_item = sorted_collection[point] if current_item == item: return point - elif point < left: + if point < left: right = left left = point elif point > right: @@ -46,22 +73,42 @@ def interpolation_search(sorted_collection, item): return None -def interpolation_search_by_recursion(sorted_collection, item, left, right): +def interpolation_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int | None = None +) -> int | None: """Pure implementation of interpolation search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) - 
:param sorted_collection: some ascending sorted collection with comparable items - :param item: item value to search - :return: index of found item or None if item is not found - """ + Args: + sorted_collection: some sorted collection with comparable items + item: item value to search + left: left index in collection + right: right index in collection + + Returns: + index of item in collection or None if item is not present + + Examples: + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 0) + 0 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 15) + 4 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 5) + 1 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 100) is None + True + >>> interpolation_search_by_recursion([5, 5, 5, 5, 5], 3) is None + True + """ + if right is None: + right = len(sorted_collection) - 1 # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left - else: - return None + return None point = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] @@ -73,64 +120,18 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right): if sorted_collection[point] == item: return point - elif point < left: + if point < left: return interpolation_search_by_recursion(sorted_collection, item, point, left) - elif point > right: + if point > right: return interpolation_search_by_recursion(sorted_collection, item, right, left) - elif sorted_collection[point] > item: + if sorted_collection[point] > item: return interpolation_search_by_recursion( sorted_collection, item, left, point - 1 ) - else: - return interpolation_search_by_recursion( - sorted_collection, item, point + 1, right - ) - - -def __assert_sorted(collection): - """Check if collection is ascending sorted, if not - raises :py:class:`ValueError` - :param collection: collection - :return: True if collection is ascending sorted - :raise: :py:class:`ValueError` if collection is not ascending sorted - Examples: - >>> __assert_sorted([0, 1, 2, 4]) - True - >>> __assert_sorted([10, -1, 5]) - Traceback (most recent call last): - ... 
- ValueError: Collection must be ascending sorted - """ - if collection != sorted(collection): - raise ValueError("Collection must be ascending sorted") - return True + return interpolation_search_by_recursion(sorted_collection, item, point + 1, right) if __name__ == "__main__": - import sys + import doctest - """ - user_input = input('Enter numbers separated by comma:\n').strip() - collection = [int(item) for item in user_input.split(',')] - try: - __assert_sorted(collection) - except ValueError: - sys.exit('Sequence must be ascending sorted to apply interpolation search') - - target_input = input('Enter a single number to be found in the list:\n') - target = int(target_input) - """ - - debug = 0 - if debug == 1: - collection = [10, 30, 40, 45, 50, 66, 77, 93] - try: - __assert_sorted(collection) - except ValueError: - sys.exit("Sequence must be ascending sorted to apply interpolation search") - target = 67 - - result = interpolation_search(collection, target) - if result is not None: - print(f"{target} found at positions: {result}") - else: - print("Not found") + doctest.testmod() From 240d1b7cd47df86d86b26f4d658b26e3656a27d9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2024 21:41:09 +0200 Subject: [PATCH 059/104] [pre-commit.ci] pre-commit autoupdate (#11500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.4 → v0.5.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.4...v0.5.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9f57a7b7..09542dd7e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.4 + rev: v0.5.5 hooks: - id: ruff - id: ruff-format From dfe67954f7218703e3aadca1768a0ad4c97c73a1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 00:11:14 +0200 Subject: [PATCH 060/104] [pre-commit.ci] pre-commit autoupdate (#11507) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.5 → v0.5.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.5...v0.5.6) - [github.com/tox-dev/pyproject-fmt: 2.1.4 → 2.2.1](https://github.com/tox-dev/pyproject-fmt/compare/2.1.4...2.2.1) - [github.com/pre-commit/mirrors-mypy: v1.11.0 → v1.11.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.0...v1.11.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 09542dd7e..c112b6d86 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.5 + rev: v0.5.6 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.4" + rev: "2.2.1" hooks: - id: pyproject-fmt @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: 
v1.11.0 + rev: v1.11.1 hooks: - id: mypy args: From ed1900f1b37234f25486cfb3223988b3295a5549 Mon Sep 17 00:00:00 2001 From: CarlosZamG <54159355+CarlosZamG@users.noreply.github.com> Date: Tue, 6 Aug 2024 02:44:58 -0600 Subject: [PATCH 061/104] Fix typo in integration_by_simpson_approx.py (#11501) --- maths/numerical_analysis/integration_by_simpson_approx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/numerical_analysis/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py index f77ae7613..934299997 100644 --- a/maths/numerical_analysis/integration_by_simpson_approx.py +++ b/maths/numerical_analysis/integration_by_simpson_approx.py @@ -4,7 +4,7 @@ Github : faizan2700 Purpose : You have one function f(x) which takes float integer and returns float you have to integrate the function in limits a to b. -The approximation proposed by Thomas Simpsons in 1743 is one way to calculate +The approximation proposed by Thomas Simpson in 1743 is one way to calculate integration. ( read article : https://cp-algorithms.com/num_methods/simpson-integration.html ) From 31c424fc8654877d3731bdcb50dcc1ce5d6860ab Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 22:55:46 +0200 Subject: [PATCH 062/104] [pre-commit.ci] pre-commit autoupdate (#11515) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.6 → v0.5.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.6...v0.5.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c112b6d86..c797af6c5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.6 + rev: v0.5.7 hooks: - id: ruff - id: ruff-format From 48418280b1331d1efaa14dc48da62d313dfcee43 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 22 Aug 2024 09:42:40 -0700 Subject: [PATCH 063/104] Remove separate directory for `gaussian_elimination_pivoting.py` (#11445) * updating DIRECTORY.md * Remove separate directory for gaussian_elimination_pivoting.py Delete the directory linear_algebra/src/gaussian_elimination_pivoting/ and move its algorithm file, gaussian_elimination_pivoting.py, into the parent src/ directory. The gaussian_elimination_pivoting/ directory only exists because gaussian_elimination_pivoting.py reads an example numpy array from matrix.txt, but this input file and IO operation is entirely unnecessary because gaussian_elimination_pivoting.py already has the exact same array hard-coded into a variable. 
* updating DIRECTORY.md --------- Co-authored-by: tianyizheng02 --- DIRECTORY.md | 3 +- .../gaussian_elimination_pivoting.py | 31 +++++++------------ .../gaussian_elimination_pivoting/__init__.py | 0 .../gaussian_elimination_pivoting/matrix.txt | 4 --- 4 files changed, 13 insertions(+), 25 deletions(-) rename linear_algebra/src/{gaussian_elimination_pivoting => }/gaussian_elimination_pivoting.py (83%) delete mode 100644 linear_algebra/src/gaussian_elimination_pivoting/__init__.py delete mode 100644 linear_algebra/src/gaussian_elimination_pivoting/matrix.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index 54bb8f148..11de569a2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -540,8 +540,7 @@ * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) - * Gaussian Elimination Pivoting - * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py) + * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting.py) * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) diff --git a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py similarity index 83% rename from linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py rename to linear_algebra/src/gaussian_elimination_pivoting.py index 2a86350e9..ecaacce19 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -1,15 +1,5 @@ import numpy as np -matrix = np.array( - [ - [5.0, -5.0, -3.0, 4.0, -11.0], - [1.0, -4.0, 6.0, -4.0, -10.0], - [-2.0, -5.0, 4.0, -5.0, -12.0], - [-3.0, -3.0, 5.0, -5.0, 8.0], - ], - dtype=float, -) - def solve_linear_system(matrix: np.ndarray) -> np.ndarray: """ @@ -87,15 +77,18 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: if __name__ == "__main__": from doctest import testmod - from pathlib import Path testmod() - file_path = Path(__file__).parent / "matrix.txt" - try: - matrix = np.loadtxt(file_path) - except FileNotFoundError: - print(f"Error: {file_path} not found. 
Using default matrix instead.") - # Example usage: - print(f"Matrix:\n{matrix}") - print(f"{solve_linear_system(matrix) = }") + example_matrix = np.array( + [ + [5.0, -5.0, -3.0, 4.0, -11.0], + [1.0, -4.0, 6.0, -4.0, -10.0], + [-2.0, -5.0, 4.0, -5.0, -12.0], + [-3.0, -3.0, 5.0, -5.0, 8.0], + ], + dtype=float, + ) + + print(f"Matrix:\n{example_matrix}") + print(f"{solve_linear_system(example_matrix) = }") diff --git a/linear_algebra/src/gaussian_elimination_pivoting/__init__.py b/linear_algebra/src/gaussian_elimination_pivoting/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt deleted file mode 100644 index dd895ad85..000000000 --- a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt +++ /dev/null @@ -1,4 +0,0 @@ -5.0 -5.0 -3.0 4.0 -11.0 -1.0 -4.0 6.0 -4.0 -10.0 --2.0 -5.0 4.0 -5.0 -12.0 --3.0 -3.0 5.0 -5.0 8.0 \ No newline at end of file From e3fa014a5ab4887f93aae7bb193b152bb155323a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 25 Aug 2024 18:33:11 +0300 Subject: [PATCH 064/104] Fix ruff (#11527) * updating DIRECTORY.md * Fix ruff * Fix * Fix * Fix * Revert "Fix" This reverts commit 5bc3bf342208dd707da02dea7173c059317b6bc6. * find_max.py: noqa: PLR1730 --------- Co-authored-by: MaximSmolskiy Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- .../binary_tree/number_of_possible_binary_trees.py | 3 +-- divide_and_conquer/closest_pair_of_points.py | 6 ++---- graphs/kahns_algorithm_long.py | 3 +-- maths/find_max.py | 2 +- maths/special_numbers/bell_numbers.py | 3 +-- matrix/tests/test_matrix_operation.py | 12 ++++++------ project_euler/problem_008/sol1.py | 3 +-- project_euler/problem_009/sol2.py | 3 +-- project_euler/problem_011/sol1.py | 3 +-- project_euler/problem_011/sol2.py | 12 ++++-------- scheduling/highest_response_ratio_next.py | 3 +-- scheduling/shortest_job_first.py | 3 +-- 13 files changed, 22 insertions(+), 36 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c797af6c5..06f8ba004 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.7 + rev: v0.6.2 hooks: - id: ruff - id: ruff-format diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 1c3dff37e..b39cbafd0 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -31,8 +31,7 @@ def binomial_coefficient(n: int, k: int) -> int: """ result = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) - if k > (n - k): - k = n - k + k = min(k, n - k) # Calculate C(n,k) for i in range(k): result *= n - i diff --git a/divide_and_conquer/closest_pair_of_points.py b/divide_and_conquer/closest_pair_of_points.py index cb7fa00d1..534cbba9b 100644 --- a/divide_and_conquer/closest_pair_of_points.py +++ b/divide_and_conquer/closest_pair_of_points.py @@ -54,8 +54,7 @@ def dis_between_closest_pair(points, points_counts, min_dis=float("inf")): for i in range(points_counts - 1): for j in range(i + 1, points_counts): current_dis = euclidean_distance_sqr(points[i], points[j]) - if current_dis < min_dis: - min_dis = current_dis + min_dis = min(min_dis, current_dis) return min_dis @@ -76,8 +75,7 @@ def dis_between_closest_in_strip(points, 
points_counts, min_dis=float("inf")): for i in range(min(6, points_counts - 1), points_counts): for j in range(max(0, i - 6), i): current_dis = euclidean_distance_sqr(points[i], points[j]) - if current_dis < min_dis: - min_dis = current_dis + min_dis = min(min_dis, current_dis) return min_dis diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index 63cbeb909..1f16b90c0 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -17,8 +17,7 @@ def longest_distance(graph): for x in graph[vertex]: indegree[x] -= 1 - if long_dist[vertex] + 1 > long_dist[x]: - long_dist[x] = long_dist[vertex] + 1 + long_dist[x] = max(long_dist[x], long_dist[vertex] + 1) if indegree[x] == 0: queue.append(x) diff --git a/maths/find_max.py b/maths/find_max.py index 729a80ab4..4765d3006 100644 --- a/maths/find_max.py +++ b/maths/find_max.py @@ -20,7 +20,7 @@ def find_max_iterative(nums: list[int | float]) -> int | float: raise ValueError("find_max_iterative() arg is an empty sequence") max_num = nums[0] for x in nums: - if x > max_num: + if x > max_num: # noqa: PLR1730 max_num = x return max_num diff --git a/maths/special_numbers/bell_numbers.py b/maths/special_numbers/bell_numbers.py index 660ec6e6a..5d99334d7 100644 --- a/maths/special_numbers/bell_numbers.py +++ b/maths/special_numbers/bell_numbers.py @@ -61,8 +61,7 @@ def _binomial_coefficient(total_elements: int, elements_to_choose: int) -> int: if elements_to_choose in {0, total_elements}: return 1 - if elements_to_choose > total_elements - elements_to_choose: - elements_to_choose = total_elements - elements_to_choose + elements_to_choose = min(elements_to_choose, total_elements - elements_to_choose) coefficient = 1 for i in range(elements_to_choose): diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index addc870ca..21ed7e371 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -31,7 +31,7 @@ stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -51,7 +51,7 @@ def test_addition(mat1, mat2): matop.add(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -71,7 +71,7 @@ def test_subtraction(mat1, mat2): assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -93,21 +93,21 @@ def test_multiplication(mat1, mat2): assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops def test_scalar_multiply(): act = (3.5 * np.array(mat_a)).tolist() theo = matop.scalar_multiply(mat_a, 3.5) assert theo == act -@pytest.mark.mat_ops() +@pytest.mark.mat_ops def test_identity(): act = (np.identity(5)).tolist() theo = matop.identity(5) assert theo == act -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize("mat", [mat_a, mat_b, mat_c, mat_d, mat_e, mat_f]) def test_transpose(mat): if (np.array(mat)).shape < (2, 2): diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index adbac8d5a..a38b2045f 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -75,8 +75,7 @@ def solution(n: str = N) -> int: product = 1 
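# Most hunks in this commit apply ruff's PLR1730 rewrite: a manual
# compare-and-assign is collapsed into a single min()/max() call. A minimal
# standalone sketch of the before/after shapes, using a hypothetical
# `values` list rather than any file touched above:
values = [3, 1, 4, 1, 5]
best = values[0]
for v in values:
    if v > best:  # the shape the commit removes
        best = v
assert best == 5
best = values[0]
for v in values:
    best = max(best, v)  # the equivalent shape it introduces
assert best == 5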
for j in range(13): product *= int(n[i + j]) - if product > largest_product: - largest_product = product + largest_product = max(largest_product, product) return largest_product diff --git a/project_euler/problem_009/sol2.py b/project_euler/problem_009/sol2.py index 722ad522e..443a52957 100644 --- a/project_euler/problem_009/sol2.py +++ b/project_euler/problem_009/sol2.py @@ -39,8 +39,7 @@ def solution(n: int = 1000) -> int: c = n - a - b if c * c == (a * a + b * b): candidate = a * b * c - if candidate >= product: - product = candidate + product = max(product, candidate) return product diff --git a/project_euler/problem_011/sol1.py b/project_euler/problem_011/sol1.py index ad45f0983..3d3e864f9 100644 --- a/project_euler/problem_011/sol1.py +++ b/project_euler/problem_011/sol1.py @@ -63,8 +63,7 @@ def largest_product(grid): max_product = max( vert_product, horz_product, lr_diag_product, rl_diag_product ) - if max_product > largest: - largest = max_product + largest = max(largest, max_product) return largest diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 09bf31570..7637deafc 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -45,15 +45,13 @@ def solution(): for i in range(20): for j in range(17): temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # down for i in range(17): for j in range(20): temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # diagonal 1 for i in range(17): @@ -64,8 +62,7 @@ def solution(): * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # diagonal 2 for i in range(17): @@ -76,8 +73,7 @@ def solution(): * grid[i + 2][j - 2] * grid[i + 3][j - 3] ) - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) return maximum diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index b54983561..f858be2ee 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -46,8 +46,7 @@ def calculate_turn_around_time( i = 0 while finished_process[i] == 1: i += 1 - if current_time < arrival_time[i]: - current_time = arrival_time[i] + current_time = max(current_time, arrival_time[i]) response_ratio = 0 # Index showing the location of the process being performed diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 6899ec87c..91012ee3a 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -66,8 +66,7 @@ def calculate_waitingtime( finar = finish_time - arrival_time[short] waiting_time[short] = finar - burst_time[short] - if waiting_time[short] < 0: - waiting_time[short] = 0 + waiting_time[short] = max(waiting_time[short], 0) # Increment time increment_time += 1 From c8e131b86c35c8fa4ca14aa85edbd4a106575882 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 21:49:42 +0200 Subject: [PATCH 065/104] [pre-commit.ci] pre-commit autoupdate (#11522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/abravalheri/validate-pyproject: v0.18 → v0.19](https://github.com/abravalheri/validate-pyproject/compare/v0.18...v0.19) - [github.com/pre-commit/mirrors-mypy: v1.11.1 → 
v1.11.2](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.1...v1.11.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 06f8ba004..2724dff23 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.18 + rev: v0.19 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.1 + rev: v1.11.2 hooks: - id: mypy args: From bd8085cfc18784a21d792a44dcd683e11e802c6b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:41:55 +0200 Subject: [PATCH 066/104] [pre-commit.ci] pre-commit autoupdate (#11535) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.2 → v0.6.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.2...v0.6.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2724dff23..e36319749 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.2 + rev: v0.6.3 hooks: - id: ruff - id: ruff-format From f16d38f26f13683cf3ea75caf0474dedde059b86 Mon Sep 17 00:00:00 2001 From: Ramy <126559907+Ramy-Badr-Ahmed@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:39:09 +0200 Subject: [PATCH 067/104] kd tree data structure implementation (#11532) * Implemented KD-Tree Data Structure * Implemented KD-Tree Data Structure. updated DIRECTORY.md. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/example_usage.py * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/hypercube_points.py * added typehints and docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docstring for search() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests. Updated docstrings/typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated tests and used | for type annotations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * E501 for build_kdtree.py, hypercube_points.py, nearest_neighbour_search.py * I001 for example_usage.py and test_kdtree.py * I001 for example_usage.py and test_kdtree.py * Update data_structures/kd_tree/build_kdtree.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Added new test cases requested in Review. Refactored the test_build_kdtree() to include various checks. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considered ruff errors * Considered ruff errors * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kd_node.py * imported annotations from __future__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 ++ data_structures/kd_tree/__init__.py | 0 data_structures/kd_tree/build_kdtree.py | 35 ++++++ data_structures/kd_tree/example/__init__.py | 0 .../kd_tree/example/example_usage.py | 38 +++++++ .../kd_tree/example/hypercube_points.py | 21 ++++ data_structures/kd_tree/kd_node.py | 30 ++++++ .../kd_tree/nearest_neighbour_search.py | 71 +++++++++++++ data_structures/kd_tree/tests/__init__.py | 0 data_structures/kd_tree/tests/test_kdtree.py | 100 ++++++++++++++++++ 10 files changed, 301 insertions(+) create mode 100644 data_structures/kd_tree/__init__.py create mode 100644 data_structures/kd_tree/build_kdtree.py create mode 100644 data_structures/kd_tree/example/__init__.py create mode 100644 data_structures/kd_tree/example/example_usage.py create mode 100644 data_structures/kd_tree/example/hypercube_points.py create mode 100644 data_structures/kd_tree/kd_node.py create mode 100644 data_structures/kd_tree/nearest_neighbour_search.py create mode 100644 data_structures/kd_tree/tests/__init__.py create mode 100644 data_structures/kd_tree/tests/test_kdtree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 11de569a2..1ca537b99 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -285,6 +285,12 @@ * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) + * KD Tree + * [KD Tree Node](data_structures/kd_tree/kd_node.py) + * [Build KD Tree](data_structures/kd_tree/build_kdtree.py) + * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) + * [Hypercube Points](data_structures/kd_tree/example/hypercube_points.py) + * [Example Usage](data_structures/kd_tree/example/example_usage.py) ## Digital Image Processing * [Change Brightness](digital_image_processing/change_brightness.py) diff --git a/data_structures/kd_tree/__init__.py b/data_structures/kd_tree/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/kd_tree/build_kdtree.py b/data_structures/kd_tree/build_kdtree.py new file mode 100644 index 000000000..c5b800a2c --- /dev/null +++ b/data_structures/kd_tree/build_kdtree.py @@ -0,0 +1,35 @@ +from data_structures.kd_tree.kd_node import KDNode + + +def build_kdtree(points: list[list[float]], depth: int = 0) -> KDNode | None: + """ + Builds a KD-Tree from a list of points. + + Args: + points: The list of points to build the KD-Tree from. + depth: The current depth in the tree + (used to determine axis for splitting). + + Returns: + The root node of the KD-Tree, + or None if no points are provided.
+ """ + if not points: + return None + + k = len(points[0]) # Dimensionality of the points + axis = depth % k + + # Sort point list and choose median as pivot element + points.sort(key=lambda point: point[axis]) + median_idx = len(points) // 2 + + # Create node and construct subtrees + left_points = points[:median_idx] + right_points = points[median_idx + 1 :] + + return KDNode( + point=points[median_idx], + left=build_kdtree(left_points, depth + 1), + right=build_kdtree(right_points, depth + 1), + ) diff --git a/data_structures/kd_tree/example/__init__.py b/data_structures/kd_tree/example/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/kd_tree/example/example_usage.py b/data_structures/kd_tree/example/example_usage.py new file mode 100644 index 000000000..e270f0cdd --- /dev/null +++ b/data_structures/kd_tree/example/example_usage.py @@ -0,0 +1,38 @@ +import numpy as np + +from data_structures.kd_tree.build_kdtree import build_kdtree +from data_structures.kd_tree.example.hypercube_points import hypercube_points +from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search + + +def main() -> None: + """ + Demonstrates the use of KD-Tree by building it from random points + in a 10-dimensional hypercube and performing a nearest neighbor search. + """ + num_points: int = 5000 + cube_size: float = 10.0 # Size of the hypercube (edge length) + num_dimensions: int = 10 + + # Generate random points within the hypercube + points: np.ndarray = hypercube_points(num_points, cube_size, num_dimensions) + hypercube_kdtree = build_kdtree(points.tolist()) + + # Generate a random query point within the same space + rng = np.random.default_rng() + query_point: list[float] = rng.random(num_dimensions).tolist() + + # Perform nearest neighbor search + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + hypercube_kdtree, query_point + ) + + # Print the results + print(f"Query point: {query_point}") + print(f"Nearest point: {nearest_point}") + print(f"Distance: {nearest_dist:.4f}") + print(f"Nodes visited: {nodes_visited}") + + +if __name__ == "__main__": + main() diff --git a/data_structures/kd_tree/example/hypercube_points.py b/data_structures/kd_tree/example/hypercube_points.py new file mode 100644 index 000000000..2d8800ac9 --- /dev/null +++ b/data_structures/kd_tree/example/hypercube_points.py @@ -0,0 +1,21 @@ +import numpy as np + + +def hypercube_points( + num_points: int, hypercube_size: float, num_dimensions: int +) -> np.ndarray: + """ + Generates random points uniformly distributed within an n-dimensional hypercube. + + Args: + num_points: Number of points to generate. + hypercube_size: Size of the hypercube. + num_dimensions: Number of dimensions of the hypercube. + + Returns: + An array of shape (num_points, num_dimensions) + with generated points. + """ + rng = np.random.default_rng() + shape = (num_points, num_dimensions) + return hypercube_size * rng.random(shape) diff --git a/data_structures/kd_tree/kd_node.py b/data_structures/kd_tree/kd_node.py new file mode 100644 index 000000000..e10110279 --- /dev/null +++ b/data_structures/kd_tree/kd_node.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + +class KDNode: + """ + Represents a node in a KD-Tree. + + Attributes: + point: The point stored in this node. + left: The left child node. + right: The right child node. 
+ """ + + def __init__( + self, + point: list[float], + left: KDNode | None = None, + right: KDNode | None = None, + ) -> None: + """ + Initializes a KDNode with the given point and child nodes. + + Args: + point (list[float]): The point stored in this node. + left (Optional[KDNode]): The left child node. + right (Optional[KDNode]): The right child node. + """ + self.point = point + self.left = left + self.right = right diff --git a/data_structures/kd_tree/nearest_neighbour_search.py b/data_structures/kd_tree/nearest_neighbour_search.py new file mode 100644 index 000000000..d9727736f --- /dev/null +++ b/data_structures/kd_tree/nearest_neighbour_search.py @@ -0,0 +1,71 @@ +from data_structures.kd_tree.kd_node import KDNode + + +def nearest_neighbour_search( + root: KDNode | None, query_point: list[float] +) -> tuple[list[float] | None, float, int]: + """ + Performs a nearest neighbor search in a KD-Tree for a given query point. + + Args: + root (KDNode | None): The root node of the KD-Tree. + query_point (list[float]): The point for which the nearest neighbor + is being searched. + + Returns: + tuple[list[float] | None, float, int]: + - The nearest point found in the KD-Tree to the query point, + or None if no point is found. + - The squared distance to the nearest point. + - The number of nodes visited during the search. + """ + nearest_point: list[float] | None = None + nearest_dist: float = float("inf") + nodes_visited: int = 0 + + def search(node: KDNode | None, depth: int = 0) -> None: + """ + Recursively searches for the nearest neighbor in the KD-Tree. + + Args: + node: The current node in the KD-Tree. + depth: The current depth in the KD-Tree. + """ + nonlocal nearest_point, nearest_dist, nodes_visited + if node is None: + return + + nodes_visited += 1 + + # Calculate the current distance (squared distance) + current_point = node.point + current_dist = sum( + (query_coord - point_coord) ** 2 + for query_coord, point_coord in zip(query_point, current_point) + ) + + # Update nearest point if the current node is closer + if nearest_point is None or current_dist < nearest_dist: + nearest_point = current_point + nearest_dist = current_dist + + # Determine which subtree to search first (based on axis and query point) + k = len(query_point) # Dimensionality of points + axis = depth % k + + if query_point[axis] <= current_point[axis]: + nearer_subtree = node.left + further_subtree = node.right + else: + nearer_subtree = node.right + further_subtree = node.left + + # Search the nearer subtree first + search(nearer_subtree, depth + 1) + + # If the further subtree has a closer point + if (query_point[axis] - current_point[axis]) ** 2 < nearest_dist: + search(further_subtree, depth + 1) + + search(root, 0) + return nearest_point, nearest_dist, nodes_visited diff --git a/data_structures/kd_tree/tests/__init__.py b/data_structures/kd_tree/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py new file mode 100644 index 000000000..81f2cc990 --- /dev/null +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -0,0 +1,100 @@ +import numpy as np +import pytest + +from data_structures.kd_tree.build_kdtree import build_kdtree +from data_structures.kd_tree.example.hypercube_points import hypercube_points +from data_structures.kd_tree.kd_node import KDNode +from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search + + +@pytest.mark.parametrize( + ("num_points", 
"cube_size", "num_dimensions", "depth", "expected_result"), + [ + (0, 10.0, 2, 0, None), # Empty points list + (10, 10.0, 2, 2, KDNode), # Depth = 2, 2D points + (10, 10.0, 3, -2, KDNode), # Depth = -2, 3D points + ], +) +def test_build_kdtree(num_points, cube_size, num_dimensions, depth, expected_result): + """ + Test that KD-Tree is built correctly. + + Cases: + - Empty points list. + - Positive depth value. + - Negative depth value. + """ + points = ( + hypercube_points(num_points, cube_size, num_dimensions).tolist() + if num_points > 0 + else [] + ) + + kdtree = build_kdtree(points, depth=depth) + + if expected_result is None: + # Empty points list case + assert kdtree is None, f"Expected None for empty points list, got {kdtree}" + else: + # Check if root node is not None + assert kdtree is not None, "Expected a KDNode, got None" + + # Check if root has correct dimensions + assert ( + len(kdtree.point) == num_dimensions + ), f"Expected point dimension {num_dimensions}, got {len(kdtree.point)}" + + # Check that the tree is balanced to some extent (simplistic check) + assert isinstance( + kdtree, KDNode + ), f"Expected KDNode instance, got {type(kdtree)}" + + +def test_nearest_neighbour_search(): + """ + Test the nearest neighbor search function. + """ + num_points = 10 + cube_size = 10.0 + num_dimensions = 2 + points = hypercube_points(num_points, cube_size, num_dimensions) + kdtree = build_kdtree(points.tolist()) + + rng = np.random.default_rng() + query_point = rng.random(num_dimensions).tolist() + + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + kdtree, query_point + ) + + # Check that nearest point is not None + assert nearest_point is not None + + # Check that distance is a non-negative number + assert nearest_dist >= 0 + + # Check that nodes visited is a non-negative integer + assert nodes_visited >= 0 + + +def test_edge_cases(): + """ + Test edge cases such as an empty KD-Tree. 
+ """ + empty_kdtree = build_kdtree([]) + query_point = [0.0] * 2 # Using a default 2D query point + + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + empty_kdtree, query_point + ) + + # With an empty KD-Tree, nearest_point should be None + assert nearest_point is None + assert nearest_dist == float("inf") + assert nodes_visited == 0 + + +if __name__ == "__main__": + import pytest + + pytest.main() From 729c1f923bb621ed246983a5d3309135c3b1fc8c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 22:15:17 +0200 Subject: [PATCH 068/104] [pre-commit.ci] pre-commit autoupdate (#11557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.3 → v0.6.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.3...v0.6.4) - [github.com/tox-dev/pyproject-fmt: 2.2.1 → 2.2.3](https://github.com/tox-dev/pyproject-fmt/compare/2.2.1...2.2.3) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e36319749..ff76e87a3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.3 + rev: v0.6.4 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.1" + rev: "2.2.3" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 1ca537b99..e965d3b32 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -243,6 +243,15 @@ * [Min Heap](data_structures/heap/min_heap.py) * [Randomized Heap](data_structures/heap/randomized_heap.py) * [Skew Heap](data_structures/heap/skew_heap.py) + * Kd Tree + * [Build Kdtree](data_structures/kd_tree/build_kdtree.py) + * Example + * [Example Usage](data_structures/kd_tree/example/example_usage.py) + * [Hypercube Points](data_structures/kd_tree/example/hypercube_points.py) + * [Kd Node](data_structures/kd_tree/kd_node.py) + * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) + * Tests + * [Test Kdtree](data_structures/kd_tree/tests/test_kdtree.py) * Linked List * [Circular Linked List](data_structures/linked_list/circular_linked_list.py) * [Deque Doubly](data_structures/linked_list/deque_doubly.py) @@ -285,12 +294,6 @@ * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) - * KD Tree - * [KD Tree Node](data_structures/kd_tree/kd_node.py) - * [Build KD Tree](data_structures/kd_tree/build_kdtree.py) - * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) - * [Hypercibe Points](data_structures/kd_tree/example/hypercube_points.py) - * [Example Usage](data_structures/kd_tree/example/example_usage.py) ## Digital Image Processing * [Change Brightness](digital_image_processing/change_brightness.py) From 77bbe584216c0925e249e0baab77fef34561ecaa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:14:55 +0200 Subject: [PATCH 069/104] [pre-commit.ci] pre-commit autoupdate (#11568) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.4 → v0.6.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.4...v0.6.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ff76e87a3..a4a456865 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 + rev: v0.6.5 hooks: - id: ruff - id: ruff-format From 50cc00bb2da26fd234dabdfa7f93c96d6b7d72d5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:45:14 +0200 Subject: [PATCH 070/104] [pre-commit.ci] pre-commit autoupdate (#11579) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.5 → v0.6.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.5...v0.6.7) - [github.com/tox-dev/pyproject-fmt: 2.2.3 → 2.2.4](https://github.com/tox-dev/pyproject-fmt/compare/2.2.3...2.2.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a4a456865..7b219597f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.6.7 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.3" + rev: "2.2.4" hooks: - id: pyproject-fmt From 9b5641d2d333d04eb474ecbcb15c40ccf18a3d7b Mon Sep 17 00:00:00 2001 From: apples53 Date: Tue, 24 Sep 2024 13:00:36 +0530 Subject: [PATCH 071/104] balance parenthesis (add closing bracket) (#11563) * balance parenthesis (add closing bracket) * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- fuzzy_logic/fuzzy_operations.py.DISABLED.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fuzzy_logic/fuzzy_operations.py.DISABLED.txt b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt index 0786ef8b0..67fd587f4 100644 --- a/fuzzy_logic/fuzzy_operations.py.DISABLED.txt +++ b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt @@ -28,7 +28,7 @@ if __name__ == "__main__": union = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1] - # 3. Complement (A) = (1- min(µA(x)) + # 3. Complement (A) = (1 - min(µA(x))) complement_a = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] From 976e385c1d9df92c075575125475b22c423205b9 Mon Sep 17 00:00:00 2001 From: Ramy Date: Sat, 28 Sep 2024 15:37:00 +0200 Subject: [PATCH 072/104] Implemented Suffix Tree Data Structure (#11554) * Implemented KD-Tree Data Structure * Implemented KD-Tree Data Structure. updated DIRECTORY.md. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/example_usage.py * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/hypercube_points.py * added typehints and docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docstring for search() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests. Updated docstrings/typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated tests and used | for type annotations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * E501 for build_kdtree.py, hypercube_points.py, nearest_neighbour_search.py * I001 for example_usage.py and test_kdtree.py * I001 for example_usage.py and test_kdtree.py * Update data_structures/kd_tree/build_kdtree.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Added new test cases requested in Review. Refactored the test_build_kdtree() to include various checks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considered ruff errors * Considered ruff errors * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kd_node.py * imported annotations from __future__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Implementation of the suffix tree data structure * Adding data to DIRECTORY.md * Minor file renaming * minor correction * renaming in DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-1 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-3 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-5 * Implemented Suffix Tree Data Structure. Added some comments to my files in #11532, #11554. * updating DIRECTORY.md * Implemented Suffix Tree Data Structure. Added some comments to my files in #11532, #11554. 
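The kd_tree modules that #11532 added and this PR annotates compose into a small search API; a minimal usage sketch, assuming only the import paths those files create (the 2-D points here are arbitrary):

from data_structures.kd_tree.build_kdtree import build_kdtree
from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search

points = [[2.0, 3.0], [5.0, 4.0], [9.0, 6.0], [4.0, 7.0], [8.0, 1.0]]
root = build_kdtree(points)  # splits on axis = depth % k, median point as pivot
nearest, dist_sq, visited = nearest_neighbour_search(root, [9.0, 2.0])
print(nearest, dist_sq, visited)  # nearest is [8.0, 1.0]; dist_sq is the squared distance, 2.0 here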
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Ramy-Badr-Ahmed --- DIRECTORY.md | 7 ++ data_structures/kd_tree/build_kdtree.py | 8 +++ .../kd_tree/example/example_usage.py | 8 +++ .../kd_tree/example/hypercube_points.py | 8 +++ data_structures/kd_tree/kd_node.py | 8 +++ .../kd_tree/nearest_neighbour_search.py | 8 +++ data_structures/kd_tree/tests/test_kdtree.py | 8 +++ data_structures/suffix_tree/__init__.py | 0 .../suffix_tree/example/__init__.py | 0 .../suffix_tree/example/example_usage.py | 37 +++++++++++ data_structures/suffix_tree/suffix_tree.py | 66 +++++++++++++++++++ .../suffix_tree/suffix_tree_node.py | 36 ++++++++++ data_structures/suffix_tree/tests/__init__.py | 0 .../suffix_tree/tests/test_suffix_tree.py | 59 +++++++++++++++++ 14 files changed, 253 insertions(+) create mode 100644 data_structures/suffix_tree/__init__.py create mode 100644 data_structures/suffix_tree/example/__init__.py create mode 100644 data_structures/suffix_tree/example/example_usage.py create mode 100644 data_structures/suffix_tree/suffix_tree.py create mode 100644 data_structures/suffix_tree/suffix_tree_node.py create mode 100644 data_structures/suffix_tree/tests/__init__.py create mode 100644 data_structures/suffix_tree/tests/test_suffix_tree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e965d3b32..955001e2a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -291,6 +291,13 @@ * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py) * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) + * Suffix Tree + * Example + * [Example Usage](data_structures/suffix_tree/example/example_usage.py) + * [Suffix Tree](data_structures/suffix_tree/suffix_tree.py) + * [Suffix Tree Node](data_structures/suffix_tree/suffix_tree_node.py) + * Tests + * [Test Suffix Tree](data_structures/suffix_tree/tests/test_suffix_tree.py) * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) diff --git a/data_structures/kd_tree/build_kdtree.py b/data_structures/kd_tree/build_kdtree.py index c5b800a2c..074a5dac4 100644 --- a/data_structures/kd_tree/build_kdtree.py +++ b/data_structures/kd_tree/build_kdtree.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from data_structures.kd_tree.kd_node import KDNode diff --git a/data_structures/kd_tree/example/example_usage.py b/data_structures/kd_tree/example/example_usage.py index e270f0cdd..892c3b8c4 100644 --- a/data_structures/kd_tree/example/example_usage.py +++ b/data_structures/kd_tree/example/example_usage.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! 
+ import numpy as np from data_structures.kd_tree.build_kdtree import build_kdtree diff --git a/data_structures/kd_tree/example/hypercube_points.py b/data_structures/kd_tree/example/hypercube_points.py index 2d8800ac9..66744856e 100644 --- a/data_structures/kd_tree/example/hypercube_points.py +++ b/data_structures/kd_tree/example/hypercube_points.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + import numpy as np diff --git a/data_structures/kd_tree/kd_node.py b/data_structures/kd_tree/kd_node.py index e10110279..5a22ef609 100644 --- a/data_structures/kd_tree/kd_node.py +++ b/data_structures/kd_tree/kd_node.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from __future__ import annotations diff --git a/data_structures/kd_tree/nearest_neighbour_search.py b/data_structures/kd_tree/nearest_neighbour_search.py index d9727736f..8104944c0 100644 --- a/data_structures/kd_tree/nearest_neighbour_search.py +++ b/data_structures/kd_tree/nearest_neighbour_search.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from data_structures.kd_tree.kd_node import KDNode diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py index 81f2cc990..dce5e4f34 100644 --- a/data_structures/kd_tree/tests/test_kdtree.py +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + import numpy as np import pytest diff --git a/data_structures/suffix_tree/__init__.py b/data_structures/suffix_tree/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/suffix_tree/example/__init__.py b/data_structures/suffix_tree/example/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/suffix_tree/example/example_usage.py b/data_structures/suffix_tree/example/example_usage.py new file mode 100644 index 000000000..724ac57e8 --- /dev/null +++ b/data_structures/suffix_tree/example/example_usage.py @@ -0,0 +1,37 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from data_structures.suffix_tree.suffix_tree import SuffixTree + + +def main() -> None: + """ + Demonstrate the usage of the SuffixTree class. + + - Initializes a SuffixTree with a predefined text. + - Defines a list of patterns to search for within the suffix tree. 
+ - Searches for each pattern in the suffix tree. + + Patterns tested: + - "ana" (found) --> True + - "ban" (found) --> True + - "na" (found) --> True + - "xyz" (not found) --> False + - "mon" (found) --> True + """ + text = "monkey banana" + suffix_tree = SuffixTree(text) + + patterns = ["ana", "ban", "na", "xyz", "mon"] + for pattern in patterns: + found = suffix_tree.search(pattern) + print(f"Pattern '{pattern}' found: {found}") + + +if __name__ == "__main__": + main() diff --git a/data_structures/suffix_tree/suffix_tree.py b/data_structures/suffix_tree/suffix_tree.py new file mode 100644 index 000000000..ad54fb0ba --- /dev/null +++ b/data_structures/suffix_tree/suffix_tree.py @@ -0,0 +1,66 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from data_structures.suffix_tree.suffix_tree_node import SuffixTreeNode + + +class SuffixTree: + def __init__(self, text: str) -> None: + """ + Initializes the suffix tree with the given text. + + Args: + text (str): The text for which the suffix tree is to be built. + """ + self.text: str = text + self.root: SuffixTreeNode = SuffixTreeNode() + self.build_suffix_tree() + + def build_suffix_tree(self) -> None: + """ + Builds the suffix tree for the given text by adding all suffixes. + """ + text = self.text + n = len(text) + for i in range(n): + suffix = text[i:] + self._add_suffix(suffix, i) + + def _add_suffix(self, suffix: str, index: int) -> None: + """ + Adds a suffix to the suffix tree. + + Args: + suffix (str): The suffix to add. + index (int): The starting index of the suffix in the original text. + """ + node = self.root + for char in suffix: + if char not in node.children: + node.children[char] = SuffixTreeNode() + node = node.children[char] + node.is_end_of_string = True + node.start = index + node.end = index + len(suffix) - 1 + + def search(self, pattern: str) -> bool: + """ + Searches for a pattern in the suffix tree. + + Args: + pattern (str): The pattern to search for. + + Returns: + bool: True if the pattern is found, False otherwise. + """ + node = self.root + for char in pattern: + if char not in node.children: + return False + node = node.children[char] + return True diff --git a/data_structures/suffix_tree/suffix_tree_node.py b/data_structures/suffix_tree/suffix_tree_node.py new file mode 100644 index 000000000..e5b628645 --- /dev/null +++ b/data_structures/suffix_tree/suffix_tree_node.py @@ -0,0 +1,36 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from __future__ import annotations + + +class SuffixTreeNode: + def __init__( + self, + children: dict[str, SuffixTreeNode] | None = None, + is_end_of_string: bool = False, + start: int | None = None, + end: int | None = None, + suffix_link: SuffixTreeNode | None = None, + ) -> None: + """ + Initializes a suffix tree node. + + Parameters: + children (dict[str, SuffixTreeNode] | None): The children of this node. + is_end_of_string (bool): Indicates if this node represents + the end of a string. + start (int | None): The start index of the suffix in the text. + end (int | None): The end index of the suffix in the text. 
+ suffix_link (SuffixTreeNode | None): Link to another suffix tree node. + """ + self.children = children or {} + self.is_end_of_string = is_end_of_string + self.start = start + self.end = end + self.suffix_link = suffix_link diff --git a/data_structures/suffix_tree/tests/__init__.py b/data_structures/suffix_tree/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data_structures/suffix_tree/tests/test_suffix_tree.py b/data_structures/suffix_tree/tests/test_suffix_tree.py new file mode 100644 index 000000000..45c6790ac --- /dev/null +++ b/data_structures/suffix_tree/tests/test_suffix_tree.py @@ -0,0 +1,59 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +import unittest + +from data_structures.suffix_tree.suffix_tree import SuffixTree + + +class TestSuffixTree(unittest.TestCase): + def setUp(self) -> None: + """Set up the initial conditions for each test.""" + self.text = "banana" + self.suffix_tree = SuffixTree(self.text) + + def test_search_existing_patterns(self) -> None: + """Test searching for patterns that exist in the suffix tree.""" + patterns = ["ana", "ban", "na"] + for pattern in patterns: + with self.subTest(pattern=pattern): + assert self.suffix_tree.search( + pattern + ), f"Pattern '{pattern}' should be found." + + def test_search_non_existing_patterns(self) -> None: + """Test searching for patterns that do not exist in the suffix tree.""" + patterns = ["xyz", "apple", "cat"] + for pattern in patterns: + with self.subTest(pattern=pattern): + assert not self.suffix_tree.search( + pattern + ), f"Pattern '{pattern}' should not be found." + + def test_search_empty_pattern(self) -> None: + """Test searching for an empty pattern.""" + assert self.suffix_tree.search(""), "An empty pattern should be found." + + def test_search_full_text(self) -> None: + """Test searching for the full text.""" + assert self.suffix_tree.search( + self.text + ), "The full text should be found in the suffix tree." + + def test_search_substrings(self) -> None: + """Test searching for substrings of the full text.""" + substrings = ["ban", "ana", "a", "na"] + for substring in substrings: + with self.subTest(substring=substring): + assert self.suffix_tree.search( + substring + ), f"Substring '{substring}' should be found." 
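# The assertions above all reduce to one invariant of the implementation:
# build_suffix_tree() inserts every suffix of the text into a plain character
# trie, so search() is exactly substring membership (with the empty pattern
# matching trivially). A standalone sketch of that invariant, assuming the
# import path added in this PR:
from data_structures.suffix_tree.suffix_tree import SuffixTree

banana_tree = SuffixTree("banana")
assert banana_tree.search("anan") and banana_tree.search("")
assert not banana_tree.search("nab")  # "nab" never occurs in "banana"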
+ + +if __name__ == "__main__": + unittest.main() From a9ca110d6b6e4921119fdcca3b2a01e7f649f1ed Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 12:49:31 +0200 Subject: [PATCH 073/104] Scripts for closing pull requests for Hacktoberfest (#11587) * Scripts for closing pull requests for Hacktoberfest * --limit=500 * Lose 2024 --- ...ose_pull_requests_with_awaiting_changes.sh | 22 +++++++++++++++++++ .../close_pull_requests_with_failing_tests.sh | 22 +++++++++++++++++++ ...requests_with_require_descriptive_names.sh | 21 ++++++++++++++++++ .../close_pull_requests_with_require_tests.sh | 22 +++++++++++++++++++ ...e_pull_requests_with_require_type_hints.sh | 21 ++++++++++++++++++ 5 files changed, 108 insertions(+) create mode 100755 scripts/close_pull_requests_with_awaiting_changes.sh create mode 100755 scripts/close_pull_requests_with_failing_tests.sh create mode 100755 scripts/close_pull_requests_with_require_descriptive_names.sh create mode 100755 scripts/close_pull_requests_with_require_tests.sh create mode 100755 scripts/close_pull_requests_with_require_type_hints.sh diff --git a/scripts/close_pull_requests_with_awaiting_changes.sh b/scripts/close_pull_requests_with_awaiting_changes.sh new file mode 100755 index 000000000..55e19c980 --- /dev/null +++ b/scripts/close_pull_requests_with_awaiting_changes.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "awaiting changes" label is present + awaiting_changes=$(echo "$pr_labels" | jq -r '.[] | select(.name == "awaiting changes")') + echo "Checking PR #$pr_number $pr_title ($awaiting_changes) ($pr_labels)" + + # If awaiting_changes, close the pull request + if [[ -n "$awaiting_changes" ]]; then + echo "Closing PR #$pr_number $pr_title due to awaiting_changes label" + gh pr close "$pr_number" --comment "Closing awaiting_changes PRs to prepare for Hacktoberfest" + sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_failing_tests.sh b/scripts/close_pull_requests_with_failing_tests.sh new file mode 100755 index 000000000..3ec5960ae --- /dev/null +++ b/scripts/close_pull_requests_with_failing_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "tests are failing" label is present + tests_are_failing=$(echo "$pr_labels" | jq -r '.[] | select(.name == "tests are failing")') + echo "Checking PR #$pr_number $pr_title ($tests_are_failing) ($pr_labels)" + + # If there are failing tests, close the pull request + if [[ -n "$tests_are_failing" ]]; then + echo "Closing PR #$pr_number $pr_title due to tests_are_failing label" + gh pr close "$pr_number" --comment "Closing tests_are_failing PRs to prepare for Hacktoberfest" + sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_require_descriptive_names.sh b/scripts/close_pull_requests_with_require_descriptive_names.sh new file mode 100755 index 000000000..0fc3cec1d --- /dev/null +++ 
b/scripts/close_pull_requests_with_require_descriptive_names.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require descriptive names" label is present + require_descriptive_names=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require descriptive names")') + echo "Checking PR #$pr_number $pr_title ($require_descriptive_names) ($pr_labels)" + + # If the "require descriptive names" label is present, close the pull request + if [[ -n "$require_descriptive_names" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_descriptive_names label" + gh pr close "$pr_number" --comment "Closing require_descriptive_names PRs to prepare for Hacktoberfest" + fi +done diff --git a/scripts/close_pull_requests_with_require_tests.sh b/scripts/close_pull_requests_with_require_tests.sh new file mode 100755 index 000000000..89a54996b --- /dev/null +++ b/scripts/close_pull_requests_with_require_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require tests" label is present + require_tests=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require tests")') + echo "Checking PR #$pr_number $pr_title ($require_tests) ($pr_labels)" + + # If the "require tests" label is present, close the pull request + if [[ -n "$require_tests" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_tests label" + gh pr close "$pr_number" --comment "Closing require_tests PRs to prepare for Hacktoberfest" + # sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_require_type_hints.sh b/scripts/close_pull_requests_with_require_type_hints.sh new file mode 100755 index 000000000..df5d88289 --- /dev/null +++ b/scripts/close_pull_requests_with_require_type_hints.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require type hints" label is present + require_type_hints=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require type hints")') + echo "Checking PR #$pr_number $pr_title ($require_type_hints) ($pr_labels)" + + # If the "require type hints" label is present, close the pull request + if [[ -n "$require_type_hints" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_type_hints label" + gh pr close "$pr_number" --comment "Closing require_type_hints PRs to prepare for Hacktoberfest" + fi +done From a7bfa224554f277ed68be9e4ef3f6d1cd89008af Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 22:16:17 +0200 Subject: [PATCH 074/104] [pre-commit.ci] pre-commit autoupdate (#11594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: -
[github.com/astral-sh/ruff-pre-commit: v0.6.7 → v0.6.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.7...v0.6.8) - [github.com/abravalheri/validate-pyproject: v0.19 → v0.20.2](https://github.com/abravalheri/validate-pyproject/compare/v0.19...v0.20.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7b219597f..8a8e5c1f6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.7 + rev: v0.6.8 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.19 + rev: v0.20.2 hooks: - id: validate-pyproject From 0177ae1cd596f4f3c0ee7490666d74504deb0298 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 23:01:15 +0200 Subject: [PATCH 075/104] Upgrade to Python 3.13 (#11588) --- .github/workflows/build.yml | 6 ++- DIRECTORY.md | 1 - computer_vision/haralick_descriptors.py | 8 ++-- data_structures/heap/binomial_heap.py | 6 +-- electronics/circular_convolution.py | 6 +-- fractals/julia_sets.py | 18 ++++----- graphics/bezier_curve.py | 8 ++-- graphs/dijkstra_binary_grid.py | 2 +- linear_algebra/src/power_iteration.py | 2 +- linear_programming/simplex.py | 32 +++++++-------- machine_learning/decision_tree.py | 8 ++-- machine_learning/forecasting/run.py | 8 ++-- machine_learning/k_nearest_neighbours.py | 2 +- machine_learning/logistic_regression.py | 4 +- machine_learning/loss_functions.py | 40 +++++++++---------- machine_learning/mfcc.py | 13 +++--- .../multilayer_perceptron_classifier.py | 2 +- machine_learning/scoring_functions.py | 22 +++++----- machine_learning/similarity_search.py | 2 +- machine_learning/support_vector_machines.py | 6 +-- maths/euclidean_distance.py | 8 ++-- maths/euler_method.py | 2 +- maths/euler_modified.py | 4 +- maths/gaussian.py | 16 ++++---- maths/minkowski_distance.py | 2 +- maths/numerical_analysis/adams_bashforth.py | 8 ++-- maths/numerical_analysis/runge_kutta.py | 2 +- .../runge_kutta_fehlberg_45.py | 4 +- maths/numerical_analysis/runge_kutta_gills.py | 2 +- maths/softmax.py | 2 +- .../two_hidden_layers_neural_network.py | 6 +-- other/bankers_algorithm.py | 8 ++-- physics/in_static_equilibrium.py | 2 +- requirements.txt | 4 +- ..._tweets.py => get_user_tweets.py.DISABLED} | 0 35 files changed, 135 insertions(+), 131 deletions(-) rename web_programming/{get_user_tweets.py => get_user_tweets.py.DISABLED} (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a113b4608..dad2b2fac 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: 3.12 + python-version: 3.13 allow-prereleases: true - uses: actions/cache@v4 with: @@ -26,6 +26,10 @@ jobs: # TODO: #8818 Re-enable quantum tests run: pytest --ignore=quantum/q_fourier_transform.py + --ignore=computer_vision/cnn_classification.py + --ignore=dynamic_programming/k_means_clustering_tensorflow.py + --ignore=machine_learning/lstm/lstm_prediction.py + --ignore=neural_network/input_data.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 
955001e2a..56ab8377f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1343,7 +1343,6 @@ * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) - * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) * [Instagram Crawler](web_programming/instagram_crawler.py) * [Instagram Pic](web_programming/instagram_pic.py) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 634f04957..54632160d 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2])) 3.1622776601683795 """ - return np.sqrt(((original - reference) ** 2).mean()) + return float(np.sqrt(((original - reference) ** 2).mean())) def normalize_image( @@ -273,7 +273,7 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]: >>> morphological = opening_filter(binary) >>> mask_1 = binary_mask(gray, morphological)[0] >>> concurrency = matrix_concurrency(mask_1, (0, 1)) - >>> haralick_descriptors(concurrency) + >>> [float(f) for f in haralick_descriptors(concurrency)] [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] """ # Function np.indices could be used for bigger input types, @@ -335,7 +335,7 @@ def get_descriptors( return np.concatenate(descriptors, axis=None) -def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: +def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float: """ Simple method for calculating the euclidean distance between two points, with type np.ndarray. @@ -346,7 +346,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: >>> euclidean(a, b) 3.3166247903554 """ - return np.sqrt(np.sum(np.square(point_1 - point_2))) + return float(np.sqrt(np.sum(np.square(point_1 - point_2)))) def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]: diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 099bd2871..9cfdf0c12 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -73,7 +73,7 @@ class BinomialHeap: 30 Deleting - delete() test - >>> [first_heap.delete_min() for _ in range(20)] + >>> [int(first_heap.delete_min()) for _ in range(20)] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Create a new Heap @@ -118,7 +118,7 @@ class BinomialHeap: values in merged heap; (merge is inplace) >>> results = [] >>> while not first_heap.is_empty(): - ... results.append(first_heap.delete_min()) + ... 
results.append(int(first_heap.delete_min())) >>> results [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34] """ @@ -354,7 +354,7 @@ class BinomialHeap: # Merge heaps self.merge_heaps(new_heap) - return min_value + return int(min_value) def pre_order(self): """ diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index 768f2ad94..d06e76be7 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -39,7 +39,7 @@ class CircularConvolution: Usage: >>> convolution = CircularConvolution() >>> convolution.circular_convolution() - [10, 10, 6, 14] + [10.0, 10.0, 6.0, 14.0] >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6] >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5] @@ -54,7 +54,7 @@ class CircularConvolution: >>> convolution.first_signal = [1, -1, 2, 3, -1] >>> convolution.second_signal = [1, 2, 3] >>> convolution.circular_convolution() - [8, -2, 3, 4, 11] + [8.0, -2.0, 3.0, 4.0, 11.0] """ @@ -91,7 +91,7 @@ class CircularConvolution: final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal)) # rounding-off to two decimal places - return [round(i, 2) for i in final_signal] + return [float(round(i, 2)) for i in final_signal] if __name__ == "__main__": diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 1eef4573b..bea599d44 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -40,11 +40,11 @@ nb_pixels = 666 def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. - >>> eval_exponential(0, 0) + >>> float(eval_exponential(0, 0)) 1.0 - >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 + >>> bool(abs(eval_exponential(1, np.pi*1.j)) < 1e-15) True - >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 + >>> bool(abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15) True """ return np.exp(z_values) + c_parameter @@ -98,20 +98,20 @@ def iterate_function( >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0])) 0j - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1])) (1+0j) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2])) (256+0j) """ diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 9d906f179..6c7dcd4f0 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -30,9 +30,9 @@ class BezierCurve: returns the x, y values of basis function at time t >>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.basis_function(0) + >>> [float(x) for x in curve.basis_function(0)] [1.0, 0.0] - >>> curve.basis_function(1) + >>> [float(x) for x in curve.basis_function(1)] [0.0, 1.0] """ assert 0 <= t <= 1, "Time t must be between 0 and 1." @@ -55,9 +55,9 @@ class BezierCurve: The last point in the curve is when t = 1. 
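A note on the float()/bool()/int() wrappers appearing throughout this patch: assuming the doctest failures stem from NumPy 2.x adopting NEP 51, NumPy scalars now repr as np.float64(5.0) rather than 5.0, so doctests that compare bare scalar output stop matching; casting first pins the printed form across versions. A minimal illustration of the failure mode (assumes NumPy >= 2.0 is installed):

import numpy as np

value = np.sqrt(np.float64(25.0))  # a NumPy scalar, not a Python float
print(repr(value))  # NumPy >= 2.0 prints: np.float64(5.0); NumPy 1.x prints: 5.0
print(repr(float(value)))  # prints: 5.0 on every version, so the doctest stays stable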
>>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.bezier_curve_function(0) + >>> tuple(float(x) for x in curve.bezier_curve_function(0)) (1.0, 1.0) - >>> curve.bezier_curve_function(1) + >>> tuple(float(x) for x in curve.bezier_curve_function(1)) (1.0, 2.0) """ diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py index c23d82343..06293a87d 100644 --- a/graphs/dijkstra_binary_grid.py +++ b/graphs/dijkstra_binary_grid.py @@ -69,7 +69,7 @@ def dijkstra( x, y = predecessors[x, y] path.append(source) # add the source manually path.reverse() - return matrix[destination], path + return float(matrix[destination]), path for i in range(len(dx)): nx, ny = x + dx[i], y + dy[i] diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 24fbd9a5e..83c2ce48c 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -78,7 +78,7 @@ def power_iteration( if is_complex: lambda_ = np.real(lambda_) - return lambda_, vector + return float(lambda_), vector def test_power_iteration() -> None: diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index dc171bacd..a8affe1b7 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -107,8 +107,8 @@ class Tableau: def find_pivot(self) -> tuple[Any, Any]: """Finds the pivot row and column. - >>> Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), - ... 2, 0).find_pivot() + >>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], + ... [1,2,0,1,7.]]), 2, 0).find_pivot()) (1, 0) """ objective = self.objectives[-1] @@ -215,8 +215,8 @@ class Tableau: Max: x1 + x2 ST: x1 + 3x2 <= 4 3x1 + x2 <= 4 - >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), - ... 2, 0).run_simplex() + >>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0], + ... [1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Standard linear program with 3 variables: @@ -224,21 +224,21 @@ class Tableau: ST: 2x1 + x2 + x3 ≤ 2 x1 + 2x2 + 3x3 ≤ 5 2x1 + 2x2 + x3 ≤ 6 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [-3,-1,-3,0,0,0,0], ... [2,1,1,1,0,0,2], ... [1,2,3,0,1,0,5], ... [2,2,1,0,0,1,6.] - ... ]),3,0).run_simplex() # doctest: +ELLIPSIS + ... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS {'P': 5.4, 'x1': 0.199..., 'x3': 1.6} # Optimal tableau input: - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0, 0, 0.25, 0.25, 2], ... [0, 1, 0.375, -0.125, 1], ... [1, 0, -0.125, 0.375, 1] - ... ]), 2, 0).run_simplex() + ... ]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Non-standard: >= constraints @@ -246,25 +246,25 @@ class Tableau: ST: x1 + x2 + x3 <= 40 2x1 + x2 - x3 >= 10 - x2 + x3 >= 10 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 0, 0, 0, -1, -1, 0, 0, 20], ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], ... [1, 1, 1, 1, 0, 0, 0, 0, 40], ... [2, 1, -1, 0, -1, 0, 1, 0, 10], ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] - ... ]), 3, 2).run_simplex() + ... ]), 3, 2).run_simplex().items()} {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} # Non standard: minimisation and equalities Min: x1 + x2 ST: 2x1 + x2 = 12 6x1 + 5x2 = 40 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [8, 6, 0, 0, 52], ... [1, 1, 0, 0, 0], ... [2, 1, 1, 0, 12], ... [6, 5, 0, 1, 40.], - ... ]), 2, 2).run_simplex() + ... 
]), 2, 2).run_simplex().items()} {'P': 7.0, 'x1': 5.0, 'x2': 2.0} @@ -275,7 +275,7 @@ class Tableau: 2x1 + 4x2 <= 48 x1 + x2 >= 10 x1 >= 2 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0], ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0], ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0], @@ -283,7 +283,7 @@ class Tableau: ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0], ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0], ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0] - ... ]), 2, 2).run_simplex() # doctest: +ELLIPSIS + ... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS {'P': 132.0, 'x1': 12.000... 'x2': 5.999...} """ # Stop simplex algorithm from cycling. @@ -307,11 +307,11 @@ class Tableau: def interpret_tableau(self) -> dict[str, float]: """Given the final tableau, add the corresponding values of the basic decision variables to the `output_dict` - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0,0,0.875,0.375,5], ... [0,1,0.375,-0.125,1], ... [1,0,-0.125,0.375,1] - ... ]),2, 0).interpret_tableau() + ... ]),2, 0).interpret_tableau().items()} {'P': 5.0, 'x1': 1.0, 'x2': 1.0} """ # P = RHS of final tableau diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index d0bd6ab0b..72970431c 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -26,15 +26,15 @@ class DecisionTree: >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) >>> test_prediction = float(6) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... test_prediction))) True >>> test_labels = np.array([1,2,3]) >>> test_prediction = float(2) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... 
test_prediction))) True """ if labels.ndim != 1: diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index dbb86caf8..9d81b03cd 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -28,7 +28,7 @@ def linear_regression_prediction( input : training data (date, total_user, total_event) in list of float output : list of total user prediction in float >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2]) - >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors + >>> bool(abs(n - 5.0) < 1e-6) # Checking precision because of floating point errors True """ x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]) @@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> ) model_fit = model.fit(disp=False, maxiter=600, method="nm") result = model_fit.predict(1, len(test_match), exog=[test_match]) - return result[0] + return float(result[0]) def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float: @@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1) regressor.fit(x_train, train_user) y_pred = regressor.predict(x_test) - return y_pred[0] + return float(y_pred[0]) def interquartile_range_checker(train_user: list) -> float: @@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float: q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) - return low_lim + return float(low_lim) def data_safety_checker(list_vote: list, actual_result: float) -> bool: diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py index a43757c5c..fbc1b8bd2 100644 --- a/machine_learning/k_nearest_neighbours.py +++ b/machine_learning/k_nearest_neighbours.py @@ -42,7 +42,7 @@ class KNN: >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11])) 10.0 """ - return np.linalg.norm(a - b) + return float(np.linalg.norm(a - b)) def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str: """ diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 090af5382..496026631 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray: @returns: returns value in the range 0 to 1 Examples: - >>> sigmoid_function(4) + >>> float(sigmoid_function(4)) 0.9820137900379085 >>> sigmoid_function(np.array([-3, 3])) array([0.04742587, 0.95257413]) @@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float: References: - https://en.wikipedia.org/wiki/Logistic_regression """ - return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() + return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()) def log_likelihood(x, y, weights): diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 150035661..0bd9aa8b5 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -22,7 +22,7 @@ def binary_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_cross_entropy(true_labels, predicted_probs) + >>> float(binary_cross_entropy(true_labels, predicted_probs)) 0.2529995012327421 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> 
predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -68,7 +68,7 @@ def binary_focal_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_focal_cross_entropy(true_labels, predicted_probs) + >>> float(binary_focal_cross_entropy(true_labels, predicted_probs)) 0.008257977659239775 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -108,7 +108,7 @@ def categorical_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) - >>> categorical_cross_entropy(true_labels, pred_probs) + >>> float(categorical_cross_entropy(true_labels, pred_probs)) 0.567395975254385 >>> true_labels = np.array([[1, 0], [0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) @@ -179,13 +179,13 @@ def categorical_focal_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) >>> alpha = np.array([0.6, 0.2, 0.7]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.0025966118981496423 >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> alpha = np.array([0.25, 0.25, 0.25]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.23315276982014324 >>> true_labels = np.array([[1, 0], [0, 1]]) @@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_labels = np.array([-1, 1, 1, -1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(true_labels, pred) + >>> float(hinge_loss(true_labels, pred)) 1.52 >>> true_labels = np.array([-1, 1, 1, -1, 1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) @@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + >>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) - >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) @@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028) + >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028)) True >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16) + >>> 
bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)) True >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)) False >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2]) @@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_logarithmic_error(true_values, predicted_values) + >>> float(mean_squared_logarithmic_error(true_values, predicted_values)) 0.0030860877925181344 >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -459,17 +459,17 @@ def mean_absolute_percentage_error( Examples: >>> y_true = np.array([10, 20, 30, 40]) >>> y_pred = np.array([12, 18, 33, 45]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.13125 >>> y_true = np.array([1, 2, 3, 4]) >>> y_pred = np.array([2, 3, 4, 5]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.5208333333333333 >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24]) >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.064671076436071 """ if len(y_true) != len(y_pred): @@ -511,7 +511,7 @@ def perplexity_loss( ... [[0.03, 0.26, 0.21, 0.18, 0.30], ... [0.28, 0.10, 0.33, 0.15, 0.12]]] ... 
) - >>> perplexity_loss(y_true, y_pred) + >>> float(perplexity_loss(y_true, y_pred)) 5.0247347775367945 >>> y_true = np.array([[1, 4], [2, 3]]) >>> y_pred = np.array( @@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> >>> y_true = np.array([3, 5, 2, 7]) >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.012500000000000022 >>> y_true = np.array([2, 4, 6]) >>> y_pred = np.array([1, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.5 >>> y_true = np.array([1, 3, 5, 7]) >>> y_pred = np.array([1, 3, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.0 >>> y_true = np.array([1, 3, 5]) @@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4]) - >>> kullback_leibler_divergence(true_labels, predicted_probs) + >>> float(kullback_leibler_divergence(true_labels, predicted_probs)) 0.030478754035472025 >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5]) diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py index a1e99ce4a..dcc3151d5 100644 --- a/machine_learning/mfcc.py +++ b/machine_learning/mfcc.py @@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray: Examples: >>> audio = np.array([1, 2, 3, 4, 5]) >>> normalized_audio = normalize(audio) - >>> np.max(normalized_audio) + >>> float(np.max(normalized_audio)) 1.0 - >>> np.min(normalized_audio) + >>> float(np.min(normalized_audio)) 0.2 """ # Divide the entire audio signal by the maximum absolute value @@ -229,7 +229,8 @@ def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarra Examples: >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) >>> audio_fft = calculate_fft(audio_windowed, ftt_size=4) - >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j])) + >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, + ... -1.5-0.8660254j]))) True """ # Transpose the audio data to have time in rows and channels in columns @@ -281,7 +282,7 @@ def freq_to_mel(freq: float) -> float: The frequency in mel scale. Examples: - >>> round(freq_to_mel(1000), 2) + >>> float(round(freq_to_mel(1000), 2)) 999.99 """ # Use the formula to convert frequency to the mel scale @@ -321,7 +322,7 @@ def mel_spaced_filterbank( Mel-spaced filter bank. Examples: - >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10) + >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)) 0.0004603981 """ freq_min = 0 @@ -438,7 +439,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra The DCT basis matrix. 
Examples: - >>> round(discrete_cosine_transform(3, 5)[0][0], 5) + >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5)) 0.44721 """ basis = np.empty((dct_filter_num, filter_num)) diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py index e99a4131e..40f998c7d 100644 --- a/machine_learning/multilayer_perceptron_classifier.py +++ b/machine_learning/multilayer_perceptron_classifier.py @@ -17,7 +17,7 @@ Y = clf.predict(test) def wrapper(y): """ - >>> wrapper(Y) + >>> [int(x) for x in wrapper(Y)] [0, 0, 1] """ return list(y) diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py index 08b969a95..f6b685f4f 100644 --- a/machine_learning/scoring_functions.py +++ b/machine_learning/scoring_functions.py @@ -20,11 +20,11 @@ def mae(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mae(predict,actual),decimals = 2) + >>> float(np.around(mae(predict,actual),decimals = 2)) 0.67 >>> actual = [1,1,1];predict = [1,1,1] - >>> mae(predict,actual) + >>> float(mae(predict,actual)) 0.0 """ predict = np.array(predict) @@ -41,11 +41,11 @@ def mse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mse(predict,actual),decimals = 2) + >>> float(np.around(mse(predict,actual),decimals = 2)) 1.33 >>> actual = [1,1,1];predict = [1,1,1] - >>> mse(predict,actual) + >>> float(mse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -63,11 +63,11 @@ def rmse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(rmse(predict,actual),decimals = 2) + >>> float(np.around(rmse(predict,actual),decimals = 2)) 1.15 >>> actual = [1,1,1];predict = [1,1,1] - >>> rmse(predict,actual) + >>> float(rmse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -84,12 +84,10 @@ def rmse(predict, actual): def rmsle(predict, actual): """ Examples(rounded for precision): - >>> actual = [10,10,30];predict = [10,2,30] - >>> np.around(rmsle(predict,actual),decimals = 2) + >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2)) 0.75 - >>> actual = [1,1,1];predict = [1,1,1] - >>> rmsle(predict,actual) + >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1])) 0.0 """ predict = np.array(predict) @@ -117,12 +115,12 @@ def mbd(predict, actual): Here the model overpredicts >>> actual = [1,2,3];predict = [2,3,4] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) 50.0 Here the model underpredicts >>> actual = [1,2,3];predict = [0,1,1] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) -66.67 """ predict = np.array(predict) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 0bc3b17d7..c8a573796 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -153,7 +153,7 @@ def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float: >>> cosine_similarity(np.array([1, 2]), np.array([6, 32])) 0.9615239476408232 """ - return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)) + return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))) if __name__ == "__main__": diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index 24046115e..d17c9044a 100644 --- 
a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -14,11 +14,11 @@ def norm_squared(vector: ndarray) -> float: Returns: float: squared second norm of vector - >>> norm_squared([1, 2]) + >>> int(norm_squared([1, 2])) 5 - >>> norm_squared(np.asarray([1, 2])) + >>> int(norm_squared(np.asarray([1, 2]))) 5 - >>> norm_squared([0, 0]) + >>> int(norm_squared([0, 0])) 0 """ return np.dot(vector, vector) diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 9b29b37b0..aa7f3efc7 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -13,13 +13,13 @@ def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors. A vector is defined as a list, tuple, or numpy 1D array. - >>> euclidean_distance((0, 0), (2, 2)) + >>> float(euclidean_distance((0, 0), (2, 2))) 2.8284271247461903 - >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])) + >>> float(euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))) 3.4641016151377544 - >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])) + >>> float(euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))) 8.0 - >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]) + >>> float(euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])) 8.0 """ return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)) diff --git a/maths/euler_method.py b/maths/euler_method.py index 30f193e6d..c6adb07e2 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -26,7 +26,7 @@ def explicit_euler( ... return y >>> y0 = 1 >>> y = explicit_euler(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 144.77277243257308 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/euler_modified.py b/maths/euler_modified.py index d02123e1e..bb282e9f0 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -24,13 +24,13 @@ def euler_modified( >>> def f1(x, y): ... return -2*x*(y**2) >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0) - >>> y[-1] + >>> float(y[-1]) 0.503338255442106 >>> import math >>> def f2(x, y): ... return -2*y + (x**3)*math.exp(-2*x) >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3) - >>> y[-1] + >>> float(y[-1]) 0.5525976431951775 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/gaussian.py b/maths/gaussian.py index 0e02010a9..b1e62ea77 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -5,18 +5,18 @@ Reference: https://en.wikipedia.org/wiki/Gaussian_function from numpy import exp, pi, sqrt -def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: +def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float: """ - >>> gaussian(1) + >>> float(gaussian(1)) 0.24197072451914337 - >>> gaussian(24) + >>> float(gaussian(24)) 3.342714441794458e-126 - >>> gaussian(1, 4, 2) + >>> float(gaussian(1, 4, 2)) 0.06475879783294587 - >>> gaussian(1, 5, 3) + >>> float(gaussian(1, 5, 3)) 0.05467002489199788 Supports NumPy Arrays @@ -29,7 +29,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: 5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27, 2.14638374e-32, 7.99882776e-38, 1.09660656e-43]) - >>> gaussian(15) + >>> float(gaussian(15)) 5.530709549844416e-50 >>> gaussian([1,2, 'string']) @@ -47,10 +47,10 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: ... 
OverflowError: (34, 'Result too large') - >>> gaussian(10**-326) + >>> float(gaussian(10**-326)) 0.3989422804014327 - >>> gaussian(2523, mu=234234, sigma=3425) + >>> float(gaussian(2523, mu=234234, sigma=3425)) 0.0 """ return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2)) diff --git a/maths/minkowski_distance.py b/maths/minkowski_distance.py index 3237124e8..99f02e31e 100644 --- a/maths/minkowski_distance.py +++ b/maths/minkowski_distance.py @@ -19,7 +19,7 @@ def minkowski_distance( >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2) 8.0 >>> import numpy as np - >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)) + >>> bool(np.isclose(5.0, minkowski_distance([5.0], [0.0], 3))) True >>> minkowski_distance([1.0], [2.0], -1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py index fb4061710..26244a585 100644 --- a/maths/numerical_analysis/adams_bashforth.py +++ b/maths/numerical_analysis/adams_bashforth.py @@ -102,7 +102,7 @@ class AdamsBashforth: >>> def f(x, y): ... return x + y >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3() - >>> y[3] + >>> float(y[3]) 0.15533333333333332 >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3() @@ -140,9 +140,9 @@ class AdamsBashforth: ... return x + y >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4() - >>> y[4] + >>> float(y[4]) 0.30699999999999994 - >>> y[5] + >>> float(y[5]) 0.5771083333333333 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4() @@ -185,7 +185,7 @@ class AdamsBashforth: >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536], ... 0.2, 1).step_5() - >>> y[-1] + >>> float(y[-1]) 0.05436839444444452 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5() diff --git a/maths/numerical_analysis/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py index 4cac017ee..3a25b0fb0 100644 --- a/maths/numerical_analysis/runge_kutta.py +++ b/maths/numerical_analysis/runge_kutta.py @@ -19,7 +19,7 @@ def runge_kutta(f, y0, x0, h, x_end): ... return y >>> y0 = 1 >>> y = runge_kutta(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 148.41315904125113 """ n = int(np.ceil((x_end - x0) / h)) diff --git a/maths/numerical_analysis/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py index 8181fe301..0fbd60a35 100644 --- a/maths/numerical_analysis/runge_kutta_fehlberg_45.py +++ b/maths/numerical_analysis/runge_kutta_fehlberg_45.py @@ -34,12 +34,12 @@ def runge_kutta_fehlberg_45( >>> def f(x, y): ... return 1 + y**2 >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1) - >>> y[1] + >>> float(y[1]) 0.2027100937470787 >>> def f(x,y): ... return x >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0) - >>> y[1] + >>> float(y[1]) -0.18000000000000002 >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py index 451cde4cb..5d9672679 100644 --- a/maths/numerical_analysis/runge_kutta_gills.py +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -34,7 +34,7 @@ def runge_kutta_gills( >>> def f(x, y): ... 
return (x-y)/2 >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5) - >>> y[-1] + >>> float(y[-1]) 3.4104259225717537 >>> def f(x,y): diff --git a/maths/softmax.py b/maths/softmax.py index 04cf77525..95c95e66f 100644 --- a/maths/softmax.py +++ b/maths/softmax.py @@ -28,7 +28,7 @@ def softmax(vector): The softmax vector adds up to one. We need to ceil to mitigate for precision - >>> np.ceil(np.sum(softmax([1,2,3,4]))) + >>> float(np.ceil(np.sum(softmax([1,2,3,4])))) 1.0 >>> vec = np.array([5,5]) diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index d488de590..1b7c0beed 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -64,7 +64,7 @@ class TwoHiddenLayerNeuralNetwork: >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> array_sum = np.sum(res) - >>> np.isnan(array_sum) + >>> bool(np.isnan(array_sum)) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the @@ -105,7 +105,7 @@ class TwoHiddenLayerNeuralNetwork: >>> res = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (res == updated_weights).all() + >>> bool((res == updated_weights).all()) False """ @@ -171,7 +171,7 @@ class TwoHiddenLayerNeuralNetwork: >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (first_iteration_weights == updated_weights).all() + >>> bool((first_iteration_weights == updated_weights).all()) False """ for iteration in range(1, iterations + 1): diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py index 858eb0b2c..d4254f479 100644 --- a/other/bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -87,9 +87,11 @@ class BankersAlgorithm: This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" Return: {0: [a: int, b: int], 1: [c: int, d: int]} - >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table, - ... test_maximum_claim_table)._BankersAlgorithm__need_index_manager() - ... ) # doctest: +NORMALIZE_WHITESPACE + >>> index_control = BankersAlgorithm( + ... test_claim_vector, test_allocated_res_table, test_maximum_claim_table + ... )._BankersAlgorithm__need_index_manager() + >>> {key: [int(x) for x in value] for key, value + ... 
in index_control.items()} # doctest: +NORMALIZE_WHITESPACE {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]} """ diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py index e3c2f9d07..fb5a9b5ff 100644 --- a/physics/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -53,7 +53,7 @@ def in_static_equilibrium( # summation of moments is zero moments: NDArray[float64] = cross(location, forces) sum_moments: float = sum(moments) - return abs(sum_moments) < eps + return bool(abs(sum_moments) < eps) if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index bb3d67139..afbf25ba6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ beautifulsoup4 fake_useragent imageio -keras ; python_version < '3.12' +keras lxml matplotlib numpy @@ -17,7 +17,7 @@ rich scikit-learn statsmodels sympy -tensorflow +tensorflow ; python_version < '3.13' tweepy # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py.DISABLED similarity index 100% rename from web_programming/get_user_tweets.py rename to web_programming/get_user_tweets.py.DISABLED From 0abeeab39f4a612968a10b0541f630239b78f34f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 1 Oct 2024 17:32:31 +0200 Subject: [PATCH 076/104] Drop six from our GitHub Actions (#11621) Drop https://six.readthedocs.io --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index dad2b2fac..f54cc982d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,7 +20,7 @@ jobs: key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | - python -m pip install --upgrade pip setuptools six wheel + python -m pip install --upgrade pip setuptools wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests # TODO: #8818 Re-enable quantum tests From 43a47e01eb2c2b681fa377b02150edba5cc76e32 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:18:17 +0530 Subject: [PATCH 077/104] Add word ladder algorithm in backtracking (#11590) * Add word ladder algorithm in backtracking * Improve comments and implement ruff checks * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change BFS to Backtracking * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Incorporate PR Changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints for backtrack function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Hardvan Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + backtracking/word_ladder.py | 100 ++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 backtracking/word_ladder.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 56ab8377f..cdbbac684 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word 
Ladder](backtracking/word_ladder.py)
   * [Word Search](backtracking/word_search.py)
 
 ## Bit Manipulation
diff --git a/backtracking/word_ladder.py b/backtracking/word_ladder.py
new file mode 100644
index 000000000..7d9fd00f6
--- /dev/null
+++ b/backtracking/word_ladder.py
@@ -0,0 +1,100 @@
+"""
+Word Ladder is a classic problem in computer science.
+The problem is to transform a start word into an end word
+by changing one letter at a time.
+Each intermediate word must be a valid word from a given list of words.
+The goal is to find a transformation sequence
+from the start word to the end word.
+
+Wikipedia: https://en.wikipedia.org/wiki/Word_ladder
+"""
+
+import string
+
+
+def backtrack(
+    current_word: str, path: list[str], end_word: str, word_set: set[str]
+) -> list[str]:
+    """
+    Helper function to perform backtracking to find the transformation
+    from the current_word to the end_word.
+
+    Parameters:
+    current_word (str): The current word in the transformation sequence.
+    path (list[str]): The list of transformations from begin_word to current_word.
+    end_word (str): The target word for transformation.
+    word_set (set[str]): The set of valid words for transformation.
+
+    Returns:
+    list[str]: The list of transformations from begin_word to end_word.
+               Returns an empty list if there is no valid
+               transformation from current_word to end_word.
+
+    Example:
+    >>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log", "cog"})
+    ['hit', 'hot', 'dot', 'lot', 'log', 'cog']
+
+    >>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log"})
+    []
+
+    >>> backtrack("lead", ["lead"], "gold", {"load", "goad", "gold", "lead", "lord"})
+    ['lead', 'lead', 'load', 'goad', 'gold']
+
+    >>> backtrack("game", ["game"], "code", {"came", "cage", "code", "cade", "gave"})
+    ['game', 'came', 'cade', 'code']
+    """
+
+    # Base case: If the current word is the end word, return the path
+    if current_word == end_word:
+        return path
+
+    # Try all possible single-letter transformations
+    for i in range(len(current_word)):
+        for c in string.ascii_lowercase:  # Try changing each letter
+            transformed_word = current_word[:i] + c + current_word[i + 1 :]
+            if transformed_word in word_set:
+                word_set.remove(transformed_word)
+                # Recur with the new word added to the path
+                result = backtrack(
+                    transformed_word, [*path, transformed_word], end_word, word_set
+                )
+                if result:  # valid transformation found
+                    return result
+                word_set.add(transformed_word)  # backtrack
+
+    return []  # No valid transformation found
+
+
+def word_ladder(begin_word: str, end_word: str, word_set: set[str]) -> list[str]:
+    """
+    Solve the Word Ladder problem using Backtracking and return
+    the list of transformations from begin_word to end_word.
+
+    Parameters:
+    begin_word (str): The word from which the transformation starts.
+    end_word (str): The target word for transformation.
+    word_set (set[str]): The set of valid words for transformation.
+
+    Returns:
+    list[str]: The list of transformations from begin_word to end_word.
+               Returns an empty list if there is no valid transformation.
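Because the backtracking above commits to the first candidate that eventually reaches the goal, the ladder it returns is not necessarily a shortest one (the 'lead' to 'gold' doctests even include a redundant first step). A breadth-first variant always yields a minimum-length ladder; a sketch for comparison (shortest_word_ladder is an illustrative name, not part of this patch):

from collections import deque


def shortest_word_ladder(begin: str, end: str, words: set[str]) -> list[str]:
    # Return a minimum-length ladder from begin to end, or [] if none exists.
    queue = deque([[begin]])  # each queue entry is a partial ladder
    seen = {begin}
    while queue:
        path = queue.popleft()
        if path[-1] == end:
            return path  # BFS pops ladders in order of length, so this is shortest
        for i in range(len(path[-1])):
            for letter in "abcdefghijklmnopqrstuvwxyz":
                candidate = path[-1][:i] + letter + path[-1][i + 1 :]
                if candidate in words and candidate not in seen:
                    seen.add(candidate)  # mark on enqueue to avoid duplicate work
                    queue.append([*path, candidate])
    return []

With the word set from the first doctest, this returns ['hit', 'hot', 'dot', 'dog', 'cog'], one word shorter than the backtracking result.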
+ + Example: + >>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]) + ['hit', 'hot', 'dot', 'lot', 'log', 'cog'] + + >>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log"]) + [] + + >>> word_ladder("lead", "gold", ["load", "goad", "gold", "lead", "lord"]) + ['lead', 'lead', 'load', 'goad', 'gold'] + + >>> word_ladder("game", "code", ["came", "cage", "code", "cade", "gave"]) + ['game', 'came', 'cade', 'code'] + """ + + if end_word not in word_set: # no valid transformation possible + return [] + + # Perform backtracking starting from the begin_word + return backtrack(begin_word, [begin_word], end_word, word_set) From 00e9d862248a27281d4de24c8c7eb2d7b018531c Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:24:12 +0530 Subject: [PATCH 078/104] Improve comments, add doctests in symmetric_tree.py (#11619) --- data_structures/binary_tree/symmetric_tree.py | 67 +++++++++++++++++-- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py index 98a766cab..2bfeac98b 100644 --- a/data_structures/binary_tree/symmetric_tree.py +++ b/data_structures/binary_tree/symmetric_tree.py @@ -13,7 +13,21 @@ from dataclasses import dataclass @dataclass class Node: """ - A Node has data variable and pointers to Nodes to its left and right. + A Node represents an element of a binary tree, which contains: + + Attributes: + data: The value stored in the node (int). + left: Pointer to the left child node (Node or None). + right: Pointer to the right child node (Node or None). + + Example: + >>> node = Node(1, Node(2), Node(3)) + >>> node.data + 1 + >>> node.left.data + 2 + >>> node.right.data + 3 """ data: int @@ -24,12 +38,25 @@ class Node: def make_symmetric_tree() -> Node: r""" Create a symmetric tree for testing. + The tree looks like this: 1 / \ 2 2 / \ / \ 3 4 4 3 + + Returns: + Node: Root node of a symmetric tree. + + Example: + >>> tree = make_symmetric_tree() + >>> tree.data + 1 + >>> tree.left.data == tree.right.data + True + >>> tree.left.left.data == tree.right.right.data + True """ root = Node(1) root.left = Node(2) @@ -43,13 +70,26 @@ def make_symmetric_tree() -> Node: def make_asymmetric_tree() -> Node: r""" - Create a asymmetric tree for testing. + Create an asymmetric tree for testing. + The tree looks like this: 1 / \ 2 2 / \ / \ 3 4 3 4 + + Returns: + Node: Root node of an asymmetric tree. + + Example: + >>> tree = make_asymmetric_tree() + >>> tree.data + 1 + >>> tree.left.data == tree.right.data + True + >>> tree.left.left.data == tree.right.right.data + False """ root = Node(1) root.left = Node(2) @@ -63,7 +103,15 @@ def make_asymmetric_tree() -> Node: def is_symmetric_tree(tree: Node) -> bool: """ - Test cases for is_symmetric_tree function + Check if a binary tree is symmetric (i.e., a mirror of itself). + + Parameters: + tree: The root node of the binary tree. + + Returns: + bool: True if the tree is symmetric, False otherwise. + + Example: >>> is_symmetric_tree(make_symmetric_tree()) True >>> is_symmetric_tree(make_asymmetric_tree()) @@ -76,8 +124,17 @@ def is_symmetric_tree(tree: Node) -> bool: def is_mirror(left: Node | None, right: Node | None) -> bool: """ + Check if two subtrees are mirror images of each other. + + Parameters: + left: The root node of the left subtree. + right: The root node of the right subtree. 
+ + Returns: + bool: True if the two subtrees are mirrors of each other, False otherwise. + + Example: >>> tree1 = make_symmetric_tree() - >>> tree1.right.right = Node(3) >>> is_mirror(tree1.left, tree1.right) True >>> tree2 = make_asymmetric_tree() @@ -91,7 +148,7 @@ def is_mirror(left: Node | None, right: Node | None) -> bool: # One side is empty while the other is not, which is not symmetric. return False if left.data == right.data: - # The values match, so check the subtree + # The values match, so check the subtrees recursively. return is_mirror(left.left, right.right) and is_mirror(left.right, right.left) return False From 918fa8bb8ae1f052921fffd188d229d4713c73c9 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Wed, 2 Oct 2024 23:37:07 +0530 Subject: [PATCH 079/104] Optimized O(n) to O(1) (#11669) --- data_structures/linked_list/has_loop.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py index bc06ffe15..f49e01579 100644 --- a/data_structures/linked_list/has_loop.py +++ b/data_structures/linked_list/has_loop.py @@ -14,11 +14,11 @@ class Node: def __iter__(self): node = self - visited = [] + visited = set() while node: if node in visited: raise ContainsLoopError - visited.append(node) + visited.add(node) yield node.data node = node.next_node From f4b4ac159a17e0621e7f37141b165d58ca655b81 Mon Sep 17 00:00:00 2001 From: Ali Rashid <110668489+alirashidAR@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:24:56 +0530 Subject: [PATCH 080/104] Adding Doctests to floyd_warshall.py (#11690) * Ruff test resolution * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/floyd_warshall.py | 47 +++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py index 2331f3e65..b92c6667f 100644 --- a/dynamic_programming/floyd_warshall.py +++ b/dynamic_programming/floyd_warshall.py @@ -12,19 +12,58 @@ class Graph: ] # dp[i][j] stores minimum distance from i to j def add_edge(self, u, v, w): + """ + Adds a directed edge from node u + to node v with weight w. + + >>> g = Graph(3) + >>> g.add_edge(0, 1, 5) + >>> g.dp[0][1] + 5 + """ self.dp[u][v] = w def floyd_warshall(self): + """ + Computes the shortest paths between all pairs of + nodes using the Floyd-Warshall algorithm. + + >>> g = Graph(3) + >>> g.add_edge(0, 1, 1) + >>> g.add_edge(1, 2, 2) + >>> g.floyd_warshall() + >>> g.show_min(0, 2) + 3 + >>> g.show_min(2, 0) + inf + """ for k in range(self.n): for i in range(self.n): for j in range(self.n): self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j]) def show_min(self, u, v): + """ + Returns the minimum distance from node u to node v. 
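The dp table above answers distance queries only; recovering an actual shortest route needs a next-hop matrix maintained alongside the relaxation. A standalone sketch under the same conventions as the class above (dist[i][i] == 0, unreachable pairs hold inf); the names floyd_warshall_paths and reconstruct_path are illustrative, not part of this patch:

INF = float("inf")


def floyd_warshall_paths(dist: list[list[float]]) -> list[list[int | None]]:
    # Relax dist in place and record the first step of each shortest path.
    n = len(dist)
    next_hop: list[list[int | None]] = [
        [j if dist[i][j] != INF else None for j in range(n)] for i in range(n)
    ]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    next_hop[i][j] = next_hop[i][k]  # route the first step via k
    return next_hop


def reconstruct_path(next_hop: list[list[int | None]], u: int, v: int) -> list[int]:
    # Walk the next-hop chain from u to v; an empty list means v is unreachable.
    if next_hop[u][v] is None:
        return []
    path = [u]
    while u != v:
        u = next_hop[u][v]
        path.append(u)
    return path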
+ + >>> g = Graph(3) + >>> g.add_edge(0, 1, 3) + >>> g.add_edge(1, 2, 4) + >>> g.floyd_warshall() + >>> g.show_min(0, 2) + 7 + >>> g.show_min(1, 0) + inf + """ return self.dp[u][v] if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example usage graph = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) @@ -38,5 +77,9 @@ if __name__ == "__main__": graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() - graph.show_min(1, 4) - graph.show_min(0, 3) + print( + graph.show_min(1, 4) + ) # Should output the minimum distance from node 1 to node 4 + print( + graph.show_min(0, 3) + ) # Should output the minimum distance from node 0 to node 3 From 080e7903a06765808c12c0c9c0b242f485cb9ce7 Mon Sep 17 00:00:00 2001 From: Aswin P Kumar <118362715+AswinPKumar01@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:33:48 +0530 Subject: [PATCH 081/104] Add Word Break algorithm (#11687) * Add Word Break algorithm * Add Word Break algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/word_break.py | 71 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 backtracking/word_break.py diff --git a/backtracking/word_break.py b/backtracking/word_break.py new file mode 100644 index 000000000..1f2ab073f --- /dev/null +++ b/backtracking/word_break.py @@ -0,0 +1,71 @@ +""" +Word Break Problem is a well-known problem in computer science. +Given a string and a dictionary of words, the task is to determine if +the string can be segmented into a sequence of one or more dictionary words. + +Wikipedia: https://en.wikipedia.org/wiki/Word_break_problem +""" + + +def backtrack(input_string: str, word_dict: set[str], start: int) -> bool: + """ + Helper function that uses backtracking to determine if a valid + word segmentation is possible starting from index 'start'. + + Parameters: + input_string (str): The input string to be segmented. + word_dict (set[str]): A set of valid dictionary words. + start (int): The starting index of the substring to be checked. + + Returns: + bool: True if a valid segmentation is possible, otherwise False. + + Example: + >>> backtrack("leetcode", {"leet", "code"}, 0) + True + + >>> backtrack("applepenapple", {"apple", "pen"}, 0) + True + + >>> backtrack("catsandog", {"cats", "dog", "sand", "and", "cat"}, 0) + False + """ + + # Base case: if the starting index has reached the end of the string + if start == len(input_string): + return True + + # Try every possible substring from 'start' to 'end' + for end in range(start + 1, len(input_string) + 1): + if input_string[start:end] in word_dict and backtrack( + input_string, word_dict, end + ): + return True + + return False + + +def word_break(input_string: str, word_dict: set[str]) -> bool: + """ + Determines if the input string can be segmented into a sequence of + valid dictionary words using backtracking. + + Parameters: + input_string (str): The input string to segment. + word_dict (set[str]): The set of valid words. + + Returns: + bool: True if the string can be segmented into valid words, otherwise False. 
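A complexity note on the backtracking above: the same start index can be re-explored once per distinct segmentation that reaches it, which is exponential in the worst case (for instance a long run of 'a' ending in 'b' against {'a', 'aa', 'aaa'}). Caching the answer per start index caps the work at one pass per index; a sketch of that variant (word_break_memo and the use of functools.cache are illustrative, not part of this patch):

from functools import cache


def word_break_memo(input_string: str, word_dict: set[str]) -> bool:
    # Memoized variant: each start index is solved at most once.
    @cache
    def can_break(start: int) -> bool:
        # A suffix is breakable if it is empty, or begins with a dictionary
        # word whose remaining suffix is itself breakable.
        return start == len(input_string) or any(
            input_string[start:end] in word_dict and can_break(end)
            for end in range(start + 1, len(input_string) + 1)
        )

    return can_break(0)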
+ + Example: + >>> word_break("leetcode", {"leet", "code"}) + True + + >>> word_break("applepenapple", {"apple", "pen"}) + True + + >>> word_break("catsandog", {"cats", "dog", "sand", "and", "cat"}) + False + """ + + return backtrack(input_string, word_dict, 0) From 40f65e8150045dc82a7a58fe7cff6bfb353999f2 Mon Sep 17 00:00:00 2001 From: JeevaRamanathan <64531160+JeevaRamanathan@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:48:01 +0530 Subject: [PATCH 082/104] Improve comments, docstrings in next_greatest_element.py (#11685) * Improve comments in next_greatest_element.py Signed-off-by: JeevaRamanathan * few changes Signed-off-by: JeevaRamanathan * updated descriptions of the functions parameters Signed-off-by: JeevaRamanathan --------- Signed-off-by: JeevaRamanathan --- .../stacks/next_greater_element.py | 58 ++++++++++++++----- 1 file changed, 45 insertions(+), 13 deletions(-) diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py index 7d76d1f47..216850b4b 100644 --- a/data_structures/stacks/next_greater_element.py +++ b/data_structures/stacks/next_greater_element.py @@ -6,9 +6,20 @@ expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def next_greatest_element_slow(arr: list[float]) -> list[float]: """ - Get the Next Greatest Element (NGE) for all elements in a list. - Maximum element present after the current one which is also greater than the - current one. + Get the Next Greatest Element (NGE) for each element in the array + by checking all subsequent elements to find the next greater one. + + This is a brute-force implementation, and it has a time complexity + of O(n^2), where n is the size of the array. + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element_slow(arr) == expect True """ @@ -28,9 +39,21 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: def next_greatest_element_fast(arr: list[float]) -> list[float]: """ - Like next_greatest_element_slow() but changes the loops to use - enumerate() instead of range(len()) for the outer loop and - for in a slice of arr for the inner loop. + Find the Next Greatest Element (NGE) for each element in the array + using a more readable approach. This implementation utilizes + enumerate() for the outer loop and slicing for the inner loop. + + While this improves readability over next_greatest_element_slow(), + it still has a time complexity of O(n^2). + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element_fast(arr) == expect True """ @@ -47,14 +70,23 @@ def next_greatest_element_fast(arr: list[float]) -> list[float]: def next_greatest_element(arr: list[float]) -> list[float]: """ - Get the Next Greatest Element (NGE) for all elements in a list. - Maximum element present after the current one which is also greater than the - current one. + Efficient solution to find the Next Greatest Element (NGE) for all elements + using a stack. The time complexity is reduced to O(n), making it suitable + for larger arrays. - A naive way to solve this is to take two loops and check for the next bigger - number but that will make the time complexity as O(n^2). 
The better way to solve - this would be to use a stack to keep track of maximum number giving a linear time - solution. + The stack keeps track of elements for which the next greater element hasn't + been found yet. By iterating through the array in reverse (from the last + element to the first), the stack is used to efficiently determine the next + greatest element for each element. + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element(arr) == expect True """ From e20b503b24fc271321a23584772ad8f0db17daf2 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:36:08 +0530 Subject: [PATCH 083/104] Improve comments, add doctests for kahns_algorithm_topo.py (#11668) * Improve comments, add doctests for kahns_algorithm_topo.py * Improve function docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename variables, remove print --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/kahns_algorithm_topo.py | 61 ++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index b1260bd5b..c956cf9f4 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,36 +1,61 @@ -def topological_sort(graph): +def topological_sort(graph: dict[int, list[int]]) -> list[int] | None: """ - Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph - using BFS + Perform topological sorting of a Directed Acyclic Graph (DAG) + using Kahn's Algorithm via Breadth-First Search (BFS). + + Topological sorting is a linear ordering of vertices in a graph such that for + every directed edge u → v, vertex u comes before vertex v in the ordering. + + Parameters: + graph: Adjacency list representing the directed graph where keys are + vertices, and values are lists of adjacent vertices. + + Returns: + The topologically sorted order of vertices if the graph is a DAG. + Returns None if the graph contains a cycle. 
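One implementation note: queue.pop(0) on a Python list shifts every remaining element, so the loop below can degrade to O(V^2) on graphs with many simultaneously ready vertices, even though Kahn's algorithm is O(V + E) overall. collections.deque restores O(1) dequeues; a sketch with the same interface (topological_sort_deque is an illustrative name, not part of this patch):

from collections import deque


def topological_sort_deque(graph: dict[int, list[int]]) -> list[int] | None:
    # Kahn's algorithm with an O(1)-pop queue; returns None if a cycle exists.
    # Assumes every vertex appears as a key, as in the doctests above.
    indegree = dict.fromkeys(graph, 0)
    for neighbors in graph.values():
        for vertex in neighbors:
            indegree[vertex] += 1
    queue = deque(vertex for vertex, degree in indegree.items() if degree == 0)
    order: list[int] = []
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        order.append(vertex)
        for neighbor in graph[vertex]:
            indegree[neighbor] -= 1
            if indegree[neighbor] == 0:
                queue.append(neighbor)
    return order if len(order) == len(graph) else None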
+ + Example: + >>> graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} + >>> topological_sort(graph) + [0, 1, 2, 3, 4, 5] + + >>> graph_with_cycle = {0: [1], 1: [2], 2: [0]} + >>> topological_sort(graph_with_cycle) """ + indegree = [0] * len(graph) queue = [] - topo = [] - cnt = 0 + topo_order = [] + processed_vertices_count = 0 + # Calculate the indegree of each vertex for values in graph.values(): for i in values: indegree[i] += 1 + # Add all vertices with 0 indegree to the queue for i in range(len(indegree)): if indegree[i] == 0: queue.append(i) + # Perform BFS while queue: vertex = queue.pop(0) - cnt += 1 - topo.append(vertex) - for x in graph[vertex]: - indegree[x] -= 1 - if indegree[x] == 0: - queue.append(x) + processed_vertices_count += 1 + topo_order.append(vertex) - if cnt != len(graph): - print("Cycle exists") - else: - print(topo) + # Traverse neighbors + for neighbor in graph[vertex]: + indegree[neighbor] -= 1 + if indegree[neighbor] == 0: + queue.append(neighbor) + + if processed_vertices_count != len(graph): + return None # no topological ordering exists due to cycle + return topo_order # valid topological ordering -# Adjacency List of Graph -graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topological_sort(graph) +if __name__ == "__main__": + import doctest + + doctest.testmod() From 917ad62105dc829e45c0732d9ac2aae7ef358627 Mon Sep 17 00:00:00 2001 From: Sai Aswin Madhavan Date: Fri, 4 Oct 2024 14:58:50 +0530 Subject: [PATCH 084/104] Removed incorrect type hints (#11711) --- strings/min_cost_string_conversion.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index d147a9d79..40d54f0e8 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -21,7 +21,6 @@ def compute_transform_tables( destination_seq = list(destination_string) len_source_seq = len(source_seq) len_destination_seq = len(destination_seq) - costs = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] @@ -31,28 +30,28 @@ def compute_transform_tables( for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost - ops[i][0] = f"D{source_seq[i - 1]:c}" + ops[i][0] = f"D{source_seq[i - 1]}" for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost - ops[0][i] = f"I{destination_seq[i - 1]:c}" + ops[0][i] = f"I{destination_seq[i - 1]}" for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost - ops[i][j] = f"C{source_seq[i - 1]:c}" + ops[i][j] = f"C{source_seq[i - 1]}" else: costs[i][j] = costs[i - 1][j - 1] + replace_cost - ops[i][j] = f"R{source_seq[i - 1]:c}" + str(destination_seq[j - 1]) + ops[i][j] = f"R{source_seq[i - 1]}" + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost - ops[i][j] = f"D{source_seq[i - 1]:c}" + ops[i][j] = f"D{source_seq[i - 1]}" if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost - ops[i][j] = f"I{destination_seq[j - 1]:c}" + ops[i][j] = f"I{destination_seq[j - 1]}" return costs, ops From 59ff87dc55b704dc7d3683bb6fabc7c4dc0afade Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Fri, 4 Oct 2024 10:36:14 +0100 Subject: [PATCH 085/104] Added doctests to min_cost_string_conversion.py and removed :c 
specifier (#11721) * Added doctests to min_cost_string_conversion.py and removed :c specifier * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved line length issues based on ruff requirements * modified in compliance with ruff for line length * Update strings/min_cost_string_conversion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- strings/min_cost_string_conversion.py | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 40d54f0e8..a5a3c4a4e 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -17,6 +17,23 @@ def compute_transform_tables( delete_cost: int, insert_cost: int, ) -> tuple[list[list[int]], list[list[str]]]: + """ + Finds the most cost efficient sequence + for converting one string into another. + + >>> costs, operations = compute_transform_tables("cat", "cut", 1, 2, 3, 3) + >>> costs[0][:4] + [0, 3, 6, 9] + >>> costs[2][:4] + [6, 4, 3, 6] + >>> operations[0][:4] + ['0', 'Ic', 'Iu', 'It'] + >>> operations[3][:4] + ['Dt', 'Dt', 'Rtu', 'Ct'] + + >>> compute_transform_tables("", "", 1, 2, 3, 3) + ([[0]], [['0']]) + """ source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) @@ -57,6 +74,24 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: + """ + Assembles the transformations based on the ops table. + + >>> ops = [['0', 'Ic', 'Iu', 'It'], + ... ['Dc', 'Cc', 'Iu', 'It'], + ... ['Da', 'Da', 'Rau', 'Rat'], + ... ['Dt', 'Dt', 'Rtu', 'Ct']] + >>> x = len(ops) - 1 + >>> y = len(ops[0]) - 1 + >>> assemble_transformation(ops, x, y) + ['Cc', 'Rau', 'Ct'] + + >>> ops1 = [['0']] + >>> x1 = len(ops1) - 1 + >>> y1 = len(ops1[0]) - 1 + >>> assemble_transformation(ops1, x1, y1) + [] + """ if i == 0 and j == 0: return [] elif ops[i][j][0] in {"C", "R"}: From 9a572dec2b6011e7c2c0d82f50989b3a404ea426 Mon Sep 17 00:00:00 2001 From: ARNAV RAJ <126798788+Acuspeedster@users.noreply.github.com> Date: Fri, 4 Oct 2024 21:59:39 +0530 Subject: [PATCH 086/104] feat: Implemented Matrix Exponentiation Method (#11747) * feat: add Matrix Exponentiation method docs: updated the header documentation and added new documentation for the new function. 
* feat: added new function matrix exponetiation method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * feat: This function uses the tail-recursive form of the Euclidean algorithm to calculate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reduced the number of characters per line in the comments * removed unwanted code * feat: Implemented a new function to swaap numbers without dummy variable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed previos code * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/fibonacci.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/fibonacci.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 927700b04..24b2d7ae4 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -7,6 +7,8 @@ the Binet's formula function because the Binet formula function uses floats NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats +NOTE 3: the matrix function is the fastest and most memory efficient for large n + See benchmark numbers in __main__ for performance comparisons/ https://en.wikipedia.org/wiki/Fibonacci_number for more information @@ -17,6 +19,9 @@ from collections.abc import Iterator from math import sqrt from time import time +import numpy as np +from numpy import ndarray + def time_func(func, *args, **kwargs): """ @@ -230,6 +235,88 @@ def fib_binet(n: int) -> list[int]: return [round(phi**i / sqrt_5) for i in range(n + 1)] +def matrix_pow_np(m: ndarray, power: int) -> ndarray: + """ + Raises a matrix to the power of 'power' using binary exponentiation. + + Args: + m: Matrix as a numpy array. + power: The power to which the matrix is to be raised. + + Returns: + The matrix raised to the power. + + Raises: + ValueError: If power is negative. + + >>> m = np.array([[1, 1], [1, 0]], dtype=int) + >>> matrix_pow_np(m, 0) # Identity matrix when raised to the power of 0 + array([[1, 0], + [0, 1]]) + + >>> matrix_pow_np(m, 1) # Same matrix when raised to the power of 1 + array([[1, 1], + [1, 0]]) + + >>> matrix_pow_np(m, 5) + array([[8, 5], + [5, 3]]) + + >>> matrix_pow_np(m, -1) + Traceback (most recent call last): + ... 
+ ValueError: power is negative + """ + result = np.array([[1, 0], [0, 1]], dtype=int) # Identity Matrix + base = m + if power < 0: # Negative power is not allowed + raise ValueError("power is negative") + while power: + if power % 2 == 1: + result = np.dot(result, base) + base = np.dot(base, base) + power //= 2 + return result + + +def fib_matrix_np(n: int) -> int: + """ + Calculates the n-th Fibonacci number using matrix exponentiation. + https://www.nayuki.io/page/fast-fibonacci-algorithms#:~:text= + Summary:%20The%20two%20fast%20Fibonacci%20algorithms%20are%20matrix + + Args: + n: Fibonacci sequence index + + Returns: + The n-th Fibonacci number. + + Raises: + ValueError: If n is negative. + + >>> fib_matrix_np(0) + 0 + >>> fib_matrix_np(1) + 1 + >>> fib_matrix_np(5) + 5 + >>> fib_matrix_np(10) + 55 + >>> fib_matrix_np(-1) + Traceback (most recent call last): + ... + ValueError: n is negative + """ + if n < 0: + raise ValueError("n is negative") + if n == 0: + return 0 + + m = np.array([[1, 1], [1, 0]], dtype=int) + result = matrix_pow_np(m, n - 1) + return int(result[0, 0]) + + if __name__ == "__main__": from doctest import testmod @@ -242,3 +329,4 @@ if __name__ == "__main__": time_func(fib_memoization, num) # 0.0100 ms time_func(fib_recursive_cached, num) # 0.0153 ms time_func(fib_recursive, num) # 257.0910 ms + time_func(fib_matrix_np, num) # 0.0000 ms From 5a8655d306d872085112d965067fcdc440286928 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Sat, 5 Oct 2024 22:49:58 +0530 Subject: [PATCH 087/104] Added new algorithm to generate numbers in lexicographical order (#11674) * Added algorithm to generate numbers in lexicographical order * Removed the test cases * Updated camelcase to snakecase * Added doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added descriptive name for n * Reduced the number of letters * Updated the return type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated import statement * Updated return type to Iterator[int] * removed parentheses --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../stacks/lexicographical_numbers.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 data_structures/stacks/lexicographical_numbers.py diff --git a/data_structures/stacks/lexicographical_numbers.py b/data_structures/stacks/lexicographical_numbers.py new file mode 100644 index 000000000..6a174e7d9 --- /dev/null +++ b/data_structures/stacks/lexicographical_numbers.py @@ -0,0 +1,38 @@ +from collections.abc import Iterator + + +def lexical_order(max_number: int) -> Iterator[int]: + """ + Generate numbers in lexical order from 1 to max_number. 
+ + >>> " ".join(map(str, lexical_order(13))) + '1 10 11 12 13 2 3 4 5 6 7 8 9' + >>> list(lexical_order(1)) + [1] + >>> " ".join(map(str, lexical_order(20))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 3 4 5 6 7 8 9' + >>> " ".join(map(str, lexical_order(25))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 3 4 5 6 7 8 9' + >>> list(lexical_order(12)) + [1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9] + """ + + stack = [1] + + while stack: + num = stack.pop() + if num > max_number: + continue + + yield num + if (num % 10) != 9: + stack.append(num + 1) + + stack.append(num * 10) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"Numbers from 1 to 25 in lexical order: {list(lexical_order(26))}") From 50aca04c67315ef7de7ef03e51a018075d8d026b Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Sat, 5 Oct 2024 22:51:43 +0530 Subject: [PATCH 088/104] feat: increase test coverage of longest_common_subsequence to 75% (#11777) --- .../longest_common_subsequence.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 9a98b1736..4a6c880af 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -28,6 +28,24 @@ def longest_common_subsequence(x: str, y: str): (2, 'ph') >>> longest_common_subsequence("computer", "food") (1, 'o') + >>> longest_common_subsequence("", "abc") # One string is empty + (0, '') + >>> longest_common_subsequence("abc", "") # Other string is empty + (0, '') + >>> longest_common_subsequence("", "") # Both strings are empty + (0, '') + >>> longest_common_subsequence("abc", "def") # No common subsequence + (0, '') + >>> longest_common_subsequence("abc", "abc") # Identical strings + (3, 'abc') + >>> longest_common_subsequence("a", "a") # Single character match + (1, 'a') + >>> longest_common_subsequence("a", "b") # Single character no match + (0, '') + >>> longest_common_subsequence("abcdef", "ace") # Interleaved subsequence + (3, 'ace') + >>> longest_common_subsequence("ABCD", "ACBD") # No repeated characters + (3, 'ABD') """ # find the length of strings From ad6395d3408b9d80a0bef4d180d1e7613a55d807 Mon Sep 17 00:00:00 2001 From: Andrey Ivanov <97749666+ivnvxd@users.noreply.github.com> Date: Sat, 5 Oct 2024 18:24:58 +0100 Subject: [PATCH 089/104] Update ruff usage example in CONTRIBUTING.md (#11772) * Update ruff usage example * Update CONTRIBUTING.md Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 096582e45..b51132129 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -96,7 +96,7 @@ We want your work to be readable by others; therefore, we encourage you to note ```bash python3 -m pip install ruff # only required the first time - ruff . + ruff check ``` - Original code submission require docstrings or comments to describe your work. 
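The stack-based generator in patch 087 above is effectively an iterative pre-order walk over the implicit 10-ary trie of decimal strings, so for any `max_number` its output should coincide with plain string sorting. A small cross-check sketch (the name `lexical_order_reference` is invented here for illustration; it is not part of the repository):

```python
def lexical_order_reference(max_number: int) -> list[int]:
    # Lexical order of 1..max_number is just numeric order of the
    # decimal-string forms, so sorting on str(n) gives a reference answer.
    return sorted(range(1, max_number + 1), key=str)


# Matches the doctest in data_structures/stacks/lexicographical_numbers.py:
assert lexical_order_reference(13) == [1, 10, 11, 12, 13, 2, 3, 4, 5, 6, 7, 8, 9]
```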
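The new `longest_common_subsequence` doctests in patch 088 pin down the edge cases; the recurrence they exercise is the standard one, sketched below independently of the repository file (`lcs_length` is an illustrative name, and it returns only the length, not the subsequence itself):

```python
def lcs_length(x: str, y: str) -> int:
    # dp[i][j] = length of the LCS of x[:i] and y[:j]; extend on a match,
    # otherwise inherit the better of dropping one trailing character.
    dp = [[0] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[len(x)][len(y)]


assert lcs_length("abcdef", "ace") == 3  # "ace", as in the doctest above
assert lcs_length("ABCD", "ACBD") == 3  # "ABD", no repeated characters
```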
From fcf82a1eda21dcf36254a8fcaadc913f6a94c8da Mon Sep 17 00:00:00 2001 From: Vineet Kumar <108144301+whyvineet@users.noreply.github.com> Date: Sat, 5 Oct 2024 23:04:48 +0530 Subject: [PATCH 090/104] =?UTF-8?q?Implemented=20Exponential=20Search=20wi?= =?UTF-8?q?th=20binary=20search=20for=20improved=20perfor=E2=80=A6=20(#116?= =?UTF-8?q?66)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Implemented Exponential Search with binary search for improved performance on large sorted arrays. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added type hints and doctests for binary_search and exponential_search functions. Improved code documentation and ensured testability. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename Exponential_Search.py to exponential_search.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- searches/exponential_search.py | 113 +++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 searches/exponential_search.py diff --git a/searches/exponential_search.py b/searches/exponential_search.py new file mode 100644 index 000000000..ed09b14e1 --- /dev/null +++ b/searches/exponential_search.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 + +""" +Pure Python implementation of exponential search algorithm + +For more information, see the Wikipedia page: +https://en.wikipedia.org/wiki/Exponential_search + +For doctests run the following command: +python3 -m doctest -v exponential_search.py + +For manual testing run: +python3 exponential_search.py +""" + +from __future__ import annotations + + +def binary_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int = -1 +) -> int: + """Pure implementation of binary search algorithm in Python using recursion + + Be careful: the collection must be ascending sorted otherwise, the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :param left: starting index for the search + :param right: ending index for the search + :return: index of the found item or -1 if the item is not found + + Examples: + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4) + 0 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) + 4 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) + 1 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4) + -1 + """ + if right < 0: + right = len(sorted_collection) - 1 + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + if right < left: + return -1 + + midpoint = left + (right - left) // 2 + + if sorted_collection[midpoint] == item: + return midpoint + elif sorted_collection[midpoint] > item: + return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1) + else: + return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) + + +def exponential_search(sorted_collection: list[int], item: int) -> int: + """ + Pure implementation of an exponential search algorithm in Python. 
+ For more information, refer to: + https://en.wikipedia.org/wiki/Exponential_search + + Be careful: the collection must be ascending sorted, otherwise the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :return: index of the found item or -1 if the item is not found + + The time complexity of this algorithm is O(log i) where i is the index of the item. + + Examples: + >>> exponential_search([0, 5, 7, 10, 15], 0) + 0 + >>> exponential_search([0, 5, 7, 10, 15], 15) + 4 + >>> exponential_search([0, 5, 7, 10, 15], 5) + 1 + >>> exponential_search([0, 5, 7, 10, 15], 6) + -1 + """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + + if sorted_collection[0] == item: + return 0 + + bound = 1 + while bound < len(sorted_collection) and sorted_collection[bound] < item: + bound *= 2 + + left = bound // 2 + right = min(bound, len(sorted_collection) - 1) + return binary_search_by_recursion(sorted_collection, item, left, right) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Manual testing + user_input = input("Enter numbers separated by commas: ").strip() + collection = sorted(int(item) for item in user_input.split(",")) + target = int(input("Enter a number to search for: ")) + result = exponential_search(sorted_collection=collection, item=target) + if result == -1: + print(f"{target} was not found in {collection}.") + else: + print(f"{target} was found at index {result} in {collection}.") From 3422ebc75bda6aba9b234eb217a79f25bec65f21 Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Mon, 7 Oct 2024 12:00:11 +0530 Subject: [PATCH 091/104] feat: add testcase of polynom_for_points (#11811) * feat: add testcase of polynom_for_points * fix: remove the print from the testcase of points_to_polynomial * fix: remove print statement from old test cases --- linear_algebra/src/polynom_for_points.py | 42 ++++++++++++++---------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index a9a9a8117..452f3edd4 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -3,30 +3,36 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: coordinates is a two dimensional matrix: [[x, y], [x, y], ...] number of points you want to use - >>> print(points_to_polynomial([])) + >>> points_to_polynomial([]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[]])) + >>> points_to_polynomial([[]]) + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. 
+ >>> points_to_polynomial([[1, 0], [2, 0], [3, 0]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 1], [3, 1]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*1.0' + >>> points_to_polynomial([[1, 3], [2, 3], [3, 3]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*3.0' + >>> points_to_polynomial([[1, 1], [2, 2], [3, 3]]) + 'f(x)=x^2*0.0+x^1*1.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 4], [3, 9]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 3], [2, 6], [3, 11]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*2.0' + >>> points_to_polynomial([[1, -3], [2, -6], [3, -11]]) + 'f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0' + >>> points_to_polynomial([[1, 5], [2, 2], [3, 9]]) + 'f(x)=x^2*5.0+x^1*-18.0+x^0*18.0' + >>> points_to_polynomial([[1, 1], [1, 2], [1, 3]]) + 'x=1' + >>> points_to_polynomial([[1, 1], [2, 2], [2, 2]]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*1.0 - >>> print(points_to_polynomial([[1, 3], [2, 3], [3, 3]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*3.0 - >>> print(points_to_polynomial([[1, 1], [2, 2], [3, 3]])) - f(x)=x^2*0.0+x^1*1.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 4], [3, 9]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 3], [2, 6], [3, 11]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*2.0 - >>> print(points_to_polynomial([[1, -3], [2, -6], [3, -11]])) - f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0 - >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]])) - f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): raise ValueError("The program cannot work out a fitting polynomial.") From cfd6d095f122d1d3ef2f3c2cdcf84864aac56fa7 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:06:15 +0530 Subject: [PATCH 092/104] Added max_sum_bst.py (#11832) * Added new algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated filename * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update maximum_sum_bst.py * def max_sum_bst(root: TreeNode | None) -> int: * def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/maximum_sum_bst.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 data_structures/binary_tree/maximum_sum_bst.py diff --git a/data_structures/binary_tree/maximum_sum_bst.py b/data_structures/binary_tree/maximum_sum_bst.py new file mode 100644 index 000000000..7dadc7b95 --- /dev/null +++ b/data_structures/binary_tree/maximum_sum_bst.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import sys +from dataclasses import dataclass + +INT_MIN = -sys.maxsize + 1 +INT_MAX = sys.maxsize - 1 + + +@dataclass +class TreeNode: + val: int = 0 + left: TreeNode | None = None + right: TreeNode | None = None + + +def max_sum_bst(root: TreeNode | None) -> int: + """ + The solution traverses a binary tree to find the maximum sum of + keys in any subtree that is a Binary Search Tree (BST). It uses + recursion to validate BST properties and calculates sums, returning + the highest sum found among all valid BST subtrees. 
+ + >>> t1 = TreeNode(4) + >>> t1.left = TreeNode(3) + >>> t1.left.left = TreeNode(1) + >>> t1.left.right = TreeNode(2) + >>> print(max_sum_bst(t1)) + 2 + >>> t2 = TreeNode(-4) + >>> t2.left = TreeNode(-2) + >>> t2.right = TreeNode(-5) + >>> print(max_sum_bst(t2)) + 0 + >>> t3 = TreeNode(1) + >>> t3.left = TreeNode(4) + >>> t3.left.left = TreeNode(2) + >>> t3.left.right = TreeNode(4) + >>> t3.right = TreeNode(3) + >>> t3.right.left = TreeNode(2) + >>> t3.right.right = TreeNode(5) + >>> t3.right.right.left = TreeNode(4) + >>> t3.right.right.right = TreeNode(6) + >>> print(max_sum_bst(t3)) + 20 + """ + ans: int = 0 + + def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: + """ + Returns the maximum sum by making recursive calls + >>> t1 = TreeNode(1) + >>> print(solver(t1)) + 1 + """ + nonlocal ans + + if not node: + return True, INT_MAX, INT_MIN, 0 # Valid BST, min, max, sum + + is_left_valid, min_left, max_left, sum_left = solver(node.left) + is_right_valid, min_right, max_right, sum_right = solver(node.right) + + if is_left_valid and is_right_valid and max_left < node.val < min_right: + total_sum = sum_left + sum_right + node.val + ans = max(ans, total_sum) + return True, min(min_left, node.val), max(max_right, node.val), total_sum + + return False, -1, -1, -1 # Not a valid BST + + solver(root) + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dba8eecb47cea7f11ac383344524afbc0ca7cf5b Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Mon, 7 Oct 2024 10:58:07 +0100 Subject: [PATCH 093/104] added gronsfeld cipher implementation (#11835) * added gronsfeld cipher implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * from string import ascii_uppercase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gronsfeld_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/gronsfeld_cipher.py | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 ciphers/gronsfeld_cipher.py diff --git a/ciphers/gronsfeld_cipher.py b/ciphers/gronsfeld_cipher.py new file mode 100644 index 000000000..8fbeab430 --- /dev/null +++ b/ciphers/gronsfeld_cipher.py @@ -0,0 +1,45 @@ +from string import ascii_uppercase + + +def gronsfeld(text: str, key: str) -> str: + """ + Encrypt plaintext with the Gronsfeld cipher + + >>> gronsfeld('hello', '412') + 'LFNPP' + >>> gronsfeld('hello', '123') + 'IGOMQ' + >>> gronsfeld('', '123') + '' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '0') + 'YES, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '01') + 'YFS, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '012') + 'YFU, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '') + Traceback (most recent call last): + ... 
+ ZeroDivisionError: integer modulo by zero + """ + ascii_len = len(ascii_uppercase) + key_len = len(key) + encrypted_text = "" + keys = [int(char) for char in key] + upper_case_text = text.upper() + + for i, char in enumerate(upper_case_text): + if char in ascii_uppercase: + new_position = (ascii_uppercase.index(char) + keys[i % key_len]) % ascii_len + shifted_letter = ascii_uppercase[new_position] + encrypted_text += shifted_letter + else: + encrypted_text += char + + return encrypted_text + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2d671df073770f0122658f462c17b838ddbe4d2a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 22:49:29 +0200 Subject: [PATCH 094/104] [pre-commit.ci] pre-commit autoupdate (#11874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/pre-commit-hooks: v4.6.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.6.0...v5.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.6.8 → v0.6.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.8...v0.6.9) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a8e5c1f6..77541027a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-executables-have-shebangs - id: check-toml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.6.9 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index cdbbac684..0a3be2a06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word Break](backtracking/word_break.py) * [Word Ladder](backtracking/word_ladder.py) * [Word Search](backtracking/word_search.py) @@ -99,6 +100,7 @@ * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) * [Enigma Machine2](ciphers/enigma_machine2.py) * [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py) + * [Gronsfeld Cipher](ciphers/gronsfeld_cipher.py) * [Hill Cipher](ciphers/hill_cipher.py) * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) @@ -211,6 +213,7 @@ * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) + * [Maximum Sum Bst](data_structures/binary_tree/maximum_sum_bst.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) * [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) @@ -284,6 +287,7 @@ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) * [Infix To Postfix 
Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) @@ -1201,6 +1205,7 @@ * [Binary Tree Traversal](searches/binary_tree_traversal.py) * [Double Linear Search](searches/double_linear_search.py) * [Double Linear Search Recursion](searches/double_linear_search_recursion.py) + * [Exponential Search](searches/exponential_search.py) * [Fibonacci Search](searches/fibonacci_search.py) * [Hill Climbing](searches/hill_climbing.py) * [Interpolation Search](searches/interpolation_search.py) From 260e3d8b350c64e927ecb1d62b953b8bf25490ea Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Tue, 8 Oct 2024 17:03:28 +0530 Subject: [PATCH 095/104] feat: add test cases in cipher's autokey (#11881) --- ciphers/autokey.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ciphers/autokey.py b/ciphers/autokey.py index 8683e6d37..05d8c066b 100644 --- a/ciphers/autokey.py +++ b/ciphers/autokey.py @@ -24,6 +24,14 @@ def encrypt(plaintext: str, key: str) -> str: Traceback (most recent call last): ... ValueError: plaintext is empty + >>> encrypt("coffee is good as python", "") + Traceback (most recent call last): + ... + ValueError: key is empty + >>> encrypt(527.26, "TheAlgorithms") + Traceback (most recent call last): + ... + TypeError: plaintext must be a string """ if not isinstance(plaintext, str): raise TypeError("plaintext must be a string") @@ -80,6 +88,14 @@ def decrypt(ciphertext: str, key: str) -> str: Traceback (most recent call last): ... TypeError: ciphertext must be a string + >>> decrypt("", "TheAlgorithms") + Traceback (most recent call last): + ... + ValueError: ciphertext is empty + >>> decrypt("vvjfpk wj ohvp su ddylsv", 2) + Traceback (most recent call last): + ... 
+ TypeError: key must be a string """ if not isinstance(ciphertext, str): raise TypeError("ciphertext must be a string") From e9e7c964655015819e0120694465928df1abefb0 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 8 Oct 2024 19:09:28 +0200 Subject: [PATCH 096/104] Create GitHub Pages docs with Sphinx (#11888) --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 2 +- .github/CODEOWNERS | 2 - .github/workflows/build.yml | 3 +- .github/workflows/sphinx.yml | 50 +++++++++ CONTRIBUTING.md | 2 +- DIRECTORY.md | 3 + LICENSE.md | 2 +- docs/{source => }/__init__.py | 0 docs/conf.py | 3 + financial/{ABOUT.md => README.md} | 2 +- index.md | 10 ++ .../{local_weighted_learning.md => README.md} | 0 pyproject.toml | 106 +++++++++++++++++- requirements.txt | 1 + source/__init__.py | 0 16 files changed, 179 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/sphinx.yml rename docs/{source => }/__init__.py (100%) create mode 100644 docs/conf.py rename financial/{ABOUT.md => README.md} (97%) create mode 100644 index.md rename machine_learning/local_weighted_learning/{local_weighted_learning.md => README.md} (100%) delete mode 100644 source/__init__.py diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 6aa0073bf..a0bd05f47 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,5 +1,5 @@ # https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md -ARG VARIANT=3.12-bookworm +ARG VARIANT=3.13-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ae1d4fb74..e23263f5b 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -7,7 +7,7 @@ // Update 'VARIANT' to pick a Python version: 3, 3.11, 3.10, 3.9, 3.8 // Append -bullseye or -buster to pin to an OS version. // Use -bullseye variants on local on arm64/Apple Silicon. - "VARIANT": "3.12-bookworm", + "VARIANT": "3.13-bookworm", } }, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d2ac43c7d..3cc25d1ba 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,8 +9,6 @@ /.* @cclauss -# /arithmetic_analysis/ - # /backtracking/ # /bit_manipulation/ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f54cc982d..b5703e2f1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,12 +25,13 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/q_fourier_transform.py --ignore=computer_vision/cnn_classification.py + --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=neural_network/input_data.py --ignore=project_euler/ + --ignore=quantum/q_fourier_transform.py --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
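The autokey doctests added in patch 095 above all target input validation, and the cipher mechanics themselves are easy to miss from the diff alone. A minimal self-contained sketch of the autokey idea, written here for illustration only (lowercase letters, pass-through for everything else) and not taken from `ciphers/autokey.py`:

```python
from string import ascii_lowercase


def autokey_encrypt_sketch(plaintext: str, key: str) -> str:
    # The key stream is the key followed by the plaintext itself, which is
    # what makes the cipher an "autokey" cipher.
    letters = [ch for ch in plaintext.lower() if ch in ascii_lowercase]
    stream = key.lower() + "".join(letters)
    encrypted, i = [], 0
    for ch in plaintext.lower():
        if ch in ascii_lowercase:
            shift = ascii_lowercase.index(stream[i])
            encrypted.append(
                ascii_lowercase[(ascii_lowercase.index(ch) + shift) % 26]
            )
            i += 1
        else:
            encrypted.append(ch)  # spaces and punctuation pass through
    return "".join(encrypted)


assert autokey_encrypt_sketch("hello", "key") == "rijss"
```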
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml new file mode 100644 index 000000000..9dfe344f9 --- /dev/null +++ b/.github/workflows/sphinx.yml @@ -0,0 +1,50 @@ +name: sphinx + +on: + # Triggers the workflow on push or pull request events but only for the "master" branch + push: + branches: ["master"] + pull_request: + branches: ["master"] + # Or manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build_docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.13 + allow-prereleases: true + - run: pip install --upgrade pip + - run: pip install myst-parser sphinx-autoapi sphinx-pyproject + - uses: actions/configure-pages@v5 + - run: sphinx-build -c docs . docs/_build/html + - uses: actions/upload-pages-artifact@v3 + with: + path: docs/_build/html + + deploy_docs: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + if: github.event_name != 'pull_request' + needs: build_docs + runs-on: ubuntu-latest + steps: + - uses: actions/deploy-pages@v4 + id: deployment diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b51132129..3df39f95b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.13+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. 
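The naming guidance above is easiest to see side by side; a throwaway sketch (not a file in the repository) of the expanded-acronym style that CONTRIBUTING.md asks for:

```python
def greatest_common_divisor(first: int, second: int) -> int:
    # Euclid's algorithm: gcd(a, b) == gcd(b, a % b) until the remainder is 0.
    # The spelled-out name documents the call site; gcd() would not.
    while second:
        first, second = second, first % second
    return abs(first)


assert greatest_common_divisor(54, 24) == 6
```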
diff --git a/DIRECTORY.md b/DIRECTORY.md index 0a3be2a06..f0a34a553 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,6 +351,9 @@ * [Power](divide_and_conquer/power.py) * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) +## Docs + * [Conf](docs/conf.py) + ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) * [All Construct](dynamic_programming/all_construct.py) diff --git a/LICENSE.md b/LICENSE.md index 2897d02e2..de631c3ef 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -MIT License +## MIT License Copyright (c) 2016-2022 TheAlgorithms and contributors diff --git a/docs/source/__init__.py b/docs/__init__.py similarity index 100% rename from docs/source/__init__.py rename to docs/__init__.py diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..f2481f107 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,3 @@ +from sphinx_pyproject import SphinxConfig + +project = SphinxConfig("../pyproject.toml", globalns=globals()).name diff --git a/financial/ABOUT.md b/financial/README.md similarity index 97% rename from financial/ABOUT.md rename to financial/README.md index f6b0647f8..e5d3a84c8 100644 --- a/financial/ABOUT.md +++ b/financial/README.md @@ -1,4 +1,4 @@ -### Interest +# Interest * Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/) * Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. " [Simple Interest](https://www.investopedia.com/) diff --git a/index.md b/index.md new file mode 100644 index 000000000..134520cb9 --- /dev/null +++ b/index.md @@ -0,0 +1,10 @@ +# TheAlgorithms/Python +```{toctree} +:maxdepth: 2 +:caption: index.md + + +CONTRIBUTING.md +README.md +LICENSE.md +``` diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/README.md similarity index 100% rename from machine_learning/local_weighted_learning/local_weighted_learning.md rename to machine_learning/local_weighted_learning/README.md diff --git a/pyproject.toml b/pyproject.toml index bb8657183..c57419e79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,22 @@ +[project] +name = "thealgorithms-python" +version = "0.0.1" +description = "TheAlgorithms in Python" +authors = [ { name = "TheAlgorithms Contributors" } ] +requires-python = ">=3.13" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.13", + +] +optional-dependencies.docs = [ + "myst-parser", + "sphinx-autoapi", + "sphinx-pyproject", +] + [tool.ruff] -target-version = "py312" +target-version = "py313" output-format = "full" lint.select = [ @@ -113,6 +130,9 @@ lint.pylint.max-statements = 88 # default: 50 ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" +[tool.pyproject-fmt] +max_supported_python = "3.13" + [tool.pytest.ini_options] markers = [ "mat_ops: mark a test as utilizing matrix operations.", @@ -129,3 +149,87 @@ omit = [ "project_euler/*", ] sort = "Cover" + +[tool.sphinx-pyproject] +copyright = "2014, TheAlgorithms" +autoapi_dirs = [ + 
"audio_filters", + "backtracking", + "bit_manipulation", + "blockchain", + "boolean_algebra", + "cellular_automata", + "ciphers", + "compression", + "computer_vision", + "conversions", + "data_structures", + "digital_image_processing", + "divide_and_conquer", + "dynamic_programming", + "electronics", + "file_transfer", + "financial", + "fractals", + "fuzzy_logic", + "genetic_algorithm", + "geodesy", + "geometry", + "graphics", + "graphs", + "greedy_methods", + "hashes", + "knapsack", + "linear_algebra", + "linear_programming", + "machine_learning", + "maths", + "matrix", + "networking_flow", + "neural_network", + "other", + "physics", + "project_euler", + "quantum", + "scheduling", + "searches", + "sorts", + "strings", + "web_programming", +] +autoapi_member_order = "groupwise" +# autoapi_python_use_implicit_namespaces = true +exclude_patterns = [ + ".*/*", + "docs/", +] +extensions = [ + "autoapi.extension", + "myst_parser", +] +html_static_path = [ "_static" ] +html_theme = "alabaster" +myst_enable_extensions = [ + "amsmath", + "attrs_inline", + "colon_fence", + "deflist", + "dollarmath", + "fieldlist", + "html_admonition", + "html_image", + # "linkify", + "replacements", + "smartquotes", + "strikethrough", + "substitution", + "tasklist", +] +myst_fence_as_directive = [ + "include", +] +templates_path = [ "_templates" ] +[tool.sphinx-pyproject.source_suffix] +".rst" = "restructuredtext" +# ".txt" = "markdown" +".md" = "markdown" diff --git a/requirements.txt b/requirements.txt index afbf25ba6..675436333 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ requests rich # scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn +sphinx_pyproject statsmodels sympy tensorflow ; python_version < '3.13' diff --git a/source/__init__.py b/source/__init__.py deleted file mode 100644 index e69de29bb..000000000 From 03a42510b01c574292ca9c6525cbf0572ff5a2a5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:42:24 +0200 Subject: [PATCH 097/104] [pre-commit.ci] pre-commit autoupdate (#12071) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/tox-dev/pyproject-fmt: 2.2.4 → 2.3.0](https://github.com/tox-dev/pyproject-fmt/compare/2.2.4...2.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 77541027a..e1d185fab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.4" + rev: "2.3.0" hooks: - id: pyproject-fmt From 6e24935f8860965dd7f2f5a50fd05724e84e9e8d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:22:34 +0200 Subject: [PATCH 098/104] [pre-commit.ci] pre-commit autoupdate (#12234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.9 → v0.7.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.9...v0.7.0) - [github.com/tox-dev/pyproject-fmt: 2.3.0 → 2.4.3](https://github.com/tox-dev/pyproject-fmt/compare/2.3.0...2.4.3) - [github.com/abravalheri/validate-pyproject: v0.20.2 → 
v0.21](https://github.com/abravalheri/validate-pyproject/compare/v0.20.2...v0.21) - [github.com/pre-commit/mirrors-mypy: v1.11.2 → v1.12.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.2...v1.12.1) * project_euler/problem_047/sol1.py: def solution(n: int = 4) -> int | None: * Update sol1.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- project_euler/problem_047/sol1.py | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e1d185fab..a849de0c4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.9 + rev: v0.7.0 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.3.0" + rev: "2.4.3" hooks: - id: pyproject-fmt @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.20.2 + rev: v0.21 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.2 + rev: v1.12.1 hooks: - id: mypy args: diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index 4ecd4f4b4..d174de27d 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -24,7 +24,7 @@ from functools import lru_cache def unique_prime_factors(n: int) -> set: """ Find unique prime factors of an integer. - Tests include sorting because only the set really matters, + Tests include sorting because only the set matters, not the order in which it is produced. >>> sorted(set(unique_prime_factors(14))) [2, 7] @@ -58,7 +58,7 @@ def upf_len(num: int) -> int: def equality(iterable: list) -> bool: """ - Check equality of ALL elements in an iterable + Check the equality of ALL elements in an iterable >>> equality([1, 2, 3, 4]) False >>> equality([2, 2, 2, 2]) @@ -69,7 +69,7 @@ def equality(iterable: list) -> bool: return len(set(iterable)) in (0, 1) -def run(n: int) -> list: +def run(n: int) -> list[int]: """ Runs core process to find problem solution. >>> run(3) @@ -77,7 +77,7 @@ def run(n: int) -> list: """ # Incrementor variable for our group list comprehension. - # This serves as the first number in each list of values + # This is the first number in each list of values # to test. base = 2 @@ -85,7 +85,7 @@ def run(n: int) -> list: # Increment each value of a generated range group = [base + i for i in range(n)] - # Run elements through out unique_prime_factors function + # Run elements through the unique_prime_factors function # Append our target number to the end. checker = [upf_len(x) for x in group] checker.append(n) @@ -98,7 +98,7 @@ def run(n: int) -> list: base += 1 -def solution(n: int = 4) -> int: +def solution(n: int = 4) -> int | None: """Return the first value of the first four consecutive integers to have four distinct prime factors each. 
>>> solution() From 52602ea5b6dd8179aa662c002891c6506f519435 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 21:27:00 +0100 Subject: [PATCH 099/104] [pre-commit.ci] pre-commit autoupdate (#12313) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.0 → v0.7.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.0...v0.7.1) - [github.com/tox-dev/pyproject-fmt: 2.4.3 → v2.4.3](https://github.com/tox-dev/pyproject-fmt/compare/2.4.3...v2.4.3) - [github.com/abravalheri/validate-pyproject: v0.21 → v0.22](https://github.com/abravalheri/validate-pyproject/compare/v0.21...v0.22) - [github.com/pre-commit/mirrors-mypy: v1.12.1 → v1.13.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.12.1...v1.13.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a849de0c4..0828b7151 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.0 + rev: v0.7.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.4.3" + rev: "v2.4.3" hooks: - id: pyproject-fmt @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.21 + rev: v0.22 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.12.1 + rev: v1.13.0 hooks: - id: mypy args: From a19bede190ddb4fa3c1c9850b612a47fc69d6709 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 1 Nov 2024 13:40:09 +0100 Subject: [PATCH 100/104] Add scripts/find_git_conflicts.sh (#12343) --- scripts/find_git_conflicts.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 scripts/find_git_conflicts.sh diff --git a/scripts/find_git_conflicts.sh b/scripts/find_git_conflicts.sh new file mode 100755 index 000000000..8af33fa75 --- /dev/null +++ b/scripts/find_git_conflicts.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Replace with your repository (format: owner/repo) +REPO="TheAlgorithms/Python" + +# Fetch open pull requests with conflicts into a variable +echo "Checking for pull requests with conflicts in $REPO..." + +prs=$(gh pr list --repo "$REPO" --state open --json number,title,mergeable --jq '.[] | select(.mergeable == "CONFLICTING") | {number, title}' --limit 500) + +# Process each conflicting PR +echo "$prs" | jq -c '.[]' | while read -r pr; do + PR_NUMBER=$(echo "$pr" | jq -r '.number') + PR_TITLE=$(echo "$pr" | jq -r '.title') + echo "PR #$PR_NUMBER - $PR_TITLE has conflicts." 
+done From 3e9ca92ca972bbe752d32b43c71a88789dce94c0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 21:09:03 +0100 Subject: [PATCH 101/104] [pre-commit.ci] pre-commit autoupdate (#12349) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.1 → v0.7.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.1...v0.7.2) - [github.com/tox-dev/pyproject-fmt: v2.4.3 → v2.5.0](https://github.com/tox-dev/pyproject-fmt/compare/v2.4.3...v2.5.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0828b7151..f112ee553 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.1 + rev: v0.7.2 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.4.3" + rev: "v2.5.0" hooks: - id: pyproject-fmt From e3f3d668be4ada7aee82eea0bc75c50436c1ab3a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 21:05:50 +0100 Subject: [PATCH 102/104] [pre-commit.ci] pre-commit autoupdate (#12370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.2 → v0.7.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.2...v0.7.3) - [github.com/abravalheri/validate-pyproject: v0.22 → v0.23](https://github.com/abravalheri/validate-pyproject/compare/v0.22...v0.23) * Update sudoku_solver.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- data_structures/arrays/sudoku_solver.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f112ee553..9d794473c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.2 + rev: v0.7.3 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.22 + rev: v0.23 hooks: - id: validate-pyproject diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index a8157a520..70bcdc748 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -172,7 +172,7 @@ def solved(values): def from_file(filename, sep="\n"): "Parse a file into a list of strings, separated by sep." 
- return open(filename).read().strip().split(sep) # noqa: SIM115 + return open(filename).read().strip().split(sep) def random_puzzle(assignments=17): From e3bd7721c8241a6db77254bac44757dced1b96f8 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 15 Nov 2024 14:59:14 +0100 Subject: [PATCH 103/104] `validate_filenames.py` Shebang `python` for Windows (#12371) --- scripts/validate_filenames.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index 0890024dd..e76b4dbfe 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!python import os try: From f3f32ae3ca818f64de2ed3267803882956681044 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 22:07:12 +0100 Subject: [PATCH 104/104] [pre-commit.ci] pre-commit autoupdate (#12385) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.3 → v0.7.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.3...v0.7.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9d794473c..6ad19f1fd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.3 + rev: v0.7.4 hooks: - id: ruff - id: ruff-format
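A closing note on patch 100's `scripts/find_git_conflicts.sh`: it pipes jq's per-line objects back through `jq -c '.[]'`, which iterates over each object's values rather than over whole objects. A Python sketch of the same check that sidesteps the shell quoting (it assumes the GitHub CLI `gh` is installed and authenticated, and is not a script shipped in the repository):

```python
import json
import subprocess

REPO = "TheAlgorithms/Python"

# Ask gh for open PRs with their mergeability, then report the conflicting ones.
raw = subprocess.run(
    ["gh", "pr", "list", "--repo", REPO, "--state", "open", "--limit", "500",
     "--json", "number,title,mergeable"],
    capture_output=True, text=True, check=True,
).stdout
for pr in json.loads(raw):
    if pr["mergeable"] == "CONFLICTING":
        print(f"PR #{pr['number']} - {pr['title']} has conflicts.")
```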
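On patch 102's `sudoku_solver.py` tweak, which drops the `# noqa: SIM115` while keeping the bare `open()` call: for reference, a context-managed variant along the lines that ruff's SIM115 rule nudges toward (an illustrative rewrite with a hypothetical name, not the code that was merged):

```python
from pathlib import Path


def from_file_sketch(filename: str, sep: str = "\n") -> list[str]:
    # Path.read_text() opens and closes the file internally, so no file
    # handle is left dangling and the linter has nothing to flag.
    return Path(filename).read_text().strip().split(sep)
```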