From e20b503b24fc271321a23584772ad8f0db17daf2 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:36:08 +0530 Subject: [PATCH 01/14] Improve comments, add doctests for kahns_algorithm_topo.py (#11668) * Improve comments, add doctests for kahns_algorithm_topo.py * Improve function docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename variables, remove print --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/kahns_algorithm_topo.py | 61 ++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index b1260bd5b..c956cf9f4 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,36 +1,61 @@ -def topological_sort(graph): +def topological_sort(graph: dict[int, list[int]]) -> list[int] | None: """ - Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph - using BFS + Perform topological sorting of a Directed Acyclic Graph (DAG) + using Kahn's Algorithm via Breadth-First Search (BFS). + + Topological sorting is a linear ordering of vertices in a graph such that for + every directed edge u → v, vertex u comes before vertex v in the ordering. + + Parameters: + graph: Adjacency list representing the directed graph where keys are + vertices, and values are lists of adjacent vertices. + + Returns: + The topologically sorted order of vertices if the graph is a DAG. + Returns None if the graph contains a cycle. + + Example: + >>> graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} + >>> topological_sort(graph) + [0, 1, 2, 3, 4, 5] + + >>> graph_with_cycle = {0: [1], 1: [2], 2: [0]} + >>> topological_sort(graph_with_cycle) """ + indegree = [0] * len(graph) queue = [] - topo = [] - cnt = 0 + topo_order = [] + processed_vertices_count = 0 + # Calculate the indegree of each vertex for values in graph.values(): for i in values: indegree[i] += 1 + # Add all vertices with 0 indegree to the queue for i in range(len(indegree)): if indegree[i] == 0: queue.append(i) + # Perform BFS while queue: vertex = queue.pop(0) - cnt += 1 - topo.append(vertex) - for x in graph[vertex]: - indegree[x] -= 1 - if indegree[x] == 0: - queue.append(x) + processed_vertices_count += 1 + topo_order.append(vertex) - if cnt != len(graph): - print("Cycle exists") - else: - print(topo) + # Traverse neighbors + for neighbor in graph[vertex]: + indegree[neighbor] -= 1 + if indegree[neighbor] == 0: + queue.append(neighbor) + + if processed_vertices_count != len(graph): + return None # no topological ordering exists due to cycle + return topo_order # valid topological ordering -# Adjacency List of Graph -graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topological_sort(graph) +if __name__ == "__main__": + import doctest + + doctest.testmod() From 917ad62105dc829e45c0732d9ac2aae7ef358627 Mon Sep 17 00:00:00 2001 From: Sai Aswin Madhavan <saiaswin.211ec244@nitk.edu.in> Date: Fri, 4 Oct 2024 14:58:50 +0530 Subject: [PATCH 02/14] Removed incorrect type hints (#11711) --- strings/min_cost_string_conversion.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index d147a9d79..40d54f0e8 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ 
-21,7 +21,6 @@ def compute_transform_tables( destination_seq = list(destination_string) len_source_seq = len(source_seq) len_destination_seq = len(destination_seq) - costs = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] @@ -31,28 +30,28 @@ def compute_transform_tables( for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost - ops[i][0] = f"D{source_seq[i - 1]:c}" + ops[i][0] = f"D{source_seq[i - 1]}" for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost - ops[0][i] = f"I{destination_seq[i - 1]:c}" + ops[0][i] = f"I{destination_seq[i - 1]}" for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost - ops[i][j] = f"C{source_seq[i - 1]:c}" + ops[i][j] = f"C{source_seq[i - 1]}" else: costs[i][j] = costs[i - 1][j - 1] + replace_cost - ops[i][j] = f"R{source_seq[i - 1]:c}" + str(destination_seq[j - 1]) + ops[i][j] = f"R{source_seq[i - 1]}" + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost - ops[i][j] = f"D{source_seq[i - 1]:c}" + ops[i][j] = f"D{source_seq[i - 1]}" if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost - ops[i][j] = f"I{destination_seq[j - 1]:c}" + ops[i][j] = f"I{destination_seq[j - 1]}" return costs, ops From 59ff87dc55b704dc7d3683bb6fabc7c4dc0afade Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Fri, 4 Oct 2024 10:36:14 +0100 Subject: [PATCH 03/14] Added doctests to min_cost_string_conversion.py and removed :c specifier (#11721) * Added doctests to min_cost_string_conversion.py and removed :c specifier * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved line length issues based on ruff requirements * modified in compliance with ruff for line length * Update strings/min_cost_string_conversion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng <tianyizheng02@gmail.com> --- strings/min_cost_string_conversion.py | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 40d54f0e8..a5a3c4a4e 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -17,6 +17,23 @@ def compute_transform_tables( delete_cost: int, insert_cost: int, ) -> tuple[list[list[int]], list[list[str]]]: + """ + Finds the most cost efficient sequence + for converting one string into another. + + >>> costs, operations = compute_transform_tables("cat", "cut", 1, 2, 3, 3) + >>> costs[0][:4] + [0, 3, 6, 9] + >>> costs[2][:4] + [6, 4, 3, 6] + >>> operations[0][:4] + ['0', 'Ic', 'Iu', 'It'] + >>> operations[3][:4] + ['Dt', 'Dt', 'Rtu', 'Ct'] + + >>> compute_transform_tables("", "", 1, 2, 3, 3) + ([[0]], [['0']]) + """ source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) @@ -57,6 +74,24 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: + """ + Assembles the transformations based on the ops table. + + >>> ops = [['0', 'Ic', 'Iu', 'It'], + ... ['Dc', 'Cc', 'Iu', 'It'], + ... ['Da', 'Da', 'Rau', 'Rat'], + ... 
['Dt', 'Dt', 'Rtu', 'Ct']] + >>> x = len(ops) - 1 + >>> y = len(ops[0]) - 1 + >>> assemble_transformation(ops, x, y) + ['Cc', 'Rau', 'Ct'] + + >>> ops1 = [['0']] + >>> x1 = len(ops1) - 1 + >>> y1 = len(ops1[0]) - 1 + >>> assemble_transformation(ops1, x1, y1) + [] + """ if i == 0 and j == 0: return [] elif ops[i][j][0] in {"C", "R"}: From 9a572dec2b6011e7c2c0d82f50989b3a404ea426 Mon Sep 17 00:00:00 2001 From: ARNAV RAJ <126798788+Acuspeedster@users.noreply.github.com> Date: Fri, 4 Oct 2024 21:59:39 +0530 Subject: [PATCH 04/14] feat: Implemented Matrix Exponentiation Method (#11747) * feat: add Matrix Exponentiation method docs: updated the header documentation and added new documentation for the new function. * feat: added new function matrix exponetiation method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * feat: This function uses the tail-recursive form of the Euclidean algorithm to calculate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reduced the number of characters per line in the comments * removed unwanted code * feat: Implemented a new function to swaap numbers without dummy variable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed previos code * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/fibonacci.py Co-authored-by: Tianyi Zheng <tianyizheng02@gmail.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng <tianyizheng02@gmail.com> --- maths/fibonacci.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 927700b04..24b2d7ae4 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -7,6 +7,8 @@ the Binet's formula function because the Binet formula function uses floats NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats +NOTE 3: the matrix function is the fastest and most memory efficient for large n + See benchmark numbers in __main__ for performance comparisons/ https://en.wikipedia.org/wiki/Fibonacci_number for more information @@ -17,6 +19,9 @@ from collections.abc import Iterator from math import sqrt from time import time +import numpy as np +from numpy import ndarray + def time_func(func, *args, **kwargs): """ @@ -230,6 +235,88 @@ def fib_binet(n: int) -> list[int]: return [round(phi**i / sqrt_5) for i in range(n + 1)] +def matrix_pow_np(m: ndarray, power: int) -> ndarray: + """ + Raises a matrix to the power of 'power' using binary exponentiation. + + Args: + m: Matrix as a numpy array. 
+ power: The power to which the matrix is to be raised. + + Returns: + The matrix raised to the power. + + Raises: + ValueError: If power is negative. + + >>> m = np.array([[1, 1], [1, 0]], dtype=int) + >>> matrix_pow_np(m, 0) # Identity matrix when raised to the power of 0 + array([[1, 0], + [0, 1]]) + + >>> matrix_pow_np(m, 1) # Same matrix when raised to the power of 1 + array([[1, 1], + [1, 0]]) + + >>> matrix_pow_np(m, 5) + array([[8, 5], + [5, 3]]) + + >>> matrix_pow_np(m, -1) + Traceback (most recent call last): + ... + ValueError: power is negative + """ + result = np.array([[1, 0], [0, 1]], dtype=int) # Identity Matrix + base = m + if power < 0: # Negative power is not allowed + raise ValueError("power is negative") + while power: + if power % 2 == 1: + result = np.dot(result, base) + base = np.dot(base, base) + power //= 2 + return result + + +def fib_matrix_np(n: int) -> int: + """ + Calculates the n-th Fibonacci number using matrix exponentiation. + https://www.nayuki.io/page/fast-fibonacci-algorithms#:~:text= + Summary:%20The%20two%20fast%20Fibonacci%20algorithms%20are%20matrix + + Args: + n: Fibonacci sequence index + + Returns: + The n-th Fibonacci number. + + Raises: + ValueError: If n is negative. + + >>> fib_matrix_np(0) + 0 + >>> fib_matrix_np(1) + 1 + >>> fib_matrix_np(5) + 5 + >>> fib_matrix_np(10) + 55 + >>> fib_matrix_np(-1) + Traceback (most recent call last): + ... + ValueError: n is negative + """ + if n < 0: + raise ValueError("n is negative") + if n == 0: + return 0 + + m = np.array([[1, 1], [1, 0]], dtype=int) + result = matrix_pow_np(m, n - 1) + return int(result[0, 0]) + + if __name__ == "__main__": from doctest import testmod @@ -242,3 +329,4 @@ if __name__ == "__main__": time_func(fib_memoization, num) # 0.0100 ms time_func(fib_recursive_cached, num) # 0.0153 ms time_func(fib_recursive, num) # 257.0910 ms + time_func(fib_matrix_np, num) # 0.0000 ms From 5a8655d306d872085112d965067fcdc440286928 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Sat, 5 Oct 2024 22:49:58 +0530 Subject: [PATCH 05/14] Added new algorithm to generate numbers in lexicographical order (#11674) * Added algorithm to generate numbers in lexicographical order * Removed the test cases * Updated camelcase to snakecase * Added doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added descriptive name for n * Reduced the number of letters * Updated the return type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated import statement * Updated return type to Iterator[int] * removed parentheses --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../stacks/lexicographical_numbers.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 data_structures/stacks/lexicographical_numbers.py diff --git a/data_structures/stacks/lexicographical_numbers.py b/data_structures/stacks/lexicographical_numbers.py new file mode 100644 index 000000000..6a174e7d9 --- /dev/null +++ b/data_structures/stacks/lexicographical_numbers.py @@ -0,0 +1,38 @@ +from collections.abc import Iterator + + +def lexical_order(max_number: int) -> Iterator[int]: + """ + Generate numbers in lexical order from 1 to max_number. 
+ + >>> " ".join(map(str, lexical_order(13))) + '1 10 11 12 13 2 3 4 5 6 7 8 9' + >>> list(lexical_order(1)) + [1] + >>> " ".join(map(str, lexical_order(20))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 3 4 5 6 7 8 9' + >>> " ".join(map(str, lexical_order(25))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 3 4 5 6 7 8 9' + >>> list(lexical_order(12)) + [1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9] + """ + + stack = [1] + + while stack: + num = stack.pop() + if num > max_number: + continue + + yield num + if (num % 10) != 9: + stack.append(num + 1) + + stack.append(num * 10) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"Numbers from 1 to 25 in lexical order: {list(lexical_order(26))}") From 50aca04c67315ef7de7ef03e51a018075d8d026b Mon Sep 17 00:00:00 2001 From: Jeel Rupapara <zeelrupapara@gmail.com> Date: Sat, 5 Oct 2024 22:51:43 +0530 Subject: [PATCH 06/14] feat: increase test coverage of longest_common_subsequence to 75% (#11777) --- .../longest_common_subsequence.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 9a98b1736..4a6c880af 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -28,6 +28,24 @@ def longest_common_subsequence(x: str, y: str): (2, 'ph') >>> longest_common_subsequence("computer", "food") (1, 'o') + >>> longest_common_subsequence("", "abc") # One string is empty + (0, '') + >>> longest_common_subsequence("abc", "") # Other string is empty + (0, '') + >>> longest_common_subsequence("", "") # Both strings are empty + (0, '') + >>> longest_common_subsequence("abc", "def") # No common subsequence + (0, '') + >>> longest_common_subsequence("abc", "abc") # Identical strings + (3, 'abc') + >>> longest_common_subsequence("a", "a") # Single character match + (1, 'a') + >>> longest_common_subsequence("a", "b") # Single character no match + (0, '') + >>> longest_common_subsequence("abcdef", "ace") # Interleaved subsequence + (3, 'ace') + >>> longest_common_subsequence("ABCD", "ACBD") # No repeated characters + (3, 'ABD') """ # find the length of strings From ad6395d3408b9d80a0bef4d180d1e7613a55d807 Mon Sep 17 00:00:00 2001 From: Andrey Ivanov <97749666+ivnvxd@users.noreply.github.com> Date: Sat, 5 Oct 2024 18:24:58 +0100 Subject: [PATCH 07/14] Update ruff usage example in CONTRIBUTING.md (#11772) * Update ruff usage example * Update CONTRIBUTING.md Co-authored-by: Tianyi Zheng <tianyizheng02@gmail.com> --------- Co-authored-by: Tianyi Zheng <tianyizheng02@gmail.com> --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 096582e45..b51132129 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -96,7 +96,7 @@ We want your work to be readable by others; therefore, we encourage you to note ```bash python3 -m pip install ruff # only required the first time - ruff . + ruff check ``` - Original code submission require docstrings or comments to describe your work. 
From fcf82a1eda21dcf36254a8fcaadc913f6a94c8da Mon Sep 17 00:00:00 2001 From: Vineet Kumar <108144301+whyvineet@users.noreply.github.com> Date: Sat, 5 Oct 2024 23:04:48 +0530 Subject: [PATCH 08/14] =?UTF-8?q?Implemented=20Exponential=20Search=20with?= =?UTF-8?q?=20binary=20search=20for=20improved=20perfor=E2=80=A6=20(#11666?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Implemented Exponential Search with binary search for improved performance on large sorted arrays. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added type hints and doctests for binary_search and exponential_search functions. Improved code documentation and ensured testability. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename Exponential_Search.py to exponential_search.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- searches/exponential_search.py | 113 +++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 searches/exponential_search.py diff --git a/searches/exponential_search.py b/searches/exponential_search.py new file mode 100644 index 000000000..ed09b14e1 --- /dev/null +++ b/searches/exponential_search.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 + +""" +Pure Python implementation of exponential search algorithm + +For more information, see the Wikipedia page: +https://en.wikipedia.org/wiki/Exponential_search + +For doctests run the following command: +python3 -m doctest -v exponential_search.py + +For manual testing run: +python3 exponential_search.py +""" + +from __future__ import annotations + + +def binary_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int = -1 +) -> int: + """Pure implementation of binary search algorithm in Python using recursion + + Be careful: the collection must be ascending sorted otherwise, the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :param left: starting index for the search + :param right: ending index for the search + :return: index of the found item or -1 if the item is not found + + Examples: + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4) + 0 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) + 4 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) + 1 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4) + -1 + """ + if right < 0: + right = len(sorted_collection) - 1 + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + if right < left: + return -1 + + midpoint = left + (right - left) // 2 + + if sorted_collection[midpoint] == item: + return midpoint + elif sorted_collection[midpoint] > item: + return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1) + else: + return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) + + +def exponential_search(sorted_collection: list[int], item: int) -> int: + """ + Pure implementation of an exponential search algorithm in Python. 
+ For more information, refer to: + https://en.wikipedia.org/wiki/Exponential_search + + Be careful: the collection must be ascending sorted, otherwise the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :return: index of the found item or -1 if the item is not found + + The time complexity of this algorithm is O(log i) where i is the index of the item. + + Examples: + >>> exponential_search([0, 5, 7, 10, 15], 0) + 0 + >>> exponential_search([0, 5, 7, 10, 15], 15) + 4 + >>> exponential_search([0, 5, 7, 10, 15], 5) + 1 + >>> exponential_search([0, 5, 7, 10, 15], 6) + -1 + """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + + if sorted_collection[0] == item: + return 0 + + bound = 1 + while bound < len(sorted_collection) and sorted_collection[bound] < item: + bound *= 2 + + left = bound // 2 + right = min(bound, len(sorted_collection) - 1) + return binary_search_by_recursion(sorted_collection, item, left, right) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Manual testing + user_input = input("Enter numbers separated by commas: ").strip() + collection = sorted(int(item) for item in user_input.split(",")) + target = int(input("Enter a number to search for: ")) + result = exponential_search(sorted_collection=collection, item=target) + if result == -1: + print(f"{target} was not found in {collection}.") + else: + print(f"{target} was found at index {result} in {collection}.") From 3422ebc75bda6aba9b234eb217a79f25bec65f21 Mon Sep 17 00:00:00 2001 From: Jeel Rupapara <zeelrupapara@gmail.com> Date: Mon, 7 Oct 2024 12:00:11 +0530 Subject: [PATCH 09/14] feat: add testcase of polynom_for_points (#11811) * feat: add testcase of polynom_for_points * fix: remove the print from the testcase of points_to_polynomial * fix: remove print statement from old test cases --- linear_algebra/src/polynom_for_points.py | 42 ++++++++++++++---------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index a9a9a8117..452f3edd4 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -3,30 +3,36 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: coordinates is a two dimensional matrix: [[x, y], [x, y], ...] number of points you want to use - >>> print(points_to_polynomial([])) + >>> points_to_polynomial([]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[]])) + >>> points_to_polynomial([[]]) + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. 
+ >>> points_to_polynomial([[1, 0], [2, 0], [3, 0]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 1], [3, 1]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*1.0' + >>> points_to_polynomial([[1, 3], [2, 3], [3, 3]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*3.0' + >>> points_to_polynomial([[1, 1], [2, 2], [3, 3]]) + 'f(x)=x^2*0.0+x^1*1.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 4], [3, 9]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 3], [2, 6], [3, 11]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*2.0' + >>> points_to_polynomial([[1, -3], [2, -6], [3, -11]]) + 'f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0' + >>> points_to_polynomial([[1, 5], [2, 2], [3, 9]]) + 'f(x)=x^2*5.0+x^1*-18.0+x^0*18.0' + >>> points_to_polynomial([[1, 1], [1, 2], [1, 3]]) + 'x=1' + >>> points_to_polynomial([[1, 1], [2, 2], [2, 2]]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*1.0 - >>> print(points_to_polynomial([[1, 3], [2, 3], [3, 3]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*3.0 - >>> print(points_to_polynomial([[1, 1], [2, 2], [3, 3]])) - f(x)=x^2*0.0+x^1*1.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 4], [3, 9]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 3], [2, 6], [3, 11]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*2.0 - >>> print(points_to_polynomial([[1, -3], [2, -6], [3, -11]])) - f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0 - >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]])) - f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): raise ValueError("The program cannot work out a fitting polynomial.") From cfd6d095f122d1d3ef2f3c2cdcf84864aac56fa7 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:06:15 +0530 Subject: [PATCH 10/14] Added max_sum_bst.py (#11832) * Added new algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated filename * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update maximum_sum_bst.py * def max_sum_bst(root: TreeNode | None) -> int: * def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss <cclauss@me.com> --- .../binary_tree/maximum_sum_bst.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 data_structures/binary_tree/maximum_sum_bst.py diff --git a/data_structures/binary_tree/maximum_sum_bst.py b/data_structures/binary_tree/maximum_sum_bst.py new file mode 100644 index 000000000..7dadc7b95 --- /dev/null +++ b/data_structures/binary_tree/maximum_sum_bst.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import sys +from dataclasses import dataclass + +INT_MIN = -sys.maxsize + 1 +INT_MAX = sys.maxsize - 1 + + +@dataclass +class TreeNode: + val: int = 0 + left: TreeNode | None = None + right: TreeNode | None = None + + +def max_sum_bst(root: TreeNode | None) -> int: + """ + The solution traverses a binary tree to find the maximum sum of + keys in any subtree that is a Binary Search Tree (BST). It uses + recursion to validate BST properties and calculates sums, returning + the highest sum found among all valid BST subtrees. 
+ + >>> t1 = TreeNode(4) + >>> t1.left = TreeNode(3) + >>> t1.left.left = TreeNode(1) + >>> t1.left.right = TreeNode(2) + >>> print(max_sum_bst(t1)) + 2 + >>> t2 = TreeNode(-4) + >>> t2.left = TreeNode(-2) + >>> t2.right = TreeNode(-5) + >>> print(max_sum_bst(t2)) + 0 + >>> t3 = TreeNode(1) + >>> t3.left = TreeNode(4) + >>> t3.left.left = TreeNode(2) + >>> t3.left.right = TreeNode(4) + >>> t3.right = TreeNode(3) + >>> t3.right.left = TreeNode(2) + >>> t3.right.right = TreeNode(5) + >>> t3.right.right.left = TreeNode(4) + >>> t3.right.right.right = TreeNode(6) + >>> print(max_sum_bst(t3)) + 20 + """ + ans: int = 0 + + def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: + """ + Returns the maximum sum by making recursive calls + >>> t1 = TreeNode(1) + >>> print(solver(t1)) + 1 + """ + nonlocal ans + + if not node: + return True, INT_MAX, INT_MIN, 0 # Valid BST, min, max, sum + + is_left_valid, min_left, max_left, sum_left = solver(node.left) + is_right_valid, min_right, max_right, sum_right = solver(node.right) + + if is_left_valid and is_right_valid and max_left < node.val < min_right: + total_sum = sum_left + sum_right + node.val + ans = max(ans, total_sum) + return True, min(min_left, node.val), max(max_right, node.val), total_sum + + return False, -1, -1, -1 # Not a valid BST + + solver(root) + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dba8eecb47cea7f11ac383344524afbc0ca7cf5b Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Mon, 7 Oct 2024 10:58:07 +0100 Subject: [PATCH 11/14] added gronsfeld cipher implementation (#11835) * added gronsfeld cipher implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * from string import ascii_uppercase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gronsfeld_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss <cclauss@me.com> --- ciphers/gronsfeld_cipher.py | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 ciphers/gronsfeld_cipher.py diff --git a/ciphers/gronsfeld_cipher.py b/ciphers/gronsfeld_cipher.py new file mode 100644 index 000000000..8fbeab430 --- /dev/null +++ b/ciphers/gronsfeld_cipher.py @@ -0,0 +1,45 @@ +from string import ascii_uppercase + + +def gronsfeld(text: str, key: str) -> str: + """ + Encrypt plaintext with the Gronsfeld cipher + + >>> gronsfeld('hello', '412') + 'LFNPP' + >>> gronsfeld('hello', '123') + 'IGOMQ' + >>> gronsfeld('', '123') + '' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '0') + 'YES, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '01') + 'YFS, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '012') + 'YFU, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '') + Traceback (most recent call last): + ... 
+ ZeroDivisionError: integer modulo by zero + """ + ascii_len = len(ascii_uppercase) + key_len = len(key) + encrypted_text = "" + keys = [int(char) for char in key] + upper_case_text = text.upper() + + for i, char in enumerate(upper_case_text): + if char in ascii_uppercase: + new_position = (ascii_uppercase.index(char) + keys[i % key_len]) % ascii_len + shifted_letter = ascii_uppercase[new_position] + encrypted_text += shifted_letter + else: + encrypted_text += char + + return encrypted_text + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2d671df073770f0122658f462c17b838ddbe4d2a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 22:49:29 +0200 Subject: [PATCH 12/14] [pre-commit.ci] pre-commit autoupdate (#11874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/pre-commit-hooks: v4.6.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.6.0...v5.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.6.8 → v0.6.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.8...v0.6.9) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a8e5c1f6..77541027a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-executables-have-shebangs - id: check-toml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.6.9 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index cdbbac684..0a3be2a06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word Break](backtracking/word_break.py) * [Word Ladder](backtracking/word_ladder.py) * [Word Search](backtracking/word_search.py) @@ -99,6 +100,7 @@ * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) * [Enigma Machine2](ciphers/enigma_machine2.py) * [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py) + * [Gronsfeld Cipher](ciphers/gronsfeld_cipher.py) * [Hill Cipher](ciphers/hill_cipher.py) * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) @@ -211,6 +213,7 @@ * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) + * [Maximum Sum Bst](data_structures/binary_tree/maximum_sum_bst.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) * [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) @@ -284,6 +287,7 @@ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) * [Infix To Postfix 
Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) @@ -1201,6 +1205,7 @@ * [Binary Tree Traversal](searches/binary_tree_traversal.py) * [Double Linear Search](searches/double_linear_search.py) * [Double Linear Search Recursion](searches/double_linear_search_recursion.py) + * [Exponential Search](searches/exponential_search.py) * [Fibonacci Search](searches/fibonacci_search.py) * [Hill Climbing](searches/hill_climbing.py) * [Interpolation Search](searches/interpolation_search.py) From 260e3d8b350c64e927ecb1d62b953b8bf25490ea Mon Sep 17 00:00:00 2001 From: Jeel Rupapara <zeelrupapara@gmail.com> Date: Tue, 8 Oct 2024 17:03:28 +0530 Subject: [PATCH 13/14] feat: add test cases in cipher's autokey (#11881) --- ciphers/autokey.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ciphers/autokey.py b/ciphers/autokey.py index 8683e6d37..05d8c066b 100644 --- a/ciphers/autokey.py +++ b/ciphers/autokey.py @@ -24,6 +24,14 @@ def encrypt(plaintext: str, key: str) -> str: Traceback (most recent call last): ... ValueError: plaintext is empty + >>> encrypt("coffee is good as python", "") + Traceback (most recent call last): + ... + ValueError: key is empty + >>> encrypt(527.26, "TheAlgorithms") + Traceback (most recent call last): + ... + TypeError: plaintext must be a string """ if not isinstance(plaintext, str): raise TypeError("plaintext must be a string") @@ -80,6 +88,14 @@ def decrypt(ciphertext: str, key: str) -> str: Traceback (most recent call last): ... TypeError: ciphertext must be a string + >>> decrypt("", "TheAlgorithms") + Traceback (most recent call last): + ... + ValueError: ciphertext is empty + >>> decrypt("vvjfpk wj ohvp su ddylsv", 2) + Traceback (most recent call last): + ... 
+ TypeError: key must be a string """ if not isinstance(ciphertext, str): raise TypeError("ciphertext must be a string") From e9e7c964655015819e0120694465928df1abefb0 Mon Sep 17 00:00:00 2001 From: Christian Clauss <cclauss@me.com> Date: Tue, 8 Oct 2024 19:09:28 +0200 Subject: [PATCH 14/14] Create GitHub Pages docs with Sphinx (#11888) --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 2 +- .github/CODEOWNERS | 2 - .github/workflows/build.yml | 3 +- .github/workflows/sphinx.yml | 50 +++++++++ CONTRIBUTING.md | 2 +- DIRECTORY.md | 3 + LICENSE.md | 2 +- docs/{source => }/__init__.py | 0 docs/conf.py | 3 + financial/{ABOUT.md => README.md} | 2 +- index.md | 10 ++ .../{local_weighted_learning.md => README.md} | 0 pyproject.toml | 106 +++++++++++++++++- requirements.txt | 1 + source/__init__.py | 0 16 files changed, 179 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/sphinx.yml rename docs/{source => }/__init__.py (100%) create mode 100644 docs/conf.py rename financial/{ABOUT.md => README.md} (97%) create mode 100644 index.md rename machine_learning/local_weighted_learning/{local_weighted_learning.md => README.md} (100%) delete mode 100644 source/__init__.py diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 6aa0073bf..a0bd05f47 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,5 +1,5 @@ # https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md -ARG VARIANT=3.12-bookworm +ARG VARIANT=3.13-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ae1d4fb74..e23263f5b 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -7,7 +7,7 @@ // Update 'VARIANT' to pick a Python version: 3, 3.11, 3.10, 3.9, 3.8 // Append -bullseye or -buster to pin to an OS version. // Use -bullseye variants on local on arm64/Apple Silicon. - "VARIANT": "3.12-bookworm", + "VARIANT": "3.13-bookworm", } }, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d2ac43c7d..3cc25d1ba 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,8 +9,6 @@ /.* @cclauss -# /arithmetic_analysis/ - # /backtracking/ # /bit_manipulation/ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f54cc982d..b5703e2f1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,12 +25,13 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/q_fourier_transform.py --ignore=computer_vision/cnn_classification.py + --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=neural_network/input_data.py --ignore=project_euler/ + --ignore=quantum/q_fourier_transform.py --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml new file mode 100644 index 000000000..9dfe344f9 --- /dev/null +++ b/.github/workflows/sphinx.yml @@ -0,0 +1,50 @@ +name: sphinx + +on: + # Triggers the workflow on push or pull request events but only for the "master" branch + push: + branches: ["master"] + pull_request: + branches: ["master"] + # Or manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build_docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.13 + allow-prereleases: true + - run: pip install --upgrade pip + - run: pip install myst-parser sphinx-autoapi sphinx-pyproject + - uses: actions/configure-pages@v5 + - run: sphinx-build -c docs . docs/_build/html + - uses: actions/upload-pages-artifact@v3 + with: + path: docs/_build/html + + deploy_docs: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + if: github.event_name != 'pull_request' + needs: build_docs + runs-on: ubuntu-latest + steps: + - uses: actions/deploy-pages@v4 + id: deployment diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b51132129..3df39f95b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.13+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. 
diff --git a/DIRECTORY.md b/DIRECTORY.md index 0a3be2a06..f0a34a553 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,6 +351,9 @@ * [Power](divide_and_conquer/power.py) * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) +## Docs + * [Conf](docs/conf.py) + ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) * [All Construct](dynamic_programming/all_construct.py) diff --git a/LICENSE.md b/LICENSE.md index 2897d02e2..de631c3ef 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -MIT License +## MIT License Copyright (c) 2016-2022 TheAlgorithms and contributors diff --git a/docs/source/__init__.py b/docs/__init__.py similarity index 100% rename from docs/source/__init__.py rename to docs/__init__.py diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..f2481f107 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,3 @@ +from sphinx_pyproject import SphinxConfig + +project = SphinxConfig("../pyproject.toml", globalns=globals()).name diff --git a/financial/ABOUT.md b/financial/README.md similarity index 97% rename from financial/ABOUT.md rename to financial/README.md index f6b0647f8..e5d3a84c8 100644 --- a/financial/ABOUT.md +++ b/financial/README.md @@ -1,4 +1,4 @@ -### Interest +# Interest * Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/) * Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. " [Simple Interest](https://www.investopedia.com/) diff --git a/index.md b/index.md new file mode 100644 index 000000000..134520cb9 --- /dev/null +++ b/index.md @@ -0,0 +1,10 @@ +# TheAlgorithms/Python +```{toctree} +:maxdepth: 2 +:caption: index.md + +<!-- CONTRIBUTING.md must be the FIRST doc and README.md can come after. 
--> +CONTRIBUTING.md +README.md +LICENSE.md +``` diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/README.md similarity index 100% rename from machine_learning/local_weighted_learning/local_weighted_learning.md rename to machine_learning/local_weighted_learning/README.md diff --git a/pyproject.toml b/pyproject.toml index bb8657183..c57419e79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,22 @@ +[project] +name = "thealgorithms-python" +version = "0.0.1" +description = "TheAlgorithms in Python" +authors = [ { name = "TheAlgorithms Contributors" } ] +requires-python = ">=3.13" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.13", + +] +optional-dependencies.docs = [ + "myst-parser", + "sphinx-autoapi", + "sphinx-pyproject", +] + [tool.ruff] -target-version = "py312" +target-version = "py313" output-format = "full" lint.select = [ @@ -113,6 +130,9 @@ lint.pylint.max-statements = 88 # default: 50 ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" +[tool.pyproject-fmt] +max_supported_python = "3.13" + [tool.pytest.ini_options] markers = [ "mat_ops: mark a test as utilizing matrix operations.", @@ -129,3 +149,87 @@ omit = [ "project_euler/*", ] sort = "Cover" + +[tool.sphinx-pyproject] +copyright = "2014, TheAlgorithms" +autoapi_dirs = [ + "audio_filters", + "backtracking", + "bit_manipulation", + "blockchain", + "boolean_algebra", + "cellular_automata", + "ciphers", + "compression", + "computer_vision", + "conversions", + "data_structures", + "digital_image_processing", + "divide_and_conquer", + "dynamic_programming", + "electronics", + "file_transfer", + "financial", + "fractals", + "fuzzy_logic", + "genetic_algorithm", + "geodesy", + "geometry", + "graphics", + "graphs", + "greedy_methods", + "hashes", + "knapsack", + "linear_algebra", + "linear_programming", + "machine_learning", + "maths", + "matrix", + "networking_flow", + "neural_network", + "other", + "physics", + "project_euler", + "quantum", + "scheduling", + "searches", + "sorts", + "strings", + "web_programming", +] +autoapi_member_order = "groupwise" +# autoapi_python_use_implicit_namespaces = true +exclude_patterns = [ + ".*/*", + "docs/", +] +extensions = [ + "autoapi.extension", + "myst_parser", +] +html_static_path = [ "_static" ] +html_theme = "alabaster" +myst_enable_extensions = [ + "amsmath", + "attrs_inline", + "colon_fence", + "deflist", + "dollarmath", + "fieldlist", + "html_admonition", + "html_image", + # "linkify", + "replacements", + "smartquotes", + "strikethrough", + "substitution", + "tasklist", +] +myst_fence_as_directive = [ + "include", +] +templates_path = [ "_templates" ] +[tool.sphinx-pyproject.source_suffix] +".rst" = "restructuredtext" +# ".txt" = "markdown" +".md" = "markdown" diff --git a/requirements.txt b/requirements.txt index afbf25ba6..675436333 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ requests rich # scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn +sphinx_pyproject statsmodels sympy tensorflow ; python_version < '3.13' diff --git a/source/__init__.py b/source/__init__.py deleted file mode 100644 index e69de29bb..000000000
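
A closing editorial note on PATCH 14/14: the new sphinx.yml workflow builds the site with `sphinx-build -c docs . docs/_build/html` after installing myst-parser, sphinx-autoapi and sphinx-pyproject. The sketch below mirrors that same invocation from Python for a local build; it is an illustration of the workflow's command, not code from the patch, and it assumes those three packages (plus Sphinx itself) are already installed.

```python
# Local equivalent of the CI step in .github/workflows/sphinx.yml:
#   sphinx-build -c docs . docs/_build/html
from sphinx.cmd.build import build_main

if __name__ == "__main__":
    # -c docs           -> read conf.py from the docs/ directory (added by this patch)
    # .                 -> source directory is the repository root, where index.md lives
    # docs/_build/html  -> output directory, matching the artifact uploaded by the workflow
    raise SystemExit(build_main(["-c", "docs", ".", "docs/_build/html"]))
```
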