Merge branch 'TheAlgorithms:master' into master

Author: Vijayalaxmi Wakode · 2024-10-15 00:08:51 +05:30 · committed by GitHub
Commit: eddb5480b7 (GPG key ID B5690EEEBB952194; no known key found for this signature in database)
27 changed files with 691 additions and 55 deletions


@ -1,5 +1,5 @@
# https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md
ARG VARIANT=3.12-bookworm
ARG VARIANT=3.13-bookworm
FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT}
COPY requirements.txt /tmp/pip-tmp/
RUN python3 -m pip install --upgrade pip \


@ -7,7 +7,7 @@
// Update 'VARIANT' to pick a Python version: 3, 3.11, 3.10, 3.9, 3.8
// Append -bullseye or -buster to pin to an OS version.
// Use -bullseye variants on local on arm64/Apple Silicon.
"VARIANT": "3.12-bookworm",
"VARIANT": "3.13-bookworm",
}
},

.github/CODEOWNERS

@ -9,8 +9,6 @@
/.* @cclauss
# /arithmetic_analysis/
# /backtracking/
# /bit_manipulation/


@ -25,12 +25,13 @@ jobs:
- name: Run tests
# TODO: #8818 Re-enable quantum tests
run: pytest
--ignore=quantum/q_fourier_transform.py
--ignore=computer_vision/cnn_classification.py
--ignore=docs/conf.py
--ignore=dynamic_programming/k_means_clustering_tensorflow.py
--ignore=machine_learning/lstm/lstm_prediction.py
--ignore=neural_network/input_data.py
--ignore=project_euler/
--ignore=quantum/q_fourier_transform.py
--ignore=scripts/validate_solutions.py
--cov-report=term-missing:skip-covered
--cov=. .

.github/workflows/sphinx.yml

@ -0,0 +1,50 @@
name: sphinx
on:
# Triggers the workflow on push or pull request events but only for the "master" branch
push:
branches: ["master"]
pull_request:
branches: ["master"]
# Or manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: "pages"
cancel-in-progress: false
jobs:
build_docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.13
allow-prereleases: true
- run: pip install --upgrade pip
- run: pip install myst-parser sphinx-autoapi sphinx-pyproject
- uses: actions/configure-pages@v5
- run: sphinx-build -c docs . docs/_build/html
- uses: actions/upload-pages-artifact@v3
with:
path: docs/_build/html
deploy_docs:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
if: github.event_name != 'pull_request'
needs: build_docs
runs-on: ubuntu-latest
steps:
- uses: actions/deploy-pages@v4
id: deployment


@ -1,6 +1,6 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v5.0.0
hooks:
- id: check-executables-have-shebangs
- id: check-toml
@ -16,7 +16,7 @@ repos:
- id: auto-walrus
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.8
rev: v0.6.9
hooks:
- id: ruff
- id: ruff-format


@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure
We want your work to be readable by others; therefore, we encourage you to note the following:
- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will.
- Please write in Python 3.13+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will.
- Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments.
- Single letter variable names are *old school* so please avoid them unless their life only spans a few lines.
- Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not.
@ -96,7 +96,7 @@ We want your work to be readable by others; therefore, we encourage you to note
```bash
python3 -m pip install ruff # only required the first time
ruff .
ruff check
```
- Original code submissions require docstrings or comments to describe your work.
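The `greatest_common_divisor()` example mentioned above, spelled out as a quick sketch (illustrative only, not code from this commit):

```python
def greatest_common_divisor(first_number: int, second_number: int) -> int:
    """The expanded name documents intent without needing a comment."""
    while second_number:
        first_number, second_number = second_number, first_number % second_number
    return abs(first_number)


assert greatest_common_divisor(54, 24) == 6
```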


@ -22,6 +22,7 @@
* [Rat In Maze](backtracking/rat_in_maze.py)
* [Sudoku](backtracking/sudoku.py)
* [Sum Of Subsets](backtracking/sum_of_subsets.py)
* [Word Break](backtracking/word_break.py)
* [Word Ladder](backtracking/word_ladder.py)
* [Word Search](backtracking/word_search.py)
@ -99,6 +100,7 @@
* [Elgamal Key Generator](ciphers/elgamal_key_generator.py)
* [Enigma Machine2](ciphers/enigma_machine2.py)
* [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py)
* [Gronsfeld Cipher](ciphers/gronsfeld_cipher.py)
* [Hill Cipher](ciphers/hill_cipher.py)
* [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py)
* [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py)
@ -211,6 +213,7 @@
* [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py)
* [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py)
* [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py)
* [Maximum Sum Bst](data_structures/binary_tree/maximum_sum_bst.py)
* [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py)
* [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py)
* [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py)
@ -284,6 +287,7 @@
* [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py)
* [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py)
* [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py)
* [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py)
* [Next Greater Element](data_structures/stacks/next_greater_element.py)
* [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py)
* [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py)
@ -347,6 +351,9 @@
* [Power](divide_and_conquer/power.py)
* [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py)
## Docs
* [Conf](docs/conf.py)
## Dynamic Programming
* [Abbreviation](dynamic_programming/abbreviation.py)
* [All Construct](dynamic_programming/all_construct.py)
@ -1201,6 +1208,7 @@
* [Binary Tree Traversal](searches/binary_tree_traversal.py)
* [Double Linear Search](searches/double_linear_search.py)
* [Double Linear Search Recursion](searches/double_linear_search_recursion.py)
* [Exponential Search](searches/exponential_search.py)
* [Fibonacci Search](searches/fibonacci_search.py)
* [Hill Climbing](searches/hill_climbing.py)
* [Interpolation Search](searches/interpolation_search.py)


@ -1,4 +1,4 @@
MIT License
## MIT License
Copyright (c) 2016-2022 TheAlgorithms and contributors


@ -24,6 +24,14 @@ def encrypt(plaintext: str, key: str) -> str:
Traceback (most recent call last):
...
ValueError: plaintext is empty
>>> encrypt("coffee is good as python", "")
Traceback (most recent call last):
...
ValueError: key is empty
>>> encrypt(527.26, "TheAlgorithms")
Traceback (most recent call last):
...
TypeError: plaintext must be a string
"""
if not isinstance(plaintext, str):
raise TypeError("plaintext must be a string")
@ -80,6 +88,14 @@ def decrypt(ciphertext: str, key: str) -> str:
Traceback (most recent call last):
...
TypeError: ciphertext must be a string
>>> decrypt("", "TheAlgorithms")
Traceback (most recent call last):
...
ValueError: ciphertext is empty
>>> decrypt("vvjfpk wj ohvp su ddylsv", 2)
Traceback (most recent call last):
...
TypeError: key must be a string
"""
if not isinstance(ciphertext, str):
raise TypeError("ciphertext must be a string")


@ -0,0 +1,45 @@
from string import ascii_uppercase


def gronsfeld(text: str, key: str) -> str:
    """
    Encrypt plaintext with the Gronsfeld cipher

    >>> gronsfeld('hello', '412')
    'LFNPP'
    >>> gronsfeld('hello', '123')
    'IGOMQ'
    >>> gronsfeld('', '123')
    ''
    >>> gronsfeld('yes, ¥€$ - _!@#%?', '0')
    'YES, ¥€$ - _!@#%?'
    >>> gronsfeld('yes, ¥€$ - _!@#%?', '01')
    'YFS, ¥€$ - _!@#%?'
    >>> gronsfeld('yes, ¥€$ - _!@#%?', '012')
    'YFU, ¥€$ - _!@#%?'
    >>> gronsfeld('yes, ¥€$ - _!@#%?', '')
    Traceback (most recent call last):
      ...
    ZeroDivisionError: integer modulo by zero
    """
    ascii_len = len(ascii_uppercase)
    key_len = len(key)
    encrypted_text = ""
    keys = [int(char) for char in key]
    upper_case_text = text.upper()

    for i, char in enumerate(upper_case_text):
        if char in ascii_uppercase:
            new_position = (ascii_uppercase.index(char) + keys[i % key_len]) % ascii_len
            shifted_letter = ascii_uppercase[new_position]
            encrypted_text += shifted_letter
        else:
            encrypted_text += char

    return encrypted_text


if __name__ == "__main__":
    from doctest import testmod

    testmod()
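The commit adds only the encryption direction. A decryption counterpart would subtract the same digit shifts; a minimal sketch, assuming the same uppercase-only convention (`gronsfeld_decrypt` is not part of the commit):

```python
from string import ascii_uppercase


def gronsfeld_decrypt(encrypted: str, key: str) -> str:
    """Reverse the Gronsfeld shift by subtracting each key digit instead of adding it."""
    ascii_len = len(ascii_uppercase)
    keys = [int(char) for char in key]
    decrypted_text = ""
    for i, char in enumerate(encrypted.upper()):
        if char in ascii_uppercase:
            new_position = (ascii_uppercase.index(char) - keys[i % len(key)]) % ascii_len
            decrypted_text += ascii_uppercase[new_position]
        else:
            decrypted_text += char
    return decrypted_text


assert gronsfeld_decrypt("LFNPP", "412") == "HELLO"
```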


@ -0,0 +1,78 @@
from __future__ import annotations

import sys
from dataclasses import dataclass

INT_MIN = -sys.maxsize + 1
INT_MAX = sys.maxsize - 1


@dataclass
class TreeNode:
    val: int = 0
    left: TreeNode | None = None
    right: TreeNode | None = None


def max_sum_bst(root: TreeNode | None) -> int:
    """
    The solution traverses a binary tree to find the maximum sum of
    keys in any subtree that is a Binary Search Tree (BST). It uses
    recursion to validate BST properties and calculates sums, returning
    the highest sum found among all valid BST subtrees.

    >>> t1 = TreeNode(4)
    >>> t1.left = TreeNode(3)
    >>> t1.left.left = TreeNode(1)
    >>> t1.left.right = TreeNode(2)
    >>> print(max_sum_bst(t1))
    2
    >>> t2 = TreeNode(-4)
    >>> t2.left = TreeNode(-2)
    >>> t2.right = TreeNode(-5)
    >>> print(max_sum_bst(t2))
    0
    >>> t3 = TreeNode(1)
    >>> t3.left = TreeNode(4)
    >>> t3.left.left = TreeNode(2)
    >>> t3.left.right = TreeNode(4)
    >>> t3.right = TreeNode(3)
    >>> t3.right.left = TreeNode(2)
    >>> t3.right.right = TreeNode(5)
    >>> t3.right.right.left = TreeNode(4)
    >>> t3.right.right.right = TreeNode(6)
    >>> print(max_sum_bst(t3))
    20
    """
    ans: int = 0

    def solver(node: TreeNode | None) -> tuple[bool, int, int, int]:
        """
        Post-order helper; returns (is_bst, subtree_min, subtree_max, subtree_sum).

        >>> solver(TreeNode(1))
        (True, 1, 1, 1)
        """
        nonlocal ans

        if not node:
            return True, INT_MAX, INT_MIN, 0  # Valid BST, min, max, sum

        is_left_valid, min_left, max_left, sum_left = solver(node.left)
        is_right_valid, min_right, max_right, sum_right = solver(node.right)

        if is_left_valid and is_right_valid and max_left < node.val < min_right:
            total_sum = sum_left + sum_right + node.val
            ans = max(ans, total_sum)
            return True, min(min_left, node.val), max(max_right, node.val), total_sum

        return False, -1, -1, -1  # Not a valid BST

    solver(root)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()


@ -0,0 +1,38 @@
from collections.abc import Iterator


def lexical_order(max_number: int) -> Iterator[int]:
    """
    Generate numbers in lexical order from 1 to max_number.

    >>> " ".join(map(str, lexical_order(13)))
    '1 10 11 12 13 2 3 4 5 6 7 8 9'
    >>> list(lexical_order(1))
    [1]
    >>> " ".join(map(str, lexical_order(20)))
    '1 10 11 12 13 14 15 16 17 18 19 2 20 3 4 5 6 7 8 9'
    >>> " ".join(map(str, lexical_order(25)))
    '1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 3 4 5 6 7 8 9'
    >>> list(lexical_order(12))
    [1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    stack = [1]

    while stack:
        num = stack.pop()
        if num > max_number:
            continue

        yield num
        if (num % 10) != 9:
            stack.append(num + 1)

        stack.append(num * 10)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"Numbers from 1 to 25 in lexical order: {list(lexical_order(25))}")

docs/conf.py

@ -0,0 +1,3 @@
from sphinx_pyproject import SphinxConfig
project = SphinxConfig("../pyproject.toml", globalns=globals()).name
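These two lines delegate all Sphinx settings to pyproject.toml: sphinx-pyproject reads the `[project]` and `[tool.sphinx-pyproject]` tables (added later in this commit) and, because of `globalns=globals()`, injects those keys into the conf.py namespace. Roughly, it stands in for a hand-written conf.py along these lines (a sketch of the effect; values copied from the pyproject.toml shown below):

```python
project = "thealgorithms-python"
copyright = "2014, TheAlgorithms"
extensions = ["autoapi.extension", "myst_parser"]
autoapi_dirs = ["audio_filters", "backtracking"]  # plus the rest of the list in pyproject.toml
autoapi_member_order = "groupwise"
exclude_patterns = [".*/*", "docs/"]
html_static_path = ["_static"]
html_theme = "alabaster"
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
```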


@ -28,6 +28,24 @@ def longest_common_subsequence(x: str, y: str):
(2, 'ph')
>>> longest_common_subsequence("computer", "food")
(1, 'o')
>>> longest_common_subsequence("", "abc") # One string is empty
(0, '')
>>> longest_common_subsequence("abc", "") # Other string is empty
(0, '')
>>> longest_common_subsequence("", "") # Both strings are empty
(0, '')
>>> longest_common_subsequence("abc", "def") # No common subsequence
(0, '')
>>> longest_common_subsequence("abc", "abc") # Identical strings
(3, 'abc')
>>> longest_common_subsequence("a", "a") # Single character match
(1, 'a')
>>> longest_common_subsequence("a", "b") # Single character no match
(0, '')
>>> longest_common_subsequence("abcdef", "ace") # Interleaved subsequence
(3, 'ace')
>>> longest_common_subsequence("ABCD", "ACBD") # No repeated characters
(3, 'ABD')
"""
# find the length of strings
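The function body is outside this hunk. For context, a standard dynamic-programming LCS returning `(length, subsequence)` in the same shape as these doctests might look like the sketch below; tie-breaking between equally long subsequences depends on traceback order, so a different but equally valid string (e.g. 'ACD' instead of 'ABD') is possible in other implementations.

```python
def lcs_sketch(x: str, y: str) -> tuple[int, str]:
    """Classic O(len(x) * len(y)) DP table plus traceback."""
    m, n = len(x), len(y)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    # Walk back from the bottom-right corner to rebuild one optimal subsequence.
    i, j, subsequence = m, n, []
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            subsequence.append(x[i - 1])
            i, j = i - 1, j - 1
        elif dp[i - 1][j] >= dp[i][j - 1]:
            i -= 1
        else:
            j -= 1
    return dp[m][n], "".join(reversed(subsequence))


assert lcs_sketch("abcdef", "ace") == (3, "ace")
assert lcs_sketch("abc", "def") == (0, "")
```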


@ -1,4 +1,4 @@
### Interest
# Interest
* Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/)
* Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. " [Simple Interest](https://www.investopedia.com/)


@ -1,36 +1,61 @@
def topological_sort(graph):
def topological_sort(graph: dict[int, list[int]]) -> list[int] | None:
"""
Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph
using BFS
Perform topological sorting of a Directed Acyclic Graph (DAG)
using Kahn's Algorithm via Breadth-First Search (BFS).
Topological sorting is a linear ordering of vertices in a graph such that for
every directed edge u → v, vertex u comes before vertex v in the ordering.
Parameters:
graph: Adjacency list representing the directed graph where keys are
vertices, and values are lists of adjacent vertices.
Returns:
The topologically sorted order of vertices if the graph is a DAG.
Returns None if the graph contains a cycle.
Example:
>>> graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
>>> topological_sort(graph)
[0, 1, 2, 3, 4, 5]
>>> graph_with_cycle = {0: [1], 1: [2], 2: [0]}
>>> topological_sort(graph_with_cycle)
"""
indegree = [0] * len(graph)
queue = []
topo = []
cnt = 0
topo_order = []
processed_vertices_count = 0
# Calculate the indegree of each vertex
for values in graph.values():
for i in values:
indegree[i] += 1
# Add all vertices with 0 indegree to the queue
for i in range(len(indegree)):
if indegree[i] == 0:
queue.append(i)
# Perform BFS
while queue:
vertex = queue.pop(0)
cnt += 1
topo.append(vertex)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(x)
processed_vertices_count += 1
topo_order.append(vertex)
if cnt != len(graph):
print("Cycle exists")
else:
print(topo)
# Traverse neighbors
for neighbor in graph[vertex]:
indegree[neighbor] -= 1
if indegree[neighbor] == 0:
queue.append(neighbor)
if processed_vertices_count != len(graph):
return None # no topological ordering exists due to cycle
return topo_order # valid topological ordering
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
if __name__ == "__main__":
import doctest
doctest.testmod()
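One performance note on the rewritten function: `queue.pop(0)` on a Python list is O(V) per dequeue, so the loop is quadratic in the worst case. The same BFS with `collections.deque` keeps Kahn's algorithm at O(V + E); a sketch of that variant (not part of the commit), which also accepts arbitrary hashable vertices rather than assuming they are 0..n-1:

```python
from collections import deque


def topological_sort_deque(graph: dict[int, list[int]]) -> list[int] | None:
    """Kahn's algorithm using a deque so every dequeue is O(1)."""
    indegree = dict.fromkeys(graph, 0)
    for neighbors in graph.values():
        for neighbor in neighbors:
            indegree[neighbor] += 1
    queue = deque(vertex for vertex, degree in indegree.items() if degree == 0)
    topo_order = []
    while queue:
        vertex = queue.popleft()
        topo_order.append(vertex)
        for neighbor in graph[vertex]:
            indegree[neighbor] -= 1
            if indegree[neighbor] == 0:
                queue.append(neighbor)
    # Fewer processed vertices than graph vertices means a cycle was found.
    return topo_order if len(topo_order) == len(graph) else None


graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
assert topological_sort_deque(graph) == [0, 1, 2, 3, 4, 5]
assert topological_sort_deque({0: [1], 1: [2], 2: [0]}) is None
```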

index.md

@ -0,0 +1,10 @@
# TheAlgorithms/Python
```{toctree}
:maxdepth: 2
:caption: index.md
<!-- CONTRIBUTING.md must be the FIRST doc and README.md can come after. -->
CONTRIBUTING.md
README.md
LICENSE.md
```


@ -3,30 +3,36 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str:
coordinates is a two dimensional matrix: [[x, y], [x, y], ...]
number of points you want to use
>>> print(points_to_polynomial([]))
>>> points_to_polynomial([])
Traceback (most recent call last):
...
ValueError: The program cannot work out a fitting polynomial.
>>> print(points_to_polynomial([[]]))
>>> points_to_polynomial([[]])
Traceback (most recent call last):
...
ValueError: The program cannot work out a fitting polynomial.
>>> points_to_polynomial([[1, 0], [2, 0], [3, 0]])
'f(x)=x^2*0.0+x^1*-0.0+x^0*0.0'
>>> points_to_polynomial([[1, 1], [2, 1], [3, 1]])
'f(x)=x^2*0.0+x^1*-0.0+x^0*1.0'
>>> points_to_polynomial([[1, 3], [2, 3], [3, 3]])
'f(x)=x^2*0.0+x^1*-0.0+x^0*3.0'
>>> points_to_polynomial([[1, 1], [2, 2], [3, 3]])
'f(x)=x^2*0.0+x^1*1.0+x^0*0.0'
>>> points_to_polynomial([[1, 1], [2, 4], [3, 9]])
'f(x)=x^2*1.0+x^1*-0.0+x^0*0.0'
>>> points_to_polynomial([[1, 3], [2, 6], [3, 11]])
'f(x)=x^2*1.0+x^1*-0.0+x^0*2.0'
>>> points_to_polynomial([[1, -3], [2, -6], [3, -11]])
'f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0'
>>> points_to_polynomial([[1, 5], [2, 2], [3, 9]])
'f(x)=x^2*5.0+x^1*-18.0+x^0*18.0'
>>> points_to_polynomial([[1, 1], [1, 2], [1, 3]])
'x=1'
>>> points_to_polynomial([[1, 1], [2, 2], [2, 2]])
Traceback (most recent call last):
...
ValueError: The program cannot work out a fitting polynomial.
>>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]]))
f(x)=x^2*0.0+x^1*-0.0+x^0*0.0
>>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]]))
f(x)=x^2*0.0+x^1*-0.0+x^0*1.0
>>> print(points_to_polynomial([[1, 3], [2, 3], [3, 3]]))
f(x)=x^2*0.0+x^1*-0.0+x^0*3.0
>>> print(points_to_polynomial([[1, 1], [2, 2], [3, 3]]))
f(x)=x^2*0.0+x^1*1.0+x^0*0.0
>>> print(points_to_polynomial([[1, 1], [2, 4], [3, 9]]))
f(x)=x^2*1.0+x^1*-0.0+x^0*0.0
>>> print(points_to_polynomial([[1, 3], [2, 6], [3, 11]]))
f(x)=x^2*1.0+x^1*-0.0+x^0*2.0
>>> print(points_to_polynomial([[1, -3], [2, -6], [3, -11]]))
f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0
>>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]]))
f(x)=x^2*5.0+x^1*-18.0+x^0*18.0
"""
if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates):
raise ValueError("The program cannot work out a fitting polynomial.")


@ -7,6 +7,8 @@ the Binet's formula function because the Binet formula function uses floats
NOTE 2: the Binet's formula function is much more limited in the size of inputs
that it can handle due to the size limitations of Python floats
NOTE 3: the matrix function is the fastest and most memory efficient for large n
See benchmark numbers in __main__ for performance comparisons.
https://en.wikipedia.org/wiki/Fibonacci_number for more information
@ -17,6 +19,9 @@ from collections.abc import Iterator
from math import sqrt
from time import time
import numpy as np
from numpy import ndarray
def time_func(func, *args, **kwargs):
"""
@ -230,6 +235,88 @@ def fib_binet(n: int) -> list[int]:
return [round(phi**i / sqrt_5) for i in range(n + 1)]
def matrix_pow_np(m: ndarray, power: int) -> ndarray:
"""
Raises a matrix to the power of 'power' using binary exponentiation.
Args:
m: Matrix as a numpy array.
power: The power to which the matrix is to be raised.
Returns:
The matrix raised to the power.
Raises:
ValueError: If power is negative.
>>> m = np.array([[1, 1], [1, 0]], dtype=int)
>>> matrix_pow_np(m, 0) # Identity matrix when raised to the power of 0
array([[1, 0],
[0, 1]])
>>> matrix_pow_np(m, 1) # Same matrix when raised to the power of 1
array([[1, 1],
[1, 0]])
>>> matrix_pow_np(m, 5)
array([[8, 5],
[5, 3]])
>>> matrix_pow_np(m, -1)
Traceback (most recent call last):
...
ValueError: power is negative
"""
result = np.array([[1, 0], [0, 1]], dtype=int) # Identity Matrix
base = m
if power < 0: # Negative power is not allowed
raise ValueError("power is negative")
while power:
if power % 2 == 1:
result = np.dot(result, base)
base = np.dot(base, base)
power //= 2
return result
def fib_matrix_np(n: int) -> int:
"""
Calculates the n-th Fibonacci number using matrix exponentiation.
https://www.nayuki.io/page/fast-fibonacci-algorithms#:~:text=
Summary:%20The%20two%20fast%20Fibonacci%20algorithms%20are%20matrix
Args:
n: Fibonacci sequence index
Returns:
The n-th Fibonacci number.
Raises:
ValueError: If n is negative.
>>> fib_matrix_np(0)
0
>>> fib_matrix_np(1)
1
>>> fib_matrix_np(5)
5
>>> fib_matrix_np(10)
55
>>> fib_matrix_np(-1)
Traceback (most recent call last):
...
ValueError: n is negative
"""
if n < 0:
raise ValueError("n is negative")
if n == 0:
return 0
m = np.array([[1, 1], [1, 0]], dtype=int)
result = matrix_pow_np(m, n - 1)
return int(result[0, 0])
if __name__ == "__main__":
from doctest import testmod
@ -242,3 +329,4 @@ if __name__ == "__main__":
time_func(fib_memoization, num) # 0.0100 ms
time_func(fib_recursive_cached, num) # 0.0153 ms
time_func(fib_recursive, num) # 257.0910 ms
time_func(fib_matrix_np, num) # 0.0000 ms
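The matrix method rests on the identity [[1, 1], [1, 0]]^n = [[F(n+1), F(n)], [F(n), F(n-1)]], which is why `fib_matrix_np(n)` reads F(n) from `result[0, 0]` of the (n-1)-th power. A quick standalone cross-check of that identity (a sketch, independent of the file above):

```python
import numpy as np


def fib_iterative_check(n: int) -> int:
    """Plain O(n) iteration, used here only to cross-check the matrix identity."""
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a


q = np.array([[1, 1], [1, 0]])
power = np.linalg.matrix_power(q, 10)  # [[F(11), F(10)], [F(10), F(9)]]
assert power[0, 0] == fib_iterative_check(11) == 89
assert power[0, 1] == fib_iterative_check(10) == 55
```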


@ -1,5 +1,22 @@
[project]
name = "thealgorithms-python"
version = "0.0.1"
description = "TheAlgorithms in Python"
authors = [ { name = "TheAlgorithms Contributors" } ]
requires-python = ">=3.13"
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.13",
]
optional-dependencies.docs = [
"myst-parser",
"sphinx-autoapi",
"sphinx-pyproject",
]
[tool.ruff]
target-version = "py312"
target-version = "py313"
output-format = "full"
lint.select = [
@ -113,6 +130,9 @@ lint.pylint.max-statements = 88 # default: 50
ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar"
skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt"
[tool.pyproject-fmt]
max_supported_python = "3.13"
[tool.pytest.ini_options]
markers = [
"mat_ops: mark a test as utilizing matrix operations.",
@ -129,3 +149,87 @@ omit = [
"project_euler/*",
]
sort = "Cover"
[tool.sphinx-pyproject]
copyright = "2014, TheAlgorithms"
autoapi_dirs = [
"audio_filters",
"backtracking",
"bit_manipulation",
"blockchain",
"boolean_algebra",
"cellular_automata",
"ciphers",
"compression",
"computer_vision",
"conversions",
"data_structures",
"digital_image_processing",
"divide_and_conquer",
"dynamic_programming",
"electronics",
"file_transfer",
"financial",
"fractals",
"fuzzy_logic",
"genetic_algorithm",
"geodesy",
"geometry",
"graphics",
"graphs",
"greedy_methods",
"hashes",
"knapsack",
"linear_algebra",
"linear_programming",
"machine_learning",
"maths",
"matrix",
"networking_flow",
"neural_network",
"other",
"physics",
"project_euler",
"quantum",
"scheduling",
"searches",
"sorts",
"strings",
"web_programming",
]
autoapi_member_order = "groupwise"
# autoapi_python_use_implicit_namespaces = true
exclude_patterns = [
".*/*",
"docs/",
]
extensions = [
"autoapi.extension",
"myst_parser",
]
html_static_path = [ "_static" ]
html_theme = "alabaster"
myst_enable_extensions = [
"amsmath",
"attrs_inline",
"colon_fence",
"deflist",
"dollarmath",
"fieldlist",
"html_admonition",
"html_image",
# "linkify",
"replacements",
"smartquotes",
"strikethrough",
"substitution",
"tasklist",
]
myst_fence_as_directive = [
"include",
]
templates_path = [ "_templates" ]
[tool.sphinx-pyproject.source_suffix]
".rst" = "restructuredtext"
# ".txt" = "markdown"
".md" = "markdown"


@ -15,6 +15,7 @@ requests
rich
# scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed
scikit-learn
sphinx_pyproject
statsmodels
sympy
tensorflow ; python_version < '3.13'


@ -0,0 +1,113 @@
#!/usr/bin/env python3

"""
Pure Python implementation of exponential search algorithm

For more information, see the Wikipedia page:
https://en.wikipedia.org/wiki/Exponential_search

For doctests run the following command:
python3 -m doctest -v exponential_search.py

For manual testing run:
python3 exponential_search.py
"""

from __future__ import annotations


def binary_search_by_recursion(
    sorted_collection: list[int], item: int, left: int = 0, right: int = -1
) -> int:
    """Pure implementation of binary search algorithm in Python using recursion

    Be careful: the collection must be ascending sorted otherwise, the result will be
    unpredictable.

    :param sorted_collection: some ascending sorted collection with comparable items
    :param item: item value to search
    :param left: starting index for the search
    :param right: ending index for the search
    :return: index of the found item or -1 if the item is not found

    Examples:
    >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4)
    0
    >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4)
    4
    >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4)
    1
    >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4)
    -1
    """
    if right < 0:
        right = len(sorted_collection) - 1
    if list(sorted_collection) != sorted(sorted_collection):
        raise ValueError("sorted_collection must be sorted in ascending order")
    if right < left:
        return -1

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


def exponential_search(sorted_collection: list[int], item: int) -> int:
    """
    Pure implementation of an exponential search algorithm in Python.
    For more information, refer to:
    https://en.wikipedia.org/wiki/Exponential_search

    Be careful: the collection must be ascending sorted, otherwise the result will be
    unpredictable.

    :param sorted_collection: some ascending sorted collection with comparable items
    :param item: item value to search
    :return: index of the found item or -1 if the item is not found

    The time complexity of this algorithm is O(log i) where i is the index of the item.

    Examples:
    >>> exponential_search([0, 5, 7, 10, 15], 0)
    0
    >>> exponential_search([0, 5, 7, 10, 15], 15)
    4
    >>> exponential_search([0, 5, 7, 10, 15], 5)
    1
    >>> exponential_search([0, 5, 7, 10, 15], 6)
    -1
    """
    if list(sorted_collection) != sorted(sorted_collection):
        raise ValueError("sorted_collection must be sorted in ascending order")

    if sorted_collection[0] == item:
        return 0

    bound = 1
    while bound < len(sorted_collection) and sorted_collection[bound] < item:
        bound *= 2

    left = bound // 2
    right = min(bound, len(sorted_collection) - 1)
    return binary_search_by_recursion(sorted_collection, item, left, right)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Manual testing
    user_input = input("Enter numbers separated by commas: ").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a number to search for: "))
    result = exponential_search(sorted_collection=collection, item=target)
    if result == -1:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at index {result} in {collection}.")


@ -17,11 +17,27 @@ def compute_transform_tables(
delete_cost: int,
insert_cost: int,
) -> tuple[list[list[int]], list[list[str]]]:
"""
Finds the most cost efficient sequence
for converting one string into another.
>>> costs, operations = compute_transform_tables("cat", "cut", 1, 2, 3, 3)
>>> costs[0][:4]
[0, 3, 6, 9]
>>> costs[2][:4]
[6, 4, 3, 6]
>>> operations[0][:4]
['0', 'Ic', 'Iu', 'It']
>>> operations[3][:4]
['Dt', 'Dt', 'Rtu', 'Ct']
>>> compute_transform_tables("", "", 1, 2, 3, 3)
([[0]], [['0']])
"""
source_seq = list(source_string)
destination_seq = list(destination_string)
len_source_seq = len(source_seq)
len_destination_seq = len(destination_seq)
costs = [
[0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1)
]
@ -31,33 +47,51 @@ def compute_transform_tables(
for i in range(1, len_source_seq + 1):
costs[i][0] = i * delete_cost
ops[i][0] = f"D{source_seq[i - 1]:c}"
ops[i][0] = f"D{source_seq[i - 1]}"
for i in range(1, len_destination_seq + 1):
costs[0][i] = i * insert_cost
ops[0][i] = f"I{destination_seq[i - 1]:c}"
ops[0][i] = f"I{destination_seq[i - 1]}"
for i in range(1, len_source_seq + 1):
for j in range(1, len_destination_seq + 1):
if source_seq[i - 1] == destination_seq[j - 1]:
costs[i][j] = costs[i - 1][j - 1] + copy_cost
ops[i][j] = f"C{source_seq[i - 1]:c}"
ops[i][j] = f"C{source_seq[i - 1]}"
else:
costs[i][j] = costs[i - 1][j - 1] + replace_cost
ops[i][j] = f"R{source_seq[i - 1]:c}" + str(destination_seq[j - 1])
ops[i][j] = f"R{source_seq[i - 1]}" + str(destination_seq[j - 1])
if costs[i - 1][j] + delete_cost < costs[i][j]:
costs[i][j] = costs[i - 1][j] + delete_cost
ops[i][j] = f"D{source_seq[i - 1]:c}"
ops[i][j] = f"D{source_seq[i - 1]}"
if costs[i][j - 1] + insert_cost < costs[i][j]:
costs[i][j] = costs[i][j - 1] + insert_cost
ops[i][j] = f"I{destination_seq[j - 1]:c}"
ops[i][j] = f"I{destination_seq[j - 1]}"
return costs, ops
def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]:
"""
Assembles the transformations based on the ops table.
>>> ops = [['0', 'Ic', 'Iu', 'It'],
... ['Dc', 'Cc', 'Iu', 'It'],
... ['Da', 'Da', 'Rau', 'Rat'],
... ['Dt', 'Dt', 'Rtu', 'Ct']]
>>> x = len(ops) - 1
>>> y = len(ops[0]) - 1
>>> assemble_transformation(ops, x, y)
['Cc', 'Rau', 'Ct']
>>> ops1 = [['0']]
>>> x1 = len(ops1) - 1
>>> y1 = len(ops1[0]) - 1
>>> assemble_transformation(ops1, x1, y1)
[]
"""
if i == 0 and j == 0:
return []
elif ops[i][j][0] in {"C", "R"}: