Merge branch 'TheAlgorithms:master' into master

This commit is contained in:
Muhammad Junaid Khalid 2024-10-03 11:19:02 +05:00 committed by GitHub
commit 7d701cc066
42 changed files with 464 additions and 156 deletions

View File

@@ -12,7 +12,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
         with:
-          python-version: 3.12
+          python-version: 3.13
           allow-prereleases: true
       - uses: actions/cache@v4
         with:
@@ -20,12 +20,16 @@ jobs:
           key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip setuptools six wheel
+          python -m pip install --upgrade pip setuptools wheel
           python -m pip install pytest-cov -r requirements.txt
       - name: Run tests
         # TODO: #8818 Re-enable quantum tests
         run: pytest
           --ignore=quantum/q_fourier_transform.py
+          --ignore=computer_vision/cnn_classification.py
+          --ignore=dynamic_programming/k_means_clustering_tensorflow.py
+          --ignore=machine_learning/lstm/lstm_prediction.py
+          --ignore=neural_network/input_data.py
           --ignore=project_euler/
           --ignore=scripts/validate_solutions.py
           --cov-report=term-missing:skip-covered

View File

@@ -16,7 +16,7 @@ repos:
       - id: auto-walrus
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.6.7
+    rev: v0.6.8
     hooks:
       - id: ruff
       - id: ruff-format
@@ -42,7 +42,7 @@ repos:
         pass_filenames: false
   - repo: https://github.com/abravalheri/validate-pyproject
-    rev: v0.19
+    rev: v0.20.2
     hooks:
       - id: validate-pyproject

View File

@@ -22,6 +22,7 @@
   * [Rat In Maze](backtracking/rat_in_maze.py)
   * [Sudoku](backtracking/sudoku.py)
   * [Sum Of Subsets](backtracking/sum_of_subsets.py)
+  * [Word Ladder](backtracking/word_ladder.py)
   * [Word Search](backtracking/word_search.py)

 ## Bit Manipulation
@@ -1343,7 +1344,6 @@
   * [Get Ip Geolocation](web_programming/get_ip_geolocation.py)
   * [Get Top Billionaires](web_programming/get_top_billionaires.py)
   * [Get Top Hn Posts](web_programming/get_top_hn_posts.py)
-  * [Get User Tweets](web_programming/get_user_tweets.py)
   * [Giphy](web_programming/giphy.py)
   * [Instagram Crawler](web_programming/instagram_crawler.py)
   * [Instagram Pic](web_programming/instagram_pic.py)

View File

@@ -0,0 +1,71 @@
"""
Word Break Problem is a well-known problem in computer science.
Given a string and a dictionary of words, the task is to determine if
the string can be segmented into a sequence of one or more dictionary words.

Wikipedia: https://en.wikipedia.org/wiki/Word_break_problem
"""


def backtrack(input_string: str, word_dict: set[str], start: int) -> bool:
    """
    Helper function that uses backtracking to determine if a valid
    word segmentation is possible starting from index 'start'.

    Parameters:
    input_string (str): The input string to be segmented.
    word_dict (set[str]): A set of valid dictionary words.
    start (int): The starting index of the substring to be checked.

    Returns:
    bool: True if a valid segmentation is possible, otherwise False.

    Example:
    >>> backtrack("leetcode", {"leet", "code"}, 0)
    True

    >>> backtrack("applepenapple", {"apple", "pen"}, 0)
    True

    >>> backtrack("catsandog", {"cats", "dog", "sand", "and", "cat"}, 0)
    False
    """

    # Base case: if the starting index has reached the end of the string
    if start == len(input_string):
        return True

    # Try every possible substring from 'start' to 'end'
    for end in range(start + 1, len(input_string) + 1):
        if input_string[start:end] in word_dict and backtrack(
            input_string, word_dict, end
        ):
            return True

    return False


def word_break(input_string: str, word_dict: set[str]) -> bool:
    """
    Determines if the input string can be segmented into a sequence of
    valid dictionary words using backtracking.

    Parameters:
    input_string (str): The input string to segment.
    word_dict (set[str]): The set of valid words.

    Returns:
    bool: True if the string can be segmented into valid words, otherwise False.

    Example:
    >>> word_break("leetcode", {"leet", "code"})
    True

    >>> word_break("applepenapple", {"apple", "pen"})
    True

    >>> word_break("catsandog", {"cats", "dog", "sand", "and", "cat"})
    False
    """

    return backtrack(input_string, word_dict, 0)
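A note on complexity: the backtracking above can revisit the same suffix exponentially many times on adversarial inputs such as "aaaa...b" with word_dict {"a", "aa", "aaa"}. A minimal memoized sketch under the same signature (word_break_memo is a hypothetical name, not in this commit):

import functools


def word_break_memo(input_string: str, word_dict: set[str]) -> bool:
    # Caching the result per start index bounds the work by O(n^2)
    # substring checks instead of exponential re-exploration.
    @functools.cache
    def can_segment(start: int) -> bool:
        if start == len(input_string):
            return True
        return any(
            input_string[start:end] in word_dict and can_segment(end)
            for end in range(start + 1, len(input_string) + 1)
        )

    return can_segment(0)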

backtracking/word_ladder.py (new file, 100 lines)
View File

@ -0,0 +1,100 @@
"""
Word Ladder is a classic problem in computer science.
The problem is to transform a start word into an end word
by changing one letter at a time.
Each intermediate word must be a valid word from a given list of words.
The goal is to find a transformation sequence
from the start word to the end word.
Wikipedia: https://en.wikipedia.org/wiki/Word_ladder
"""
import string
def backtrack(
current_word: str, path: list[str], end_word: str, word_set: set[str]
) -> list[str]:
"""
Helper function to perform backtracking to find the transformation
from the current_word to the end_word.
Parameters:
current_word (str): The current word in the transformation sequence.
path (list[str]): The list of transformations from begin_word to current_word.
end_word (str): The target word for transformation.
word_set (set[str]): The set of valid words for transformation.
Returns:
list[str]: The list of transformations from begin_word to end_word.
Returns an empty list if there is no valid
transformation from current_word to end_word.
Example:
>>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log", "cog"})
['hit', 'hot', 'dot', 'lot', 'log', 'cog']
>>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log"})
[]
>>> backtrack("lead", ["lead"], "gold", {"load", "goad", "gold", "lead", "lord"})
['lead', 'lead', 'load', 'goad', 'gold']
>>> backtrack("game", ["game"], "code", {"came", "cage", "code", "cade", "gave"})
['game', 'came', 'cade', 'code']
"""
# Base case: If the current word is the end word, return the path
if current_word == end_word:
return path
# Try all possible single-letter transformations
for i in range(len(current_word)):
for c in string.ascii_lowercase: # Try changing each letter
transformed_word = current_word[:i] + c + current_word[i + 1 :]
if transformed_word in word_set:
word_set.remove(transformed_word)
# Recur with the new word added to the path
result = backtrack(
transformed_word, [*path, transformed_word], end_word, word_set
)
if result: # valid transformation found
return result
word_set.add(transformed_word) # backtrack
return [] # No valid transformation found
def word_ladder(begin_word: str, end_word: str, word_set: set[str]) -> list[str]:
"""
Solve the Word Ladder problem using Backtracking and return
the list of transformations from begin_word to end_word.
Parameters:
begin_word (str): The word from which the transformation starts.
end_word (str): The target word for transformation.
word_list (list[str]): The list of valid words for transformation.
Returns:
list[str]: The list of transformations from begin_word to end_word.
Returns an empty list if there is no valid transformation.
Example:
>>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"])
['hit', 'hot', 'dot', 'lot', 'log', 'cog']
>>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log"])
[]
>>> word_ladder("lead", "gold", ["load", "goad", "gold", "lead", "lord"])
['lead', 'lead', 'load', 'goad', 'gold']
>>> word_ladder("game", "code", ["came", "cage", "code", "cade", "gave"])
['game', 'came', 'cade', 'code']
"""
if end_word not in word_set: # no valid transformation possible
return []
# Perform backtracking starting from the begin_word
return backtrack(begin_word, [begin_word], end_word, word_set)
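Worth noting: because backtrack() is depth-first and greedy over 'a'-'z', it returns the first ladder found, not necessarily a shortest one; the doctest result ['lead', 'lead', 'load', 'goad', 'gold'] even revisits the start word. A breadth-first sketch that yields a shortest ladder under the same single-letter-change rule (shortest_word_ladder is a hypothetical helper, not in this commit):

import string
from collections import deque


def shortest_word_ladder(begin_word: str, end_word: str, word_set: set[str]) -> list[str]:
    # BFS explores ladders level by level, so the first ladder that
    # reaches end_word is one of the shortest.
    if end_word not in word_set:
        return []
    queue = deque([[begin_word]])
    seen = {begin_word}
    while queue:
        path = queue.popleft()
        current = path[-1]
        if current == end_word:
            return path
        for i in range(len(current)):
            for c in string.ascii_lowercase:
                candidate = current[:i] + c + current[i + 1 :]
                if candidate in word_set and candidate not in seen:
                    seen.add(candidate)
                    queue.append([*path, candidate])
    return []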

View File

@@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float
     >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2]))
     3.1622776601683795
     """
-    return np.sqrt(((original - reference) ** 2).mean())
+    return float(np.sqrt(((original - reference) ** 2).mean()))


 def normalize_image(
@@ -273,7 +273,7 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]:
     >>> morphological = opening_filter(binary)
     >>> mask_1 = binary_mask(gray, morphological)[0]
     >>> concurrency = matrix_concurrency(mask_1, (0, 1))
-    >>> haralick_descriptors(concurrency)
+    >>> [float(f) for f in haralick_descriptors(concurrency)]
     [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
     """
     # Function np.indices could be used for bigger input types,
@@ -335,7 +335,7 @@ def get_descriptors(
     return np.concatenate(descriptors, axis=None)


-def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
+def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float:
     """
     Simple method for calculating the euclidean distance between two points,
     with type np.ndarray.
@@ -346,7 +346,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32:
     >>> euclidean(a, b)
     3.3166247903554
     """
-    return np.sqrt(np.sum(np.square(point_1 - point_2)))
+    return float(np.sqrt(np.sum(np.square(point_1 - point_2))))


 def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]:
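Context for the doctest churn in this file and most of the ones below: NumPy 2.0 changed the repr of scalar types (NEP 51), so an expression that used to print 3.1622776601683795 now prints np.float64(3.1622776601683795) and breaks the doctest. Wrapping results in float(), int(), bool() or complex() makes the expected output version-independent. A quick illustration of the mismatch:

import numpy as np

value = np.sqrt(np.float64(10.0))
print(repr(value))         # NumPy >= 2.0: np.float64(3.1622776601683795)
print(repr(float(value)))  # any version:  3.1622776601683795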

View File

@@ -13,7 +13,21 @@ from dataclasses import dataclass
 @dataclass
 class Node:
     """
-    A Node has data variable and pointers to Nodes to its left and right.
+    A Node represents an element of a binary tree, which contains:
+
+    Attributes:
+    data: The value stored in the node (int).
+    left: Pointer to the left child node (Node or None).
+    right: Pointer to the right child node (Node or None).
+
+    Example:
+    >>> node = Node(1, Node(2), Node(3))
+    >>> node.data
+    1
+    >>> node.left.data
+    2
+    >>> node.right.data
+    3
     """

     data: int
@@ -24,12 +38,25 @@ class Node:
 def make_symmetric_tree() -> Node:
     r"""
     Create a symmetric tree for testing.
     The tree looks like this:
          1
         / \
        2   2
       / \ / \
      3  4 4  3
+
+    Returns:
+    Node: Root node of a symmetric tree.
+
+    Example:
+    >>> tree = make_symmetric_tree()
+    >>> tree.data
+    1
+    >>> tree.left.data == tree.right.data
+    True
+    >>> tree.left.left.data == tree.right.right.data
+    True
     """
     root = Node(1)
     root.left = Node(2)
@@ -43,13 +70,26 @@ def make_symmetric_tree() -> Node:
 def make_asymmetric_tree() -> Node:
     r"""
-    Create a asymmetric tree for testing.
+    Create an asymmetric tree for testing.
     The tree looks like this:
          1
         / \
        2   2
       / \ / \
      3  4 3  4
+
+    Returns:
+    Node: Root node of an asymmetric tree.
+
+    Example:
+    >>> tree = make_asymmetric_tree()
+    >>> tree.data
+    1
+    >>> tree.left.data == tree.right.data
+    True
+    >>> tree.left.left.data == tree.right.right.data
+    False
     """
     root = Node(1)
     root.left = Node(2)
@@ -63,7 +103,15 @@ def make_asymmetric_tree() -> Node:
 def is_symmetric_tree(tree: Node) -> bool:
     """
-    Test cases for is_symmetric_tree function
+    Check if a binary tree is symmetric (i.e., a mirror of itself).
+
+    Parameters:
+    tree: The root node of the binary tree.
+
+    Returns:
+    bool: True if the tree is symmetric, False otherwise.
+
+    Example:
     >>> is_symmetric_tree(make_symmetric_tree())
     True
     >>> is_symmetric_tree(make_asymmetric_tree())
@@ -76,8 +124,17 @@ def is_symmetric_tree(tree: Node) -> bool:
 def is_mirror(left: Node | None, right: Node | None) -> bool:
     """
+    Check if two subtrees are mirror images of each other.
+
+    Parameters:
+    left: The root node of the left subtree.
+    right: The root node of the right subtree.
+
+    Returns:
+    bool: True if the two subtrees are mirrors of each other, False otherwise.
+
+    Example:
     >>> tree1 = make_symmetric_tree()
+    >>> tree1.right.right = Node(3)
     >>> is_mirror(tree1.left, tree1.right)
     True
     >>> tree2 = make_asymmetric_tree()
@@ -91,7 +148,7 @@ def is_mirror(left: Node | None, right: Node | None) -> bool:
         # One side is empty while the other is not, which is not symmetric.
         return False
     if left.data == right.data:
-        # The values match, so check the subtree
+        # The values match, so check the subtrees recursively.
         return is_mirror(left.left, right.right) and is_mirror(left.right, right.left)
     return False
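For reference, the same mirror check works without recursion, which avoids Python's recursion limit on very deep trees. A sketch assuming the Node class above (is_mirror_iterative is hypothetical, not part of this commit):

def is_mirror_iterative(left: Node | None, right: Node | None) -> bool:
    # Compare mirrored node pairs with an explicit stack instead of
    # recursive calls.
    stack = [(left, right)]
    while stack:
        a, b = stack.pop()
        if a is None and b is None:
            continue
        if a is None or b is None or a.data != b.data:
            return False
        stack.append((a.left, b.right))
        stack.append((a.right, b.left))
    return True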

View File

@@ -73,7 +73,7 @@ class BinomialHeap:
     30

     Deleting - delete() test
-    >>> [first_heap.delete_min() for _ in range(20)]
+    >>> [int(first_heap.delete_min()) for _ in range(20)]
     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]

     Create a new Heap
@@ -118,7 +118,7 @@ class BinomialHeap:
     values in merged heap; (merge is inplace)
     >>> results = []
     >>> while not first_heap.is_empty():
-    ...     results.append(first_heap.delete_min())
+    ...     results.append(int(first_heap.delete_min()))
     >>> results
     [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34]
     """
@@ -354,7 +354,7 @@ class BinomialHeap:
         # Merge heaps
         self.merge_heaps(new_heap)

-        return min_value
+        return int(min_value)

     def pre_order(self):
         """

View File

@@ -14,11 +14,11 @@ class Node:
     def __iter__(self):
         node = self
-        visited = []
+        visited = set()
         while node:
             if node in visited:
                 raise ContainsLoopError
-            visited.append(node)
+            visited.add(node)
             yield node.data
             node = node.next_node
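The switch from a list to a set turns each membership check into an O(1) hash lookup (nodes hash by identity by default), making loop detection linear instead of quadratic. Floyd's tortoise-and-hare is the usual O(1)-extra-space alternative; a sketch assuming the same next_node attribute (has_loop is hypothetical, not part of this commit):

def has_loop(head) -> bool:
    # A fast pointer advancing two steps meets a slow pointer advancing
    # one step if and only if the list contains a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False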

View File

@@ -6,9 +6,20 @@ expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
 def next_greatest_element_slow(arr: list[float]) -> list[float]:
     """
-    Get the Next Greatest Element (NGE) for all elements in a list.
-    Maximum element present after the current one which is also greater than the
-    current one.
+    Get the Next Greatest Element (NGE) for each element in the array
+    by checking all subsequent elements to find the next greater one.
+
+    This is a brute-force implementation, and it has a time complexity
+    of O(n^2), where n is the size of the array.
+
+    Args:
+        arr: List of numbers for which the NGE is calculated.
+
+    Returns:
+        List containing the next greatest elements. If no
+        greater element is found, -1 is placed in the result.
+
+    Example:
     >>> next_greatest_element_slow(arr) == expect
     True
     """
@@ -28,9 +39,21 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]:
 def next_greatest_element_fast(arr: list[float]) -> list[float]:
     """
-    Like next_greatest_element_slow() but changes the loops to use
-    enumerate() instead of range(len()) for the outer loop and
-    for in a slice of arr for the inner loop.
+    Find the Next Greatest Element (NGE) for each element in the array
+    using a more readable approach. This implementation utilizes
+    enumerate() for the outer loop and slicing for the inner loop.
+
+    While this improves readability over next_greatest_element_slow(),
+    it still has a time complexity of O(n^2).
+
+    Args:
+        arr: List of numbers for which the NGE is calculated.
+
+    Returns:
+        List containing the next greatest elements. If no
+        greater element is found, -1 is placed in the result.
+
+    Example:
     >>> next_greatest_element_fast(arr) == expect
     True
     """
@@ -47,14 +70,23 @@ def next_greatest_element_fast(arr: list[float]) -> list[float]:
 def next_greatest_element(arr: list[float]) -> list[float]:
     """
-    Get the Next Greatest Element (NGE) for all elements in a list.
-    Maximum element present after the current one which is also greater than the
-    current one.
-
-    A naive way to solve this is to take two loops and check for the next bigger
-    number but that will make the time complexity as O(n^2). The better way to solve
-    this would be to use a stack to keep track of maximum number giving a linear time
-    solution.
+    Efficient solution to find the Next Greatest Element (NGE) for all elements
+    using a stack. The time complexity is reduced to O(n), making it suitable
+    for larger arrays.
+
+    The stack keeps track of elements for which the next greater element hasn't
+    been found yet. By iterating through the array in reverse (from the last
+    element to the first), the stack is used to efficiently determine the next
+    greatest element for each element.
+
+    Args:
+        arr: List of numbers for which the NGE is calculated.
+
+    Returns:
+        List containing the next greatest elements. If no
+        greater element is found, -1 is placed in the result.
+
+    Example:
    >>> next_greatest_element(arr) == expect
    True
    """

View File

@@ -12,19 +12,58 @@ class Graph:
     ]  # dp[i][j] stores minimum distance from i to j

     def add_edge(self, u, v, w):
+        """
+        Adds a directed edge from node u
+        to node v with weight w.
+
+        >>> g = Graph(3)
+        >>> g.add_edge(0, 1, 5)
+        >>> g.dp[0][1]
+        5
+        """
         self.dp[u][v] = w

     def floyd_warshall(self):
+        """
+        Computes the shortest paths between all pairs of
+        nodes using the Floyd-Warshall algorithm.
+
+        >>> g = Graph(3)
+        >>> g.add_edge(0, 1, 1)
+        >>> g.add_edge(1, 2, 2)
+        >>> g.floyd_warshall()
+        >>> g.show_min(0, 2)
+        3
+        >>> g.show_min(2, 0)
+        inf
+        """
         for k in range(self.n):
             for i in range(self.n):
                 for j in range(self.n):
                     self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

     def show_min(self, u, v):
+        """
+        Returns the minimum distance from node u to node v.
+
+        >>> g = Graph(3)
+        >>> g.add_edge(0, 1, 3)
+        >>> g.add_edge(1, 2, 4)
+        >>> g.floyd_warshall()
+        >>> g.show_min(0, 2)
+        7
+        >>> g.show_min(1, 0)
+        inf
+        """
         return self.dp[u][v]


 if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+
+    # Example usage
     graph = Graph(5)
     graph.add_edge(0, 2, 9)
     graph.add_edge(0, 4, 10)
@@ -38,5 +77,9 @@ if __name__ == "__main__":
     graph.add_edge(4, 2, 4)
     graph.add_edge(4, 3, 9)
     graph.floyd_warshall()
-    graph.show_min(1, 4)
-    graph.show_min(0, 3)
+    print(
+        graph.show_min(1, 4)
+    )  # Should output the minimum distance from node 1 to node 4
+    print(
+        graph.show_min(0, 3)
+    )  # Should output the minimum distance from node 0 to node 3
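One property worth remembering with Floyd-Warshall: after floyd_warshall() has run, a negative entry on the diagonal (dp[i][i] < 0) means node i lies on a negative-weight cycle. A hypothetical helper sketch, assuming dp[i][i] is initialised to 0 as is standard for this algorithm:

def has_negative_cycle(graph: Graph) -> bool:
    # dp[i][i] < 0 after relaxation means i can reach itself at
    # negative total cost, i.e. a negative cycle exists.
    return any(graph.dp[i][i] < 0 for i in range(graph.n))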

View File

@@ -39,7 +39,7 @@ class CircularConvolution:
     Usage:
     >>> convolution = CircularConvolution()
     >>> convolution.circular_convolution()
-    [10, 10, 6, 14]
+    [10.0, 10.0, 6.0, 14.0]

     >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6]
     >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5]
@@ -54,7 +54,7 @@ class CircularConvolution:
     >>> convolution.first_signal = [1, -1, 2, 3, -1]
     >>> convolution.second_signal = [1, 2, 3]
     >>> convolution.circular_convolution()
-    [8, -2, 3, 4, 11]
+    [8.0, -2.0, 3.0, 4.0, 11.0]
     """
@@ -91,7 +91,7 @@ class CircularConvolution:
         final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

         # rounding-off to two decimal places
-        return [round(i, 2) for i in final_signal]
+        return [float(round(i, 2)) for i in final_signal]


 if __name__ == "__main__":
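As a reminder of what these doctests compute: circular convolution of two length-N signals is y[n] = sum over m of x1[m] * x2[(n - m) mod N]. A tiny independent check with made-up values (not the class defaults):

# y[0] = 1*3 + 2*4 = 11, y[1] = 1*4 + 2*3 = 10
x1, x2 = [1.0, 2.0], [3.0, 4.0]
n_len = len(x1)
y = [sum(x1[m] * x2[(n - m) % n_len] for m in range(n_len)) for n in range(n_len)]
print(y)  # [11.0, 10.0]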

View File

@@ -40,11 +40,11 @@ nb_pixels = 666
 def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
     """
     Evaluate $e^z + c$.
-    >>> eval_exponential(0, 0)
+    >>> float(eval_exponential(0, 0))
     1.0
-    >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15
+    >>> bool(abs(eval_exponential(1, np.pi*1.j)) < 1e-15)
     True
-    >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
+    >>> bool(abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15)
     True
     """
     return np.exp(z_values) + c_parameter
@@ -98,20 +98,20 @@ def iterate_function(
     >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape
     (3,)
-    >>> np.round(iterate_function(eval_quadratic_polynomial,
+    >>> complex(np.round(iterate_function(eval_quadratic_polynomial,
     ...     0,
     ...     3,
-    ...     np.array([0,1,2]))[0])
+    ...     np.array([0,1,2]))[0]))
     0j
-    >>> np.round(iterate_function(eval_quadratic_polynomial,
+    >>> complex(np.round(iterate_function(eval_quadratic_polynomial,
     ...     0,
     ...     3,
-    ...     np.array([0,1,2]))[1])
+    ...     np.array([0,1,2]))[1]))
     (1+0j)
-    >>> np.round(iterate_function(eval_quadratic_polynomial,
+    >>> complex(np.round(iterate_function(eval_quadratic_polynomial,
     ...     0,
     ...     3,
-    ...     np.array([0,1,2]))[2])
+    ...     np.array([0,1,2]))[2]))
     (256+0j)
     """

View File

@@ -30,9 +30,9 @@ class BezierCurve:
         returns the x, y values of basis function at time t

         >>> curve = BezierCurve([(1,1), (1,2)])
-        >>> curve.basis_function(0)
+        >>> [float(x) for x in curve.basis_function(0)]
         [1.0, 0.0]
-        >>> curve.basis_function(1)
+        >>> [float(x) for x in curve.basis_function(1)]
         [0.0, 1.0]
         """
         assert 0 <= t <= 1, "Time t must be between 0 and 1."
@@ -55,9 +55,9 @@ class BezierCurve:
         The last point in the curve is when t = 1.

         >>> curve = BezierCurve([(1,1), (1,2)])
-        >>> curve.bezier_curve_function(0)
+        >>> tuple(float(x) for x in curve.bezier_curve_function(0))
         (1.0, 1.0)
-        >>> curve.bezier_curve_function(1)
+        >>> tuple(float(x) for x in curve.bezier_curve_function(1))
         (1.0, 2.0)
         """

View File

@@ -69,7 +69,7 @@ def dijkstra(
             x, y = predecessors[x, y]
         path.append(source)  # add the source manually
         path.reverse()
-        return matrix[destination], path
+        return float(matrix[destination]), path

     for i in range(len(dx)):
         nx, ny = x + dx[i], y + dy[i]

View File

@@ -78,7 +78,7 @@ def power_iteration(
     if is_complex:
         lambda_ = np.real(lambda_)

-    return lambda_, vector
+    return float(lambda_), vector


 def test_power_iteration() -> None:

View File

@@ -107,8 +107,8 @@ class Tableau:
     def find_pivot(self) -> tuple[Any, Any]:
         """Finds the pivot row and column.
-        >>> Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]),
-        ... 2, 0).find_pivot()
+        >>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6],
+        ... [1,2,0,1,7.]]), 2, 0).find_pivot())
         (1, 0)
         """
         objective = self.objectives[-1]
@@ -215,8 +215,8 @@ class Tableau:
        Max: x1 + x2
        ST: x1 + 3x2 <= 4
            3x1 + x2 <= 4
-        >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]),
-        ... 2, 0).run_simplex()
+        >>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0],
+        ... [1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()}
        {'P': 2.0, 'x1': 1.0, 'x2': 1.0}

        # Standard linear program with 3 variables:
@@ -224,21 +224,21 @@ class Tableau:
        ST: 2x1 + x2 + x3 <= 2
            x1 + 2x2 + 3x3 <= 5
            2x1 + 2x2 + x3 <= 6
-        >>> Tableau(np.array([
+        >>> {key: float(value) for key, value in Tableau(np.array([
        ... [-3,-1,-3,0,0,0,0],
        ... [2,1,1,1,0,0,2],
        ... [1,2,3,0,1,0,5],
        ... [2,2,1,0,0,1,6.]
-        ... ]),3,0).run_simplex() # doctest: +ELLIPSIS
+        ... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS
        {'P': 5.4, 'x1': 0.199..., 'x3': 1.6}

        # Optimal tableau input:
-        >>> Tableau(np.array([
+        >>> {key: float(value) for key, value in Tableau(np.array([
        ... [0, 0, 0.25, 0.25, 2],
        ... [0, 1, 0.375, -0.125, 1],
        ... [1, 0, -0.125, 0.375, 1]
-        ... ]), 2, 0).run_simplex()
+        ... ]), 2, 0).run_simplex().items()}
        {'P': 2.0, 'x1': 1.0, 'x2': 1.0}

        # Non-standard: >= constraints
@@ -246,25 +246,25 @@ class Tableau:
        ST: x1 + x2 + x3 <= 40
            2x1 + x2 - x3 >= 10
            - x2 + x3 >= 10
-        >>> Tableau(np.array([
+        >>> {key: float(value) for key, value in Tableau(np.array([
        ... [2, 0, 0, 0, -1, -1, 0, 0, 20],
        ... [-2, -3, -1, 0, 0, 0, 0, 0, 0],
        ... [1, 1, 1, 1, 0, 0, 0, 0, 40],
        ... [2, 1, -1, 0, -1, 0, 1, 0, 10],
        ... [0, -1, 1, 0, 0, -1, 0, 1, 10.]
-        ... ]), 3, 2).run_simplex()
+        ... ]), 3, 2).run_simplex().items()}
        {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0}

        # Non standard: minimisation and equalities
        Min: x1 + x2
        ST: 2x1 + x2 = 12
            6x1 + 5x2 = 40
-        >>> Tableau(np.array([
+        >>> {key: float(value) for key, value in Tableau(np.array([
        ... [8, 6, 0, 0, 52],
        ... [1, 1, 0, 0, 0],
        ... [2, 1, 1, 0, 12],
        ... [6, 5, 0, 1, 40.],
-        ... ]), 2, 2).run_simplex()
+        ... ]), 2, 2).run_simplex().items()}
        {'P': 7.0, 'x1': 5.0, 'x2': 2.0}

@@ -275,7 +275,7 @@ class Tableau:
            2x1 + 4x2 <= 48
            x1 + x2 >= 10
            x1 >= 2
-        >>> Tableau(np.array([
+        >>> {key: float(value) for key, value in Tableau(np.array([
        ... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0],
        ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0],
        ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0],
@@ -283,7 +283,7 @@ class Tableau:
        ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0],
        ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0],
        ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0]
-        ... ]), 2, 2).run_simplex() # doctest: +ELLIPSIS
+        ... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS
        {'P': 132.0, 'x1': 12.000... 'x2': 5.999...}
        """
        # Stop simplex algorithm from cycling.
@@ -307,11 +307,11 @@ class Tableau:
    def interpret_tableau(self) -> dict[str, float]:
        """Given the final tableau, add the corresponding values of the basic
        decision variables to the `output_dict`
-        >>> Tableau(np.array([
+        >>> {key: float(value) for key, value in Tableau(np.array([
        ... [0,0,0.875,0.375,5],
        ... [0,1,0.375,-0.125,1],
        ... [1,0,-0.125,0.375,1]
-        ... ]),2, 0).interpret_tableau()
+        ... ]),2, 0).interpret_tableau().items()}
        {'P': 5.0, 'x1': 1.0, 'x2': 1.0}
        """
        # P = RHS of final tableau

View File

@@ -26,15 +26,15 @@ class DecisionTree:
        >>> tester = DecisionTree()
        >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
        >>> test_prediction = float(6)
-        >>> tester.mean_squared_error(test_labels, test_prediction) == (
+        >>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
        ...     TestDecisionTree.helper_mean_squared_error_test(test_labels,
-        ...         test_prediction))
+        ...         test_prediction)))
        True
        >>> test_labels = np.array([1,2,3])
        >>> test_prediction = float(2)
-        >>> tester.mean_squared_error(test_labels, test_prediction) == (
+        >>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
        ...     TestDecisionTree.helper_mean_squared_error_test(test_labels,
-        ...         test_prediction))
+        ...         test_prediction)))
        True
        """
        if labels.ndim != 1:

View File

@@ -28,7 +28,7 @@ def linear_regression_prediction(
     input : training data (date, total_user, total_event) in list of float
     output : list of total user prediction in float
     >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
-    >>> abs(n - 5.0) < 1e-6  # Checking precision because of floating point errors
+    >>> bool(abs(n - 5.0) < 1e-6)  # Checking precision because of floating point errors
     True
     """
     x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
@@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) ->
     )
     model_fit = model.fit(disp=False, maxiter=600, method="nm")
     result = model_fit.predict(1, len(test_match), exog=[test_match])
-    return result[0]
+    return float(result[0])


 def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
@@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f
     regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
     regressor.fit(x_train, train_user)
     y_pred = regressor.predict(x_test)
-    return y_pred[0]
+    return float(y_pred[0])


 def interquartile_range_checker(train_user: list) -> float:
@@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float:
     q3 = np.percentile(train_user, 75)
     iqr = q3 - q1
     low_lim = q1 - (iqr * 0.1)
-    return low_lim
+    return float(low_lim)


 def data_safety_checker(list_vote: list, actual_result: float) -> bool:

View File

@@ -42,7 +42,7 @@ class KNN:
         >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
         10.0
         """
-        return np.linalg.norm(a - b)
+        return float(np.linalg.norm(a - b))

     def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
         """

View File

@@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
     @returns: returns value in the range 0 to 1

     Examples:
-    >>> sigmoid_function(4)
+    >>> float(sigmoid_function(4))
     0.9820137900379085
     >>> sigmoid_function(np.array([-3, 3]))
     array([0.04742587, 0.95257413])
@@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float:
     References:
     - https://en.wikipedia.org/wiki/Logistic_regression
     """
-    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
+    return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())


 def log_likelihood(x, y, weights):
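The pinned doctest value is easy to verify by hand from the sigmoid definition sigmoid(z) = 1 / (1 + e^(-z)):

import math

# The exact value the doctest above expects for sigmoid_function(4)
print(1 / (1 + math.exp(-4)))  # 0.9820137900379085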
def log_likelihood(x, y, weights): def log_likelihood(x, y, weights):

View File

@@ -22,7 +22,7 @@ def binary_cross_entropy(
     >>> true_labels = np.array([0, 1, 1, 0, 1])
     >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
-    >>> binary_cross_entropy(true_labels, predicted_probs)
+    >>> float(binary_cross_entropy(true_labels, predicted_probs))
     0.2529995012327421
     >>> true_labels = np.array([0, 1, 1, 0, 1])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -68,7 +68,7 @@ def binary_focal_cross_entropy(
     >>> true_labels = np.array([0, 1, 1, 0, 1])
     >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
-    >>> binary_focal_cross_entropy(true_labels, predicted_probs)
+    >>> float(binary_focal_cross_entropy(true_labels, predicted_probs))
     0.008257977659239775
     >>> true_labels = np.array([0, 1, 1, 0, 1])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -108,7 +108,7 @@ def categorical_cross_entropy(
     >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
-    >>> categorical_cross_entropy(true_labels, pred_probs)
+    >>> float(categorical_cross_entropy(true_labels, pred_probs))
     0.567395975254385
     >>> true_labels = np.array([[1, 0], [0, 1]])
     >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
@@ -179,13 +179,13 @@ def categorical_focal_cross_entropy(
     >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
     >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
     >>> alpha = np.array([0.6, 0.2, 0.7])
-    >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
+    >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
     0.0025966118981496423

     >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]])
     >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
     >>> alpha = np.array([0.25, 0.25, 0.25])
-    >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
+    >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
     0.23315276982014324

     >>> true_labels = np.array([[1, 0], [0, 1]])
@@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
     >>> true_labels = np.array([-1, 1, 1, -1, 1])
     >>> pred = np.array([-4, -0.3, 0.7, 5, 10])
-    >>> hinge_loss(true_labels, pred)
+    >>> float(hinge_loss(true_labels, pred))
     1.52
     >>> true_labels = np.array([-1, 1, 1, -1, 1, 1])
     >>> pred = np.array([-4, -0.3, 0.7, 5, 10])
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
     >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
+    >>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102))
     True
     >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
     >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
-    >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
+    >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164))
     True
     >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
     >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
+    >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028))
     True
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
+    >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16))
     True
     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
+    >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16))
     False
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl
     >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
-    >>> mean_squared_logarithmic_error(true_values, predicted_values)
+    >>> float(mean_squared_logarithmic_error(true_values, predicted_values))
     0.0030860877925181344
     >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
     >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -459,17 +459,17 @@ def mean_absolute_percentage_error(
     Examples:
     >>> y_true = np.array([10, 20, 30, 40])
     >>> y_pred = np.array([12, 18, 33, 45])
-    >>> mean_absolute_percentage_error(y_true, y_pred)
+    >>> float(mean_absolute_percentage_error(y_true, y_pred))
     0.13125

     >>> y_true = np.array([1, 2, 3, 4])
     >>> y_pred = np.array([2, 3, 4, 5])
-    >>> mean_absolute_percentage_error(y_true, y_pred)
+    >>> float(mean_absolute_percentage_error(y_true, y_pred))
     0.5208333333333333

     >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24])
     >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23])
-    >>> mean_absolute_percentage_error(y_true, y_pred)
+    >>> float(mean_absolute_percentage_error(y_true, y_pred))
     0.064671076436071
     """
     if len(y_true) != len(y_pred):
@@ -511,7 +511,7 @@ def perplexity_loss(
     ...      [[0.03, 0.26, 0.21, 0.18, 0.30],
     ...       [0.28, 0.10, 0.33, 0.15, 0.12]]]
     ... )
-    >>> perplexity_loss(y_true, y_pred)
+    >>> float(perplexity_loss(y_true, y_pred))
     5.0247347775367945
     >>> y_true = np.array([[1, 4], [2, 3]])
     >>> y_pred = np.array(
@@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->
     >>> y_true = np.array([3, 5, 2, 7])
     >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2])
-    >>> smooth_l1_loss(y_true, y_pred, 1.0)
+    >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
     0.012500000000000022

     >>> y_true = np.array([2, 4, 6])
     >>> y_pred = np.array([1, 5, 7])
-    >>> smooth_l1_loss(y_true, y_pred, 1.0)
+    >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
     0.5

     >>> y_true = np.array([1, 3, 5, 7])
     >>> y_pred = np.array([1, 3, 5, 7])
-    >>> smooth_l1_loss(y_true, y_pred, 1.0)
+    >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
     0.0

     >>> y_true = np.array([1, 3, 5])
@@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
     >>> true_labels = np.array([0.2, 0.3, 0.5])
     >>> predicted_probs = np.array([0.3, 0.3, 0.4])
-    >>> kullback_leibler_divergence(true_labels, predicted_probs)
+    >>> float(kullback_leibler_divergence(true_labels, predicted_probs))
     0.030478754035472025
     >>> true_labels = np.array([0.2, 0.3, 0.5])
     >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])

View File

@@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray:
     Examples:
     >>> audio = np.array([1, 2, 3, 4, 5])
     >>> normalized_audio = normalize(audio)
-    >>> np.max(normalized_audio)
+    >>> float(np.max(normalized_audio))
     1.0
-    >>> np.min(normalized_audio)
+    >>> float(np.min(normalized_audio))
     0.2
     """
     # Divide the entire audio signal by the maximum absolute value
@@ -229,7 +229,8 @@ def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarra
     Examples:
     >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
     >>> audio_fft = calculate_fft(audio_windowed, ftt_size=4)
-    >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j]))
+    >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j,
+    ...     -1.5-0.8660254j])))
     True
     """
     # Transpose the audio data to have time in rows and channels in columns
@@ -281,7 +282,7 @@ def freq_to_mel(freq: float) -> float:
         The frequency in mel scale.

     Examples:
-    >>> round(freq_to_mel(1000), 2)
+    >>> float(round(freq_to_mel(1000), 2))
     999.99
     """
     # Use the formula to convert frequency to the mel scale
@@ -321,7 +322,7 @@ def mel_spaced_filterbank(
         Mel-spaced filter bank.

     Examples:
-    >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)
+    >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10))
     0.0004603981
     """
     freq_min = 0
@@ -438,7 +439,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra
         The DCT basis matrix.

     Examples:
-    >>> round(discrete_cosine_transform(3, 5)[0][0], 5)
+    >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5))
     0.44721
     """
     basis = np.empty((dct_filter_num, filter_num))

View File

@@ -17,7 +17,7 @@ Y = clf.predict(test)

 def wrapper(y):
     """
-    >>> wrapper(Y)
+    >>> [int(x) for x in wrapper(Y)]
     [0, 0, 1]
     """
     return list(y)

View File

@@ -20,11 +20,11 @@ def mae(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [1,2,3];predict = [1,4,3]
-    >>> np.around(mae(predict,actual),decimals = 2)
+    >>> float(np.around(mae(predict,actual),decimals = 2))
     0.67

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> mae(predict,actual)
+    >>> float(mae(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -41,11 +41,11 @@ def mse(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [1,2,3];predict = [1,4,3]
-    >>> np.around(mse(predict,actual),decimals = 2)
+    >>> float(np.around(mse(predict,actual),decimals = 2))
     1.33

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> mse(predict,actual)
+    >>> float(mse(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -63,11 +63,11 @@ def rmse(predict, actual):
     """
     Examples(rounded for precision):
     >>> actual = [1,2,3];predict = [1,4,3]
-    >>> np.around(rmse(predict,actual),decimals = 2)
+    >>> float(np.around(rmse(predict,actual),decimals = 2))
     1.15

     >>> actual = [1,1,1];predict = [1,1,1]
-    >>> rmse(predict,actual)
+    >>> float(rmse(predict,actual))
     0.0
     """
     predict = np.array(predict)
@@ -84,12 +84,10 @@ def rmse(predict, actual):
 def rmsle(predict, actual):
     """
     Examples(rounded for precision):
-    >>> actual = [10,10,30];predict = [10,2,30]
-    >>> np.around(rmsle(predict,actual),decimals = 2)
+    >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2))
     0.75

-    >>> actual = [1,1,1];predict = [1,1,1]
-    >>> rmsle(predict,actual)
+    >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1]))
     0.0
     """
     predict = np.array(predict)
@@ -117,12 +115,12 @@ def mbd(predict, actual):
     Here the model overpredicts
     >>> actual = [1,2,3];predict = [2,3,4]
-    >>> np.around(mbd(predict,actual),decimals = 2)
+    >>> float(np.around(mbd(predict,actual),decimals = 2))
     50.0

     Here the model underpredicts
     >>> actual = [1,2,3];predict = [0,1,1]
-    >>> np.around(mbd(predict,actual),decimals = 2)
+    >>> float(np.around(mbd(predict,actual),decimals = 2))
     -66.67
     """
     predict = np.array(predict)

View File

@@ -153,7 +153,7 @@ def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
     >>> cosine_similarity(np.array([1, 2]), np.array([6, 32]))
     0.9615239476408232
     """
-    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
+    return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)))


 if __name__ == "__main__":

View File

@@ -14,11 +14,11 @@ def norm_squared(vector: ndarray) -> float:
     Returns:
         float: squared second norm of vector

-    >>> norm_squared([1, 2])
+    >>> int(norm_squared([1, 2]))
     5
-    >>> norm_squared(np.asarray([1, 2]))
+    >>> int(norm_squared(np.asarray([1, 2])))
     5
-    >>> norm_squared([0, 0])
+    >>> int(norm_squared([0, 0]))
     0
     """
     return np.dot(vector, vector)

View File

@@ -13,13 +13,13 @@ def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
     """
     Calculate the distance between the two endpoints of two vectors.
     A vector is defined as a list, tuple, or numpy 1D array.
-    >>> euclidean_distance((0, 0), (2, 2))
+    >>> float(euclidean_distance((0, 0), (2, 2)))
     2.8284271247461903
-    >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))
+    >>> float(euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])))
     3.4641016151377544
-    >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))
+    >>> float(euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])))
     8.0
-    >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])
+    >>> float(euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]))
     8.0
     """
     return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))

View File

@@ -26,7 +26,7 @@ def explicit_euler(
     ...     return y
     >>> y0 = 1
     >>> y = explicit_euler(f, y0, 0.0, 0.01, 5)
-    >>> y[-1]
+    >>> float(y[-1])
     144.77277243257308
     """
     n = int(np.ceil((x_end - x0) / step_size))

View File

@@ -24,13 +24,13 @@ def euler_modified(
     >>> def f1(x, y):
     ...     return -2*x*(y**2)
     >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0)
-    >>> y[-1]
+    >>> float(y[-1])
     0.503338255442106

     >>> import math
     >>> def f2(x, y):
     ...     return -2*y + (x**3)*math.exp(-2*x)
     >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3)
-    >>> y[-1]
+    >>> float(y[-1])
     0.5525976431951775
     """
     n = int(np.ceil((x_end - x0) / step_size))

View File

@@ -5,18 +5,18 @@ Reference: https://en.wikipedia.org/wiki/Gaussian_function
 from numpy import exp, pi, sqrt


-def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
+def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
     """
-    >>> gaussian(1)
+    >>> float(gaussian(1))
     0.24197072451914337

-    >>> gaussian(24)
+    >>> float(gaussian(24))
     3.342714441794458e-126

-    >>> gaussian(1, 4, 2)
+    >>> float(gaussian(1, 4, 2))
     0.06475879783294587

-    >>> gaussian(1, 5, 3)
+    >>> float(gaussian(1, 5, 3))
     0.05467002489199788

     Supports NumPy Arrays
@@ -29,7 +29,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
            5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27,
            2.14638374e-32, 7.99882776e-38, 1.09660656e-43])

-    >>> gaussian(15)
+    >>> float(gaussian(15))
     5.530709549844416e-50

     >>> gaussian([1,2, 'string'])
@@ -47,10 +47,10 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
     ...
     OverflowError: (34, 'Result too large')

-    >>> gaussian(10**-326)
+    >>> float(gaussian(10**-326))
     0.3989422804014327

-    >>> gaussian(2523, mu=234234, sigma=3425)
+    >>> float(gaussian(2523, mu=234234, sigma=3425))
     0.0
     """
     return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
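Besides the doctest wrapping, this hunk also fixes the return annotation: the function always returns a float (or an array), never an int. The first doctest value checks out against the density formula 1 / sqrt(2*pi*sigma^2) * e^(-(x - mu)^2 / (2*sigma^2)) with mu = 0, sigma = 1:

import math

# Hand check of gaussian(1): exp(-1/2) / sqrt(2*pi)
print(math.exp(-0.5) / math.sqrt(2 * math.pi))  # 0.24197072451914337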

View File

@@ -19,7 +19,7 @@ def minkowski_distance(
     >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2)
     8.0
     >>> import numpy as np
-    >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3))
+    >>> bool(np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)))
     True
     >>> minkowski_distance([1.0], [2.0], -1)
     Traceback (most recent call last):

View File

@@ -102,7 +102,7 @@ class AdamsBashforth:
         >>> def f(x, y):
         ...     return x + y
         >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3()
-        >>> y[3]
+        >>> float(y[3])
         0.15533333333333332

         >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3()
@@ -140,9 +140,9 @@ class AdamsBashforth:
         ...     return x + y
         >>> y = AdamsBashforth(
         ...     f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4()
-        >>> y[4]
+        >>> float(y[4])
         0.30699999999999994
-        >>> y[5]
+        >>> float(y[5])
         0.5771083333333333

         >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4()
@@ -185,7 +185,7 @@ class AdamsBashforth:
         >>> y = AdamsBashforth(
         ...     f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536],
         ...     0.2, 1).step_5()
-        >>> y[-1]
+        >>> float(y[-1])
         0.05436839444444452

         >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5()

View File

@@ -19,7 +19,7 @@ def runge_kutta(f, y0, x0, h, x_end):
     ...     return y
     >>> y0 = 1
     >>> y = runge_kutta(f, y0, 0.0, 0.01, 5)
-    >>> y[-1]
+    >>> float(y[-1])
     148.41315904125113
     """
     n = int(np.ceil((x_end - x0) / h))

View File

@@ -34,12 +34,12 @@ def runge_kutta_fehlberg_45(
     >>> def f(x, y):
     ...     return 1 + y**2
     >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1)
-    >>> y[1]
+    >>> float(y[1])
     0.2027100937470787
     >>> def f(x,y):
     ...     return x
     >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0)
-    >>> y[1]
+    >>> float(y[1])
     -0.18000000000000002
     >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1)
     Traceback (most recent call last):

View File

@@ -34,7 +34,7 @@ def runge_kutta_gills(
     >>> def f(x, y):
     ...     return (x-y)/2
     >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5)
-    >>> y[-1]
+    >>> float(y[-1])
     3.4104259225717537

     >>> def f(x,y):

View File

@@ -28,7 +28,7 @@ def softmax(vector):
     The softmax vector adds up to one. We need to ceil to mitigate for
     precision
-    >>> np.ceil(np.sum(softmax([1,2,3,4])))
+    >>> float(np.ceil(np.sum(softmax([1,2,3,4]))))
     1.0

     >>> vec = np.array([5,5])

View File

@@ -64,7 +64,7 @@ class TwoHiddenLayerNeuralNetwork:
         >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
         >>> res = nn.feedforward()
         >>> array_sum = np.sum(res)
-        >>> np.isnan(array_sum)
+        >>> bool(np.isnan(array_sum))
         False
         """
         # Layer_between_input_and_first_hidden_layer is the layer connecting the
@@ -105,7 +105,7 @@ class TwoHiddenLayerNeuralNetwork:
         >>> res = nn.feedforward()
         >>> nn.back_propagation()
         >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
-        >>> (res == updated_weights).all()
+        >>> bool((res == updated_weights).all())
         False
         """
@@ -171,7 +171,7 @@ class TwoHiddenLayerNeuralNetwork:
         >>> first_iteration_weights = nn.feedforward()
         >>> nn.back_propagation()
         >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
-        >>> (first_iteration_weights == updated_weights).all()
+        >>> bool((first_iteration_weights == updated_weights).all())
         False
         """
         for iteration in range(1, iterations + 1):

View File

@@ -87,9 +87,11 @@ class BankersAlgorithm:
         This function builds an index control dictionary to track original ids/indices
         of processes when altered during execution of method "main"
         Return: {0: [a: int, b: int], 1: [c: int, d: int]}
-        >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table,
-        ...    test_maximum_claim_table)._BankersAlgorithm__need_index_manager()
-        ... )  # doctest: +NORMALIZE_WHITESPACE
+        >>> index_control = BankersAlgorithm(
+        ...     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
+        ... )._BankersAlgorithm__need_index_manager()
+        >>> {key: [int(x) for x in value] for key, value
+        ...     in index_control.items()}  # doctest: +NORMALIZE_WHITESPACE
         {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0],
          4: [2, 0, 0, 3]}
         """

View File

@@ -53,7 +53,7 @@ def in_static_equilibrium(
     # summation of moments is zero
     moments: NDArray[float64] = cross(location, forces)
     sum_moments: float = sum(moments)
-    return abs(sum_moments) < eps
+    return bool(abs(sum_moments) < eps)


 if __name__ == "__main__":

View File

@@ -1,7 +1,7 @@
 beautifulsoup4
 fake_useragent
 imageio
-keras ; python_version < '3.12'
+keras
 lxml
 matplotlib
 numpy
@@ -17,7 +17,7 @@ rich
 scikit-learn
 statsmodels
 sympy
-tensorflow
+tensorflow ; python_version < '3.13'
 tweepy
 # yulewalker  # uncomment once audio_filters/equal_loudness_filter.py is fixed
 typing_extensions
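The suffixes here are PEP 508 environment markers: pip evaluates the expression against the running interpreter and skips the requirement when it is false, which is presumably why tensorflow is now constrained to Python < 3.13 (matching the new TensorFlow-related test ignores in the CI workflow above). Illustrative lines with hypothetical package names:

# requirements.txt environment-marker syntax (PEP 508)
packagea                              # always installed
packageb ; python_version < '3.13'   # skipped on Python 3.13+
packagec ; sys_platform == 'darwin'  # installed only on macOS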