Remove useless code in doctests (#7733)

* refactor: Fix matrix display deprecation

* refactor: Remove useless `print` and `pass` statements

* revert: Replace broken doctests

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* revert: Fix failing doctests

* chore: Satisfy pre-commit

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Caeden Perelli-Harris 2022-10-27 21:52:00 +01:00 committed by GitHub
parent 501a1cf0c7
commit 61eedc16c3
21 changed files with 51 additions and 61 deletions
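
Most of the hunks below lean on the same doctest behaviour: the expected output of a bare expression is compared against its repr(), so wrapping a value whose repr matches its printed form (lists, ints, bools) in print() adds nothing, and a placeholder `>>> pass` example exercises nothing at all. A minimal, hypothetical sketch of the before/after pattern (`smallest` is illustrative, not a function from this patch):

```python
def smallest(values: list[int]) -> int:
    """
    >>> print(smallest([3, 1, 2]))  # old style: explicit print()
    1
    >>> smallest([3, 1, 2])  # new style: doctest compares the output to repr(1)
    1
    """
    return min(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()  # both examples pass, so the print() wrapper is redundant
```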

View File

@@ -71,7 +71,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int)
 >>> curr_ind = 1
 >>> util_hamilton_cycle(graph, path, curr_ind)
 True
->>> print(path)
+>>> path
 [0, 1, 2, 4, 3, 0]

 Case 2: Use exact graph as in previous case, but in the properties taken from
@@ -85,7 +85,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int)
 >>> curr_ind = 3
 >>> util_hamilton_cycle(graph, path, curr_ind)
 True
->>> print(path)
+>>> path
 [0, 1, 2, 4, 3, 0]
 """

View File

@@ -22,7 +22,6 @@ def main() -> None:
 Get images list and annotations list from input dir.
 Update new images and annotations.
 Save images and annotations in output dir.
->>> pass # A doctest is not possible for this function.
 """
 img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
 print("Processing...")
@@ -48,7 +47,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
 - label_dir <type: str>: Path to label include annotation of images
 - img_dir <type: str>: Path to folder contain images
 Return <type: list>: List of images path and labels
->>> pass # A doctest is not possible for this function.
 """
 img_paths = []
 labels = []
@@ -88,7 +86,6 @@ def update_image_and_anno(
 - new_imgs_list <type: narray>: image after resize
 - new_annos_lists <type: list>: list of new annotation after scale
 - path_list <type: list>: list the name of image file
->>> pass # A doctest is not possible for this function.
 """
 new_annos_lists = []
 path_list = []

View File

@@ -23,7 +23,6 @@ def main() -> None:
 Get images list and annotations list from input dir.
 Update new images and annotations.
 Save images and annotations in output dir.
->>> pass # A doctest is not possible for this function.
 """
 img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
 for index in range(NUMBER_IMAGES):
@@ -60,7 +59,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
 - label_dir <type: str>: Path to label include annotation of images
 - img_dir <type: str>: Path to folder contain images
 Return <type: list>: List of images path and labels
->>> pass # A doctest is not possible for this function.
 """
 img_paths = []
 labels = []
@@ -105,7 +103,6 @@ def update_image_and_anno(
 - output_img <type: narray>: image after resize
 - new_anno <type: list>: list of new annotation after scale
 - path[0] <type: string>: get the name of image file
->>> pass # A doctest is not possible for this function.
 """
 output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
 scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])

View File

@@ -71,7 +71,7 @@ class BinomialHeap:
 ...     first_heap.insert(number)

 Size test
->>> print(first_heap.size)
+>>> first_heap.size
 30

 Deleting - delete() test
@@ -97,7 +97,7 @@
 # # # #

 preOrder() test
->>> print(second_heap.preOrder())
+>>> second_heap.preOrder()
 [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)]

 printing Heap - __str__() test

View File

@@ -9,20 +9,20 @@ class Heap:
 >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]
 >>> h = Heap()
 >>> h.build_max_heap(unsorted)
->>> print(h)
+>>> h
 [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
 >>>
 >>> h.extract_max()
 209
->>> print(h)
+>>> h
 [201, 107, 25, 103, 11, 15, 1, 9, 7, 5]
 >>>
 >>> h.insert(100)
->>> print(h)
+>>> h
 [201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11]
 >>>
 >>> h.heap_sort()
->>> print(h)
+>>> h
 [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
 """

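One caveat the Heap hunk above relies on: a bare `>>> h` is checked against repr(h), not str(h), so dropping print() only reads the same when the class keeps the two in sync. A hypothetical sketch of that requirement (this is not the repository's Heap class):

```python
class Box:
    """
    >>> Box([1, 2, 3])  # compared against repr(...)
    [1, 2, 3]
    >>> print(Box([1, 2, 3]))  # compared against str(...)
    [1, 2, 3]
    """

    def __init__(self, items: list[int]) -> None:
        self.items = list(items)

    def __str__(self) -> str:
        return str(self.items)

    # Without this alias the bare-expression doctest above would see the
    # default <__main__.Box object at 0x...> and fail.
    __repr__ = __str__
```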
View File

@@ -27,7 +27,7 @@ class MinHeap:
 >>> myMinHeap.decrease_key(b, -17)
 >>> print(b)
 Node(B, -17)
->>> print(myMinHeap["B"])
+>>> myMinHeap["B"]
 -17
 """

View File

@@ -443,4 +443,7 @@ def main():
 if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
     main()
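
The three lines added above are a common way to wire a module's doctests into its entry point; a self-contained sketch of what that wiring does when the file is run directly (`double` is illustrative):

```python
import doctest


def double(n: int) -> int:
    """
    >>> double(21)
    42
    """
    return 2 * n


if __name__ == "__main__":
    # Same shape as the hunk above: testmod() collects every docstring example
    # in the module, runs it, and returns TestResults(failed, attempted).
    results = doctest.testmod()
    print(f"{results.attempted} examples run, {results.failed} failed")
```

The same checks can also be run without touching the file via `python -m doctest path/to/module.py -v`.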

View File

@@ -17,7 +17,7 @@ def stable_matching(
 >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]]
 >>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]]
->>> print(stable_matching(donor_pref, recipient_pref))
+>>> stable_matching(donor_pref, recipient_pref)
 [1, 2, 3, 0]
 """
 assert len(donor_pref) == len(recipient_pref)

View File

@@ -18,7 +18,7 @@ class GraphAdjacencyList(Generic[T]):
 Directed graph example:
 >>> d_graph = GraphAdjacencyList()
->>> d_graph
+>>> print(d_graph)
 {}
 >>> d_graph.add_edge(0, 1)
 {0: [1], 1: []}
@@ -26,7 +26,7 @@ class GraphAdjacencyList(Generic[T]):
 {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []}
 >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
 {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
->>> print(d_graph)
+>>> d_graph
 {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
 >>> print(repr(d_graph))
 {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
@@ -68,7 +68,7 @@ class GraphAdjacencyList(Generic[T]):
 {'a': ['b'], 'b': ['a']}
 >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f')
 {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
->>> print(char_graph)
+>>> char_graph
 {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
 """

View File

@@ -69,16 +69,16 @@ class MinPriorityQueue(Generic[T]):
 >>> queue.push(3, 4000)
 >>> queue.push(4, 3000)
->>> print(queue.extract_min())
+>>> queue.extract_min()
 2
 >>> queue.update_key(4, 50)
->>> print(queue.extract_min())
+>>> queue.extract_min()
 4
->>> print(queue.extract_min())
+>>> queue.extract_min()
 1
->>> print(queue.extract_min())
+>>> queue.extract_min()
 3
 """

View File

@@ -53,7 +53,7 @@ def complete_graph(vertices_number: int) -> dict:
 @input: vertices_number (number of vertices),
 directed (False if the graph is undirected, True otherwise)
 @example:
->>> print(complete_graph(3))
+>>> complete_graph(3)
 {0: [1, 2], 1: [0, 2], 2: [0, 1]}
 """
 return {

View File

@@ -71,7 +71,6 @@ def local_weight_regression(
 def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat:
 """
 Function used for loading data from the seaborn splitting into x and y points
->>> pass # this function has no doctest
 """
 import seaborn as sns
@@ -112,7 +111,6 @@
 ) -> plt.plot:
 """
 This function used to plot predictions and display the graph
->>> pass #this function has no doctest
 """
 xsort = training_data_x.copy()
 xsort.sort(axis=0)

View File

@@ -45,7 +45,7 @@ if __name__ == "__main__":
 >>> poly = (0.0, 0.0, 5.0, 9.3, 7.0) # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
 >>> x = -13.0
 >>> # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9
->>> print(evaluate_poly(poly, x))
+>>> evaluate_poly(poly, x)
 180339.9
 """
 poly = (0.0, 0.0, 5.0, 9.3, 7.0)

View File

@@ -39,7 +39,7 @@ class FFT:
 >>> x = FFT(A, B)

 Print product
->>> print(x.product) # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
+>>> x.product # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
 [(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)]

 __str__ test

View File

@@ -21,9 +21,9 @@ class Matrix:
 [7. 8. 9.]]

 Matrix rows and columns are available as 2D arrays
->>> print(matrix.rows)
+>>> matrix.rows
 [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
->>> print(matrix.columns())
+>>> matrix.columns()
 [[1, 4, 7], [2, 5, 8], [3, 6, 9]]

 Order is returned as a tuple
@@ -55,7 +55,7 @@ class Matrix:
 [[-3. 6. -3.]
 [6. -12. 6.]
 [-3. 6. -3.]]
->>> print(matrix.inverse())
+>>> matrix.inverse()
 Traceback (most recent call last):
 ...
 TypeError: Only matrices with a non-zero determinant have an inverse
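
A side note on the inverse() example just above: doctest detects an expected exception from the `Traceback (most recent call last):` header and only compares the final exception line, so removing the print() wrapper leaves that example's behaviour unchanged. A small illustrative sketch (not the repository's Matrix class):

```python
def reciprocal(x: float) -> float:
    """
    >>> reciprocal(0)
    Traceback (most recent call last):
    ...
    ZeroDivisionError: division by zero
    >>> print(reciprocal(0))  # the exception is raised before print() runs
    Traceback (most recent call last):
    ...
    ZeroDivisionError: division by zero
    """
    return 1 / x
```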

View File

@@ -13,25 +13,25 @@ from __future__ import annotations
 def binary_search(a_list: list[int], item: int) -> bool:
 """
 >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42]
->>> print(binary_search(test_list, 3))
+>>> binary_search(test_list, 3)
 False
->>> print(binary_search(test_list, 13))
+>>> binary_search(test_list, 13)
 True
->>> print(binary_search([4, 4, 5, 6, 7], 4))
+>>> binary_search([4, 4, 5, 6, 7], 4)
 True
->>> print(binary_search([4, 4, 5, 6, 7], -10))
+>>> binary_search([4, 4, 5, 6, 7], -10)
 False
->>> print(binary_search([-18, 2], -18))
+>>> binary_search([-18, 2], -18)
 True
->>> print(binary_search([5], 5))
+>>> binary_search([5], 5)
 True
->>> print(binary_search(['a', 'c', 'd'], 'c'))
+>>> binary_search(['a', 'c', 'd'], 'c')
 True
->>> print(binary_search(['a', 'c', 'd'], 'f'))
+>>> binary_search(['a', 'c', 'd'], 'f')
 False
->>> print(binary_search([], 1))
+>>> binary_search([], 1)
 False
->>> print(binary_search([-.1, .1 , .8], .1))
+>>> binary_search([-.1, .1 , .8], .1)
 True
 >>> binary_search(range(-5000, 5000, 10), 80)
 True

View File

@@ -16,19 +16,19 @@ def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) ->
 >>> arr = [12, 42, -21, 1]
 >>> comp_and_swap(arr, 1, 2, 1)
->>> print(arr)
+>>> arr
 [12, -21, 42, 1]
 >>> comp_and_swap(arr, 1, 2, 0)
->>> print(arr)
+>>> arr
 [12, 42, -21, 1]
 >>> comp_and_swap(arr, 0, 3, 1)
->>> print(arr)
+>>> arr
 [1, 42, -21, 12]
 >>> comp_and_swap(arr, 0, 3, 0)
->>> print(arr)
+>>> arr
 [12, 42, -21, 1]
 """
 if (direction == 1 and array[index1] > array[index2]) or (
@@ -46,11 +46,11 @@ def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> No
 >>> arr = [12, 42, -21, 1]
 >>> bitonic_merge(arr, 0, 4, 1)
->>> print(arr)
+>>> arr
 [-21, 1, 12, 42]
 >>> bitonic_merge(arr, 0, 4, 0)
->>> print(arr)
+>>> arr
 [42, 12, 1, -21]
 """
 if length > 1:

View File

@@ -17,8 +17,8 @@ The array elements are taken from a Standard Normal Distribution, having mean =
 >>> mu, sigma = 0, 1 # mean and standard deviation
 >>> X = np.random.normal(mu, sigma, p)
 >>> np.save(outfile, X)
->>> print('The array is')
->>> print(X)
+>>> 'The array is'
+>>> X
 ```

View File

@@ -14,17 +14,17 @@ def rec_insertion_sort(collection: list, n: int):
 >>> col = [1, 2, 1]
 >>> rec_insertion_sort(col, len(col))
->>> print(col)
+>>> col
 [1, 1, 2]
 >>> col = [2, 1, 0, -1, -2]
 >>> rec_insertion_sort(col, len(col))
->>> print(col)
+>>> col
 [-2, -1, 0, 1, 2]
 >>> col = [1]
 >>> rec_insertion_sort(col, len(col))
->>> print(col)
+>>> col
 [1]
 """
 # Checks if the entire collection has been sorted
@@ -41,17 +41,17 @@ def insert_next(collection: list, index: int):
 >>> col = [3, 2, 4, 2]
 >>> insert_next(col, 1)
->>> print(col)
+>>> col
 [2, 3, 4, 2]
 >>> col = [3, 2, 3]
 >>> insert_next(col, 2)
->>> print(col)
+>>> col
 [3, 2, 3]
 >>> col = []
 >>> insert_next(col, 1)
->>> print(col)
+>>> col
 []
 """
 # Checks order between adjacent elements

View File

@@ -23,8 +23,6 @@ def get_subreddit_data(
 limit : Number of posts to fetch
 age : ["new", "top", "hot"]
 wanted_data : Get only the required data in the list
->>> pass
 """
 wanted_data = wanted_data or []
 if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):

View File

@@ -19,7 +19,6 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
 {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ...
 # >>> get_openlibrary_data(olid='/authors/OL7353617A') # doctest: +ELLIPSIS
 {'name': 'Adrian Brisku', 'created': {'type': '/type/datetime', ...
->>> pass # Placate https://github.com/apps/algorithms-keeper
 """
 new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes
 if new_olid.count("/") != 1:
@@ -29,9 +28,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
 def summarize_book(ol_book_data: dict) -> dict:
 """
 Given Open Library book data, return a summary as a Python dict.
->>> pass # Placate https://github.com/apps/algorithms-keeper
 """
 desired_keys = {
 "title": "Title",