Misc fixes across multiple algorithms (#6912)

Source: Snyk code quality
Add scikit-fuzzy to requirements

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Dhruv Manilawala <dhruvmanila@gmail.com>
CenTdemeern1 2022-10-15 22:25:38 -07:00 committed by GitHub
parent c94e215c8d
commit 04698538d8
19 changed files with 40 additions and 48 deletions


@@ -31,7 +31,7 @@ def parse_file(file_path: str) -> list[Letter]:
             c = f.read(1)
             if not c:
                 break
-            chars[c] = chars[c] + 1 if c in chars.keys() else 1
+            chars[c] = chars[c] + 1 if c in chars else 1
     return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq)


@@ -55,7 +55,7 @@ def is_palindrome_dict(head):
     d = {}
     pos = 0
     while head:
-        if head.val in d.keys():
+        if head.val in d:
             d[head.val].append(pos)
         else:
             d[head.val] = [pos]


@@ -60,7 +60,7 @@ def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int)
     )

-if __name__ == "main":
+if __name__ == "__main__":
     # Reading the image and converting it to grayscale.
     image = cv2.imread(


@@ -8,11 +8,7 @@ Python:
   - 3.5
 """
 import numpy as np
-try:
-    import skfuzzy as fuzz
-except ImportError:
-    fuzz = None
+import skfuzzy as fuzz

 if __name__ == "__main__":
     # Create universe of discourse in Python using linspace ()
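
With the ImportError guard gone, the script simply assumes scikit-fuzzy is installed (it is re-enabled in requirements.txt later in this commit). A minimal sketch of the kind of setup the `__main__` block performs, using only `np.linspace` and skfuzzy's triangular membership function; the universe bounds and membership parameters below are illustrative, not the file's values:

    import numpy as np
    import skfuzzy as fuzz

    # Universe of discourse: 75 evenly spaced points (bounds chosen for illustration)
    x = np.linspace(0, 75, 75)

    # Two triangular membership functions over that universe
    young = fuzz.trimf(x, [0, 0, 30])
    middle_aged = fuzz.trimf(x, [20, 40, 60])

    # Pointwise fuzzy union and intersection via numpy
    union = np.fmax(young, middle_aged)
    intersection = np.fmin(young, middle_aged)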


@@ -89,13 +89,13 @@ class Graph:
         # Edge going from node u to v and v to u with weight w
         # u (w)-> v, v (w) -> u
         # Check if u already in graph
-        if u in self.adjList.keys():
+        if u in self.adjList:
             self.adjList[u].append((v, w))
         else:
             self.adjList[u] = [(v, w)]
         # Assuming undirected graph
-        if v in self.adjList.keys():
+        if v in self.adjList:
             self.adjList[v].append((u, w))
         else:
             self.adjList[v] = [(u, w)]


@@ -226,9 +226,6 @@ class DirectedGraph:
                                 break
                             else:
                                 return True
-                                # TODO:The following code is unreachable.
-                                anticipating_nodes.add(stack[len_stack_minus_one])
-                                len_stack_minus_one -= 1
                     if visited.count(node[1]) < 1:
                         stack.append(node[1])
                         visited.append(node[1])
@@ -454,10 +451,6 @@ class Graph:
                                 break
                             else:
                                 return True
-                                # TODO: the following code is unreachable
-                                # is this meant to be called in the else ?
-                                anticipating_nodes.add(stack[len_stack_minus_one])
-                                len_stack_minus_one -= 1
                     if visited.count(node[1]) < 1:
                         stack.append(node[1])
                         visited.append(node[1])


@@ -79,8 +79,7 @@ def emitter_converter(size_par, data):
     ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1']
     """
     if size_par + len(data) <= 2**size_par - (len(data) - 1):
-        print("ERROR - size of parity don't match with size of data")
-        exit(0)
+        raise ValueError("size of parity don't match with size of data")
     data_out = []
     parity = []
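
Raising `ValueError` instead of printing and calling `exit(0)` turns the size mismatch into something callers and tests can handle. A hypothetical caller sketch (the arguments are illustrative only, and `emitter_converter` is assumed to be in scope):

    # The bad-size case is now a catchable exception rather than a silent
    # process exit with status 0.
    try:
        encoded = emitter_converter(4, "1010101111")
    except ValueError as err:
        print(f"encoding failed: {err}")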


@@ -89,7 +89,7 @@ class Test(unittest.TestCase):
         """
         test for global function zero_vector()
         """
-        self.assertTrue(str(zero_vector(10)).count("0") == 10)
+        self.assertEqual(str(zero_vector(10)).count("0"), 10)

     def test_unit_basis_vector(self) -> None:
         """


@@ -75,11 +75,12 @@ def main():
     """Call Extended Euclidean Algorithm."""
     if len(sys.argv) < 3:
         print("2 integer arguments required")
-        exit(1)
+        return 1
     a = int(sys.argv[1])
     b = int(sys.argv[2])
     print(extended_euclidean_algorithm(a, b))
+    return 0


 if __name__ == "__main__":
-    main()
+    raise SystemExit(main())
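
Returning an exit code from `main()` and raising `SystemExit(main())` at the top level keeps the function importable and unit-testable while still setting the process exit status. A self-contained sketch of the same idiom with a hypothetical two-argument adder:

    import sys


    def main() -> int:
        """Hypothetical CLI: add two integers, return a process exit code."""
        if len(sys.argv) < 3:
            print("2 integer arguments required")
            return 1  # non-zero signals failure, but the function itself just returns
        print(int(sys.argv[1]) + int(sys.argv[2]))
        return 0


    if __name__ == "__main__":
        # SystemExit(0) is a clean exit; any other small int becomes the exit status.
        raise SystemExit(main())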


@@ -14,7 +14,7 @@ Jaccard similarity is widely used with MinHashing.
 """


-def jaccard_similariy(set_a, set_b, alternative_union=False):
+def jaccard_similarity(set_a, set_b, alternative_union=False):
     """
     Finds the jaccard similarity between two sets.
     Essentially, its intersection over union.
@@ -35,18 +35,18 @@ def jaccard_similariy(set_a, set_b, alternative_union=False):
     Examples:
     >>> set_a = {'a', 'b', 'c', 'd', 'e'}
     >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
-    >>> jaccard_similariy(set_a, set_b)
+    >>> jaccard_similarity(set_a, set_b)
     0.375
-    >>> jaccard_similariy(set_a, set_a)
+    >>> jaccard_similarity(set_a, set_a)
     1.0
-    >>> jaccard_similariy(set_a, set_a, True)
+    >>> jaccard_similarity(set_a, set_a, True)
     0.5
     >>> set_a = ['a', 'b', 'c', 'd', 'e']
     >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i')
-    >>> jaccard_similariy(set_a, set_b)
+    >>> jaccard_similarity(set_a, set_b)
     0.375
     """
@@ -67,14 +67,15 @@ def jaccard_similariy(set_a, set_b, alternative_union=False):
         if alternative_union:
             union = len(set_a) + len(set_b)
+            return len(intersection) / union
         else:
             union = set_a + [element for element in set_b if element not in set_a]
+            return len(intersection) / len(union)

     return len(intersection) / len(union)


 if __name__ == "__main__":
     set_a = {"a", "b", "c", "d", "e"}
     set_b = {"c", "d", "e", "f", "h", "i"}
-    print(jaccard_similariy(set_a, set_b))
+    print(jaccard_similarity(set_a, set_b))
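
For reference, the 0.375 in the doctests is just |A ∩ B| / |A ∪ B|: the two example sets share 3 elements ('c', 'd', 'e') out of 8 distinct ones. A quick standalone check with plain set operations (not the module's own code):

    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}

    # |A ∩ B| = 3, |A ∪ B| = 8  ->  3 / 8 = 0.375
    print(len(set_a & set_b) / len(set_a | set_b))

    # The alternative_union variant divides by |A| + |B| instead, which is why
    # identical sets score 0.5 rather than 1.0 in the third doctest above.
    print(len(set_a & set_a) / (len(set_a) + len(set_a)))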


@@ -286,7 +286,7 @@ class Matrix:
     # MATRIX OPERATIONS
     def __eq__(self, other: object) -> bool:
         if not isinstance(other, Matrix):
-            raise TypeError("A Matrix can only be compared with another Matrix")
+            return NotImplemented
         return self.rows == other.rows

     def __ne__(self, other: object) -> bool:
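
Returning `NotImplemented` instead of raising lets Python try the other operand's reflected comparison and, failing that, fall back to identity, so comparing a Matrix with an unrelated object yields `False` rather than an exception. A small sketch of that behaviour with a toy class (not the repo's Matrix):

    class Point:
        def __init__(self, x: int) -> None:
            self.x = x

        def __eq__(self, other: object) -> bool:
            if not isinstance(other, Point):
                # "I don't know how to compare": Python then asks `other`,
                # and finally falls back to identity comparison.
                return NotImplemented
            return self.x == other.x


    print(Point(1) == Point(1))  # True
    print(Point(1) == "banana")  # False, no TypeError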


@@ -26,9 +26,7 @@ def solution(n: int = 1000) -> int:
     result = 0
     for i in range(n):
-        if i % 3 == 0:
-            result += i
-        elif i % 5 == 0:
+        if i % 3 == 0 or i % 5 == 0:
             result += i
     return result


@@ -34,12 +34,11 @@ def solution():
         words = f.readline()

     words = [word.strip('"') for word in words.strip("\r\n").split(",")]
-    words = list(
-        filter(
-            lambda word: word in TRIANGULAR_NUMBERS,
-            (sum(ord(x) - 64 for x in word) for word in words),
-        )
-    )
+    words = [
+        word
+        for word in [sum(ord(x) - 64 for x in word) for word in words]
+        if word in TRIANGULAR_NUMBERS
+    ]
     return len(words)


@@ -28,8 +28,12 @@ def solution():
     with open(triangle) as f:
         triangle = f.readlines()

-    a = (x.rstrip("\r\n").split(" ") for x in triangle)
-    a = [list(map(int, x)) for x in a]
+    a = []
+    for line in triangle:
+        numbers_from_line = []
+        for number in line.strip().split(" "):
+            numbers_from_line.append(int(number))
+        a.append(numbers_from_line)

     for i in range(1, len(a)):
         for j in range(len(a[i])):
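
The nested loops in the trailing context presumably perform the usual maximum path sum accumulation over the parsed triangle. A standalone sketch of that technique on the classic four-row example triangle (hard-coded here; this is not the file's exact code):

    # Miniature triangle standing in for the parsed input file
    a = [
        [3],
        [7, 4],
        [2, 4, 6],
        [8, 5, 9, 3],
    ]

    # Each cell absorbs the larger of its two parents; the best path is the
    # maximum of the last row once every row has been processed.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            left = a[i - 1][j - 1] if j - 1 >= 0 else 0
            right = a[i - 1][j] if j < len(a[i - 1]) else 0
            a[i][j] += max(left, right)

    print(max(a[-1]))  # 23 for this example triangle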


@@ -125,8 +125,9 @@ def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
     savings = 0

-    file1 = open(os.path.dirname(__file__) + roman_numerals_filename)
-    lines = file1.readlines()
+    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
+        lines = file1.readlines()
+
     for line in lines:
         original = line.strip()
         num = parse_roman_numerals(original)
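
The `with` statement guarantees the file handle is closed even if reading raises, which the bare `open()` call did not. A minimal illustration with a hypothetical path:

    # Hypothetical path; the context manager closes the handle on success
    # and on any exception raised inside the block.
    with open("p089_roman.txt") as file1:
        lines = file1.readlines()

    print(file1.closed)  # True: the handle is already closed here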


@@ -9,7 +9,7 @@ pandas
 pillow
 qiskit
 requests
-# scikit-fuzzy  # Causing broken builds
+scikit-fuzzy
 sklearn
 statsmodels
 sympy


@@ -79,7 +79,7 @@ if __name__ == "__main__":
     # ensure that we actually have processes
     if len(processes) == 0:
         print("Zero amount of processes")
-        exit()
+        raise SystemExit(0)

     # duration time of all processes
     duration_times = [19, 8, 9]
@@ -87,7 +87,7 @@ if __name__ == "__main__":
     # ensure we can match each id to a duration time
     if len(duration_times) != len(processes):
         print("Unable to match all id's with their duration time")
-        exit()
+        raise SystemExit(0)

     # get the waiting times and the turnaround times
     waiting_times = calculate_waiting_times(duration_times)


@@ -276,7 +276,7 @@ if __name__ == "__main__":
     queue = deque([P1, P2, P3, P4])

     if len(time_slices) != number_of_queues - 1:
-        exit()
+        raise SystemExit(0)

     doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})


@@ -93,7 +93,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
             except ValueError:
                 pass
         except ValueError:
-            exit(-1)
+            raise SystemExit(1)

     # Finally return a sorted list of email addresses with no duplicates.
     return sorted(valid_emails)