Mirror of https://github.com/TheAlgorithms/Python.git (synced 2024-11-23 21:11:08 +00:00)
Remove some print statements within algorithmic functions (#7499)
* Remove commented-out print statements in algorithmic functions
* Encapsulate non-algorithmic code in __main__
* Remove unused print_matrix function
* Remove print statement in __init__
* Remove print statement from doctest
* Encapsulate non-algorithmic code in __main__
* Modify algorithm to return instead of print
* Encapsulate non-algorithmic code in __main__
* Refactor data_safety_checker to return instead of print
* updating DIRECTORY.md
* updating DIRECTORY.md
* Apply suggestions from code review
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Co-authored-by: Christian Clauss <cclauss@me.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in:
parent 717f0e46d9
commit cc10b20beb
@@ -360,6 +360,7 @@
* [Dijkstra](graphs/dijkstra.py)
* [Dijkstra 2](graphs/dijkstra_2.py)
* [Dijkstra Algorithm](graphs/dijkstra_algorithm.py)
* [Dijkstra Alternate](graphs/dijkstra_alternate.py)
* [Dinic](graphs/dinic.py)
* [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py)
* [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py)

@@ -460,6 +461,7 @@
* [Similarity Search](machine_learning/similarity_search.py)
* [Support Vector Machines](machine_learning/support_vector_machines.py)
* [Word Frequency Functions](machine_learning/word_frequency_functions.py)
* [Xgboostclassifier](machine_learning/xgboostclassifier.py)

## Maths
* [3N Plus 1](maths/3n_plus_1.py)

@@ -534,6 +536,7 @@
* [Line Length](maths/line_length.py)
* [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py)
* [Lucas Series](maths/lucas_series.py)
* [Maclaurin Sin](maths/maclaurin_sin.py)
* [Matrix Exponentiation](maths/matrix_exponentiation.py)
* [Max Sum Sliding Window](maths/max_sum_sliding_window.py)
* [Median Of Two Arrays](maths/median_of_two_arrays.py)

@@ -936,6 +939,7 @@
* [Not Gate](quantum/not_gate.py)
* [Q Full Adder](quantum/q_full_adder.py)
* [Quantum Entanglement](quantum/quantum_entanglement.py)
* [Quantum Random](quantum/quantum_random.py)
* [Ripple Adder Classic](quantum/ripple_adder_classic.py)
* [Single Qubit Measure](quantum/single_qubit_measure.py)
@@ -66,7 +66,6 @@ def run(canvas: list[list[bool]]) -> list[list[bool]]:
     next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
     for r, row in enumerate(current_canvas):
         for c, pt in enumerate(row):
-            # print(r-1,r+2,c-1,c+2)
             next_gen_canvas[r][c] = __judge_point(
                 pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
             )
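`__judge_point` itself is outside this hunk. A minimal sketch of what such a helper could look like, assuming the standard Conway rules (a live cell survives with 2 or 3 live neighbours, a dead cell is born with exactly 3) and the same 3x3 NumPy slice passed above; this is an illustration, not the repository's implementation:

```python
import numpy as np


def judge_point(pt: bool, neighbourhood: np.ndarray) -> bool:
    """Hypothetical helper: next state of a cell from its (up to) 3x3 slice."""
    # Live neighbours in the slice, excluding the centre cell itself.
    alive = int(neighbourhood.sum()) - int(pt)
    if pt:
        return alive in (2, 3)  # survival rule
    return alive == 3           # birth rule
```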
@@ -105,7 +105,6 @@ class IndexCalculation:
     """

     def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
-        # print("Numpy version: " + np.__version__)
         self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

     def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
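`IndexCalculation` stores the spectral band matrices passed to `set_matricies` and computes vegetation indices from them. As a small illustration of the kind of arithmetic involved (not the class's actual method), the widely used NDVI index combines the red and near-infrared bands:

```python
import numpy as np


def ndvi(red: np.ndarray, nir: np.ndarray) -> np.ndarray:
    """Normalized Difference Vegetation Index: (NIR - RED) / (NIR + RED)."""
    return (nir - red) / (nir + red)


# Band values below are made up for the example.
print(ndvi(np.array([50.0, 60.0]), np.array([200.0, 180.0])))  # [0.6 0.5]
```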
@@ -69,8 +69,10 @@ def max_subarray_sum(array, left, right):
     return max(left_half_sum, right_half_sum, cross_sum)


-array = [-2, -5, 6, -2, -3, 1, 5, -6]
-array_length = len(array)
-print(
-    "Maximum sum of contiguous subarray:", max_subarray_sum(array, 0, array_length - 1)
-)
+if __name__ == "__main__":
+    array = [-2, -5, 6, -2, -3, 1, 5, -6]
+    array_length = len(array)
+    print(
+        "Maximum sum of contiguous subarray:",
+        max_subarray_sum(array, 0, array_length - 1),
+    )
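Only the driver moved into `__main__` here; the recursive body sits above this hunk. A minimal sketch of the divide-and-conquer scheme such a `max_subarray_sum(array, left, right)` implements (best of the left half, the right half, and the best slice crossing the midpoint), assuming inclusive indices as in the driver; the repository's exact code may differ:

```python
def max_subarray_sum(array: list[int], left: int, right: int) -> int:
    """Maximum sum over contiguous array[left..right], inclusive (illustrative sketch)."""
    if left == right:  # base case: a single element
        return array[left]
    mid = (left + right) // 2
    left_half_sum = max_subarray_sum(array, left, mid)
    right_half_sum = max_subarray_sum(array, mid + 1, right)

    # Best sum of a slice that crosses the midpoint: grow outwards from mid.
    best_left, running = float("-inf"), 0
    for i in range(mid, left - 1, -1):
        running += array[i]
        best_left = max(best_left, running)
    best_right, running = float("-inf"), 0
    for j in range(mid + 1, right + 1):
        running += array[j]
        best_right = max(best_right, running)
    cross_sum = best_left + best_right

    return max(left_half_sum, right_half_sum, cross_sum)


if __name__ == "__main__":
    array = [-2, -5, 6, -2, -3, 1, 5, -6]
    print(max_subarray_sum(array, 0, len(array) - 1))  # expected: 7
```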
@@ -68,8 +68,7 @@ def matrix_dimensions(matrix: list) -> tuple[int, int]:


 def print_matrix(matrix: list) -> None:
-    for i in range(len(matrix)):
-        print(matrix[i])
+    print("\n".join(str(line) for line in matrix))


 def actual_strassen(matrix_a: list, matrix_b: list) -> list:
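Both the old loop and the new one-liner print one row per line; a quick check of the replacement expression with a made-up matrix:

```python
matrix = [[1, 2], [3, 4]]
print("\n".join(str(line) for line in matrix))
# [1, 2]
# [3, 4]
```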
@@ -14,7 +14,6 @@ class SubArray:
     def __init__(self, arr):
         # we need a list not a string, so do something to change the type
         self.array = arr.split(",")
-        print(("the input array is:", self.array))

     def solve_sub_array(self):
         rear = [int(self.array[0])] * len(self.array)
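`solve_sub_array` continues past this hunk, building `rear` so that each position holds the best subarray sum ending there. A standalone sketch of that idea (Kadane's algorithm), assuming the same comma-separated input that `__init__` parses; illustrative only, not the class's exact code:

```python
def solve_sub_array(csv_values: str) -> int:
    """Maximum contiguous subarray sum over comma-separated integers (sketch)."""
    values = [int(item) for item in csv_values.split(",")]
    best_ending_here = best_overall = values[0]
    for value in values[1:]:
        # Either extend the previous subarray or start a new one at `value`.
        best_ending_here = max(value, best_ending_here + value)
        best_overall = max(best_overall, best_ending_here)
    return best_overall


print(solve_sub_array("1,2,3,-2,5"))  # 9
```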
@@ -7,7 +7,7 @@ def maximum_non_adjacent_sum(nums: list[int]) -> int:
     """
     Find the maximum non-adjacent sum of the integers in the nums input list

-    >>> print(maximum_non_adjacent_sum([1, 2, 3]))
+    >>> maximum_non_adjacent_sum([1, 2, 3])
     4
     >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
     18
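The function body lies outside this hunk. A minimal sketch of the usual dynamic-programming recurrence for the problem (track the best sum that includes the current element and the best that excludes it), not necessarily the repository's exact implementation:

```python
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Largest sum of elements with no two chosen elements adjacent (sketch)."""
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        # Including `num` forbids the previous element; excluding keeps the best so far.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


print(maximum_non_adjacent_sum([1, 2, 3]))              # 4
print(maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]))  # 18
```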
@@ -37,7 +37,8 @@ def print_combination(arr, n, r):
     combination_util(arr, n, r, 0, data, 0)


-# Driver function to check for above function
-arr = [10, 20, 30, 40, 50]
-print_combination(arr, len(arr), 3)
-# This code is contributed by Ambuj sahu
+if __name__ == "__main__":
+    # Driver code to check the function above
+    arr = [10, 20, 30, 40, 50]
+    print_combination(arr, len(arr), 3)
+    # This code is contributed by Ambuj sahu
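`combination_util` is also outside this hunk. A minimal sketch of the standard recursive scheme behind such a helper (place elements into a `data` buffer and emit it once `r` of them are chosen), using the same parameter order as the call above; illustrative, not the repository's code:

```python
def combination_util(arr, n, r, index, data, i):
    """Print all combinations of size r from arr[0:n] (illustrative sketch)."""
    if index == r:  # the buffer holds a full combination
        print(" ".join(str(value) for value in data[:r]))
        return
    if i >= n:      # no elements left to place
        return
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)  # take arr[i]
    combination_util(arr, n, r, index, data, i + 1)      # skip arr[i]


def print_combination(arr, n, r):
    combination_util(arr, n, r, 0, [0] * r, 0)


if __name__ == "__main__":
    print_combination([10, 20, 30, 40, 50], 5, 3)
```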
@@ -1,13 +1,14 @@
-def is_sum_subset(arr, arr_len, required_sum):
+def is_sum_subset(arr: list[int], required_sum: int) -> bool:
     """
-    >>> is_sum_subset([2, 4, 6, 8], 4, 5)
+    >>> is_sum_subset([2, 4, 6, 8], 5)
     False
-    >>> is_sum_subset([2, 4, 6, 8], 4, 14)
+    >>> is_sum_subset([2, 4, 6, 8], 14)
     True
     """
     # a subset value says 1 if that subset sum can be formed else 0
     # initially no subsets can be formed hence False/0
-    subset = [[False for i in range(required_sum + 1)] for i in range(arr_len + 1)]
+    arr_len = len(arr)
+    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

     # for each arr value, a sum of zero(0) can be formed by not taking any element
     # hence True/1

@@ -25,10 +26,7 @@ def is_sum_subset(arr, arr_len, required_sum):
             if arr[i - 1] <= j:
                 subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

-    # uncomment to print the subset
-    # for i in range(arrLen+1):
-    #     print(subset[i])
-    print(subset[arr_len][required_sum])
+    return subset[arr_len][required_sum]


 if __name__ == "__main__":
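Putting the two hunks together, the refactored function reads roughly as below. The table-filling loop falls between the hunks, so it is reconstructed here as the standard subset-sum recurrence; treat it as a sketch rather than the file's exact contents:

```python
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] is True if some subset of the first i values sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # a sum of zero is always achievable by taking no elements
    for i in range(arr_len + 1):
        subset[i][0] = True

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```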
@@ -1,7 +1,7 @@
 """
 this is code for forecasting
 but i modified it and used it for safety checker of data
-for ex: you have a online shop and for some reason some data are
+for ex: you have an online shop and for some reason some data are
 missing (the amount of data that u expected are not supposed to be)
 then we can use it
 *ps : 1. ofc we can use normal statistic method but in this case
@@ -91,14 +91,14 @@ def interquartile_range_checker(train_user: list) -> float:
     return low_lim


-def data_safety_checker(list_vote: list, actual_result: float) -> None:
+def data_safety_checker(list_vote: list, actual_result: float) -> bool:
     """
     Used to review all the votes (list result prediction)
     and compare it to the actual result.
     input : list of predictions
     output : print whether it's safe or not
-    >>> data_safety_checker([2,3,4],5.0)
-    Today's data is not safe.
+    >>> data_safety_checker([2, 3, 4], 5.0)
+    False
     """
     safe = 0
     not_safe = 0
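With the new signature the checker returns a boolean instead of printing. A compact sketch of the whole function, with the voting loop reconstructed from the fragments visible in the next hunk and the non-numeric branch (the `safe = not_safe + 1` context line below) omitted for brevity; a sketch, not the file's exact code:

```python
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """
    Review all the votes (predicted results) against the actual result.

    >>> data_safety_checker([2, 3, 4], 5.0)
    False
    """
    safe = 0
    not_safe = 0
    for prediction in list_vote:
        # A vote counts as "safe" when it lands within 0.1 of the actual value.
        if abs(abs(prediction) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe
```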
@@ -107,50 +107,54 @@ def data_safety_checker(list_vote: list, actual_result: float) -> None:
             safe = not_safe + 1
         else:
             if abs(abs(i) - abs(actual_result)) <= 0.1:
-                safe = safe + 1
+                safe += 1
             else:
-                not_safe = not_safe + 1
-    print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.")
+                not_safe += 1
+    return safe > not_safe


-# data_input_df = pd.read_csv("ex_data.csv", header=None)
-data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
-data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
+if __name__ == "__main__":
+    # data_input_df = pd.read_csv("ex_data.csv", header=None)
+    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
+    data_input_df = pd.DataFrame(
+        data_input, columns=["total_user", "total_even", "days"]
+    )

-"""
-data column = total user in a day, how much online event held in one day,
-what day is that(sunday-saturday)
-"""
+    """
+    data column = total user in a day, how much online event held in one day,
+    what day is that(sunday-saturday)
+    """

-# start normalization
-normalize_df = Normalizer().fit_transform(data_input_df.values)
-# split data
-total_date = normalize_df[:, 2].tolist()
-total_user = normalize_df[:, 0].tolist()
-total_match = normalize_df[:, 1].tolist()
+    # start normalization
+    normalize_df = Normalizer().fit_transform(data_input_df.values)
+    # split data
+    total_date = normalize_df[:, 2].tolist()
+    total_user = normalize_df[:, 0].tolist()
+    total_match = normalize_df[:, 1].tolist()

-# for svr (input variable = total date and total match)
-x = normalize_df[:, [1, 2]].tolist()
-x_train = x[: len(x) - 1]
-x_test = x[len(x) - 1 :]
+    # for svr (input variable = total date and total match)
+    x = normalize_df[:, [1, 2]].tolist()
+    x_train = x[: len(x) - 1]
+    x_test = x[len(x) - 1 :]

-# for linear reression & sarimax
-trn_date = total_date[: len(total_date) - 1]
-trn_user = total_user[: len(total_user) - 1]
-trn_match = total_match[: len(total_match) - 1]
+    # for linear regression & sarimax
+    trn_date = total_date[: len(total_date) - 1]
+    trn_user = total_user[: len(total_user) - 1]
+    trn_match = total_match[: len(total_match) - 1]

-tst_date = total_date[len(total_date) - 1 :]
-tst_user = total_user[len(total_user) - 1 :]
-tst_match = total_match[len(total_match) - 1 :]
+    tst_date = total_date[len(total_date) - 1 :]
+    tst_user = total_user[len(total_user) - 1 :]
+    tst_match = total_match[len(total_match) - 1 :]

-# voting system with forecasting
-res_vote = []
-res_vote.append(
-    linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match)
-)
-res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match))
-res_vote.append(support_vector_regressor(x_train, x_test, trn_user))
+    # voting system with forecasting
+    res_vote = [
+        linear_regression_prediction(
+            trn_date, trn_user, trn_match, tst_date, tst_match
+        ),
+        sarimax_predictor(trn_user, trn_match, tst_match),
+        support_vector_regressor(x_train, x_test, trn_user),
+    ]

-# check the safety of todays'data^^
-data_safety_checker(res_vote, tst_user)
+    # check the safety of today's data
+    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
+    print("Today's data is {not_str}safe.")
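One caveat in the new driver as committed: the final `print` call passes a plain string, so `{not_str}` would appear literally in the output instead of being interpolated. The interpolating form needs an f-string prefix:

```python
# assuming not_str was set from data_safety_checker(res_vote, tst_user) as above
print(f"Today's data is {not_str}safe.")
```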