From 0177ae1cd596f4f3c0ee7490666d74504deb0298 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 23:01:15 +0200 Subject: [PATCH] Upgrade to Python 3.13 (#11588) --- .github/workflows/build.yml | 6 ++- DIRECTORY.md | 1 - computer_vision/haralick_descriptors.py | 8 ++-- data_structures/heap/binomial_heap.py | 6 +-- electronics/circular_convolution.py | 6 +-- fractals/julia_sets.py | 18 ++++----- graphics/bezier_curve.py | 8 ++-- graphs/dijkstra_binary_grid.py | 2 +- linear_algebra/src/power_iteration.py | 2 +- linear_programming/simplex.py | 32 +++++++-------- machine_learning/decision_tree.py | 8 ++-- machine_learning/forecasting/run.py | 8 ++-- machine_learning/k_nearest_neighbours.py | 2 +- machine_learning/logistic_regression.py | 4 +- machine_learning/loss_functions.py | 40 +++++++++---------- machine_learning/mfcc.py | 13 +++--- .../multilayer_perceptron_classifier.py | 2 +- machine_learning/scoring_functions.py | 22 +++++----- machine_learning/similarity_search.py | 2 +- machine_learning/support_vector_machines.py | 6 +-- maths/euclidean_distance.py | 8 ++-- maths/euler_method.py | 2 +- maths/euler_modified.py | 4 +- maths/gaussian.py | 16 ++++---- maths/minkowski_distance.py | 2 +- maths/numerical_analysis/adams_bashforth.py | 8 ++-- maths/numerical_analysis/runge_kutta.py | 2 +- .../runge_kutta_fehlberg_45.py | 4 +- maths/numerical_analysis/runge_kutta_gills.py | 2 +- maths/softmax.py | 2 +- .../two_hidden_layers_neural_network.py | 6 +-- other/bankers_algorithm.py | 8 ++-- physics/in_static_equilibrium.py | 2 +- requirements.txt | 4 +- ..._tweets.py => get_user_tweets.py.DISABLED} | 0 35 files changed, 135 insertions(+), 131 deletions(-) rename web_programming/{get_user_tweets.py => get_user_tweets.py.DISABLED} (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a113b4608..dad2b2fac 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: 3.12 + python-version: 3.13 allow-prereleases: true - uses: actions/cache@v4 with: @@ -26,6 +26,10 @@ jobs: # TODO: #8818 Re-enable quantum tests run: pytest --ignore=quantum/q_fourier_transform.py + --ignore=computer_vision/cnn_classification.py + --ignore=dynamic_programming/k_means_clustering_tensorflow.py + --ignore=machine_learning/lstm/lstm_prediction.py + --ignore=neural_network/input_data.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 955001e2a..56ab8377f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1343,7 +1343,6 @@ * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) - * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) * [Instagram Crawler](web_programming/instagram_crawler.py) * [Instagram Pic](web_programming/instagram_pic.py) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 634f04957..54632160d 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2])) 3.1622776601683795 """ - return np.sqrt(((original - reference) ** 
2).mean()) + return float(np.sqrt(((original - reference) ** 2).mean())) def normalize_image( @@ -273,7 +273,7 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]: >>> morphological = opening_filter(binary) >>> mask_1 = binary_mask(gray, morphological)[0] >>> concurrency = matrix_concurrency(mask_1, (0, 1)) - >>> haralick_descriptors(concurrency) + >>> [float(f) for f in haralick_descriptors(concurrency)] [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] """ # Function np.indices could be used for bigger input types, @@ -335,7 +335,7 @@ def get_descriptors( return np.concatenate(descriptors, axis=None) -def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: +def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float: """ Simple method for calculating the euclidean distance between two points, with type np.ndarray. @@ -346,7 +346,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: >>> euclidean(a, b) 3.3166247903554 """ - return np.sqrt(np.sum(np.square(point_1 - point_2))) + return float(np.sqrt(np.sum(np.square(point_1 - point_2)))) def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]: diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 099bd2871..9cfdf0c12 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -73,7 +73,7 @@ class BinomialHeap: 30 Deleting - delete() test - >>> [first_heap.delete_min() for _ in range(20)] + >>> [int(first_heap.delete_min()) for _ in range(20)] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Create a new Heap @@ -118,7 +118,7 @@ class BinomialHeap: values in merged heap; (merge is inplace) >>> results = [] >>> while not first_heap.is_empty(): - ... results.append(first_heap.delete_min()) + ... results.append(int(first_heap.delete_min())) >>> results [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34] """ @@ -354,7 +354,7 @@ class BinomialHeap: # Merge heaps self.merge_heaps(new_heap) - return min_value + return int(min_value) def pre_order(self): """ diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index 768f2ad94..d06e76be7 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -39,7 +39,7 @@ class CircularConvolution: Usage: >>> convolution = CircularConvolution() >>> convolution.circular_convolution() - [10, 10, 6, 14] + [10.0, 10.0, 6.0, 14.0] >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6] >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5] @@ -54,7 +54,7 @@ class CircularConvolution: >>> convolution.first_signal = [1, -1, 2, 3, -1] >>> convolution.second_signal = [1, 2, 3] >>> convolution.circular_convolution() - [8, -2, 3, 4, 11] + [8.0, -2.0, 3.0, 4.0, 11.0] """ @@ -91,7 +91,7 @@ class CircularConvolution: final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal)) # rounding-off to two decimal places - return [round(i, 2) for i in final_signal] + return [float(round(i, 2)) for i in final_signal] if __name__ == "__main__": diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 1eef4573b..bea599d44 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -40,11 +40,11 @@ nb_pixels = 666 def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. 
- >>> eval_exponential(0, 0) + >>> float(eval_exponential(0, 0)) 1.0 - >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 + >>> bool(abs(eval_exponential(1, np.pi*1.j)) < 1e-15) True - >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 + >>> bool(abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15) True """ return np.exp(z_values) + c_parameter @@ -98,20 +98,20 @@ def iterate_function( >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0])) 0j - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1])) (1+0j) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2])) (256+0j) """ diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 9d906f179..6c7dcd4f0 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -30,9 +30,9 @@ class BezierCurve: returns the x, y values of basis function at time t >>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.basis_function(0) + >>> [float(x) for x in curve.basis_function(0)] [1.0, 0.0] - >>> curve.basis_function(1) + >>> [float(x) for x in curve.basis_function(1)] [0.0, 1.0] """ assert 0 <= t <= 1, "Time t must be between 0 and 1." @@ -55,9 +55,9 @@ class BezierCurve: The last point in the curve is when t = 1. >>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.bezier_curve_function(0) + >>> tuple(float(x) for x in curve.bezier_curve_function(0)) (1.0, 1.0) - >>> curve.bezier_curve_function(1) + >>> tuple(float(x) for x in curve.bezier_curve_function(1)) (1.0, 2.0) """ diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py index c23d82343..06293a87d 100644 --- a/graphs/dijkstra_binary_grid.py +++ b/graphs/dijkstra_binary_grid.py @@ -69,7 +69,7 @@ def dijkstra( x, y = predecessors[x, y] path.append(source) # add the source manually path.reverse() - return matrix[destination], path + return float(matrix[destination]), path for i in range(len(dx)): nx, ny = x + dx[i], y + dy[i] diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 24fbd9a5e..83c2ce48c 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -78,7 +78,7 @@ def power_iteration( if is_complex: lambda_ = np.real(lambda_) - return lambda_, vector + return float(lambda_), vector def test_power_iteration() -> None: diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index dc171bacd..a8affe1b7 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -107,8 +107,8 @@ class Tableau: def find_pivot(self) -> tuple[Any, Any]: """Finds the pivot row and column. - >>> Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), - ... 2, 0).find_pivot() + >>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], + ... [1,2,0,1,7.]]), 2, 0).find_pivot()) (1, 0) """ objective = self.objectives[-1] @@ -215,8 +215,8 @@ class Tableau: Max: x1 + x2 ST: x1 + 3x2 <= 4 3x1 + x2 <= 4 - >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), - ... 
2, 0).run_simplex() + >>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0], + ... [1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Standard linear program with 3 variables: @@ -224,21 +224,21 @@ class Tableau: ST: 2x1 + x2 + x3 ≤ 2 x1 + 2x2 + 3x3 ≤ 5 2x1 + 2x2 + x3 ≤ 6 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [-3,-1,-3,0,0,0,0], ... [2,1,1,1,0,0,2], ... [1,2,3,0,1,0,5], ... [2,2,1,0,0,1,6.] - ... ]),3,0).run_simplex() # doctest: +ELLIPSIS + ... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS {'P': 5.4, 'x1': 0.199..., 'x3': 1.6} # Optimal tableau input: - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0, 0, 0.25, 0.25, 2], ... [0, 1, 0.375, -0.125, 1], ... [1, 0, -0.125, 0.375, 1] - ... ]), 2, 0).run_simplex() + ... ]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Non-standard: >= constraints @@ -246,25 +246,25 @@ class Tableau: ST: x1 + x2 + x3 <= 40 2x1 + x2 - x3 >= 10 - x2 + x3 >= 10 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 0, 0, 0, -1, -1, 0, 0, 20], ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], ... [1, 1, 1, 1, 0, 0, 0, 0, 40], ... [2, 1, -1, 0, -1, 0, 1, 0, 10], ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] - ... ]), 3, 2).run_simplex() + ... ]), 3, 2).run_simplex().items()} {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} # Non standard: minimisation and equalities Min: x1 + x2 ST: 2x1 + x2 = 12 6x1 + 5x2 = 40 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [8, 6, 0, 0, 52], ... [1, 1, 0, 0, 0], ... [2, 1, 1, 0, 12], ... [6, 5, 0, 1, 40.], - ... ]), 2, 2).run_simplex() + ... ]), 2, 2).run_simplex().items()} {'P': 7.0, 'x1': 5.0, 'x2': 2.0} @@ -275,7 +275,7 @@ class Tableau: 2x1 + 4x2 <= 48 x1 + x2 >= 10 x1 >= 2 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0], ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0], ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0], @@ -283,7 +283,7 @@ class Tableau: ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0], ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0], ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0] - ... ]), 2, 2).run_simplex() # doctest: +ELLIPSIS + ... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS {'P': 132.0, 'x1': 12.000... 'x2': 5.999...} """ # Stop simplex algorithm from cycling. @@ -307,11 +307,11 @@ class Tableau: def interpret_tableau(self) -> dict[str, float]: """Given the final tableau, add the corresponding values of the basic decision variables to the `output_dict` - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0,0,0.875,0.375,5], ... [0,1,0.375,-0.125,1], ... [1,0,-0.125,0.375,1] - ... ]),2, 0).interpret_tableau() + ... ]),2, 0).interpret_tableau().items()} {'P': 5.0, 'x1': 1.0, 'x2': 1.0} """ # P = RHS of final tableau diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index d0bd6ab0b..72970431c 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -26,15 +26,15 @@ class DecisionTree: >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) >>> test_prediction = float(6) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... 
test_prediction))) True >>> test_labels = np.array([1,2,3]) >>> test_prediction = float(2) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... test_prediction))) True """ if labels.ndim != 1: diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index dbb86caf8..9d81b03cd 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -28,7 +28,7 @@ def linear_regression_prediction( input : training data (date, total_user, total_event) in list of float output : list of total user prediction in float >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2]) - >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors + >>> bool(abs(n - 5.0) < 1e-6) # Checking precision because of floating point errors True """ x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]) @@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> ) model_fit = model.fit(disp=False, maxiter=600, method="nm") result = model_fit.predict(1, len(test_match), exog=[test_match]) - return result[0] + return float(result[0]) def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float: @@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1) regressor.fit(x_train, train_user) y_pred = regressor.predict(x_test) - return y_pred[0] + return float(y_pred[0]) def interquartile_range_checker(train_user: list) -> float: @@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float: q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) - return low_lim + return float(low_lim) def data_safety_checker(list_vote: list, actual_result: float) -> bool: diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py index a43757c5c..fbc1b8bd2 100644 --- a/machine_learning/k_nearest_neighbours.py +++ b/machine_learning/k_nearest_neighbours.py @@ -42,7 +42,7 @@ class KNN: >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11])) 10.0 """ - return np.linalg.norm(a - b) + return float(np.linalg.norm(a - b)) def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str: """ diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 090af5382..496026631 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray: @returns: returns value in the range 0 to 1 Examples: - >>> sigmoid_function(4) + >>> float(sigmoid_function(4)) 0.9820137900379085 >>> sigmoid_function(np.array([-3, 3])) array([0.04742587, 0.95257413]) @@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float: References: - https://en.wikipedia.org/wiki/Logistic_regression """ - return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() + return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()) def log_likelihood(x, y, weights): diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 150035661..0bd9aa8b5 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -22,7 +22,7 @@ 
def binary_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_cross_entropy(true_labels, predicted_probs) + >>> float(binary_cross_entropy(true_labels, predicted_probs)) 0.2529995012327421 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -68,7 +68,7 @@ def binary_focal_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_focal_cross_entropy(true_labels, predicted_probs) + >>> float(binary_focal_cross_entropy(true_labels, predicted_probs)) 0.008257977659239775 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -108,7 +108,7 @@ def categorical_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) - >>> categorical_cross_entropy(true_labels, pred_probs) + >>> float(categorical_cross_entropy(true_labels, pred_probs)) 0.567395975254385 >>> true_labels = np.array([[1, 0], [0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) @@ -179,13 +179,13 @@ def categorical_focal_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) >>> alpha = np.array([0.6, 0.2, 0.7]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.0025966118981496423 >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> alpha = np.array([0.25, 0.25, 0.25]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.23315276982014324 >>> true_labels = np.array([[1, 0], [0, 1]]) @@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_labels = np.array([-1, 1, 1, -1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(true_labels, pred) + >>> float(hinge_loss(true_labels, pred)) 1.52 >>> true_labels = np.array([-1, 1, 1, -1, 1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) @@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + >>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) - >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) @@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028) + >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028)) True >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 
0.2]) @@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)) True >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)) False >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2]) @@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_logarithmic_error(true_values, predicted_values) + >>> float(mean_squared_logarithmic_error(true_values, predicted_values)) 0.0030860877925181344 >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -459,17 +459,17 @@ def mean_absolute_percentage_error( Examples: >>> y_true = np.array([10, 20, 30, 40]) >>> y_pred = np.array([12, 18, 33, 45]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.13125 >>> y_true = np.array([1, 2, 3, 4]) >>> y_pred = np.array([2, 3, 4, 5]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.5208333333333333 >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24]) >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.064671076436071 """ if len(y_true) != len(y_pred): @@ -511,7 +511,7 @@ def perplexity_loss( ... [[0.03, 0.26, 0.21, 0.18, 0.30], ... [0.28, 0.10, 0.33, 0.15, 0.12]]] ... 
) - >>> perplexity_loss(y_true, y_pred) + >>> float(perplexity_loss(y_true, y_pred)) 5.0247347775367945 >>> y_true = np.array([[1, 4], [2, 3]]) >>> y_pred = np.array( @@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> >>> y_true = np.array([3, 5, 2, 7]) >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.012500000000000022 >>> y_true = np.array([2, 4, 6]) >>> y_pred = np.array([1, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.5 >>> y_true = np.array([1, 3, 5, 7]) >>> y_pred = np.array([1, 3, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.0 >>> y_true = np.array([1, 3, 5]) @@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4]) - >>> kullback_leibler_divergence(true_labels, predicted_probs) + >>> float(kullback_leibler_divergence(true_labels, predicted_probs)) 0.030478754035472025 >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5]) diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py index a1e99ce4a..dcc3151d5 100644 --- a/machine_learning/mfcc.py +++ b/machine_learning/mfcc.py @@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray: Examples: >>> audio = np.array([1, 2, 3, 4, 5]) >>> normalized_audio = normalize(audio) - >>> np.max(normalized_audio) + >>> float(np.max(normalized_audio)) 1.0 - >>> np.min(normalized_audio) + >>> float(np.min(normalized_audio)) 0.2 """ # Divide the entire audio signal by the maximum absolute value @@ -229,7 +229,8 @@ def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarra Examples: >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) >>> audio_fft = calculate_fft(audio_windowed, ftt_size=4) - >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j])) + >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, + ... -1.5-0.8660254j]))) True """ # Transpose the audio data to have time in rows and channels in columns @@ -281,7 +282,7 @@ def freq_to_mel(freq: float) -> float: The frequency in mel scale. Examples: - >>> round(freq_to_mel(1000), 2) + >>> float(round(freq_to_mel(1000), 2)) 999.99 """ # Use the formula to convert frequency to the mel scale @@ -321,7 +322,7 @@ def mel_spaced_filterbank( Mel-spaced filter bank. Examples: - >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10) + >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)) 0.0004603981 """ freq_min = 0 @@ -438,7 +439,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra The DCT basis matrix. 
Examples: - >>> round(discrete_cosine_transform(3, 5)[0][0], 5) + >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5)) 0.44721 """ basis = np.empty((dct_filter_num, filter_num)) diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py index e99a4131e..40f998c7d 100644 --- a/machine_learning/multilayer_perceptron_classifier.py +++ b/machine_learning/multilayer_perceptron_classifier.py @@ -17,7 +17,7 @@ Y = clf.predict(test) def wrapper(y): """ - >>> wrapper(Y) + >>> [int(x) for x in wrapper(Y)] [0, 0, 1] """ return list(y) diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py index 08b969a95..f6b685f4f 100644 --- a/machine_learning/scoring_functions.py +++ b/machine_learning/scoring_functions.py @@ -20,11 +20,11 @@ def mae(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mae(predict,actual),decimals = 2) + >>> float(np.around(mae(predict,actual),decimals = 2)) 0.67 >>> actual = [1,1,1];predict = [1,1,1] - >>> mae(predict,actual) + >>> float(mae(predict,actual)) 0.0 """ predict = np.array(predict) @@ -41,11 +41,11 @@ def mse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mse(predict,actual),decimals = 2) + >>> float(np.around(mse(predict,actual),decimals = 2)) 1.33 >>> actual = [1,1,1];predict = [1,1,1] - >>> mse(predict,actual) + >>> float(mse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -63,11 +63,11 @@ def rmse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(rmse(predict,actual),decimals = 2) + >>> float(np.around(rmse(predict,actual),decimals = 2)) 1.15 >>> actual = [1,1,1];predict = [1,1,1] - >>> rmse(predict,actual) + >>> float(rmse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -84,12 +84,10 @@ def rmse(predict, actual): def rmsle(predict, actual): """ Examples(rounded for precision): - >>> actual = [10,10,30];predict = [10,2,30] - >>> np.around(rmsle(predict,actual),decimals = 2) + >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2)) 0.75 - >>> actual = [1,1,1];predict = [1,1,1] - >>> rmsle(predict,actual) + >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1])) 0.0 """ predict = np.array(predict) @@ -117,12 +115,12 @@ def mbd(predict, actual): Here the model overpredicts >>> actual = [1,2,3];predict = [2,3,4] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) 50.0 Here the model underpredicts >>> actual = [1,2,3];predict = [0,1,1] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) -66.67 """ predict = np.array(predict) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 0bc3b17d7..c8a573796 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -153,7 +153,7 @@ def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float: >>> cosine_similarity(np.array([1, 2]), np.array([6, 32])) 0.9615239476408232 """ - return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)) + return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))) if __name__ == "__main__": diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index 24046115e..d17c9044a 100644 --- 
a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -14,11 +14,11 @@ def norm_squared(vector: ndarray) -> float: Returns: float: squared second norm of vector - >>> norm_squared([1, 2]) + >>> int(norm_squared([1, 2])) 5 - >>> norm_squared(np.asarray([1, 2])) + >>> int(norm_squared(np.asarray([1, 2]))) 5 - >>> norm_squared([0, 0]) + >>> int(norm_squared([0, 0])) 0 """ return np.dot(vector, vector) diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 9b29b37b0..aa7f3efc7 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -13,13 +13,13 @@ def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors. A vector is defined as a list, tuple, or numpy 1D array. - >>> euclidean_distance((0, 0), (2, 2)) + >>> float(euclidean_distance((0, 0), (2, 2))) 2.8284271247461903 - >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])) + >>> float(euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))) 3.4641016151377544 - >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])) + >>> float(euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))) 8.0 - >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]) + >>> float(euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])) 8.0 """ return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)) diff --git a/maths/euler_method.py b/maths/euler_method.py index 30f193e6d..c6adb07e2 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -26,7 +26,7 @@ def explicit_euler( ... return y >>> y0 = 1 >>> y = explicit_euler(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 144.77277243257308 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/euler_modified.py b/maths/euler_modified.py index d02123e1e..bb282e9f0 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -24,13 +24,13 @@ def euler_modified( >>> def f1(x, y): ... return -2*x*(y**2) >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0) - >>> y[-1] + >>> float(y[-1]) 0.503338255442106 >>> import math >>> def f2(x, y): ... return -2*y + (x**3)*math.exp(-2*x) >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3) - >>> y[-1] + >>> float(y[-1]) 0.5525976431951775 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/gaussian.py b/maths/gaussian.py index 0e02010a9..b1e62ea77 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -5,18 +5,18 @@ Reference: https://en.wikipedia.org/wiki/Gaussian_function from numpy import exp, pi, sqrt -def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: +def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float: """ - >>> gaussian(1) + >>> float(gaussian(1)) 0.24197072451914337 - >>> gaussian(24) + >>> float(gaussian(24)) 3.342714441794458e-126 - >>> gaussian(1, 4, 2) + >>> float(gaussian(1, 4, 2)) 0.06475879783294587 - >>> gaussian(1, 5, 3) + >>> float(gaussian(1, 5, 3)) 0.05467002489199788 Supports NumPy Arrays @@ -29,7 +29,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: 5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27, 2.14638374e-32, 7.99882776e-38, 1.09660656e-43]) - >>> gaussian(15) + >>> float(gaussian(15)) 5.530709549844416e-50 >>> gaussian([1,2, 'string']) @@ -47,10 +47,10 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: ... 
OverflowError: (34, 'Result too large') - >>> gaussian(10**-326) + >>> float(gaussian(10**-326)) 0.3989422804014327 - >>> gaussian(2523, mu=234234, sigma=3425) + >>> float(gaussian(2523, mu=234234, sigma=3425)) 0.0 """ return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2)) diff --git a/maths/minkowski_distance.py b/maths/minkowski_distance.py index 3237124e8..99f02e31e 100644 --- a/maths/minkowski_distance.py +++ b/maths/minkowski_distance.py @@ -19,7 +19,7 @@ def minkowski_distance( >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2) 8.0 >>> import numpy as np - >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)) + >>> bool(np.isclose(5.0, minkowski_distance([5.0], [0.0], 3))) True >>> minkowski_distance([1.0], [2.0], -1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py index fb4061710..26244a585 100644 --- a/maths/numerical_analysis/adams_bashforth.py +++ b/maths/numerical_analysis/adams_bashforth.py @@ -102,7 +102,7 @@ class AdamsBashforth: >>> def f(x, y): ... return x + y >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3() - >>> y[3] + >>> float(y[3]) 0.15533333333333332 >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3() @@ -140,9 +140,9 @@ class AdamsBashforth: ... return x + y >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4() - >>> y[4] + >>> float(y[4]) 0.30699999999999994 - >>> y[5] + >>> float(y[5]) 0.5771083333333333 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4() @@ -185,7 +185,7 @@ class AdamsBashforth: >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536], ... 0.2, 1).step_5() - >>> y[-1] + >>> float(y[-1]) 0.05436839444444452 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5() diff --git a/maths/numerical_analysis/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py index 4cac017ee..3a25b0fb0 100644 --- a/maths/numerical_analysis/runge_kutta.py +++ b/maths/numerical_analysis/runge_kutta.py @@ -19,7 +19,7 @@ def runge_kutta(f, y0, x0, h, x_end): ... return y >>> y0 = 1 >>> y = runge_kutta(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 148.41315904125113 """ n = int(np.ceil((x_end - x0) / h)) diff --git a/maths/numerical_analysis/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py index 8181fe301..0fbd60a35 100644 --- a/maths/numerical_analysis/runge_kutta_fehlberg_45.py +++ b/maths/numerical_analysis/runge_kutta_fehlberg_45.py @@ -34,12 +34,12 @@ def runge_kutta_fehlberg_45( >>> def f(x, y): ... return 1 + y**2 >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1) - >>> y[1] + >>> float(y[1]) 0.2027100937470787 >>> def f(x,y): ... return x >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0) - >>> y[1] + >>> float(y[1]) -0.18000000000000002 >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py index 451cde4cb..5d9672679 100644 --- a/maths/numerical_analysis/runge_kutta_gills.py +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -34,7 +34,7 @@ def runge_kutta_gills( >>> def f(x, y): ... 
return (x-y)/2 >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5) - >>> y[-1] + >>> float(y[-1]) 3.4104259225717537 >>> def f(x,y): diff --git a/maths/softmax.py b/maths/softmax.py index 04cf77525..95c95e66f 100644 --- a/maths/softmax.py +++ b/maths/softmax.py @@ -28,7 +28,7 @@ def softmax(vector): The softmax vector adds up to one. We need to ceil to mitigate for precision - >>> np.ceil(np.sum(softmax([1,2,3,4]))) + >>> float(np.ceil(np.sum(softmax([1,2,3,4])))) 1.0 >>> vec = np.array([5,5]) diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index d488de590..1b7c0beed 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -64,7 +64,7 @@ class TwoHiddenLayerNeuralNetwork: >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> array_sum = np.sum(res) - >>> np.isnan(array_sum) + >>> bool(np.isnan(array_sum)) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the @@ -105,7 +105,7 @@ class TwoHiddenLayerNeuralNetwork: >>> res = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (res == updated_weights).all() + >>> bool((res == updated_weights).all()) False """ @@ -171,7 +171,7 @@ class TwoHiddenLayerNeuralNetwork: >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (first_iteration_weights == updated_weights).all() + >>> bool((first_iteration_weights == updated_weights).all()) False """ for iteration in range(1, iterations + 1): diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py index 858eb0b2c..d4254f479 100644 --- a/other/bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -87,9 +87,11 @@ class BankersAlgorithm: This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" Return: {0: [a: int, b: int], 1: [c: int, d: int]} - >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table, - ... test_maximum_claim_table)._BankersAlgorithm__need_index_manager() - ... ) # doctest: +NORMALIZE_WHITESPACE + >>> index_control = BankersAlgorithm( + ... test_claim_vector, test_allocated_res_table, test_maximum_claim_table + ... )._BankersAlgorithm__need_index_manager() + >>> {key: [int(x) for x in value] for key, value + ... 
in index_control.items()} # doctest: +NORMALIZE_WHITESPACE {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]} """ diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py index e3c2f9d07..fb5a9b5ff 100644 --- a/physics/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -53,7 +53,7 @@ def in_static_equilibrium( # summation of moments is zero moments: NDArray[float64] = cross(location, forces) sum_moments: float = sum(moments) - return abs(sum_moments) < eps + return bool(abs(sum_moments) < eps) if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index bb3d67139..afbf25ba6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ beautifulsoup4 fake_useragent imageio -keras ; python_version < '3.12' +keras lxml matplotlib numpy @@ -17,7 +17,7 @@ rich scikit-learn statsmodels sympy -tensorflow +tensorflow ; python_version < '3.13' tweepy # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py.DISABLED similarity index 100% rename from web_programming/get_user_tweets.py rename to web_programming/get_user_tweets.py.DISABLED
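
Why nearly every hunk above casts a result to float(), int(), or bool(): Python 3.13 needs the NumPy 2.x series (the first with 3.13 wheels), and NumPy 2.0 adopted NEP 51, which changed the repr of NumPy scalars from the bare value (0.67) to an annotated form (np.float64(0.67)). Doctests compare printed output verbatim, so each doctest that printed a NumPy scalar either casts the result to a builtin in the test or, where the function is annotated as returning a plain float anyway, moves the cast into the return statement. A minimal sketch of the behavior being worked around, assuming NumPy >= 2.0 is installed:

    import numpy as np

    mean = np.array([1.0, 2.0]).mean()  # a NumPy scalar (np.float64)

    # NumPy 1.x printed '1.5'; NumPy 2.x prints 'np.float64(1.5)' per NEP 51,
    # so a doctest that expects the bare literal fails after the upgrade.
    print(repr(mean))

    # Casting to the builtin type prints identically on both major versions,
    # which is what the float()/int()/bool() wrappers above restore.
    print(repr(float(mean)))  # 1.5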
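
The requirements.txt hunk applies the same upgrade logic as a PEP 508 environment marker: keras loses its old python_version < '3.12' guard, while tensorflow gains python_version < '3.13', presumably because TensorFlow had no Python 3.13 wheels when this patch was written; the tests newly ignored in build.yml are the TensorFlow-dependent ones. A small sketch of how pip evaluates such a marker, using the packaging library (which pip vendors):

    from packaging.markers import Marker

    marker = Marker("python_version < '3.13'")

    # pip evaluates the marker against the running interpreter; overriding
    # the environment here just makes both outcomes visible at once.
    print(marker.evaluate({"python_version": "3.12"}))  # True  -> install tensorflow
    print(marker.evaluate({"python_version": "3.13"}))  # False -> skip tensorflow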