From 660d2bb66c8ca03e2225090b5c638ffb0fd14a60 Mon Sep 17 00:00:00 2001 From: Paul <56065602+ZeroDayOwl@users.noreply.github.com> Date: Thu, 6 Oct 2022 23:19:34 +0600 Subject: [PATCH 001/368] Add algorithm for Newton's Law of Gravitation (#6626) * Add algorithm for Newton's Law of Gravitation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/newtons_law_of_gravitation.py Co-authored-by: Christian Clauss * One and only one argument must be 0 * Update newtons_law_of_gravitation.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/newtons_law_of_gravitation.py | 100 ++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 physics/newtons_law_of_gravitation.py diff --git a/physics/newtons_law_of_gravitation.py b/physics/newtons_law_of_gravitation.py new file mode 100644 index 000000000..0bb27bb24 --- /dev/null +++ b/physics/newtons_law_of_gravitation.py @@ -0,0 +1,100 @@ +""" +Title : Finding the value of either Gravitational Force, one of the masses or distance +provided that the other three parameters are given. + +Description : Newton's Law of Universal Gravitation explains the presence of force of +attraction between bodies having a definite mass situated at a distance. It is usually +stated as that, every particle attracts every other particle in the universe with a +force that is directly proportional to the product of their masses and inversely +proportional to the square of the distance between their centers. The publication of the +theory has become known as the "first great unification", as it marked the unification +of the previously described phenomena of gravity on Earth with known astronomical +behaviors. + +The equation for the universal gravitation is as follows: +F = (G * mass_1 * mass_2) / (distance)^2 + +Source : +- https://en.wikipedia.org/wiki/Newton%27s_law_of_universal_gravitation +- Newton (1687) "Philosophiæ Naturalis Principia Mathematica" +""" + +from __future__ import annotations + +# Define the Gravitational Constant G and the function +GRAVITATIONAL_CONSTANT = 6.6743e-11 # unit of G : m^3 * kg^-1 * s^-2 + + +def gravitational_law( + force: float, mass_1: float, mass_2: float, distance: float +) -> dict[str, float]: + + """ + Input Parameters + ---------------- + force : magnitude in Newtons + + mass_1 : mass in Kilograms + + mass_2 : mass in Kilograms + + distance : distance in Meters + + Returns + ------- + result : dict name, value pair of the parameter having Zero as it's value + + Returns the value of one of the parameters specified as 0, provided the values of + other parameters are given. + >>> gravitational_law(force=0, mass_1=5, mass_2=10, distance=20) + {'force': 8.342875e-12} + + >>> gravitational_law(force=7367.382, mass_1=0, mass_2=74, distance=3048) + {'mass_1': 1.385816317292268e+19} + + >>> gravitational_law(force=36337.283, mass_1=0, mass_2=0, distance=35584) + Traceback (most recent call last): + ... + ValueError: One and only one argument must be 0 + + >>> gravitational_law(force=36337.283, mass_1=-674, mass_2=0, distance=35584) + Traceback (most recent call last): + ... + ValueError: Mass can not be negative + + >>> gravitational_law(force=-847938e12, mass_1=674, mass_2=0, distance=9374) + Traceback (most recent call last): + ... 
+ ValueError: Gravitational force can not be negative + """ + + product_of_mass = mass_1 * mass_2 + + if (force, mass_1, mass_2, distance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if force < 0: + raise ValueError("Gravitational force can not be negative") + if distance < 0: + raise ValueError("Distance can not be negative") + if mass_1 < 0 or mass_2 < 0: + raise ValueError("Mass can not be negative") + if force == 0: + force = GRAVITATIONAL_CONSTANT * product_of_mass / (distance**2) + return {"force": force} + elif mass_1 == 0: + mass_1 = (force) * (distance**2) / (GRAVITATIONAL_CONSTANT * mass_2) + return {"mass_1": mass_1} + elif mass_2 == 0: + mass_2 = (force) * (distance**2) / (GRAVITATIONAL_CONSTANT * mass_1) + return {"mass_2": mass_2} + elif distance == 0: + distance = (GRAVITATIONAL_CONSTANT * product_of_mass / (force)) ** 0.5 + return {"distance": distance} + raise ValueError("One and only one argument must be 0") + + +# Run doctest +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5894554d41116af83152b1ea59fbf78303d87966 Mon Sep 17 00:00:00 2001 From: Jordan Rinder Date: Sat, 8 Oct 2022 18:28:17 -0400 Subject: [PATCH 002/368] Add Catalan number to maths (#6845) * Add Catalan number to maths * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 4 +++- maths/catalan_number.py | 51 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 maths/catalan_number.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 64e9d5333..668da4761 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -475,6 +475,7 @@ * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) + * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) * [Check Polygon](maths/check_polygon.py) * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py) @@ -632,8 +633,9 @@ ## Physics * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) - * [Lorenz Transformation Four Vector](physics/lorenz_transformation_four_vector.py) + * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [N Body Simulation](physics/n_body_simulation.py) + * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) ## Project Euler diff --git a/maths/catalan_number.py b/maths/catalan_number.py new file mode 100644 index 000000000..4a1280a45 --- /dev/null +++ b/maths/catalan_number.py @@ -0,0 +1,51 @@ +""" + +Calculate the nth Catalan number + +Source: + https://en.wikipedia.org/wiki/Catalan_number + +""" + + +def catalan(number: int) -> int: + """ + :param number: nth catalan number to calculate + :return: the nth catalan number + Note: A catalan number is only defined for positive integers + + >>> catalan(5) + 14 + >>> catalan(0) + Traceback (most recent call last): + ... + ValueError: Input value of [number=0] must be > 0 + >>> catalan(-1) + Traceback (most recent call last): + ... + ValueError: Input value of [number=-1] must be > 0 + >>> catalan(5.0) + Traceback (most recent call last): + ... 
+ TypeError: Input value of [number=5.0] must be an integer + """ + + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + + if number < 1: + raise ValueError(f"Input value of [number={number}] must be > 0") + + current_number = 1 + + for i in range(1, number): + current_number *= 4 * i - 2 + current_number //= i + 1 + + return current_number + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 51dba4d743cd2c8d407eea3e9cd4e7b2f69ee34d Mon Sep 17 00:00:00 2001 From: Lakshay Roopchandani <75477853+lakshayroop5@users.noreply.github.com> Date: Sun, 9 Oct 2022 18:23:44 +0530 Subject: [PATCH 003/368] Job sequencing with deadlines (#6854) * completed optimised code for job sequencing with deadline problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * completed optimised code for job sequencing with deadline problem * completed optimized code for job sequencing with deadline problem * completed optimised code for job sequencing with deadline problem * completed optimised code for job sequencing with deadline problem * completed optimised code for job sequencing with deadline problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * completed optimized code for the issue "Job Scheduling with deadlines" * completed optimized code for the issue "Job Scheduling with deadlines" * completed optimized code for the issue "Job Scheduling with deadlines" * Update greedy_methods/job_sequencing_with_deadline.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated reviews * Updated reviews * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename greedy_methods/job_sequencing_with_deadline.py to scheduling/job_sequencing_with_deadline.py Co-authored-by: lakshayroop5 <87693528+lavenroop5@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- scheduling/job_sequencing_with_deadline.py | 48 ++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 scheduling/job_sequencing_with_deadline.py diff --git a/scheduling/job_sequencing_with_deadline.py b/scheduling/job_sequencing_with_deadline.py new file mode 100644 index 000000000..7b23c0b35 --- /dev/null +++ b/scheduling/job_sequencing_with_deadline.py @@ -0,0 +1,48 @@ +def job_sequencing_with_deadlines(num_jobs: int, jobs: list) -> list: + """ + Function to find the maximum profit by doing jobs in a given time frame + + Args: + num_jobs [int]: Number of jobs + jobs [list]: A list of tuples of (job_id, deadline, profit) + + Returns: + max_profit [int]: Maximum profit that can be earned by doing jobs + in a given time frame + + Examples: + >>> job_sequencing_with_deadlines(4, + ... [(1, 4, 20), (2, 1, 10), (3, 1, 40), (4, 1, 30)]) + [2, 60] + >>> job_sequencing_with_deadlines(5, + ... 
[(1, 2, 100), (2, 1, 19), (3, 2, 27), (4, 1, 25), (5, 1, 15)]) + [2, 127] + """ + + # Sort the jobs in descending order of profit + jobs = sorted(jobs, key=lambda value: value[2], reverse=True) + + # Create a list of size equal to the maximum deadline + # and initialize it with -1 + max_deadline = max(jobs, key=lambda value: value[1])[1] + time_slots = [-1] * max_deadline + + # Finding the maximum profit and the count of jobs + count = 0 + max_profit = 0 + for job in jobs: + # Find a free time slot for this job + # (Note that we start from the last possible slot) + for i in range(job[1] - 1, -1, -1): + if time_slots[i] == -1: + time_slots[i] = job[0] + count += 1 + max_profit += job[2] + break + return [count, max_profit] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0a3433eaed6c8369f6c45b3abf70ee33a3a74910 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Oct 2022 22:04:33 +0200 Subject: [PATCH 004/368] [pre-commit.ci] pre-commit autoupdate (#6940) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/psf/black: 22.8.0 → 22.10.0](https://github.com/psf/black/compare/22.8.0...22.10.0) - [github.com/asottile/pyupgrade: v2.38.2 → v3.0.0](https://github.com/asottile/pyupgrade/compare/v2.38.2...v3.0.0) - [github.com/pre-commit/mirrors-mypy: v0.981 → v0.982](https://github.com/pre-commit/mirrors-mypy/compare/v0.981...v0.982) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a2fcf12c9..0abe647b0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 22.8.0 + rev: 22.10.0 hooks: - id: black @@ -26,7 +26,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v2.38.2 + rev: v3.0.0 hooks: - id: pyupgrade args: @@ -42,7 +42,7 @@ repos: - --max-line-length=88 - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.981 + rev: v0.982 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 668da4761..9ef72c403 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -927,6 +927,7 @@ ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) * [Highest Response Ratio Next](scheduling/highest_response_ratio_next.py) + * [Job Sequencing With Deadline](scheduling/job_sequencing_with_deadline.py) * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py) * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py) * [Round Robin](scheduling/round_robin.py) From f0d1a42deb146bebcdf7b1b2ec788c815ede452a Mon Sep 17 00:00:00 2001 From: Shubhajit Roy <81477286+shubhajitroy123@users.noreply.github.com> Date: Wed, 12 Oct 2022 12:52:23 +0530 Subject: [PATCH 005/368] Python program for Carmicheal Number (#6864) * Add files via upload Python program to determine whether a number is Carmichael Number or not. 
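For illustration, a minimal self-contained sketch of the Fermat-style check described above, using only the Python standard library (the helper name here is illustrative and is not part of this patch). Note that, like the isCarmichaelNumber function added below, this condition alone is also satisfied by primes, so a strict Carmichael test additionally needs a compositeness check.

from math import gcd

def passes_fermat_for_all_coprime_bases(number: int) -> bool:
    # A Carmichael number n satisfies b**(n - 1) % n == 1 for every base b
    # with gcd(b, n) == 1; pow(b, n - 1, n) does the modular exponentiation.
    return all(
        pow(base, number - 1, number) == 1
        for base in range(2, number)
        if gcd(base, number) == 1
    )

print(passes_fermat_for_all_coprime_bases(561))  # True: 561 = 3 * 11 * 17
print(passes_fermat_for_all_coprime_bases(15))   # False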
* Rename Carmichael Number.py to carmichael number.py * Rename carmichael number.py to carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/carmichael_number.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/carmichael_number.py | 47 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 maths/carmichael_number.py diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py new file mode 100644 index 000000000..09a4fedfb --- /dev/null +++ b/maths/carmichael_number.py @@ -0,0 +1,47 @@ +""" +== Carmichael Numbers == +A number n is said to be a Carmichael number if it +satisfies the following modular arithmetic condition: + + power(b, n-1) MOD n = 1, + for all b ranging from 1 to n such that b and + n are relatively prime, i.e, gcd(b, n) = 1 + +Examples of Carmichael Numbers: 561, 1105, ... +https://en.wikipedia.org/wiki/Carmichael_number +""" + + +def gcd(a: int, b: int) -> int: + if a < b: + return gcd(b, a) + if a % b == 0: + return b + return gcd(b, a % b) + + +def power(x: int, y: int, mod: int) -> int: + if y == 0: + return 1 + temp = power(x, y // 2, mod) % mod + temp = (temp * temp) % mod + if y % 2 == 1: + temp = (temp * x) % mod + return temp + + +def isCarmichaelNumber(n: int) -> bool: + b = 2 + while b < n: + if gcd(b, n) == 1 and power(b, n - 1, n) != 1: + return False + b += 1 + return True + + +if __name__ == "__main__": + number = int(input("Enter number: ").strip()) + if isCarmichaelNumber(number): + print(f"{number} is a Carmichael Number.") + else: + print(f"{number} is not a Carmichael Number.") From a04a6365dee01bebf382809a5638b6fd0d0a51e6 Mon Sep 17 00:00:00 2001 From: Martmists Date: Wed, 12 Oct 2022 15:19:00 +0200 Subject: [PATCH 006/368] Add Equal Loudness Filter (#7019) * Add Equal Loudness Filter Signed-off-by: Martmists * NoneType return on __init__ Signed-off-by: Martmists * Add data to JSON as requested by @CenTdemeern1 in a not very polite manner Signed-off-by: Martmists * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 'modernize' Signed-off-by: Martmists * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update audio_filters/equal_loudness_filter.py Co-authored-by: Christian Clauss * Update equal_loudness_filter.py * Update equal_loudness_filter.py * Finally!! 
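For reference, a usage sketch of the filter added by this patch (see the diff below): the filter is stateful, so one instance processes a block of samples in order, one sample at a time. The helper function name is illustrative only, and the example assumes the yulewalker dependency added to requirements.txt is installed.

from audio_filters.equal_loudness_filter import EqualLoudnessFilter

def apply_equal_loudness(samples: list[float], samplerate: int = 44100) -> list[float]:
    # EqualLoudnessFilter cascades a yulewalk fit of the inverted loudness
    # curve with a 150 Hz Butterworth high-pass; process() handles one sample.
    filt = EqualLoudnessFilter(samplerate)
    return [filt.process(sample) for sample in samples]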
* Arrgghh Signed-off-by: Martmists Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- audio_filters/equal_loudness_filter.py | 61 +++++++++++++++++++++ audio_filters/loudness_curve.json | 76 ++++++++++++++++++++++++++ requirements.txt | 1 + 3 files changed, 138 insertions(+) create mode 100644 audio_filters/equal_loudness_filter.py create mode 100644 audio_filters/loudness_curve.json diff --git a/audio_filters/equal_loudness_filter.py b/audio_filters/equal_loudness_filter.py new file mode 100644 index 000000000..b9a3c50e1 --- /dev/null +++ b/audio_filters/equal_loudness_filter.py @@ -0,0 +1,61 @@ +from json import loads +from pathlib import Path + +import numpy as np +from yulewalker import yulewalk + +from audio_filters.butterworth_filter import make_highpass +from audio_filters.iir_filter import IIRFilter + +data = loads((Path(__file__).resolve().parent / "loudness_curve.json").read_text()) + + +class EqualLoudnessFilter: + r""" + An equal-loudness filter which compensates for the human ear's non-linear response + to sound. + This filter corrects this by cascading a yulewalk filter and a butterworth filter. + + Designed for use with samplerate of 44.1kHz and above. If you're using a lower + samplerate, use with caution. + + Code based on matlab implementation at https://bit.ly/3eqh2HU + (url shortened for flake8) + + Target curve: https://i.imgur.com/3g2VfaM.png + Yulewalk response: https://i.imgur.com/J9LnJ4C.png + Butterworth and overall response: https://i.imgur.com/3g2VfaM.png + + Images and original matlab implementation by David Robinson, 2001 + """ + + def __init__(self, samplerate: int = 44100) -> None: + self.yulewalk_filter = IIRFilter(10) + self.butterworth_filter = make_highpass(150, samplerate) + + # pad the data to nyquist + curve_freqs = np.array(data["frequencies"] + [max(20000.0, samplerate / 2)]) + curve_gains = np.array(data["gains"] + [140]) + + # Convert to angular frequency + freqs_normalized = curve_freqs / samplerate * 2 + # Invert the curve and normalize to 0dB + gains_normalized = np.power(10, (np.min(curve_gains) - curve_gains) / 20) + + # Scipy's `yulewalk` function is a stub, so we're using the + # `yulewalker` library instead. + # This function computes the coefficients using a least-squares + # fit to the specified curve. 
+ ya, yb = yulewalk(10, freqs_normalized, gains_normalized) + self.yulewalk_filter.set_coefficients(ya, yb) + + def process(self, sample: float) -> float: + """ + Process a single sample through both filters + + >>> filt = EqualLoudnessFilter() + >>> filt.process(0.0) + 0.0 + """ + tmp = self.yulewalk_filter.process(sample) + return self.butterworth_filter.process(tmp) diff --git a/audio_filters/loudness_curve.json b/audio_filters/loudness_curve.json new file mode 100644 index 000000000..fc066a081 --- /dev/null +++ b/audio_filters/loudness_curve.json @@ -0,0 +1,76 @@ +{ + "_comment": "The following is a representative average of the Equal Loudness Contours as measured by Robinson and Dadson, 1956", + "_doi": "10.1088/0508-3443/7/5/302", + "frequencies": [ + 0, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + 1000, + 1500, + 2000, + 2500, + 3000, + 3700, + 4000, + 5000, + 6000, + 7000, + 8000, + 9000, + 10000, + 12000, + 15000, + 20000 + ], + "gains": [ + 120, + 113, + 103, + 97, + 93, + 91, + 89, + 87, + 86, + 85, + 78, + 76, + 76, + 76, + 76, + 77, + 78, + 79.5, + 80, + 79, + 77, + 74, + 71.5, + 70, + 70.5, + 74, + 79, + 84, + 86, + 86, + 85, + 95, + 110, + 125 + ] +} diff --git a/requirements.txt b/requirements.txt index 294494acf..0fbc1cc4b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,3 +17,4 @@ tensorflow texttable tweepy xgboost +yulewalker From d15bf7d492bc778682f80392bfd559074c4adbec Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:05:31 +0530 Subject: [PATCH 007/368] Add typing to data_structures/heap/heap_generic.py (#7044) * Update heap_generic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update heap_generic.py * Update heap_generic.py * Update heap_generic.py * Update heap_generic.py * Update heap_generic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/heap/heap_generic.py | 35 +++++++++++++++------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py index 553cb9451..e7831cd45 100644 --- a/data_structures/heap/heap_generic.py +++ b/data_structures/heap/heap_generic.py @@ -1,35 +1,38 @@ +from collections.abc import Callable + + class Heap: """ A generic Heap class, can be used as min or max by passing the key function accordingly. """ - def __init__(self, key=None): + def __init__(self, key: Callable | None = None) -> None: # Stores actual heap items. - self.arr = list() + self.arr: list = list() # Stores indexes of each item for supporting updates and deletion. - self.pos_map = {} + self.pos_map: dict = {} # Stores current size of heap. self.size = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. 
self.key = key or (lambda x: x) - def _parent(self, i): + def _parent(self, i: int) -> int | None: """Returns parent index of given index if exists else None""" return int((i - 1) / 2) if i > 0 else None - def _left(self, i): + def _left(self, i: int) -> int | None: """Returns left-child-index of given index if exists else None""" left = int(2 * i + 1) return left if 0 < left < self.size else None - def _right(self, i): + def _right(self, i: int) -> int | None: """Returns right-child-index of given index if exists else None""" right = int(2 * i + 2) return right if 0 < right < self.size else None - def _swap(self, i, j): + def _swap(self, i: int, j: int) -> None: """Performs changes required for swapping two elements in the heap""" # First update the indexes of the items in index map. self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = ( @@ -39,11 +42,11 @@ class Heap: # Then swap the items in the list. self.arr[i], self.arr[j] = self.arr[j], self.arr[i] - def _cmp(self, i, j): + def _cmp(self, i: int, j: int) -> bool: """Compares the two items using default comparison""" return self.arr[i][1] < self.arr[j][1] - def _get_valid_parent(self, i): + def _get_valid_parent(self, i: int) -> int: """ Returns index of valid parent as per desired ordering among given index and both it's children @@ -59,21 +62,21 @@ class Heap: return valid_parent - def _heapify_up(self, index): + def _heapify_up(self, index: int) -> None: """Fixes the heap in upward direction of given index""" parent = self._parent(index) while parent is not None and not self._cmp(index, parent): self._swap(index, parent) index, parent = parent, self._parent(parent) - def _heapify_down(self, index): + def _heapify_down(self, index: int) -> None: """Fixes the heap in downward direction of given index""" valid_parent = self._get_valid_parent(index) while valid_parent != index: self._swap(index, valid_parent) index, valid_parent = valid_parent, self._get_valid_parent(valid_parent) - def update_item(self, item, item_value): + def update_item(self, item: int, item_value: int) -> None: """Updates given item value in heap if present""" if item not in self.pos_map: return @@ -84,7 +87,7 @@ class Heap: self._heapify_up(index) self._heapify_down(index) - def delete_item(self, item): + def delete_item(self, item: int) -> None: """Deletes given item from heap if present""" if item not in self.pos_map: return @@ -99,7 +102,7 @@ class Heap: self._heapify_up(index) self._heapify_down(index) - def insert_item(self, item, item_value): + def insert_item(self, item: int, item_value: int) -> None: """Inserts given item with given value in heap""" arr_len = len(self.arr) if arr_len == self.size: @@ -110,11 +113,11 @@ class Heap: self.size += 1 self._heapify_up(self.size - 1) - def get_top(self): + def get_top(self) -> tuple | None: """Returns top item tuple (Calculated value, item) from heap if present""" return self.arr[0] if self.size else None - def extract_top(self): + def extract_top(self) -> tuple | None: """ Return top item tuple (Calculated value, item) from heap and removes it as well if present From aeb933bff55734f33268848fb1fcb6a0395297cb Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:07:00 +0530 Subject: [PATCH 008/368] Add typing to data_structures/hashing/hash_table.py (#7040) * Update hash_table.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update hash_table.py * Update hash_table.py 
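As context for these typing-only patches, a brief usage sketch of the Heap class annotated in the previous patch, assuming the repository layout (data_structures/heap/heap_generic.py): with the default key it behaves as a max-heap on the stored value, and a negating key turns it into a min-heap.

from data_structures.heap.heap_generic import Heap

heap = Heap()                 # default key: max-heap on item_value
heap.insert_item(5, 34)
heap.insert_item(6, 31)
heap.insert_item(7, 37)
print(heap.get_top())         # expected [7, 37]: item 7 has the largest value
heap.delete_item(7)
print(heap.extract_top())     # expected [5, 34]

min_heap = Heap(key=lambda x: -x)   # min-heap via a negating key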
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/hash_table.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index f4422de53..1cd71cc4b 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -7,13 +7,18 @@ class HashTable: Basic Hash Table example with open addressing and linear probing """ - def __init__(self, size_table, charge_factor=None, lim_charge=None): + def __init__( + self, + size_table: int, + charge_factor: int | None = None, + lim_charge: float | None = None, + ) -> None: self.size_table = size_table self.values = [None] * self.size_table self.lim_charge = 0.75 if lim_charge is None else lim_charge self.charge_factor = 1 if charge_factor is None else charge_factor - self.__aux_list = [] - self._keys = {} + self.__aux_list: list = [] + self._keys: dict = {} def keys(self): return self._keys From e272b9d6a494036aaa7f71c53d01017a34117bc9 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:14:08 +0530 Subject: [PATCH 009/368] Add typing to data_structures/queue/queue_on_pseudo_stack.py (#7037) * Add typing hacktoberfest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/queue/queue_on_pseudo_stack.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py index 7fa2fb256..9a0c16f61 100644 --- a/data_structures/queue/queue_on_pseudo_stack.py +++ b/data_structures/queue/queue_on_pseudo_stack.py @@ -1,4 +1,5 @@ """Queue represented by a pseudo stack (represented by a list with pop and append)""" +from typing import Any class Queue: @@ -14,7 +15,7 @@ class Queue: @param item item to enqueue""" - def put(self, item): + def put(self, item: Any) -> None: self.stack.append(item) self.length = self.length + 1 @@ -23,7 +24,7 @@ class Queue: @return dequeued item that was dequeued""" - def get(self): + def get(self) -> Any: self.rotate(1) dequeued = self.stack[self.length - 1] self.stack = self.stack[:-1] @@ -35,7 +36,7 @@ class Queue: @param rotation number of times to rotate queue""" - def rotate(self, rotation): + def rotate(self, rotation: int) -> None: for i in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] @@ -45,7 +46,7 @@ class Queue: """Reports item at the front of self @return item at front of self.stack""" - def front(self): + def front(self) -> Any: front = self.get() self.put(front) self.rotate(self.length - 1) @@ -53,5 +54,5 @@ class Queue: """Returns the length of this.stack""" - def size(self): + def size(self) -> int: return self.length From f676055bc6e4f3540c97745ffc19bf62955c9077 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:19:49 +0530 Subject: [PATCH 010/368] Add typing to maths/segmented_sieve.py (#7054) --- maths/segmented_sieve.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index 0054b0595..35ed9702b 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -3,7 +3,7 @@ import math -def sieve(n): +def sieve(n: int) -> 
list[int]: """Segmented Sieve.""" in_prime = [] start = 2 From 922887c38609650dc8eb8eaa9153605eabc45ecd Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <114707091+rohanr18@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:04:01 +0530 Subject: [PATCH 011/368] Add volume of hollow circular cylinder, Exceptions (#6441) * Add volume of hollow circular cylinder, Exceptions * Update volume.py * floats, zeroes tests added * Update volume.py * f-strings --- maths/volume.py | 255 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 223 insertions(+), 32 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index acaed65f4..97c06d7e1 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -1,6 +1,5 @@ """ Find Volumes of Various Shapes. - Wikipedia reference: https://en.wikipedia.org/wiki/Volume """ from __future__ import annotations @@ -11,12 +10,21 @@ from math import pi, pow def vol_cube(side_length: int | float) -> float: """ Calculate the Volume of a Cube. - >>> vol_cube(1) 1.0 >>> vol_cube(3) 27.0 + >>> vol_cube(0) + 0.0 + >>> vol_cube(1.6) + 4.096000000000001 + >>> vol_cube(-1) + Traceback (most recent call last): + ... + ValueError: vol_cube() only accepts non-negative values """ + if side_length < 0: + raise ValueError("vol_cube() only accepts non-negative values") return pow(side_length, 3) @@ -24,10 +32,23 @@ def vol_spherical_cap(height: float, radius: float) -> float: """ Calculate the Volume of the spherical cap. :return 1/3 pi * height ^ 2 * (3 * radius - height) - >>> vol_spherical_cap(1, 2) 5.235987755982988 + >>> vol_spherical_cap(1.6, 2.6) + 16.621119532592402 + >>> vol_spherical_cap(0, 0) + 0.0 + >>> vol_spherical_cap(-1, 2) + Traceback (most recent call last): + ... + ValueError: vol_spherical_cap() only accepts non-negative values + >>> vol_spherical_cap(1, -2) + Traceback (most recent call last): + ... + ValueError: vol_spherical_cap() only accepts non-negative values """ + if height < 0 or radius < 0: + raise ValueError("vol_spherical_cap() only accepts non-negative values") return 1 / 3 * pi * pow(height, 2) * (3 * radius - height) @@ -36,7 +57,6 @@ def vol_spheres_intersect( ) -> float: """ Calculate the volume of the intersection of two spheres. - The intersection is composed by two spherical caps and therefore its volume is the sum of the volumes of the spherical caps. First, it calculates the heights (h1, h2) of the spherical caps, then the two volumes and it returns the sum. @@ -49,10 +69,27 @@ def vol_spheres_intersect( / (2 * centers_distance) if centers_distance is 0 then it returns the volume of the smallers sphere :return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) - >>> vol_spheres_intersect(2, 2, 1) 21.205750411731103 + >>> vol_spheres_intersect(2.6, 2.6, 1.6) + 40.71504079052372 + >>> vol_spheres_intersect(0, 0, 0) + 0.0 + >>> vol_spheres_intersect(-2, 2, 1) + Traceback (most recent call last): + ... + ValueError: vol_spheres_intersect() only accepts non-negative values + >>> vol_spheres_intersect(2, -2, 1) + Traceback (most recent call last): + ... + ValueError: vol_spheres_intersect() only accepts non-negative values + >>> vol_spheres_intersect(2, 2, -1) + Traceback (most recent call last): + ... 
+ ValueError: vol_spheres_intersect() only accepts non-negative values """ + if radius_1 < 0 or radius_2 < 0 or centers_distance < 0: + raise ValueError("vol_spheres_intersect() only accepts non-negative values") if centers_distance == 0: return vol_sphere(min(radius_1, radius_2)) @@ -74,40 +111,81 @@ def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. :return multiple of width, length and height - >>> vol_cuboid(1, 1, 1) 1.0 >>> vol_cuboid(1, 2, 3) 6.0 + >>> vol_cuboid(1.6, 2.6, 3.6) + 14.976 + >>> vol_cuboid(0, 0, 0) + 0.0 + >>> vol_cuboid(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: vol_cuboid() only accepts non-negative values + >>> vol_cuboid(1, -2, 3) + Traceback (most recent call last): + ... + ValueError: vol_cuboid() only accepts non-negative values + >>> vol_cuboid(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: vol_cuboid() only accepts non-negative values """ + if width < 0 or height < 0 or length < 0: + raise ValueError("vol_cuboid() only accepts non-negative values") return float(width * height * length) def vol_cone(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * area_of_base * height - >>> vol_cone(10, 3) 10.0 >>> vol_cone(1, 1) 0.3333333333333333 + >>> vol_cone(1.6, 1.6) + 0.8533333333333335 + >>> vol_cone(0, 0) + 0.0 + >>> vol_cone(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_cone() only accepts non-negative values + >>> vol_cone(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_cone() only accepts non-negative values """ + if height < 0 or area_of_base < 0: + raise ValueError("vol_cone() only accepts non-negative values") return area_of_base * height / 3.0 def vol_right_circ_cone(radius: float, height: float) -> float: """ Calculate the Volume of a Right Circular Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * pi * radius^2 * height - >>> vol_right_circ_cone(2, 3) 12.566370614359172 + >>> vol_right_circ_cone(0, 0) + 0.0 + >>> vol_right_circ_cone(1.6, 1.6) + 4.289321169701265 + >>> vol_right_circ_cone(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_right_circ_cone() only accepts non-negative values + >>> vol_right_circ_cone(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_right_circ_cone() only accepts non-negative values """ + if height < 0 or radius < 0: + raise ValueError("vol_right_circ_cone() only accepts non-negative values") return pi * pow(radius, 2) * height / 3.0 @@ -116,12 +194,25 @@ def vol_prism(area_of_base: float, height: float) -> float: Calculate the Volume of a Prism. Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) :return V = Bh - >>> vol_prism(10, 2) 20.0 >>> vol_prism(11, 1) 11.0 + >>> vol_prism(1.6, 1.6) + 2.5600000000000005 + >>> vol_prism(0, 0) + 0.0 + >>> vol_prism(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_prism() only accepts non-negative values + >>> vol_prism(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_prism() only accepts non-negative values """ + if height < 0 or area_of_base < 0: + raise ValueError("vol_prism() only accepts non-negative values") return float(area_of_base * height) @@ -130,12 +221,25 @@ def vol_pyramid(area_of_base: float, height: float) -> float: Calculate the Volume of a Pyramid. 
Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) :return (1/3) * Bh - >>> vol_pyramid(10, 3) 10.0 >>> vol_pyramid(1.5, 3) 1.5 + >>> vol_pyramid(1.6, 1.6) + 0.8533333333333335 + >>> vol_pyramid(0, 0) + 0.0 + >>> vol_pyramid(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_pyramid() only accepts non-negative values + >>> vol_pyramid(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_pyramid() only accepts non-negative values """ + if height < 0 or area_of_base < 0: + raise ValueError("vol_pyramid() only accepts non-negative values") return area_of_base * height / 3.0 @@ -144,27 +248,44 @@ def vol_sphere(radius: float) -> float: Calculate the Volume of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere :return (4/3) * pi * r^3 - >>> vol_sphere(5) 523.5987755982989 >>> vol_sphere(1) 4.1887902047863905 + >>> vol_sphere(1.6) + 17.15728467880506 + >>> vol_sphere(0) + 0.0 + >>> vol_sphere(-1) + Traceback (most recent call last): + ... + ValueError: vol_sphere() only accepts non-negative values """ + if radius < 0: + raise ValueError("vol_sphere() only accepts non-negative values") return 4 / 3 * pi * pow(radius, 3) -def vol_hemisphere(radius: float): +def vol_hemisphere(radius: float) -> float: """Calculate the volume of a hemisphere Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere Other references: https://www.cuemath.com/geometry/hemisphere :return 2/3 * pi * radius^3 - >>> vol_hemisphere(1) 2.0943951023931953 - >>> vol_hemisphere(7) 718.3775201208659 + >>> vol_hemisphere(1.6) + 8.57864233940253 + >>> vol_hemisphere(0) + 0.0 + >>> vol_hemisphere(-1) + Traceback (most recent call last): + ... + ValueError: vol_hemisphere() only accepts non-negative values """ + if radius < 0: + raise ValueError("vol_hemisphere() only accepts non-negative values") return 2 / 3 * pi * pow(radius, 3) @@ -172,26 +293,93 @@ def vol_circular_cylinder(radius: float, height: float) -> float: """Calculate the Volume of a Circular Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder :return pi * radius^2 * height - >>> vol_circular_cylinder(1, 1) 3.141592653589793 >>> vol_circular_cylinder(4, 3) 150.79644737231007 + >>> vol_circular_cylinder(1.6, 1.6) + 12.867963509103795 + >>> vol_circular_cylinder(0, 0) + 0.0 + >>> vol_circular_cylinder(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_circular_cylinder() only accepts non-negative values + >>> vol_circular_cylinder(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_circular_cylinder() only accepts non-negative values """ + if height < 0 or radius < 0: + raise ValueError("vol_circular_cylinder() only accepts non-negative values") return pi * pow(radius, 2) * height -def vol_conical_frustum(height: float, radius_1: float, radius_2: float): +def vol_hollow_circular_cylinder( + inner_radius: float, outer_radius: float, height: float +) -> float: + """Calculate the Volume of a Hollow Circular Cylinder. + >>> vol_hollow_circular_cylinder(1, 2, 3) + 28.274333882308138 + >>> vol_hollow_circular_cylinder(1.6, 2.6, 3.6) + 47.50088092227767 + >>> vol_hollow_circular_cylinder(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: vol_hollow_circular_cylinder() only accepts non-negative values + >>> vol_hollow_circular_cylinder(1, -2, 3) + Traceback (most recent call last): + ... 
+ ValueError: vol_hollow_circular_cylinder() only accepts non-negative values + >>> vol_hollow_circular_cylinder(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: vol_hollow_circular_cylinder() only accepts non-negative values + >>> vol_hollow_circular_cylinder(2, 1, 3) + Traceback (most recent call last): + ... + ValueError: outer_radius must be greater than inner_radius + >>> vol_hollow_circular_cylinder(0, 0, 0) + Traceback (most recent call last): + ... + ValueError: outer_radius must be greater than inner_radius + """ + if inner_radius < 0 or outer_radius < 0 or height < 0: + raise ValueError( + "vol_hollow_circular_cylinder() only accepts non-negative values" + ) + if outer_radius <= inner_radius: + raise ValueError("outer_radius must be greater than inner_radius") + return pi * (pow(outer_radius, 2) - pow(inner_radius, 2)) * height + + +def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float: """Calculate the Volume of a Conical Frustum. Wikipedia reference: https://en.wikipedia.org/wiki/Frustum :return 1/3 * pi * height * (radius_1^2 + radius_top^2 + radius_1 * radius_2) - >>> vol_conical_frustum(45, 7, 28) 48490.482608158454 - >>> vol_conical_frustum(1, 1, 2) 7.330382858376184 + >>> vol_conical_frustum(1.6, 2.6, 3.6) + 48.7240076620753 + >>> vol_conical_frustum(0, 0, 0) + 0.0 + >>> vol_conical_frustum(-2, 2, 1) + Traceback (most recent call last): + ... + ValueError: vol_conical_frustum() only accepts non-negative values + >>> vol_conical_frustum(2, -2, 1) + Traceback (most recent call last): + ... + ValueError: vol_conical_frustum() only accepts non-negative values + >>> vol_conical_frustum(2, 2, -1) + Traceback (most recent call last): + ... + ValueError: vol_conical_frustum() only accepts non-negative values """ + if radius_1 < 0 or radius_2 < 0 or height < 0: + raise ValueError("vol_conical_frustum() only accepts non-negative values") return ( 1 / 3 @@ -204,18 +392,21 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float): def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") - print("Cube: " + str(vol_cube(2))) # = 8 - print("Cuboid: " + str(vol_cuboid(2, 2, 2))) # = 8 - print("Cone: " + str(vol_cone(2, 2))) # ~= 1.33 - print("Right Circular Cone: " + str(vol_right_circ_cone(2, 2))) # ~= 8.38 - print("Prism: " + str(vol_prism(2, 2))) # = 4 - print("Pyramid: " + str(vol_pyramid(2, 2))) # ~= 1.33 - print("Sphere: " + str(vol_sphere(2))) # ~= 33.5 - print("Hemisphere: " + str(vol_hemisphere(2))) # ~= 16.75 - print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2))) # ~= 25.1 - print("Conical Frustum: " + str(vol_conical_frustum(2, 2, 4))) # ~= 58.6 - print("Spherical cap: " + str(vol_spherical_cap(1, 2))) # ~= 5.24 - print("Spheres intersetion: " + str(vol_spheres_intersect(2, 2, 1))) # ~= 21.21 + print(f"Cube: {vol_cube(2) = }") # = 8 + print(f"Cuboid: {vol_cuboid(2, 2, 2) = }") # = 8 + print(f"Cone: {vol_cone(2, 2) = }") # ~= 1.33 + print(f"Right Circular Cone: {vol_right_circ_cone(2, 2) = }") # ~= 8.38 + print(f"Prism: {vol_prism(2, 2) = }") # = 4 + print(f"Pyramid: {vol_pyramid(2, 2) = }") # ~= 1.33 + print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5 + print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75 + print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1 + print( + f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" + ) # ~= 28.3 + print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 + print(f"Spherical cap: 
{vol_spherical_cap(1, 2) = }") # ~= 5.24 + print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 if __name__ == "__main__": From 2423760e1d28b4c6860ef63f83b1e6b4b83c1522 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:11:01 +0530 Subject: [PATCH 012/368] Add typing to maths/abs.py (#7060) --- maths/abs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/abs.py b/maths/abs.py index 68c99a1d5..dfea52dfb 100644 --- a/maths/abs.py +++ b/maths/abs.py @@ -1,7 +1,7 @@ """Absolute Value.""" -def abs_val(num): +def abs_val(num: float) -> float: """ Find the absolute value of a number. From 74494d433f8d050d37642f912f616451f40d65e6 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:11:52 +0530 Subject: [PATCH 013/368] Add typing to maths/ceil.py (#7057) --- maths/ceil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/ceil.py b/maths/ceil.py index 97578265c..909e02b3f 100644 --- a/maths/ceil.py +++ b/maths/ceil.py @@ -3,7 +3,7 @@ https://en.wikipedia.org/wiki/Floor_and_ceiling_functions """ -def ceil(x) -> int: +def ceil(x: float) -> int: """ Return the ceiling of x as an Integral. From 32ff33648e0d1f93398db34fd271aa6606abc3a4 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:12:30 +0530 Subject: [PATCH 014/368] Add typing to maths/floor.py (#7056) --- maths/floor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/floor.py b/maths/floor.py index 482250f5e..8bbcb21aa 100644 --- a/maths/floor.py +++ b/maths/floor.py @@ -3,7 +3,7 @@ https://en.wikipedia.org/wiki/Floor_and_ceiling_functions """ -def floor(x) -> int: +def floor(x: float) -> int: """ Return the floor of x as an Integral. :param x: the number From 467ade28a04ed3e77b6c89542fd99f390139b5bd Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <114707091+rohanr18@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:18:49 +0530 Subject: [PATCH 015/368] Add surface area of cuboid, conical frustum (#6442) * Add surface area of cuboid, conical frustum * add tests for floats, zeroes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/area.py | 131 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 116 insertions(+), 15 deletions(-) diff --git a/maths/area.py b/maths/area.py index b1b139cf4..abbf7aa85 100644 --- a/maths/area.py +++ b/maths/area.py @@ -7,9 +7,12 @@ from math import pi, sqrt def surface_area_cube(side_length: float) -> float: """ Calculate the Surface Area of a Cube. - >>> surface_area_cube(1) 6 + >>> surface_area_cube(1.6) + 15.360000000000003 + >>> surface_area_cube(0) + 0 >>> surface_area_cube(3) 54 >>> surface_area_cube(-1) @@ -22,16 +25,46 @@ def surface_area_cube(side_length: float) -> float: return 6 * side_length**2 +def surface_area_cuboid(length: float, breadth: float, height: float) -> float: + """ + Calculate the Surface Area of a Cuboid. + >>> surface_area_cuboid(1, 2, 3) + 22 + >>> surface_area_cuboid(0, 0, 0) + 0 + >>> surface_area_cuboid(1.6, 2.6, 3.6) + 38.56 + >>> surface_area_cuboid(-1, 2, 3) + Traceback (most recent call last): + ... 
+ ValueError: surface_area_cuboid() only accepts non-negative values + >>> surface_area_cuboid(1, -2, 3) + Traceback (most recent call last): + ... + ValueError: surface_area_cuboid() only accepts non-negative values + >>> surface_area_cuboid(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: surface_area_cuboid() only accepts non-negative values + """ + if length < 0 or breadth < 0 or height < 0: + raise ValueError("surface_area_cuboid() only accepts non-negative values") + return 2 * ((length * breadth) + (breadth * height) + (length * height)) + + def surface_area_sphere(radius: float) -> float: """ Calculate the Surface Area of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere Formula: 4 * pi * r^2 - >>> surface_area_sphere(5) 314.1592653589793 >>> surface_area_sphere(1) 12.566370614359172 + >>> surface_area_sphere(1.6) + 32.169908772759484 + >>> surface_area_sphere(0) + 0.0 >>> surface_area_sphere(-1) Traceback (most recent call last): ... @@ -46,7 +79,6 @@ def surface_area_hemisphere(radius: float) -> float: """ Calculate the Surface Area of a Hemisphere. Formula: 3 * pi * r^2 - >>> surface_area_hemisphere(5) 235.61944901923448 >>> surface_area_hemisphere(1) @@ -70,11 +102,14 @@ def surface_area_cone(radius: float, height: float) -> float: Calculate the Surface Area of a Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone Formula: pi * r * (r + (h ** 2 + r ** 2) ** 0.5) - >>> surface_area_cone(10, 24) 1130.9733552923256 >>> surface_area_cone(6, 8) 301.59289474462014 + >>> surface_area_cone(1.6, 2.6) + 23.387862992395807 + >>> surface_area_cone(0, 0) + 0.0 >>> surface_area_cone(-1, -2) Traceback (most recent call last): ... @@ -93,14 +128,51 @@ def surface_area_cone(radius: float, height: float) -> float: return pi * radius * (radius + (height**2 + radius**2) ** 0.5) +def surface_area_conical_frustum( + radius_1: float, radius_2: float, height: float +) -> float: + """ + Calculate the Surface Area of a Conical Frustum. + >>> surface_area_conical_frustum(1, 2, 3) + 45.511728065337266 + >>> surface_area_conical_frustum(4, 5, 6) + 300.7913575056268 + >>> surface_area_conical_frustum(0, 0, 0) + 0.0 + >>> surface_area_conical_frustum(1.6, 2.6, 3.6) + 78.57907060751548 + >>> surface_area_conical_frustum(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: surface_area_conical_frustum() only accepts non-negative values + >>> surface_area_conical_frustum(1, -2, 3) + Traceback (most recent call last): + ... + ValueError: surface_area_conical_frustum() only accepts non-negative values + >>> surface_area_conical_frustum(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: surface_area_conical_frustum() only accepts non-negative values + """ + if radius_1 < 0 or radius_2 < 0 or height < 0: + raise ValueError( + "surface_area_conical_frustum() only accepts non-negative values" + ) + slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5 + return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2) + + def surface_area_cylinder(radius: float, height: float) -> float: """ Calculate the Surface Area of a Cylinder. 
Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder Formula: 2 * pi * r * (h + r) - >>> surface_area_cylinder(7, 10) 747.6990515543707 + >>> surface_area_cylinder(1.6, 2.6) + 42.22300526424682 + >>> surface_area_cylinder(0, 0) + 0.0 >>> surface_area_cylinder(6, 8) 527.7875658030853 >>> surface_area_cylinder(-1, -2) @@ -124,9 +196,12 @@ def surface_area_cylinder(radius: float, height: float) -> float: def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. - >>> area_rectangle(10, 20) 200 + >>> area_rectangle(1.6, 2.6) + 4.16 + >>> area_rectangle(0, 0) + 0 >>> area_rectangle(-1, -2) Traceback (most recent call last): ... @@ -148,9 +223,12 @@ def area_rectangle(length: float, width: float) -> float: def area_square(side_length: float) -> float: """ Calculate the area of a square. - >>> area_square(10) 100 + >>> area_square(0) + 0 + >>> area_square(1.6) + 2.5600000000000005 >>> area_square(-1) Traceback (most recent call last): ... @@ -164,9 +242,12 @@ def area_square(side_length: float) -> float: def area_triangle(base: float, height: float) -> float: """ Calculate the area of a triangle given the base and height. - >>> area_triangle(10, 10) 50.0 + >>> area_triangle(1.6, 2.6) + 2.08 + >>> area_triangle(0, 0) + 0.0 >>> area_triangle(-1, -2) Traceback (most recent call last): ... @@ -188,13 +269,15 @@ def area_triangle(base: float, height: float) -> float: def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float: """ Calculate area of triangle when the length of 3 sides are known. - This function uses Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula - >>> area_triangle_three_sides(5, 12, 13) 30.0 >>> area_triangle_three_sides(10, 11, 12) 51.521233486786784 + >>> area_triangle_three_sides(0, 0, 0) + 0.0 + >>> area_triangle_three_sides(1.6, 2.6, 3.6) + 1.8703742940919619 >>> area_triangle_three_sides(-1, -2, -1) Traceback (most recent call last): ... @@ -233,9 +316,12 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float def area_parallelogram(base: float, height: float) -> float: """ Calculate the area of a parallelogram. - >>> area_parallelogram(10, 20) 200 + >>> area_parallelogram(1.6, 2.6) + 4.16 + >>> area_parallelogram(0, 0) + 0 >>> area_parallelogram(-1, -2) Traceback (most recent call last): ... @@ -257,9 +343,12 @@ def area_parallelogram(base: float, height: float) -> float: def area_trapezium(base1: float, base2: float, height: float) -> float: """ Calculate the area of a trapezium. - >>> area_trapezium(10, 20, 30) 450.0 + >>> area_trapezium(1.6, 2.6, 3.6) + 7.5600000000000005 + >>> area_trapezium(0, 0, 0) + 0.0 >>> area_trapezium(-1, -2, -3) Traceback (most recent call last): ... @@ -297,9 +386,12 @@ def area_trapezium(base1: float, base2: float, height: float) -> float: def area_circle(radius: float) -> float: """ Calculate the area of a circle. - >>> area_circle(20) 1256.6370614359173 + >>> area_circle(1.6) + 8.042477193189871 + >>> area_circle(0) + 0.0 >>> area_circle(-1) Traceback (most recent call last): ... @@ -313,11 +405,14 @@ def area_circle(radius: float) -> float: def area_ellipse(radius_x: float, radius_y: float) -> float: """ Calculate the area of a ellipse. - >>> area_ellipse(10, 10) 314.1592653589793 >>> area_ellipse(10, 20) 628.3185307179587 + >>> area_ellipse(0, 0) + 0.0 + >>> area_ellipse(1.6, 2.6) + 13.06902543893354 >>> area_ellipse(-10, 20) Traceback (most recent call last): ... 
@@ -339,9 +434,12 @@ def area_ellipse(radius_x: float, radius_y: float) -> float:
 def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
     """
     Calculate the area of a rhombus.
-    >>> area_rhombus(10, 20)
     100.0
+    >>> area_rhombus(1.6, 2.6)
+    2.08
+    >>> area_rhombus(0, 0)
+    0.0
     >>> area_rhombus(-1, -2)
     Traceback (most recent call last):
     ...
@@ -374,9 +472,12 @@ if __name__ == "__main__":
     print(f"Rhombus: {area_rhombus(10, 20) = }")
     print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
     print(f"Circle: {area_circle(20) = }")
+    print(f"Ellipse: {area_ellipse(10, 20) = }")
     print("\nSurface Areas of various geometric shapes: \n")
     print(f"Cube: {surface_area_cube(20) = }")
+    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
     print(f"Sphere: {surface_area_sphere(20) = }")
     print(f"Hemisphere: {surface_area_hemisphere(20) = }")
     print(f"Cone: {surface_area_cone(10, 20) = }")
+    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
     print(f"Cylinder: {surface_area_cylinder(10, 20) = }")

From c0c230255ffe79946bd959ecb559696353ac33f2 Mon Sep 17 00:00:00 2001
From: Eeman Majumder <54275491+Eeman1113@users.noreply.github.com>
Date: Thu, 13 Oct 2022 01:13:52 +0530
Subject: [PATCH 016/368] added self organising maps algorithm in the machine learning section. (#6877)

* added self organising maps algo
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* Update machine_learning/Self_Organising_Maps.py
* Update and rename Self_Organising_Maps.py to self_organizing_map.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* Update self_organizing_map.py
* Update self_organizing_map.py
* Update self_organizing_map.py
* Update self_organizing_map.py

Co-authored-by: Eeman Majumder
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Christian Clauss
---
 machine_learning/self_organizing_map.py | 73 +++++++++++++++++++++++++
 1 file changed, 73 insertions(+)
 create mode 100644 machine_learning/self_organizing_map.py

diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py
new file mode 100644
index 000000000..bd3d388f9
--- /dev/null
+++ b/machine_learning/self_organizing_map.py
@@ -0,0 +1,73 @@
+"""
+https://en.wikipedia.org/wiki/Self-organizing_map
+"""
+import math
+
+
+class SelfOrganizingMap:
+    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
+        """
+        Compute the winning vector by Euclidean distance
+
+        >>> SelfOrganizingMap().get_winner([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
+        1
+        """
+        d0 = 0.0
+        d1 = 0.0
+        for i in range(len(sample)):
+            d0 += math.pow((sample[i] - weights[0][i]), 2)
+            d1 += math.pow((sample[i] - weights[1][i]), 2)
+            return 0 if d0 > d1 else 1
+        return 0
+
+    def update(
+        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
+    ) -> list[list[int | float]]:
+        """
+        Update the winning vector.
+
+        >>> SelfOrganizingMap().update([[1, 2, 3], [4, 5, 6]], [1, 2, 3], 1, 0.1)
+        [[1, 2, 3], [3.7, 4.7, 6]]
+        """
+        for i in range(len(weights)):
+            weights[j][i] += alpha * (sample[i] - weights[j][i])
+        return weights
+
+
+# Driver code
+def main() -> None:
+    # Training Examples ( m, n )
+    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
+
+    # weight initialization ( n, C )
+    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
+
+    # training
+    self_organizing_map = SelfOrganizingMap()
+    epochs = 3
+    alpha = 0.5
+
+    for i in range(epochs):
+        for j in range(len(training_samples)):
+
+            # training sample
+            sample = training_samples[j]
+
+            # Compute the winning vector
+            winner = self_organizing_map.get_winner(weights, sample)
+
+            # Update the winning vector
+            weights = self_organizing_map.update(weights, sample, winner, alpha)
+
+    # classify test sample
+    sample = [0, 0, 0, 1]
+    winner = self_organizing_map.get_winner(weights, sample)
+
+    # results
+    print(f"Clusters that the test sample belongs to : {winner}")
+    print(f"Weights that have been trained : {weights}")
+
+
+# running the main() function
+if __name__ == "__main__":
+    main()

From bae08adc86c44268faaa0fe05ea0f2f91567ac9a Mon Sep 17 00:00:00 2001
From: Christian Clauss
Date: Wed, 12 Oct 2022 21:56:07 +0200
Subject: [PATCH 017/368] README.md: Lose LGTM badge because we don't use it (#7063)

* README.md: Lose LGTM badge because we don't use it
* updating DIRECTORY.md

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
---
 DIRECTORY.md | 2 ++
 README.md    | 3 ---
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/DIRECTORY.md b/DIRECTORY.md
index 9ef72c403..25272af4a 100644
--- a/DIRECTORY.md
+++ b/DIRECTORY.md
@@ -14,6 +14,7 @@
 ## Audio Filters
   * [Butterworth Filter](audio_filters/butterworth_filter.py)
+  * [Equal Loudness Filter](audio_filters/equal_loudness_filter.py)
   * [Iir Filter](audio_filters/iir_filter.py)
   * [Show Response](audio_filters/show_response.py)
@@ -475,6 +476,7 @@
   * [Binomial Coefficient](maths/binomial_coefficient.py)
   * [Binomial Distribution](maths/binomial_distribution.py)
   * [Bisection](maths/bisection.py)
+  * [Carmichael Number](maths/carmichael_number.py)
   * [Catalan Number](maths/catalan_number.py)
   * [Ceil](maths/ceil.py)
   * [Check Polygon](maths/check_polygon.py)
diff --git a/README.md b/README.md
index c78797960..c499c14e1 100644
--- a/README.md
+++ b/README.md
@@ -24,9 +24,6 @@
 GitHub Workflow Status
-
-    LGTM
-
 pre-commit

From e2cd982b1154814debe2960498ccbb29d4829bf7 Mon Sep 17 00:00:00 2001
From: VARISH GAUTAM <48176176+Variiiest@users.noreply.github.com>
Date: Thu, 13 Oct 2022 02:12:02 +0530
Subject: [PATCH 018/368] Weird numbers (#6871)

* Create weird_number.py

In number theory, a weird number is a natural number that is abundant but not semiperfect

* check
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* resolved
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* removed
* Update weird_number.py
* Update weird_number.py
* Update weird_number.py

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Christian Clauss
---
 maths/weird_number.py | 100 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)
 create mode 100644 maths/weird_number.py

diff --git a/maths/weird_number.py b/maths/weird_number.py
new file mode 100644
index 000000000..2834a9fee
--- /dev/null
+++ b/maths/weird_number.py
@@ -0,0 +1,100 @@
+"""
+https://en.wikipedia.org/wiki/Weird_number
+
+Fun fact: The set of weird numbers has positive asymptotic density.
+"""
+from math import sqrt
+
+
+def factors(number: int) -> list[int]:
+    """
+    >>> factors(12)
+    [1, 2, 3, 4, 6]
+    >>> factors(1)
+    [1]
+    >>> factors(100)
+    [1, 2, 4, 5, 10, 20, 25, 50]
+
+    # >>> factors(-12)
+    # [1, 2, 3, 4, 6]
+    """
+
+    values = [1]
+    for i in range(2, int(sqrt(number)) + 1, 1):
+        if number % i == 0:
+            values.append(i)
+            if int(number // i) != i:
+                values.append(int(number // i))
+    return sorted(values)
+
+
+def abundant(n: int) -> bool:
+    """
+    >>> abundant(0)
+    True
+    >>> abundant(1)
+    False
+    >>> abundant(12)
+    True
+    >>> abundant(13)
+    False
+    >>> abundant(20)
+    True
+
+    # >>> abundant(-12)
+    # True
+    """
+    return sum(factors(n)) > n
+
+
+def semi_perfect(number: int) -> bool:
+    """
+    >>> semi_perfect(0)
+    True
+    >>> semi_perfect(1)
+    True
+    >>> semi_perfect(12)
+    True
+    >>> semi_perfect(13)
+    False
+
+    # >>> semi_perfect(-12)
+    # True
+    """
+    values = factors(number)
+    r = len(values)
+    subset = [[0 for i in range(number + 1)] for j in range(r + 1)]
+    for i in range(r + 1):
+        subset[i][0] = True
+
+    for i in range(1, number + 1):
+        subset[0][i] = False
+
+    for i in range(1, r + 1):
+        for j in range(1, number + 1):
+            if j < values[i - 1]:
+                subset[i][j] = subset[i - 1][j]
+            else:
+                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - values[i - 1]]
+
+    return subset[r][number] != 0
+
+
+def weird(number: int) -> bool:
+    """
+    >>> weird(0)
+    False
+    >>> weird(70)
+    True
+    >>> weird(77)
+    False
+    """
+    return abundant(number) and not semi_perfect(number)
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod(verbose=True)
+    for number in (69, 70, 71):
+        print(f"{number} is {'' if weird(number) else 'not '}weird.")

From 07e991d55330bf1363ba53858a98cf6fd8d45026 Mon Sep 17 00:00:00 2001
From: Caeden
Date: Wed, 12 Oct 2022 23:54:20 +0100
Subject: [PATCH 019/368] Add pep8-naming to pre-commit hooks and fixes incorrect naming conventions (#7062)

* ci(pre-commit): Add pep8-naming to `pre-commit` hooks (#7038)
* refactor: Fix naming conventions (#7038)
* Update arithmetic_analysis/lu_decomposition.py

Co-authored-by: Christian Clauss

* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* refactor(lu_decomposition): Replace `NDArray` with `ArrayLike` (#7038)
* chore: Fix naming conventions in doctests (#7038)
* fix: Temporarily disable project euler problem 104 (#7069)
* chore: Fix naming conventions in doctests (#7038)

Co-authored-by: Christian Clauss
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 1 +
 arithmetic_analysis/lu_decomposition.py | 6 +-
 backtracking/n_queens.py | 4 +-
 ciphers/affine_cipher.py | 52 ++---
 ciphers/bifid.py | 2 +-
 ciphers/brute_force_caesar_cipher.py | 2 +-
 ciphers/elgamal_key_generator.py | 10 +-
 ciphers/hill_cipher.py | 4 +-
 ciphers/polybius.py | 2 +-
 ciphers/rabin_miller.py | 14 +-
 ciphers/rsa_cipher.py | 14 +-
 ciphers/rsa_factorization.py | 10 +-
 ciphers/rsa_key_generator.py | 28 +--
 ciphers/simple_substitution_cipher.py | 48 ++---
 ciphers/trafid_cipher.py | 56 +++---
 ciphers/transposition_cipher.py | 36 ++--
 ...ansposition_cipher_encrypt_decrypt_file.py | 32 +--
 ciphers/vigenere_cipher.py | 30 +--
 compression/lempel_ziv_decompress.py | 6 +-
 compression/peak_signal_to_noise_ratio.py | 4 +-
 computer_vision/harris_corner.py | 4 +-
conversions/binary_to_hexadecimal.py | 2 +- conversions/decimal_to_any.py | 2 +- conversions/prefix_conversions.py | 32 +-- conversions/roman_numerals.py | 2 +- data_structures/binary_tree/avl_tree.py | 42 ++-- .../binary_tree/lazy_segment_tree.py | 8 +- data_structures/binary_tree/segment_tree.py | 14 +- data_structures/binary_tree/treap.py | 16 +- data_structures/heap/min_heap.py | 22 +- .../stacks/infix_to_prefix_conversion.py | 60 +++--- data_structures/stacks/postfix_evaluation.py | 38 ++-- data_structures/stacks/stock_span_problem.py | 12 +- .../edge_detection/canny.py | 24 +-- .../filters/bilateral_filter.py | 18 +- .../histogram_stretch.py | 12 +- digital_image_processing/index_calculation.py | 190 +++++++++--------- .../test_digital_image_processing.py | 4 +- divide_and_conquer/inversions.py | 34 ++-- dynamic_programming/bitmask.py | 12 +- dynamic_programming/edit_distance.py | 34 ++-- dynamic_programming/floyd_warshall.py | 46 ++--- dynamic_programming/fractional_knapsack.py | 8 +- dynamic_programming/knapsack.py | 34 ++-- .../longest_common_subsequence.py | 10 +- .../longest_increasing_subsequence.py | 6 +- ...longest_increasing_subsequence_o(nlogn).py | 16 +- dynamic_programming/matrix_chain_order.py | 38 ++-- dynamic_programming/max_sub_array.py | 16 +- dynamic_programming/minimum_coin_change.py | 4 +- dynamic_programming/minimum_partition.py | 2 +- dynamic_programming/sum_of_subset.py | 18 +- fractals/sierpinski_triangle.py | 28 +-- geodesy/haversine_distance.py | 6 +- geodesy/lamberts_ellipsoidal_distance.py | 24 +-- graphs/articulation_points.py | 30 +-- graphs/basic_graphs.py | 78 +++---- graphs/check_bipartite_graph_bfs.py | 4 +- graphs/dijkstra.py | 12 +- graphs/dijkstra_2.py | 36 ++-- graphs/dijkstra_algorithm.py | 14 +- .../edmonds_karp_multiple_source_and_sink.py | 161 +++++++-------- ...n_path_and_circuit_for_undirected_graph.py | 20 +- graphs/frequent_pattern_graph_miner.py | 28 +-- graphs/kahns_algorithm_long.py | 12 +- graphs/kahns_algorithm_topo.py | 4 +- graphs/minimum_spanning_tree_prims.py | 48 ++--- graphs/multi_heuristic_astar.py | 12 +- graphs/scc_kosaraju.py | 12 +- graphs/tests/test_min_spanning_tree_prim.py | 2 +- hashes/adler32.py | 2 +- hashes/chaos_machine.py | 16 +- hashes/hamming_code.py | 170 ++++++++-------- hashes/md5.py | 90 ++++----- hashes/sha1.py | 2 +- hashes/sha256.py | 8 +- linear_algebra/src/power_iteration.py | 4 +- linear_algebra/src/rayleigh_quotient.py | 16 +- linear_algebra/src/test_linear_algebra.py | 60 +++--- machine_learning/decision_tree.py | 54 ++--- machine_learning/gaussian_naive_bayes.py | 12 +- .../gradient_boosting_regressor.py | 14 +- machine_learning/k_means_clust.py | 16 +- .../local_weighted_learning.py | 4 +- machine_learning/logistic_regression.py | 36 ++-- .../multilayer_perceptron_classifier.py | 4 +- machine_learning/random_forest_classifier.py | 6 +- machine_learning/random_forest_regressor.py | 6 +- .../sequential_minimum_optimization.py | 64 +++--- machine_learning/word_frequency_functions.py | 10 +- maths/binomial_coefficient.py | 8 +- maths/carmichael_number.py | 4 +- maths/decimal_isolate.py | 6 +- maths/euler_method.py | 6 +- maths/euler_modified.py | 6 +- maths/hardy_ramanujanalgo.py | 6 +- maths/jaccard_similarity.py | 48 ++--- maths/krishnamurthy_number.py | 6 +- maths/kth_lexicographic_permutation.py | 6 +- maths/lucas_lehmer_primality_test.py | 4 +- maths/primelib.py | 140 ++++++------- maths/qr_decomposition.py | 20 +- maths/radix2_fft.py | 72 +++---- maths/runge_kutta.py | 6 +- maths/softmax.py | 6 
+- matrix/count_islands_in_matrix.py | 10 +- matrix/inverse_of_matrix.py | 6 +- matrix/sherman_morrison.py | 26 +-- networking_flow/ford_fulkerson.py | 8 +- networking_flow/minimum_cut.py | 4 +- neural_network/convolution_neural_network.py | 6 +- other/davisb_putnamb_logemannb_loveland.py | 36 ++-- other/greedy.py | 38 ++-- other/nested_brackets.py | 12 +- other/sdes.py | 8 +- other/tower_of_hanoi.py | 14 +- physics/n_body_simulation.py | 6 +- project_euler/problem_011/sol1.py | 30 +-- project_euler/problem_012/sol1.py | 16 +- project_euler/problem_023/sol1.py | 8 +- project_euler/problem_029/sol1.py | 16 +- project_euler/problem_032/sol32.py | 8 +- project_euler/problem_042/solution42.py | 4 +- project_euler/problem_054/test_poker_hand.py | 2 +- project_euler/problem_064/sol1.py | 2 +- project_euler/problem_097/sol1.py | 4 +- .../problem_104/{sol.py => sol.py.FIXME} | 0 project_euler/problem_125/sol1.py | 2 +- .../non_preemptive_shortest_job_first.py | 4 +- searches/tabu_search.py | 4 +- sorts/odd_even_transposition_parallel.py | 92 ++++----- sorts/radix_sort.py | 2 +- sorts/random_normal_distribution_quicksort.py | 44 ++-- sorts/random_pivot_quick_sort.py | 24 +-- sorts/tree_sort.py | 8 +- strings/boyer_moore_search.py | 8 +- .../can_string_be_rearranged_as_palindrome.py | 6 +- strings/check_anagrams.py | 8 +- strings/word_patterns.py | 8 +- web_programming/fetch_quotes.py | 4 +- 140 files changed, 1552 insertions(+), 1536 deletions(-) rename project_euler/problem_104/{sol.py => sol.py.FIXME} (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0abe647b0..2f6a92814 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,6 +40,7 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 + additional_dependencies: [pep8-naming] - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 371f7b166..1e98b9066 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -6,13 +6,13 @@ Reference: from __future__ import annotations import numpy as np -import numpy.typing as NDArray from numpy import float64 +from numpy.typing import ArrayLike def lower_upper_decomposition( - table: NDArray[float64], -) -> tuple[NDArray[float64], NDArray[float64]]: + table: ArrayLike[float64], +) -> tuple[ArrayLike[float64], ArrayLike[float64]]: """Lower-Upper (LU) Decomposition Example: diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index b8ace5978..bbf0ce44f 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -12,7 +12,7 @@ from __future__ import annotations solution = [] -def isSafe(board: list[list[int]], row: int, column: int) -> bool: +def is_safe(board: list[list[int]], row: int, column: int) -> bool: """ This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. @@ -63,7 +63,7 @@ def solve(board: list[list[int]], row: int) -> bool: If all the combinations for that particular branch are successful the board is reinitialized for the next possible combination. 
""" - if isSafe(board, row, i): + if is_safe(board, row, i): board[row][i] = 1 solve(board, row + 1) board[row][i] = 0 diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py index d3b806ba1..cd1e33b88 100644 --- a/ciphers/affine_cipher.py +++ b/ciphers/affine_cipher.py @@ -9,26 +9,26 @@ SYMBOLS = ( ) -def check_keys(keyA: int, keyB: int, mode: str) -> None: +def check_keys(key_a: int, key_b: int, mode: str) -> None: if mode == "encrypt": - if keyA == 1: + if key_a == 1: sys.exit( "The affine cipher becomes weak when key " "A is set to 1. Choose different key" ) - if keyB == 0: + if key_b == 0: sys.exit( "The affine cipher becomes weak when key " "B is set to 0. Choose different key" ) - if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1: + if key_a < 0 or key_b < 0 or key_b > len(SYMBOLS) - 1: sys.exit( "Key A must be greater than 0 and key B must " f"be between 0 and {len(SYMBOLS) - 1}." ) - if cryptomath.gcd(keyA, len(SYMBOLS)) != 1: + if cryptomath.gcd(key_a, len(SYMBOLS)) != 1: sys.exit( - f"Key A {keyA} and the symbol set size {len(SYMBOLS)} " + f"Key A {key_a} and the symbol set size {len(SYMBOLS)} " "are not relatively prime. Choose a different key." ) @@ -39,16 +39,16 @@ def encrypt_message(key: int, message: str) -> str: ... 'substitution cipher.') 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi' """ - keyA, keyB = divmod(key, len(SYMBOLS)) - check_keys(keyA, keyB, "encrypt") - cipherText = "" + key_a, key_b = divmod(key, len(SYMBOLS)) + check_keys(key_a, key_b, "encrypt") + cipher_text = "" for symbol in message: if symbol in SYMBOLS: - symIndex = SYMBOLS.find(symbol) - cipherText += SYMBOLS[(symIndex * keyA + keyB) % len(SYMBOLS)] + sym_index = SYMBOLS.find(symbol) + cipher_text += SYMBOLS[(sym_index * key_a + key_b) % len(SYMBOLS)] else: - cipherText += symbol - return cipherText + cipher_text += symbol + return cipher_text def decrypt_message(key: int, message: str) -> str: @@ -57,25 +57,27 @@ def decrypt_message(key: int, message: str) -> str: ... '{xIp~{HL}Gi') 'The affine cipher is a type of monoalphabetic substitution cipher.' 
""" - keyA, keyB = divmod(key, len(SYMBOLS)) - check_keys(keyA, keyB, "decrypt") - plainText = "" - modInverseOfkeyA = cryptomath.find_mod_inverse(keyA, len(SYMBOLS)) + key_a, key_b = divmod(key, len(SYMBOLS)) + check_keys(key_a, key_b, "decrypt") + plain_text = "" + mod_inverse_of_key_a = cryptomath.find_mod_inverse(key_a, len(SYMBOLS)) for symbol in message: if symbol in SYMBOLS: - symIndex = SYMBOLS.find(symbol) - plainText += SYMBOLS[(symIndex - keyB) * modInverseOfkeyA % len(SYMBOLS)] + sym_index = SYMBOLS.find(symbol) + plain_text += SYMBOLS[ + (sym_index - key_b) * mod_inverse_of_key_a % len(SYMBOLS) + ] else: - plainText += symbol - return plainText + plain_text += symbol + return plain_text def get_random_key() -> int: while True: - keyA = random.randint(2, len(SYMBOLS)) - keyB = random.randint(2, len(SYMBOLS)) - if cryptomath.gcd(keyA, len(SYMBOLS)) == 1 and keyB % len(SYMBOLS) != 0: - return keyA * len(SYMBOLS) + keyB + key_b = random.randint(2, len(SYMBOLS)) + key_b = random.randint(2, len(SYMBOLS)) + if cryptomath.gcd(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0: + return key_b * len(SYMBOLS) + key_b def main() -> None: diff --git a/ciphers/bifid.py b/ciphers/bifid.py index c1b071155..54d55574c 100644 --- a/ciphers/bifid.py +++ b/ciphers/bifid.py @@ -12,7 +12,7 @@ import numpy as np class BifidCipher: def __init__(self) -> None: - SQUARE = [ + SQUARE = [ # noqa: N806 ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py index 8ab6e7730..cc97111e0 100644 --- a/ciphers/brute_force_caesar_cipher.py +++ b/ciphers/brute_force_caesar_cipher.py @@ -28,7 +28,7 @@ def decrypt(message: str) -> None: Decryption using Key #24: VOFGVWZ ROFXW Decryption using Key #25: UNEFUVY QNEWV """ - LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # noqa: N806 for key in range(len(LETTERS)): translated = "" for symbol in message: diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index 485b77595..4d72128ae 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -26,7 +26,7 @@ def primitive_root(p_val: int) -> int: def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print("Generating prime p...") - p = rabin_miller.generateLargePrime(key_size) # select large prime number. + p = rabin_miller.generate_large_prime(key_size) # select large prime number. e_1 = primitive_root(p) # one primitive root on modulo p. d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety. 
e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p) @@ -37,7 +37,7 @@ def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, i return public_key, private_key -def make_key_files(name: str, keySize: int) -> None: +def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( @@ -47,16 +47,16 @@ def make_key_files(name: str, keySize: int) -> None: ) sys.exit() - publicKey, privateKey = generate_key(keySize) + public_key, private_key = generate_key(key_size) print(f"\nWriting public key to file {name}_pubkey.txt...") with open(f"{name}_pubkey.txt", "w") as fo: fo.write( - "%d,%d,%d,%d" % (publicKey[0], publicKey[1], publicKey[2], publicKey[3]) + "%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3]) ) print(f"Writing private key to file {name}_privkey.txt...") with open(f"{name}_privkey.txt", "w") as fo: - fo.write("%d,%d" % (privateKey[0], privateKey[1])) + fo.write("%d,%d" % (private_key[0], private_key[1])) def main() -> None: diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index d8e436e92..f646d567b 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -201,11 +201,11 @@ class HillCipher: def main() -> None: - N = int(input("Enter the order of the encryption key: ")) + n = int(input("Enter the order of the encryption key: ")) hill_matrix = [] print("Enter each row of the encryption key with space separated integers") - for _ in range(N): + for _ in range(n): row = [int(x) for x in input().split()] hill_matrix.append(row) diff --git a/ciphers/polybius.py b/ciphers/polybius.py index 2a45f02a3..bf5d62f8d 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -11,7 +11,7 @@ import numpy as np class PolybiusCipher: def __init__(self) -> None: - SQUARE = [ + SQUARE = [ # noqa: N806 ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py index a9b834bfb..0aab80eb9 100644 --- a/ciphers/rabin_miller.py +++ b/ciphers/rabin_miller.py @@ -3,7 +3,7 @@ import random -def rabinMiller(num: int) -> bool: +def rabin_miller(num: int) -> bool: s = num - 1 t = 0 @@ -29,7 +29,7 @@ def is_prime_low_num(num: int) -> bool: if num < 2: return False - lowPrimes = [ + low_primes = [ 2, 3, 5, @@ -200,17 +200,17 @@ def is_prime_low_num(num: int) -> bool: 997, ] - if num in lowPrimes: + if num in low_primes: return True - for prime in lowPrimes: + for prime in low_primes: if (num % prime) == 0: return False - return rabinMiller(num) + return rabin_miller(num) -def generateLargePrime(keysize: int = 1024) -> int: +def generate_large_prime(keysize: int = 1024) -> int: while True: num = random.randrange(2 ** (keysize - 1), 2 ** (keysize)) if is_prime_low_num(num): @@ -218,6 +218,6 @@ def generateLargePrime(keysize: int = 1024) -> int: if __name__ == "__main__": - num = generateLargePrime() + num = generate_large_prime() print(("Prime number:", num)) print(("is_prime_low_num:", is_prime_low_num(num))) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index c6bfaa0fb..de26992f5 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -37,12 +37,12 @@ def get_text_from_blocks( def encrypt_message( - message: str, key: tuple[int, int], blockSize: int = DEFAULT_BLOCK_SIZE + message: str, key: tuple[int, int], block_size: int = DEFAULT_BLOCK_SIZE ) -> list[int]: encrypted_blocks = [] n, e = key - for block in get_blocks_from_text(message, 
blockSize): + for block in get_blocks_from_text(message, block_size): encrypted_blocks.append(pow(block, e, n)) return encrypted_blocks @@ -63,8 +63,8 @@ def decrypt_message( def read_key_file(key_filename: str) -> tuple[int, int, int]: with open(key_filename) as fo: content = fo.read() - key_size, n, EorD = content.split(",") - return (int(key_size), int(n), int(EorD)) + key_size, n, eor_d = content.split(",") + return (int(key_size), int(n), int(eor_d)) def encrypt_and_write_to_file( @@ -125,15 +125,15 @@ def main() -> None: if mode == "encrypt": if not os.path.exists("rsa_pubkey.txt"): - rkg.makeKeyFiles("rsa", 1024) + rkg.make_key_files("rsa", 1024) message = input("\nEnter message: ") pubkey_filename = "rsa_pubkey.txt" print(f"Encrypting and writing to {filename}...") - encryptedText = encrypt_and_write_to_file(filename, pubkey_filename, message) + encrypted_text = encrypt_and_write_to_file(filename, pubkey_filename, message) print("\nEncrypted text:") - print(encryptedText) + print(encrypted_text) elif mode == "decrypt": privkey_filename = "rsa_privkey.txt" diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index de4df2777..9ee52777e 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -13,7 +13,7 @@ import math import random -def rsafactor(d: int, e: int, N: int) -> list[int]: +def rsafactor(d: int, e: int, n: int) -> list[int]: """ This function returns the factors of N, where p*q=N Return: [p, q] @@ -35,16 +35,16 @@ def rsafactor(d: int, e: int, N: int) -> list[int]: p = 0 q = 0 while p == 0: - g = random.randint(2, N - 1) + g = random.randint(2, n - 1) t = k while True: if t % 2 == 0: t = t // 2 - x = (g**t) % N - y = math.gcd(x - 1, N) + x = (g**t) % n + y = math.gcd(x - 1, n) if x > 1 and y > 1: p = y - q = N // y + q = n // y break # find the correct factors else: break # t is not divisible by 2, break and choose another g diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index d983c14f1..f64bc7dd0 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -2,38 +2,38 @@ import os import random import sys -from . import cryptomath_module as cryptoMath -from . import rabin_miller as rabinMiller +from . import cryptomath_module as cryptoMath # noqa: N812 +from . 
import rabin_miller as rabinMiller # noqa: N812 def main() -> None: print("Making key files...") - makeKeyFiles("rsa", 1024) + make_key_files("rsa", 1024) print("Key files generation successful.") -def generateKey(keySize: int) -> tuple[tuple[int, int], tuple[int, int]]: +def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]: print("Generating prime p...") - p = rabinMiller.generateLargePrime(keySize) + p = rabinMiller.generate_large_prime(key_size) print("Generating prime q...") - q = rabinMiller.generateLargePrime(keySize) + q = rabinMiller.generate_large_prime(key_size) n = p * q print("Generating e that is relatively prime to (p - 1) * (q - 1)...") while True: - e = random.randrange(2 ** (keySize - 1), 2 ** (keySize)) + e = random.randrange(2 ** (key_size - 1), 2 ** (key_size)) if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1: break print("Calculating d that is mod inverse of e...") d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1)) - publicKey = (n, e) - privateKey = (n, d) - return (publicKey, privateKey) + public_key = (n, e) + private_key = (n, d) + return (public_key, private_key) -def makeKeyFiles(name: str, keySize: int) -> None: +def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( @@ -43,14 +43,14 @@ def makeKeyFiles(name: str, keySize: int) -> None: ) sys.exit() - publicKey, privateKey = generateKey(keySize) + public_key, private_key = generate_key(key_size) print(f"\nWriting public key to file {name}_pubkey.txt...") with open(f"{name}_pubkey.txt", "w") as out_file: - out_file.write(f"{keySize},{publicKey[0]},{publicKey[1]}") + out_file.write(f"{key_size},{public_key[0]},{public_key[1]}") print(f"Writing private key to file {name}_privkey.txt...") with open(f"{name}_privkey.txt", "w") as out_file: - out_file.write(f"{keySize},{privateKey[0]},{privateKey[1]}") + out_file.write(f"{key_size},{private_key[0]},{private_key[1]}") if __name__ == "__main__": diff --git a/ciphers/simple_substitution_cipher.py b/ciphers/simple_substitution_cipher.py index a763bd6b6..291a9bccd 100644 --- a/ciphers/simple_substitution_cipher.py +++ b/ciphers/simple_substitution_cipher.py @@ -9,66 +9,66 @@ def main() -> None: key = "LFWOAYUISVKMNXPBDCRJTQEGHZ" resp = input("Encrypt/Decrypt [e/d]: ") - checkValidKey(key) + check_valid_key(key) if resp.lower().startswith("e"): mode = "encrypt" - translated = encryptMessage(key, message) + translated = encrypt_message(key, message) elif resp.lower().startswith("d"): mode = "decrypt" - translated = decryptMessage(key, message) + translated = decrypt_message(key, message) print(f"\n{mode.title()}ion: \n{translated}") -def checkValidKey(key: str) -> None: - keyList = list(key) - lettersList = list(LETTERS) - keyList.sort() - lettersList.sort() +def check_valid_key(key: str) -> None: + key_list = list(key) + letters_list = list(LETTERS) + key_list.sort() + letters_list.sort() - if keyList != lettersList: + if key_list != letters_list: sys.exit("Error in the key or symbol set.") -def encryptMessage(key: str, message: str) -> str: +def encrypt_message(key: str, message: str) -> str: """ - >>> encryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji') + >>> encrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji') 'Ilcrism Olcvs' """ - return translateMessage(key, message, "encrypt") + return translate_message(key, message, "encrypt") -def decryptMessage(key: str, message: str) -> str: +def decrypt_message(key: str, message: str) 
-> str: """ - >>> decryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs') + >>> decrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs') 'Harshil Darji' """ - return translateMessage(key, message, "decrypt") + return translate_message(key, message, "decrypt") -def translateMessage(key: str, message: str, mode: str) -> str: +def translate_message(key: str, message: str, mode: str) -> str: translated = "" - charsA = LETTERS - charsB = key + chars_a = LETTERS + chars_b = key if mode == "decrypt": - charsA, charsB = charsB, charsA + chars_a, chars_b = chars_b, chars_a for symbol in message: - if symbol.upper() in charsA: - symIndex = charsA.find(symbol.upper()) + if symbol.upper() in chars_a: + sym_index = chars_a.find(symbol.upper()) if symbol.isupper(): - translated += charsB[symIndex].upper() + translated += chars_b[sym_index].upper() else: - translated += charsB[symIndex].lower() + translated += chars_b[sym_index].lower() else: translated += symbol return translated -def getRandomKey() -> str: +def get_random_key() -> str: key = list(LETTERS) random.shuffle(key) return "".join(key) diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py index b12ceff72..108ac652f 100644 --- a/ciphers/trafid_cipher.py +++ b/ciphers/trafid_cipher.py @@ -2,12 +2,12 @@ from __future__ import annotations -def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str: +def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: one, two, three = "", "", "" tmp = [] - for character in messagePart: - tmp.append(character2Number[character]) + for character in message_part: + tmp.append(character_to_number[character]) for each in tmp: one += each[0] @@ -17,18 +17,18 @@ def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str: return one + two + three -def __decryptPart( - messagePart: str, character2Number: dict[str, str] +def __decrypt_part( + message_part: str, character_to_number: dict[str, str] ) -> tuple[str, str, str]: - tmp, thisPart = "", "" + tmp, this_part = "", "" result = [] - for character in messagePart: - thisPart += character2Number[character] + for character in message_part: + this_part += character_to_number[character] - for digit in thisPart: + for digit in this_part: tmp += digit - if len(tmp) == len(messagePart): + if len(tmp) == len(message_part): result.append(tmp) tmp = "" @@ -79,51 +79,57 @@ def __prepare( "332", "333", ) - character2Number = {} - number2Character = {} + character_to_number = {} + number_to_character = {} for letter, number in zip(alphabet, numbers): - character2Number[letter] = number - number2Character[number] = letter + character_to_number[letter] = number + number_to_character[number] = letter - return message, alphabet, character2Number, number2Character + return message, alphabet, character_to_number, number_to_character -def encryptMessage( +def encrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: - message, alphabet, character2Number, number2Character = __prepare(message, alphabet) + message, alphabet, character_to_number, number_to_character = __prepare( + message, alphabet + ) encrypted, encrypted_numeric = "", "" for i in range(0, len(message) + 1, period): - encrypted_numeric += __encryptPart(message[i : i + period], character2Number) + encrypted_numeric += __encrypt_part( + message[i : i + period], character_to_number + ) for i in range(0, len(encrypted_numeric), 3): - encrypted += number2Character[encrypted_numeric[i : i + 3]] + 
encrypted += number_to_character[encrypted_numeric[i : i + 3]] return encrypted -def decryptMessage( +def decrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: - message, alphabet, character2Number, number2Character = __prepare(message, alphabet) + message, alphabet, character_to_number, number_to_character = __prepare( + message, alphabet + ) decrypted_numeric = [] decrypted = "" for i in range(0, len(message) + 1, period): - a, b, c = __decryptPart(message[i : i + period], character2Number) + a, b, c = __decrypt_part(message[i : i + period], character_to_number) for j in range(0, len(a)): decrypted_numeric.append(a[j] + b[j] + c[j]) for each in decrypted_numeric: - decrypted += number2Character[each] + decrypted += number_to_character[each] return decrypted if __name__ == "__main__": msg = "DEFEND THE EAST WALL OF THE CASTLE." - encrypted = encryptMessage(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") - decrypted = decryptMessage(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") + encrypted = encrypt_message(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") + decrypted = decrypt_message(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") print(f"Encrypted: {encrypted}\nDecrypted: {decrypted}") diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py index ed9923a6b..f1f07ddc3 100644 --- a/ciphers/transposition_cipher.py +++ b/ciphers/transposition_cipher.py @@ -14,53 +14,53 @@ def main() -> None: mode = input("Encryption/Decryption [e/d]: ") if mode.lower().startswith("e"): - text = encryptMessage(key, message) + text = encrypt_message(key, message) elif mode.lower().startswith("d"): - text = decryptMessage(key, message) + text = decrypt_message(key, message) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"Output:\n{text + '|'}") -def encryptMessage(key: int, message: str) -> str: +def encrypt_message(key: int, message: str) -> str: """ - >>> encryptMessage(6, 'Harshil Darji') + >>> encrypt_message(6, 'Harshil Darji') 'Hlia rDsahrij' """ - cipherText = [""] * key + cipher_text = [""] * key for col in range(key): pointer = col while pointer < len(message): - cipherText[col] += message[pointer] + cipher_text[col] += message[pointer] pointer += key - return "".join(cipherText) + return "".join(cipher_text) -def decryptMessage(key: int, message: str) -> str: +def decrypt_message(key: int, message: str) -> str: """ - >>> decryptMessage(6, 'Hlia rDsahrij') + >>> decrypt_message(6, 'Hlia rDsahrij') 'Harshil Darji' """ - numCols = math.ceil(len(message) / key) - numRows = key - numShadedBoxes = (numCols * numRows) - len(message) - plainText = [""] * numCols + num_cols = math.ceil(len(message) / key) + num_rows = key + num_shaded_boxes = (num_cols * num_rows) - len(message) + plain_text = [""] * num_cols col = 0 row = 0 for symbol in message: - plainText[col] += symbol + plain_text[col] += symbol col += 1 if ( - (col == numCols) - or (col == numCols - 1) - and (row >= numRows - numShadedBoxes) + (col == num_cols) + or (col == num_cols - 1) + and (row >= num_rows - num_shaded_boxes) ): col = 0 row += 1 - return "".join(plainText) + return "".join(plain_text) if __name__ == "__main__": diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py index 926a1b36a..6296b1e6d 100644 --- a/ciphers/transposition_cipher_encrypt_decrypt_file.py +++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py @@ -2,39 +2,39 @@ import os import sys import time -from . 
import transposition_cipher as transCipher +from . import transposition_cipher as trans_cipher def main() -> None: - inputFile = "Prehistoric Men.txt" - outputFile = "Output.txt" + input_file = "Prehistoric Men.txt" + output_file = "Output.txt" key = int(input("Enter key: ")) mode = input("Encrypt/Decrypt [e/d]: ") - if not os.path.exists(inputFile): - print(f"File {inputFile} does not exist. Quitting...") + if not os.path.exists(input_file): + print(f"File {input_file} does not exist. Quitting...") sys.exit() - if os.path.exists(outputFile): - print(f"Overwrite {outputFile}? [y/n]") + if os.path.exists(output_file): + print(f"Overwrite {output_file}? [y/n]") response = input("> ") if not response.lower().startswith("y"): sys.exit() - startTime = time.time() + start_time = time.time() if mode.lower().startswith("e"): - with open(inputFile) as f: + with open(input_file) as f: content = f.read() - translated = transCipher.encryptMessage(key, content) + translated = trans_cipher.encrypt_message(key, content) elif mode.lower().startswith("d"): - with open(outputFile) as f: + with open(output_file) as f: content = f.read() - translated = transCipher.decryptMessage(key, content) + translated = trans_cipher.decrypt_message(key, content) - with open(outputFile, "w") as outputObj: - outputObj.write(translated) + with open(output_file, "w") as output_obj: + output_obj.write(translated) - totalTime = round(time.time() - startTime, 2) - print(("Done (", totalTime, "seconds )")) + total_time = round(time.time() - start_time, 2) + print(("Done (", total_time, "seconds )")) if __name__ == "__main__": diff --git a/ciphers/vigenere_cipher.py b/ciphers/vigenere_cipher.py index 2e3987708..e76161351 100644 --- a/ciphers/vigenere_cipher.py +++ b/ciphers/vigenere_cipher.py @@ -8,43 +8,43 @@ def main() -> None: if mode.lower().startswith("e"): mode = "encrypt" - translated = encryptMessage(key, message) + translated = encrypt_message(key, message) elif mode.lower().startswith("d"): mode = "decrypt" - translated = decryptMessage(key, message) + translated = decrypt_message(key, message) print(f"\n{mode.title()}ed message:") print(translated) -def encryptMessage(key: str, message: str) -> str: +def encrypt_message(key: str, message: str) -> str: """ - >>> encryptMessage('HDarji', 'This is Harshil Darji from Dharmaj.') + >>> encrypt_message('HDarji', 'This is Harshil Darji from Dharmaj.') 'Akij ra Odrjqqs Gaisq muod Mphumrs.' """ - return translateMessage(key, message, "encrypt") + return translate_message(key, message, "encrypt") -def decryptMessage(key: str, message: str) -> str: +def decrypt_message(key: str, message: str) -> str: """ - >>> decryptMessage('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.') + >>> decrypt_message('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.') 'This is Harshil Darji from Dharmaj.' 
""" - return translateMessage(key, message, "decrypt") + return translate_message(key, message, "decrypt") -def translateMessage(key: str, message: str, mode: str) -> str: +def translate_message(key: str, message: str, mode: str) -> str: translated = [] - keyIndex = 0 + key_index = 0 key = key.upper() for symbol in message: num = LETTERS.find(symbol.upper()) if num != -1: if mode == "encrypt": - num += LETTERS.find(key[keyIndex]) + num += LETTERS.find(key[key_index]) elif mode == "decrypt": - num -= LETTERS.find(key[keyIndex]) + num -= LETTERS.find(key[key_index]) num %= len(LETTERS) @@ -53,9 +53,9 @@ def translateMessage(key: str, message: str, mode: str) -> str: elif symbol.islower(): translated.append(LETTERS[num].lower()) - keyIndex += 1 - if keyIndex == len(key): - keyIndex = 0 + key_index += 1 + if key_index == len(key): + key_index = 0 else: translated.append(symbol) return "".join(translated) diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index 4d3c2c0d2..ddedc3d6d 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -43,10 +43,10 @@ def decompress_data(data_bits: str) -> str: lexicon[curr_string] = last_match_id + "0" if math.log2(index).is_integer(): - newLex = {} + new_lex = {} for curr_key in list(lexicon): - newLex["0" + curr_key] = lexicon.pop(curr_key) - lexicon = newLex + new_lex["0" + curr_key] = lexicon.pop(curr_key) + lexicon = new_lex lexicon[bin(index)[2:]] = last_match_id + "1" index += 1 diff --git a/compression/peak_signal_to_noise_ratio.py b/compression/peak_signal_to_noise_ratio.py index dded2a712..66b18b50b 100644 --- a/compression/peak_signal_to_noise_ratio.py +++ b/compression/peak_signal_to_noise_ratio.py @@ -16,8 +16,8 @@ def psnr(original: float, contrast: float) -> float: mse = np.mean((original - contrast) ** 2) if mse == 0: return 100 - PIXEL_MAX = 255.0 - PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) + PIXEL_MAX = 255.0 # noqa: N806 + PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) # noqa: N806 return PSNR diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index 886ff52ea..7850085f8 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -7,7 +7,7 @@ https://en.wikipedia.org/wiki/Harris_Corner_Detector """ -class Harris_Corner: +class HarrisCorner: def __init__(self, k: float, window_size: int): """ @@ -70,6 +70,6 @@ class Harris_Corner: if __name__ == "__main__": - edge_detect = Harris_Corner(0.04, 3) + edge_detect = HarrisCorner(0.04, 3) color_img, _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img) diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py index f94a12390..61f335a4c 100644 --- a/conversions/binary_to_hexadecimal.py +++ b/conversions/binary_to_hexadecimal.py @@ -17,7 +17,7 @@ def bin_to_hexadecimal(binary_str: str) -> str: ... 
ValueError: Empty string was passed to the function """ - BITS_TO_HEX = { + BITS_TO_HEX = { # noqa: N806 "0000": "0", "0001": "1", "0010": "2", diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index 3c72a7732..e54fa154a 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -66,7 +66,7 @@ def decimal_to_any(num: int, base: int) -> str: if base > 36: raise ValueError("base must be <= 36") # fmt: off - ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', + ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', # noqa: N806, E501 '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L', '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R', '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X', diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py index a77556433..06b759e35 100644 --- a/conversions/prefix_conversions.py +++ b/conversions/prefix_conversions.py @@ -6,7 +6,7 @@ from __future__ import annotations from enum import Enum -class SI_Unit(Enum): +class SIUnit(Enum): yotta = 24 zetta = 21 exa = 18 @@ -29,7 +29,7 @@ class SI_Unit(Enum): yocto = -24 -class Binary_Unit(Enum): +class BinaryUnit(Enum): yotta = 8 zetta = 7 exa = 6 @@ -42,17 +42,17 @@ class Binary_Unit(Enum): def convert_si_prefix( known_amount: float, - known_prefix: str | SI_Unit, - unknown_prefix: str | SI_Unit, + known_prefix: str | SIUnit, + unknown_prefix: str | SIUnit, ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units - >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega) + >>> convert_si_prefix(1, SIUnit.giga, SIUnit.mega) 1000 - >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga) + >>> convert_si_prefix(1, SIUnit.mega, SIUnit.giga) 0.001 - >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo) + >>> convert_si_prefix(1, SIUnit.kilo, SIUnit.kilo) 1 >>> convert_si_prefix(1, 'giga', 'mega') 1000 @@ -60,9 +60,9 @@ def convert_si_prefix( 1000 """ if isinstance(known_prefix, str): - known_prefix = SI_Unit[known_prefix.lower()] + known_prefix = SIUnit[known_prefix.lower()] if isinstance(unknown_prefix, str): - unknown_prefix = SI_Unit[unknown_prefix.lower()] + unknown_prefix = SIUnit[unknown_prefix.lower()] unknown_amount: float = known_amount * ( 10 ** (known_prefix.value - unknown_prefix.value) ) @@ -71,16 +71,16 @@ def convert_si_prefix( def convert_binary_prefix( known_amount: float, - known_prefix: str | Binary_Unit, - unknown_prefix: str | Binary_Unit, + known_prefix: str | BinaryUnit, + unknown_prefix: str | BinaryUnit, ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix - >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega) + >>> convert_binary_prefix(1, BinaryUnit.giga, BinaryUnit.mega) 1024 - >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga) + >>> convert_binary_prefix(1, BinaryUnit.mega, BinaryUnit.giga) 0.0009765625 - >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo) + >>> convert_binary_prefix(1, BinaryUnit.kilo, BinaryUnit.kilo) 1 >>> convert_binary_prefix(1, 'giga', 'mega') 1024 @@ -88,9 +88,9 @@ def convert_binary_prefix( 1024 """ if isinstance(known_prefix, str): - known_prefix = Binary_Unit[known_prefix.lower()] + known_prefix = BinaryUnit[known_prefix.lower()] if isinstance(unknown_prefix, str): - unknown_prefix = Binary_Unit[unknown_prefix.lower()] + 
unknown_prefix = BinaryUnit[unknown_prefix.lower()] unknown_amount: float = known_amount * ( 2 ** ((known_prefix.value - unknown_prefix.value) * 10) ) diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py index 9933e6a78..960d41342 100644 --- a/conversions/roman_numerals.py +++ b/conversions/roman_numerals.py @@ -29,7 +29,7 @@ def int_to_roman(number: int) -> str: >>> all(int_to_roman(value) == key for key, value in tests.items()) True """ - ROMAN = [ + ROMAN = [ # noqa: N806 (1000, "M"), (900, "CM"), (500, "D"), diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 1ab13777b..2f4bd60d9 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -12,7 +12,7 @@ import random from typing import Any -class my_queue: +class MyQueue: def __init__(self) -> None: self.data: list[Any] = [] self.head: int = 0 @@ -39,20 +39,20 @@ class my_queue: print(self.data[self.head : self.tail]) -class my_node: +class MyNode: def __init__(self, data: Any) -> None: self.data = data - self.left: my_node | None = None - self.right: my_node | None = None + self.left: MyNode | None = None + self.right: MyNode | None = None self.height: int = 1 def get_data(self) -> Any: return self.data - def get_left(self) -> my_node | None: + def get_left(self) -> MyNode | None: return self.left - def get_right(self) -> my_node | None: + def get_right(self) -> MyNode | None: return self.right def get_height(self) -> int: @@ -62,11 +62,11 @@ class my_node: self.data = data return - def set_left(self, node: my_node | None) -> None: + def set_left(self, node: MyNode | None) -> None: self.left = node return - def set_right(self, node: my_node | None) -> None: + def set_right(self, node: MyNode | None) -> None: self.right = node return @@ -75,7 +75,7 @@ class my_node: return -def get_height(node: my_node | None) -> int: +def get_height(node: MyNode | None) -> int: if node is None: return 0 return node.get_height() @@ -87,7 +87,7 @@ def my_max(a: int, b: int) -> int: return b -def right_rotation(node: my_node) -> my_node: +def right_rotation(node: MyNode) -> MyNode: r""" A B / \ / \ @@ -110,7 +110,7 @@ def right_rotation(node: my_node) -> my_node: return ret -def left_rotation(node: my_node) -> my_node: +def left_rotation(node: MyNode) -> MyNode: """ a mirror symmetry rotation of the left_rotation """ @@ -126,7 +126,7 @@ def left_rotation(node: my_node) -> my_node: return ret -def lr_rotation(node: my_node) -> my_node: +def lr_rotation(node: MyNode) -> MyNode: r""" A A Br / \ / \ / \ @@ -143,16 +143,16 @@ def lr_rotation(node: my_node) -> my_node: return right_rotation(node) -def rl_rotation(node: my_node) -> my_node: +def rl_rotation(node: MyNode) -> MyNode: right_child = node.get_right() assert right_child is not None node.set_right(right_rotation(right_child)) return left_rotation(node) -def insert_node(node: my_node | None, data: Any) -> my_node | None: +def insert_node(node: MyNode | None, data: Any) -> MyNode | None: if node is None: - return my_node(data) + return MyNode(data) if data < node.get_data(): node.set_left(insert_node(node.get_left(), data)) if ( @@ -180,7 +180,7 @@ def insert_node(node: my_node | None, data: Any) -> my_node | None: return node -def get_rightMost(root: my_node) -> Any: +def get_right_most(root: MyNode) -> Any: while True: right_child = root.get_right() if right_child is None: @@ -189,7 +189,7 @@ def get_rightMost(root: my_node) -> Any: return root.get_data() -def get_leftMost(root: my_node) -> 
Any: +def get_left_most(root: MyNode) -> Any: while True: left_child = root.get_left() if left_child is None: @@ -198,12 +198,12 @@ def get_leftMost(root: my_node) -> Any: return root.get_data() -def del_node(root: my_node, data: Any) -> my_node | None: +def del_node(root: MyNode, data: Any) -> MyNode | None: left_child = root.get_left() right_child = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: - temp_data = get_leftMost(right_child) + temp_data = get_left_most(right_child) root.set_data(temp_data) root.set_right(del_node(right_child, temp_data)) elif left_child is not None: @@ -276,7 +276,7 @@ class AVLtree: """ def __init__(self) -> None: - self.root: my_node | None = None + self.root: MyNode | None = None def get_height(self) -> int: return get_height(self.root) @@ -296,7 +296,7 @@ class AVLtree: self, ) -> str: # a level traversale, gives a more intuitive look on the tree output = "" - q = my_queue() + q = MyQueue() q.push(self.root) layer = self.get_height() if layer == 0: diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py index 94329cb43..050dfe0a6 100644 --- a/data_structures/binary_tree/lazy_segment_tree.py +++ b/data_structures/binary_tree/lazy_segment_tree.py @@ -37,14 +37,14 @@ class SegmentTree: return idx * 2 + 1 def build( - self, idx: int, left_element: int, right_element: int, A: list[int] + self, idx: int, left_element: int, right_element: int, a: list[int] ) -> None: if left_element == right_element: - self.segment_tree[idx] = A[left_element - 1] + self.segment_tree[idx] = a[left_element - 1] else: mid = (left_element + right_element) // 2 - self.build(self.left(idx), left_element, mid, A) - self.build(self.right(idx), mid + 1, right_element, A) + self.build(self.left(idx), left_element, mid, a) + self.build(self.right(idx), mid + 1, right_element, a) self.segment_tree[idx] = max( self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)] ) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 10451ae68..949a3ecdd 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -2,8 +2,8 @@ import math class SegmentTree: - def __init__(self, A): - self.N = len(A) + def __init__(self, a): + self.N = len(a) self.st = [0] * ( 4 * self.N ) # approximate the overall size of segment tree with array N @@ -58,11 +58,11 @@ class SegmentTree: q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b) return max(q1, q2) - def showData(self): - showList = [] + def show_data(self): + show_list = [] for i in range(1, N + 1): - showList += [self.query(i, i)] - print(showList) + show_list += [self.query(i, i)] + print(show_list) if __name__ == "__main__": @@ -75,4 +75,4 @@ if __name__ == "__main__": segt.update(1, 3, 111) print(segt.query(1, 15)) segt.update(7, 8, 235) - segt.showData() + segt.show_data() diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index 0526b139b..a53ac566e 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -121,28 +121,28 @@ def inorder(root: Node | None) -> None: inorder(root.right) -def interactTreap(root: Node | None, args: str) -> Node | None: +def interact_treap(root: Node | None, args: str) -> Node | None: """ Commands: + value to add value into treap - value to erase all nodes with value - >>> root = interactTreap(None, "+1") + >>> root = 
interact_treap(None, "+1") >>> inorder(root) 1, - >>> root = interactTreap(root, "+3 +5 +17 +19 +2 +16 +4 +0") + >>> root = interact_treap(root, "+3 +5 +17 +19 +2 +16 +4 +0") >>> inorder(root) 0,1,2,3,4,5,16,17,19, - >>> root = interactTreap(root, "+4 +4 +4") + >>> root = interact_treap(root, "+4 +4 +4") >>> inorder(root) 0,1,2,3,4,4,4,4,5,16,17,19, - >>> root = interactTreap(root, "-0") + >>> root = interact_treap(root, "-0") >>> inorder(root) 1,2,3,4,4,4,4,5,16,17,19, - >>> root = interactTreap(root, "-4") + >>> root = interact_treap(root, "-4") >>> inorder(root) 1,2,3,5,16,17,19, - >>> root = interactTreap(root, "=0") + >>> root = interact_treap(root, "=0") Unknown command """ for arg in args.split(): @@ -168,7 +168,7 @@ def main() -> None: args = input() while args != "q": - root = interactTreap(root, args) + root = interact_treap(root, args) print(root) args = input() diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 9265c4839..d8975eb2d 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -52,14 +52,14 @@ class MinHeap: return self.heap_dict[key] def build_heap(self, array): - lastIdx = len(array) - 1 - startFrom = self.get_parent_idx(lastIdx) + last_idx = len(array) - 1 + start_from = self.get_parent_idx(last_idx) for idx, i in enumerate(array): self.idx_of_element[i] = idx self.heap_dict[i.name] = i.val - for i in range(startFrom, -1, -1): + for i in range(start_from, -1, -1): self.sift_down(i, array) return array @@ -123,12 +123,12 @@ class MinHeap: def is_empty(self): return True if len(self.heap) == 0 else False - def decrease_key(self, node, newValue): + def decrease_key(self, node, new_value): assert ( - self.heap[self.idx_of_element[node]].val > newValue + self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" - node.val = newValue - self.heap_dict[node.name] = newValue + node.val = new_value + self.heap_dict[node.name] = new_value self.sift_up(self.idx_of_element[node]) @@ -143,7 +143,7 @@ e = Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array -myMinHeap = MinHeap([r, b, a, x, e]) +my_min_heap = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) @@ -154,14 +154,14 @@ myMinHeap = MinHeap([r, b, a, x, e]) # Before print("Min Heap - before decrease key") -for i in myMinHeap.heap: +for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") -myMinHeap.decrease_key(b, -17) +my_min_heap.decrease_key(b, -17) # After -for i in myMinHeap.heap: +for i in my_min_heap.heap: print(i) if __name__ == "__main__": diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index d3dc9e3e9..6f6d5d57e 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -15,9 +15,9 @@ Enter an Infix Equation = a + b ^c """ -def infix_2_postfix(Infix): - Stack = [] - Postfix = [] +def infix_2_postfix(infix): + stack = [] + post_fix = [] priority = { "^": 3, "*": 2, @@ -26,7 +26,7 @@ def infix_2_postfix(Infix): "+": 1, "-": 1, } # Priority of each operator - print_width = len(Infix) if (len(Infix) > 7) else 7 + print_width = len(infix) if (len(infix) > 7) else 7 # Print table header for output print( @@ -37,52 +37,52 @@ def infix_2_postfix(Infix): ) print("-" * (print_width * 3 + 7)) - for x in Infix: + for x in infix: if x.isalpha() or x.isdigit(): - 
Postfix.append(x) # if x is Alphabet / Digit, add it to Postfix + post_fix.append(x) # if x is Alphabet / Digit, add it to Postfix elif x == "(": - Stack.append(x) # if x is "(" push to Stack + stack.append(x) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered - while Stack[-1] != "(": - Postfix.append(Stack.pop()) # Pop stack & add the content to Postfix - Stack.pop() + while stack[-1] != "(": + post_fix.append(stack.pop()) # Pop stack & add the content to Postfix + stack.pop() else: - if len(Stack) == 0: - Stack.append(x) # If stack is empty, push x to stack + if len(stack) == 0: + stack.append(x) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack - while len(Stack) > 0 and priority[x] <= priority[Stack[-1]]: - Postfix.append(Stack.pop()) # pop stack & add to Postfix - Stack.append(x) # push x to stack + while len(stack) > 0 and priority[x] <= priority[stack[-1]]: + post_fix.append(stack.pop()) # pop stack & add to Postfix + stack.append(x) # push x to stack print( x.center(8), - ("".join(Stack)).ljust(print_width), - ("".join(Postfix)).ljust(print_width), + ("".join(stack)).ljust(print_width), + ("".join(post_fix)).ljust(print_width), sep=" | ", ) # Output in tabular format - while len(Stack) > 0: # while stack is not empty - Postfix.append(Stack.pop()) # pop stack & add to Postfix + while len(stack) > 0: # while stack is not empty + post_fix.append(stack.pop()) # pop stack & add to Postfix print( " ".center(8), - ("".join(Stack)).ljust(print_width), - ("".join(Postfix)).ljust(print_width), + ("".join(stack)).ljust(print_width), + ("".join(post_fix)).ljust(print_width), sep=" | ", ) # Output in tabular format - return "".join(Postfix) # return Postfix as str + return "".join(post_fix) # return Postfix as str -def infix_2_prefix(Infix): - Infix = list(Infix[::-1]) # reverse the infix equation +def infix_2_prefix(infix): + infix = list(infix[::-1]) # reverse the infix equation - for i in range(len(Infix)): - if Infix[i] == "(": - Infix[i] = ")" # change "(" to ")" - elif Infix[i] == ")": - Infix[i] = "(" # change ")" to "(" + for i in range(len(infix)): + if infix[i] == "(": + infix[i] = ")" # change "(" to ")" + elif infix[i] == ")": + infix[i] = "(" # change ")" to "(" - return (infix_2_postfix("".join(Infix)))[ + return (infix_2_postfix("".join(infix)))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py index 574acac71..28128f82e 100644 --- a/data_structures/stacks/postfix_evaluation.py +++ b/data_structures/stacks/postfix_evaluation.py @@ -20,49 +20,49 @@ Enter a Postfix Equation (space separated) = 5 6 9 * + import operator as op -def Solve(Postfix): - Stack = [] - Div = lambda x, y: int(x / y) # noqa: E731 integer division operation - Opr = { +def solve(post_fix): + stack = [] + div = lambda x, y: int(x / y) # noqa: E731 integer division operation + opr = { "^": op.pow, "*": op.mul, - "/": Div, + "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ") - print("-" * (30 + len(Postfix))) + print("-" * (30 + len(post_fix))) - for x in Postfix: + for x in post_fix: if x.isdigit(): # if x in digit - Stack.append(x) # append x to stack + stack.append(x) # append x to stack # output in tabular format - print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), 
sep=" | ") + print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ") else: - B = Stack.pop() # pop stack + b = stack.pop() # pop stack # output in tabular format - print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ") + print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ") - A = Stack.pop() # pop stack + a = stack.pop() # pop stack # output in tabular format - print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ") + print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ") - Stack.append( - str(Opr[x](int(A), int(B))) + stack.append( + str(opr[x](int(a), int(b))) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8), - ("push(" + A + x + B + ")").ljust(12), - ",".join(Stack), + ("push(" + a + x + b + ")").ljust(12), + ",".join(stack), sep=" | ", ) - return int(Stack[0]) + return int(stack[0]) if __name__ == "__main__": Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") - print("\n\tResult = ", Solve(Postfix)) + print("\n\tResult = ", solve(Postfix)) diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py index cc2adfdd6..19a81bd36 100644 --- a/data_structures/stacks/stock_span_problem.py +++ b/data_structures/stacks/stock_span_problem.py @@ -8,7 +8,7 @@ on the current day is less than or equal to its price on the given day. """ -def calculateSpan(price, S): +def calculation_span(price, s): n = len(price) # Create a stack and push index of fist element to it @@ -16,7 +16,7 @@ def calculateSpan(price, S): st.append(0) # Span value of first element is always 1 - S[0] = 1 + s[0] = 1 # Calculate span values for rest of the elements for i in range(1, n): @@ -30,14 +30,14 @@ def calculateSpan(price, S): # than all elements on left of it, i.e. price[0], # price[1], ..price[i-1]. 
Else the price[i] is # greater than elements after top of stack - S[i] = i + 1 if len(st) <= 0 else (i - st[0]) + s[i] = i + 1 if len(st) <= 0 else (i - st[0]) # Push this element to stack st.append(i) # A utility function to print elements of array -def printArray(arr, n): +def print_array(arr, n): for i in range(0, n): print(arr[i], end=" ") @@ -47,7 +47,7 @@ price = [10, 4, 5, 90, 120, 80] S = [0 for i in range(len(price) + 1)] # Fill the span values in array S[] -calculateSpan(price, S) +calculation_span(price, S) # Print the calculated span values -printArray(S, len(price)) +print_array(S, len(price)) diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index 295b4d825..a83035526 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -43,33 +43,33 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): - W = sobel_grad[row, col - 1] - E = sobel_grad[row, col + 1] - if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E: + w = sobel_grad[row, col - 1] + e = sobel_grad[row, col + 1] + if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e: dst[row, col] = sobel_grad[row, col] elif (PI / 8 <= direction < 3 * PI / 8) or ( 9 * PI / 8 <= direction < 11 * PI / 8 ): - SW = sobel_grad[row + 1, col - 1] - NE = sobel_grad[row - 1, col + 1] - if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE: + sw = sobel_grad[row + 1, col - 1] + ne = sobel_grad[row - 1, col + 1] + if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne: dst[row, col] = sobel_grad[row, col] elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( 11 * PI / 8 <= direction < 13 * PI / 8 ): - N = sobel_grad[row - 1, col] - S = sobel_grad[row + 1, col] - if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S: + n = sobel_grad[row - 1, col] + s = sobel_grad[row + 1, col] + if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s: dst[row, col] = sobel_grad[row, col] elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( 13 * PI / 8 <= direction < 15 * PI / 8 ): - NW = sobel_grad[row - 1, col - 1] - SE = sobel_grad[row + 1, col + 1] - if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE: + nw = sobel_grad[row - 1, col - 1] + se = sobel_grad[row + 1, col + 1] + if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se: dst[row, col] = sobel_grad[row, col] """ diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 76ae4dd20..1afa01d3f 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -46,16 +46,16 @@ def bilateral_filter( kernel_size: int, ) -> np.ndarray: img2 = np.zeros(img.shape) - gaussKer = get_gauss_kernel(kernel_size, spatial_variance) - sizeX, sizeY = img.shape - for i in range(kernel_size // 2, sizeX - kernel_size // 2): - for j in range(kernel_size // 2, sizeY - kernel_size // 2): + gauss_ker = get_gauss_kernel(kernel_size, spatial_variance) + size_x, size_y = img.shape + for i in range(kernel_size // 2, size_x - kernel_size // 2): + for j in range(kernel_size // 2, size_y - kernel_size // 2): - imgS = get_slice(img, i, j, kernel_size) - imgI = imgS - imgS[kernel_size // 2, kernel_size // 2] - imgIG = vec_gaussian(imgI, intensity_variance) - weights = np.multiply(gaussKer, imgIG) - vals = np.multiply(imgS, weights) + img_s = 
get_slice(img, i, j, kernel_size) + img_i = img_s - img_s[kernel_size // 2, kernel_size // 2] + img_ig = vec_gaussian(img_i, intensity_variance) + weights = np.multiply(gauss_ker, img_ig) + vals = np.multiply(img_s, weights) val = np.sum(vals) / np.sum(weights) img2[i, j] = val return img2 diff --git a/digital_image_processing/histogram_equalization/histogram_stretch.py b/digital_image_processing/histogram_equalization/histogram_stretch.py index 0288a2c1f..5ea7773e3 100644 --- a/digital_image_processing/histogram_equalization/histogram_stretch.py +++ b/digital_image_processing/histogram_equalization/histogram_stretch.py @@ -11,7 +11,7 @@ import numpy as np from matplotlib import pyplot as plt -class contrastStretch: +class ConstantStretch: def __init__(self): self.img = "" self.original_image = "" @@ -45,10 +45,10 @@ class contrastStretch: self.img[j][i] = self.last_list[num] cv2.imwrite("output_data/output.jpg", self.img) - def plotHistogram(self): + def plot_histogram(self): plt.hist(self.img.ravel(), 256, [0, 256]) - def showImage(self): + def show_image(self): cv2.imshow("Output-Image", self.img) cv2.imshow("Input-Image", self.original_image) cv2.waitKey(5000) @@ -57,7 +57,7 @@ class contrastStretch: if __name__ == "__main__": file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg") - stretcher = contrastStretch() + stretcher = ConstantStretch() stretcher.stretch(file_path) - stretcher.plotHistogram() - stretcher.showImage() + stretcher.plot_histogram() + stretcher.show_image() diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 033334af8..2f8fdc066 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -104,72 +104,72 @@ class IndexCalculation: #RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"] """ - def __init__(self, red=None, green=None, blue=None, redEdge=None, nir=None): + def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None): # print("Numpy version: " + np.__version__) - self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir) + self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir) - def setMatrices(self, red=None, green=None, blue=None, redEdge=None, nir=None): + def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None): if red is not None: self.red = red if green is not None: self.green = green if blue is not None: self.blue = blue - if redEdge is not None: - self.redEdge = redEdge + if red_edge is not None: + self.redEdge = red_edge if nir is not None: self.nir = nir return True def calculation( - self, index="", red=None, green=None, blue=None, redEdge=None, nir=None + self, index="", red=None, green=None, blue=None, red_edge=None, nir=None ): """ performs the calculation of the index with the values instantiated in the class :str index: abbreviation of index name to perform """ - self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir) + self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir) funcs = { - "ARVI2": self.ARVI2, - "CCCI": self.CCCI, - "CVI": self.CVI, - "GLI": self.GLI, - "NDVI": self.NDVI, - "BNDVI": self.BNDVI, - "redEdgeNDVI": self.redEdgeNDVI, - "GNDVI": self.GNDVI, - "GBNDVI": self.GBNDVI, - "GRNDVI": self.GRNDVI, - "RBNDVI": self.RBNDVI, - "PNDVI": self.PNDVI, - "ATSAVI": self.ATSAVI, - "BWDRVI": self.BWDRVI, - "CIgreen": self.CIgreen, - "CIrededge": self.CIrededge, - 
"CI": self.CI, - "CTVI": self.CTVI, - "GDVI": self.GDVI, - "EVI": self.EVI, - "GEMI": self.GEMI, - "GOSAVI": self.GOSAVI, - "GSAVI": self.GSAVI, - "Hue": self.Hue, - "IVI": self.IVI, - "IPVI": self.IPVI, - "I": self.I, - "RVI": self.RVI, - "MRVI": self.MRVI, - "MSAVI": self.MSAVI, - "NormG": self.NormG, - "NormNIR": self.NormNIR, - "NormR": self.NormR, - "NGRDI": self.NGRDI, - "RI": self.RI, - "S": self.S, - "IF": self.IF, - "DVI": self.DVI, - "TVI": self.TVI, - "NDRE": self.NDRE, + "ARVI2": self.arv12, + "CCCI": self.ccci, + "CVI": self.cvi, + "GLI": self.gli, + "NDVI": self.ndvi, + "BNDVI": self.bndvi, + "redEdgeNDVI": self.red_edge_ndvi, + "GNDVI": self.gndvi, + "GBNDVI": self.gbndvi, + "GRNDVI": self.grndvi, + "RBNDVI": self.rbndvi, + "PNDVI": self.pndvi, + "ATSAVI": self.atsavi, + "BWDRVI": self.bwdrvi, + "CIgreen": self.ci_green, + "CIrededge": self.ci_rededge, + "CI": self.ci, + "CTVI": self.ctvi, + "GDVI": self.gdvi, + "EVI": self.evi, + "GEMI": self.gemi, + "GOSAVI": self.gosavi, + "GSAVI": self.gsavi, + "Hue": self.hue, + "IVI": self.ivi, + "IPVI": self.ipvi, + "I": self.i, + "RVI": self.rvi, + "MRVI": self.mrvi, + "MSAVI": self.m_savi, + "NormG": self.norm_g, + "NormNIR": self.norm_nir, + "NormR": self.norm_r, + "NGRDI": self.ngrdi, + "RI": self.ri, + "S": self.s, + "IF": self._if, + "DVI": self.dvi, + "TVI": self.tvi, + "NDRE": self.ndre, } try: @@ -178,7 +178,7 @@ class IndexCalculation: print("Index not in the list!") return False - def ARVI2(self): + def arv12(self): """ Atmospherically Resistant Vegetation Index 2 https://www.indexdatabase.de/db/i-single.php?id=396 @@ -187,7 +187,7 @@ class IndexCalculation: """ return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) - def CCCI(self): + def ccci(self): """ Canopy Chlorophyll Content Index https://www.indexdatabase.de/db/i-single.php?id=224 @@ -197,7 +197,7 @@ class IndexCalculation: (self.nir - self.red) / (self.nir + self.red) ) - def CVI(self): + def cvi(self): """ Chlorophyll vegetation index https://www.indexdatabase.de/db/i-single.php?id=391 @@ -205,7 +205,7 @@ class IndexCalculation: """ return self.nir * (self.red / (self.green**2)) - def GLI(self): + def gli(self): """ self.green leaf index https://www.indexdatabase.de/db/i-single.php?id=375 @@ -215,7 +215,7 @@ class IndexCalculation: 2 * self.green + self.red + self.blue ) - def NDVI(self): + def ndvi(self): """ Normalized Difference self.nir/self.red Normalized Difference Vegetation Index, Calibrated NDVI - CDVI @@ -224,7 +224,7 @@ class IndexCalculation: """ return (self.nir - self.red) / (self.nir + self.red) - def BNDVI(self): + def bndvi(self): """ Normalized Difference self.nir/self.blue self.blue-normalized difference vegetation index @@ -233,7 +233,7 @@ class IndexCalculation: """ return (self.nir - self.blue) / (self.nir + self.blue) - def redEdgeNDVI(self): + def red_edge_ndvi(self): """ Normalized Difference self.rededge/self.red https://www.indexdatabase.de/db/i-single.php?id=235 @@ -241,7 +241,7 @@ class IndexCalculation: """ return (self.redEdge - self.red) / (self.redEdge + self.red) - def GNDVI(self): + def gndvi(self): """ Normalized Difference self.nir/self.green self.green NDVI https://www.indexdatabase.de/db/i-single.php?id=401 @@ -249,7 +249,7 @@ class IndexCalculation: """ return (self.nir - self.green) / (self.nir + self.green) - def GBNDVI(self): + def gbndvi(self): """ self.green-self.blue NDVI https://www.indexdatabase.de/db/i-single.php?id=186 @@ -259,7 +259,7 @@ class IndexCalculation: self.nir + (self.green + self.blue) 
) - def GRNDVI(self): + def grndvi(self): """ self.green-self.red NDVI https://www.indexdatabase.de/db/i-single.php?id=185 @@ -269,7 +269,7 @@ class IndexCalculation: self.nir + (self.green + self.red) ) - def RBNDVI(self): + def rbndvi(self): """ self.red-self.blue NDVI https://www.indexdatabase.de/db/i-single.php?id=187 @@ -277,7 +277,7 @@ class IndexCalculation: """ return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) - def PNDVI(self): + def pndvi(self): """ Pan NDVI https://www.indexdatabase.de/db/i-single.php?id=188 @@ -287,7 +287,7 @@ class IndexCalculation: self.nir + (self.green + self.red + self.blue) ) - def ATSAVI(self, X=0.08, a=1.22, b=0.03): + def atsavi(self, x=0.08, a=1.22, b=0.03): """ Adjusted transformed soil-adjusted VI https://www.indexdatabase.de/db/i-single.php?id=209 @@ -295,10 +295,10 @@ class IndexCalculation: """ return a * ( (self.nir - a * self.red - b) - / (a * self.nir + self.red - a * b + X * (1 + a**2)) + / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) - def BWDRVI(self): + def bwdrvi(self): """ self.blue-wide dynamic range vegetation index https://www.indexdatabase.de/db/i-single.php?id=136 @@ -306,7 +306,7 @@ class IndexCalculation: """ return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) - def CIgreen(self): + def ci_green(self): """ Chlorophyll Index self.green https://www.indexdatabase.de/db/i-single.php?id=128 @@ -314,7 +314,7 @@ class IndexCalculation: """ return (self.nir / self.green) - 1 - def CIrededge(self): + def ci_rededge(self): """ Chlorophyll Index self.redEdge https://www.indexdatabase.de/db/i-single.php?id=131 @@ -322,7 +322,7 @@ class IndexCalculation: """ return (self.nir / self.redEdge) - 1 - def CI(self): + def ci(self): """ Coloration Index https://www.indexdatabase.de/db/i-single.php?id=11 @@ -330,16 +330,16 @@ class IndexCalculation: """ return (self.red - self.blue) / self.red - def CTVI(self): + def ctvi(self): """ Corrected Transformed Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=244 :return: index """ - ndvi = self.NDVI() + ndvi = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2)) - def GDVI(self): + def gdvi(self): """ Difference self.nir/self.green self.green Difference Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=27 @@ -347,7 +347,7 @@ class IndexCalculation: """ return self.nir - self.green - def EVI(self): + def evi(self): """ Enhanced Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=16 @@ -357,7 +357,7 @@ class IndexCalculation: (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) - def GEMI(self): + def gemi(self): """ Global Environment Monitoring Index https://www.indexdatabase.de/db/i-single.php?id=25 @@ -368,25 +368,25 @@ class IndexCalculation: ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) - def GOSAVI(self, Y=0.16): + def gosavi(self, y=0.16): """ self.green Optimized Soil Adjusted Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=29 mit Y = 0,16 :return: index """ - return (self.nir - self.green) / (self.nir + self.green + Y) + return (self.nir - self.green) / (self.nir + self.green + y) - def GSAVI(self, L=0.5): + def gsavi(self, n=0.5): """ self.green Soil Adjusted Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=31 - mit L = 0,5 + mit N = 0,5 :return: index """ - return ((self.nir - self.green) / (self.nir + self.green + L)) * (1 + L) + return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 
+ n) - def Hue(self): + def hue(self): """ Hue https://www.indexdatabase.de/db/i-single.php?id=34 @@ -396,7 +396,7 @@ class IndexCalculation: ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) - def IVI(self, a=None, b=None): + def ivi(self, a=None, b=None): """ Ideal vegetation index https://www.indexdatabase.de/db/i-single.php?id=276 @@ -406,15 +406,15 @@ class IndexCalculation: """ return (self.nir - b) / (a * self.red) - def IPVI(self): + def ipvi(self): """ Infraself.red percentage vegetation index https://www.indexdatabase.de/db/i-single.php?id=35 :return: index """ - return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1) + return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) - def I(self): # noqa: E741,E743 + def i(self): # noqa: E741,E743 """ Intensity https://www.indexdatabase.de/db/i-single.php?id=36 @@ -422,7 +422,7 @@ class IndexCalculation: """ return (self.red + self.green + self.blue) / 30.5 - def RVI(self): + def rvi(self): """ Ratio-Vegetation-Index http://www.seos-project.eu/modules/remotesensing/remotesensing-c03-s01-p01.html @@ -430,15 +430,15 @@ class IndexCalculation: """ return self.nir / self.red - def MRVI(self): + def mrvi(self): """ Modified Normalized Difference Vegetation Index RVI https://www.indexdatabase.de/db/i-single.php?id=275 :return: index """ - return (self.RVI() - 1) / (self.RVI() + 1) + return (self.rvi() - 1) / (self.rvi() + 1) - def MSAVI(self): + def m_savi(self): """ Modified Soil Adjusted Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=44 @@ -449,7 +449,7 @@ class IndexCalculation: - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 - def NormG(self): + def norm_g(self): """ Norm G https://www.indexdatabase.de/db/i-single.php?id=50 @@ -457,7 +457,7 @@ class IndexCalculation: """ return self.green / (self.nir + self.red + self.green) - def NormNIR(self): + def norm_nir(self): """ Norm self.nir https://www.indexdatabase.de/db/i-single.php?id=51 @@ -465,7 +465,7 @@ class IndexCalculation: """ return self.nir / (self.nir + self.red + self.green) - def NormR(self): + def norm_r(self): """ Norm R https://www.indexdatabase.de/db/i-single.php?id=52 @@ -473,7 +473,7 @@ class IndexCalculation: """ return self.red / (self.nir + self.red + self.green) - def NGRDI(self): + def ngrdi(self): """ Normalized Difference self.green/self.red Normalized self.green self.red difference index, Visible Atmospherically Resistant Indices self.green @@ -483,7 +483,7 @@ class IndexCalculation: """ return (self.green - self.red) / (self.green + self.red) - def RI(self): + def ri(self): """ Normalized Difference self.red/self.green self.redness Index https://www.indexdatabase.de/db/i-single.php?id=74 @@ -491,7 +491,7 @@ class IndexCalculation: """ return (self.red - self.green) / (self.red + self.green) - def S(self): + def s(self): """ Saturation https://www.indexdatabase.de/db/i-single.php?id=77 @@ -501,7 +501,7 @@ class IndexCalculation: min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) return (max - min) / max - def IF(self): + def _if(self): """ Shape Index https://www.indexdatabase.de/db/i-single.php?id=79 @@ -509,7 +509,7 @@ class IndexCalculation: """ return (2 * self.red - self.green - self.blue) / (self.green - self.blue) - def DVI(self): + def dvi(self): """ Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index Number (VIN) @@ -518,15 +518,15 @@ class IndexCalculation: """ return self.nir / self.red - def TVI(self): + def 
tvi(self): """ Transformed Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=98 :return: index """ - return (self.NDVI() + 0.5) ** (1 / 2) + return (self.ndvi() + 0.5) ** (1 / 2) - def NDRE(self): + def ndre(self): return (self.nir - self.redEdge) / (self.nir + self.redEdge) diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 1f42fddf2..fdcebfdad 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -62,8 +62,8 @@ def test_gen_gaussian_kernel_filter(): def test_convolve_filter(): # laplace diagonals - Laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) - res = conv.img_convolve(gray, Laplace).astype(uint8) + laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) + res = conv.img_convolve(gray, laplace).astype(uint8) assert res.any() diff --git a/divide_and_conquer/inversions.py b/divide_and_conquer/inversions.py index e20d35dac..35f78fe5c 100644 --- a/divide_and_conquer/inversions.py +++ b/divide_and_conquer/inversions.py @@ -63,18 +63,18 @@ def count_inversions_recursive(arr): if len(arr) <= 1: return arr, 0 mid = len(arr) // 2 - P = arr[0:mid] - Q = arr[mid:] + p = arr[0:mid] + q = arr[mid:] - A, inversion_p = count_inversions_recursive(P) - B, inversions_q = count_inversions_recursive(Q) - C, cross_inversions = _count_cross_inversions(A, B) + a, inversion_p = count_inversions_recursive(p) + b, inversions_q = count_inversions_recursive(q) + c, cross_inversions = _count_cross_inversions(a, b) num_inversions = inversion_p + inversions_q + cross_inversions - return C, num_inversions + return c, num_inversions -def _count_cross_inversions(P, Q): +def _count_cross_inversions(p, q): """ Counts the inversions across two sorted arrays. And combine the two arrays into one sorted array @@ -96,26 +96,26 @@ def _count_cross_inversions(P, Q): ([1, 2, 3, 3, 4, 5], 0) """ - R = [] + r = [] i = j = num_inversion = 0 - while i < len(P) and j < len(Q): - if P[i] > Q[j]: + while i < len(p) and j < len(q): + if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. - num_inversion += len(P) - i - R.append(Q[j]) + num_inversion += len(p) - i + r.append(q[j]) j += 1 else: - R.append(P[i]) + r.append(p[i]) i += 1 - if i < len(P): - R.extend(P[i:]) + if i < len(p): + r.extend(p[i:]) else: - R.extend(Q[j:]) + r.extend(q[j:]) - return R, num_inversion + return r, num_inversion def main(): diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index 2994db5b5..f45250c9c 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -28,7 +28,7 @@ class AssignmentUsingBitmask: # to 1 self.final_mask = (1 << len(task_performed)) - 1 - def CountWaysUtil(self, mask, task_no): + def count_ways_until(self, mask, task_no): # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: @@ -43,7 +43,7 @@ class AssignmentUsingBitmask: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement - total_ways_util = self.CountWaysUtil(mask, task_no + 1) + total_ways_util = self.count_ways_until(mask, task_no + 1) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. 
@@ -56,14 +56,14 @@ class AssignmentUsingBitmask: # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. - total_ways_util += self.CountWaysUtil(mask | (1 << p), task_no + 1) + total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1) # save the value. self.dp[mask][task_no] = total_ways_util return self.dp[mask][task_no] - def countNoOfWays(self, task_performed): + def count_no_of_ways(self, task_performed): # Store the list of persons for each task for i in range(len(task_performed)): @@ -71,7 +71,7 @@ class AssignmentUsingBitmask: self.task[j].append(i) # call the function to fill the DP table, final answer is stored in dp[0][1] - return self.CountWaysUtil(0, 1) + return self.count_ways_until(0, 1) if __name__ == "__main__": @@ -81,7 +81,7 @@ if __name__ == "__main__": # the list of tasks that can be done by M persons. task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]] print( - AssignmentUsingBitmask(task_performed, total_tasks).countNoOfWays( + AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) ) diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py index 56877e0c5..d63e559e3 100644 --- a/dynamic_programming/edit_distance.py +++ b/dynamic_programming/edit_distance.py @@ -21,10 +21,10 @@ class EditDistance: def __init__(self): self.__prepare__() - def __prepare__(self, N=0, M=0): - self.dp = [[-1 for y in range(0, M)] for x in range(0, N)] + def __prepare__(self, n=0, m=0): + self.dp = [[-1 for y in range(0, m)] for x in range(0, n)] - def __solveDP(self, x, y): + def __solve_dp(self, x, y): if x == -1: return y + 1 elif y == -1: @@ -32,30 +32,30 @@ class EditDistance: elif self.dp[x][y] > -1: return self.dp[x][y] else: - if self.A[x] == self.B[y]: - self.dp[x][y] = self.__solveDP(x - 1, y - 1) + if self.a[x] == self.b[y]: + self.dp[x][y] = self.__solve_dp(x - 1, y - 1) else: self.dp[x][y] = 1 + min( - self.__solveDP(x, y - 1), - self.__solveDP(x - 1, y), - self.__solveDP(x - 1, y - 1), + self.__solve_dp(x, y - 1), + self.__solve_dp(x - 1, y), + self.__solve_dp(x - 1, y - 1), ) return self.dp[x][y] - def solve(self, A, B): - if isinstance(A, bytes): - A = A.decode("ascii") + def solve(self, a, b): + if isinstance(a, bytes): + a = a.decode("ascii") - if isinstance(B, bytes): - B = B.decode("ascii") + if isinstance(b, bytes): + b = b.decode("ascii") - self.A = str(A) - self.B = str(B) + self.a = str(a) + self.b = str(b) - self.__prepare__(len(A), len(B)) + self.__prepare__(len(a), len(b)) - return self.__solveDP(len(A) - 1, len(B) - 1) + return self.__solve_dp(len(a) - 1, len(b) - 1) def min_distance_bottom_up(word1: str, word2: str) -> int: diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py index a4b6c6a82..614a3c72a 100644 --- a/dynamic_programming/floyd_warshall.py +++ b/dynamic_programming/floyd_warshall.py @@ -2,41 +2,41 @@ import math class Graph: - def __init__(self, N=0): # a graph with Node 0,1,...,N-1 - self.N = N - self.W = [ - [math.inf for j in range(0, N)] for i in range(0, N) + def __init__(self, n=0): # a graph with Node 0,1,...,N-1 + self.n = n + self.w = [ + [math.inf for j in range(0, n)] for i in range(0, n) ] # adjacency matrix for weight self.dp = [ - [math.inf for j in range(0, N)] for i in range(0, N) + [math.inf for j in range(0, n)] for i in range(0, n) ] # dp[i][j] stores minimum distance from i to j - def addEdge(self, u, v, w): + def add_edge(self, u, v, w): self.dp[u][v] = w def 
floyd_warshall(self): - for k in range(0, self.N): - for i in range(0, self.N): - for j in range(0, self.N): + for k in range(0, self.n): + for i in range(0, self.n): + for j in range(0, self.n): self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j]) - def showMin(self, u, v): + def show_min(self, u, v): return self.dp[u][v] if __name__ == "__main__": graph = Graph(5) - graph.addEdge(0, 2, 9) - graph.addEdge(0, 4, 10) - graph.addEdge(1, 3, 5) - graph.addEdge(2, 3, 7) - graph.addEdge(3, 0, 10) - graph.addEdge(3, 1, 2) - graph.addEdge(3, 2, 1) - graph.addEdge(3, 4, 6) - graph.addEdge(4, 1, 3) - graph.addEdge(4, 2, 4) - graph.addEdge(4, 3, 9) + graph.add_edge(0, 2, 9) + graph.add_edge(0, 4, 10) + graph.add_edge(1, 3, 5) + graph.add_edge(2, 3, 7) + graph.add_edge(3, 0, 10) + graph.add_edge(3, 1, 2) + graph.add_edge(3, 2, 1) + graph.add_edge(3, 4, 6) + graph.add_edge(4, 1, 3) + graph.add_edge(4, 2, 4) + graph.add_edge(4, 3, 9) graph.floyd_warshall() - graph.showMin(1, 4) - graph.showMin(0, 3) + graph.show_min(1, 4) + graph.show_min(0, 3) diff --git a/dynamic_programming/fractional_knapsack.py b/dynamic_programming/fractional_knapsack.py index c74af7ef8..6f7a2a08c 100644 --- a/dynamic_programming/fractional_knapsack.py +++ b/dynamic_programming/fractional_knapsack.py @@ -2,20 +2,20 @@ from bisect import bisect from itertools import accumulate -def fracKnapsack(vl, wt, W, n): +def frac_knapsack(vl, wt, w, n): """ - >>> fracKnapsack([60, 100, 120], [10, 20, 30], 50, 3) + >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) 240.0 """ r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)) vl, wt = [i[0] for i in r], [i[1] for i in r] acc = list(accumulate(wt)) - k = bisect(acc, W) + k = bisect(acc, w) return ( 0 if k == 0 - else sum(vl[:k]) + (W - acc[k - 1]) * (vl[k]) / (wt[k]) + else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k]) ) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 804d7d4f1..9efb60bab 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -7,39 +7,39 @@ Note that only the integer weights 0-1 knapsack problem is solvable """ -def MF_knapsack(i, wt, val, j): +def mf_knapsack(i, wt, val, j): """ This code involves the concept of memory functions. 
Here we solve the subproblems which are needed unlike the below example F is a 2D array with -1s filled up """ - global F # a global dp table for knapsack - if F[i][j] < 0: + global f # a global dp table for knapsack + if f[i][j] < 0: if j < wt[i - 1]: - val = MF_knapsack(i - 1, wt, val, j) + val = mf_knapsack(i - 1, wt, val, j) else: val = max( - MF_knapsack(i - 1, wt, val, j), - MF_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], + mf_knapsack(i - 1, wt, val, j), + mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], ) - F[i][j] = val - return F[i][j] + f[i][j] = val + return f[i][j] -def knapsack(W, wt, val, n): - dp = [[0 for i in range(W + 1)] for j in range(n + 1)] +def knapsack(w, wt, val, n): + dp = [[0 for i in range(w + 1)] for j in range(n + 1)] for i in range(1, n + 1): - for w in range(1, W + 1): + for w in range(1, w + 1): if wt[i - 1] <= w: dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w]) else: dp[i][w] = dp[i - 1][w] - return dp[n][W], dp + return dp[n][w], dp -def knapsack_with_example_solution(W: int, wt: list, val: list): +def knapsack_with_example_solution(w: int, wt: list, val: list): """ Solves the integer weights knapsack problem returns one of the several possible optimal subsets. @@ -90,9 +90,9 @@ def knapsack_with_example_solution(W: int, wt: list, val: list): f"got weight of type {type(wt[i])} at index {i}" ) - optimal_val, dp_table = knapsack(W, wt, val, num_items) + optimal_val, dp_table = knapsack(w, wt, val, num_items) example_optional_set: set = set() - _construct_solution(dp_table, wt, num_items, W, example_optional_set) + _construct_solution(dp_table, wt, num_items, w, example_optional_set) return optimal_val, example_optional_set @@ -136,10 +136,10 @@ if __name__ == "__main__": wt = [4, 3, 2, 3] n = 4 w = 6 - F = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)] + f = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)] optimal_solution, _ = knapsack(w, wt, val, n) print(optimal_solution) - print(MF_knapsack(n, wt, val, w)) # switched the n and w + print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index fdcf3311a..3468fd87d 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - L = [[0] * (n + 1) for _ in range(m + 1)] + l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741 for i in range(1, m + 1): for j in range(1, n + 1): @@ -47,7 +47,7 @@ def longest_common_subsequence(x: str, y: str): else: match = 0 - L[i][j] = max(L[i - 1][j], L[i][j - 1], L[i - 1][j - 1] + match) + l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) seq = "" i, j = m, n @@ -57,17 +57,17 @@ def longest_common_subsequence(x: str, y: str): else: match = 0 - if L[i][j] == L[i - 1][j - 1] + match: + if l[i][j] == l[i - 1][j - 1] + match: if match == 1: seq = x[i - 1] + seq i -= 1 j -= 1 - elif L[i][j] == L[i - 1][j]: + elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 - return L[m][n], seq + return l[m][n], seq if __name__ == "__main__": diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 
a029f9be7..6feed2352 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -34,12 +34,12 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu return array # Else pivot = array[0] - isFound = False + is_found = False i = 1 longest_subseq: list[int] = [] - while not isFound and i < array_length: + while not is_found and i < array_length: if array[i] < pivot: - isFound = True + is_found = True temp_array = [element for element in array[i:] if element >= array[i]] temp_array = longest_subsequence(temp_array) if len(temp_array) > len(longest_subseq): diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py index af536f8bb..5e11d729f 100644 --- a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py +++ b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py @@ -7,7 +7,7 @@ from __future__ import annotations -def CeilIndex(v, l, r, key): # noqa: E741 +def ceil_index(v, l, r, key): # noqa: E741 while r - l > 1: m = (l + r) // 2 if v[m] >= key: @@ -17,16 +17,16 @@ def CeilIndex(v, l, r, key): # noqa: E741 return r -def LongestIncreasingSubsequenceLength(v: list[int]) -> int: +def longest_increasing_subsequence_length(v: list[int]) -> int: """ - >>> LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6]) + >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) 6 - >>> LongestIncreasingSubsequenceLength([]) + >>> longest_increasing_subsequence_length([]) 0 - >>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, - ... 11, 7, 15]) + >>> longest_increasing_subsequence_length([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, + ... 
3, 11, 7, 15]) 6 - >>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1]) + >>> longest_increasing_subsequence_length([5, 4, 3, 2, 1]) 1 """ if len(v) == 0: @@ -44,7 +44,7 @@ def LongestIncreasingSubsequenceLength(v: list[int]) -> int: tail[length] = v[i] length += 1 else: - tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i] + tail[ceil_index(tail, -1, length - 1, v[i])] = v[i] return length diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py index 9411bc704..d612aea7b 100644 --- a/dynamic_programming/matrix_chain_order.py +++ b/dynamic_programming/matrix_chain_order.py @@ -8,34 +8,34 @@ Space Complexity: O(n^2) """ -def MatrixChainOrder(array): - N = len(array) - Matrix = [[0 for x in range(N)] for x in range(N)] - Sol = [[0 for x in range(N)] for x in range(N)] +def matrix_chain_order(array): + n = len(array) + matrix = [[0 for x in range(n)] for x in range(n)] + sol = [[0 for x in range(n)] for x in range(n)] - for ChainLength in range(2, N): - for a in range(1, N - ChainLength + 1): - b = a + ChainLength - 1 + for chain_length in range(2, n): + for a in range(1, n - chain_length + 1): + b = a + chain_length - 1 - Matrix[a][b] = sys.maxsize + matrix[a][b] = sys.maxsize for c in range(a, b): cost = ( - Matrix[a][c] + Matrix[c + 1][b] + array[a - 1] * array[c] * array[b] + matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) - if cost < Matrix[a][b]: - Matrix[a][b] = cost - Sol[a][b] = c - return Matrix, Sol + if cost < matrix[a][b]: + matrix[a][b] = cost + sol[a][b] = c + return matrix, sol # Print order of matrix with Ai as Matrix -def PrintOptimalSolution(OptimalSolution, i, j): +def print_optiomal_solution(optimal_solution, i, j): if i == j: print("A" + str(i), end=" ") else: print("(", end=" ") - PrintOptimalSolution(OptimalSolution, i, OptimalSolution[i][j]) - PrintOptimalSolution(OptimalSolution, OptimalSolution[i][j] + 1, j) + print_optiomal_solution(optimal_solution, i, optimal_solution[i][j]) + print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j) print(")", end=" ") @@ -44,10 +44,10 @@ def main(): n = len(array) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 - Matrix, OptimalSolution = MatrixChainOrder(array) + matrix, optimal_solution = matrix_chain_order(array) - print("No. of Operation required: " + str(Matrix[1][n - 1])) - PrintOptimalSolution(OptimalSolution, 1, n - 1) + print("No. 
of Operation required: " + str(matrix[1][n - 1])) + print_optiomal_solution(optimal_solution, 1, n - 1) if __name__ == "__main__": diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py index 3060010ef..42eca79a9 100644 --- a/dynamic_programming/max_sub_array.py +++ b/dynamic_programming/max_sub_array.py @@ -4,14 +4,14 @@ author : Mayank Kumar Jha (mk9440) from __future__ import annotations -def find_max_sub_array(A, low, high): +def find_max_sub_array(a, low, high): if low == high: - return low, high, A[low] + return low, high, a[low] else: mid = (low + high) // 2 - left_low, left_high, left_sum = find_max_sub_array(A, low, mid) - right_low, right_high, right_sum = find_max_sub_array(A, mid + 1, high) - cross_left, cross_right, cross_sum = find_max_cross_sum(A, low, mid, high) + left_low, left_high, left_sum = find_max_sub_array(a, low, mid) + right_low, right_high, right_sum = find_max_sub_array(a, mid + 1, high) + cross_left, cross_right, cross_sum = find_max_cross_sum(a, low, mid, high) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: @@ -20,18 +20,18 @@ def find_max_sub_array(A, low, high): return cross_left, cross_right, cross_sum -def find_max_cross_sum(A, low, mid, high): +def find_max_cross_sum(a, low, mid, high): left_sum, max_left = -999999999, -1 right_sum, max_right = -999999999, -1 summ = 0 for i in range(mid, low - 1, -1): - summ += A[i] + summ += a[i] if summ > left_sum: left_sum = summ max_left = i summ = 0 for i in range(mid + 1, high + 1): - summ += A[i] + summ += a[i] if summ > right_sum: right_sum = summ max_right = i diff --git a/dynamic_programming/minimum_coin_change.py b/dynamic_programming/minimum_coin_change.py index 2869b5857..848bd654d 100644 --- a/dynamic_programming/minimum_coin_change.py +++ b/dynamic_programming/minimum_coin_change.py @@ -7,7 +7,7 @@ https://www.hackerrank.com/challenges/coin-change/problem """ -def dp_count(S, n): +def dp_count(s, n): """ >>> dp_count([1, 2, 3], 4) 4 @@ -33,7 +33,7 @@ def dp_count(S, n): # Pick all coins one by one and update table[] values # after the index greater than or equal to the value of the # picked coin - for coin_val in S: + for coin_val in s: for j in range(coin_val, n + 1): table[j] += table[j - coin_val] diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index 8fad4ef30..3daa9767f 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,7 +3,7 @@ Partition a set into two subsets such that the difference of subset sums is mini """ -def findMin(arr): +def find_min(arr): n = len(arr) s = sum(arr) diff --git a/dynamic_programming/sum_of_subset.py b/dynamic_programming/sum_of_subset.py index a12177b57..77672b0b8 100644 --- a/dynamic_programming/sum_of_subset.py +++ b/dynamic_programming/sum_of_subset.py @@ -1,25 +1,25 @@ -def isSumSubset(arr, arrLen, requiredSum): +def is_sum_subset(arr, arr_len, required_sum): """ - >>> isSumSubset([2, 4, 6, 8], 4, 5) + >>> is_sum_subset([2, 4, 6, 8], 4, 5) False - >>> isSumSubset([2, 4, 6, 8], 4, 14) + >>> is_sum_subset([2, 4, 6, 8], 4, 14) True """ # a subset value says 1 if that subset sum can be formed else 0 # initially no subsets can be formed hence False/0 - subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)] + subset = [[False for i in range(required_sum + 1)] for i in range(arr_len + 1)] # for each arr value, a sum of 
zero(0) can be formed by not taking any element # hence True/1 - for i in range(arrLen + 1): + for i in range(arr_len + 1): subset[i][0] = True # sum is not zero and set is empty then false - for i in range(1, requiredSum + 1): + for i in range(1, required_sum + 1): subset[0][i] = False - for i in range(1, arrLen + 1): - for j in range(1, requiredSum + 1): + for i in range(1, arr_len + 1): + for j in range(1, required_sum + 1): if arr[i - 1] > j: subset[i][j] = subset[i - 1][j] if arr[i - 1] <= j: @@ -28,7 +28,7 @@ def isSumSubset(arr, arrLen, requiredSum): # uncomment to print the subset # for i in range(arrLen+1): # print(subset[i]) - print(subset[arrLen][requiredSum]) + print(subset[arr_len][required_sum]) if __name__ == "__main__": diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index cf41ffa5f..8be2897c1 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -35,30 +35,30 @@ PROGNAME = "Sierpinski Triangle" points = [[-175, -125], [0, 175], [175, -125]] # size of triangle -def getMid(p1, p2): +def get_mid(p1, p2): return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) # find midpoint def triangle(points, depth): - myPen.up() - myPen.goto(points[0][0], points[0][1]) - myPen.down() - myPen.goto(points[1][0], points[1][1]) - myPen.goto(points[2][0], points[2][1]) - myPen.goto(points[0][0], points[0][1]) + my_pen.up() + my_pen.goto(points[0][0], points[0][1]) + my_pen.down() + my_pen.goto(points[1][0], points[1][1]) + my_pen.goto(points[2][0], points[2][1]) + my_pen.goto(points[0][0], points[0][1]) if depth > 0: triangle( - [points[0], getMid(points[0], points[1]), getMid(points[0], points[2])], + [points[0], get_mid(points[0], points[1]), get_mid(points[0], points[2])], depth - 1, ) triangle( - [points[1], getMid(points[0], points[1]), getMid(points[1], points[2])], + [points[1], get_mid(points[0], points[1]), get_mid(points[1], points[2])], depth - 1, ) triangle( - [points[2], getMid(points[2], points[1]), getMid(points[0], points[2])], + [points[2], get_mid(points[2], points[1]), get_mid(points[0], points[2])], depth - 1, ) @@ -69,8 +69,8 @@ if __name__ == "__main__": "right format for using this script: " "$python fractals.py " ) - myPen = turtle.Turtle() - myPen.ht() - myPen.speed(5) - myPen.pencolor("red") + my_pen = turtle.Turtle() + my_pen.ht() + my_pen.speed(5) + my_pen.pencolor("red") triangle(points, int(sys.argv[1])) diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py index de8ac7f88..b601d2fd1 100644 --- a/geodesy/haversine_distance.py +++ b/geodesy/haversine_distance.py @@ -30,9 +30,9 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl """ # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 - AXIS_B = 6356752.314245 - RADIUS = 6378137 + AXIS_A = 6378137.0 # noqa: N806 + AXIS_B = 6356752.314245 # noqa: N806 + RADIUS = 6378137 # noqa: N806 # Equation parameters # Equation https://en.wikipedia.org/wiki/Haversine_formula#Formulation flattening = (AXIS_A - AXIS_B) / AXIS_A diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index bf8f1b9a5..d36d39953 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -45,9 +45,9 @@ def lamberts_ellipsoidal_distance( # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 - AXIS_B = 6356752.314245 - 
EQUATORIAL_RADIUS = 6378137 + AXIS_A = 6378137.0 # noqa: N806 + AXIS_B = 6356752.314245 # noqa: N806 + EQUATORIAL_RADIUS = 6378137 # noqa: N806 # Equation Parameters # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines @@ -62,22 +62,22 @@ def lamberts_ellipsoidal_distance( sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS # Intermediate P and Q values - P_value = (b_lat1 + b_lat2) / 2 - Q_value = (b_lat2 - b_lat1) / 2 + p_value = (b_lat1 + b_lat2) / 2 + q_value = (b_lat2 - b_lat1) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) - X_numerator = (sin(P_value) ** 2) * (cos(Q_value) ** 2) - X_demonimator = cos(sigma / 2) ** 2 - X_value = (sigma - sin(sigma)) * (X_numerator / X_demonimator) + x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2) + x_demonimator = cos(sigma / 2) ** 2 + x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) - Y_numerator = (cos(P_value) ** 2) * (sin(Q_value) ** 2) - Y_denominator = sin(sigma / 2) ** 2 - Y_value = (sigma + sin(sigma)) * (Y_numerator / Y_denominator) + y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2) + y_denominator = sin(sigma / 2) ** 2 + y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator) - return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (X_value + Y_value))) + return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 7197369de..d28045282 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,14 +1,14 @@ # Finding Articulation Points in Undirected Graph -def computeAP(l): # noqa: E741 +def compute_ap(l): # noqa: E741 n = len(l) - outEdgeCount = 0 + out_edge_count = 0 low = [0] * n visited = [False] * n - isArt = [False] * n + is_art = [False] * n - def dfs(root, at, parent, outEdgeCount): + def dfs(root, at, parent, out_edge_count): if parent == root: - outEdgeCount += 1 + out_edge_count += 1 visited[at] = True low[at] = at @@ -16,27 +16,27 @@ def computeAP(l): # noqa: E741 if to == parent: pass elif not visited[to]: - outEdgeCount = dfs(root, to, at, outEdgeCount) + out_edge_count = dfs(root, to, at, out_edge_count) low[at] = min(low[at], low[to]) # AP found via bridge if at < low[to]: - isArt[at] = True + is_art[at] = True # AP found via cycle if at == low[to]: - isArt[at] = True + is_art[at] = True else: low[at] = min(low[at], to) - return outEdgeCount + return out_edge_count for i in range(n): if not visited[i]: - outEdgeCount = 0 - outEdgeCount = dfs(i, i, -1, outEdgeCount) - isArt[i] = outEdgeCount > 1 + out_edge_count = 0 + out_edge_count = dfs(i, i, -1, out_edge_count) + is_art[i] = out_edge_count > 1 - for x in range(len(isArt)): - if isArt[x] is True: + for x in range(len(is_art)): + if is_art[x] is True: print(x) @@ -52,4 +52,4 @@ data = { 7: [6, 8], 8: [5, 7], } -computeAP(data) +compute_ap(data) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index db0ef8e7b..b02e9af65 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -76,20 +76,20 @@ if __name__ == "__main__": """ -def dfs(G, s): - vis, S = {s}, [s] +def dfs(g, s): + vis, _s = {s}, [s] print(s) - while S: + while _s: flag = 0 - for i in G[S[-1]]: + for i in g[_s[-1]]: if i not in vis: - S.append(i) + _s.append(i) vis.add(i) flag = 1 print(i) break if not flag: - S.pop() + _s.pop() """ @@ 
-103,15 +103,15 @@ def dfs(G, s): """ -def bfs(G, s): - vis, Q = {s}, deque([s]) +def bfs(g, s): + vis, q = {s}, deque([s]) print(s) - while Q: - u = Q.popleft() - for v in G[u]: + while q: + u = q.popleft() + for v in g[u]: if v not in vis: vis.add(v) - Q.append(v) + q.append(v) print(v) @@ -127,10 +127,10 @@ def bfs(G, s): """ -def dijk(G, s): +def dijk(g, s): dist, known, path = {s: 0}, set(), {s: 0} while True: - if len(known) == len(G) - 1: + if len(known) == len(g) - 1: break mini = 100000 for i in dist: @@ -138,7 +138,7 @@ def dijk(G, s): mini = dist[i] u = i known.add(u) - for v in G[u]: + for v in g[u]: if v[0] not in known: if dist[u] + v[1] < dist.get(v[0], 100000): dist[v[0]] = dist[u] + v[1] @@ -155,27 +155,27 @@ def dijk(G, s): """ -def topo(G, ind=None, Q=None): - if Q is None: - Q = [1] +def topo(g, ind=None, q=None): + if q is None: + q = [1] if ind is None: - ind = [0] * (len(G) + 1) # SInce oth Index is ignored - for u in G: - for v in G[u]: + ind = [0] * (len(g) + 1) # SInce oth Index is ignored + for u in g: + for v in g[u]: ind[v] += 1 - Q = deque() - for i in G: + q = deque() + for i in g: if ind[i] == 0: - Q.append(i) - if len(Q) == 0: + q.append(i) + if len(q) == 0: return - v = Q.popleft() + v = q.popleft() print(v) - for w in G[v]: + for w in g[v]: ind[w] -= 1 if ind[w] == 0: - Q.append(w) - topo(G, ind, Q) + q.append(w) + topo(g, ind, q) """ @@ -206,9 +206,9 @@ def adjm(): """ -def floy(A_and_n): - (A, n) = A_and_n - dist = list(A) +def floy(a_and_n): + (a, n) = a_and_n + dist = list(a) path = [[0] * n for i in range(n)] for k in range(n): for i in range(n): @@ -231,10 +231,10 @@ def floy(A_and_n): """ -def prim(G, s): +def prim(g, s): dist, known, path = {s: 0}, set(), {s: 0} while True: - if len(known) == len(G) - 1: + if len(known) == len(g) - 1: break mini = 100000 for i in dist: @@ -242,7 +242,7 @@ def prim(G, s): mini = dist[i] u = i known.add(u) - for v in G[u]: + for v in g[u]: if v[0] not in known: if v[1] < dist.get(v[0], 100000): dist[v[0]] = v[1] @@ -279,16 +279,16 @@ def edglist(): """ -def krusk(E_and_n): +def krusk(e_and_n): # Sort edges on the basis of distance - (E, n) = E_and_n - E.sort(reverse=True, key=lambda x: x[2]) + (e, n) = e_and_n + e.sort(reverse=True, key=lambda x: x[2]) s = [{i} for i in range(1, n + 1)] while True: if len(s) == 1: break print(s) - x = E.pop() + x = e.pop() for i in range(len(s)): if x[0] in s[i]: break diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index b5203b4c5..552b7eee2 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -9,7 +9,7 @@ from queue import Queue -def checkBipartite(graph): +def check_bipartite(graph): queue = Queue() visited = [False] * len(graph) color = [-1] * len(graph) @@ -45,4 +45,4 @@ def checkBipartite(graph): if __name__ == "__main__": # Adjacency List of graph - print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) + print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index d15fcbbfe..62c60f2c6 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -103,14 +103,14 @@ G3 = { "G": [["F", 1]], } -shortDistance = dijkstra(G, "E", "C") -print(shortDistance) # E -- 3 --> F -- 3 --> C == 6 +short_distance = dijkstra(G, "E", "C") +print(short_distance) # E -- 3 --> F -- 3 --> C == 6 -shortDistance = dijkstra(G2, "E", "F") -print(shortDistance) # E -- 3 --> F == 3 +short_distance = dijkstra(G2, "E", "F") +print(short_distance) # E 
-- 3 --> F == 3 -shortDistance = dijkstra(G3, "E", "F") -print(shortDistance) # E -- 2 --> G -- 1 --> F == 3 +short_distance = dijkstra(G3, "E", "F") +print(short_distance) # E -- 2 --> G -- 1 --> F == 3 if __name__ == "__main__": import doctest diff --git a/graphs/dijkstra_2.py b/graphs/dijkstra_2.py index 762884136..3170765bc 100644 --- a/graphs/dijkstra_2.py +++ b/graphs/dijkstra_2.py @@ -1,6 +1,6 @@ -def printDist(dist, V): +def print_dist(dist, v): print("\nVertex Distance") - for i in range(V): + for i in range(v): if dist[i] != float("inf"): print(i, "\t", int(dist[i]), end="\t") else: @@ -8,26 +8,26 @@ def printDist(dist, V): print() -def minDist(mdist, vset, V): - minVal = float("inf") - minInd = -1 - for i in range(V): - if (not vset[i]) and mdist[i] < minVal: - minInd = i - minVal = mdist[i] - return minInd +def min_dist(mdist, vset, v): + min_val = float("inf") + min_ind = -1 + for i in range(v): + if (not vset[i]) and mdist[i] < min_val: + min_ind = i + min_val = mdist[i] + return min_ind -def Dijkstra(graph, V, src): - mdist = [float("inf") for i in range(V)] - vset = [False for i in range(V)] +def dijkstra(graph, v, src): + mdist = [float("inf") for i in range(v)] + vset = [False for i in range(v)] mdist[src] = 0.0 - for i in range(V - 1): - u = minDist(mdist, vset, V) + for i in range(v - 1): + u = min_dist(mdist, vset, v) vset[u] = True - for v in range(V): + for v in range(v): if ( (not vset[v]) and graph[u][v] != float("inf") @@ -35,7 +35,7 @@ def Dijkstra(graph, V, src): ): mdist[v] = mdist[u] + graph[u][v] - printDist(mdist, V) + print_dist(mdist, v) if __name__ == "__main__": @@ -55,4 +55,4 @@ if __name__ == "__main__": graph[src][dst] = weight gsrc = int(input("\nEnter shortest path source:").strip()) - Dijkstra(graph, V, gsrc) + dijkstra(graph, V, gsrc) diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 6b64834ac..122821a37 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -15,7 +15,7 @@ class PriorityQueue: self.array = [] self.pos = {} # To store the pos of node in array - def isEmpty(self): + def is_empty(self): return self.cur_size == 0 def min_heapify(self, idx): @@ -110,24 +110,24 @@ class Graph: self.par = [-1] * self.num_nodes # src is the source node self.dist[src] = 0 - Q = PriorityQueue() - Q.insert((0, src)) # (dist from src, node) + q = PriorityQueue() + q.insert((0, src)) # (dist from src, node) for u in self.adjList.keys(): if u != src: self.dist[u] = sys.maxsize # Infinity self.par[u] = -1 - while not Q.isEmpty(): - u = Q.extract_min() # Returns node with the min dist from source + while not q.is_empty(): + u = q.extract_min() # Returns node with the min dist from source # Update the distance of all the neighbours of u and # if their prev dist was INFINITY then push them in Q for v, w in self.adjList[u]: new_dist = self.dist[u] + w if self.dist[v] > new_dist: if self.dist[v] == sys.maxsize: - Q.insert((new_dist, v)) + q.insert((new_dist, v)) else: - Q.decrease_key((self.dist[v], v), new_dist) + q.decrease_key((self.dist[v], v), new_dist) self.dist[v] = new_dist self.par[v] = u diff --git a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py index 0f359ff1a..070d758e6 100644 --- a/graphs/edmonds_karp_multiple_source_and_sink.py +++ b/graphs/edmonds_karp_multiple_source_and_sink.py @@ -1,15 +1,15 @@ class FlowNetwork: def __init__(self, graph, sources, sinks): - self.sourceIndex = None - self.sinkIndex = None + self.source_index = None + 
self.sink_index = None self.graph = graph - self._normalizeGraph(sources, sinks) - self.verticesCount = len(graph) - self.maximumFlowAlgorithm = None + self._normalize_graph(sources, sinks) + self.vertices_count = len(graph) + self.maximum_flow_algorithm = None # make only one source and one sink - def _normalizeGraph(self, sources, sinks): + def _normalize_graph(self, sources, sinks): if sources is int: sources = [sources] if sinks is int: @@ -18,54 +18,54 @@ class FlowNetwork: if len(sources) == 0 or len(sinks) == 0: return - self.sourceIndex = sources[0] - self.sinkIndex = sinks[0] + self.source_index = sources[0] + self.sink_index = sinks[0] # make fake vertex if there are more # than one source or sink if len(sources) > 1 or len(sinks) > 1: - maxInputFlow = 0 + max_input_flow = 0 for i in sources: - maxInputFlow += sum(self.graph[i]) + max_input_flow += sum(self.graph[i]) size = len(self.graph) + 1 for room in self.graph: room.insert(0, 0) self.graph.insert(0, [0] * size) for i in sources: - self.graph[0][i + 1] = maxInputFlow - self.sourceIndex = 0 + self.graph[0][i + 1] = max_input_flow + self.source_index = 0 size = len(self.graph) + 1 for room in self.graph: room.append(0) self.graph.append([0] * size) for i in sinks: - self.graph[i + 1][size - 1] = maxInputFlow - self.sinkIndex = size - 1 + self.graph[i + 1][size - 1] = max_input_flow + self.sink_index = size - 1 - def findMaximumFlow(self): - if self.maximumFlowAlgorithm is None: + def find_maximum_flow(self): + if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before.") - if self.sourceIndex is None or self.sinkIndex is None: + if self.source_index is None or self.sink_index is None: return 0 - self.maximumFlowAlgorithm.execute() - return self.maximumFlowAlgorithm.getMaximumFlow() + self.maximum_flow_algorithm.execute() + return self.maximum_flow_algorithm.getMaximumFlow() - def setMaximumFlowAlgorithm(self, Algorithm): - self.maximumFlowAlgorithm = Algorithm(self) + def set_maximum_flow_algorithm(self, algorithm): + self.maximum_flow_algorithm = algorithm(self) class FlowNetworkAlgorithmExecutor: - def __init__(self, flowNetwork): - self.flowNetwork = flowNetwork - self.verticesCount = flowNetwork.verticesCount - self.sourceIndex = flowNetwork.sourceIndex - self.sinkIndex = flowNetwork.sinkIndex + def __init__(self, flow_network): + self.flow_network = flow_network + self.verticies_count = flow_network.verticesCount + self.source_index = flow_network.sourceIndex + self.sink_index = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that - self.graph = flowNetwork.graph + self.graph = flow_network.graph self.executed = False def execute(self): @@ -79,95 +79,96 @@ class FlowNetworkAlgorithmExecutor: class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor): - def __init__(self, flowNetwork): - super().__init__(flowNetwork) + def __init__(self, flow_network): + super().__init__(flow_network) # use this to save your result - self.maximumFlow = -1 + self.maximum_flow = -1 - def getMaximumFlow(self): + def get_maximum_flow(self): if not self.executed: raise Exception("You should execute algorithm before using its result!") - return self.maximumFlow + return self.maximum_flow class PushRelabelExecutor(MaximumFlowAlgorithmExecutor): - def __init__(self, flowNetwork): - super().__init__(flowNetwork) + def __init__(self, flow_network): + super().__init__(flow_network) - self.preflow = [[0] * self.verticesCount 
for i in range(self.verticesCount)] + self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)] - self.heights = [0] * self.verticesCount - self.excesses = [0] * self.verticesCount + self.heights = [0] * self.verticies_count + self.excesses = [0] * self.verticies_count def _algorithm(self): - self.heights[self.sourceIndex] = self.verticesCount + self.heights[self.source_index] = self.verticies_count # push some substance to graph - for nextVertexIndex, bandwidth in enumerate(self.graph[self.sourceIndex]): - self.preflow[self.sourceIndex][nextVertexIndex] += bandwidth - self.preflow[nextVertexIndex][self.sourceIndex] -= bandwidth - self.excesses[nextVertexIndex] += bandwidth + for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]): + self.preflow[self.source_index][nextvertex_index] += bandwidth + self.preflow[nextvertex_index][self.source_index] -= bandwidth + self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule - verticesList = [ + vertices_list = [ i - for i in range(self.verticesCount) - if i != self.sourceIndex and i != self.sinkIndex + for i in range(self.verticies_count) + if i != self.source_index and i != self.sink_index ] # move through list i = 0 - while i < len(verticesList): - vertexIndex = verticesList[i] - previousHeight = self.heights[vertexIndex] - self.processVertex(vertexIndex) - if self.heights[vertexIndex] > previousHeight: + while i < len(vertices_list): + vertex_index = vertices_list[i] + previous_height = self.heights[vertex_index] + self.process_vertex(vertex_index) + if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index - verticesList.insert(0, verticesList.pop(i)) + vertices_list.insert(0, vertices_list.pop(i)) i = 0 else: i += 1 - self.maximumFlow = sum(self.preflow[self.sourceIndex]) + self.maximum_flow = sum(self.preflow[self.source_index]) - def processVertex(self, vertexIndex): - while self.excesses[vertexIndex] > 0: - for neighbourIndex in range(self.verticesCount): + def process_vertex(self, vertex_index): + while self.excesses[vertex_index] > 0: + for neighbour_index in range(self.verticies_count): # if it's neighbour and current vertex is higher if ( - self.graph[vertexIndex][neighbourIndex] - - self.preflow[vertexIndex][neighbourIndex] + self.graph[vertex_index][neighbour_index] + - self.preflow[vertex_index][neighbour_index] > 0 - and self.heights[vertexIndex] > self.heights[neighbourIndex] + and self.heights[vertex_index] > self.heights[neighbour_index] ): - self.push(vertexIndex, neighbourIndex) + self.push(vertex_index, neighbour_index) - self.relabel(vertexIndex) + self.relabel(vertex_index) - def push(self, fromIndex, toIndex): - preflowDelta = min( - self.excesses[fromIndex], - self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex], + def push(self, from_index, to_index): + preflow_delta = min( + self.excesses[from_index], + self.graph[from_index][to_index] - self.preflow[from_index][to_index], ) - self.preflow[fromIndex][toIndex] += preflowDelta - self.preflow[toIndex][fromIndex] -= preflowDelta - self.excesses[fromIndex] -= preflowDelta - self.excesses[toIndex] += preflowDelta + self.preflow[from_index][to_index] += preflow_delta + self.preflow[to_index][from_index] -= preflow_delta + self.excesses[from_index] -= preflow_delta + self.excesses[to_index] += preflow_delta - def relabel(self, vertexIndex): - minHeight = None - for toIndex in range(self.verticesCount): + def relabel(self, vertex_index): + 
min_height = None + for to_index in range(self.verticies_count): if ( - self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex] + self.graph[vertex_index][to_index] + - self.preflow[vertex_index][to_index] > 0 ): - if minHeight is None or self.heights[toIndex] < minHeight: - minHeight = self.heights[toIndex] + if min_height is None or self.heights[to_index] < min_height: + min_height = self.heights[to_index] - if minHeight is not None: - self.heights[vertexIndex] = minHeight + 1 + if min_height is not None: + self.heights[vertex_index] = min_height + 1 if __name__ == "__main__": @@ -184,10 +185,10 @@ if __name__ == "__main__": graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network - flowNetwork = FlowNetwork(graph, entrances, exits) + flow_network = FlowNetwork(graph, entrances, exits) # set algorithm - flowNetwork.setMaximumFlowAlgorithm(PushRelabelExecutor) + flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate - maximumFlow = flowNetwork.findMaximumFlow() + maximum_flow = flow_network.find_maximum_flow() - print(f"maximum flow is {maximumFlow}") + print(f"maximum flow is {maximum_flow}") diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index fa4f73abd..6c43c5d3e 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -50,21 +50,21 @@ def check_euler(graph, max_node): def main(): - G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} - G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} - G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} - G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} - G5 = { + g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} + g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} + g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} + g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} + g5 = { 1: [], 2: [] # all degree is zero } max_node = 10 - check_euler(G1, max_node) - check_euler(G2, max_node) - check_euler(G3, max_node) - check_euler(G4, max_node) - check_euler(G5, max_node) + check_euler(g1, max_node) + check_euler(g2, max_node) + check_euler(g3, max_node) + check_euler(g4, max_node) + check_euler(g5, max_node) if __name__ == "__main__": diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 548ce3c54..50081afa6 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -151,16 +151,16 @@ def create_edge(nodes, graph, cluster, c1): def construct_graph(cluster, nodes): - X = cluster[max(cluster.keys())] + x = cluster[max(cluster.keys())] cluster[max(cluster.keys()) + 1] = "Header" graph = {} - for i in X: + for i in x: if tuple(["Header"]) in graph: - graph[tuple(["Header"])].append(X[i]) + graph[tuple(["Header"])].append(x[i]) else: - graph[tuple(["Header"])] = [X[i]] - for i in X: - graph[tuple(X[i])] = [["Header"]] + graph[tuple(["Header"])] = [x[i]] + for i in x: + graph[tuple(x[i])] = [["Header"]] i = 1 while i < max(cluster) - 1: create_edge(nodes, graph, cluster, i) @@ -168,7 +168,7 @@ def construct_graph(cluster, nodes): return graph -def myDFS(graph, start, end, path=None): +def my_dfs(graph, start, end, path=None): """ find different DFS walk from given node to Header node """ @@ -177,7 +177,7 @@ def myDFS(graph, start, end, path=None): paths.append(path) for node in graph[start]: if 
tuple(node) not in path: - myDFS(graph, tuple(node), end, path) + my_dfs(graph, tuple(node), end, path) def find_freq_subgraph_given_support(s, cluster, graph): @@ -186,23 +186,23 @@ def find_freq_subgraph_given_support(s, cluster, graph): """ k = int(s / 100 * (len(cluster) - 1)) for i in cluster[k].keys(): - myDFS(graph, tuple(cluster[k][i]), tuple(["Header"])) + my_dfs(graph, tuple(cluster[k][i]), tuple(["Header"])) def freq_subgraphs_edge_list(paths): """ returns Edge list for frequent subgraphs """ - freq_sub_EL = [] + freq_sub_el = [] for edges in paths: - EL = [] + el = [] for j in range(len(edges) - 1): temp = list(edges[j]) for e in temp: edge = (e[0], e[1]) - EL.append(edge) - freq_sub_EL.append(EL) - return freq_sub_EL + el.append(edge) + freq_sub_el.append(el) + return freq_sub_el def preprocess(edge_array): diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index fed7517a2..776ae3a2f 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -1,8 +1,8 @@ # Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm -def longestDistance(graph): +def longest_distance(graph): indegree = [0] * len(graph) queue = [] - longDist = [1] * len(graph) + long_dist = [1] * len(graph) for key, values in graph.items(): for i in values: @@ -17,15 +17,15 @@ def longestDistance(graph): for x in graph[vertex]: indegree[x] -= 1 - if longDist[vertex] + 1 > longDist[x]: - longDist[x] = longDist[vertex] + 1 + if long_dist[vertex] + 1 > long_dist[x]: + long_dist[x] = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(x) - print(max(longDist)) + print(max(long_dist)) # Adjacency list of Graph graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} -longestDistance(graph) +longest_distance(graph) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index bf9f90299..6879b047f 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,4 +1,4 @@ -def topologicalSort(graph): +def topological_sort(graph): """ Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph using BFS @@ -33,4 +33,4 @@ def topologicalSort(graph): # Adjacency List of Graph graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topologicalSort(graph) +topological_sort(graph) diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 16b428614..9b2c645f1 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -2,15 +2,15 @@ import sys from collections import defaultdict -def PrimsAlgorithm(l): # noqa: E741 +def prisms_algorithm(l): # noqa: E741 - nodePosition = [] + node_position = [] def get_position(vertex): - return nodePosition[vertex] + return node_position[vertex] def set_position(vertex, pos): - nodePosition[vertex] = pos + node_position[vertex] = pos def top_to_bottom(heap, start, size, positions): if start > size // 2 - 1: @@ -64,44 +64,44 @@ def PrimsAlgorithm(l): # noqa: E741 for i in range(start, -1, -1): top_to_bottom(heap, i, len(heap), positions) - def deleteMinimum(heap, positions): + def delete_minimum(heap, positions): temp = positions[0] heap[0] = sys.maxsize top_to_bottom(heap, 0, len(heap), positions) return temp visited = [0 for i in range(len(l))] - Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex + nbr_tv = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with 
neighboring vertex of partial tree # formed in graph - Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex - Positions = [] + distance_tv = [] # Heap of Distance of vertices from their neighboring vertex + positions = [] for x in range(len(l)): p = sys.maxsize - Distance_TV.append(p) - Positions.append(x) - nodePosition.append(x) + distance_tv.append(p) + positions.append(x) + node_position.append(x) - TreeEdges = [] + tree_edges = [] visited[0] = 1 - Distance_TV[0] = sys.maxsize + distance_tv[0] = sys.maxsize for x in l[0]: - Nbr_TV[x[0]] = 0 - Distance_TV[x[0]] = x[1] - heapify(Distance_TV, Positions) + nbr_tv[x[0]] = 0 + distance_tv[x[0]] = x[1] + heapify(distance_tv, positions) for i in range(1, len(l)): - vertex = deleteMinimum(Distance_TV, Positions) + vertex = delete_minimum(distance_tv, positions) if visited[vertex] == 0: - TreeEdges.append((Nbr_TV[vertex], vertex)) + tree_edges.append((nbr_tv[vertex], vertex)) visited[vertex] = 1 for v in l[vertex]: - if visited[v[0]] == 0 and v[1] < Distance_TV[get_position(v[0])]: - Distance_TV[get_position(v[0])] = v[1] - bottom_to_top(v[1], get_position(v[0]), Distance_TV, Positions) - Nbr_TV[v[0]] = vertex - return TreeEdges + if visited[v[0]] == 0 and v[1] < distance_tv[get_position(v[0])]: + distance_tv[get_position(v[0])] = v[1] + bottom_to_top(v[1], get_position(v[0]), distance_tv, positions) + nbr_tv[v[0]] = vertex + return tree_edges if __name__ == "__main__": # pragma: no cover @@ -113,4 +113,4 @@ if __name__ == "__main__": # pragma: no cover l = [int(x) for x in input().strip().split()] # noqa: E741 adjlist[l[0]].append([l[1], l[2]]) adjlist[l[1]].append([l[0], l[2]]) - print(PrimsAlgorithm(adjlist)) + print(prisms_algorithm(adjlist)) diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 8607f51d8..e16a98393 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -55,21 +55,21 @@ class PriorityQueue: return (priority, item) -def consistent_heuristic(P: TPos, goal: TPos): +def consistent_heuristic(p: TPos, goal: TPos): # euclidean distance - a = np.array(P) + a = np.array(p) b = np.array(goal) return np.linalg.norm(a - b) -def heuristic_2(P: TPos, goal: TPos): +def heuristic_2(p: TPos, goal: TPos): # integer division by time variable - return consistent_heuristic(P, goal) // t + return consistent_heuristic(p, goal) // t -def heuristic_1(P: TPos, goal: TPos): +def heuristic_1(p: TPos, goal: TPos): # manhattan distance - return abs(P[0] - goal[0]) + abs(P[1] - goal[1]) + return abs(p[0] - goal[0]) + abs(p[1] - goal[1]) def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py index fa182aa2f..ea9d35282 100644 --- a/graphs/scc_kosaraju.py +++ b/graphs/scc_kosaraju.py @@ -2,7 +2,7 @@ from __future__ import annotations def dfs(u): - global graph, reversedGraph, scc, component, visit, stack + global graph, reversed_graph, scc, component, visit, stack if visit[u]: return visit[u] = True @@ -12,17 +12,17 @@ def dfs(u): def dfs2(u): - global graph, reversedGraph, scc, component, visit, stack + global graph, reversed_graph, scc, component, visit, stack if visit[u]: return visit[u] = True component.append(u) - for v in reversedGraph[u]: + for v in reversed_graph[u]: dfs2(v) def kosaraju(): - global graph, reversedGraph, scc, component, visit, stack + global graph, reversed_graph, scc, component, visit, stack for i in range(n): dfs(i) visit = [False] * n @@ -40,12 +40,12 @@ if __name__ 
== "__main__": n, m = list(map(int, input().strip().split())) graph: list[list[int]] = [[] for i in range(n)] # graph - reversedGraph: list[list[int]] = [[] for i in range(n)] # reversed graph + reversed_graph: list[list[int]] = [[] for i in range(n)] # reversed graph # input graph data (edges) for i in range(m): u, v = list(map(int, input().strip().split())) graph[u].append(v) - reversedGraph[v].append(u) + reversed_graph[v].append(u) stack: list[int] = [] visit: list[bool] = [False] * n diff --git a/graphs/tests/test_min_spanning_tree_prim.py b/graphs/tests/test_min_spanning_tree_prim.py index 048fbf595..91feab28f 100644 --- a/graphs/tests/test_min_spanning_tree_prim.py +++ b/graphs/tests/test_min_spanning_tree_prim.py @@ -1,6 +1,6 @@ from collections import defaultdict -from graphs.minimum_spanning_tree_prims import PrimsAlgorithm as mst +from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def test_prim_successful_result(): diff --git a/hashes/adler32.py b/hashes/adler32.py index 4a61b97e3..80229f046 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -20,7 +20,7 @@ def adler32(plain_text: str) -> int: >>> adler32('go adler em all') 708642122 """ - MOD_ADLER = 65521 + MOD_ADLER = 65521 # noqa: N806 a = 1 b = 0 for plain_chr in plain_text: diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index a6d476eb7..69313fbb2 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -43,11 +43,11 @@ def pull(): global buffer_space, params_space, machine_time, K, m, t # PRNG (Xorshift by George Marsaglia) - def xorshift(X, Y): - X ^= Y >> 13 - Y ^= X << 17 - X ^= Y >> 5 - return X + def xorshift(x, y): + x ^= y >> 13 + y ^= x << 17 + x ^= y >> 5 + return x # Choosing Dynamical Systems (Increment) key = machine_time % m @@ -63,13 +63,13 @@ def pull(): params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3 # Choosing Chaotic Data - X = int(buffer_space[(key + 2) % m] * (10**10)) - Y = int(buffer_space[(key - 2) % m] * (10**10)) + x = int(buffer_space[(key + 2) % m] * (10**10)) + y = int(buffer_space[(key - 2) % m] * (10**10)) # Machine Time machine_time += 1 - return xorshift(X, Y) % 0xFFFFFFFF + return xorshift(x, y) % 0xFFFFFFFF def reset(): diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index ac20fe03b..a62d092a1 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -68,177 +68,177 @@ def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"): # Functions of hamming code------------------------------------------- -def emitterConverter(sizePar, data): +def emitter_converter(size_par, data): """ - :param sizePar: how many parity bits the message must have + :param size_par: how many parity bits the message must have :param data: information bits :return: message to be transmitted by unreliable medium - bits of information merged with parity bits - >>> emitterConverter(4, "101010111111") + >>> emitter_converter(4, "101010111111") ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] """ - if sizePar + len(data) <= 2**sizePar - (len(data) - 1): + if size_par + len(data) <= 2**size_par - (len(data) - 1): print("ERROR - size of parity don't match with size of data") exit(0) - dataOut = [] + data_out = [] parity = [] - binPos = [bin(x)[2:] for x in range(1, sizePar + len(data) + 1)] + bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data) + 1)] # sorted information data for the size of the output data - dataOrd = [] + data_ord = [] # data position template + parity - dataOutGab = [] 
+ data_out_gab = [] # parity bit counter - qtdBP = 0 + qtd_bp = 0 # counter position of data bits - contData = 0 + cont_data = 0 - for x in range(1, sizePar + len(data) + 1): + for x in range(1, size_par + len(data) + 1): # Performs a template of bit positions - who should be given, # and who should be parity - if qtdBP < sizePar: + if qtd_bp < size_par: if (np.log(x) / np.log(2)).is_integer(): - dataOutGab.append("P") - qtdBP = qtdBP + 1 + data_out_gab.append("P") + qtd_bp = qtd_bp + 1 else: - dataOutGab.append("D") + data_out_gab.append("D") else: - dataOutGab.append("D") + data_out_gab.append("D") # Sorts the data to the new output size - if dataOutGab[-1] == "D": - dataOrd.append(data[contData]) - contData += 1 + if data_out_gab[-1] == "D": + data_ord.append(data[cont_data]) + cont_data += 1 else: - dataOrd.append(None) + data_ord.append(None) # Calculates parity - qtdBP = 0 # parity bit counter - for bp in range(1, sizePar + 1): + qtd_bp = 0 # parity bit counter + for bp in range(1, size_par + 1): # Bit counter one for a given parity - contBO = 0 + cont_bo = 0 # counter to control the loop reading - contLoop = 0 - for x in dataOrd: + cont_loop = 0 + for x in data_ord: if x is not None: try: - aux = (binPos[contLoop])[-1 * (bp)] + aux = (bin_pos[cont_loop])[-1 * (bp)] except IndexError: aux = "0" if aux == "1": if x == "1": - contBO += 1 - contLoop += 1 - parity.append(contBO % 2) + cont_bo += 1 + cont_loop += 1 + parity.append(cont_bo % 2) - qtdBP += 1 + qtd_bp += 1 # Mount the message - ContBP = 0 # parity bit counter - for x in range(0, sizePar + len(data)): - if dataOrd[x] is None: - dataOut.append(str(parity[ContBP])) - ContBP += 1 + cont_bp = 0 # parity bit counter + for x in range(0, size_par + len(data)): + if data_ord[x] is None: + data_out.append(str(parity[cont_bp])) + cont_bp += 1 else: - dataOut.append(dataOrd[x]) + data_out.append(data_ord[x]) - return dataOut + return data_out -def receptorConverter(sizePar, data): +def receptor_converter(size_par, data): """ - >>> receptorConverter(4, "1111010010111111") + >>> receptor_converter(4, "1111010010111111") (['1', '0', '1', '0', '1', '0', '1', '1', '1', '1', '1', '1'], True) """ # data position template + parity - dataOutGab = [] + data_out_gab = [] # Parity bit counter - qtdBP = 0 + qtd_bp = 0 # Counter p data bit reading - contData = 0 + cont_data = 0 # list of parity received - parityReceived = [] - dataOutput = [] + parity_received = [] + data_output = [] for x in range(1, len(data) + 1): # Performs a template of bit positions - who should be given, # and who should be parity - if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer(): - dataOutGab.append("P") - qtdBP = qtdBP + 1 + if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer(): + data_out_gab.append("P") + qtd_bp = qtd_bp + 1 else: - dataOutGab.append("D") + data_out_gab.append("D") # Sorts the data to the new output size - if dataOutGab[-1] == "D": - dataOutput.append(data[contData]) + if data_out_gab[-1] == "D": + data_output.append(data[cont_data]) else: - parityReceived.append(data[contData]) - contData += 1 + parity_received.append(data[cont_data]) + cont_data += 1 # -----------calculates the parity with the data - dataOut = [] + data_out = [] parity = [] - binPos = [bin(x)[2:] for x in range(1, sizePar + len(dataOutput) + 1)] + bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data_output) + 1)] # sorted information data for the size of the output data - dataOrd = [] + data_ord = [] # Data position feedback + parity - dataOutGab = [] + 
data_out_gab = [] # Parity bit counter - qtdBP = 0 + qtd_bp = 0 # Counter p data bit reading - contData = 0 + cont_data = 0 - for x in range(1, sizePar + len(dataOutput) + 1): + for x in range(1, size_par + len(data_output) + 1): # Performs a template position of bits - who should be given, # and who should be parity - if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer(): - dataOutGab.append("P") - qtdBP = qtdBP + 1 + if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer(): + data_out_gab.append("P") + qtd_bp = qtd_bp + 1 else: - dataOutGab.append("D") + data_out_gab.append("D") # Sorts the data to the new output size - if dataOutGab[-1] == "D": - dataOrd.append(dataOutput[contData]) - contData += 1 + if data_out_gab[-1] == "D": + data_ord.append(data_output[cont_data]) + cont_data += 1 else: - dataOrd.append(None) + data_ord.append(None) # Calculates parity - qtdBP = 0 # parity bit counter - for bp in range(1, sizePar + 1): + qtd_bp = 0 # parity bit counter + for bp in range(1, size_par + 1): # Bit counter one for a certain parity - contBO = 0 + cont_bo = 0 # Counter to control loop reading - contLoop = 0 - for x in dataOrd: + cont_loop = 0 + for x in data_ord: if x is not None: try: - aux = (binPos[contLoop])[-1 * (bp)] + aux = (bin_pos[cont_loop])[-1 * (bp)] except IndexError: aux = "0" if aux == "1" and x == "1": - contBO += 1 - contLoop += 1 - parity.append(str(contBO % 2)) + cont_bo += 1 + cont_loop += 1 + parity.append(str(cont_bo % 2)) - qtdBP += 1 + qtd_bp += 1 # Mount the message - ContBP = 0 # Parity bit counter - for x in range(0, sizePar + len(dataOutput)): - if dataOrd[x] is None: - dataOut.append(str(parity[ContBP])) - ContBP += 1 + cont_bp = 0 # Parity bit counter + for x in range(0, size_par + len(data_output)): + if data_ord[x] is None: + data_out.append(str(parity[cont_bp])) + cont_bp += 1 else: - dataOut.append(dataOrd[x]) + data_out.append(data_ord[x]) - ack = parityReceived == parity - return dataOutput, ack + ack = parity_received == parity + return data_output, ack # --------------------------------------------------------------------- diff --git a/hashes/md5.py b/hashes/md5.py index c56c073cc..2020bf2e5 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -1,7 +1,7 @@ import math -def rearrange(bitString32): +def rearrange(bit_string_32): """[summary] Regroups the given binary string. @@ -17,21 +17,21 @@ def rearrange(bitString32): 'pqrstuvwhijklmno90abcdfg12345678' """ - if len(bitString32) != 32: + if len(bit_string_32) != 32: raise ValueError("Need length 32") - newString = "" + new_string = "" for i in [3, 2, 1, 0]: - newString += bitString32[8 * i : 8 * i + 8] - return newString + new_string += bit_string_32[8 * i : 8 * i + 8] + return new_string -def reformatHex(i): +def reformat_hex(i): """[summary] Converts the given integer into 8-digit hex number. 
Arguments: i {[int]} -- [integer] - >>> reformatHex(666) + >>> reformat_hex(666) '9a020000' """ @@ -42,7 +42,7 @@ def reformatHex(i): return thing -def pad(bitString): +def pad(bit_string): """[summary] Fills up the binary string to a 512 bit binary string @@ -52,33 +52,33 @@ def pad(bitString): Returns: [string] -- [binary string] """ - startLength = len(bitString) - bitString += "1" - while len(bitString) % 512 != 448: - bitString += "0" - lastPart = format(startLength, "064b") - bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32]) - return bitString + start_length = len(bit_string) + bit_string += "1" + while len(bit_string) % 512 != 448: + bit_string += "0" + last_part = format(start_length, "064b") + bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32]) + return bit_string -def getBlock(bitString): +def get_block(bit_string): """[summary] Iterator: Returns by each call a list of length 16 with the 32 bit integer blocks. Arguments: - bitString {[string]} -- [binary string >= 512] + bit_string {[string]} -- [binary string >= 512] """ - currPos = 0 - while currPos < len(bitString): - currPart = bitString[currPos : currPos + 512] - mySplits = [] + curr_pos = 0 + while curr_pos < len(bit_string): + curr_part = bit_string[curr_pos : curr_pos + 512] + my_splits = [] for i in range(16): - mySplits.append(int(rearrange(currPart[32 * i : 32 * i + 32]), 2)) - yield mySplits - currPos += 512 + my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2)) + yield my_splits + curr_pos += 512 def not32(i): @@ -101,7 +101,7 @@ def leftrot32(i, s): return (i << s) ^ (i >> (32 - s)) -def md5me(testString): +def md5me(test_string): """[summary] Returns a 32-bit hash code of the string 'testString' @@ -110,7 +110,7 @@ def md5me(testString): """ bs = "" - for i in testString: + for i in test_string: bs += format(ord(i), "08b") bs = pad(bs) @@ -188,37 +188,37 @@ def md5me(testString): 21, ] - for m in getBlock(bs): - A = a0 - B = b0 - C = c0 - D = d0 + for m in get_block(bs): + a = a0 + b = b0 + c = c0 + d = d0 for i in range(64): if i <= 15: # f = (B & C) | (not32(B) & D) - f = D ^ (B & (C ^ D)) + f = d ^ (b & (c ^ d)) g = i elif i <= 31: # f = (D & B) | (not32(D) & C) - f = C ^ (D & (B ^ C)) + f = c ^ (d & (b ^ c)) g = (5 * i + 1) % 16 elif i <= 47: - f = B ^ C ^ D + f = b ^ c ^ d g = (3 * i + 5) % 16 else: - f = C ^ (B | not32(D)) + f = c ^ (b | not32(d)) g = (7 * i) % 16 - dtemp = D - D = C - C = B - B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i])) - A = dtemp - a0 = sum32(a0, A) - b0 = sum32(b0, B) - c0 = sum32(c0, C) - d0 = sum32(d0, D) + dtemp = d + d = c + c = b + b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i])) + a = dtemp + a0 = sum32(a0, a) + b0 = sum32(b0, b) + c0 = sum32(c0, c) + d0 = sum32(d0, d) - digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0) + digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0) return digest diff --git a/hashes/sha1.py b/hashes/sha1.py index dde1efc55..b19e0cfaf 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -133,7 +133,7 @@ class SHA1HashTest(unittest.TestCase): Test class for the SHA1Hash class. 
Inherits the TestCase class from unittest """ - def testMatchHashes(self): + def testMatchHashes(self): # noqa: N802 msg = bytes("Test String", "utf-8") self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest()) diff --git a/hashes/sha256.py b/hashes/sha256.py index 9d4f250fe..98f7c096e 100644 --- a/hashes/sha256.py +++ b/hashes/sha256.py @@ -157,14 +157,14 @@ class SHA256: ) % 0x100000000 # Compression - S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25) + s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25) ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g) temp1 = ( - h + S1 + ch + self.round_constants[index] + words[index] + h + s1 + ch + self.round_constants[index] + words[index] ) % 0x100000000 - S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22) + s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22) maj = (a & b) ^ (a & c) ^ (b & c) - temp2 = (S0 + maj) % 0x100000000 + temp2 = (s0 + maj) % 0x100000000 h, g, f, e, d, c, b, a = ( g, diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 4b866331b..24fbd9a5e 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -63,8 +63,8 @@ def power_iteration( vector = w / np.linalg.norm(w) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) - vectorH = vector.conj().T if is_complex else vector.T - lambda_ = np.dot(vectorH, np.dot(input_matrix, vector)) + vector_h = vector.conj().T if is_complex else vector.T + lambda_ = np.dot(vector_h, np.dot(input_matrix, vector)) # Check convergence. error = np.abs(lambda_ - lambda_previous) / lambda_ diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py index 78083aa75..4773429cb 100644 --- a/linear_algebra/src/rayleigh_quotient.py +++ b/linear_algebra/src/rayleigh_quotient.py @@ -26,7 +26,7 @@ def is_hermitian(matrix: np.ndarray) -> bool: return np.array_equal(matrix, matrix.conjugate().T) -def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any: +def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any: """ Returns the Rayleigh quotient of a Hermitian matrix A and vector v. @@ -45,20 +45,20 @@ def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any: array([[3.]]) """ v_star = v.conjugate().T - v_star_dot = v_star.dot(A) + v_star_dot = v_star.dot(a) assert isinstance(v_star_dot, np.ndarray) return (v_star_dot.dot(v)) / (v_star.dot(v)) def tests() -> None: - A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]]) + a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]]) v = np.array([[1], [2], [3]]) - assert is_hermitian(A), f"{A} is not hermitian." - print(rayleigh_quotient(A, v)) + assert is_hermitian(a), f"{a} is not hermitian." + print(rayleigh_quotient(a, v)) - A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]) - assert is_hermitian(A), f"{A} is not hermitian." - assert rayleigh_quotient(A, v) == float(3) + a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]) + assert is_hermitian(a), f"{a} is not hermitian." 
+ assert rayleigh_quotient(a, v) == float(3) if __name__ == "__main__": diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 724ceef25..97c06cb44 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -85,13 +85,13 @@ class Test(unittest.TestCase): self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)") self.assertEqual((a * b), 0) - def test_zeroVector(self) -> None: + def test_zero_vector(self) -> None: """ test for global function zero_vector() """ self.assertTrue(str(zero_vector(10)).count("0") == 10) - def test_unitBasisVector(self) -> None: + def test_unit_basis_vector(self) -> None: """ test for global function unit_basis_vector() """ @@ -113,7 +113,7 @@ class Test(unittest.TestCase): y = x.copy() self.assertEqual(str(x), str(y)) - def test_changeComponent(self) -> None: + def test_change_component(self) -> None: """ test for method change_component() """ @@ -126,77 +126,77 @@ class Test(unittest.TestCase): """ test for Matrix method str() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a)) def test_minor(self) -> None: """ test for Matrix method minor() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] - for x in range(A.height()): - for y in range(A.width()): - self.assertEqual(minors[x][y], A.minor(x, y)) + for x in range(a.height()): + for y in range(a.width()): + self.assertEqual(minors[x][y], a.minor(x, y)) def test_cofactor(self) -> None: """ test for Matrix method cofactor() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] - for x in range(A.height()): - for y in range(A.width()): - self.assertEqual(cofactors[x][y], A.cofactor(x, y)) + for x in range(a.height()): + for y in range(a.width()): + self.assertEqual(cofactors[x][y], a.cofactor(x, y)) def test_determinant(self) -> None: """ test for Matrix method determinant() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual(-5, A.determinant()) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual(-5, a.determinant()) def test__mul__matrix(self) -> None: """ test for Matrix * operator """ - A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) + a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) x = Vector([1, 2, 3]) - self.assertEqual("(14,32,50)", str(A * x)) - self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2)) + self.assertEqual("(14,32,50)", str(a * x)) + self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2)) def test_change_component_matrix(self) -> None: """ test for Matrix method change_component() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - A.change_component(0, 2, 5) - self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + a.change_component(0, 2, 5) + self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a)) def test_component_matrix(self) -> None: """ test for Matrix method component() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual(7, A.component(2, 1), 0.01) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual(7, a.component(2, 1), 0.01) 
def test__add__matrix(self) -> None: """ test for Matrix + operator """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) - self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) + self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b)) def test__sub__matrix(self) -> None: """ test for Matrix - operator """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) - self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) + self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b)) - def test_squareZeroMatrix(self) -> None: + def test_square_zero_matrix(self) -> None: """ test for global function square_zero_matrix() """ diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index ace6fb0fa..4a86e5322 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -6,7 +6,7 @@ Output: The decision tree maps a real number input to a real number output. import numpy as np -class Decision_Tree: +class DecisionTree: def __init__(self, depth=5, min_leaf_size=5): self.depth = depth self.decision_boundary = 0 @@ -22,17 +22,17 @@ class Decision_Tree: @param prediction: a floating point value return value: mean_squared_error calculates the error if prediction is used to estimate the labels - >>> tester = Decision_Tree() + >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) >>> test_prediction = np.float(6) >>> tester.mean_squared_error(test_labels, test_prediction) == ( - ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels, + ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) True >>> test_labels = np.array([1,2,3]) >>> test_prediction = np.float(2) >>> tester.mean_squared_error(test_labels, test_prediction) == ( - ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels, + ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) True """ @@ -41,10 +41,10 @@ class Decision_Tree: return np.mean((labels - prediction) ** 2) - def train(self, X, y): + def train(self, x, y): """ train: - @param X: a one dimensional numpy array + @param x: a one dimensional numpy array @param y: a one dimensional numpy array. The contents of y are the labels for the corresponding X values @@ -55,17 +55,17 @@ class Decision_Tree: this section is to check that the inputs conform to our dimensionality constraints """ - if X.ndim != 1: + if x.ndim != 1: print("Error: Input data set must be one dimensional") return - if len(X) != len(y): + if len(x) != len(y): print("Error: X and y have different lengths") return if y.ndim != 1: print("Error: Data set labels must be one dimensional") return - if len(X) < 2 * self.min_leaf_size: + if len(x) < 2 * self.min_leaf_size: self.prediction = np.mean(y) return @@ -74,7 +74,7 @@ class Decision_Tree: return best_split = 0 - min_error = self.mean_squared_error(X, np.mean(y)) * 2 + min_error = self.mean_squared_error(x, np.mean(y)) * 2 """ loop over all possible splits for the decision tree. find the best split. 
@@ -82,34 +82,34 @@ class Decision_Tree: then the data set is not split and the average for the entire array is used as the predictor """ - for i in range(len(X)): - if len(X[:i]) < self.min_leaf_size: + for i in range(len(x)): + if len(x[:i]) < self.min_leaf_size: continue - elif len(X[i:]) < self.min_leaf_size: + elif len(x[i:]) < self.min_leaf_size: continue else: - error_left = self.mean_squared_error(X[:i], np.mean(y[:i])) - error_right = self.mean_squared_error(X[i:], np.mean(y[i:])) + error_left = self.mean_squared_error(x[:i], np.mean(y[:i])) + error_right = self.mean_squared_error(x[i:], np.mean(y[i:])) error = error_left + error_right if error < min_error: best_split = i min_error = error if best_split != 0: - left_X = X[:best_split] + left_x = x[:best_split] left_y = y[:best_split] - right_X = X[best_split:] + right_x = x[best_split:] right_y = y[best_split:] - self.decision_boundary = X[best_split] - self.left = Decision_Tree( + self.decision_boundary = x[best_split] + self.left = DecisionTree( depth=self.depth - 1, min_leaf_size=self.min_leaf_size ) - self.right = Decision_Tree( + self.right = DecisionTree( depth=self.depth - 1, min_leaf_size=self.min_leaf_size ) - self.left.train(left_X, left_y) - self.right.train(right_X, right_y) + self.left.train(left_x, left_y) + self.right.train(right_x, right_y) else: self.prediction = np.mean(y) @@ -134,7 +134,7 @@ class Decision_Tree: return None -class Test_Decision_Tree: +class TestDecisionTree: """Decision Tres test class""" @staticmethod @@ -159,11 +159,11 @@ def main(): predict the label of 10 different test values. Then the mean squared error over this test is displayed. """ - X = np.arange(-1.0, 1.0, 0.005) - y = np.sin(X) + x = np.arange(-1.0, 1.0, 0.005) + y = np.sin(x) - tree = Decision_Tree(depth=10, min_leaf_size=10) - tree.train(X, y) + tree = DecisionTree(depth=10, min_leaf_size=10) + tree.train(x, y) test_cases = (np.random.rand(10) * 2) - 1 predictions = np.array([tree.predict(x) for x in test_cases]) diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py index c200aa5a4..77e732662 100644 --- a/machine_learning/gaussian_naive_bayes.py +++ b/machine_learning/gaussian_naive_bayes.py @@ -17,19 +17,19 @@ def main(): iris = load_iris() # Split dataset into train and test data - X = iris["data"] # features - Y = iris["target"] + x = iris["data"] # features + y = iris["target"] x_train, x_test, y_train, y_test = train_test_split( - X, Y, test_size=0.3, random_state=1 + x, y, test_size=0.3, random_state=1 ) # Gaussian Naive Bayes - NB_model = GaussianNB() - NB_model.fit(x_train, y_train) + nb_model = GaussianNB() + nb_model.fit(x_train, y_train) # Display Confusion Matrix plot_confusion_matrix( - NB_model, + nb_model, x_test, y_test, display_labels=iris["target_names"], diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py index c73e30680..c082f3caf 100644 --- a/machine_learning/gradient_boosting_regressor.py +++ b/machine_learning/gradient_boosting_regressor.py @@ -26,25 +26,25 @@ def main(): print(df_boston.describe().T) # Feature selection - X = df_boston.iloc[:, :-1] + x = df_boston.iloc[:, :-1] y = df_boston.iloc[:, -1] # target variable # split the data with 75% train and 25% test sets. 
- X_train, X_test, y_train, y_test = train_test_split( - X, y, random_state=0, test_size=0.25 + x_train, x_test, y_train, y_test = train_test_split( + x, y, random_state=0, test_size=0.25 ) model = GradientBoostingRegressor( n_estimators=500, max_depth=5, min_samples_split=4, learning_rate=0.01 ) # training the model - model.fit(X_train, y_train) + model.fit(x_train, y_train) # to see how good the model fit the data - training_score = model.score(X_train, y_train).round(3) - test_score = model.score(X_test, y_test).round(3) + training_score = model.score(x_train, y_train).round(3) + test_score = model.score(x_test, y_test).round(3) print("Training score of GradientBoosting is :", training_score) print("The test score of GradientBoosting is :", test_score) # Let us evaluation the model by finding the errors - y_pred = model.predict(X_test) + y_pred = model.predict(x_test) # The mean squared error print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}") diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 60450b7f8..5dc2b7118 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -69,8 +69,8 @@ def get_initial_centroids(data, k, seed=None): return centroids -def centroid_pairwise_dist(X, centroids): - return pairwise_distances(X, centroids, metric="euclidean") +def centroid_pairwise_dist(x, centroids): + return pairwise_distances(x, centroids, metric="euclidean") def assign_clusters(data, centroids): @@ -197,8 +197,8 @@ if False: # change to true to run this test case. plot_heterogeneity(heterogeneity, k) -def ReportGenerator( - df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None +def report_generator( + df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None ) -> pd.DataFrame: """ Function generates easy-erading clustering report. 
It takes 2 arguments as an input: @@ -214,7 +214,7 @@ def ReportGenerator( >>> data['col2'] = [100, 200, 300] >>> data['col3'] = [10, 20, 30] >>> data['Cluster'] = [1, 1, 2] - >>> ReportGenerator(data, ['col1', 'col2'], 0) + >>> report_generator(data, ['col1', 'col2'], 0) Features Type Mark 1 2 0 # of Customers ClusterSize False 2.000000 1.000000 1 % of Customers ClusterProportion False 0.666667 0.333333 @@ -231,8 +231,8 @@ def ReportGenerator( [104 rows x 5 columns] """ # Fill missing values with given rules - if FillMissingReport: - df.fillna(value=FillMissingReport, inplace=True) + if fill_missing_report: + df.fillna(value=fill_missing_report, inplace=True) df["dummy"] = 1 numeric_cols = df.select_dtypes(np.number).columns report = ( @@ -313,7 +313,7 @@ def ReportGenerator( report = pd.concat( [report, a, clustersize, clusterproportion], axis=0 ) # concat report with clustert size and nan values - report["Mark"] = report["Features"].isin(ClusteringVariables) + report["Mark"] = report["Features"].isin(clustering_variables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] report = report[cols] diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index db6868687..6c542ab82 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -41,11 +41,11 @@ def local_weight( [0.08272556]]) """ weight = weighted_matrix(point, training_data_x, bandwidth) - W = (training_data_x.T * (weight * training_data_x)).I * ( + w = (training_data_x.T * (weight * training_data_x)).I * ( training_data_x.T * weight * training_data_y.T ) - return W + return w def local_weight_regression( diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 48d88ef61..87bc8f668 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -35,25 +35,25 @@ def cost_function(h, y): return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() -def log_likelihood(X, Y, weights): - scores = np.dot(X, weights) - return np.sum(Y * scores - np.log(1 + np.exp(scores))) +def log_likelihood(x, y, weights): + scores = np.dot(x, weights) + return np.sum(y * scores - np.log(1 + np.exp(scores))) # here alpha is the learning rate, X is the feature matrix,y is the target matrix -def logistic_reg(alpha, X, y, max_iterations=70000): - theta = np.zeros(X.shape[1]) +def logistic_reg(alpha, x, y, max_iterations=70000): + theta = np.zeros(x.shape[1]) for iterations in range(max_iterations): - z = np.dot(X, theta) + z = np.dot(x, theta) h = sigmoid_function(z) - gradient = np.dot(X.T, h - y) / y.size + gradient = np.dot(x.T, h - y) / y.size theta = theta - alpha * gradient # updating the weights - z = np.dot(X, theta) + z = np.dot(x, theta) h = sigmoid_function(z) - J = cost_function(h, y) + j = cost_function(h, y) if iterations % 100 == 0: - print(f"loss: {J} \t") # printing the loss after every 100 iterations + print(f"loss: {j} \t") # printing the loss after every 100 iterations return theta @@ -61,23 +61,23 @@ def logistic_reg(alpha, X, y, max_iterations=70000): if __name__ == "__main__": iris = datasets.load_iris() - X = iris.data[:, :2] + x = iris.data[:, :2] y = (iris.target != 0) * 1 alpha = 0.1 - theta = logistic_reg(alpha, X, y, max_iterations=70000) + theta = logistic_reg(alpha, x, y, max_iterations=70000) print("theta: ", theta) # printing the 
theta i.e our weights vector - def predict_prob(X): + def predict_prob(x): return sigmoid_function( - np.dot(X, theta) + np.dot(x, theta) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) - plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0") - plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1") - (x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max()) - (x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max()) + plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0") + plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1") + (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max()) + (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max()) (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max)) grid = np.c_[xx1.ravel(), xx2.ravel()] probs = predict_prob(grid).reshape(xx1.shape) diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py index 604185cef..e99a4131e 100644 --- a/machine_learning/multilayer_perceptron_classifier.py +++ b/machine_learning/multilayer_perceptron_classifier.py @@ -15,12 +15,12 @@ test = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]] Y = clf.predict(test) -def wrapper(Y): +def wrapper(y): """ >>> wrapper(Y) [0, 0, 1] """ - return list(Y) + return list(y) if __name__ == "__main__": diff --git a/machine_learning/random_forest_classifier.py b/machine_learning/random_forest_classifier.py index 637025409..3267fa209 100644 --- a/machine_learning/random_forest_classifier.py +++ b/machine_learning/random_forest_classifier.py @@ -17,10 +17,10 @@ def main(): iris = load_iris() # Split dataset into train and test data - X = iris["data"] # features - Y = iris["target"] + x = iris["data"] # features + y = iris["target"] x_train, x_test, y_train, y_test = train_test_split( - X, Y, test_size=0.3, random_state=1 + x, y, test_size=0.3, random_state=1 ) # Random Forest Classifier diff --git a/machine_learning/random_forest_regressor.py b/machine_learning/random_forest_regressor.py index 0aade626b..1001931a1 100644 --- a/machine_learning/random_forest_regressor.py +++ b/machine_learning/random_forest_regressor.py @@ -17,10 +17,10 @@ def main(): print(boston.keys()) # Split dataset into train and test data - X = boston["data"] # features - Y = boston["target"] + x = boston["data"] # features + y = boston["target"] x_train, x_test, y_train, y_test = train_test_split( - X, Y, test_size=0.3, random_state=1 + x, y, test_size=0.3, random_state=1 ) # Random Forest Regressor diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index cc7868d0f..fb4b35f31 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -80,7 +80,7 @@ class SmoSVM: # Calculate alphas using SMO algorithm def fit(self): - K = self._k + k = self._k state = None while True: @@ -106,14 +106,14 @@ class SmoSVM: # 3: update threshold(b) b1_new = np.float64( -e1 - - y1 * K(i1, i1) * (a1_new - a1) - - y2 * K(i2, i1) * (a2_new - a2) + - y1 * k(i1, i1) * (a1_new - a1) + - y2 * k(i2, i1) * (a2_new - a2) + self._b ) b2_new = np.float64( -e2 - - y2 * K(i2, i2) * (a2_new - a2) - - y1 * K(i1, i2) * (a1_new - a1) + - y2 * k(i2, i2) * (a2_new - a2) + - y1 * k(i1, i2) * (a1_new - a1) + self._b ) if 0.0 < a1_new < self._c: @@ -134,8 +134,8 @@ class SmoSVM: if s == i1 or s == i2: continue self._error[s] += ( - y1 * (a1_new - a1) * K(i1, s) - + y2 
* (a2_new - a2) * K(i2, s) + y1 * (a1_new - a1) * k(i1, s) + + y2 * (a2_new - a2) * k(i2, s) + (self._b - b_old) ) @@ -305,56 +305,56 @@ class SmoSVM: # Get the new alpha2 and new alpha1 def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): - K = self._k + k = self._k if i1 == i2: return None, None # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: - L, H = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) else: - L, H = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) - if L == H: + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + if l == h: # noqa: E741 return None, None # calculate eta - k11 = K(i1, i1) - k22 = K(i2, i2) - k12 = K(i1, i2) + k11 = k(i1, i1) + k22 = k(i2, i2) + k12 = k(i1, i2) eta = k11 + k22 - 2.0 * k12 # select the new alpha2 which could get the minimal objectives if eta > 0.0: a2_new_unc = a2 + (y2 * (e1 - e2)) / eta # a2_new has a boundary - if a2_new_unc >= H: - a2_new = H - elif a2_new_unc <= L: - a2_new = L + if a2_new_unc >= h: + a2_new = h + elif a2_new_unc <= l: + a2_new = l else: a2_new = a2_new_unc else: b = self._b - l1 = a1 + s * (a2 - L) - h1 = a1 + s * (a2 - H) + l1 = a1 + s * (a2 - l) + h1 = a1 + s * (a2 - h) # way 1 - f1 = y1 * (e1 + b) - a1 * K(i1, i1) - s * a2 * K(i1, i2) - f2 = y2 * (e2 + b) - a2 * K(i2, i2) - s * a1 * K(i1, i2) + f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) + f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) ol = ( l1 * f1 - + L * f2 - + 1 / 2 * l1**2 * K(i1, i1) - + 1 / 2 * L**2 * K(i2, i2) - + s * L * l1 * K(i1, i2) + + l * f2 + + 1 / 2 * l1**2 * k(i1, i1) + + 1 / 2 * l**2 * k(i2, i2) + + s * l * l1 * k(i1, i2) ) oh = ( h1 * f1 - + H * f2 - + 1 / 2 * h1**2 * K(i1, i1) - + 1 / 2 * H**2 * K(i2, i2) - + s * H * h1 * K(i1, i2) + + h * f2 + + 1 / 2 * h1**2 * k(i1, i1) + + 1 / 2 * h**2 * k(i2, i2) + + s * h * h1 * k(i1, i2) ) """ # way 2 @@ -362,9 +362,9 @@ class SmoSVM: objectives """ if ol < (oh - self._eps): - a2_new = L + a2_new = l elif ol > oh + self._eps: - a2_new = H + a2_new = h else: a2_new = a2 diff --git a/machine_learning/word_frequency_functions.py b/machine_learning/word_frequency_functions.py index 3e8faf39c..8fd2741f6 100644 --- a/machine_learning/word_frequency_functions.py +++ b/machine_learning/word_frequency_functions.py @@ -83,7 +83,7 @@ the third document in the corpus.") return (len([doc for doc in docs if term in doc]), len(docs)) -def inverse_document_frequency(df: int, N: int, smoothing=False) -> float: +def inverse_document_frequency(df: int, n: int, smoothing=False) -> float: """ Return an integer denoting the importance of a word. 
This measure of importance is @@ -109,15 +109,15 @@ def inverse_document_frequency(df: int, N: int, smoothing=False) -> float: 1.477 """ if smoothing: - if N == 0: + if n == 0: raise ValueError("log10(0) is undefined.") - return round(1 + log10(N / (1 + df)), 3) + return round(1 + log10(n / (1 + df)), 3) if df == 0: raise ZeroDivisionError("df must be > 0") - elif N == 0: + elif n == 0: raise ValueError("log10(0) is undefined.") - return round(log10(N / df), 3) + return round(log10(n / df), 3) def tf_idf(tf: int, idf: int) -> float: diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py index 4def04149..0d4b3d1a8 100644 --- a/maths/binomial_coefficient.py +++ b/maths/binomial_coefficient.py @@ -5,16 +5,16 @@ def binomial_coefficient(n, r): >>> binomial_coefficient(10, 5) 252 """ - C = [0 for i in range(r + 1)] + c = [0 for i in range(r + 1)] # nc0 = 1 - C[0] = 1 + c[0] = 1 for i in range(1, n + 1): # to compute current row from previous row. j = min(i, r) while j > 0: - C[j] += C[j - 1] + c[j] += c[j - 1] j -= 1 - return C[r] + return c[r] print(binomial_coefficient(n=10, r=5)) diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index 09a4fedfb..c9c144759 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -30,7 +30,7 @@ def power(x: int, y: int, mod: int) -> int: return temp -def isCarmichaelNumber(n: int) -> bool: +def is_carmichael_number(n: int) -> bool: b = 2 while b < n: if gcd(b, n) == 1 and power(b, n - 1, n) != 1: @@ -41,7 +41,7 @@ def isCarmichaelNumber(n: int) -> bool: if __name__ == "__main__": number = int(input("Enter number: ").strip()) - if isCarmichaelNumber(number): + if is_carmichael_number(number): print(f"{number} is a Carmichael Number.") else: print(f"{number} is not a Carmichael Number.") diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py index 0e3967a46..1b8f6cbca 100644 --- a/maths/decimal_isolate.py +++ b/maths/decimal_isolate.py @@ -4,7 +4,7 @@ https://stackoverflow.com/questions/3886402/how-to-get-numbers-after-decimal-poi """ -def decimal_isolate(number, digitAmount): +def decimal_isolate(number, digit_amount): """ Isolates the decimal part of a number. 
@@ -28,8 +28,8 @@ def decimal_isolate(number, digitAmount): >>> decimal_isolate(-14.123, 3) -0.123 """ - if digitAmount > 0: - return round(number - int(number), digitAmount) + if digit_amount > 0: + return round(number - int(number), digit_amount) return number - int(number) diff --git a/maths/euler_method.py b/maths/euler_method.py index af7eecb2f..30f193e6d 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -29,12 +29,12 @@ def explicit_euler( >>> y[-1] 144.77277243257308 """ - N = int(np.ceil((x_end - x0) / step_size)) - y = np.zeros((N + 1,)) + n = int(np.ceil((x_end - x0) / step_size)) + y = np.zeros((n + 1,)) y[0] = y0 x = x0 - for k in range(N): + for k in range(n): y[k + 1] = y[k] + step_size * ode_func(x, y[k]) x += step_size diff --git a/maths/euler_modified.py b/maths/euler_modified.py index 5659fa063..14bddadf4 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -33,12 +33,12 @@ def euler_modified( >>> y[-1] 0.5525976431951775 """ - N = int(np.ceil((x_end - x0) / step_size)) - y = np.zeros((N + 1,)) + n = int(np.ceil((x_end - x0) / step_size)) + y = np.zeros((n + 1,)) y[0] = y0 x = x0 - for k in range(N): + for k in range(n): y_get = y[k] + step_size * ode_func(x, y[k]) y[k + 1] = y[k] + ( (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get)) diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py index e36f763da..6929533fc 100644 --- a/maths/hardy_ramanujanalgo.py +++ b/maths/hardy_ramanujanalgo.py @@ -4,9 +4,9 @@ import math -def exactPrimeFactorCount(n): +def exact_prime_factor_count(n): """ - >>> exactPrimeFactorCount(51242183) + >>> exact_prime_factor_count(51242183) 3 """ count = 0 @@ -36,7 +36,7 @@ def exactPrimeFactorCount(n): if __name__ == "__main__": n = 51242183 - print(f"The number of distinct prime factors is/are {exactPrimeFactorCount(n)}") + print(f"The number of distinct prime factors is/are {exact_prime_factor_count(n)}") print(f"The value of log(log(n)) is {math.log(math.log(n)):.4f}") """ diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index 4f24d308f..77f4b90ea 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -14,7 +14,7 @@ Jaccard similarity is widely used with MinHashing. """ -def jaccard_similariy(setA, setB, alternativeUnion=False): +def jaccard_similariy(set_a, set_b, alternative_union=False): """ Finds the jaccard similarity between two sets. Essentially, its intersection over union. @@ -24,8 +24,8 @@ def jaccard_similariy(setA, setB, alternativeUnion=False): of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77] Parameters: - :setA (set,list,tuple): A non-empty set/list - :setB (set,list,tuple): A non-empty set/list + :set_a (set,list,tuple): A non-empty set/list + :set_b (set,list,tuple): A non-empty set/list :alternativeUnion (boolean): If True, use sum of number of items as union @@ -33,48 +33,48 @@ def jaccard_similariy(setA, setB, alternativeUnion=False): (float) The jaccard similarity between the two sets. 
Examples: - >>> setA = {'a', 'b', 'c', 'd', 'e'} - >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'} - >>> jaccard_similariy(setA,setB) + >>> set_a = {'a', 'b', 'c', 'd', 'e'} + >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'} + >>> jaccard_similariy(set_a, set_b) 0.375 - >>> jaccard_similariy(setA,setA) + >>> jaccard_similariy(set_a, set_a) 1.0 - >>> jaccard_similariy(setA,setA,True) + >>> jaccard_similariy(set_a, set_a, True) 0.5 - >>> setA = ['a', 'b', 'c', 'd', 'e'] - >>> setB = ('c', 'd', 'e', 'f', 'h', 'i') - >>> jaccard_similariy(setA,setB) + >>> set_a = ['a', 'b', 'c', 'd', 'e'] + >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i') + >>> jaccard_similariy(set_a, set_b) 0.375 """ - if isinstance(setA, set) and isinstance(setB, set): + if isinstance(set_a, set) and isinstance(set_b, set): - intersection = len(setA.intersection(setB)) + intersection = len(set_a.intersection(set_b)) - if alternativeUnion: - union = len(setA) + len(setB) + if alternative_union: + union = len(set_a) + len(set_b) else: - union = len(setA.union(setB)) + union = len(set_a.union(set_b)) return intersection / union - if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)): + if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)): - intersection = [element for element in setA if element in setB] + intersection = [element for element in set_a if element in set_b] - if alternativeUnion: - union = len(setA) + len(setB) + if alternative_union: + union = len(set_a) + len(set_b) else: - union = setA + [element for element in setB if element not in setA] + union = set_a + [element for element in set_b if element not in set_a] return len(intersection) / len(union) if __name__ == "__main__": - setA = {"a", "b", "c", "d", "e"} - setB = {"c", "d", "e", "f", "h", "i"} - print(jaccard_similariy(setA, setB)) + set_a = {"a", "b", "c", "d", "e"} + set_b = {"c", "d", "e", "f", "h", "i"} + print(jaccard_similariy(set_a, set_b)) diff --git a/maths/krishnamurthy_number.py b/maths/krishnamurthy_number.py index c88f68a07..c1d8a8fc5 100644 --- a/maths/krishnamurthy_number.py +++ b/maths/krishnamurthy_number.py @@ -33,12 +33,12 @@ def krishnamurthy(number: int) -> bool: True """ - factSum = 0 + fact_sum = 0 duplicate = number while duplicate > 0: duplicate, digit = divmod(duplicate, 10) - factSum += factorial(digit) - return factSum == number + fact_sum += factorial(digit) + return fact_sum == number if __name__ == "__main__": diff --git a/maths/kth_lexicographic_permutation.py b/maths/kth_lexicographic_permutation.py index 23eab626f..b85558aca 100644 --- a/maths/kth_lexicographic_permutation.py +++ b/maths/kth_lexicographic_permutation.py @@ -1,17 +1,17 @@ -def kthPermutation(k, n): +def kth_permutation(k, n): """ Finds k'th lexicographic permutation (in increasing order) of 0,1,2,...n-1 in O(n^2) time. Examples: First permutation is always 0,1,2,...n - >>> kthPermutation(0,5) + >>> kth_permutation(0,5) [0, 1, 2, 3, 4] The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3], [0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3], [1,2,3,0], [1,3,0,2] - >>> kthPermutation(10,4) + >>> kth_permutation(10,4) [1, 3, 0, 2] """ # Factorails from 1! to (n-1)! 
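Since the rename in the hunk above is purely mechanical, the doctest values should be unchanged; the short standalone sketch below (not part of the patch — the name jaccard_sketch and the asserts are illustrative assumptions) mirrors the set branch of jaccard_similariy() with the post-rename snake_case parameters and reproduces the 0.375 / 1.0 / 0.5 results from its docstring.

# Illustrative sketch only, not part of the patch: re-states the set branch of
# jaccard_similariy() after the snake_case rename; jaccard_sketch is a
# hypothetical name chosen for this example.
def jaccard_sketch(set_a: set, set_b: set, alternative_union: bool = False) -> float:
    # Intersection over union, or over the summed sizes when the
    # alternative_union flag is set (as described in the original docstring).
    intersection = len(set_a & set_b)
    union = len(set_a) + len(set_b) if alternative_union else len(set_a | set_b)
    return intersection / union

if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    assert jaccard_sketch(set_a, set_b) == 0.375
    assert jaccard_sketch(set_a, set_a) == 1.0
    assert jaccard_sketch(set_a, set_a, alternative_union=True) == 0.5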
diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 15e25cbfe..916abfcc1 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -30,9 +30,9 @@ def lucas_lehmer_test(p: int) -> bool: return True s = 4 - M = (1 << p) - 1 + m = (1 << p) - 1 for i in range(p - 2): - s = ((s * s) - 2) % M + s = ((s * s) - 2) % m return s == 0 diff --git a/maths/primelib.py b/maths/primelib.py index 3da9c56f6..7d2a22f39 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -8,27 +8,27 @@ prime numbers and whole numbers. Overview: -isPrime(number) -sieveEr(N) -getPrimeNumbers(N) -primeFactorization(number) -greatestPrimeFactor(number) -smallestPrimeFactor(number) -getPrime(n) -getPrimesBetween(pNumber1, pNumber2) +is_prime(number) +sieve_er(N) +get_prime_numbers(N) +prime_factorization(number) +greatest_prime_factor(number) +smallest_prime_factor(number) +get_prime(n) +get_primes_between(pNumber1, pNumber2) ---- -isEven(number) -isOdd(number) +is_even(number) +is_odd(number) gcd(number1, number2) // greatest common divisor -kgV(number1, number2) // least common multiple -getDivisors(number) // all divisors of 'number' inclusive 1, number -isPerfectNumber(number) +kg_v(number1, number2) // least common multiple +get_divisors(number) // all divisors of 'number' inclusive 1, number +is_perfect_number(number) NEW-FUNCTIONS -simplifyFraction(numerator, denominator) +simplify_fraction(numerator, denominator) factorial (n) // n! fib (n) // calculate the n-th fibonacci term. @@ -75,7 +75,7 @@ def is_prime(number: int) -> bool: # ------------------------------------------ -def sieveEr(N): +def sieve_er(n): """ input: positive integer 'N' > 2 returns a list of prime numbers from 2 up to N. @@ -86,23 +86,23 @@ def sieveEr(N): """ # precondition - assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2" + assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N - beginList = [x for x in range(2, N + 1)] + begin_list = [x for x in range(2, n + 1)] ans = [] # this list will be returns. # actual sieve of erathostenes - for i in range(len(beginList)): + for i in range(len(begin_list)): - for j in range(i + 1, len(beginList)): + for j in range(i + 1, len(begin_list)): - if (beginList[i] != 0) and (beginList[j] % beginList[i] == 0): - beginList[j] = 0 + if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): + begin_list[j] = 0 # filters actual prime numbers. 
- ans = [x for x in beginList if x != 0] + ans = [x for x in begin_list if x != 0] # precondition assert isinstance(ans, list), "'ans' must been from type list" @@ -113,7 +113,7 @@ def sieveEr(N): # -------------------------------- -def getPrimeNumbers(N): +def get_prime_numbers(n): """ input: positive integer 'N' > 2 returns a list of prime numbers from 2 up to N (inclusive) @@ -121,13 +121,13 @@ def getPrimeNumbers(N): """ # precondition - assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2" + assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2" ans = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' - for number in range(2, N + 1): + for number in range(2, n + 1): if is_prime(number): @@ -142,7 +142,7 @@ def getPrimeNumbers(N): # ----------------------------------------- -def primeFactorization(number): +def prime_factorization(number): """ input: positive integer 'number' returns a list of the prime number factors of 'number' @@ -186,7 +186,7 @@ def primeFactorization(number): # ----------------------------------------- -def greatestPrimeFactor(number): +def greatest_prime_factor(number): """ input: positive integer 'number' >= 0 returns the greatest prime number factor of 'number' @@ -200,9 +200,9 @@ def greatestPrimeFactor(number): ans = 0 # prime factorization of 'number' - primeFactors = primeFactorization(number) + prime_factors = prime_factorization(number) - ans = max(primeFactors) + ans = max(prime_factors) # precondition assert isinstance(ans, int), "'ans' must been from type int" @@ -213,7 +213,7 @@ def greatestPrimeFactor(number): # ---------------------------------------------- -def smallestPrimeFactor(number): +def smallest_prime_factor(number): """ input: integer 'number' >= 0 returns the smallest prime number factor of 'number' @@ -227,9 +227,9 @@ def smallestPrimeFactor(number): ans = 0 # prime factorization of 'number' - primeFactors = primeFactorization(number) + prime_factors = prime_factorization(number) - ans = min(primeFactors) + ans = min(prime_factors) # precondition assert isinstance(ans, int), "'ans' must been from type int" @@ -240,7 +240,7 @@ def smallestPrimeFactor(number): # ---------------------- -def isEven(number): +def is_even(number): """ input: integer 'number' returns true if 'number' is even, otherwise false. @@ -256,7 +256,7 @@ def isEven(number): # ------------------------ -def isOdd(number): +def is_odd(number): """ input: integer 'number' returns true if 'number' is odd, otherwise false. @@ -281,14 +281,14 @@ def goldbach(number): # precondition assert ( - isinstance(number, int) and (number > 2) and isEven(number) + isinstance(number, int) and (number > 2) and is_even(number) ), "'number' must been an int, even and > 2" ans = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' - primeNumbers = getPrimeNumbers(number) - lenPN = len(primeNumbers) + prime_numbers = get_prime_numbers(number) + len_pn = len(prime_numbers) # run variable for while-loops. i = 0 @@ -297,16 +297,16 @@ def goldbach(number): # exit variable. 
for break up the loops loop = True - while i < lenPN and loop: + while i < len_pn and loop: j = i + 1 - while j < lenPN and loop: + while j < len_pn and loop: - if primeNumbers[i] + primeNumbers[j] == number: + if prime_numbers[i] + prime_numbers[j] == number: loop = False - ans.append(primeNumbers[i]) - ans.append(primeNumbers[j]) + ans.append(prime_numbers[i]) + ans.append(prime_numbers[j]) j += 1 @@ -361,7 +361,7 @@ def gcd(number1, number2): # ---------------------------------------------------- -def kgV(number1, number2): +def kg_v(number1, number2): """ Least common multiple input: two positive integer 'number1' and 'number2' @@ -382,13 +382,13 @@ def kgV(number1, number2): if number1 > 1 and number2 > 1: # builds the prime factorization of 'number1' and 'number2' - primeFac1 = primeFactorization(number1) - primeFac2 = primeFactorization(number2) + prime_fac_1 = prime_factorization(number1) + prime_fac_2 = prime_factorization(number2) elif number1 == 1 or number2 == 1: - primeFac1 = [] - primeFac2 = [] + prime_fac_1 = [] + prime_fac_2 = [] ans = max(number1, number2) count1 = 0 @@ -397,21 +397,21 @@ def kgV(number1, number2): done = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 - for n in primeFac1: + for n in prime_fac_1: if n not in done: - if n in primeFac2: + if n in prime_fac_2: - count1 = primeFac1.count(n) - count2 = primeFac2.count(n) + count1 = prime_fac_1.count(n) + count2 = prime_fac_2.count(n) for i in range(max(count1, count2)): ans *= n else: - count1 = primeFac1.count(n) + count1 = prime_fac_1.count(n) for i in range(count1): ans *= n @@ -419,11 +419,11 @@ def kgV(number1, number2): done.append(n) # iterates through primeFac2 - for n in primeFac2: + for n in prime_fac_2: if n not in done: - count2 = primeFac2.count(n) + count2 = prime_fac_2.count(n) for i in range(count2): ans *= n @@ -441,7 +441,7 @@ def kgV(number1, number2): # ---------------------------------- -def getPrime(n): +def get_prime(n): """ Gets the n-th prime number. input: positive integer 'n' >= 0 @@ -476,7 +476,7 @@ def getPrime(n): # --------------------------------------------------- -def getPrimesBetween(pNumber1, pNumber2): +def get_primes_between(p_number_1, p_number_2): """ input: prime numbers 'pNumber1' and 'pNumber2' pNumber1 < pNumber2 @@ -486,10 +486,10 @@ def getPrimesBetween(pNumber1, pNumber2): # precondition assert ( - is_prime(pNumber1) and is_prime(pNumber2) and (pNumber1 < pNumber2) + is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" - number = pNumber1 + 1 # jump to the next number + number = p_number_1 + 1 # jump to the next number ans = [] # this list will be returns. @@ -498,7 +498,7 @@ def getPrimesBetween(pNumber1, pNumber2): while not is_prime(number): number += 1 - while number < pNumber2: + while number < p_number_2: ans.append(number) @@ -510,7 +510,9 @@ def getPrimesBetween(pNumber1, pNumber2): # precondition assert ( - isinstance(ans, list) and ans[0] != pNumber1 and ans[len(ans) - 1] != pNumber2 + isinstance(ans, list) + and ans[0] != p_number_1 + and ans[len(ans) - 1] != p_number_2 ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
@@ -520,7 +522,7 @@ def getPrimesBetween(pNumber1, pNumber2): # ---------------------------------------------------- -def getDivisors(n): +def get_divisors(n): """ input: positive integer 'n' >= 1 returns all divisors of n (inclusive 1 and 'n') @@ -545,7 +547,7 @@ def getDivisors(n): # ---------------------------------------------------- -def isPerfectNumber(number): +def is_perfect_number(number): """ input: positive integer 'number' > 1 returns true if 'number' is a perfect number otherwise false. @@ -556,7 +558,7 @@ def isPerfectNumber(number): number > 1 ), "'number' must been an int and >= 1" - divisors = getDivisors(number) + divisors = get_divisors(number) # precondition assert ( @@ -572,7 +574,7 @@ def isPerfectNumber(number): # ------------------------------------------------------------ -def simplifyFraction(numerator, denominator): +def simplify_fraction(numerator, denominator): """ input: two integer 'numerator' and 'denominator' assumes: 'denominator' != 0 @@ -587,16 +589,16 @@ def simplifyFraction(numerator, denominator): ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. - gcdOfFraction = gcd(abs(numerator), abs(denominator)) + gcd_of_fraction = gcd(abs(numerator), abs(denominator)) # precondition assert ( - isinstance(gcdOfFraction, int) - and (numerator % gcdOfFraction == 0) - and (denominator % gcdOfFraction == 0) + isinstance(gcd_of_fraction, int) + and (numerator % gcd_of_fraction == 0) + and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" - return (numerator // gcdOfFraction, denominator // gcdOfFraction) + return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) # ----------------------------------------------------------------- diff --git a/maths/qr_decomposition.py b/maths/qr_decomposition.py index 5e15fede4..a8414fbec 100644 --- a/maths/qr_decomposition.py +++ b/maths/qr_decomposition.py @@ -1,7 +1,7 @@ import numpy as np -def qr_householder(A): +def qr_householder(a): """Return a QR-decomposition of the matrix A using Householder reflection. 
The QR-decomposition decomposes the matrix A of shape (m, n) into an @@ -37,14 +37,14 @@ def qr_householder(A): >>> np.allclose(np.triu(R), R) True """ - m, n = A.shape + m, n = a.shape t = min(m, n) - Q = np.eye(m) - R = A.copy() + q = np.eye(m) + r = a.copy() for k in range(t - 1): # select a column of modified matrix A': - x = R[k:, [k]] + x = r[k:, [k]] # construct first basis vector e1 = np.zeros_like(x) e1[0] = 1.0 @@ -55,14 +55,14 @@ def qr_householder(A): v /= np.linalg.norm(v) # construct the Householder matrix - Q_k = np.eye(m - k) - 2.0 * v @ v.T + q_k = np.eye(m - k) - 2.0 * v @ v.T # pad with ones and zeros as necessary - Q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), Q_k]]) + q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), q_k]]) - Q = Q @ Q_k.T - R = Q_k @ R + q = q @ q_k.T + r = q_k @ r - return Q, R + return q, r if __name__ == "__main__": diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 0a431a115..52442134d 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -49,10 +49,10 @@ class FFT: A*B = 0*x^(-0+0j) + 1*x^(2+0j) + 2*x^(3+0j) + 3*x^(8+0j) + 4*x^(6+0j) + 5*x^(8+0j) """ - def __init__(self, polyA=None, polyB=None): + def __init__(self, poly_a=None, poly_b=None): # Input as list - self.polyA = list(polyA or [0])[:] - self.polyB = list(polyB or [0])[:] + self.polyA = list(poly_a or [0])[:] + self.polyB = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: @@ -64,22 +64,22 @@ class FFT: self.len_B = len(self.polyB) # Add 0 to make lengths equal a power of 2 - self.C_max_length = int( + self.c_max_length = int( 2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)) ) - while len(self.polyA) < self.C_max_length: + while len(self.polyA) < self.c_max_length: self.polyA.append(0) - while len(self.polyB) < self.C_max_length: + while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform - self.root = complex(mpmath.root(x=1, n=self.C_max_length, k=1)) + self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1)) # The product self.product = self.__multiply() # Discrete fourier transform of A and B - def __DFT(self, which): + def __dft(self, which): if which == "A": dft = [[x] for x in self.polyA] else: @@ -88,20 +88,20 @@ class FFT: if len(dft) <= 1: return dft[0] # - next_ncol = self.C_max_length // 2 + next_ncol = self.c_max_length // 2 while next_ncol > 0: new_dft = [[] for i in range(next_ncol)] root = self.root**next_ncol # First half of next step current_root = 1 - for j in range(self.C_max_length // (next_ncol * 2)): + for j in range(self.c_max_length // (next_ncol * 2)): for i in range(next_ncol): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step current_root = 1 - for j in range(self.C_max_length // (next_ncol * 2)): + for j in range(self.c_max_length // (next_ncol * 2)): for i in range(next_ncol): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root @@ -112,65 +112,65 @@ class FFT: # multiply the DFTs of A and B and find A*B def __multiply(self): - dftA = self.__DFT("A") - dftB = self.__DFT("B") - inverseC = [[dftA[i] * dftB[i] for i in range(self.C_max_length)]] - del dftA - del dftB + dft_a = self.__dft("A") + dft_b = self.__dft("B") + inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] + del dft_a + del dft_b # Corner Case - if len(inverseC[0]) <= 1: - return inverseC[0] + if 
len(inverce_c[0]) <= 1: + return inverce_c[0] # Inverse DFT next_ncol = 2 - while next_ncol <= self.C_max_length: - new_inverseC = [[] for i in range(next_ncol)] + while next_ncol <= self.c_max_length: + new_inverse_c = [[] for i in range(next_ncol)] root = self.root ** (next_ncol // 2) current_root = 1 # First half of next step - for j in range(self.C_max_length // next_ncol): + for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions - new_inverseC[i].append( + new_inverse_c[i].append( ( - inverseC[i][j] - + inverseC[i][j + self.C_max_length // next_ncol] + inverce_c[i][j] + + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions - new_inverseC[i + next_ncol // 2].append( + new_inverse_c[i + next_ncol // 2].append( ( - inverseC[i][j] - - inverseC[i][j + self.C_max_length // next_ncol] + inverce_c[i][j] + - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update - inverseC = new_inverseC + inverce_c = new_inverse_c next_ncol *= 2 # Unpack - inverseC = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverseC] + inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c] # Remove leading 0's - while inverseC[-1] == 0: - inverseC.pop() - return inverseC + while inverce_c[-1] == 0: + inverce_c.pop() + return inverce_c # Overwrite __str__ for print(); Shows A, B and A*B def __str__(self): - A = "A = " + " + ".join( + a = "A = " + " + ".join( f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A]) ) - B = "B = " + " + ".join( + b = "B = " + " + ".join( f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B]) ) - C = "A*B = " + " + ".join( + c = "A*B = " + " + ".join( f"{coef}*x^{i}" for coef, i in enumerate(self.product) ) - return "\n".join((A, B, C)) + return "\n".join((a, b, c)) # Unit tests diff --git a/maths/runge_kutta.py b/maths/runge_kutta.py index 383797daa..4cac017ee 100644 --- a/maths/runge_kutta.py +++ b/maths/runge_kutta.py @@ -22,12 +22,12 @@ def runge_kutta(f, y0, x0, h, x_end): >>> y[-1] 148.41315904125113 """ - N = int(np.ceil((x_end - x0) / h)) - y = np.zeros((N + 1,)) + n = int(np.ceil((x_end - x0) / h)) + y = np.zeros((n + 1,)) y[0] = y0 x = x0 - for k in range(N): + for k in range(n): k1 = f(x, y[k]) k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1) k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2) diff --git a/maths/softmax.py b/maths/softmax.py index e021a7f8a..04cf77525 100644 --- a/maths/softmax.py +++ b/maths/softmax.py @@ -41,13 +41,13 @@ def softmax(vector): # Calculate e^x for each x in your vector where e is Euler's # number (approximately 2.718) - exponentVector = np.exp(vector) + exponent_vector = np.exp(vector) # Add up the all the exponentials - sumOfExponents = np.sum(exponentVector) + sum_of_exponents = np.sum(exponent_vector) # Divide every exponent by the sum of all exponents - softmax_vector = exponentVector / sumOfExponents + softmax_vector = exponent_vector / sum_of_exponents return softmax_vector diff --git a/matrix/count_islands_in_matrix.py b/matrix/count_islands_in_matrix.py index 00f9e1436..64c595e84 100644 --- a/matrix/count_islands_in_matrix.py +++ b/matrix/count_islands_in_matrix.py @@ -3,7 +3,7 @@ # connections. 
-class matrix: # Public class to implement a graph +class Matrix: # Public class to implement a graph def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None: self.ROW = row self.COL = col @@ -19,12 +19,12 @@ class matrix: # Public class to implement a graph def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None: # Checking all 8 elements surrounding nth element - rowNbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order - colNbr = [-1, 0, 1, -1, 1, -1, 0, 1] + row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order + col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1] visited[i][j] = True # Make those cells visited for k in range(8): - if self.is_safe(i + rowNbr[k], j + colNbr[k], visited): - self.diffs(i + rowNbr[k], j + colNbr[k], visited) + if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited): + self.diffs(i + row_nbr[k], j + col_nbr[k], visited) def count_islands(self) -> int: # And finally, count all islands. visited = [[False for j in range(self.COL)] for i in range(self.ROW)] diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py index 92780e656..770ce39b5 100644 --- a/matrix/inverse_of_matrix.py +++ b/matrix/inverse_of_matrix.py @@ -27,7 +27,7 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: [[0.25, -0.5], [-0.3, 1.0]] """ - D = Decimal # An abbreviation for conciseness + d = Decimal # An abbreviation for conciseness # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices @@ -35,7 +35,7 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: raise ValueError("Please provide a matrix of size 2x2.") # Calculate the determinant of the matrix - determinant = D(matrix[0][0]) * D(matrix[1][1]) - D(matrix[1][0]) * D(matrix[0][1]) + determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]) if determinant == 0: raise ValueError("This matrix has no inverse.") @@ -45,4 +45,4 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix - return [[float(D(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] + return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index a0c93f115..29c9b3381 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -54,15 +54,15 @@ class Matrix: def __repr__(self) -> str: return str(self) - def validateIndices(self, loc: tuple[int, int]) -> bool: + def validate_indicies(self, loc: tuple[int, int]) -> bool: """ - + Check if given indices are valid to pick element from matrix. 
Example: >>> a = Matrix(2, 6, 0) - >>> a.validateIndices((2, 7)) + >>> a.validate_indicies((2, 7)) False - >>> a.validateIndices((0, 0)) + >>> a.validate_indicies((0, 0)) True """ if not (isinstance(loc, (list, tuple)) and len(loc) == 2): @@ -81,7 +81,7 @@ class Matrix: >>> a[1, 0] 7 """ - assert self.validateIndices(loc) + assert self.validate_indicies(loc) return self.array[loc[0]][loc[1]] def __setitem__(self, loc: tuple[int, int], value: float) -> None: @@ -96,7 +96,7 @@ class Matrix: [ 1, 1, 1] [ 1, 1, 51] """ - assert self.validateIndices(loc) + assert self.validate_indicies(loc) self.array[loc[0]][loc[1]] = value def __add__(self, another: Matrix) -> Matrix: @@ -198,9 +198,9 @@ class Matrix: result[c, r] = self[r, c] return result - def ShermanMorrison(self, u: Matrix, v: Matrix) -> Any: + def sherman_morrison(self, u: Matrix, v: Matrix) -> Any: """ - + Apply Sherman-Morrison formula in O(n^2). To learn this formula, please look this: https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula @@ -216,7 +216,7 @@ class Matrix: >>> u[0,0], u[1,0], u[2,0] = 1, 2, -3 >>> v = Matrix(3, 1, 0) >>> v[0,0], v[1,0], v[2,0] = 4, -2, 5 - >>> ainv.ShermanMorrison(u, v) + >>> ainv.sherman_morrison(u, v) Matrix consist of 3 rows and 3 columns [ 1.2857142857142856, -0.14285714285714285, 0.3571428571428571] [ 0.5714285714285714, 0.7142857142857143, 0.7142857142857142] @@ -229,11 +229,11 @@ class Matrix: assert u.column == v.column == 1 # u, v should be column vector # Calculate - vT = v.transpose() - numerator_factor = (vT * self * u)[0, 0] + 1 + v_t = v.transpose() + numerator_factor = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable - return self - ((self * u) * (vT * self) * (1.0 / numerator_factor)) + return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing @@ -254,7 +254,7 @@ if __name__ == "__main__": print(f"v is {v}") print("uv^T is %s" % (u * v.transpose())) # Sherman Morrison - print(f"(a + uv^T)^(-1) is {ainv.ShermanMorrison(u, v)}") + print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}") def test2() -> None: import doctest diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 96b782649..370e38482 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -6,7 +6,7 @@ Description: """ -def BFS(graph, s, t, parent): +def bfs(graph, s, t, parent): # Return True if there is node that has not iterated. visited = [False] * len(graph) queue = [] @@ -24,11 +24,11 @@ def BFS(graph, s, t, parent): return True if visited[t] else False -def FordFulkerson(graph, source, sink): +def ford_fulkerson(graph, source, sink): # This array is filled by BFS and to store path parent = [-1] * (len(graph)) max_flow = 0 - while BFS(graph, source, sink, parent): + while bfs(graph, source, sink, parent): path_flow = float("Inf") s = sink @@ -58,4 +58,4 @@ graph = [ ] source, sink = 0, 5 -print(FordFulkerson(graph, source, sink)) +print(ford_fulkerson(graph, source, sink)) diff --git a/networking_flow/minimum_cut.py b/networking_flow/minimum_cut.py index d79f3619c..33131315f 100644 --- a/networking_flow/minimum_cut.py +++ b/networking_flow/minimum_cut.py @@ -10,7 +10,7 @@ test_graph = [ ] -def BFS(graph, s, t, parent): +def bfs(graph, s, t, parent): # Return True if there is node that has not iterated. visited = [False] * len(graph) queue = [s] @@ -36,7 +36,7 @@ def mincut(graph, source, sink): max_flow = 0 res = [] temp = [i[:] for i in graph] # Record original cut, copy. 
- while BFS(graph, source, sink, parent): + while bfs(graph, source, sink, parent): path_flow = float("Inf") s = sink diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index e3993efb4..bbade1c41 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -74,7 +74,7 @@ class CNN: print(f"Model saved: {save_path}") @classmethod - def ReadModel(cls, model_path): + def read_model(cls, model_path): # read saved model with open(model_path, "rb") as f: model_dic = pickle.load(f) @@ -119,7 +119,7 @@ class CNN: data_focus.append(focus) # calculate the feature map of every single kernel, and saved as list of matrix data_featuremap = [] - Size_FeatureMap = int((size_data - size_conv) / conv_step + 1) + size_feature_map = int((size_data - size_conv) / conv_step + 1) for i_map in range(num_conv): featuremap = [] for i_focus in range(len(data_focus)): @@ -129,7 +129,7 @@ class CNN: ) featuremap.append(self.sig(net_focus)) featuremap = np.asmatrix(featuremap).reshape( - Size_FeatureMap, Size_FeatureMap + size_feature_map, size_feature_map ) data_featuremap.append(featuremap) diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 88aefabc8..03d60a9a1 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -255,14 +255,14 @@ def find_unit_clauses( if len(clause) == 1: unit_symbols.append(list(clause.literals.keys())[0]) else: - Fcount, Ncount = 0, 0 + f_count, n_count = 0, 0 for literal, value in clause.literals.items(): if value is False: - Fcount += 1 + f_count += 1 elif value is None: sym = literal - Ncount += 1 - if Fcount == len(clause) - 1 and Ncount == 1: + n_count += 1 + if f_count == len(clause) - 1 and n_count == 1: unit_symbols.append(sym) assignment: dict[str, bool | None] = dict() for i in unit_symbols: @@ -310,33 +310,33 @@ def dpll_algorithm( except RecursionError: print("raises a RecursionError and is") return None, {} - P = None + p = None if len(pure_symbols) > 0: - P, value = pure_symbols[0], assignment[pure_symbols[0]] + p, value = pure_symbols[0], assignment[pure_symbols[0]] - if P: + if p: tmp_model = model - tmp_model[P] = value + tmp_model[p] = value tmp_symbols = [i for i in symbols] - if P in tmp_symbols: - tmp_symbols.remove(P) + if p in tmp_symbols: + tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) unit_symbols, assignment = find_unit_clauses(clauses, model) - P = None + p = None if len(unit_symbols) > 0: - P, value = unit_symbols[0], assignment[unit_symbols[0]] - if P: + p, value = unit_symbols[0], assignment[unit_symbols[0]] + if p: tmp_model = model - tmp_model[P] = value + tmp_model[p] = value tmp_symbols = [i for i in symbols] - if P in tmp_symbols: - tmp_symbols.remove(P) + if p in tmp_symbols: + tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) - P = symbols[0] + p = symbols[0] rest = symbols[1:] tmp1, tmp2 = model, model - tmp1[P], tmp2[P] = True, False + tmp1[p], tmp2[p] = True, False return dpll_algorithm(clauses, rest, tmp1) or dpll_algorithm(clauses, rest, tmp2) diff --git a/other/greedy.py b/other/greedy.py index 4b78bf1c0..72e05f451 100644 --- a/other/greedy.py +++ b/other/greedy.py @@ -1,4 +1,4 @@ -class things: +class Things: def __init__(self, name, value, weight): self.name = name self.value = value @@ -16,27 +16,27 @@ class things: def get_weight(self): return self.weight - def value_Weight(self): 
+ def value_weight(self): return self.value / self.weight def build_menu(name, value, weight): menu = [] for i in range(len(value)): - menu.append(things(name[i], value[i], weight[i])) + menu.append(Things(name[i], value[i], weight[i])) return menu -def greedy(item, maxCost, keyFunc): - itemsCopy = sorted(item, key=keyFunc, reverse=True) +def greedy(item, max_cost, key_func): + items_copy = sorted(item, key=key_func, reverse=True) result = [] - totalValue, total_cost = 0.0, 0.0 - for i in range(len(itemsCopy)): - if (total_cost + itemsCopy[i].get_weight()) <= maxCost: - result.append(itemsCopy[i]) - total_cost += itemsCopy[i].get_weight() - totalValue += itemsCopy[i].get_value() - return (result, totalValue) + total_value, total_cost = 0.0, 0.0 + for i in range(len(items_copy)): + if (total_cost + items_copy[i].get_weight()) <= max_cost: + result.append(items_copy[i]) + total_cost += items_copy[i].get_weight() + total_value += items_copy[i].get_value() + return (result, total_value) def test_greedy(): @@ -47,13 +47,13 @@ def test_greedy(): >>> weight = [40, 60, 40, 70, 100, 85, 55, 70] >>> foods = build_menu(food, value, weight) >>> foods # doctest: +NORMALIZE_WHITESPACE - [things(Burger, 80, 40), things(Pizza, 100, 60), things(Coca Cola, 60, 40), - things(Rice, 70, 70), things(Sambhar, 50, 100), things(Chicken, 110, 85), - things(Fries, 90, 55), things(Milk, 60, 70)] - >>> greedy(foods, 500, things.get_value) # doctest: +NORMALIZE_WHITESPACE - ([things(Chicken, 110, 85), things(Pizza, 100, 60), things(Fries, 90, 55), - things(Burger, 80, 40), things(Rice, 70, 70), things(Coca Cola, 60, 40), - things(Milk, 60, 70)], 570.0) + [Things(Burger, 80, 40), Things(Pizza, 100, 60), Things(Coca Cola, 60, 40), + Things(Rice, 70, 70), Things(Sambhar, 50, 100), Things(Chicken, 110, 85), + Things(Fries, 90, 55), Things(Milk, 60, 70)] + >>> greedy(foods, 500, Things.get_value) # doctest: +NORMALIZE_WHITESPACE + ([Things(Chicken, 110, 85), Things(Pizza, 100, 60), Things(Fries, 90, 55), + Things(Burger, 80, 40), Things(Rice, 70, 70), Things(Coca Cola, 60, 40), + Things(Milk, 60, 70)], 570.0) """ diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 99e2f3a38..9dd9a0f04 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -14,21 +14,21 @@ brackets and returns true if S is nested and false otherwise. 
""" -def is_balanced(S): +def is_balanced(s): stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) - for i in range(len(S)): + for i in range(len(s)): - if S[i] in open_brackets: - stack.append(S[i]) + if s[i] in open_brackets: + stack.append(s[i]) - elif S[i] in closed_brackets: + elif s[i] in closed_brackets: if len(stack) == 0 or ( - len(stack) > 0 and open_to_closed[stack.pop()] != S[i] + len(stack) > 0 and open_to_closed[stack.pop()] != s[i] ): return False diff --git a/other/sdes.py b/other/sdes.py index cfc5a53df..695675000 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -19,9 +19,9 @@ def left_shift(data): return data[1:] + data[0] -def XOR(a, b): +def xor(a, b): """ - >>> XOR("01010101", "00001111") + >>> xor("01010101", "00001111") '01011010' """ res = "" @@ -43,13 +43,13 @@ def function(expansion, s0, s1, key, message): left = message[:4] right = message[4:] temp = apply_table(right, expansion) - temp = XOR(temp, key) + temp = xor(temp, key) l = apply_sbox(s0, temp[:4]) # noqa: E741 r = apply_sbox(s1, temp[4:]) l = "0" * (2 - len(l)) + l # noqa: E741 r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) - temp = XOR(left, temp) + temp = xor(left, temp) return temp + right diff --git a/other/tower_of_hanoi.py b/other/tower_of_hanoi.py index 3cc0e40b3..1fff45039 100644 --- a/other/tower_of_hanoi.py +++ b/other/tower_of_hanoi.py @@ -1,6 +1,6 @@ -def moveTower(height, fromPole, toPole, withPole): +def move_tower(height, from_pole, to_pole, with_pole): """ - >>> moveTower(3, 'A', 'B', 'C') + >>> move_tower(3, 'A', 'B', 'C') moving disk from A to B moving disk from A to C moving disk from B to C @@ -10,18 +10,18 @@ def moveTower(height, fromPole, toPole, withPole): moving disk from A to B """ if height >= 1: - moveTower(height - 1, fromPole, withPole, toPole) - moveDisk(fromPole, toPole) - moveTower(height - 1, withPole, toPole, fromPole) + move_tower(height - 1, from_pole, with_pole, to_pole) + move_disk(from_pole, to_pole) + move_tower(height - 1, with_pole, to_pole, from_pole) -def moveDisk(fp, tp): +def move_disk(fp, tp): print("moving disk from", fp, "to", tp) def main(): height = int(input("Height of hanoi: ").strip()) - moveTower(height, "A", "B", "C") + move_tower(height, "A", "B", "C") if __name__ == "__main__": diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 01083b9a2..7e9fc1642 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -219,9 +219,11 @@ def plot( Utility function to plot how the given body-system evolves over time. No doctest provided since this function does not have a return value. 
""" + # Frame rate of the animation + INTERVAL = 20 # noqa: N806 - INTERVAL = 20 # Frame rate of the animation - DELTA_TIME = INTERVAL / 1000 # Time between time steps in seconds + # Time between time steps in seconds + DELTA_TIME = INTERVAL / 1000 # noqa: N806 fig = plt.figure() fig.canvas.set_window_title(title) diff --git a/project_euler/problem_011/sol1.py b/project_euler/problem_011/sol1.py index 9dea73e8c..ad45f0983 100644 --- a/project_euler/problem_011/sol1.py +++ b/project_euler/problem_011/sol1.py @@ -28,23 +28,23 @@ import os def largest_product(grid): - nColumns = len(grid[0]) - nRows = len(grid) + n_columns = len(grid[0]) + n_rows = len(grid) largest = 0 - lrDiagProduct = 0 - rlDiagProduct = 0 + lr_diag_product = 0 + rl_diag_product = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) - for i in range(nColumns): - for j in range(nRows - 3): - vertProduct = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] - horzProduct = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] + for i in range(n_columns): + for j in range(n_rows - 3): + vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] + horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product - if i < nColumns - 3: - lrDiagProduct = ( + if i < n_columns - 3: + lr_diag_product = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] @@ -53,16 +53,18 @@ def largest_product(grid): # Right-to-left diagonal(/) product if i > 2: - rlDiagProduct = ( + rl_diag_product = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) - maxProduct = max(vertProduct, horzProduct, lrDiagProduct, rlDiagProduct) - if maxProduct > largest: - largest = maxProduct + max_product = max( + vert_product, horz_product, lr_diag_product, rl_diag_product + ) + if max_product > largest: + largest = max_product return largest diff --git a/project_euler/problem_012/sol1.py b/project_euler/problem_012/sol1.py index 861d026ec..e42b03419 100644 --- a/project_euler/problem_012/sol1.py +++ b/project_euler/problem_012/sol1.py @@ -24,18 +24,18 @@ divisors? 
def count_divisors(n): - nDivisors = 1 + n_divisors = 1 i = 2 while i * i <= n: multiplicity = 0 while n % i == 0: n //= i multiplicity += 1 - nDivisors *= multiplicity + 1 + n_divisors *= multiplicity + 1 i += 1 if n > 1: - nDivisors *= 2 - return nDivisors + n_divisors *= 2 + return n_divisors def solution(): @@ -45,17 +45,17 @@ def solution(): >>> solution() 76576500 """ - tNum = 1 + t_num = 1 i = 1 while True: i += 1 - tNum += i + t_num += i - if count_divisors(tNum) > 500: + if count_divisors(t_num) > 500: break - return tNum + return t_num if __name__ == "__main__": diff --git a/project_euler/problem_023/sol1.py b/project_euler/problem_023/sol1.py index 83b85f3f7..9fdf7284a 100644 --- a/project_euler/problem_023/sol1.py +++ b/project_euler/problem_023/sol1.py @@ -28,18 +28,18 @@ def solution(limit=28123): >>> solution() 4179871 """ - sumDivs = [1] * (limit + 1) + sum_divs = [1] * (limit + 1) for i in range(2, int(limit**0.5) + 1): - sumDivs[i * i] += i + sum_divs[i * i] += i for k in range(i + 1, limit // i + 1): - sumDivs[k * i] += k + i + sum_divs[k * i] += k + i abundants = set() res = 0 for n in range(1, limit + 1): - if sumDivs[n] > n: + if sum_divs[n] > n: abundants.add(n) if not any((n - a in abundants) for a in abundants): diff --git a/project_euler/problem_029/sol1.py b/project_euler/problem_029/sol1.py index d3ab90ac7..d9a81e55c 100644 --- a/project_euler/problem_029/sol1.py +++ b/project_euler/problem_029/sol1.py @@ -33,17 +33,17 @@ def solution(n: int = 100) -> int: >>> solution(1) 0 """ - collectPowers = set() + collect_powers = set() - currentPow = 0 + current_pow = 0 - N = n + 1 # maximum limit + n = n + 1 # maximum limit - for a in range(2, N): - for b in range(2, N): - currentPow = a**b # calculates the current power - collectPowers.add(currentPow) # adds the result to the set - return len(collectPowers) + for a in range(2, n): + for b in range(2, n): + current_pow = a**b # calculates the current power + collect_powers.add(current_pow) # adds the result to the set + return len(collect_powers) if __name__ == "__main__": diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index 393218339..c4d11e86c 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -15,15 +15,15 @@ include it once in your sum. import itertools -def isCombinationValid(combination): +def is_combination_valid(combination): """ Checks if a combination (a tuple of 9 digits) is a valid product equation. 
- >>> isCombinationValid(('3', '9', '1', '8', '6', '7', '2', '5', '4')) + >>> is_combination_valid(('3', '9', '1', '8', '6', '7', '2', '5', '4')) True - >>> isCombinationValid(('1', '2', '3', '4', '5', '6', '7', '8', '9')) + >>> is_combination_valid(('1', '2', '3', '4', '5', '6', '7', '8', '9')) False """ @@ -49,7 +49,7 @@ def solution(): { int("".join(pandigital[5:9])) for pandigital in itertools.permutations("123456789") - if isCombinationValid(pandigital) + if is_combination_valid(pandigital) } ) diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index b3aecf4cf..6d22a8dfb 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -27,10 +27,10 @@ def solution(): 162 """ script_dir = os.path.dirname(os.path.realpath(__file__)) - wordsFilePath = os.path.join(script_dir, "words.txt") + words_file_path = os.path.join(script_dir, "words.txt") words = "" - with open(wordsFilePath) as f: + with open(words_file_path) as f: words = f.readline() words = list(map(lambda word: word.strip('"'), words.strip("\r\n").split(","))) diff --git a/project_euler/problem_054/test_poker_hand.py b/project_euler/problem_054/test_poker_hand.py index 96317fc7d..bf5a20a8e 100644 --- a/project_euler/problem_054/test_poker_hand.py +++ b/project_euler/problem_054/test_poker_hand.py @@ -185,7 +185,7 @@ def test_compare_random(hand, other, expected): def test_hand_sorted(): - POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] + POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] # noqa: N806 list_copy = POKER_HANDS.copy() shuffle(list_copy) user_sorted = chain(sorted(list_copy)) diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 5df64a90a..9edd9a1e7 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -33,7 +33,7 @@ def continuous_fraction_period(n: int) -> int: """ numerator = 0.0 denominator = 1.0 - ROOT = int(sqrt(n)) + ROOT = int(sqrt(n)) # noqa: N806 integer_part = ROOT period = 0 while integer_part != 2 * ROOT: diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index da5e8120b..94a43894e 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -34,8 +34,8 @@ def solution(n: int = 10) -> str: """ if not isinstance(n, int) or n < 0: raise ValueError("Invalid input") - MODULUS = 10**n - NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 + MODULUS = 10**n # noqa: N806 + NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 # noqa: N806 return str(NUMBER % MODULUS) diff --git a/project_euler/problem_104/sol.py b/project_euler/problem_104/sol.py.FIXME similarity index 100% rename from project_euler/problem_104/sol.py rename to project_euler/problem_104/sol.py.FIXME diff --git a/project_euler/problem_125/sol1.py b/project_euler/problem_125/sol1.py index 7a8f908ed..1812df361 100644 --- a/project_euler/problem_125/sol1.py +++ b/project_euler/problem_125/sol1.py @@ -35,7 +35,7 @@ def solution() -> int: Returns the sum of all numbers less than 1e8 that are both palindromic and can be written as the sum of consecutive squares. 
""" - LIMIT = 10**8 + LIMIT = 10**8 # noqa: N806 answer = set() first_square = 1 sum_squares = 5 diff --git a/scheduling/non_preemptive_shortest_job_first.py b/scheduling/non_preemptive_shortest_job_first.py index 96e571230..69c974b00 100644 --- a/scheduling/non_preemptive_shortest_job_first.py +++ b/scheduling/non_preemptive_shortest_job_first.py @@ -102,9 +102,9 @@ if __name__ == "__main__": # Printing the Result print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time") - for i, process_ID in enumerate(list(range(1, 5))): + for i, process_id in enumerate(list(range(1, 5))): print( - f"{process_ID}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t" + f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t" f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}" ) print(f"\nAverage waiting time = {mean(waiting_time):.5f}") diff --git a/searches/tabu_search.py b/searches/tabu_search.py index 24d0dbf6f..45ce19d46 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -178,9 +178,9 @@ def find_neighborhood(solution, dict_of_neighbours): if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) - indexOfLastItemInTheList = len(neighborhood_of_solution[0]) - 1 + index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1 - neighborhood_of_solution.sort(key=lambda x: x[indexOfLastItemInTheList]) + neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list]) return neighborhood_of_solution diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 5de7a016c..b656df3a3 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -13,7 +13,7 @@ synchronization could be used. from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time -processLock = Lock() +process_lock = Lock() """ The function run by the processes that sorts the list @@ -27,42 +27,42 @@ resultPipe = the pipe used to send results back to main """ -def oeProcess(position, value, LSend, RSend, LRcv, RRcv, resultPipe): - global processLock +def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): + global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0, 10): - if (i + position) % 2 == 0 and RSend is not None: + if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor - processLock.acquire() - RSend[1].send(value) - processLock.release() + process_lock.acquire() + r_send[1].send(value) + process_lock.release() # receive your right neighbor's value - processLock.acquire() - temp = RRcv[0].recv() - processLock.release() + process_lock.acquire() + temp = rr_cv[0].recv() + process_lock.release() # take the lower value since you are on the left value = min(value, temp) - elif (i + position) % 2 != 0 and LSend is not None: + elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor - processLock.acquire() - LSend[1].send(value) - processLock.release() + process_lock.acquire() + l_send[1].send(value) + process_lock.release() # receive your left neighbor's value - processLock.acquire() - temp = LRcv[0].recv() - processLock.release() + process_lock.acquire() + temp = lr_cv[0].recv() + process_lock.release() # take the higher value since you are 
on the right value = max(value, temp) # after all swaps are performed, send the values back to main - resultPipe[1].send(value) + result_pipe[1].send(value) """ @@ -72,61 +72,61 @@ arr = the list to be sorted """ -def OddEvenTransposition(arr): - processArray = [] - resultPipe = [] +def odd_even_transposition(arr): + process_array_ = [] + result_pipe = [] # initialize the list of pipes where the values will be retrieved for _ in arr: - resultPipe.append(Pipe()) + result_pipe.append(Pipe()) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop - tempRs = Pipe() - tempRr = Pipe() - processArray.append( + temp_rs = Pipe() + temp_rr = Pipe() + process_array_.append( Process( - target=oeProcess, - args=(0, arr[0], None, tempRs, None, tempRr, resultPipe[0]), + target=oe_process, + args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) ) - tempLr = tempRs - tempLs = tempRr + temp_lr = temp_rs + temp_ls = temp_rr for i in range(1, len(arr) - 1): - tempRs = Pipe() - tempRr = Pipe() - processArray.append( + temp_rs = Pipe() + temp_rr = Pipe() + process_array_.append( Process( - target=oeProcess, - args=(i, arr[i], tempLs, tempRs, tempLr, tempRr, resultPipe[i]), + target=oe_process, + args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) ) - tempLr = tempRs - tempLs = tempRr + temp_lr = temp_rs + temp_ls = temp_rr - processArray.append( + process_array_.append( Process( - target=oeProcess, + target=oe_process, args=( len(arr) - 1, arr[len(arr) - 1], - tempLs, + temp_ls, None, - tempLr, + temp_lr, None, - resultPipe[len(arr) - 1], + result_pipe[len(arr) - 1], ), ) ) # start the processes - for p in processArray: + for p in process_array_: p.start() # wait for the processes to end and write their values to the list - for p in range(0, len(resultPipe)): - arr[p] = resultPipe[p][0].recv() - processArray[p].join() + for p in range(0, len(result_pipe)): + arr[p] = result_pipe[p][0].recv() + process_array_[p].join() return arr @@ -135,7 +135,7 @@ def main(): arr = list(range(10, 0, -1)) print("Initial List") print(*arr) - arr = OddEvenTransposition(arr) + arr = odd_even_transposition(arr) print("Sorted List\n") print(*arr) diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index c3ff04f3d..afe62bc7e 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -19,7 +19,7 @@ def radix_sort(list_of_ints: list[int]) -> list[int]: >>> radix_sort([1,100,10,1000]) == sorted([1,100,10,1000]) True """ - RADIX = 10 + RADIX = 10 # noqa: N806 placement = 1 max_digit = max(list_of_ints) while placement <= max_digit: diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py index 73eb70bea..5777d5cb2 100644 --- a/sorts/random_normal_distribution_quicksort.py +++ b/sorts/random_normal_distribution_quicksort.py @@ -4,41 +4,41 @@ from tempfile import TemporaryFile import numpy as np -def _inPlaceQuickSort(A, start, end): +def _in_place_quick_sort(a, start, end): count = 0 if start < end: pivot = randint(start, end) - temp = A[end] - A[end] = A[pivot] - A[pivot] = temp + temp = a[end] + a[end] = a[pivot] + a[pivot] = temp - p, count = _inPlacePartition(A, start, end) - count += _inPlaceQuickSort(A, start, p - 1) - count += _inPlaceQuickSort(A, p + 1, end) + p, count = _in_place_partition(a, start, end) + count += _in_place_quick_sort(a, start, p - 1) + count += _in_place_quick_sort(a, p + 1, end) return count -def _inPlacePartition(A, start, end): +def _in_place_partition(a, start, 
end): count = 0 pivot = randint(start, end) - temp = A[end] - A[end] = A[pivot] - A[pivot] = temp - newPivotIndex = start - 1 + temp = a[end] + a[end] = a[pivot] + a[pivot] = temp + new_pivot_index = start - 1 for index in range(start, end): count += 1 - if A[index] < A[end]: # check if current val is less than pivot value - newPivotIndex = newPivotIndex + 1 - temp = A[newPivotIndex] - A[newPivotIndex] = A[index] - A[index] = temp + if a[index] < a[end]: # check if current val is less than pivot value + new_pivot_index = new_pivot_index + 1 + temp = a[new_pivot_index] + a[new_pivot_index] = a[index] + a[index] = temp - temp = A[newPivotIndex + 1] - A[newPivotIndex + 1] = A[end] - A[end] = temp - return newPivotIndex + 1, count + temp = a[new_pivot_index + 1] + a[new_pivot_index + 1] = a[end] + a[end] = temp + return new_pivot_index + 1, count outfile = TemporaryFile() @@ -55,7 +55,7 @@ print(X) outfile.seek(0) # using the same array M = np.load(outfile) r = len(M) - 1 -z = _inPlaceQuickSort(M, 0, r) +z = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" diff --git a/sorts/random_pivot_quick_sort.py b/sorts/random_pivot_quick_sort.py index d9cf4e981..748b67410 100644 --- a/sorts/random_pivot_quick_sort.py +++ b/sorts/random_pivot_quick_sort.py @@ -4,30 +4,30 @@ Picks the random index as the pivot import random -def partition(A, left_index, right_index): - pivot = A[left_index] +def partition(a, left_index, right_index): + pivot = a[left_index] i = left_index + 1 for j in range(left_index + 1, right_index): - if A[j] < pivot: - A[j], A[i] = A[i], A[j] + if a[j] < pivot: + a[j], a[i] = a[i], a[j] i += 1 - A[left_index], A[i - 1] = A[i - 1], A[left_index] + a[left_index], a[i - 1] = a[i - 1], a[left_index] return i - 1 -def quick_sort_random(A, left, right): +def quick_sort_random(a, left, right): if left < right: pivot = random.randint(left, right - 1) - A[pivot], A[left] = ( - A[left], - A[pivot], + a[pivot], a[left] = ( + a[left], + a[pivot], ) # switches the pivot with the left most bound - pivot_index = partition(A, left, right) + pivot_index = partition(a, left, right) quick_sort_random( - A, left, pivot_index + a, left, pivot_index ) # recursive quicksort to the left of the pivot point quick_sort_random( - A, pivot_index + 1, right + a, pivot_index + 1, right ) # recursive quicksort to the right of the pivot point diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index e445fb452..78c3e893e 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -5,7 +5,7 @@ Build a BST and in order traverse. """ -class node: +class Node: # BST data structure def __init__(self, val): self.val = val @@ -16,12 +16,12 @@ class node: if self.val: if val < self.val: if self.left is None: - self.left = node(val) + self.left = Node(val) else: self.left.insert(val) elif val > self.val: if self.right is None: - self.right = node(val) + self.right = Node(val) else: self.right.insert(val) else: @@ -40,7 +40,7 @@ def tree_sort(arr): # Build BST if len(arr) == 0: return arr - root = node(arr[0]) + root = Node(arr[0]) for i in range(1, len(arr)): root.insert(arr[i]) # Traverse BST in order. 
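The tree_sort hunk above ends at the comment announcing the in-order traversal, so the traversal step itself is not visible in the diff. For context, a tree sort built on a Node class like the one being renamed is normally finished with a recursive in-order walk; a short self-contained sketch (illustrative only, not the repository implementation) looks like this:

class Node:
    # Minimal BST node mirroring the renamed class above.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # Smaller values go left, larger values go right.
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)


def inorder(node, result):
    # Visiting left subtree, node, right subtree yields the values in sorted order.
    if node is not None:
        inorder(node.left, result)
        result.append(node.val)
        inorder(node.right, result)


def tree_sort_sketch(arr):
    if not arr:
        return arr
    root = Node(arr[0])
    for value in arr[1:]:
        root.insert(value)
    result = []
    inorder(root, result)
    return result


print(tree_sort_sketch([10, 1, 3, 2, 9, 14, 13]))  # [1, 2, 3, 9, 10, 13, 14]

Note that, as with the insert method shown in the hunk, duplicate values collapse into a single node in this sketch.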
diff --git a/strings/boyer_moore_search.py b/strings/boyer_moore_search.py index 8d8ff22f6..117305d32 100644 --- a/strings/boyer_moore_search.py +++ b/strings/boyer_moore_search.py @@ -41,13 +41,13 @@ class BoyerMooreSearch: return i return -1 - def mismatch_in_text(self, currentPos: int) -> int: + def mismatch_in_text(self, current_pos: int) -> int: """ find the index of mis-matched character in text when compared with pattern from last Parameters : - currentPos (int): current index position of text + current_pos (int): current index position of text Returns : i (int): index of mismatched char from last in text @@ -55,8 +55,8 @@ class BoyerMooreSearch: """ for i in range(self.patLen - 1, -1, -1): - if self.pattern[i] != self.text[currentPos + i]: - return currentPos + i + if self.pattern[i] != self.text[current_pos + i]: + return current_pos + i return -1 def bad_character_heuristic(self) -> list[int]: diff --git a/strings/can_string_be_rearranged_as_palindrome.py b/strings/can_string_be_rearranged_as_palindrome.py index ddc4828c7..21d653db1 100644 --- a/strings/can_string_be_rearranged_as_palindrome.py +++ b/strings/can_string_be_rearranged_as_palindrome.py @@ -67,12 +67,12 @@ def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool: Step 2:If we find more than 1 character that appears odd number of times, It is not possible to rearrange as a palindrome """ - oddChar = 0 + odd_char = 0 for character_count in character_freq_dict.values(): if character_count % 2: - oddChar += 1 - if oddChar > 1: + odd_char += 1 + if odd_char > 1: return False return True diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index f652e2294..0d2f8091a 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -48,8 +48,8 @@ if __name__ == "__main__": from doctest import testmod testmod() - input_A = input("Enter the first string ").strip() - input_B = input("Enter the second string ").strip() + input_a = input("Enter the first string ").strip() + input_b = input("Enter the second string ").strip() - status = check_anagrams(input_A, input_B) - print(f"{input_A} and {input_B} are {'' if status else 'not '}anagrams.") + status = check_anagrams(input_a, input_b) + print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.") diff --git a/strings/word_patterns.py b/strings/word_patterns.py index 90b092a20..d12d267e7 100644 --- a/strings/word_patterns.py +++ b/strings/word_patterns.py @@ -26,10 +26,10 @@ if __name__ == "__main__": start_time = time.time() with open("dictionary.txt") as in_file: - wordList = in_file.read().splitlines() + word_list = in_file.read().splitlines() all_patterns: dict = {} - for word in wordList: + for word in word_list: pattern = get_word_pattern(word) if pattern in all_patterns: all_patterns[pattern].append(word) @@ -39,6 +39,6 @@ if __name__ == "__main__": with open("word_patterns.txt", "w") as out_file: out_file.write(pprint.pformat(all_patterns)) - totalTime = round(time.time() - start_time, 2) - print(f"Done! {len(all_patterns):,} word patterns found in {totalTime} seconds.") + total_time = round(time.time() - start_time, 2) + print(f"Done! {len(all_patterns):,} word patterns found in {total_time} seconds.") # Done! 9,581 word patterns found in 0.58 seconds. 
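get_word_pattern itself sits outside the word_patterns.py hunk above. The idea behind it is that words sharing the same letter-repetition structure (the classic trick for attacking simple substitution ciphers) should map to the same pattern string. A rough standalone sketch of such a helper, assuming a "0.1.0.2"-style output format, could be:

def word_pattern_sketch(word: str) -> str:
    # Map each distinct letter to the order in which it first appears,
    # e.g. "puppy" -> "0.1.0.0.2" and "noon" -> "0.1.1.0".
    word = word.upper()
    next_num = 0
    letter_nums: dict[str, str] = {}
    pattern = []
    for letter in word:
        if letter not in letter_nums:
            letter_nums[letter] = str(next_num)
            next_num += 1
        pattern.append(letter_nums[letter])
    return ".".join(pattern)


print(word_pattern_sketch("puppy"))    # 0.1.0.0.2
print(word_pattern_sketch("pattern"))  # 0.1.2.2.3.4.5

Grouping a dictionary by this pattern, as the __main__ block in the hunk does, then lets a solver look up every candidate plaintext word for a given ciphertext word.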
diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index 4a3b002e5..a45f6ea0e 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -12,12 +12,12 @@ import requests def quote_of_the_day() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/today/" + API_ENDPOINT_URL = "https://zenquotes.io/api/today/" # noqa: N806 return requests.get(API_ENDPOINT_URL).json() def random_quotes() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/random/" + API_ENDPOINT_URL = "https://zenquotes.io/api/random/" # noqa: N806 return requests.get(API_ENDPOINT_URL).json() From 1aa7bd96164bf9f17acd770f4c6992d35c468541 Mon Sep 17 00:00:00 2001 From: Abinash Satapathy Date: Thu, 13 Oct 2022 00:56:10 +0200 Subject: [PATCH 020/368] Added barcode_validator.py (#6771) * Update README.md Added Google Cirq references * Create barcode_validator.py Barcode/EAN validator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docstring and updated variables to snake_case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docset and updated bugs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Implemented the changes asked in review. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Updated with f-string format * Update barcode_validator.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- quantum/README.md | 8 ++++ strings/barcode_validator.py | 88 ++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 strings/barcode_validator.py diff --git a/quantum/README.md b/quantum/README.md index 423d34fa3..3ce364574 100644 --- a/quantum/README.md +++ b/quantum/README.md @@ -6,6 +6,7 @@ Started at https://github.com/TheAlgorithms/Python/issues/1831 * Google: https://research.google/teams/applied-science/quantum * IBM: https://qiskit.org and https://github.com/Qiskit * Rigetti: https://rigetti.com and https://github.com/rigetti +* Zapata: https://www.zapatacomputing.com and https://github.com/zapatacomputing ## IBM Qiskit - Start using by installing `pip install qiskit`, refer the [docs](https://qiskit.org/documentation/install.html) for more info. @@ -13,3 +14,10 @@ Started at https://github.com/TheAlgorithms/Python/issues/1831 - https://github.com/Qiskit/qiskit-tutorials - https://quantum-computing.ibm.com/docs/iql/first-circuit - https://medium.com/qiskit/how-to-program-a-quantum-computer-982a9329ed02 + +## Google Cirq +- Start using by installing `python -m pip install cirq`, refer the [docs](https://quantumai.google/cirq/start/install) for more info. 
+- Tutorials & references + - https://github.com/quantumlib/cirq + - https://quantumai.google/cirq/experiments + - https://tanishabassan.medium.com/quantum-programming-with-google-cirq-3209805279bc diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py new file mode 100644 index 000000000..056700076 --- /dev/null +++ b/strings/barcode_validator.py @@ -0,0 +1,88 @@ +""" +https://en.wikipedia.org/wiki/Check_digit#Algorithms +""" + + +def get_check_digit(barcode: int) -> int: + """ + Returns the last digit of barcode by excluding the last digit first + and then computing to reach the actual last digit from the remaining + 12 digits. + + >>> get_check_digit(8718452538119) + 9 + >>> get_check_digit(87184523) + 5 + >>> get_check_digit(87193425381086) + 9 + >>> [get_check_digit(x) for x in range(0, 100, 10)] + [0, 7, 4, 1, 8, 5, 2, 9, 6, 3] + """ + barcode //= 10 # exclude the last digit + checker = False + s = 0 + + # extract and check each digit + while barcode != 0: + mult = 1 if checker else 3 + s += mult * (barcode % 10) + barcode //= 10 + checker = not checker + + return (10 - (s % 10)) % 10 + + +def is_valid(barcode: int) -> bool: + """ + Checks for length of barcode and last-digit + Returns boolean value of validity of barcode + + >>> is_valid(8718452538119) + True + >>> is_valid(87184525) + False + >>> is_valid(87193425381089) + False + >>> is_valid(0) + False + >>> is_valid(dwefgiweuf) + Traceback (most recent call last): + ... + NameError: name 'dwefgiweuf' is not defined + """ + return len(str(barcode)) == 13 and get_check_digit(barcode) == barcode % 10 + + +def get_barcode(barcode: str) -> int: + """ + Returns the barcode as an integer + + >>> get_barcode("8718452538119") + 8718452538119 + >>> get_barcode("dwefgiweuf") + Traceback (most recent call last): + ... + ValueError: Barcode 'dwefgiweuf' has alphabetic characters. + """ + if str(barcode).isalpha(): + raise ValueError(f"Barcode '{barcode}' has alphabetic characters.") + elif int(barcode) < 0: + raise ValueError("The entered barcode has a negative value. Try again.") + else: + return int(barcode) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + """ + Enter a barcode. 
+ + """ + barcode = get_barcode(input("Barcode: ").strip()) + + if is_valid(barcode): + print(f"'{barcode}' is a valid Barcode") + else: + print(f"'{barcode}' is NOT is valid Barcode.") From 6118b05f0efd1c2839eb8bc4de36723af1fcc364 Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Thu, 13 Oct 2022 08:24:53 +0200 Subject: [PATCH 021/368] Convert snake_case to camelCase or PascalCase (#7028) (#7034) * Added snake_case to Camel or Pascal case Fixes: #7028 * Added suggested changes * Add ending empty line from suggestion Co-authored-by: Caeden * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update strings/snake_case_to_camel_pascal_case.py Co-authored-by: Christian Clauss Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/snake_case_to_camel_pascal_case.py | 52 ++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 strings/snake_case_to_camel_pascal_case.py diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py new file mode 100644 index 000000000..7b2b61d1d --- /dev/null +++ b/strings/snake_case_to_camel_pascal_case.py @@ -0,0 +1,52 @@ +def snake_to_camel_case(input: str, use_pascal: bool = False) -> str: + """ + Transforms a snake_case given string to camelCase (or PascalCase if indicated) + (defaults to not use Pascal) + + >>> snake_to_camel_case("some_random_string") + 'someRandomString' + + >>> snake_to_camel_case("some_random_string", use_pascal=True) + 'SomeRandomString' + + >>> snake_to_camel_case("some_random_string_with_numbers_123") + 'someRandomStringWithNumbers123' + + >>> snake_to_camel_case("some_random_string_with_numbers_123", use_pascal=True) + 'SomeRandomStringWithNumbers123' + + >>> snake_to_camel_case(123) + Traceback (most recent call last): + ... + ValueError: Expected string as input, found + + >>> snake_to_camel_case("some_string", use_pascal="True") + Traceback (most recent call last): + ... 
+ ValueError: Expected boolean as use_pascal parameter, found + """ + + if not isinstance(input, str): + raise ValueError(f"Expected string as input, found {type(input)}") + if not isinstance(use_pascal, bool): + raise ValueError( + f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" + ) + + words = input.split("_") + + start_index = 0 if use_pascal else 1 + + words_to_capitalize = words[start_index:] + + capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize] + + initial_word = "" if use_pascal else words[0] + + return "".join([initial_word] + capitalized_words) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 7ad6c6402945349fbca42cce5acad631b0930a1d Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 15:15:20 +0530 Subject: [PATCH 022/368] Add typing to maths/add.py (#7064) * Add typing to maths/add.py https://stackoverflow.com/questions/50928592/mypy-type-hint-unionfloat-int-is-there-a-number-type * Update add.py * Update add.py --- maths/add.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/add.py b/maths/add.py index 0bc7da969..c89252c64 100644 --- a/maths/add.py +++ b/maths/add.py @@ -3,7 +3,7 @@ Just to check """ -def add(a, b): +def add(a: float, b: float) -> float: """ >>> add(2, 2) 4 From 9b0909d6545df269dc2c943df2e470671f0d1bcf Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:17:52 +0530 Subject: [PATCH 023/368] Add typing and snake casing to maths/decimal_isolate.py (#7066) --- maths/decimal_isolate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py index 1b8f6cbca..cdf43ea5d 100644 --- a/maths/decimal_isolate.py +++ b/maths/decimal_isolate.py @@ -4,7 +4,7 @@ https://stackoverflow.com/questions/3886402/how-to-get-numbers-after-decimal-poi """ -def decimal_isolate(number, digit_amount): +def decimal_isolate(number: float, digit_amount: int) -> float: """ Isolates the decimal part of a number. From c73cb7e3f7324ab2715ffc74ab18c32e3a90a065 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:18:28 +0530 Subject: [PATCH 024/368] Add typing to maths/sum_of_arithmetic_series.py (#7065) --- maths/sum_of_arithmetic_series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/sum_of_arithmetic_series.py b/maths/sum_of_arithmetic_series.py index 74eef0f18..e0e22760b 100644 --- a/maths/sum_of_arithmetic_series.py +++ b/maths/sum_of_arithmetic_series.py @@ -1,5 +1,5 @@ # DarkCoder -def sum_of_series(first_term, common_diff, num_of_terms): +def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float: """ Find the sum of n terms in an arithmetic progression. 
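For reference, sum_of_series implements the arithmetic-series closed form S = n/2 * (2a + (n - 1)d), which is why the annotated return type is float even for int arguments (n/2 is a true division). A quick standalone cross-check of the closed form against an explicit summation, illustrative only and with a helper name that is not from the patch:

def arithmetic_series_sum(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # Closed form: S = n/2 * (2a + (n - 1)d)
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)


# The closed form agrees with summing the terms one by one.
assert arithmetic_series_sum(1, 10, 100) == 49600.0
assert arithmetic_series_sum(1, 10, 100) == sum(1 + 10 * k for k in range(100))
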
From e661b9882995718ed967e33c2c814866b26fa76d Mon Sep 17 00:00:00 2001 From: GURNEET SINGH <79376134+SinghGurneet21@users.noreply.github.com> Date: Thu, 13 Oct 2022 17:39:01 +0530 Subject: [PATCH 025/368] Binary Search Tree Inorder Traversal Algorithm (#6840) * Binary Search Tree Inorder Traversal * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Binary Search Tree Inorder Traversal v2 * Binary Search Tree Inorder Traversal * Binary Search Tree Inorder Traversal * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/inorder_tree_traversal_2022.py * Update data_structures/binary_tree/inorder_tree_traversal_2022.py * Updated * Updated * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/inorder_tree_traversal_2022.py Co-authored-by: Christian Clauss * Updated and removed print statement removed the print from inorder function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + .../inorder_tree_traversal_2022.py | 83 +++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 data_structures/binary_tree/inorder_tree_traversal_2022.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 25272af4a..2786e1f82 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -153,6 +153,7 @@ * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) + * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py new file mode 100644 index 000000000..08001738f --- /dev/null +++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py @@ -0,0 +1,83 @@ +""" +Illustrate how to implement inorder traversal in binary search tree. 
+Author: Gurneet Singh +https://www.geeksforgeeks.org/tree-traversals-inorder-preorder-and-postorder/ +""" + + +class BinaryTreeNode: + """Defining the structure of BinaryTreeNode""" + + def __init__(self, data: int) -> None: + self.data = data + self.left_child: BinaryTreeNode | None = None + self.right_child: BinaryTreeNode | None = None + + +def insert(node: BinaryTreeNode | None, new_value: int) -> BinaryTreeNode | None: + """ + If the binary search tree is empty, make a new node and declare it as root. + >>> node_a = BinaryTreeNode(12345) + >>> node_b = insert(node_a, 67890) + >>> node_a.left_child == node_b.left_child + True + >>> node_a.right_child == node_b.right_child + True + >>> node_a.data == node_b.data + True + """ + if node is None: + node = BinaryTreeNode(new_value) + return node + + # binary search tree is not empty, + # so we will insert it into the tree + # if new_value is less than value of data in node, + # add it to left subtree and proceed recursively + if new_value < node.data: + node.left_child = insert(node.left_child, new_value) + else: + # if new_value is greater than value of data in node, + # add it to right subtree and proceed recursively + node.right_child = insert(node.right_child, new_value) + return node + + +def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return + """ + >>> inorder(make_tree()) + [6, 10, 14, 15, 20, 25, 60] + """ + if node: + inorder_array = inorder(node.left_child) + inorder_array = inorder_array + [node.data] + inorder_array = inorder_array + inorder(node.right_child) + else: + inorder_array = [] + return inorder_array + + +def make_tree() -> BinaryTreeNode | None: + + root = insert(None, 15) + insert(root, 10) + insert(root, 25) + insert(root, 6) + insert(root, 14) + insert(root, 20) + insert(root, 60) + return root + + +def main() -> None: + # main function + root = make_tree() + print("Printing values of binary search tree in Inorder Traversal.") + inorder(root) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From d5a9f649b8279858add6fe6dd5a84af2f40a4cc9 Mon Sep 17 00:00:00 2001 From: Caeden Date: Thu, 13 Oct 2022 15:23:59 +0100 Subject: [PATCH 026/368] Add flake8-builtins to pre-commit and fix errors (#7105) Ignore `A003` Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- .flake8 | 3 +++ .pre-commit-config.yaml | 2 +- arithmetic_analysis/gaussian_elimination.py | 6 +++--- arithmetic_analysis/jacobi_iteration_method.py | 6 +++--- audio_filters/show_response.py | 8 ++++---- backtracking/hamiltonian_cycle.py | 6 +++--- data_structures/binary_tree/avl_tree.py | 2 +- data_structures/linked_list/__init__.py | 2 +- .../linked_list/singly_linked_list.py | 4 ++-- data_structures/queue/double_ended_queue.py | 16 ++++++++-------- data_structures/stacks/next_greater_element.py | 12 ++++++------ digital_image_processing/index_calculation.py | 6 +++--- .../optimal_binary_search_tree.py | 6 +++--- graphs/a_star.py | 8 ++++---- graphs/dijkstra.py | 4 ++-- graphs/finding_bridges.py | 14 +++++++------- graphs/prim.py | 4 ++-- hashes/djb2.py | 6 +++--- hashes/sdbm.py | 8 +++++--- maths/armstrong_numbers.py | 12 ++++++------ maths/bailey_borwein_plouffe.py | 6 +++--- maths/kadanes.py | 8 ++++---- maths/prime_numbers.py | 14 +++++++------- maths/sum_of_arithmetic_series.py | 4 ++-- neural_network/2_hidden_layers_neural_network.py | 10 ++++++---- neural_network/convolution_neural_network.py 
| 10 +++++----- neural_network/perceptron.py | 4 ++-- project_euler/problem_065/sol1.py | 4 ++-- project_euler/problem_070/sol1.py | 6 +++--- sorts/odd_even_sort.py | 10 +++++----- strings/snake_case_to_camel_pascal_case.py | 8 ++++---- 31 files changed, 113 insertions(+), 106 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..9a5863c9c --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +extend-ignore = + A003 # Class attribute is shadowing a python builtin diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2f6a92814..e0de70b01 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 - additional_dependencies: [pep8-naming] + additional_dependencies: [flake8-builtins, pep8-naming] - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index 89ed3b323..f0f20af8e 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -33,11 +33,11 @@ def retroactive_resolution( x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): - sum = 0 + total = 0 for col in range(row + 1, columns): - sum += coefficients[row, col] * x[col] + total += coefficients[row, col] * x[col] - x[row, 0] = (vector[row] - sum) / coefficients[row, row] + x[row, 0] = (vector[row] - total) / coefficients[row, row] return x diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 4336aaa91..0aab4db20 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -147,14 +147,14 @@ def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: is_diagonally_dominant = True for i in range(0, rows): - sum = 0 + total = 0 for j in range(0, cols - 1): if i == j: continue else: - sum += table[i][j] + total += table[i][j] - if table[i][i] <= sum: + if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant") return is_diagonally_dominant diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py index 6e2731a58..097b8152b 100644 --- a/audio_filters/show_response.py +++ b/audio_filters/show_response.py @@ -34,7 +34,7 @@ def get_bounds( return lowest, highest -def show_frequency_response(filter: FilterType, samplerate: int) -> None: +def show_frequency_response(filter_type: FilterType, samplerate: int) -> None: """ Show frequency response of a filter @@ -45,7 +45,7 @@ def show_frequency_response(filter: FilterType, samplerate: int) -> None: size = 512 inputs = [1] + [0] * (size - 1) - outputs = [filter.process(item) for item in inputs] + outputs = [filter_type.process(item) for item in inputs] filler = [0] * (samplerate - size) # zero-padding outputs += filler @@ -66,7 +66,7 @@ def show_frequency_response(filter: FilterType, samplerate: int) -> None: plt.show() -def show_phase_response(filter: FilterType, samplerate: int) -> None: +def show_phase_response(filter_type: FilterType, samplerate: int) -> None: """ Show phase response of a filter @@ -77,7 +77,7 @@ def show_phase_response(filter: FilterType, samplerate: int) -> None: size = 512 inputs = [1] + [0] * (size - 1) - outputs = [filter.process(item) for item in inputs] + outputs = [filter_type.process(item) for item in inputs] filler = [0] * 
(samplerate - size) # zero-padding outputs += filler diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 500e993e5..4c6ae4679 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -95,10 +95,10 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step - for next in range(0, len(graph)): - if valid_connection(graph, next, curr_ind, path): + for next_ver in range(0, len(graph)): + if valid_connection(graph, next_ver, curr_ind, path): # Insert current vertex into path as next transition - path[curr_ind] = next + path[curr_ind] = next_ver # Validate created path if util_hamilton_cycle(graph, path, curr_ind + 1): return True diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 2f4bd60d9..320e7ed0d 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -33,7 +33,7 @@ class MyQueue: def count(self) -> int: return self.tail - self.head - def print(self) -> None: + def print_queue(self) -> None: print(self.data) print("**************") print(self.data[self.head : self.tail]) diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 6ba660231..85660a6d2 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -11,7 +11,7 @@ from typing import Any class Node: - def __init__(self, item: Any, next: Any) -> None: + def __init__(self, item: Any, next: Any) -> None: # noqa: A002 self.item = item self.next = next diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index a4156b650..59d7c512b 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -392,7 +392,7 @@ def test_singly_linked_list_2() -> None: This section of the test used varying data types for input. >>> test_singly_linked_list_2() """ - input = [ + test_input = [ -9, 100, Node(77345112), @@ -410,7 +410,7 @@ def test_singly_linked_list_2() -> None: ] linked_list = LinkedList() - for i in input: + for i in test_input: linked_list.insert_tail(i) # Check if it's empty or not diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index f38874788..7053879d4 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -15,8 +15,8 @@ class Deque: ---------- append(val: Any) -> None appendleft(val: Any) -> None - extend(iter: Iterable) -> None - extendleft(iter: Iterable) -> None + extend(iterable: Iterable) -> None + extendleft(iterable: Iterable) -> None pop() -> Any popleft() -> Any Observers @@ -179,9 +179,9 @@ class Deque: # make sure there were no errors assert not self.is_empty(), "Error on appending value." - def extend(self, iter: Iterable[Any]) -> None: + def extend(self, iterable: Iterable[Any]) -> None: """ - Appends every value of iter to the end of the deque. + Appends every value of iterable to the end of the deque. 
Time complexity: O(n) >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.extend([4, 5]) @@ -205,12 +205,12 @@ class Deque: >>> list(our_deque_2) == list(deque_collections_2) True """ - for val in iter: + for val in iterable: self.append(val) - def extendleft(self, iter: Iterable[Any]) -> None: + def extendleft(self, iterable: Iterable[Any]) -> None: """ - Appends every value of iter to the beginning of the deque. + Appends every value of iterable to the beginning of the deque. Time complexity: O(n) >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.extendleft([0, -1]) @@ -234,7 +234,7 @@ class Deque: >>> list(our_deque_2) == list(deque_collections_2) True """ - for val in iter: + for val in iterable: self.appendleft(val) def pop(self) -> Any: diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py index 5bab7c609..7d76d1f47 100644 --- a/data_structures/stacks/next_greater_element.py +++ b/data_structures/stacks/next_greater_element.py @@ -17,12 +17,12 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: arr_size = len(arr) for i in range(arr_size): - next: float = -1 + next_element: float = -1 for j in range(i + 1, arr_size): if arr[i] < arr[j]: - next = arr[j] + next_element = arr[j] break - result.append(next) + result.append(next_element) return result @@ -36,12 +36,12 @@ def next_greatest_element_fast(arr: list[float]) -> list[float]: """ result = [] for i, outer in enumerate(arr): - next: float = -1 + next_item: float = -1 for inner in arr[i + 1 :]: if outer < inner: - next = inner + next_item = inner break - result.append(next) + result.append(next_item) return result diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 2f8fdc066..01cd79fc1 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -497,9 +497,9 @@ class IndexCalculation: https://www.indexdatabase.de/db/i-single.php?id=77 :return: index """ - max = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) - min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) - return (max - min) / max + max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) + min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) + return (max_value - min_value) / max_value def _if(self): """ diff --git a/dynamic_programming/optimal_binary_search_tree.py b/dynamic_programming/optimal_binary_search_tree.py index 0d94c1b61..b4f1181ac 100644 --- a/dynamic_programming/optimal_binary_search_tree.py +++ b/dynamic_programming/optimal_binary_search_tree.py @@ -104,7 +104,7 @@ def find_optimal_binary_search_tree(nodes): dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] # sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes # array - sum = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] + total = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] # stores tree roots that will be used later for constructing binary search tree root = [[i if i == j else 0 for j in range(n)] for i in range(n)] @@ -113,14 +113,14 @@ def find_optimal_binary_search_tree(nodes): j = i + interval_length - 1 dp[i][j] = sys.maxsize # set the value to "infinity" - sum[i][j] = sum[i][j - 1] + freqs[j] + total[i][j] = total[i][j - 1] + freqs[j] # Apply Knuth's optimization # Loop without optimization: for r in range(i, j + 1): for r in range(root[i][j - 1], 
root[i + 1][j] + 1): # r is a temporal root left = dp[i][r - 1] if r != i else 0 # optimal cost for left subtree right = dp[r + 1][j] if r != j else 0 # optimal cost for right subtree - cost = left + sum[i][j] + right + cost = left + total[i][j] + right if dp[i][j] > cost: dp[i][j] = cost diff --git a/graphs/a_star.py b/graphs/a_star.py index e0f24734a..793ba3bda 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -40,10 +40,10 @@ def search( else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() - next = cell.pop() - x = next[2] - y = next[3] - g = next[1] + next_cell = cell.pop() + x = next_cell[2] + y = next_cell[3] + g = next_cell[1] if x == goal[0] and y == goal[1]: found = True diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index 62c60f2c6..b0bdfab60 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -56,8 +56,8 @@ def dijkstra(graph, start, end): for v, c in graph[u]: if v in visited: continue - next = cost + c - heapq.heappush(heap, (next, v)) + next_item = cost + c + heapq.heappush(heap, (next_item, v)) return -1 diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py index 3813c4ebb..c17606745 100644 --- a/graphs/finding_bridges.py +++ b/graphs/finding_bridges.py @@ -72,22 +72,22 @@ def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]: [] """ - id = 0 + id_ = 0 n = len(graph) # No of vertices in graph low = [0] * n visited = [False] * n - def dfs(at, parent, bridges, id): + def dfs(at, parent, bridges, id_): visited[at] = True - low[at] = id - id += 1 + low[at] = id_ + id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: - dfs(to, at, bridges, id) + dfs(to, at, bridges, id_) low[at] = min(low[at], low[to]) - if id <= low[to]: + if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at)) else: # This edge is a back edge and cannot be a bridge @@ -96,7 +96,7 @@ def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]: bridges: list[tuple[int, int]] = [] for i in range(n): if not visited[i]: - dfs(i, -1, bridges, id) + dfs(i, -1, bridges, id_) return bridges diff --git a/graphs/prim.py b/graphs/prim.py index 55d0fbfa8..6cb1a6def 100644 --- a/graphs/prim.py +++ b/graphs/prim.py @@ -13,7 +13,7 @@ from collections.abc import Iterator class Vertex: """Class Vertex.""" - def __init__(self, id): + def __init__(self, id_): """ Arguments: id - input an id to identify the vertex @@ -21,7 +21,7 @@ class Vertex: neighbors - a list of the vertices it is linked to edges - a dict to store the edges's weight """ - self.id = str(id) + self.id = str(id_) self.key = None self.pi = None self.neighbors = [] diff --git a/hashes/djb2.py b/hashes/djb2.py index 2d1c9aabb..4c8463509 100644 --- a/hashes/djb2.py +++ b/hashes/djb2.py @@ -29,7 +29,7 @@ def djb2(s: str) -> int: >>> djb2('scramble bits') 1609059040 """ - hash = 5381 + hash_value = 5381 for x in s: - hash = ((hash << 5) + hash) + ord(x) - return hash & 0xFFFFFFFF + hash_value = ((hash_value << 5) + hash_value) + ord(x) + return hash_value & 0xFFFFFFFF diff --git a/hashes/sdbm.py b/hashes/sdbm.py index daf292717..a5432874b 100644 --- a/hashes/sdbm.py +++ b/hashes/sdbm.py @@ -31,7 +31,9 @@ def sdbm(plain_text: str) -> int: >>> sdbm('scramble bits') 730247649148944819640658295400555317318720608290373040936089 """ - hash = 0 + hash_value = 0 for plain_chr in plain_text: - hash = ord(plain_chr) + (hash << 6) + (hash << 16) - hash - return hash + hash_value = ( + ord(plain_chr) + (hash_value << 6) + 
(hash_value << 16) - hash_value + ) + return hash_value diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index 65aebe937..f62991b74 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -25,7 +25,7 @@ def armstrong_number(n: int) -> bool: return False # Initialization of sum and number of digits. - sum = 0 + total = 0 number_of_digits = 0 temp = n # Calculation of digits of the number @@ -36,9 +36,9 @@ def armstrong_number(n: int) -> bool: temp = n while temp > 0: rem = temp % 10 - sum += rem**number_of_digits + total += rem**number_of_digits temp //= 10 - return n == sum + return n == total def pluperfect_number(n: int) -> bool: @@ -55,7 +55,7 @@ def pluperfect_number(n: int) -> bool: # Init a "histogram" of the digits digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] digit_total = 0 - sum = 0 + total = 0 temp = n while temp > 0: temp, rem = divmod(temp, 10) @@ -63,9 +63,9 @@ def pluperfect_number(n: int) -> bool: digit_total += 1 for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): - sum += cnt * i**digit_total + total += cnt * i**digit_total - return n == sum + return n == total def narcissistic_number(n: int) -> bool: diff --git a/maths/bailey_borwein_plouffe.py b/maths/bailey_borwein_plouffe.py index b647ae56d..389b1566e 100644 --- a/maths/bailey_borwein_plouffe.py +++ b/maths/bailey_borwein_plouffe.py @@ -67,7 +67,7 @@ def _subsum( @param precision: same as precision in main function @return: floating-point number whose integer part is not important """ - sum = 0.0 + total = 0.0 for sum_index in range(digit_pos_to_extract + precision): denominator = 8 * sum_index + denominator_addend if sum_index < digit_pos_to_extract: @@ -79,8 +79,8 @@ def _subsum( ) else: exponential_term = pow(16, digit_pos_to_extract - 1 - sum_index) - sum += exponential_term / denominator - return sum + total += exponential_term / denominator + return total if __name__ == "__main__": diff --git a/maths/kadanes.py b/maths/kadanes.py index d239d4a25..b23409e2b 100644 --- a/maths/kadanes.py +++ b/maths/kadanes.py @@ -14,13 +14,13 @@ def negative_exist(arr: list) -> int: [-2, 0, 0, 0, 0] """ arr = arr or [0] - max = arr[0] + max_number = arr[0] for i in arr: if i >= 0: return 0 - elif max <= i: - max = i - return max + elif max_number <= i: + max_number = i + return max_number def kadanes(arr: list) -> int: diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 7be4d3d95..4e076fe31 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -2,7 +2,7 @@ import math from collections.abc import Generator -def slow_primes(max: int) -> Generator[int, None, None]: +def slow_primes(max_n: int) -> Generator[int, None, None]: """ Return a list of all primes numbers up to max. >>> list(slow_primes(0)) @@ -20,7 +20,7 @@ def slow_primes(max: int) -> Generator[int, None, None]: >>> list(slow_primes(10000))[-1] 9973 """ - numbers: Generator = (i for i in range(1, (max + 1))) + numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): for j in range(2, i): if (i % j) == 0: @@ -29,7 +29,7 @@ def slow_primes(max: int) -> Generator[int, None, None]: yield i -def primes(max: int) -> Generator[int, None, None]: +def primes(max_n: int) -> Generator[int, None, None]: """ Return a list of all primes numbers up to max. 
>>> list(primes(0)) @@ -47,7 +47,7 @@ def primes(max: int) -> Generator[int, None, None]: >>> list(primes(10000))[-1] 9973 """ - numbers: Generator = (i for i in range(1, (max + 1))) + numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): # only need to check for factors up to sqrt(i) bound = int(math.sqrt(i)) + 1 @@ -58,7 +58,7 @@ def primes(max: int) -> Generator[int, None, None]: yield i -def fast_primes(max: int) -> Generator[int, None, None]: +def fast_primes(max_n: int) -> Generator[int, None, None]: """ Return a list of all primes numbers up to max. >>> list(fast_primes(0)) @@ -76,9 +76,9 @@ def fast_primes(max: int) -> Generator[int, None, None]: >>> list(fast_primes(10000))[-1] 9973 """ - numbers: Generator = (i for i in range(1, (max + 1), 2)) + numbers: Generator = (i for i in range(1, (max_n + 1), 2)) # It's useless to test even numbers as they will not be prime - if max > 2: + if max_n > 2: yield 2 # Because 2 will not be tested, it's necessary to yield it now for i in (n for n in numbers if n > 1): bound = int(math.sqrt(i)) + 1 diff --git a/maths/sum_of_arithmetic_series.py b/maths/sum_of_arithmetic_series.py index e0e22760b..3e381b8c2 100644 --- a/maths/sum_of_arithmetic_series.py +++ b/maths/sum_of_arithmetic_series.py @@ -8,9 +8,9 @@ def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float >>> sum_of_series(1, 10, 100) 49600.0 """ - sum = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) + total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series - return sum + return total def main(): diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/2_hidden_layers_neural_network.py index 1cf78ec4c..9c5772326 100644 --- a/neural_network/2_hidden_layers_neural_network.py +++ b/neural_network/2_hidden_layers_neural_network.py @@ -182,7 +182,7 @@ class TwoHiddenLayerNeuralNetwork: loss = numpy.mean(numpy.square(output - self.feedforward())) print(f"Iteration {iteration} Loss: {loss}") - def predict(self, input: numpy.ndarray) -> int: + def predict(self, input_arr: numpy.ndarray) -> int: """ Predict's the output for the given input values using the trained neural network. @@ -201,7 +201,7 @@ class TwoHiddenLayerNeuralNetwork: """ # Input values for which the predictions are to be made. - self.array = input + self.array = input_arr self.layer_between_input_and_first_hidden_layer = sigmoid( numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) @@ -264,7 +264,7 @@ def example() -> int: True """ # Input values. - input = numpy.array( + test_input = numpy.array( ( [0, 0, 0], [0, 0, 1], @@ -282,7 +282,9 @@ def example() -> int: output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) # Calling neural network class. - neural_network = TwoHiddenLayerNeuralNetwork(input_array=input, output_array=output) + neural_network = TwoHiddenLayerNeuralNetwork( + input_array=test_input, output_array=output + ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. 
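The motivation for these renames is the flake8-builtins argument check (A002, alongside the A003 class-attribute rule ignored in the .flake8 file above): a parameter named after a builtin such as input or max shadows it, so the builtin can no longer be called in that scope. A small illustrative sketch with hypothetical function names, not code from this patch:

def pick_up_to(max, values):  # would be flagged: `max` shadows the builtin
    # Inside this scope `max` is just the int argument, so calling
    # max(values) here would raise TypeError: 'int' object is not callable.
    return [v for v in values if v <= max]


def largest_not_above(max_n: int, values: list[int]) -> int:
    # With the parameter renamed, the builtin max() is available again.
    return max(v for v in values if v <= max_n)


assert pick_up_to(10, [3, 7, 12]) == [3, 7]
assert largest_not_above(10, [3, 7, 12]) == 7
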
diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index bbade1c41..9dfb6d091 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -140,24 +140,24 @@ class CNN: focus_list = np.asarray(focus1_list) return focus_list, data_featuremap - def pooling(self, featuremaps, size_pooling, type="average_pool"): + def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"): # pooling process size_map = len(featuremaps[0]) size_pooled = int(size_map / size_pooling) featuremap_pooled = [] for i_map in range(len(featuremaps)): - map = featuremaps[i_map] + feature_map = featuremaps[i_map] map_pooled = [] for i_focus in range(0, size_map, size_pooling): for j_focus in range(0, size_map, size_pooling): - focus = map[ + focus = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] - if type == "average_pool": + if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(focus)) - elif type == "max_pooling": + elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(focus)) map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled) diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index 063be5ea5..a2bfdb326 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -182,7 +182,7 @@ samples = [ [0.2012, 0.2611, 5.4631], ] -exit = [ +target = [ -1, -1, -1, @@ -222,7 +222,7 @@ if __name__ == "__main__": doctest.testmod() network = Perceptron( - sample=samples, target=exit, learning_rate=0.01, epoch_number=1000, bias=-1 + sample=samples, target=target, learning_rate=0.01, epoch_number=1000, bias=-1 ) network.training() print("Finished training perceptron") diff --git a/project_euler/problem_065/sol1.py b/project_euler/problem_065/sol1.py index 229769a77..0a00cf477 100644 --- a/project_euler/problem_065/sol1.py +++ b/project_euler/problem_065/sol1.py @@ -71,7 +71,7 @@ def sum_digits(num: int) -> int: return digit_sum -def solution(max: int = 100) -> int: +def solution(max_n: int = 100) -> int: """ Returns the sum of the digits in the numerator of the max-th convergent of the continued fraction for e. @@ -86,7 +86,7 @@ def solution(max: int = 100) -> int: pre_numerator = 1 cur_numerator = 2 - for i in range(2, max + 1): + for i in range(2, max_n + 1): temp = pre_numerator e_cont = 2 * i // 3 if i % 3 == 0 else 1 pre_numerator = cur_numerator diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index d42b017cc..273f37efc 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -72,7 +72,7 @@ def has_same_digits(num1: int, num2: int) -> bool: return sorted(str(num1)) == sorted(str(num2)) -def solution(max: int = 10000000) -> int: +def solution(max_n: int = 10000000) -> int: """ Finds the value of n from 1 to max such that n/φ(n) produces a minimum. 
@@ -85,9 +85,9 @@ def solution(max: int = 10000000) -> int: min_numerator = 1 # i min_denominator = 0 # φ(i) - totients = get_totients(max + 1) + totients = get_totients(max_n + 1) - for i in range(2, max + 1): + for i in range(2, max_n + 1): t = totients[i] if i * min_denominator < min_numerator * t and has_same_digits(i, t): diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py index 557337ee7..532f82949 100644 --- a/sorts/odd_even_sort.py +++ b/sorts/odd_even_sort.py @@ -20,21 +20,21 @@ def odd_even_sort(input_list: list) -> list: >>> odd_even_sort([1 ,2 ,3 ,4]) [1, 2, 3, 4] """ - sorted = False - while sorted is False: # Until all the indices are traversed keep looping - sorted = True + is_sorted = False + while is_sorted is False: # Until all the indices are traversed keep looping + is_sorted = True for i in range(0, len(input_list) - 1, 2): # iterating over all even indices if input_list[i] > input_list[i + 1]: input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] # swapping if elements not in order - sorted = False + is_sorted = False for i in range(1, len(input_list) - 1, 2): # iterating over all odd indices if input_list[i] > input_list[i + 1]: input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] # swapping if elements not in order - sorted = False + is_sorted = False return input_list diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index 7b2b61d1d..eaabdcb87 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -1,4 +1,4 @@ -def snake_to_camel_case(input: str, use_pascal: bool = False) -> str: +def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: """ Transforms a snake_case given string to camelCase (or PascalCase if indicated) (defaults to not use Pascal) @@ -26,14 +26,14 @@ def snake_to_camel_case(input: str, use_pascal: bool = False) -> str: ValueError: Expected boolean as use_pascal parameter, found """ - if not isinstance(input, str): - raise ValueError(f"Expected string as input, found {type(input)}") + if not isinstance(input_str, str): + raise ValueError(f"Expected string as input, found {type(input_str)}") if not isinstance(use_pascal, bool): raise ValueError( f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" ) - words = input.split("_") + words = input_str.split("_") start_index = 0 if use_pascal else 1 From f176786d12ead5796644a9b37d96786cdaa55391 Mon Sep 17 00:00:00 2001 From: Praveen Date: Thu, 13 Oct 2022 21:04:52 +0530 Subject: [PATCH 027/368] Update open_google_results.py (#7085) * update crawl_google_results.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename crawl_google_results.py to open_google_results.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create crawl_google_results.py * Update web_programming/open_google_results.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update open_google_results.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/open_google_results.py | 42 ++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 web_programming/open_google_results.py diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py new file mode 
100644 index 000000000..0e1dba8c5 --- /dev/null +++ b/web_programming/open_google_results.py @@ -0,0 +1,42 @@ +import webbrowser +from sys import argv +from urllib.parse import quote, parse_qs +from fake_useragent import UserAgent + +import requests +from bs4 import BeautifulSoup + +if __name__ == "__main__": + if len(argv) > 1: + query = "%20".join(argv[1:]) + else: + query = quote(str(input("Search: "))) + + print("Googling.....") + + url = f"https://www.google.com/search?q={query}&num=100" + + res = requests.get( + url, + headers={ + "User-Agent": str(UserAgent().random) + }, + ) + + try: + link = ( + BeautifulSoup(res.text, "html.parser") + .find("div", attrs={"class": "yuRUbf"}) + .find("a") + .get("href") + ) + + except AttributeError: + link = parse_qs( + BeautifulSoup(res.text, "html.parser") + .find("div", attrs={"class": "kCrYT"}) + .find("a") + .get("href") + )["url"][0] + + webbrowser.open(link) From 4d0c830d2c7a4a535501887a8eb97966a370ef57 Mon Sep 17 00:00:00 2001 From: Caeden Date: Thu, 13 Oct 2022 17:03:06 +0100 Subject: [PATCH 028/368] Add flake8 pluin flake8 bugbear to pre-commit (#7132) * ci(pre-commit): Add ``flake8-builtins`` additional dependency to ``pre-commit`` (#7104) * refactor: Fix ``flake8-builtins`` (#7104) * fix(lru_cache): Fix naming conventions in docstrings (#7104) * ci(pre-commit): Order additional dependencies alphabetically (#7104) * fix(lfu_cache): Correct function name in docstring (#7104) * Update strings/snake_case_to_camel_pascal_case.py Co-authored-by: Christian Clauss * Update data_structures/stacks/next_greater_element.py Co-authored-by: Christian Clauss * Update digital_image_processing/index_calculation.py Co-authored-by: Christian Clauss * Update graphs/prim.py Co-authored-by: Christian Clauss * Update hashes/djb2.py Co-authored-by: Christian Clauss * refactor: Rename `_builtin` to `builtin_` ( #7104) * fix: Rename all instances (#7104) * refactor: Update variable names (#7104) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ci: Create ``tox.ini`` and ignore ``A003`` (#7123) * revert: Remove function name changes (#7104) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename tox.ini to .flake8 * Update data_structures/heap/heap.py Co-authored-by: Dhruv Manilawala * refactor: Rename `next_` to `next_item` (#7104) * ci(pre-commit): Add `flake8` plugin `flake8-bugbear` (#7127) * refactor: Follow `flake8-bugbear` plugin (#7127) * fix: Correct `knapsack` code (#7127) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- .pre-commit-config.yaml | 5 +++- .../jacobi_iteration_method.py | 2 +- .../newton_forward_interpolation.py | 2 +- arithmetic_analysis/secant_method.py | 2 +- audio_filters/butterworth_filter.py | 23 +++++++++++++------ backtracking/sum_of_subsets.py | 8 +++---- boolean_algebra/quine_mc_cluskey.py | 2 +- ciphers/mixed_keyword_cypher.py | 2 +- ciphers/rabin_miller.py | 2 +- compression/burrows_wheeler.py | 2 +- .../binary_search_tree_recursive.py | 8 +++---- .../linked_list/circular_linked_list.py | 8 +++---- .../linked_list/doubly_linked_list.py | 8 +++---- .../middle_element_of_linked_list.py | 2 +- .../linked_list/singly_linked_list.py | 6 ++--- data_structures/linked_list/skip_list.py | 4 ++-- 
data_structures/queue/queue_on_list.py | 2 +- .../queue/queue_on_pseudo_stack.py | 2 +- data_structures/stacks/stack.py | 6 ++--- divide_and_conquer/convex_hull.py | 8 +++---- .../strassen_matrix_multiplication.py | 6 ++--- dynamic_programming/all_construct.py | 2 +- dynamic_programming/knapsack.py | 10 ++++---- fractals/julia_sets.py | 2 +- fractals/koch_snowflake.py | 2 +- fractals/mandelbrot.py | 2 +- genetic_algorithm/basic_string.py | 7 ++++-- graphs/basic_graphs.py | 4 ++-- graphs/bellman_ford.py | 2 +- graphs/dijkstra_2.py | 18 +++++++-------- graphs/frequent_pattern_graph_miner.py | 2 +- graphs/kahns_algorithm_long.py | 2 +- graphs/kahns_algorithm_topo.py | 2 +- graphs/minimum_spanning_tree_prims.py | 2 +- graphs/page_rank.py | 2 +- graphs/scc_kosaraju.py | 4 ++-- greedy_methods/optimal_merge_pattern.py | 2 +- hashes/chaos_machine.py | 2 +- hashes/enigma_machine.py | 2 +- machine_learning/self_organizing_map.py | 2 +- maths/area_under_curve.py | 2 +- maths/line_length.py | 2 +- maths/lucas_lehmer_primality_test.py | 2 +- maths/lucas_series.py | 2 +- maths/miller_rabin.py | 2 +- maths/monte_carlo_dice.py | 2 +- maths/numerical_integration.py | 2 +- maths/pi_monte_carlo_estimation.py | 2 +- maths/pollard_rho.py | 2 +- maths/primelib.py | 8 +++---- maths/proth_number.py | 2 +- maths/square_root.py | 2 +- maths/ugly_numbers.py | 2 +- matrix/matrix_class.py | 2 +- ...h_fibonacci_using_matrix_exponentiation.py | 2 +- .../back_propagation_neural_network.py | 2 +- neural_network/perceptron.py | 2 +- other/lfu_cache.py | 2 +- other/lru_cache.py | 2 +- other/magicdiamondpattern.py | 8 +++---- other/scoring_algorithm.py | 2 +- physics/lorentz_transformation_four_vector.py | 2 +- physics/n_body_simulation.py | 2 +- project_euler/problem_011/sol2.py | 2 +- project_euler/problem_025/sol3.py | 2 +- project_euler/problem_026/sol1.py | 2 +- project_euler/problem_188/sol1.py | 2 +- project_euler/problem_203/sol1.py | 2 +- scheduling/multi_level_feedback_queue.py | 2 +- sorts/double_sort.py | 2 +- web_programming/open_google_results.py | 8 +++---- 71 files changed, 137 insertions(+), 124 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e0de70b01..d2558b90a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,10 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 - additional_dependencies: [flake8-builtins, pep8-naming] + additional_dependencies: + - flake8-bugbear + - flake8-builtins + - pep8-naming - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 0aab4db20..3087309e8 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -110,7 +110,7 @@ def jacobi_iteration_method( strictly_diagonally_dominant(table) # Iterates the whole matrix for given number of times - for i in range(iterations): + for _ in range(iterations): new_val = [] for row in range(rows): temp = 0 diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py index 490e0687f..466f6c18c 100644 --- a/arithmetic_analysis/newton_forward_interpolation.py +++ b/arithmetic_analysis/newton_forward_interpolation.py @@ -23,7 +23,7 @@ def ucal(u: float, p: int) -> float: def main() -> None: n = int(input("enter the numbers of values: ")) y: list[list[float]] = [] - for i in range(n): + for _ in range(n): y.append([]) for i in 
range(n): for j in range(n): diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py index 45bcb185f..d28a46206 100644 --- a/arithmetic_analysis/secant_method.py +++ b/arithmetic_analysis/secant_method.py @@ -20,7 +20,7 @@ def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float """ x0 = lower_bound x1 = upper_bound - for i in range(0, repeats): + for _ in range(0, repeats): x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0)) return x1 diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index 409cfeb1d..cffedb7a6 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -11,7 +11,7 @@ Alternatively you can use scipy.signal.butter, which should yield the same resul def make_lowpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates a low-pass filter @@ -39,7 +39,7 @@ def make_lowpass( def make_highpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates a high-pass filter @@ -67,7 +67,7 @@ def make_highpass( def make_bandpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates a band-pass filter @@ -96,7 +96,7 @@ def make_bandpass( def make_allpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates an all-pass filter @@ -121,7 +121,10 @@ def make_allpass( def make_peak( - frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) + frequency: int, + samplerate: int, + gain_db: float, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a peak filter @@ -150,7 +153,10 @@ def make_peak( def make_lowshelf( - frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) + frequency: int, + samplerate: int, + gain_db: float, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a low-shelf filter @@ -184,7 +190,10 @@ def make_lowshelf( def make_highshelf( - frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) + frequency: int, + samplerate: int, + gain_db: float, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a high-shelf filter diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index 8348544c0..128e29071 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -39,14 +39,14 @@ def create_state_space_tree( if sum(path) == max_sum: result.append(path) return - for num_index in range(num_index, len(nums)): + for index in range(num_index, len(nums)): create_state_space_tree( nums, max_sum, - num_index + 1, - path + [nums[num_index]], + index + 1, + path + [nums[index]], result, - remaining_nums_sum - nums[num_index], + remaining_nums_sum - nums[index], ) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 9aa9b10c8..5bd7117bb 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -56,7 +56,7 @@ def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[st temp = [] for minterm in minterms: string = "" - for i in 
range(no_of_variable): + for _ in range(no_of_variable): string = str(minterm % 2) + string minterm //= 2 temp.append(string) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 178902173..f55c9c428 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -40,7 +40,7 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: k = 0 for _ in range(r): s = [] - for j in range(len_temp): + for _ in range(len_temp): s.append(temp[k]) if not (k < 25): break diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py index 0aab80eb9..410d559d4 100644 --- a/ciphers/rabin_miller.py +++ b/ciphers/rabin_miller.py @@ -11,7 +11,7 @@ def rabin_miller(num: int) -> bool: s = s // 2 t += 1 - for trials in range(5): + for _ in range(5): a = random.randrange(2, num - 1) v = pow(a, s, num) if v != 1: diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 4ad99a642..0916b8a65 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -154,7 +154,7 @@ def reverse_bwt(bwt_string: str, idx_original_string: int) -> str: ) ordered_rotations = [""] * len(bwt_string) - for x in range(len(bwt_string)): + for _ in range(len(bwt_string)): for i in range(len(bwt_string)): ordered_rotations[i] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 0d0ac8fd1..97eb8e25b 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -357,7 +357,7 @@ class BinarySearchTreeTest(unittest.TestCase): assert t.root.left.left.parent == t.root.left assert t.root.left.left.label == 1 - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.put(1) def test_search(self) -> None: @@ -369,7 +369,7 @@ class BinarySearchTreeTest(unittest.TestCase): node = t.search(13) assert node.label == 13 - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.search(2) def test_remove(self) -> None: @@ -515,7 +515,7 @@ class BinarySearchTreeTest(unittest.TestCase): assert t.get_max_label() == 14 t.empty() - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.get_max_label() def test_get_min_label(self) -> None: @@ -524,7 +524,7 @@ class BinarySearchTreeTest(unittest.TestCase): assert t.get_min_label() == 1 t.empty() - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.get_min_label() def test_inorder_traversal(self) -> None: diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 6fec0a125..67a63cd55 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -94,25 +94,25 @@ def test_circular_linked_list() -> None: try: circular_linked_list.delete_front() - assert False # This should not happen + raise AssertionError() # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() - assert False # This should not happen + raise AssertionError() # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1) - assert False + raise AssertionError() except IndexError: assert True try: circular_linked_list.delete_nth(0) - 
assert False + raise AssertionError() except IndexError: assert True diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 0eb3cf101..9e996ef0f 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -96,7 +96,7 @@ class DoublyLinkedList: self.tail = new_node else: temp = self.head - for i in range(0, index): + for _ in range(0, index): temp = temp.next temp.previous.next = new_node new_node.previous = temp.previous @@ -145,7 +145,7 @@ class DoublyLinkedList: self.tail.next = None else: temp = self.head - for i in range(0, index): + for _ in range(0, index): temp = temp.next delete_node = temp temp.next.previous = temp.previous @@ -194,13 +194,13 @@ def test_doubly_linked_list() -> None: try: linked_list.delete_head() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py index 0c6250f3b..86dad6b41 100644 --- a/data_structures/linked_list/middle_element_of_linked_list.py +++ b/data_structures/linked_list/middle_element_of_linked_list.py @@ -62,7 +62,7 @@ class LinkedList: if __name__ == "__main__": link = LinkedList() - for i in range(int(input().strip())): + for _ in range(int(input().strip())): data = int(input().strip()) link.push(data) print(link.middle_element()) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 59d7c512b..89a05ae81 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -132,7 +132,7 @@ class LinkedList: if not 0 <= index < len(self): raise ValueError("list index out of range.") current = self.head - for i in range(index): + for _ in range(index): current = current.next current.data = data @@ -352,13 +352,13 @@ def test_singly_linked_list() -> None: try: linked_list.delete_head() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 176049120..a667e3e9b 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -205,7 +205,7 @@ class SkipList(Generic[KT, VT]): if level > self.level: # After level increase we have to add additional nodes to head. 
- for i in range(self.level - 1, level): + for _ in range(self.level - 1, level): update_vector.append(self.head) self.level = level @@ -407,7 +407,7 @@ def test_iter_always_yields_sorted_values(): def pytests(): - for i in range(100): + for _ in range(100): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py index 485cf0b6f..71fca6b2f 100644 --- a/data_structures/queue/queue_on_list.py +++ b/data_structures/queue/queue_on_list.py @@ -37,7 +37,7 @@ class Queue: number of times to rotate queue""" def rotate(self, rotation): - for i in range(rotation): + for _ in range(rotation): self.put(self.get()) """Enqueues {@code item} diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py index 9a0c16f61..d98451000 100644 --- a/data_structures/queue/queue_on_pseudo_stack.py +++ b/data_structures/queue/queue_on_pseudo_stack.py @@ -37,7 +37,7 @@ class Queue: number of times to rotate queue""" def rotate(self, rotation: int) -> None: - for i in range(rotation): + for _ in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] self.put(temp) diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index d1c73df43..55d424d50 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -92,13 +92,13 @@ def test_stack() -> None: try: _ = stack.pop() - assert False # This should not happen + raise AssertionError() # This should not happen except StackUnderflowError: assert True # This should happen try: _ = stack.peek() - assert False # This should not happen + raise AssertionError() # This should not happen except StackUnderflowError: assert True # This should happen @@ -118,7 +118,7 @@ def test_stack() -> None: try: stack.push(200) - assert False # This should not happen + raise AssertionError() # This should not happen except StackOverflowError: assert True # This should happen diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 72da11639..39e78be04 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -458,16 +458,16 @@ def convex_hull_melkman(points: list[Point]) -> list[Point]: convex_hull[1] = points[i] i += 1 - for i in range(i, n): + for j in range(i, n): if ( - _det(convex_hull[0], convex_hull[-1], points[i]) > 0 + _det(convex_hull[0], convex_hull[-1], points[j]) > 0 and _det(convex_hull[-1], convex_hull[0], points[1]) < 0 ): # The point lies within the convex hull continue - convex_hull.insert(0, points[i]) - convex_hull.append(points[i]) + convex_hull.insert(0, points[j]) + convex_hull.append(points[j]) while _det(convex_hull[0], convex_hull[1], convex_hull[2]) >= 0: del convex_hull[1] while _det(convex_hull[-1], convex_hull[-2], convex_hull[-3]) <= 0: diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 17efcfc7c..0ee426e4b 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -132,12 +132,12 @@ def strassen(matrix1: list, matrix2: list) -> list: # power of 2 for i in range(0, maxim): if i < dimension1[0]: - for j in range(dimension1[1], maxim): + for _ in range(dimension1[1], maxim): new_matrix1[i].append(0) else: new_matrix1.append([0] * maxim) if i < dimension2[0]: - for j in range(dimension2[1], maxim): + for _ in 
range(dimension2[1], maxim): new_matrix2[i].append(0) else: new_matrix2.append([0] * maxim) @@ -147,7 +147,7 @@ def strassen(matrix1: list, matrix2: list) -> list: # Removing the additional zeros for i in range(0, maxim): if i < dimension1[0]: - for j in range(dimension2[1], maxim): + for _ in range(dimension2[1], maxim): final_matrix[i].pop() else: final_matrix.pop() diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 5ffed2caa..3839d01e6 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -21,7 +21,7 @@ def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[ table_size: int = len(target) + 1 table: list[list[list[str]]] = [] - for i in range(table_size): + for _ in range(table_size): table.append([]) # seed value table[0] = [[]] # because empty string has empty combination diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 9efb60bab..093e15f49 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -30,13 +30,13 @@ def knapsack(w, wt, val, n): dp = [[0 for i in range(w + 1)] for j in range(n + 1)] for i in range(1, n + 1): - for w in range(1, w + 1): - if wt[i - 1] <= w: - dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w]) + for w_ in range(1, w + 1): + if wt[i - 1] <= w_: + dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_]) else: - dp[i][w] = dp[i - 1][w] + dp[i][w_] = dp[i - 1][w_] - return dp[n][w], dp + return dp[n][w_], dp def knapsack_with_example_solution(w: int, wt: list, val: list): diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index f27394385..28c675c75 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -118,7 +118,7 @@ def iterate_function( """ z_n = z_0.astype("complex64") - for i in range(nb_iterations): + for _ in range(nb_iterations): z_n = eval_function(function_params, z_n) if infinity is not None: numpy.nan_to_num(z_n, copy=False, nan=infinity) diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index 07c1835b4..b0aaa86b1 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -46,7 +46,7 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar 0.28867513]), array([0.66666667, 0. 
]), array([1, 0])] """ vectors = initial_vectors - for i in range(steps): + for _ in range(steps): vectors = iteration_step(vectors) return vectors diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 5d61b72e1..f97bcd170 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -36,7 +36,7 @@ def get_distance(x: float, y: float, max_step: int) -> float: """ a = x b = y - for step in range(max_step): + for step in range(max_step): # noqa: B007 a_new = a * a - b * b + x b = 2 * a * b + y a = a_new diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 97dbe182b..bd7d80268 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -80,7 +80,7 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, score = len( [g for position, g in enumerate(item) if g == main_target[position]] ) - return (item, float(score)) + return (item, float(score)) # noqa: B023 # Adding a bit of concurrency can make everything faster, # @@ -129,7 +129,10 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, child_n = int(parent_1[1] * 100) + 1 child_n = 10 if child_n >= 10 else child_n for _ in range(child_n): - parent_2 = population_score[random.randint(0, N_SELECTED)][0] + parent_2 = population_score[ # noqa: B023 + random.randint(0, N_SELECTED) + ][0] + child_1, child_2 = crossover(parent_1[0], parent_2) # Append new string to the population list pop.append(mutate(child_1)) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index b02e9af65..298a97bf0 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -188,7 +188,7 @@ def topo(g, ind=None, q=None): def adjm(): n = input().strip() a = [] - for i in range(n): + for _ in range(n): a.append(map(int, input().strip().split())) return a, n @@ -264,7 +264,7 @@ def prim(g, s): def edglist(): n, m = map(int, input().split(" ")) edges = [] - for i in range(m): + for _ in range(m): edges.append(map(int, input().split(" "))) return edges, n diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index 0f654a510..eb2cd25bf 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -36,7 +36,7 @@ def bellman_ford( distance = [float("inf")] * vertex_count distance[src] = 0.0 - for i in range(vertex_count - 1): + for _ in range(vertex_count - 1): for j in range(edge_count): u, v, w = (graph[j][k] for k in ["src", "dst", "weight"]) diff --git a/graphs/dijkstra_2.py b/graphs/dijkstra_2.py index 3170765bc..f548463ff 100644 --- a/graphs/dijkstra_2.py +++ b/graphs/dijkstra_2.py @@ -19,23 +19,23 @@ def min_dist(mdist, vset, v): def dijkstra(graph, v, src): - mdist = [float("inf") for i in range(v)] - vset = [False for i in range(v)] + mdist = [float("inf") for _ in range(v)] + vset = [False for _ in range(v)] mdist[src] = 0.0 - for i in range(v - 1): + for _ in range(v - 1): u = min_dist(mdist, vset, v) vset[u] = True - for v in range(v): + for i in range(v): if ( - (not vset[v]) - and graph[u][v] != float("inf") - and mdist[u] + graph[u][v] < mdist[v] + (not vset[i]) + and graph[u][i] != float("inf") + and mdist[u] + graph[u][i] < mdist[i] ): - mdist[v] = mdist[u] + graph[u][v] + mdist[i] = mdist[u] + graph[u][i] - print_dist(mdist, v) + print_dist(mdist, i) if __name__ == "__main__": diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 50081afa6..a5ecbe6e8 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ 
-79,7 +79,7 @@ def get_nodes(frequency_table): {'11111': ['ab', 'ac', 'df', 'bd', 'bc']} """ nodes = {} - for i, item in enumerate(frequency_table): + for _, item in enumerate(frequency_table): nodes.setdefault(item[2], []).append(item[0]) return nodes diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index 776ae3a2f..63cbeb909 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -4,7 +4,7 @@ def longest_distance(graph): queue = [] long_dist = [1] * len(graph) - for key, values in graph.items(): + for values in graph.values(): for i in values: indegree[i] += 1 diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index 6879b047f..b1260bd5b 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -8,7 +8,7 @@ def topological_sort(graph): topo = [] cnt = 0 - for key, values in graph.items(): + for values in graph.values(): for i in values: indegree[i] += 1 diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 9b2c645f1..5b2eaa4bf 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -91,7 +91,7 @@ def prisms_algorithm(l): # noqa: E741 distance_tv[x[0]] = x[1] heapify(distance_tv, positions) - for i in range(1, len(l)): + for _ in range(1, len(l)): vertex = delete_minimum(distance_tv, positions) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex)) diff --git a/graphs/page_rank.py b/graphs/page_rank.py index 672405b73..e1af35b34 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -41,7 +41,7 @@ def page_rank(nodes, limit=3, d=0.85): for i in range(limit): print(f"======= Iteration {i + 1} =======") - for j, node in enumerate(nodes): + for _, node in enumerate(nodes): ranks[node.name] = (1 - d) + d * sum( ranks[ib] / outbounds[ib] for ib in node.inbound ) diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py index ea9d35282..39211c64b 100644 --- a/graphs/scc_kosaraju.py +++ b/graphs/scc_kosaraju.py @@ -39,10 +39,10 @@ if __name__ == "__main__": # n - no of nodes, m - no of edges n, m = list(map(int, input().strip().split())) - graph: list[list[int]] = [[] for i in range(n)] # graph + graph: list[list[int]] = [[] for _ in range(n)] # graph reversed_graph: list[list[int]] = [[] for i in range(n)] # reversed graph # input graph data (edges) - for i in range(m): + for _ in range(m): u, v = list(map(int, input().strip().split())) graph[u].append(v) reversed_graph[v].append(u) diff --git a/greedy_methods/optimal_merge_pattern.py b/greedy_methods/optimal_merge_pattern.py index 911e1966f..a1c934f84 100644 --- a/greedy_methods/optimal_merge_pattern.py +++ b/greedy_methods/optimal_merge_pattern.py @@ -41,7 +41,7 @@ def optimal_merge_pattern(files: list) -> float: while len(files) > 1: temp = 0 # Consider two files with minimum cost to be merged - for i in range(2): + for _ in range(2): min_index = files.index(min(files)) temp += files[min_index] files.pop(min_index) diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 69313fbb2..238fdb1c0 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -53,7 +53,7 @@ def pull(): key = machine_time % m # Evolution (Time Length) - for i in range(0, t): + for _ in range(0, t): # Variables (Position + Parameters) r = params_space[key] value = buffer_space[key] diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index b0d45718e..0194f7da7 100644 --- a/hashes/enigma_machine.py +++ 
b/hashes/enigma_machine.py @@ -48,7 +48,7 @@ if __name__ == "__main__": break except Exception as error: print(error) - for i in range(token): + for _ in range(token): rotator() for j in decode: engine(j) diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py index bd3d388f9..057c2a76b 100644 --- a/machine_learning/self_organizing_map.py +++ b/machine_learning/self_organizing_map.py @@ -47,7 +47,7 @@ def main() -> None: epochs = 3 alpha = 0.5 - for i in range(epochs): + for _ in range(epochs): for j in range(len(training_samples)): # training sample diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index d345398b4..b557b2029 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -35,7 +35,7 @@ def trapezoidal_area( x1 = x_start fx1 = fnc(x_start) area = 0.0 - for i in range(steps): + for _ in range(steps): # Approximates small segments of curve as linear and solve # for trapezoidal area x2 = (x_end - x_start) / steps + x1 diff --git a/maths/line_length.py b/maths/line_length.py index ad12a816b..ea27ee904 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -40,7 +40,7 @@ def line_length( fx1 = fnc(x_start) length = 0.0 - for i in range(steps): + for _ in range(steps): # Approximates curve as a sequence of linear lines and sums their length x2 = (x_end - x_start) / steps + x1 diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 916abfcc1..0a5621aac 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -31,7 +31,7 @@ def lucas_lehmer_test(p: int) -> bool: s = 4 m = (1 << p) - 1 - for i in range(p - 2): + for _ in range(p - 2): s = ((s * s) - 2) % m return s == 0 diff --git a/maths/lucas_series.py b/maths/lucas_series.py index 6b32c2022..cae6c2815 100644 --- a/maths/lucas_series.py +++ b/maths/lucas_series.py @@ -50,7 +50,7 @@ def dynamic_lucas_number(n_th_number: int) -> int: if not isinstance(n_th_number, int): raise TypeError("dynamic_lucas_number accepts only integer arguments.") a, b = 2, 1 - for i in range(n_th_number): + for _ in range(n_th_number): a, b = b, a + b return a diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py index b4dfed129..9f2668dba 100644 --- a/maths/miller_rabin.py +++ b/maths/miller_rabin.py @@ -33,7 +33,7 @@ def is_prime_big(n, prec=1000): b = bin_exp_mod(a, d, n) if b != 1: flag = True - for i in range(exp): + for _ in range(exp): if b == n - 1: flag = False break diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py index 17cedbdbc..c4150b88f 100644 --- a/maths/monte_carlo_dice.py +++ b/maths/monte_carlo_dice.py @@ -35,7 +35,7 @@ def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]: """ dices = [Dice() for i in range(num_dice)] count_of_sum = [0] * (len(dices) * Dice.NUM_SIDES + 1) - for i in range(num_throws): + for _ in range(num_throws): count_of_sum[sum(dice.roll() for dice in dices)] += 1 probability = [round((count * 100) / num_throws, 2) for count in count_of_sum] return probability[num_dice:] # remove probability of sums that never appear diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index a2bfce5b9..8f32fd356 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -39,7 +39,7 @@ def trapezoidal_area( fx1 = fnc(x_start) area = 0.0 - for i in range(steps): + for _ in range(steps): # Approximates small segments of curve as linear and solve # for trapezoidal area diff --git 
a/maths/pi_monte_carlo_estimation.py b/maths/pi_monte_carlo_estimation.py index 81be08378..29b679907 100644 --- a/maths/pi_monte_carlo_estimation.py +++ b/maths/pi_monte_carlo_estimation.py @@ -47,7 +47,7 @@ def estimate_pi(number_of_simulations: int) -> float: raise ValueError("At least one simulation is necessary to estimate PI.") number_in_unit_circle = 0 - for simulation_index in range(number_of_simulations): + for _ in range(number_of_simulations): random_point = Point.random_unit_square() if random_point.is_in_unit_circle(): diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py index 0fc80cd42..5082f54f7 100644 --- a/maths/pollard_rho.py +++ b/maths/pollard_rho.py @@ -73,7 +73,7 @@ def pollard_rho( """ return (pow(value, 2) + step) % modulus - for attempt in range(attempts): + for _ in range(attempts): # These track the position within the cycle detection logic. tortoise = seed hare = seed diff --git a/maths/primelib.py b/maths/primelib.py index 7d2a22f39..eb72a9f8a 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -406,14 +406,14 @@ def kg_v(number1, number2): count1 = prime_fac_1.count(n) count2 = prime_fac_2.count(n) - for i in range(max(count1, count2)): + for _ in range(max(count1, count2)): ans *= n else: count1 = prime_fac_1.count(n) - for i in range(count1): + for _ in range(count1): ans *= n done.append(n) @@ -425,7 +425,7 @@ def kg_v(number1, number2): count2 = prime_fac_2.count(n) - for i in range(count2): + for _ in range(count2): ans *= n done.append(n) @@ -637,7 +637,7 @@ def fib(n): fib1 = 1 ans = 1 # this will be return - for i in range(n - 1): + for _ in range(n - 1): tmp = ans ans += fib1 diff --git a/maths/proth_number.py b/maths/proth_number.py index e17503143..6b1519024 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -49,7 +49,7 @@ def proth(number: int) -> int: proth_index = 2 increment = 3 for block in range(1, block_index): - for move in range(increment): + for _ in range(increment): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1]) proth_index += 1 increment *= 2 diff --git a/maths/square_root.py b/maths/square_root.py index b324c7230..2cbf14bea 100644 --- a/maths/square_root.py +++ b/maths/square_root.py @@ -49,7 +49,7 @@ def square_root_iterative( value = get_initial_point(a) - for i in range(max_iter): + for _ in range(max_iter): prev_value = value value = value - fx(value, a) / fx_derivative(value) if abs(prev_value - value) < tolerance: diff --git a/maths/ugly_numbers.py b/maths/ugly_numbers.py index 4451a68cd..81bd928c6 100644 --- a/maths/ugly_numbers.py +++ b/maths/ugly_numbers.py @@ -32,7 +32,7 @@ def ugly_numbers(n: int) -> int: next_3 = ugly_nums[i3] * 3 next_5 = ugly_nums[i5] * 5 - for i in range(1, n): + for _ in range(1, n): next_num = min(next_2, next_3, next_5) ugly_nums.append(next_num) if next_num == next_2: diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 305cad0a5..6495bd8fc 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -351,7 +351,7 @@ class Matrix: "Only invertable matrices can be raised to a negative power" ) result = self - for i in range(other - 1): + for _ in range(other - 1): result *= self return result diff --git a/matrix/nth_fibonacci_using_matrix_exponentiation.py b/matrix/nth_fibonacci_using_matrix_exponentiation.py index 7c964d884..65f10c90d 100644 --- a/matrix/nth_fibonacci_using_matrix_exponentiation.py +++ b/matrix/nth_fibonacci_using_matrix_exponentiation.py @@ -65,7 +65,7 @@ def nth_fibonacci_bruteforce(n: int) -> int: return n fib0 = 0 
fib1 = 1 - for i in range(2, n + 1): + for _ in range(2, n + 1): fib0, fib1 = fib1, fib0 + fib1 return fib1 diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 43e796e77..23b818b0f 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -128,7 +128,7 @@ class BPNN: self.ax_loss.hlines(self.accuracy, 0, self.train_round * 1.1) x_shape = np.shape(xdata) - for round_i in range(train_round): + for _ in range(train_round): all_loss = 0 for row in range(x_shape[0]): _xdata = np.asmatrix(xdata[row, :]).T diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index a2bfdb326..f04c81424 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -69,7 +69,7 @@ class Perceptron: for sample in self.sample: sample.insert(0, self.bias) - for i in range(self.col_sample): + for _ in range(self.col_sample): self.weight.append(random.random()) self.weight.insert(0, self.bias) diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 072d00ab5..2f26bb6cc 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -303,7 +303,7 @@ class LFUCache(Generic[T, U]): def cache_info() -> LFUCache[T, U]: return cls.decorator_function_to_instance_map[func] - setattr(cache_decorator_wrapper, "cache_info", cache_info) + setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 return cache_decorator_wrapper diff --git a/other/lru_cache.py b/other/lru_cache.py index b68ae0a8e..aa910e487 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -321,7 +321,7 @@ class LRUCache(Generic[T, U]): def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] - setattr(cache_decorator_wrapper, "cache_info", cache_info) + setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 return cache_decorator_wrapper diff --git a/other/magicdiamondpattern.py b/other/magicdiamondpattern.py index 71bc50b51..0fc41d7a2 100644 --- a/other/magicdiamondpattern.py +++ b/other/magicdiamondpattern.py @@ -8,9 +8,9 @@ def floyd(n): n : size of pattern """ for i in range(0, n): - for j in range(0, n - i - 1): # printing spaces + for _ in range(0, n - i - 1): # printing spaces print(" ", end="") - for k in range(0, i + 1): # printing stars + for _ in range(0, i + 1): # printing stars print("* ", end="") print() @@ -22,10 +22,10 @@ def reverse_floyd(n): n : size of pattern """ for i in range(n, 0, -1): - for j in range(i, 0, -1): # printing stars + for _ in range(i, 0, -1): # printing stars print("* ", end="") print() - for k in range(n - i + 1, 0, -1): # printing spaces + for _ in range(n - i + 1, 0, -1): # printing spaces print(" ", end="") diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index aecd19c55..1e6293f84 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -77,7 +77,7 @@ def procentual_proximity( final_scores: list[float] = [0 for i in range(len(score_lists[0]))] # generate final scores - for i, slist in enumerate(score_lists): + for slist in score_lists: for j, ele in enumerate(slist): final_scores[j] = final_scores[j] + ele diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index bda852c25..f58b40e59 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -145,7 +145,7 @@ def transformation_matrix(velocity: float) -> np.array: def transform( - velocity: 
float, event: np.array = np.zeros(4), symbolic: bool = True + velocity: float, event: np.array = np.zeros(4), symbolic: bool = True # noqa: B008 ) -> np.array: """ >>> transform(29979245,np.array([1,2,3,4]), False) diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 7e9fc1642..2f8153782 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -310,7 +310,7 @@ def example_3() -> BodySystem: """ bodies = [] - for i in range(10): + for _ in range(10): velocity_x = random.uniform(-0.5, 0.5) velocity_y = random.uniform(-0.5, 0.5) diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 839ca6717..9ea0db991 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -36,7 +36,7 @@ def solution(): """ with open(os.path.dirname(__file__) + "/grid.txt") as f: l = [] # noqa: E741 - for i in range(20): + for _ in range(20): l.append([int(x) for x in f.readline().split()]) maximum = 0 diff --git a/project_euler/problem_025/sol3.py b/project_euler/problem_025/sol3.py index c66411dc5..0b9f3a0c8 100644 --- a/project_euler/problem_025/sol3.py +++ b/project_euler/problem_025/sol3.py @@ -45,7 +45,7 @@ def solution(n: int = 1000) -> int: f = f1 + f2 f1, f2 = f2, f index += 1 - for j in str(f): + for _ in str(f): i += 1 if i == n: break diff --git a/project_euler/problem_026/sol1.py b/project_euler/problem_026/sol1.py index 75d48df79..ccf2c111d 100644 --- a/project_euler/problem_026/sol1.py +++ b/project_euler/problem_026/sol1.py @@ -41,7 +41,7 @@ def solution(numerator: int = 1, digit: int = 1000) -> int: for divide_by_number in range(numerator, digit + 1): has_been_divided: list[int] = [] now_divide = numerator - for division_cycle in range(1, digit + 1): + for _ in range(1, digit + 1): if now_divide in has_been_divided: if longest_list_length < len(has_been_divided): longest_list_length = len(has_been_divided) diff --git a/project_euler/problem_188/sol1.py b/project_euler/problem_188/sol1.py index dd4360adb..88bd1327e 100644 --- a/project_euler/problem_188/sol1.py +++ b/project_euler/problem_188/sol1.py @@ -58,7 +58,7 @@ def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int: # calculate base↑↑height by right-assiciative repeated modular # exponentiation result = base - for i in range(1, height): + for _ in range(1, height): result = _modexpt(base, result, 10**digits) return result diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index dc93683da..713b530b6 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -49,7 +49,7 @@ def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: """ coefficients = {1} previous_coefficients = [1] - for step in range(2, depth + 1): + for _ in range(2, depth + 1): coefficients_begins_one = previous_coefficients + [0] coefficients_ends_one = [0] + previous_coefficients previous_coefficients = [] diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py index b54cc8719..a3ba1b340 100644 --- a/scheduling/multi_level_feedback_queue.py +++ b/scheduling/multi_level_feedback_queue.py @@ -205,7 +205,7 @@ class MLFQ: """ finished: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue - for i in range(len(ready_queue)): + for _ in range(len(ready_queue)): cp = ready_queue.popleft() # current process # if process's arrival time is later than current time, update 
current time diff --git a/sorts/double_sort.py b/sorts/double_sort.py index 4e08e27b3..5ca88a674 100644 --- a/sorts/double_sort.py +++ b/sorts/double_sort.py @@ -15,7 +15,7 @@ def double_sort(lst): True """ no_of_elements = len(lst) - for i in range( + for _ in range( 0, int(((no_of_elements - 1) / 2) + 1) ): # we don't need to traverse to end of list as for j in range(0, no_of_elements - 1): diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index 0e1dba8c5..2685bf621 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -1,10 +1,10 @@ import webbrowser from sys import argv -from urllib.parse import quote, parse_qs -from fake_useragent import UserAgent +from urllib.parse import parse_qs, quote import requests from bs4 import BeautifulSoup +from fake_useragent import UserAgent if __name__ == "__main__": if len(argv) > 1: @@ -18,9 +18,7 @@ if __name__ == "__main__": res = requests.get( url, - headers={ - "User-Agent": str(UserAgent().random) - }, + headers={"User-Agent": str(UserAgent().random)}, ) try: From 71353ed79787cbbe3800ee32a1fb3d82c1335d19 Mon Sep 17 00:00:00 2001 From: Advik Sharma <70201060+advik-student-dev@users.noreply.github.com> Date: Thu, 13 Oct 2022 10:09:48 -0700 Subject: [PATCH 029/368] refined readme.md (#7081) * refined readme.md added some refinements to readme.md * Update README.md Co-authored-by: Christian Clauss --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index c499c14e1..da80c012b 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@

All algorithms implemented in Python - for education

-Implementations are for learning purposes only. As they may be less efficient than the implementations in the Python standard library, use them at your discretion.
+Implementations are for learning purposes only. They may be less efficient than the implementations in the Python standard library. Use them at your discretion.
 
 ## Getting Started
 
@@ -42,8 +42,8 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut
 
 ## Community Channels
 
-We're on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are great for you to ask questions and get help. Please join us!
+We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are a great way for you to ask questions and get help. Please join us!
 
 ## List of Algorithms
 
-See our [directory](DIRECTORY.md) for easier navigation and better overview of the project.
+See our [directory](DIRECTORY.md) for easier navigation and a better overview of the project.

From 3deb4a3042438007df7373c07c6280e55d3511da Mon Sep 17 00:00:00 2001
From: Anurag Shukla <76862299+anuragshuklajec@users.noreply.github.com>
Date: Fri, 14 Oct 2022 01:33:15 +0530
Subject: [PATCH 030/368] Create binary_search_matrix.py (#6995)

* Create binary_search_matrix.py
Added an algorithm to search in matrix
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Update binary_search_matrix.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix Indentation
* Update matrix/binary_search_matrix.py

Co-authored-by: Christian Clauss
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Christian Clauss
---
 matrix/binary_search_matrix.py | 57 ++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 matrix/binary_search_matrix.py

diff --git a/matrix/binary_search_matrix.py b/matrix/binary_search_matrix.py
new file mode 100644
index 000000000..6f203b7a3
--- /dev/null
+++ b/matrix/binary_search_matrix.py
@@ -0,0 +1,57 @@
+def binary_search(array: list, lower_bound: int, upper_bound: int, value: int) -> int:
+    """
+    This function carries out Binary search on a 1d array and
+    return -1 if it do not exist
+    array: A 1d sorted array
+    value : the value meant to be searched
+    >>> matrix = [1, 4, 7, 11, 15]
+    >>> binary_search(matrix, 0, len(matrix) - 1, 1)
+    0
+    >>> binary_search(matrix, 0, len(matrix) - 1, 23)
+    -1
+    """
+
+    r = int((lower_bound + upper_bound) // 2)
+    if array[r] == value:
+        return r
+    if lower_bound >= upper_bound:
+        return -1
+    if array[r] < value:
+        return binary_search(array, r + 1, upper_bound, value)
+    else:
+        return binary_search(array, lower_bound, r - 1, value)
+
+
+def mat_bin_search(value: int, matrix: list) -> list:
+    """
+    This function loops over a 2d matrix and calls binarySearch on
+    the selected 1d array and returns [-1, -1] is it do not exist
+    value : value meant to be searched
+    matrix = a sorted 2d matrix
+    >>> matrix = [[1, 4, 7, 11, 15],
+    ...           [2, 5, 8, 12, 19],
+    ...           [3, 6, 9, 16, 22],
+    ...           [10, 13, 14, 17, 24],
+    ...           [18, 21, 23, 26, 30]]
+    >>> target = 1
+    >>> mat_bin_search(target, matrix)
+    [0, 0]
+    >>> target = 34
+    >>> mat_bin_search(target, matrix)
+    [-1, -1]
+    """
+    index = 0
+    if matrix[index][0] == value:
+        return [index, 0]
+    while index < len(matrix) and matrix[index][0] < value:
+        r = binary_search(matrix[index], 0, len(matrix[index]) - 1, value)
+        if r != -1:
+            return [index, r]
+        index += 1
+    return [-1, -1]
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()

From 05e19128f7fd1bee9c8d037b3f84cd42374aad0d Mon Sep 17 00:00:00 2001
From: AkshajV1309 <79909101+AkshajV1309@users.noreply.github.com>
Date: Fri, 14 Oct 2022 01:54:31 +0530
Subject: [PATCH 031/368] Create norgate.py (#7133)

* Create norgate.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Create norgate.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Update boolean_algebra/norgate.py
* Update boolean_algebra/norgate.py
* Update boolean_algebra/norgate.py
* Update boolean_algebra/norgate.py
* Update boolean_algebra/norgate.py

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Christian Clauss
---
 boolean_algebra/norgate.py | 46 ++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 boolean_algebra/norgate.py

diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py
new file mode 100644
index 000000000..82a1fb2e3
--- /dev/null
+++ b/boolean_algebra/norgate.py
@@ -0,0 +1,46 @@
+""" A NOR Gate is a logic gate in boolean algebra which results to false(0)
+ if any of the input is 1, and True(1) if both the inputs are 0.
+ Following is the truth table of an NOR Gate:
+ | Input 1 | Input 2 | Output |
+ | 0 | 0 | 1 |
+ | 0 | 1 | 0 |
+ | 1 | 0 | 0 |
+ | 1 | 1 | 0 |
+"""
+"""Following is the code implementation of the NOR Gate"""
+
+
+def nor_gate(input_1: int, input_2: int) -> int:
+    """
+    >>> nor_gate(0, 0)
+    1
+    >>> nor_gate(0, 1)
+    0
+    >>> nor_gate(1, 0)
+    0
+    >>> nor_gate(1, 1)
+    0
+    >>> nor_gate(0.0, 0.0)
+    1
+    >>> nor_gate(0, -7)
+    0
+    """
+    return int(bool(input_1 == input_2 == 0))
+
+
+def main() -> None:
+    print("Truth Table of NOR Gate:")
+    print("| Input 1 |", " Input 2 |", " Output |")
+    print("| 0 |", " 0 | ", nor_gate(0, 0), " |")
+    print("| 0 |", " 1 | ", nor_gate(0, 1), " |")
+    print("| 1 |", " 0 | ", nor_gate(1, 0), " |")
+    print("| 1 |", " 1 | ", nor_gate(1, 1), " |")
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+    main()
+"""Code provided by Akshaj Vishwanathan"""
+"""Reference: https://www.geeksforgeeks.org/logic-gates-in-python/"""

From 26fe4c65390b7a2bfe2722b674943b64820d8442 Mon Sep 17 00:00:00 2001
From: Md Mahiuddin <68785084+mahiuddin-dev@users.noreply.github.com>
Date: Fri, 14 Oct 2022 13:20:40 +0600
Subject: [PATCH 032/368] Remove extra Semicolon (#7152)

---
 data_structures/queue/linked_queue.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py
index c6e9f5390..3675da7db 100644
--- a/data_structures/queue/linked_queue.py
+++ b/data_structures/queue/linked_queue.py
@@ -22,7 +22,7 @@ class LinkedQueue:
     >>> queue.put(5)
     >>> queue.put(9)
     >>> queue.put('python')
-    >>> queue.is_empty();
+    >>> queue.is_empty()
     False
     >>> queue.get()
     5

From e40c7b4bf1794c94993715c99e2a97b9d8f5e590 Mon Sep 17 00:00:00 2001
From: Dhruv Manilawala
Date: Fri, 14 Oct 2022 20:04:44 +0530
Subject: [PATCH 033/368] refactor: move flake8 config (#7167) * refactor: move flake8 config * Update .pre-commit-config.yaml Co-authored-by: Christian Clauss --- .flake8 | 5 +++++ .pre-commit-config.yaml | 8 ++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.flake8 b/.flake8 index 9a5863c9c..0d9ef18d1 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,8 @@ [flake8] +max-line-length = 88 +max-complexity = 25 extend-ignore = A003 # Class attribute is shadowing a python builtin + # Formatting style for `black` + E203 # Whitespace before ':' + W503 # Line break occurred before a binary operator diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2558b90a..d3ea9722f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,11 +35,7 @@ repos: - repo: https://github.com/PyCQA/flake8 rev: 5.0.4 hooks: - - id: flake8 - args: - - --ignore=E203,W503 - - --max-complexity=25 - - --max-line-length=88 + - id: flake8 # See .flake8 for args additional_dependencies: - flake8-bugbear - flake8-builtins @@ -51,7 +47,7 @@ repos: - id: mypy args: - --ignore-missing-imports - - --install-types # See mirrors-mypy README.md + - --install-types # See mirrors-mypy README.md - --non-interactive additional_dependencies: [types-requests] From fd5ab454921b687af94927015d4ab06d3a84886b Mon Sep 17 00:00:00 2001 From: Abinash Satapathy Date: Fri, 14 Oct 2022 17:47:39 +0200 Subject: [PATCH 034/368] Doctest output simpler version (#7116) * Update README.md Added Google Cirq references * Create barcode_validator.py Barcode/EAN validator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docstring and updated variables to snake_case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docset and updated bugs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Implemented the changes asked in review. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Updated with f-string format * Update barcode_validator.py * Update volume_conversions.py Simpler doctest output * Update volume_conversions.py Fixed indentation Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- conversions/volume_conversions.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/conversions/volume_conversions.py b/conversions/volume_conversions.py index de2290196..44d290091 100644 --- a/conversions/volume_conversions.py +++ b/conversions/volume_conversions.py @@ -52,11 +52,7 @@ def volume_conversion(value: float, from_type: str, to_type: str) -> float: 0.000236588 >>> volume_conversion(4, "wrongUnit", "litre") Traceback (most recent call last): - File "/usr/lib/python3.8/doctest.py", line 1336, in __run - exec(compile(example.source, filename, "single", - File "", line 1, in - volume_conversion(4, "wrongUnit", "litre") - File "", line 62, in volume_conversion + ... 
ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: cubicmeter, litre, kilolitre, gallon, cubicyard, cubicfoot, cup """ From 0c06b255822905512b9fa9c12cb09dabf8fa405f Mon Sep 17 00:00:00 2001 From: Abinash Satapathy Date: Fri, 14 Oct 2022 23:42:41 +0200 Subject: [PATCH 035/368] Create speed_conversions.py (#7128) * Update README.md Added Google Cirq references * Create barcode_validator.py Barcode/EAN validator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docstring and updated variables to snake_case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docset and updated bugs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Implemented the changes asked in review. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Updated with f-string format * Update barcode_validator.py * Update volume_conversions.py Simpler doctest output * Create speed_conversions.py Conversion of speed units * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_conversions.py Doctests updated, dictionary implemented. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_conversions.py Reduced LOC * Update volume_conversions.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- conversions/speed_conversions.py | 70 ++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 conversions/speed_conversions.py diff --git a/conversions/speed_conversions.py b/conversions/speed_conversions.py new file mode 100644 index 000000000..62da9e137 --- /dev/null +++ b/conversions/speed_conversions.py @@ -0,0 +1,70 @@ +""" +Convert speed units + +https://en.wikipedia.org/wiki/Kilometres_per_hour +https://en.wikipedia.org/wiki/Miles_per_hour +https://en.wikipedia.org/wiki/Knot_(unit) +https://en.wikipedia.org/wiki/Metre_per_second +""" + +speed_chart: dict[str, float] = { + "km/h": 1.0, + "m/s": 3.6, + "mph": 1.609344, + "knot": 1.852, +} + +speed_chart_inverse: dict[str, float] = { + "km/h": 1.0, + "m/s": 0.277777778, + "mph": 0.621371192, + "knot": 0.539956803, +} + + +def convert_speed(speed: float, unit_from: str, unit_to: str) -> float: + """ + Convert speed from one unit to another using the speed_chart above. 
+ + "km/h": 1.0, + "m/s": 3.6, + "mph": 1.609344, + "knot": 1.852, + + >>> convert_speed(100, "km/h", "m/s") + 27.778 + >>> convert_speed(100, "km/h", "mph") + 62.137 + >>> convert_speed(100, "km/h", "knot") + 53.996 + >>> convert_speed(100, "m/s", "km/h") + 360.0 + >>> convert_speed(100, "m/s", "mph") + 223.694 + >>> convert_speed(100, "m/s", "knot") + 194.384 + >>> convert_speed(100, "mph", "km/h") + 160.934 + >>> convert_speed(100, "mph", "m/s") + 44.704 + >>> convert_speed(100, "mph", "knot") + 86.898 + >>> convert_speed(100, "knot", "km/h") + 185.2 + >>> convert_speed(100, "knot", "m/s") + 51.444 + >>> convert_speed(100, "knot", "mph") + 115.078 + """ + if unit_to not in speed_chart or unit_from not in speed_chart_inverse: + raise ValueError( + f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n" + f"Valid values are: {', '.join(speed_chart_inverse)}" + ) + return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 20587750051c3699b051579d7e97e5508958ea5a Mon Sep 17 00:00:00 2001 From: Caeden Date: Fri, 14 Oct 2022 23:25:15 +0100 Subject: [PATCH 036/368] refactor: Make code more understandable (#7196) * refactor: Make code more understandable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../binary_tree/binary_tree_traversals.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 378598bb0..54b1dc536 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -15,7 +15,20 @@ class Node: def make_tree() -> Node | None: - return Node(1, Node(2, Node(4), Node(5)), Node(3)) + r""" + The below tree + 1 + / \ + 2 3 + / \ + 4 5 + """ + tree = Node(1) + tree.left = Node(2) + tree.right = Node(3) + tree.left.left = Node(4) + tree.left.right = Node(5) + return tree def preorder(root: Node | None) -> list[int]: From 5dc0dc4d23eb1efa4564c0531402af3d2419012d Mon Sep 17 00:00:00 2001 From: Lukas Esc <55601315+Luk-ESC@users.noreply.github.com> Date: Fri, 14 Oct 2022 17:37:15 -0500 Subject: [PATCH 037/368] remove useless bool() call (#7189) --- boolean_algebra/norgate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py index 82a1fb2e3..1c341e8a7 100644 --- a/boolean_algebra/norgate.py +++ b/boolean_algebra/norgate.py @@ -25,7 +25,7 @@ def nor_gate(input_1: int, input_2: int) -> int: >>> nor_gate(0, -7) 0 """ - return int(bool(input_1 == input_2 == 0)) + return int(input_1 == input_2 == 0) def main() -> None: From dcca5351c9185bf8c568615782ffb28319a6539d Mon Sep 17 00:00:00 2001 From: Claudio Lucisano <43884655+Claudiocli@users.noreply.github.com> Date: Sat, 15 Oct 2022 00:45:12 +0200 Subject: [PATCH 038/368] Added astronomical_length_scale_conversion.py (#7183) --- .../astronomical_length_scale_conversion.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 conversions/astronomical_length_scale_conversion.py diff --git a/conversions/astronomical_length_scale_conversion.py b/conversions/astronomical_length_scale_conversion.py new file mode 100644 index 000000000..804d82487 --- /dev/null +++ 
b/conversions/astronomical_length_scale_conversion.py @@ -0,0 +1,104 @@ +""" +Conversion of length units. +Available Units: +Metre, Kilometre, Megametre, Gigametre, +Terametre, Petametre, Exametre, Zettametre, Yottametre + +USAGE : +-> Import this file into their respective project. +-> Use the function length_conversion() for conversion of length units. +-> Parameters : + -> value : The number of from units you want to convert + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert + +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter +-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer +-> Wikipedia reference: https://en.wikipedia.org/wiki/Orders_of_magnitude_(length) +""" + +UNIT_SYMBOL = { + "meter": "m", + "kilometer": "km", + "megametre": "Mm", + "gigametre": "Gm", + "terametre": "Tm", + "petametre": "Pm", + "exametre": "Em", + "zettametre": "Zm", + "yottametre": "Ym", +} +# Exponent of the factor(meter) +METRIC_CONVERSION = { + "m": 0, + "km": 3, + "Mm": 6, + "Gm": 9, + "Tm": 12, + "Pm": 15, + "Em": 18, + "Zm": 21, + "Ym": 24, +} + + +def length_conversion(value: float, from_type: str, to_type: str) -> float: + """ + Conversion between astronomical length units. + + >>> length_conversion(1, "meter", "kilometer") + 0.001 + >>> length_conversion(1, "meter", "megametre") + 1e-06 + >>> length_conversion(1, "gigametre", "meter") + 1000000000 + >>> length_conversion(1, "gigametre", "terametre") + 0.001 + >>> length_conversion(1, "petametre", "terametre") + 1000 + >>> length_conversion(1, "petametre", "exametre") + 0.001 + >>> length_conversion(1, "terametre", "zettametre") + 1e-09 + >>> length_conversion(1, "yottametre", "zettametre") + 1000 + >>> length_conversion(4, "wrongUnit", "inch") + Traceback (most recent call last): + ... + ValueError: Invalid 'from_type' value: 'wrongUnit'. 
+ Conversion abbreviations are: m, km, Mm, Gm, Tm, Pm, Em, Zm, Ym + """ + + from_sanitized = from_type.lower().strip("s") + to_sanitized = to_type.lower().strip("s") + + from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized) + to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized) + + if from_sanitized not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'from_type' value: {from_type!r}.\n" + f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" + ) + if to_sanitized not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'to_type' value: {to_type!r}.\n" + f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" + ) + from_exponent = METRIC_CONVERSION[from_sanitized] + to_exponent = METRIC_CONVERSION[to_sanitized] + exponent = 1 + + if from_exponent > to_exponent: + exponent = from_exponent - to_exponent + else: + exponent = -(to_exponent - from_exponent) + + return value * pow(10, exponent) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 6e69181d1f592a08806717058720bf63e241eef2 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 02:07:03 +0100 Subject: [PATCH 039/368] refactor: Replace `list()` and `dict()` calls with literals (#7198) --- data_structures/binary_tree/binary_search_tree.py | 2 +- data_structures/heap/heap_generic.py | 2 +- data_structures/trie/trie.py | 2 +- graphs/frequent_pattern_graph_miner.py | 2 +- maths/greedy_coin_change.py | 2 +- other/davisb_putnamb_logemannb_loveland.py | 4 ++-- project_euler/problem_107/sol1.py | 2 +- searches/tabu_search.py | 6 +++--- sorts/msd_radix_sort.py | 4 ++-- strings/aho_corasick.py | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index b9af23dc8..51a651be0 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -160,7 +160,7 @@ def postorder(curr_node): """ postOrder (left, right, self) """ - node_list = list() + node_list = [] if curr_node is not None: node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node] return node_list diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py index e7831cd45..b4d7019f4 100644 --- a/data_structures/heap/heap_generic.py +++ b/data_structures/heap/heap_generic.py @@ -9,7 +9,7 @@ class Heap: def __init__(self, key: Callable | None = None) -> None: # Stores actual heap items. - self.arr: list = list() + self.arr: list = [] # Stores indexes of each item for supporting updates and deletion. self.pos_map: dict = {} # Stores current size of heap. 
diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py index 162d08d1d..46b93a499 100644 --- a/data_structures/trie/trie.py +++ b/data_structures/trie/trie.py @@ -8,7 +8,7 @@ longest word)) lookup time making it an optimal approach when space is not an is class TrieNode: def __init__(self) -> None: - self.nodes: dict[str, TrieNode] = dict() # Mapping from char to TrieNode + self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode self.is_leaf = False def insert_many(self, words: list[str]) -> None: diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index a5ecbe6e8..1d26702a4 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -54,7 +54,7 @@ def get_frequency_table(edge_array): Returns Frequency Table """ distinct_edge = get_distinct_edge(edge_array) - frequency_table = dict() + frequency_table = {} for item in distinct_edge: bit = get_bitcode(edge_array, item) diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py index 5233ee1cb..29c2f1803 100644 --- a/maths/greedy_coin_change.py +++ b/maths/greedy_coin_change.py @@ -74,7 +74,7 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: # Driver Code if __name__ == "__main__": - denominations = list() + denominations = [] value = "0" if ( diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 03d60a9a1..3110515d5 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -199,7 +199,7 @@ def find_pure_symbols( {'A1': True, 'A2': False, 'A3': True, 'A5': False} """ pure_symbols = [] - assignment: dict[str, bool | None] = dict() + assignment: dict[str, bool | None] = {} literals = [] for clause in clauses: @@ -264,7 +264,7 @@ def find_unit_clauses( n_count += 1 if f_count == len(clause) - 1 and n_count == 1: unit_symbols.append(sym) - assignment: dict[str, bool | None] = dict() + assignment: dict[str, bool | None] = {} for i in unit_symbols: symbol = i[:2] assignment[symbol] = len(i) == 2 diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 048cf033d..b3f5685b9 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -100,7 +100,7 @@ def solution(filename: str = "p107_network.txt") -> int: script_dir: str = os.path.abspath(os.path.dirname(__file__)) network_file: str = os.path.join(script_dir, filename) adjacency_matrix: list[list[str]] - edges: dict[EdgeT, int] = dict() + edges: dict[EdgeT, int] = {} data: list[str] edge1: int edge2: int diff --git a/searches/tabu_search.py b/searches/tabu_search.py index 45ce19d46..3e1728286 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -51,7 +51,7 @@ def generate_neighbours(path): with open(path) as f: for line in f: if line.split()[0] not in dict_of_neighbours: - _list = list() + _list = [] _list.append([line.split()[1], line.split()[2]]) dict_of_neighbours[line.split()[0]] = _list else: @@ -59,7 +59,7 @@ def generate_neighbours(path): [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: - _list = list() + _list = [] _list.append([line.split()[0], line.split()[2]]) dict_of_neighbours[line.split()[1]] = _list else: @@ -206,7 +206,7 @@ def tabu_search( """ count = 1 solution = first_solution - tabu_list = list() + tabu_list = [] best_cost = distance_of_first_solution best_solution_ever = solution diff --git a/sorts/msd_radix_sort.py 
b/sorts/msd_radix_sort.py index 3cdec4bd0..7430fc5a6 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -52,8 +52,8 @@ def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]: if bit_position == 0 or len(list_of_ints) in [0, 1]: return list_of_ints - zeros = list() - ones = list() + zeros = [] + ones = [] # Split numbers based on bit at bit_position from the right for number in list_of_ints: if (number >> (bit_position - 1)) & 1: diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index b9a6a8072..2d2f562df 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -5,7 +5,7 @@ from collections import deque class Automaton: def __init__(self, keywords: list[str]): - self.adlist: list[dict] = list() + self.adlist: list[dict] = [] self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) From 70b60dc3231e1df72622db64f9b97fef772181e5 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 15 Oct 2022 12:07:59 +0530 Subject: [PATCH 040/368] chore: remove inactive user from CODEOWNERS (#7205) * chore: remove inactive user from CODEOWNERS * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/CODEOWNERS | 6 +++--- DIRECTORY.md | 11 +++++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fdce879f8..abf99ab22 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -35,7 +35,7 @@ # /divide_and_conquer/ -/dynamic_programming/ @Kush1101 +# /dynamic_programming/ # /file_transfer/ @@ -59,7 +59,7 @@ # /machine_learning/ -/maths/ @Kush1101 +# /maths/ # /matrix/ @@ -69,7 +69,7 @@ # /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest -/project_euler/ @dhruvmanila @Kush1101 +/project_euler/ @dhruvmanila # /quantum/ diff --git a/DIRECTORY.md b/DIRECTORY.md index 2786e1f82..239dafa65 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -52,6 +52,7 @@ * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra + * [Norgate](boolean_algebra/norgate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) ## Cellular Automata @@ -121,6 +122,7 @@ * [Pooling Functions](computer_vision/pooling_functions.py) ## Conversions + * [Astronomical Length Scale Conversion](conversions/astronomical_length_scale_conversion.py) * [Binary To Decimal](conversions/binary_to_decimal.py) * [Binary To Hexadecimal](conversions/binary_to_hexadecimal.py) * [Binary To Octal](conversions/binary_to_octal.py) @@ -140,6 +142,7 @@ * [Pressure Conversions](conversions/pressure_conversions.py) * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) * [Roman Numerals](conversions/roman_numerals.py) + * [Speed Conversions](conversions/speed_conversions.py) * [Temperature Conversions](conversions/temperature_conversions.py) * [Volume Conversions](conversions/volume_conversions.py) * [Weight Conversion](conversions/weight_conversion.py) @@ -448,6 +451,7 @@ * [Random Forest Classifier](machine_learning/random_forest_classifier.py) * [Random Forest Regressor](machine_learning/random_forest_regressor.py) * [Scoring Functions](machine_learning/scoring_functions.py) + * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) @@ -586,9 +590,11 @@ * [Two Sum](maths/two_sum.py) * [Ugly 
Numbers](maths/ugly_numbers.py) * [Volume](maths/volume.py) + * [Weird Number](maths/weird_number.py) * [Zellers Congruence](maths/zellers_congruence.py) ## Matrix + * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Matrix Class](matrix/matrix_class.py) @@ -854,8 +860,6 @@ * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 * [Sol1](project_euler/problem_102/sol1.py) - * Problem 104 - * [Sol](project_euler/problem_104/sol.py) * Problem 107 * [Sol1](project_euler/problem_107/sol1.py) * Problem 109 @@ -1010,6 +1014,7 @@ * [Alternative String Arrange](strings/alternative_string_arrange.py) * [Anagrams](strings/anagrams.py) * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) + * [Barcode Validator](strings/barcode_validator.py) * [Boyer Moore Search](strings/boyer_moore_search.py) * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) @@ -1039,6 +1044,7 @@ * [Reverse Letters](strings/reverse_letters.py) * [Reverse Long Words](strings/reverse_long_words.py) * [Reverse Words](strings/reverse_words.py) + * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) @@ -1073,6 +1079,7 @@ * [Instagram Pic](web_programming/instagram_pic.py) * [Instagram Video](web_programming/instagram_video.py) * [Nasa Data](web_programming/nasa_data.py) + * [Open Google Results](web_programming/open_google_results.py) * [Random Anime Character](web_programming/random_anime_character.py) * [Recaptcha Verification](web_programming/recaptcha_verification.py) * [Reddit](web_programming/reddit.py) From 6be9500b2fb5d2e51432f9966e76a107dd604a41 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 15 Oct 2022 09:02:07 +0200 Subject: [PATCH 041/368] chore: remove checkbox in feature issue template (#7212) We do not assign issues in this repo Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/feature_request.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index bed3e8ab5..09a159b21 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -17,10 +17,3 @@ body: implementations. validations: required: true - - - type: checkboxes - attributes: - label: Would you like to work on this feature? - options: - - label: Yes, I want to work on this feature! 
- required: false From 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 13:58:09 +0100 Subject: [PATCH 042/368] feat: Binary tree node sum (#7020) (#7162) * feat: Binary tree node sum (#7020) * feat: Sum of all nodes in binary tree explanation (#7020) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/binary_tree_node_sum.py Co-authored-by: Christian Clauss * refactor: Change replace method with `__iter__` overriding (#7020) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + .../binary_tree/binary_tree_node_sum.py | 76 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 data_structures/binary_tree/binary_tree_node_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 239dafa65..92bed9cb4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -154,6 +154,7 @@ * [Binary Search Tree](data_structures/binary_tree/binary_search_tree.py) * [Binary Search Tree Recursive](data_structures/binary_tree/binary_search_tree_recursive.py) * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) + * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) diff --git a/data_structures/binary_tree/binary_tree_node_sum.py b/data_structures/binary_tree/binary_tree_node_sum.py new file mode 100644 index 000000000..5a13e74e3 --- /dev/null +++ b/data_structures/binary_tree/binary_tree_node_sum.py @@ -0,0 +1,76 @@ +""" +Sum of all nodes in a binary tree. + +Python implementation: + O(n) time complexity - Recurses through :meth:`depth_first_search` + with each element. + O(n) space complexity - At any point in time maximum number of stack + frames that could be in memory is `n` +""" + + +from __future__ import annotations + +from collections.abc import Iterator + + +class Node: + """ + A Node has a value variable and pointers to Nodes to its left and right. 
+ """ + + def __init__(self, value: int) -> None: + self.value = value + self.left: Node | None = None + self.right: Node | None = None + + +class BinaryTreeNodeSum: + r""" + The below tree looks like this + 10 + / \ + 5 -3 + / / \ + 12 8 0 + + >>> tree = Node(10) + >>> sum(BinaryTreeNodeSum(tree)) + 10 + + >>> tree.left = Node(5) + >>> sum(BinaryTreeNodeSum(tree)) + 15 + + >>> tree.right = Node(-3) + >>> sum(BinaryTreeNodeSum(tree)) + 12 + + >>> tree.left.left = Node(12) + >>> sum(BinaryTreeNodeSum(tree)) + 24 + + >>> tree.right.left = Node(8) + >>> tree.right.right = Node(0) + >>> sum(BinaryTreeNodeSum(tree)) + 32 + """ + + def __init__(self, tree: Node) -> None: + self.tree = tree + + def depth_first_search(self, node: Node | None) -> int: + if node is None: + return 0 + return node.value + ( + self.depth_first_search(node.left) + self.depth_first_search(node.right) + ) + + def __iter__(self) -> Iterator[int]: + yield self.depth_first_search(self.tree) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a652905b605ddcc43626072366d1130315801dc9 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 18:29:42 +0100 Subject: [PATCH 043/368] Add Flake8 comprehensions to pre-commit (#7235) * ci(pre-commit): Add ``flake8-comprehensions`` to ``pre-commit`` (#7233) * refactor: Fix ``flake8-comprehensions`` errors * fix: Replace `map` with generator (#7233) * fix: Cast `range` objects to `list` --- .pre-commit-config.yaml | 1 + ciphers/onepad_cipher.py | 2 +- ciphers/rail_fence_cipher.py | 2 +- data_structures/hashing/hash_table.py | 2 +- data_structures/linked_list/merge_two_lists.py | 2 +- dynamic_programming/fractional_knapsack.py | 2 +- graphs/bellman_ford.py | 2 +- graphs/frequent_pattern_graph_miner.py | 10 +++++----- hashes/enigma_machine.py | 8 ++++---- maths/primelib.py | 2 +- matrix/spiral_print.py | 4 ++-- other/davisb_putnamb_logemannb_loveland.py | 4 ++-- project_euler/problem_042/solution42.py | 4 ++-- project_euler/problem_052/sol1.py | 12 ++++++------ project_euler/problem_062/sol1.py | 2 +- project_euler/problem_067/sol1.py | 4 ++-- project_euler/problem_109/sol1.py | 2 +- project_euler/problem_551/sol1.py | 2 +- sorts/radix_sort.py | 2 +- strings/aho_corasick.py | 4 +--- 20 files changed, 36 insertions(+), 37 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d3ea9722f..345513565 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,6 +39,7 @@ repos: additional_dependencies: - flake8-bugbear - flake8-builtins + - flake8-comprehensions - pep8-naming - repo: https://github.com/pre-commit/mirrors-mypy diff --git a/ciphers/onepad_cipher.py b/ciphers/onepad_cipher.py index 3ace9b098..4bfe35b71 100644 --- a/ciphers/onepad_cipher.py +++ b/ciphers/onepad_cipher.py @@ -22,7 +22,7 @@ class Onepad: for i in range(len(key)): p = int((cipher[i] - (key[i]) ** 2) / key[i]) plain.append(chr(p)) - return "".join([i for i in plain]) + return "".join(plain) if __name__ == "__main__": diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py index cba593ca7..47ee7db89 100644 --- a/ciphers/rail_fence_cipher.py +++ b/ciphers/rail_fence_cipher.py @@ -72,7 +72,7 @@ def decrypt(input_string: str, key: int) -> str: counter = 0 for row in temp_grid: # fills in the characters splice = input_string[counter : counter + len(row)] - grid.append([character for character in splice]) + grid.append(list(splice)) counter += len(row) output_string = "" # reads as zigzag diff --git a/data_structures/hashing/hash_table.py 
b/data_structures/hashing/hash_table.py index 1cd71cc4b..607454c82 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -34,7 +34,7 @@ class HashTable: def _step_by_step(self, step_ord): print(f"step {step_ord}") - print([i for i in range(len(self.values))]) + print(list(range(len(self.values)))) print(self.values) def bulk_insert(self, values): diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 43dd46186..93cf7a7e1 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -19,7 +19,7 @@ class Node: class SortedLinkedList: def __init__(self, ints: Iterable[int]) -> None: self.head: Node | None = None - for i in reversed(sorted(ints)): + for i in sorted(ints, reverse=True): self.head = Node(i, self.head) def __iter__(self) -> Iterator[int]: diff --git a/dynamic_programming/fractional_knapsack.py b/dynamic_programming/fractional_knapsack.py index 6f7a2a08c..58976d40c 100644 --- a/dynamic_programming/fractional_knapsack.py +++ b/dynamic_programming/fractional_knapsack.py @@ -8,7 +8,7 @@ def frac_knapsack(vl, wt, w, n): 240.0 """ - r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)) + r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True) vl, wt = [i[0] for i in r], [i[1] for i in r] acc = list(accumulate(wt)) k = bisect(acc, w) diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index eb2cd25bf..9ac8bae85 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -58,7 +58,7 @@ if __name__ == "__main__": V = int(input("Enter number of vertices: ").strip()) E = int(input("Enter number of edges: ").strip()) - graph: list[dict[str, int]] = [dict() for j in range(E)] + graph: list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 1d26702a4..87d5605a0 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -155,12 +155,12 @@ def construct_graph(cluster, nodes): cluster[max(cluster.keys()) + 1] = "Header" graph = {} for i in x: - if tuple(["Header"]) in graph: - graph[tuple(["Header"])].append(x[i]) + if (["Header"],) in graph: + graph[(["Header"],)].append(x[i]) else: - graph[tuple(["Header"])] = [x[i]] + graph[(["Header"],)] = [x[i]] for i in x: - graph[tuple(x[i])] = [["Header"]] + graph[(x[i],)] = [["Header"]] i = 1 while i < max(cluster) - 1: create_edge(nodes, graph, cluster, i) @@ -186,7 +186,7 @@ def find_freq_subgraph_given_support(s, cluster, graph): """ k = int(s / 100 * (len(cluster) - 1)) for i in cluster[k].keys(): - my_dfs(graph, tuple(cluster[k][i]), tuple(["Header"])) + my_dfs(graph, tuple(cluster[k][i]), (["Header"],)) def freq_subgraphs_edge_list(paths): diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index 0194f7da7..d95437d12 100644 --- a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -1,8 +1,8 @@ alphabets = [chr(i) for i in range(32, 126)] -gear_one = [i for i in range(len(alphabets))] -gear_two = [i for i in range(len(alphabets))] -gear_three = [i for i in range(len(alphabets))] -reflector = [i for i in reversed(range(len(alphabets)))] +gear_one = list(range(len(alphabets))) +gear_two = list(range(len(alphabets))) +gear_three = list(range(len(alphabets))) +reflector = list(reversed(range(len(alphabets)))) code = [] gear_one_pos = gear_two_pos = gear_three_pos = 0 
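The hunks in this patch all share one shape: flake8-comprehensions flags comprehensions and list()/dict() calls that only restate what a literal or a plain builtin call already expresses. As a rough standalone sketch of the before/after forms applied above (the values here are made up for illustration; only the patterns mirror the hunks in this commit):

# Illustrative only: each pair mirrors a rewrite applied in this patch.

# Empty containers: prefer the literal over calling the constructor.
zeros = list()   # before
zeros = []       # after

# An identity comprehension over range() is just list(range(...)).
gear_one = [i for i in range(10)]   # before
gear_one = list(range(10))          # after

# sorted() already returns a list, so wrapping it in list() is redundant.
pairs = list(sorted(zip([3, 1, 2], "abc"), key=lambda x: x[0]))  # before
pairs = sorted(zip([3, 1, 2], "abc"), key=lambda x: x[0])        # after

# reversed(sorted(...)) can be expressed with the reverse flag instead.
ints = [5, 2, 9]
descending = list(reversed(sorted(ints)))   # before
descending = sorted(ints, reverse=True)     # after

# str.join() accepts any iterable, so the wrapping comprehension is unneeded.
plain = ["h", "i"]
text = "".join([c for c in plain])  # before
text = "".join(plain)               # after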
diff --git a/maths/primelib.py b/maths/primelib.py index eb72a9f8a..9586227ea 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -89,7 +89,7 @@ def sieve_er(n): assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N - begin_list = [x for x in range(2, n + 1)] + begin_list = list(range(2, n + 1)) ans = [] # this list will be returns. diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 2441f05d1..0cf732d60 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -9,7 +9,7 @@ This problem has been solved through recursive way. def check_matrix(matrix: list[list[int]]) -> bool: # must be - matrix = list(list(row) for row in matrix) + matrix = [list(row) for row in matrix] if matrix and isinstance(matrix, list): if isinstance(matrix[0], list): prev_len = 0 @@ -44,7 +44,7 @@ def spiral_print_clockwise(a: list[list[int]]) -> None: 7 """ if check_matrix(a) and len(a) > 0: - a = list(list(row) for row in a) + a = [list(row) for row in a] mat_row = len(a) if isinstance(a[0], list): mat_col = len(a[0]) diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 3110515d5..a1bea5b39 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -317,7 +317,7 @@ def dpll_algorithm( if p: tmp_model = model tmp_model[p] = value - tmp_symbols = [i for i in symbols] + tmp_symbols = list(symbols) if p in tmp_symbols: tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) @@ -329,7 +329,7 @@ def dpll_algorithm( if p: tmp_model = model tmp_model[p] = value - tmp_symbols = [i for i in symbols] + tmp_symbols = list(symbols) if p in tmp_symbols: tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index 6d22a8dfb..c0fb2ad50 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -33,11 +33,11 @@ def solution(): with open(words_file_path) as f: words = f.readline() - words = list(map(lambda word: word.strip('"'), words.strip("\r\n").split(","))) + words = [word.strip('"') for word in words.strip("\r\n").split(",")] words = list( filter( lambda word: word in TRIANGULAR_NUMBERS, - map(lambda word: sum(map(lambda x: ord(x) - 64, word)), words), + (sum(ord(x) - 64 for x in word) for word in words), ) ) return len(words) diff --git a/project_euler/problem_052/sol1.py b/project_euler/problem_052/sol1.py index df5c46ae0..21acfb633 100644 --- a/project_euler/problem_052/sol1.py +++ b/project_euler/problem_052/sol1.py @@ -21,12 +21,12 @@ def solution(): while True: if ( - sorted(list(str(i))) - == sorted(list(str(2 * i))) - == sorted(list(str(3 * i))) - == sorted(list(str(4 * i))) - == sorted(list(str(5 * i))) - == sorted(list(str(6 * i))) + sorted(str(i)) + == sorted(str(2 * i)) + == sorted(str(3 * i)) + == sorted(str(4 * i)) + == sorted(str(5 * i)) + == sorted(str(6 * i)) ): return i diff --git a/project_euler/problem_062/sol1.py b/project_euler/problem_062/sol1.py index 0c9baf880..3efdb3513 100644 --- a/project_euler/problem_062/sol1.py +++ b/project_euler/problem_062/sol1.py @@ -55,7 +55,7 @@ def get_digits(num: int) -> str: >>> get_digits(123) '0166788' """ - return "".join(sorted(list(str(num**3)))) + return "".join(sorted(str(num**3))) if __name__ == "__main__": diff --git a/project_euler/problem_067/sol1.py 
b/project_euler/problem_067/sol1.py index 527d4dc59..ab305684d 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -28,8 +28,8 @@ def solution(): with open(triangle) as f: triangle = f.readlines() - a = map(lambda x: x.rstrip("\r\n").split(" "), triangle) - a = list(map(lambda x: list(map(int, x)), a)) + a = (x.rstrip("\r\n").split(" ") for x in triangle) + a = [list(map(int, x)) for x in a] for i in range(1, len(a)): for j in range(len(a[i])): diff --git a/project_euler/problem_109/sol1.py b/project_euler/problem_109/sol1.py index 91c71eb9f..852f001d3 100644 --- a/project_euler/problem_109/sol1.py +++ b/project_euler/problem_109/sol1.py @@ -65,7 +65,7 @@ def solution(limit: int = 100) -> int: >>> solution(50) 12577 """ - singles: list[int] = [x for x in range(1, 21)] + [25] + singles: list[int] = list(range(1, 21)) + [25] doubles: list[int] = [2 * x for x in range(1, 21)] + [50] triples: list[int] = [3 * x for x in range(1, 21)] all_values: list[int] = singles + doubles + triples + [0] diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index c15445e4d..2cd75efbb 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -13,7 +13,7 @@ Find a(10^15) """ -ks = [k for k in range(2, 20 + 1)] +ks = range(2, 20 + 1) base = [10**k for k in range(ks[-1] + 1)] memo: dict[int, dict[int, list[list[int]]]] = {} diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index afe62bc7e..a496cdc0c 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -24,7 +24,7 @@ def radix_sort(list_of_ints: list[int]) -> list[int]: max_digit = max(list_of_ints) while placement <= max_digit: # declare and initialize empty buckets - buckets: list[list] = [list() for _ in range(RADIX)] + buckets: list[list] = [[] for _ in range(RADIX)] # split list_of_ints between the buckets for i in list_of_ints: tmp = int((i / placement) % RADIX) diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index 2d2f562df..25ed649ce 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -70,9 +70,7 @@ class Automaton: >>> A.search_in("whatever, err ... 
, wherever") {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]} """ - result: dict = ( - dict() - ) # returns a dict with keywords and list of its occurrences + result: dict = {} # returns a dict with keywords and list of its occurrences current_state = 0 for i in range(len(string)): while ( From 553624fcd4d7e8a4c561b182967291a1cc44ade9 Mon Sep 17 00:00:00 2001 From: Paul <56065602+ZeroDayOwl@users.noreply.github.com> Date: Sat, 15 Oct 2022 23:39:27 +0600 Subject: [PATCH 044/368] Add algorithm for Casimir Effect (#7141) * Add algorithm for Casimir Effect * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the line length * Fix the line length * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Import math module and use Pi * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update doctest results * from math import pi Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/casimir_effect.py | 121 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 physics/casimir_effect.py diff --git a/physics/casimir_effect.py b/physics/casimir_effect.py new file mode 100644 index 000000000..ee8a6c1eb --- /dev/null +++ b/physics/casimir_effect.py @@ -0,0 +1,121 @@ +""" +Title : Finding the value of magnitude of either the Casimir force, the surface area +of one of the plates or distance between the plates provided that the other +two parameters are given. + +Description : In quantum field theory, the Casimir effect is a physical force +acting on the macroscopic boundaries of a confined space which arises from the +quantum fluctuations of the field. It is a physical force exerted between separate +objects, which is due to neither charge, gravity, nor the exchange of particles, +but instead is due to resonance of all-pervasive energy fields in the intervening +space between the objects. Since the strength of the force falls off rapidly with +distance it is only measurable when the distance between the objects is extremely +small. On a submicron scale, this force becomes so strong that it becomes the +dominant force between uncharged conductors. + +Dutch physicist Hendrik B. G. Casimir first proposed the existence of the force, +and he formulated an experiment to detect it in 1948 while participating in research +at Philips Research Labs. The classic form of his experiment used a pair of uncharged +parallel metal plates in a vacuum, and successfully demonstrated the force to within +15% of the value he had predicted according to his theory. + +The Casimir force F for idealized, perfectly conducting plates of surface area +A square meter and placed at a distance of a meter apart with vacuum between +them is expressed as - + +F = - ((Reduced Planck Constant ℏ) * c * Pi^2 * A) / (240 * a^4) + +Here, the negative sign indicates the force is attractive in nature. For the ease +of calculation, only the magnitude of the force is considered. + +Source : +- https://en.wikipedia.org/wiki/Casimir_effect +- https://www.cs.mcgill.ca/~rwest/wikispeedia/wpcd/wp/c/Casimir_effect.htm +- Casimir, H. B. ; Polder, D. (1948) "The Influence of Retardation on the + London-van der Waals Forces", Physical Review, vol. 73, Issue 4, pp. 
360-372 +""" + +from __future__ import annotations + +from math import pi + +# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of +# Pi and the function +REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s + +SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1 + + +def casimir_force(force: float, area: float, distance: float) -> dict[str, float]: + + """ + Input Parameters + ---------------- + force -> Casimir Force : magnitude in Newtons + + area -> Surface area of each plate : magnitude in square meters + + distance -> Distance between two plates : distance in Meters + + Returns + ------- + result : dict name, value pair of the parameter having Zero as it's value + + Returns the value of one of the parameters specified as 0, provided the values of + other parameters are given. + >>> casimir_force(force = 0, area = 4, distance = 0.03) + {'force': 6.4248189174864216e-21} + + >>> casimir_force(force = 2635e-13, area = 0.0023, distance = 0) + {'distance': 1.0323056015031114e-05} + + >>> casimir_force(force = 2737e-21, area = 0, distance = 0.0023746) + {'area': 0.06688838837354052} + + >>> casimir_force(force = 3457e-12, area = 0, distance = 0) + Traceback (most recent call last): + ... + ValueError: One and only one argument must be 0 + + >>> casimir_force(force = 3457e-12, area = 0, distance = -0.00344) + Traceback (most recent call last): + ... + ValueError: Distance can not be negative + + >>> casimir_force(force = -912e-12, area = 0, distance = 0.09374) + Traceback (most recent call last): + ... + ValueError: Magnitude of force can not be negative + """ + + if (force, area, distance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if force < 0: + raise ValueError("Magnitude of force can not be negative") + if distance < 0: + raise ValueError("Distance can not be negative") + if area < 0: + raise ValueError("Area can not be negative") + if force == 0: + force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( + 240 * (distance) ** 4 + ) + return {"force": force} + elif area == 0: + area = (240 * force * (distance) ** 4) / ( + REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 + ) + return {"area": area} + elif distance == 0: + distance = ( + (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) + ) ** (1 / 4) + return {"distance": distance} + raise ValueError("One and only one argument must be 0") + + +# Run doctest +if __name__ == "__main__": + import doctest + + doctest.testmod() From c94e215c8dbdfe1f349eab5708be6b5f337b6ddd Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 23:51:23 +0100 Subject: [PATCH 045/368] types: Update binary search tree typehints (#7197) * types: Update binary search tree typehints * refactor: Don't return `self` in `:meth:insert` * test: Fix failing doctests * Apply suggestions from code review Co-authored-by: Dhruv Manilawala --- .../binary_tree/binary_search_tree.py | 77 +++++++++++-------- 1 file changed, 44 insertions(+), 33 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 51a651be0..fc60540a1 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -2,15 +2,18 @@ A binary search Tree """ +from collections.abc import Iterable +from typing import Any + class Node: - def __init__(self, value, parent): + def __init__(self, value: int | None = None): self.value = value - self.parent = parent # Added in order to delete a 
node easier - self.left = None - self.right = None + self.parent: Node | None = None # Added in order to delete a node easier + self.left: Node | None = None + self.right: Node | None = None - def __repr__(self): + def __repr__(self) -> str: from pprint import pformat if self.left is None and self.right is None: @@ -19,16 +22,16 @@ class Node: class BinarySearchTree: - def __init__(self, root=None): + def __init__(self, root: Node | None = None): self.root = root - def __str__(self): + def __str__(self) -> str: """ Return a string of all the Nodes using in order traversal """ return str(self.root) - def __reassign_nodes(self, node, new_children): + def __reassign_nodes(self, node: Node, new_children: Node | None) -> None: if new_children is not None: # reset its kids new_children.parent = node.parent if node.parent is not None: # reset its parent @@ -37,23 +40,27 @@ class BinarySearchTree: else: node.parent.left = new_children else: - self.root = new_children + self.root = None - def is_right(self, node): - return node == node.parent.right + def is_right(self, node: Node) -> bool: + if node.parent and node.parent.right: + return node == node.parent.right + return False - def empty(self): + def empty(self) -> bool: return self.root is None - def __insert(self, value): + def __insert(self, value) -> None: """ Insert a new node in Binary Search Tree with value label """ - new_node = Node(value, None) # create a new Node + new_node = Node(value) # create a new Node if self.empty(): # if Tree is empty self.root = new_node # set its root else: # Tree is not empty parent_node = self.root # from root + if parent_node is None: + return None while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: @@ -69,12 +76,11 @@ class BinarySearchTree: parent_node = parent_node.right new_node.parent = parent_node - def insert(self, *values): + def insert(self, *values) -> None: for value in values: self.__insert(value) - return self - def search(self, value): + def search(self, value) -> Node | None: if self.empty(): raise IndexError("Warning: Tree is empty! 
please use another.") else: @@ -84,30 +90,35 @@ class BinarySearchTree: node = node.left if value < node.value else node.right return node - def get_max(self, node=None): + def get_max(self, node: Node | None = None) -> Node | None: """ We go deep on the right branch """ if node is None: + if self.root is None: + return None node = self.root + if not self.empty(): while node.right is not None: node = node.right return node - def get_min(self, node=None): + def get_min(self, node: Node | None = None) -> Node | None: """ We go deep on the left branch """ if node is None: node = self.root + if self.root is None: + return None if not self.empty(): node = self.root while node.left is not None: node = node.left return node - def remove(self, value): + def remove(self, value: int) -> None: node = self.search(value) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children @@ -120,18 +131,18 @@ class BinarySearchTree: tmp_node = self.get_max( node.left ) # Gets the max value of the left branch - self.remove(tmp_node.value) + self.remove(tmp_node.value) # type: ignore node.value = ( - tmp_node.value + tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure - def preorder_traverse(self, node): + def preorder_traverse(self, node: Node | None) -> Iterable: if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left) yield from self.preorder_traverse(node.right) - def traversal_tree(self, traversal_function=None): + def traversal_tree(self, traversal_function=None) -> Any: """ This function traversal the tree. You can pass a function to traversal the tree as needed by client code @@ -141,7 +152,7 @@ class BinarySearchTree: else: return traversal_function(self.root) - def inorder(self, arr: list, node: Node): + def inorder(self, arr: list, node: Node | None) -> None: """Perform an inorder traversal and append values of the nodes to a list named arr""" if node: @@ -151,12 +162,12 @@ class BinarySearchTree: def find_kth_smallest(self, k: int, node: Node) -> int: """Return the kth smallest element in a binary search tree""" - arr: list = [] + arr: list[int] = [] self.inorder(arr, node) # append all values to list using inorder traversal return arr[k - 1] -def postorder(curr_node): +def postorder(curr_node: Node | None) -> list[Node]: """ postOrder (left, right, self) """ @@ -166,7 +177,7 @@ def postorder(curr_node): return node_list -def binary_search_tree(): +def binary_search_tree() -> None: r""" Example 8 @@ -177,7 +188,8 @@ def binary_search_tree(): / \ / 4 7 13 - >>> t = BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7) + >>> t = BinarySearchTree() + >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) 8 3 1 6 4 7 10 14 13 >>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder))) @@ -206,8 +218,8 @@ def binary_search_tree(): print("The value -1 doesn't exist") if not t.empty(): - print("Max Value: ", t.get_max().value) - print("Min Value: ", t.get_min().value) + print("Max Value: ", t.get_max().value) # type: ignore + print("Min Value: ", t.get_min().value) # type: ignore for i in testlist: t.remove(i) @@ -217,5 +229,4 @@ def binary_search_tree(): if __name__ == "__main__": import doctest - doctest.testmod() - # binary_search_tree() + doctest.testmod(verbose=True) From 04698538d816fc5f70c850e8b89c6d1f5599fa84 Mon Sep 17 00:00:00 2001 From: CenTdemeern1 Date: Sat, 15 Oct 2022 22:25:38 
-0700 Subject: [PATCH 046/368] Misc fixes across multiple algorithms (#6912) Source: Snyk code quality Add scikit-fuzzy to requirements Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- compression/huffman.py | 2 +- data_structures/linked_list/is_palindrome.py | 2 +- .../filters/local_binary_pattern.py | 2 +- fuzzy_logic/fuzzy_operations.py | 6 +----- graphs/dijkstra_algorithm.py | 4 ++-- .../directed_and_undirected_(weighted)_graph.py | 7 ------- hashes/hamming_code.py | 3 +-- linear_algebra/src/test_linear_algebra.py | 2 +- maths/extended_euclidean_algorithm.py | 5 +++-- maths/jaccard_similarity.py | 15 ++++++++------- matrix/matrix_class.py | 2 +- project_euler/problem_001/sol7.py | 4 +--- project_euler/problem_042/solution42.py | 11 +++++------ project_euler/problem_067/sol1.py | 8 ++++++-- project_euler/problem_089/sol1.py | 5 +++-- requirements.txt | 2 +- scheduling/first_come_first_served.py | 4 ++-- scheduling/multi_level_feedback_queue.py | 2 +- web_programming/emails_from_url.py | 2 +- 19 files changed, 40 insertions(+), 48 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index d5d78b753..f619ed82c 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -31,7 +31,7 @@ def parse_file(file_path: str) -> list[Letter]: c = f.read(1) if not c: break - chars[c] = chars[c] + 1 if c in chars.keys() else 1 + chars[c] = chars[c] + 1 if c in chars else 1 return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq) diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index acc87c1c2..ec19e99f7 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -55,7 +55,7 @@ def is_palindrome_dict(head): d = {} pos = 0 while head: - if head.val in d.keys(): + if head.val in d: d[head.val].append(pos) else: d[head.val] = [pos] diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py index e73aa59bf..e92e554a3 100644 --- a/digital_image_processing/filters/local_binary_pattern.py +++ b/digital_image_processing/filters/local_binary_pattern.py @@ -60,7 +60,7 @@ def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) ) -if __name__ == "main": +if __name__ == "__main__": # Reading the image and converting it to grayscale. 
image = cv2.imread( diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py index fbaca9421..0786ef8b0 100644 --- a/fuzzy_logic/fuzzy_operations.py +++ b/fuzzy_logic/fuzzy_operations.py @@ -8,11 +8,7 @@ Python: - 3.5 """ import numpy as np - -try: - import skfuzzy as fuzz -except ImportError: - fuzz = None +import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 122821a37..1845dad05 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -89,13 +89,13 @@ class Graph: # Edge going from node u to v and v to u with weight w # u (w)-> v, v (w) -> u # Check if u already in graph - if u in self.adjList.keys(): + if u in self.adjList: self.adjList[u].append((v, w)) else: self.adjList[u] = [(v, w)] # Assuming undirected graph - if v in self.adjList.keys(): + if v in self.adjList: self.adjList[v].append((u, w)) else: self.adjList[v] = [(u, w)] diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index 5cfa9e13e..43a72b89e 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -226,9 +226,6 @@ class DirectedGraph: break else: return True - # TODO:The following code is unreachable. - anticipating_nodes.add(stack[len_stack_minus_one]) - len_stack_minus_one -= 1 if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) @@ -454,10 +451,6 @@ class Graph: break else: return True - # TODO: the following code is unreachable - # is this meant to be called in the else ? - anticipating_nodes.add(stack[len_stack_minus_one]) - len_stack_minus_one -= 1 if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index a62d092a1..481a67507 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -79,8 +79,7 @@ def emitter_converter(size_par, data): ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] """ if size_par + len(data) <= 2**size_par - (len(data) - 1): - print("ERROR - size of parity don't match with size of data") - exit(0) + raise ValueError("size of parity don't match with size of data") data_out = [] parity = [] diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 97c06cb44..50d079572 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -89,7 +89,7 @@ class Test(unittest.TestCase): """ test for global function zero_vector() """ - self.assertTrue(str(zero_vector(10)).count("0") == 10) + self.assertEqual(str(zero_vector(10)).count("0"), 10) def test_unit_basis_vector(self) -> None: """ diff --git a/maths/extended_euclidean_algorithm.py b/maths/extended_euclidean_algorithm.py index 72afd40aa..c54909e19 100644 --- a/maths/extended_euclidean_algorithm.py +++ b/maths/extended_euclidean_algorithm.py @@ -75,11 +75,12 @@ def main(): """Call Extended Euclidean Algorithm.""" if len(sys.argv) < 3: print("2 integer arguments required") - exit(1) + return 1 a = int(sys.argv[1]) b = int(sys.argv[2]) print(extended_euclidean_algorithm(a, b)) + return 0 if __name__ == "__main__": - main() + raise SystemExit(main()) diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index 77f4b90ea..b299a8147 100644 --- a/maths/jaccard_similarity.py +++ 
b/maths/jaccard_similarity.py @@ -14,7 +14,7 @@ Jaccard similarity is widely used with MinHashing. """ -def jaccard_similariy(set_a, set_b, alternative_union=False): +def jaccard_similarity(set_a, set_b, alternative_union=False): """ Finds the jaccard similarity between two sets. Essentially, its intersection over union. @@ -35,18 +35,18 @@ def jaccard_similariy(set_a, set_b, alternative_union=False): Examples: >>> set_a = {'a', 'b', 'c', 'd', 'e'} >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'} - >>> jaccard_similariy(set_a, set_b) + >>> jaccard_similarity(set_a, set_b) 0.375 - >>> jaccard_similariy(set_a, set_a) + >>> jaccard_similarity(set_a, set_a) 1.0 - >>> jaccard_similariy(set_a, set_a, True) + >>> jaccard_similarity(set_a, set_a, True) 0.5 >>> set_a = ['a', 'b', 'c', 'd', 'e'] >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i') - >>> jaccard_similariy(set_a, set_b) + >>> jaccard_similarity(set_a, set_b) 0.375 """ @@ -67,14 +67,15 @@ def jaccard_similariy(set_a, set_b, alternative_union=False): if alternative_union: union = len(set_a) + len(set_b) + return len(intersection) / union else: union = set_a + [element for element in set_b if element not in set_a] + return len(intersection) / len(union) return len(intersection) / len(union) if __name__ == "__main__": - set_a = {"a", "b", "c", "d", "e"} set_b = {"c", "d", "e", "f", "h", "i"} - print(jaccard_similariy(set_a, set_b)) + print(jaccard_similarity(set_a, set_b)) diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 6495bd8fc..8b6fefa21 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -286,7 +286,7 @@ class Matrix: # MATRIX OPERATIONS def __eq__(self, other: object) -> bool: if not isinstance(other, Matrix): - raise TypeError("A Matrix can only be compared with another Matrix") + return NotImplemented return self.rows == other.rows def __ne__(self, other: object) -> bool: diff --git a/project_euler/problem_001/sol7.py b/project_euler/problem_001/sol7.py index 8f5d1977f..6ada70c12 100644 --- a/project_euler/problem_001/sol7.py +++ b/project_euler/problem_001/sol7.py @@ -26,9 +26,7 @@ def solution(n: int = 1000) -> int: result = 0 for i in range(n): - if i % 3 == 0: - result += i - elif i % 5 == 0: + if i % 3 == 0 or i % 5 == 0: result += i return result diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index c0fb2ad50..f8a54e40e 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -34,12 +34,11 @@ def solution(): words = f.readline() words = [word.strip('"') for word in words.strip("\r\n").split(",")] - words = list( - filter( - lambda word: word in TRIANGULAR_NUMBERS, - (sum(ord(x) - 64 for x in word) for word in words), - ) - ) + words = [ + word + for word in [sum(ord(x) - 64 for x in word) for word in words] + if word in TRIANGULAR_NUMBERS + ] return len(words) diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index ab305684d..f20c206cc 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -28,8 +28,12 @@ def solution(): with open(triangle) as f: triangle = f.readlines() - a = (x.rstrip("\r\n").split(" ") for x in triangle) - a = [list(map(int, x)) for x in a] + a = [] + for line in triangle: + numbers_from_line = [] + for number in line.strip().split(" "): + numbers_from_line.append(int(number)) + a.append(numbers_from_line) for i in range(1, len(a)): for j in range(len(a[i])): diff --git a/project_euler/problem_089/sol1.py 
b/project_euler/problem_089/sol1.py index 1c4e2600f..83609cd23 100644 --- a/project_euler/problem_089/sol1.py +++ b/project_euler/problem_089/sol1.py @@ -125,8 +125,9 @@ def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int: savings = 0 - file1 = open(os.path.dirname(__file__) + roman_numerals_filename) - lines = file1.readlines() + with open(os.path.dirname(__file__) + roman_numerals_filename) as file1: + lines = file1.readlines() + for line in lines: original = line.strip() num = parse_roman_numerals(original) diff --git a/requirements.txt b/requirements.txt index 0fbc1cc4b..b14a3eb01 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ pandas pillow qiskit requests -# scikit-fuzzy # Causing broken builds +scikit-fuzzy sklearn statsmodels sympy diff --git a/scheduling/first_come_first_served.py b/scheduling/first_come_first_served.py index c5f61720f..06cdb8ddf 100644 --- a/scheduling/first_come_first_served.py +++ b/scheduling/first_come_first_served.py @@ -79,7 +79,7 @@ if __name__ == "__main__": # ensure that we actually have processes if len(processes) == 0: print("Zero amount of processes") - exit() + raise SystemExit(0) # duration time of all processes duration_times = [19, 8, 9] @@ -87,7 +87,7 @@ if __name__ == "__main__": # ensure we can match each id to a duration time if len(duration_times) != len(processes): print("Unable to match all id's with their duration time") - exit() + raise SystemExit(0) # get the waiting times and the turnaround times waiting_times = calculate_waiting_times(duration_times) diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py index a3ba1b340..abee3c85c 100644 --- a/scheduling/multi_level_feedback_queue.py +++ b/scheduling/multi_level_feedback_queue.py @@ -276,7 +276,7 @@ if __name__ == "__main__": queue = deque([P1, P2, P3, P4]) if len(time_slices) != number_of_queues - 1: - exit() + raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])}) diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index afaee5bbe..074ef878c 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -93,7 +93,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]: except ValueError: pass except ValueError: - exit(-1) + raise SystemExit(1) # Finally return a sorted list of email addresses with no duplicates. return sorted(valid_emails) From e7b6d2824a65985790d0044262f717898ffbeb4d Mon Sep 17 00:00:00 2001 From: Sagar Giri Date: Sun, 16 Oct 2022 16:43:29 +0900 Subject: [PATCH 047/368] Change to https. (#7277) * Change to https. * Revert the py_tf file. 
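One change in the preceding "Misc fixes across multiple algorithms" patch deserves a brief note: Matrix.__eq__ now returns NotImplemented for non-Matrix operands instead of raising TypeError. Returning NotImplemented lets Python try the other operand's reflected comparison and, failing that, fall back to identity, so a mixed-type == quietly evaluates to False rather than blowing up. A minimal sketch of that behaviour with a made-up class (not the repository's Matrix):

class Box:
    def __init__(self, rows):
        self.rows = rows

    def __eq__(self, other):
        if not isinstance(other, Box):
            # Defer to the other operand instead of raising TypeError.
            return NotImplemented
        return self.rows == other.rows


# Both directions evaluate to False instead of raising.
print(Box([[1]]) == "not a box")   # False
print("not a box" == Box([[1]]))   # False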
--- fractals/julia_sets.py | 2 +- fractals/sierpinski_triangle.py | 2 +- machine_learning/lstm/lstm_prediction.py | 2 +- machine_learning/sequential_minimum_optimization.py | 4 ++-- maths/matrix_exponentiation.py | 2 +- maths/test_prime_check.py | 2 +- physics/n_body_simulation.py | 4 ++-- strings/frequency_finder.py | 2 +- web_programming/crawl_google_results.py | 2 +- web_programming/crawl_google_scholar_citation.py | 2 +- web_programming/current_weather.py | 2 +- web_programming/giphy.py | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 28c675c75..35fdc45d0 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -12,7 +12,7 @@ The examples presented here are: https://en.wikipedia.org/wiki/File:Julia_z2%2B0,25.png - Other examples from https://en.wikipedia.org/wiki/Julia_set - An exponential map Julia set, ambiantly homeomorphic to the examples in -http://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html +https://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html and https://ddd.uab.cat/pub/pubmat/02141493v43n1/02141493v43n1p27.pdf diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index 8be2897c1..084f6661f 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -24,7 +24,7 @@ Usage: - $python sierpinski_triangle.py Credits: This code was written by editing the code from -http://www.riannetrujillo.com/blog/python-fractal/ +https://www.riannetrujillo.com/blog/python-fractal/ """ import sys diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index 6fd3cf291..74197c46a 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -1,7 +1,7 @@ """ Create a Long Short Term Memory (LSTM) network model An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: - * http://colah.github.io/posts/2015-08-Understanding-LSTMs + * https://colah.github.io/posts/2015-08-Understanding-LSTMs * https://en.wikipedia.org/wiki/Long_short-term_memory """ import numpy as np diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index fb4b35f31..40adca7e0 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -28,7 +28,7 @@ Usage: Reference: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf - http://web.cs.iastate.edu/~honavar/smo-svm.pdf + https://web.cs.iastate.edu/~honavar/smo-svm.pdf """ @@ -43,7 +43,7 @@ from sklearn.datasets import make_blobs, make_circles from sklearn.preprocessing import StandardScaler CANCER_DATASET_URL = ( - "http://archive.ics.uci.edu/ml/machine-learning-databases/" + "https://archive.ics.uci.edu/ml/machine-learning-databases/" "breast-cancer-wisconsin/wdbc.data" ) diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py index 033ceb3f2..7c37151c8 100644 --- a/maths/matrix_exponentiation.py +++ b/maths/matrix_exponentiation.py @@ -5,7 +5,7 @@ import timeit """ Matrix Exponentiation is a technique to solve linear recurrences in logarithmic time. 
You read more about it here: -http://zobayer.blogspot.com/2010/11/matrix-exponentiation.html +https://zobayer.blogspot.com/2010/11/matrix-exponentiation.html https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/ """ diff --git a/maths/test_prime_check.py b/maths/test_prime_check.py index b6389684a..3ea3b2f1f 100644 --- a/maths/test_prime_check.py +++ b/maths/test_prime_check.py @@ -1,6 +1,6 @@ """ Minimalist file that allows pytest to find and run the Test unittest. For details, see: -http://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery +https://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery """ from .prime_check import Test diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 2f8153782..e62e1de62 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -8,7 +8,7 @@ velocity and position brought about by these forces. Softening is used to preven numerical divergences when a particle comes too close to another (and the force goes to infinity). (Description adapted from https://en.wikipedia.org/wiki/N-body_simulation ) -(See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) +(See also https://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) """ @@ -258,7 +258,7 @@ def example_1() -> BodySystem: Example 1: figure-8 solution to the 3-body-problem This example can be seen as a test of the implementation: given the right initial conditions, the bodies should move in a figure-8. - (initial conditions taken from http://www.artcompsci.org/vol_1/v1_web/node56.html) + (initial conditions taken from https://www.artcompsci.org/vol_1/v1_web/node56.html) >>> body_system = example_1() >>> len(body_system) 3 diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 7024be17b..19f97afbb 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -2,7 +2,7 @@ import string -# frequency taken from http://en.wikipedia.org/wiki/Letter_frequency +# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency english_letter_freq = { "E": 12.70, "T": 9.06, diff --git a/web_programming/crawl_google_results.py b/web_programming/crawl_google_results.py index a33a3f3bb..1f5e6d319 100644 --- a/web_programming/crawl_google_results.py +++ b/web_programming/crawl_google_results.py @@ -21,4 +21,4 @@ if __name__ == "__main__": if link.text == "Maps": webbrowser.open(link.get("href")) else: - webbrowser.open(f"http://google.com{link.get('href')}") + webbrowser.open(f"https://google.com{link.get('href')}") diff --git a/web_programming/crawl_google_scholar_citation.py b/web_programming/crawl_google_scholar_citation.py index d023380c0..f92a3d139 100644 --- a/web_programming/crawl_google_scholar_citation.py +++ b/web_programming/crawl_google_scholar_citation.py @@ -29,4 +29,4 @@ if __name__ == "__main__": "year": 2018, "hl": "en", } - print(get_citation("http://scholar.google.com/scholar_lookup", params=params)) + print(get_citation("https://scholar.google.com/scholar_lookup", params=params)) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index e043b4384..3ed4c8a95 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -1,7 +1,7 @@ import requests APPID = "" # <-- Put your OpenWeatherMap appid here! 
-URL_BASE = "http://api.openweathermap.org/data/2.5/" +URL_BASE = "https://api.openweathermap.org/data/2.5/" def current_weather(q: str = "Chicago", appid: str = APPID) -> dict: diff --git a/web_programming/giphy.py b/web_programming/giphy.py index dc8c6be08..a5c3f8f74 100644 --- a/web_programming/giphy.py +++ b/web_programming/giphy.py @@ -10,7 +10,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list: Get a list of URLs of GIFs based on a given query.. """ formatted_query = "+".join(query.split()) - url = f"http://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" + url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" gifs = requests.get(url).json()["data"] return [gif["url"] for gif in gifs] From 77764116217708933bdc65b29801092fa291398e Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Sun, 16 Oct 2022 02:47:54 -0500 Subject: [PATCH 048/368] Create q_full_adder.py (#6735) * Create q_full_adder.py This is for the #Hacktoberfest. This circuit is the quantum full adder. I saw that in the repo is the half adder so I decided to build the full adder to complete the set of adders. I hope that this is enough to be consider a contribution. Best, Kevin * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Erase the unused numpy library * Create the doctest. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doctest for negative numbers, float, etc. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- quantum/q_full_adder.py | 112 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 quantum/q_full_adder.py diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py new file mode 100644 index 000000000..597efb834 --- /dev/null +++ b/quantum/q_full_adder.py @@ -0,0 +1,112 @@ +""" +Build the quantum full adder (QFA) for any sum of +two quantum registers and one carry in. This circuit +is designed using the Qiskit framework. This +experiment run in IBM Q simulator with 1000 shots. +. +References: +https://www.quantum-inspire.com/kbase/full-adder/ +""" + +import math + +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def quantum_full_adder( + input_1: int = 1, input_2: int = 1, carry_in: int = 1 +) -> qiskit.result.counts.Counts: + """ + # >>> q_full_adder(inp_1, inp_2, cin) + # the inputs can be 0/1 for qubits in define + # values, or can be in a superposition of both + # states with hadamard gate using the input value 2. + # result for default values: {11: 1000} + qr_0: ──■────■──────────────■── + │ ┌─┴─┐ ┌─┴─┐ + qr_1: ──■──┤ X ├──■────■──┤ X ├ + │ └───┘ │ ┌─┴─┐└───┘ + qr_2: ──┼─────────■──┤ X ├───── + ┌─┴─┐ ┌─┴─┐└───┘ + qr_3: ┤ X ├─────┤ X ├────────── + └───┘ └───┘ + cr: 2/═════════════════════════ + Args: + input_1: input 1 for the circuit. + input_2: input 2 for the circuit. + carry_in: carry in for the circuit. + Returns: + qiskit.result.counts.Counts: sum result counts. + >>> quantum_full_adder(1,1,1) + {'11': 1000} + >>> quantum_full_adder(0,0,1) + {'01': 1000} + >>> quantum_full_adder(1,0,1) + {'10': 1000} + >>> quantum_full_adder(1,-4,1) + Traceback (most recent call last): + ... + ValueError: inputs must be positive. 
+ >>> quantum_full_adder('q',0,1) + Traceback (most recent call last): + ... + TypeError: inputs must be integers. + >>> quantum_full_adder(0.5,0,1) + Traceback (most recent call last): + ... + ValueError: inputs must be exact integers. + >>> quantum_full_adder(0,1,3) + Traceback (most recent call last): + ... + ValueError: inputs must be less or equal to 2. + """ + if (type(input_1) == str) or (type(input_2) == str) or (type(carry_in) == str): + raise TypeError("inputs must be integers.") + + if (input_1 < 0) or (input_2 < 0) or (carry_in < 0): + raise ValueError("inputs must be positive.") + + if ( + (math.floor(input_1) != input_1) + or (math.floor(input_2) != input_2) + or (math.floor(carry_in) != carry_in) + ): + raise ValueError("inputs must be exact integers.") + + if (input_1 > 2) or (input_2 > 2) or (carry_in > 2): + raise ValueError("inputs must be less or equal to 2.") + + # build registers + qr = QuantumRegister(4, "qr") + cr = ClassicalRegister(2, "cr") + # list the entries + entry = [input_1, input_2, carry_in] + + quantum_circuit = QuantumCircuit(qr, cr) + + for i in range(0, 3): + if entry[i] == 2: + quantum_circuit.h(i) # for hadamard entries + elif entry[i] == 1: + quantum_circuit.x(i) # for 1 entries + elif entry[i] == 0: + quantum_circuit.i(i) # for 0 entries + + # build the circuit + quantum_circuit.ccx(0, 1, 3) # ccx = toffoli gate + quantum_circuit.cx(0, 1) + quantum_circuit.ccx(1, 2, 3) + quantum_circuit.cx(1, 2) + quantum_circuit.cx(0, 1) + + quantum_circuit.measure([2, 3], cr) # measure the last two qbits + + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=1000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print(f"Total sum count for state is: {quantum_full_adder(1,1,1)}") From c6582b35bf8b8aba622c63096e3ab2f01aa36854 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sun, 16 Oct 2022 10:33:29 +0100 Subject: [PATCH 049/368] refactor: Move constants outside of variable scope (#7262) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala Co-authored-by: Christian Clauss --- ciphers/bifid.py | 15 ++++---- ciphers/brute_force_caesar_cipher.py | 14 ++++--- ciphers/polybius.py | 16 ++++---- compression/peak_signal_to_noise_ratio.py | 13 ++++--- conversions/binary_to_hexadecimal.py | 39 ++++++++++---------- conversions/decimal_to_any.py | 11 ++---- conversions/roman_numerals.py | 32 ++++++++-------- geodesy/haversine_distance.py | 7 ++-- geodesy/lamberts_ellipsoidal_distance.py | 8 ++-- hashes/adler32.py | 3 +- physics/n_body_simulation.py | 12 +++--- project_euler/problem_054/test_poker_hand.py | 6 +-- project_euler/problem_064/sol1.py | 8 ++-- project_euler/problem_097/sol1.py | 6 +-- project_euler/problem_125/sol1.py | 3 +- sorts/radix_sort.py | 3 +- web_programming/fetch_quotes.py | 8 ++-- 17 files changed, 107 insertions(+), 97 deletions(-) diff --git a/ciphers/bifid.py b/ciphers/bifid.py index 54d55574c..c005e051a 100644 --- a/ciphers/bifid.py +++ b/ciphers/bifid.py @@ -9,16 +9,17 @@ https://www.braingle.com/brainteasers/codes/bifid.php import numpy as np +SQUARE = [ + ["a", "b", "c", "d", "e"], + ["f", "g", "h", "i", "k"], + ["l", "m", "n", "o", "p"], + ["q", "r", "s", "t", "u"], + ["v", "w", "x", "y", "z"], +] + class BifidCipher: def __init__(self) -> None: - SQUARE = [ # noqa: N806 - ["a", "b", "c", "d", "e"], - ["f", "g", "h", "i", "k"], - ["l", "m", "n", "o", "p"], - ["q", "r", "s", "t", "u"], - ["v", "w", "x", "y", 
"z"], - ] self.SQUARE = np.array(SQUARE) def letter_to_numbers(self, letter: str) -> np.ndarray: diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py index cc97111e0..458d08db2 100644 --- a/ciphers/brute_force_caesar_cipher.py +++ b/ciphers/brute_force_caesar_cipher.py @@ -1,3 +1,6 @@ +import string + + def decrypt(message: str) -> None: """ >>> decrypt('TMDETUX PMDVU') @@ -28,16 +31,15 @@ def decrypt(message: str) -> None: Decryption using Key #24: VOFGVWZ ROFXW Decryption using Key #25: UNEFUVY QNEWV """ - LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # noqa: N806 - for key in range(len(LETTERS)): + for key in range(len(string.ascii_uppercase)): translated = "" for symbol in message: - if symbol in LETTERS: - num = LETTERS.find(symbol) + if symbol in string.ascii_uppercase: + num = string.ascii_uppercase.find(symbol) num = num - key if num < 0: - num = num + len(LETTERS) - translated = translated + LETTERS[num] + num = num + len(string.ascii_uppercase) + translated = translated + string.ascii_uppercase[num] else: translated = translated + symbol print(f"Decryption using Key #{key}: {translated}") diff --git a/ciphers/polybius.py b/ciphers/polybius.py index bf5d62f8d..c81c1d395 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -8,16 +8,18 @@ https://www.braingle.com/brainteasers/codes/polybius.php import numpy as np +SQUARE = [ + ["a", "b", "c", "d", "e"], + ["f", "g", "h", "i", "k"], + ["l", "m", "n", "o", "p"], + ["q", "r", "s", "t", "u"], + ["v", "w", "x", "y", "z"], +] + class PolybiusCipher: def __init__(self) -> None: - SQUARE = [ # noqa: N806 - ["a", "b", "c", "d", "e"], - ["f", "g", "h", "i", "k"], - ["l", "m", "n", "o", "p"], - ["q", "r", "s", "t", "u"], - ["v", "w", "x", "y", "z"], - ] + self.SQUARE = np.array(SQUARE) def letter_to_numbers(self, letter: str) -> np.ndarray: diff --git a/compression/peak_signal_to_noise_ratio.py b/compression/peak_signal_to_noise_ratio.py index 66b18b50b..284f2904a 100644 --- a/compression/peak_signal_to_noise_ratio.py +++ b/compression/peak_signal_to_noise_ratio.py @@ -11,14 +11,15 @@ import os import cv2 import numpy as np +PIXEL_MAX = 255.0 -def psnr(original: float, contrast: float) -> float: + +def peak_signal_to_noise_ratio(original: float, contrast: float) -> float: mse = np.mean((original - contrast) ** 2) if mse == 0: return 100 - PIXEL_MAX = 255.0 # noqa: N806 - PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) # noqa: N806 - return PSNR + + return 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) def main() -> None: @@ -34,11 +35,11 @@ def main() -> None: # Value expected: 29.73dB print("-- First Test --") - print(f"PSNR value is {psnr(original, contrast)} dB") + print(f"PSNR value is {peak_signal_to_noise_ratio(original, contrast)} dB") # # Value expected: 31.53dB (Wikipedia Example) print("\n-- Second Test --") - print(f"PSNR value is {psnr(original2, contrast2)} dB") + print(f"PSNR value is {peak_signal_to_noise_ratio(original2, contrast2)} dB") if __name__ == "__main__": diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py index 61f335a4c..89f7af696 100644 --- a/conversions/binary_to_hexadecimal.py +++ b/conversions/binary_to_hexadecimal.py @@ -1,3 +1,23 @@ +BITS_TO_HEX = { + "0000": "0", + "0001": "1", + "0010": "2", + "0011": "3", + "0100": "4", + "0101": "5", + "0110": "6", + "0111": "7", + "1000": "8", + "1001": "9", + "1010": "a", + "1011": "b", + "1100": "c", + "1101": "d", + "1110": "e", + "1111": "f", +} + + def bin_to_hexadecimal(binary_str: str) -> 
str: """ Converting a binary string into hexadecimal using Grouping Method @@ -17,25 +37,6 @@ def bin_to_hexadecimal(binary_str: str) -> str: ... ValueError: Empty string was passed to the function """ - BITS_TO_HEX = { # noqa: N806 - "0000": "0", - "0001": "1", - "0010": "2", - "0011": "3", - "0100": "4", - "0101": "5", - "0110": "6", - "0111": "7", - "1000": "8", - "1001": "9", - "1010": "a", - "1011": "b", - "1100": "c", - "1101": "d", - "1110": "e", - "1111": "f", - } - # Sanitising parameter binary_str = str(binary_str).strip() diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index e54fa154a..908c89e8f 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -1,5 +1,9 @@ """Convert a positive Decimal Number to Any Other Representation""" +from string import ascii_uppercase + +ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase} + def decimal_to_any(num: int, base: int) -> str: """ @@ -65,13 +69,6 @@ def decimal_to_any(num: int, base: int) -> str: raise ValueError("base must be >= 2") if base > 36: raise ValueError("base must be <= 36") - # fmt: off - ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', # noqa: N806, E501 - '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L', - '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R', - '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X', - '34': 'Y', '35': 'Z'} - # fmt: on new_value = "" mod = 0 div = 0 diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py index 960d41342..61215a0c0 100644 --- a/conversions/roman_numerals.py +++ b/conversions/roman_numerals.py @@ -1,3 +1,20 @@ +ROMAN = [ + (1000, "M"), + (900, "CM"), + (500, "D"), + (400, "CD"), + (100, "C"), + (90, "XC"), + (50, "L"), + (40, "XL"), + (10, "X"), + (9, "IX"), + (5, "V"), + (4, "IV"), + (1, "I"), +] + + def roman_to_int(roman: str) -> int: """ LeetCode No. 
13 Roman to Integer @@ -29,21 +46,6 @@ def int_to_roman(number: int) -> str: >>> all(int_to_roman(value) == key for key, value in tests.items()) True """ - ROMAN = [ # noqa: N806 - (1000, "M"), - (900, "CM"), - (500, "D"), - (400, "CD"), - (100, "C"), - (90, "XC"), - (50, "L"), - (40, "XL"), - (10, "X"), - (9, "IX"), - (5, "V"), - (4, "IV"), - (1, "I"), - ] result = [] for (arabic, roman) in ROMAN: (factor, number) = divmod(number, arabic) diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py index b601d2fd1..93e625770 100644 --- a/geodesy/haversine_distance.py +++ b/geodesy/haversine_distance.py @@ -1,5 +1,9 @@ from math import asin, atan, cos, radians, sin, sqrt, tan +AXIS_A = 6378137.0 +AXIS_B = 6356752.314245 +RADIUS = 6378137 + def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float: """ @@ -30,9 +34,6 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl """ # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 # noqa: N806 - AXIS_B = 6356752.314245 # noqa: N806 - RADIUS = 6378137 # noqa: N806 # Equation parameters # Equation https://en.wikipedia.org/wiki/Haversine_formula#Formulation flattening = (AXIS_A - AXIS_B) / AXIS_A diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index d36d39953..62ce59bb4 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -2,6 +2,10 @@ from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance +AXIS_A = 6378137.0 +AXIS_B = 6356752.314245 +EQUATORIAL_RADIUS = 6378137 + def lamberts_ellipsoidal_distance( lat1: float, lon1: float, lat2: float, lon2: float @@ -45,10 +49,6 @@ def lamberts_ellipsoidal_distance( # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 # noqa: N806 - AXIS_B = 6356752.314245 # noqa: N806 - EQUATORIAL_RADIUS = 6378137 # noqa: N806 - # Equation Parameters # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines flattening = (AXIS_A - AXIS_B) / AXIS_A diff --git a/hashes/adler32.py b/hashes/adler32.py index 80229f046..611ebc88b 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -8,6 +8,8 @@ source: https://en.wikipedia.org/wiki/Adler-32 """ +MOD_ADLER = 65521 + def adler32(plain_text: str) -> int: """ @@ -20,7 +22,6 @@ def adler32(plain_text: str) -> int: >>> adler32('go adler em all') 708642122 """ - MOD_ADLER = 65521 # noqa: N806 a = 1 b = 0 for plain_chr in plain_text: diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index e62e1de62..f6efb0fec 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -19,6 +19,12 @@ import random from matplotlib import animation from matplotlib import pyplot as plt +# Frame rate of the animation +INTERVAL = 20 + +# Time between time steps in seconds +DELTA_TIME = INTERVAL / 1000 + class Body: def __init__( @@ -219,12 +225,6 @@ def plot( Utility function to plot how the given body-system evolves over time. No doctest provided since this function does not have a return value. 
""" - # Frame rate of the animation - INTERVAL = 20 # noqa: N806 - - # Time between time steps in seconds - DELTA_TIME = INTERVAL / 1000 # noqa: N806 - fig = plt.figure() fig.canvas.set_window_title(title) ax = plt.axes( diff --git a/project_euler/problem_054/test_poker_hand.py b/project_euler/problem_054/test_poker_hand.py index bf5a20a8e..5735bfc37 100644 --- a/project_euler/problem_054/test_poker_hand.py +++ b/project_euler/problem_054/test_poker_hand.py @@ -185,12 +185,12 @@ def test_compare_random(hand, other, expected): def test_hand_sorted(): - POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] # noqa: N806 - list_copy = POKER_HANDS.copy() + poker_hands = [PokerHand(hand) for hand in SORTED_HANDS] + list_copy = poker_hands.copy() shuffle(list_copy) user_sorted = chain(sorted(list_copy)) for index, hand in enumerate(user_sorted): - assert hand == POKER_HANDS[index] + assert hand == poker_hands[index] def test_custom_sort_five_high_straight(): diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 9edd9a1e7..81ebcc7b7 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -33,13 +33,13 @@ def continuous_fraction_period(n: int) -> int: """ numerator = 0.0 denominator = 1.0 - ROOT = int(sqrt(n)) # noqa: N806 - integer_part = ROOT + root = int(sqrt(n)) + integer_part = root period = 0 - while integer_part != 2 * ROOT: + while integer_part != 2 * root: numerator = denominator * integer_part - numerator denominator = (n - numerator**2) / denominator - integer_part = int((ROOT + numerator) / denominator) + integer_part = int((root + numerator) / denominator) period += 1 return period diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index 94a43894e..2807e893d 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -34,9 +34,9 @@ def solution(n: int = 10) -> str: """ if not isinstance(n, int) or n < 0: raise ValueError("Invalid input") - MODULUS = 10**n # noqa: N806 - NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 # noqa: N806 - return str(NUMBER % MODULUS) + modulus = 10**n + number = 28433 * (pow(2, 7830457, modulus)) + 1 + return str(number % modulus) if __name__ == "__main__": diff --git a/project_euler/problem_125/sol1.py b/project_euler/problem_125/sol1.py index 1812df361..616f6f122 100644 --- a/project_euler/problem_125/sol1.py +++ b/project_euler/problem_125/sol1.py @@ -13,6 +13,8 @@ Find the sum of all the numbers less than 10^8 that are both palindromic and can be written as the sum of consecutive squares. """ +LIMIT = 10**8 + def is_palindrome(n: int) -> bool: """ @@ -35,7 +37,6 @@ def solution() -> int: Returns the sum of all numbers less than 1e8 that are both palindromic and can be written as the sum of consecutive squares. 
""" - LIMIT = 10**8 # noqa: N806 answer = set() first_square = 1 sum_squares = 5 diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index a496cdc0c..832b6162f 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -5,6 +5,8 @@ Source: https://en.wikipedia.org/wiki/Radix_sort """ from __future__ import annotations +RADIX = 10 + def radix_sort(list_of_ints: list[int]) -> list[int]: """ @@ -19,7 +21,6 @@ def radix_sort(list_of_ints: list[int]) -> list[int]: >>> radix_sort([1,100,10,1000]) == sorted([1,100,10,1000]) True """ - RADIX = 10 # noqa: N806 placement = 1 max_digit = max(list_of_ints) while placement <= max_digit: diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index a45f6ea0e..d557e2d95 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -10,15 +10,15 @@ import pprint import requests +API_ENDPOINT_URL = "https://zenquotes.io/api" + def quote_of_the_day() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/today/" # noqa: N806 - return requests.get(API_ENDPOINT_URL).json() + return requests.get(API_ENDPOINT_URL + "/today").json() def random_quotes() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/random/" # noqa: N806 - return requests.get(API_ENDPOINT_URL).json() + return requests.get(API_ENDPOINT_URL + "/random").json() if __name__ == "__main__": From d728f5a96bce1cb748d903de2f7dff2e2a2b54eb Mon Sep 17 00:00:00 2001 From: Advik Sharma <70201060+advik-student-dev@users.noreply.github.com> Date: Sun, 16 Oct 2022 06:28:10 -0700 Subject: [PATCH 050/368] Added some more comments to volume.py in maths folder (#7080) * Added some more comments added some more comments (to formulas which need it) which make the code more readable and understandable. might make a list of all the formulas on the top, later * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * The order changes the result * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix long line * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/volume.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index 97c06d7e1..a594e1b90 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -1,6 +1,7 @@ """ -Find Volumes of Various Shapes. -Wikipedia reference: https://en.wikipedia.org/wiki/Volume +Find the volume of various shapes. +* https://en.wikipedia.org/wiki/Volume +* https://en.wikipedia.org/wiki/Spherical_cap """ from __future__ import annotations @@ -30,8 +31,7 @@ def vol_cube(side_length: int | float) -> float: def vol_spherical_cap(height: float, radius: float) -> float: """ - Calculate the Volume of the spherical cap. - :return 1/3 pi * height ^ 2 * (3 * radius - height) + Calculate the volume of the spherical cap. 
>>> vol_spherical_cap(1, 2) 5.235987755982988 >>> vol_spherical_cap(1.6, 2.6) @@ -49,6 +49,7 @@ def vol_spherical_cap(height: float, radius: float) -> float: """ if height < 0 or radius < 0: raise ValueError("vol_spherical_cap() only accepts non-negative values") + # Volume is 1/3 pi * height squared * (3 * radius - height) return 1 / 3 * pi * pow(height, 2) * (3 * radius - height) @@ -263,6 +264,7 @@ def vol_sphere(radius: float) -> float: """ if radius < 0: raise ValueError("vol_sphere() only accepts non-negative values") + # Volume is 4/3 * pi * radius cubed return 4 / 3 * pi * pow(radius, 3) @@ -274,7 +276,7 @@ def vol_hemisphere(radius: float) -> float: >>> vol_hemisphere(1) 2.0943951023931953 >>> vol_hemisphere(7) - 718.3775201208659 + 718.377520120866 >>> vol_hemisphere(1.6) 8.57864233940253 >>> vol_hemisphere(0) @@ -286,7 +288,8 @@ def vol_hemisphere(radius: float) -> float: """ if radius < 0: raise ValueError("vol_hemisphere() only accepts non-negative values") - return 2 / 3 * pi * pow(radius, 3) + # Volume is radius cubed * pi * 2/3 + return pow(radius, 3) * pi * 2 / 3 def vol_circular_cylinder(radius: float, height: float) -> float: @@ -312,7 +315,8 @@ def vol_circular_cylinder(radius: float, height: float) -> float: """ if height < 0 or radius < 0: raise ValueError("vol_circular_cylinder() only accepts non-negative values") - return pi * pow(radius, 2) * height + # Volume is radius squared * height * pi + return pow(radius, 2) * height * pi def vol_hollow_circular_cylinder( @@ -344,6 +348,7 @@ def vol_hollow_circular_cylinder( ... ValueError: outer_radius must be greater than inner_radius """ + # Volume - (outer_radius squared - inner_radius squared) * pi * height if inner_radius < 0 or outer_radius < 0 or height < 0: raise ValueError( "vol_hollow_circular_cylinder() only accepts non-negative values" @@ -356,7 +361,7 @@ def vol_hollow_circular_cylinder( def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float: """Calculate the Volume of a Conical Frustum. Wikipedia reference: https://en.wikipedia.org/wiki/Frustum - :return 1/3 * pi * height * (radius_1^2 + radius_top^2 + radius_1 * radius_2) + >>> vol_conical_frustum(45, 7, 28) 48490.482608158454 >>> vol_conical_frustum(1, 1, 2) @@ -378,6 +383,8 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> floa ... ValueError: vol_conical_frustum() only accepts non-negative values """ + # Volume is 1/3 * pi * height * + # (radius_1 squared + radius_2 squared + radius_1 * radius_2) if radius_1 < 0 or radius_2 < 0 or height < 0: raise ValueError("vol_conical_frustum() only accepts non-negative values") return ( From b5b1eb2f00f942955217ef6968fe8016476690ba Mon Sep 17 00:00:00 2001 From: Sagar Giri Date: Sun, 16 Oct 2022 22:45:25 +0900 Subject: [PATCH 051/368] Fix broken links by PR #7277 (#7319) --- bit_manipulation/count_1s_brian_kernighan_method.py | 2 +- machine_learning/sequential_minimum_optimization.py | 1 - physics/n_body_simulation.py | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bit_manipulation/count_1s_brian_kernighan_method.py b/bit_manipulation/count_1s_brian_kernighan_method.py index d217af90b..e6d6d6534 100644 --- a/bit_manipulation/count_1s_brian_kernighan_method.py +++ b/bit_manipulation/count_1s_brian_kernighan_method.py @@ -1,7 +1,7 @@ def get_1s_count(number: int) -> int: """ Count the number of set bits in a 32 bit integer using Brian Kernighan's way. 
- Ref - http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan + Ref - https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan >>> get_1s_count(25) 3 >>> get_1s_count(37) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 40adca7e0..df5b03790 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -28,7 +28,6 @@ Usage: Reference: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf - https://web.cs.iastate.edu/~honavar/smo-svm.pdf """ diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index f6efb0fec..2b701283f 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -8,7 +8,7 @@ velocity and position brought about by these forces. Softening is used to preven numerical divergences when a particle comes too close to another (and the force goes to infinity). (Description adapted from https://en.wikipedia.org/wiki/N-body_simulation ) -(See also https://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) +(See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) """ @@ -258,7 +258,7 @@ def example_1() -> BodySystem: Example 1: figure-8 solution to the 3-body-problem This example can be seen as a test of the implementation: given the right initial conditions, the bodies should move in a figure-8. - (initial conditions taken from https://www.artcompsci.org/vol_1/v1_web/node56.html) + (initial conditions taken from http://www.artcompsci.org/vol_1/v1_web/node56.html) >>> body_system = example_1() >>> len(body_system) 3 From 6d20e2b750839d978873f6a89ce6d844ba3cc0b8 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 16 Oct 2022 20:50:48 +0100 Subject: [PATCH 052/368] Add `flake8-broken-line` to `pre-commit` (#7338) * ci: Add ``flake8-broken-line`` plugin to ``pre-commit`` * refactor: Fix errors from ``flake8-broken-line`` --- .pre-commit-config.yaml | 1 + project_euler/problem_008/sol1.py | 42 ++++++++++++++++--------------- project_euler/problem_008/sol3.py | 42 ++++++++++++++++--------------- 3 files changed, 45 insertions(+), 40 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 345513565..39af0f3b4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,6 +39,7 @@ repos: additional_dependencies: - flake8-bugbear - flake8-builtins + - flake8-broken-line - flake8-comprehensions - pep8-naming diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index 796080127..69dd1b473 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -33,26 +33,28 @@ greatest product. What is the value of this product? 
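For readers who want the gist of the approach before reading the solutions, here is a minimal sketch of the brute-force idea, kept separate from the patches themselves: slide a fixed-width window across the digit string and keep the largest product of its digits. The helper name, the use of math.prod and the shortened demo string are assumptions of this illustration, not code taken from the repository.

from math import prod


def greatest_window_product(digits: str, span: int = 13) -> int:
    # Take the product of every run of `span` consecutive digits and keep the maximum.
    return max(
        prod(int(digit) for digit in digits[i : i + span])
        for i in range(len(digits) - span + 1)
    )


# Tiny demo on a short slice of the 1000-digit number, with the span shrunk to 4.
print(greatest_window_product("73167176531330624919", span=4))  # prints 630 (7*6*5*3)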
import sys -N = """73167176531330624919225119674426574742355349194934\ -96983520312774506326239578318016984801869478851843\ -85861560789112949495459501737958331952853208805511\ -12540698747158523863050715693290963295227443043557\ -66896648950445244523161731856403098711121722383113\ -62229893423380308135336276614282806444486645238749\ -30358907296290491560440772390713810515859307960866\ -70172427121883998797908792274921901699720888093776\ -65727333001053367881220235421809751254540594752243\ -52584907711670556013604839586446706324415722155397\ -53697817977846174064955149290862569321978468622482\ -83972241375657056057490261407972968652414535100474\ -82166370484403199890008895243450658541227588666881\ -16427171479924442928230863465674813919123162824586\ -17866458359124566529476545682848912883142607690042\ -24219022671055626321111109370544217506941658960408\ -07198403850962455444362981230987879927244284909188\ -84580156166097919133875499200524063689912560717606\ -05886116467109405077541002256983155200055935729725\ -71636269561882670428252483600823257530420752963450""" +N = ( + "73167176531330624919225119674426574742355349194934" + "96983520312774506326239578318016984801869478851843" + "85861560789112949495459501737958331952853208805511" + "12540698747158523863050715693290963295227443043557" + "66896648950445244523161731856403098711121722383113" + "62229893423380308135336276614282806444486645238749" + "30358907296290491560440772390713810515859307960866" + "70172427121883998797908792274921901699720888093776" + "65727333001053367881220235421809751254540594752243" + "52584907711670556013604839586446706324415722155397" + "53697817977846174064955149290862569321978468622482" + "83972241375657056057490261407972968652414535100474" + "82166370484403199890008895243450658541227588666881" + "16427171479924442928230863465674813919123162824586" + "17866458359124566529476545682848912883142607690042" + "24219022671055626321111109370544217506941658960408" + "07198403850962455444362981230987879927244284909188" + "84580156166097919133875499200524063689912560717606" + "05886116467109405077541002256983155200055935729725" + "71636269561882670428252483600823257530420752963450" +) def solution(n: str = N) -> int: diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index 4b99d0ea6..c6081aa05 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -32,26 +32,28 @@ greatest product. What is the value of this product? 
""" import sys -N = """73167176531330624919225119674426574742355349194934\ -96983520312774506326239578318016984801869478851843\ -85861560789112949495459501737958331952853208805511\ -12540698747158523863050715693290963295227443043557\ -66896648950445244523161731856403098711121722383113\ -62229893423380308135336276614282806444486645238749\ -30358907296290491560440772390713810515859307960866\ -70172427121883998797908792274921901699720888093776\ -65727333001053367881220235421809751254540594752243\ -52584907711670556013604839586446706324415722155397\ -53697817977846174064955149290862569321978468622482\ -83972241375657056057490261407972968652414535100474\ -82166370484403199890008895243450658541227588666881\ -16427171479924442928230863465674813919123162824586\ -17866458359124566529476545682848912883142607690042\ -24219022671055626321111109370544217506941658960408\ -07198403850962455444362981230987879927244284909188\ -84580156166097919133875499200524063689912560717606\ -05886116467109405077541002256983155200055935729725\ -71636269561882670428252483600823257530420752963450""" +N = ( + "73167176531330624919225119674426574742355349194934" + "96983520312774506326239578318016984801869478851843" + "85861560789112949495459501737958331952853208805511" + "12540698747158523863050715693290963295227443043557" + "66896648950445244523161731856403098711121722383113" + "62229893423380308135336276614282806444486645238749" + "30358907296290491560440772390713810515859307960866" + "70172427121883998797908792274921901699720888093776" + "65727333001053367881220235421809751254540594752243" + "52584907711670556013604839586446706324415722155397" + "53697817977846174064955149290862569321978468622482" + "83972241375657056057490261407972968652414535100474" + "82166370484403199890008895243450658541227588666881" + "16427171479924442928230863465674813919123162824586" + "17866458359124566529476545682848912883142607690042" + "24219022671055626321111109370544217506941658960408" + "07198403850962455444362981230987879927244284909188" + "84580156166097919133875499200524063689912560717606" + "05886116467109405077541002256983155200055935729725" + "71636269561882670428252483600823257530420752963450" +) def str_eval(s: str) -> int: From 7f6e0b656f6362e452b11d06acde50b8b81cb31a Mon Sep 17 00:00:00 2001 From: SudhanshuSuman <51868273+SudhanshuSuman@users.noreply.github.com> Date: Mon, 17 Oct 2022 02:11:28 +0530 Subject: [PATCH 053/368] Corrected the directory of Fractional Knapsack algorithm (#7086) * Moved fractional knapsack from 'dynamic_programming' to 'greedy_methods' * Updated DIRECTORY.md --- DIRECTORY.md | 4 +- .../fractional_knapsack.py | 0 .../fractional_knapsack_2.py | 106 +++++++++--------- 3 files changed, 55 insertions(+), 55 deletions(-) rename {dynamic_programming => greedy_methods}/fractional_knapsack.py (100%) rename {dynamic_programming => greedy_methods}/fractional_knapsack_2.py (96%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 92bed9cb4..fae9a5183 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -279,8 +279,6 @@ * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) * [Fibonacci](dynamic_programming/fibonacci.py) * [Floyd Warshall](dynamic_programming/floyd_warshall.py) - * [Fractional Knapsack](dynamic_programming/fractional_knapsack.py) - * [Fractional Knapsack 2](dynamic_programming/fractional_knapsack_2.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) * [Knapsack](dynamic_programming/knapsack.py) @@ -396,6 
+394,8 @@ * [Test Min Spanning Tree Prim](graphs/tests/test_min_spanning_tree_prim.py) ## Greedy Methods + * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) + * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) ## Hashes diff --git a/dynamic_programming/fractional_knapsack.py b/greedy_methods/fractional_knapsack.py similarity index 100% rename from dynamic_programming/fractional_knapsack.py rename to greedy_methods/fractional_knapsack.py diff --git a/dynamic_programming/fractional_knapsack_2.py b/greedy_methods/fractional_knapsack_2.py similarity index 96% rename from dynamic_programming/fractional_knapsack_2.py rename to greedy_methods/fractional_knapsack_2.py index bd776723c..6d9ed2ec3 100644 --- a/dynamic_programming/fractional_knapsack_2.py +++ b/greedy_methods/fractional_knapsack_2.py @@ -1,53 +1,53 @@ -# https://en.wikipedia.org/wiki/Continuous_knapsack_problem -# https://www.guru99.com/fractional-knapsack-problem-greedy.html -# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93 - -from __future__ import annotations - - -def fractional_knapsack( - value: list[int], weight: list[int], capacity: int -) -> tuple[float, list[float]]: - """ - >>> value = [1, 3, 5, 7, 9] - >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1] - >>> fractional_knapsack(value, weight, 5) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, 15) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, 25) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, 26) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, -1) - (-90.0, [0, 0, 0, 0, -10.0]) - >>> fractional_knapsack([1, 3, 5, 7], weight, 30) - (16, [1, 1, 1, 1]) - >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack([], [], 30) - (0, []) - """ - index = list(range(len(value))) - ratio = [v / w for v, w in zip(value, weight)] - index.sort(key=lambda i: ratio[i], reverse=True) - - max_value: float = 0 - fractions: list[float] = [0] * len(value) - for i in index: - if weight[i] <= capacity: - fractions[i] = 1 - max_value += value[i] - capacity -= weight[i] - else: - fractions[i] = capacity / weight[i] - max_value += value[i] * capacity / weight[i] - break - - return max_value, fractions - - -if __name__ == "__main__": - import doctest - - doctest.testmod() +# https://en.wikipedia.org/wiki/Continuous_knapsack_problem +# https://www.guru99.com/fractional-knapsack-problem-greedy.html +# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93 + +from __future__ import annotations + + +def fractional_knapsack( + value: list[int], weight: list[int], capacity: int +) -> tuple[float, list[float]]: + """ + >>> value = [1, 3, 5, 7, 9] + >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1] + >>> fractional_knapsack(value, weight, 5) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, 15) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, 25) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, 26) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, -1) + (-90.0, [0, 0, 0, 0, -10.0]) + >>> fractional_knapsack([1, 3, 5, 7], weight, 30) + (16, [1, 1, 1, 1]) + >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack([], [], 30) + (0, []) + """ + index = list(range(len(value))) + ratio = [v / w for v, w in zip(value, weight)] + 
index.sort(key=lambda i: ratio[i], reverse=True) + + max_value: float = 0 + fractions: list[float] = [0] * len(value) + for i in index: + if weight[i] <= capacity: + fractions[i] = 1 + max_value += value[i] + capacity -= weight[i] + else: + fractions[i] = capacity / weight[i] + max_value += value[i] * capacity / weight[i] + break + + return max_value, fractions + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f15cc2f01c2a4124ff6dc0843c728a546f9d9f79 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 16 Oct 2022 21:50:11 +0100 Subject: [PATCH 054/368] Follow Flake8 pep3101 and remove modulo formatting (#7339) * ci: Add ``flake8-pep3101`` plugin to ``pre-commit`` * refactor: Remove all modulo string formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Remove ``flake8-pep3101`` plugin from ``pre-commit`` * revert: Revert to modulo formatting * refactor: Use f-string instead of `join` Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/elgamal_key_generator.py | 9 +++------ ciphers/rsa_key_generator.py | 3 +-- dynamic_programming/edit_distance.py | 4 ++-- genetic_algorithm/basic_string.py | 4 ++-- graphs/minimum_spanning_tree_boruvka.py | 2 +- machine_learning/linear_regression.py | 2 +- matrix/sherman_morrison.py | 6 +++--- neural_network/back_propagation_neural_network.py | 2 +- neural_network/convolution_neural_network.py | 2 +- 9 files changed, 15 insertions(+), 19 deletions(-) diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index 4d72128ae..17ba55c0d 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -41,22 +41,19 @@ def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( - '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n' + f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' "Use a different name or delete these files and re-run this program." - % (name, name) ) sys.exit() public_key, private_key = generate_key(key_size) print(f"\nWriting public key to file {name}_pubkey.txt...") with open(f"{name}_pubkey.txt", "w") as fo: - fo.write( - "%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3]) - ) + fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}") print(f"Writing private key to file {name}_privkey.txt...") with open(f"{name}_privkey.txt", "w") as fo: - fo.write("%d,%d" % (private_key[0], private_key[1])) + fo.write(f"{private_key[0]},{private_key[1]}") def main() -> None: diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index f64bc7dd0..2573ed013 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -37,9 +37,8 @@ def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( - '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n' + f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' "Use a different name or delete these files and re-run this program." 
- % (name, name) ) sys.exit() diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py index d63e559e3..fe23431a7 100644 --- a/dynamic_programming/edit_distance.py +++ b/dynamic_programming/edit_distance.py @@ -99,7 +99,7 @@ if __name__ == "__main__": S2 = input("Enter the second string: ").strip() print() - print("The minimum Edit Distance is: %d" % (solver.solve(S1, S2))) - print("The minimum Edit Distance is: %d" % (min_distance_bottom_up(S1, S2))) + print(f"The minimum Edit Distance is: {solver.solve(S1, S2)}") + print(f"The minimum Edit Distance is: {min_distance_bottom_up(S1, S2)}") print() print("*************** End of Testing Edit Distance DP Algorithm ***************") diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index bd7d80268..d2d305189 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -172,7 +172,7 @@ if __name__ == "__main__": " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm" "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\" ) + generation, population, target = basic(target_str, genes_list) print( - "\nGeneration: %s\nTotal Population: %s\nTarget: %s" - % basic(target_str, genes_list) + f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" ) diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 32548b2ec..6c72615cc 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -63,7 +63,7 @@ class Graph: for tail in self.adjacency: for head in self.adjacency[tail]: weight = self.adjacency[head][tail] - string += "%d -> %d == %d\n" % (head, tail, weight) + string += f"{head} -> {tail} == {weight}\n" return string.rstrip("\n") def get_edges(self): diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 85fdfb000..92ab91c01 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -82,7 +82,7 @@ def run_linear_regression(data_x, data_y): for i in range(0, iterations): theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta) error = sum_of_square_error(data_x, data_y, len_data, theta) - print("At Iteration %d - Error is %.5f " % (i + 1, error)) + print(f"At Iteration {i + 1} - Error is {error:.5f}") return theta diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 29c9b3381..39eddfed8 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -31,14 +31,14 @@ class Matrix: """ # Prefix - s = "Matrix consist of %d rows and %d columns\n" % (self.row, self.column) + s = f"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier max_element_length = 0 for row_vector in self.array: for obj in row_vector: max_element_length = max(max_element_length, len(str(obj))) - string_format_identifier = "%%%ds" % (max_element_length,) + string_format_identifier = f"%{max_element_length}s" # Make string and return def single_line(row_vector: list[float]) -> str: @@ -252,7 +252,7 @@ if __name__ == "__main__": v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f"u is {u}") print(f"v is {v}") - print("uv^T is %s" % (u * v.transpose())) + print(f"uv^T is {u * v.transpose()}") # Sherman Morrison print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}") diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 23b818b0f..cb47b8290 100644 --- 
a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -117,7 +117,7 @@ class BPNN: def summary(self): for i, layer in enumerate(self.layers[:]): - print("------- layer %d -------" % i) + print(f"------- layer {i} -------") print("weight.shape ", np.shape(layer.weight)) print("bias.shape ", np.shape(layer.bias)) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 9dfb6d091..bd0550212 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -219,7 +219,7 @@ class CNN: mse = 10000 while rp < n_repeat and mse >= error_accuracy: error_count = 0 - print("-------------Learning Time %d--------------" % rp) + print(f"-------------Learning Time {rp}--------------") for p in range(len(datas_train)): # print('------------Learning Image: %d--------------'%p) data_train = np.asmatrix(datas_train[p]) From a34b756fd40e5cdfb69abc06dcd42f5f1b5fa21e Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 16 Oct 2022 21:51:40 +0100 Subject: [PATCH 055/368] ci: Add ``yesqa`` (flake8-plugin) to ``pre-commit`` (#7340) --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39af0f3b4..aea82d12c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,6 +42,7 @@ repos: - flake8-broken-line - flake8-comprehensions - pep8-naming + - yesqa - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 From 0c7c5fa7b0161a7433467240155356c93ae106b8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Oct 2022 21:59:25 +0200 Subject: [PATCH 056/368] [pre-commit.ci] pre-commit autoupdate (#7387) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.0.0 → v3.1.0](https://github.com/asottile/pyupgrade/compare/v3.0.0...v3.1.0) - [github.com/codespell-project/codespell: v2.2.1 → v2.2.2](https://github.com/codespell-project/codespell/compare/v2.2.1...v2.2.2) * updating DIRECTORY.md * Fix typo discovered by codespell * Fix typo discovered by codespell * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 2 ++ .../local_weighted_learning/local_weighted_learning.md | 2 +- maths/is_square_free.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aea82d12c..5bdda50be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.0.0 + rev: v3.1.0 hooks: - id: pyupgrade args: @@ -55,14 +55,14 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/codespell-project/codespell - rev: v2.2.1 + rev: v2.2.2 hooks: - id: codespell args: - --ignore-words-list=ans,crate,damon,fo,followings,hist,iff,mater,secant,som,sur,tim,zar - - --skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt" exclude: | (?x)^( + ciphers/prehistoric_men.txt | 
strings/dictionary.txt | strings/words.txt | project_euler/problem_022/p022_names.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index fae9a5183..94ec42832 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -642,6 +642,7 @@ * [Tower Of Hanoi](other/tower_of_hanoi.py) ## Physics + * [Casimir Effect](physics/casimir_effect.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [N Body Simulation](physics/n_body_simulation.py) @@ -928,6 +929,7 @@ * [Deutsch Jozsa](quantum/deutsch_jozsa.py) * [Half Adder](quantum/half_adder.py) * [Not Gate](quantum/not_gate.py) + * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/local_weighted_learning.md index 5c7895e75..ef4dbc958 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.md +++ b/machine_learning/local_weighted_learning/local_weighted_learning.md @@ -29,7 +29,7 @@ This training phase is possible when data points are linear, but there again com So, here comes the role of non-parametric algorithm which doesn't compute predictions based on fixed set of params. Rather parameters $\theta$ are computed individually for each query point/data point x.
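To make this concrete, here is a minimal sketch of computing theta for a single query point, assuming a Gaussian kernel for the weights and the closed-form weighted least-squares solution; the function name, the bandwidth parameter tau and the toy data are assumptions of this illustration rather than code from the module being edited.

import numpy as np


def local_theta_sketch(
    x_train: np.ndarray, y_train: np.ndarray, query: np.ndarray, tau: float
) -> np.ndarray:
    # Gaussian kernel weights: training points close to the query dominate the fit.
    weights = np.diag(
        np.exp(-np.sum((x_train - query) ** 2, axis=1) / (2 * tau**2))
    )
    # Weighted normal equations, solved afresh for this one query point.
    return np.linalg.pinv(x_train.T @ weights @ x_train) @ (
        x_train.T @ weights @ y_train
    )


x_train = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # leading 1.0 acts as a bias
y_train = np.array([1.0, 2.0, 3.0])
theta = local_theta_sketch(x_train, y_train, query=np.array([1.0, 2.5]), tau=1.0)
print(theta, float(np.array([1.0, 2.5]) @ theta))  # local fit and its prediction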

-While Computing $\theta$ , a higher "preferance" is given to points in the vicinity of x than points farther from x. +While Computing $\theta$ , a higher preference is given to points in the vicinity of x than points farther from x. Cost Function J($\theta$) = $\sum_{i=1}^m$ $w^i$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$ diff --git a/maths/is_square_free.py b/maths/is_square_free.py index 8d83d95ff..4134398d2 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -15,7 +15,7 @@ def is_square_free(factors: list[int]) -> bool: False These are wrong but should return some value - it simply checks for repition in the numbers. + it simply checks for repetition in the numbers. >>> is_square_free([1, 3, 4, 'sd', 0.0]) True From 3448ae5cec868d4a03349cb952765e9abff41243 Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Tue, 18 Oct 2022 02:00:01 +0530 Subject: [PATCH 057/368] [Binary Tree] Different views of binary tree added (#6965) * Different views of binary tree added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * mypy errors resolved * doc test for remaining functions * Flake8 comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Example moved in if block * doctest cases added * Cases from if block removed * Update data_structures/binary_tree/diff_views_of_binary_tree.py Co-authored-by: Christian Clauss * Update data_structures/binary_tree/diff_views_of_binary_tree.py Co-authored-by: Christian Clauss * PR Comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flake8 warning resolved * Changes revered * flake8 issue resolved * Put the diagrams just above the doctests * Update diff_views_of_binary_tree.py * Update diff_views_of_binary_tree.py * I love mypy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/diff_views_of_binary_tree.py | 210 ++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 data_structures/binary_tree/diff_views_of_binary_tree.py diff --git a/data_structures/binary_tree/diff_views_of_binary_tree.py b/data_structures/binary_tree/diff_views_of_binary_tree.py new file mode 100644 index 000000000..3198d8065 --- /dev/null +++ b/data_structures/binary_tree/diff_views_of_binary_tree.py @@ -0,0 +1,210 @@ +r""" +Problem: Given root of a binary tree, return the: +1. binary-tree-right-side-view +2. binary-tree-left-side-view +3. binary-tree-top-side-view +4. binary-tree-bottom-side-view +""" + +from __future__ import annotations + +from collections import defaultdict +from dataclasses import dataclass + + +@dataclass +class TreeNode: + val: int + left: TreeNode | None = None + right: TreeNode | None = None + + +def make_tree() -> TreeNode: + """ + >>> make_tree().val + 3 + """ + return TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7))) + + +def binary_tree_right_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the right side view of binary tree. 
+ + 3 <- 3 + / \ + 9 20 <- 20 + / \ + 15 7 <- 7 + + >>> binary_tree_right_side_view(make_tree()) + [3, 20, 7] + >>> binary_tree_right_side_view(None) + [] + """ + + def depth_first_search( + root: TreeNode | None, depth: int, right_view: list[int] + ) -> None: + """ + A depth first search preorder traversal to append the values at + right side of tree. + """ + if not root: + return + + if depth == len(right_view): + right_view.append(root.val) + + depth_first_search(root.right, depth + 1, right_view) + depth_first_search(root.left, depth + 1, right_view) + + right_view: list = [] + if not root: + return right_view + + depth_first_search(root, 0, right_view) + return right_view + + +def binary_tree_left_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the left side view of binary tree. + + 3 -> 3 + / \ + 9 -> 9 20 + / \ + 15 -> 15 7 + + >>> binary_tree_left_side_view(make_tree()) + [3, 9, 15] + >>> binary_tree_left_side_view(None) + [] + """ + + def depth_first_search( + root: TreeNode | None, depth: int, left_view: list[int] + ) -> None: + """ + A depth first search preorder traversal to append the values + at left side of tree. + """ + if not root: + return + + if depth == len(left_view): + left_view.append(root.val) + + depth_first_search(root.left, depth + 1, left_view) + depth_first_search(root.right, depth + 1, left_view) + + left_view: list = [] + if not root: + return left_view + + depth_first_search(root, 0, left_view) + return left_view + + +def binary_tree_top_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the top side view of binary tree. + + 9 3 20 7 + ⬇ ⬇ ⬇ ⬇ + + 3 + / \ + 9 20 + / \ + 15 7 + + >>> binary_tree_top_side_view(make_tree()) + [9, 3, 20, 7] + >>> binary_tree_top_side_view(None) + [] + """ + + def breadth_first_search(root: TreeNode, top_view: list[int]) -> None: + """ + A breadth first search traversal with defaultdict ds to append + the values of tree from top view + """ + queue = [(root, 0)] + lookup = defaultdict(list) + + while queue: + first = queue.pop(0) + node, hd = first + + lookup[hd].append(node.val) + + if node.left: + queue.append((node.left, hd - 1)) + if node.right: + queue.append((node.right, hd + 1)) + + for pair in sorted(lookup.items(), key=lambda each: each[0]): + top_view.append(pair[1][0]) + + top_view: list = [] + if not root: + return top_view + + breadth_first_search(root, top_view) + return top_view + + +def binary_tree_bottom_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the bottom side view of binary tree + + 3 + / \ + 9 20 + / \ + 15 7 + ↑ ↑ ↑ ↑ + 9 15 20 7 + + >>> binary_tree_bottom_side_view(make_tree()) + [9, 15, 20, 7] + >>> binary_tree_bottom_side_view(None) + [] + """ + from collections import defaultdict + + def breadth_first_search(root: TreeNode, bottom_view: list[int]) -> None: + """ + A breadth first search traversal with defaultdict ds to append + the values of tree from bottom view + """ + queue = [(root, 0)] + lookup = defaultdict(list) + + while queue: + first = queue.pop(0) + node, hd = first + lookup[hd].append(node.val) + + if node.left: + queue.append((node.left, hd - 1)) + if node.right: + queue.append((node.right, hd + 1)) + + for pair in sorted(lookup.items(), key=lambda each: each[0]): + bottom_view.append(pair[1][-1]) + + bottom_view: list = [] + if not root: + return bottom_view + + breadth_first_search(root, bottom_view) + return bottom_view + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 49cd46acea37350c8c22488316f8cf3f5ea88925 
Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 18 Oct 2022 02:09:41 -0400 Subject: [PATCH 058/368] Update convolve function namespace (#7390) --- computer_vision/horn_schunck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py index 2a153d06d..b63e02682 100644 --- a/computer_vision/horn_schunck.py +++ b/computer_vision/horn_schunck.py @@ -12,7 +12,7 @@ from typing import SupportsIndex import numpy as np -from scipy.ndimage.filters import convolve +from scipy.ndimage import convolve def warp( From 6d1e009f35dd172ef51d484d0310919cdbab189d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 18 Oct 2022 05:57:03 -0400 Subject: [PATCH 059/368] Remove depreciated np.float (#7394) --- machine_learning/decision_tree.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 4a86e5322..7cd1b02c4 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -24,13 +24,13 @@ class DecisionTree: estimate the labels >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) - >>> test_prediction = np.float(6) + >>> test_prediction = float(6) >>> tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) True >>> test_labels = np.array([1,2,3]) - >>> test_prediction = np.float(2) + >>> test_prediction = float(2) >>> tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) @@ -145,11 +145,11 @@ class TestDecisionTree: @param prediction: a floating point value return value: helper_mean_squared_error_test calculates the mean squared error """ - squared_error_sum = np.float(0) + squared_error_sum = float(0) for label in labels: squared_error_sum += (label - prediction) ** 2 - return np.float(squared_error_sum / labels.size) + return float(squared_error_sum / labels.size) def main(): From 2ca695b0fe28519d3449106bff9f9004d93a0b3f Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Tue, 18 Oct 2022 23:35:18 +0530 Subject: [PATCH 060/368] [Matrix] Max area of island problem solved DFS algorithm (#6918) * Maximum area of island program added * Update matrix/max_area_of_island.py Co-authored-by: Caeden * Update matrix/max_area_of_island.py Co-authored-by: Caeden * Update matrix/max_area_of_island.py Co-authored-by: Caeden * Review's comment resolved * max area of island * PR Comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Test case fail fix * Grammer correction * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flake8 issue resolved * some variable name fix * Update matrix/max_area_of_island.py Co-authored-by: Caeden Perelli-Harris * Update matrix/max_area_of_island.py Co-authored-by: Caeden Perelli-Harris * PR, comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/max_area_of_island.py Co-authored-by: Christian Clauss * Update matrix/max_area_of_island.py Co-authored-by: Christian Clauss * PR, comments resolved * Update max_area_of_island.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Typo Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/max_area_of_island.py | 112 +++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 matrix/max_area_of_island.py diff --git a/matrix/max_area_of_island.py b/matrix/max_area_of_island.py new file mode 100644 index 000000000..40950c303 --- /dev/null +++ b/matrix/max_area_of_island.py @@ -0,0 +1,112 @@ +""" +Given an two dimensional binary matrix grid. An island is a group of 1's (representing +land) connected 4-directionally (horizontal or vertical.) You may assume all four edges +of the grid are surrounded by water. The area of an island is the number of cells with +a value 1 in the island. Return the maximum area of an island in a grid. If there is no +island, return 0. +""" + +matrix = [ + [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0], +] + + +def is_safe(row: int, col: int, rows: int, cols: int) -> bool: + """ + Checking whether coordinate (row, col) is valid or not. + + >>> is_safe(0, 0, 5, 5) + True + >>> is_safe(-1,-1, 5, 5) + False + """ + return 0 <= row < rows and 0 <= col < cols + + +def depth_first_search(row: int, col: int, seen: set, mat: list[list[int]]) -> int: + """ + Returns the current area of the island + + >>> depth_first_search(0, 0, set(), matrix) + 0 + """ + rows = len(mat) + cols = len(mat[0]) + if is_safe(row, col, rows, cols) and (row, col) not in seen and mat[row][col] == 1: + seen.add((row, col)) + return ( + 1 + + depth_first_search(row + 1, col, seen, mat) + + depth_first_search(row - 1, col, seen, mat) + + depth_first_search(row, col + 1, seen, mat) + + depth_first_search(row, col - 1, seen, mat) + ) + else: + return 0 + + +def find_max_area(mat: list[list[int]]) -> int: + """ + Finds the area of all islands and returns the maximum area. + + >>> find_max_area(matrix) + 6 + """ + seen: set = set() + + max_area = 0 + for row, line in enumerate(mat): + for col, item in enumerate(line): + if item == 1 and (row, col) not in seen: + # Maximizing the area + max_area = max(max_area, depth_first_search(row, col, seen, mat)) + return max_area + + +if __name__ == "__main__": + import doctest + + print(find_max_area(matrix)) # Output -> 6 + + """ + Explanation: + We are allowed to move in four directions (horizontal or vertical) so the possible + in a matrix if we are at x and y position the possible moving are + + Directions are [(x, y+1), (x, y-1), (x+1, y), (x-1, y)] but we need to take care of + boundary cases as well which are x and y can not be smaller than 0 and greater than + the number of rows and columns respectively. 
+ + Visualization + mat = [ + [0,0,A,0,0,0,0,B,0,0,0,0,0], + [0,0,0,0,0,0,0,B,B,B,0,0,0], + [0,C,C,0,D,0,0,0,0,0,0,0,0], + [0,C,0,0,D,D,0,0,E,0,E,0,0], + [0,C,0,0,D,D,0,0,E,E,E,0,0], + [0,0,0,0,0,0,0,0,0,0,E,0,0], + [0,0,0,0,0,0,0,F,F,F,0,0,0], + [0,0,0,0,0,0,0,F,F,0,0,0,0] + ] + + For visualization, I have defined the connected island with letters + by observation, we can see that + A island is of area 1 + B island is of area 4 + C island is of area 4 + D island is of area 5 + E island is of area 6 and + F island is of area 5 + + it has 6 unique islands of mentioned areas + and the maximum of all of them is 6 so we return 6. + """ + + doctest.testmod() From 5bfcab1aa4392e4e3f43927a7fbd8bf6c6815c88 Mon Sep 17 00:00:00 2001 From: Manish Kumar <73126278+ManishKumar219@users.noreply.github.com> Date: Wed, 19 Oct 2022 00:52:38 +0530 Subject: [PATCH 061/368] Create minmax.py (#7409) * Create minmax.py * Update minmax.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/minmax.py | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 backtracking/minmax.py diff --git a/backtracking/minmax.py b/backtracking/minmax.py new file mode 100644 index 000000000..9b87183cf --- /dev/null +++ b/backtracking/minmax.py @@ -0,0 +1,69 @@ +""" +Minimax helps to achieve maximum score in a game by checking all possible moves. + +""" +from __future__ import annotations + +import math + + +def minimax( + depth: int, node_index: int, is_max: bool, scores: list[int], height: float +) -> int: + """ + depth is current depth in game tree. + node_index is index of current node in scores[]. + scores[] contains the leaves of game tree. + height is maximum height of game tree. + + >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423] + >>> height = math.log(len(scores), 2) + >>> minimax(0, 0, True, scores, height) + 65 + >>> minimax(-1, 0, True, scores, height) + Traceback (most recent call last): + ... + ValueError: Depth cannot be less than 0 + >>> minimax(0, 0, True, [], 2) + Traceback (most recent call last): + ... 
+ ValueError: Scores cannot be empty + >>> scores = [3, 5, 2, 9, 12, 5, 23, 23] + >>> height = math.log(len(scores), 2) + >>> minimax(0, 0, True, scores, height) + 12 + """ + + if depth < 0: + raise ValueError("Depth cannot be less than 0") + + if not scores: + raise ValueError("Scores cannot be empty") + + if depth == height: + return scores[node_index] + + return ( + max( + minimax(depth + 1, node_index * 2, False, scores, height), + minimax(depth + 1, node_index * 2 + 1, False, scores, height), + ) + if is_max + else min( + minimax(depth + 1, node_index * 2, True, scores, height), + minimax(depth + 1, node_index * 2 + 1, True, scores, height), + ) + ) + + +def main() -> None: + scores = [90, 23, 6, 33, 21, 65, 123, 34423] + height = math.log(len(scores), 2) + print(f"Optimal value : {minimax(0, 0, True, scores, height)}") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From b90ec303989b864996e31e021863f8b2c8852054 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nadirhan=20=C5=9Eahin?= Date: Tue, 18 Oct 2022 22:55:43 +0300 Subject: [PATCH 062/368] Create combination_sum.py (#7403) * Create combination_sum.py * Update DIRECTORY.md * Adds doctests Co-authored-by: Christian Clauss * Update combination_sum.py * Update combination_sum.py Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + backtracking/combination_sum.py | 66 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 backtracking/combination_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 94ec42832..c1fad8d9d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -23,6 +23,7 @@ * [All Permutations](backtracking/all_permutations.py) * [All Subsequences](backtracking/all_subsequences.py) * [Coloring](backtracking/coloring.py) + * [Combination Sum](backtracking/combination_sum.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Minimax](backtracking/minimax.py) diff --git a/backtracking/combination_sum.py b/backtracking/combination_sum.py new file mode 100644 index 000000000..f555adb75 --- /dev/null +++ b/backtracking/combination_sum.py @@ -0,0 +1,66 @@ +""" +In the Combination Sum problem, we are given a list consisting of distinct integers. +We need to find all the combinations whose sum equals to target given. +We can use an element more than one. + +Time complexity(Average Case): O(n!) + +Constraints: +1 <= candidates.length <= 30 +2 <= candidates[i] <= 40 +All elements of candidates are distinct. +1 <= target <= 40 +""" + + +def backtrack( + candidates: list, path: list, answer: list, target: int, previous_index: int +) -> None: + """ + A recursive function that searches for possible combinations. Backtracks in case + of a bigger current combination value than the target value. + + Parameters + ---------- + previous_index: Last index from the previous search + target: The value we need to obtain by summing our integers in the path list. + answer: A list of possible combinations + path: Current combination + candidates: A list of integers we can use. 
+ """ + if target == 0: + answer.append(path.copy()) + else: + for index in range(previous_index, len(candidates)): + if target >= candidates[index]: + path.append(candidates[index]) + backtrack(candidates, path, answer, target - candidates[index], index) + path.pop(len(path) - 1) + + +def combination_sum(candidates: list, target: int) -> list: + """ + >>> combination_sum([2, 3, 5], 8) + [[2, 2, 2, 2], [2, 3, 3], [3, 5]] + >>> combination_sum([2, 3, 6, 7], 7) + [[2, 2, 3], [7]] + >>> combination_sum([-8, 2.3, 0], 1) + Traceback (most recent call last): + ... + RecursionError: maximum recursion depth exceeded in comparison + """ + path = [] # type: list[int] + answer = [] # type: list[int] + backtrack(candidates, path, answer, target, 0) + return answer + + +def main() -> None: + print(combination_sum([-8, 2.3, 0], 1)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From 80ff25ed38e62bcf2e51a4a51bf7bf8f9b03ea11 Mon Sep 17 00:00:00 2001 From: Sai Ganesh Manda <89340753+mvsg2@users.noreply.github.com> Date: Wed, 19 Oct 2022 17:13:26 +0530 Subject: [PATCH 063/368] Update gaussian_naive_bayes.py (#7406) * Update gaussian_naive_bayes.py Just adding in a final metric of accuracy to declare... * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/gaussian_naive_bayes.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py index 77e732662..7e9a8d7f6 100644 --- a/machine_learning/gaussian_naive_bayes.py +++ b/machine_learning/gaussian_naive_bayes.py @@ -1,7 +1,9 @@ # Gaussian Naive Bayes Example +import time + from matplotlib import pyplot as plt from sklearn.datasets import load_iris -from sklearn.metrics import plot_confusion_matrix +from sklearn.metrics import accuracy_score, plot_confusion_matrix from sklearn.model_selection import train_test_split from sklearn.naive_bayes import GaussianNB @@ -25,7 +27,9 @@ def main(): # Gaussian Naive Bayes nb_model = GaussianNB() - nb_model.fit(x_train, y_train) + time.sleep(2.9) + model_fit = nb_model.fit(x_train, y_train) + y_pred = model_fit.predict(x_test) # Predictions on the test set # Display Confusion Matrix plot_confusion_matrix( @@ -33,12 +37,16 @@ def main(): x_test, y_test, display_labels=iris["target_names"], - cmap="Blues", + cmap="Blues", # although, Greys_r has a better contrast... 
normalize="true", ) plt.title("Normalized Confusion Matrix - IRIS Dataset") plt.show() + time.sleep(1.8) + final_accuracy = 100 * accuracy_score(y_true=y_test, y_pred=y_pred) + print(f"The overall accuracy of the model is: {round(final_accuracy, 2)}%") + if __name__ == "__main__": main() From b8281d79ef6fdfa11bdd697be3f4a1ef7824cf7f Mon Sep 17 00:00:00 2001 From: Kuldeep Borkar <74557588+KuldeepBorkar@users.noreply.github.com> Date: Wed, 19 Oct 2022 17:16:56 +0530 Subject: [PATCH 064/368] Fixed a typo of 'a' and 'an' and used f string in print statement (#7398) --- boolean_algebra/norgate.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py index 1c341e8a7..2c27b80af 100644 --- a/boolean_algebra/norgate.py +++ b/boolean_algebra/norgate.py @@ -1,13 +1,15 @@ -""" A NOR Gate is a logic gate in boolean algebra which results to false(0) - if any of the input is 1, and True(1) if both the inputs are 0. - Following is the truth table of an NOR Gate: +""" +A NOR Gate is a logic gate in boolean algebra which results to false(0) +if any of the input is 1, and True(1) if both the inputs are 0. +Following is the truth table of a NOR Gate: | Input 1 | Input 2 | Output | | 0 | 0 | 1 | | 0 | 1 | 0 | | 1 | 0 | 0 | | 1 | 1 | 0 | + +Following is the code implementation of the NOR Gate """ -"""Following is the code implementation of the NOR Gate""" def nor_gate(input_1: int, input_2: int) -> int: @@ -30,11 +32,11 @@ def nor_gate(input_1: int, input_2: int) -> int: def main() -> None: print("Truth Table of NOR Gate:") - print("| Input 1 |", " Input 2 |", " Output |") - print("| 0 |", " 0 | ", nor_gate(0, 0), " |") - print("| 0 |", " 1 | ", nor_gate(0, 1), " |") - print("| 1 |", " 0 | ", nor_gate(1, 0), " |") - print("| 1 |", " 1 | ", nor_gate(1, 1), " |") + print("| Input 1 | Input 2 | Output |") + print(f"| 0 | 0 | {nor_gate(0, 0)} |") + print(f"| 0 | 1 | {nor_gate(0, 1)} |") + print(f"| 1 | 0 | {nor_gate(1, 0)} |") + print(f"| 1 | 1 | {nor_gate(1, 1)} |") if __name__ == "__main__": From 50da472ddcdc2d79d1ad325ec05cda3558802fda Mon Sep 17 00:00:00 2001 From: Kuldeep Borkar <74557588+KuldeepBorkar@users.noreply.github.com> Date: Wed, 19 Oct 2022 22:48:33 +0530 Subject: [PATCH 065/368] Implemented Gelu Function (#7368) * Implemented Gelu Function * Renamed file and added more description to function * Extended the name GELU * Update gaussian_error_linear_unit.py Co-authored-by: Christian Clauss --- maths/gaussian_error_linear_unit.py | 53 +++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 maths/gaussian_error_linear_unit.py diff --git a/maths/gaussian_error_linear_unit.py b/maths/gaussian_error_linear_unit.py new file mode 100644 index 000000000..7b5f87514 --- /dev/null +++ b/maths/gaussian_error_linear_unit.py @@ -0,0 +1,53 @@ +""" +This script demonstrates an implementation of the Gaussian Error Linear Unit function. +* https://en.wikipedia.org/wiki/Activation_function#Comparison_of_activation_functions + +The function takes a vector of K real numbers as input and returns x * sigmoid(1.702*x). +Gaussian Error Linear Unit (GELU) is a high-performing neural network activation +function. + +This script is inspired by a corresponding research paper. +* https://arxiv.org/abs/1606.08415 +""" + +import numpy as np + + +def sigmoid(vector: np.array) -> np.array: + """ + Mathematical function sigmoid takes a vector x of K real numbers as input and + returns 1/ (1 + e^-x). 
+ https://en.wikipedia.org/wiki/Sigmoid_function + + >>> sigmoid(np.array([-1.0, 1.0, 2.0])) + array([0.26894142, 0.73105858, 0.88079708]) + """ + return 1 / (1 + np.exp(-vector)) + + +def gaussian_error_linear_unit(vector: np.array) -> np.array: + """ + Implements the Gaussian Error Linear Unit (GELU) function + + Parameters: + vector (np.array): A numpy array of shape (1,n) + consisting of real values + + Returns: + gelu_vec (np.array): The input numpy array, after applying + gelu. + + Examples: + >>> gaussian_error_linear_unit(np.array([-1.0, 1.0, 2.0])) + array([-0.15420423, 0.84579577, 1.93565862]) + + >>> gaussian_error_linear_unit(np.array([-3])) + array([-0.01807131]) + """ + return vector * sigmoid(1.702 * vector) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2859d4bf3aa96737a4715c65d4a9051d9c62d24d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 19 Oct 2022 16:12:44 -0400 Subject: [PATCH 066/368] Remove references to depreciated QasmSimulator (#7417) * Fix typos * Replace depreciated QasmSimulator in Deutsch-Jozsa algorithm * Replace depreciated QasmSimulator in half adder algorithm * Replace depreciated QasmSimulator in not gate algorithm * Replace depreciated QasmSimulator in full adder algorithm * Simplify qiskit import * Make formatting more consistent * Replace depreciated QasmSimulator in quantum entanglement algorithm * Replace depreciated QasmSimulator in ripple adder algorithm * Replace depreciated QasmSimulator in qubit measure algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updating DIRECTORY.md * updating DIRECTORY.md * Remove qiskit import alias for clarity Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 ++++ quantum/deutsch_jozsa.py | 28 +++++++++++++++------------- quantum/half_adder.py | 14 +++++++------- quantum/not_gate.py | 14 ++++++++------ quantum/q_full_adder.py | 27 +++++++++++++-------------- quantum/quantum_entanglement.py | 6 +++--- quantum/ripple_adder_classic.py | 16 ++++++++-------- quantum/single_qubit_measure.py | 16 +++++++++------- 8 files changed, 67 insertions(+), 58 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c1fad8d9d..1fad28798 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -27,6 +27,7 @@ * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Minimax](backtracking/minimax.py) + * [Minmax](backtracking/minmax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) * [Rat In Maze](backtracking/rat_in_maze.py) @@ -157,6 +158,7 @@ * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) + * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) @@ -513,6 +515,7 @@ * [Gamma](maths/gamma.py) * [Gamma Recursive](maths/gamma_recursive.py) * [Gaussian](maths/gaussian.py) + * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Greatest Common 
Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) @@ -601,6 +604,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Matrix Class](matrix/matrix_class.py) * [Matrix Operation](matrix/matrix_operation.py) + * [Max Area Of Island](matrix/max_area_of_island.py) * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) * [Rotate Matrix](matrix/rotate_matrix.py) * [Searching In Sorted Matrix](matrix/searching_in_sorted_matrix.py) diff --git a/quantum/deutsch_jozsa.py b/quantum/deutsch_jozsa.py index d7e2d8335..95c3e65b5 100755 --- a/quantum/deutsch_jozsa.py +++ b/quantum/deutsch_jozsa.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ -Deutsch-Josza Algorithm is one of the first examples of a quantum +Deutsch-Jozsa Algorithm is one of the first examples of a quantum algorithm that is exponentially faster than any possible deterministic classical algorithm @@ -22,10 +22,10 @@ References: """ import numpy as np -import qiskit as q +import qiskit -def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: +def dj_oracle(case: str, num_qubits: int) -> qiskit.QuantumCircuit: """ Returns a Quantum Circuit for the Oracle function. The circuit returned can represent balanced or constant function, @@ -33,7 +33,7 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: """ # This circuit has num_qubits+1 qubits: the size of the input, # plus one output qubit - oracle_qc = q.QuantumCircuit(num_qubits + 1) + oracle_qc = qiskit.QuantumCircuit(num_qubits + 1) # First, let's deal with the case in which oracle is balanced if case == "balanced": @@ -43,7 +43,7 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: # Next, format 'b' as a binary string of length 'n', padded with zeros: b_str = format(b, f"0{num_qubits}b") # Next, we place the first X-gates. 
Each digit in our binary string - # correspopnds to a qubit, if the digit is 0, we do nothing, if it's 1 + # corresponds to a qubit, if the digit is 0, we do nothing, if it's 1 # we apply an X-gate to that qubit: for index, bit in enumerate(b_str): if bit == "1": @@ -70,13 +70,15 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: return oracle_gate -def dj_algorithm(oracle: q.QuantumCircuit, num_qubits: int) -> q.QuantumCircuit: +def dj_algorithm( + oracle: qiskit.QuantumCircuit, num_qubits: int +) -> qiskit.QuantumCircuit: """ - Returns the complete Deustch-Jozsa Quantum Circuit, + Returns the complete Deutsch-Jozsa Quantum Circuit, adding Input & Output registers and Hadamard & Measurement Gates, to the Oracle Circuit passed in arguments """ - dj_circuit = q.QuantumCircuit(num_qubits + 1, num_qubits) + dj_circuit = qiskit.QuantumCircuit(num_qubits + 1, num_qubits) # Set up the output qubit: dj_circuit.x(num_qubits) dj_circuit.h(num_qubits) @@ -95,7 +97,7 @@ def dj_algorithm(oracle: q.QuantumCircuit, num_qubits: int) -> q.QuantumCircuit: return dj_circuit -def deutsch_jozsa(case: str, num_qubits: int) -> q.result.counts.Counts: +def deutsch_jozsa(case: str, num_qubits: int) -> qiskit.result.counts.Counts: """ Main function that builds the circuit using other helper functions, runs the experiment 1000 times & returns the resultant qubit counts @@ -104,14 +106,14 @@ def deutsch_jozsa(case: str, num_qubits: int) -> q.result.counts.Counts: >>> deutsch_jozsa("balanced", 3) {'111': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") oracle_gate = dj_oracle(case, num_qubits) dj_circuit = dj_algorithm(oracle_gate, num_qubits) - # Execute the circuit on the qasm simulator - job = q.execute(dj_circuit, simulator, shots=1000) + # Execute the circuit on the simulator + job = qiskit.execute(dj_circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(dj_circuit) diff --git a/quantum/half_adder.py b/quantum/half_adder.py index 4af704e64..21a57ddcf 100755 --- a/quantum/half_adder.py +++ b/quantum/half_adder.py @@ -10,10 +10,10 @@ https://en.wikipedia.org/wiki/Adder_(electronics) https://qiskit.org/textbook/ch-states/atoms-computation.html#4.2-Remembering-how-to-add- """ -import qiskit as q +import qiskit -def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: +def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts: """ >>> half_adder(0, 0) {'00': 1000} @@ -24,10 +24,10 @@ def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: >>> half_adder(1, 1) {'10': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") - qc_ha = q.QuantumCircuit(4, 2) + qc_ha = qiskit.QuantumCircuit(4, 2) # encode inputs in qubits 0 and 1 if bit0 == 1: qc_ha.x(0) @@ -48,9 +48,9 @@ def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: qc_ha.measure(3, 1) # extract AND value # Execute the circuit on the qasm simulator - job = q.execute(qc_ha, simulator, shots=1000) + job = qiskit.execute(qc_ha, simulator, shots=1000) - # Return the histogram data of the results of the experiment. 
+ # Return the histogram data of the results of the experiment return job.result().get_counts(qc_ha) diff --git a/quantum/not_gate.py b/quantum/not_gate.py index e68a78009..ee23272d7 100644 --- a/quantum/not_gate.py +++ b/quantum/not_gate.py @@ -6,21 +6,23 @@ times and print the total count of the states finally observed. Qiskit Docs: https://qiskit.org/documentation/getting_started.html """ -import qiskit as q +import qiskit -def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts: +def single_qubit_measure( + qubits: int, classical_bits: int +) -> qiskit.result.counts.Counts: """ >>> single_qubit_measure(2, 2) {'11': 1000} >>> single_qubit_measure(4, 4) {'0011': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") # Create a Quantum Circuit acting on the q register - circuit = q.QuantumCircuit(qubits, classical_bits) + circuit = qiskit.QuantumCircuit(qubits, classical_bits) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0) @@ -30,7 +32,7 @@ def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Co circuit.measure([0, 1], [0, 1]) # Execute the circuit on the qasm simulator - job = q.execute(circuit, simulator, shots=1000) + job = qiskit.execute(circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(circuit) diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py index 597efb834..c6d03d170 100644 --- a/quantum/q_full_adder.py +++ b/quantum/q_full_adder.py @@ -11,7 +11,6 @@ https://www.quantum-inspire.com/kbase/full-adder/ import math import qiskit -from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def quantum_full_adder( @@ -38,25 +37,25 @@ def quantum_full_adder( carry_in: carry in for the circuit. Returns: qiskit.result.counts.Counts: sum result counts. - >>> quantum_full_adder(1,1,1) + >>> quantum_full_adder(1, 1, 1) {'11': 1000} - >>> quantum_full_adder(0,0,1) + >>> quantum_full_adder(0, 0, 1) {'01': 1000} - >>> quantum_full_adder(1,0,1) + >>> quantum_full_adder(1, 0, 1) {'10': 1000} - >>> quantum_full_adder(1,-4,1) + >>> quantum_full_adder(1, -4, 1) Traceback (most recent call last): ... ValueError: inputs must be positive. - >>> quantum_full_adder('q',0,1) + >>> quantum_full_adder('q', 0, 1) Traceback (most recent call last): ... TypeError: inputs must be integers. - >>> quantum_full_adder(0.5,0,1) + >>> quantum_full_adder(0.5, 0, 1) Traceback (most recent call last): ... ValueError: inputs must be exact integers. - >>> quantum_full_adder(0,1,3) + >>> quantum_full_adder(0, 1, 3) Traceback (most recent call last): ... ValueError: inputs must be less or equal to 2. 
@@ -78,12 +77,12 @@ def quantum_full_adder( raise ValueError("inputs must be less or equal to 2.") # build registers - qr = QuantumRegister(4, "qr") - cr = ClassicalRegister(2, "cr") + qr = qiskit.QuantumRegister(4, "qr") + cr = qiskit.ClassicalRegister(2, "cr") # list the entries entry = [input_1, input_2, carry_in] - quantum_circuit = QuantumCircuit(qr, cr) + quantum_circuit = qiskit.QuantumCircuit(qr, cr) for i in range(0, 3): if entry[i] == 2: @@ -102,11 +101,11 @@ def quantum_full_adder( quantum_circuit.measure([2, 3], cr) # measure the last two qbits - backend = Aer.get_backend("qasm_simulator") - job = execute(quantum_circuit, backend, shots=1000) + backend = qiskit.Aer.get_backend("aer_simulator") + job = qiskit.execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit) if __name__ == "__main__": - print(f"Total sum count for state is: {quantum_full_adder(1,1,1)}") + print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}") diff --git a/quantum/quantum_entanglement.py b/quantum/quantum_entanglement.py index 3d8e27713..08fc32e49 100644 --- a/quantum/quantum_entanglement.py +++ b/quantum/quantum_entanglement.py @@ -29,8 +29,8 @@ def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts: """ classical_bits = qubits - # Using Aer's qasm_simulator - simulator = qiskit.Aer.get_backend("qasm_simulator") + # Using Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") # Creating a Quantum Circuit acting on the q register circuit = qiskit.QuantumCircuit(qubits, classical_bits) @@ -48,7 +48,7 @@ def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts: # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. 
- # Executing the circuit on the qasm simulator + # Executing the circuit on the simulator job = qiskit.execute(circuit, simulator, shots=1000) return job.result().get_counts(circuit) diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index 1d3724476..c07757af7 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -2,11 +2,11 @@ # https://en.wikipedia.org/wiki/Adder_(electronics)#Full_adder # https://en.wikipedia.org/wiki/Controlled_NOT_gate -from qiskit import Aer, QuantumCircuit, execute +import qiskit from qiskit.providers import Backend -def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: +def store_two_classics(val1: int, val2: int) -> tuple[qiskit.QuantumCircuit, str, str]: """ Generates a Quantum Circuit which stores two classical integers Returns the circuit and binary representation of the integers @@ -21,10 +21,10 @@ def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: # We need (3 * number of bits in the larger number)+1 qBits # The second parameter is the number of classical registers, to measure the result - circuit = QuantumCircuit((len(x) * 3) + 1, len(x) + 1) + circuit = qiskit.QuantumCircuit((len(x) * 3) + 1, len(x) + 1) # We are essentially "not-ing" the bits that are 1 - # Reversed because its easier to perform ops on more significant bits + # Reversed because it's easier to perform ops on more significant bits for i in range(len(x)): if x[::-1][i] == "1": circuit.x(i) @@ -36,7 +36,7 @@ def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: def full_adder( - circuit: QuantumCircuit, + circuit: qiskit.QuantumCircuit, input1_loc: int, input2_loc: int, carry_in: int, @@ -55,14 +55,14 @@ def full_adder( # The default value for **backend** is the result of a function call which is not # normally recommended and causes flake8-bugbear to raise a B008 error. However, -# in this case, this is accptable because `Aer.get_backend()` is called when the +# in this case, this is acceptable because `Aer.get_backend()` is called when the # function is defined and that same backend is then reused for all function calls. def ripple_adder( val1: int, val2: int, - backend: Backend = Aer.get_backend("qasm_simulator"), # noqa: B008 + backend: Backend = qiskit.Aer.get_backend("aer_simulator"), # noqa: B008 ) -> int: """ Quantum Equivalent of a Ripple Adder Circuit @@ -104,7 +104,7 @@ def ripple_adder( for i in range(len(x) + 1): circuit.measure([(len(x) * 2) + i], [i]) - res = execute(circuit, backend, shots=1).result() + res = qiskit.execute(circuit, backend, shots=1).result() # The result is in binary. Convert it back to int return int(list(res.get_counts())[0], 2) diff --git a/quantum/single_qubit_measure.py b/quantum/single_qubit_measure.py index 7f058c217..605bd8043 100755 --- a/quantum/single_qubit_measure.py +++ b/quantum/single_qubit_measure.py @@ -6,25 +6,27 @@ finally prints the total count of the states finally observed. 
Qiskit Docs: https://qiskit.org/documentation/getting_started.html """ -import qiskit as q +import qiskit -def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts: +def single_qubit_measure( + qubits: int, classical_bits: int +) -> qiskit.result.counts.Counts: """ >>> single_qubit_measure(1, 1) {'0': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") # Create a Quantum Circuit acting on the q register - circuit = q.QuantumCircuit(qubits, classical_bits) + circuit = qiskit.QuantumCircuit(qubits, classical_bits) # Map the quantum measurement to the classical bits circuit.measure([0], [0]) - # Execute the circuit on the qasm simulator - job = q.execute(circuit, simulator, shots=1000) + # Execute the circuit on the simulator + job = qiskit.execute(circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(circuit) From 4829fea24dc2c75ffc49571538fc40bce2d7e64b Mon Sep 17 00:00:00 2001 From: Atul Rajput <92659293+AtulRajput01@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:18:28 +0530 Subject: [PATCH 067/368] Create graphs/dijkstra_alternate.py (#7405) * Update dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra.py * Update graphs/dijkstra.py Co-authored-by: Christian Clauss * Update graphs/dijkstra.py Co-authored-by: Christian Clauss * Update graphs/dijkstra.py Co-authored-by: Christian Clauss * Update dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra.py * Apply suggestions from code review * Create dijkstra_alternate.py * Update dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * int(1e7) * Update dijkstra_alternate.py * Update graphs/dijkstra_alternate.py * sptset --> visited Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/dijkstra_alternate.py | 98 ++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 graphs/dijkstra_alternate.py diff --git a/graphs/dijkstra_alternate.py b/graphs/dijkstra_alternate.py new file mode 100644 index 000000000..7beef6b04 --- /dev/null +++ b/graphs/dijkstra_alternate.py @@ -0,0 +1,98 @@ +from __future__ import annotations + + +class Graph: + def __init__(self, vertices: int) -> None: + """ + >>> graph = Graph(2) + >>> graph.vertices + 2 + >>> len(graph.graph) + 2 + >>> len(graph.graph[0]) + 2 + """ + self.vertices = vertices + self.graph = [[0] * vertices for _ in range(vertices)] + + def print_solution(self, distances_from_source: list[int]) -> None: + """ + >>> Graph(0).print_solution([]) # doctest: +NORMALIZE_WHITESPACE + Vertex Distance from Source + """ + print("Vertex \t Distance from Source") + for vertex in range(self.vertices): + print(vertex, "\t\t", distances_from_source[vertex]) + + def minimum_distance( + self, distances_from_source: list[int], visited: list[bool] + ) -> int: + """ + A utility function to find the vertex with minimum distance value, from the set + of vertices not yet included in shortest path tree. 
+ + >>> Graph(3).minimum_distance([1, 2, 3], [False, False, True]) + 0 + """ + + # Initialize minimum distance for next node + minimum = 1e7 + min_index = 0 + + # Search not nearest vertex not in the shortest path tree + for vertex in range(self.vertices): + if distances_from_source[vertex] < minimum and visited[vertex] is False: + minimum = distances_from_source[vertex] + min_index = vertex + return min_index + + def dijkstra(self, source: int) -> None: + """ + Function that implements Dijkstra's single source shortest path algorithm for a + graph represented using adjacency matrix representation. + + >>> Graph(4).dijkstra(1) # doctest: +NORMALIZE_WHITESPACE + Vertex Distance from Source + 0 10000000 + 1 0 + 2 10000000 + 3 10000000 + """ + + distances = [int(1e7)] * self.vertices # distances from the source + distances[source] = 0 + visited = [False] * self.vertices + + for _ in range(self.vertices): + u = self.minimum_distance(distances, visited) + visited[u] = True + + # Update dist value of the adjacent vertices + # of the picked vertex only if the current + # distance is greater than new distance and + # the vertex in not in the shortest path tree + for v in range(self.vertices): + if ( + self.graph[u][v] > 0 + and visited[v] is False + and distances[v] > distances[u] + self.graph[u][v] + ): + distances[v] = distances[u] + self.graph[u][v] + + self.print_solution(distances) + + +if __name__ == "__main__": + graph = Graph(9) + graph.graph = [ + [0, 4, 0, 0, 0, 0, 0, 8, 0], + [4, 0, 8, 0, 0, 0, 0, 11, 0], + [0, 8, 0, 7, 0, 4, 0, 0, 2], + [0, 0, 7, 0, 9, 14, 0, 0, 0], + [0, 0, 0, 9, 0, 10, 0, 0, 0], + [0, 0, 4, 14, 10, 0, 2, 0, 0], + [0, 0, 0, 0, 0, 2, 0, 1, 6], + [8, 11, 0, 0, 0, 0, 1, 0, 7], + [0, 0, 2, 0, 0, 0, 6, 7, 0], + ] + graph.dijkstra(0) From 831280ceddb1e37bb0215fd32899a52acbbccf2d Mon Sep 17 00:00:00 2001 From: Alan Paul <57307037+Alanzz@users.noreply.github.com> Date: Thu, 20 Oct 2022 15:57:13 +0530 Subject: [PATCH 068/368] Add quantum_random.py (#7446) * Create quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * requirements.txt: Add projectq * Update quantum_random.py * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * Update quantum_random.py * Update quantum/quantum_random.py * Update quantum/quantum_random.py * Update quantum_random.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- quantum/quantum_random.py | 30 ++++++++++++++++++++++++++++++ requirements.txt | 1 + 2 files changed, 31 insertions(+) create mode 100644 
quantum/quantum_random.py diff --git a/quantum/quantum_random.py b/quantum/quantum_random.py new file mode 100644 index 000000000..01c8faa12 --- /dev/null +++ b/quantum/quantum_random.py @@ -0,0 +1,30 @@ +import doctest + +import projectq +from projectq.ops import H, Measure + + +def get_random_number(quantum_engine: projectq.cengines._main.MainEngine) -> int: + """ + >>> isinstance(get_random_number(projectq.MainEngine()), int) + True + """ + qubit = quantum_engine.allocate_qubit() + H | qubit + Measure | qubit + return int(qubit) + + +if __name__ == "__main__": + doctest.testmod() + + # initialises a new quantum backend + quantum_engine = projectq.MainEngine() + + # Generate a list of 10 random numbers + random_numbers_list = [get_random_number(quantum_engine) for _ in range(10)] + + # Flushes the quantum engine from memory + quantum_engine.flush() + + print("Random numbers", random_numbers_list) diff --git a/requirements.txt b/requirements.txt index b14a3eb01..25d2b4ef9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,7 @@ numpy opencv-python pandas pillow +projectq qiskit requests scikit-fuzzy From 42b56f2345ed4566ea48306d3a727f1aa5c88218 Mon Sep 17 00:00:00 2001 From: Modassir Afzal <60973906+Moddy2024@users.noreply.github.com> Date: Fri, 21 Oct 2022 03:29:11 +0530 Subject: [PATCH 069/368] XGBoost Classifier (#7106) * Fixes: #{6551} * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Update xgboostclassifier.py * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #{6551} * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Fixes : #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes : #6551 * Fixes : #6551 * Fixes: #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Update xgboostclassifier.py * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #6551 * Fixes #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: {#6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: {#6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * FIXES: {#6551} * Fixes : { #6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes : { #6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: { #6551] * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Update xgboostclassifier.py * Apply 
suggestions from code review * Update xgboostclassifier.py * Update xgboostclassifier.py * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: { #6551} * Update xgboostclassifier.py * Fixes: { #6551} * Update xgboostclassifier.py * Fixes: ( #6551) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: { #6551} Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- machine_learning/xgboostclassifier.py | 82 +++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 machine_learning/xgboostclassifier.py diff --git a/machine_learning/xgboostclassifier.py b/machine_learning/xgboostclassifier.py new file mode 100644 index 000000000..bb5b48b7a --- /dev/null +++ b/machine_learning/xgboostclassifier.py @@ -0,0 +1,82 @@ +# XGBoost Classifier Example +import numpy as np +from matplotlib import pyplot as plt +from sklearn.datasets import load_iris +from sklearn.metrics import plot_confusion_matrix +from sklearn.model_selection import train_test_split +from xgboost import XGBClassifier + + +def data_handling(data: dict) -> tuple: + # Split dataset into features and target + # data is features + """ + >>> data_handling(({'data':'[5.1, 3.5, 1.4, 0.2]','target':([0])})) + ('[5.1, 3.5, 1.4, 0.2]', [0]) + >>> data_handling( + ... {'data': '[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', 'target': ([0, 0])} + ... ) + ('[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', [0, 0]) + """ + return (data["data"], data["target"]) + + +def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: + """ + >>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0])) + XGBClassifier(base_score=0.5, booster='gbtree', callbacks=None, + colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, + early_stopping_rounds=None, enable_categorical=False, + eval_metric=None, gamma=0, gpu_id=-1, grow_policy='depthwise', + importance_type=None, interaction_constraints='', + learning_rate=0.300000012, max_bin=256, max_cat_to_onehot=4, + max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1, + missing=nan, monotone_constraints='()', n_estimators=100, + n_jobs=0, num_parallel_tree=1, predictor='auto', random_state=0, + reg_alpha=0, reg_lambda=1, ...) + """ + classifier = XGBClassifier() + classifier.fit(features, target) + return classifier + + +def main() -> None: + + """ + >>> main() + + Url for the algorithm: + https://xgboost.readthedocs.io/en/stable/ + Iris type dataset is used to demonstrate algorithm. 
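+
+    A minimal illustrative call (an editor's sketch, not part of the original
+    patch), reusing the data_handling and xgboost helpers defined above:
+
+        features, targets = data_handling(load_iris())
+        classifier = xgboost(features, targets)
+        classifier.predict(features[:1])  # e.g. array([0]) for the first sample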
+ """ + + # Load Iris dataset + iris = load_iris() + features, targets = data_handling(iris) + x_train, x_test, y_train, y_test = train_test_split( + features, targets, test_size=0.25 + ) + + names = iris["target_names"] + + # Create an XGBoost Classifier from the training data + xgboost_classifier = xgboost(x_train, y_train) + + # Display the confusion matrix of the classifier with both training and test sets + plot_confusion_matrix( + xgboost_classifier, + x_test, + y_test, + display_labels=names, + cmap="Blues", + normalize="true", + ) + plt.title("Normalized Confusion Matrix - IRIS Dataset") + plt.show() + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + main() From 717f0e46d950060f2147f022f65b7e44e72cfdd8 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 21 Oct 2022 20:03:57 +1300 Subject: [PATCH 070/368] Maclaurin series approximation of sin (#7451) * added maclaurin_sin.py function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added type hints and fixed line overflows * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed incompatable type examples * Update maths/maclaurin_sin.py Co-authored-by: Caeden Perelli-Harris * changed error details * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed grammatical errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * improved function accuracy and added test case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/maclaurin_sin.py Co-authored-by: Christian Clauss * removed redundant return * fixed pytest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- maths/maclaurin_sin.py | 64 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 maths/maclaurin_sin.py diff --git a/maths/maclaurin_sin.py b/maths/maclaurin_sin.py new file mode 100644 index 000000000..3c27ccf63 --- /dev/null +++ b/maths/maclaurin_sin.py @@ -0,0 +1,64 @@ +""" +https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions +""" +from math import factorial, pi + + +def maclaurin_sin(theta: float, accuracy: int = 30) -> float: + """ + Finds the maclaurin approximation of sin + + :param theta: the angle to which sin is found + :param accuracy: the degree of accuracy wanted minimum ~ 1.5 theta + :return: the value of sine in radians + + + >>> from math import isclose, sin + >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) + True + >>> maclaurin_sin(10) + -0.544021110889369 + >>> maclaurin_sin(-10) + 0.5440211108893703 + >>> maclaurin_sin(10, 15) + -0.5440211108893689 + >>> maclaurin_sin(-10, 15) + 0.5440211108893703 + >>> maclaurin_sin("10") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires either an int or float for theta + >>> maclaurin_sin(10, -30) + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, 30.5) + Traceback (most recent call last): + ... 
+ ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, "30") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + """ + + if not isinstance(theta, (int, float)): + raise ValueError("maclaurin_sin() requires either an int or float for theta") + + if not isinstance(accuracy, int) or accuracy <= 0: + raise ValueError("maclaurin_sin() requires a positive int for accuracy") + + theta = float(theta) + div = theta // (2 * pi) + theta -= 2 * div * pi + return sum( + (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) + for r in range(accuracy) + ) + + +if __name__ == "__main__": + print(maclaurin_sin(10)) + print(maclaurin_sin(-10)) + print(maclaurin_sin(10, 15)) + print(maclaurin_sin(-10, 15)) From cc10b20beb8f0b10b50c84bd523bf41095fe9f37 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 22 Oct 2022 07:33:51 -0400 Subject: [PATCH 071/368] Remove some print statements within algorithmic functions (#7499) * Remove commented-out print statements in algorithmic functions * Encapsulate non-algorithmic code in __main__ * Remove unused print_matrix function * Remove print statement in __init__ * Remove print statement from doctest * Encapsulate non-algorithmic code in __main__ * Modify algorithm to return instead of print * Encapsulate non-algorithmic code in __main__ * Refactor data_safety_checker to return instead of print * updating DIRECTORY.md * updating DIRECTORY.md * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 4 + cellular_automata/game_of_life.py | 1 - digital_image_processing/index_calculation.py | 1 - divide_and_conquer/max_subarray_sum.py | 12 +-- .../strassen_matrix_multiplication.py | 3 +- dynamic_programming/longest_sub_array.py | 1 - dynamic_programming/max_non_adjacent_sum.py | 2 +- dynamic_programming/subset_generation.py | 9 +- dynamic_programming/sum_of_subset.py | 14 ++- machine_learning/forecasting/run.py | 86 ++++++++++--------- 10 files changed, 69 insertions(+), 64 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1fad28798..70644d063 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -360,6 +360,7 @@ * [Dijkstra](graphs/dijkstra.py) * [Dijkstra 2](graphs/dijkstra_2.py) * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py) + * [Dijkstra Alternate](graphs/dijkstra_alternate.py) * [Dinic](graphs/dinic.py) * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) @@ -460,6 +461,7 @@ * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) + * [Xgboostclassifier](machine_learning/xgboostclassifier.py) ## Maths * [3N Plus 1](maths/3n_plus_1.py) @@ -534,6 +536,7 @@ * [Line Length](maths/line_length.py) * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) + * [Maclaurin Sin](maths/maclaurin_sin.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two 
Arrays](maths/median_of_two_arrays.py) @@ -936,6 +939,7 @@ * [Not Gate](quantum/not_gate.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum Random](quantum/quantum_random.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index c5324da73..8e5470251 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -66,7 +66,6 @@ def run(canvas: list[list[bool]]) -> list[list[bool]]: next_gen_canvas = np.array(create_canvas(current_canvas.shape[0])) for r, row in enumerate(current_canvas): for c, pt in enumerate(row): - # print(r-1,r+2,c-1,c+2) next_gen_canvas[r][c] = __judge_point( pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 01cd79fc1..be1855e99 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -105,7 +105,6 @@ class IndexCalculation: """ def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None): - # print("Numpy version: " + np.__version__) self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir) def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None): diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py index 43f58086e..f23e81719 100644 --- a/divide_and_conquer/max_subarray_sum.py +++ b/divide_and_conquer/max_subarray_sum.py @@ -69,8 +69,10 @@ def max_subarray_sum(array, left, right): return max(left_half_sum, right_half_sum, cross_sum) -array = [-2, -5, 6, -2, -3, 1, 5, -6] -array_length = len(array) -print( - "Maximum sum of contiguous subarray:", max_subarray_sum(array, 0, array_length - 1) -) +if __name__ == "__main__": + array = [-2, -5, 6, -2, -3, 1, 5, -6] + array_length = len(array) + print( + "Maximum sum of contiguous subarray:", + max_subarray_sum(array, 0, array_length - 1), + ) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 0ee426e4b..371605d6d 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -68,8 +68,7 @@ def matrix_dimensions(matrix: list) -> tuple[int, int]: def print_matrix(matrix: list) -> None: - for i in range(len(matrix)): - print(matrix[i]) + print("\n".join(str(line) for line in matrix)) def actual_strassen(matrix_a: list, matrix_b: list) -> list: diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py index 30159a138..b477acf61 100644 --- a/dynamic_programming/longest_sub_array.py +++ b/dynamic_programming/longest_sub_array.py @@ -14,7 +14,6 @@ class SubArray: def __init__(self, arr): # we need a list not a string, so do something to change the type self.array = arr.split(",") - print(("the input array is:", self.array)) def solve_sub_array(self): rear = [int(self.array[0])] * len(self.array) diff --git a/dynamic_programming/max_non_adjacent_sum.py b/dynamic_programming/max_non_adjacent_sum.py index 5362b22ca..e3cc23f49 100644 --- a/dynamic_programming/max_non_adjacent_sum.py +++ b/dynamic_programming/max_non_adjacent_sum.py @@ -7,7 +7,7 @@ def maximum_non_adjacent_sum(nums: list[int]) -> int: """ Find the maximum non-adjacent sum of the 
integers in the nums input list - >>> print(maximum_non_adjacent_sum([1, 2, 3])) + >>> maximum_non_adjacent_sum([1, 2, 3]) 4 >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) 18 diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 4781b23b3..819fd8106 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -37,7 +37,8 @@ def print_combination(arr, n, r): combination_util(arr, n, r, 0, data, 0) -# Driver function to check for above function -arr = [10, 20, 30, 40, 50] -print_combination(arr, len(arr), 3) -# This code is contributed by Ambuj sahu +if __name__ == "__main__": + # Driver code to check the function above + arr = [10, 20, 30, 40, 50] + print_combination(arr, len(arr), 3) + # This code is contributed by Ambuj sahu diff --git a/dynamic_programming/sum_of_subset.py b/dynamic_programming/sum_of_subset.py index 77672b0b8..96ebcf583 100644 --- a/dynamic_programming/sum_of_subset.py +++ b/dynamic_programming/sum_of_subset.py @@ -1,13 +1,14 @@ -def is_sum_subset(arr, arr_len, required_sum): +def is_sum_subset(arr: list[int], required_sum: int) -> bool: """ - >>> is_sum_subset([2, 4, 6, 8], 4, 5) + >>> is_sum_subset([2, 4, 6, 8], 5) False - >>> is_sum_subset([2, 4, 6, 8], 4, 14) + >>> is_sum_subset([2, 4, 6, 8], 14) True """ # a subset value says 1 if that subset sum can be formed else 0 # initially no subsets can be formed hence False/0 - subset = [[False for i in range(required_sum + 1)] for i in range(arr_len + 1)] + arr_len = len(arr) + subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 @@ -25,10 +26,7 @@ def is_sum_subset(arr, arr_len, required_sum): if arr[i - 1] <= j: subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] - # uncomment to print the subset - # for i in range(arrLen+1): - # print(subset[i]) - print(subset[arr_len][required_sum]) + return subset[arr_len][required_sum] if __name__ == "__main__": diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index b11a23012..0909b76d8 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -1,7 +1,7 @@ """ this is code for forecasting but i modified it and used it for safety checker of data -for ex: you have a online shop and for some reason some data are +for ex: you have an online shop and for some reason some data are missing (the amount of data that u expected are not supposed to be) then we can use it *ps : 1. ofc we can use normal statistic method but in this case @@ -91,14 +91,14 @@ def interquartile_range_checker(train_user: list) -> float: return low_lim -def data_safety_checker(list_vote: list, actual_result: float) -> None: +def data_safety_checker(list_vote: list, actual_result: float) -> bool: """ Used to review all the votes (list result prediction) and compare it to the actual result. input : list of predictions output : print whether it's safe or not - >>> data_safety_checker([2,3,4],5.0) - Today's data is not safe. 
+ >>> data_safety_checker([2, 3, 4], 5.0) + False """ safe = 0 not_safe = 0 @@ -107,50 +107,54 @@ def data_safety_checker(list_vote: list, actual_result: float) -> None: safe = not_safe + 1 else: if abs(abs(i) - abs(actual_result)) <= 0.1: - safe = safe + 1 + safe += 1 else: - not_safe = not_safe + 1 - print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.") + not_safe += 1 + return safe > not_safe -# data_input_df = pd.read_csv("ex_data.csv", header=None) -data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] -data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"]) +if __name__ == "__main__": + # data_input_df = pd.read_csv("ex_data.csv", header=None) + data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] + data_input_df = pd.DataFrame( + data_input, columns=["total_user", "total_even", "days"] + ) -""" -data column = total user in a day, how much online event held in one day, -what day is that(sunday-saturday) -""" + """ + data column = total user in a day, how much online event held in one day, + what day is that(sunday-saturday) + """ -# start normalization -normalize_df = Normalizer().fit_transform(data_input_df.values) -# split data -total_date = normalize_df[:, 2].tolist() -total_user = normalize_df[:, 0].tolist() -total_match = normalize_df[:, 1].tolist() + # start normalization + normalize_df = Normalizer().fit_transform(data_input_df.values) + # split data + total_date = normalize_df[:, 2].tolist() + total_user = normalize_df[:, 0].tolist() + total_match = normalize_df[:, 1].tolist() -# for svr (input variable = total date and total match) -x = normalize_df[:, [1, 2]].tolist() -x_train = x[: len(x) - 1] -x_test = x[len(x) - 1 :] + # for svr (input variable = total date and total match) + x = normalize_df[:, [1, 2]].tolist() + x_train = x[: len(x) - 1] + x_test = x[len(x) - 1 :] -# for linear reression & sarimax -trn_date = total_date[: len(total_date) - 1] -trn_user = total_user[: len(total_user) - 1] -trn_match = total_match[: len(total_match) - 1] + # for linear regression & sarimax + trn_date = total_date[: len(total_date) - 1] + trn_user = total_user[: len(total_user) - 1] + trn_match = total_match[: len(total_match) - 1] -tst_date = total_date[len(total_date) - 1 :] -tst_user = total_user[len(total_user) - 1 :] -tst_match = total_match[len(total_match) - 1 :] + tst_date = total_date[len(total_date) - 1 :] + tst_user = total_user[len(total_user) - 1 :] + tst_match = total_match[len(total_match) - 1 :] + # voting system with forecasting + res_vote = [ + linear_regression_prediction( + trn_date, trn_user, trn_match, tst_date, tst_match + ), + sarimax_predictor(trn_user, trn_match, tst_match), + support_vector_regressor(x_train, x_test, trn_user), + ] -# voting system with forecasting -res_vote = [] -res_vote.append( - linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match) -) -res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match)) -res_vote.append(support_vector_regressor(x_train, x_test, trn_user)) - -# check the safety of todays'data^^ -data_safety_checker(res_vote, tst_user) + # check the safety of today's data + not_str = "" if data_safety_checker(res_vote, tst_user) else "not " + print("Today's data is {not_str}safe.") From a5dd07c3707a0d3ebde0321ce7984082b3d322ff Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Sun, 23 Oct 2022 05:17:07 +1300 Subject: [PATCH 072/368] Maclaurin approximation of cos 
(#7507) * renamed maclaurin_sin.py to maclaurin_series.py and included function for cos approximation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * attempt to fix pytest error Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/maclaurin_series.py | 121 ++++++++++++++++++++++++++++++++++++++ maths/maclaurin_sin.py | 64 -------------------- 2 files changed, 121 insertions(+), 64 deletions(-) create mode 100644 maths/maclaurin_series.py delete mode 100644 maths/maclaurin_sin.py diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py new file mode 100644 index 000000000..57edc90bf --- /dev/null +++ b/maths/maclaurin_series.py @@ -0,0 +1,121 @@ +""" +https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions +""" +from math import factorial, pi + + +def maclaurin_sin(theta: float, accuracy: int = 30) -> float: + """ + Finds the maclaurin approximation of sin + + :param theta: the angle to which sin is found + :param accuracy: the degree of accuracy wanted minimum + :return: the value of sine in radians + + + >>> from math import isclose, sin + >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) + True + >>> maclaurin_sin(10) + -0.544021110889369 + >>> maclaurin_sin(-10) + 0.5440211108893703 + >>> maclaurin_sin(10, 15) + -0.5440211108893689 + >>> maclaurin_sin(-10, 15) + 0.5440211108893703 + >>> maclaurin_sin("10") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires either an int or float for theta + >>> maclaurin_sin(10, -30) + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, 30.5) + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, "30") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + """ + + if not isinstance(theta, (int, float)): + raise ValueError("maclaurin_sin() requires either an int or float for theta") + + if not isinstance(accuracy, int) or accuracy <= 0: + raise ValueError("maclaurin_sin() requires a positive int for accuracy") + + theta = float(theta) + div = theta // (2 * pi) + theta -= 2 * div * pi + return sum( + (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) + for r in range(accuracy) + ) + + +def maclaurin_cos(theta: float, accuracy: int = 30) -> float: + """ + Finds the maclaurin approximation of cos + + :param theta: the angle to which cos is found + :param accuracy: the degree of accuracy wanted + :return: the value of cosine in radians + + + >>> from math import isclose, cos + >>> all(isclose(maclaurin_cos(x, 50), cos(x)) for x in range(-25, 25)) + True + >>> maclaurin_cos(5) + 0.28366218546322675 + >>> maclaurin_cos(-5) + 0.2836621854632266 + >>> maclaurin_cos(10, 15) + -0.8390715290764525 + >>> maclaurin_cos(-10, 15) + -0.8390715290764521 + >>> maclaurin_cos("10") + Traceback (most recent call last): + ... + ValueError: maclaurin_cos() requires either an int or float for theta + >>> maclaurin_cos(10, -30) + Traceback (most recent call last): + ... + ValueError: maclaurin_cos() requires a positive int for accuracy + >>> maclaurin_cos(10, 30.5) + Traceback (most recent call last): + ... + ValueError: maclaurin_cos() requires a positive int for accuracy + >>> maclaurin_cos(10, "30") + Traceback (most recent call last): + ... 
+ ValueError: maclaurin_cos() requires a positive int for accuracy + """ + + if not isinstance(theta, (int, float)): + raise ValueError("maclaurin_cos() requires either an int or float for theta") + + if not isinstance(accuracy, int) or accuracy <= 0: + raise ValueError("maclaurin_cos() requires a positive int for accuracy") + + theta = float(theta) + div = theta // (2 * pi) + theta -= 2 * div * pi + return sum( + (((-1) ** r) * ((theta ** (2 * r)) / factorial(2 * r))) for r in range(accuracy) + ) + + +if __name__ == "__main__": + print(maclaurin_sin(10)) + print(maclaurin_sin(-10)) + print(maclaurin_sin(10, 15)) + print(maclaurin_sin(-10, 15)) + + print(maclaurin_cos(5)) + print(maclaurin_cos(-5)) + print(maclaurin_cos(10, 15)) + print(maclaurin_cos(-10, 15)) diff --git a/maths/maclaurin_sin.py b/maths/maclaurin_sin.py deleted file mode 100644 index 3c27ccf63..000000000 --- a/maths/maclaurin_sin.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions -""" -from math import factorial, pi - - -def maclaurin_sin(theta: float, accuracy: int = 30) -> float: - """ - Finds the maclaurin approximation of sin - - :param theta: the angle to which sin is found - :param accuracy: the degree of accuracy wanted minimum ~ 1.5 theta - :return: the value of sine in radians - - - >>> from math import isclose, sin - >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) - True - >>> maclaurin_sin(10) - -0.544021110889369 - >>> maclaurin_sin(-10) - 0.5440211108893703 - >>> maclaurin_sin(10, 15) - -0.5440211108893689 - >>> maclaurin_sin(-10, 15) - 0.5440211108893703 - >>> maclaurin_sin("10") - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires either an int or float for theta - >>> maclaurin_sin(10, -30) - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires a positive int for accuracy - >>> maclaurin_sin(10, 30.5) - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires a positive int for accuracy - >>> maclaurin_sin(10, "30") - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires a positive int for accuracy - """ - - if not isinstance(theta, (int, float)): - raise ValueError("maclaurin_sin() requires either an int or float for theta") - - if not isinstance(accuracy, int) or accuracy <= 0: - raise ValueError("maclaurin_sin() requires a positive int for accuracy") - - theta = float(theta) - div = theta // (2 * pi) - theta -= 2 * div * pi - return sum( - (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) - for r in range(accuracy) - ) - - -if __name__ == "__main__": - print(maclaurin_sin(10)) - print(maclaurin_sin(-10)) - print(maclaurin_sin(10, 15)) - print(maclaurin_sin(-10, 15)) From ed127032b303d06f2c1ceefd58a8680bb4c2ce50 Mon Sep 17 00:00:00 2001 From: Akshit Gulyan <103456810+AkshitGulyan@users.noreply.github.com> Date: Sun, 23 Oct 2022 09:59:10 +0530 Subject: [PATCH 073/368] Created sum_of_harmonic_series.py (#7504) * Created sum_of_harmonic_series.py Here in this code the formula for Harmonic sum is not used, Sum of the series is calculated by creating a list of the elements in the given Harmonic series and adding all the elements of that list ! 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/sum_of_harmonic_series.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update maths/sum_of_harmonic_series.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update maths/sum_of_harmonic_series.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/sum_of_harmonic_series.py Co-authored-by: Christian Clauss * Update maths/sum_of_harmonic_series.py Co-authored-by: Christian Clauss * Update maths/sum_of_harmonic_series.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sum_of_harmonic_series.py * Add doctests * Update sum_of_harmonic_series.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/sum_of_harmonic_series.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 maths/sum_of_harmonic_series.py diff --git a/maths/sum_of_harmonic_series.py b/maths/sum_of_harmonic_series.py new file mode 100644 index 000000000..9e0d6b19b --- /dev/null +++ b/maths/sum_of_harmonic_series.py @@ -0,0 +1,29 @@ +def sum_of_harmonic_progression( + first_term: float, common_difference: float, number_of_terms: int +) -> float: + """ + https://en.wikipedia.org/wiki/Harmonic_progression_(mathematics) + + Find the sum of n terms in an harmonic progression. The calculation starts with the + first_term and loops adding the common difference of Arithmetic Progression by which + the given Harmonic Progression is linked. + + >>> sum_of_harmonic_progression(1 / 2, 2, 2) + 0.75 + >>> sum_of_harmonic_progression(1 / 5, 5, 5) + 0.45666666666666667 + """ + arithmetic_progression = [1 / first_term] + first_term = 1 / first_term + for _ in range(number_of_terms - 1): + first_term += common_difference + arithmetic_progression.append(first_term) + harmonic_series = [1 / step for step in arithmetic_progression] + return sum(harmonic_series) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(sum_of_harmonic_progression(1 / 2, 2, 2)) From f32f78a9e0a4c2c1e2e9c985fd2375e7ede8925c Mon Sep 17 00:00:00 2001 From: Abhishek Chakraborty Date: Sun, 23 Oct 2022 03:42:02 -0700 Subject: [PATCH 074/368] Basic string grammar fix (#7534) * Grammar edit * Flake8 consistency fix * Apply suggestions from code review Co-authored-by: Christian Clauss --- genetic_algorithm/basic_string.py | 54 +++++++++++++++---------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index d2d305189..3227adf53 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -9,15 +9,15 @@ from __future__ import annotations import random -# Maximum size of the population. bigger could be faster but is more memory expensive +# Maximum size of the population. Bigger could be faster but is more memory expensive. 
N_POPULATION = 200 -# Number of elements selected in every generation for evolution the selection takes -# place from the best to the worst of that generation must be smaller than N_POPULATION +# Number of elements selected in every generation of evolution. The selection takes +# place from best to worst of that generation and must be smaller than N_POPULATION. N_SELECTED = 50 -# Probability that an element of a generation can mutate changing one of its genes this -# guarantees that all genes will be used during evolution +# Probability that an element of a generation can mutate, changing one of its genes. +# This will guarantee that all genes will be used during evolution. MUTATION_PROBABILITY = 0.4 -# just a seed to improve randomness required by the algorithm +# Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) @@ -56,20 +56,20 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, f"{not_in_genes_list} is not in genes list, evolution cannot converge" ) - # Generate random starting population + # Generate random starting population. population = [] for _ in range(N_POPULATION): population.append("".join([random.choice(genes) for i in range(len(target))])) - # Just some logs to know what the algorithms is doing + # Just some logs to know what the algorithms is doing. generation, total_population = 0, 0 - # This loop will end when we will find a perfect match for our target + # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(population) - # Random population created now it's time to evaluate + # Random population created. Now it's time to evaluate. def evaluate(item: str, main_target: str = target) -> tuple[str, float]: """ Evaluate how similar the item is with the target by just @@ -92,17 +92,17 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # - # but with a simple algorithm like this will probably be slower - # we just need to call evaluate for every item inside population + # but with a simple algorithm like this, it will probably be slower. + # We just need to call evaluate for every item inside the population. population_score = [evaluate(item) for item in population] - # Check if there is a matching evolution + # Check if there is a matching evolution. population_score = sorted(population_score, key=lambda x: x[1], reverse=True) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) - # Print the Best result every 10 generation - # just to know that the algorithm is working + # Print the best result every 10 generation. + # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"\nGeneration: {generation}" @@ -111,21 +111,21 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, f"\nBest string: {population_score[0][0]}" ) - # Flush the old population keeping some of the best evolutions - # Keeping this avoid regression of evolution + # Flush the old population, keeping some of the best evolutions. + # Keeping this avoid regression of evolution. population_best = population[: int(N_POPULATION / 3)] population.clear() population.extend(population_best) - # Normalize population score from 0 to 1 + # Normalize population score to be between 0 and 1. 
population_score = [ (item, score / len(target)) for item, score in population_score ] - # Select, Crossover and Mutate a new population + # Select, crossover and mutate a new population. def select(parent_1: tuple[str, float]) -> list[str]: """Select the second parent and generate new population""" pop = [] - # Generate more child proportionally to the fitness score + # Generate more children proportionally to the fitness score. child_n = int(parent_1[1] * 100) + 1 child_n = 10 if child_n >= 10 else child_n for _ in range(child_n): @@ -134,32 +134,32 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, ][0] child_1, child_2 = crossover(parent_1[0], parent_2) - # Append new string to the population list + # Append new string to the population list. pop.append(mutate(child_1)) pop.append(mutate(child_2)) return pop def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string in a random point""" + """Slice and combine two string at a random point.""" random_slice = random.randint(0, len(parent_1) - 1) child_1 = parent_1[:random_slice] + parent_2[random_slice:] child_2 = parent_2[:random_slice] + parent_1[random_slice:] return (child_1, child_2) def mutate(child: str) -> str: - """Mutate a random gene of a child with another one from the list""" + """Mutate a random gene of a child with another one from the list.""" child_list = list(child) if random.uniform(0, 1) < MUTATION_PROBABILITY: child_list[random.randint(0, len(child)) - 1] = random.choice(genes) return "".join(child_list) - # This is Selection + # This is selection for i in range(N_SELECTED): population.extend(select(population_score[int(i)])) # Check if the population has already reached the maximum value and if so, - # break the cycle. if this check is disabled the algorithm will take - # forever to compute large strings but will also calculate small string in - # a lot fewer generations + # break the cycle. If this check is disabled, the algorithm will take + # forever to compute large strings, but will also calculate small strings in + # a far fewer generations. 
if len(population) > N_POPULATION: break From a0cbc2056e9b9ff4f8c5da682061996e783b13e3 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 12:01:51 +0100 Subject: [PATCH 075/368] refactor: Make code more simple in maclaurin_series (#7522) --- maths/maclaurin_series.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index 57edc90bf..a2619d4e6 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -52,8 +52,7 @@ def maclaurin_sin(theta: float, accuracy: int = 30) -> float: div = theta // (2 * pi) theta -= 2 * div * pi return sum( - (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) - for r in range(accuracy) + (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy) ) @@ -104,12 +103,14 @@ def maclaurin_cos(theta: float, accuracy: int = 30) -> float: theta = float(theta) div = theta // (2 * pi) theta -= 2 * div * pi - return sum( - (((-1) ** r) * ((theta ** (2 * r)) / factorial(2 * r))) for r in range(accuracy) - ) + return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy)) if __name__ == "__main__": + import doctest + + doctest.testmod() + print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) From 1bbb0092f3fc311fac9e56e12c1fa223dbe16465 Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Sun, 23 Oct 2022 16:47:30 +0530 Subject: [PATCH 076/368] Add signum function (#7526) * Add signum function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add typehints for functions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update signum.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/signum.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 maths/signum.py diff --git a/maths/signum.py b/maths/signum.py new file mode 100644 index 000000000..148f93176 --- /dev/null +++ b/maths/signum.py @@ -0,0 +1,34 @@ +""" +Signum function -- https://en.wikipedia.org/wiki/Sign_function +""" + + +def signum(num: float) -> int: + """ + Applies signum function on the number + + >>> signum(-10) + -1 + >>> signum(10) + 1 + >>> signum(0) + 0 + """ + if num < 0: + return -1 + return 1 if num else 0 + + +def test_signum() -> None: + """ + Tests the signum function + """ + assert signum(5) == 1 + assert signum(-5) == -1 + assert signum(0) == 0 + + +if __name__ == "__main__": + print(signum(12)) + print(signum(-12)) + print(signum(0)) From b092f9979f5afd3bd86cb46e891eb1f318b351d9 Mon Sep 17 00:00:00 2001 From: Modassir Afzal <60973906+Moddy2024@users.noreply.github.com> Date: Sun, 23 Oct 2022 17:17:19 +0530 Subject: [PATCH 077/368] XGB Regressor (#7107) * Fixes: #{6551} * Fixes: #{6551} * Update xgboostclassifier.py * Delete xgboostclassifier.py * Update xgboostregressor.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #{6551} * Fixes : {#6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: {#6551] * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostregressor.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci * Update xgboostregressor.py * Update xgboostregressor.py * Fixes: { #6551} * Update xgboostregressor.py * Fixes: { #6551} * Fixes: { #6551} * Update and rename xgboostregressor.py to xgboost_regressor.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- machine_learning/xgboost_regressor.py | 64 +++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 machine_learning/xgboost_regressor.py diff --git a/machine_learning/xgboost_regressor.py b/machine_learning/xgboost_regressor.py new file mode 100644 index 000000000..023984fc1 --- /dev/null +++ b/machine_learning/xgboost_regressor.py @@ -0,0 +1,64 @@ +# XGBoost Regressor Example +import numpy as np +from sklearn.datasets import fetch_california_housing +from sklearn.metrics import mean_absolute_error, mean_squared_error +from sklearn.model_selection import train_test_split +from xgboost import XGBRegressor + + +def data_handling(data: dict) -> tuple: + # Split dataset into features and target. Data is features. + """ + >>> data_handling(( + ... {'data':'[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]' + ... ,'target':([4.526])})) + ('[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]', [4.526]) + """ + return (data["data"], data["target"]) + + +def xgboost( + features: np.ndarray, target: np.ndarray, test_features: np.ndarray +) -> np.ndarray: + """ + >>> xgboost(np.array([[ 2.3571 , 52. , 6.00813008, 1.06775068, + ... 907. , 2.45799458, 40.58 , -124.26]]),np.array([1.114]), + ... np.array([[1.97840000e+00, 3.70000000e+01, 4.98858447e+00, 1.03881279e+00, + ... 1.14300000e+03, 2.60958904e+00, 3.67800000e+01, -1.19780000e+02]])) + array([[1.1139996]], dtype=float32) + """ + xgb = XGBRegressor(verbosity=0, random_state=42) + xgb.fit(features, target) + # Predict target for test data + predictions = xgb.predict(test_features) + predictions = predictions.reshape(len(predictions), 1) + return predictions + + +def main() -> None: + """ + >>> main() + Mean Absolute Error : 0.30957163379906033 + Mean Square Error : 0.22611560196662744 + + The URL for this algorithm + https://xgboost.readthedocs.io/en/stable/ + California house price dataset is used to demonstrate the algorithm. 
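+    Each row of the dataset has eight features; scikit-learn's fetch_california_housing
+    returns them in the order MedInc, HouseAge, AveRooms, AveBedrms, Population,
+    AveOccup, Latitude, Longitude, which is the order used in the raw arrays shown in
+    the doctests earlier in this file.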
+ """ + # Load California house price dataset + california = fetch_california_housing() + data, target = data_handling(california) + x_train, x_test, y_train, y_test = train_test_split( + data, target, test_size=0.25, random_state=1 + ) + predictions = xgboost(x_train, y_train, x_test) + # Error printing + print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}") + print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}") + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + main() From a3383ce3fd6bc30b01681503a4307df2462c8bd4 Mon Sep 17 00:00:00 2001 From: Pradyumn Singh Rahar Date: Sun, 23 Oct 2022 17:56:40 +0530 Subject: [PATCH 078/368] Reduced Time Complexity to O(sqrt(n)) (#7429) * Reduced Time Complexity to O(sqrt(n)) * Added testmod * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/factors.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/maths/factors.py b/maths/factors.py index e2fdc4063..ae2e5316c 100644 --- a/maths/factors.py +++ b/maths/factors.py @@ -1,3 +1,7 @@ +from doctest import testmod +from math import sqrt + + def factors_of_a_number(num: int) -> list: """ >>> factors_of_a_number(1) @@ -9,10 +13,22 @@ def factors_of_a_number(num: int) -> list: >>> factors_of_a_number(-24) [] """ - return [i for i in range(1, num + 1) if num % i == 0] + facs: list[int] = [] + if num < 1: + return facs + facs.append(1) + if num == 1: + return facs + facs.append(num) + for i in range(2, int(sqrt(num)) + 1): + if num % i == 0: # If i is a factor of num + facs.append(i) + d = num // i # num//i is the other factor of num + if d != i: # If d and i are distinct + facs.append(d) # we have found another factor + facs.sort() + return facs if __name__ == "__main__": - num = int(input("Enter a number to find its factors: ")) - factors = factors_of_a_number(num) - print(f"{num} has {len(factors)} factors: {', '.join(str(f) for f in factors)}") + testmod(name="factors_of_a_number", verbose=True) From a5362799a5e73e199cda7f1acec71d1e97addc97 Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Sun, 23 Oct 2022 08:54:27 -0400 Subject: [PATCH 079/368] Create superdense_coding.py (#7349) * Create superdense_coding.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- quantum/superdense_coding.py | 102 +++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 quantum/superdense_coding.py diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py new file mode 100644 index 000000000..c8eda3811 --- /dev/null +++ b/quantum/superdense_coding.py @@ -0,0 +1,102 @@ +""" +Build the superdense coding protocol. This quantum +circuit can send two classical bits using one quantum +bit. This circuit is designed using the Qiskit +framework. This experiment run in IBM Q simulator +with 1000 shots. +. 
+References: +https://qiskit.org/textbook/ch-algorithms/superdense-coding.html +https://en.wikipedia.org/wiki/Superdense_coding +""" + +import math + +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Counts: + """ + The input refer to the classical message + that you wants to send. {'00','01','10','11'} + result for default values: {11: 1000} + ┌───┐ ┌───┐ + qr_0: ─────┤ X ├──────────┤ X ├───── + ┌───┐└─┬─┘┌───┐┌───┐└─┬─┘┌───┐ + qr_1: ┤ H ├──■──┤ X ├┤ Z ├──■──┤ H ├ + └───┘ └───┘└───┘ └───┘ + cr: 2/══════════════════════════════ + Args: + bit_1: bit 1 of classical information to send. + bit_2: bit 2 of classical information to send. + Returns: + qiskit.result.counts.Counts: counts of send state. + >>> superdense_coding(0,0) + {'00': 1000} + >>> superdense_coding(0,1) + {'01': 1000} + >>> superdense_coding(-1,0) + Traceback (most recent call last): + ... + ValueError: inputs must be positive. + >>> superdense_coding(1,'j') + Traceback (most recent call last): + ... + TypeError: inputs must be integers. + >>> superdense_coding(1,0.5) + Traceback (most recent call last): + ... + ValueError: inputs must be exact integers. + >>> superdense_coding(2,1) + Traceback (most recent call last): + ... + ValueError: inputs must be less or equal to 1. + """ + if (type(bit_1) == str) or (type(bit_2) == str): + raise TypeError("inputs must be integers.") + if (bit_1 < 0) or (bit_2 < 0): + raise ValueError("inputs must be positive.") + if (math.floor(bit_1) != bit_1) or (math.floor(bit_2) != bit_2): + raise ValueError("inputs must be exact integers.") + if (bit_1 > 1) or (bit_2 > 1): + raise ValueError("inputs must be less or equal to 1.") + + # build registers + qr = QuantumRegister(2, "qr") + cr = ClassicalRegister(2, "cr") + + quantum_circuit = QuantumCircuit(qr, cr) + + # entanglement the qubits + quantum_circuit.h(1) + quantum_circuit.cx(1, 0) + + # send the information + c_information = str(bit_1) + str(bit_2) + + if c_information == "11": + quantum_circuit.x(1) + quantum_circuit.z(1) + elif c_information == "10": + quantum_circuit.z(1) + elif c_information == "01": + quantum_circuit.x(1) + else: + quantum_circuit.i(1) + + # unentangled the circuit + quantum_circuit.cx(1, 0) + quantum_circuit.h(1) + + # measure the circuit + quantum_circuit.measure(qr, cr) + + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=1000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print(f"Counts for classical state send: {superdense_coding(1,1)}") From d5f322f5764f42fc846fbcdaefac238a9ab62c7f Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 14:06:12 +0100 Subject: [PATCH 080/368] fix: Replace deprecated `qasm_simulator` with `aer_simulator` (#7308) (#7556) --- quantum/superdense_coding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py index c8eda3811..10ebc2d35 100644 --- a/quantum/superdense_coding.py +++ b/quantum/superdense_coding.py @@ -92,7 +92,7 @@ def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Co # measure the circuit quantum_circuit.measure(qr, cr) - backend = Aer.get_backend("qasm_simulator") + backend = Aer.get_backend("aer_simulator") job = execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit) From 
81ccf54c75edbf52cd2b5bd4e139cba3b6e5e5ab Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 23 Oct 2022 15:09:25 +0200 Subject: [PATCH 081/368] Rename xgboostclassifier.py to xgboost_classifier.py (#7550) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 7 +++++-- .../{xgboostclassifier.py => xgboost_classifier.py} | 0 2 files changed, 5 insertions(+), 2 deletions(-) rename machine_learning/{xgboostclassifier.py => xgboost_classifier.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 70644d063..3fd1a3c38 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -461,7 +461,8 @@ * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) - * [Xgboostclassifier](machine_learning/xgboostclassifier.py) + * [Xgboost Classifier](machine_learning/xgboost_classifier.py) + * [Xgboost Regressor](machine_learning/xgboost_regressor.py) ## Maths * [3N Plus 1](maths/3n_plus_1.py) @@ -536,7 +537,7 @@ * [Line Length](maths/line_length.py) * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) - * [Maclaurin Sin](maths/maclaurin_sin.py) + * [Maclaurin Series](maths/maclaurin_series.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) @@ -582,6 +583,7 @@ * [P Series](maths/series/p_series.py) * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) + * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) @@ -590,6 +592,7 @@ * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) + * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) * [Sylvester Sequence](maths/sylvester_sequence.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) diff --git a/machine_learning/xgboostclassifier.py b/machine_learning/xgboost_classifier.py similarity index 100% rename from machine_learning/xgboostclassifier.py rename to machine_learning/xgboost_classifier.py From 0f06a0b5ff43c4cfa98db33926d21ce688b69a10 Mon Sep 17 00:00:00 2001 From: Sagar Giri Date: Sun, 23 Oct 2022 23:35:27 +0900 Subject: [PATCH 082/368] Add web program to fetch top 10 real time billionaires using the forbes API. (#7538) * Add web program to fetch top 10 realtime billioners using forbes API. * Provide return type to function. * Use rich for tables and minor refactors. * Fix tiny typo. * Add the top {LIMIT} in rich table title. * Update web_programming/get_top_billioners.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change the API path. 
* Update get_top_billioners.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- requirements.txt | 1 + web_programming/get_top_billioners.py | 84 +++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 web_programming/get_top_billioners.py diff --git a/requirements.txt b/requirements.txt index 25d2b4ef9..9ffe784c9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ pillow projectq qiskit requests +rich scikit-fuzzy sklearn statsmodels diff --git a/web_programming/get_top_billioners.py b/web_programming/get_top_billioners.py new file mode 100644 index 000000000..514ea1db9 --- /dev/null +++ b/web_programming/get_top_billioners.py @@ -0,0 +1,84 @@ +""" +CAUTION: You may get a json.decoding error. This works for some of us but fails for others. +""" + +from datetime import datetime + +import requests +from rich import box +from rich import console as rich_console +from rich import table as rich_table + +LIMIT = 10 +TODAY = datetime.now() + +API_URL = ( + "https://www.forbes.com/forbesapi/person/rtb/0/position/true.json" + "?fields=personName,gender,source,countryOfCitizenship,birthDate,finalWorth" + f"&limit={LIMIT}" +) + + +def calculate_age(unix_date: int) -> str: + """Calculates age from given unix time format. + + Returns: + Age as string + + >>> calculate_age(-657244800000) + '73' + >>> calculate_age(46915200000) + '51' + """ + birthdate = datetime.fromtimestamp(unix_date / 1000).date() + return str( + TODAY.year + - birthdate.year + - ((TODAY.month, TODAY.day) < (birthdate.month, birthdate.day)) + ) + + +def get_forbes_real_time_billionaires() -> list[dict[str, str]]: + """Get top 10 realtime billionaires using forbes API. + + Returns: + List of top 10 realtime billionaires data. + """ + response_json = requests.get(API_URL).json() + return [ + { + "Name": person["personName"], + "Source": person["source"], + "Country": person["countryOfCitizenship"], + "Gender": person["gender"], + "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", + "Age": calculate_age(person["birthDate"]), + } + for person in response_json["personList"]["personsLists"] + ] + + +def display_billionaires(forbes_billionaires: list[dict[str, str]]) -> None: + """Display Forbes real time billionaires in a rich table. 
+ + Args: + forbes_billionaires (list): Forbes top 10 real time billionaires + """ + + table = rich_table.Table( + title=f"Forbes Top {LIMIT} Real Time Billionaires at {TODAY:%Y-%m-%d %H:%M}", + style="green", + highlight=True, + box=box.SQUARE, + ) + for key in forbes_billionaires[0]: + table.add_column(key) + + for billionaire in forbes_billionaires: + table.add_row(*billionaire.values()) + + rich_console.Console().print(table) + + +if __name__ == "__main__": + display_billionaires(get_forbes_real_time_billionaires()) From 393b9605259fe19e03bdaac2b0866151e1a2afc2 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 15:36:10 +0100 Subject: [PATCH 083/368] refactor: Replace doctest traceback with `...` (#7558) --- conversions/pressure_conversions.py | 6 +----- electronics/carrier_concentration.py | 8 ++++---- electronics/electric_power.py | 6 +++--- maths/nevilles_method.py | 3 +-- 4 files changed, 9 insertions(+), 14 deletions(-) diff --git a/conversions/pressure_conversions.py b/conversions/pressure_conversions.py index 2018080b9..e0cd18d23 100644 --- a/conversions/pressure_conversions.py +++ b/conversions/pressure_conversions.py @@ -56,11 +56,7 @@ def pressure_conversion(value: float, from_type: str, to_type: str) -> float: 0.019336718261000002 >>> pressure_conversion(4, "wrongUnit", "atm") Traceback (most recent call last): - File "/usr/lib/python3.8/doctest.py", line 1336, in __run - exec(compile(example.source, filename, "single", - File "", line 1, in - pressure_conversion(4, "wrongUnit", "atm") - File "", line 67, in pressure_conversion + ... ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: atm, pascal, bar, kilopascal, megapascal, psi, inHg, torr """ diff --git a/electronics/carrier_concentration.py b/electronics/carrier_concentration.py index 03482f1e3..1fb9f2430 100644 --- a/electronics/carrier_concentration.py +++ b/electronics/carrier_concentration.py @@ -25,19 +25,19 @@ def carrier_concentration( ('hole_conc', 1440.0) >>> carrier_concentration(electron_conc=1000, hole_conc=400, intrinsic_conc=1200) Traceback (most recent call last): - File "", line 37, in + ... ValueError: You cannot supply more or less than 2 values >>> carrier_concentration(electron_conc=-1000, hole_conc=0, intrinsic_conc=1200) Traceback (most recent call last): - File "", line 40, in + ... ValueError: Electron concentration cannot be negative in a semiconductor >>> carrier_concentration(electron_conc=0, hole_conc=-400, intrinsic_conc=1200) Traceback (most recent call last): - File "", line 44, in + ... ValueError: Hole concentration cannot be negative in a semiconductor >>> carrier_concentration(electron_conc=0, hole_conc=400, intrinsic_conc=-1200) Traceback (most recent call last): - File "", line 48, in + ... ValueError: Intrinsic concentration cannot be negative in a semiconductor """ if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1: diff --git a/electronics/electric_power.py b/electronics/electric_power.py index ac673d7e3..e59795601 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -17,15 +17,15 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: result(name='power', value=6.0) >>> electric_power(voltage=2, current=4, power=2) Traceback (most recent call last): - File "", line 15, in + ... ValueError: Only one argument must be 0 >>> electric_power(voltage=0, current=0, power=2) Traceback (most recent call last): - File "", line 19, in + ... 
ValueError: Only one argument must be 0 >>> electric_power(voltage=0, current=2, power=-4) Traceback (most recent call last): - File "", line 23, in >> electric_power(voltage=2.2, current=2.2, power=0) result(name='power', value=4.84) diff --git a/maths/nevilles_method.py b/maths/nevilles_method.py index 5583e4269..1f48b43fb 100644 --- a/maths/nevilles_method.py +++ b/maths/nevilles_method.py @@ -31,8 +31,7 @@ def neville_interpolate(x_points: list, y_points: list, x0: int) -> list: 104.0 >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), '') Traceback (most recent call last): - File "", line 1, in - ... + ... TypeError: unsupported operand type(s) for -: 'str' and 'int' """ n = len(x_points) From 10b6e7a658c4664ce823cc1d0f159cd717b506db Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 16:14:45 +0100 Subject: [PATCH 084/368] fix: Fix line too long in doctest (#7566) --- web_programming/get_top_billioners.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web_programming/get_top_billioners.py b/web_programming/get_top_billioners.py index 514ea1db9..6a8054e26 100644 --- a/web_programming/get_top_billioners.py +++ b/web_programming/get_top_billioners.py @@ -1,5 +1,6 @@ """ -CAUTION: You may get a json.decoding error. This works for some of us but fails for others. +CAUTION: You may get a json.decoding error. +This works for some of us but fails for others. """ from datetime import datetime From 0dc95c0a6be06f33153e8fcd84d2c854dac7a353 Mon Sep 17 00:00:00 2001 From: SwayamSahu <91021799+SwayamSahu@users.noreply.github.com> Date: Sun, 23 Oct 2022 21:30:59 +0530 Subject: [PATCH 085/368] Update comments in check_pangram.py script (#7564) * Update comments in check_pangram.py script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename check_pangram.py to is_pangram.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/check_pangram.py | 74 ------------------------------- strings/is_pangram.py | 95 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+), 74 deletions(-) delete mode 100644 strings/check_pangram.py create mode 100644 strings/is_pangram.py diff --git a/strings/check_pangram.py b/strings/check_pangram.py deleted file mode 100644 index 81384bfd4..000000000 --- a/strings/check_pangram.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -wiki: https://en.wikipedia.org/wiki/Pangram -""" - - -def check_pangram( - input_str: str = "The quick brown fox jumps over the lazy dog", -) -> bool: - """ - A Pangram String contains all the alphabets at least once. 
- >>> check_pangram("The quick brown fox jumps over the lazy dog") - True - >>> check_pangram("Waltz, bad nymph, for quick jigs vex.") - True - >>> check_pangram("Jived fox nymph grabs quick waltz.") - True - >>> check_pangram("My name is Unknown") - False - >>> check_pangram("The quick brown fox jumps over the la_y dog") - False - >>> check_pangram() - True - """ - frequency = set() - input_str = input_str.replace( - " ", "" - ) # Replacing all the Whitespaces in our sentence - for alpha in input_str: - if "a" <= alpha.lower() <= "z": - frequency.add(alpha.lower()) - - return True if len(frequency) == 26 else False - - -def check_pangram_faster( - input_str: str = "The quick brown fox jumps over the lazy dog", -) -> bool: - """ - >>> check_pangram_faster("The quick brown fox jumps over the lazy dog") - True - >>> check_pangram_faster("Waltz, bad nymph, for quick jigs vex.") - True - >>> check_pangram_faster("Jived fox nymph grabs quick waltz.") - True - >>> check_pangram_faster("The quick brown fox jumps over the la_y dog") - False - >>> check_pangram_faster() - True - """ - flag = [False] * 26 - for char in input_str: - if char.islower(): - flag[ord(char) - 97] = True - elif char.isupper(): - flag[ord(char) - 65] = True - return all(flag) - - -def benchmark() -> None: - """ - Benchmark code comparing different version. - """ - from timeit import timeit - - setup = "from __main__ import check_pangram, check_pangram_faster" - print(timeit("check_pangram()", setup=setup)) - print(timeit("check_pangram_faster()", setup=setup)) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - benchmark() diff --git a/strings/is_pangram.py b/strings/is_pangram.py new file mode 100644 index 000000000..c8b894b7e --- /dev/null +++ b/strings/is_pangram.py @@ -0,0 +1,95 @@ +""" +wiki: https://en.wikipedia.org/wiki/Pangram +""" + + +def is_pangram( + input_str: str = "The quick brown fox jumps over the lazy dog", +) -> bool: + """ + A Pangram String contains all the alphabets at least once. 
+ >>> is_pangram("The quick brown fox jumps over the lazy dog") + True + >>> is_pangram("Waltz, bad nymph, for quick jigs vex.") + True + >>> is_pangram("Jived fox nymph grabs quick waltz.") + True + >>> is_pangram("My name is Unknown") + False + >>> is_pangram("The quick brown fox jumps over the la_y dog") + False + >>> is_pangram() + True + """ + # Declare frequency as a set to have unique occurrences of letters + frequency = set() + + # Replace all the whitespace in our sentence + input_str = input_str.replace(" ", "") + for alpha in input_str: + if "a" <= alpha.lower() <= "z": + frequency.add(alpha.lower()) + return len(frequency) == 26 + + +def is_pangram_faster( + input_str: str = "The quick brown fox jumps over the lazy dog", +) -> bool: + """ + >>> is_pangram_faster("The quick brown fox jumps over the lazy dog") + True + >>> is_pangram_faster("Waltz, bad nymph, for quick jigs vex.") + True + >>> is_pangram_faster("Jived fox nymph grabs quick waltz.") + True + >>> is_pangram_faster("The quick brown fox jumps over the la_y dog") + False + >>> is_pangram_faster() + True + """ + flag = [False] * 26 + for char in input_str: + if char.islower(): + flag[ord(char) - 97] = True + elif char.isupper(): + flag[ord(char) - 65] = True + return all(flag) + + +def is_pangram_fastest( + input_str: str = "The quick brown fox jumps over the lazy dog", +) -> bool: + """ + >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog") + True + >>> is_pangram_fastest("Waltz, bad nymph, for quick jigs vex.") + True + >>> is_pangram_fastest("Jived fox nymph grabs quick waltz.") + True + >>> is_pangram_fastest("The quick brown fox jumps over the la_y dog") + False + >>> is_pangram_fastest() + True + """ + return len({char for char in input_str.lower() if char.isalpha()}) == 26 + + +def benchmark() -> None: + """ + Benchmark code comparing different version. 
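+    is_pangram_fastest does a single pass that builds one set of the alphabetic
+    characters, which appears to be why it comes out quickest in the sample timings
+    recorded in the comments below.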
+ """ + from timeit import timeit + + setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" + print(timeit("is_pangram()", setup=setup)) + print(timeit("is_pangram_faster()", setup=setup)) + print(timeit("is_pangram_fastest()", setup=setup)) + # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 + # 5.036091582966037, 2.644472333951853, 1.8869528750656173 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + benchmark() From b8b63469efff57b8cb3c6e4aec4279c8e864b8db Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 23 Oct 2022 18:12:49 +0200 Subject: [PATCH 086/368] My favorite palindrome (#7455) * My favorite palindrome * updating DIRECTORY.md * Update is_palindrome.py * Update is_palindrome.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update strings/is_palindrome.py Co-authored-by: Caeden Perelli-Harris * Update is_palindrome.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- strings/is_palindrome.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py index 4776a5fc2..5758af0ce 100644 --- a/strings/is_palindrome.py +++ b/strings/is_palindrome.py @@ -1,9 +1,8 @@ def is_palindrome(s: str) -> bool: """ - Determine whether the string is palindrome - :param s: - :return: Boolean - >>> is_palindrome("a man a plan a canal panama".replace(" ", "")) + Determine if the string s is a palindrome. + + >>> is_palindrome("A man, A plan, A canal -- Panama!") True >>> is_palindrome("Hello") False @@ -14,15 +13,15 @@ def is_palindrome(s: str) -> bool: >>> is_palindrome("Mr. Owl ate my metal worm?") True """ - # Since Punctuation, capitalization, and spaces are usually ignored while checking - # Palindrome, we first remove them from our string. - s = "".join([character for character in s.lower() if character.isalnum()]) + # Since punctuation, capitalization, and spaces are often ignored while checking + # palindromes, we first remove them from our string. 
+ s = "".join(character for character in s.lower() if character.isalnum()) return s == s[::-1] if __name__ == "__main__": - s = input("Enter string to determine whether its palindrome or not: ").strip() + s = input("Please enter a string to see if it is a palindrome: ") if is_palindrome(s): - print("Given string is palindrome") + print(f"'{s}' is a palindrome.") else: - print("Given string is not palindrome") + print(f"'{s}' is not a palindrome.") From 39a99b46f5e9b2c56951c22189a8ac3ea0730b01 Mon Sep 17 00:00:00 2001 From: Laukik Chahande <103280327+luciferx48@users.noreply.github.com> Date: Sun, 23 Oct 2022 22:56:22 +0530 Subject: [PATCH 087/368] check whether integer is even or odd using bit manupulation (#7099) * even_or_not file added * Updated DIRECTORY.md * modified DIRECTORY.md * Update bit_manipulation/even_or_not.py * updating DIRECTORY.md * Rename even_or_not.py to is_even.py * updating DIRECTORY.md Co-authored-by: luciferx48 Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + bit_manipulation/is_even.py | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 bit_manipulation/is_even.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3fd1a3c38..10e78a92c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -45,6 +45,7 @@ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) + * [Is Even](bit_manipulation/is_even.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py new file mode 100644 index 000000000..b7b0841a1 --- /dev/null +++ b/bit_manipulation/is_even.py @@ -0,0 +1,37 @@ +def is_even(number: int) -> bool: + """ + return true if the input integer is even + Explanation: Lets take a look at the following deicmal to binary conversions + 2 => 10 + 14 => 1110 + 100 => 1100100 + 3 => 11 + 13 => 1101 + 101 => 1100101 + from the above examples we can observe that + for all the odd integers there is always 1 set bit at the end + also, 1 in binary can be represented as 001, 00001, or 0000001 + so for any odd integer n => n&1 is always equlas 1 else the integer is even + + >>> is_even(1) + False + >>> is_even(4) + True + >>> is_even(9) + False + >>> is_even(15) + False + >>> is_even(40) + True + >>> is_even(100) + True + >>> is_even(101) + False + """ + return number & 1 == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e2a83b3bc66630cb2667375fba9de5c5baac3aca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nadirhan=20=C5=9Eahin?= Date: Sun, 23 Oct 2022 22:28:11 +0300 Subject: [PATCH 088/368] Update knapsack.py (#7271) * Update knapsack.py * Update dynamic_programming/knapsack.py Co-authored-by: Christian Clauss * Update knapsack.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/knapsack.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 093e15f49..b12d30313 100644 --- a/dynamic_programming/knapsack.py +++ 
b/dynamic_programming/knapsack.py @@ -1,9 +1,9 @@ """ Given weights and values of n items, put these items in a knapsack of - capacity W to get the maximum total value in the knapsack. +capacity W to get the maximum total value in the knapsack. Note that only the integer weights 0-1 knapsack problem is solvable - using dynamic programming. +using dynamic programming. """ @@ -27,7 +27,7 @@ def mf_knapsack(i, wt, val, j): def knapsack(w, wt, val, n): - dp = [[0 for i in range(w + 1)] for j in range(n + 1)] + dp = [[0] * (w + 1) for _ in range(n + 1)] for i in range(1, n + 1): for w_ in range(1, w + 1): @@ -108,7 +108,7 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set): dp: list of list, the table of a solved integer weight dynamic programming problem wt: list or tuple, the vector of weights of the items - i: int, the index of the item under consideration + i: int, the index of the item under consideration j: int, the current possible maximum weight optimal_set: set, the optimal subset so far. This gets modified by the function. @@ -136,7 +136,7 @@ if __name__ == "__main__": wt = [4, 3, 2, 3] n = 4 w = 6 - f = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)] + f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] optimal_solution, _ = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w From bd490614a69cc9cdff367cb4a1775dd063c6e617 Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Mon, 24 Oct 2022 15:43:01 +0530 Subject: [PATCH 089/368] Add function for AND gate (#7593) --- boolean_algebra/and_gate.py | 48 +++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 boolean_algebra/and_gate.py diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py new file mode 100644 index 000000000..cbbcfde79 --- /dev/null +++ b/boolean_algebra/and_gate.py @@ -0,0 +1,48 @@ +""" +An AND Gate is a logic gate in boolean algebra which results to 1 (True) if both the +inputs are 1, and 0 (False) otherwise. + +Following is the truth table of an AND Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 1 | + ------------------------------ + +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def and_gate(input_1: int, input_2: int) -> int: + """ + Calculate AND of the input values + + >>> and_gate(0, 0) + 0 + >>> and_gate(0, 1) + 0 + >>> and_gate(1, 0) + 0 + >>> and_gate(1, 1) + 1 + """ + return int((input_1, input_2).count(0) == 0) + + +def test_and_gate() -> None: + """ + Tests the and_gate function + """ + assert and_gate(0, 0) == 0 + assert and_gate(0, 1) == 0 + assert and_gate(1, 0) == 0 + assert and_gate(1, 1) == 1 + + +if __name__ == "__main__": + print(and_gate(0, 0)) + print(and_gate(0, 1)) + print(and_gate(1, 1)) From bb078541dd030b4957ee1b5ac87b7a31bf1a7235 Mon Sep 17 00:00:00 2001 From: JatinR05 <71865805+JatinR05@users.noreply.github.com> Date: Mon, 24 Oct 2022 15:43:39 +0530 Subject: [PATCH 090/368] Update count_number_of_one_bits.py (#7589) * Update count_number_of_one_bits.py removed the modulo operator as it is very time consuming in comparison to the and operator * Update count_number_of_one_bits.py Updated with the timeit library to compare. Moreover I have updated my code which helps us in reaching the output comparatively faster. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bit_manipulation/count_number_of_one_bits.py Co-authored-by: Christian Clauss * Update count_number_of_one_bits.py Updated the code * Update count_number_of_one_bits.py Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Run the tests before running the benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * consistently Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/count_number_of_one_bits.py | 79 +++++++++++++++++--- 1 file changed, 68 insertions(+), 11 deletions(-) diff --git a/bit_manipulation/count_number_of_one_bits.py b/bit_manipulation/count_number_of_one_bits.py index 51fd2b630..a1687503a 100644 --- a/bit_manipulation/count_number_of_one_bits.py +++ b/bit_manipulation/count_number_of_one_bits.py @@ -1,34 +1,91 @@ -def get_set_bits_count(number: int) -> int: +from timeit import timeit + + +def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int: """ Count the number of set bits in a 32 bit integer - >>> get_set_bits_count(25) + >>> get_set_bits_count_using_brian_kernighans_algorithm(25) 3 - >>> get_set_bits_count(37) + >>> get_set_bits_count_using_brian_kernighans_algorithm(37) 3 - >>> get_set_bits_count(21) + >>> get_set_bits_count_using_brian_kernighans_algorithm(21) 3 - >>> get_set_bits_count(58) + >>> get_set_bits_count_using_brian_kernighans_algorithm(58) 4 - >>> get_set_bits_count(0) + >>> get_set_bits_count_using_brian_kernighans_algorithm(0) 0 - >>> get_set_bits_count(256) + >>> get_set_bits_count_using_brian_kernighans_algorithm(256) 1 - >>> get_set_bits_count(-1) + >>> get_set_bits_count_using_brian_kernighans_algorithm(-1) Traceback (most recent call last): ... - ValueError: the value of input must be positive + ValueError: the value of input must not be negative """ if number < 0: - raise ValueError("the value of input must be positive") + raise ValueError("the value of input must not be negative") + result = 0 + while number: + number &= number - 1 + result += 1 + return result + + +def get_set_bits_count_using_modulo_operator(number: int) -> int: + """ + Count the number of set bits in a 32 bit integer + >>> get_set_bits_count_using_modulo_operator(25) + 3 + >>> get_set_bits_count_using_modulo_operator(37) + 3 + >>> get_set_bits_count_using_modulo_operator(21) + 3 + >>> get_set_bits_count_using_modulo_operator(58) + 4 + >>> get_set_bits_count_using_modulo_operator(0) + 0 + >>> get_set_bits_count_using_modulo_operator(256) + 1 + >>> get_set_bits_count_using_modulo_operator(-1) + Traceback (most recent call last): + ... + ValueError: the value of input must not be negative + """ + if number < 0: + raise ValueError("the value of input must not be negative") result = 0 while number: if number % 2 == 1: result += 1 - number = number >> 1 + number >>= 1 return result +def benchmark() -> None: + """ + Benchmark code for comparing 2 functions, with different length int values. + Brian Kernighan's algorithm is consistently faster than using modulo_operator. 
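+    The speed difference has a simple explanation: number &= number - 1 clears the
+    lowest set bit, so the Kernighan loop runs once per set bit, while the modulo
+    version shifts once per bit. For 25 (0b11001) that is 3 iterations instead of 5.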
+ """ + + def do_benchmark(number: int) -> None: + setup = "import __main__ as z" + print(f"Benchmark when {number = }:") + print(f"{get_set_bits_count_using_modulo_operator(number) = }") + timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup) + print(f"timeit() runs in {timing} seconds") + print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }") + timing = timeit( + "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", + setup=setup, + ) + print(f"timeit() runs in {timing} seconds") + + for number in (25, 37, 58, 0): + do_benchmark(number) + print() + + if __name__ == "__main__": import doctest doctest.testmod() + benchmark() From d8ab8a0a0ebcb05783c93fe4ed04a940fc0b857f Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Mon, 24 Oct 2022 13:33:56 +0200 Subject: [PATCH 091/368] Add Spain National ID validator (#7574) (#7575) * Add Spain National ID validator (#7574) * is_spain_national_id() * Update is_spain_national_id.py * Some systems add a dash Co-authored-by: Christian Clauss --- strings/is_spain_national_id.py | 72 +++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 strings/is_spain_national_id.py diff --git a/strings/is_spain_national_id.py b/strings/is_spain_national_id.py new file mode 100644 index 000000000..67f49755f --- /dev/null +++ b/strings/is_spain_national_id.py @@ -0,0 +1,72 @@ +NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter" +LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE" + + +def is_spain_national_id(spanish_id: str) -> bool: + """ + Spain National Id is a string composed by 8 numbers plus a letter + The letter in fact is not part of the ID, it acts as a validator, + checking you didn't do a mistake when entering it on a system or + are giving a fake one. + + https://en.wikipedia.org/wiki/Documento_Nacional_de_Identidad_(Spain)#Number + + >>> is_spain_national_id("12345678Z") + True + >>> is_spain_national_id("12345678z") # It is case-insensitive + True + >>> is_spain_national_id("12345678x") + False + >>> is_spain_national_id("12345678I") + False + >>> is_spain_national_id("12345678-Z") # Some systems add a dash + True + >>> is_spain_national_id("12345678") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("123456709") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("1234567--Z") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("1234Z") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("1234ZzZZ") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id(12345678) + Traceback (most recent call last): + ... 
+ TypeError: Expected string as input, found int + """ + + if not isinstance(spanish_id, str): + raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}") + + spanish_id_clean = spanish_id.replace("-", "").upper() + if len(spanish_id_clean) != 9: + raise ValueError(NUMBERS_PLUS_LETTER) + + try: + number = int(spanish_id_clean[0:8]) + letter = spanish_id_clean[8] + except ValueError as ex: + raise ValueError(NUMBERS_PLUS_LETTER) from ex + + if letter.isdigit(): + raise ValueError(NUMBERS_PLUS_LETTER) + + return letter == LOOKUP_LETTERS[number % 23] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a041b64f7aaf7dd54f154ba1fb5cd10e3110c1eb Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 24 Oct 2022 16:29:49 +0300 Subject: [PATCH 092/368] feat: add Project Euler problem 073 solution 1 (#6273) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_073/__init__.py | 0 project_euler/problem_073/sol1.py | 46 +++++++++++++++++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 project_euler/problem_073/__init__.py create mode 100644 project_euler/problem_073/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 10e78a92c..16e6b7ae3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -839,6 +839,8 @@ * Problem 072 * [Sol1](project_euler/problem_072/sol1.py) * [Sol2](project_euler/problem_072/sol2.py) + * Problem 073 + * [Sol1](project_euler/problem_073/sol1.py) * Problem 074 * [Sol1](project_euler/problem_074/sol1.py) * [Sol2](project_euler/problem_074/sol2.py) diff --git a/project_euler/problem_073/__init__.py b/project_euler/problem_073/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_073/sol1.py b/project_euler/problem_073/sol1.py new file mode 100644 index 000000000..2b66b7d87 --- /dev/null +++ b/project_euler/problem_073/sol1.py @@ -0,0 +1,46 @@ +""" +Project Euler Problem 73: https://projecteuler.net/problem=73 + +Consider the fraction, n/d, where n and d are positive integers. 
+If n int: + """ + Returns number of fractions lie between 1/3 and 1/2 in the sorted set + of reduced proper fractions for d ≤ max_d + + >>> solution(4) + 0 + + >>> solution(5) + 1 + + >>> solution(8) + 3 + """ + + fractions_number = 0 + for d in range(max_d + 1): + for n in range(d // 3 + 1, (d + 1) // 2): + if gcd(n, d) == 1: + fractions_number += 1 + return fractions_number + + +if __name__ == "__main__": + print(f"{solution() = }") From d407476531dd85db79e58aa2dd13d3b3031d8185 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 25 Oct 2022 03:57:03 +0300 Subject: [PATCH 093/368] fix: increase str conversion limit where required (#7604) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +++++++- project_euler/problem_104/{sol.py.FIXME => sol1.py} | 10 +++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) rename project_euler/problem_104/{sol.py.FIXME => sol1.py} (95%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 16e6b7ae3..3e722a878 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -55,6 +55,7 @@ * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra + * [And Gate](boolean_algebra/and_gate.py) * [Norgate](boolean_algebra/norgate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) @@ -876,6 +877,8 @@ * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 * [Sol1](project_euler/problem_102/sol1.py) + * Problem 104 + * [Sol1](project_euler/problem_104/sol1.py) * Problem 107 * [Sol1](project_euler/problem_107/sol1.py) * Problem 109 @@ -948,6 +951,7 @@ * [Quantum Random](quantum/quantum_random.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) + * [Superdense Coding](quantum/superdense_coding.py) ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) @@ -1037,7 +1041,6 @@ * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) - * [Check Pangram](strings/check_pangram.py) * [Credit Card Validator](strings/credit_card_validator.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) @@ -1046,6 +1049,8 @@ * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Palindrome](strings/is_palindrome.py) + * [Is Pangram](strings/is_pangram.py) + * [Is Spain National Id](strings/is_spain_national_id.py) * [Jaro Winkler](strings/jaro_winkler.py) * [Join](strings/join.py) * [Knuth Morris Pratt](strings/knuth_morris_pratt.py) @@ -1090,6 +1095,7 @@ * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) + * [Get Top Billioners](web_programming/get_top_billioners.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/project_euler/problem_104/sol.py.FIXME b/project_euler/problem_104/sol1.py similarity index 95% rename from project_euler/problem_104/sol.py.FIXME rename to project_euler/problem_104/sol1.py index 0818ac401..60fd6fe99 100644 --- a/project_euler/problem_104/sol.py.FIXME +++ b/project_euler/problem_104/sol1.py @@ -13,6 +13,10 @@ Given that Fk is the first Fibonacci number for which the first nine digits AND the last nine 
digits are 1-9 pandigital, find k. """ +import sys + +sys.set_int_max_str_digits(0) # type: ignore + def check(number: int) -> bool: """ @@ -34,7 +38,7 @@ def check(number: int) -> bool: check_front = [0] * 11 # mark last 9 numbers - for x in range(9): + for _ in range(9): check_last[int(number % 10)] = 1 number = number // 10 # flag @@ -51,7 +55,7 @@ def check(number: int) -> bool: # mark first 9 numbers number = int(str(number)[:9]) - for x in range(9): + for _ in range(9): check_front[int(number % 10)] = 1 number = number // 10 @@ -81,7 +85,7 @@ def check1(number: int) -> bool: check_last = [0] * 11 # mark last 9 numbers - for x in range(9): + for _ in range(9): check_last[int(number % 10)] = 1 number = number // 10 # flag From a662d96196d58c2415d6a6933fa78a59996cc3fa Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Wed, 26 Oct 2022 00:56:53 +0530 Subject: [PATCH 094/368] Add function for xor gate (#7588) * Add function for xor gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test case for xor functions * Update boolean_algebra/xor_gate.py Co-authored-by: Christian Clauss * Update boolean_algebra/xor_gate.py Co-authored-by: Christian Clauss * Split long comment line into two lines * 88 characters per line Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/xor_gate.py | 46 +++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 boolean_algebra/xor_gate.py diff --git a/boolean_algebra/xor_gate.py b/boolean_algebra/xor_gate.py new file mode 100644 index 000000000..db4f5b45c --- /dev/null +++ b/boolean_algebra/xor_gate.py @@ -0,0 +1,46 @@ +""" +A XOR Gate is a logic gate in boolean algebra which results to 1 (True) if only one of +the two inputs is 1, and 0 (False) if an even number of inputs are 1. 
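The sys.set_int_max_str_digits(0) call added in the patch above is needed because CPython 3.11 caps int-to-str conversion at 4300 digits by default (a denial-of-service mitigation), and the Fibonacci numbers searched in problem 104 are far larger than that, so the cap has to be lifted before str(number) is taken. A minimal sketch of the behaviour, assuming CPython 3.11 or newer; the 10 ** 10_000 value is only an illustration:

import sys

sys.set_int_max_str_digits(0)  # 0 disables the per-conversion digit cap, as in the patch above
big = 10 ** 10_000             # well past the default 4300-digit cap
print(len(str(big)))           # 10001 -- this str() call would raise ValueError with the cap in place

On interpreters older than 3.11 these functions do not exist and the call can simply be omitted.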
+Following is the truth table of a XOR Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 1 | + | 1 | 0 | 1 | + | 1 | 1 | 0 | + ------------------------------ + +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def xor_gate(input_1: int, input_2: int) -> int: + """ + calculate xor of the input values + + >>> xor_gate(0, 0) + 0 + >>> xor_gate(0, 1) + 1 + >>> xor_gate(1, 0) + 1 + >>> xor_gate(1, 1) + 0 + """ + return (input_1, input_2).count(0) % 2 + + +def test_xor_gate() -> None: + """ + Tests the xor_gate function + """ + assert xor_gate(0, 0) == 0 + assert xor_gate(0, 1) == 1 + assert xor_gate(1, 0) == 1 + assert xor_gate(1, 1) == 0 + + +if __name__ == "__main__": + print(xor_gate(0, 0)) + print(xor_gate(0, 1)) From cbdbe07ffd07619f1c3c5ab63ae6b2775e3c235d Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Wed, 26 Oct 2022 01:13:02 +0530 Subject: [PATCH 095/368] Create kinetic_energy.py (#7620) * Create kinetic_energy.py Finding the kinetic energy of an object,by taking its mass and velocity as input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kinetic_energy.py * Update kinetic_energy.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/kinetic_energy.py | 47 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 physics/kinetic_energy.py diff --git a/physics/kinetic_energy.py b/physics/kinetic_energy.py new file mode 100644 index 000000000..535ffc219 --- /dev/null +++ b/physics/kinetic_energy.py @@ -0,0 +1,47 @@ +""" +Find the kinetic energy of an object, give its mass and velocity +Description : In physics, the kinetic energy of an object is the energy that it +possesses due to its motion. It is defined as the work needed to accelerate a body of a +given mass from rest to its stated velocity. Having gained this energy during its +acceleration, the body maintains this kinetic energy unless its speed changes. The same +amount of work is done by the body when decelerating from its current speed to a state +of rest. Formally, a kinetic energy is any term in a system's Lagrangian which includes +a derivative with respect to time. + +In classical mechanics, the kinetic energy of a non-rotating object of mass m traveling +at a speed v is ½mv². In relativistic mechanics, this is a good approximation only when +v is much less than the speed of light. The standard unit of kinetic energy is the +joule, while the English unit of kinetic energy is the foot-pound. 
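A quick numeric check of the ½·m·v² relation described above, written as plain arithmetic rather than a call into the patched module; the figures mirror the doctests that follow:

mass, velocity = 10.0, 10.0              # kg, m/s
print(0.5 * mass * velocity**2)          # 500.0 J, the same value as kinetic_energy(10, 10)
print(0.5 * mass * (2 * velocity) ** 2)  # 2000.0 J -- doubling the speed quadruples the energy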
+ +Reference : https://en.m.wikipedia.org/wiki/Kinetic_energy +""" + + +def kinetic_energy(mass: float, velocity: float) -> float: + """ + The kinetic energy of a non-rotating object of mass m traveling at a speed v is ½mv² + + >>> kinetic_energy(10,10) + 500.0 + >>> kinetic_energy(0,10) + 0.0 + >>> kinetic_energy(10,0) + 0.0 + >>> kinetic_energy(20,-20) + 4000.0 + >>> kinetic_energy(0,0) + 0.0 + >>> kinetic_energy(2,2) + 4.0 + >>> kinetic_energy(100,100) + 500000.0 + """ + if mass < 0: + raise ValueError("The mass of a body cannot be negative") + return 0.5 * mass * abs(velocity) * abs(velocity) + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) From 450842321d30ab072a84aee15dfdbf199f9914dc Mon Sep 17 00:00:00 2001 From: Havish <100441982+havishs9@users.noreply.github.com> Date: Tue, 25 Oct 2022 12:47:52 -0700 Subject: [PATCH 096/368] Arc Length Algorithm (#7610) * Create decimal_conversions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create arc_length.py * Delete decimal_conversions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed redundant statement, fixed line overflow * Update arc_length.py Changed rad to radius as not to get confused with radians * Update arc_length.py * Update arc_length.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- maths/arc_length.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 maths/arc_length.py diff --git a/maths/arc_length.py b/maths/arc_length.py new file mode 100644 index 000000000..9e87ca38c --- /dev/null +++ b/maths/arc_length.py @@ -0,0 +1,15 @@ +from math import pi + + +def arc_length(angle: int, radius: int) -> float: + """ + >>> arc_length(45, 5) + 3.9269908169872414 + >>> arc_length(120, 15) + 31.415926535897928 + """ + return 2 * pi * radius * (angle / 360) + + +if __name__ == "__main__": + print(arc_length(90, 10)) From 103c9e0876490d6cf683ba2d3f89e5198647bc32 Mon Sep 17 00:00:00 2001 From: Karthik S <73390717+karthiks2611@users.noreply.github.com> Date: Wed, 26 Oct 2022 01:23:21 +0530 Subject: [PATCH 097/368] Added Implementation of NAND, OR ,XNOR and NOT gates in python (#7596) * Added Implementation for XNOR gate * Added Implementation for OR gate * Added implementation of NAND gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added Implementation of NAND gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated nand_gate.py * updated xnor_gate.py after some changes * Delete due to duplicate file * Updated xnor_gate.py * Added Implementation of NOT gate in python * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed a typo error * Updated to a new logic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated nand_gate.py file Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/nand_gate.py | 47 +++++++++++++++++++++++++++++++++++ boolean_algebra/not_gate.py | 37 +++++++++++++++++++++++++++ boolean_algebra/or_gate.py | 46 ++++++++++++++++++++++++++++++++++ boolean_algebra/xnor_gate.py | 48 ++++++++++++++++++++++++++++++++++++ 4 files changed, 178 
insertions(+) create mode 100644 boolean_algebra/nand_gate.py create mode 100644 boolean_algebra/not_gate.py create mode 100644 boolean_algebra/or_gate.py create mode 100644 boolean_algebra/xnor_gate.py diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py new file mode 100644 index 000000000..ea3303d16 --- /dev/null +++ b/boolean_algebra/nand_gate.py @@ -0,0 +1,47 @@ +""" +A NAND Gate is a logic gate in boolean algebra which results to 0 (False) if both +the inputs are 1, and 1 (True) otherwise. It's similar to adding +a NOT gate along with an AND gate. +Following is the truth table of a NAND Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 1 | + | 0 | 1 | 1 | + | 1 | 0 | 1 | + | 1 | 1 | 0 | + ------------------------------ +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def nand_gate(input_1: int, input_2: int) -> int: + """ + Calculate NAND of the input values + >>> nand_gate(0, 0) + 1 + >>> nand_gate(0, 1) + 1 + >>> nand_gate(1, 0) + 1 + >>> nand_gate(1, 1) + 0 + """ + return int((input_1, input_2).count(0) != 0) + + +def test_nand_gate() -> None: + """ + Tests the nand_gate function + """ + assert nand_gate(0, 0) == 1 + assert nand_gate(0, 1) == 1 + assert nand_gate(1, 0) == 1 + assert nand_gate(1, 1) == 0 + + +if __name__ == "__main__": + print(nand_gate(0, 0)) + print(nand_gate(0, 1)) + print(nand_gate(1, 0)) + print(nand_gate(1, 1)) diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py new file mode 100644 index 000000000..b41da602d --- /dev/null +++ b/boolean_algebra/not_gate.py @@ -0,0 +1,37 @@ +""" +A NOT Gate is a logic gate in boolean algebra which results to 0 (False) if the +input is high, and 1 (True) if the input is low. +Following is the truth table of a XOR Gate: + ------------------------------ + | Input | Output | + ------------------------------ + | 0 | 1 | + | 1 | 0 | + ------------------------------ +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def not_gate(input_1: int) -> int: + """ + Calculate NOT of the input values + >>> not_gate(0) + 1 + >>> not_gate(1) + 0 + """ + + return 1 if input_1 == 0 else 0 + + +def test_not_gate() -> None: + """ + Tests the not_gate function + """ + assert not_gate(0) == 1 + assert not_gate(1) == 0 + + +if __name__ == "__main__": + print(not_gate(0)) + print(not_gate(1)) diff --git a/boolean_algebra/or_gate.py b/boolean_algebra/or_gate.py new file mode 100644 index 000000000..aa7e6645e --- /dev/null +++ b/boolean_algebra/or_gate.py @@ -0,0 +1,46 @@ +""" +An OR Gate is a logic gate in boolean algebra which results to 0 (False) if both the +inputs are 0, and 1 (True) otherwise. 
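The single-bit gates added in this patch compose in the usual way; for instance, NOT applied to NAND recovers AND, which is the textbook argument that NAND is functionally complete. A small self-contained sketch, where the two helpers are inline copies of the patched functions so the snippet runs without the repository on the import path:

def nand_gate(input_1: int, input_2: int) -> int:
    # mirrors boolean_algebra/nand_gate.py above
    return int((input_1, input_2).count(0) != 0)

def not_gate(input_1: int) -> int:
    # mirrors boolean_algebra/not_gate.py above
    return 1 if input_1 == 0 else 0

# NOT(NAND(a, b)) reproduces the AND truth table for every 0/1 input pair
for a in (0, 1):
    for b in (0, 1):
        assert not_gate(nand_gate(a, b)) == int(a == 1 and b == 1)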
+Following is the truth table of an AND Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 1 | + | 1 | 0 | 1 | + | 1 | 1 | 1 | + ------------------------------ +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def or_gate(input_1: int, input_2: int) -> int: + """ + Calculate OR of the input values + >>> or_gate(0, 0) + 0 + >>> or_gate(0, 1) + 1 + >>> or_gate(1, 0) + 1 + >>> or_gate(1, 1) + 1 + """ + return int((input_1, input_2).count(1) != 0) + + +def test_or_gate() -> None: + """ + Tests the or_gate function + """ + assert or_gate(0, 0) == 0 + assert or_gate(0, 1) == 1 + assert or_gate(1, 0) == 1 + assert or_gate(1, 1) == 1 + + +if __name__ == "__main__": + print(or_gate(0, 1)) + print(or_gate(1, 0)) + print(or_gate(0, 0)) + print(or_gate(1, 1)) diff --git a/boolean_algebra/xnor_gate.py b/boolean_algebra/xnor_gate.py new file mode 100644 index 000000000..45ab2700e --- /dev/null +++ b/boolean_algebra/xnor_gate.py @@ -0,0 +1,48 @@ +""" +A XNOR Gate is a logic gate in boolean algebra which results to 0 (False) if both the +inputs are different, and 1 (True), if the inputs are same. +It's similar to adding a NOT gate to an XOR gate + +Following is the truth table of a XNOR Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 1 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 1 | + ------------------------------ +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def xnor_gate(input_1: int, input_2: int) -> int: + """ + Calculate XOR of the input values + >>> xnor_gate(0, 0) + 1 + >>> xnor_gate(0, 1) + 0 + >>> xnor_gate(1, 0) + 0 + >>> xnor_gate(1, 1) + 1 + """ + return 1 if input_1 == input_2 else 0 + + +def test_xnor_gate() -> None: + """ + Tests the xnor_gate function + """ + assert xnor_gate(0, 0) == 1 + assert xnor_gate(0, 1) == 0 + assert xnor_gate(1, 0) == 0 + assert xnor_gate(1, 1) == 1 + + +if __name__ == "__main__": + print(xnor_gate(0, 0)) + print(xnor_gate(0, 1)) + print(xnor_gate(1, 0)) + print(xnor_gate(1, 1)) From d25187eb7f27227381a03ba800890af7848b57d5 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 25 Oct 2022 16:34:46 -0400 Subject: [PATCH 098/368] Remove type cast in combinations algorithm (#7607) * Remove commented-out print statements in algorithmic functions * Encapsulate non-algorithmic code in __main__ * Remove unused print_matrix function * Remove print statement in __init__ * Remove print statement from doctest * Encapsulate non-algorithmic code in __main__ * Modify algorithm to return instead of print * Encapsulate non-algorithmic code in __main__ * Refactor data_safety_checker to return instead of print * updating DIRECTORY.md * updating DIRECTORY.md * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updating DIRECTORY.md * Remove int cast and change float division to int division * Move new-line chars * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/combinations.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/maths/combinations.py b/maths/combinations.py index 40f4f7a9f..6db1d773f 100644 --- a/maths/combinations.py +++ b/maths/combinations.py @@ -35,18 +35,18 @@ def 
combinations(n: int, k: int) -> int: # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k") - return int(factorial(n) / ((factorial(k)) * (factorial(n - k)))) + return factorial(n) // (factorial(k) * factorial(n - k)) if __name__ == "__main__": print( - "\nThe number of five-card hands possible from a standard", - f"fifty-two card deck is: {combinations(52, 5)}", + "The number of five-card hands possible from a standard", + f"fifty-two card deck is: {combinations(52, 5)}\n", ) print( - "\nIf a class of 40 students must be arranged into groups of", + "If a class of 40 students must be arranged into groups of", f"4 for group projects, there are {combinations(40, 4)} ways", "to arrange them.\n", ) @@ -54,5 +54,5 @@ if __name__ == "__main__": print( "If 10 teams are competing in a Formula One race, there", f"are {combinations(10, 3)} ways that first, second and", - "third place can be awarded.\n", + "third place can be awarded.", ) From 7e3dff17c5046aad1c67fa689e5146a13e8cc052 Mon Sep 17 00:00:00 2001 From: Arya Samik <90042953+AryaSamik@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:12:49 +0530 Subject: [PATCH 099/368] Docs: correct the shape of trajectory (#6255) The shape of the trajectory of projectile is a parabola. --- physics/horizontal_projectile_motion.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index a747acd72..dbde3660f 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -3,13 +3,13 @@ Horizontal Projectile Motion problem in physics. This algorithm solves a specific problem in which the motion starts from the ground as can be seen below: (v = 0) - ** - * * - * * - * * - * * - * * -GROUND GROUND + * * + * * + * * + * * + * * + * * +GROUND GROUND For more info: https://en.wikipedia.org/wiki/Projectile_motion """ From 2c959a749163365705a53b049aa1a3e093ee4e7a Mon Sep 17 00:00:00 2001 From: harshyadavcs <108284583+harshyadavcs@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:13:45 +0530 Subject: [PATCH 100/368] Update documentation of cnn_classification.py (#7486) * Updated documentation of cnn_classification.py for much better understanding * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update computer_vision/cnn_classification.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- computer_vision/cnn_classification.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 6d4f19639..59e4556e0 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -30,9 +30,12 @@ from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN + # (Sequential- Building the model layer by layer) classifier = models.Sequential() # Step 1 - Convolution + # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel + # (3,3) is the kernel size (filter matrix) classifier.add( layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) From c3bcfbf19d43e20e9145d8968659101c1fd8b747 Mon Sep 17 00:00:00 2001 From: Karthik Ayangar 
<66073214+kituuu@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:25:31 +0530 Subject: [PATCH 101/368] Add Cramer's rule for solving system of linear equations in two variables (#7547) * added script for solving system of linear equations in two variables * implemented all the suggested changes * changed RuntimeError to ValueError * Update matrix/system_of_linear_equation_in_2_variables.py * Update matrix/system_of_linear_equation_in_2_variables.py * Update and rename system_of_linear_equation_in_2_variables.py to cramers_rule_2x2.py Co-authored-by: Christian Clauss --- matrix/cramers_rule_2x2.py | 82 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 matrix/cramers_rule_2x2.py diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py new file mode 100644 index 000000000..a635d66fb --- /dev/null +++ b/matrix/cramers_rule_2x2.py @@ -0,0 +1,82 @@ +# https://www.chilimath.com/lessons/advanced-algebra/cramers-rule-with-two-variables +# https://en.wikipedia.org/wiki/Cramer%27s_rule + + +def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: + """ + Solves the system of linear equation in 2 variables. + :param: equation1: list of 3 numbers + :param: equation2: list of 3 numbers + :return: String of result + input format : [a1, b1, d1], [a2, b2, d2] + determinant = [[a1, b1], [a2, b2]] + determinant_x = [[d1, b1], [d2, b2]] + determinant_y = [[a1, d1], [a2, d2]] + + >>> cramers_rule_2x2([2, 3, 0], [5, 1, 0]) + 'Trivial solution. (Consistent system) x = 0 and y = 0' + >>> cramers_rule_2x2([0, 4, 50], [2, 0, 26]) + 'Non-Trivial Solution (Consistent system) x = 13.0, y = 12.5' + >>> cramers_rule_2x2([11, 2, 30], [1, 0, 4]) + 'Non-Trivial Solution (Consistent system) x = 4.0, y = -7.0' + >>> cramers_rule_2x2([4, 7, 1], [1, 2, 0]) + 'Non-Trivial Solution (Consistent system) x = 2.0, y = -1.0' + + >>> cramers_rule_2x2([1, 2, 3], [2, 4, 6]) + Traceback (most recent call last): + ... + ValueError: Infinite solutions. (Consistent system) + >>> cramers_rule_2x2([1, 2, 3], [2, 4, 7]) + Traceback (most recent call last): + ... + ValueError: No solution. (Inconsistent system) + >>> cramers_rule_2x2([1, 2, 3], [11, 22]) + Traceback (most recent call last): + ... + ValueError: Please enter a valid equation. + >>> cramers_rule_2x2([0, 1, 6], [0, 0, 3]) + Traceback (most recent call last): + ... + ValueError: No solution. (Inconsistent system) + >>> cramers_rule_2x2([0, 0, 6], [0, 0, 3]) + Traceback (most recent call last): + ... + ValueError: Both a & b of two equations can't be zero. + >>> cramers_rule_2x2([1, 2, 3], [1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: Infinite solutions. (Consistent system) + >>> cramers_rule_2x2([0, 4, 50], [0, 3, 99]) + Traceback (most recent call last): + ... + ValueError: No solution. 
(Inconsistent system) + """ + + # Check if the input is valid + if not len(equation1) == len(equation2) == 3: + raise ValueError("Please enter a valid equation.") + if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0: + raise ValueError("Both a & b of two equations can't be zero.") + + # Extract the coefficients + a1, b1, c1 = equation1 + a2, b2, c2 = equation2 + + # Calculate the determinants of the matrices + determinant = a1 * b2 - a2 * b1 + determinant_x = c1 * b2 - c2 * b1 + determinant_y = a1 * c2 - a2 * c1 + + # Check if the system of linear equations has a solution (using Cramer's rule) + if determinant == 0: + if determinant_x == determinant_y == 0: + raise ValueError("Infinite solutions. (Consistent system)") + else: + raise ValueError("No solution. (Inconsistent system)") + else: + if determinant_x == determinant_y == 0: + return "Trivial solution. (Consistent system) x = 0 and y = 0" + else: + x = determinant_x / determinant + y = determinant_y / determinant + return f"Non-Trivial Solution (Consistent system) x = {x}, y = {y}" From c31ef5e7782803b07e6d7eb4dca3b038cbdb095d Mon Sep 17 00:00:00 2001 From: RohitSingh107 <64142943+RohitSingh107@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:25:48 +0530 Subject: [PATCH 102/368] Add longest common substring (#7488) * added longest common substring * added retrun type hint * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * changed t1, t2 to text1, text2 * Update longest_common_substring.py * Update dynamic_programming/longest_common_substring.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update dynamic_programming/longest_common_substring.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * applied suggested changes * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * removed space between line * return longest common substring * Update dynamic_programming/longest_common_substring.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Caeden Perelli-Harris Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../longest_common_substring.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create 
mode 100644 dynamic_programming/longest_common_substring.py diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py new file mode 100644 index 000000000..84a9f1860 --- /dev/null +++ b/dynamic_programming/longest_common_substring.py @@ -0,0 +1,63 @@ +""" +Longest Common Substring Problem Statement: Given two sequences, find the +longest common substring present in both of them. A substring is +necessarily continuous. +Example: "abcdef" and "xabded" have two longest common substrings, "ab" or "de". +Therefore, algorithm should return any one of them. +""" + + +def longest_common_substring(text1: str, text2: str) -> str: + """ + Finds the longest common substring between two strings. + >>> longest_common_substring("", "") + '' + >>> longest_common_substring("a","") + '' + >>> longest_common_substring("", "a") + '' + >>> longest_common_substring("a", "a") + 'a' + >>> longest_common_substring("abcdef", "bcd") + 'bcd' + >>> longest_common_substring("abcdef", "xabded") + 'ab' + >>> longest_common_substring("GeeksforGeeks", "GeeksQuiz") + 'Geeks' + >>> longest_common_substring("abcdxyz", "xyzabcd") + 'abcd' + >>> longest_common_substring("zxabcdezy", "yzabcdezx") + 'abcdez' + >>> longest_common_substring("OldSite:GeeksforGeeks.org", "NewSite:GeeksQuiz.com") + 'Site:Geeks' + >>> longest_common_substring(1, 1) + Traceback (most recent call last): + ... + ValueError: longest_common_substring() takes two strings for inputs + """ + + if not (isinstance(text1, str) and isinstance(text2, str)): + raise ValueError("longest_common_substring() takes two strings for inputs") + + text1_length = len(text1) + text2_length = len(text2) + + dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)] + ans_index = 0 + ans_length = 0 + + for i in range(1, text1_length + 1): + for j in range(1, text2_length + 1): + if text1[i - 1] == text2[j - 1]: + dp[i][j] = 1 + dp[i - 1][j - 1] + if dp[i][j] > ans_length: + ans_index = i + ans_length = dp[i][j] + + return text1[ans_index - ans_length : ans_index] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 505c5e20fa7efec9f6c4cb5b8bafd8ff2001e3b7 Mon Sep 17 00:00:00 2001 From: Mislah <76743829+mislah@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:56:05 +0530 Subject: [PATCH 103/368] Included area of n sided regular polygon (#7438) * Included area of n sided regular polygon Added a function to calculate the area of n sided regular polygons * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * code standard fixes as per PR comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/area.py | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/maths/area.py b/maths/area.py index abbf7aa85..5db7dac38 100644 --- a/maths/area.py +++ b/maths/area.py @@ -1,12 +1,14 @@ """ Find the area of various geometric shapes +Wikipedia reference: https://en.wikipedia.org/wiki/Area """ -from math import pi, sqrt +from math import pi, sqrt, tan def surface_area_cube(side_length: float) -> float: """ Calculate the Surface Area of a Cube. 
+ >>> surface_area_cube(1) 6 >>> surface_area_cube(1.6) @@ -28,6 +30,7 @@ def surface_area_cube(side_length: float) -> float: def surface_area_cuboid(length: float, breadth: float, height: float) -> float: """ Calculate the Surface Area of a Cuboid. + >>> surface_area_cuboid(1, 2, 3) 22 >>> surface_area_cuboid(0, 0, 0) @@ -57,6 +60,7 @@ def surface_area_sphere(radius: float) -> float: Calculate the Surface Area of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere Formula: 4 * pi * r^2 + >>> surface_area_sphere(5) 314.1592653589793 >>> surface_area_sphere(1) @@ -79,6 +83,7 @@ def surface_area_hemisphere(radius: float) -> float: """ Calculate the Surface Area of a Hemisphere. Formula: 3 * pi * r^2 + >>> surface_area_hemisphere(5) 235.61944901923448 >>> surface_area_hemisphere(1) @@ -102,6 +107,7 @@ def surface_area_cone(radius: float, height: float) -> float: Calculate the Surface Area of a Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone Formula: pi * r * (r + (h ** 2 + r ** 2) ** 0.5) + >>> surface_area_cone(10, 24) 1130.9733552923256 >>> surface_area_cone(6, 8) @@ -133,6 +139,7 @@ def surface_area_conical_frustum( ) -> float: """ Calculate the Surface Area of a Conical Frustum. + >>> surface_area_conical_frustum(1, 2, 3) 45.511728065337266 >>> surface_area_conical_frustum(4, 5, 6) @@ -167,6 +174,7 @@ def surface_area_cylinder(radius: float, height: float) -> float: Calculate the Surface Area of a Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder Formula: 2 * pi * r * (h + r) + >>> surface_area_cylinder(7, 10) 747.6990515543707 >>> surface_area_cylinder(1.6, 2.6) @@ -196,6 +204,7 @@ def surface_area_cylinder(radius: float, height: float) -> float: def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. + >>> area_rectangle(10, 20) 200 >>> area_rectangle(1.6, 2.6) @@ -223,6 +232,7 @@ def area_rectangle(length: float, width: float) -> float: def area_square(side_length: float) -> float: """ Calculate the area of a square. + >>> area_square(10) 100 >>> area_square(0) @@ -242,6 +252,7 @@ def area_square(side_length: float) -> float: def area_triangle(base: float, height: float) -> float: """ Calculate the area of a triangle given the base and height. + >>> area_triangle(10, 10) 50.0 >>> area_triangle(1.6, 2.6) @@ -270,6 +281,7 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float """ Calculate area of triangle when the length of 3 sides are known. This function uses Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula + >>> area_triangle_three_sides(5, 12, 13) 30.0 >>> area_triangle_three_sides(10, 11, 12) @@ -316,6 +328,7 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float def area_parallelogram(base: float, height: float) -> float: """ Calculate the area of a parallelogram. + >>> area_parallelogram(10, 20) 200 >>> area_parallelogram(1.6, 2.6) @@ -343,6 +356,7 @@ def area_parallelogram(base: float, height: float) -> float: def area_trapezium(base1: float, base2: float, height: float) -> float: """ Calculate the area of a trapezium. + >>> area_trapezium(10, 20, 30) 450.0 >>> area_trapezium(1.6, 2.6, 3.6) @@ -386,6 +400,7 @@ def area_trapezium(base1: float, base2: float, height: float) -> float: def area_circle(radius: float) -> float: """ Calculate the area of a circle. 
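As a worked check of the Heron's-formula route used by area_triangle_three_sides above: for the 5-12-13 right triangle the semi-perimeter is s = (5 + 12 + 13) / 2 = 15, so the area is sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30, agreeing with the elementary ½ * base * height = ½ * 5 * 12. The same arithmetic as a two-line sanity check in plain Python:

from math import sqrt
s = (5 + 12 + 13) / 2
print(sqrt(s * (s - 5) * (s - 12) * (s - 13)))  # 30.0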
+ >>> area_circle(20) 1256.6370614359173 >>> area_circle(1.6) @@ -405,6 +420,7 @@ def area_circle(radius: float) -> float: def area_ellipse(radius_x: float, radius_y: float) -> float: """ Calculate the area of a ellipse. + >>> area_ellipse(10, 10) 314.1592653589793 >>> area_ellipse(10, 20) @@ -434,6 +450,7 @@ def area_ellipse(radius_x: float, radius_y: float) -> float: def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: """ Calculate the area of a rhombus. + >>> area_rhombus(10, 20) 100.0 >>> area_rhombus(1.6, 2.6) @@ -458,6 +475,51 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: return 1 / 2 * diagonal_1 * diagonal_2 +def area_reg_polygon(sides: int, length: float) -> float: + """ + Calculate the area of a regular polygon. + Wikipedia reference: https://en.wikipedia.org/wiki/Polygon#Regular_polygons + Formula: (n*s^2*cot(pi/n))/4 + + >>> area_reg_polygon(3, 10) + 43.301270189221945 + >>> area_reg_polygon(4, 10) + 100.00000000000001 + >>> area_reg_polygon(0, 0) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts integers greater than or equal to \ +three as number of sides + >>> area_reg_polygon(-1, -2) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts integers greater than or equal to \ +three as number of sides + >>> area_reg_polygon(5, -2) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts non-negative values as \ +length of a side + >>> area_reg_polygon(-1, 2) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts integers greater than or equal to \ +three as number of sides + """ + if not isinstance(sides, int) or sides < 3: + raise ValueError( + "area_reg_polygon() only accepts integers greater than or \ +equal to three as number of sides" + ) + elif length < 0: + raise ValueError( + "area_reg_polygon() only accepts non-negative values as \ +length of a side" + ) + return (sides * length**2) / (4 * tan(pi / sides)) + return (sides * length**2) / (4 * tan(pi / sides)) + + if __name__ == "__main__": import doctest @@ -481,3 +543,6 @@ if __name__ == "__main__": print(f"Cone: {surface_area_cone(10, 20) = }") print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(f"Cylinder: {surface_area_cylinder(10, 20) = }") + print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }") + print(f"Square: {area_reg_polygon(4, 10) = }") + print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }") From 68f6e9ac305b75a7aa8455977e35eeb942051959 Mon Sep 17 00:00:00 2001 From: M3talM0nk3y Date: Tue, 25 Oct 2022 23:31:16 -0400 Subject: [PATCH 104/368] Added function that checks if a string is an isogram (#7608) * Added function that checks if a string is an isogram. * Added wiki reference and fixed comments. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Made function name more self-documenting. Raise ValueError if string contains 1 or more digits. Renamed file. Lowercase string inside function. * Removed check_isogram.py (file renamed). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed test failure. * Raise ValueError when string has non-alpha characters. Removed import. 
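A quick numeric check of the regular-polygon formula (n * s^2) / (4 * tan(pi / n)) introduced above, using the equilateral-triangle case from its doctests:

from math import pi, tan
sides, length = 3, 10
print((sides * length**2) / (4 * tan(pi / sides)))  # 43.301270189221945, i.e. (sqrt(3) / 4) * 10**2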
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/is_isogram.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 strings/is_isogram.py diff --git a/strings/is_isogram.py b/strings/is_isogram.py new file mode 100644 index 000000000..a9d9acc81 --- /dev/null +++ b/strings/is_isogram.py @@ -0,0 +1,30 @@ +""" +wiki: https://en.wikipedia.org/wiki/Heterogram_(literature)#Isograms +""" + + +def is_isogram(string: str) -> bool: + """ + An isogram is a word in which no letter is repeated. + Examples of isograms are uncopyrightable and ambidextrously. + >>> is_isogram('Uncopyrightable') + True + >>> is_isogram('allowance') + False + >>> is_isogram('copy1') + Traceback (most recent call last): + ... + ValueError: String must only contain alphabetic characters. + """ + if not all(x.isalpha() for x in string): + raise ValueError("String must only contain alphabetic characters.") + + letters = sorted(string.lower()) + return len(letters) == len(set(letters)) + + +if __name__ == "__main__": + input_str = input("Enter a string ").strip() + + isogram = is_isogram(input_str) + print(f"{input_str} is {'an' if isogram else 'not an'} isogram.") From abf0909b6877d64c3adc9d666b85aa38bcd98566 Mon Sep 17 00:00:00 2001 From: CenTdemeern1 Date: Tue, 25 Oct 2022 23:09:28 -0700 Subject: [PATCH 105/368] Write a proper implementation for base16 (#6909) According to CONTRIBUTING.md: "Algorithms in this repo should not be how-to examples for existing Python packages." --- ciphers/base16.py | 83 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 56 insertions(+), 27 deletions(-) diff --git a/ciphers/base16.py b/ciphers/base16.py index a149a6d8c..6cd62846f 100644 --- a/ciphers/base16.py +++ b/ciphers/base16.py @@ -1,34 +1,63 @@ -import base64 - - -def base16_encode(inp: str) -> bytes: +def base16_encode(data: bytes) -> str: """ - Encodes a given utf-8 string into base-16. + Encodes the given bytes into base16. - >>> base16_encode('Hello World!') - b'48656C6C6F20576F726C6421' - >>> base16_encode('HELLO WORLD!') - b'48454C4C4F20574F524C4421' - >>> base16_encode('') - b'' - """ - # encode the input into a bytes-like object and then encode b16encode that - return base64.b16encode(inp.encode("utf-8")) - - -def base16_decode(b16encoded: bytes) -> str: - """ - Decodes from base-16 to a utf-8 string. - - >>> base16_decode(b'48656C6C6F20576F726C6421') - 'Hello World!' - >>> base16_decode(b'48454C4C4F20574F524C4421') - 'HELLO WORLD!' - >>> base16_decode(b'') + >>> base16_encode(b'Hello World!') + '48656C6C6F20576F726C6421' + >>> base16_encode(b'HELLO WORLD!') + '48454C4C4F20574F524C4421' + >>> base16_encode(b'') '' """ - # b16decode the input into bytes and decode that into a human readable string - return base64.b16decode(b16encoded).decode("utf-8") + # Turn the data into a list of integers (where each integer is a byte), + # Then turn each byte into its hexadecimal representation, make sure + # it is uppercase, and then join everything together and return it. + return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)]) + + +def base16_decode(data: str) -> bytes: + """ + Decodes the given base16 encoded data into bytes. + + >>> base16_decode('48656C6C6F20576F726C6421') + b'Hello World!' + >>> base16_decode('48454C4C4F20574F524C4421') + b'HELLO WORLD!' + >>> base16_decode('') + b'' + >>> base16_decode('486') + Traceback (most recent call last): + ... 
+ ValueError: Base16 encoded data is invalid: + Data does not have an even number of hex digits. + >>> base16_decode('48656c6c6f20576f726c6421') + Traceback (most recent call last): + ... + ValueError: Base16 encoded data is invalid: + Data is not uppercase hex or it contains invalid characters. + >>> base16_decode('This is not base64 encoded data.') + Traceback (most recent call last): + ... + ValueError: Base16 encoded data is invalid: + Data is not uppercase hex or it contains invalid characters. + """ + # Check data validity, following RFC3548 + # https://www.ietf.org/rfc/rfc3548.txt + if (len(data) % 2) != 0: + raise ValueError( + """Base16 encoded data is invalid: +Data does not have an even number of hex digits.""" + ) + # Check the character set - the standard base16 alphabet + # is uppercase according to RFC3548 section 6 + if not set(data) <= set("0123456789ABCDEF"): + raise ValueError( + """Base16 encoded data is invalid: +Data is not uppercase hex or it contains invalid characters.""" + ) + # For every two hexadecimal digits (= a byte), turn it into an integer. + # Then, string the result together into bytes, and return it. + return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2)) if __name__ == "__main__": From 93905653506c684e393d984ad814af66af8ee0e9 Mon Sep 17 00:00:00 2001 From: Karthik Ayangar <66073214+kituuu@users.noreply.github.com> Date: Wed, 26 Oct 2022 14:06:40 +0530 Subject: [PATCH 106/368] added support for inverse of 3x3 matrix (#7355) * added support for inverse of 3x3 matrix * Modified Docstring and improved code * fixed an error * Modified docstring * Apply all suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- matrix/inverse_of_matrix.py | 137 ++++++++++++++++++++++++++++++++---- 1 file changed, 122 insertions(+), 15 deletions(-) diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py index 770ce39b5..e53d90df8 100644 --- a/matrix/inverse_of_matrix.py +++ b/matrix/inverse_of_matrix.py @@ -2,22 +2,25 @@ from __future__ import annotations from decimal import Decimal +from numpy import array + def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: """ A matrix multiplied with its inverse gives the identity matrix. - This function finds the inverse of a 2x2 matrix. + This function finds the inverse of a 2x2 and 3x3 matrix. If the determinant of a matrix is 0, its inverse does not exist. Sources for fixing inaccurate float arithmetic: https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python https://docs.python.org/3/library/decimal.html + Doctests for 2x2 >>> inverse_of_matrix([[2, 5], [2, 0]]) [[0.0, 0.5], [0.2, -0.2]] >>> inverse_of_matrix([[2.5, 5], [1, 2]]) Traceback (most recent call last): - ... + ... ValueError: This matrix has no inverse. 
>>> inverse_of_matrix([[12, -16], [-9, 0]]) [[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]] @@ -25,24 +28,128 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: [[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]] >>> inverse_of_matrix([[10, 5], [3, 2.5]]) [[0.25, -0.5], [-0.3, 1.0]] + + Doctests for 3x3 + >>> inverse_of_matrix([[2, 5, 7], [2, 0, 1], [1, 2, 3]]) + [[2.0, 5.0, -4.0], [1.0, 1.0, -1.0], [-5.0, -12.0, 10.0]] + >>> inverse_of_matrix([[1, 2, 2], [1, 2, 2], [3, 2, -1]]) + Traceback (most recent call last): + ... + ValueError: This matrix has no inverse. + + >>> inverse_of_matrix([[],[]]) + Traceback (most recent call last): + ... + ValueError: Please provide a matrix of size 2x2 or 3x3. + + >>> inverse_of_matrix([[1, 2], [3, 4], [5, 6]]) + Traceback (most recent call last): + ... + ValueError: Please provide a matrix of size 2x2 or 3x3. + + >>> inverse_of_matrix([[1, 2, 1], [0,3, 4]]) + Traceback (most recent call last): + ... + ValueError: Please provide a matrix of size 2x2 or 3x3. + + >>> inverse_of_matrix([[1, 2, 3], [7, 8, 9], [7, 8, 9]]) + Traceback (most recent call last): + ... + ValueError: This matrix has no inverse. + + >>> inverse_of_matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] """ - d = Decimal # An abbreviation for conciseness + d = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices - if len(matrix) != 2 or len(matrix[0]) != 2 or len(matrix[1]) != 2: - raise ValueError("Please provide a matrix of size 2x2.") + if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2: + # Calculate the determinant of the matrix + determinant = float( + d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]) + ) + if determinant == 0: + raise ValueError("This matrix has no inverse.") - # Calculate the determinant of the matrix - determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]) - if determinant == 0: - raise ValueError("This matrix has no inverse.") + # Creates a copy of the matrix with swapped positions of the elements + swapped_matrix = [[0.0, 0.0], [0.0, 0.0]] + swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0] + swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] - # Creates a copy of the matrix with swapped positions of the elements - swapped_matrix = [[0.0, 0.0], [0.0, 0.0]] - swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0] - swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] + # Calculate the inverse of the matrix + return [ + [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix + ] + elif ( + len(matrix) == 3 + and len(matrix[0]) == 3 + and len(matrix[1]) == 3 + and len(matrix[2]) == 3 + ): + # Calculate the determinant of the matrix using Sarrus rule + determinant = float( + ( + (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2])) + + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0])) + + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1])) + ) + - ( + (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0])) + + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2])) + + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1])) + ) + ) + if determinant == 0: + raise ValueError("This matrix has no inverse.") - # Calculate the inverse of the matrix - return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] + # Creating 
cofactor matrix + cofactor_matrix = [ + [d(0.0), d(0.0), d(0.0)], + [d(0.0), d(0.0), d(0.0)], + [d(0.0), d(0.0), d(0.0)], + ] + cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - ( + d(matrix[1][2]) * d(matrix[2][1]) + ) + cofactor_matrix[0][1] = -( + (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])) + ) + cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - ( + d(matrix[1][1]) * d(matrix[2][0]) + ) + cofactor_matrix[1][0] = -( + (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])) + ) + cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - ( + d(matrix[0][2]) * d(matrix[2][0]) + ) + cofactor_matrix[1][2] = -( + (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])) + ) + cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - ( + d(matrix[0][2]) * d(matrix[1][1]) + ) + cofactor_matrix[2][1] = -( + (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])) + ) + cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - ( + d(matrix[0][1]) * d(matrix[1][0]) + ) + + # Transpose the cofactor matrix (Adjoint matrix) + adjoint_matrix = array(cofactor_matrix) + for i in range(3): + for j in range(3): + adjoint_matrix[i][j] = cofactor_matrix[j][i] + + # Inverse of the matrix using the formula (1/determinant) * adjoint matrix + inverse_matrix = array(cofactor_matrix) + for i in range(3): + for j in range(3): + inverse_matrix[i][j] /= d(determinant) + + # Calculate the inverse of the matrix + return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix] + raise ValueError("Please provide a matrix of size 2x2 or 3x3.") From 8fd06efe22ec3e870ac1fa375bd4600cb30baad4 Mon Sep 17 00:00:00 2001 From: JatinR05 <71865805+JatinR05@users.noreply.github.com> Date: Wed, 26 Oct 2022 20:13:01 +0530 Subject: [PATCH 107/368] Create minimums_squares_to_represent_a_number.py (#7595) * Create minimums_squares_to_represent_a_number.py added a dynamic programming approach of finding the minimum number of square to represent a number. eg : 25 = 5*5 37 = 6*6 + 1*1 21 = 4*4 + 2*2 + 1*1 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename minimums_squares_to_represent_a_number.py to minimum_squares_to_represent_a_number.py updated the code * Update minimum_squares_to_represent_a_number.py I have added the appropriate checks for 0 and 12.34. 
It would be great if you could suggest a name for the dp array * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_squares_to_represent_a_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_squares_to_represent_a_number.py updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_squares_to_represent_a_number.py updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../minimum_squares_to_represent_a_number.py | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 dynamic_programming/minimum_squares_to_represent_a_number.py diff --git a/dynamic_programming/minimum_squares_to_represent_a_number.py b/dynamic_programming/minimum_squares_to_represent_a_number.py new file mode 100644 index 000000000..bf5849f5b --- /dev/null +++ b/dynamic_programming/minimum_squares_to_represent_a_number.py @@ -0,0 +1,48 @@ +import math +import sys + + +def minimum_squares_to_represent_a_number(number: int) -> int: + """ + Count the number of minimum squares to represent a number + >>> minimum_squares_to_represent_a_number(25) + 1 + >>> minimum_squares_to_represent_a_number(37) + 2 + >>> minimum_squares_to_represent_a_number(21) + 3 + >>> minimum_squares_to_represent_a_number(58) + 2 + >>> minimum_squares_to_represent_a_number(-1) + Traceback (most recent call last): + ... + ValueError: the value of input must not be a negative number + >>> minimum_squares_to_represent_a_number(0) + 1 + >>> minimum_squares_to_represent_a_number(12.34) + Traceback (most recent call last): + ... 
+ ValueError: the value of input must be a natural number + """ + if number != int(number): + raise ValueError("the value of input must be a natural number") + if number < 0: + raise ValueError("the value of input must not be a negative number") + if number == 0: + return 1 + answers = [-1] * (number + 1) + answers[0] = 0 + for i in range(1, number + 1): + answer = sys.maxsize + root = int(math.sqrt(i)) + for j in range(1, root + 1): + current_answer = 1 + answers[i - (j**2)] + answer = min(answer, current_answer) + answers[i] = answer + return answers[number] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5c8a939c5a51104fce4b22ef56d29720c6ce47bb Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Wed, 26 Oct 2022 20:36:15 +0530 Subject: [PATCH 108/368] Create largest_square_area_in_matrix.py (#7673) * Create largest_square_area_in_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/largest_square_area_in_matrix.py Co-authored-by: Caeden Perelli-Harris * Update matrix/largest_square_area_in_matrix.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update largest_square_area_in_matrix.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- matrix/largest_square_area_in_matrix.py | 191 ++++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 matrix/largest_square_area_in_matrix.py diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py new file mode 100644 index 000000000..cf975cb7c --- /dev/null +++ b/matrix/largest_square_area_in_matrix.py @@ -0,0 +1,191 @@ +""" +Question: +Given a binary matrix mat of size n * m, find out the maximum size square +sub-matrix with all 1s. + +--- +Example 1: + +Input: +n = 2, m = 2 +mat = [[1, 1], + [1, 1]] + +Output: +2 + +Explanation: The maximum size of the square +sub-matrix is 2. The matrix itself is the +maximum sized sub-matrix in this case. +--- +Example 2 + +Input: +n = 2, m = 2 +mat = [[0, 0], + [0, 0]] +Output: 0 + +Explanation: There is no 1 in the matrix. + + +Approach: +We initialize another matrix (dp) with the same dimensions +as the original one initialized with all 0’s. + +dp_array(i,j) represents the side length of the maximum square whose +bottom right corner is the cell with index (i,j) in the original matrix. + +Starting from index (0,0), for every 1 found in the original matrix, +we update the value of the current element as + +dp_array(i,j)=dp_array(dp(i−1,j),dp_array(i−1,j−1),dp_array(i,j−1)) + 1. +""" + + +def largest_square_area_in_matrix_top_down_approch( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area[0], if recursive call found + square with maximum area. + + We aren't using dp_array here, so the time complexity would be exponential. 
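Returning briefly to the minimum-squares patch just above: the recurrence it fills is answers[i] = 1 + min(answers[i - j*j]) over every square j*j <= i, so for 21 the best decomposition is 16 + 4 + 1 and the answer is 3. A compact inline restatement of that DP (not the patched function itself), checked against the doctest values:

import math

def min_squares(number: int) -> int:
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        answers[i] = 1 + min(
            answers[i - j * j] for j in range(1, int(math.sqrt(i)) + 1)
        )
    return answers[number]

assert [min_squares(n) for n in (21, 25, 37, 58)] == [3, 1, 2, 2]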
+ + >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[0,0], [0,0]]) + 0 + """ + + def update_area_of_max_square(row: int, col: int) -> int: + + # BASE CASE + if row >= rows or col >= cols: + return 0 + + right = update_area_of_max_square(row, col + 1) + diagonal = update_area_of_max_square(row + 1, col + 1) + down = update_area_of_max_square(row + 1, col) + + if mat[row][col]: + sub_problem_sol = 1 + min([right, diagonal, down]) + largest_square_area[0] = max(largest_square_area[0], sub_problem_sol) + return sub_problem_sol + else: + return 0 + + largest_square_area = [0] + update_area_of_max_square(0, 0) + return largest_square_area[0] + + +def largest_square_area_in_matrix_top_down_approch_with_dp( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area[0], if recursive call found + square with maximum area. + + We are using dp_array here, so the time complexity would be O(N^2). + + >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[0,0], [0,0]]) + 0 + """ + + def update_area_of_max_square_using_dp_array( + row: int, col: int, dp_array: list[list[int]] + ) -> int: + if row >= rows or col >= cols: + return 0 + if dp_array[row][col] != -1: + return dp_array[row][col] + + right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array) + diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array) + down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array) + + if mat[row][col]: + sub_problem_sol = 1 + min([right, diagonal, down]) + largest_square_area[0] = max(largest_square_area[0], sub_problem_sol) + dp_array[row][col] = sub_problem_sol + return sub_problem_sol + else: + return 0 + + largest_square_area = [0] + dp_array = [[-1] * cols for _ in range(rows)] + update_area_of_max_square_using_dp_array(0, 0, dp_array) + + return largest_square_area[0] + + +def largest_square_area_in_matrix_bottom_up( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area, using bottom up approach. + + >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_bottom_up(2, 2, [[0,0], [0,0]]) + 0 + + """ + dp_array = [[0] * (cols + 1) for _ in range(rows + 1)] + largest_square_area = 0 + for row in range(rows - 1, -1, -1): + for col in range(cols - 1, -1, -1): + + right = dp_array[row][col + 1] + diagonal = dp_array[row + 1][col + 1] + bottom = dp_array[row + 1][col] + + if mat[row][col] == 1: + dp_array[row][col] = 1 + min(right, diagonal, bottom) + largest_square_area = max(dp_array[row][col], largest_square_area) + else: + dp_array[row][col] = 0 + + return largest_square_area + + +def largest_square_area_in_matrix_bottom_up_space_optimization( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area, using bottom up + approach. with space optimization. 
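The doctests in this file only exercise 2 x 2 inputs, so here is a slightly larger worked example of the bottom-up recurrence described above (dp[row][col] = 1 + min(right, diagonal, down) wherever the cell holds a 1), written inline rather than as a call into the patched module; the 3 x 4 matrix is made up for illustration:

mat = [
    [1, 1, 0, 1],
    [1, 1, 1, 1],
    [0, 1, 1, 1],
]
rows, cols = len(mat), len(mat[0])
dp = [[0] * (cols + 1) for _ in range(rows + 1)]  # one extra row and column of zeros as the border
largest = 0
for row in range(rows - 1, -1, -1):
    for col in range(cols - 1, -1, -1):
        if mat[row][col]:
            dp[row][col] = 1 + min(dp[row][col + 1], dp[row + 1][col + 1], dp[row + 1][col])
            largest = max(largest, dp[row][col])
print(largest)  # 2 -- the biggest all-ones square in this matrix is 2 x 2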
+ + >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[0,0], [0,0]]) + 0 + """ + current_row = [0] * (cols + 1) + next_row = [0] * (cols + 1) + largest_square_area = 0 + for row in range(rows - 1, -1, -1): + for col in range(cols - 1, -1, -1): + + right = current_row[col + 1] + diagonal = next_row[col + 1] + bottom = next_row[col] + + if mat[row][col] == 1: + current_row[col] = 1 + min(right, diagonal, bottom) + largest_square_area = max(current_row[col], largest_square_area) + else: + current_row[col] = 0 + next_row = current_row + + return largest_square_area + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])) From 614274a9dc996f64dd470d2029847cc229f19346 Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:28:33 +0530 Subject: [PATCH 109/368] Update spiral_print.py (#7674) * Update spiral_print.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update spiral_print.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update spiral_print.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update spiral_print.py * Update spiral_print.py * Update spiral_print.py * Update spiral_print.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- matrix/spiral_print.py | 49 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 0cf732d60..0d0be1527 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -76,7 +76,56 @@ def spiral_print_clockwise(a: list[list[int]]) -> None: return +# Other Easy to understand Approach + + +def spiral_traversal(matrix: list[list]) -> list[int]: + """ + >>> spiral_traversal([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + + Example: + matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + Algorithm: + Step 1. first pop the 0 index list. (which is [1,2,3,4] and concatenate the + output of [step 2]) + Step 2. Now perform matrix’s Transpose operation (Change rows to column + and vice versa) and reverse the resultant matrix. + Step 3. Pass the output of [2nd step], to same recursive function till + base case hits. + Dry Run: + Stage 1. + [1, 2, 3, 4] + spiral_traversal([ + [8, 12], [7, 11], [6, 10], [5, 9]] + ]) + Stage 2. + [1, 2, 3, 4, 8, 12] + spiral_traversal([ + [11, 10, 9], [7, 6, 5] + ]) + Stage 3. + [1, 2, 3, 4, 8, 12, 11, 10, 9] + spiral_traversal([ + [5], [6], [7] + ]) + Stage 4. + [1, 2, 3, 4, 8, 12, 11, 10, 9, 5] + spiral_traversal([ + [5], [6], [7] + ]) + Stage 5. + [1, 2, 3, 4, 8, 12, 11, 10, 9, 5] + spiral_traversal([[6, 7]]) + Stage 6. 
+ [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) + """ + if matrix: + return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) + else: + return [] + + # driver code if __name__ == "__main__": + import doctest + + doctest.testmod() + a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] spiral_print_clockwise(a) From 74325d079cf4394f7b75c26b334a81e98b7e25b1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 26 Oct 2022 22:08:53 +0200 Subject: [PATCH 110/368] Rename quantum_random.py to quantum_random.py.DISABLED.txt (#7683) * Rename quantum_random.py to quantum_random.py.DISABLED.txt #7682 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 13 ++++++++++++- ...tum_random.py => quantum_random.py.DISABLED.txt} | 0 2 files changed, 12 insertions(+), 1 deletion(-) rename quantum/{quantum_random.py => quantum_random.py.DISABLED.txt} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3e722a878..ba7d3e62a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -56,8 +56,13 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) + * [Nand Gate](boolean_algebra/nand_gate.py) * [Norgate](boolean_algebra/norgate.py) + * [Not Gate](boolean_algebra/not_gate.py) + * [Or Gate](boolean_algebra/or_gate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) + * [Xnor Gate](boolean_algebra/xnor_gate.py) + * [Xor Gate](boolean_algebra/xor_gate.py) ## Cellular Automata * [Conways Game Of Life](cellular_automata/conways_game_of_life.py) @@ -288,6 +293,7 @@ * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) * [Knapsack](dynamic_programming/knapsack.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) + * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) * [Longest Sub Array](dynamic_programming/longest_sub_array.py) @@ -298,6 +304,7 @@ * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) * [Minimum Partition](dynamic_programming/minimum_partition.py) + * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) @@ -474,6 +481,7 @@ * [Add](maths/add.py) * [Aliquot Sum](maths/aliquot_sum.py) * [Allocation Number](maths/allocation_number.py) + * [Arc Length](maths/arc_length.py) * [Area](maths/area.py) * [Area Under Curve](maths/area_under_curve.py) * [Armstrong Numbers](maths/armstrong_numbers.py) @@ -609,7 +617,9 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) + * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) @@ -657,6 +667,7 @@ ## Physics * [Casimir Effect](physics/casimir_effect.py) * [Horizontal Projectile 
Motion](physics/horizontal_projectile_motion.py) + * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) @@ -948,7 +959,6 @@ * [Not Gate](quantum/not_gate.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) - * [Quantum Random](quantum/quantum_random.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) * [Superdense Coding](quantum/superdense_coding.py) @@ -1048,6 +1058,7 @@ * [Hamming Distance](strings/hamming_distance.py) * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) + * [Is Isogram](strings/is_isogram.py) * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) diff --git a/quantum/quantum_random.py b/quantum/quantum_random.py.DISABLED.txt similarity index 100% rename from quantum/quantum_random.py rename to quantum/quantum_random.py.DISABLED.txt From b46b92a9160360ea09848893b90dd6022f371ffe Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Thu, 27 Oct 2022 01:39:23 +0530 Subject: [PATCH 111/368] Add function for highest set bit location (#7586) * Add function for highest set bit location * Address review comments --- bit_manipulation/highest_set_bit.py | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 bit_manipulation/highest_set_bit.py diff --git a/bit_manipulation/highest_set_bit.py b/bit_manipulation/highest_set_bit.py new file mode 100644 index 000000000..21d92dcb9 --- /dev/null +++ b/bit_manipulation/highest_set_bit.py @@ -0,0 +1,34 @@ +def get_highest_set_bit_position(number: int) -> int: + """ + Returns position of the highest set bit of a number. + Ref - https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogObvious + >>> get_highest_set_bit_position(25) + 5 + >>> get_highest_set_bit_position(37) + 6 + >>> get_highest_set_bit_position(1) + 1 + >>> get_highest_set_bit_position(4) + 3 + >>> get_highest_set_bit_position(0) + 0 + >>> get_highest_set_bit_position(0.8) + Traceback (most recent call last): + ... 
+ TypeError: Input value must be an 'int' type + """ + if not isinstance(number, int): + raise TypeError("Input value must be an 'int' type") + + position = 0 + while number: + position += 1 + number >>= 1 + + return position + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 71c7c0bd3592225c027d07a10d1c71946c0f677a Mon Sep 17 00:00:00 2001 From: SwayamSahu <91021799+SwayamSahu@users.noreply.github.com> Date: Thu, 27 Oct 2022 01:50:00 +0530 Subject: [PATCH 112/368] Updated a typo in print statement (#7696) * Updated a typo in print statement * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- strings/barcode_validator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index 056700076..2e1ea8703 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -83,6 +83,6 @@ if __name__ == "__main__": barcode = get_barcode(input("Barcode: ").strip()) if is_valid(barcode): - print(f"'{barcode}' is a valid Barcode") + print(f"'{barcode}' is a valid barcode.") else: - print(f"'{barcode}' is NOT is valid Barcode.") + print(f"'{barcode}' is NOT a valid barcode.") From d33f9b31fe96acf5201c39f565015444526a3e38 Mon Sep 17 00:00:00 2001 From: Sushant Srivastav <63559772+sushant4191@users.noreply.github.com> Date: Thu, 27 Oct 2022 02:45:02 +0530 Subject: [PATCH 113/368] Calculate GST Amount (#7694) * Calculate GST Amount The program helps to get the net amount after GST is added to it. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/calculating GST.py Thanks! Co-authored-by: Christian Clauss * Update and rename calculating GST.py to price_plus_tax.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update price_plus_tax.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- financial/price_plus_tax.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 financial/price_plus_tax.py diff --git a/financial/price_plus_tax.py b/financial/price_plus_tax.py new file mode 100644 index 000000000..43876d35e --- /dev/null +++ b/financial/price_plus_tax.py @@ -0,0 +1,18 @@ +""" +Calculate price plus tax of a good or service given its price and a tax rate. 
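+
+The net amount is price * (1 + tax_rate); for example, a price of 100 with a
+25% tax rate comes to 100 * 1.25 = 125.0, as in the doctests below.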
+""" + + +def price_plus_tax(price: float, tax_rate: float) -> float: + """ + >>> price_plus_tax(100, 0.25) + 125.0 + >>> price_plus_tax(125.50, 0.05) + 131.775 + """ + return price * (1 + tax_rate) + + +if __name__ == "__main__": + print(f"{price_plus_tax(100, 0.25) = }") + print(f"{price_plus_tax(125.50, 0.05) = }") From e906a5149a0a9c116e1a3dbade6eb6ea659ac68a Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Thu, 27 Oct 2022 16:52:10 +0530 Subject: [PATCH 114/368] Create malus_law.py (#7710) * Create malus_law.py Finding the intensity of light transmitted through a polariser using Malus Law and by taking initial intensity and angle between polariser and axis as input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update malus_law.py Made some changes in the error messages and the docstring testcases * Update malus_law.py Made changes for the passing the precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- physics/malus_law.py | 80 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 physics/malus_law.py diff --git a/physics/malus_law.py b/physics/malus_law.py new file mode 100644 index 000000000..ae77d45cf --- /dev/null +++ b/physics/malus_law.py @@ -0,0 +1,80 @@ +import math + +""" +Finding the intensity of light transmitted through a polariser using Malus Law +and by taking initial intensity and angle between polariser and axis as input + +Description : Malus's law, which is named after Étienne-Louis Malus, +says that when a perfect polarizer is placed in a polarized +beam of light, the irradiance, I, of the light that passes +through is given by + I=I'cos²θ +where I' is the initial intensity and θ is the angle between the light's +initial polarization direction and the axis of the polarizer. +A beam of unpolarized light can be thought of as containing a +uniform mixture of linear polarizations at all possible angles. +Since the average value of cos²θ is 1/2, the transmission coefficient becomes +I/I' = 1/2 +In practice, some light is lost in the polarizer and the actual transmission +will be somewhat lower than this, around 38% for Polaroid-type polarizers but +considerably higher (>49.9%) for some birefringent prism types. +If two polarizers are placed one after another (the second polarizer is +generally called an analyzer), the mutual angle between their polarizing axes +gives the value of θ in Malus's law. If the two axes are orthogonal, the +polarizers are crossed and in theory no light is transmitted, though again +practically speaking no polarizer is perfect and the transmission is not exactly +zero (for example, crossed Polaroid sheets appear slightly blue in colour because +their extinction ratio is better in the red). If a transparent object is placed +between the crossed polarizers, any polarization effects present in the sample +(such as birefringence) will be shown as an increase in transmission. 
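+(A quick check with the formula above: an initial intensity of 100 at θ = 60°
+is reduced to 100 * cos²(60°) = 25, while at θ = 90°, i.e. crossed polarizers,
+it ideally drops to zero; a birefringent sample between crossed polarizers
+changes the polarization of the light on the way through, so some intensity
+reappears.)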
+This effect is used in polarimetry to measure the optical activity of a sample. +Real polarizers are also not perfect blockers of the polarization orthogonal to +their polarization axis; the ratio of the transmission of the unwanted component +to the wanted component is called the extinction ratio, and varies from around +1:500 for Polaroid to about 1:106 for Glan–Taylor prism polarizers. + +Reference : "https://en.wikipedia.org/wiki/Polarizer#Malus's_law_and_other_properties" +""" + + +def malus_law(initial_intensity: float, angle: float) -> float: + """ + >>> round(malus_law(10,45),2) + 5.0 + >>> round(malus_law(100,60),2) + 25.0 + >>> round(malus_law(50,150),2) + 37.5 + >>> round(malus_law(75,270),2) + 0.0 + >>> round(malus_law(10,-900),2) + Traceback (most recent call last): + ... + ValueError: In Malus Law, the angle is in the range 0-360 degrees + >>> round(malus_law(10,900),2) + Traceback (most recent call last): + ... + ValueError: In Malus Law, the angle is in the range 0-360 degrees + >>> round(malus_law(-100,900),2) + Traceback (most recent call last): + ... + ValueError: The value of intensity cannot be negative + >>> round(malus_law(100,180),2) + 100.0 + >>> round(malus_law(100,360),2) + 100.0 + """ + + if initial_intensity < 0: + raise ValueError("The value of intensity cannot be negative") + # handling of negative values of initial intensity + if angle < 0 or angle > 360: + raise ValueError("In Malus Law, the angle is in the range 0-360 degrees") + # handling of values out of allowed range + return initial_intensity * (math.cos(math.radians(angle)) ** 2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod(name="malus_law") From e8915097c4a632419acc77c1ce08aae3e3c3b864 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 14:15:15 +0100 Subject: [PATCH 115/368] refactor: Fix matrix display deprecation (#7729) --- machine_learning/xgboost_classifier.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index bb5b48b7a..62a1b331b 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -2,7 +2,7 @@ import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris -from sklearn.metrics import plot_confusion_matrix +from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier @@ -63,7 +63,7 @@ def main() -> None: xgboost_classifier = xgboost(x_train, y_train) # Display the confusion matrix of the classifier with both training and test sets - plot_confusion_matrix( + ConfusionMatrixDisplay.from_estimator( xgboost_classifier, x_test, y_test, From 9bba42eca8c679a32f99984bbb5bb53795f4e71f Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 18:42:30 +0100 Subject: [PATCH 116/368] refactor: Indent ... 
for visual purposes (#7744) --- arithmetic_analysis/bisection.py | 4 +-- arithmetic_analysis/intersection.py | 4 +-- .../jacobi_iteration_method.py | 10 +++--- arithmetic_analysis/lu_decomposition.py | 2 +- arithmetic_analysis/newton_method.py | 2 +- arithmetic_analysis/newton_raphson_new.py | 2 +- backtracking/knight_tour.py | 2 +- conversions/binary_to_decimal.py | 6 ++-- conversions/binary_to_hexadecimal.py | 4 +-- conversions/binary_to_octal.py | 4 +-- conversions/decimal_to_any.py | 12 +++---- conversions/decimal_to_binary.py | 4 +-- conversions/decimal_to_binary_recursion.py | 6 ++-- conversions/decimal_to_hexadecimal.py | 4 +-- conversions/hex_to_bin.py | 4 +-- conversions/hexadecimal_to_decimal.py | 6 ++-- conversions/octal_to_decimal.py | 20 ++++++------ conversions/temperature_conversions.py | 32 +++++++++---------- .../binary_tree/binary_search_tree.py | 2 +- .../binary_tree/binary_tree_mirror.py | 4 +-- .../number_of_possible_binary_trees.py | 2 +- .../linked_list/doubly_linked_list.py | 10 +++--- .../linked_list/singly_linked_list.py | 16 +++++----- data_structures/queue/linked_queue.py | 4 +-- .../queue/priority_queue_using_list.py | 2 +- .../stacks/infix_to_postfix_conversion.py | 2 +- .../stacks/stack_with_singly_linked_list.py | 2 +- .../longest_common_substring.py | 2 +- genetic_algorithm/basic_string.py | 6 ++-- linear_algebra/src/lib.py | 4 +-- machine_learning/similarity_search.py | 6 ++-- maths/bisection.py | 2 +- maths/catalan_number.py | 6 ++-- maths/fibonacci.py | 10 +++--- maths/maclaurin_series.py | 16 +++++----- maths/proth_number.py | 6 ++-- maths/sylvester_sequence.py | 4 +-- maths/zellers_congruence.py | 4 +-- neural_network/perceptron.py | 6 ++-- project_euler/problem_004/sol1.py | 2 +- project_euler/problem_010/sol3.py | 6 ++-- searches/interpolation_search.py | 2 +- sorts/bead_sort.py | 4 +-- sorts/msd_radix_sort.py | 4 +-- strings/barcode_validator.py | 4 +-- strings/join.py | 2 +- 46 files changed, 134 insertions(+), 134 deletions(-) diff --git a/arithmetic_analysis/bisection.py b/arithmetic_analysis/bisection.py index 640913a7a..e359cc170 100644 --- a/arithmetic_analysis/bisection.py +++ b/arithmetic_analysis/bisection.py @@ -8,7 +8,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float: 1.0000000149011612 >>> bisection(lambda x: x ** 3 - 1, 2, 1000) Traceback (most recent call last): - ... + ... ValueError: could not find root in given interval. >>> bisection(lambda x: x ** 2 - 4 * x + 3, 0, 2) 1.0 @@ -16,7 +16,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float: 3.0 >>> bisection(lambda x: x ** 2 - 4 * x + 3, 4, 1000) Traceback (most recent call last): - ... + ... ValueError: could not find root in given interval. """ start: float = a diff --git a/arithmetic_analysis/intersection.py b/arithmetic_analysis/intersection.py index 49213dd05..826c0ead0 100644 --- a/arithmetic_analysis/intersection.py +++ b/arithmetic_analysis/intersection.py @@ -10,7 +10,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl 0.9999999999954654 >>> intersection(lambda x: x ** 3 - 1, 5, 5) Traceback (most recent call last): - ... + ... ZeroDivisionError: float division by zero, could not find root >>> intersection(lambda x: x ** 3 - 1, 100, 200) 1.0000000000003888 @@ -24,7 +24,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl 0.0 >>> intersection(math.cos, -math.pi, math.pi) Traceback (most recent call last): - ... + ... 
ZeroDivisionError: float division by zero, could not find root """ x_n: float = x0 diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 3087309e8..fe506a94a 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -42,7 +42,7 @@ def jacobi_iteration_method( >>> iterations = 3 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Coefficient matrix dimensions must be nxn but received 2x3 >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) @@ -51,7 +51,7 @@ def jacobi_iteration_method( >>> iterations = 3 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but received 3x3 and 2x1 @@ -61,7 +61,7 @@ def jacobi_iteration_method( >>> iterations = 3 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Number of initial values must be equal to number of rows in coefficient matrix but received 2 and 3 @@ -71,7 +71,7 @@ def jacobi_iteration_method( >>> iterations = 0 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Iterations must be at least 1 """ @@ -138,7 +138,7 @@ def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) >>> strictly_diagonally_dominant(table) Traceback (most recent call last): - ... + ... ValueError: Coefficient matrix is not strictly diagonally dominant """ diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 1e98b9066..217719cf4 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -31,7 +31,7 @@ def lower_upper_decomposition( >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) >>> lower_upper_decomposition(matrix) Traceback (most recent call last): - ... + ... ValueError: 'table' has to be of square shaped array but got a 2x3 array: [[ 2 -2 1] [ 0 1 2]] diff --git a/arithmetic_analysis/newton_method.py b/arithmetic_analysis/newton_method.py index c4018a0f2..5127bfcaf 100644 --- a/arithmetic_analysis/newton_method.py +++ b/arithmetic_analysis/newton_method.py @@ -28,7 +28,7 @@ def newton( 1.5707963267948966 >>> newton(math.cos, lambda x: -math.sin(x), 0) Traceback (most recent call last): - ... + ... ZeroDivisionError: Could not find root """ prev_guess = float(starting_int) diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py index 19ea4ce21..dd1d7e092 100644 --- a/arithmetic_analysis/newton_raphson_new.py +++ b/arithmetic_analysis/newton_raphson_new.py @@ -32,7 +32,7 @@ def newton_raphson( 1.2186556186174883e-10 >>> newton_raphson('cos(x)', 0) Traceback (most recent call last): - ... + ... ZeroDivisionError: Could not find root """ diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 6e9b31bd1..bb650ece3 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -78,7 +78,7 @@ def open_knight_tour(n: int) -> list[list[int]]: >>> open_knight_tour(2) Traceback (most recent call last): - ... + ... 
ValueError: Open Kight Tour cannot be performed on a board of size 2 """ diff --git a/conversions/binary_to_decimal.py b/conversions/binary_to_decimal.py index a7625e475..914a9318c 100644 --- a/conversions/binary_to_decimal.py +++ b/conversions/binary_to_decimal.py @@ -12,15 +12,15 @@ def bin_to_decimal(bin_string: str) -> int: 0 >>> bin_to_decimal("a") Traceback (most recent call last): - ... + ... ValueError: Non-binary value was passed to the function >>> bin_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> bin_to_decimal("39") Traceback (most recent call last): - ... + ... ValueError: Non-binary value was passed to the function """ bin_string = str(bin_string).strip() diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py index 89f7af696..a3855bb70 100644 --- a/conversions/binary_to_hexadecimal.py +++ b/conversions/binary_to_hexadecimal.py @@ -30,11 +30,11 @@ def bin_to_hexadecimal(binary_str: str) -> str: '-0x1d' >>> bin_to_hexadecimal('a') Traceback (most recent call last): - ... + ... ValueError: Non-binary value was passed to the function >>> bin_to_hexadecimal('') Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function """ # Sanitising parameter diff --git a/conversions/binary_to_octal.py b/conversions/binary_to_octal.py index 35ede95b1..82f81e062 100644 --- a/conversions/binary_to_octal.py +++ b/conversions/binary_to_octal.py @@ -9,11 +9,11 @@ The function below will convert any binary string to the octal equivalent. >>> bin_to_octal("") Traceback (most recent call last): -... + ... ValueError: Empty string was passed to the function >>> bin_to_octal("a-1") Traceback (most recent call last): -... + ... ValueError: Non-binary value was passed to the function """ diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index 908c89e8f..11a2af294 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -29,32 +29,32 @@ def decimal_to_any(num: int, base: int) -> str: >>> # negatives will error >>> decimal_to_any(-45, 8) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: parameter must be positive int >>> # floats will error >>> decimal_to_any(34.4, 6) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: int() can't convert non-string with explicit base >>> # a float base will error >>> decimal_to_any(5, 2.5) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'float' object cannot be interpreted as an integer >>> # a str base will error >>> decimal_to_any(10, '16') # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'str' object cannot be interpreted as an integer >>> # a base less than 2 will error >>> decimal_to_any(7, 0) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: base must be >= 2 >>> # a base greater than 36 will error >>> decimal_to_any(34, 37) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: base must be <= 36 """ if isinstance(num, float): diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py index c21cdbcae..cfda57ca7 100644 --- a/conversions/decimal_to_binary.py +++ b/conversions/decimal_to_binary.py @@ -19,12 +19,12 @@ def decimal_to_binary(num: int) -> str: >>> # other floats will error >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... 
TypeError: 'float' object cannot be interpreted as an integer >>> # strings will error as well >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'str' object cannot be interpreted as an integer """ diff --git a/conversions/decimal_to_binary_recursion.py b/conversions/decimal_to_binary_recursion.py index c149ea865..05833ca67 100644 --- a/conversions/decimal_to_binary_recursion.py +++ b/conversions/decimal_to_binary_recursion.py @@ -7,7 +7,7 @@ def binary_recursive(decimal: int) -> str: '1001000' >>> binary_recursive("number") Traceback (most recent call last): - ... + ... ValueError: invalid literal for int() with base 10: 'number' """ decimal = int(decimal) @@ -30,11 +30,11 @@ def main(number: str) -> str: '-0b101000' >>> main(40.8) Traceback (most recent call last): - ... + ... ValueError: Input value is not an integer >>> main("forty") Traceback (most recent call last): - ... + ... ValueError: Input value is not an integer """ number = str(number).strip() diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index 2389c6d1f..5ea48401f 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -46,12 +46,12 @@ def decimal_to_hexadecimal(decimal: float) -> str: >>> # other floats will error >>> decimal_to_hexadecimal(16.16) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... AssertionError >>> # strings will error as well >>> decimal_to_hexadecimal('0xfffff') # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... AssertionError >>> # results are the same when compared to Python's default hex function >>> decimal_to_hexadecimal(-256) == hex(-256) diff --git a/conversions/hex_to_bin.py b/conversions/hex_to_bin.py index e358d810b..b872ab5cb 100644 --- a/conversions/hex_to_bin.py +++ b/conversions/hex_to_bin.py @@ -21,11 +21,11 @@ def hex_to_bin(hex_num: str) -> int: -1111111111111111 >>> hex_to_bin("F-f") Traceback (most recent call last): - ... + ... ValueError: Invalid value was passed to the function >>> hex_to_bin("") Traceback (most recent call last): - ... + ... ValueError: No value was passed to the function """ diff --git a/conversions/hexadecimal_to_decimal.py b/conversions/hexadecimal_to_decimal.py index beb1c2c3d..209e4aebb 100644 --- a/conversions/hexadecimal_to_decimal.py +++ b/conversions/hexadecimal_to_decimal.py @@ -18,15 +18,15 @@ def hex_to_decimal(hex_string: str) -> int: -255 >>> hex_to_decimal("F-f") Traceback (most recent call last): - ... + ... ValueError: Non-hexadecimal value was passed to the function >>> hex_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> hex_to_decimal("12m") Traceback (most recent call last): - ... + ... ValueError: Non-hexadecimal value was passed to the function """ hex_string = hex_string.strip().lower() diff --git a/conversions/octal_to_decimal.py b/conversions/octal_to_decimal.py index 551311e26..7f006f20e 100644 --- a/conversions/octal_to_decimal.py +++ b/conversions/octal_to_decimal.py @@ -4,27 +4,27 @@ def oct_to_decimal(oct_string: str) -> int: >>> oct_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> oct_to_decimal("-") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("e") Traceback (most recent call last): - ... + ... 
ValueError: Non-octal value was passed to the function >>> oct_to_decimal("8") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("-e") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("-8") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("1") 1 @@ -38,7 +38,7 @@ def oct_to_decimal(oct_string: str) -> int: -37 >>> oct_to_decimal("-") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("0") 0 @@ -46,15 +46,15 @@ def oct_to_decimal(oct_string: str) -> int: -2093 >>> oct_to_decimal("2-0Fm") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> oct_to_decimal("19") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function """ oct_string = str(oct_string).strip() diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py index 167c9dc64..e5af46556 100644 --- a/conversions/temperature_conversions.py +++ b/conversions/temperature_conversions.py @@ -23,7 +23,7 @@ def celsius_to_fahrenheit(celsius: float, ndigits: int = 2) -> float: 104.0 >>> celsius_to_fahrenheit("celsius") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'celsius' """ return round((float(celsius) * 9 / 5) + 32, ndigits) @@ -47,7 +47,7 @@ def celsius_to_kelvin(celsius: float, ndigits: int = 2) -> float: 313.15 >>> celsius_to_kelvin("celsius") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'celsius' """ return round(float(celsius) + 273.15, ndigits) @@ -71,7 +71,7 @@ def celsius_to_rankine(celsius: float, ndigits: int = 2) -> float: 563.67 >>> celsius_to_rankine("celsius") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'celsius' """ return round((float(celsius) * 9 / 5) + 491.67, ndigits) @@ -101,7 +101,7 @@ def fahrenheit_to_celsius(fahrenheit: float, ndigits: int = 2) -> float: 37.78 >>> fahrenheit_to_celsius("fahrenheit") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'fahrenheit' """ return round((float(fahrenheit) - 32) * 5 / 9, ndigits) @@ -131,7 +131,7 @@ def fahrenheit_to_kelvin(fahrenheit: float, ndigits: int = 2) -> float: 310.93 >>> fahrenheit_to_kelvin("fahrenheit") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'fahrenheit' """ return round(((float(fahrenheit) - 32) * 5 / 9) + 273.15, ndigits) @@ -161,7 +161,7 @@ def fahrenheit_to_rankine(fahrenheit: float, ndigits: int = 2) -> float: 559.67 >>> fahrenheit_to_rankine("fahrenheit") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'fahrenheit' """ return round(float(fahrenheit) + 459.67, ndigits) @@ -185,7 +185,7 @@ def kelvin_to_celsius(kelvin: float, ndigits: int = 2) -> float: 42.35 >>> kelvin_to_celsius("kelvin") Traceback (most recent call last): - ... + ... 
ValueError: could not convert string to float: 'kelvin' """ return round(float(kelvin) - 273.15, ndigits) @@ -209,7 +209,7 @@ def kelvin_to_fahrenheit(kelvin: float, ndigits: int = 2) -> float: 108.23 >>> kelvin_to_fahrenheit("kelvin") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'kelvin' """ return round(((float(kelvin) - 273.15) * 9 / 5) + 32, ndigits) @@ -233,7 +233,7 @@ def kelvin_to_rankine(kelvin: float, ndigits: int = 2) -> float: 72.0 >>> kelvin_to_rankine("kelvin") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'kelvin' """ return round((float(kelvin) * 9 / 5), ndigits) @@ -257,7 +257,7 @@ def rankine_to_celsius(rankine: float, ndigits: int = 2) -> float: -97.87 >>> rankine_to_celsius("rankine") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'rankine' """ return round((float(rankine) - 491.67) * 5 / 9, ndigits) @@ -277,7 +277,7 @@ def rankine_to_fahrenheit(rankine: float, ndigits: int = 2) -> float: -144.17 >>> rankine_to_fahrenheit("rankine") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'rankine' """ return round(float(rankine) - 459.67, ndigits) @@ -297,7 +297,7 @@ def rankine_to_kelvin(rankine: float, ndigits: int = 2) -> float: 22.22 >>> rankine_to_kelvin("rankine") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'rankine' """ return round((float(rankine) * 5 / 9), ndigits) @@ -316,7 +316,7 @@ def reaumur_to_kelvin(reaumur: float, ndigits: int = 2) -> float: 323.15 >>> reaumur_to_kelvin("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 1.25 + 273.15), ndigits) @@ -335,7 +335,7 @@ def reaumur_to_fahrenheit(reaumur: float, ndigits: int = 2) -> float: 122.0 >>> reaumur_to_fahrenheit("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 2.25 + 32), ndigits) @@ -354,7 +354,7 @@ def reaumur_to_celsius(reaumur: float, ndigits: int = 2) -> float: 50.0 >>> reaumur_to_celsius("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 1.25), ndigits) @@ -373,7 +373,7 @@ def reaumur_to_rankine(reaumur: float, ndigits: int = 2) -> float: 581.67 >>> reaumur_to_rankine("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 2.25 + 32 + 459.67), ndigits) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index fc60540a1..fc512944e 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -196,7 +196,7 @@ def binary_search_tree() -> None: 1 4 7 6 3 13 14 10 8 >>> BinarySearchTree().search(6) Traceback (most recent call last): - ... + ... IndexError: Warning: Tree is empty! please use another. 
""" testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7) diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py index cdd56e35d..1ef950ad6 100644 --- a/data_structures/binary_tree/binary_tree_mirror.py +++ b/data_structures/binary_tree/binary_tree_mirror.py @@ -21,11 +21,11 @@ def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict: {1: [3, 2], 2: [5, 4], 3: [7, 6], 4: [11, 10]} >>> binary_tree_mirror({ 1: [2,3], 2: [4,5], 3: [6,7], 4: [10,11]}, 5) Traceback (most recent call last): - ... + ... ValueError: root 5 is not present in the binary_tree >>> binary_tree_mirror({}, 5) Traceback (most recent call last): - ... + ... ValueError: binary tree cannot be empty """ if not binary_tree: diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 1ad8f2ed4..684c518b1 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -67,7 +67,7 @@ def factorial(n: int) -> int: True >>> factorial(-5) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: factorial() not defined for negative values """ if n < 0: diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 9e996ef0f..90b6b6eb2 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -64,11 +64,11 @@ class DoublyLinkedList: >>> linked_list = DoublyLinkedList() >>> linked_list.insert_at_nth(-1, 666) Traceback (most recent call last): - .... + .... IndexError: list index out of range >>> linked_list.insert_at_nth(1, 666) Traceback (most recent call last): - .... + .... IndexError: list index out of range >>> linked_list.insert_at_nth(0, 2) >>> linked_list.insert_at_nth(0, 1) @@ -78,7 +78,7 @@ class DoublyLinkedList: '1->2->3->4' >>> linked_list.insert_at_nth(5, 5) Traceback (most recent call last): - .... + .... IndexError: list index out of range """ if not 0 <= index <= len(self): @@ -114,7 +114,7 @@ class DoublyLinkedList: >>> linked_list = DoublyLinkedList() >>> linked_list.delete_at_nth(0) Traceback (most recent call last): - .... + .... IndexError: list index out of range >>> for i in range(0, 5): ... linked_list.insert_at_nth(i, i + 1) @@ -128,7 +128,7 @@ class DoublyLinkedList: '2->4' >>> linked_list.delete_at_nth(2) Traceback (most recent call last): - .... + .... IndexError: list index out of range """ if not 0 <= index <= len(self) - 1: diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 89a05ae81..3e52c7e43 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -95,11 +95,11 @@ class LinkedList: True >>> linked_list[-10] Traceback (most recent call last): - ... + ... ValueError: list index out of range. >>> linked_list[len(linked_list)] Traceback (most recent call last): - ... + ... ValueError: list index out of range. """ if not 0 <= index < len(self): @@ -122,11 +122,11 @@ class LinkedList: -666 >>> linked_list[-10] = 666 Traceback (most recent call last): - ... + ... ValueError: list index out of range. >>> linked_list[len(linked_list)] = 666 Traceback (most recent call last): - ... + ... ValueError: list index out of range. 
""" if not 0 <= index < len(self): @@ -233,7 +233,7 @@ class LinkedList: 'third' >>> linked_list.delete_head() Traceback (most recent call last): - ... + ... IndexError: List index out of range. """ return self.delete_nth(0) @@ -260,7 +260,7 @@ class LinkedList: 'first' >>> linked_list.delete_tail() Traceback (most recent call last): - ... + ... IndexError: List index out of range. """ return self.delete_nth(len(self) - 1) @@ -281,11 +281,11 @@ class LinkedList: first->third >>> linked_list.delete_nth(5) # this raises error Traceback (most recent call last): - ... + ... IndexError: List index out of range. >>> linked_list.delete_nth(-1) # this also raises error Traceback (most recent call last): - ... + ... IndexError: List index out of range. """ if not 0 <= index <= len(self) - 1: # test if index is valid diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py index 3675da7db..3af97d28e 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -96,7 +96,7 @@ class LinkedQueue: >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): - ... + ... IndexError: dequeue from empty queue >>> for i in range(1, 6): ... queue.put(i) @@ -116,7 +116,7 @@ class LinkedQueue: >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): - ... + ... IndexError: dequeue from empty queue >>> queue = LinkedQueue() >>> for i in range(1, 6): diff --git a/data_structures/queue/priority_queue_using_list.py b/data_structures/queue/priority_queue_using_list.py index c5cf26433..f61b5e8e6 100644 --- a/data_structures/queue/priority_queue_using_list.py +++ b/data_structures/queue/priority_queue_using_list.py @@ -58,7 +58,7 @@ class FixedPriorityQueue: 4 >>> fpq.dequeue() Traceback (most recent call last): - ... + ... data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty >>> print(fpq) Priority 0: [] diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py index b812d108e..901744309 100644 --- a/data_structures/stacks/infix_to_postfix_conversion.py +++ b/data_structures/stacks/infix_to_postfix_conversion.py @@ -21,7 +21,7 @@ def infix_to_postfix(expression_str: str) -> str: """ >>> infix_to_postfix("(1*(2+3)+4))") Traceback (most recent call last): - ... + ... ValueError: Mismatched parentheses >>> infix_to_postfix("") '' diff --git a/data_structures/stacks/stack_with_singly_linked_list.py b/data_structures/stacks/stack_with_singly_linked_list.py index 903ae39db..f5ce83b86 100644 --- a/data_structures/stacks/stack_with_singly_linked_list.py +++ b/data_structures/stacks/stack_with_singly_linked_list.py @@ -109,7 +109,7 @@ class LinkedStack(Generic[T]): >>> stack = LinkedStack() >>> stack.pop() Traceback (most recent call last): - ... + ... IndexError: pop from empty stack >>> stack.push("c") >>> stack.push("b") diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py index 84a9f1860..e2f944a5e 100644 --- a/dynamic_programming/longest_common_substring.py +++ b/dynamic_programming/longest_common_substring.py @@ -32,7 +32,7 @@ def longest_common_substring(text1: str, text2: str) -> str: 'Site:Geeks' >>> longest_common_substring(1, 1) Traceback (most recent call last): - ... + ... 
ValueError: longest_common_substring() takes two strings for inputs """ diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 3227adf53..5cf8d691b 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -32,17 +32,17 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, >>> genes.remove("e") >>> basic("test", genes) Traceback (most recent call last): - ... + ... ValueError: ['e'] is not in genes list, evolution cannot converge >>> genes.remove("s") >>> basic("test", genes) Traceback (most recent call last): - ... + ... ValueError: ['e', 's'] is not in genes list, evolution cannot converge >>> genes.remove("t") >>> basic("test", genes) Traceback (most recent call last): - ... + ... ValueError: ['e', 's', 't'] is not in genes list, evolution cannot converge """ diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index b9791c860..079731487 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -168,7 +168,7 @@ class Vector: 9.539392014169456 >>> Vector([]).euclidean_length() Traceback (most recent call last): - ... + ... Exception: Vector is empty """ if len(self.__components) == 0: @@ -186,7 +186,7 @@ class Vector: 85.40775111366095 >>> Vector([3, 4, -1]).angle(Vector([2, -1])) Traceback (most recent call last): - ... + ... Exception: invalid operand! """ num = self * other diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index ec1b9f9e3..2f5fc46c0 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -70,7 +70,7 @@ def similarity_search( >>> value_array = np.array([1]) >>> similarity_search(dataset, value_array) Traceback (most recent call last): - ... + ... ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1 2. If data's shapes are different. @@ -80,7 +80,7 @@ def similarity_search( >>> value_array = np.array([[0, 0, 0], [0, 0, 1]]) >>> similarity_search(dataset, value_array) Traceback (most recent call last): - ... + ... ValueError: Wrong input data's shape... dataset : 2, value_array : 3 3. If data types are different. @@ -90,7 +90,7 @@ def similarity_search( >>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32) >>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): - ... + ... TypeError: Input data have different datatype... dataset : float32, value_array : int32 """ diff --git a/maths/bisection.py b/maths/bisection.py index 93cc2247b..45f26d8d8 100644 --- a/maths/bisection.py +++ b/maths/bisection.py @@ -32,7 +32,7 @@ def bisection(a: float, b: float) -> float: 3.158203125 >>> bisection(2, 3) Traceback (most recent call last): - ... + ... ValueError: Wrong space! """ # Bolzano theory in order to find if there is a root between a and b diff --git a/maths/catalan_number.py b/maths/catalan_number.py index 4a1280a45..85607dc1e 100644 --- a/maths/catalan_number.py +++ b/maths/catalan_number.py @@ -18,15 +18,15 @@ def catalan(number: int) -> int: 14 >>> catalan(0) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=0] must be > 0 >>> catalan(-1) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=-1] must be > 0 >>> catalan(5.0) Traceback (most recent call last): - ... + ... 
TypeError: Input value of [number=5.0] must be an integer """ diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 07bd6d2ec..e0da66ee5 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -47,7 +47,7 @@ def fib_iterative(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_iterative(-1) Traceback (most recent call last): - ... + ... Exception: n is negative """ if n < 0: @@ -73,7 +73,7 @@ def fib_recursive(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_iterative(-1) Traceback (most recent call last): - ... + ... Exception: n is negative """ @@ -105,7 +105,7 @@ def fib_memoization(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_iterative(-1) Traceback (most recent call last): - ... + ... Exception: n is negative """ if n < 0: @@ -146,11 +146,11 @@ def fib_binet(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_binet(-1) Traceback (most recent call last): - ... + ... Exception: n is negative >>> fib_binet(1475) Traceback (most recent call last): - ... + ... Exception: n is too large """ if n < 0: diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index a2619d4e6..e55839bc1 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -26,19 +26,19 @@ def maclaurin_sin(theta: float, accuracy: int = 30) -> float: 0.5440211108893703 >>> maclaurin_sin("10") Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires either an int or float for theta >>> maclaurin_sin(10, -30) Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires a positive int for accuracy >>> maclaurin_sin(10, 30.5) Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires a positive int for accuracy >>> maclaurin_sin(10, "30") Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires a positive int for accuracy """ @@ -78,19 +78,19 @@ def maclaurin_cos(theta: float, accuracy: int = 30) -> float: -0.8390715290764521 >>> maclaurin_cos("10") Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires either an int or float for theta >>> maclaurin_cos(10, -30) Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires a positive int for accuracy >>> maclaurin_cos(10, 30.5) Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires a positive int for accuracy >>> maclaurin_cos(10, "30") Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires a positive int for accuracy """ diff --git a/maths/proth_number.py b/maths/proth_number.py index 6b1519024..ce911473a 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -16,15 +16,15 @@ def proth(number: int) -> int: 25 >>> proth(0) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=0] must be > 0 >>> proth(-1) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=-1] must be > 0 >>> proth(6.0) Traceback (most recent call last): - ... + ... TypeError: Input value of [number=6.0] must be an integer """ diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py index 0cd99affe..114c9dd58 100644 --- a/maths/sylvester_sequence.py +++ b/maths/sylvester_sequence.py @@ -18,12 +18,12 @@ def sylvester(number: int) -> int: >>> sylvester(-1) Traceback (most recent call last): - ... + ... ValueError: The input value of [n=-1] has to be > 0 >>> sylvester(8.0) Traceback (most recent call last): - ... + ... 
AssertionError: The input value of [n=8.0] is not an integer """ assert isinstance(number, int), f"The input value of [n={number}] is not an integer" diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py index 2d4a22a0a..624bbfe10 100644 --- a/maths/zellers_congruence.py +++ b/maths/zellers_congruence.py @@ -14,11 +14,11 @@ def zeller(date_input: str) -> str: Validate out of range month >>> zeller('13-31-2010') Traceback (most recent call last): - ... + ... ValueError: Month must be between 1 - 12 >>> zeller('.2-31-2010') Traceback (most recent call last): - ... + ... ValueError: invalid literal for int() with base 10: '.2' Validate out of range date: diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index f04c81424..487842067 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -29,15 +29,15 @@ class Perceptron: >>> p = Perceptron([], (0, 1, 2)) Traceback (most recent call last): - ... + ... ValueError: Sample data can not be empty >>> p = Perceptron(([0], 1, 2), []) Traceback (most recent call last): - ... + ... ValueError: Target data can not be empty >>> p = Perceptron(([0], 1, 2), (0, 1)) Traceback (most recent call last): - ... + ... ValueError: Sample data and Target data do not have matching lengths """ self.sample = sample diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index db6133a1a..b1e229289 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -26,7 +26,7 @@ def solution(n: int = 998001) -> int: 39893 >>> solution(10000) Traceback (most recent call last): - ... + ... ValueError: That number is larger than our acceptable range. """ diff --git a/project_euler/problem_010/sol3.py b/project_euler/problem_010/sol3.py index 72e2894df..60abbd571 100644 --- a/project_euler/problem_010/sol3.py +++ b/project_euler/problem_010/sol3.py @@ -30,15 +30,15 @@ def solution(n: int = 2000000) -> int: 10 >>> solution(7.1) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'float' object cannot be interpreted as an integer >>> solution(-7) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... IndexError: list assignment index out of range >>> solution("seven") # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: can only concatenate str (not "int") to str """ diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index f4fa8e120..35e6bc506 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -101,7 +101,7 @@ def __assert_sorted(collection): True >>> __assert_sorted([10, -1, 5]) Traceback (most recent call last): - ... + ... ValueError: Collection must be ascending sorted """ if collection != sorted(collection): diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py index d22367c52..e51173643 100644 --- a/sorts/bead_sort.py +++ b/sorts/bead_sort.py @@ -20,12 +20,12 @@ def bead_sort(sequence: list) -> list: >>> bead_sort([1, .9, 0.0, 0, -1, -.9]) Traceback (most recent call last): - ... + ... TypeError: Sequence must be list of non-negative integers >>> bead_sort("Hello world") Traceback (most recent call last): - ... + ... 
TypeError: Sequence must be list of non-negative integers """ if any(not isinstance(x, int) or x < 0 for x in sequence): diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 7430fc5a6..84460e47b 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -23,7 +23,7 @@ def msd_radix_sort(list_of_ints: list[int]) -> list[int]: [1, 45, 1209, 540402, 834598] >>> msd_radix_sort([-1, 34, 45]) Traceback (most recent call last): - ... + ... ValueError: All numbers must be positive """ if not list_of_ints: @@ -93,7 +93,7 @@ def msd_radix_sort_inplace(list_of_ints: list[int]): >>> lst = [-1, 34, 23, 4, -42] >>> msd_radix_sort_inplace(lst) Traceback (most recent call last): - ... + ... ValueError: All numbers must be positive """ diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index 2e1ea8703..e050cd337 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -47,7 +47,7 @@ def is_valid(barcode: int) -> bool: False >>> is_valid(dwefgiweuf) Traceback (most recent call last): - ... + ... NameError: name 'dwefgiweuf' is not defined """ return len(str(barcode)) == 13 and get_check_digit(barcode) == barcode % 10 @@ -61,7 +61,7 @@ def get_barcode(barcode: str) -> int: 8718452538119 >>> get_barcode("dwefgiweuf") Traceback (most recent call last): - ... + ... ValueError: Barcode 'dwefgiweuf' has alphabetic characters. """ if str(barcode).isalpha(): diff --git a/strings/join.py b/strings/join.py index c17ddd144..739856c1a 100644 --- a/strings/join.py +++ b/strings/join.py @@ -15,7 +15,7 @@ def join(separator: str, separated: list[str]) -> str: 'You are amazing!' >>> join("#", ["a", "b", "c", 1]) Traceback (most recent call last): - ... + ... Exception: join() accepts only strings to be joined """ joined = "" From 71e8ed81aeb24820a03b968633884ac10b047ad4 Mon Sep 17 00:00:00 2001 From: Matteo Messmer <40521259+matteomessmer@users.noreply.github.com> Date: Thu, 27 Oct 2022 19:45:58 +0200 Subject: [PATCH 117/368] Added spheres union (#6879) * Spheres union * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update volume.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update volume.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * f-strings * Update maths/volume.py Co-authored-by: Christian Clauss * more tests * fix non negative * fix 0 radius * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tests * fix print * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix comment * fix comment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update volume.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/volume.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index a594e1b90..da4054646 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -108,6 +108,51 @@ def vol_spheres_intersect( return vol_spherical_cap(h1, 
radius_2) + vol_spherical_cap(h2, radius_1) +def vol_spheres_union( + radius_1: float, radius_2: float, centers_distance: float +) -> float: + """ + Calculate the volume of the union of two spheres that possibly intersect. + It is the sum of sphere A and sphere B minus their intersection. + First, it calculates the volumes (v1, v2) of the spheres, + then the volume of the intersection (i) and it returns the sum v1+v2-i. + If centers_distance is 0 then it returns the volume of the larger sphere + :return vol_sphere(radius_1) + vol_sphere(radius_2) + - vol_spheres_intersect(radius_1, radius_2, centers_distance) + + >>> vol_spheres_union(2, 2, 1) + 45.814892864851146 + >>> vol_spheres_union(1.56, 2.2, 1.4) + 48.77802773671288 + >>> vol_spheres_union(0, 2, 1) + Traceback (most recent call last): + ... + ValueError: vol_spheres_union() only accepts non-negative values, non-zero radius + >>> vol_spheres_union('1.56', '2.2', '1.4') + Traceback (most recent call last): + ... + TypeError: '<=' not supported between instances of 'str' and 'int' + >>> vol_spheres_union(1, None, 1) + Traceback (most recent call last): + ... + TypeError: '<=' not supported between instances of 'NoneType' and 'int' + """ + + if radius_1 <= 0 or radius_2 <= 0 or centers_distance < 0: + raise ValueError( + "vol_spheres_union() only accepts non-negative values, non-zero radius" + ) + + if centers_distance == 0: + return vol_sphere(max(radius_1, radius_2)) + + return ( + vol_sphere(radius_1) + + vol_sphere(radius_2) + - vol_spheres_intersect(radius_1, radius_2, centers_distance) + ) + + def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. @@ -408,12 +453,13 @@ def main(): print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5 print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75 print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1 - print( - f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" - ) # ~= 28.3 print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 print(f"Spherical cap: {vol_spherical_cap(1, 2) = }") # ~= 5.24 print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 + print(f"Spheres union: {vol_spheres_union(2, 2, 1) = }") # ~= 45.81 + print( + f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" + ) # ~= 28.3 if __name__ == "__main__": From 501a1cf0c7b31773fb02bc2966f5c1db99311b36 Mon Sep 17 00:00:00 2001 From: Alexandre Velloso <4320811+AlexandreVelloso@users.noreply.github.com> Date: Thu, 27 Oct 2022 21:51:14 +0100 Subject: [PATCH 118/368] Remove unnecessary else statement (#7759) * Remove unnecessary else statement * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/karatsuba.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/maths/karatsuba.py b/maths/karatsuba.py index b772c0d77..4bf4aecdc 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -10,18 +10,18 @@ def karatsuba(a, b): """ if len(str(a)) == 1 or len(str(b)) == 1: return a * b - else: - m1 = max(len(str(a)), len(str(b))) - m2 = m1 // 2 - a1, a2 = divmod(a, 10**m2) - b1, b2 = divmod(b, 10**m2) + m1 = max(len(str(a)), len(str(b))) + m2 = m1 // 2 - x = karatsuba(a2, b2) - y = karatsuba((a1 + a2), (b1 + b2)) - z = karatsuba(a1, b1) + a1, a2 = divmod(a, 10**m2) + b1, b2 = divmod(b, 10**m2) - return (z * 10 ** (2 * m2)) 
+ ((y - z - x) * 10 ** (m2)) + (x) + x = karatsuba(a2, b2) + y = karatsuba((a1 + a2), (b1 + b2)) + z = karatsuba(a1, b1) + + return (z * 10 ** (2 * m2)) + ((y - z - x) * 10 ** (m2)) + (x) def main(): From 61eedc16c392823e46ef37cc2a86864fa15e89fe Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 21:52:00 +0100 Subject: [PATCH 119/368] Remove useless code in doctests (#7733) * refactor: Fix matrix display deprecation * refactor: Remove useless `print` and `pass` statements * revert: Replace broken doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert: Fix failing doctests * chore: Satisfy pre-commit Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/hamiltonian_cycle.py | 4 ++-- computer_vision/flip_augmentation.py | 3 --- computer_vision/mosaic_augmentation.py | 3 --- data_structures/heap/binomial_heap.py | 4 ++-- data_structures/heap/heap.py | 8 ++++---- data_structures/heap/min_heap.py | 2 +- data_structures/linked_list/skip_list.py | 3 +++ graphs/gale_shapley_bigraph.py | 2 +- graphs/graph_list.py | 6 +++--- graphs/minimum_spanning_tree_prims2.py | 8 ++++---- graphs/random_graph_generator.py | 2 +- .../local_weighted_learning.py | 2 -- maths/polynomial_evaluation.py | 2 +- maths/radix2_fft.py | 2 +- matrix/matrix_class.py | 6 +++--- searches/simple_binary_search.py | 20 +++++++++---------- sorts/bitonic_sort.py | 12 +++++------ sorts/normal_distribution_quick_sort.md | 4 ++-- sorts/recursive_insertion_sort.py | 12 +++++------ web_programming/reddit.py | 2 -- web_programming/search_books_by_isbn.py | 5 +---- 21 files changed, 51 insertions(+), 61 deletions(-) diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 4c6ae4679..4a4156d70 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -71,7 +71,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) >>> curr_ind = 1 >>> util_hamilton_cycle(graph, path, curr_ind) True - >>> print(path) + >>> path [0, 1, 2, 4, 3, 0] Case 2: Use exact graph as in previous case, but in the properties taken from @@ -85,7 +85,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) >>> curr_ind = 3 >>> util_hamilton_cycle(graph, path, curr_ind) True - >>> print(path) + >>> path [0, 1, 2, 4, 3, 0] """ diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py index 1272357fd..93b4e3f6d 100644 --- a/computer_vision/flip_augmentation.py +++ b/computer_vision/flip_augmentation.py @@ -22,7 +22,6 @@ def main() -> None: Get images list and annotations list from input dir. Update new images and annotations. Save images and annotations in output dir. - >>> pass # A doctest is not possible for this function. """ img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR) print("Processing...") @@ -48,7 +47,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]: - label_dir : Path to label include annotation of images - img_dir : Path to folder contain images Return : List of images path and labels - >>> pass # A doctest is not possible for this function. """ img_paths = [] labels = [] @@ -88,7 +86,6 @@ def update_image_and_anno( - new_imgs_list : image after resize - new_annos_lists : list of new annotation after scale - path_list : list the name of image file - >>> pass # A doctest is not possible for this function. 
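This clean-up sweep leans on two doctest facts: a bare expression is compared against the value's repr(), so a `print()` wrapper is usually redundant, and placeholder lines such as `>>> pass` add nothing. A minimal sketch with a hypothetical `double` function (not part of this repository) showing why both spellings pass:

```
from __future__ import annotations


def double(values: list[int]) -> list[int]:
    """
    >>> double([1, 2])         # bare expression: doctest compares against repr(result)
    [2, 4]
    >>> print(double([1, 2]))  # print() also passes, but adds nothing for most objects
    [2, 4]
    """
    return [value * 2 for value in values]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

For objects whose str() and repr() differ (plain strings, for example), the two forms are not interchangeable, which is presumably why a handful of `print()` calls survive the clean-up.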
""" new_annos_lists = [] path_list = [] diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index 4fd81957c..e29537497 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -23,7 +23,6 @@ def main() -> None: Get images list and annotations list from input dir. Update new images and annotations. Save images and annotations in output dir. - >>> pass # A doctest is not possible for this function. """ img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR) for index in range(NUMBER_IMAGES): @@ -60,7 +59,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]: - label_dir : Path to label include annotation of images - img_dir : Path to folder contain images Return : List of images path and labels - >>> pass # A doctest is not possible for this function. """ img_paths = [] labels = [] @@ -105,7 +103,6 @@ def update_image_and_anno( - output_img : image after resize - new_anno : list of new annotation after scale - path[0] : get the name of image file - >>> pass # A doctest is not possible for this function. """ output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8) scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 334b444ea..6398c9943 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -71,7 +71,7 @@ class BinomialHeap: ... first_heap.insert(number) Size test - >>> print(first_heap.size) + >>> first_heap.size 30 Deleting - delete() test @@ -97,7 +97,7 @@ class BinomialHeap: # # # # preOrder() test - >>> print(second_heap.preOrder()) + >>> second_heap.preOrder() [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)] printing Heap - __str__() test diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 4c19747ec..071790d18 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -9,20 +9,20 @@ class Heap: >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5] >>> h = Heap() >>> h.build_max_heap(unsorted) - >>> print(h) + >>> h [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5] >>> >>> h.extract_max() 209 - >>> print(h) + >>> h [201, 107, 25, 103, 11, 15, 1, 9, 7, 5] >>> >>> h.insert(100) - >>> print(h) + >>> h [201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11] >>> >>> h.heap_sort() - >>> print(h) + >>> h [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201] """ diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index d8975eb2d..0403624f2 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -27,7 +27,7 @@ class MinHeap: >>> myMinHeap.decrease_key(b, -17) >>> print(b) Node(B, -17) - >>> print(myMinHeap["B"]) + >>> myMinHeap["B"] -17 """ diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index a667e3e9b..96b0db7c8 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -443,4 +443,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() diff --git a/graphs/gale_shapley_bigraph.py b/graphs/gale_shapley_bigraph.py index 56b8c6c77..f4b315381 100644 --- a/graphs/gale_shapley_bigraph.py +++ b/graphs/gale_shapley_bigraph.py @@ -17,7 +17,7 @@ def stable_matching( >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]] >>> recipient_pref = [[3, 1, 2, 0], [3, 
1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]] - >>> print(stable_matching(donor_pref, recipient_pref)) + >>> stable_matching(donor_pref, recipient_pref) [1, 2, 3, 0] """ assert len(donor_pref) == len(recipient_pref) diff --git a/graphs/graph_list.py b/graphs/graph_list.py index f04b7a923..e871f3b8a 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -18,7 +18,7 @@ class GraphAdjacencyList(Generic[T]): Directed graph example: >>> d_graph = GraphAdjacencyList() - >>> d_graph + >>> print(d_graph) {} >>> d_graph.add_edge(0, 1) {0: [1], 1: []} @@ -26,7 +26,7 @@ class GraphAdjacencyList(Generic[T]): {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []} >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7) {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} - >>> print(d_graph) + >>> d_graph {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} >>> print(repr(d_graph)) {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} @@ -68,7 +68,7 @@ class GraphAdjacencyList(Generic[T]): {'a': ['b'], 'b': ['a']} >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f') {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} - >>> print(char_graph) + >>> char_graph {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} """ diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index d924ee3db..707be783d 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -69,16 +69,16 @@ class MinPriorityQueue(Generic[T]): >>> queue.push(3, 4000) >>> queue.push(4, 3000) - >>> print(queue.extract_min()) + >>> queue.extract_min() 2 >>> queue.update_key(4, 50) - >>> print(queue.extract_min()) + >>> queue.extract_min() 4 - >>> print(queue.extract_min()) + >>> queue.extract_min() 1 - >>> print(queue.extract_min()) + >>> queue.extract_min() 3 """ diff --git a/graphs/random_graph_generator.py b/graphs/random_graph_generator.py index 15ccee5b3..0e7e18bc8 100644 --- a/graphs/random_graph_generator.py +++ b/graphs/random_graph_generator.py @@ -53,7 +53,7 @@ def complete_graph(vertices_number: int) -> dict: @input: vertices_number (number of vertices), directed (False if the graph is undirected, True otherwise) @example: - >>> print(complete_graph(3)) + >>> complete_graph(3) {0: [1, 2], 1: [0, 2], 2: [0, 1]} """ return { diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index 6c542ab82..df03fe0a1 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -71,7 +71,6 @@ def local_weight_regression( def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: """ Function used for loading data from the seaborn splitting into x and y points - >>> pass # this function has no doctest """ import seaborn as sns @@ -112,7 +111,6 @@ def plot_preds( ) -> plt.plot: """ This function used to plot predictions and display the graph - >>> pass #this function has no doctest """ xsort = training_data_x.copy() xsort.sort(axis=0) diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py index 8ee82467e..90a51f521 100644 --- a/maths/polynomial_evaluation.py +++ b/maths/polynomial_evaluation.py @@ -45,7 +45,7 @@ if __name__ == "__main__": >>> poly = (0.0, 0.0, 5.0, 9.3, 7.0) # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2 >>> x = -13.0 >>> # f(-13) = 7.0(-13)^4 + 
9.3(-13)^3 + 5.0(-13)^2 = 180339.9 - >>> print(evaluate_poly(poly, x)) + >>> evaluate_poly(poly, x) 180339.9 """ poly = (0.0, 0.0, 5.0, 9.3, 7.0) diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 52442134d..1def58e1f 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -39,7 +39,7 @@ class FFT: >>> x = FFT(A, B) Print product - >>> print(x.product) # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5 + >>> x.product # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5 [(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)] __str__ test diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 8b6fefa21..0c3078fe6 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -21,9 +21,9 @@ class Matrix: [7. 8. 9.]] Matrix rows and columns are available as 2D arrays - >>> print(matrix.rows) + >>> matrix.rows [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - >>> print(matrix.columns()) + >>> matrix.columns() [[1, 4, 7], [2, 5, 8], [3, 6, 9]] Order is returned as a tuple @@ -55,7 +55,7 @@ class Matrix: [[-3. 6. -3.] [6. -12. 6.] [-3. 6. -3.]] - >>> print(matrix.inverse()) + >>> matrix.inverse() Traceback (most recent call last): ... TypeError: Only matrices with a non-zero determinant have an inverse diff --git a/searches/simple_binary_search.py b/searches/simple_binary_search.py index d1f7f7a51..ff043d736 100644 --- a/searches/simple_binary_search.py +++ b/searches/simple_binary_search.py @@ -13,25 +13,25 @@ from __future__ import annotations def binary_search(a_list: list[int], item: int) -> bool: """ >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42] - >>> print(binary_search(test_list, 3)) + >>> binary_search(test_list, 3) False - >>> print(binary_search(test_list, 13)) + >>> binary_search(test_list, 13) True - >>> print(binary_search([4, 4, 5, 6, 7], 4)) + >>> binary_search([4, 4, 5, 6, 7], 4) True - >>> print(binary_search([4, 4, 5, 6, 7], -10)) + >>> binary_search([4, 4, 5, 6, 7], -10) False - >>> print(binary_search([-18, 2], -18)) + >>> binary_search([-18, 2], -18) True - >>> print(binary_search([5], 5)) + >>> binary_search([5], 5) True - >>> print(binary_search(['a', 'c', 'd'], 'c')) + >>> binary_search(['a', 'c', 'd'], 'c') True - >>> print(binary_search(['a', 'c', 'd'], 'f')) + >>> binary_search(['a', 'c', 'd'], 'f') False - >>> print(binary_search([], 1)) + >>> binary_search([], 1) False - >>> print(binary_search([-.1, .1 , .8], .1)) + >>> binary_search([-.1, .1 , .8], .1) True >>> binary_search(range(-5000, 5000, 10), 80) True diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index 201fecd2c..b65f877a4 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -16,19 +16,19 @@ def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> >>> arr = [12, 42, -21, 1] >>> comp_and_swap(arr, 1, 2, 1) - >>> print(arr) + >>> arr [12, -21, 42, 1] >>> comp_and_swap(arr, 1, 2, 0) - >>> print(arr) + >>> arr [12, 42, -21, 1] >>> comp_and_swap(arr, 0, 3, 1) - >>> print(arr) + >>> arr [1, 42, -21, 12] >>> comp_and_swap(arr, 0, 3, 0) - >>> print(arr) + >>> arr [12, 42, -21, 1] """ if (direction == 1 and array[index1] > array[index2]) or ( @@ -46,11 +46,11 @@ def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> No >>> arr = [12, 42, -21, 1] >>> bitonic_merge(arr, 0, 4, 1) - >>> print(arr) + >>> arr [-21, 1, 12, 42] >>> bitonic_merge(arr, 0, 4, 0) - >>> print(arr) + >>> arr [42, 12, 1, -21] """ if length > 1: diff --git a/sorts/normal_distribution_quick_sort.md b/sorts/normal_distribution_quick_sort.md index c073f2cbc..27aca340f 100644 --- 
a/sorts/normal_distribution_quick_sort.md +++ b/sorts/normal_distribution_quick_sort.md @@ -17,8 +17,8 @@ The array elements are taken from a Standard Normal Distribution, having mean = >>> mu, sigma = 0, 1 # mean and standard deviation >>> X = np.random.normal(mu, sigma, p) >>> np.save(outfile, X) ->>> print('The array is') ->>> print(X) +>>> 'The array is' +>>> X ``` diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py index ab2716f8e..297dbe945 100644 --- a/sorts/recursive_insertion_sort.py +++ b/sorts/recursive_insertion_sort.py @@ -14,17 +14,17 @@ def rec_insertion_sort(collection: list, n: int): >>> col = [1, 2, 1] >>> rec_insertion_sort(col, len(col)) - >>> print(col) + >>> col [1, 1, 2] >>> col = [2, 1, 0, -1, -2] >>> rec_insertion_sort(col, len(col)) - >>> print(col) + >>> col [-2, -1, 0, 1, 2] >>> col = [1] >>> rec_insertion_sort(col, len(col)) - >>> print(col) + >>> col [1] """ # Checks if the entire collection has been sorted @@ -41,17 +41,17 @@ def insert_next(collection: list, index: int): >>> col = [3, 2, 4, 2] >>> insert_next(col, 1) - >>> print(col) + >>> col [2, 3, 4, 2] >>> col = [3, 2, 3] >>> insert_next(col, 2) - >>> print(col) + >>> col [3, 2, 3] >>> col = [] >>> insert_next(col, 1) - >>> print(col) + >>> col [] """ # Checks order between adjacent elements diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 672109f13..6a31c81c3 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -23,8 +23,6 @@ def get_subreddit_data( limit : Number of posts to fetch age : ["new", "top", "hot"] wanted_data : Get only the required data in the list - - >>> pass """ wanted_data = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)): diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index 22a31dcb1..abac3c70b 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -19,7 +19,6 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ... # >>> get_openlibrary_data(olid='/authors/OL7353617A') # doctest: +ELLIPSIS {'name': 'Adrian Brisku', 'created': {'type': '/type/datetime', ... - >>> pass # Placate https://github.com/apps/algorithms-keeper """ new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes if new_olid.count("/") != 1: @@ -29,9 +28,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: def summarize_book(ol_book_data: dict) -> dict: """ - Given Open Library book data, return a summary as a Python dict. - - >>> pass # Placate https://github.com/apps/algorithms-keeper + Given Open Library book data, return a summary as a Python dict. 
""" desired_keys = { "title": "Title", From de3271ec80c76a8b79c913f68a94f693e8a00a0b Mon Sep 17 00:00:00 2001 From: SwayamSahu <91021799+SwayamSahu@users.noreply.github.com> Date: Fri, 28 Oct 2022 02:32:15 +0530 Subject: [PATCH 120/368] Refactoring the syntax using list comprehension (#7749) * Refactoring the syntax using list comprehension * Update detecting_english_programmatically.py * Update detecting_english_programmatically.py Co-authored-by: Christian Clauss --- strings/detecting_english_programmatically.py | 20 ++++--------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/strings/detecting_english_programmatically.py b/strings/detecting_english_programmatically.py index aa18db210..b9000101b 100644 --- a/strings/detecting_english_programmatically.py +++ b/strings/detecting_english_programmatically.py @@ -1,7 +1,7 @@ import os +from string import ascii_letters -UPPERLETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -LETTERS_AND_SPACE = UPPERLETTERS + UPPERLETTERS.lower() + " \t\n" +LETTERS_AND_SPACE = ascii_letters + " \t\n" def load_dictionary() -> dict[str, None]: @@ -20,24 +20,12 @@ def get_english_count(message: str) -> float: message = message.upper() message = remove_non_letters(message) possible_words = message.split() - - if possible_words == []: - return 0.0 - - matches = 0 - for word in possible_words: - if word in ENGLISH_WORDS: - matches += 1 - + matches = len([word for word in possible_words if word in ENGLISH_WORDS]) return float(matches) / len(possible_words) def remove_non_letters(message: str) -> str: - letters_only = [] - for symbol in message: - if symbol in LETTERS_AND_SPACE: - letters_only.append(symbol) - return "".join(letters_only) + return "".join(symbol for symbol in message if symbol in LETTERS_AND_SPACE) def is_english( From 25757e697cfbb5bc7abf47c1ffa13061cb1534e1 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 22:03:01 +0100 Subject: [PATCH 121/368] Binary tree path sum (#7748) * feat: Implement binary tree path sum (#7135) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/binary_tree_path_sum.py Co-authored-by: Christian Clauss * refactor: Rename `dfs` to `depth_first_search` Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/binary_tree_path_sum.py | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 data_structures/binary_tree/binary_tree_path_sum.py diff --git a/data_structures/binary_tree/binary_tree_path_sum.py b/data_structures/binary_tree/binary_tree_path_sum.py new file mode 100644 index 000000000..a3fe9ca7a --- /dev/null +++ b/data_structures/binary_tree/binary_tree_path_sum.py @@ -0,0 +1,88 @@ +""" +Given the root of a binary tree and an integer target, +find the number of paths where the sum of the values +along the path equals target. + + +Leetcode reference: https://leetcode.com/problems/path-sum-iii/ +""" + +from __future__ import annotations + + +class Node: + """ + A Node has value variable and pointers to Nodes to its left and right. 
+ """ + + def __init__(self, value: int) -> None: + self.value = value + self.left: Node | None = None + self.right: Node | None = None + + +class BinaryTreePathSum: + r""" + The below tree looks like this + 10 + / \ + 5 -3 + / \ \ + 3 2 11 + / \ \ + 3 -2 1 + + + >>> tree = Node(10) + >>> tree.left = Node(5) + >>> tree.right = Node(-3) + >>> tree.left.left = Node(3) + >>> tree.left.right = Node(2) + >>> tree.right.right = Node(11) + >>> tree.left.left.left = Node(3) + >>> tree.left.left.right = Node(-2) + >>> tree.left.right.right = Node(1) + + >>> BinaryTreePathSum().path_sum(tree, 8) + 3 + >>> BinaryTreePathSum().path_sum(tree, 7) + 2 + >>> tree.right.right = Node(10) + >>> BinaryTreePathSum().path_sum(tree, 8) + 2 + """ + + target: int + + def __init__(self) -> None: + self.paths = 0 + + def depth_first_search(self, node: Node | None, path_sum: int) -> None: + if node is None: + return + + if path_sum == self.target: + self.paths += 1 + + if node.left: + self.depth_first_search(node.left, path_sum + node.left.value) + if node.right: + self.depth_first_search(node.right, path_sum + node.right.value) + + def path_sum(self, node: Node | None, target: int | None = None) -> int: + if node is None: + return 0 + if target is not None: + self.target = target + + self.depth_first_search(node, node.value) + self.path_sum(node.left) + self.path_sum(node.right) + + return self.paths + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 15c93e5f4bc5b03cecc000506bdf45c100b8f0b3 Mon Sep 17 00:00:00 2001 From: MoPaMo <67760881+MoPaMo@users.noreply.github.com> Date: Thu, 27 Oct 2022 23:03:34 +0200 Subject: [PATCH 122/368] fix typo in caesar_cipher.py (#7761) very character-> every character --- ciphers/caesar_cipher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index 8cd9fab58..d19b9a337 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -27,7 +27,7 @@ def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: ========================= The caesar cipher is named after Julius Caesar who used it when sending secret military messages to his troops. This is a simple substitution cipher - where very character in the plain-text is shifted by a certain number known + where every character in the plain-text is shifted by a certain number known as the "key" or "shift". 
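A minimal sketch of the shift described above; `shift_letter` is a hypothetical helper used only for illustration, not part of the patched module (the module's docstring continues with its own Example section):

```
def shift_letter(
    letter: str, key: int, alphabet: str = "abcdefghijklmnopqrstuvwxyz"
) -> str:
    # Characters outside the alphabet (spaces, punctuation) pass through unchanged.
    if letter not in alphabet:
        return letter
    # Move the letter `key` places along the alphabet, wrapping around at the end.
    return alphabet[(alphabet.index(letter) + key) % len(alphabet)]


print("".join(shift_letter(letter, 3) for letter in "attack at dawn"))  # dwwdfn dw gdzq
```

Decrypting is the same operation with the key negated.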
Example: From 19bff003aa1c365bec86d3f4a13a9c3d6c36d230 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 28 Oct 2022 15:54:54 +0200 Subject: [PATCH 123/368] Adopt Python >= 3.8 assignment expressions using auto-walrus (#7737) * Adopt Python >= 3.8 assignment expressions using auto-walrus * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 5 + DIRECTORY.md | 3 + ciphers/enigma_machine2.py | 3 +- .../linked_list/doubly_linked_list_two.py | 3 +- dynamic_programming/fibonacci.py | 3 +- .../sequential_minimum_optimization.py | 1261 ++++++++--------- strings/indian_phone_validator.py | 3 +- 7 files changed, 642 insertions(+), 639 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5bdda50be..7f6c206b4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,6 +13,11 @@ repos: )$ - id: requirements-txt-fixer + - repo: https://github.com/MarcoGorelli/auto-walrus + rev: v0.2.1 + hooks: + - id: auto-walrus + - repo: https://github.com/psf/black rev: 22.10.0 hooks: diff --git a/DIRECTORY.md b/DIRECTORY.md index ba7d3e62a..7621427a6 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -45,6 +45,7 @@ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) + * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -326,6 +327,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) * [Interest](financial/interest.py) + * [Price Plus Tax](financial/price_plus_tax.py) ## Fractals * [Julia Sets](fractals/julia_sets.py) @@ -669,6 +671,7 @@ * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) + * [Malus Law](physics/malus_law.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 9f9dbe6f7..a877256eb 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -86,8 +86,7 @@ def _validator( """ # Checks if there are 3 unique rotors - unique_rotsel = len(set(rotsel)) - if unique_rotsel < 3: + if (unique_rotsel := len(set(rotsel))) < 3: raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})") # Checks if rotor positions are valid diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 184b6966b..94b916a62 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -143,9 +143,8 @@ class LinkedList: raise Exception("Node not found") def delete_value(self, value): - node = self.get_node(value) - if node is not None: + if (node := self.get_node(value)) is not None: if node == 
self.head: self.head = self.head.get_next() diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py index 4abc60d4f..7ec5993ef 100644 --- a/dynamic_programming/fibonacci.py +++ b/dynamic_programming/fibonacci.py @@ -18,8 +18,7 @@ class Fibonacci: >>> Fibonacci().get(5) [0, 1, 1, 2, 3] """ - difference = index - (len(self.sequence) - 2) - if difference >= 1: + if (difference := index - (len(self.sequence) - 2)) >= 1: for _ in range(difference): self.sequence.append(self.sequence[-1] + self.sequence[-2]) return self.sequence[:index] diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index df5b03790..66535e806 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -1,631 +1,630 @@ -""" - Implementation of sequential minimal optimization (SMO) for support vector machines - (SVM). - - Sequential minimal optimization (SMO) is an algorithm for solving the quadratic - programming (QP) problem that arises during the training of support vector - machines. - It was invented by John Platt in 1998. - -Input: - 0: type: numpy.ndarray. - 1: first column of ndarray must be tags of samples, must be 1 or -1. - 2: rows of ndarray represent samples. - -Usage: - Command: - python3 sequential_minimum_optimization.py - Code: - from sequential_minimum_optimization import SmoSVM, Kernel - - kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5) - init_alphas = np.zeros(train.shape[0]) - SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4, - b=0.0, tolerance=0.001) - SVM.fit() - predict = SVM.predict(test_samples) - -Reference: - https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf - https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf -""" - - -import os -import sys -import urllib.request - -import numpy as np -import pandas as pd -from matplotlib import pyplot as plt -from sklearn.datasets import make_blobs, make_circles -from sklearn.preprocessing import StandardScaler - -CANCER_DATASET_URL = ( - "https://archive.ics.uci.edu/ml/machine-learning-databases/" - "breast-cancer-wisconsin/wdbc.data" -) - - -class SmoSVM: - def __init__( - self, - train, - kernel_func, - alpha_list=None, - cost=0.4, - b=0.0, - tolerance=0.001, - auto_norm=True, - ): - self._init = True - self._auto_norm = auto_norm - self._c = np.float64(cost) - self._b = np.float64(b) - self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001) - - self.tags = train[:, 0] - self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:] - self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0]) - self.Kernel = kernel_func - - self._eps = 0.001 - self._all_samples = list(range(self.length)) - self._K_matrix = self._calculate_k_matrix() - self._error = np.zeros(self.length) - self._unbound = [] - - self.choose_alpha = self._choose_alphas() - - # Calculate alphas using SMO algorithm - def fit(self): - k = self._k - state = None - while True: - - # 1: Find alpha1, alpha2 - try: - i1, i2 = self.choose_alpha.send(state) - state = None - except StopIteration: - print("Optimization done!\nEvery sample satisfy the KKT condition!") - break - - # 2: calculate new alpha2 and new alpha1 - y1, y2 = self.tags[i1], self.tags[i2] - a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy() - e1, e2 = self._e(i1), self._e(i2) - args = (i1, i2, a1, a2, e1, e2, y1, 
y2) - a1_new, a2_new = self._get_new_alpha(*args) - if not a1_new and not a2_new: - state = False - continue - self.alphas[i1], self.alphas[i2] = a1_new, a2_new - - # 3: update threshold(b) - b1_new = np.float64( - -e1 - - y1 * k(i1, i1) * (a1_new - a1) - - y2 * k(i2, i1) * (a2_new - a2) - + self._b - ) - b2_new = np.float64( - -e2 - - y2 * k(i2, i2) * (a2_new - a2) - - y1 * k(i1, i2) * (a1_new - a1) - + self._b - ) - if 0.0 < a1_new < self._c: - b = b1_new - if 0.0 < a2_new < self._c: - b = b2_new - if not (np.float64(0) < a2_new < self._c) and not ( - np.float64(0) < a1_new < self._c - ): - b = (b1_new + b2_new) / 2.0 - b_old = self._b - self._b = b - - # 4: update error value,here we only calculate those non-bound samples' - # error - self._unbound = [i for i in self._all_samples if self._is_unbound(i)] - for s in self.unbound: - if s == i1 or s == i2: - continue - self._error[s] += ( - y1 * (a1_new - a1) * k(i1, s) - + y2 * (a2_new - a2) * k(i2, s) - + (self._b - b_old) - ) - - # if i1 or i2 is non-bound,update there error value to zero - if self._is_unbound(i1): - self._error[i1] = 0 - if self._is_unbound(i2): - self._error[i2] = 0 - - # Predict test samples - def predict(self, test_samples, classify=True): - - if test_samples.shape[1] > self.samples.shape[1]: - raise ValueError( - "Test samples' feature length does not equal to that of train samples" - ) - - if self._auto_norm: - test_samples = self._norm(test_samples) - - results = [] - for test_sample in test_samples: - result = self._predict(test_sample) - if classify: - results.append(1 if result > 0 else -1) - else: - results.append(result) - return np.array(results) - - # Check if alpha violate KKT condition - def _check_obey_kkt(self, index): - alphas = self.alphas - tol = self._tol - r = self._e(index) * self.tags[index] - c = self._c - - return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0) - - # Get value calculated from kernel function - def _k(self, i1, i2): - # for test samples,use Kernel function - if isinstance(i2, np.ndarray): - return self.Kernel(self.samples[i1], i2) - # for train samples,Kernel values have been saved in matrix - else: - return self._K_matrix[i1, i2] - - # Get sample's error - def _e(self, index): - """ - Two cases: - 1:Sample[index] is non-bound,Fetch error from list: _error - 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi - - """ - # get from error data - if self._is_unbound(index): - return self._error[index] - # get by g(xi) - yi - else: - gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b - yi = self.tags[index] - return gx - yi - - # Calculate Kernel matrix of all possible i1,i2 ,saving time - def _calculate_k_matrix(self): - k_matrix = np.zeros([self.length, self.length]) - for i in self._all_samples: - for j in self._all_samples: - k_matrix[i, j] = np.float64( - self.Kernel(self.samples[i, :], self.samples[j, :]) - ) - return k_matrix - - # Predict test sample's tag - def _predict(self, sample): - k = self._k - predicted_value = ( - np.sum( - [ - self.alphas[i1] * self.tags[i1] * k(i1, sample) - for i1 in self._all_samples - ] - ) - + self._b - ) - return predicted_value - - # Choose alpha1 and alpha2 - def _choose_alphas(self): - locis = yield from self._choose_a1() - if not locis: - return - return locis - - def _choose_a1(self): - """ - Choose first alpha ;steps: - 1:First loop over all sample - 2:Second loop over all non-bound samples till all non-bound samples does not - voilate kkt condition. 
- 3:Repeat this two process endlessly,till all samples does not voilate kkt - condition samples after first loop. - """ - while True: - all_not_obey = True - # all sample - print("scanning all sample!") - for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: - all_not_obey = False - yield from self._choose_a2(i1) - - # non-bound sample - print("scanning non-bound sample!") - while True: - not_obey = True - for i1 in [ - i - for i in self._all_samples - if self._check_obey_kkt(i) and self._is_unbound(i) - ]: - not_obey = False - yield from self._choose_a2(i1) - if not_obey: - print("all non-bound samples fit the KKT condition!") - break - if all_not_obey: - print("all samples fit the KKT condition! Optimization done!") - break - return False - - def _choose_a2(self, i1): - """ - Choose the second alpha by using heuristic algorithm ;steps: - 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). - 2: Start in a random point,loop over all non-bound samples till alpha1 and - alpha2 are optimized. - 3: Start in a random point,loop over all samples till alpha1 and alpha2 are - optimized. - """ - self._unbound = [i for i in self._all_samples if self._is_unbound(i)] - - if len(self.unbound) > 0: - tmp_error = self._error.copy().tolist() - tmp_error_dict = { - index: value - for index, value in enumerate(tmp_error) - if self._is_unbound(index) - } - if self._e(i1) >= 0: - i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index]) - else: - i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index]) - cmd = yield i1, i2 - if cmd is None: - return - - for i2 in np.roll(self.unbound, np.random.choice(self.length)): - cmd = yield i1, i2 - if cmd is None: - return - - for i2 in np.roll(self._all_samples, np.random.choice(self.length)): - cmd = yield i1, i2 - if cmd is None: - return - - # Get the new alpha2 and new alpha1 - def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): - k = self._k - if i1 == i2: - return None, None - - # calculate L and H which bound the new alpha2 - s = y1 * y2 - if s == -1: - l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) - else: - l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) - if l == h: # noqa: E741 - return None, None - - # calculate eta - k11 = k(i1, i1) - k22 = k(i2, i2) - k12 = k(i1, i2) - eta = k11 + k22 - 2.0 * k12 - - # select the new alpha2 which could get the minimal objectives - if eta > 0.0: - a2_new_unc = a2 + (y2 * (e1 - e2)) / eta - # a2_new has a boundary - if a2_new_unc >= h: - a2_new = h - elif a2_new_unc <= l: - a2_new = l - else: - a2_new = a2_new_unc - else: - b = self._b - l1 = a1 + s * (a2 - l) - h1 = a1 + s * (a2 - h) - - # way 1 - f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) - f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) - ol = ( - l1 * f1 - + l * f2 - + 1 / 2 * l1**2 * k(i1, i1) - + 1 / 2 * l**2 * k(i2, i2) - + s * l * l1 * k(i1, i2) - ) - oh = ( - h1 * f1 - + h * f2 - + 1 / 2 * h1**2 * k(i1, i1) - + 1 / 2 * h**2 * k(i2, i2) - + s * h * h1 * k(i1, i2) - ) - """ - # way 2 - Use objective function check which alpha2 new could get the minimal - objectives - """ - if ol < (oh - self._eps): - a2_new = l - elif ol > oh + self._eps: - a2_new = h - else: - a2_new = a2 - - # a1_new has a boundary too - a1_new = a1 + s * (a2 - a2_new) - if a1_new < 0: - a2_new += s * a1_new - a1_new = 0 - if a1_new > self._c: - a2_new += s * (a1_new - self._c) - a1_new = self._c - - return a1_new, a2_new - - # Normalise data using min_max way - def _norm(self, data): - if self._init: - 
self._min = np.min(data, axis=0) - self._max = np.max(data, axis=0) - self._init = False - return (data - self._min) / (self._max - self._min) - else: - return (data - self._min) / (self._max - self._min) - - def _is_unbound(self, index): - if 0.0 < self.alphas[index] < self._c: - return True - else: - return False - - def _is_support(self, index): - if self.alphas[index] > 0: - return True - else: - return False - - @property - def unbound(self): - return self._unbound - - @property - def support(self): - return [i for i in range(self.length) if self._is_support(i)] - - @property - def length(self): - return self.samples.shape[0] - - -class Kernel: - def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0): - self.degree = np.float64(degree) - self.coef0 = np.float64(coef0) - self.gamma = np.float64(gamma) - self._kernel_name = kernel - self._kernel = self._get_kernel(kernel_name=kernel) - self._check() - - def _polynomial(self, v1, v2): - return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree - - def _linear(self, v1, v2): - return np.inner(v1, v2) + self.coef0 - - def _rbf(self, v1, v2): - return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2)) - - def _check(self): - if self._kernel == self._rbf: - if self.gamma < 0: - raise ValueError("gamma value must greater than 0") - - def _get_kernel(self, kernel_name): - maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} - return maps[kernel_name] - - def __call__(self, v1, v2): - return self._kernel(v1, v2) - - def __repr__(self): - return self._kernel_name - - -def count_time(func): - def call_func(*args, **kwargs): - import time - - start_time = time.time() - func(*args, **kwargs) - end_time = time.time() - print(f"smo algorithm cost {end_time - start_time} seconds") - - return call_func - - -@count_time -def test_cancel_data(): - print("Hello!\nStart test svm by smo algorithm!") - # 0: download dataset and load into pandas' dataframe - if not os.path.exists(r"cancel_data.csv"): - request = urllib.request.Request( - CANCER_DATASET_URL, - headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, - ) - response = urllib.request.urlopen(request) - content = response.read().decode("utf-8") - with open(r"cancel_data.csv", "w") as f: - f.write(content) - - data = pd.read_csv(r"cancel_data.csv", header=None) - - # 1: pre-processing data - del data[data.columns.tolist()[0]] - data = data.dropna(axis=0) - data = data.replace({"M": np.float64(1), "B": np.float64(-1)}) - samples = np.array(data)[:, :] - - # 2: dividing data into train_data data and test_data data - train_data, test_data = samples[:328, :], samples[328:, :] - test_tags, test_samples = test_data[:, 0], test_data[:, 1:] - - # 3: choose kernel function,and set initial alphas to zero(optional) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) - al = np.zeros(train_data.shape[0]) - - # 4: calculating best alphas using SMO algorithm and predict test_data samples - mysvm = SmoSVM( - train=train_data, - kernel_func=mykernel, - alpha_list=al, - cost=0.4, - b=0.0, - tolerance=0.001, - ) - mysvm.fit() - predict = mysvm.predict(test_samples) - - # 5: check accuracy - score = 0 - test_num = test_tags.shape[0] - for i in range(test_tags.shape[0]): - if test_tags[i] == predict[i]: - score += 1 - print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}") - print(f"Rough Accuracy: {score / test_tags.shape[0]}") - - -def test_demonstration(): - # change stdout - print("\nStart plot,please wait!!!") - sys.stdout = 
open(os.devnull, "w") - - ax1 = plt.subplot2grid((2, 2), (0, 0)) - ax2 = plt.subplot2grid((2, 2), (0, 1)) - ax3 = plt.subplot2grid((2, 2), (1, 0)) - ax4 = plt.subplot2grid((2, 2), (1, 1)) - ax1.set_title("linear svm,cost:0.1") - test_linear_kernel(ax1, cost=0.1) - ax2.set_title("linear svm,cost:500") - test_linear_kernel(ax2, cost=500) - ax3.set_title("rbf kernel svm,cost:0.1") - test_rbf_kernel(ax3, cost=0.1) - ax4.set_title("rbf kernel svm,cost:500") - test_rbf_kernel(ax4, cost=500) - - sys.stdout = sys.__stdout__ - print("Plot done!!!") - - -def test_linear_kernel(ax, cost): - train_x, train_y = make_blobs( - n_samples=500, centers=2, n_features=2, random_state=1 - ) - train_y[train_y == 0] = -1 - scaler = StandardScaler() - train_x_scaled = scaler.fit_transform(train_x, train_y) - train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) - mysvm = SmoSVM( - train=train_data, - kernel_func=mykernel, - cost=cost, - tolerance=0.001, - auto_norm=False, - ) - mysvm.fit() - plot_partition_boundary(mysvm, train_data, ax=ax) - - -def test_rbf_kernel(ax, cost): - train_x, train_y = make_circles( - n_samples=500, noise=0.1, factor=0.1, random_state=1 - ) - train_y[train_y == 0] = -1 - scaler = StandardScaler() - train_x_scaled = scaler.fit_transform(train_x, train_y) - train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) - mysvm = SmoSVM( - train=train_data, - kernel_func=mykernel, - cost=cost, - tolerance=0.001, - auto_norm=False, - ) - mysvm.fit() - plot_partition_boundary(mysvm, train_data, ax=ax) - - -def plot_partition_boundary( - model, train_data, ax, resolution=100, colors=("b", "k", "r") -): - """ - We can not get the optimum w of our kernel svm model which is different from linear - svm. For this reason, we generate randomly distributed points with high desity and - prediced values of these points are calculated by using our tained model. Then we - could use this prediced values to draw contour map. - And this contour map can represent svm's partition boundary. - """ - train_data_x = train_data[:, 1] - train_data_y = train_data[:, 2] - train_data_tags = train_data[:, 0] - xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution) - yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution) - test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape( - resolution * resolution, 2 - ) - - test_tags = model.predict(test_samples, classify=False) - grid = test_tags.reshape((len(xrange), len(yrange))) - - # Plot contour map which represents the partition boundary - ax.contour( - xrange, - yrange, - np.mat(grid).T, - levels=(-1, 0, 1), - linestyles=("--", "-", "--"), - linewidths=(1, 1, 1), - colors=colors, - ) - # Plot all train samples - ax.scatter( - train_data_x, - train_data_y, - c=train_data_tags, - cmap=plt.cm.Dark2, - lw=0, - alpha=0.5, - ) - - # Plot support vectors - support = model.support - ax.scatter( - train_data_x[support], - train_data_y[support], - c=train_data_tags[support], - cmap=plt.cm.Dark2, - ) - - -if __name__ == "__main__": - test_cancel_data() - test_demonstration() - plt.show() +""" + Implementation of sequential minimal optimization (SMO) for support vector machines + (SVM). + + Sequential minimal optimization (SMO) is an algorithm for solving the quadratic + programming (QP) problem that arises during the training of support vector + machines. 
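For reference, since the header only names the problem, the quadratic program being solved is the usual soft-margin SVM dual (notation mine, not taken from this file):

maximize over alpha:  sum_i alpha_i - 1/2 * sum_i sum_j alpha_i * alpha_j * y_i * y_j * K(x_i, x_j)
subject to:           0 <= alpha_i <= C   and   sum_i alpha_i * y_i = 0

SMO repeatedly picks a pair of multipliers and optimises them analytically while all the others stay fixed; in the class below, C corresponds to the `cost` argument and the offset b to the `b` argument.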
+ It was invented by John Platt in 1998. + +Input: + 0: type: numpy.ndarray. + 1: first column of ndarray must be tags of samples, must be 1 or -1. + 2: rows of ndarray represent samples. + +Usage: + Command: + python3 sequential_minimum_optimization.py + Code: + from sequential_minimum_optimization import SmoSVM, Kernel + + kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5) + init_alphas = np.zeros(train.shape[0]) + SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4, + b=0.0, tolerance=0.001) + SVM.fit() + predict = SVM.predict(test_samples) + +Reference: + https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf + https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf +""" + + +import os +import sys +import urllib.request + +import numpy as np +import pandas as pd +from matplotlib import pyplot as plt +from sklearn.datasets import make_blobs, make_circles +from sklearn.preprocessing import StandardScaler + +CANCER_DATASET_URL = ( + "https://archive.ics.uci.edu/ml/machine-learning-databases/" + "breast-cancer-wisconsin/wdbc.data" +) + + +class SmoSVM: + def __init__( + self, + train, + kernel_func, + alpha_list=None, + cost=0.4, + b=0.0, + tolerance=0.001, + auto_norm=True, + ): + self._init = True + self._auto_norm = auto_norm + self._c = np.float64(cost) + self._b = np.float64(b) + self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001) + + self.tags = train[:, 0] + self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:] + self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0]) + self.Kernel = kernel_func + + self._eps = 0.001 + self._all_samples = list(range(self.length)) + self._K_matrix = self._calculate_k_matrix() + self._error = np.zeros(self.length) + self._unbound = [] + + self.choose_alpha = self._choose_alphas() + + # Calculate alphas using SMO algorithm + def fit(self): + k = self._k + state = None + while True: + + # 1: Find alpha1, alpha2 + try: + i1, i2 = self.choose_alpha.send(state) + state = None + except StopIteration: + print("Optimization done!\nEvery sample satisfy the KKT condition!") + break + + # 2: calculate new alpha2 and new alpha1 + y1, y2 = self.tags[i1], self.tags[i2] + a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy() + e1, e2 = self._e(i1), self._e(i2) + args = (i1, i2, a1, a2, e1, e2, y1, y2) + a1_new, a2_new = self._get_new_alpha(*args) + if not a1_new and not a2_new: + state = False + continue + self.alphas[i1], self.alphas[i2] = a1_new, a2_new + + # 3: update threshold(b) + b1_new = np.float64( + -e1 + - y1 * k(i1, i1) * (a1_new - a1) + - y2 * k(i2, i1) * (a2_new - a2) + + self._b + ) + b2_new = np.float64( + -e2 + - y2 * k(i2, i2) * (a2_new - a2) + - y1 * k(i1, i2) * (a1_new - a1) + + self._b + ) + if 0.0 < a1_new < self._c: + b = b1_new + if 0.0 < a2_new < self._c: + b = b2_new + if not (np.float64(0) < a2_new < self._c) and not ( + np.float64(0) < a1_new < self._c + ): + b = (b1_new + b2_new) / 2.0 + b_old = self._b + self._b = b + + # 4: update error value,here we only calculate those non-bound samples' + # error + self._unbound = [i for i in self._all_samples if self._is_unbound(i)] + for s in self.unbound: + if s == i1 or s == i2: + continue + self._error[s] += ( + y1 * (a1_new - a1) * k(i1, s) + + y2 * (a2_new - a2) * k(i2, s) + + (self._b - b_old) + ) + + # if i1 or i2 is non-bound,update there error value to zero + if self._is_unbound(i1): + self._error[i1] = 0 + if 
self._is_unbound(i2): + self._error[i2] = 0 + + # Predict test samples + def predict(self, test_samples, classify=True): + + if test_samples.shape[1] > self.samples.shape[1]: + raise ValueError( + "Test samples' feature length does not equal to that of train samples" + ) + + if self._auto_norm: + test_samples = self._norm(test_samples) + + results = [] + for test_sample in test_samples: + result = self._predict(test_sample) + if classify: + results.append(1 if result > 0 else -1) + else: + results.append(result) + return np.array(results) + + # Check if alpha violate KKT condition + def _check_obey_kkt(self, index): + alphas = self.alphas + tol = self._tol + r = self._e(index) * self.tags[index] + c = self._c + + return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0) + + # Get value calculated from kernel function + def _k(self, i1, i2): + # for test samples,use Kernel function + if isinstance(i2, np.ndarray): + return self.Kernel(self.samples[i1], i2) + # for train samples,Kernel values have been saved in matrix + else: + return self._K_matrix[i1, i2] + + # Get sample's error + def _e(self, index): + """ + Two cases: + 1:Sample[index] is non-bound,Fetch error from list: _error + 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi + + """ + # get from error data + if self._is_unbound(index): + return self._error[index] + # get by g(xi) - yi + else: + gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b + yi = self.tags[index] + return gx - yi + + # Calculate Kernel matrix of all possible i1,i2 ,saving time + def _calculate_k_matrix(self): + k_matrix = np.zeros([self.length, self.length]) + for i in self._all_samples: + for j in self._all_samples: + k_matrix[i, j] = np.float64( + self.Kernel(self.samples[i, :], self.samples[j, :]) + ) + return k_matrix + + # Predict test sample's tag + def _predict(self, sample): + k = self._k + predicted_value = ( + np.sum( + [ + self.alphas[i1] * self.tags[i1] * k(i1, sample) + for i1 in self._all_samples + ] + ) + + self._b + ) + return predicted_value + + # Choose alpha1 and alpha2 + def _choose_alphas(self): + locis = yield from self._choose_a1() + if not locis: + return + return locis + + def _choose_a1(self): + """ + Choose first alpha ;steps: + 1:First loop over all sample + 2:Second loop over all non-bound samples till all non-bound samples does not + voilate kkt condition. + 3:Repeat this two process endlessly,till all samples does not voilate kkt + condition samples after first loop. + """ + while True: + all_not_obey = True + # all sample + print("scanning all sample!") + for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: + all_not_obey = False + yield from self._choose_a2(i1) + + # non-bound sample + print("scanning non-bound sample!") + while True: + not_obey = True + for i1 in [ + i + for i in self._all_samples + if self._check_obey_kkt(i) and self._is_unbound(i) + ]: + not_obey = False + yield from self._choose_a2(i1) + if not_obey: + print("all non-bound samples fit the KKT condition!") + break + if all_not_obey: + print("all samples fit the KKT condition! Optimization done!") + break + return False + + def _choose_a2(self, i1): + """ + Choose the second alpha by using heuristic algorithm ;steps: + 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). + 2: Start in a random point,loop over all non-bound samples till alpha1 and + alpha2 are optimized. + 3: Start in a random point,loop over all samples till alpha1 and alpha2 are + optimized. 
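A tiny, self-contained illustration of the step-size rule in point 1 above, with made-up error values (the method below approximates the same idea by taking the smallest or largest cached error depending on the sign of E1):

```
errors = {0: -0.75, 3: 0.25, 7: 1.0}  # cached errors of the non-bound samples (made up)
e1 = -0.5                             # error of the first chosen multiplier (made up)

# Pick the partner whose error is farthest from e1, i.e. the largest step |E1 - E2|.
i2 = max(errors, key=lambda index: abs(e1 - errors[index]))
print(i2)  # 7, because |-0.5 - 1.0| = 1.5 is the largest gap
```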
+ """ + self._unbound = [i for i in self._all_samples if self._is_unbound(i)] + + if len(self.unbound) > 0: + tmp_error = self._error.copy().tolist() + tmp_error_dict = { + index: value + for index, value in enumerate(tmp_error) + if self._is_unbound(index) + } + if self._e(i1) >= 0: + i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index]) + else: + i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index]) + cmd = yield i1, i2 + if cmd is None: + return + + for i2 in np.roll(self.unbound, np.random.choice(self.length)): + cmd = yield i1, i2 + if cmd is None: + return + + for i2 in np.roll(self._all_samples, np.random.choice(self.length)): + cmd = yield i1, i2 + if cmd is None: + return + + # Get the new alpha2 and new alpha1 + def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): + k = self._k + if i1 == i2: + return None, None + + # calculate L and H which bound the new alpha2 + s = y1 * y2 + if s == -1: + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + else: + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + if l == h: # noqa: E741 + return None, None + + # calculate eta + k11 = k(i1, i1) + k22 = k(i2, i2) + k12 = k(i1, i2) + + # select the new alpha2 which could get the minimal objectives + if (eta := k11 + k22 - 2.0 * k12) > 0.0: + a2_new_unc = a2 + (y2 * (e1 - e2)) / eta + # a2_new has a boundary + if a2_new_unc >= h: + a2_new = h + elif a2_new_unc <= l: + a2_new = l + else: + a2_new = a2_new_unc + else: + b = self._b + l1 = a1 + s * (a2 - l) + h1 = a1 + s * (a2 - h) + + # way 1 + f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) + f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) + ol = ( + l1 * f1 + + l * f2 + + 1 / 2 * l1**2 * k(i1, i1) + + 1 / 2 * l**2 * k(i2, i2) + + s * l * l1 * k(i1, i2) + ) + oh = ( + h1 * f1 + + h * f2 + + 1 / 2 * h1**2 * k(i1, i1) + + 1 / 2 * h**2 * k(i2, i2) + + s * h * h1 * k(i1, i2) + ) + """ + # way 2 + Use objective function check which alpha2 new could get the minimal + objectives + """ + if ol < (oh - self._eps): + a2_new = l + elif ol > oh + self._eps: + a2_new = h + else: + a2_new = a2 + + # a1_new has a boundary too + a1_new = a1 + s * (a2 - a2_new) + if a1_new < 0: + a2_new += s * a1_new + a1_new = 0 + if a1_new > self._c: + a2_new += s * (a1_new - self._c) + a1_new = self._c + + return a1_new, a2_new + + # Normalise data using min_max way + def _norm(self, data): + if self._init: + self._min = np.min(data, axis=0) + self._max = np.max(data, axis=0) + self._init = False + return (data - self._min) / (self._max - self._min) + else: + return (data - self._min) / (self._max - self._min) + + def _is_unbound(self, index): + if 0.0 < self.alphas[index] < self._c: + return True + else: + return False + + def _is_support(self, index): + if self.alphas[index] > 0: + return True + else: + return False + + @property + def unbound(self): + return self._unbound + + @property + def support(self): + return [i for i in range(self.length) if self._is_support(i)] + + @property + def length(self): + return self.samples.shape[0] + + +class Kernel: + def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0): + self.degree = np.float64(degree) + self.coef0 = np.float64(coef0) + self.gamma = np.float64(gamma) + self._kernel_name = kernel + self._kernel = self._get_kernel(kernel_name=kernel) + self._check() + + def _polynomial(self, v1, v2): + return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree + + def _linear(self, v1, v2): + return np.inner(v1, v2) + self.coef0 + + def _rbf(self, v1, v2): + 
return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2)) + + def _check(self): + if self._kernel == self._rbf: + if self.gamma < 0: + raise ValueError("gamma value must greater than 0") + + def _get_kernel(self, kernel_name): + maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} + return maps[kernel_name] + + def __call__(self, v1, v2): + return self._kernel(v1, v2) + + def __repr__(self): + return self._kernel_name + + +def count_time(func): + def call_func(*args, **kwargs): + import time + + start_time = time.time() + func(*args, **kwargs) + end_time = time.time() + print(f"smo algorithm cost {end_time - start_time} seconds") + + return call_func + + +@count_time +def test_cancel_data(): + print("Hello!\nStart test svm by smo algorithm!") + # 0: download dataset and load into pandas' dataframe + if not os.path.exists(r"cancel_data.csv"): + request = urllib.request.Request( + CANCER_DATASET_URL, + headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, + ) + response = urllib.request.urlopen(request) + content = response.read().decode("utf-8") + with open(r"cancel_data.csv", "w") as f: + f.write(content) + + data = pd.read_csv(r"cancel_data.csv", header=None) + + # 1: pre-processing data + del data[data.columns.tolist()[0]] + data = data.dropna(axis=0) + data = data.replace({"M": np.float64(1), "B": np.float64(-1)}) + samples = np.array(data)[:, :] + + # 2: dividing data into train_data data and test_data data + train_data, test_data = samples[:328, :], samples[328:, :] + test_tags, test_samples = test_data[:, 0], test_data[:, 1:] + + # 3: choose kernel function,and set initial alphas to zero(optional) + mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + al = np.zeros(train_data.shape[0]) + + # 4: calculating best alphas using SMO algorithm and predict test_data samples + mysvm = SmoSVM( + train=train_data, + kernel_func=mykernel, + alpha_list=al, + cost=0.4, + b=0.0, + tolerance=0.001, + ) + mysvm.fit() + predict = mysvm.predict(test_samples) + + # 5: check accuracy + score = 0 + test_num = test_tags.shape[0] + for i in range(test_tags.shape[0]): + if test_tags[i] == predict[i]: + score += 1 + print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}") + print(f"Rough Accuracy: {score / test_tags.shape[0]}") + + +def test_demonstration(): + # change stdout + print("\nStart plot,please wait!!!") + sys.stdout = open(os.devnull, "w") + + ax1 = plt.subplot2grid((2, 2), (0, 0)) + ax2 = plt.subplot2grid((2, 2), (0, 1)) + ax3 = plt.subplot2grid((2, 2), (1, 0)) + ax4 = plt.subplot2grid((2, 2), (1, 1)) + ax1.set_title("linear svm,cost:0.1") + test_linear_kernel(ax1, cost=0.1) + ax2.set_title("linear svm,cost:500") + test_linear_kernel(ax2, cost=500) + ax3.set_title("rbf kernel svm,cost:0.1") + test_rbf_kernel(ax3, cost=0.1) + ax4.set_title("rbf kernel svm,cost:500") + test_rbf_kernel(ax4, cost=500) + + sys.stdout = sys.__stdout__ + print("Plot done!!!") + + +def test_linear_kernel(ax, cost): + train_x, train_y = make_blobs( + n_samples=500, centers=2, n_features=2, random_state=1 + ) + train_y[train_y == 0] = -1 + scaler = StandardScaler() + train_x_scaled = scaler.fit_transform(train_x, train_y) + train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) + mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) + mysvm = SmoSVM( + train=train_data, + kernel_func=mykernel, + cost=cost, + tolerance=0.001, + auto_norm=False, + ) + mysvm.fit() + plot_partition_boundary(mysvm, train_data, ax=ax) + + +def 
test_rbf_kernel(ax, cost):
+    train_x, train_y = make_circles(
+        n_samples=500, noise=0.1, factor=0.1, random_state=1
+    )
+    train_y[train_y == 0] = -1
+    scaler = StandardScaler()
+    train_x_scaled = scaler.fit_transform(train_x, train_y)
+    train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
+    mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
+    mysvm = SmoSVM(
+        train=train_data,
+        kernel_func=mykernel,
+        cost=cost,
+        tolerance=0.001,
+        auto_norm=False,
+    )
+    mysvm.fit()
+    plot_partition_boundary(mysvm, train_data, ax=ax)
+
+
+def plot_partition_boundary(
+    model, train_data, ax, resolution=100, colors=("b", "k", "r")
+):
+    """
+    Unlike a linear SVM, we cannot recover an explicit weight vector w for a kernel
+    SVM. For this reason, we generate a dense grid of points, compute their predicted
+    values with the trained model, and use those predictions to draw a contour map.
+    This contour map represents the SVM's partition boundary.
+    """
+    train_data_x = train_data[:, 1]
+    train_data_y = train_data[:, 2]
+    train_data_tags = train_data[:, 0]
+    xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
+    yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
+    test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
+        resolution * resolution, 2
+    )
+
+    test_tags = model.predict(test_samples, classify=False)
+    grid = test_tags.reshape((len(xrange), len(yrange)))
+
+    # Plot contour map which represents the partition boundary
+    ax.contour(
+        xrange,
+        yrange,
+        np.mat(grid).T,
+        levels=(-1, 0, 1),
+        linestyles=("--", "-", "--"),
+        linewidths=(1, 1, 1),
+        colors=colors,
+    )
+    # Plot all train samples
+    ax.scatter(
+        train_data_x,
+        train_data_y,
+        c=train_data_tags,
+        cmap=plt.cm.Dark2,
+        lw=0,
+        alpha=0.5,
+    )
+
+    # Plot support vectors
+    support = model.support
+    ax.scatter(
+        train_data_x[support],
+        train_data_y[support],
+        c=train_data_tags[support],
+        cmap=plt.cm.Dark2,
+    )
+
+
+if __name__ == "__main__":
+    test_cancel_data()
+    test_demonstration()
+    plt.show()
diff --git a/strings/indian_phone_validator.py b/strings/indian_phone_validator.py
index 7f3fda5db..07161a63a 100644
--- a/strings/indian_phone_validator.py
+++ b/strings/indian_phone_validator.py
@@ -20,8 +20,7 @@ def indian_phone_validator(phone: str) -> bool:
     True
     """
     pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
-    match = re.search(pat, phone)
-    if match:
+    if match := re.search(pat, phone):
         return match.string == phone
     return False

From 3a671b57a29e3c2b4e973b01bc5bbe1554aa5da2 Mon Sep 17 00:00:00 2001
From: Kuldeep Borkar <74557588+KuldeepBorkar@users.noreply.github.com>
Date: Fri, 28 Oct 2022 19:57:16 +0530
Subject: [PATCH 124/368] Implemented Swish Function (#7357)

* Implemented Swish Function
* Added more description and return hint in def
* Changed the name and added more descrition including test for sigmoid function
* Added * in front of links

---
 maths/sigmoid_linear_unit.py | 57 ++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 maths/sigmoid_linear_unit.py

diff --git a/maths/sigmoid_linear_unit.py b/maths/sigmoid_linear_unit.py
new file mode 100644
index 000000000..a8ada10dd
--- /dev/null
+++ b/maths/sigmoid_linear_unit.py
@@ -0,0 +1,57 @@
+"""
+This script demonstrates the implementation of the Sigmoid Linear Unit (SiLU)
+or swish function.
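+The swish family is f(x) = x * sigmoid(beta * x); SiLU is the special case with beta = 1.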
+* https://en.wikipedia.org/wiki/Rectifier_(neural_networks) +* https://en.wikipedia.org/wiki/Swish_function + +The function takes a vector x of K real numbers as input and returns x * sigmoid(x). +Swish is a smooth, non-monotonic function defined as f(x) = x * sigmoid(x). +Extensive experiments shows that Swish consistently matches or outperforms ReLU +on deep networks applied to a variety of challenging domains such as +image classification and machine translation. + +This script is inspired by a corresponding research paper. +* https://arxiv.org/abs/1710.05941 +""" + +import numpy as np + + +def sigmoid(vector: np.array) -> np.array: + """ + Mathematical function sigmoid takes a vector x of K real numbers as input and + returns 1/ (1 + e^-x). + https://en.wikipedia.org/wiki/Sigmoid_function + + >>> sigmoid(np.array([-1.0, 1.0, 2.0])) + array([0.26894142, 0.73105858, 0.88079708]) + """ + return 1 / (1 + np.exp(-vector)) + + +def sigmoid_linear_unit(vector: np.array) -> np.array: + """ + Implements the Sigmoid Linear Unit (SiLU) or swish function + + Parameters: + vector (np.array): A numpy array consisting of real + values. + + Returns: + swish_vec (np.array): The input numpy array, after applying + swish. + + Examples: + >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])) + array([-0.26894142, 0.73105858, 1.76159416]) + + >>> sigmoid_linear_unit(np.array([-2])) + array([-0.23840584]) + """ + return vector * sigmoid(vector) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 26cecea27198848e2c1c0bc6d7f887d4ed7adb87 Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Fri, 28 Oct 2022 20:03:21 +0530 Subject: [PATCH 125/368] Create fetch_amazon_product_data.py (#7585) * Create fetch_amazon_product_data.py This file provides a function which will take a product name as input from the user,and fetch the necessary information about that kind of products from Amazon like the product title,link to that product,price of the product,the ratings of the product and the discount available on the product in the form of a csv file,this will help the users by improving searchability and navigability and find the right product easily and in a short period of time, it will also be beneficial for performing better analysis on products * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Added type hints and modified files to pass precommit test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Added type hints and made changes to pass the precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Modified function to return the data in the form of Pandas Dataframe,modified type hints and added a functionality to let the user determine if they need the data in a csv file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Made some bug fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename fetch_amazon_product_data.py to get_amazon_product_data.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_amazon_product_data.py Co-authored-by: 
pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/get_amazon_product_data.py | 100 +++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 web_programming/get_amazon_product_data.py diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py new file mode 100644 index 000000000..c796793f2 --- /dev/null +++ b/web_programming/get_amazon_product_data.py @@ -0,0 +1,100 @@ +""" +This file provides a function which will take a product name as input from the user, +and fetch from Amazon information about products of this name or category. The product +information will include title, URL, price, ratings, and the discount available. +""" + + +from itertools import zip_longest + +import requests +from bs4 import BeautifulSoup +from pandas import DataFrame + + +def get_amazon_product_data(product: str = "laptop") -> DataFrame: + """ + Take a product name or category as input and return product information from Amazon + including title, URL, price, ratings, and the discount available. + """ + url = f"https://www.amazon.in/laptop/s?k={product}" + header = { + "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 + (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", + "Accept-Language": "en-US, en;q=0.5", + } + soup = BeautifulSoup(requests.get(url, headers=header).text) + # Initialize a Pandas dataframe with the column titles + data_frame = DataFrame( + columns=[ + "Product Title", + "Product Link", + "Current Price of the product", + "Product Rating", + "MRP of the product", + "Discount", + ] + ) + # Loop through each entry and store them in the dataframe + for item, _ in zip_longest( + soup.find_all( + "div", + attrs={"class": "s-result-item", "data-component-type": "s-search-result"}, + ), + soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}), + ): + try: + product_title = item.h2.text + product_link = "https://www.amazon.in/" + item.h2.a["href"] + product_price = item.find("span", attrs={"class": "a-offscreen"}).text + try: + product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text + except AttributeError: + product_rating = "Not available" + try: + product_mrp = ( + "₹" + + item.find( + "span", attrs={"class": "a-price a-text-price"} + ).text.split("₹")[1] + ) + except AttributeError: + product_mrp = "" + try: + discount = float( + ( + ( + float(product_mrp.strip("₹").replace(",", "")) + - float(product_price.strip("₹").replace(",", "")) + ) + / float(product_mrp.strip("₹").replace(",", "")) + ) + * 100 + ) + except ValueError: + discount = float("nan") + except AttributeError: + pass + data_frame.loc[len(data_frame.index)] = [ + product_title, + product_link, + product_price, + product_rating, + product_mrp, + discount, + ] + data_frame.loc[ + data_frame["Current Price of the product"] > data_frame["MRP of the product"], + "MRP of the product", + ] = " " + data_frame.loc[ + data_frame["Current Price of the product"] > data_frame["MRP of the product"], + "Discount", + ] = " " + data_frame.index += 1 + return data_frame + + +if __name__ == "__main__": + product = "headphones" + get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv") From d9efd7e25bbe937893a9818cfda62ca3f72ffe0d Mon Sep 17 00:00:00 2001 From: Andrey Date: Fri, 28 Oct 2022 21:54:44 +0300 Subject: [PATCH 126/368] Update PR template (#7794) * Update PR template * Revert changes, reword line --- 
.github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4d2265968..b3ba8baf9 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -16,5 +16,5 @@ * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. -* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. +* [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. From 528b1290194da09c2e762c2232502d2cfcdb1e3d Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Sat, 29 Oct 2022 00:38:41 +0530 Subject: [PATCH 127/368] Update maximum_subarray.py (#7757) * Update maximum_subarray.py 1. Rectify documentation to indicate the correct output: function doesn't return the subarray, but rather returns a sum. 2. Make the function more Pythonic and optimal. 3. Make function annotation generic i.e. can accept any sequence. 4. Raise value error when the input sequence is empty. * Update maximum_subarray.py 1. Use the conventions as mentioned in pep-0257. 2. Use negative infinity as the initial value for the current maximum and the answer. * Update maximum_subarray.py Avoid type conflict by returning the answer cast to an integer. * Update other/maximum_subarray.py Co-authored-by: Andrey * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maximum_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maximum_subarray.py Remove typecast to int for the final answer Co-authored-by: Andrey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/maximum_subarray.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/other/maximum_subarray.py b/other/maximum_subarray.py index 756e00944..1c8c8cabc 100644 --- a/other/maximum_subarray.py +++ b/other/maximum_subarray.py @@ -1,20 +1,26 @@ -def max_subarray(nums: list[int]) -> int: - """ - Returns the subarray with maximum sum - >>> max_subarray([1,2,3,4,-2]) +from collections.abc import Sequence + + +def max_subarray_sum(nums: Sequence[int]) -> int: + """Return the maximum possible sum amongst all non - empty subarrays. + + Raises: + ValueError: when nums is empty. 
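+    This is Kadane's algorithm: it keeps the best sum of a subarray ending at the current index, giving O(n) time and O(1) extra space.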
+ + >>> max_subarray_sum([1,2,3,4,-2]) 10 - >>> max_subarray([-2,1,-3,4,-1,2,1,-5,4]) + >>> max_subarray_sum([-2,1,-3,4,-1,2,1,-5,4]) 6 """ + if not nums: + raise ValueError("Input sequence should not be empty") curr_max = ans = nums[0] + nums_len = len(nums) - for i in range(1, len(nums)): - if curr_max >= 0: - curr_max = curr_max + nums[i] - else: - curr_max = nums[i] - + for i in range(1, nums_len): + num = nums[i] + curr_max = max(curr_max + num, num) ans = max(curr_max, ans) return ans @@ -23,4 +29,4 @@ def max_subarray(nums: list[int]) -> int: if __name__ == "__main__": n = int(input("Enter number of elements : ").strip()) array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] - print(max_subarray(array)) + print(max_subarray_sum(array)) From fe5819c872abcbe1a96ee7bd20ab930b2892bbf5 Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Sat, 29 Oct 2022 01:02:32 +0530 Subject: [PATCH 128/368] Create combination_sum_iv.py (#7672) * Create combination_sum_iv.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update combination_sum_iv.py * Update combination_sum_iv.py * Resolved PR Comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * minor change, argument missing in function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Christian Clauss * minor change Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- dynamic_programming/combination_sum_iv.py | 102 ++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 dynamic_programming/combination_sum_iv.py diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py new file mode 100644 index 000000000..b2aeb0824 --- /dev/null +++ b/dynamic_programming/combination_sum_iv.py @@ -0,0 +1,102 @@ +""" +Question: +You are given an array of distinct integers and you have to tell how many +different ways of selecting the elements from the array are there such that +the sum of chosen elements is equal to the target number tar. + +Example + +Input: +N = 3 +target = 5 +array = [1, 2, 5] + +Output: +9 + +Approach: +The basic idea is to go over recursively to find the way such that the sum +of chosen elements is “tar”. For every element, we have two choices + 1. Include the element in our set of chosen elements. + 2. Don’t include the element in our set of chosen elements. +""" + + +def combination_sum_iv(n: int, array: list[int], target: int) -> int: + """ + Function checks the all possible combinations, and returns the count + of possible combination in exponential Time Complexity. 
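+    Different orderings are counted separately, e.g. (1, 2, 2), (2, 1, 2) and (2, 2, 1) are three distinct ways to reach 5.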
+ + >>> combination_sum_iv(3, [1,2,5], 5) + 9 + """ + + def count_of_possible_combinations(target: int) -> int: + if target < 0: + return 0 + if target == 0: + return 1 + return sum(count_of_possible_combinations(target - item) for item in array) + + return count_of_possible_combinations(target) + + +def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int: + """ + Function checks the all possible combinations, and returns the count + of possible combination in O(N^2) Time Complexity as we are using Dynamic + programming array here. + + >>> combination_sum_iv_dp_array(3, [1,2,5], 5) + 9 + """ + + def count_of_possible_combinations_with_dp_array( + target: int, dp_array: list[int] + ) -> int: + if target < 0: + return 0 + if target == 0: + return 1 + if dp_array[target] != -1: + return dp_array[target] + answer = sum( + count_of_possible_combinations_with_dp_array(target - item, dp_array) + for item in array + ) + dp_array[target] = answer + return answer + + dp_array = [-1] * (target + 1) + return count_of_possible_combinations_with_dp_array(target, dp_array) + + +def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int: + """ + Function checks the all possible combinations with using bottom up approach, + and returns the count of possible combination in O(N^2) Time Complexity + as we are using Dynamic programming array here. + + >>> combination_sum_iv_bottom_up(3, [1,2,5], 5) + 9 + """ + + dp_array = [0] * (target + 1) + dp_array[0] = 1 + + for i in range(1, target + 1): + for j in range(n): + if i - array[j] >= 0: + dp_array[i] += dp_array[i - array[j]] + + return dp_array[target] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + n = 3 + target = 5 + array = [1, 2, 5] + print(combination_sum_iv(n, array, target)) From 762afc086f065f1d8fe1afcde8c8ad3fa46898a7 Mon Sep 17 00:00:00 2001 From: Andrey Date: Fri, 28 Oct 2022 23:27:39 +0300 Subject: [PATCH 129/368] Update breadth_first_search_2.py (#7765) * Cleanup the BFS * Add both functions and timeit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add performace results as comment * Update breadth_first_search_2.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/breadth_first_search_2.py | 45 +++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py index 2f060a90d..a0b92b90b 100644 --- a/graphs/breadth_first_search_2.py +++ b/graphs/breadth_first_search_2.py @@ -14,7 +14,9 @@ while Q is non-empty: """ from __future__ import annotations +from collections import deque from queue import Queue +from timeit import timeit G = { "A": ["B", "C"], @@ -26,12 +28,15 @@ G = { } -def breadth_first_search(graph: dict, start: str) -> set[str]: +def breadth_first_search(graph: dict, start: str) -> list[str]: """ - >>> ''.join(sorted(breadth_first_search(G, 'A'))) + Implementation of breadth first search using queue.Queue. 
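+    Vertices are visited level by level and returned in the order they are first discovered.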
+ + >>> ''.join(breadth_first_search(G, 'A')) 'ABCDEF' """ explored = {start} + result = [start] queue: Queue = Queue() queue.put(start) while not queue.empty(): @@ -39,12 +44,44 @@ def breadth_first_search(graph: dict, start: str) -> set[str]: for w in graph[v]: if w not in explored: explored.add(w) + result.append(w) queue.put(w) - return explored + return result + + +def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]: + """ + Implementation of breadth first search using collection.queue. + + >>> ''.join(breadth_first_search_with_deque(G, 'A')) + 'ABCDEF' + """ + visited = {start} + result = [start] + queue = deque([start]) + while queue: + v = queue.popleft() + for child in graph[v]: + if child not in visited: + visited.add(child) + result.append(child) + queue.append(child) + return result + + +def benchmark_function(name: str) -> None: + setup = f"from __main__ import G, {name}" + number = 10000 + res = timeit(f"{name}(G, 'A')", setup=setup, number=number) + print(f"{name:<35} finished {number} runs in {res:.5f} seconds") if __name__ == "__main__": import doctest doctest.testmod() - print(breadth_first_search(G, "A")) + + benchmark_function("breadth_first_search") + benchmark_function("breadth_first_search_with_deque") + # breadth_first_search finished 10000 runs in 0.20999 seconds + # breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds From cf08d9f5e7afdcfb9406032abcad328aa79c566a Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 09:26:19 +0300 Subject: [PATCH 130/368] Format docs (#7821) * Reformat docs for odd_even_sort.py * Fix docstring formatting * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Caeden Perelli-Harris --- machine_learning/data_transformations.py | 10 +++++++--- physics/kinetic_energy.py | 5 ++++- sorts/merge_sort.py | 9 ++++++--- sorts/odd_even_sort.py | 9 +++++++-- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/machine_learning/data_transformations.py b/machine_learning/data_transformations.py index 9e0d747e9..ecfd3b9e2 100644 --- a/machine_learning/data_transformations.py +++ b/machine_learning/data_transformations.py @@ -1,5 +1,7 @@ """ -Normalization Wikipedia: https://en.wikipedia.org/wiki/Normalization +Normalization. + +Wikipedia: https://en.wikipedia.org/wiki/Normalization Normalization is the process of converting numerical data to a standard range of values. This range is typically between [0, 1] or [-1, 1]. The equation for normalization is x_norm = (x - x_min)/(x_max - x_min) where x_norm is the normalized value, x is the @@ -28,7 +30,8 @@ from statistics import mean, stdev def normalization(data: list, ndigits: int = 3) -> list: """ - Returns a normalized list of values + Return a normalized list of values. + @params: data, a list of values to normalize @returns: a list of normalized values (rounded to ndigits decimal places) @examples: @@ -46,7 +49,8 @@ def normalization(data: list, ndigits: int = 3) -> list: def standardization(data: list, ndigits: int = 3) -> list: """ - Returns a standardized list of values + Return a standardized list of values. 
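+    Standardization (z-score) maps each value x to (x - mean) / stdev.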
+ @params: data, a list of values to standardize @returns: a list of standardized values (rounded to ndigits decimal places) @examples: diff --git a/physics/kinetic_energy.py b/physics/kinetic_energy.py index 535ffc219..8863919ac 100644 --- a/physics/kinetic_energy.py +++ b/physics/kinetic_energy.py @@ -1,5 +1,6 @@ """ -Find the kinetic energy of an object, give its mass and velocity +Find the kinetic energy of an object, given its mass and velocity. + Description : In physics, the kinetic energy of an object is the energy that it possesses due to its motion. It is defined as the work needed to accelerate a body of a given mass from rest to its stated velocity. Having gained this energy during its @@ -19,6 +20,8 @@ Reference : https://en.m.wikipedia.org/wiki/Kinetic_energy def kinetic_energy(mass: float, velocity: float) -> float: """ + Calculate kinetick energy. + The kinetic energy of a non-rotating object of mass m traveling at a speed v is ½mv² >>> kinetic_energy(10,10) diff --git a/sorts/merge_sort.py b/sorts/merge_sort.py index 4da29f32a..e80b1cb22 100644 --- a/sorts/merge_sort.py +++ b/sorts/merge_sort.py @@ -1,5 +1,6 @@ """ -This is a pure Python implementation of the merge sort algorithm +This is a pure Python implementation of the merge sort algorithm. + For doctests run following command: python -m doctest -v merge_sort.py or @@ -10,7 +11,7 @@ python merge_sort.py def merge_sort(collection: list) -> list: - """Pure implementation of the merge sort algorithm in Python + """ :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending @@ -24,7 +25,9 @@ def merge_sort(collection: list) -> list: """ def merge(left: list, right: list) -> list: - """merge left and right + """ + Merge left and right. + :param left: left collection :param right: right collection :return: merge result diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py index 532f82949..9ef4462c7 100644 --- a/sorts/odd_even_sort.py +++ b/sorts/odd_even_sort.py @@ -1,10 +1,15 @@ -"""For reference +""" +Odd even sort implementation. + https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort """ def odd_even_sort(input_list: list) -> list: - """this algorithm uses the same idea of bubblesort, + """ + Sort input with odd even sort. + + This algorithm uses the same idea of bubblesort, but by first dividing in two phase (odd and even). Originally developed for use on parallel processors with local interconnections. 
From 301a520f0362261cddadc87e1bcfe20310308030 Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Sat, 29 Oct 2022 16:44:44 +0530 Subject: [PATCH 131/368] Create potential_energy.py (#7666) * Create potential_energy.py Finding the gravitational potential energy of an object with reference to the earth, by taking its mass and height above the ground as input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/potential_energy.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update physics/potential_energy.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- physics/potential_energy.py | 61 +++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 physics/potential_energy.py diff --git a/physics/potential_energy.py b/physics/potential_energy.py new file mode 100644 index 000000000..c6544f6f7 --- /dev/null +++ b/physics/potential_energy.py @@ -0,0 +1,61 @@ +from scipy.constants import g + +""" +Finding the gravitational potential energy of an object with reference +to the earth,by taking its mass and height above the ground as input + + +Description : Gravitational energy or gravitational potential energy +is the potential energy a massive object has in relation to another +massive object due to gravity. It is the potential energy associated +with the gravitational field, which is released (converted into +kinetic energy) when the objects fall towards each other. +Gravitational potential energy increases when two objects +are brought further apart. + +For two pairwise interacting point particles, the gravitational +potential energy U is given by +U=-GMm/R +where M and m are the masses of the two particles, R is the distance +between them, and G is the gravitational constant. +Close to the Earth's surface, the gravitational field is approximately +constant, and the gravitational potential energy of an object reduces to +U=mgh +where m is the object's mass, g=GM/R² is the gravity of Earth, and h is +the height of the object's center of mass above a chosen reference level. 
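+For example, a 10 kg mass held 5 m above the reference level stores U = 10 * 9.80665 * 5 = 490.3325 J.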
+ +Reference : "https://en.m.wikipedia.org/wiki/Gravitational_energy" +""" + + +def potential_energy(mass: float, height: float) -> float: + # function will accept mass and height as parameters and return potential energy + """ + >>> potential_energy(10,10) + 980.665 + >>> potential_energy(0,5) + 0.0 + >>> potential_energy(8,0) + 0.0 + >>> potential_energy(10,5) + 490.3325 + >>> potential_energy(0,0) + 0.0 + >>> potential_energy(2,8) + 156.9064 + >>> potential_energy(20,100) + 19613.3 + """ + if mass < 0: + # handling of negative values of mass + raise ValueError("The mass of a body cannot be negative") + if height < 0: + # handling of negative values of height + raise ValueError("The height above the ground cannot be negative") + return mass * g * height + + +if __name__ == "__main__": + from doctest import testmod + + testmod(name="potential_energy") From a9bd68d96e519d0919c2e4385dbe433ff44b4c4f Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 15:27:47 +0300 Subject: [PATCH 132/368] Add running doctest to pytest default (#7840) * Add default options for pytest * updating DIRECTORY.md * Move pytest settings to pyproject.toml * Move coverage settings to the pyproject.toml * Return --doctest-continue-on-failure to pytest * Convert pytest args to list * Update pyproject.toml Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .coveragerc | 4 ---- .github/workflows/build.yml | 2 +- DIRECTORY.md | 4 ++++ pyproject.toml | 20 ++++++++++++++++++++ pytest.ini | 5 ----- 5 files changed, 25 insertions(+), 10 deletions(-) delete mode 100644 .coveragerc create mode 100644 pyproject.toml delete mode 100644 pytest.ini diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index f7e6eb212..000000000 --- a/.coveragerc +++ /dev/null @@ -1,4 +0,0 @@ -[report] -sort = Cover -omit = - .env/* diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8481b962a..159ce13b3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,6 +22,6 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . + run: pytest --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/DIRECTORY.md b/DIRECTORY.md index 7621427a6..1fa6af75d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -165,6 +165,7 @@ * [Binary Search Tree Recursive](data_structures/binary_tree/binary_search_tree_recursive.py) * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) + * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) @@ -285,6 +286,7 @@ * [Bitmask](dynamic_programming/bitmask.py) * [Catalan Numbers](dynamic_programming/catalan_numbers.py) * [Climbing Stairs](dynamic_programming/climbing_stairs.py) + * [Combination Sum Iv](dynamic_programming/combination_sum_iv.py) * [Edit Distance](dynamic_programming/edit_distance.py) * [Factorial](dynamic_programming/factorial.py) * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) @@ -595,6 +597,7 @@ * [P Series](maths/series/p_series.py) * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) + * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) * [Sin](maths/sin.py) @@ -1107,6 +1110,7 @@ * [Fetch Jobs](web_programming/fetch_jobs.py) * [Fetch Quotes](web_programming/fetch_quotes.py) * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) + * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) * [Get Top Billioners](web_programming/get_top_billioners.py) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..410e7655b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,20 @@ +[tool.pytest.ini_options] +markers = [ + "mat_ops: mark a test as utilizing matrix operations.", +] +addopts = [ + "--durations=10", + "--doctest-modules", + "--showlocals", +] + + +[tool.coverage.report] +omit = [".env/*"] +sort = "Cover" + +#[report] +#sort = Cover +#omit = +# .env/* +# backtracking/* diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 488379278..000000000 --- a/pytest.ini +++ /dev/null @@ -1,5 +0,0 @@ -# Setup for pytest -[pytest] -markers = - mat_ops: mark a test as utilizing matrix operations. 
-addopts = --durations=10 From 6e809a25e33e2da07e03921bbf6614523a939e94 Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 15:31:56 +0300 Subject: [PATCH 133/368] Rename files (#7819) --- ...s_shortest_path.py => breadth_first_search_shortest_path_2.py} | 0 ...est_path.py => breadth_first_search_zero_one_shortest_path.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename graphs/{bfs_shortest_path.py => breadth_first_search_shortest_path_2.py} (100%) rename graphs/{bfs_zero_one_shortest_path.py => breadth_first_search_zero_one_shortest_path.py} (100%) diff --git a/graphs/bfs_shortest_path.py b/graphs/breadth_first_search_shortest_path_2.py similarity index 100% rename from graphs/bfs_shortest_path.py rename to graphs/breadth_first_search_shortest_path_2.py diff --git a/graphs/bfs_zero_one_shortest_path.py b/graphs/breadth_first_search_zero_one_shortest_path.py similarity index 100% rename from graphs/bfs_zero_one_shortest_path.py rename to graphs/breadth_first_search_zero_one_shortest_path.py From 327c38d6f0c6b79b46465406373ea7048bfec55e Mon Sep 17 00:00:00 2001 From: Sineth Sankalpa <66241389+sinsankio@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:10:14 +0530 Subject: [PATCH 134/368] Srilankan phone number validation (#7706) * Add is_srilankan_phone_number.py * Update is_srilankan_phone_number.py --- strings/is_srilankan_phone_number.py | 35 ++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 strings/is_srilankan_phone_number.py diff --git a/strings/is_srilankan_phone_number.py b/strings/is_srilankan_phone_number.py new file mode 100644 index 000000000..7bded93f7 --- /dev/null +++ b/strings/is_srilankan_phone_number.py @@ -0,0 +1,35 @@ +import re + + +def is_sri_lankan_phone_number(phone: str) -> bool: + """ + Determine whether the string is a valid sri lankan mobile phone number or not + References: https://aye.sh/blog/sri-lankan-phone-number-regex + + >>> is_sri_lankan_phone_number("+94773283048") + True + >>> is_sri_lankan_phone_number("+9477-3283048") + True + >>> is_sri_lankan_phone_number("0718382399") + True + >>> is_sri_lankan_phone_number("0094702343221") + True + >>> is_sri_lankan_phone_number("075 3201568") + True + >>> is_sri_lankan_phone_number("07779209245") + False + >>> is_sri_lankan_phone_number("0957651234") + False + """ + + pattern = re.compile( + r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" + ) + + return bool(re.search(pattern, phone)) + + +if __name__ == "__main__": + phone = "0094702343221" + + print(is_sri_lankan_phone_number(phone)) From b0f68a0248d3eb48f3baf7e18f6420dc983bdb19 Mon Sep 17 00:00:00 2001 From: tarushirastogi <108577219+tarushirastogi@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:13:51 +0530 Subject: [PATCH 135/368] Create centripetal_force.py (#7778) * Create centripetal_force.py Centripetal force is the force acting on an object in curvilinear motion directed towards the axis of rotation or centre of curvature. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update centripetal_force.py The value error should also handle negative values of the radius and using more descriptive names will be more beneficial for the users * Update centripetal_force.py Made some bug fixes Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> --- physics/centripetal_force.py | 49 ++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 physics/centripetal_force.py diff --git a/physics/centripetal_force.py b/physics/centripetal_force.py new file mode 100644 index 000000000..04069d256 --- /dev/null +++ b/physics/centripetal_force.py @@ -0,0 +1,49 @@ +""" +Description : Centripetal force is the force acting on an object in +curvilinear motion directed towards the axis of rotation +or centre of curvature. + +The unit of centripetal force is newton. + +The centripetal force is always directed perpendicular to the +direction of the object’s displacement. Using Newton’s second +law of motion, it is found that the centripetal force of an object +moving in a circular path always acts towards the centre of the circle. +The Centripetal Force Formula is given as the product of mass (in kg) +and tangential velocity (in meters per second) squared, divided by the +radius (in meters) that implies that on doubling the tangential velocity, +the centripetal force will be quadrupled. Mathematically it is written as: +F = mv²/r +Where, F is the Centripetal force, m is the mass of the object, v is the +speed or velocity of the object and r is the radius. + +Reference: https://byjus.com/physics/centripetal-and-centrifugal-force/ +""" + + +def centripetal(mass: float, velocity: float, radius: float) -> float: + """ + The Centripetal Force formula is given as: (m*v*v)/r + + >>> round(centripetal(15.5,-30,10),2) + 1395.0 + >>> round(centripetal(10,15,5),2) + 450.0 + >>> round(centripetal(20,-50,15),2) + 3333.33 + >>> round(centripetal(12.25,40,25),2) + 784.0 + >>> round(centripetal(50,100,50),2) + 10000.0 + """ + if mass < 0: + raise ValueError("The mass of the body cannot be negative") + if radius <= 0: + raise ValueError("The radius is always a positive non zero integer") + return (mass * (velocity) ** 2) / radius + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) From 18ffc4dec85a85837f71cd6c9b1e630b9d185001 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Sat, 29 Oct 2022 18:24:13 +0530 Subject: [PATCH 136/368] Update password_generator.py (#7745) * Update password_generator.py 1. Use secrets module instead of random for passwords as it gives a secure source of randomness 2. Add type annotations for functions 3. Replace ctbi (variable for the characters to be included) with a more meaningful and short name 4. 
Use integer division instead of obtaining the integer part of a division computing a floating point * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/password_generator.py | 40 +++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/other/password_generator.py b/other/password_generator.py index c09afd7e6..8f9d58a33 100644 --- a/other/password_generator.py +++ b/other/password_generator.py @@ -1,9 +1,10 @@ """Password Generator allows you to generate a random password of length N.""" -from random import choice, shuffle +import secrets +from random import shuffle from string import ascii_letters, digits, punctuation -def password_generator(length=8): +def password_generator(length: int = 8) -> str: """ >>> len(password_generator()) 8 @@ -17,58 +18,59 @@ def password_generator(length=8): 0 """ chars = ascii_letters + digits + punctuation - return "".join(choice(chars) for x in range(length)) + return "".join(secrets.choice(chars) for _ in range(length)) # ALTERNATIVE METHODS -# ctbi= characters that must be in password +# chars_incl= characters that must be in password # i= how many letters or characters the password length will be -def alternative_password_generator(ctbi, i): +def alternative_password_generator(chars_incl: str, i: int) -> str: # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... - i = i - len(ctbi) - quotient = int(i / 3) + i -= len(chars_incl) + quotient = i // 3 remainder = i % 3 - # chars = ctbi + random_letters(ascii_letters, i / 3 + remainder) + + # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) chars = ( - ctbi + chars_incl + random(ascii_letters, quotient + remainder) + random(digits, quotient) + random(punctuation, quotient) ) - chars = list(chars) - shuffle(chars) - return "".join(chars) + list_of_chars = list(chars) + shuffle(list_of_chars) + return "".join(list_of_chars) # random is a generalised function for letters, characters and numbers -def random(ctbi, i): - return "".join(choice(ctbi) for x in range(i)) +def random(chars_incl: str, i: int) -> str: + return "".join(secrets.choice(chars_incl) for _ in range(i)) -def random_number(ctbi, i): +def random_number(chars_incl, i): pass # Put your code here... -def random_letters(ctbi, i): +def random_letters(chars_incl, i): pass # Put your code here... -def random_characters(ctbi, i): +def random_characters(chars_incl, i): pass # Put your code here... 
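+# Example usage (output is random): password_generator(12) returns a 12-character string drawn from letters, digits and punctuation.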
def main(): length = int(input("Please indicate the max length of your password: ").strip()) - ctbi = input( + chars_incl = input( "Please indicate the characters that must be in your password: " ).strip() print("Password generated:", password_generator(length)) print( - "Alternative Password generated:", alternative_password_generator(ctbi, length) + "Alternative Password generated:", + alternative_password_generator(chars_incl, length), ) print("[If you are thinking of using this passsword, You better save it.]") From 584e743422565decd35b1b6f94cef3ced840698b Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 16:07:02 +0300 Subject: [PATCH 137/368] Fix yesqa hook (#7843) * fix yesqa hook * Remove redundant noqa * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 10 ++++++++-- DIRECTORY.md | 5 +++-- .../binary_tree/non_recursive_segment_tree.py | 2 +- digital_image_processing/index_calculation.py | 2 +- genetic_algorithm/basic_string.py | 2 +- maths/prime_sieve_eratosthenes.py | 2 -- 6 files changed, 14 insertions(+), 9 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7f6c206b4..56946f5f2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -41,13 +41,19 @@ repos: rev: 5.0.4 hooks: - id: flake8 # See .flake8 for args - additional_dependencies: + additional_dependencies: &flake8-plugins - flake8-bugbear - flake8-builtins - flake8-broken-line - flake8-comprehensions - pep8-naming - - yesqa + + - repo: https://github.com/asottile/yesqa + rev: v1.4.0 + hooks: + - id: yesqa + additional_dependencies: + *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/DIRECTORY.md b/DIRECTORY.md index 1fa6af75d..198cc7077 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -356,14 +356,14 @@ * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) - * [Bfs Shortest Path](graphs/bfs_shortest_path.py) - * [Bfs Zero One Shortest Path](graphs/bfs_zero_one_shortest_path.py) * [Bidirectional A Star](graphs/bidirectional_a_star.py) * [Bidirectional Breadth First Search](graphs/bidirectional_breadth_first_search.py) * [Boruvka](graphs/boruvka.py) * [Breadth First Search](graphs/breadth_first_search.py) * [Breadth First Search 2](graphs/breadth_first_search_2.py) * [Breadth First Search Shortest Path](graphs/breadth_first_search_shortest_path.py) + * [Breadth First Search Shortest Path 2](graphs/breadth_first_search_shortest_path_2.py) + * [Breadth First Search Zero One Shortest Path](graphs/breadth_first_search_zero_one_shortest_path.py) * [Check Bipartite Graph Bfs](graphs/check_bipartite_graph_bfs.py) * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) * [Check Cycle](graphs/check_cycle.py) @@ -678,6 +678,7 @@ * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) + * [Potential Energy](physics/potential_energy.py) ## Project Euler * Problem 001 diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index c29adefff..075ff6c91 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -103,7 +103,7 @@ class SegmentTree(Generic[T]): >>> st.query(2, 3) 7 """ - l, r 
= l + self.N, r + self.N # noqa: E741 + l, r = l + self.N, r + self.N res: T | None = None while l <= r: # noqa: E741 diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index be1855e99..67830668b 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -413,7 +413,7 @@ class IndexCalculation: """ return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) - def i(self): # noqa: E741,E743 + def i(self): """ Intensity https://www.indexdatabase.de/db/i-single.php?id=36 diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 5cf8d691b..45b8be651 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -80,7 +80,7 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, score = len( [g for position, g in enumerate(item) if g == main_target[position]] ) - return (item, float(score)) # noqa: B023 + return (item, float(score)) # Adding a bit of concurrency can make everything faster, # diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py index 8d60e48c2..3a3c55085 100644 --- a/maths/prime_sieve_eratosthenes.py +++ b/maths/prime_sieve_eratosthenes.py @@ -1,5 +1,3 @@ -# flake8: noqa - """ Sieve of Eratosthenes From 93ad7db97fa211b6e9f77025513a45df83400f88 Mon Sep 17 00:00:00 2001 From: JatinR05 <71865805+JatinR05@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:58:12 +0530 Subject: [PATCH 138/368] Create recursive_approach_knapsack.py (#7587) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create recursive_approach_knapsack.py Added a new naïve recursive approach to solve the knapsack problem. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update recursive_approach_knapsack.py Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update recursive_approach_knapsack.py Updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- knapsack/recursive_approach_knapsack.py | 52 +++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 knapsack/recursive_approach_knapsack.py diff --git a/knapsack/recursive_approach_knapsack.py b/knapsack/recursive_approach_knapsack.py new file mode 100644 index 000000000..d813981cb --- /dev/null +++ b/knapsack/recursive_approach_knapsack.py @@ -0,0 +1,52 @@ +# To get an insight into naive recursive way to solve the Knapsack problem + + +""" +A shopkeeper has bags of wheat that each have different weights and different profits. +eg. +no_of_items 4 +profit 5 4 8 6 +weight 1 2 4 5 +max_weight 5 +Constraints: +max_weight > 0 +profit[i] >= 0 +weight[i] >= 0 +Calculate the maximum profit that the shopkeeper can make given maxmum weight that can +be carried. 
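+At each index the recursion either skips the current bag or takes it (when it still fits) and returns the better of the two profits.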
+""" + + +def knapsack( + weights: list, values: list, number_of_items: int, max_weight: int, index: int +) -> int: + """ + Function description is as follows- + :param weights: Take a list of weights + :param values: Take a list of profits corresponding to the weights + :param number_of_items: number of items available to pick from + :param max_weight: Maximum weight that could be carried + :param index: the element we are looking at + :return: Maximum expected gain + >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) + 13 + >>> knapsack([3 ,4 , 5], [10, 9 , 8], 3, 25, 0) + 27 + """ + if index == number_of_items: + return 0 + ans1 = 0 + ans2 = 0 + ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1) + if weights[index] <= max_weight: + ans2 = values[index] + knapsack( + weights, values, number_of_items, max_weight - weights[index], index + 1 + ) + return max(ans1, ans2) + + +if __name__ == "__main__": + + import doctest + + doctest.testmod() From efb4a3aee842e1db855e678f28b79588734ff146 Mon Sep 17 00:00:00 2001 From: Anshraj Shrivastava <42239140+rajansh87@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:59:15 +0530 Subject: [PATCH 139/368] added algo for finding permutations of an array (#7614) * Add files via upload * Delete permutations.cpython-310.pyc * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * Update permutations.py * Add files via upload * Delete permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/arrays/permutations.py Co-authored-by: Christian Clauss * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/arrays/permutations.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update permutations.py * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- data_structures/arrays/permutations.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 data_structures/arrays/permutations.py diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py new file mode 100644 index 000000000..eb3f26517 --- /dev/null +++ b/data_structures/arrays/permutations.py @@ -0,0 +1,26 @@ +def permute(nums: list[int]) -> list[list[int]]: + """ + Return all permutations. 
+ + >>> from itertools import permutations + >>> numbers= [1,2,3] + >>> all(list(nums) in permute(numbers) for nums in permutations(numbers)) + True + """ + result = [] + if len(nums) == 1: + return [nums.copy()] + for _ in range(len(nums)): + n = nums.pop(0) + permutations = permute(nums) + for perm in permutations: + perm.append(n) + result.extend(permutations) + nums.append(n) + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7b521b66cfe3d16960c3fa8e01ff947794cc44a6 Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Sat, 29 Oct 2022 15:44:18 +0200 Subject: [PATCH 140/368] Add Viterbi algorithm (#7509) * Added Viterbi algorithm Fixes: #7465 Squashed commits * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added doctest for validators * moved all extracted functions to the main function * Forgot a type hint Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/viterbi.py | 400 +++++++++++++++++++++++++++++++++ 1 file changed, 400 insertions(+) create mode 100644 dynamic_programming/viterbi.py diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py new file mode 100644 index 000000000..93ab845e2 --- /dev/null +++ b/dynamic_programming/viterbi.py @@ -0,0 +1,400 @@ +from typing import Any + + +def viterbi( + observations_space: list, + states_space: list, + initial_probabilities: dict, + transition_probabilities: dict, + emission_probabilities: dict, +) -> list: + """ + Viterbi Algorithm, to find the most likely path of + states from the start and the expected output. + https://en.wikipedia.org/wiki/Viterbi_algorithm + sdafads + Wikipedia example + >>> observations = ["normal", "cold", "dizzy"] + >>> states = ["Healthy", "Fever"] + >>> start_p = {"Healthy": 0.6, "Fever": 0.4} + >>> trans_p = { + ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, + ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, + ... } + >>> emit_p = { + ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, + ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, + ... } + >>> viterbi(observations, states, start_p, trans_p, emit_p) + ['Healthy', 'Healthy', 'Fever'] + + >>> viterbi((), states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, (), start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, states, {}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, states, start_p, {}, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, states, start_p, trans_p, {}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi("invalid", states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list + + >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list of strings + + >>> viterbi(observations, "invalid", start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: states_space must be a list + + >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p) + Traceback (most recent call last): + ... 
+ ValueError: states_space must be a list of strings + + >>> viterbi(observations, states, "invalid", trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities must be a dict + + >>> viterbi(observations, states, {2:2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all keys must be strings + + >>> viterbi(observations, states, {"a":2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all values must be float + + >>> viterbi(observations, states, start_p, "invalid", emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities must be a dict + + >>> viterbi(observations, states, start_p, {"a":2}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all values must be dict + + >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + + >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + + >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities nested dictionary all values must be float + + >>> viterbi(observations, states, start_p, trans_p, "invalid") + Traceback (most recent call last): + ... + ValueError: emission_probabilities must be a dict + + >>> viterbi(observations, states, start_p, trans_p, None) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + """ + _validation( + observations_space, + states_space, + initial_probabilities, + transition_probabilities, + emission_probabilities, + ) + # Creates data structures and fill initial step + probabilities: dict = {} + pointers: dict = {} + for state in states_space: + observation = observations_space[0] + probabilities[(state, observation)] = ( + initial_probabilities[state] * emission_probabilities[state][observation] + ) + pointers[(state, observation)] = None + + # Fills the data structure with the probabilities of + # different transitions and pointers to previous states + for o in range(1, len(observations_space)): + observation = observations_space[o] + prior_observation = observations_space[o - 1] + for state in states_space: + # Calculates the argmax for probability function + arg_max = "" + max_probability = -1 + for k_state in states_space: + probability = ( + probabilities[(k_state, prior_observation)] + * transition_probabilities[k_state][state] + * emission_probabilities[state][observation] + ) + if probability > max_probability: + max_probability = probability + arg_max = k_state + + # Update probabilities and pointers dicts + probabilities[(state, observation)] = ( + probabilities[(arg_max, prior_observation)] + * transition_probabilities[arg_max][state] + * emission_probabilities[state][observation] + ) + + pointers[(state, observation)] = arg_max + + # The final observation + final_observation = observations_space[len(observations_space) - 1] + + # argmax for given final observation + arg_max = "" + max_probability = -1 + for k_state in states_space: + probability = probabilities[(k_state, final_observation)] + if probability > max_probability: + max_probability = probability + arg_max = k_state + last_state = arg_max + + # Process pointers backwards + previous = 
last_state + result = [] + for o in range(len(observations_space) - 1, -1, -1): + result.append(previous) + previous = pointers[previous, observations_space[o]] + result.reverse() + + return result + + +def _validation( + observations_space: Any, + states_space: Any, + initial_probabilities: Any, + transition_probabilities: Any, + emission_probabilities: Any, +) -> None: + """ + >>> observations = ["normal", "cold", "dizzy"] + >>> states = ["Healthy", "Fever"] + >>> start_p = {"Healthy": 0.6, "Fever": 0.4} + >>> trans_p = { + ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, + ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, + ... } + >>> emit_p = { + ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, + ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, + ... } + >>> _validation(observations, states, start_p, trans_p, emit_p) + + >>> _validation([], states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + """ + _validate_not_empty( + observations_space, + states_space, + initial_probabilities, + transition_probabilities, + emission_probabilities, + ) + _validate_lists(observations_space, states_space) + _validate_dicts( + initial_probabilities, transition_probabilities, emission_probabilities + ) + + +def _validate_not_empty( + observations_space: Any, + states_space: Any, + initial_probabilities: Any, + transition_probabilities: Any, + emission_probabilities: Any, +) -> None: + """ + >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, + ... {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + + >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, {}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> _validate_not_empty(["a"], ["b"], None, {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + """ + if not all( + [ + observations_space, + states_space, + initial_probabilities, + transition_probabilities, + emission_probabilities, + ] + ): + raise ValueError("There's an empty parameter") + + +def _validate_lists(observations_space: Any, states_space: Any) -> None: + """ + >>> _validate_lists(["a"], ["b"]) + + >>> _validate_lists(1234, ["b"]) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list + + >>> _validate_lists(["a"], [3]) + Traceback (most recent call last): + ... + ValueError: states_space must be a list of strings + """ + _validate_list(observations_space, "observations_space") + _validate_list(states_space, "states_space") + + +def _validate_list(_object: Any, var_name: str) -> None: + """ + >>> _validate_list(["a"], "mock_name") + + >>> _validate_list("a", "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name must be a list + >>> _validate_list([0.5], "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name must be a list of strings + + """ + if not isinstance(_object, list): + raise ValueError(f"{var_name} must be a list") + else: + for x in _object: + if not isinstance(x, str): + raise ValueError(f"{var_name} must be a list of strings") + + +def _validate_dicts( + initial_probabilities: Any, + transition_probabilities: Any, + emission_probabilities: Any, +) -> None: + """ + >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + + >>> _validate_dicts("invalid", {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... 
+ ValueError: initial_probabilities must be a dict + >>> _validate_dicts({"c":0.5}, {2: {"e": 0.6}}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {2: 0.7}}) + Traceback (most recent call last): + ... + ValueError: emission_probabilities all keys must be strings + >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": "h"}}) + Traceback (most recent call last): + ... + ValueError: emission_probabilities nested dictionary all values must be float + """ + _validate_dict(initial_probabilities, "initial_probabilities", float) + _validate_nested_dict(transition_probabilities, "transition_probabilities") + _validate_nested_dict(emission_probabilities, "emission_probabilities") + + +def _validate_nested_dict(_object: Any, var_name: str) -> None: + """ + >>> _validate_nested_dict({"a":{"b": 0.5}}, "mock_name") + + >>> _validate_nested_dict("invalid", "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name must be a dict + >>> _validate_nested_dict({"a": 8}, "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name all values must be dict + >>> _validate_nested_dict({"a":{2: 0.5}}, "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name all keys must be strings + >>> _validate_nested_dict({"a":{"b": 4}}, "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name nested dictionary all values must be float + """ + _validate_dict(_object, var_name, dict) + for x in _object.values(): + _validate_dict(x, var_name, float, True) + + +def _validate_dict( + _object: Any, var_name: str, value_type: type, nested: bool = False +) -> None: + """ + >>> _validate_dict({"b": 0.5}, "mock_name", float) + + >>> _validate_dict("invalid", "mock_name", float) + Traceback (most recent call last): + ... + ValueError: mock_name must be a dict + >>> _validate_dict({"a": 8}, "mock_name", dict) + Traceback (most recent call last): + ... + ValueError: mock_name all values must be dict + >>> _validate_dict({2: 0.5}, "mock_name",float, True) + Traceback (most recent call last): + ... + ValueError: mock_name all keys must be strings + >>> _validate_dict({"b": 4}, "mock_name", float,True) + Traceback (most recent call last): + ... 
+ ValueError: mock_name nested dictionary all values must be float + """ + if not isinstance(_object, dict): + raise ValueError(f"{var_name} must be a dict") + if not all(isinstance(x, str) for x in _object): + raise ValueError(f"{var_name} all keys must be strings") + if not all(isinstance(x, value_type) for x in _object.values()): + nested_text = "nested dictionary " if nested else "" + raise ValueError( + f"{var_name} {nested_text}all values must be {value_type.__name__}" + ) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 038f8a00e56bda8e8e2903fe4acf2ca7e3c83a57 Mon Sep 17 00:00:00 2001 From: sadiqebrahim <75269485+sadiqebrahim@users.noreply.github.com> Date: Sat, 29 Oct 2022 19:22:19 +0530 Subject: [PATCH 141/368] add electric conductivity algorithm (#7449) * add electric conductivity algorithm * Update electric_conductivity.py * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Update electric_conductivity.py * Update electric_conductivity.py * Update electric_conductivity.py * add algorithm Co-authored-by: Caeden Perelli-Harris --- electronics/electric_conductivity.py | 53 ++++++++++++++++++++++++++++ physics/sheer_stress.py | 51 ++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 electronics/electric_conductivity.py create mode 100644 physics/sheer_stress.py diff --git a/electronics/electric_conductivity.py b/electronics/electric_conductivity.py new file mode 100644 index 000000000..11f2a607d --- /dev/null +++ b/electronics/electric_conductivity.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +ELECTRON_CHARGE = 1.6021e-19 # units = C + + +def electric_conductivity( + conductivity: float, + electron_conc: float, + mobility: float, +) -> tuple[str, float]: + """ + This function can calculate any one of the three - + 1. Conductivity + 2. Electron Concentration + 3. Electron Mobility + This is calculated from the other two provided values + Examples - + >>> electric_conductivity(conductivity=25, electron_conc=100, mobility=0) + ('mobility', 1.5604519068722301e+18) + >>> electric_conductivity(conductivity=0, electron_conc=1600, mobility=200) + ('conductivity', 5.12672e-14) + >>> electric_conductivity(conductivity=1000, electron_conc=0, mobility=1200) + ('electron_conc', 5.201506356240767e+18) + """ + if (conductivity, electron_conc, mobility).count(0) != 1: + raise ValueError("You cannot supply more or less than 2 values") + elif conductivity < 0: + raise ValueError("Conductivity cannot be negative") + elif electron_conc < 0: + raise ValueError("Electron concentration cannot be negative") + elif mobility < 0: + raise ValueError("mobility cannot be negative") + elif conductivity == 0: + return ( + "conductivity", + mobility * electron_conc * ELECTRON_CHARGE, + ) + elif electron_conc == 0: + return ( + "electron_conc", + conductivity / (mobility * ELECTRON_CHARGE), + ) + else: + return ( + "mobility", + conductivity / (electron_conc * ELECTRON_CHARGE), + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/physics/sheer_stress.py b/physics/sheer_stress.py new file mode 100644 index 000000000..74a2d36b1 --- /dev/null +++ b/physics/sheer_stress.py @@ -0,0 +1,51 @@ +from __future__ import annotations + + +def sheer_stress( + stress: float, + tangential_force: float, + area: float, +) -> tuple[str, float]: + """ + This function can calculate any one of the three - + 1. Sheer Stress + 2. Tangential Force + 3. 
Cross-sectional Area + This is calculated from the other two provided values + Examples - + >>> sheer_stress(stress=25, tangential_force=100, area=0) + ('area', 4.0) + >>> sheer_stress(stress=0, tangential_force=1600, area=200) + ('stress', 8.0) + >>> sheer_stress(stress=1000, tangential_force=0, area=1200) + ('tangential_force', 1200000) + """ + if (stress, tangential_force, area).count(0) != 1: + raise ValueError("You cannot supply more or less than 2 values") + elif stress < 0: + raise ValueError("Stress cannot be negative") + elif tangential_force < 0: + raise ValueError("Tangential Force cannot be negative") + elif area < 0: + raise ValueError("Area cannot be negative") + elif stress == 0: + return ( + "stress", + tangential_force / area, + ) + elif tangential_force == 0: + return ( + "tangential_force", + stress * area, + ) + else: + return ( + "area", + tangential_force / stress, + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a02e7a1331583829b2768f02c4b9c412bf26251b Mon Sep 17 00:00:00 2001 From: Harsh Verma <53353745+TheLameOne@users.noreply.github.com> Date: Sat, 29 Oct 2022 19:24:32 +0530 Subject: [PATCH 142/368] Added algorithm for Text Justification in Strings (#7354) * Added algorithm for Text Justification in Strings * Added algorithm for Text Justification in Strings --- strings/text_justification.py | 92 +++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 strings/text_justification.py diff --git a/strings/text_justification.py b/strings/text_justification.py new file mode 100644 index 000000000..5e86456c2 --- /dev/null +++ b/strings/text_justification.py @@ -0,0 +1,92 @@ +def text_justification(word: str, max_width: int) -> list: + """ + Will format the string such that each line has exactly + (max_width) characters and is fully (left and right) justified, + and return the list of justified text. + + example 1: + string = "This is an example of text justification." + max_width = 16 + + output = ['This is an', + 'example of text', + 'justification. '] + + >>> text_justification("This is an example of text justification.", 16) + ['This is an', 'example of text', 'justification. 
'] + + example 2: + string = "Two roads diverged in a yellow wood" + max_width = 16 + output = ['Two roads', + 'diverged in a', + 'yellow wood '] + + >>> text_justification("Two roads diverged in a yellow wood", 16) + ['Two roads', 'diverged in a', 'yellow wood '] + + Time complexity: O(m*n) + Space complexity: O(m*n) + """ + + # Converting string into list of strings split by a space + words = word.split() + + def justify(line: list, width: int, max_width: int) -> str: + + overall_spaces_count = max_width - width + words_count = len(line) + if len(line) == 1: + # if there is only word in line + # just insert overall_spaces_count for the remainder of line + return line[0] + " " * overall_spaces_count + else: + spaces_to_insert_between_words = words_count - 1 + # num_spaces_between_words_list[i] : tells you to insert + # num_spaces_between_words_list[i] spaces + # after word on line[i] + num_spaces_between_words_list = spaces_to_insert_between_words * [ + overall_spaces_count // spaces_to_insert_between_words + ] + spaces_count_in_locations = ( + overall_spaces_count % spaces_to_insert_between_words + ) + # distribute spaces via round robin to the left words + for i in range(spaces_count_in_locations): + num_spaces_between_words_list[i] += 1 + aligned_words_list = [] + for i in range(spaces_to_insert_between_words): + # add the word + aligned_words_list.append(line[i]) + # add the spaces to insert + aligned_words_list.append(num_spaces_between_words_list[i] * " ") + # just add the last word to the sentence + aligned_words_list.append(line[-1]) + # join the aligned words list to form a justified line + return "".join(aligned_words_list) + + answer = [] + line: list[str] = [] + width = 0 + for word in words: + if width + len(word) + len(line) <= max_width: + # keep adding words until we can fill out max_width + # width = sum of length of all words (without overall_spaces_count) + # len(word) = length of current word + # len(line) = number of overall_spaces_count to insert between words + line.append(word) + width += len(word) + else: + # justify the line and add it to result + answer.append(justify(line, width, max_width)) + # reset new line and new width + line, width = [word], len(word) + remaining_spaces = max_width - width - len(line) + answer.append(" ".join(line) + (remaining_spaces + 1) * " ") + return answer + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From d84452344ae1931c635245b1311a10e330223fc6 Mon Sep 17 00:00:00 2001 From: dmorozov001 <116645674+dmorozov001@users.noreply.github.com> Date: Sat, 29 Oct 2022 15:43:03 +0100 Subject: [PATCH 143/368] Correcting typos in CONTRIBUTING.md (#7845) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b5a07af10..5cbb24e56 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Befo ### Contributor -We are very happy that you consider implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: +We are very happy that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: - You did your work - no plagiarism allowed - Any plagiarized work will not be merged. 
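A note on the justify() helper added in the text_justification patch above: the spare spaces on a full line are split evenly across the gaps between words, and any remainder is handed out one space at a time to the left-most gaps. A minimal standalone sketch of that round-robin step (the names line, spare and pads are illustrative and do not appear in the patch):

    line = ["example", "of", "text"]         # words already packed onto one line
    max_width = 16
    width = sum(len(word) for word in line)  # 13 characters of actual words
    gaps = len(line) - 1                     # 2 gaps between the 3 words
    spare = max_width - width                # 3 spaces still to distribute
    pads = [spare // gaps] * gaps            # even share per gap: [1, 1]
    for i in range(spare % gaps):            # 1 leftover space...
        pads[i] += 1                         # ...goes to the left-most gap: [2, 1]
    justified = line[0] + " " * pads[0] + line[1] + " " * pads[1] + line[2]
    print(repr(justified))                   # 'example  of text' (exactly 16 wide)

This reproduces the middle line of the first doctest in text_justification.py.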
From bd50a3068270261fe845aac0daf309c7134e2477 Mon Sep 17 00:00:00 2001 From: Shashank Kashyap <50551759+SKVKPandey@users.noreply.github.com> Date: Sat, 29 Oct 2022 20:55:26 +0530 Subject: [PATCH 144/368] Resonant Frequency & Electrical Impedance (#6983) * Resonant Frequency * Resonant Frequency of LC Circuit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update electronics/resonant_frequency.py Co-authored-by: Caeden * Update electronics/resonant_frequency.py Co-authored-by: Caeden * Update electronics/resonant_frequency.py Co-authored-by: Caeden * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated resonant_frequency.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update electronics/resonant_frequency.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Fixed doctest issues in resonant_frequency.py * Algorithm for Electrical Impedance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated Algorithm for Electrical Impedance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update resonant_frequency.py * Update electrical_impedance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update resonant_frequency.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update electronics/electrical_impedance.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Update electronics/electrical_impedance.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Update electronics/resonant_frequency.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> --- electronics/electrical_impedance.py | 46 ++++++++++++++++++++++++++ electronics/resonant_frequency.py | 50 +++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 electronics/electrical_impedance.py create mode 100644 electronics/resonant_frequency.py diff --git a/electronics/electrical_impedance.py b/electronics/electrical_impedance.py new file mode 100644 index 000000000..44041ff79 --- /dev/null +++ b/electronics/electrical_impedance.py @@ -0,0 +1,46 @@ +"""Electrical impedance is the measure of the opposition that a +circuit presents to a current when a voltage is applied. +Impedance extends the concept of resistance to alternating current (AC) circuits. +Source: https://en.wikipedia.org/wiki/Electrical_impedance +""" + +from __future__ import annotations + +from math import pow, sqrt + + +def electrical_impedance( + resistance: float, reactance: float, impedance: float +) -> dict[str, float]: + """ + Apply Electrical Impedance formula, on any two given electrical values, + which can be resistance, reactance, and impedance, and then in a Python dict + return name/value pair of the zero value. 
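+
+    The three quantities satisfy impedance**2 = resistance**2 + reactance**2;
+    the code below simply solves this relation for whichever argument is passed
+    as 0 (for example, resistance 3 and reactance 4 give impedance 5).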
+
+    >>> electrical_impedance(3,4,0)
+    {'impedance': 5.0}
+    >>> electrical_impedance(0,4,5)
+    {'resistance': 3.0}
+    >>> electrical_impedance(3,0,5)
+    {'reactance': 4.0}
+    >>> electrical_impedance(3,4,5)
+    Traceback (most recent call last):
+        ...
+    ValueError: One and only one argument must be 0
+    """
+    if (resistance, reactance, impedance).count(0) != 1:
+        raise ValueError("One and only one argument must be 0")
+    if resistance == 0:
+        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
+    elif reactance == 0:
+        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
+    elif impedance == 0:
+        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
+    else:
+        raise ValueError("Exactly one argument must be 0")
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
diff --git a/electronics/resonant_frequency.py b/electronics/resonant_frequency.py
new file mode 100644
index 000000000..4f95043b6
--- /dev/null
+++ b/electronics/resonant_frequency.py
@@ -0,0 +1,50 @@
+# https://en.wikipedia.org/wiki/LC_circuit
+
+"""An LC circuit, also called a resonant circuit, tank circuit, or tuned circuit,
+is an electric circuit consisting of an inductor, represented by the letter L,
+and a capacitor, represented by the letter C, connected together.
+The circuit can act as an electrical resonator, an electrical analogue of a
+tuning fork, storing energy oscillating at the circuit's resonant frequency.
+Source: https://en.wikipedia.org/wiki/LC_circuit
+"""
+
+from __future__ import annotations
+
+from math import pi, sqrt
+
+
+def resonant_frequency(inductance: float, capacitance: float) -> tuple:
+    """
+    This function can calculate the resonant frequency of an LC circuit
+    for the given values of inductance and capacitance.
+
+    Examples are given below:
+    >>> resonant_frequency(inductance=10, capacitance=5)
+    ('Resonant frequency', 0.022507907903927652)
+    >>> resonant_frequency(inductance=0, capacitance=5)
+    Traceback (most recent call last):
+        ...
+    ValueError: Inductance cannot be 0 or negative
+    >>> resonant_frequency(inductance=10, capacitance=0)
+    Traceback (most recent call last):
+        ...
+ ValueError: Capacitance cannot be 0 or negative + """ + + if inductance <= 0: + raise ValueError("Inductance cannot be 0 or negative") + + elif capacitance <= 0: + raise ValueError("Capacitance cannot be 0 or negative") + + else: + return ( + "Resonant frequency", + float(1 / (2 * pi * (sqrt(inductance * capacitance)))), + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 47ddba1d914bf5955a244056e794e718dee9ead1 Mon Sep 17 00:00:00 2001 From: Kushagra Makharia Date: Sat, 29 Oct 2022 21:08:40 +0530 Subject: [PATCH 145/368] Added cosine similarity (#7001) * Added cosine similarity * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/similarity_search.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 2f5fc46c0..72979181f 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -12,6 +12,7 @@ from __future__ import annotations import math import numpy as np +from numpy.linalg import norm def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float: @@ -135,6 +136,22 @@ def similarity_search( return answer +def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float: + """ + Calculates cosine similarity between two data. + :param input_a: ndarray of first vector. + :param input_b: ndarray of second vector. + :return: Cosine similarity of input_a and input_b. By using math.sqrt(), + result will be float. + + >>> cosine_similarity(np.array([1]), np.array([1])) + 1.0 + >>> cosine_similarity(np.array([1, 2]), np.array([6, 32])) + 0.9615239476408232 + """ + return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)) + + if __name__ == "__main__": import doctest From 1550731cb7457ddae216da2ffe0bc1587f5234f3 Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 23:45:21 +0300 Subject: [PATCH 146/368] Remove file-level flake8 suppression (#7844) * Remove file-level flake8 suppression * updating DIRECTORY.md * Fix tests Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ data_structures/heap/binomial_heap.py | 50 +++++++++++++-------------- other/activity_selection.py | 8 ++--- searches/binary_tree_traversal.py | 10 +++--- 4 files changed, 34 insertions(+), 36 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 198cc7077..9ea8f3140 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -671,6 +671,7 @@ ## Physics * [Casimir Effect](physics/casimir_effect.py) + * [Centripetal Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) @@ -1069,6 +1070,7 @@ * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) + * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) * [Jaro Winkler](strings/jaro_winkler.py) * [Join](strings/join.py) * [Knuth Morris Pratt](strings/knuth_morris_pratt.py) diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 6398c9943..d79fac7a9 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -1,5 +1,3 @@ -# flake8: 
noqa - """ Binomial Heap Reference: Advanced Data Structures, Peter Brass @@ -22,7 +20,7 @@ class Node: self.right = None self.parent = None - def mergeTrees(self, other): + def merge_trees(self, other): """ In-place merge of two binomial trees of equal size. Returns the root of the resulting tree @@ -75,9 +73,8 @@ class BinomialHeap: 30 Deleting - delete() test - >>> for i in range(25): - ... print(first_heap.deleteMin(), end=" ") - 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 + >>> [first_heap.delete_min() for _ in range(20)] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Create a new Heap >>> second_heap = BinomialHeap() @@ -97,8 +94,8 @@ class BinomialHeap: # # # # preOrder() test - >>> second_heap.preOrder() - [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)] + >>> " ".join(str(x) for x in second_heap.pre_order()) + "(17, 0) ('#', 1) (31, 1) (20, 2) ('#', 3) ('#', 3) (34, 2) ('#', 3) ('#', 3)" printing Heap - __str__() test >>> print(second_heap) @@ -113,14 +110,17 @@ class BinomialHeap: ---# mergeHeaps() test - >>> merged = second_heap.mergeHeaps(first_heap) + >>> + >>> merged = second_heap.merge_heaps(first_heap) >>> merged.peek() 17 values in merged heap; (merge is inplace) - >>> while not first_heap.isEmpty(): - ... print(first_heap.deleteMin(), end=" ") - 17 20 25 26 27 28 29 31 34 + >>> results = [] + >>> while not first_heap.is_empty(): + ... results.append(first_heap.delete_min()) + >>> results + [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34] """ def __init__(self, bottom_root=None, min_node=None, heap_size=0): @@ -128,7 +128,7 @@ class BinomialHeap: self.bottom_root = bottom_root self.min_node = min_node - def mergeHeaps(self, other): + def merge_heaps(self, other): """ In-place merge of two binomial heaps. 
Both of them become the resulting merged heap @@ -180,7 +180,7 @@ class BinomialHeap: next_node = i.parent.parent # Merging trees - i = i.mergeTrees(i.parent) + i = i.merge_trees(i.parent) # Updating links i.left = previous_node @@ -238,7 +238,7 @@ class BinomialHeap: next_node = self.bottom_root.parent.parent # Merge - self.bottom_root = self.bottom_root.mergeTrees(self.bottom_root.parent) + self.bottom_root = self.bottom_root.merge_trees(self.bottom_root.parent) # Update Links self.bottom_root.parent = next_node @@ -252,10 +252,10 @@ class BinomialHeap: """ return self.min_node.val - def isEmpty(self): + def is_empty(self): return self.size == 0 - def deleteMin(self): + def delete_min(self): """ delete min element and return it """ @@ -317,7 +317,7 @@ class BinomialHeap: return min_value # Remaining cases # Construct heap of right subtree - newHeap = BinomialHeap( + new_heap = BinomialHeap( bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new ) @@ -354,11 +354,11 @@ class BinomialHeap: self.min_node = i i = i.parent # Merge heaps - self.mergeHeaps(newHeap) + self.merge_heaps(new_heap) return min_value - def preOrder(self): + def pre_order(self): """ Returns the Pre-order representation of the heap including values of nodes plus their level distance from the root; @@ -369,9 +369,9 @@ class BinomialHeap: while top_root.parent: top_root = top_root.parent # preorder - heap_preOrder = [] - self.__traversal(top_root, heap_preOrder) - return heap_preOrder + heap_pre_order = [] + self.__traversal(top_root, heap_pre_order) + return heap_pre_order def __traversal(self, curr_node, preorder, level=0): """ @@ -389,9 +389,9 @@ class BinomialHeap: Overwriting str for a pre-order print of nodes in heap; Performance is poor, so use only for small examples """ - if self.isEmpty(): + if self.is_empty(): return "" - preorder_heap = self.preOrder() + preorder_heap = self.pre_order() return "\n".join(("-" * level + str(value)) for value, level in preorder_heap) diff --git a/other/activity_selection.py b/other/activity_selection.py index d809bf90a..18ff6a24c 100644 --- a/other/activity_selection.py +++ b/other/activity_selection.py @@ -1,5 +1,3 @@ -# flake8: noqa - """The following implementation assumes that the activities are already sorted according to their finish time""" @@ -10,11 +8,11 @@ single person, one at a time""" # finish[] --> An array that contains finish time of all activities -def printMaxActivities(start: list[int], finish: list[int]) -> None: +def print_max_activities(start: list[int], finish: list[int]) -> None: """ >>> start = [1, 3, 0, 5, 8, 5] >>> finish = [2, 4, 6, 7, 9, 9] - >>> printMaxActivities(start, finish) + >>> print_max_activities(start, finish) The following activities are selected: 0,1,3,4, """ @@ -43,4 +41,4 @@ if __name__ == "__main__": start = [1, 3, 0, 5, 8, 5] finish = [2, 4, 6, 7, 9, 9] - printMaxActivities(start, finish) + print_max_activities(start, finish) diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 033db83d7..66814b478 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -1,5 +1,3 @@ -# flake8: noqa - """ This is pure Python implementation of tree traversal algorithms """ @@ -157,16 +155,16 @@ def level_order_actual(node: TreeNode) -> None: q: queue.Queue = queue.Queue() q.put(node) while not q.empty(): - list = [] + list_ = [] while not q.empty(): node_dequeued = q.get() print(node_dequeued.data, end=",") if node_dequeued.left: - list.append(node_dequeued.left) + 
list_.append(node_dequeued.left) if node_dequeued.right: - list.append(node_dequeued.right) + list_.append(node_dequeued.right) print() - for node in list: + for node in list_: q.put(node) From 3ec0aa85c0074d838d97dc030e582743586cd80e Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Sun, 30 Oct 2022 02:54:59 +0530 Subject: [PATCH 147/368] Update kinetic_energy.py (#7848) Fixed a typo error in docstrings --- physics/kinetic_energy.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/physics/kinetic_energy.py b/physics/kinetic_energy.py index 8863919ac..77016e223 100644 --- a/physics/kinetic_energy.py +++ b/physics/kinetic_energy.py @@ -2,16 +2,16 @@ Find the kinetic energy of an object, given its mass and velocity. Description : In physics, the kinetic energy of an object is the energy that it -possesses due to its motion. It is defined as the work needed to accelerate a body of a -given mass from rest to its stated velocity. Having gained this energy during its -acceleration, the body maintains this kinetic energy unless its speed changes. The same +possesses due to its motion.It is defined as the work needed to accelerate a body of a +given mass from rest to its stated velocity.Having gained this energy during its +acceleration, the body maintains this kinetic energy unless its speed changes.The same amount of work is done by the body when decelerating from its current speed to a state -of rest. Formally, a kinetic energy is any term in a system's Lagrangian which includes +of rest.Formally, a kinetic energy is any term in a system's Lagrangian which includes a derivative with respect to time. In classical mechanics, the kinetic energy of a non-rotating object of mass m traveling -at a speed v is ½mv². In relativistic mechanics, this is a good approximation only when -v is much less than the speed of light. The standard unit of kinetic energy is the +at a speed v is ½mv².In relativistic mechanics, this is a good approximation only when +v is much less than the speed of light.The standard unit of kinetic energy is the joule, while the English unit of kinetic energy is the foot-pound. Reference : https://en.m.wikipedia.org/wiki/Kinetic_energy @@ -20,7 +20,7 @@ Reference : https://en.m.wikipedia.org/wiki/Kinetic_energy def kinetic_energy(mass: float, velocity: float) -> float: """ - Calculate kinetick energy. + Calculate kinetic energy. 
The kinetic energy of a non-rotating object of mass m traveling at a speed v is ½mv² From 7b7b3dd086eb3d8f6a82aa94b4398c0b95a7f186 Mon Sep 17 00:00:00 2001 From: Jason Devers <74424054+jdevers1@users.noreply.github.com> Date: Sun, 30 Oct 2022 01:20:07 -0400 Subject: [PATCH 148/368] matrix/count_paths.py (#7533) * added recursive dfs backtracking for count paths with doctests * fixed doc testing * added type hints * redefined r as row, c as col * fixed naming conventions, ran mypy, only tests that didn't pass were using List[], rathan list() * added another doctest, as well as a explanation above * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/count_paths.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update matrix/count_paths.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: J Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- matrix/count_paths.py | 75 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 matrix/count_paths.py diff --git a/matrix/count_paths.py b/matrix/count_paths.py new file mode 100644 index 000000000..4861ad5fd --- /dev/null +++ b/matrix/count_paths.py @@ -0,0 +1,75 @@ +""" +Given a grid, where you start from the top left position [0, 0], +you want to find how many paths you can take to get to the bottom right position. + +start here -> 0 0 0 0 + 1 1 0 0 + 0 0 0 1 + 0 1 0 0 <- finish here +how many 'distinct' paths can you take to get to the finish? +Using a recursive depth-first search algorithm below, you are able to +find the number of distinct unique paths (count). + +'*' will demonstrate a path +In the example above, there are two distinct paths: +1. 2. + * * * 0 * * * * + 1 1 * 0 1 1 * * + 0 0 * 1 0 0 * 1 + 0 1 * * 0 1 * * +""" + + +def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int: + """ + Recursive Backtracking Depth First Search Algorithm + + Starting from top left of a matrix, count the number of + paths that can reach the bottom right of a matrix. 
+ 1 represents a block (inaccessible) + 0 represents a valid space (accessible) + + 0 0 0 0 + 1 1 0 0 + 0 0 0 1 + 0 1 0 0 + >>> grid = [[0, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]] + >>> depth_first_search(grid, 0, 0, set()) + 2 + + 0 0 0 0 0 + 0 1 1 1 0 + 0 1 1 1 0 + 0 0 0 0 0 + >>> grid = [[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]] + >>> depth_first_search(grid, 0, 0, set()) + 2 + """ + row_length, col_length = len(grid), len(grid[0]) + if ( + min(row, col) < 0 + or row == row_length + or col == col_length + or (row, col) in visit + or grid[row][col] == 1 + ): + return 0 + if row == row_length - 1 and col == col_length - 1: + return 1 + + visit.add((row, col)) + + count = 0 + count += depth_first_search(grid, row + 1, col, visit) + count += depth_first_search(grid, row - 1, col, visit) + count += depth_first_search(grid, row, col + 1, visit) + count += depth_first_search(grid, row, col - 1, visit) + + visit.remove((row, col)) + return count + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2d3985006f0c88e339a900caa4974493bc6fa861 Mon Sep 17 00:00:00 2001 From: Itssxxsalman <114142076+Itssxxsalman@users.noreply.github.com> Date: Sun, 30 Oct 2022 12:03:28 +0500 Subject: [PATCH 149/368] Fix grammatical mistakes in `simple_keyword_cypher.py` (#6385) * Fixed grammitical mistake * Update ciphers/simple_keyword_cypher.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- ciphers/simple_keyword_cypher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 447bacfc2..1635471ae 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -21,7 +21,7 @@ def create_cipher_map(key: str) -> dict[str, str]: :param key: keyword to use :return: dictionary cipher map """ - # Create alphabet list + # Create a list of the letters in the alphabet alphabet = [chr(i + 65) for i in range(26)] # Remove duplicate characters from key key = remove_duplicates(key.upper()) From f340bde6e047d86171385b90a023ac01e8914d0c Mon Sep 17 00:00:00 2001 From: Caio Cordeiro Date: Sun, 30 Oct 2022 04:05:44 -0300 Subject: [PATCH 150/368] Add simple neural network (#6452) * feat: add simple foward propagation implementation * fix: add PR requested changes * feat: add code example * fix: solve pre-commit failure * feat: add doctest inside code execution * fix: PR requested changes * fix: pr requested changes Co-authored-by: Caio Cordeiro --- neural_network/simple_neural_network.py | 63 +++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 neural_network/simple_neural_network.py diff --git a/neural_network/simple_neural_network.py b/neural_network/simple_neural_network.py new file mode 100644 index 000000000..f2a323487 --- /dev/null +++ b/neural_network/simple_neural_network.py @@ -0,0 +1,63 @@ +""" +Forward propagation explanation: +https://towardsdatascience.com/forward-propagation-in-neural-networks-simplified-math-and-code-version-bbcfef6f9250 +""" + +import math +import random + + +# Sigmoid +def sigmoid_function(value: float, deriv: bool = False) -> float: + """Return the sigmoid function of a float. 
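+
+    Here sigmoid(value) = 1 / (1 + e**(-value)).  When deriv=True the function
+    instead returns value * (1 - value), the derivative of the sigmoid written
+    in terms of an already-computed sigmoid output.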
+ + >>> sigmoid_function(3.5) + 0.9706877692486436 + >>> sigmoid_function(3.5, True) + -8.75 + """ + if deriv: + return value * (1 - value) + return 1 / (1 + math.exp(-value)) + + +# Initial Value +INITIAL_VALUE = 0.02 + + +def forward_propagation(expected: int, number_propagations: int) -> float: + """Return the value found after the forward propagation training. + + >>> res = forward_propagation(32, 10000000) + >>> res > 31 and res < 33 + True + + >>> res = forward_propagation(32, 1000) + >>> res > 31 and res < 33 + False + """ + + # Random weight + weight = float(2 * (random.randint(1, 100)) - 1) + + for _ in range(number_propagations): + # Forward propagation + layer_1 = sigmoid_function(INITIAL_VALUE * weight) + # How much did we miss? + layer_1_error = (expected / 100) - layer_1 + # Error delta + layer_1_delta = layer_1_error * sigmoid_function(layer_1, True) + # Update weight + weight += INITIAL_VALUE * layer_1_delta + + return layer_1 * 100 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + expected = int(input("Expected value: ")) + number_propagations = int(input("Number of propagations: ")) + print(forward_propagation(expected, number_propagations)) From 0c5f1c01302c8208251f61730ba74e078bfd0ac8 Mon Sep 17 00:00:00 2001 From: ok-open-sc <114725648+ok-open-sc@users.noreply.github.com> Date: Sun, 30 Oct 2022 03:11:17 -0400 Subject: [PATCH 151/368] Increased Readability Of Variables (#6400) * Increased Readability Of Variables * Update anagrams.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update anagrams.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/anagrams.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/strings/anagrams.py b/strings/anagrams.py index b671d3f3d..fb9ac0bd1 100644 --- a/strings/anagrams.py +++ b/strings/anagrams.py @@ -26,15 +26,15 @@ def anagram(my_word: str) -> list[str]: >>> anagram('final') ['final'] """ - return word_bysig[signature(my_word)] + return word_by_signature[signature(my_word)] data: str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8") word_list = sorted({word.strip().lower() for word in data.splitlines()}) -word_bysig = collections.defaultdict(list) +word_by_signature = collections.defaultdict(list) for word in word_list: - word_bysig[signature(word)].append(word) + word_by_signature[signature(word)].append(word) if __name__ == "__main__": all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} From f87de60b6d1cd6e9ce412503f48727015f46ada2 Mon Sep 17 00:00:00 2001 From: lostybtw <58177990+lostybtw@users.noreply.github.com> Date: Sun, 30 Oct 2022 07:22:52 +0000 Subject: [PATCH 152/368] fizzbuzz complete (#6504) * fizzbuzz * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * added doctests and function to fizzbuzz * Update fizz_buzz.py * Update fizz_buzz.py * Fixed FizzBuzz * fizzbuzz passing test * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * 
Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update fizz_buzz.py * Update fizz_buzz.py * Update fizz_buzz.py * fixed fizzbuzz * Add files via upload * added mechanical energy calculation * Delete mechanical_energy.py * Update fizz_buzz.py * Update dynamic_programming/fizz_buzz.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fizz_buzz.py Co-authored-by: Caeden Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/fizz_buzz.py | 65 ++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 dynamic_programming/fizz_buzz.py diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py new file mode 100644 index 000000000..dd1d21b10 --- /dev/null +++ b/dynamic_programming/fizz_buzz.py @@ -0,0 +1,65 @@ +# https://en.wikipedia.org/wiki/Fizz_buzz#Programming + + +def fizz_buzz(number: int, iterations: int) -> str: + """ + Plays FizzBuzz. + Prints Fizz if number is a multiple of 3. + Prints Buzz if its a multiple of 5. + Prints FizzBuzz if its a multiple of both 3 and 5 or 15. + Else Prints The Number Itself. + >>> fizz_buzz(1,7) + '1 2 Fizz 4 Buzz Fizz 7 ' + >>> fizz_buzz(1,0) + Traceback (most recent call last): + ... + ValueError: Iterations must be done more than 0 times to play FizzBuzz + >>> fizz_buzz(-5,5) + Traceback (most recent call last): + ... + ValueError: starting number must be + and integer and be more than 0 + >>> fizz_buzz(10,-5) + Traceback (most recent call last): + ... + ValueError: Iterations must be done more than 0 times to play FizzBuzz + >>> fizz_buzz(1.5,5) + Traceback (most recent call last): + ... + ValueError: starting number must be + and integer and be more than 0 + >>> fizz_buzz(1,5.5) + Traceback (most recent call last): + ... 
+ ValueError: iterations must be defined as integers + """ + + if not type(iterations) == int: + raise ValueError("iterations must be defined as integers") + if not type(number) == int or not number >= 1: + raise ValueError( + """starting number must be + and integer and be more than 0""" + ) + if not iterations >= 1: + raise ValueError("Iterations must be done more than 0 times to play FizzBuzz") + + out = "" + while number <= iterations: + if number % 3 == 0: + out += "Fizz" + if number % 5 == 0: + out += "Buzz" + if not number % 3 == 0 and not number % 5 == 0: + out += str(number) + + # print(out) + number += 1 + out += " " + return out + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 17d93cab783095dd1def3c382866cd94296db455 Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Sun, 30 Oct 2022 10:00:47 +0100 Subject: [PATCH 153/368] Added Manhattan distance algorithm (#7790) * Added Manhattan distance algorithm, Fixes: #7776 * Forgot that isinstance can accept a tuple * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update manhattan_distance.py * Update manhattan_distance.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/manhattan_distance.py | 126 ++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 maths/manhattan_distance.py diff --git a/maths/manhattan_distance.py b/maths/manhattan_distance.py new file mode 100644 index 000000000..2711d4c8c --- /dev/null +++ b/maths/manhattan_distance.py @@ -0,0 +1,126 @@ +def manhattan_distance(point_a: list, point_b: list) -> float: + """ + Expectts two list of numbers representing two points in the same + n-dimensional space + + https://en.wikipedia.org/wiki/Taxicab_geometry + + >>> manhattan_distance([1,1], [2,2]) + 2.0 + >>> manhattan_distance([1.5,1.5], [2,2]) + 1.0 + >>> manhattan_distance([1.5,1.5], [2.5,2]) + 1.5 + >>> manhattan_distance([-3, -3, -3], [0, 0, 0]) + 9.0 + >>> manhattan_distance([1,1], None) + Traceback (most recent call last): + ... + ValueError: Missing an input + >>> manhattan_distance([1,1], [2, 2, 2]) + Traceback (most recent call last): + ... + ValueError: Both points must be in the same n-dimensional space + >>> manhattan_distance([1,"one"], [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + >>> manhattan_distance(1, [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found int + >>> manhattan_distance([1,1], "not_a_list") + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + """ + + _validate_point(point_a) + _validate_point(point_b) + if len(point_a) != len(point_b): + raise ValueError("Both points must be in the same n-dimensional space") + + return float(sum(abs(a - b) for a, b in zip(point_a, point_b))) + + +def _validate_point(point: list[float]) -> None: + """ + >>> _validate_point(None) + Traceback (most recent call last): + ... + ValueError: Missing an input + >>> _validate_point([1,"one"]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + >>> _validate_point(1) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found int + >>> _validate_point("not_a_list") + Traceback (most recent call last): + ... 
+ TypeError: Expected a list of numbers as input, found str + """ + if point: + if isinstance(point, list): + for item in point: + if not isinstance(item, (int, float)): + raise TypeError( + f"Expected a list of numbers as input, " + f"found {type(item).__name__}" + ) + else: + raise TypeError( + f"Expected a list of numbers as input, found {type(point).__name__}" + ) + else: + raise ValueError("Missing an input") + + +def manhattan_distance_one_liner(point_a: list, point_b: list) -> float: + """ + Version with one liner + + >>> manhattan_distance_one_liner([1,1], [2,2]) + 2.0 + >>> manhattan_distance_one_liner([1.5,1.5], [2,2]) + 1.0 + >>> manhattan_distance_one_liner([1.5,1.5], [2.5,2]) + 1.5 + >>> manhattan_distance_one_liner([-3, -3, -3], [0, 0, 0]) + 9.0 + >>> manhattan_distance_one_liner([1,1], None) + Traceback (most recent call last): + ... + ValueError: Missing an input + >>> manhattan_distance_one_liner([1,1], [2, 2, 2]) + Traceback (most recent call last): + ... + ValueError: Both points must be in the same n-dimensional space + >>> manhattan_distance_one_liner([1,"one"], [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + >>> manhattan_distance_one_liner(1, [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found int + >>> manhattan_distance_one_liner([1,1], "not_a_list") + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + """ + + _validate_point(point_a) + _validate_point(point_b) + if len(point_a) != len(point_b): + raise ValueError("Both points must be in the same n-dimensional space") + + return float(sum(abs(x - y) for x, y in zip(point_a, point_b))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 57ccabbaeb0f32165271e3a218bc9c6dcfc21823 Mon Sep 17 00:00:00 2001 From: Andrey Date: Sun, 30 Oct 2022 11:01:58 +0200 Subject: [PATCH 154/368] Update docs (#7867) * Update docs, remove unused excludes from pre-commit * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ---- CONTRIBUTING.md | 2 +- DIRECTORY.md | 12 ++++++++++++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 56946f5f2..004def5e4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,10 +7,6 @@ repos: - id: end-of-file-fixer types: [python] - id: trailing-whitespace - exclude: | - (?x)^( - data_structures/heap/binomial_heap.py - )$ - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5cbb24e56..37e020b8f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.9+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.10+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. 
- Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. diff --git a/DIRECTORY.md b/DIRECTORY.md index 9ea8f3140..8ac9c3be7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -158,6 +158,8 @@ * [Weight Conversion](conversions/weight_conversion.py) ## Data Structures + * Arrays + * [Permutations](data_structures/arrays/permutations.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -291,6 +293,7 @@ * [Factorial](dynamic_programming/factorial.py) * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) * [Fibonacci](dynamic_programming/fibonacci.py) + * [Fizz Buzz](dynamic_programming/fizz_buzz.py) * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) @@ -313,12 +316,16 @@ * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) + * [Viterbi](dynamic_programming/viterbi.py) ## Electronics * [Carrier Concentration](electronics/carrier_concentration.py) * [Coulombs Law](electronics/coulombs_law.py) + * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) + * [Electrical Impedance](electronics/electrical_impedance.py) * [Ohms Law](electronics/ohms_law.py) + * [Resonant Frequency](electronics/resonant_frequency.py) ## File Transfer * [Receive File](file_transfer/receive_file.py) @@ -430,6 +437,7 @@ ## Knapsack * [Greedy Knapsack](knapsack/greedy_knapsack.py) * [Knapsack](knapsack/knapsack.py) + * [Recursive Approach Knapsack](knapsack/recursive_approach_knapsack.py) * Tests * [Test Greedy Knapsack](knapsack/tests/test_greedy_knapsack.py) * [Test Knapsack](knapsack/tests/test_knapsack.py) @@ -622,6 +630,7 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Count Paths](matrix/count_paths.py) * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) @@ -645,6 +654,7 @@ * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) + * [Simple Neural Network](neural_network/simple_neural_network.py) ## Other * [Activity Selection](other/activity_selection.py) @@ -680,6 +690,7 @@ * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Potential Energy](physics/potential_energy.py) + * [Sheer Stress](physics/sheer_stress.py) ## Project Euler * Problem 001 @@ -1089,6 +1100,7 @@ * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) + * [Text Justification](strings/text_justification.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) From 5ba5c548584f44bac0bc3c0cb4e95233560627cf Mon Sep 17 00:00:00 2001 From: Sushant Srivastav <63559772+sushant4191@users.noreply.github.com> Date: Sun, 30 Oct 2022 14:38:54 +0530 Subject: [PATCH 155/368] Updated info 
(#7866) * Updated info Updated the readme section for sorts. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sorts/README.md Co-authored-by: Caeden Perelli-Harris * Update README.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- sorts/README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 sorts/README.md diff --git a/sorts/README.md b/sorts/README.md new file mode 100644 index 000000000..ceb0207c2 --- /dev/null +++ b/sorts/README.md @@ -0,0 +1,11 @@ +# Sorting Algorithms +Sorting is the process of putting data in a specific order. The way to arrange data in a specific order +is specified by the sorting algorithm. The most typical orders are lexical or numerical. The significance +of sorting lies in the fact that, if data is stored in a sorted manner, data searching can be highly optimised. +Another use for sorting is to represent data in a more readable manner. + +This section contains a lot of important algorithms that helps us to use sorting algorithms in various scenarios. +## References +* +* +* From 87a5d919761e9ccb05e19e68a5307348c6264cd0 Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Sun, 30 Oct 2022 05:49:33 -0400 Subject: [PATCH 156/368] quantum_teleportation.py (#6632) * quantum_teleportation.py This code is for the #Hacktoberfest. This file run the quantum teleportation circuit using Qiskit. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum/quantum_teleportation.py Co-authored-by: Caeden * Update quantum/quantum_teleportation.py Co-authored-by: Caeden * Update Corrected some typos. Add more comments for adding the gates. Update the variable qc with quantum_circuit in the simulator and execute. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * python return typehint solved. * Fix long line Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Co-authored-by: Christian Clauss --- quantum/quantum_teleportation.py | 70 ++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 quantum/quantum_teleportation.py diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py new file mode 100644 index 000000000..5fbc57a66 --- /dev/null +++ b/quantum/quantum_teleportation.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +""" +Build quantum teleportation circuit using three quantum bits +and 1 classical bit. The main idea is to send one qubit from +Alice to Bob using the entanglement properties. This experiment +run in IBM Q simulator with 1000 shots. +. 
+References: +https://en.wikipedia.org/wiki/Quantum_teleportation +https://qiskit.org/textbook/ch-algorithms/teleportation.html +""" + +import numpy as np +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def quantum_teleportation( + theta: float = np.pi / 2, phi: float = np.pi / 2, lam: float = np.pi / 2 +) -> qiskit.result.counts.Counts: + + """ + # >>> quantum_teleportation() + #{'00': 500, '11': 500} # ideally + # ┌─────────────────┐ ┌───┐ + #qr_0: ┤ U(π/2,π/2,π/2) ├───────■──┤ H ├─■───────── + # └──────┬───┬──────┘ ┌─┴─┐└───┘ │ + #qr_1: ───────┤ H ├─────────■──┤ X ├──────┼───■───── + # └───┘ ┌─┴─┐└───┘ │ ┌─┴─┐┌─┐ + #qr_2: ───────────────────┤ X ├───────────■─┤ X ├┤M├ + # └───┘ └───┘└╥┘ + #cr: 1/═══════════════════════════════════════════╩═ + Args: + theta (float): Single qubit rotation U Gate theta parameter. Default to np.pi/2 + phi (float): Single qubit rotation U Gate phi parameter. Default to np.pi/2 + lam (float): Single qubit rotation U Gate lam parameter. Default to np.pi/2 + Returns: + qiskit.result.counts.Counts: Teleported qubit counts. + """ + + qr = QuantumRegister(3, "qr") # Define the number of quantum bits + cr = ClassicalRegister(1, "cr") # Define the number of classical bits + + quantum_circuit = QuantumCircuit(qr, cr) # Define the quantum circuit. + + # Build the circuit + quantum_circuit.u(theta, phi, lam, 0) # Quantum State to teleport + quantum_circuit.h(1) # add hadamard gate + quantum_circuit.cx( + 1, 2 + ) # add control gate with qubit 1 as control and 2 as target. + quantum_circuit.cx(0, 1) + quantum_circuit.h(0) + quantum_circuit.cz(0, 2) # add control z gate. + quantum_circuit.cx(1, 2) + + quantum_circuit.measure([2], [0]) # measure the qubit. + + # Simulate the circuit using qasm simulator + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=1000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print( + "Total count for teleported state is: " + f"{quantum_teleportation(np.pi/2, np.pi/2, np.pi/2)}" + ) From 00dfad9d20abf755a91abc0ba35f5d92fcab9149 Mon Sep 17 00:00:00 2001 From: giladwo <25708271+giladwo@users.noreply.github.com> Date: Sun, 30 Oct 2022 11:59:10 +0200 Subject: [PATCH 157/368] Simplify climbing stairs and use constant memory (#6628) * Simplify climbing stairs and use constant memory * number_of_steps Co-authored-by: Christian Clauss --- dynamic_programming/climbing_stairs.py | 29 +++++++++++++------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/dynamic_programming/climbing_stairs.py b/dynamic_programming/climbing_stairs.py index 048d57aed..d6273d025 100644 --- a/dynamic_programming/climbing_stairs.py +++ b/dynamic_programming/climbing_stairs.py @@ -1,20 +1,20 @@ #!/usr/bin/env python3 -def climb_stairs(n: int) -> int: +def climb_stairs(number_of_steps: int) -> int: """ LeetCdoe No.70: Climbing Stairs - Distinct ways to climb a n step staircase where - each time you can either climb 1 or 2 steps. + Distinct ways to climb a number_of_steps staircase where each time you can either + climb 1 or 2 steps. 
Args: - n: number of steps of staircase + number_of_steps: number of steps on the staircase Returns: - Distinct ways to climb a n step staircase + Distinct ways to climb a number_of_steps staircase Raises: - AssertionError: n not positive integer + AssertionError: number_of_steps not positive integer >>> climb_stairs(3) 3 @@ -23,18 +23,17 @@ def climb_stairs(n: int) -> int: >>> climb_stairs(-7) # doctest: +ELLIPSIS Traceback (most recent call last): ... - AssertionError: n needs to be positive integer, your input -7 + AssertionError: number_of_steps needs to be positive integer, your input -7 """ assert ( - isinstance(n, int) and n > 0 - ), f"n needs to be positive integer, your input {n}" - if n == 1: + isinstance(number_of_steps, int) and number_of_steps > 0 + ), f"number_of_steps needs to be positive integer, your input {number_of_steps}" + if number_of_steps == 1: return 1 - dp = [0] * (n + 1) - dp[0], dp[1] = (1, 1) - for i in range(2, n + 1): - dp[i] = dp[i - 1] + dp[i - 2] - return dp[n] + previous, current = 1, 1 + for _ in range(number_of_steps - 1): + current, previous = current + previous, current + return current if __name__ == "__main__": From 84facb78b20be6a9a90307c79e318c65a04987ac Mon Sep 17 00:00:00 2001 From: Saksham1970 <45041294+Saksham1970@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:40:16 +0530 Subject: [PATCH 158/368] Project Euler: 092 decreased the time (#6627) * Added explanation and increased speed of the solution of problem 092 * updating DIRECTORY.md * Added temporary fix to the failing of problem 104 * Reduced few seconds by minor improvements * Update sol.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- project_euler/problem_092/sol1.py | 42 +++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index d326fc33f..33a6c0694 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -11,11 +11,11 @@ What is most amazing is that EVERY starting number will eventually arrive at 1 o How many starting numbers below ten million will arrive at 89? """ - -DIGITS_SQUARED = [digit**2 for digit in range(10)] +DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)] def next_number(number: int) -> int: + """ Returns the next number of the chain by adding the square of each digit to form a new number. @@ -28,15 +28,29 @@ def next_number(number: int) -> int: >>> next_number(32) 13 """ + sum_of_digits_squared = 0 while number: - sum_of_digits_squared += DIGITS_SQUARED[number % 10] - number //= 10 + + # Increased Speed Slightly by checking every 5 digits together. + sum_of_digits_squared += DIGITS_SQUARED[number % 100000] + number //= 100000 return sum_of_digits_squared -CHAINS = {1: True, 58: False} +# There are 2 Chains made, +# One ends with 89 with the chain member 58 being the one which when declared first, +# there will be the least number of iterations for all the members to be checked. + +# The other one ends with 1 and has only one element 1. + +# So 58 and 1 are chosen to be declared at the starting. 
+ +# Changed dictionary to an array to quicken the solution +CHAINS: list[bool | None] = [None] * 10000000 +CHAINS[0] = True +CHAINS[57] = False def chain(number: int) -> bool: @@ -54,11 +68,16 @@ def chain(number: int) -> bool: >>> chain(1) True """ - if number in CHAINS: - return CHAINS[number] + + if CHAINS[number - 1] is not None: + return CHAINS[number - 1] # type: ignore number_chain = chain(next_number(number)) - CHAINS[number] = number_chain + CHAINS[number - 1] = number_chain + + while number < 10000000: + CHAINS[number - 1] = number_chain + number *= 10 return number_chain @@ -74,12 +93,15 @@ def solution(number: int = 10000000) -> int: >>> solution(10000000) 8581146 """ - return sum(1 for i in range(1, number) if not chain(i)) + for i in range(1, number): + if CHAINS[i] is None: + chain(i + 1) + + return CHAINS[:number].count(False) if __name__ == "__main__": import doctest doctest.testmod() - print(f"{solution() = }") From 48a73a28d477a1b634479001bc04e0886b265bfb Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 30 Oct 2022 10:11:29 +0000 Subject: [PATCH 159/368] fix(quantum): Correct simulator deprecation (#7869) --- quantum/quantum_teleportation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py index 5fbc57a66..d04b44d15 100644 --- a/quantum/quantum_teleportation.py +++ b/quantum/quantum_teleportation.py @@ -57,7 +57,7 @@ def quantum_teleportation( quantum_circuit.measure([2], [0]) # measure the qubit. # Simulate the circuit using qasm simulator - backend = Aer.get_backend("qasm_simulator") + backend = Aer.get_backend("aer_simulator") job = execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit) From ba576a9a0b0a41405cfa11606c39908a1bc2b01b Mon Sep 17 00:00:00 2001 From: Devesh Swarnkar <71492529+devesh-0419@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:44:02 +0530 Subject: [PATCH 160/368] Create README.md (#6642) for blockchain file --- blockchain/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 blockchain/README.md diff --git a/blockchain/README.md b/blockchain/README.md new file mode 100644 index 000000000..5ae7f95ec --- /dev/null +++ b/blockchain/README.md @@ -0,0 +1,8 @@ +# Blockchain + +A Blockchain is a type of distributed ledger technology (DLT) that consists of growing list of records, called blocks, that are securely linked together using cryptography. + +* +* +* +* From ca923389c0330b6b7afc935bdd7fa9a15d377079 Mon Sep 17 00:00:00 2001 From: Si Lam Date: Sun, 30 Oct 2022 05:25:51 -0500 Subject: [PATCH 161/368] Description of Double hasing (#6467) * Description of DOuble hasing * Fix sheebang * Update double_hash.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update double_hash.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/double_hash.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index bd1355fca..453e0d131 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -1,4 +1,16 @@ #!/usr/bin/env python3 +""" +Double hashing is a collision resolving technique in Open Addressed Hash tables. +Double hashing uses the idea of applying a second hash function to key when a collision +occurs. 
The advantage of Double hashing is that it is one of the best form of probing, +producing a uniform distribution of records throughout a hash table. This technique +does not yield any clusters. It is one of effective method for resolving collisions. + +Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE +Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table. + +Reference: https://en.wikipedia.org/wiki/Double_hashing +""" from .hash_table import HashTable from .number_theory.prime_numbers import is_prime, next_prime From c0b0b128b7ad4a5a75ed866bc7c114c3cf7a89ef Mon Sep 17 00:00:00 2001 From: Kavienan J <45987371+kavienanj@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:56:46 +0530 Subject: [PATCH 162/368] Add Ideal Gas Law for physics (#6503) * add physics ideal gas law * run pre commit * Update physics/ideal_gas_law.py Suggestion #1 Co-authored-by: Caeden * Update physics/ideal_gas_law.py Suggestion #2 Co-authored-by: Caeden * run pre commit * Update volume return line sugesstion Co-authored-by: Caeden Perelli-Harris * Add suggestions * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Caeden Co-authored-by: Christian Clauss --- physics/ideal_gas_law.py | 59 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 physics/ideal_gas_law.py diff --git a/physics/ideal_gas_law.py b/physics/ideal_gas_law.py new file mode 100644 index 000000000..805da47b0 --- /dev/null +++ b/physics/ideal_gas_law.py @@ -0,0 +1,59 @@ +""" +The ideal gas law, also called the general gas equation, is the +equation of state of a hypothetical ideal gas. It is a good approximation +of the behavior of many gases under many conditions, although it has +several limitations. It was first stated by Benoît Paul Émile Clapeyron +in 1834 as a combination of the empirical Boyle's law, Charles's law, +Avogadro's law, and Gay-Lussac's law.[1] The ideal gas law is often written +in an empirical form: + ------------ + | PV = nRT | + ------------ +P = Pressure (Pa) +V = Volume (m^3) +n = Amount of substance (mol) +R = Universal gas constant +T = Absolute temperature (Kelvin) + +(Description adapted from https://en.wikipedia.org/wiki/Ideal_gas_law ) +""" + +UNIVERSAL_GAS_CONSTANT = 8.314462 # Unit - J mol-1 K-1 + + +def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float: + """ + >>> pressure_of_gas_system(2, 100, 5) + 332.57848 + >>> pressure_of_gas_system(0.5, 273, 0.004) + 283731.01575 + >>> pressure_of_gas_system(3, -0.46, 23.5) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter positive value. + """ + if moles < 0 or kelvin < 0 or volume < 0: + raise ValueError("Invalid inputs. Enter positive value.") + return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume + + +def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float: + """ + >>> volume_of_gas_system(2, 100, 5) + 332.57848 + >>> volume_of_gas_system(0.5, 273, 0.004) + 283731.01575 + >>> volume_of_gas_system(3, -0.46, 23.5) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter positive value. + """ + if moles < 0 or kelvin < 0 or pressure < 0: + raise ValueError("Invalid inputs. 
Enter positive value.") + return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From b32903d22f3a0fc8985a3dd1e4c4645f12b9f961 Mon Sep 17 00:00:00 2001 From: Kavienan J <45987371+kavienanj@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:59:00 +0530 Subject: [PATCH 163/368] Add root mean square speed of gas molecules to physics (#6569) * add rms speed of molecule to physics * Update physics/rms_speed_of_molecule.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- physics/rms_speed_of_molecule.py | 52 ++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 physics/rms_speed_of_molecule.py diff --git a/physics/rms_speed_of_molecule.py b/physics/rms_speed_of_molecule.py new file mode 100644 index 000000000..478cee01c --- /dev/null +++ b/physics/rms_speed_of_molecule.py @@ -0,0 +1,52 @@ +""" +The root-mean-square speed is essential in measuring the average speed of particles +contained in a gas, defined as, + ----------------- + | Vrms = √3RT/M | + ----------------- + +In Kinetic Molecular Theory, gasified particles are in a condition of constant random +motion; each particle moves at a completely different pace, perpetually clashing and +changing directions consistently velocity is used to describe the movement of gas +particles, thereby taking into account both speed and direction. Although the velocity +of gaseous particles is constantly changing, the distribution of velocities does not +change. +We cannot gauge the velocity of every individual particle, thus we frequently reason +in terms of the particles average behavior. Particles moving in opposite directions +have velocities of opposite signs. Since gas particles are in random motion, it's +plausible that there'll be about as several moving in one direction as within the other +way, which means that the average velocity for a collection of gas particles equals +zero; as this value is unhelpful, the average of velocities can be determined using an +alternative method. +""" + + +UNIVERSAL_GAS_CONSTANT = 8.3144598 + + +def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float: + """ + >>> rms_speed_of_molecule(100, 2) + 35.315279554323226 + >>> rms_speed_of_molecule(273, 12) + 23.821458421977443 + """ + if temperature < 0: + raise Exception("Temperature cannot be less than 0 K") + if molar_mass <= 0: + raise Exception("Molar mass cannot be less than or equal to 0 kg/mol") + else: + return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() + + # example + temperature = 300 + molar_mass = 28 + vrms = rms_speed_of_molecule(temperature, molar_mass) + print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s") From fcfe35c3d8ed15037c0f20e3ee2268eea840b1ff Mon Sep 17 00:00:00 2001 From: samyakpagariya <72349392+samyakpagariya@users.noreply.github.com> Date: Sun, 30 Oct 2022 16:13:41 +0530 Subject: [PATCH 164/368] For the better understanding of time taken. (#6583) * For the better understanding of time taken. In this change I have initialized a variable p with the value of (1e9+7) and then took the modulus of process time with it . This modification gives better time taken by the process . Firstly it was giving answer in the exponential now it gives in the integer form. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- sorts/bubble_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index d4f0d25ca..aef2da272 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -49,4 +49,4 @@ if __name__ == "__main__": unsorted = [int(item) for item in user_input.split(",")] start = time.process_time() print(*bubble_sort(unsorted), sep=",") - print(f"Processing time: {time.process_time() - start}") + print(f"Processing time: {(time.process_time() - start)%1e9 + 7}") From 00fc53de9709648b495ecf707549d6068592fb76 Mon Sep 17 00:00:00 2001 From: happiestbee <87628038+happiestbee@users.noreply.github.com> Date: Sun, 30 Oct 2022 06:49:05 -0400 Subject: [PATCH 165/368] added sumset.py Fixes: #{6563} (#6742) * Create sumset.py * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add descriptive var names * Update maths/sumset.py Co-authored-by: Caeden * Update sumset.py * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Co-authored-by: Christian Clauss --- DIRECTORY.md | 5 +++++ maths/sumset.py | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 maths/sumset.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 8ac9c3be7..38fd1d656 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -560,6 +560,7 @@ * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) * [Maclaurin Series](maths/maclaurin_series.py) + * [Manhattan Distance](maths/manhattan_distance.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) @@ -616,6 +617,7 @@ * [Sum Of Digits](maths/sum_of_digits.py) * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) + * [Sumset](maths/sumset.py) * [Sylvester Sequence](maths/sylvester_sequence.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) @@ -683,6 +685,7 @@ * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) + * [Ideal Gas Law](physics/ideal_gas_law.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) @@ -690,6 +693,7 @@ * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Potential Energy](physics/potential_energy.py) + * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Sheer Stress](physics/sheer_stress.py) ## Project Euler @@ -978,6 +982,7 @@ * [Not Gate](quantum/not_gate.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum 
Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) * [Superdense Coding](quantum/superdense_coding.py) diff --git a/maths/sumset.py b/maths/sumset.py new file mode 100644 index 000000000..fa18f9e24 --- /dev/null +++ b/maths/sumset.py @@ -0,0 +1,37 @@
+"""
+
+Calculates the SumSet of two sets of numbers (A and B)
+
+Source:
+    https://en.wikipedia.org/wiki/Sumset
+
+"""
+
+
+def sumset(set_a: set, set_b: set) -> set:
+    """
+    :param set_a: the first set of numbers
+    :param set_b: the second set of numbers
+    :return: the sumset of set_a and set_b, i.e. {a + b for every a in set_a and b in set_b}
+
+    >>> sumset({1, 2, 3}, {4, 5, 6})
+    {5, 6, 7, 8, 9}
+
+    >>> sumset({1, 2, 3}, {4, 5, 6, 7})
+    {5, 6, 7, 8, 9, 10}
+
+    >>> sumset({1, 2, 3, 4}, 3)
+    Traceback (most recent call last):
+    ...
+    AssertionError: The input value of [set_b=3] is not a set
+    """
+    assert isinstance(set_a, set), f"The input value of [set_a={set_a}] is not a set"
+    assert isinstance(set_b, set), f"The input value of [set_b={set_b}] is not a set"
+
+    return {a + b for a in set_a for b in set_b}
+
+
+if __name__ == "__main__":
+    from doctest import testmod
+
+    testmod() From 6b6d8cc1110b16b38c7e6aafe91cb6f9583669ae Mon Sep 17 00:00:00 2001 From: Micael Pereira <8707982+micaelalex@users.noreply.github.com> Date: Sun, 30 Oct 2022 10:49:22 +0000 Subject: [PATCH 166/368] Adding ELFHash Algorithm (#6731) * Adding ELFHash Algorithm Adding a new Hash Algorithm. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update elf.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update elf.py * Update elf.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update elf.py * Apply suggestions from code review Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden --- hashes/elf.py | 23 +++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 hashes/elf.py diff --git a/hashes/elf.py b/hashes/elf.py new file mode 100644 index 000000000..87fe339da --- /dev/null +++ b/hashes/elf.py @@ -0,0 +1,23 @@
+def elf_hash(data: str) -> int:
+    """
+    Implementation of ElfHash Algorithm, a variant of PJW hash function.
+
+    Returns:
+        [int] -- [32 bit binary int]
+    >>> elf_hash('lorem ipsum')
+    253956621
+    """
+    hash = x = 0
+    for letter in data:
+        hash = (hash << 4) + ord(letter)
+        x = hash & 0xF0000000
+        if x != 0:
+            hash ^= x >> 24
+        hash &= ~x
+    return hash
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod() From cc423007800b8707ea87353be808a90bef13ba18 Mon Sep 17 00:00:00 2001 From: Pravin Date: Sun, 30 Oct 2022 16:20:08 +0530 Subject: [PATCH 167/368] Added Readme file to document the hashing algorithm. (#6743) * Added Readme file to document the hashing algorithm. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- hashes/README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 hashes/README.md diff --git a/hashes/README.md b/hashes/README.md new file mode 100644 index 000000000..6df9a2fb6 --- /dev/null +++ b/hashes/README.md @@ -0,0 +1,17 @@
+# Hashes
+Hashing is the process of mapping any amount of data to a specified size using an algorithm. This is known as a hash value (or, if you're feeling fancy, a hash code, hash sum, or even a hash digest). Hashing is a one-way function, whereas encryption is a two-way function. While it is functionally conceivable to reverse-hash data, the required computing power makes it impractical. Hashing is a one-way street.
+Unlike encryption, which is intended to protect data in transit, hashing is intended to authenticate that a file or piece of data has not been altered—that it is authentic. In other words, it functions as a checksum.
+
+## Common hashing algorithms
+### MD5
+This is one of the first algorithms to gain widespread acceptance. MD5 is a hashing algorithm made by Ron Rivest that is known to suffer from vulnerabilities. It was created in 1992 as the successor to MD4. Currently MD6 is in the works, but as of 2009 Rivest had removed it from NIST consideration for SHA-3.
+
+### SHA
+SHA stands for Secure Hash Algorithm and it’s probably best known as the hashing algorithm used in most SSL/TLS cipher suites. A cipher suite is a collection of ciphers and algorithms that are used for SSL/TLS connections. SHA handles the hashing aspects. SHA-1, as we mentioned earlier, is now deprecated. SHA-2 is now mandatory. SHA-2 is sometimes known as SHA-256, though variants with longer bit lengths are also available.
+
+### SHA256
+SHA 256 is a member of the SHA 2 algorithm family, under which SHA stands for Secure Hash Algorithm. It was a collaborative effort between the NSA and NIST to implement a successor to the SHA 1 family, which was beginning to lose potency against brute force attacks. It was published in 2001.
+The 256 in the name refers to the length of the final hash digest, i.e. the hash value will remain 256 bits regardless of the size of the plaintext/cleartext. Other algorithms in the SHA family are similar to SHA 256 in some ways.
+
+### Luhn
+The Luhn algorithm, also known as the modulus 10 or mod 10 algorithm, is a straightforward checksum formula used to validate a wide range of identification numbers, including credit card numbers, IMEI numbers, and Canadian Social Insurance Numbers. IBM scientist Hans Peter Luhn developed the formula in the 1950s, and companies offering credit cards quickly adopted it. Since the algorithm is in the public domain, anyone can use it. The algorithm is used by most credit cards and many government identification numbers as a simple method of distinguishing valid numbers from mistyped or otherwise incorrect ones. It was created to guard against unintentional errors, not malicious attacks. 
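The two most checkable claims in this README are easy to demonstrate. The short Python sketch below is an editorial illustration, not part of any commit in this series: it shows that a SHA-256 digest is always 256 bits (64 hex characters) regardless of input size, and it spells out the mod-10 (Luhn) check described above. The helper name `is_luhn_valid` and the test number are illustrative assumptions; the repository's own implementation lives in `hashes/luhn.py`.

```python
import hashlib

# A SHA-256 digest is always 256 bits (64 hex characters), whatever the input size.
print(len(hashlib.sha256(b"abc").hexdigest()))            # 64
print(len(hashlib.sha256(b"a" * 1_000_000).hexdigest()))  # 64


def is_luhn_valid(number: str) -> bool:
    """Return True if a digit string passes the mod-10 (Luhn) check."""
    digits = [int(char) for char in number]
    # Double every second digit from the right; subtract 9 when the result exceeds 9.
    for i in range(len(digits) - 2, -1, -2):
        doubled = digits[i] * 2
        digits[i] = doubled - 9 if doubled > 9 else doubled
    return sum(digits) % 10 == 0


print(is_luhn_valid("79927398713"))  # True, a commonly quoted Luhn test number
print(is_luhn_valid("79927398714"))  # False, last digit altered
```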
\ No newline at end of file From b5d7f186f4c93e0a00635e9efabe33971b161fc6 Mon Sep 17 00:00:00 2001 From: Emmanuel Bauma Murairi <40155399+Emmastro@users.noreply.github.com> Date: Sun, 30 Oct 2022 14:52:50 +0400 Subject: [PATCH 168/368] Polynomial (#6745) * implement function to handle polynomial operations * edit documentation * fix type hint and linter errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix short variable name * fix spelling Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/polynomials/__init__.py | 0 .../single_indeterminate_operations.py | 188 ++++++++++++++++++ 2 files changed, 188 insertions(+) create mode 100644 maths/polynomials/__init__.py create mode 100644 maths/polynomials/single_indeterminate_operations.py diff --git a/maths/polynomials/__init__.py b/maths/polynomials/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/maths/polynomials/single_indeterminate_operations.py b/maths/polynomials/single_indeterminate_operations.py new file mode 100644 index 000000000..8bafdb591 --- /dev/null +++ b/maths/polynomials/single_indeterminate_operations.py @@ -0,0 +1,188 @@ +""" + +This module implements a single indeterminate polynomials class +with some basic operations + +Reference: https://en.wikipedia.org/wiki/Polynomial + +""" + +from __future__ import annotations + +from collections.abc import MutableSequence + + +class Polynomial: + def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None: + """ + The coefficients should be in order of degree, from smallest to largest. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p = Polynomial(2, [1, 2, 3, 4]) + Traceback (most recent call last): + ... + ValueError: The number of coefficients should be equal to the degree + 1. + + """ + if len(coefficients) != degree + 1: + raise ValueError( + "The number of coefficients should be equal to the degree + 1." 
+ ) + + self.coefficients: list[float] = list(coefficients) + self.degree = degree + + def __add__(self, polynomial_2: Polynomial) -> Polynomial: + """ + Polynomial addition + >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p + q + 6x^2 + 4x + 2 + """ + + if self.degree > polynomial_2.degree: + coefficients = self.coefficients[:] + for i in range(polynomial_2.degree + 1): + coefficients[i] += polynomial_2.coefficients[i] + return Polynomial(self.degree, coefficients) + else: + coefficients = polynomial_2.coefficients[:] + for i in range(self.degree + 1): + coefficients[i] += self.coefficients[i] + return Polynomial(polynomial_2.degree, coefficients) + + def __sub__(self, polynomial_2: Polynomial) -> Polynomial: + """ + Polynomial subtraction + >>> p = Polynomial(2, [1, 2, 4]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p - q + 1x^2 + """ + return self + polynomial_2 * Polynomial(0, [-1]) + + def __neg__(self) -> Polynomial: + """ + Polynomial negation + >>> p = Polynomial(2, [1, 2, 3]) + >>> -p + - 3x^2 - 2x - 1 + """ + return Polynomial(self.degree, [-c for c in self.coefficients]) + + def __mul__(self, polynomial_2: Polynomial) -> Polynomial: + """ + Polynomial multiplication + >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p * q + 9x^4 + 12x^3 + 10x^2 + 4x + 1 + """ + coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1) + for i in range(self.degree + 1): + for j in range(polynomial_2.degree + 1): + coefficients[i + j] += ( + self.coefficients[i] * polynomial_2.coefficients[j] + ) + + return Polynomial(self.degree + polynomial_2.degree, coefficients) + + def evaluate(self, substitution: int | float) -> int | float: + """ + Evaluates the polynomial at x. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p.evaluate(2) + 17 + """ + result: int | float = 0 + for i in range(self.degree + 1): + result += self.coefficients[i] * (substitution**i) + return result + + def __str__(self) -> str: + """ + >>> p = Polynomial(2, [1, 2, 3]) + >>> print(p) + 3x^2 + 2x + 1 + """ + polynomial = "" + for i in range(self.degree, -1, -1): + if self.coefficients[i] == 0: + continue + elif self.coefficients[i] > 0: + if polynomial: + polynomial += " + " + else: + polynomial += " - " + + if i == 0: + polynomial += str(abs(self.coefficients[i])) + elif i == 1: + polynomial += str(abs(self.coefficients[i])) + "x" + else: + polynomial += str(abs(self.coefficients[i])) + "x^" + str(i) + + return polynomial + + def __repr__(self) -> str: + """ + >>> p = Polynomial(2, [1, 2, 3]) + >>> p + 3x^2 + 2x + 1 + """ + return self.__str__() + + def derivative(self) -> Polynomial: + """ + Returns the derivative of the polynomial. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p.derivative() + 6x + 2 + """ + coefficients: list[float] = [0] * self.degree + for i in range(self.degree): + coefficients[i] = self.coefficients[i + 1] * (i + 1) + return Polynomial(self.degree - 1, coefficients) + + def integral(self, constant: int | float = 0) -> Polynomial: + """ + Returns the integral of the polynomial. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p.integral() + 1.0x^3 + 1.0x^2 + 1.0x + """ + coefficients: list[float] = [0] * (self.degree + 2) + coefficients[0] = constant + for i in range(self.degree + 1): + coefficients[i + 1] = self.coefficients[i] / (i + 1) + return Polynomial(self.degree + 1, coefficients) + + def __eq__(self, polynomial_2: object) -> bool: + """ + Checks if two polynomials are equal. 
+ >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p == q + True + """ + if not isinstance(polynomial_2, Polynomial): + return False + + if self.degree != polynomial_2.degree: + return False + + for i in range(self.degree + 1): + if self.coefficients[i] != polynomial_2.coefficients[i]: + return False + + return True + + def __ne__(self, polynomial_2: object) -> bool: + """ + Checks if two polynomials are not equal. + >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p != q + False + """ + return not self.__eq__(polynomial_2) From 9278d0c6cdaa30115dbfef510e31a805bd3027dd Mon Sep 17 00:00:00 2001 From: Dima I <79413560+DIvkov575@users.noreply.github.com> Date: Sun, 30 Oct 2022 06:54:23 -0400 Subject: [PATCH 169/368] Added archimedes principle (physics) (#7143) * Added archimedes principle (physics) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reformated * reformatted archimedes principles Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/archimedes_principle.py | 49 +++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 physics/archimedes_principle.py diff --git a/physics/archimedes_principle.py b/physics/archimedes_principle.py new file mode 100644 index 000000000..6ecfc65e7 --- /dev/null +++ b/physics/archimedes_principle.py @@ -0,0 +1,49 @@ +""" +Calculates buoyant force on object submerged within static fluid. +Discovered by greek mathematician, Archimedes. The principle is named after him. + +Equation for calculating buoyant force: +Fb = ρ * V * g + +Source: +- https://en.wikipedia.org/wiki/Archimedes%27_principle +""" + + +# Acceleration Constant on Earth (unit m/s^2) +g = 9.80665 + + +def archimedes_principle( + fluid_density: float, volume: float, gravity: float = g +) -> float: + """ + Args: + fluid_density: density of fluid (kg/m^3) + volume: volume of object / liquid being displaced by object + gravity: Acceleration from gravity. 
Gravitational force on system, + Default is Earth Gravity + returns: + buoyant force on object in Newtons + + >>> archimedes_principle(fluid_density=997, volume=0.5, gravity=9.8) + 4885.3 + >>> archimedes_principle(fluid_density=997, volume=0.7) + 6844.061035 + """ + + if fluid_density <= 0: + raise ValueError("Impossible fluid density") + if volume < 0: + raise ValueError("Impossible Object volume") + if gravity <= 0: + raise ValueError("Impossible Gravity") + + return fluid_density * gravity * volume + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() From cafbbab125ebcdac4294f4cbda024b840d230b9a Mon Sep 17 00:00:00 2001 From: Lukas Esc <55601315+Luk-ESC@users.noreply.github.com> Date: Sun, 30 Oct 2022 11:56:54 +0100 Subject: [PATCH 170/368] shortened code using abs() and inplace ops (#7191) n = -n if n < 0 else n --> n = abs(n) n = n // 10 --> n //= 10 --- maths/sum_of_digits.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/maths/sum_of_digits.py b/maths/sum_of_digits.py index 64da00d46..5ad5fe6c9 100644 --- a/maths/sum_of_digits.py +++ b/maths/sum_of_digits.py @@ -14,11 +14,11 @@ def sum_of_digits(n: int) -> int: >>> sum_of_digits(0) 0 """ - n = -n if n < 0 else n + n = abs(n) res = 0 while n > 0: res += n % 10 - n = n // 10 + n //= 10 return res @@ -35,7 +35,7 @@ def sum_of_digits_recursion(n: int) -> int: >>> sum_of_digits_recursion(0) 0 """ - n = -n if n < 0 else n + n = abs(n) return n if n < 10 else n % 10 + sum_of_digits(n // 10) From ab9d8f3874ba550bea0103e0891160b8d9145208 Mon Sep 17 00:00:00 2001 From: Jeremias Moreira Gomes Date: Sun, 30 Oct 2022 08:09:23 -0300 Subject: [PATCH 171/368] Adding a Quine in Python. (#6807) * Adding a Quine in Python. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/quine.py | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 other/quine.py diff --git a/other/quine.py b/other/quine.py new file mode 100644 index 000000000..01e03bbb0 --- /dev/null +++ b/other/quine.py @@ -0,0 +1,10 @@ +#!/bin/python3 +""" +Quine: + +A quine is a computer program which takes no input and produces a copy of its +own source code as its only output (disregarding this docstring and the shebang). 
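As an editorial aside, not part of the commit: the one-liner that closes this file is easier to follow when the %-formatting trick is spelled out. The variable name `template` in the sketch below is an assumption used purely for illustration.

```python
# "%%" collapses to "%" and "%r" is replaced by repr(template),
# i.e. the template quoted as a Python string literal.
template = "print((lambda quine: quine %% quine)(%r))"
print(template % template)
# -> print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
# which is again a statement that prints itself when executed.
```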
+ +More info on: https://en.wikipedia.org/wiki/Quine_(computing) +""" +print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))")) From 94b51f6a91def387b82369401a42710cae4ee4e0 Mon Sep 17 00:00:00 2001 From: sadiqebrahim <75269485+sadiqebrahim@users.noreply.github.com> Date: Sun, 30 Oct 2022 17:22:20 +0530 Subject: [PATCH 172/368] Added Builtin Voltage (#7850) * Added Builtin Voltage * Update builtin_voltage.py * Update electronics/builtin_voltage.py Co-authored-by: Caeden Perelli-Harris * Update electronics/builtin_voltage.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create elf.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/builtin_voltage.py | 67 ++++++++++++++++++++++++++++++++++ hashes/elf.py | 14 +++---- 2 files changed, 73 insertions(+), 8 deletions(-) create mode 100644 electronics/builtin_voltage.py diff --git a/electronics/builtin_voltage.py b/electronics/builtin_voltage.py new file mode 100644 index 000000000..38fde4524 --- /dev/null +++ b/electronics/builtin_voltage.py @@ -0,0 +1,67 @@ +from math import log + +from scipy.constants import Boltzmann, physical_constants + +T = 300 # TEMPERATURE (unit = K) + + +def builtin_voltage( + donor_conc: float, # donor concentration + acceptor_conc: float, # acceptor concentration + intrinsic_conc: float, # intrinsic concentration +) -> float: + """ + This function can calculate the Builtin Voltage of a pn junction diode. + This is calculated from the given three values. + Examples - + >>> builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10) + 0.833370010652644 + >>> builtin_voltage(donor_conc=0, acceptor_conc=1600, intrinsic_conc=200) + Traceback (most recent call last): + ... + ValueError: Donor concentration should be positive + >>> builtin_voltage(donor_conc=1000, acceptor_conc=0, intrinsic_conc=1200) + Traceback (most recent call last): + ... + ValueError: Acceptor concentration should be positive + >>> builtin_voltage(donor_conc=1000, acceptor_conc=1000, intrinsic_conc=0) + Traceback (most recent call last): + ... + ValueError: Intrinsic concentration should be positive + >>> builtin_voltage(donor_conc=1000, acceptor_conc=3000, intrinsic_conc=2000) + Traceback (most recent call last): + ... + ValueError: Donor concentration should be greater than intrinsic concentration + >>> builtin_voltage(donor_conc=3000, acceptor_conc=1000, intrinsic_conc=2000) + Traceback (most recent call last): + ... 
+ ValueError: Acceptor concentration should be greater than intrinsic concentration + """ + + if donor_conc <= 0: + raise ValueError("Donor concentration should be positive") + elif acceptor_conc <= 0: + raise ValueError("Acceptor concentration should be positive") + elif intrinsic_conc <= 0: + raise ValueError("Intrinsic concentration should be positive") + elif donor_conc <= intrinsic_conc: + raise ValueError( + "Donor concentration should be greater than intrinsic concentration" + ) + elif acceptor_conc <= intrinsic_conc: + raise ValueError( + "Acceptor concentration should be greater than intrinsic concentration" + ) + else: + return ( + Boltzmann + * T + * log((donor_conc * acceptor_conc) / intrinsic_conc**2) + / physical_constants["electron volt"][0] + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/hashes/elf.py b/hashes/elf.py index 87fe339da..e4bfcec22 100644 --- a/hashes/elf.py +++ b/hashes/elf.py @@ -2,19 +2,17 @@ def elf_hash(data: str) -> int: """ Implementation of ElfHash Algorithm, a variant of PJW hash function. - Returns: - [int] -- [32 bit binary int] >>> elf_hash('lorem ipsum') 253956621 """ - hash = x = 0 + hash_ = x = 0 for letter in data: - hash = (hash << 4) + ord(letter) - x = hash & 0xF0000000 + hash_ = (hash_ << 4) + ord(letter) + x = hash_ & 0xF0000000 if x != 0: - hash ^= x >> 24 - hash &= ~x - return hash + hash_ ^= x >> 24 + hash_ &= ~x + return hash_ if __name__ == "__main__": From 69d04ff64468d5b2815c0f22190b741393496a9e Mon Sep 17 00:00:00 2001 From: Kushagra Makharia Date: Sun, 30 Oct 2022 18:12:59 +0530 Subject: [PATCH 173/368] Added mean absolute error in linear regression (#7003) * Added mean absolute error in linear regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Code feedback changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Apply suggestions from code review * Update linear_regression.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- machine_learning/linear_regression.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 92ab91c01..75943ac9f 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -17,9 +17,8 @@ def collect_dataset(): :return : dataset obtained from the link, as matrix """ response = requests.get( - "https://raw.githubusercontent.com/yashLadha/" - + "The_Math_of_Intelligence/master/Week1/ADRvs" - + "Rating.csv" + "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/" + "master/Week1/ADRvsRating.csv" ) lines = response.text.splitlines() data = [] @@ -87,6 +86,16 @@ def run_linear_regression(data_x, data_y): return theta +def mean_absolute_error(predicted_y, original_y): + """Return sum of square error for error calculation + :param predicted_y : contains the output of prediction (result vector) + :param original_y : contains values of expected outcome + :return : mean absolute error computed from given feature's + """ + total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y)) + return total / len(original_y) + + def main(): """Driver function""" data = collect_dataset() From 
2c65597093efa80a572a6a739d8f13a8d3579c18 Mon Sep 17 00:00:00 2001 From: kumarsurajsk <104374726+kumarsurajsk@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:22:37 +0530 Subject: [PATCH 174/368] addition_without_arithmetic (#6830) * Addition_without_arithmetic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added_param * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added_param_in_first_sec * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change_align * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update Addition_without_arithmetic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename Addition_without_arithmetic.py to addition_without_arithmetic.py * Update addition_without_arithmetic.py * Update addition_without_arithmetic.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/addition_without_arithmetic.py | 39 ++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 maths/addition_without_arithmetic.py diff --git a/maths/addition_without_arithmetic.py b/maths/addition_without_arithmetic.py new file mode 100644 index 000000000..409604e4c --- /dev/null +++ b/maths/addition_without_arithmetic.py @@ -0,0 +1,39 @@ +""" +Illustrate how to add the integer without arithmetic operation +Author: suraj Kumar +Time Complexity: 1 +https://en.wikipedia.org/wiki/Bitwise_operation +""" + + +def add(first: int, second: int) -> int: + """ + Implementation of addition of integer + + Examples: + >>> add(3, 5) + 8 + >>> add(13, 5) + 18 + >>> add(-7, 2) + -5 + >>> add(0, -7) + -7 + >>> add(-321, 0) + -321 + """ + while second != 0: + c = first & second + first ^= second + second = c << 1 + return first + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + first = int(input("Enter the first number: ").strip()) + second = int(input("Enter the second number: ").strip()) + print(f"{add(first, second) = }") From cf915e704285b1b40b6d0f180d60791204486fd3 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Sun, 30 Oct 2022 17:00:16 +0400 Subject: [PATCH 175/368] add Levinstein distance with Dynamic Programming: up -> down approach (#7171) * add Levinstein distance with Dynamic Programming: up -> down approach * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hint * fix flake8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/min_distance_up_bottom.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update min_distance_up_bottom.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/min_distance_up_bottom.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 dynamic_programming/min_distance_up_bottom.py diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py new file mode 100644 index 000000000..49c361f24 --- /dev/null +++ 
b/dynamic_programming/min_distance_up_bottom.py @@ -0,0 +1,55 @@
+"""
+Author : Alexander Pantyukhin
+Date : October 14, 2022
+This is an implementation of the Dynamic Programming up-bottom approach
+to finding edit distance.
+The aim is to demonstrate the up-bottom approach for solving the task.
+The implementation was tested on the
+leetcode: https://leetcode.com/problems/edit-distance/
+"""
+
+"""
+Levenshtein distance
+Dynamic Programming: up -> down.
+"""
+
+
+def min_distance_up_bottom(word1: str, word2: str) -> int:
+    """
+    >>> min_distance_up_bottom("intention", "execution")
+    5
+    >>> min_distance_up_bottom("intention", "")
+    9
+    >>> min_distance_up_bottom("", "")
+    0
+    >>> min_distance_up_bottom("zooicoarchaeologist", "zoologist")
+    10
+    """
+
+    from functools import lru_cache
+
+    len_word1 = len(word1)
+    len_word2 = len(word2)
+
+    @lru_cache(maxsize=None)
+    def min_distance(index1: int, index2: int) -> int:
+        # if the first word index overflows - delete all remaining from the second word
+        if index1 >= len_word1:
+            return len_word2 - index2
+        # if the second word index overflows - delete all remaining from the first word
+        if index2 >= len_word2:
+            return len_word1 - index1
+        diff = int(word1[index1] != word2[index2])  # current letters not identical
+        return min(
+            1 + min_distance(index1 + 1, index2),
+            1 + min_distance(index1, index2 + 1),
+            diff + min_distance(index1 + 1, index2 + 1),
+        )
+
+    return min_distance(0, 0)
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod() From d1430aa36b0a15a9e018367db210061e7a76dec4 Mon Sep 17 00:00:00 2001 From: Wissam Fawaz <55150850+wissamfawaz@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:14:22 +0200 Subject: [PATCH 176/368] Implemented a Pascal triangle generator (#7317) * Added a Pascal triangle implementation to the other folder * Added Pascal triangle implementation to the other folder. * Added Pascal triangle implementation to the other folder. * Added Pascal triangle implementation to the other folder. * Implemented a Pascal triangle generator. * Reversed Changes to DIRECTORY.md * Reversed changes to .md files * Update other/pascal_triangle.py Removed personal info Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Update pascal_triangle.py Expanded the description of the algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Printed output in triangular form * Update CONTRIBUTING.md Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/pascal_triangle.py | 96 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 other/pascal_triangle.py diff --git a/other/pascal_triangle.py b/other/pascal_triangle.py new file mode 100644 index 000000000..5cc3cee8a --- /dev/null +++ b/other/pascal_triangle.py @@ -0,0 +1,96 @@
+"""
+This implementation demonstrates how to generate the
+elements of Pascal's triangle. The element having
+a row index of r and column index of c can be derived
+as follows:
+triangle[r][c] = triangle[r-1][c-1]+triangle[r-1][c]
+What is Pascal's triangle?
+- It is a triangular array containing binomial coefficients.
+Refer to (https://en.wikipedia.org/wiki/Pascal%27s_triangle)
+for more info about this triangle. 
+""" + + +def print_pascal_triangle(num_rows: int) -> None: + """ + Print Pascal's triangle for different number of rows + >>> print_pascal_triangle(5) + 1 + 1 1 + 1 2 1 + 1 3 3 1 + 1 4 6 4 1 + """ + triangle = generate_pascal_triangle(num_rows) + for row_idx in range(num_rows): + # Print left spaces + for _ in range(num_rows - row_idx - 1): + print(end=" ") + # Print row values + for col_idx in range(row_idx + 1): + if col_idx != row_idx: + print(triangle[row_idx][col_idx], end=" ") + else: + print(triangle[row_idx][col_idx], end="") + print() + + +def generate_pascal_triangle(num_rows: int) -> list[list[int]]: + """ + Create Pascal's triangle for different number of rows + >>> generate_pascal_triangle(1) + [[1]] + >>> generate_pascal_triangle(2) + [[1], [1, 1]] + >>> generate_pascal_triangle(3) + [[1], [1, 1], [1, 2, 1]] + >>> generate_pascal_triangle(4) + [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]] + >>> generate_pascal_triangle(5) + [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]] + """ + triangle: list[list[int]] = [] + for current_row_idx in range(num_rows): + current_row = populate_current_row(triangle, current_row_idx) + triangle.append(current_row) + return triangle + + +def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]: + """ + >>> triangle = [[1]] + >>> populate_current_row(triangle, 1) + [1, 1] + """ + current_row = [-1] * (current_row_idx + 1) + # first and last elements of current row are equal to 1 + current_row[0], current_row[-1] = 1, 1 + for current_col_idx in range(1, current_row_idx): + calculate_current_element( + triangle, current_row, current_row_idx, current_col_idx + ) + return current_row + + +def calculate_current_element( + triangle: list[list[int]], + current_row: list[int], + current_row_idx: int, + current_col_idx: int, +) -> None: + """ + >>> triangle = [[1], [1, 1]] + >>> current_row = [1, -1, 1] + >>> calculate_current_element(triangle, current_row, 2, 1) + >>> current_row + [1, 2, 1] + """ + above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1] + above_to_right_elt = triangle[current_row_idx - 1][current_col_idx] + current_row[current_col_idx] = above_to_left_elt + above_to_right_elt + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 47100b992aef2fd5a7ae001155e3d0411db99ec9 Mon Sep 17 00:00:00 2001 From: Agniv Ghosh <73717822+agnivg@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:45:46 +0530 Subject: [PATCH 177/368] Added code for palindrome partitioning problem under dynamic programming (#7222) * Added code for palindrome partitioning problem under dynamic programming * Updated return type for function * Updated Line 24 according to suggestions * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Update palindrome_partitioning.py * Update palindrome_partitioning.py * is_palindromic Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- .../palindrome_partitioning.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 dynamic_programming/palindrome_partitioning.py diff --git a/dynamic_programming/palindrome_partitioning.py b/dynamic_programming/palindrome_partitioning.py new file mode 100644 index 000000000..c1629440e --- /dev/null +++ b/dynamic_programming/palindrome_partitioning.py @@ -0,0 +1,39 @@ +""" +Given a string s, partition s such that every substring of the +partition is a palindrome. +Find the minimum cuts needed for a palindrome partitioning of s. 
+ +Time Complexity: O(n^2) +Space Complexity: O(n^2) +For other explanations refer to: https://www.youtube.com/watch?v=_H8V5hJUGd0 +""" + + +def find_minimum_partitions(string: str) -> int: + """ + Returns the minimum cuts needed for a palindrome partitioning of string + + >>> find_minimum_partitions("aab") + 1 + >>> find_minimum_partitions("aaa") + 0 + >>> find_minimum_partitions("ababbbabbababa") + 3 + """ + length = len(string) + cut = [0] * length + is_palindromic = [[False for i in range(length)] for j in range(length)] + for i, c in enumerate(string): + mincut = i + for j in range(i + 1): + if c == string[j] and (i - j < 2 or is_palindromic[j + 1][i - 1]): + is_palindromic[j][i] = True + mincut = min(mincut, 0 if j == 0 else (cut[j - 1] + 1)) + cut[i] = mincut + return cut[length - 1] + + +if __name__ == "__main__": + s = input("Enter the string: ").strip() + ans = find_minimum_partitions(s) + print(f"Minimum number of partitions required for the '{s}' is {ans}") From 11e6c6fcc485bf78e5d28c7cf311278a013685d5 Mon Sep 17 00:00:00 2001 From: Gautam Chaurasia <64725629+GautamChaurasia@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:58:27 +0530 Subject: [PATCH 178/368] Added algorithm for finding index of rightmost set bit (#7234) * Added algorithm for finding index of rightmost set bit * applied suggested changes * applied suggested changes * Fixed failing Testcases * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../index_of_rightmost_set_bit.py | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 bit_manipulation/index_of_rightmost_set_bit.py diff --git a/bit_manipulation/index_of_rightmost_set_bit.py b/bit_manipulation/index_of_rightmost_set_bit.py new file mode 100644 index 000000000..eb52ea4e6 --- /dev/null +++ b/bit_manipulation/index_of_rightmost_set_bit.py @@ -0,0 +1,43 @@ +# Reference: https://www.geeksforgeeks.org/position-of-rightmost-set-bit/ + + +def get_index_of_rightmost_set_bit(number: int) -> int: + """ + Take in a positive integer 'number'. + Returns the zero-based index of first set bit in that 'number' from right. + Returns -1, If no set bit found. + + >>> get_index_of_rightmost_set_bit(0) + -1 + >>> get_index_of_rightmost_set_bit(5) + 0 + >>> get_index_of_rightmost_set_bit(36) + 2 + >>> get_index_of_rightmost_set_bit(8) + 3 + >>> get_index_of_rightmost_set_bit(-18) + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer + """ + + if number < 0 or not isinstance(number, int): + raise ValueError("Input must be a non-negative integer") + + intermediate = number & ~(number - 1) + index = 0 + while intermediate: + intermediate >>= 1 + index += 1 + return index - 1 + + +if __name__ == "__main__": + """ + Finding the index of rightmost set bit has some very peculiar use-cases, + especially in finding missing or/and repeating numbers in a list of + positive integers. 
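To make the use-case mentioned above concrete, here is a brief editorial sketch, not part of the commit: if every value in a list appears exactly twice except for two values that appear once, the rightmost set bit of the XOR of all values splits the list into two groups, each containing exactly one of the unique values. The helper name and sample data below are assumptions used for illustration only.

```python
def find_two_unique(numbers: list[int]) -> tuple[int, int]:
    xor_all = 0
    for value in numbers:
        xor_all ^= value  # duplicates cancel out, leaving a ^ b
    mask = xor_all & -xor_all  # isolates the rightmost set bit of a ^ b
    first = second = 0
    for value in numbers:
        if value & mask:
            first ^= value
        else:
            second ^= value
    return first, second


print(find_two_unique([2, 4, 7, 9, 2, 4]))  # (7, 9): only 7 has the isolated bit set
```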
+ """ + import doctest + + doctest.testmod(verbose=True) From e12516debb977e0b3ec9b67d1ddc8770450ae8d1 Mon Sep 17 00:00:00 2001 From: Abhishek Chakraborty Date: Sun, 30 Oct 2022 14:11:05 -0700 Subject: [PATCH 179/368] Shear stress: typo + WIkipedia URL (#7896) --- physics/{sheer_stress.py => shear_stress.py} | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) rename physics/{sheer_stress.py => shear_stress.py} (70%) diff --git a/physics/sheer_stress.py b/physics/shear_stress.py similarity index 70% rename from physics/sheer_stress.py rename to physics/shear_stress.py index 74a2d36b1..129148943 100644 --- a/physics/sheer_stress.py +++ b/physics/shear_stress.py @@ -1,23 +1,31 @@ from __future__ import annotations +""" +Shear stress is a component of stress that is coplanar to the material cross-section. +It arises due to a shear force, the component of the force vector parallel to the +material cross-section. -def sheer_stress( +https://en.wikipedia.org/wiki/Shear_stress +""" + + +def shear_stress( stress: float, tangential_force: float, area: float, ) -> tuple[str, float]: """ This function can calculate any one of the three - - 1. Sheer Stress + 1. Shear Stress 2. Tangential Force 3. Cross-sectional Area This is calculated from the other two provided values Examples - - >>> sheer_stress(stress=25, tangential_force=100, area=0) + >>> shear_stress(stress=25, tangential_force=100, area=0) ('area', 4.0) - >>> sheer_stress(stress=0, tangential_force=1600, area=200) + >>> shear_stress(stress=0, tangential_force=1600, area=200) ('stress', 8.0) - >>> sheer_stress(stress=1000, tangential_force=0, area=1200) + >>> shear_stress(stress=1000, tangential_force=0, area=1200) ('tangential_force', 1200000) """ if (stress, tangential_force, area).count(0) != 1: From c0168cd33f6670f7e32eaa04d77b6be70b3588d4 Mon Sep 17 00:00:00 2001 From: Gmuslow <54784260+Gmuslow@users.noreply.github.com> Date: Sun, 30 Oct 2022 16:33:13 -0500 Subject: [PATCH 180/368] Created equivalent_resistance under Electronics (#6782) * Create resistor_equivalence.py * Update resistor_equivalence.py * Update electronics/resistor_equivalence.py removed an unnecessary space Co-authored-by: Caeden * Update resistor_equivalence.py fixed the snake_case requirement * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update resistor_equivalence.py finalize the naming convention errors (hopefully) * Update resistor_equivalence.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/resistor_equivalence.py | 58 +++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 electronics/resistor_equivalence.py diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py new file mode 100644 index 000000000..7142f838a --- /dev/null +++ b/electronics/resistor_equivalence.py @@ -0,0 +1,58 @@ +# https://byjus.com/equivalent-resistance-formula/ + +from __future__ import annotations + + +def resistor_parallel(resistors: list[float]) -> float: + """ + Req = 1/ (1/R1 + 1/R2 + ... + 1/Rn) + + >>> resistor_parallel([3.21389, 2, 3]) + 0.8737571620498019 + >>> resistor_parallel([3.21389, 2, -3]) + Traceback (most recent call last): + ... + ValueError: Resistor at index 2 has a negative or zero value! 
+ >>> resistor_parallel([3.21389, 2, 0.000]) + Traceback (most recent call last): + ... + ValueError: Resistor at index 2 has a negative or zero value! + """ + + first_sum = 0.00 + index = 0 + for resistor in resistors: + if resistor <= 0: + raise ValueError(f"Resistor at index {index} has a negative or zero value!") + first_sum += 1 / float(resistor) + index += 1 + return 1 / first_sum + + +def resistor_series(resistors: list[float]) -> float: + """ + Req = R1 + R2 + ... + Rn + + Calculate the equivalent resistance for any number of resistors in parallel. + + >>> resistor_series([3.21389, 2, 3]) + 8.21389 + >>> resistor_series([3.21389, 2, -3]) + Traceback (most recent call last): + ... + ValueError: Resistor at index 2 has a negative value! + """ + sum_r = 0.00 + index = 0 + for resistor in resistors: + sum_r += resistor + if resistor < 0: + raise ValueError(f"Resistor at index {index} has a negative value!") + index += 1 + return sum_r + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f8958ebe20522f5b0d32f33fd78870185912a67a Mon Sep 17 00:00:00 2001 From: himanshit0304 <70479061+himanshit0304@users.noreply.github.com> Date: Mon, 31 Oct 2022 04:25:11 +0530 Subject: [PATCH 181/368] Add print_multiplication_table.py (#6607) * Add print_multiplication_table.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added return type description * Update print_multiplication_table.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/print_multiplication_table.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 maths/print_multiplication_table.py diff --git a/maths/print_multiplication_table.py b/maths/print_multiplication_table.py new file mode 100644 index 000000000..dbe4a4be0 --- /dev/null +++ b/maths/print_multiplication_table.py @@ -0,0 +1,26 @@ +def multiplication_table(number: int, number_of_terms: int) -> str: + """ + Prints the multiplication table of a given number till the given number of terms + + >>> print(multiplication_table(3, 5)) + 3 * 1 = 3 + 3 * 2 = 6 + 3 * 3 = 9 + 3 * 4 = 12 + 3 * 5 = 15 + + >>> print(multiplication_table(-4, 6)) + -4 * 1 = -4 + -4 * 2 = -8 + -4 * 3 = -12 + -4 * 4 = -16 + -4 * 5 = -20 + -4 * 6 = -24 + """ + return "\n".join( + f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1) + ) + + +if __name__ == "__main__": + print(multiplication_table(number=5, number_of_terms=10)) From 39e5bc5980254582362ad02bb6616aaa58bfac8a Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 31 Oct 2022 01:13:21 -0400 Subject: [PATCH 182/368] Refactor bottom-up edit distance function to be class method (#7347) * Refactor bottom-up function to be class method * Add type hints * Update convolve function namespace * Remove depreciated np.float * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Renamed function for consistency * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- DIRECTORY.md | 15 +++- dynamic_programming/edit_distance.py | 122 +++++++++++++-------------- 2 files changed, 74 insertions(+), 63 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 38fd1d656..be3a121c8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -46,6 +46,7 @@ * [Count Number Of One 
Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) + * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -307,24 +308,28 @@ * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Sub Array](dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) + * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) * [Minimum Partition](dynamic_programming/minimum_partition.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) + * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) * [Viterbi](dynamic_programming/viterbi.py) ## Electronics + * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) * [Electrical Impedance](electronics/electrical_impedance.py) * [Ohms Law](electronics/ohms_law.py) + * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) ## File Transfer @@ -426,6 +431,7 @@ * [Adler32](hashes/adler32.py) * [Chaos Machine](hashes/chaos_machine.py) * [Djb2](hashes/djb2.py) + * [Elf](hashes/elf.py) * [Enigma Machine](hashes/enigma_machine.py) * [Hamming Code](hashes/hamming_code.py) * [Luhn](hashes/luhn.py) @@ -491,6 +497,7 @@ * [Abs Max](maths/abs_max.py) * [Abs Min](maths/abs_min.py) * [Add](maths/add.py) + * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) * [Aliquot Sum](maths/aliquot_sum.py) * [Allocation Number](maths/allocation_number.py) * [Arc Length](maths/arc_length.py) @@ -581,12 +588,15 @@ * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) * [Polynomial Evaluation](maths/polynomial_evaluation.py) + * Polynomials + * [Single Indeterminate Operations](maths/polynomials/single_indeterminate_operations.py) * [Power Using Recursion](maths/power_using_recursion.py) * [Prime Check](maths/prime_check.py) * [Prime Factors](maths/prime_factors.py) * [Prime Numbers](maths/prime_numbers.py) * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) * [Primelib](maths/primelib.py) + * [Print Multiplication Table](maths/print_multiplication_table.py) * [Proth Number](maths/proth_number.py) * [Pythagoras](maths/pythagoras.py) * [Qr Decomposition](maths/qr_decomposition.py) @@ -676,12 +686,15 @@ * [Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) * [Nested Brackets](other/nested_brackets.py) + * [Pascal 
Triangle](other/pascal_triangle.py) * [Password Generator](other/password_generator.py) + * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) * [Sdes](other/sdes.py) * [Tower Of Hanoi](other/tower_of_hanoi.py) ## Physics + * [Archimedes Principle](physics/archimedes_principle.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) @@ -694,7 +707,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) - * [Sheer Stress](physics/sheer_stress.py) + * [Shear Stress](physics/shear_stress.py) ## Project Euler * Problem 001 diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py index fe23431a7..774aa0473 100644 --- a/dynamic_programming/edit_distance.py +++ b/dynamic_programming/edit_distance.py @@ -19,74 +19,72 @@ class EditDistance: """ def __init__(self): - self.__prepare__() + self.word1 = "" + self.word2 = "" + self.dp = [] - def __prepare__(self, n=0, m=0): - self.dp = [[-1 for y in range(0, m)] for x in range(0, n)] - - def __solve_dp(self, x, y): - if x == -1: - return y + 1 - elif y == -1: - return x + 1 - elif self.dp[x][y] > -1: - return self.dp[x][y] + def __min_dist_top_down_dp(self, m: int, n: int) -> int: + if m == -1: + return n + 1 + elif n == -1: + return m + 1 + elif self.dp[m][n] > -1: + return self.dp[m][n] else: - if self.a[x] == self.b[y]: - self.dp[x][y] = self.__solve_dp(x - 1, y - 1) + if self.word1[m] == self.word2[n]: + self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1) else: - self.dp[x][y] = 1 + min( - self.__solve_dp(x, y - 1), - self.__solve_dp(x - 1, y), - self.__solve_dp(x - 1, y - 1), - ) + insert = self.__min_dist_top_down_dp(m, n - 1) + delete = self.__min_dist_top_down_dp(m - 1, n) + replace = self.__min_dist_top_down_dp(m - 1, n - 1) + self.dp[m][n] = 1 + min(insert, delete, replace) - return self.dp[x][y] + return self.dp[m][n] - def solve(self, a, b): - if isinstance(a, bytes): - a = a.decode("ascii") + def min_dist_top_down(self, word1: str, word2: str) -> int: + """ + >>> EditDistance().min_dist_top_down("intention", "execution") + 5 + >>> EditDistance().min_dist_top_down("intention", "") + 9 + >>> EditDistance().min_dist_top_down("", "") + 0 + """ + self.word1 = word1 + self.word2 = word2 + self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))] - if isinstance(b, bytes): - b = b.decode("ascii") + return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1) - self.a = str(a) - self.b = str(b) + def min_dist_bottom_up(self, word1: str, word2: str) -> int: + """ + >>> EditDistance().min_dist_bottom_up("intention", "execution") + 5 + >>> EditDistance().min_dist_bottom_up("intention", "") + 9 + >>> EditDistance().min_dist_bottom_up("", "") + 0 + """ + self.word1 = word1 + self.word2 = word2 + m = len(word1) + n = len(word2) + self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)] - self.__prepare__(len(a), len(b)) - - return self.__solve_dp(len(a) - 1, len(b) - 1) - - -def min_distance_bottom_up(word1: str, word2: str) -> int: - """ - >>> min_distance_bottom_up("intention", "execution") - 5 - >>> min_distance_bottom_up("intention", "") - 9 - >>> min_distance_bottom_up("", "") - 0 - """ - m = len(word1) - n = len(word2) - dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)] - for i in range(m + 1): - for j in range(n + 
1): - - if i == 0: # first string is empty - dp[i][j] = j - elif j == 0: # second string is empty - dp[i][j] = i - elif ( - word1[i - 1] == word2[j - 1] - ): # last character of both substing is equal - dp[i][j] = dp[i - 1][j - 1] - else: - insert = dp[i][j - 1] - delete = dp[i - 1][j] - replace = dp[i - 1][j - 1] - dp[i][j] = 1 + min(insert, delete, replace) - return dp[m][n] + for i in range(m + 1): + for j in range(n + 1): + if i == 0: # first string is empty + self.dp[i][j] = j + elif j == 0: # second string is empty + self.dp[i][j] = i + elif word1[i - 1] == word2[j - 1]: # last characters are equal + self.dp[i][j] = self.dp[i - 1][j - 1] + else: + insert = self.dp[i][j - 1] + delete = self.dp[i - 1][j] + replace = self.dp[i - 1][j - 1] + self.dp[i][j] = 1 + min(insert, delete, replace) + return self.dp[m][n] if __name__ == "__main__": @@ -99,7 +97,7 @@ if __name__ == "__main__": S2 = input("Enter the second string: ").strip() print() - print(f"The minimum Edit Distance is: {solver.solve(S1, S2)}") - print(f"The minimum Edit Distance is: {min_distance_bottom_up(S1, S2)}") + print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}") + print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}") print() print("*************** End of Testing Edit Distance DP Algorithm ***************") From 0fd1ccb13358feff2d6ea8dd62200cabe363ee8e Mon Sep 17 00:00:00 2001 From: Roberts Date: Mon, 31 Oct 2022 13:31:15 +0200 Subject: [PATCH 183/368] Adding inductive reactance calculation (#6625) * Adding inductive reactance calculation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * from math import pi * 0007957747154594767 * 36420441699332 * 2199114857512855 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/ind_reactance.py | 69 ++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 electronics/ind_reactance.py diff --git a/electronics/ind_reactance.py b/electronics/ind_reactance.py new file mode 100644 index 000000000..3f77ef628 --- /dev/null +++ b/electronics/ind_reactance.py @@ -0,0 +1,69 @@ +# https://en.wikipedia.org/wiki/Electrical_reactance#Inductive_reactance +from __future__ import annotations + +from math import pi + + +def ind_reactance( + inductance: float, frequency: float, reactance: float +) -> dict[str, float]: + """ + Calculate inductive reactance, frequency or inductance from two given electrical + properties then return name/value pair of the zero value in a Python dict. + + Parameters + ---------- + inductance : float with units in Henries + + frequency : float with units in Hertz + + reactance : float with units in Ohms + + >>> ind_reactance(-35e-6, 1e3, 0) + Traceback (most recent call last): + ... + ValueError: Inductance cannot be negative + + >>> ind_reactance(35e-6, -1e3, 0) + Traceback (most recent call last): + ... + ValueError: Frequency cannot be negative + + >>> ind_reactance(35e-6, 0, -1) + Traceback (most recent call last): + ... 
+ ValueError: Inductive reactance cannot be negative + + >>> ind_reactance(0, 10e3, 50) + {'inductance': 0.0007957747154594767} + + >>> ind_reactance(35e-3, 0, 50) + {'frequency': 227.36420441699332} + + >>> ind_reactance(35e-6, 1e3, 0) + {'reactance': 0.2199114857512855} + + """ + + if (inductance, frequency, reactance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if inductance < 0: + raise ValueError("Inductance cannot be negative") + if frequency < 0: + raise ValueError("Frequency cannot be negative") + if reactance < 0: + raise ValueError("Inductive reactance cannot be negative") + if inductance == 0: + return {"inductance": reactance / (2 * pi * frequency)} + elif frequency == 0: + return {"frequency": reactance / (2 * pi * inductance)} + elif reactance == 0: + return {"reactance": 2 * pi * frequency * inductance} + else: + raise ValueError("Exactly one argument must be 0") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b2165a65fcf1a087236d2a1527b10b64a12f69e6 Mon Sep 17 00:00:00 2001 From: Alex de la Cruz <46356295+acrulopez@users.noreply.github.com> Date: Mon, 31 Oct 2022 14:14:33 +0100 Subject: [PATCH 184/368] Added Radix Tree in data structures (#6616) * added radix tree to data structures * added doctests * solved flake8 * added type hints * added description for delete function * Update data_structures/trie/radix_tree.py * Update radix_tree.py * Update radix_tree.py * Update radix_tree.py Co-authored-by: Alex de la Cruz Co-authored-by: Christian Clauss --- data_structures/trie/radix_tree.py | 223 +++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) create mode 100644 data_structures/trie/radix_tree.py diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py new file mode 100644 index 000000000..66890346e --- /dev/null +++ b/data_structures/trie/radix_tree.py @@ -0,0 +1,223 @@ +""" +A Radix Tree is a data structure that represents a space-optimized +trie (prefix tree) in whicheach node that is the only child is merged +with its parent [https://en.wikipedia.org/wiki/Radix_tree] +""" + + +class RadixNode: + def __init__(self, prefix: str = "", is_leaf: bool = False) -> None: + # Mapping from the first character of the prefix of the node + self.nodes: dict[str, RadixNode] = {} + + # A node will be a leaf if the tree contains its word + self.is_leaf = is_leaf + + self.prefix = prefix + + def match(self, word: str) -> tuple[str, str, str]: + """Compute the common substring of the prefix of the node and a word + + Args: + word (str): word to compare + + Returns: + (str, str, str): common substring, remaining prefix, remaining word + + >>> RadixNode("myprefix").match("mystring") + ('my', 'prefix', 'string') + """ + x = 0 + for q, w in zip(self.prefix, word): + if q != w: + break + + x += 1 + + return self.prefix[:x], self.prefix[x:], word[x:] + + def insert_many(self, words: list[str]) -> None: + """Insert many words in the tree + + Args: + words (list[str]): list of words + + >>> RadixNode("myprefix").insert_many(["mystring", "hello"]) + """ + for word in words: + self.insert(word) + + def insert(self, word: str) -> None: + """Insert a word into the tree + + Args: + word (str): word to insert + + >>> RadixNode("myprefix").insert("mystring") + """ + # Case 1: If the word is the prefix of the node + # Solution: We set the current node as leaf + if self.prefix == word: + self.is_leaf = True + + # Case 2: The node has no edges that have a prefix to the word + # Solution: We create an edge from 
the current node to a new one + # containing the word + elif word[0] not in self.nodes: + self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True) + + else: + incoming_node = self.nodes[word[0]] + matching_string, remaining_prefix, remaining_word = incoming_node.match( + word + ) + + # Case 3: The node prefix is equal to the matching + # Solution: We insert remaining word on the next node + if remaining_prefix == "": + self.nodes[matching_string[0]].insert(remaining_word) + + # Case 4: The word is greater equal to the matching + # Solution: Create a node in between both nodes, change + # prefixes and add the new node for the remaining word + else: + incoming_node.prefix = remaining_prefix + + aux_node = self.nodes[matching_string[0]] + self.nodes[matching_string[0]] = RadixNode(matching_string, False) + self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node + + if remaining_word == "": + self.nodes[matching_string[0]].is_leaf = True + else: + self.nodes[matching_string[0]].insert(remaining_word) + + def find(self, word: str) -> bool: + """Returns if the word is on the tree + + Args: + word (str): word to check + + Returns: + bool: True if the word appears on the tree + + >>> RadixNode("myprefix").find("mystring") + False + """ + incoming_node = self.nodes.get(word[0], None) + if not incoming_node: + return False + else: + matching_string, remaining_prefix, remaining_word = incoming_node.match( + word + ) + # If there is remaining prefix, the word can't be on the tree + if remaining_prefix != "": + return False + # This applies when the word and the prefix are equal + elif remaining_word == "": + return incoming_node.is_leaf + # We have word remaining so we check the next node + else: + return incoming_node.find(remaining_word) + + def delete(self, word: str) -> bool: + """Deletes a word from the tree if it exists + + Args: + word (str): word to be deleted + + Returns: + bool: True if the word was found and deleted. 
False if word is not found + + >>> RadixNode("myprefix").delete("mystring") + False + """ + incoming_node = self.nodes.get(word[0], None) + if not incoming_node: + return False + else: + matching_string, remaining_prefix, remaining_word = incoming_node.match( + word + ) + # If there is remaining prefix, the word can't be on the tree + if remaining_prefix != "": + return False + # We have word remaining so we check the next node + elif remaining_word != "": + return incoming_node.delete(remaining_word) + else: + # If it is not a leaf, we don't have to delete + if not incoming_node.is_leaf: + return False + else: + # We delete the nodes if no edges go from it + if len(incoming_node.nodes) == 0: + del self.nodes[word[0]] + # We merge the current node with its only child + if len(self.nodes) == 1 and not self.is_leaf: + merging_node = list(self.nodes.values())[0] + self.is_leaf = merging_node.is_leaf + self.prefix += merging_node.prefix + self.nodes = merging_node.nodes + # If there is more than 1 edge, we just mark it as non-leaf + elif len(incoming_node.nodes) > 1: + incoming_node.is_leaf = False + # If there is 1 edge, we merge it with its child + else: + merging_node = list(incoming_node.nodes.values())[0] + incoming_node.is_leaf = merging_node.is_leaf + incoming_node.prefix += merging_node.prefix + incoming_node.nodes = merging_node.nodes + + return True + + def print_tree(self, height: int = 0) -> None: + """Print the tree + + Args: + height (int, optional): Height of the printed node + """ + if self.prefix != "": + print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "") + + for value in self.nodes.values(): + value.print_tree(height + 1) + + +def test_trie() -> bool: + words = "banana bananas bandana band apple all beast".split() + root = RadixNode() + root.insert_many(words) + + assert all(root.find(word) for word in words) + assert not root.find("bandanas") + assert not root.find("apps") + root.delete("all") + assert not root.find("all") + root.delete("banana") + assert not root.find("banana") + assert root.find("bananas") + + return True + + +def pytests() -> None: + assert test_trie() + + +def main() -> None: + """ + >>> pytests() + """ + root = RadixNode() + words = "banana bananas bandanas bandana band apple all beast".split() + root.insert_many(words) + + print("Words:", words) + print("Tree:") + root.print_tree() + + +if __name__ == "__main__": + main() From a31edd4477af958adb840dadd568c38eecc9567b Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 31 Oct 2022 14:50:03 +0100 Subject: [PATCH 185/368] Test on Python 3.11 (#6591) * Test on Python 3.11 release candidate 2 * tensorflow; python<3.11 * tensorflow; python_version < 3.11 * tensorflow; python_version < "3.11" * sympy, tensorflow; python_version < "3.11" * sklearn; python_version < "3.11" * matplotlib, pandas, qiskit * statsmodels; python_version < "3.11" * Bring back Pandas * Problem deps are qiskit, statsmodels, and tensorflow * updating DIRECTORY.md * python-version: 3.11-dev --> 3.11 * updating DIRECTORY.md * Add pytest --ignore to pyproject.toml * Update build.yml * Update pyproject.toml * Update pyproject.toml * Python 3.11 Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 13 +++++++++++-- CONTRIBUTING.md | 2 +- DIRECTORY.md | 1 + requirements.txt | 6 +++--- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 159ce13b3..1069c68d2 100644 --- 
a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: 3.x + python-version: 3.11 - uses: actions/cache@v3 with: path: ~/.cache/pip @@ -22,6 +22,15 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - run: pytest --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . + # See: #6591 for re-enabling tests on Python v3.11 + run: pytest + --ignore=computer_vision/cnn_classification.py + --ignore=machine_learning/forecasting/run.py + --ignore=machine_learning/lstm/lstm_prediction.py + --ignore=quantum/ + --ignore=project_euler/ + --ignore=scripts/validate_solutions.py + --cov-report=term-missing:skip-covered + --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 37e020b8f..3ce5bd1ed 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.10+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.11+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. diff --git a/DIRECTORY.md b/DIRECTORY.md index be3a121c8..0b0d1e6a7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -328,6 +328,7 @@ * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) * [Electrical Impedance](electronics/electrical_impedance.py) + * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) diff --git a/requirements.txt b/requirements.txt index 9ffe784c9..ae6203998 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,14 +8,14 @@ opencv-python pandas pillow projectq -qiskit +qiskit; python_version < "3.11" requests rich scikit-fuzzy sklearn -statsmodels +statsmodels; python_version < "3.11" sympy -tensorflow +tensorflow; python_version < "3.11" texttable tweepy xgboost From fecbf59436702b34b987773aa872d79f5df466df Mon Sep 17 00:00:00 2001 From: TechFreak107 <62158210+TechFreak107@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:28:42 +0530 Subject: [PATCH 186/368] Modified 'pascal_triangle.py' program (#7901) * Added pascals_triangle.py program to maths directory * Deleted 'pascals_triangle.py' because of duplication. Added a optimized function to generate pascal's triangle to 'pascal_triangle.py' program. Added some aadditional doctests to the existing function. Added some type check functionality to the existing function. 
* Modified type check hints in 'generate_pascal_triangle_optimized' function' q * Modified 'pascal_triangle' prgram * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pascal_triangle.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- other/pascal_triangle.py | 109 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 101 insertions(+), 8 deletions(-) diff --git a/other/pascal_triangle.py b/other/pascal_triangle.py index 5cc3cee8a..7f6555f9c 100644 --- a/other/pascal_triangle.py +++ b/other/pascal_triangle.py @@ -1,13 +1,10 @@ """ -This implementation demonstrates how to generate the -elements of a Pascal's triangle. The element having -a row index of r and column index of c can be derived -as follows: +This implementation demonstrates how to generate the elements of a Pascal's triangle. +The element havingva row index of r and column index of c can be derivedvas follows: triangle[r][c] = triangle[r-1][c-1]+triangle[r-1][c] -What is Pascal's triangle? -- It is a triangular array containing binomial coefficients. -Refer to (https://en.wikipedia.org/wiki/Pascal%27s_triangle) -for more info about this triangle. + +A Pascal's triangle is a triangular array containing binomial coefficients. +https://en.wikipedia.org/wiki/Pascal%27s_triangle """ @@ -38,6 +35,8 @@ def print_pascal_triangle(num_rows: int) -> None: def generate_pascal_triangle(num_rows: int) -> list[list[int]]: """ Create Pascal's triangle for different number of rows + >>> generate_pascal_triangle(0) + [] >>> generate_pascal_triangle(1) [[1]] >>> generate_pascal_triangle(2) @@ -48,7 +47,26 @@ def generate_pascal_triangle(num_rows: int) -> list[list[int]]: [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]] >>> generate_pascal_triangle(5) [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]] + >>> generate_pascal_triangle(-5) + Traceback (most recent call last): + ... + ValueError: The input value of 'num_rows' should be greater than or equal to 0 + >>> generate_pascal_triangle(7.89) + Traceback (most recent call last): + ... + TypeError: The input value of 'num_rows' should be 'int' """ + + if not isinstance(num_rows, int): + raise TypeError("The input value of 'num_rows' should be 'int'") + + if num_rows == 0: + return [] + elif num_rows < 0: + raise ValueError( + "The input value of 'num_rows' should be greater than or equal to 0" + ) + triangle: list[list[int]] = [] for current_row_idx in range(num_rows): current_row = populate_current_row(triangle, current_row_idx) @@ -90,7 +108,82 @@ def calculate_current_element( current_row[current_col_idx] = above_to_left_elt + above_to_right_elt +def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]: + """ + This function returns a matrix representing the corresponding pascal's triangle + according to the given input of number of rows of Pascal's triangle to be generated. + It reduces the operations done to generate a row by half + by eliminating redundant calculations. + + :param num_rows: Integer specifying the number of rows in the Pascal's triangle + :return: 2-D List (matrix) representing the Pascal's triangle + + Return the Pascal's triangle of given rows + >>> generate_pascal_triangle_optimized(3) + [[1], [1, 1], [1, 2, 1]] + >>> generate_pascal_triangle_optimized(1) + [[1]] + >>> generate_pascal_triangle_optimized(0) + [] + >>> generate_pascal_triangle_optimized(-5) + Traceback (most recent call last): + ... 
+ ValueError: The input value of 'num_rows' should be greater than or equal to 0 + >>> generate_pascal_triangle_optimized(7.89) + Traceback (most recent call last): + ... + TypeError: The input value of 'num_rows' should be 'int' + """ + + if not isinstance(num_rows, int): + raise TypeError("The input value of 'num_rows' should be 'int'") + + if num_rows == 0: + return [] + elif num_rows < 0: + raise ValueError( + "The input value of 'num_rows' should be greater than or equal to 0" + ) + + result: list[list[int]] = [[1]] + + for row_index in range(1, num_rows): + temp_row = [0] + result[-1] + [0] + row_length = row_index + 1 + # Calculate the number of distinct elements in a row + distinct_elements = sum(divmod(row_length, 2)) + row_first_half = [ + temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1) + ] + row_second_half = row_first_half[: (row_index + 1) // 2] + row_second_half.reverse() + row = row_first_half + row_second_half + result.append(row) + + return result + + +def benchmark() -> None: + """ + Benchmark multiple functions, with three different length int values. + """ + from collections.abc import Callable + from timeit import timeit + + def benchmark_a_function(func: Callable, value: int) -> None: + call = f"{func.__name__}({value})" + timing = timeit(f"__main__.{call}", setup="import __main__") + # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") + print(f"{call:38} -- {timing:.4f} seconds") + + for value in range(15): # (1, 7, 14): + for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): + benchmark_a_function(func, value) + print() + + if __name__ == "__main__": import doctest doctest.testmod() + benchmark() From 506b63f02da11691f19c4fd86c120e1d54842ea4 Mon Sep 17 00:00:00 2001 From: Shreyas Kamath <42207943+s18k@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:34:42 +0530 Subject: [PATCH 187/368] Create convert_number_to_words.py (#6788) * Create convert_number_to_words.py A Python Program to convert numerical digits to English words. An Application of this can be in a Payment Application for confirmation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/convert_number_to_words.py | 111 +++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 web_programming/convert_number_to_words.py diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py new file mode 100644 index 000000000..50612dec2 --- /dev/null +++ b/web_programming/convert_number_to_words.py @@ -0,0 +1,111 @@ +import math + + +def convert(number: int) -> str: + """ + Given a number return the number in words. 
+ + >>> convert(123) + 'OneHundred,TwentyThree' + """ + if number == 0: + words = "Zero" + return words + else: + digits = math.log10(number) + digits = digits + 1 + singles = {} + singles[0] = "" + singles[1] = "One" + singles[2] = "Two" + singles[3] = "Three" + singles[4] = "Four" + singles[5] = "Five" + singles[6] = "Six" + singles[7] = "Seven" + singles[8] = "Eight" + singles[9] = "Nine" + + doubles = {} + doubles[0] = "" + doubles[2] = "Twenty" + doubles[3] = "Thirty" + doubles[4] = "Forty" + doubles[5] = "Fifty" + doubles[6] = "Sixty" + doubles[7] = "Seventy" + doubles[8] = "Eighty" + doubles[9] = "Ninety" + + teens = {} + teens[0] = "Ten" + teens[1] = "Eleven" + teens[2] = "Twelve" + teens[3] = "Thirteen" + teens[4] = "Fourteen" + teens[5] = "Fifteen" + teens[6] = "Sixteen" + teens[7] = "Seventeen" + teens[8] = "Eighteen" + teens[9] = "Nineteen" + + placevalue = {} + placevalue[2] = "Hundred," + placevalue[3] = "Thousand," + placevalue[5] = "Lakh," + placevalue[7] = "Crore," + + temp_num = number + words = "" + counter = 0 + digits = int(digits) + while counter < digits: + current = temp_num % 10 + if counter % 2 == 0: + addition = "" + if counter in placevalue.keys() and current != 0: + addition = placevalue[counter] + if counter == 2: + words = singles[current] + addition + words + elif counter == 0: + if ((temp_num % 100) // 10) == 1: + words = teens[current] + addition + words + temp_num = temp_num // 10 + counter += 1 + else: + words = singles[current] + addition + words + + else: + words = doubles[current] + addition + words + + else: + if counter == 1: + if current == 1: + words = teens[number % 10] + words + else: + addition = "" + if counter in placevalue.keys(): + addition = placevalue[counter] + words = doubles[current] + addition + words + else: + addition = "" + if counter in placevalue.keys(): + if current == 0 and ((temp_num % 100) // 10) == 0: + addition = "" + else: + addition = placevalue[counter] + if ((temp_num % 100) // 10) == 1: + words = teens[current] + addition + words + temp_num = temp_num // 10 + counter += 1 + else: + words = singles[current] + addition + words + counter += 1 + temp_num = temp_num // 10 + return words + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ded5deabe9f9f1c8f2a57da8657056480f142b55 Mon Sep 17 00:00:00 2001 From: Shriyans Gandhi <41372639+shri30yans@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:45:37 +0530 Subject: [PATCH 188/368] Dodecahedron surface area and volume (#6606) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Hexagonal number sequence A hexagonal number sequence is a sequence of figurate numbers where the nth hexagonal number hₙ is the number of distinct dots in a pattern of dots consisting of the outlines of regular hexagons with sides up to n dots, when the hexagons are overlaid so that they share one vertex. This program returns the hexagonal number sequence of n length. 
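The closed form behind the sequence described above is hₙ = n(2n - 1); a minimal sketch, where the function name and the choice to start the sequence at n = 1 are illustrative assumptions rather than details taken from the patch:

def hexagonal_numbers(length: int) -> list[int]:
    # hexagonal numbers: h_n = n * (2n - 1) for n = 1..length
    return [n * (2 * n - 1) for n in range(1, length + 1)]

print(hexagonal_numbers(5))  # [1, 6, 15, 28, 45]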
* Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update and rename hexagonalnumbers.py to hexagonal_numbers.py * Length must be a positive integer * Create dodecahedron.py * Update dodecahedron.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dodecahedron.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dodecahedron.py * Update dodecahedron.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dodecahedron.py * Update dodecahedron.py * Apply suggestions from code review Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update dodecahedron.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> --- maths/dodecahedron.py | 73 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 maths/dodecahedron.py diff --git a/maths/dodecahedron.py b/maths/dodecahedron.py new file mode 100644 index 000000000..856245f4a --- /dev/null +++ b/maths/dodecahedron.py @@ -0,0 +1,73 @@ +# dodecahedron.py + +""" +A regular dodecahedron is a three-dimensional figure made up of +12 pentagon faces having the same equal size. +""" + + +def dodecahedron_surface_area(edge: float) -> float: + """ + Calculates the surface area of a regular dodecahedron + a = 3 * ((25 + 10 * (5** (1 / 2))) ** (1 / 2 )) * (e**2) + where: + a --> is the area of the dodecahedron + e --> is the length of the edge + reference-->"Dodecahedron" Study.com + + + :param edge: length of the edge of the dodecahedron + :type edge: float + :return: the surface area of the dodecahedron as a float + + + Tests: + >>> dodecahedron_surface_area(5) + 516.1432201766901 + >>> dodecahedron_surface_area(10) + 2064.5728807067603 + >>> dodecahedron_surface_area(-1) + Traceback (most recent call last): + ... + ValueError: Length must be a positive. + """ + + if edge <= 0 or not isinstance(edge, int): + raise ValueError("Length must be a positive.") + return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) + + +def dodecahedron_volume(edge: float) -> float: + """ + Calculates the volume of a regular dodecahedron + v = ((15 + (7 * (5** (1 / 2)))) / 4) * (e**3) + where: + v --> is the volume of the dodecahedron + e --> is the length of the edge + reference-->"Dodecahedron" Study.com + + + :param edge: length of the edge of the dodecahedron + :type edge: float + :return: the volume of the dodecahedron as a float + + Tests: + >>> dodecahedron_volume(5) + 957.8898700780791 + >>> dodecahedron_volume(10) + 7663.118960624633 + >>> dodecahedron_volume(-1) + Traceback (most recent call last): + ... + ValueError: Length must be a positive. 
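As a numeric check of the closed form above, worked by hand rather than taken from the patch: for edge = 5, 3 * sqrt(25 + 10 * sqrt(5)) * 5**2 is roughly 3 * 6.8819 * 25, about 516.143, which agrees with the doctest value.

from math import sqrt
print(3 * sqrt(25 + 10 * sqrt(5)) * 5**2)  # ~516.1432201766901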
+ """ + + if edge <= 0 or not isinstance(edge, int): + raise ValueError("Length must be a positive.") + return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 21601a4070830069101bbb0ddc2d662eac68d627 Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Mon, 31 Oct 2022 13:32:54 -0400 Subject: [PATCH 189/368] create quantum_fourier_transform (#6682) * create quantum_fourier_transform This is part of the #Hacktoberfest. I build the quantum fourier transform for N qubits. (n = 3 in the example) Best, Kevin * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update q_fourier_transform.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add the doctest! * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update q_fourier_transform.py * Pass first then fail Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- quantum/q_fourier_transform.py | 97 ++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 quantum/q_fourier_transform.py diff --git a/quantum/q_fourier_transform.py b/quantum/q_fourier_transform.py new file mode 100644 index 000000000..d138dfb45 --- /dev/null +++ b/quantum/q_fourier_transform.py @@ -0,0 +1,97 @@ +""" +Build the quantum fourier transform (qft) for a desire +number of quantum bits using Qiskit framework. This +experiment run in IBM Q simulator with 10000 shots. +This circuit can be use as a building block to design +the Shor's algorithm in quantum computing. As well as, +quantum phase estimation among others. +. +References: +https://en.wikipedia.org/wiki/Quantum_Fourier_transform +https://qiskit.org/textbook/ch-algorithms/quantum-fourier-transform.html +""" + +import math + +import numpy as np +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts: + """ + # >>> quantum_fourier_transform(2) + # {'00': 2500, '01': 2500, '11': 2500, '10': 2500} + # quantum circuit for number_of_qubits = 3: + ┌───┐ + qr_0: ──────■──────────────────────■───────┤ H ├─X─ + │ ┌───┐ │P(π/2) └───┘ │ + qr_1: ──────┼────────■───────┤ H ├─■─────────────┼─ + ┌───┐ │P(π/4) │P(π/2) └───┘ │ + qr_2: ┤ H ├─■────────■───────────────────────────X─ + └───┘ + cr: 3/═════════════════════════════════════════════ + Args: + n : number of qubits + Returns: + qiskit.result.counts.Counts: distribute counts. + + >>> quantum_fourier_transform(2) + {'00': 2500, '01': 2500, '10': 2500, '11': 2500} + >>> quantum_fourier_transform(-1) + Traceback (most recent call last): + ... + ValueError: number of qubits must be > 0. + >>> quantum_fourier_transform('a') + Traceback (most recent call last): + ... + TypeError: number of qubits must be a integer. + >>> quantum_fourier_transform(100) + Traceback (most recent call last): + ... + ValueError: number of qubits too large to simulate(>10). + >>> quantum_fourier_transform(0.5) + Traceback (most recent call last): + ... + ValueError: number of qubits must be exact integer. 
+ """ + if type(number_of_qubits) == str: + raise TypeError("number of qubits must be a integer.") + if not number_of_qubits > 0: + raise ValueError("number of qubits must be > 0.") + if math.floor(number_of_qubits) != number_of_qubits: + raise ValueError("number of qubits must be exact integer.") + if number_of_qubits > 10: + raise ValueError("number of qubits too large to simulate(>10).") + + qr = QuantumRegister(number_of_qubits, "qr") + cr = ClassicalRegister(number_of_qubits, "cr") + + quantum_circuit = QuantumCircuit(qr, cr) + + counter = number_of_qubits + + for i in range(counter): + + quantum_circuit.h(number_of_qubits - i - 1) + counter -= 1 + for j in range(counter): + quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter) + + for k in range(number_of_qubits // 2): + quantum_circuit.swap(k, number_of_qubits - k - 1) + + # measure all the qubits + quantum_circuit.measure(qr, cr) + # simulate with 10000 shots + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=10000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print( + f"Total count for quantum fourier transform state is: \ + {quantum_fourier_transform(3)}" + ) From 6cd7c49525b520fc5fe44ac0568fe39393ff85b4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 21:33:08 +0100 Subject: [PATCH 190/368] [pre-commit.ci] pre-commit autoupdate (#7920) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.1.0 → v3.2.0](https://github.com/asottile/pyupgrade/compare/v3.1.0...v3.2.0) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 004def5e4..a0ea03b9b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.1.0 + rev: v3.2.0 hooks: - id: pyupgrade args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 0b0d1e6a7..5c4a032db 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -238,6 +238,7 @@ * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) * Trie + * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) ## Digital Image Processing @@ -526,6 +527,7 @@ * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) * [Decimal Isolate](maths/decimal_isolate.py) + * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) * [Entropy](maths/entropy.py) @@ -994,6 +996,7 @@ * [Deutsch Jozsa](quantum/deutsch_jozsa.py) * [Half Adder](quantum/half_adder.py) * [Not Gate](quantum/not_gate.py) + * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) @@ -1129,6 +1132,7 @@ ## Web Programming * [Co2 
Emission](web_programming/co2_emission.py) + * [Convert Number To Words](web_programming/convert_number_to_words.py) * [Covid Stats Via Xpath](web_programming/covid_stats_via_xpath.py) * [Crawl Google Results](web_programming/crawl_google_results.py) * [Crawl Google Scholar Citation](web_programming/crawl_google_scholar_citation.py) From 6c15f526e58dbb3d3a67e613323781df39b58620 Mon Sep 17 00:00:00 2001 From: Paradact <44441385+Paradact@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:50:50 +0100 Subject: [PATCH 191/368] Added Torus surface area (#7906) * Added Torus surface area * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed error in test Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/area.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/maths/area.py b/maths/area.py index 5db7dac38..ea7216c8f 100644 --- a/maths/area.py +++ b/maths/area.py @@ -201,6 +201,40 @@ def surface_area_cylinder(radius: float, height: float) -> float: return 2 * pi * radius * (height + radius) +def surface_area_torus(torus_radius: float, tube_radius: float) -> float: + """Calculate the Area of a Torus. + Wikipedia reference: https://en.wikipedia.org/wiki/Torus + :return 4pi^2 * torus_radius * tube_radius + >>> surface_area_torus(1, 1) + 39.47841760435743 + >>> surface_area_torus(4, 3) + 473.7410112522892 + >>> surface_area_torus(3, 4) + Traceback (most recent call last): + ... + ValueError: surface_area_torus() does not support spindle or self intersecting tori + >>> surface_area_torus(1.6, 1.6) + 101.06474906715503 + >>> surface_area_torus(0, 0) + 0.0 + >>> surface_area_torus(-1, 1) + Traceback (most recent call last): + ... + ValueError: surface_area_torus() only accepts non-negative values + >>> surface_area_torus(1, -1) + Traceback (most recent call last): + ... + ValueError: surface_area_torus() only accepts non-negative values + """ + if torus_radius < 0 or tube_radius < 0: + raise ValueError("surface_area_torus() only accepts non-negative values") + if torus_radius < tube_radius: + raise ValueError( + "surface_area_torus() does not support spindle or self intersecting tori" + ) + return 4 * pow(pi, 2) * torus_radius * tube_radius + + def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. 
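A quick sanity check of the torus formula added above, as a standalone sketch rather than part of the diff: A = 4 * pi**2 * torus_radius * tube_radius, so torus_radius = 4 and tube_radius = 3 give 48 * pi**2, about 473.741, matching the surface_area_torus(4, 3) doctest.

from math import pi
print(4 * pi**2 * 4 * 3)  # 473.7410112522892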
@@ -543,6 +577,7 @@ if __name__ == "__main__": print(f"Cone: {surface_area_cone(10, 20) = }") print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(f"Cylinder: {surface_area_cylinder(10, 20) = }") + print(f"Torus: {surface_area_torus(20, 10) = }") print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }") print(f"Square: {area_reg_polygon(4, 10) = }") print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }") From 7addbccee72d2b18e6d095ab6675cbcb290412ce Mon Sep 17 00:00:00 2001 From: Paradact <44441385+Paradact@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:51:45 +0100 Subject: [PATCH 192/368] Torus volume (#7905) * Added Torus volume algorithm * Updated Torus volume for simplicity (removed ref to vol_sphere()) * Refactoring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/volume.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/maths/volume.py b/maths/volume.py index da4054646..1da4584c8 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -441,6 +441,34 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> floa ) +def vol_torus(torus_radius: float, tube_radius: float) -> float: + """Calculate the Volume of a Torus. + Wikipedia reference: https://en.wikipedia.org/wiki/Torus + :return 2pi^2 * torus_radius * tube_radius^2 + >>> vol_torus(1, 1) + 19.739208802178716 + >>> vol_torus(4, 3) + 710.6115168784338 + >>> vol_torus(3, 4) + 947.4820225045784 + >>> vol_torus(1.6, 1.6) + 80.85179925372404 + >>> vol_torus(0, 0) + 0.0 + >>> vol_torus(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_torus() only accepts non-negative values + >>> vol_torus(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_torus() only accepts non-negative values + """ + if torus_radius < 0 or tube_radius < 0: + raise ValueError("vol_torus() only accepts non-negative values") + return 2 * pow(pi, 2) * torus_radius * pow(tube_radius, 2) + + def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") @@ -453,6 +481,7 @@ def main(): print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5 print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75 print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1 + print(f"Torus: {vol_torus(2, 2) = }") # ~= 157.9 print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 print(f"Spherical cap: {vol_spherical_cap(1, 2) = }") # ~= 5.24 print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 From 74aa9efa1d164e7dba56a88b4b3546232f3c3024 Mon Sep 17 00:00:00 2001 From: Gustavobflh <43830003+Gustavobflh@users.noreply.github.com> Date: Mon, 31 Oct 2022 19:04:42 -0300 Subject: [PATCH 193/368] Added a Hubble Parameter calculator file (#7921) --- physics/hubble_parameter.py | 110 ++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 physics/hubble_parameter.py diff --git a/physics/hubble_parameter.py b/physics/hubble_parameter.py new file mode 100644 index 000000000..798564722 --- /dev/null +++ b/physics/hubble_parameter.py @@ -0,0 +1,110 @@ +""" +Title : Calculating the Hubble Parameter + +Description : The Hubble parameter H is the Universe expansion rate +in any time. 
In cosmology is customary to use the redshift redshift +in place of time, becausethe redshift is directily mensure +in the light of galaxies moving away from us. + +So, the general relation that we obtain is + +H = hubble_constant*(radiation_density*(redshift+1)**4 + + matter_density*(redshift+1)**3 + + curvature*(redshift+1)**2 + dark_energy)**(1/2) + +where radiation_density, matter_density, dark_energy are the relativity +(the percentage) energy densities that exist +in the Universe today. Here, matter_density is the +sum of the barion density and the +dark matter. Curvature is the curvature parameter and can be written in term +of the densities by the completeness + + +curvature = 1 - (matter_density + radiation_density + dark_energy) + +Source : +https://www.sciencedirect.com/topics/mathematics/hubble-parameter +""" + + +def hubble_parameter( + hubble_constant: float, + radiation_density: float, + matter_density: float, + dark_energy: float, + redshift: float, +) -> float: + + """ + Input Parameters + ---------------- + hubble_constant: Hubble constante is the expansion rate today usually + given in km/(s*Mpc) + + radiation_density: relative radiation density today + + matter_density: relative mass density today + + dark_energy: relative dark energy density today + + redshift: the light redshift + + Returns + ------- + result : Hubble parameter in and the unit km/s/Mpc (the unit can be + changed if you want, just need to change the unit of the Hubble constant) + + >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4, + ... matter_density=-0.3, dark_energy=0.7, redshift=1) + Traceback (most recent call last): + ... + ValueError: All input parameters must be positive + + >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4, + ... matter_density= 1.2, dark_energy=0.7, redshift=1) + Traceback (most recent call last): + ... + ValueError: Relative densities cannot be greater than one + + >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4, + ... 
matter_density= 0.3, dark_energy=0.7, redshift=0) + 68.3 + """ + parameters = [redshift, radiation_density, matter_density, dark_energy] + if any(0 > p for p in parameters): + raise ValueError("All input parameters must be positive") + + if any(1 < p for p in parameters[1:4]): + raise ValueError("Relative densities cannot be greater than one") + else: + curvature = 1 - (matter_density + radiation_density + dark_energy) + + e_2 = ( + radiation_density * (redshift + 1) ** 4 + + matter_density * (redshift + 1) ** 3 + + curvature * (redshift + 1) ** 2 + + dark_energy + ) + + hubble = hubble_constant * e_2 ** (1 / 2) + return hubble + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() + + # demo LCDM approximation + matter_density = 0.3 + + print( + hubble_parameter( + hubble_constant=68.3, + radiation_density=1e-4, + matter_density=matter_density, + dark_energy=1 - matter_density, + redshift=0, + ) + ) From 7d139ee7f1e48648cc8cf176b293d23d2ba85d13 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 1 Nov 2022 06:50:43 +0000 Subject: [PATCH 194/368] refactor(abs): Condense `abs_min` and `abs_max` (#7881) * refactor(abs): Condense `abs_min` and `abs_max` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/abs.py | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ maths/abs_max.py | 50 ------------------------------------ maths/abs_min.py | 35 ------------------------- 3 files changed, 66 insertions(+), 85 deletions(-) delete mode 100644 maths/abs_max.py delete mode 100644 maths/abs_min.py diff --git a/maths/abs.py b/maths/abs.py index dfea52dfb..cb0ffc8a5 100644 --- a/maths/abs.py +++ b/maths/abs.py @@ -15,6 +15,62 @@ def abs_val(num: float) -> float: return -num if num < 0 else num +def abs_min(x: list[int]) -> int: + """ + >>> abs_min([0,5,1,11]) + 0 + >>> abs_min([3,-10,-2]) + -2 + >>> abs_min([]) + Traceback (most recent call last): + ... + ValueError: abs_min() arg is an empty sequence + """ + if len(x) == 0: + raise ValueError("abs_min() arg is an empty sequence") + j = x[0] + for i in x: + if abs_val(i) < abs_val(j): + j = i + return j + + +def abs_max(x: list[int]) -> int: + """ + >>> abs_max([0,5,1,11]) + 11 + >>> abs_max([3,-10,-2]) + -10 + >>> abs_max([]) + Traceback (most recent call last): + ... + ValueError: abs_max() arg is an empty sequence + """ + if len(x) == 0: + raise ValueError("abs_max() arg is an empty sequence") + j = x[0] + for i in x: + if abs(i) > abs(j): + j = i + return j + + +def abs_max_sort(x: list[int]) -> int: + """ + >>> abs_max_sort([0,5,1,11]) + 11 + >>> abs_max_sort([3,-10,-2]) + -10 + >>> abs_max_sort([]) + Traceback (most recent call last): + ... 
+ ValueError: abs_max_sort() arg is an empty sequence + """ + if len(x) == 0: + raise ValueError("abs_max_sort() arg is an empty sequence") + return sorted(x, key=abs)[-1] + + def test_abs_val(): """ >>> test_abs_val() @@ -23,6 +79,16 @@ def test_abs_val(): assert 34 == abs_val(34) assert 100000000000 == abs_val(-100000000000) + a = [-3, -1, 2, -11] + assert abs_max(a) == -11 + assert abs_max_sort(a) == -11 + assert abs_min(a) == -1 + if __name__ == "__main__": + import doctest + + doctest.testmod() + + test_abs_val() print(abs_val(-34)) # --> 34 diff --git a/maths/abs_max.py b/maths/abs_max.py deleted file mode 100644 index 4a4b4d9eb..000000000 --- a/maths/abs_max.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import annotations - - -def abs_max(x: list[int]) -> int: - """ - >>> abs_max([0,5,1,11]) - 11 - >>> abs_max([3,-10,-2]) - -10 - >>> abs_max([]) - Traceback (most recent call last): - ... - ValueError: abs_max() arg is an empty sequence - """ - if len(x) == 0: - raise ValueError("abs_max() arg is an empty sequence") - j = x[0] - for i in x: - if abs(i) > abs(j): - j = i - return j - - -def abs_max_sort(x: list[int]) -> int: - """ - >>> abs_max_sort([0,5,1,11]) - 11 - >>> abs_max_sort([3,-10,-2]) - -10 - >>> abs_max_sort([]) - Traceback (most recent call last): - ... - ValueError: abs_max_sort() arg is an empty sequence - """ - if len(x) == 0: - raise ValueError("abs_max_sort() arg is an empty sequence") - return sorted(x, key=abs)[-1] - - -def main(): - a = [1, 2, -11] - assert abs_max(a) == -11 - assert abs_max_sort(a) == -11 - - -if __name__ == "__main__": - import doctest - - doctest.testmod(verbose=True) - main() diff --git a/maths/abs_min.py b/maths/abs_min.py deleted file mode 100644 index 00dbcb025..000000000 --- a/maths/abs_min.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import annotations - -from .abs import abs_val - - -def abs_min(x: list[int]) -> int: - """ - >>> abs_min([0,5,1,11]) - 0 - >>> abs_min([3,-10,-2]) - -2 - >>> abs_min([]) - Traceback (most recent call last): - ... 
- ValueError: abs_min() arg is an empty sequence - """ - if len(x) == 0: - raise ValueError("abs_min() arg is an empty sequence") - j = x[0] - for i in x: - if abs_val(i) < abs_val(j): - j = i - return j - - -def main(): - a = [-3, -1, 2, -11] - print(abs_min(a)) # = -1 - - -if __name__ == "__main__": - import doctest - - doctest.testmod(verbose=True) - main() From d23e709aea75647540e6ba57b3a5979854e80117 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 1 Nov 2022 14:07:11 +0100 Subject: [PATCH 195/368] maths/sum_of_digits.py: Streamline benchmarks (#7914) * maths/sum_of_digits.py: Streamline benchmarks ``` sum_of_digits(262144): 19 -- 0.3128329170285724 seconds sum_of_digits_recursion(262144): 19 -- 0.34008108399575576 seconds sum_of_digits_compact(262144): 19 -- 0.6086010000435635 seconds sum_of_digits(1125899906842624): 76 -- 0.8079068749793805 seconds sum_of_digits_recursion(1125899906842624): 76 -- 0.8435653329943307 seconds sum_of_digits_compact(1125899906842624): 76 -- 1.247976207989268 seconds sum_of_digits(1267650600228229401496703205376): 115 -- 1.6441589999594726 seconds sum_of_digits_recursion(1267650600228229401496703205376): 115 -- 1.713684624992311 seconds sum_of_digits_compact(1267650600228229401496703205376): 115 -- 2.2197747920290567 seconds ``` * updating DIRECTORY.md * Update sum_of_digits.py * Update sum_of_digits.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/sum_of_digits.py | 99 +++++------------------------------------- 1 file changed, 12 insertions(+), 87 deletions(-) diff --git a/maths/sum_of_digits.py b/maths/sum_of_digits.py index 5ad5fe6c9..d5488bb9e 100644 --- a/maths/sum_of_digits.py +++ b/maths/sum_of_digits.py @@ -1,10 +1,6 @@ -from timeit import timeit - - def sum_of_digits(n: int) -> int: """ Find the sum of digits of a number. - >>> sum_of_digits(12345) 15 >>> sum_of_digits(123) @@ -25,7 +21,6 @@ def sum_of_digits(n: int) -> int: def sum_of_digits_recursion(n: int) -> int: """ Find the sum of digits of a number using recursion - >>> sum_of_digits_recursion(12345) 15 >>> sum_of_digits_recursion(123) @@ -42,7 +37,6 @@ def sum_of_digits_recursion(n: int) -> int: def sum_of_digits_compact(n: int) -> int: """ Find the sum of digits of a number - >>> sum_of_digits_compact(12345) 15 >>> sum_of_digits_compact(123) @@ -57,93 +51,24 @@ def sum_of_digits_compact(n: int) -> int: def benchmark() -> None: """ - Benchmark code for comparing 3 functions, - with 3 different length int values. + Benchmark multiple functions, with three different length int values. 
""" - print("\nFor small_num = ", small_num, ":") - print( - "> sum_of_digits()", - "\t\tans =", - sum_of_digits(small_num), - "\ttime =", - timeit("z.sum_of_digits(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_recursion()", - "\tans =", - sum_of_digits_recursion(small_num), - "\ttime =", - timeit("z.sum_of_digits_recursion(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_compact()", - "\tans =", - sum_of_digits_compact(small_num), - "\ttime =", - timeit("z.sum_of_digits_compact(z.small_num)", setup="import __main__ as z"), - "seconds", - ) + from collections.abc import Callable + from timeit import timeit - print("\nFor medium_num = ", medium_num, ":") - print( - "> sum_of_digits()", - "\t\tans =", - sum_of_digits(medium_num), - "\ttime =", - timeit("z.sum_of_digits(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_recursion()", - "\tans =", - sum_of_digits_recursion(medium_num), - "\ttime =", - timeit("z.sum_of_digits_recursion(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_compact()", - "\tans =", - sum_of_digits_compact(medium_num), - "\ttime =", - timeit("z.sum_of_digits_compact(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) + def benchmark_a_function(func: Callable, value: int) -> None: + call = f"{func.__name__}({value})" + timing = timeit(f"__main__.{call}", setup="import __main__") + print(f"{call:56} = {func(value)} -- {timing:.4f} seconds") - print("\nFor large_num = ", large_num, ":") - print( - "> sum_of_digits()", - "\t\tans =", - sum_of_digits(large_num), - "\ttime =", - timeit("z.sum_of_digits(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_recursion()", - "\tans =", - sum_of_digits_recursion(large_num), - "\ttime =", - timeit("z.sum_of_digits_recursion(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_compact()", - "\tans =", - sum_of_digits_compact(large_num), - "\ttime =", - timeit("z.sum_of_digits_compact(z.large_num)", setup="import __main__ as z"), - "seconds", - ) + for value in (262144, 1125899906842624, 1267650600228229401496703205376): + for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): + benchmark_a_function(func, value) + print() if __name__ == "__main__": - small_num = 262144 - medium_num = 1125899906842624 - large_num = 1267650600228229401496703205376 - benchmark() import doctest doctest.testmod() + benchmark() From 4e6c1c049dffdc984232fe1fce1e4791fc527d11 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Tue, 1 Nov 2022 21:43:03 +0400 Subject: [PATCH 196/368] Is power of two (#7936) * add is power of two * fix comment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Deal with negative numbers * Spelling: negative Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/is_power_of_two.py | 57 +++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 bit_manipulation/is_power_of_two.py diff --git a/bit_manipulation/is_power_of_two.py b/bit_manipulation/is_power_of_two.py new file mode 100644 index 000000000..023e979fe --- /dev/null +++ b/bit_manipulation/is_power_of_two.py @@ -0,0 +1,57 @@ +""" +Author : Alexander Pantyukhin +Date : November 1, 2022 + +Task: +Given a positive int number. 
Return True if this number is power of 2 +or False otherwise. + +Implementation notes: Use bit manipulation. +For example if the number is the power of two it's bits representation: +n = 0..100..00 +n - 1 = 0..011..11 + +n & (n - 1) - no intersections = 0 +""" + + +def is_power_of_two(number: int) -> bool: + """ + Return True if this number is power of 2 or False otherwise. + + >>> is_power_of_two(0) + True + >>> is_power_of_two(1) + True + >>> is_power_of_two(2) + True + >>> is_power_of_two(4) + True + >>> is_power_of_two(6) + False + >>> is_power_of_two(8) + True + >>> is_power_of_two(17) + False + >>> is_power_of_two(-1) + Traceback (most recent call last): + ... + ValueError: number must not be negative + >>> is_power_of_two(1.2) + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for &: 'float' and 'float' + + # Test all powers of 2 from 0 to 10,000 + >>> all(is_power_of_two(int(2 ** i)) for i in range(10000)) + True + """ + if number < 0: + raise ValueError("number must not be negative") + return number & (number - 1) == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f512b4d105b6f3188deced19761b6ed288378f0d Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 1 Nov 2022 19:25:39 +0000 Subject: [PATCH 197/368] refactor: Move pascals triange to maths/ (#7932) * refactor: Move pascals triange to maths/ * Update xgboost_classifier.py * statsmodels is now compatible with Python 3.11 * statsmodels is now compatible with Python 3.11 * cython>=0.29.28 * cython>=0.29.28 # For statsmodels on Python 3.11 Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 1 - machine_learning/xgboost_classifier.py | 2 +- {other => matrix}/pascal_triangle.py | 0 requirements.txt | 3 ++- 4 files changed, 3 insertions(+), 3 deletions(-) rename {other => matrix}/pascal_triangle.py (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1069c68d2..6b9cc890b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,7 +25,6 @@ jobs: # See: #6591 for re-enabling tests on Python v3.11 run: pytest --ignore=computer_vision/cnn_classification.py - --ignore=machine_learning/forecasting/run.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=quantum/ --ignore=project_euler/ diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index 62a1b331b..08967f171 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -23,7 +23,7 @@ def data_handling(data: dict) -> tuple: def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: """ - >>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0])) + # THIS TEST IS BROKEN!! 
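A quick illustration of the n & (n - 1) trick behind the new bit_manipulation/is_power_of_two.py above (a sketch, not part of any patch): a power of two has exactly one set bit, so subtracting one clears that bit and sets every lower bit, leaving no overlap between the two values.

n = 8                      # 0b1000
assert n & (n - 1) == 0    # 0b1000 & 0b0111 -> 0, so 8 is a power of two
m = 6                      # 0b0110
assert m & (m - 1) != 0    # 0b0110 & 0b0101 -> 0b0100, so 6 is not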
>>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0])) XGBClassifier(base_score=0.5, booster='gbtree', callbacks=None, colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, early_stopping_rounds=None, enable_categorical=False, diff --git a/other/pascal_triangle.py b/matrix/pascal_triangle.py similarity index 100% rename from other/pascal_triangle.py rename to matrix/pascal_triangle.py diff --git a/requirements.txt b/requirements.txt index ae6203998..2e2782455 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ beautifulsoup4 +cython>=0.29.28 # For statsmodels on Python 3.11 fake_useragent keras lxml @@ -13,7 +14,7 @@ requests rich scikit-fuzzy sklearn -statsmodels; python_version < "3.11" +statsmodels sympy tensorflow; python_version < "3.11" texttable From f05baa2b2b9aeb5a9ae8184ff418a5ccdc56960a Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Wed, 2 Nov 2022 16:25:19 +0400 Subject: [PATCH 198/368] add dp up - down minimum cost for tickets (#7934) * add dp up - down minimum cost for tickets * add typints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add new tests and checks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add more tests * add types for the dp function * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * fix review notes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * small fix * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tests * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/minimum_tickets_cost.py | 129 ++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 dynamic_programming/minimum_tickets_cost.py diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py new file mode 100644 index 000000000..261a5a7cf --- /dev/null +++ b/dynamic_programming/minimum_tickets_cost.py @@ -0,0 +1,129 @@ +""" +Author : Alexander Pantyukhin +Date : November 1, 2022 + +Task: +Given a list of days when you need to travel. Each day is integer from 1 to 365. +You are able to use tickets for 1 day, 7 days and 30 days. +Each ticket has a cost. + +Find the minimum cost you need to travel every day in the given list of days. + +Implementation notes: +implementation Dynamic Programming up bottom approach. + +Runtime complexity: O(n) + +The implementation was tested on the +leetcode: https://leetcode.com/problems/minimum-cost-for-tickets/ + + +Minimum Cost For Tickets +Dynamic Programming: up -> down. 
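One worked instance of the problem described above, using the same numbers as the first doctest below (an illustrative aside, not part of the patch):

days, costs = [1, 4, 6, 7, 8, 20], [2, 7, 15]  # 1-day, 7-day, 30-day prices
# A 7-day pass bought on day 1 covers travel days 1, 4, 6 and 7; single-day
# tickets then cover days 8 and 20.  Total: 7 + 2 + 2 = 11, which beats six
# single tickets (12) and one 30-day pass (15), so mincost_tickets returns 11.
assert 7 + 2 + 2 == 11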
+""" + +from functools import lru_cache + + +def mincost_tickets(days: list[int], costs: list[int]) -> int: + """ + >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) + 11 + + >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]) + 17 + + >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150]) + 24 + + >>> mincost_tickets([2], [2, 90, 150]) + 2 + + >>> mincost_tickets([], [2, 90, 150]) + 0 + + >>> mincost_tickets('hello', [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: The parameter days should be a list of integers + + >>> mincost_tickets([], 'world') + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([0.25, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: The parameter days should be a list of integers + + >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 0.9, 150]) + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: All days elements should be greater than 0 + + >>> mincost_tickets([2, 367], [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: All days elements should be less than 366 + + >>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], []) + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([], []) + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [1, 2, 3, 4]) + Traceback (most recent call last): + ... 
+ ValueError: The parameter costs should be a list of three integers + """ + + # Validation + if not isinstance(days, list) or not all(isinstance(day, int) for day in days): + raise ValueError("The parameter days should be a list of integers") + + if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs): + raise ValueError("The parameter costs should be a list of three integers") + + if len(days) == 0: + return 0 + + if min(days) <= 0: + raise ValueError("All days elements should be greater than 0") + + if max(days) >= 366: + raise ValueError("All days elements should be less than 366") + + days_set = set(days) + + @lru_cache(maxsize=None) + def dynamic_programming(index: int) -> int: + if index > 365: + return 0 + + if index not in days_set: + return dp(index + 1) + + return min( + costs[0] + dp(index + 1), + costs[1] + dp(index + 7), + costs[2] + dp(index + 30), + ) + + return dynamic_programming(1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 598f6a26a14d815f5fd079f43787995b0f076c03 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Wed, 2 Nov 2022 16:20:57 +0000 Subject: [PATCH 199/368] refactor: Condense `password` related files in one (#7939) * refactor: Condense `password` related files in one * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update other/password.py Co-authored-by: Christian Clauss * dynamic_programming * test: Make test input `str` * requirements.txt: Remove cython>=0.29.28 # For statsmodels on Python 3.11 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/minimum_tickets_cost.py | 8 ++-- other/check_strong_password.py | 47 -------------------- other/{password_generator.py => password.py} | 44 +++++++++++++++++- requirements.txt | 1 - 4 files changed, 46 insertions(+), 54 deletions(-) delete mode 100644 other/check_strong_password.py rename other/{password_generator.py => password.py} (58%) diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py index 261a5a7cf..d07056d92 100644 --- a/dynamic_programming/minimum_tickets_cost.py +++ b/dynamic_programming/minimum_tickets_cost.py @@ -112,12 +112,12 @@ def mincost_tickets(days: list[int], costs: list[int]) -> int: return 0 if index not in days_set: - return dp(index + 1) + return dynamic_programming(index + 1) return min( - costs[0] + dp(index + 1), - costs[1] + dp(index + 7), - costs[2] + dp(index + 30), + costs[0] + dynamic_programming(index + 1), + costs[1] + dynamic_programming(index + 7), + costs[2] + dynamic_programming(index + 30), ) return dynamic_programming(1) diff --git a/other/check_strong_password.py b/other/check_strong_password.py deleted file mode 100644 index 95bb327ad..000000000 --- a/other/check_strong_password.py +++ /dev/null @@ -1,47 +0,0 @@ -# This Will Check Whether A Given Password Is Strong Or Not -# It Follows The Rule that Length Of Password Should Be At Least 8 Characters -# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character - -from string import ascii_lowercase, ascii_uppercase, digits, punctuation - - -def strong_password_detector(password: str, min_length: int = 8) -> str: - """ - >>> strong_password_detector('Hwea7$2!') - 'This is a strong Password' - - >>> strong_password_detector('Sh0r1') - 'Your Password must be at least 8 characters long' - - >>> strong_password_detector('Hello123') - 'Password should contain UPPERCASE, 
lowercase, numbers, special characters' - - >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') - 'This is a strong Password' - - >>> strong_password_detector(0) - 'Your Password must be at least 8 characters long' - """ - - if len(str(password)) < 8: - return "Your Password must be at least 8 characters long" - - upper = any(char in ascii_uppercase for char in password) - lower = any(char in ascii_lowercase for char in password) - num = any(char in digits for char in password) - spec_char = any(char in punctuation for char in password) - - if upper and lower and num and spec_char: - return "This is a strong Password" - - else: - return ( - "Password should contain UPPERCASE, lowercase, " - "numbers, special characters" - ) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/other/password_generator.py b/other/password.py similarity index 58% rename from other/password_generator.py rename to other/password.py index 8f9d58a33..8f6833073 100644 --- a/other/password_generator.py +++ b/other/password.py @@ -1,11 +1,12 @@ -"""Password Generator allows you to generate a random password of length N.""" import secrets from random import shuffle -from string import ascii_letters, digits, punctuation +from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def password_generator(length: int = 8) -> str: """ + Password Generator allows you to generate a random password of length N. + >>> len(password_generator()) 8 >>> len(password_generator(length=16)) @@ -62,6 +63,45 @@ def random_characters(chars_incl, i): pass # Put your code here... +# This Will Check Whether A Given Password Is Strong Or Not +# It Follows The Rule that Length Of Password Should Be At Least 8 Characters +# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character +def strong_password_detector(password: str, min_length: int = 8) -> str: + """ + >>> strong_password_detector('Hwea7$2!') + 'This is a strong Password' + + >>> strong_password_detector('Sh0r1') + 'Your Password must be at least 8 characters long' + + >>> strong_password_detector('Hello123') + 'Password should contain UPPERCASE, lowercase, numbers, special characters' + + >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') + 'This is a strong Password' + + >>> strong_password_detector('0') + 'Your Password must be at least 8 characters long' + """ + + if len(password) < min_length: + return "Your Password must be at least 8 characters long" + + upper = any(char in ascii_uppercase for char in password) + lower = any(char in ascii_lowercase for char in password) + num = any(char in digits for char in password) + spec_char = any(char in punctuation for char in password) + + if upper and lower and num and spec_char: + return "This is a strong Password" + + else: + return ( + "Password should contain UPPERCASE, lowercase, " + "numbers, special characters" + ) + + def main(): length = int(input("Please indicate the max length of your password: ").strip()) chars_incl = input( diff --git a/requirements.txt b/requirements.txt index 2e2782455..00f31b85e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ beautifulsoup4 -cython>=0.29.28 # For statsmodels on Python 3.11 fake_useragent keras lxml From 45b3383c3952f646e985972d1fcd772d3d9f5d3f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 2 Nov 2022 19:20:45 +0100 Subject: [PATCH 200/368] Flake8: Drop ignore of issue A003 (#7949) * Flake8: Drop ignore of issue A003 * updating DIRECTORY.md 
Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .flake8 | 4 +-- DIRECTORY.md | 10 +++--- data_structures/binary_tree/fenwick_tree.py | 8 ++--- data_structures/heap/heap.py | 7 ----- .../linked_list/merge_two_lists.py | 4 +-- data_structures/queue/double_ended_queue.py | 31 +++++++++---------- linear_algebra/src/lib.py | 12 ------- other/lfu_cache.py | 14 ++++----- other/lru_cache.py | 14 ++++----- 9 files changed, 42 insertions(+), 62 deletions(-) diff --git a/.flake8 b/.flake8 index 0d9ef18d1..2bb36b71a 100644 --- a/.flake8 +++ b/.flake8 @@ -1,8 +1,8 @@ [flake8] max-line-length = 88 -max-complexity = 25 +# max-complexity should be 10 +max-complexity = 23 extend-ignore = - A003 # Class attribute is shadowing a python builtin # Formatting style for `black` E203 # Whitespace before ':' W503 # Line break occurred before a binary operator diff --git a/DIRECTORY.md b/DIRECTORY.md index 5c4a032db..a2112bcfb 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -48,6 +48,7 @@ * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) + * [Is Power Of Two](bit_manipulation/is_power_of_two.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -315,6 +316,7 @@ * [Minimum Partition](dynamic_programming/minimum_partition.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) + * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) @@ -496,8 +498,6 @@ ## Maths * [3N Plus 1](maths/3n_plus_1.py) * [Abs](maths/abs.py) - * [Abs Max](maths/abs_max.py) - * [Abs Min](maths/abs_min.py) * [Add](maths/add.py) * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) * [Aliquot Sum](maths/aliquot_sum.py) @@ -653,6 +653,7 @@ * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) + * [Pascal Triangle](matrix/pascal_triangle.py) * [Rotate Matrix](matrix/rotate_matrix.py) * [Searching In Sorted Matrix](matrix/searching_in_sorted_matrix.py) * [Sherman Morrison](matrix/sherman_morrison.py) @@ -674,7 +675,6 @@ ## Other * [Activity Selection](other/activity_selection.py) * [Alternative List Arrange](other/alternative_list_arrange.py) - * [Check Strong Password](other/check_strong_password.py) * [Davisb Putnamb Logemannb Loveland](other/davisb_putnamb_logemannb_loveland.py) * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py) * [Doomsday](other/doomsday.py) @@ -689,8 +689,7 @@ * [Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) * [Nested Brackets](other/nested_brackets.py) - * [Pascal Triangle](other/pascal_triangle.py) - * [Password Generator](other/password_generator.py) + * [Password](other/password.py) * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) * [Sdes](other/sdes.py) @@ -701,6 +700,7 @@ * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * 
[Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) + * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py index 96020d142..babd75ac4 100644 --- a/data_structures/binary_tree/fenwick_tree.py +++ b/data_structures/binary_tree/fenwick_tree.py @@ -46,7 +46,7 @@ class FenwickTree: self.size = len(arr) self.tree = deepcopy(arr) for i in range(1, self.size): - j = self.next(i) + j = self.next_(i) if j < self.size: self.tree[j] += self.tree[i] @@ -64,13 +64,13 @@ class FenwickTree: """ arr = self.tree[:] for i in range(self.size - 1, 0, -1): - j = self.next(i) + j = self.next_(i) if j < self.size: arr[j] -= arr[i] return arr @staticmethod - def next(index: int) -> int: + def next_(index: int) -> int: return index + (index & (-index)) @staticmethod @@ -102,7 +102,7 @@ class FenwickTree: return while index < self.size: self.tree[index] += value - index = self.next(index) + index = self.next_(index) def update(self, index: int, value: int) -> None: """ diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 071790d18..b14c55d9d 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -88,13 +88,6 @@ class Heap: for i in range(self.heap_size // 2 - 1, -1, -1): self.max_heapify(i) - def max(self) -> float: - """return the max in the heap""" - if self.heap_size >= 1: - return self.h[0] - else: - raise Exception("Empty heap") - def extract_max(self) -> float: """get and remove max from heap""" if self.heap_size >= 2: diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 93cf7a7e1..61e2412aa 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -13,7 +13,7 @@ test_data_even = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class Node: data: int - next: Node | None + next_node: Node | None class SortedLinkedList: @@ -32,7 +32,7 @@ class SortedLinkedList: node = self.head while node: yield node.data - node = node.next + node = node.next_node def __len__(self) -> int: """ diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 7053879d4..11942db83 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -42,8 +42,8 @@ class Deque: """ val: Any = None - next: Deque._Node | None = None - prev: Deque._Node | None = None + next_node: Deque._Node | None = None + prev_node: Deque._Node | None = None class _Iterator: """ @@ -81,7 +81,7 @@ class Deque: # finished iterating raise StopIteration val = self._cur.val - self._cur = self._cur.next + self._cur = self._cur.next_node return val @@ -128,8 +128,8 @@ class Deque: self._len = 1 else: # connect nodes - self._back.next = node - node.prev = self._back + self._back.next_node = node + node.prev_node = self._back self._back = node # assign new back to the new node self._len += 1 @@ -170,8 +170,8 @@ class Deque: self._len = 1 else: # connect nodes - node.next = self._front - self._front.prev = node + node.next_node = self._front + self._front.prev_node = node self._front = node # assign new front to the new node self._len += 1 @@ -264,10 +264,9 @@ class Deque: assert not self.is_empty(), "Deque is empty." 
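The attribute renames above (next to next_node, prev to prev_node, and set to put further down) are what let the A003 ignore be removed from .flake8: flake8-builtins flags class attributes that shadow Python builtins such as next() and set(). A minimal sketch of the pattern, using a hypothetical Node class rather than code taken from the patch:

from __future__ import annotations
from dataclasses import dataclass


@dataclass
class Node:
    val: int
    next_node: Node | None = None  # naming this `next` would shadow builtins.next
    prev_node: Node | None = None  # and trigger flake8-builtins rule A003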
topop = self._back - self._back = self._back.prev # set new back - self._back.next = ( - None # drop the last node - python will deallocate memory automatically - ) + self._back = self._back.prev_node # set new back + # drop the last node - python will deallocate memory automatically + self._back.next_node = None self._len -= 1 @@ -300,8 +299,8 @@ class Deque: assert not self.is_empty(), "Deque is empty." topop = self._front - self._front = self._front.next # set new front and drop the first node - self._front.prev = None + self._front = self._front.next_node # set new front and drop the first node + self._front.prev_node = None self._len -= 1 @@ -385,8 +384,8 @@ class Deque: # compare every value if me.val != oth.val: return False - me = me.next - oth = oth.next + me = me.next_node + oth = oth.next_node return True @@ -424,7 +423,7 @@ class Deque: while aux is not None: # append the values in a list to display values_list.append(aux.val) - aux = aux.next + aux = aux.next_node return "[" + ", ".join(repr(val) for val in values_list) + "]" diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 079731487..775e0244a 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -40,7 +40,6 @@ class Vector: __sub__(other: Vector): vector subtraction __mul__(other: float): scalar multiplication __mul__(other: Vector): dot product - set(components: Collection[float]): changes the vector components copy(): copies this vector and returns it component(i): gets the i-th component (0-indexed) change_component(pos: int, value: float): changes specified component @@ -119,17 +118,6 @@ class Vector: else: # error case raise Exception("invalid operand!") - def set(self, components: Collection[float]) -> None: - """ - input: new components - changes the components of the vector. - replaces the components with newer one. - """ - if len(components) > 0: - self.__components = list(components) - else: - raise Exception("please give any vector") - def copy(self) -> Vector: """ copies this vector and returns it. diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 2f26bb6cc..b68ba3a46 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -166,14 +166,14 @@ class LFUCache(Generic[T, U]): or as a function decorator. 
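With set renamed to put, basic use of the cache looks like this; the sketch simply mirrors the updated doctests that follow and assumes LFUCache from other/lfu_cache.py has been imported:

cache = LFUCache(2)   # room for two entries
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1
cache.put(3, 3)       # key 2 was used least frequently, so it is evicted
assert cache.get(2) is None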
>>> cache = LFUCache(2) - >>> cache.set(1, 1) - >>> cache.set(2, 2) + >>> cache.put(1, 1) + >>> cache.put(2, 2) >>> cache.get(1) 1 - >>> cache.set(3, 3) + >>> cache.put(3, 3) >>> cache.get(2) is None True - >>> cache.set(4, 4) + >>> cache.put(4, 4) >>> cache.get(1) is None True >>> cache.get(3) @@ -224,7 +224,7 @@ class LFUCache(Generic[T, U]): >>> 1 in cache False - >>> cache.set(1, 1) + >>> cache.put(1, 1) >>> 1 in cache True """ @@ -250,7 +250,7 @@ class LFUCache(Generic[T, U]): self.miss += 1 return None - def set(self, key: T, value: U) -> None: + def put(self, key: T, value: U) -> None: """ Sets the value for the input key and updates the Double Linked List """ @@ -297,7 +297,7 @@ class LFUCache(Generic[T, U]): result = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].set(args[0], result) + cls.decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LFUCache[T, U]: diff --git a/other/lru_cache.py b/other/lru_cache.py index aa910e487..1e5eeac45 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -150,8 +150,8 @@ class LRUCache(Generic[T, U]): >>> cache = LRUCache(2) - >>> cache.set(1, 1) - >>> cache.set(2, 2) + >>> cache.put(1, 1) + >>> cache.put(2, 2) >>> cache.get(1) 1 @@ -166,7 +166,7 @@ class LRUCache(Generic[T, U]): {1: Node: key: 1, val: 1, has next: True, has prev: True, \ 2: Node: key: 2, val: 2, has next: True, has prev: True} - >>> cache.set(3, 3) + >>> cache.put(3, 3) >>> cache.list DoubleLinkedList, @@ -182,7 +182,7 @@ class LRUCache(Generic[T, U]): >>> cache.get(2) is None True - >>> cache.set(4, 4) + >>> cache.put(4, 4) >>> cache.get(1) is None True @@ -238,7 +238,7 @@ class LRUCache(Generic[T, U]): >>> 1 in cache False - >>> cache.set(1, 1) + >>> cache.put(1, 1) >>> 1 in cache True @@ -266,7 +266,7 @@ class LRUCache(Generic[T, U]): self.miss += 1 return None - def set(self, key: T, value: U) -> None: + def put(self, key: T, value: U) -> None: """ Sets the value for the input key and updates the Double Linked List """ @@ -315,7 +315,7 @@ class LRUCache(Generic[T, U]): result = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].set(args[0], result) + cls.decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LRUCache[T, U]: From db5215f60e31820dba5525e8b5fbf3e73b76b8df Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 2 Nov 2022 21:40:25 +0300 Subject: [PATCH 201/368] Reduce the complexity of linear_algebra/src/polynom_for_points.py (#7948) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Lower the --max-complexity threshold in the file .flake8 * Reduce the complexity of linear_algebra/src/polynom_for_points.py * Update linear_algebra/src/polynom_for_points.py Co-authored-by: Christian Clauss * Update linear_algebra/src/polynom_for_points.py Co-authored-by: Christian Clauss * Fix Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .flake8 | 2 +- linear_algebra/src/polynom_for_points.py | 139 ++++++++++------------- 2 files changed, 62 insertions(+), 79 deletions(-) diff --git a/.flake8 b/.flake8 index 2bb36b71a..1a62d57f9 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 23 +max-complexity = 21 extend-ignore = # Formatting style 
for `black` E203 # Whitespace before ':' diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index 091849542..1d702deb1 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -24,96 +24,79 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]])) f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ - try: - check = 1 - more_check = 0 - d = coordinates[0][0] - for j in range(len(coordinates)): - if j == 0: - continue - if d == coordinates[j][0]: - more_check += 1 - solved = "x=" + str(coordinates[j][0]) - if more_check == len(coordinates) - 1: - check = 2 - break - elif more_check > 0 and more_check != len(coordinates) - 1: - check = 3 - else: - check = 1 + if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): + return "The program cannot work out a fitting polynomial." - if len(coordinates) == 1 and coordinates[0][0] == 0: - check = 2 - solved = "x=0" - except Exception: - check = 3 + if len({tuple(pair) for pair in coordinates}) != len(coordinates): + return "The program cannot work out a fitting polynomial." + + set_x = {x for x, _ in coordinates} + if len(set_x) == 1: + return f"x={coordinates[0][0]}" + + if len(set_x) != len(coordinates): + return "The program cannot work out a fitting polynomial." x = len(coordinates) - if check == 1: - count_of_line = 0 - matrix: list[list[float]] = [] - # put the x and x to the power values in a matrix - while count_of_line < x: - count_in_line = 0 - a = coordinates[count_of_line][0] - count_line: list[float] = [] - while count_in_line < x: - count_line.append(a ** (x - (count_in_line + 1))) - count_in_line += 1 - matrix.append(count_line) - count_of_line += 1 + count_of_line = 0 + matrix: list[list[float]] = [] + # put the x and x to the power values in a matrix + while count_of_line < x: + count_in_line = 0 + a = coordinates[count_of_line][0] + count_line: list[float] = [] + while count_in_line < x: + count_line.append(a ** (x - (count_in_line + 1))) + count_in_line += 1 + matrix.append(count_line) + count_of_line += 1 - count_of_line = 0 - # put the y values into a vector - vector: list[float] = [] - while count_of_line < x: - vector.append(coordinates[count_of_line][1]) - count_of_line += 1 + count_of_line = 0 + # put the y values into a vector + vector: list[float] = [] + while count_of_line < x: + vector.append(coordinates[count_of_line][1]) + count_of_line += 1 - count = 0 + count = 0 - while count < x: - zahlen = 0 - while zahlen < x: - if count == zahlen: - zahlen += 1 - if zahlen == x: - break - bruch = matrix[zahlen][count] / matrix[count][count] - for counting_columns, item in enumerate(matrix[count]): - # manipulating all the values in the matrix - matrix[zahlen][counting_columns] -= item * bruch - # manipulating the values in the vector - vector[zahlen] -= vector[count] * bruch + while count < x: + zahlen = 0 + while zahlen < x: + if count == zahlen: zahlen += 1 - count += 1 + if zahlen == x: + break + bruch = matrix[zahlen][count] / matrix[count][count] + for counting_columns, item in enumerate(matrix[count]): + # manipulating all the values in the matrix + matrix[zahlen][counting_columns] -= item * bruch + # manipulating the values in the vector + vector[zahlen] -= vector[count] * bruch + zahlen += 1 + count += 1 - count = 0 - # make solutions - solution: list[str] = [] - while count < x: - solution.append(str(vector[count] / matrix[count][count])) - count += 1 + count = 
0 + # make solutions + solution: list[str] = [] + while count < x: + solution.append(str(vector[count] / matrix[count][count])) + count += 1 - count = 0 - solved = "f(x)=" + count = 0 + solved = "f(x)=" - while count < x: - remove_e: list[str] = solution[count].split("E") - if len(remove_e) > 1: - solution[count] = remove_e[0] + "*10^" + remove_e[1] - solved += "x^" + str(x - (count + 1)) + "*" + str(solution[count]) - if count + 1 != x: - solved += "+" - count += 1 + while count < x: + remove_e: list[str] = solution[count].split("E") + if len(remove_e) > 1: + solution[count] = f"{remove_e[0]}*10^{remove_e[1]}" + solved += f"x^{x - (count + 1)}*{solution[count]}" + if count + 1 != x: + solved += "+" + count += 1 - return solved - - elif check == 2: - return solved - else: - return "The program cannot work out a fitting polynomial." + return solved if __name__ == "__main__": From a02de964d137b803aad9bb9c9d7096eff62539fd Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 3 Nov 2022 00:16:44 +0300 Subject: [PATCH 202/368] Reduce the complexity of graphs/minimum_spanning_tree_prims.py (#7952) * Lower the --max-complexity threshold in the file .flake8 * Add test * Reduce the complexity of graphs/minimum_spanning_tree_prims.py * Remove backslashes * Remove # noqa: E741 * Fix the flake8 E741 issues * Refactor * Fix --- .flake8 | 2 +- graphs/minimum_spanning_tree_prims.py | 127 +++++++++++++++----------- 2 files changed, 76 insertions(+), 53 deletions(-) diff --git a/.flake8 b/.flake8 index 1a62d57f9..834d1f63d 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 21 +max-complexity = 20 extend-ignore = # Formatting style for `black` E203 # Whitespace before ':' diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 5b2eaa4bf..f577866f0 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -2,40 +2,45 @@ import sys from collections import defaultdict -def prisms_algorithm(l): # noqa: E741 +class Heap: + def __init__(self): + self.node_position = [] - node_position = [] + def get_position(self, vertex): + return self.node_position[vertex] - def get_position(vertex): - return node_position[vertex] + def set_position(self, vertex, pos): + self.node_position[vertex] = pos - def set_position(vertex, pos): - node_position[vertex] = pos - - def top_to_bottom(heap, start, size, positions): + def top_to_bottom(self, heap, start, size, positions): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: - m = 2 * start + 1 + smallest_child = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: - m = 2 * start + 1 + smallest_child = 2 * start + 1 else: - m = 2 * start + 2 - if heap[m] < heap[start]: - temp, temp1 = heap[m], positions[m] - heap[m], positions[m] = heap[start], positions[start] + smallest_child = 2 * start + 2 + if heap[smallest_child] < heap[start]: + temp, temp1 = heap[smallest_child], positions[smallest_child] + heap[smallest_child], positions[smallest_child] = ( + heap[start], + positions[start], + ) heap[start], positions[start] = temp, temp1 - temp = get_position(positions[m]) - set_position(positions[m], get_position(positions[start])) - set_position(positions[start], temp) + temp = self.get_position(positions[smallest_child]) + self.set_position( + positions[smallest_child], self.get_position(positions[start]) + ) + self.set_position(positions[start], temp) - top_to_bottom(heap, m, size, positions) + 
self.top_to_bottom(heap, smallest_child, size, positions) # Update function if value of any node in min-heap decreases - def bottom_to_top(val, index, heap, position): + def bottom_to_top(self, val, index, heap, position): temp = position[index] while index != 0: @@ -47,70 +52,88 @@ def prisms_algorithm(l): # noqa: E741 if val < heap[parent]: heap[index] = heap[parent] position[index] = position[parent] - set_position(position[parent], index) + self.set_position(position[parent], index) else: heap[index] = val position[index] = temp - set_position(temp, index) + self.set_position(temp, index) break index = parent else: heap[0] = val position[0] = temp - set_position(temp, 0) + self.set_position(temp, 0) - def heapify(heap, positions): + def heapify(self, heap, positions): start = len(heap) // 2 - 1 for i in range(start, -1, -1): - top_to_bottom(heap, i, len(heap), positions) + self.top_to_bottom(heap, i, len(heap), positions) - def delete_minimum(heap, positions): + def delete_minimum(self, heap, positions): temp = positions[0] heap[0] = sys.maxsize - top_to_bottom(heap, 0, len(heap), positions) + self.top_to_bottom(heap, 0, len(heap), positions) return temp - visited = [0 for i in range(len(l))] - nbr_tv = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex + +def prisms_algorithm(adjacency_list): + """ + >>> adjacency_list = {0: [[1, 1], [3, 3]], + ... 1: [[0, 1], [2, 6], [3, 5], [4, 1]], + ... 2: [[1, 6], [4, 5], [5, 2]], + ... 3: [[0, 3], [1, 5], [4, 1]], + ... 4: [[1, 1], [2, 5], [3, 1], [5, 4]], + ... 5: [[2, 2], [4, 4]]} + >>> prisms_algorithm(adjacency_list) + [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)] + """ + + heap = Heap() + + visited = [0] * len(adjacency_list) + nbr_tv = [-1] * len(adjacency_list) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph distance_tv = [] # Heap of Distance of vertices from their neighboring vertex positions = [] - for x in range(len(l)): - p = sys.maxsize - distance_tv.append(p) - positions.append(x) - node_position.append(x) + for vertex in range(len(adjacency_list)): + distance_tv.append(sys.maxsize) + positions.append(vertex) + heap.node_position.append(vertex) tree_edges = [] visited[0] = 1 distance_tv[0] = sys.maxsize - for x in l[0]: - nbr_tv[x[0]] = 0 - distance_tv[x[0]] = x[1] - heapify(distance_tv, positions) + for neighbor, distance in adjacency_list[0]: + nbr_tv[neighbor] = 0 + distance_tv[neighbor] = distance + heap.heapify(distance_tv, positions) - for _ in range(1, len(l)): - vertex = delete_minimum(distance_tv, positions) + for _ in range(1, len(adjacency_list)): + vertex = heap.delete_minimum(distance_tv, positions) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex)) visited[vertex] = 1 - for v in l[vertex]: - if visited[v[0]] == 0 and v[1] < distance_tv[get_position(v[0])]: - distance_tv[get_position(v[0])] = v[1] - bottom_to_top(v[1], get_position(v[0]), distance_tv, positions) - nbr_tv[v[0]] = vertex + for neighbor, distance in adjacency_list[vertex]: + if ( + visited[neighbor] == 0 + and distance < distance_tv[heap.get_position(neighbor)] + ): + distance_tv[heap.get_position(neighbor)] = distance + heap.bottom_to_top( + distance, heap.get_position(neighbor), distance_tv, positions + ) + nbr_tv[neighbor] = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > - n = int(input("Enter number of vertices: ").strip()) - e = int(input("Enter number of 
edges: ").strip()) - adjlist = defaultdict(list) - for x in range(e): - l = [int(x) for x in input().strip().split()] # noqa: E741 - adjlist[l[0]].append([l[1], l[2]]) - adjlist[l[1]].append([l[0], l[2]]) - print(prisms_algorithm(adjlist)) + edges_number = int(input("Enter number of edges: ").strip()) + adjacency_list = defaultdict(list) + for _ in range(edges_number): + edge = [int(x) for x in input().strip().split()] + adjacency_list[edge[0]].append([edge[1], edge[2]]) + adjacency_list[edge[1]].append([edge[0], edge[2]]) + print(prisms_algorithm(adjacency_list)) From 978414bd50ae294352e0e4d93566f49074450857 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 3 Nov 2022 01:56:30 +0300 Subject: [PATCH 203/368] Reduce the complexity of other/graham_scan.py (#7953) * Reduce the complexity of other/graham_scan.py * Lower the --max-complexity threshold in the file .flake8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix tests * Update other/graham_scan.py Co-authored-by: Christian Clauss * Update graham_scan.py * Update other/graham_scan.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .flake8 | 2 +- other/graham_scan.py | 150 ++++++++++++++++++++++--------------------- 2 files changed, 77 insertions(+), 75 deletions(-) diff --git a/.flake8 b/.flake8 index 834d1f63d..2f74f421d 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 20 +max-complexity = 19 extend-ignore = # Formatting style for `black` E203 # Whitespace before ':' diff --git a/other/graham_scan.py b/other/graham_scan.py index 91bb6812f..8e83bfcf4 100644 --- a/other/graham_scan.py +++ b/other/graham_scan.py @@ -14,6 +14,82 @@ from math import atan2, degrees from sys import maxsize +# traversal from the lowest and the most left point in anti-clockwise direction +# if direction gets right, the previous point is not the convex hull. 
+class Direction(Enum): + left = 1 + straight = 2 + right = 3 + + def __repr__(self): + return f"{self.__class__.__name__}.{self.name}" + + +def angle_comparer(point: tuple[int, int], minx: int, miny: int) -> float: + """Return the angle toward to point from (minx, miny) + + :param point: The target point + minx: The starting point's x + miny: The starting point's y + :return: the angle + + Examples: + >>> angle_comparer((1,1), 0, 0) + 45.0 + + >>> angle_comparer((100,1), 10, 10) + -5.710593137499642 + + >>> angle_comparer((5,5), 2, 3) + 33.690067525979785 + """ + # sort the points accorgind to the angle from the lowest and the most left point + x, y = point + return degrees(atan2(y - miny, x - minx)) + + +def check_direction( + starting: tuple[int, int], via: tuple[int, int], target: tuple[int, int] +) -> Direction: + """Return the direction toward to the line from via to target from starting + + :param starting: The starting point + via: The via point + target: The target point + :return: the Direction + + Examples: + >>> check_direction((1,1), (2,2), (3,3)) + Direction.straight + + >>> check_direction((60,1), (-50,199), (30,2)) + Direction.left + + >>> check_direction((0,0), (5,5), (10,0)) + Direction.right + """ + x0, y0 = starting + x1, y1 = via + x2, y2 = target + via_angle = degrees(atan2(y1 - y0, x1 - x0)) + via_angle %= 360 + target_angle = degrees(atan2(y2 - y0, x2 - x0)) + target_angle %= 360 + # t- + # \ \ + # \ v + # \| + # s + # via_angle is always lower than target_angle, if direction is left. + # If they are same, it means they are on a same line of convex hull. + if target_angle > via_angle: + return Direction.left + elif target_angle == via_angle: + return Direction.straight + else: + return Direction.right + + def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: """Pure implementation of graham scan algorithm in Python @@ -57,86 +133,12 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: # remove the lowest and the most left point from points for preparing for sort points.pop(minidx) - def angle_comparer(point: tuple[int, int], minx: int, miny: int) -> float: - """Return the angle toward to point from (minx, miny) - - :param point: The target point - minx: The starting point's x - miny: The starting point's y - :return: the angle - - Examples: - >>> angle_comparer((1,1), 0, 0) - 45.0 - - >>> angle_comparer((100,1), 10, 10) - -5.710593137499642 - - >>> angle_comparer((5,5), 2, 3) - 33.690067525979785 - """ - # sort the points accorgind to the angle from the lowest and the most left point - x = point[0] - y = point[1] - angle = degrees(atan2(y - miny, x - minx)) - return angle - sorted_points = sorted(points, key=lambda point: angle_comparer(point, minx, miny)) # This insert actually costs complexity, # and you should instead add (minx, miny) into stack later. # I'm using insert just for easy understanding. sorted_points.insert(0, (minx, miny)) - # traversal from the lowest and the most left point in anti-clockwise direction - # if direction gets right, the previous point is not the convex hull. 
- class Direction(Enum): - left = 1 - straight = 2 - right = 3 - - def check_direction( - starting: tuple[int, int], via: tuple[int, int], target: tuple[int, int] - ) -> Direction: - """Return the direction toward to the line from via to target from starting - - :param starting: The starting point - via: The via point - target: The target point - :return: the Direction - - Examples: - >>> check_direction((1,1), (2,2), (3,3)) - Direction.straight - - >>> check_direction((60,1), (-50,199), (30,2)) - Direction.left - - >>> check_direction((0,0), (5,5), (10,0)) - Direction.right - """ - x0, y0 = starting - x1, y1 = via - x2, y2 = target - via_angle = degrees(atan2(y1 - y0, x1 - x0)) - if via_angle < 0: - via_angle += 360 - target_angle = degrees(atan2(y2 - y0, x2 - x0)) - if target_angle < 0: - target_angle += 360 - # t- - # \ \ - # \ v - # \| - # s - # via_angle is always lower than target_angle, if direction is left. - # If they are same, it means they are on a same line of convex hull. - if target_angle > via_angle: - return Direction.left - elif target_angle == via_angle: - return Direction.straight - else: - return Direction.right - stack: deque[tuple[int, int]] = deque() stack.append(sorted_points[0]) stack.append(sorted_points[1]) From 3e1cb70abf9997af3a4903f77cb3506a301de893 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Fri, 4 Nov 2022 00:03:37 +0400 Subject: [PATCH 204/368] add algorithm to check binary search tree (#7947) * add algorithm to check binary search tree * add tests * add leetcode link * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typehints * typehints fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix flake8 * fix typehint * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add TreeNode resolving * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Christian Clauss * change func name * Update data_structures/binary_tree/is_bst.py Co-authored-by: Christian Clauss * review notes fixes. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix flake8 * fix flake 8 * fix doctest * Update data_structures/binary_tree/is_bst.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- data_structures/binary_tree/is_bst.py | 131 ++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 data_structures/binary_tree/is_bst.py diff --git a/data_structures/binary_tree/is_bst.py b/data_structures/binary_tree/is_bst.py new file mode 100644 index 000000000..0b2ef8c9f --- /dev/null +++ b/data_structures/binary_tree/is_bst.py @@ -0,0 +1,131 @@ +""" +Author : Alexander Pantyukhin +Date : November 2, 2022 + +Task: +Given the root of a binary tree, determine if it is a valid binary search +tree (BST). + +A valid binary search tree is defined as follows: + +- The left subtree of a node contains only nodes with keys less than the node's key. +- The right subtree of a node contains only nodes with keys greater than the node's key. +- Both the left and right subtrees must also be binary search trees. + +Implementation notes: +Depth-first search approach. + +leetcode: https://leetcode.com/problems/validate-binary-search-tree/ + +Let n is the number of nodes in tree +Runtime: O(n) +Space: O(1) +""" + +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class TreeNode: + data: float + left: TreeNode | None = None + right: TreeNode | None = None + + +def is_binary_search_tree(root: TreeNode | None) -> bool: + """ + >>> is_binary_search_tree(TreeNode(data=2, + ... left=TreeNode(data=1), + ... right=TreeNode(data=3)) + ... ) + True + + >>> is_binary_search_tree(TreeNode(data=0, + ... left=TreeNode(data=-11), + ... right=TreeNode(data=3)) + ... ) + True + + >>> is_binary_search_tree(TreeNode(data=5, + ... left=TreeNode(data=1), + ... right=TreeNode(data=4, left=TreeNode(data=3))) + ... ) + False + + >>> is_binary_search_tree(TreeNode(data='a', + ... left=TreeNode(data=1), + ... right=TreeNode(data=4, left=TreeNode(data=3))) + ... ) + Traceback (most recent call last): + ... + ValueError: Each node should be type of TreeNode and data should be float. + + >>> is_binary_search_tree(TreeNode(data=2, + ... left=TreeNode([]), + ... right=TreeNode(data=4, left=TreeNode(data=3))) + ... ) + Traceback (most recent call last): + ... + ValueError: Each node should be type of TreeNode and data should be float. + """ + + # Validation + def is_valid_tree(node: TreeNode | None) -> bool: + """ + >>> is_valid_tree(None) + True + >>> is_valid_tree('abc') + False + >>> is_valid_tree(TreeNode(data='not a float')) + False + >>> is_valid_tree(TreeNode(data=1, left=TreeNode('123'))) + False + """ + if node is None: + return True + + if not isinstance(node, TreeNode): + return False + + try: + float(node.data) + except (TypeError, ValueError): + return False + + return is_valid_tree(node.left) and is_valid_tree(node.right) + + if not is_valid_tree(root): + raise ValueError( + "Each node should be type of TreeNode and data should be float." 
+ ) + + def is_binary_search_tree_recursive_check( + node: TreeNode | None, left_bound: float, right_bound: float + ) -> bool: + """ + >>> is_binary_search_tree_recursive_check(None) + True + >>> is_binary_search_tree_recursive_check(TreeNode(data=1), 10, 20) + False + """ + + if node is None: + return True + + return ( + left_bound < node.data < right_bound + and is_binary_search_tree_recursive_check(node.left, left_bound, node.data) + and is_binary_search_tree_recursive_check( + node.right, node.data, right_bound + ) + ) + + return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf")) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7f1a5521f4b73d15df409a81f3da48427f9c6cdc Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Fri, 4 Nov 2022 11:30:32 +0400 Subject: [PATCH 205/368] add prefix sum (#7959) * add prefix sum * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + data_structures/arrays/prefix_sum.py | 78 ++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 data_structures/arrays/prefix_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a2112bcfb..76c7f9dea 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -162,6 +162,7 @@ ## Data Structures * Arrays * [Permutations](data_structures/arrays/permutations.py) + * [Prefix Sum](data_structures/arrays/prefix_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -174,6 +175,7 @@ * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) + * [Is Bst](data_structures/binary_tree/is_bst.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) diff --git a/data_structures/arrays/prefix_sum.py b/data_structures/arrays/prefix_sum.py new file mode 100644 index 000000000..2243a5308 --- /dev/null +++ b/data_structures/arrays/prefix_sum.py @@ -0,0 +1,78 @@ +""" +Author : Alexander Pantyukhin +Date : November 3, 2022 + +Implement the class of prefix sum with useful functions based on it. + +""" + + +class PrefixSum: + def __init__(self, array: list[int]) -> None: + len_array = len(array) + self.prefix_sum = [0] * len_array + + if len_array > 0: + self.prefix_sum[0] = array[0] + + for i in range(1, len_array): + self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i] + + def get_sum(self, start: int, end: int) -> int: + """ + The function returns the sum of array from the start to the end indexes. + Runtime : O(1) + Space: O(1) + + >>> PrefixSum([1,2,3]).get_sum(0, 2) + 6 + >>> PrefixSum([1,2,3]).get_sum(1, 2) + 5 + >>> PrefixSum([1,2,3]).get_sum(2, 2) + 3 + >>> PrefixSum([1,2,3]).get_sum(2, 3) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + """ + if start == 0: + return self.prefix_sum[end] + + return self.prefix_sum[end] - self.prefix_sum[start - 1] + + def contains_sum(self, target_sum: int) -> bool: + """ + The function returns True if array contains the target_sum, + False otherwise. + + Runtime : O(n) + Space: O(n) + + >>> PrefixSum([1,2,3]).contains_sum(6) + True + >>> PrefixSum([1,2,3]).contains_sum(5) + True + >>> PrefixSum([1,2,3]).contains_sum(3) + True + >>> PrefixSum([1,2,3]).contains_sum(4) + False + >>> PrefixSum([1,2,3]).contains_sum(7) + False + >>> PrefixSum([1,-2,3]).contains_sum(2) + True + """ + + sums = {0} + for sum_item in self.prefix_sum: + if sum_item - target_sum in sums: + return True + + sums.add(sum_item) + + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 51708530b6a46a5e53d12e750521a11c6bf5c986 Mon Sep 17 00:00:00 2001 From: Sanders Lin Date: Sun, 6 Nov 2022 17:35:40 +0800 Subject: [PATCH 206/368] Update 3n_plus_1.py (#7966) * Update 3n_plus_1.py 1. Minor issue with ValueError message: Given integer should be positive, not greater than 1, as 1 is allowed. 2. += calls underlying list extend method which might be slower. Calling apend seems more appropriate. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/3n_plus_1.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py index e455a158e..59fdec48e 100644 --- a/maths/3n_plus_1.py +++ b/maths/3n_plus_1.py @@ -11,15 +11,15 @@ def n31(a: int) -> tuple[list[int], int]: if not isinstance(a, int): raise TypeError(f"Must be int, not {type(a).__name__}") if a < 1: - raise ValueError(f"Given integer must be greater than 1, not {a}") + raise ValueError(f"Given integer must be positive, not {a}") path = [a] while a != 1: if a % 2 == 0: - a = a // 2 + a //= 2 else: a = 3 * a + 1 - path += [a] + path.append(a) return path, len(path) From daa1c7529ac6491338adb81622d5041a4ba1f446 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 6 Nov 2022 14:54:44 +0000 Subject: [PATCH 207/368] Raise error not string (#7945) * ci: Add `B023` to `.flake8` ignores * refactor: Return `bool`/raise Exception * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert: Remove previous branch commit * Update data_structures/binary_tree/segment_tree_other.py Co-authored-by: Christian Clauss * feat: Apply `__repr__` changes * chore: Fix failing tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/segment_tree_other.py Co-authored-by: Christian Clauss * test: Fix doctests * random.choice(population_score[:N_SELECTED])[0] * Update basic_string.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/quine_mc_cluskey.py | 11 +- ciphers/shuffled_shift_cipher.py | 2 +- computer_vision/harris_corner.py | 3 +- .../binary_tree/segment_tree_other.py | 121 +++++++++--------- data_structures/binary_tree/wavelet_tree.py | 6 +- .../linked_list/doubly_linked_list.py | 2 +- data_structures/queue/double_ended_queue.py | 2 +- graphs/breadth_first_search_shortest_path.py | 8 +- graphs/page_rank.py | 2 +- linear_algebra/src/polynom_for_points.py | 14 +- 
maths/monte_carlo_dice.py | 3 - matrix/cramers_rule_2x2.py | 16 ++- other/password.py | 38 +++--- strings/dna.py | 15 ++- 14 files changed, 123 insertions(+), 120 deletions(-) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 5bd7117bb..6788dfb28 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -1,15 +1,16 @@ from __future__ import annotations from collections.abc import Sequence +from typing import Literal -def compare_string(string1: str, string2: str) -> str: +def compare_string(string1: str, string2: str) -> str | Literal[False]: """ >>> compare_string('0010','0110') '0_10' >>> compare_string('0110','1101') - 'X' + False """ list1 = list(string1) list2 = list(string2) @@ -19,7 +20,7 @@ def compare_string(string1: str, string2: str) -> str: count += 1 list1[i] = "_" if count > 1: - return "X" + return False else: return "".join(list1) @@ -36,10 +37,10 @@ def check(binary: list[str]) -> list[str]: for i in range(len(binary)): for j in range(i + 1, len(binary)): k = compare_string(binary[i], binary[j]) - if k != "X": + if k is False: check1[i] = "*" check1[j] = "*" - temp.append(k) + temp.append("X") for i in range(len(binary)): if check1[i] == "$": pi.append(binary[i]) diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py index 714acd4b1..08b2cab97 100644 --- a/ciphers/shuffled_shift_cipher.py +++ b/ciphers/shuffled_shift_cipher.py @@ -42,7 +42,7 @@ class ShuffledShiftCipher: """ :return: passcode of the cipher object """ - return "Passcode is: " + "".join(self.__passcode) + return "".join(self.__passcode) def __neg_pos(self, iterlist: list[int]) -> list[int]: """ diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index 7850085f8..c8905bb6a 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -22,8 +22,7 @@ class HarrisCorner: raise ValueError("invalid k value") def __str__(self) -> str: - - return f"Harris Corner detection with k : {self.k}" + return str(self.k) def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: diff --git a/data_structures/binary_tree/segment_tree_other.py b/data_structures/binary_tree/segment_tree_other.py index 90afd7ca8..cc77c4951 100644 --- a/data_structures/binary_tree/segment_tree_other.py +++ b/data_structures/binary_tree/segment_tree_other.py @@ -16,40 +16,36 @@ class SegmentTreeNode: self.left = left self.right = right - def __str__(self): - return f"val: {self.val}, start: {self.start}, end: {self.end}" + def __repr__(self): + return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})" class SegmentTree: """ >>> import operator >>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add) - >>> for node in num_arr.traverse(): - ... print(node) - ... 
- val: 15, start: 0, end: 4 - val: 8, start: 0, end: 2 - val: 7, start: 3, end: 4 - val: 3, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 1, start: 1, end: 1 + >>> tuple(num_arr.traverse()) # doctest: +NORMALIZE_WHITESPACE + (SegmentTreeNode(start=0, end=4, val=15), + SegmentTreeNode(start=0, end=2, val=8), + SegmentTreeNode(start=3, end=4, val=7), + SegmentTreeNode(start=0, end=1, val=3), + SegmentTreeNode(start=2, end=2, val=5), + SegmentTreeNode(start=3, end=3, val=3), + SegmentTreeNode(start=4, end=4, val=4), + SegmentTreeNode(start=0, end=0, val=2), + SegmentTreeNode(start=1, end=1, val=1)) >>> >>> num_arr.update(1, 5) - >>> for node in num_arr.traverse(): - ... print(node) - ... - val: 19, start: 0, end: 4 - val: 12, start: 0, end: 2 - val: 7, start: 3, end: 4 - val: 7, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 5, start: 1, end: 1 + >>> tuple(num_arr.traverse()) # doctest: +NORMALIZE_WHITESPACE + (SegmentTreeNode(start=0, end=4, val=19), + SegmentTreeNode(start=0, end=2, val=12), + SegmentTreeNode(start=3, end=4, val=7), + SegmentTreeNode(start=0, end=1, val=7), + SegmentTreeNode(start=2, end=2, val=5), + SegmentTreeNode(start=3, end=3, val=3), + SegmentTreeNode(start=4, end=4, val=4), + SegmentTreeNode(start=0, end=0, val=2), + SegmentTreeNode(start=1, end=1, val=5)) >>> >>> num_arr.query_range(3, 4) 7 @@ -62,29 +58,29 @@ class SegmentTree: >>> for node in max_arr.traverse(): ... print(node) ... - val: 5, start: 0, end: 4 - val: 5, start: 0, end: 2 - val: 4, start: 3, end: 4 - val: 2, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 1, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=5) + SegmentTreeNode(start=0, end=2, val=5) + SegmentTreeNode(start=3, end=4, val=4) + SegmentTreeNode(start=0, end=1, val=2) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=1) >>> >>> max_arr.update(1, 5) >>> for node in max_arr.traverse(): ... print(node) ... - val: 5, start: 0, end: 4 - val: 5, start: 0, end: 2 - val: 4, start: 3, end: 4 - val: 5, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 5, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=5) + SegmentTreeNode(start=0, end=2, val=5) + SegmentTreeNode(start=3, end=4, val=4) + SegmentTreeNode(start=0, end=1, val=5) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=5) >>> >>> max_arr.query_range(3, 4) 4 @@ -97,29 +93,29 @@ class SegmentTree: >>> for node in min_arr.traverse(): ... print(node) ... 
- val: 1, start: 0, end: 4 - val: 1, start: 0, end: 2 - val: 3, start: 3, end: 4 - val: 1, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 1, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=1) + SegmentTreeNode(start=0, end=2, val=1) + SegmentTreeNode(start=3, end=4, val=3) + SegmentTreeNode(start=0, end=1, val=1) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=1) >>> >>> min_arr.update(1, 5) >>> for node in min_arr.traverse(): ... print(node) ... - val: 2, start: 0, end: 4 - val: 2, start: 0, end: 2 - val: 3, start: 3, end: 4 - val: 2, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 5, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=2) + SegmentTreeNode(start=0, end=2, val=2) + SegmentTreeNode(start=3, end=4, val=3) + SegmentTreeNode(start=0, end=1, val=2) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=5) >>> >>> min_arr.query_range(3, 4) 3 @@ -128,7 +124,6 @@ class SegmentTree: >>> min_arr.query_range(1, 3) 3 >>> - """ def __init__(self, collection: Sequence, function): diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py index 8d7145189..041e140f5 100644 --- a/data_structures/binary_tree/wavelet_tree.py +++ b/data_structures/binary_tree/wavelet_tree.py @@ -24,11 +24,11 @@ class Node: """ >>> node = Node(length=27) >>> repr(node) - 'min_value: -1, max_value: -1' + 'Node(min_value=-1 max_value=-1)' >>> repr(node) == str(node) True """ - return f"min_value: {self.minn}, max_value: {self.maxx}" + return f"Node(min_value={self.minn} max_value={self.maxx})" def build_tree(arr: list[int]) -> Node | None: @@ -37,7 +37,7 @@ def build_tree(arr: list[int]) -> Node | None: of the constructed tree >>> build_tree(test_array) - min_value: 0, max_value: 9 + Node(min_value=0 max_value=9) """ root = Node(len(arr)) root.minn, root.maxx = min(arr), max(arr) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 90b6b6eb2..6c81493ff 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -159,7 +159,7 @@ class DoublyLinkedList: if current.next: current = current.next else: # We have reached the end an no value matches - return "No data matching given value" + raise ValueError("No data matching given value") if current == self.head: self.delete_head() diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 11942db83..637b7f62f 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -425,7 +425,7 @@ class Deque: values_list.append(aux.val) aux = aux.next_node - return "[" + ", ".join(repr(val) for val in values_list) + "]" + return f"[{', '.join(repr(val) for val in values_list)}]" if __name__ == "__main__": diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index 697a8c634..cb21076f9 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py 
@@ -58,7 +58,9 @@ class Graph: Case 1 - No path is found. >>> g.shortest_path("Foo") - 'No path from vertex:G to vertex:Foo' + Traceback (most recent call last): + ... + ValueError: No path from vertex: G to vertex: Foo Case 2 - The path is found. >>> g.shortest_path("D") @@ -71,7 +73,9 @@ class Graph: target_vertex_parent = self.parent.get(target_vertex) if target_vertex_parent is None: - return f"No path from vertex:{self.source_vertex} to vertex:{target_vertex}" + raise ValueError( + f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" + ) return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" diff --git a/graphs/page_rank.py b/graphs/page_rank.py index e1af35b34..b9e4c4a72 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -27,7 +27,7 @@ class Node: self.outbound.append(node) def __repr__(self): - return f"Node {self.name}: Inbound: {self.inbound} ; Outbound: {self.outbound}" + return f"" def page_rank(nodes, limit=3, d=0.85): diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index 1d702deb1..f5e3db0cb 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -4,9 +4,13 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: number of points you want to use >>> print(points_to_polynomial([])) - The program cannot work out a fitting polynomial. + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. >>> print(points_to_polynomial([[]])) - The program cannot work out a fitting polynomial. + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) f(x)=x^2*0.0+x^1*-0.0+x^0*0.0 >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) @@ -25,17 +29,17 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): - return "The program cannot work out a fitting polynomial." + raise ValueError("The program cannot work out a fitting polynomial.") if len({tuple(pair) for pair in coordinates}) != len(coordinates): - return "The program cannot work out a fitting polynomial." + raise ValueError("The program cannot work out a fitting polynomial.") set_x = {x for x, _ in coordinates} if len(set_x) == 1: return f"x={coordinates[0][0]}" if len(set_x) != len(coordinates): - return "The program cannot work out a fitting polynomial." + raise ValueError("The program cannot work out a fitting polynomial.") x = len(coordinates) diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py index c4150b88f..362f70b49 100644 --- a/maths/monte_carlo_dice.py +++ b/maths/monte_carlo_dice.py @@ -13,9 +13,6 @@ class Dice: def roll(self): return random.choice(self.sides) - def _str_(self): - return "Fair Dice" - def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]: """ diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py index a635d66fb..4f52dbe64 100644 --- a/matrix/cramers_rule_2x2.py +++ b/matrix/cramers_rule_2x2.py @@ -2,7 +2,7 @@ # https://en.wikipedia.org/wiki/Cramer%27s_rule -def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: +def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]: """ Solves the system of linear equation in 2 variables. 
:param: equation1: list of 3 numbers @@ -14,13 +14,13 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: determinant_y = [[a1, d1], [a2, d2]] >>> cramers_rule_2x2([2, 3, 0], [5, 1, 0]) - 'Trivial solution. (Consistent system) x = 0 and y = 0' + (0.0, 0.0) >>> cramers_rule_2x2([0, 4, 50], [2, 0, 26]) - 'Non-Trivial Solution (Consistent system) x = 13.0, y = 12.5' + (13.0, 12.5) >>> cramers_rule_2x2([11, 2, 30], [1, 0, 4]) - 'Non-Trivial Solution (Consistent system) x = 4.0, y = -7.0' + (4.0, -7.0) >>> cramers_rule_2x2([4, 7, 1], [1, 2, 0]) - 'Non-Trivial Solution (Consistent system) x = 2.0, y = -1.0' + (2.0, -1.0) >>> cramers_rule_2x2([1, 2, 3], [2, 4, 6]) Traceback (most recent call last): @@ -75,8 +75,10 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: raise ValueError("No solution. (Inconsistent system)") else: if determinant_x == determinant_y == 0: - return "Trivial solution. (Consistent system) x = 0 and y = 0" + # Trivial solution (Inconsistent system) + return (0.0, 0.0) else: x = determinant_x / determinant y = determinant_y / determinant - return f"Non-Trivial Solution (Consistent system) x = {x}, y = {y}" + # Non-Trivial Solution (Consistent system) + return (x, y) diff --git a/other/password.py b/other/password.py index 8f6833073..f463c7564 100644 --- a/other/password.py +++ b/other/password.py @@ -66,26 +66,23 @@ def random_characters(chars_incl, i): # This Will Check Whether A Given Password Is Strong Or Not # It Follows The Rule that Length Of Password Should Be At Least 8 Characters # And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character -def strong_password_detector(password: str, min_length: int = 8) -> str: +def is_strong_password(password: str, min_length: int = 8) -> bool: """ - >>> strong_password_detector('Hwea7$2!') - 'This is a strong Password' - - >>> strong_password_detector('Sh0r1') - 'Your Password must be at least 8 characters long' - - >>> strong_password_detector('Hello123') - 'Password should contain UPPERCASE, lowercase, numbers, special characters' - - >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') - 'This is a strong Password' - - >>> strong_password_detector('0') - 'Your Password must be at least 8 characters long' + >>> is_strong_password('Hwea7$2!') + True + >>> is_strong_password('Sh0r1') + False + >>> is_strong_password('Hello123') + False + >>> is_strong_password('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') + True + >>> is_strong_password('0') + False """ if len(password) < min_length: - return "Your Password must be at least 8 characters long" + # Your Password must be at least 8 characters long + return False upper = any(char in ascii_uppercase for char in password) lower = any(char in ascii_lowercase for char in password) @@ -93,13 +90,12 @@ def strong_password_detector(password: str, min_length: int = 8) -> str: spec_char = any(char in punctuation for char in password) if upper and lower and num and spec_char: - return "This is a strong Password" + return True else: - return ( - "Password should contain UPPERCASE, lowercase, " - "numbers, special characters" - ) + # Passwords should contain UPPERCASE, lowerase + # numbers, and special characters + return False def main(): diff --git a/strings/dna.py b/strings/dna.py index 46e271d68..c2b96110b 100644 --- a/strings/dna.py +++ b/strings/dna.py @@ -14,13 +14,18 @@ def dna(dna: str) -> str: >>> dna("CTGA") 'GACT' >>> dna("GFGG") - 'Invalid Strand' + Traceback (most recent call last): + ... 
+ ValueError: Invalid Strand """ - r = len(re.findall("[ATCG]", dna)) != len(dna) - val = dna.translate(dna.maketrans("ATCG", "TAGC")) - return "Invalid Strand" if r else val + if len(re.findall("[ATCG]", dna)) != len(dna): + raise ValueError("Invalid Strand") + + return dna.translate(dna.maketrans("ATCG", "TAGC")) if __name__ == "__main__": - __import__("doctest").testmod() + import doctest + + doctest.testmod() From 6aaf0a2c77b671f3e35e71dfccc569f51d8e3b00 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 8 Nov 2022 12:49:47 +0100 Subject: [PATCH 208/368] maths/number_of_digits.py: Streamline benchmarks (#7913) * maths/number_of_digits.py: Streamline benchmarks ``` num_digits(262144): 6 -- 0.2226011250168085 seconds num_digits_fast(262144): 6 -- 0.13145116699161008 seconds num_digits_faster(262144): 6 -- 0.09273383300751448 seconds num_digits(1125899906842624): 16 -- 0.6056742920191027 seconds num_digits_fast(1125899906842624): 16 -- 0.15698366600554436 seconds num_digits_faster(1125899906842624): 16 -- 0.1027024170034565 seconds num_digits(1267650600228229401496703205376): 31 -- 1.1957934170495719 seconds num_digits_fast(1267650600228229401496703205376): 31 -- 0.15552304196171463 seconds num_digits_faster(1267650600228229401496703205376): 31 -- 0.13062308297958225 seconds ``` * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update number_of_digits.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/number_of_digits.py | 92 +++++---------------------------------- 1 file changed, 11 insertions(+), 81 deletions(-) diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py index 3c0eb7b38..86bc67f72 100644 --- a/maths/number_of_digits.py +++ b/maths/number_of_digits.py @@ -67,93 +67,23 @@ def num_digits_faster(n: int) -> int: def benchmark() -> None: """ - Benchmark code for comparing 3 functions, - with 3 different length int values. + Benchmark multiple functions, with three different length int values. 
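+    For each test value, prints every function's result and its timeit timing.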
""" - print("\nFor small_num = ", small_num, ":") - print( - "> num_digits()", - "\t\tans =", - num_digits(small_num), - "\ttime =", - timeit("z.num_digits(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_fast()", - "\tans =", - num_digits_fast(small_num), - "\ttime =", - timeit("z.num_digits_fast(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_faster()", - "\tans =", - num_digits_faster(small_num), - "\ttime =", - timeit("z.num_digits_faster(z.small_num)", setup="import __main__ as z"), - "seconds", - ) + from collections.abc import Callable - print("\nFor medium_num = ", medium_num, ":") - print( - "> num_digits()", - "\t\tans =", - num_digits(medium_num), - "\ttime =", - timeit("z.num_digits(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_fast()", - "\tans =", - num_digits_fast(medium_num), - "\ttime =", - timeit("z.num_digits_fast(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_faster()", - "\tans =", - num_digits_faster(medium_num), - "\ttime =", - timeit("z.num_digits_faster(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) + def benchmark_a_function(func: Callable, value: int) -> None: + call = f"{func.__name__}({value})" + timing = timeit(f"__main__.{call}", setup="import __main__") + print(f"{call}: {func(value)} -- {timing} seconds") - print("\nFor large_num = ", large_num, ":") - print( - "> num_digits()", - "\t\tans =", - num_digits(large_num), - "\ttime =", - timeit("z.num_digits(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_fast()", - "\tans =", - num_digits_fast(large_num), - "\ttime =", - timeit("z.num_digits_fast(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_faster()", - "\tans =", - num_digits_faster(large_num), - "\ttime =", - timeit("z.num_digits_faster(z.large_num)", setup="import __main__ as z"), - "seconds", - ) + for value in (262144, 1125899906842624, 1267650600228229401496703205376): + for func in (num_digits, num_digits_fast, num_digits_faster): + benchmark_a_function(func, value) + print() if __name__ == "__main__": - small_num = 262144 - medium_num = 1125899906842624 - large_num = 1267650600228229401496703205376 - benchmark() import doctest doctest.testmod() + benchmark() From 8951d857fea2f30d30f64e63d906dc986c32308a Mon Sep 17 00:00:00 2001 From: Abhishek Chakraborty Date: Tue, 8 Nov 2022 09:24:21 -0800 Subject: [PATCH 209/368] BB84 QKD algorithm (#7898) * Added BB84 algorithm. * Function name lowercase + imports fix I thought uppercase was appropriate because they're initials. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum/bb84.py Co-authored-by: Christian Clauss * Removed python < 3.11 restriction from qiskit * Removed python < 3.11 restriction from qiskit * scikit-learn * Update quantum/bb84.py Correct typo in `default_rng()` call Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Maxim Smolskiy --- quantum/bb84.py | 133 +++++++++++++++++++++++++++++++++++++++++++++++ requirements.txt | 4 +- 2 files changed, 135 insertions(+), 2 deletions(-) create mode 100644 quantum/bb84.py diff --git a/quantum/bb84.py b/quantum/bb84.py new file mode 100644 index 000000000..60d64371f --- /dev/null +++ b/quantum/bb84.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +""" +Simulation of the Quantum Key Distribution (QKD) protocol called BB84, +created by Charles Bennett and Gilles Brassard in 1984. + +BB84 is a key-distribution protocol that ensures secure key distribution +using qubits instead of classical bits. The generated key is the result +of simulating a quantum circuit. Our algorithm to construct the circuit +is as follows: + +Alice generates two binary strings. One encodes the basis for each qubit: + + - 0 -> {0,1} basis. + - 1 -> {+,-} basis. + +The other encodes the state: + + - 0 -> |0> or |+>. + - 1 -> |1> or |->. + +Bob also generates a binary string and uses the same convention to choose +a basis for measurement. Based on the following results, we follow the +algorithm below: + +X|0> = |1> + +H|0> = |+> + +HX|0> = |-> + +1. Whenever Alice wants to encode 1 in a qubit, she applies an +X (NOT) gate to the qubit. To encode 0, no action is needed. + +2. Wherever she wants to encode it in the {+,-} basis, she applies +an H (Hadamard) gate. No action is necessary to encode a qubit in +the {0,1} basis. + +3. She then sends the qubits to Bob (symbolically represented in +this circuit using wires). + +4. Bob measures the qubits according to his binary string for +measurement. To measure a qubit in the {+,-} basis, he applies +an H gate to the corresponding qubit and then performs a measurement. + +References: +https://en.wikipedia.org/wiki/BB84 +https://qiskit.org/textbook/ch-algorithms/quantum-key-distribution.html +""" +import numpy as np +import qiskit + + +def bb84(key_len: int = 8, seed: int | None = None) -> str: + """ + Performs the BB84 protocol using a key made of `key_len` bits. + The two parties in the key distribution are called Alice and Bob. + Args: + key_len: The length of the generated key in bits. The default is 8. + + seed: Seed for the random number generator. + Mostly used for testing. Default is None. + + Returns: + key: The key generated using BB84 protocol. + + >>> bb84(16, seed=0) + '1101101100010000' + + >>> bb84(8, seed=0) + '01011011' + """ + # Set up the random number generator. + rng = np.random.default_rng(seed=seed) + + # Roughly 25% of the qubits will contribute to the key. + # So we take more than we need. + num_qubits = 6 * key_len + # Measurement basis for Alice's qubits. + alice_basis = rng.integers(2, size=num_qubits) + # The set of states Alice will prepare. + alice_state = rng.integers(2, size=num_qubits) + # Measurement basis for Bob's qubits. + bob_basis = rng.integers(2, size=num_qubits) + + # Quantum Circuit to simulate BB84 + bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84") + + # Alice prepares her qubits according to rules above. 
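+    # Concretely, each (state, basis) pair prepares:
+    #   (0, 0) -> |0>,        (1, 0) -> X|0> = |1>,
+    #   (0, 1) -> H|0> = |+>, (1, 1) -> HX|0> = |->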
+ for index, _ in enumerate(alice_basis): + if alice_state[index] == 1: + bb84_circ.x(index) + if alice_basis[index] == 1: + bb84_circ.h(index) + bb84_circ.barrier() + + # Bob measures the received qubits according to rules above. + for index, _ in enumerate(bob_basis): + if bob_basis[index] == 1: + bb84_circ.h(index) + + bb84_circ.barrier() + bb84_circ.measure_all() + + # Simulate the quantum circuit. + sim = qiskit.Aer.get_backend("aer_simulator") + # We only need to run one shot because the key is unique. + # Multiple shots will produce the same key. + job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed) + # Returns the result of measurement. + result = job.result().get_counts(bb84_circ).most_frequent() + + # Extracting the generated key from the simulation results. + # Only keep measurement results where Alice and Bob chose the same basis. + gen_key = "".join( + [ + result_bit + for alice_basis_bit, bob_basis_bit, result_bit in zip( + alice_basis, bob_basis, result + ) + if alice_basis_bit == bob_basis_bit + ] + ) + + # Get final key. Pad with 0 if too short, otherwise truncate. + key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0") + return key + + +if __name__ == "__main__": + print(f"The generated key is : {bb84(8, seed=0)}") + from doctest import testmod + + testmod() diff --git a/requirements.txt b/requirements.txt index 00f31b85e..a1d607df0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,11 +8,11 @@ opencv-python pandas pillow projectq -qiskit; python_version < "3.11" +qiskit requests rich scikit-fuzzy -sklearn +scikit-learn statsmodels sympy tensorflow; python_version < "3.11" From 3f9aae149dba5c9b68ff6f7fd83cadf3fd6b1d7d Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Wed, 9 Nov 2022 21:06:38 +0530 Subject: [PATCH 210/368] feat: Add automorphic number implementation (#7978) * feat: Add automorphic number implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Add type checking for number * refactor: Rename variable n to number * test: Add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * test: Add unit test for number=0 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/automorphic_number.py | 58 +++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 maths/automorphic_number.py diff --git a/maths/automorphic_number.py b/maths/automorphic_number.py new file mode 100644 index 000000000..103fc7301 --- /dev/null +++ b/maths/automorphic_number.py @@ -0,0 +1,58 @@ +""" +== Automorphic Numbers == +A number n is said to be a Automorphic number if +the square of n "ends" in the same digits as n itself. + +Examples of Automorphic Numbers: 0, 1, 5, 6, 25, 76, 376, 625, 9376, 90625, ... +https://en.wikipedia.org/wiki/Automorphic_number +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +# Time Complexity : O(log10n) + + +def is_automorphic_number(number: int) -> bool: + """ + # doctest: +NORMALIZE_WHITESPACE + This functions takes an integer number as input. + returns True if the number is automorphic. 
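+    For example, 76 is automorphic because 76**2 = 5776 ends in 76,
+    while 7**2 = 49 does not end in 7.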
+ >>> is_automorphic_number(-1) + False + >>> is_automorphic_number(0) + True + >>> is_automorphic_number(5) + True + >>> is_automorphic_number(6) + True + >>> is_automorphic_number(7) + False + >>> is_automorphic_number(25) + True + >>> is_automorphic_number(259918212890625) + True + >>> is_automorphic_number(259918212890636) + False + >>> is_automorphic_number(740081787109376) + True + >>> is_automorphic_number(5.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=5.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 0: + return False + number_square = number * number + while number > 0: + if number % 10 != number_square % 10: + return False + number //= 10 + number_square //= 10 + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 076193eefa161a2030ca4b1ee60b285d4a50e4c6 Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Thu, 10 Nov 2022 08:09:47 +0530 Subject: [PATCH 211/368] feat: Add pronic number implementation (#7979) * feat: Add pronic number implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/pronic_number.py | 54 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 maths/pronic_number.py diff --git a/maths/pronic_number.py b/maths/pronic_number.py new file mode 100644 index 000000000..8b554dbbd --- /dev/null +++ b/maths/pronic_number.py @@ -0,0 +1,54 @@ +""" +== Pronic Number == +A number n is said to be a Proic number if +there exists an integer m such that n = m * (m + 1) + +Examples of Proic Numbers: 0, 2, 6, 12, 20, 30, 42, 56, 72, 90, 110 ... +https://en.wikipedia.org/wiki/Pronic_number +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) + + +def is_pronic(number: int) -> bool: + """ + # doctest: +NORMALIZE_WHITESPACE + This functions takes an integer number as input. + returns True if the number is pronic. + >>> is_pronic(-1) + False + >>> is_pronic(0) + True + >>> is_pronic(2) + True + >>> is_pronic(5) + False + >>> is_pronic(6) + True + >>> is_pronic(8) + False + >>> is_pronic(30) + True + >>> is_pronic(32) + False + >>> is_pronic(2147441940) + True + >>> is_pronic(9223372033963249500) + True + >>> is_pronic(6.0) + Traceback (most recent call last): + ... 
+ TypeError: Input value of [number=6.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 0 or number % 2 == 1: + return False + number_sqrt = int(number**0.5) + return number == number_sqrt * (number_sqrt + 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4cddb26908bde48047e4b6e383c4b061c289a5e5 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 10 Nov 2022 03:41:28 +0100 Subject: [PATCH 212/368] atbash.py: Tighten up the benchmarks (#7977) * atbash.py: Tighten up the benchmarks * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ciphers/atbash.py | 21 ++++----------------- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 76c7f9dea..5f314c317 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -995,6 +995,7 @@ * [Sol1](project_euler/problem_686/sol1.py) ## Quantum + * [Bb84](quantum/bb84.py) * [Deutsch Jozsa](quantum/deutsch_jozsa.py) * [Half Adder](quantum/half_adder.py) * [Not Gate](quantum/not_gate.py) diff --git a/ciphers/atbash.py b/ciphers/atbash.py index 5c2aea610..0a86a800c 100644 --- a/ciphers/atbash.py +++ b/ciphers/atbash.py @@ -38,26 +38,13 @@ def atbash(sequence: str) -> str: def benchmark() -> None: - """Let's benchmark them side-by-side...""" + """Let's benchmark our functions side-by-side...""" from timeit import timeit print("Running performance benchmarks...") - print( - "> atbash_slow()", - timeit( - "atbash_slow(printable)", - setup="from string import printable ; from __main__ import atbash_slow", - ), - "seconds", - ) - print( - "> atbash()", - timeit( - "atbash(printable)", - setup="from string import printable ; from __main__ import atbash", - ), - "seconds", - ) + setup = "from string import printable ; from __main__ import atbash, atbash_slow" + print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds") + print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds") if __name__ == "__main__": From 5c92b7390e650494f49e1f9298c1a79421673385 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 10 Nov 2022 03:42:14 +0100 Subject: [PATCH 213/368] prime_numbers.py: Tighten up the benchmarks (#7976) * prime_numbers.py: Tighten up the benchmarks * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/prime_numbers.py | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 4e076fe31..c5297ed92 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -90,32 +90,20 @@ def fast_primes(max_n: int) -> Generator[int, None, None]: yield i +def benchmark(): + """ + Let's benchmark our functions side-by-side... + """ + from timeit import timeit + + setup = "from __main__ import slow_primes, primes, fast_primes" + print(timeit("slow_primes(1_000_000_000_000)", setup=setup, number=1_000_000)) + print(timeit("primes(1_000_000_000_000)", setup=setup, number=1_000_000)) + print(timeit("fast_primes(1_000_000_000_000)", setup=setup, number=1_000_000)) + + if __name__ == "__main__": number = int(input("Calculate primes up to:\n>> ").strip()) for ret in primes(number): print(ret) - - # Let's benchmark them side-by-side... 
- from timeit import timeit - - print( - timeit( - "slow_primes(1_000_000_000_000)", - setup="from __main__ import slow_primes", - number=1_000_000, - ) - ) - print( - timeit( - "primes(1_000_000_000_000)", - setup="from __main__ import primes", - number=1_000_000, - ) - ) - print( - timeit( - "fast_primes(1_000_000_000_000)", - setup="from __main__ import fast_primes", - number=1_000_000, - ) - ) + benchmark() From 7b2eca0243f5c4454875e17971cb527037d2e281 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Thu, 10 Nov 2022 06:49:38 +0400 Subject: [PATCH 214/368] add distribute coins (#7975) * add distribute coins * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix review notes * fix typehint * fix type in TreeNode Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + .../binary_tree/distribute_coins.py | 135 ++++++++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 data_structures/binary_tree/distribute_coins.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5f314c317..74243cd06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -173,6 +173,7 @@ * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) + * [Distribute Coins](data_structures/binary_tree/distribute_coins.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Is Bst](data_structures/binary_tree/is_bst.py) diff --git a/data_structures/binary_tree/distribute_coins.py b/data_structures/binary_tree/distribute_coins.py new file mode 100644 index 000000000..ea02afc2c --- /dev/null +++ b/data_structures/binary_tree/distribute_coins.py @@ -0,0 +1,135 @@ +""" +Author : Alexander Pantyukhin +Date : November 7, 2022 + +Task: +You are given a tree root of a binary tree with n nodes, where each node has +node.data coins. There are exactly n coins in whole tree. + +In one move, we may choose two adjacent nodes and move one coin from one node +to another. A move may be from parent to child, or from child to parent. + +Return the minimum number of moves required to make every node have exactly one coin. + +Example 1: + + 3 + / \ + 0 0 + +Result: 2 + +Example 2: + + 0 + / \ + 3 0 + +Result 3 + +leetcode: https://leetcode.com/problems/distribute-coins-in-binary-tree/ + +Implementation notes: +User depth-first search approach. + +Let n is the number of nodes in tree +Runtime: O(n) +Space: O(1) +""" + +from __future__ import annotations + +from collections import namedtuple +from dataclasses import dataclass + + +@dataclass +class TreeNode: + data: int + left: TreeNode | None = None + right: TreeNode | None = None + + +CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess") + + +def distribute_coins(root: TreeNode | None) -> int: + """ + >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) + 2 + >>> distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0))) + 3 + >>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3))) + 3 + >>> distribute_coins(None) + 0 + >>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(0))) + Traceback (most recent call last): + ... 
+ ValueError: The nodes number should be same as the number of coins + >>> distribute_coins(TreeNode(0, TreeNode(1), TreeNode(1))) + Traceback (most recent call last): + ... + ValueError: The nodes number should be same as the number of coins + """ + + if root is None: + return 0 + + # Validation + def count_nodes(node: TreeNode | None) -> int: + """ + >>> count_nodes(None): + 0 + """ + if node is None: + return 0 + + return count_nodes(node.left) + count_nodes(node.right) + 1 + + def count_coins(node: TreeNode | None) -> int: + """ + >>> count_coins(None): + 0 + """ + if node is None: + return 0 + + return count_coins(node.left) + count_coins(node.right) + node.data + + if count_nodes(root) != count_coins(root): + raise ValueError("The nodes number should be same as the number of coins") + + # Main calculation + def get_distrib(node: TreeNode | None) -> CoinsDistribResult: + """ + >>> get_distrib(None) + namedtuple("CoinsDistribResult", "0 2") + """ + + if node is None: + return CoinsDistribResult(0, 1) + + left_distrib_moves, left_distrib_excess = get_distrib(node.left) + right_distrib_moves, right_distrib_excess = get_distrib(node.right) + + coins_to_left = 1 - left_distrib_excess + coins_to_right = 1 - right_distrib_excess + + result_moves = ( + left_distrib_moves + + right_distrib_moves + + abs(coins_to_left) + + abs(coins_to_right) + ) + result_excess = node.data - coins_to_left - coins_to_right + + return CoinsDistribResult(result_moves, result_excess) + + return get_distrib(root)[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e1be882f72f85d5f7267b46f0ffd5203a6d81e2e Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Thu, 10 Nov 2022 16:25:50 +0530 Subject: [PATCH 215/368] algorithm: Twin prime (#7980) * feat: Add twin prime algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: Fix broken import statement Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/twin_prime.py | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 maths/twin_prime.py diff --git a/maths/twin_prime.py b/maths/twin_prime.py new file mode 100644 index 000000000..e6ac0cc78 --- /dev/null +++ b/maths/twin_prime.py @@ -0,0 +1,45 @@ +""" +== Twin Prime == +A number n+2 is said to be a Twin prime of number n if +both n and n+2 are prime. + +Examples of Twin pairs: (3, 5), (5, 7), (11, 13), (17, 19), (29, 31), (41, 43), ... +https://en.wikipedia.org/wiki/Twin_prime +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +from maths.prime_check import is_prime + + +def twin_prime(number: int) -> int: + """ + # doctest: +NORMALIZE_WHITESPACE + This functions takes an integer number as input. + returns n+2 if n and n+2 are prime numbers and -1 otherwise. + >>> twin_prime(3) + 5 + >>> twin_prime(4) + -1 + >>> twin_prime(5) + 7 + >>> twin_prime(17) + 19 + >>> twin_prime(0) + -1 + >>> twin_prime(6.0) + Traceback (most recent call last): + ... 
+ TypeError: Input value of [number=6.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if is_prime(number) and is_prime(number + 2): + return number + 2 + else: + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 316e71b03448b6adb8a32d96cb4d6488ee7b7787 Mon Sep 17 00:00:00 2001 From: Gayathri Krishnan Date: Tue, 15 Nov 2022 19:07:59 +0530 Subject: [PATCH 216/368] Additional intro blockchain doc (#7974) * A deeper introduction to blockchain technology * Update README.md Rectified errors as image was not visible * Delete img1.jpg Deleting the image as it is not getting accepted in PR merge * Delete img2.jpg Deleting the image as it is not getting accepted in PR merge * Update README.md Removed all image s in the document * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Commited the suggested changes and submitting for review. * Update README.md Changed a sentence that needed grammatical correction. * Update README.md Added the changes suggested by review panel Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- blockchain/README.md | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/blockchain/README.md b/blockchain/README.md index 5ae7f95ec..b5fab7b36 100644 --- a/blockchain/README.md +++ b/blockchain/README.md @@ -1,7 +1,44 @@ # Blockchain -A Blockchain is a type of distributed ledger technology (DLT) that consists of growing list of records, called blocks, that are securely linked together using cryptography. +A Blockchain is a type of **distributed ledger** technology (DLT) that consists of growing list of records, called **blocks**, that are securely linked together using **cryptography**. +Let's breakdown the terminologies in the above definition. We find below terminologies, + +- Digital Ledger Technology (DLT) +- Blocks +- Cryptography + +## Digital Ledger Technology + + It is otherwise called as distributed ledger technology. It is simply the opposite of centralized database. Firstly, what is a **ledger**? A ledger is a book or collection of accounts that records account transactions. + + *Why is Blockchain addressed as digital ledger if it can record more than account transactions? What other transaction details and information can it hold?* + +Digital Ledger Technology is just a ledger which is shared among multiple nodes. This way there exist no need for central authority to hold the info. Okay, how is it differentiated from central database and what are their benefits? + +There is an organization which has 4 branches whose data are stored in a centralized database. So even if one branch needs any data from ledger they need an approval from database in charge. And if one hacks the central database he gets to tamper and control all the data. + +Now lets assume every branch has a copy of the ledger and then once anything is added to the ledger by anyone branch it is gonna automatically reflect in all other ledgers available in other branch. This is done using Peer-to-peer network. + +So this means even if information is tampered in one branch we can find out. If one branch is hacked we can be alerted ,so we can safeguard other branches. Now, assume these branches as computers or nodes and the ledger is a transaction record or digital receipt. 
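+
+The cryptographic linking mentioned in the definition above is what makes such tampering easy to detect. The toy Python sketch below is purely illustrative (the function names are invented for this example, and this is not how a production blockchain is implemented); it shows why a tampered copy of the ledger can no longer agree with the honest copies:
+
+```python
+import hashlib
+
+
+def block_hash(record: str, previous_hash: str) -> str:
+    # Each block commits to its own record and to the hash of the block before it.
+    return hashlib.sha256((previous_hash + record).encode()).hexdigest()
+
+
+def ledger_hashes(records: list[str]) -> list[str]:
+    hashes, previous = [], "0" * 64  # placeholder hash for the first (genesis) block
+    for record in records:
+        previous = block_hash(record, previous)
+        hashes.append(previous)
+    return hashes
+
+
+honest = ledger_hashes(["pay A 5", "pay B 3", "pay C 7"])
+tampered = ledger_hashes(["pay A 5", "pay B 300", "pay C 7"])
+print(honest[-1] == tampered[-1])  # False: one altered record changes every later hash
+```
+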
If one ledger is hacked in a node we can detect since there will be a mismatch in comparison with other node information. So this is the concept of Digital Ledger Technology. + +*Is it required for all nodes to have access to all information in other nodes? Wouldn't this require enormous storage space in each node?* + +## Blocks + +In short a block is nothing but collections of records with a labelled header. These are connected cryptographically. Once a new block is added to a chain, the previous block is connected, more precisely said as locked and hence, will remain unaltered. We can understand this concept once we get a clear understanding of working mechanism of blockchain. + +## Cryptography + +It is the practice and study of secure communication techniques in the midst of adversarial behavior. More broadly, cryptography is the creation and analysis of protocols that prevent third parties or the general public from accessing private messages. + +*Which cryptography technology is most widely used in blockchain and why?* + +So, in general, blockchain technology is a distributed record holder which records the information about ownership of an asset. To define precisely, +> Blockchain is a distributed, immutable ledger that makes it easier to record transactions and track assets in a corporate network. +An asset could be tangible (such as a house, car, cash, or land) or intangible (such as a business) (intellectual property, patents, copyrights, branding). A blockchain network can track and sell almost anything of value, lowering risk and costs for everyone involved. + +So this is all about introduction to blockchain technology. To learn more about the topic refer below links.... * * * From 3bf86b91e7d438eb2b9ecbab68060c007d270332 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Tue, 15 Nov 2022 19:25:14 +0530 Subject: [PATCH 217/368] fix: no implicit optional (#7984) --- data_structures/binary_tree/fenwick_tree.py | 2 +- fractals/julia_sets.py | 2 +- linear_algebra/src/schur_complement.py | 2 +- machine_learning/linear_discriminant_analysis.py | 2 +- project_euler/problem_074/sol1.py | 2 +- sorts/strand_sort.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py index babd75ac4..88b0873a1 100644 --- a/data_structures/binary_tree/fenwick_tree.py +++ b/data_structures/binary_tree/fenwick_tree.py @@ -8,7 +8,7 @@ class FenwickTree: More info: https://en.wikipedia.org/wiki/Fenwick_tree """ - def __init__(self, arr: list[int] = None, size: int = None) -> None: + def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None: """ Constructor for the Fenwick tree diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 35fdc45d0..77d1d7c04 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -89,7 +89,7 @@ def iterate_function( function_params: Any, nb_iterations: int, z_0: numpy.ndarray, - infinity: float = None, + infinity: float | None = None, ) -> numpy.ndarray: """ Iterate the function "eval_function" exactly nb_iterations times. 
diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index f3cb736d9..3a5f4443a 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -7,7 +7,7 @@ def schur_complement( mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, - pseudo_inv: np.ndarray = None, + pseudo_inv: np.ndarray | None = None, ) -> np.ndarray: """ Schur complement of a symmetric matrix X given as a 2x2 block matrix diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 9ef42ed19..f4fb5ba76 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -256,7 +256,7 @@ def valid_input( input_msg: str, err_msg: str, condition: Callable[[num], bool] = lambda x: True, - default: str = None, + default: str | None = None, ) -> num: """ Ask for user value and validate that it fulfill a condition. diff --git a/project_euler/problem_074/sol1.py b/project_euler/problem_074/sol1.py index a40a62903..a257d4d94 100644 --- a/project_euler/problem_074/sol1.py +++ b/project_euler/problem_074/sol1.py @@ -71,7 +71,7 @@ def sum_digit_factorials(n: int) -> int: return ret -def chain_length(n: int, previous: set = None) -> int: +def chain_length(n: int, previous: set | None = None) -> int: """ Calculate the length of the chain of non-repeating terms starting with n. Previous is a set containing the previous member of the chain. diff --git a/sorts/strand_sort.py b/sorts/strand_sort.py index a89135a06..4cadd3961 100644 --- a/sorts/strand_sort.py +++ b/sorts/strand_sort.py @@ -1,7 +1,7 @@ import operator -def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list: +def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list: """ Strand sort implementation source: https://en.wikipedia.org/wiki/Strand_sort From 4ce8ad9ce6e554360089e77e088df6dd8b4a69df Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Tue, 15 Nov 2022 22:58:49 +0530 Subject: [PATCH 218/368] algorithm: Liouville lambda function (#7986) * feat: Add liouville lambda function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Refactor if-else block * refactor: Refactor error handling for -ve numbers * refactor: Remove # doctest: +NORMALIZE_WHITESPACE Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/liouville_lambda.py | 45 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 maths/liouville_lambda.py diff --git a/maths/liouville_lambda.py b/maths/liouville_lambda.py new file mode 100644 index 000000000..5993efa42 --- /dev/null +++ b/maths/liouville_lambda.py @@ -0,0 +1,45 @@ +""" +== Liouville Lambda Function == +The Liouville Lambda function, denoted by λ(n) +and λ(n) is 1 if n is the product of an even number of prime numbers, +and -1 if it is the product of an odd number of primes. + +https://en.wikipedia.org/wiki/Liouville_function +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +from maths.prime_factors import prime_factors + + +def liouville_lambda(number: int) -> int: + """ + This functions takes an integer number as input. + returns 1 if n has even number of prime factors and -1 otherwise. 
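+    For example, 10 = 2 * 5 has an even number of prime factors, so λ(10) = 1,
+    while 11 is prime (a single factor), so λ(11) = -1.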
+ >>> liouville_lambda(10) + 1 + >>> liouville_lambda(11) + -1 + >>> liouville_lambda(0) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> liouville_lambda(-1) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> liouville_lambda(11.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=11.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 1: + raise ValueError("Input must be a positive integer") + return -1 if len(prime_factors(number)) % 2 else 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 8bfd1c844b388cb78b03952c7da28f07f3838fd1 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Tue, 15 Nov 2022 22:59:14 +0530 Subject: [PATCH 219/368] fix: mypy 0.991 issues (#7988) * fix: mypy 0.991 issues * fix: invalid condition for base case --- conversions/decimal_to_any.py | 5 +- data_structures/linked_list/__init__.py | 2 +- matrix/matrix_class.py | 732 ++++++++++++------------ 3 files changed, 370 insertions(+), 369 deletions(-) diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index 11a2af294..c9c2e9a5f 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -76,8 +76,9 @@ def decimal_to_any(num: int, base: int) -> str: div, mod = divmod(num, base) if base >= 11 and 9 < mod < 36: actual_value = ALPHABET_VALUES[str(mod)] - mod = actual_value - new_value += str(mod) + else: + actual_value = str(mod) + new_value += actual_value div = num // base num = div if div == 0: diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 85660a6d2..56b0e51ba 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -49,7 +49,7 @@ class LinkedList: >>> print(linked_list) 9 --> 14 --> 23 """ - if not self.is_empty: + if self.is_empty(): return "" else: iterate = self.head diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 0c3078fe6..a73e8b92a 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -1,366 +1,366 @@ -# An OOP approach to representing and manipulating matrices - -from __future__ import annotations - - -class Matrix: - """ - Matrix object generated from a 2D array where each element is an array representing - a row. - Rows can contain type int or float. - Common operations and information available. - >>> rows = [ - ... [1, 2, 3], - ... [4, 5, 6], - ... [7, 8, 9] - ... ] - >>> matrix = Matrix(rows) - >>> print(matrix) - [[1. 2. 3.] - [4. 5. 6.] - [7. 8. 9.]] - - Matrix rows and columns are available as 2D arrays - >>> matrix.rows - [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - >>> matrix.columns() - [[1, 4, 7], [2, 5, 8], [3, 6, 9]] - - Order is returned as a tuple - >>> matrix.order - (3, 3) - - Squareness and invertability are represented as bool - >>> matrix.is_square - True - >>> matrix.is_invertable() - False - - Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be - a Matrix or Nonetype - >>> print(matrix.identity()) - [[1. 0. 0.] - [0. 1. 0.] - [0. 0. 1.]] - >>> print(matrix.minors()) - [[-3. -6. -3.] - [-6. -12. -6.] - [-3. -6. -3.]] - >>> print(matrix.cofactors()) - [[-3. 6. -3.] - [6. -12. 6.] - [-3. 6. -3.]] - >>> # won't be apparent due to the nature of the cofactor matrix - >>> print(matrix.adjugate()) - [[-3. 6. -3.] - [6. -12. 6.] - [-3. 6. 
-3.]] - >>> matrix.inverse() - Traceback (most recent call last): - ... - TypeError: Only matrices with a non-zero determinant have an inverse - - Determinant is an int, float, or Nonetype - >>> matrix.determinant() - 0 - - Negation, scalar multiplication, addition, subtraction, multiplication and - exponentiation are available and all return a Matrix - >>> print(-matrix) - [[-1. -2. -3.] - [-4. -5. -6.] - [-7. -8. -9.]] - >>> matrix2 = matrix * 3 - >>> print(matrix2) - [[3. 6. 9.] - [12. 15. 18.] - [21. 24. 27.]] - >>> print(matrix + matrix2) - [[4. 8. 12.] - [16. 20. 24.] - [28. 32. 36.]] - >>> print(matrix - matrix2) - [[-2. -4. -6.] - [-8. -10. -12.] - [-14. -16. -18.]] - >>> print(matrix ** 3) - [[468. 576. 684.] - [1062. 1305. 1548.] - [1656. 2034. 2412.]] - - Matrices can also be modified - >>> matrix.add_row([10, 11, 12]) - >>> print(matrix) - [[1. 2. 3.] - [4. 5. 6.] - [7. 8. 9.] - [10. 11. 12.]] - >>> matrix2.add_column([8, 16, 32]) - >>> print(matrix2) - [[3. 6. 9. 8.] - [12. 15. 18. 16.] - [21. 24. 27. 32.]] - >>> print(matrix * matrix2) - [[90. 108. 126. 136.] - [198. 243. 288. 304.] - [306. 378. 450. 472.] - [414. 513. 612. 640.]] - """ - - def __init__(self, rows: list[list[int]]): - error = TypeError( - "Matrices must be formed from a list of zero or more lists containing at " - "least one and the same number of values, each of which must be of type " - "int or float." - ) - if len(rows) != 0: - cols = len(rows[0]) - if cols == 0: - raise error - for row in rows: - if len(row) != cols: - raise error - for value in row: - if not isinstance(value, (int, float)): - raise error - self.rows = rows - else: - self.rows = [] - - # MATRIX INFORMATION - def columns(self) -> list[list[int]]: - return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] - - @property - def num_rows(self) -> int: - return len(self.rows) - - @property - def num_columns(self) -> int: - return len(self.rows[0]) - - @property - def order(self) -> tuple[int, int]: - return (self.num_rows, self.num_columns) - - @property - def is_square(self) -> bool: - return self.order[0] == self.order[1] - - def identity(self) -> Matrix: - values = [ - [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] - for row_num in range(self.num_rows) - ] - return Matrix(values) - - def determinant(self) -> int: - if not self.is_square: - return 0 - if self.order == (0, 0): - return 1 - if self.order == (1, 1): - return int(self.rows[0][0]) - if self.order == (2, 2): - return int( - (self.rows[0][0] * self.rows[1][1]) - - (self.rows[0][1] * self.rows[1][0]) - ) - else: - return sum( - self.rows[0][column] * self.cofactors().rows[0][column] - for column in range(self.num_columns) - ) - - def is_invertable(self) -> bool: - return bool(self.determinant()) - - def get_minor(self, row: int, column: int) -> int: - values = [ - [ - self.rows[other_row][other_column] - for other_column in range(self.num_columns) - if other_column != column - ] - for other_row in range(self.num_rows) - if other_row != row - ] - return Matrix(values).determinant() - - def get_cofactor(self, row: int, column: int) -> int: - if (row + column) % 2 == 0: - return self.get_minor(row, column) - return -1 * self.get_minor(row, column) - - def minors(self) -> Matrix: - return Matrix( - [ - [self.get_minor(row, column) for column in range(self.num_columns)] - for row in range(self.num_rows) - ] - ) - - def cofactors(self) -> Matrix: - return Matrix( - [ - [ - self.minors().rows[row][column] - if (row + column) % 2 == 0 - else 
self.minors().rows[row][column] * -1 - for column in range(self.minors().num_columns) - ] - for row in range(self.minors().num_rows) - ] - ) - - def adjugate(self) -> Matrix: - values = [ - [self.cofactors().rows[column][row] for column in range(self.num_columns)] - for row in range(self.num_rows) - ] - return Matrix(values) - - def inverse(self) -> Matrix: - determinant = self.determinant() - if not determinant: - raise TypeError("Only matrices with a non-zero determinant have an inverse") - return self.adjugate() * (1 / determinant) - - def __repr__(self) -> str: - return str(self.rows) - - def __str__(self) -> str: - if self.num_rows == 0: - return "[]" - if self.num_rows == 1: - return "[[" + ". ".join(str(self.rows[0])) + "]]" - return ( - "[" - + "\n ".join( - [ - "[" + ". ".join([str(value) for value in row]) + ".]" - for row in self.rows - ] - ) - + "]" - ) - - # MATRIX MANIPULATION - def add_row(self, row: list[int], position: int | None = None) -> None: - type_error = TypeError("Row must be a list containing all ints and/or floats") - if not isinstance(row, list): - raise type_error - for value in row: - if not isinstance(value, (int, float)): - raise type_error - if len(row) != self.num_columns: - raise ValueError( - "Row must be equal in length to the other rows in the matrix" - ) - if position is None: - self.rows.append(row) - else: - self.rows = self.rows[0:position] + [row] + self.rows[position:] - - def add_column(self, column: list[int], position: int | None = None) -> None: - type_error = TypeError( - "Column must be a list containing all ints and/or floats" - ) - if not isinstance(column, list): - raise type_error - for value in column: - if not isinstance(value, (int, float)): - raise type_error - if len(column) != self.num_rows: - raise ValueError( - "Column must be equal in length to the other columns in the matrix" - ) - if position is None: - self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)] - else: - self.rows = [ - self.rows[i][0:position] + [column[i]] + self.rows[i][position:] - for i in range(self.num_rows) - ] - - # MATRIX OPERATIONS - def __eq__(self, other: object) -> bool: - if not isinstance(other, Matrix): - return NotImplemented - return self.rows == other.rows - - def __ne__(self, other: object) -> bool: - return not self == other - - def __neg__(self) -> Matrix: - return self * -1 - - def __add__(self, other: Matrix) -> Matrix: - if self.order != other.order: - raise ValueError("Addition requires matrices of the same order") - return Matrix( - [ - [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] - for i in range(self.num_rows) - ] - ) - - def __sub__(self, other: Matrix) -> Matrix: - if self.order != other.order: - raise ValueError("Subtraction requires matrices of the same order") - return Matrix( - [ - [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] - for i in range(self.num_rows) - ] - ) - - def __mul__(self, other: Matrix | int | float) -> Matrix: - if isinstance(other, (int, float)): - return Matrix( - [[int(element * other) for element in row] for row in self.rows] - ) - elif isinstance(other, Matrix): - if self.num_columns != other.num_rows: - raise ValueError( - "The number of columns in the first matrix must " - "be equal to the number of rows in the second" - ) - return Matrix( - [ - [Matrix.dot_product(row, column) for column in other.columns()] - for row in self.rows - ] - ) - else: - raise TypeError( - "A Matrix can only be multiplied by an int, float, or another matrix" - ) 
- - def __pow__(self, other: int) -> Matrix: - if not isinstance(other, int): - raise TypeError("A Matrix can only be raised to the power of an int") - if not self.is_square: - raise ValueError("Only square matrices can be raised to a power") - if other == 0: - return self.identity() - if other < 0: - if self.is_invertable: - return self.inverse() ** (-other) - raise ValueError( - "Only invertable matrices can be raised to a negative power" - ) - result = self - for _ in range(other - 1): - result *= self - return result - - @classmethod - def dot_product(cls, row: list[int], column: list[int]) -> int: - return sum(row[i] * column[i] for i in range(len(row))) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() +# An OOP approach to representing and manipulating matrices + +from __future__ import annotations + + +class Matrix: + """ + Matrix object generated from a 2D array where each element is an array representing + a row. + Rows can contain type int or float. + Common operations and information available. + >>> rows = [ + ... [1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9] + ... ] + >>> matrix = Matrix(rows) + >>> print(matrix) + [[1. 2. 3.] + [4. 5. 6.] + [7. 8. 9.]] + + Matrix rows and columns are available as 2D arrays + >>> matrix.rows + [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + >>> matrix.columns() + [[1, 4, 7], [2, 5, 8], [3, 6, 9]] + + Order is returned as a tuple + >>> matrix.order + (3, 3) + + Squareness and invertability are represented as bool + >>> matrix.is_square + True + >>> matrix.is_invertable() + False + + Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be + a Matrix or Nonetype + >>> print(matrix.identity()) + [[1. 0. 0.] + [0. 1. 0.] + [0. 0. 1.]] + >>> print(matrix.minors()) + [[-3. -6. -3.] + [-6. -12. -6.] + [-3. -6. -3.]] + >>> print(matrix.cofactors()) + [[-3. 6. -3.] + [6. -12. 6.] + [-3. 6. -3.]] + >>> # won't be apparent due to the nature of the cofactor matrix + >>> print(matrix.adjugate()) + [[-3. 6. -3.] + [6. -12. 6.] + [-3. 6. -3.]] + >>> matrix.inverse() + Traceback (most recent call last): + ... + TypeError: Only matrices with a non-zero determinant have an inverse + + Determinant is an int, float, or Nonetype + >>> matrix.determinant() + 0 + + Negation, scalar multiplication, addition, subtraction, multiplication and + exponentiation are available and all return a Matrix + >>> print(-matrix) + [[-1. -2. -3.] + [-4. -5. -6.] + [-7. -8. -9.]] + >>> matrix2 = matrix * 3 + >>> print(matrix2) + [[3. 6. 9.] + [12. 15. 18.] + [21. 24. 27.]] + >>> print(matrix + matrix2) + [[4. 8. 12.] + [16. 20. 24.] + [28. 32. 36.]] + >>> print(matrix - matrix2) + [[-2. -4. -6.] + [-8. -10. -12.] + [-14. -16. -18.]] + >>> print(matrix ** 3) + [[468. 576. 684.] + [1062. 1305. 1548.] + [1656. 2034. 2412.]] + + Matrices can also be modified + >>> matrix.add_row([10, 11, 12]) + >>> print(matrix) + [[1. 2. 3.] + [4. 5. 6.] + [7. 8. 9.] + [10. 11. 12.]] + >>> matrix2.add_column([8, 16, 32]) + >>> print(matrix2) + [[3. 6. 9. 8.] + [12. 15. 18. 16.] + [21. 24. 27. 32.]] + >>> print(matrix * matrix2) + [[90. 108. 126. 136.] + [198. 243. 288. 304.] + [306. 378. 450. 472.] + [414. 513. 612. 640.]] + """ + + def __init__(self, rows: list[list[int]]): + error = TypeError( + "Matrices must be formed from a list of zero or more lists containing at " + "least one and the same number of values, each of which must be of type " + "int or float." 
+ ) + if len(rows) != 0: + cols = len(rows[0]) + if cols == 0: + raise error + for row in rows: + if len(row) != cols: + raise error + for value in row: + if not isinstance(value, (int, float)): + raise error + self.rows = rows + else: + self.rows = [] + + # MATRIX INFORMATION + def columns(self) -> list[list[int]]: + return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] + + @property + def num_rows(self) -> int: + return len(self.rows) + + @property + def num_columns(self) -> int: + return len(self.rows[0]) + + @property + def order(self) -> tuple[int, int]: + return (self.num_rows, self.num_columns) + + @property + def is_square(self) -> bool: + return self.order[0] == self.order[1] + + def identity(self) -> Matrix: + values = [ + [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] + for row_num in range(self.num_rows) + ] + return Matrix(values) + + def determinant(self) -> int: + if not self.is_square: + return 0 + if self.order == (0, 0): + return 1 + if self.order == (1, 1): + return int(self.rows[0][0]) + if self.order == (2, 2): + return int( + (self.rows[0][0] * self.rows[1][1]) + - (self.rows[0][1] * self.rows[1][0]) + ) + else: + return sum( + self.rows[0][column] * self.cofactors().rows[0][column] + for column in range(self.num_columns) + ) + + def is_invertable(self) -> bool: + return bool(self.determinant()) + + def get_minor(self, row: int, column: int) -> int: + values = [ + [ + self.rows[other_row][other_column] + for other_column in range(self.num_columns) + if other_column != column + ] + for other_row in range(self.num_rows) + if other_row != row + ] + return Matrix(values).determinant() + + def get_cofactor(self, row: int, column: int) -> int: + if (row + column) % 2 == 0: + return self.get_minor(row, column) + return -1 * self.get_minor(row, column) + + def minors(self) -> Matrix: + return Matrix( + [ + [self.get_minor(row, column) for column in range(self.num_columns)] + for row in range(self.num_rows) + ] + ) + + def cofactors(self) -> Matrix: + return Matrix( + [ + [ + self.minors().rows[row][column] + if (row + column) % 2 == 0 + else self.minors().rows[row][column] * -1 + for column in range(self.minors().num_columns) + ] + for row in range(self.minors().num_rows) + ] + ) + + def adjugate(self) -> Matrix: + values = [ + [self.cofactors().rows[column][row] for column in range(self.num_columns)] + for row in range(self.num_rows) + ] + return Matrix(values) + + def inverse(self) -> Matrix: + determinant = self.determinant() + if not determinant: + raise TypeError("Only matrices with a non-zero determinant have an inverse") + return self.adjugate() * (1 / determinant) + + def __repr__(self) -> str: + return str(self.rows) + + def __str__(self) -> str: + if self.num_rows == 0: + return "[]" + if self.num_rows == 1: + return "[[" + ". ".join(str(self.rows[0])) + "]]" + return ( + "[" + + "\n ".join( + [ + "[" + ". 
".join([str(value) for value in row]) + ".]" + for row in self.rows + ] + ) + + "]" + ) + + # MATRIX MANIPULATION + def add_row(self, row: list[int], position: int | None = None) -> None: + type_error = TypeError("Row must be a list containing all ints and/or floats") + if not isinstance(row, list): + raise type_error + for value in row: + if not isinstance(value, (int, float)): + raise type_error + if len(row) != self.num_columns: + raise ValueError( + "Row must be equal in length to the other rows in the matrix" + ) + if position is None: + self.rows.append(row) + else: + self.rows = self.rows[0:position] + [row] + self.rows[position:] + + def add_column(self, column: list[int], position: int | None = None) -> None: + type_error = TypeError( + "Column must be a list containing all ints and/or floats" + ) + if not isinstance(column, list): + raise type_error + for value in column: + if not isinstance(value, (int, float)): + raise type_error + if len(column) != self.num_rows: + raise ValueError( + "Column must be equal in length to the other columns in the matrix" + ) + if position is None: + self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)] + else: + self.rows = [ + self.rows[i][0:position] + [column[i]] + self.rows[i][position:] + for i in range(self.num_rows) + ] + + # MATRIX OPERATIONS + def __eq__(self, other: object) -> bool: + if not isinstance(other, Matrix): + return NotImplemented + return self.rows == other.rows + + def __ne__(self, other: object) -> bool: + return not self == other + + def __neg__(self) -> Matrix: + return self * -1 + + def __add__(self, other: Matrix) -> Matrix: + if self.order != other.order: + raise ValueError("Addition requires matrices of the same order") + return Matrix( + [ + [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] + for i in range(self.num_rows) + ] + ) + + def __sub__(self, other: Matrix) -> Matrix: + if self.order != other.order: + raise ValueError("Subtraction requires matrices of the same order") + return Matrix( + [ + [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] + for i in range(self.num_rows) + ] + ) + + def __mul__(self, other: Matrix | int | float) -> Matrix: + if isinstance(other, (int, float)): + return Matrix( + [[int(element * other) for element in row] for row in self.rows] + ) + elif isinstance(other, Matrix): + if self.num_columns != other.num_rows: + raise ValueError( + "The number of columns in the first matrix must " + "be equal to the number of rows in the second" + ) + return Matrix( + [ + [Matrix.dot_product(row, column) for column in other.columns()] + for row in self.rows + ] + ) + else: + raise TypeError( + "A Matrix can only be multiplied by an int, float, or another matrix" + ) + + def __pow__(self, other: int) -> Matrix: + if not isinstance(other, int): + raise TypeError("A Matrix can only be raised to the power of an int") + if not self.is_square: + raise ValueError("Only square matrices can be raised to a power") + if other == 0: + return self.identity() + if other < 0: + if self.is_invertable(): + return self.inverse() ** (-other) + raise ValueError( + "Only invertable matrices can be raised to a negative power" + ) + result = self + for _ in range(other - 1): + result *= self + return result + + @classmethod + def dot_product(cls, row: list[int], column: list[int]) -> int: + return sum(row[i] * column[i] for i in range(len(row))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0684ccdd69c62d5dc816bdc488bc079d06b9685a Mon 
Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 15 Nov 2022 18:34:17 +0100 Subject: [PATCH 220/368] [pre-commit.ci] pre-commit autoupdate (#7983) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.2.0 → v3.2.2](https://github.com/asottile/pyupgrade/compare/v3.2.0...v3.2.2) - [github.com/pre-commit/mirrors-mypy: v0.982 → v0.990](https://github.com/pre-commit/mirrors-mypy/compare/v0.982...v0.990) * updating DIRECTORY.md * Update .pre-commit-config.yaml * Downgrade to mypy v0.991 --> v0.990 * mpyp v0.991 * Update DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a0ea03b9b..324a021ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,11 +27,11 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.2.0 + rev: v3.2.2 hooks: - id: pyupgrade args: - - --py310-plus + - --py311-plus - repo: https://github.com/PyCQA/flake8 rev: 5.0.4 @@ -52,7 +52,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.982 + rev: v0.991 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 74243cd06..e2fffec57 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -509,6 +509,7 @@ * [Area](maths/area.py) * [Area Under Curve](maths/area_under_curve.py) * [Armstrong Numbers](maths/armstrong_numbers.py) + * [Automorphic Number](maths/automorphic_number.py) * [Average Absolute Deviation](maths/average_absolute_deviation.py) * [Average Mean](maths/average_mean.py) * [Average Median](maths/average_median.py) @@ -603,6 +604,7 @@ * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) * [Primelib](maths/primelib.py) * [Print Multiplication Table](maths/print_multiplication_table.py) + * [Pronic Number](maths/pronic_number.py) * [Proth Number](maths/proth_number.py) * [Pythagoras](maths/pythagoras.py) * [Qr Decomposition](maths/qr_decomposition.py) @@ -638,6 +640,7 @@ * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) * [Triplet Sum](maths/triplet_sum.py) + * [Twin Prime](maths/twin_prime.py) * [Two Pointer](maths/two_pointer.py) * [Two Sum](maths/two_sum.py) * [Ugly Numbers](maths/ugly_numbers.py) From b33ea81a7437eaf7d048d92a9b75330c9d9e165e Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Fri, 18 Nov 2022 13:48:47 +0530 Subject: [PATCH 221/368] algorithm: Add juggler sequence (#7985) * feat: Add juggler sequence * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Remove temp variable * refactor: Change error type for negative numbers * refactor: Remove # doctest: +NORMALIZE_WHITESPACE * refactor: Remove int typecasting * test: Add unit tests for n=10 and n=25 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/juggler_sequence.py | 61 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 
insertions(+) create mode 100644 maths/juggler_sequence.py diff --git a/maths/juggler_sequence.py b/maths/juggler_sequence.py new file mode 100644 index 000000000..9daba8bc0 --- /dev/null +++ b/maths/juggler_sequence.py @@ -0,0 +1,61 @@ +""" +== Juggler Sequence == +Juggler sequence start with any positive integer n. The next term is +obtained as follows: + If n term is even, the next term is floor value of square root of n . + If n is odd, the next term is floor value of 3 time the square root of n. + +https://en.wikipedia.org/wiki/Juggler_sequence +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +import math + + +def juggler_sequence(number: int) -> list[int]: + """ + >>> juggler_sequence(0) + Traceback (most recent call last): + ... + ValueError: Input value of [number=0] must be a positive integer + >>> juggler_sequence(1) + [1] + >>> juggler_sequence(2) + [2, 1] + >>> juggler_sequence(3) + [3, 5, 11, 36, 6, 2, 1] + >>> juggler_sequence(5) + [5, 11, 36, 6, 2, 1] + >>> juggler_sequence(10) + [10, 3, 5, 11, 36, 6, 2, 1] + >>> juggler_sequence(25) + [25, 125, 1397, 52214, 228, 15, 58, 7, 18, 4, 2, 1] + >>> juggler_sequence(6.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=6.0] must be an integer + >>> juggler_sequence(-1) + Traceback (most recent call last): + ... + ValueError: Input value of [number=-1] must be a positive integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 1: + raise ValueError(f"Input value of [number={number}] must be a positive integer") + sequence = [number] + while number != 1: + if number % 2 == 0: + number = math.floor(math.sqrt(number)) + else: + number = math.floor( + math.sqrt(number) * math.sqrt(number) * math.sqrt(number) + ) + sequence.append(number) + return sequence + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f01a1af1df28ba53fc4727ea0bb703b5744100a7 Mon Sep 17 00:00:00 2001 From: Swayam <74960567+practice404@users.noreply.github.com> Date: Sun, 20 Nov 2022 16:25:58 +0530 Subject: [PATCH 222/368] Bi directional dijkstra (#7982) * Added Bi-Directional Dijkstra * Added Bi-Directional Dijkstra * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added doctest and type hints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename Bi_directional_Dijkstra.py to bi_directional_dijkstra.py * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/bi_directional_dijkstra.py | 130 ++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 graphs/bi_directional_dijkstra.py diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py new file mode 100644 index 000000000..fc53e2f0d --- /dev/null +++ b/graphs/bi_directional_dijkstra.py @@ -0,0 +1,130 @@ +""" +Bi-directional Dijkstra's algorithm. + +A bi-directional approach is an efficient and +less time consuming optimization for Dijkstra's +searching algorithm + +Reference: shorturl.at/exHM7 +""" + +# Author: Swayam Singh (https://github.com/practice404) + + +from queue import PriorityQueue +from typing import Any + +import numpy as np + + +def bidirectional_dij( + source: str, destination: str, graph_forward: dict, graph_backward: dict +) -> int: + """ + Bi-directional Dijkstra's algorithm. + + Returns: + shortest_path_distance (int): length of the shortest path. + + Warnings: + If the destination is not reachable, function returns -1 + + >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd) + 3 + """ + shortest_path_distance = -1 + + visited_forward = set() + visited_backward = set() + cst_fwd = {source: 0} + cst_bwd = {destination: 0} + parent_forward = {source: None} + parent_backward = {destination: None} + queue_forward: PriorityQueue[Any] = PriorityQueue() + queue_backward: PriorityQueue[Any] = PriorityQueue() + + shortest_distance = np.inf + + queue_forward.put((0, source)) + queue_backward.put((0, destination)) + + if source == destination: + return 0 + + while queue_forward and queue_backward: + while not queue_forward.empty(): + _, v_fwd = queue_forward.get() + + if v_fwd not in visited_forward: + break + else: + break + visited_forward.add(v_fwd) + + while not queue_backward.empty(): + _, v_bwd = queue_backward.get() + + if v_bwd not in visited_backward: + break + else: + break + visited_backward.add(v_bwd) + + # forward pass and relaxation + for nxt_fwd, d_forward in graph_forward[v_fwd]: + if nxt_fwd in visited_forward: + continue + old_cost_f = cst_fwd.get(nxt_fwd, np.inf) + new_cost_f = cst_fwd[v_fwd] + d_forward + if new_cost_f < old_cost_f: + queue_forward.put((new_cost_f, nxt_fwd)) + cst_fwd[nxt_fwd] = new_cost_f + parent_forward[nxt_fwd] = v_fwd + if nxt_fwd in visited_backward: + if cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] < shortest_distance: + shortest_distance = cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] + + # backward pass and relaxation + for nxt_bwd, d_backward in graph_backward[v_bwd]: + if nxt_bwd in visited_backward: + continue + old_cost_b = cst_bwd.get(nxt_bwd, np.inf) + new_cost_b = cst_bwd[v_bwd] + d_backward + if new_cost_b < old_cost_b: + queue_backward.put((new_cost_b, nxt_bwd)) + cst_bwd[nxt_bwd] = new_cost_b + parent_backward[nxt_bwd] = v_bwd + + if nxt_bwd in visited_forward: + if cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] < shortest_distance: + shortest_distance = cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] + + if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: + break + + if 
shortest_distance != np.inf: + shortest_path_distance = shortest_distance + return shortest_path_distance + + +graph_fwd = { + "B": [["C", 1]], + "C": [["D", 1]], + "D": [["F", 1]], + "E": [["B", 1], ["G", 2]], + "F": [], + "G": [["F", 1]], +} +graph_bwd = { + "B": [["E", 1]], + "C": [["B", 1]], + "D": [["C", 1]], + "F": [["D", 1], ["G", 1]], + "E": [[None, np.inf]], + "G": [["E", 2]], +} + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a25c53e8b0cc73ff718ec406ac04cca0c2ddbb02 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Sun, 20 Nov 2022 14:59:25 +0400 Subject: [PATCH 223/368] Fix argument validation for count_1s_brian_kernighan_method (#7994) * Fix argument validation for count_1s_brian_kernighan_method * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + .../count_1s_brian_kernighan_method.py | 15 +++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index e2fffec57..83da4b76a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -571,6 +571,7 @@ * [Largest Subarray Sum](maths/largest_subarray_sum.py) * [Least Common Multiple](maths/least_common_multiple.py) * [Line Length](maths/line_length.py) + * [Liouville Lambda](maths/liouville_lambda.py) * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) * [Maclaurin Series](maths/maclaurin_series.py) diff --git a/bit_manipulation/count_1s_brian_kernighan_method.py b/bit_manipulation/count_1s_brian_kernighan_method.py index e6d6d6534..2ed81b09d 100644 --- a/bit_manipulation/count_1s_brian_kernighan_method.py +++ b/bit_manipulation/count_1s_brian_kernighan_method.py @@ -17,16 +17,19 @@ def get_1s_count(number: int) -> int: >>> get_1s_count(-1) Traceback (most recent call last): ... - ValueError: the value of input must be positive + ValueError: Input must be a non-negative integer >>> get_1s_count(0.8) Traceback (most recent call last): ... - TypeError: Input value must be an 'int' type + ValueError: Input must be a non-negative integer + >>> get_1s_count("25") + Traceback (most recent call last): + ... 
+ ValueError: Input must be a non-negative integer """ - if number < 0: - raise ValueError("the value of input must be positive") - elif isinstance(number, float): - raise TypeError("Input value must be an 'int' type") + if not isinstance(number, int) or number < 0: + raise ValueError("Input must be a non-negative integer") + count = 0 while number: # This way we arrive at next set bit (next 1) instead of looping From f32d611689dc72bda67f1c4636ab1599c60d27a4 Mon Sep 17 00:00:00 2001 From: Mark Mayo Date: Mon, 21 Nov 2022 00:00:27 +1300 Subject: [PATCH 224/368] clean of unnecessary checks, imports, calls (#7993) --- backtracking/rat_in_maze.py | 4 ++-- boolean_algebra/not_gate.py | 2 +- cellular_automata/nagel_schrekenberg.py | 3 +-- ciphers/mixed_keyword_cypher.py | 4 ++-- compression/huffman.py | 2 +- data_structures/heap/min_heap.py | 2 +- .../test_digital_image_processing.py | 2 +- dynamic_programming/fizz_buzz.py | 5 ++--- dynamic_programming/max_sub_array.py | 3 +-- graphs/directed_and_undirected_(weighted)_graph.py | 8 ++++---- graphs/multi_heuristic_astar.py | 3 ++- linear_algebra/src/lib.py | 2 +- machine_learning/sequential_minimum_optimization.py | 10 ++-------- maths/find_min.py | 3 +-- maths/kadanes.py | 6 ++---- maths/largest_subarray_sum.py | 6 ++---- maths/series/geometric_series.py | 2 +- networking_flow/ford_fulkerson.py | 2 +- networking_flow/minimum_cut.py | 2 +- other/password.py | 10 +++------- project_euler/problem_025/sol1.py | 2 +- project_euler/problem_036/sol1.py | 2 +- quantum/q_fourier_transform.py | 4 ++-- quantum/q_full_adder.py | 6 +++++- quantum/superdense_coding.py | 2 +- sorts/msd_radix_sort.py | 2 +- strings/aho_corasick.py | 2 +- 27 files changed, 44 insertions(+), 57 deletions(-) diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index 2860880db..7bde886dd 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -88,12 +88,12 @@ def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) solutions[i][j] = 1 return True - lower_flag = (not (i < 0)) and (not (j < 0)) # Check lower bounds + lower_flag = (not i < 0) and (not j < 0) # Check lower bounds upper_flag = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. 
- block_flag = (not (solutions[i][j])) and (not (maze[i][j])) + block_flag = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited solutions[i][j] = 1 diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py index b41da602d..eb85e9e44 100644 --- a/boolean_algebra/not_gate.py +++ b/boolean_algebra/not_gate.py @@ -34,4 +34,4 @@ def test_not_gate() -> None: if __name__ == "__main__": print(not_gate(0)) - print(not_gate(1)) + print(not_gate(1)) diff --git a/cellular_automata/nagel_schrekenberg.py b/cellular_automata/nagel_schrekenberg.py index be44761ec..3fd6afca0 100644 --- a/cellular_automata/nagel_schrekenberg.py +++ b/cellular_automata/nagel_schrekenberg.py @@ -45,8 +45,7 @@ def construct_highway( highway = [[-1] * number_of_cells] # Create a highway without any car i = 0 - if initial_speed < 0: - initial_speed = 0 + initial_speed = max(initial_speed, 0) while i < number_of_cells: highway[0][i] = ( randint(0, max_speed) if random_speed else initial_speed diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index f55c9c428..806004faa 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -42,7 +42,7 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: s = [] for _ in range(len_temp): s.append(temp[k]) - if not (k < 25): + if k >= 25: break k += 1 modalpha.append(s) @@ -52,7 +52,7 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: k = 0 for j in range(len_temp): for m in modalpha: - if not (len(m) - 1 >= j): + if not len(m) - 1 >= j: break d[alpha[k]] = m[j] if not k < 25: diff --git a/compression/huffman.py b/compression/huffman.py index f619ed82c..b337ac3ec 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -56,7 +56,7 @@ def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]: Recursively traverse the Huffman Tree to set each Letter's bitstring dictionary, and return the list of Letters """ - if type(root) is Letter: + if isinstance(root, Letter): root.bitstring[root.letter] = bitstring return [root] treenode: TreeNode = root # type: ignore diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 0403624f2..ecb187649 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -121,7 +121,7 @@ class MinHeap: self.sift_up(len(self.heap) - 1) def is_empty(self): - return True if len(self.heap) == 0 else False + return len(self.heap) == 0 def decrease_key(self, node, new_value): assert ( diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index fdcebfdad..c999464ce 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -10,7 +10,7 @@ from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs -from digital_image_processing.edge_detection import canny as canny +from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py index dd1d21b10..e77ab3de7 100644 --- 
a/dynamic_programming/fizz_buzz.py +++ b/dynamic_programming/fizz_buzz.py @@ -33,10 +33,9 @@ def fizz_buzz(number: int, iterations: int) -> str: ... ValueError: iterations must be defined as integers """ - - if not type(iterations) == int: + if not isinstance(iterations, int): raise ValueError("iterations must be defined as integers") - if not type(number) == int or not number >= 1: + if not isinstance(number, int) or not number >= 1: raise ValueError( """starting number must be and integer and be more than 0""" diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py index 42eca79a9..07717fba4 100644 --- a/dynamic_programming/max_sub_array.py +++ b/dynamic_programming/max_sub_array.py @@ -62,8 +62,7 @@ def max_sub_array(nums: list[int]) -> int: current = 0 for i in nums: current += i - if current < 0: - current = 0 + current = max(current, 0) best = max(best, current) return best diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index 43a72b89e..b29485031 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -167,7 +167,7 @@ class DirectedGraph: and not on_the_way_back ): len_stack = len(stack) - 1 - while True and len_stack >= 0: + while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1]) break @@ -220,7 +220,7 @@ class DirectedGraph: and not on_the_way_back ): len_stack_minus_one = len(stack) - 1 - while True and len_stack_minus_one >= 0: + while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1]) break @@ -392,7 +392,7 @@ class Graph: and not on_the_way_back ): len_stack = len(stack) - 1 - while True and len_stack >= 0: + while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1]) break @@ -445,7 +445,7 @@ class Graph: and not on_the_way_back ): len_stack_minus_one = len(stack) - 1 - while True and len_stack_minus_one >= 0: + while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1]) break diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index e16a98393..cd8e37b00 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -1,4 +1,5 @@ import heapq +import sys import numpy as np @@ -116,7 +117,7 @@ def do_something(back_pointer, goal, start): print(x, end=" ") x = back_pointer[x] print(x) - quit() + sys.exit() def valid(p: TPos): diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 775e0244a..ac0398a31 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -129,7 +129,7 @@ class Vector: input: index (0-indexed) output: the i-th component of the vector. 
""" - if type(i) is int and -len(self.__components) <= i < len(self.__components): + if isinstance(i, int) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception("index out of range") diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 66535e806..3864f6421 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -388,16 +388,10 @@ class SmoSVM: return (data - self._min) / (self._max - self._min) def _is_unbound(self, index): - if 0.0 < self.alphas[index] < self._c: - return True - else: - return False + return bool(0.0 < self.alphas[index] < self._c) def _is_support(self, index): - if self.alphas[index] > 0: - return True - else: - return False + return bool(self.alphas[index] > 0) @property def unbound(self): diff --git a/maths/find_min.py b/maths/find_min.py index 228205ed7..2eac087c6 100644 --- a/maths/find_min.py +++ b/maths/find_min.py @@ -24,8 +24,7 @@ def find_min(nums: list[int | float]) -> int | float: raise ValueError("find_min() arg is an empty sequence") min_num = nums[0] for num in nums: - if min_num > num: - min_num = num + min_num = min(min_num, num) return min_num diff --git a/maths/kadanes.py b/maths/kadanes.py index b23409e2b..c2ea53a6c 100644 --- a/maths/kadanes.py +++ b/maths/kadanes.py @@ -49,10 +49,8 @@ def kadanes(arr: list) -> int: for i in arr: max_till_element += i - if max_sum <= max_till_element: - max_sum = max_till_element - if max_till_element < 0: - max_till_element = 0 + max_sum = max(max_sum, max_till_element) + max_till_element = max(max_till_element, 0) return max_sum diff --git a/maths/largest_subarray_sum.py b/maths/largest_subarray_sum.py index 0449e72e6..90f92c712 100644 --- a/maths/largest_subarray_sum.py +++ b/maths/largest_subarray_sum.py @@ -11,10 +11,8 @@ def max_sub_array_sum(a: list, size: int = 0): max_ending_here = 0 for i in range(0, size): max_ending_here = max_ending_here + a[i] - if max_so_far < max_ending_here: - max_so_far = max_ending_here - if max_ending_here < 0: - max_ending_here = 0 + max_so_far = max(max_so_far, max_ending_here) + max_ending_here = max(max_ending_here, 0) return max_so_far diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py index a875ab89a..90c9fe77b 100644 --- a/maths/series/geometric_series.py +++ b/maths/series/geometric_series.py @@ -52,7 +52,7 @@ def geometric_series( power = 1 multiple = common_ratio_r for _ in range(int(nth_term)): - if series == []: + if not series: series.append(start_term_a) else: power += 1 diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 370e38482..716ed508e 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -21,7 +21,7 @@ def bfs(graph, s, t, parent): visited[ind] = True parent[ind] = u - return True if visited[t] else False + return visited[t] def ford_fulkerson(graph, source, sink): diff --git a/networking_flow/minimum_cut.py b/networking_flow/minimum_cut.py index 33131315f..164b45f10 100644 --- a/networking_flow/minimum_cut.py +++ b/networking_flow/minimum_cut.py @@ -24,7 +24,7 @@ def bfs(graph, s, t, parent): visited[ind] = True parent[ind] = u - return True if visited[t] else False + return visited[t] def mincut(graph, source, sink): diff --git a/other/password.py b/other/password.py index f463c7564..9a6161af8 100644 --- a/other/password.py +++ b/other/password.py @@ -89,13 +89,9 
@@ def is_strong_password(password: str, min_length: int = 8) -> bool: num = any(char in digits for char in password) spec_char = any(char in punctuation for char in password) - if upper and lower and num and spec_char: - return True - - else: - # Passwords should contain UPPERCASE, lowerase - # numbers, and special characters - return False + return upper and lower and num and spec_char + # Passwords should contain UPPERCASE, lowerase + # numbers, and special characters def main(): diff --git a/project_euler/problem_025/sol1.py b/project_euler/problem_025/sol1.py index c30a74a43..803464b5d 100644 --- a/project_euler/problem_025/sol1.py +++ b/project_euler/problem_025/sol1.py @@ -43,7 +43,7 @@ def fibonacci(n: int) -> int: 144 """ - if n == 1 or type(n) is not int: + if n == 1 or not isinstance(n, int): return 0 elif n == 2: return 1 diff --git a/project_euler/problem_036/sol1.py b/project_euler/problem_036/sol1.py index 425c41221..1d27356ec 100644 --- a/project_euler/problem_036/sol1.py +++ b/project_euler/problem_036/sol1.py @@ -32,7 +32,7 @@ def is_palindrome(n: int | str) -> bool: False """ n = str(n) - return True if n == n[::-1] else False + return n == n[::-1] def solution(n: int = 1000000): diff --git a/quantum/q_fourier_transform.py b/quantum/q_fourier_transform.py index d138dfb45..07a257579 100644 --- a/quantum/q_fourier_transform.py +++ b/quantum/q_fourier_transform.py @@ -55,9 +55,9 @@ def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts ... ValueError: number of qubits must be exact integer. """ - if type(number_of_qubits) == str: + if isinstance(number_of_qubits, str): raise TypeError("number of qubits must be a integer.") - if not number_of_qubits > 0: + if number_of_qubits <= 0: raise ValueError("number of qubits must be > 0.") if math.floor(number_of_qubits) != number_of_qubits: raise ValueError("number of qubits must be exact integer.") diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py index c6d03d170..66d931985 100644 --- a/quantum/q_full_adder.py +++ b/quantum/q_full_adder.py @@ -60,7 +60,11 @@ def quantum_full_adder( ... ValueError: inputs must be less or equal to 2. """ - if (type(input_1) == str) or (type(input_2) == str) or (type(carry_in) == str): + if ( + isinstance(input_1, str) + or isinstance(input_2, str) + or isinstance(carry_in, str) + ): raise TypeError("inputs must be integers.") if (input_1 < 0) or (input_2 < 0) or (carry_in < 0): diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py index 10ebc2d35..1087312f9 100644 --- a/quantum/superdense_coding.py +++ b/quantum/superdense_coding.py @@ -53,7 +53,7 @@ def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Co ... ValueError: inputs must be less or equal to 1. 
""" - if (type(bit_1) == str) or (type(bit_2) == str): + if isinstance(bit_1, str) or isinstance(bit_2, str): raise TypeError("inputs must be integers.") if (bit_1 < 0) or (bit_2 < 0): raise ValueError("inputs must be positive.") diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 84460e47b..74ce21762 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -133,7 +133,7 @@ def _msd_radix_sort_inplace( j = end_index - 1 while i <= j: changed = False - if not ((list_of_ints[i] >> bit_position) & 1): + if not (list_of_ints[i] >> bit_position) & 1: # found zero at the beginning i += 1 changed = True diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index 25ed649ce..e32a4ba64 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -84,7 +84,7 @@ class Automaton: else: current_state = next_state for key in self.adlist[current_state]["output"]: - if not (key in result): + if key not in result: result[key] = [] result[key].append(i - len(key) + 1) return result From 08c22457058207dc465b9ba9fd95659d33b3f1dd Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 29 Nov 2022 16:56:41 +0100 Subject: [PATCH 225/368] Upgrade to flake8 v6 (#8007) * Upgrade to flake8 v6 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .flake8 | 6 ++++-- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 2 ++ compression/huffman.py | 4 ++-- data_structures/binary_tree/non_recursive_segment_tree.py | 2 +- data_structures/binary_tree/segment_tree.py | 6 +++--- machine_learning/sequential_minimum_optimization.py | 2 +- project_euler/problem_107/sol1.py | 1 - 8 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.flake8 b/.flake8 index 2f74f421d..b68ee8533 100644 --- a/.flake8 +++ b/.flake8 @@ -4,5 +4,7 @@ max-line-length = 88 max-complexity = 19 extend-ignore = # Formatting style for `black` - E203 # Whitespace before ':' - W503 # Line break occurred before a binary operator + # E203 is whitespace before ':' + E203, + # W503 is line break occurred before a binary operator + W503 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 324a021ee..74502b3ea 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: check-executables-have-shebangs - id: check-yaml @@ -34,13 +34,13 @@ repos: - --py311-plus - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 # See .flake8 for args additional_dependencies: &flake8-plugins - flake8-bugbear - flake8-builtins - - flake8-broken-line + # - flake8-broken-line - flake8-comprehensions - pep8-naming diff --git a/DIRECTORY.md b/DIRECTORY.md index 83da4b76a..b3b484f73 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -375,6 +375,7 @@ * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) + * [Bi Directional Dijkstra](graphs/bi_directional_dijkstra.py) * [Bidirectional A Star](graphs/bidirectional_a_star.py) * [Bidirectional Breadth First Search](graphs/bidirectional_breadth_first_search.py) * [Boruvka](graphs/boruvka.py) @@ -563,6 +564,7 @@ * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) + * [Juggler Sequence](maths/juggler_sequence.py) * [Kadanes](maths/kadanes.py) * [Karatsuba](maths/karatsuba.py) * [Krishnamurthy 
Number](maths/krishnamurthy_number.py) diff --git a/compression/huffman.py b/compression/huffman.py index b337ac3ec..65e5c2f25 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -32,7 +32,7 @@ def parse_file(file_path: str) -> list[Letter]: if not c: break chars[c] = chars[c] + 1 if c in chars else 1 - return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq) + return sorted((Letter(c, f) for c, f in chars.items()), key=lambda x: x.freq) def build_tree(letters: list[Letter]) -> Letter | TreeNode: @@ -47,7 +47,7 @@ def build_tree(letters: list[Letter]) -> Letter | TreeNode: total_freq = left.freq + right.freq node = TreeNode(total_freq, left, right) response.append(node) - response.sort(key=lambda l: l.freq) + response.sort(key=lambda x: x.freq) return response[0] diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 075ff6c91..04164e5cb 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -106,7 +106,7 @@ class SegmentTree(Generic[T]): l, r = l + self.N, r + self.N res: T | None = None - while l <= r: # noqa: E741 + while l <= r: if l % 2 == 1: res = self.st[l] if res is None else self.fn(res, self.st[l]) if r % 2 == 0: diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 949a3ecdd..b05803869 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -16,7 +16,7 @@ class SegmentTree: return idx * 2 + 1 def build(self, idx, l, r): # noqa: E741 - if l == r: # noqa: E741 + if l == r: self.st[idx] = A[l] else: mid = (l + r) // 2 @@ -33,7 +33,7 @@ class SegmentTree: """ if r < a or l > b: return True - if l == r: # noqa: E741 + if l == r: self.st[idx] = val return True mid = (l + r) // 2 @@ -51,7 +51,7 @@ class SegmentTree: """ if r < a or l > b: return -math.inf - if l >= a and r <= b: # noqa: E741 + if l >= a and r <= b: return self.st[idx] mid = (l + r) // 2 q1 = self.query_recursive(self.left(idx), l, mid, a, b) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 3864f6421..f5185e1d9 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -314,7 +314,7 @@ class SmoSVM: l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) else: l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) - if l == h: # noqa: E741 + if l == h: return None, None # calculate eta diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index b3f5685b9..4659eac24 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -99,7 +99,6 @@ def solution(filename: str = "p107_network.txt") -> int: """ script_dir: str = os.path.abspath(os.path.dirname(__file__)) network_file: str = os.path.join(script_dir, filename) - adjacency_matrix: list[list[str]] edges: dict[EdgeT, int] = {} data: list[str] edge1: int From 47bf3f58e04873ef609301b1e654f6ddcc02b0fa Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Tue, 29 Nov 2022 22:07:27 +0400 Subject: [PATCH 227/368] fix validation condition and add tests (#7997) * fix validation condition and add tests * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- bit_manipulation/index_of_rightmost_set_bit.py | 10 +++++++++- 1 
file changed, 9 insertions(+), 1 deletion(-) diff --git a/bit_manipulation/index_of_rightmost_set_bit.py b/bit_manipulation/index_of_rightmost_set_bit.py index eb52ea4e6..c9c911660 100644 --- a/bit_manipulation/index_of_rightmost_set_bit.py +++ b/bit_manipulation/index_of_rightmost_set_bit.py @@ -19,9 +19,17 @@ def get_index_of_rightmost_set_bit(number: int) -> int: Traceback (most recent call last): ... ValueError: Input must be a non-negative integer + >>> get_index_of_rightmost_set_bit('test') + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer + >>> get_index_of_rightmost_set_bit(1.25) + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer """ - if number < 0 or not isinstance(number, int): + if not isinstance(number, int) or number < 0: raise ValueError("Input must be a non-negative integer") intermediate = number & ~(number - 1) From 6a86fe48671adb90504412acc2589c3ab1b18564 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Tue, 29 Nov 2022 22:28:47 +0400 Subject: [PATCH 228/368] Add backtrack word search in matrix (#8005) * add backtracking word search * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * review notes fixes * additional fixes * add tests * small cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * small cleanup 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update backtracking/word_search.py Co-authored-by: Christian Clauss * Update backtracking/word_search.py Co-authored-by: Christian Clauss * Update backtracking/word_search.py Co-authored-by: Christian Clauss * Update backtracking/word_search.py Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + backtracking/word_search.py | 160 ++++++++++++++++++++++++++++++++++++ 2 files changed, 161 insertions(+) create mode 100644 backtracking/word_search.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b3b484f73..51430a1e1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -33,6 +33,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word Search](backtracking/word_search.py) ## Bit Manipulation * [Binary And Operator](bit_manipulation/binary_and_operator.py) diff --git a/backtracking/word_search.py b/backtracking/word_search.py new file mode 100644 index 000000000..25d1436be --- /dev/null +++ b/backtracking/word_search.py @@ -0,0 +1,160 @@ +""" +Author : Alexander Pantyukhin +Date : November 24, 2022 + +Task: +Given an m x n grid of characters board and a string word, +return true if word exists in the grid. + +The word can be constructed from letters of sequentially adjacent cells, +where adjacent cells are horizontally or vertically neighboring. +The same letter cell may not be used more than once. + +Example: + +Matrix: +--------- +|A|B|C|E| +|S|F|C|S| +|A|D|E|E| +--------- + +Word: +"ABCCED" + +Result: +True + +Implementation notes: Use backtracking approach. +At each point, check all neighbors to try to find the next letter of the word. 
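+Each visited cell is recorded in a set before recursing into its neighbours and
+removed again when that branch fails, so a cell is used at most once per
+candidate path. The worst-case running time is roughly
+O(rows * cols * 3^len(word)), since after the first letter at most three of the
+four neighbours of a cell are still unvisited.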
+ +leetcode: https://leetcode.com/problems/word-search/ + +""" + + +def word_exists(board: list[list[str]], word: str) -> bool: + """ + >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCCED") + True + >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "SEE") + True + >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCB") + False + >>> word_exists([["A"]], "A") + True + >>> word_exists([["A","A","A","A","A","A"], + ... ["A","A","A","A","A","A"], + ... ["A","A","A","A","A","A"], + ... ["A","A","A","A","A","A"], + ... ["A","A","A","A","A","B"], + ... ["A","A","A","A","B","A"]], + ... "AAAAAAAAAAAAABB") + False + >>> word_exists([["A"]], 123) + Traceback (most recent call last): + ... + ValueError: The word parameter should be a string of length greater than 0. + >>> word_exists([["A"]], "") + Traceback (most recent call last): + ... + ValueError: The word parameter should be a string of length greater than 0. + >>> word_exists([[]], "AB") + Traceback (most recent call last): + ... + ValueError: The board should be a non empty matrix of single chars strings. + >>> word_exists([], "AB") + Traceback (most recent call last): + ... + ValueError: The board should be a non empty matrix of single chars strings. + >>> word_exists([["A"], [21]], "AB") + Traceback (most recent call last): + ... + ValueError: The board should be a non empty matrix of single chars strings. + """ + + # Validate board + board_error_message = ( + "The board should be a non empty matrix of single chars strings." + ) + if not isinstance(board, list) or len(board) == 0: + raise ValueError(board_error_message) + + for row in board: + if not isinstance(row, list) or len(row) == 0: + raise ValueError(board_error_message) + + for item in row: + if not isinstance(item, str) or len(item) != 1: + raise ValueError(board_error_message) + + # Validate word + if not isinstance(word, str) or len(word) == 0: + raise ValueError( + "The word parameter should be a string of length greater than 0." + ) + + traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)] + len_word = len(word) + len_board = len(board) + len_board_column = len(board[0]) + + # Returns the hash key of matrix indexes. + def get_point_key(row: int, column: int) -> int: + """ + >>> len_board=10 + >>> len_board_column=20 + >>> get_point_key(0, 0) + 200 + """ + + return len_board * len_board_column * row + column + + # Return True if it's possible to search the word suffix + # starting from the word_index. 
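+    # The key of the current cell is expected to be in visited_points_set
+    # already; each unvisited in-bounds neighbour is added to the set before
+    # the recursive call and removed again if that branch fails (backtracking).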
+ def exits_word( + row: int, column: int, word_index: int, visited_points_set: set[int] + ) -> bool: + """ + >>> board=[["A"]] + >>> word="B" + >>> exits_word(0, 0, 0, set()) + False + """ + + if board[row][column] != word[word_index]: + return False + + if word_index == len_word - 1: + return True + + for direction in traverts_directions: + next_i = row + direction[0] + next_j = column + direction[1] + if not (0 <= next_i < len_board and 0 <= next_j < len_board_column): + continue + + key = get_point_key(next_i, next_j) + if key in visited_points_set: + continue + + visited_points_set.add(key) + if exits_word(next_i, next_j, word_index + 1, visited_points_set): + return True + + visited_points_set.remove(key) + + return False + + for i in range(len_board): + for j in range(len_board_column): + if exits_word(i, j, 0, {get_point_key(i, j)}): + return True + + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5654c6242ed5974fa8f2aa89d9689efa012bdafc Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Tue, 29 Nov 2022 23:59:21 +0530 Subject: [PATCH 229/368] algorithm: Hexagonal number (#8003) * feat: Add hexagonal number * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/hexagonal_number.py | 48 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 maths/hexagonal_number.py diff --git a/maths/hexagonal_number.py b/maths/hexagonal_number.py new file mode 100644 index 000000000..28735c638 --- /dev/null +++ b/maths/hexagonal_number.py @@ -0,0 +1,48 @@ +""" +== Hexagonal Number == +The nth hexagonal number hn is the number of distinct dots +in a pattern of dots consisting of the outlines of regular +hexagons with sides up to n dots, when the hexagons are +overlaid so that they share one vertex. + +https://en.wikipedia.org/wiki/Hexagonal_number +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) + + +def hexagonal(number: int) -> int: + """ + :param number: nth hexagonal number to calculate + :return: the nth hexagonal number + Note: A hexagonal number is only defined for positive integers + >>> hexagonal(4) + 28 + >>> hexagonal(11) + 231 + >>> hexagonal(22) + 946 + >>> hexagonal(0) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> hexagonal(-1) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> hexagonal(11.0) + Traceback (most recent call last): + ... 
+ TypeError: Input value of [number=11.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 1: + raise ValueError("Input must be a positive integer") + return number * (2 * number - 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d141fa8838369fafb3b28a8dd825ec1b20d34e03 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 21:34:24 +0100 Subject: [PATCH 230/368] [pre-commit.ci] pre-commit autoupdate (#8017) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.2.2 → v3.3.0](https://github.com/asottile/pyupgrade/compare/v3.2.2...v3.3.0) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 74502b3ea..3d83499f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.2.2 + rev: v3.3.0 hooks: - id: pyupgrade args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 51430a1e1..382ff3a6f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -561,6 +561,7 @@ * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) + * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) From b25915adf91cc39c98c597fce1eef9422f4e7d0d Mon Sep 17 00:00:00 2001 From: Aaryan Raj <97806283+iaaryanraj@users.noreply.github.com> Date: Sun, 11 Dec 2022 12:34:04 +0530 Subject: [PATCH 231/368] Add algorithm to convert decimal number to its simplest fraction form (#8001) * Added algorithm to convert decimal number to its simplest fraction form * Apply suggested changes --- maths/decimal_to_fraction.py | 48 ++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 maths/decimal_to_fraction.py diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py new file mode 100644 index 000000000..9462bafe0 --- /dev/null +++ b/maths/decimal_to_fraction.py @@ -0,0 +1,48 @@ +def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]: + """ + Return a decimal number in its simplest fraction form + >>> decimal_to_fraction(2) + (2, 1) + >>> decimal_to_fraction(89.) 
+ (89, 1) + >>> decimal_to_fraction("67") + (67, 1) + >>> decimal_to_fraction("45.0") + (45, 1) + >>> decimal_to_fraction(1.5) + (3, 2) + >>> decimal_to_fraction("6.25") + (25, 4) + >>> decimal_to_fraction("78td") + Traceback (most recent call last): + ValueError: Please enter a valid number + """ + try: + decimal = float(decimal) + except ValueError: + raise ValueError("Please enter a valid number") + fractional_part = decimal - int(decimal) + if fractional_part == 0: + return int(decimal), 1 + else: + number_of_frac_digits = len(str(decimal).split(".")[1]) + numerator = int(decimal * (10**number_of_frac_digits)) + denominator = 10**number_of_frac_digits + divisor, dividend = denominator, numerator + while True: + remainder = dividend % divisor + if remainder == 0: + break + dividend, divisor = divisor, remainder + numerator, denominator = numerator / divisor, denominator / divisor + return int(numerator), int(denominator) + + +if __name__ == "__main__": + print(f"{decimal_to_fraction(2) = }") + print(f"{decimal_to_fraction(89.0) = }") + print(f"{decimal_to_fraction('67') = }") + print(f"{decimal_to_fraction('45.0') = }") + print(f"{decimal_to_fraction(1.5) = }") + print(f"{decimal_to_fraction('6.25') = }") + print(f"{decimal_to_fraction('78td') = }") From 40f165b789e9a2475415768db5acadf63e021e46 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 21:29:50 +0100 Subject: [PATCH 232/368] [pre-commit.ci] pre-commit autoupdate (#8026) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/psf/black: 22.10.0 → 22.12.0](https://github.com/psf/black/compare/22.10.0...22.12.0) - [github.com/asottile/pyupgrade: v3.3.0 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.3.0...v3.3.1) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3d83499f0..7cf4bedd7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: - id: auto-walrus - repo: https://github.com/psf/black - rev: 22.10.0 + rev: 22.12.0 hooks: - id: black @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 382ff3a6f..0624eda2c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -533,6 +533,7 @@ * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) * [Decimal Isolate](maths/decimal_isolate.py) + * [Decimal To Fraction](maths/decimal_to_fraction.py) * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) From af8d52092232e1104154b733000716036e668444 Mon Sep 17 00:00:00 2001 From: Roberto Garcia <37519995+rga2@users.noreply.github.com> Date: Wed, 14 Dec 2022 22:10:09 -0600 Subject: [PATCH 233/368] Update is_even.py (#8028) --- bit_manipulation/is_even.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py index b7b0841a1..ba036f35a 100644 --- 
a/bit_manipulation/is_even.py +++ b/bit_manipulation/is_even.py @@ -11,7 +11,7 @@ def is_even(number: int) -> bool: from the above examples we can observe that for all the odd integers there is always 1 set bit at the end also, 1 in binary can be represented as 001, 00001, or 0000001 - so for any odd integer n => n&1 is always equlas 1 else the integer is even + so for any odd integer n => n&1 is always equals 1 else the integer is even >>> is_even(1) False From 30277f8590a7bf636477fa4c4ad22cedf10588f5 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Thu, 15 Dec 2022 08:11:32 +0400 Subject: [PATCH 234/368] add numbers different signs algorithm. (#8008) --- DIRECTORY.md | 1 + bit_manipulation/numbers_different_signs.py | 39 +++++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 bit_manipulation/numbers_different_signs.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0624eda2c..34ce88a4f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -50,6 +50,7 @@ * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Is Power Of Two](bit_manipulation/is_power_of_two.py) + * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) diff --git a/bit_manipulation/numbers_different_signs.py b/bit_manipulation/numbers_different_signs.py new file mode 100644 index 000000000..cf8b6d86f --- /dev/null +++ b/bit_manipulation/numbers_different_signs.py @@ -0,0 +1,39 @@ +""" +Author : Alexander Pantyukhin +Date : November 30, 2022 + +Task: +Given two int numbers. Return True these numbers have opposite signs +or False otherwise. + +Implementation notes: Use bit manipulation. +Use XOR for two numbers. +""" + + +def different_signs(num1: int, num2: int) -> bool: + """ + Return True if numbers have opposite signs False otherwise. 
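+    In two's complement the sign is carried by the most significant bit, so
+    num1 ^ num2 is negative exactly when num1 and num2 have different signs.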
+ + >>> different_signs(1, -1) + True + >>> different_signs(1, 1) + False + >>> different_signs(1000000000000000000000000000, -1000000000000000000000000000) + True + >>> different_signs(-1000000000000000000000000000, 1000000000000000000000000000) + True + >>> different_signs(50, 278) + False + >>> different_signs(0, 2) + False + >>> different_signs(2, 0) + False + """ + return num1 ^ num2 < 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 3f8b2af14bd3b64b838098f9e1830c0fea926a1a Mon Sep 17 00:00:00 2001 From: Victor Rodrigues da Silva <63797831+VictorRS27@users.noreply.github.com> Date: Sun, 18 Dec 2022 19:26:39 -0300 Subject: [PATCH 235/368] Add autoclave cipher (#8029) * Add autoclave cipher * Update autoclave with the given suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixing errors * Another fixes * Update and rename autoclave.py to autokey.py * Rename gaussian_naive_bayes.py to gaussian_naive_bayes.py.broken.txt * Rename gradient_boosting_regressor.py to gradient_boosting_regressor.py.broken.txt * Rename random_forest_classifier.py to random_forest_classifier.py.broken.txt * Rename random_forest_regressor.py to random_forest_regressor.py.broken.txt * Rename equal_loudness_filter.py to equal_loudness_filter.py.broken.txt Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ...py => equal_loudness_filter.py.broken.txt} | 0 ciphers/autokey.py | 131 ++++++++++++++++++ ....py => gaussian_naive_bayes.py.broken.txt} | 0 ...gradient_boosting_regressor.py.broken.txt} | 0 ...=> random_forest_classifier.py.broken.txt} | 0 ... => random_forest_regressor.py.broken.txt} | 0 6 files changed, 131 insertions(+) rename audio_filters/{equal_loudness_filter.py => equal_loudness_filter.py.broken.txt} (100%) create mode 100644 ciphers/autokey.py rename machine_learning/{gaussian_naive_bayes.py => gaussian_naive_bayes.py.broken.txt} (100%) rename machine_learning/{gradient_boosting_regressor.py => gradient_boosting_regressor.py.broken.txt} (100%) rename machine_learning/{random_forest_classifier.py => random_forest_classifier.py.broken.txt} (100%) rename machine_learning/{random_forest_regressor.py => random_forest_regressor.py.broken.txt} (100%) diff --git a/audio_filters/equal_loudness_filter.py b/audio_filters/equal_loudness_filter.py.broken.txt similarity index 100% rename from audio_filters/equal_loudness_filter.py rename to audio_filters/equal_loudness_filter.py.broken.txt diff --git a/ciphers/autokey.py b/ciphers/autokey.py new file mode 100644 index 000000000..8683e6d37 --- /dev/null +++ b/ciphers/autokey.py @@ -0,0 +1,131 @@ +""" +https://en.wikipedia.org/wiki/Autokey_cipher +An autokey cipher (also known as the autoclave cipher) is a cipher that +incorporates the message (the plaintext) into the key. +The key is generated from the message in some automated fashion, +sometimes by selecting certain letters from the text or, more commonly, +by adding a short primer key to the front of the message. +""" + + +def encrypt(plaintext: str, key: str) -> str: + """ + Encrypt a given plaintext (string) and key (string), returning the + encrypted ciphertext. + >>> encrypt("hello world", "coffee") + 'jsqqs avvwo' + >>> encrypt("coffee is good as python", "TheAlgorithms") + 'vvjfpk wj ohvp su ddylsv' + >>> encrypt("coffee is good as python", 2) + Traceback (most recent call last): + ... 
+ TypeError: key must be a string + >>> encrypt("", "TheAlgorithms") + Traceback (most recent call last): + ... + ValueError: plaintext is empty + """ + if not isinstance(plaintext, str): + raise TypeError("plaintext must be a string") + if not isinstance(key, str): + raise TypeError("key must be a string") + + if not plaintext: + raise ValueError("plaintext is empty") + if not key: + raise ValueError("key is empty") + + key += plaintext + plaintext = plaintext.lower() + key = key.lower() + plaintext_iterator = 0 + key_iterator = 0 + ciphertext = "" + while plaintext_iterator < len(plaintext): + if ( + ord(plaintext[plaintext_iterator]) < 97 + or ord(plaintext[plaintext_iterator]) > 122 + ): + ciphertext += plaintext[plaintext_iterator] + plaintext_iterator += 1 + elif ord(key[key_iterator]) < 97 or ord(key[key_iterator]) > 122: + key_iterator += 1 + else: + ciphertext += chr( + ( + (ord(plaintext[plaintext_iterator]) - 97 + ord(key[key_iterator])) + - 97 + ) + % 26 + + 97 + ) + key_iterator += 1 + plaintext_iterator += 1 + return ciphertext + + +def decrypt(ciphertext: str, key: str) -> str: + """ + Decrypt a given ciphertext (string) and key (string), returning the decrypted + ciphertext. + >>> decrypt("jsqqs avvwo", "coffee") + 'hello world' + >>> decrypt("vvjfpk wj ohvp su ddylsv", "TheAlgorithms") + 'coffee is good as python' + >>> decrypt("vvjfpk wj ohvp su ddylsv", "") + Traceback (most recent call last): + ... + ValueError: key is empty + >>> decrypt(527.26, "TheAlgorithms") + Traceback (most recent call last): + ... + TypeError: ciphertext must be a string + """ + if not isinstance(ciphertext, str): + raise TypeError("ciphertext must be a string") + if not isinstance(key, str): + raise TypeError("key must be a string") + + if not ciphertext: + raise ValueError("ciphertext is empty") + if not key: + raise ValueError("key is empty") + + key = key.lower() + ciphertext_iterator = 0 + key_iterator = 0 + plaintext = "" + while ciphertext_iterator < len(ciphertext): + if ( + ord(ciphertext[ciphertext_iterator]) < 97 + or ord(ciphertext[ciphertext_iterator]) > 122 + ): + plaintext += ciphertext[ciphertext_iterator] + else: + plaintext += chr( + (ord(ciphertext[ciphertext_iterator]) - ord(key[key_iterator])) % 26 + + 97 + ) + key += chr( + (ord(ciphertext[ciphertext_iterator]) - ord(key[key_iterator])) % 26 + + 97 + ) + key_iterator += 1 + ciphertext_iterator += 1 + return plaintext + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + operation = int(input("Type 1 to encrypt or 2 to decrypt:")) + if operation == 1: + plaintext = input("Typeplaintext to be encrypted:\n") + key = input("Type the key:\n") + print(encrypt(plaintext, key)) + elif operation == 2: + ciphertext = input("Type the ciphertext to be decrypted:\n") + key = input("Type the key:\n") + print(decrypt(ciphertext, key)) + decrypt("jsqqs avvwo", "coffee") diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py.broken.txt similarity index 100% rename from machine_learning/gaussian_naive_bayes.py rename to machine_learning/gaussian_naive_bayes.py.broken.txt diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py.broken.txt similarity index 100% rename from machine_learning/gradient_boosting_regressor.py rename to machine_learning/gradient_boosting_regressor.py.broken.txt diff --git a/machine_learning/random_forest_classifier.py b/machine_learning/random_forest_classifier.py.broken.txt similarity index 100% rename 
from machine_learning/random_forest_classifier.py rename to machine_learning/random_forest_classifier.py.broken.txt diff --git a/machine_learning/random_forest_regressor.py b/machine_learning/random_forest_regressor.py.broken.txt similarity index 100% rename from machine_learning/random_forest_regressor.py rename to machine_learning/random_forest_regressor.py.broken.txt From d4c5b22424d05d3198dc2e5a49427e929b058ccf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 23:04:34 +0100 Subject: [PATCH 236/368] [pre-commit.ci] pre-commit autoupdate (#8037) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/MarcoGorelli/auto-walrus: v0.2.1 → v0.2.2](https://github.com/MarcoGorelli/auto-walrus/compare/v0.2.1...v0.2.2) - [github.com/PyCQA/isort: 5.10.1 → v5.11.3](https://github.com/PyCQA/isort/compare/5.10.1...v5.11.3) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 6 +----- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7cf4bedd7..0f5fe20a8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,7 +10,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus - rev: v0.2.1 + rev: v0.2.2 hooks: - id: auto-walrus @@ -20,7 +20,7 @@ repos: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: v5.11.3 hooks: - id: isort args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 34ce88a4f..bec857a38 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -14,7 +14,6 @@ ## Audio Filters * [Butterworth Filter](audio_filters/butterworth_filter.py) - * [Equal Loudness Filter](audio_filters/equal_loudness_filter.py) * [Iir Filter](audio_filters/iir_filter.py) * [Show Response](audio_filters/show_response.py) @@ -79,6 +78,7 @@ * [A1Z26](ciphers/a1z26.py) * [Affine Cipher](ciphers/affine_cipher.py) * [Atbash](ciphers/atbash.py) + * [Autokey](ciphers/autokey.py) * [Baconian Cipher](ciphers/baconian_cipher.py) * [Base16](ciphers/base16.py) * [Base32](ciphers/base32.py) @@ -475,8 +475,6 @@ * [Decision Tree](machine_learning/decision_tree.py) * Forecasting * [Run](machine_learning/forecasting/run.py) - * [Gaussian Naive Bayes](machine_learning/gaussian_naive_bayes.py) - * [Gradient Boosting Regressor](machine_learning/gradient_boosting_regressor.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) @@ -490,8 +488,6 @@ * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polymonial Regression](machine_learning/polymonial_regression.py) - * [Random Forest Classifier](machine_learning/random_forest_classifier.py) - * [Random Forest Regressor](machine_learning/random_forest_regressor.py) * [Scoring Functions](machine_learning/scoring_functions.py) * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) From 79ef431cec53020709268507b6515ff1e7e47680 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 
24 Dec 2022 17:57:28 +0300 Subject: [PATCH 237/368] Reduce the complexity of sorts/merge_insertion_sort.py (#7954) * Reduce the complexity of sorts/merge_insertion_sort.py * Add tests * Lower the --max-complexity threshold in the file .flake8 --- .flake8 | 2 +- sorts/merge_insertion_sort.py | 79 +++++++++++++++++++++-------------- 2 files changed, 48 insertions(+), 33 deletions(-) diff --git a/.flake8 b/.flake8 index b68ee8533..77ca7a328 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 19 +max-complexity = 17 extend-ignore = # Formatting style for `black` # E203 is whitespace before ':' diff --git a/sorts/merge_insertion_sort.py b/sorts/merge_insertion_sort.py index ecaa53545..4a5bdea0a 100644 --- a/sorts/merge_insertion_sort.py +++ b/sorts/merge_insertion_sort.py @@ -14,6 +14,53 @@ python3 merge_insertion_sort.py from __future__ import annotations +def binary_search_insertion(sorted_list, item): + """ + >>> binary_search_insertion([1, 2, 7, 9, 10], 4) + [1, 2, 4, 7, 9, 10] + """ + left = 0 + right = len(sorted_list) - 1 + while left <= right: + middle = (left + right) // 2 + if left == right: + if sorted_list[middle] < item: + left = middle + 1 + break + elif sorted_list[middle] < item: + left = middle + 1 + else: + right = middle - 1 + sorted_list.insert(left, item) + return sorted_list + + +def merge(left, right): + """ + >>> merge([[1, 6], [9, 10]], [[2, 3], [4, 5], [7, 8]]) + [[1, 6], [2, 3], [4, 5], [7, 8], [9, 10]] + """ + result = [] + while left and right: + if left[0][0] < right[0][0]: + result.append(left.pop(0)) + else: + result.append(right.pop(0)) + return result + left + right + + +def sortlist_2d(list_2d): + """ + >>> sortlist_2d([[9, 10], [1, 6], [7, 8], [2, 3], [4, 5]]) + [[1, 6], [2, 3], [4, 5], [7, 8], [9, 10]] + """ + length = len(list_2d) + if length <= 1: + return list_2d + middle = length // 2 + return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:])) + + def merge_insertion_sort(collection: list[int]) -> list[int]: """Pure implementation of merge-insertion sort algorithm in Python @@ -38,38 +85,6 @@ def merge_insertion_sort(collection: list[int]) -> list[int]: True """ - def binary_search_insertion(sorted_list, item): - left = 0 - right = len(sorted_list) - 1 - while left <= right: - middle = (left + right) // 2 - if left == right: - if sorted_list[middle] < item: - left = middle + 1 - break - elif sorted_list[middle] < item: - left = middle + 1 - else: - right = middle - 1 - sorted_list.insert(left, item) - return sorted_list - - def sortlist_2d(list_2d): - def merge(left, right): - result = [] - while left and right: - if left[0][0] < right[0][0]: - result.append(left.pop(0)) - else: - result.append(right.pop(0)) - return result + left + right - - length = len(list_2d) - if length <= 1: - return list_2d - middle = length // 2 - return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:])) - if len(collection) <= 1: return collection From 27d56ba3932d2ca2951a45232790794b2b0838d8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 22:02:50 +0100 Subject: [PATCH 238/368] [pre-commit.ci] pre-commit autoupdate (#8047) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/PyCQA/isort: v5.11.3 → 5.11.4](https://github.com/PyCQA/isort/compare/v5.11.3...5.11.4) * Update .flake8 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .flake8 | 2 +- .pre-commit-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.flake8 b/.flake8 index 77ca7a328..b68ee8533 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 17 +max-complexity = 19 extend-ignore = # Formatting style for `black` # E203 is whitespace before ':' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0f5fe20a8..8eb6d297e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,7 @@ repos: - id: black - repo: https://github.com/PyCQA/isort - rev: v5.11.3 + rev: 5.11.4 hooks: - id: isort args: From 90686e39b9fd3b599a8cd77810e0fdbb74eae064 Mon Sep 17 00:00:00 2001 From: Lucia Harcekova <119792460+LuciaHarcekova@users.noreply.github.com> Date: Wed, 28 Dec 2022 17:34:35 +0000 Subject: [PATCH 239/368] Add LZ77 compression algorithm (#8059) * - add "lz77_compressor" class with compress and decompress methods using LZ77 compression algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * - use "list" instead "List", formatting * - fix spelling * - add Python type hints * - add 'Token' class to represent triplet (offset, length, indicator) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * - add test, hange type rom List to list * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * - remove extra import * - remove extra types in comments * - better test * - edit comments * - add return types * - add tests for __str__ and __repr__ * Update lz77.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- compression/lz77.py | 227 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 compression/lz77.py diff --git a/compression/lz77.py b/compression/lz77.py new file mode 100644 index 000000000..7c1a6f6a4 --- /dev/null +++ b/compression/lz77.py @@ -0,0 +1,227 @@ +""" +LZ77 compression algorithm +- lossless data compression published in papers by Abraham Lempel and Jacob Ziv in 1977 +- also known as LZ1 or sliding-window compression +- form the basis for many variations including LZW, LZSS, LZMA and others + +It uses a “sliding window” method. Within the sliding window we have: + - search buffer + - look ahead buffer +len(sliding_window) = len(search_buffer) + len(look_ahead_buffer) + +LZ77 manages a dictionary that uses triples composed of: + - Offset into search buffer, it's the distance between the start of a phrase and + the beginning of a file. + - Length of the match, it's the number of characters that make up a phrase. + - The indicator is represented by a character that is going to be encoded next. + +As a file is parsed, the dictionary is dynamically updated to reflect the compressed +data contents and size. 
+ +Examples: +"cabracadabrarrarrad" <-> [(0, 0, 'c'), (0, 0, 'a'), (0, 0, 'b'), (0, 0, 'r'), + (3, 1, 'c'), (2, 1, 'd'), (7, 4, 'r'), (3, 5, 'd')] +"ababcbababaa" <-> [(0, 0, 'a'), (0, 0, 'b'), (2, 2, 'c'), (4, 3, 'a'), (2, 2, 'a')] +"aacaacabcabaaac" <-> [(0, 0, 'a'), (1, 1, 'c'), (3, 4, 'b'), (3, 3, 'a'), (1, 2, 'c')] + +Sources: +en.wikipedia.org/wiki/LZ77_and_LZ78 +""" + + +from dataclasses import dataclass + +__version__ = "0.1" +__author__ = "Lucia Harcekova" + + +@dataclass +class Token: + """ + Dataclass representing triplet called token consisting of length, offset + and indicator. This triplet is used during LZ77 compression. + """ + + offset: int + length: int + indicator: str + + def __repr__(self) -> str: + """ + >>> token = Token(1, 2, "c") + >>> repr(token) + '(1, 2, c)' + >>> str(token) + '(1, 2, c)' + """ + return f"({self.offset}, {self.length}, {self.indicator})" + + +class LZ77Compressor: + """ + Class containing compress and decompress methods using LZ77 compression algorithm. + """ + + def __init__(self, window_size: int = 13, lookahead_buffer_size: int = 6) -> None: + self.window_size = window_size + self.lookahead_buffer_size = lookahead_buffer_size + self.search_buffer_size = self.window_size - self.lookahead_buffer_size + + def compress(self, text: str) -> list[Token]: + """ + Compress the given string text using LZ77 compression algorithm. + + Args: + text: string to be compressed + + Returns: + output: the compressed text as a list of Tokens + + >>> lz77_compressor = LZ77Compressor() + >>> str(lz77_compressor.compress("ababcbababaa")) + '[(0, 0, a), (0, 0, b), (2, 2, c), (4, 3, a), (2, 2, a)]' + >>> str(lz77_compressor.compress("aacaacabcabaaac")) + '[(0, 0, a), (1, 1, c), (3, 4, b), (3, 3, a), (1, 2, c)]' + """ + + output = [] + search_buffer = "" + + # while there are still characters in text to compress + while text: + + # find the next encoding phrase + # - triplet with offset, length, indicator (the next encoding character) + token = self._find_encoding_token(text, search_buffer) + + # update the search buffer: + # - add new characters from text into it + # - check if size exceed the max search buffer size, if so, drop the + # oldest elements + search_buffer += text[: token.length + 1] + if len(search_buffer) > self.search_buffer_size: + search_buffer = search_buffer[-self.search_buffer_size :] + + # update the text + text = text[token.length + 1 :] + + # append the token to output + output.append(token) + + return output + + def decompress(self, tokens: list[Token]) -> str: + """ + Convert the list of tokens into an output string. + + Args: + tokens: list containing triplets (offset, length, char) + + Returns: + output: decompressed text + + Tests: + >>> lz77_compressor = LZ77Compressor() + >>> lz77_compressor.decompress([Token(0, 0, 'c'), Token(0, 0, 'a'), + ... Token(0, 0, 'b'), Token(0, 0, 'r'), Token(3, 1, 'c'), + ... Token(2, 1, 'd'), Token(7, 4, 'r'), Token(3, 5, 'd')]) + 'cabracadabrarrarrad' + >>> lz77_compressor.decompress([Token(0, 0, 'a'), Token(0, 0, 'b'), + ... Token(2, 2, 'c'), Token(4, 3, 'a'), Token(2, 2, 'a')]) + 'ababcbababaa' + >>> lz77_compressor.decompress([Token(0, 0, 'a'), Token(1, 1, 'c'), + ... 
Token(3, 4, 'b'), Token(3, 3, 'a'), Token(1, 2, 'c')]) + 'aacaacabcabaaac' + """ + + output = "" + + for token in tokens: + for _ in range(token.length): + output += output[-token.offset] + output += token.indicator + + return output + + def _find_encoding_token(self, text: str, search_buffer: str) -> Token: + """Finds the encoding token for the first character in the text. + + Tests: + >>> lz77_compressor = LZ77Compressor() + >>> lz77_compressor._find_encoding_token("abrarrarrad", "abracad").offset + 7 + >>> lz77_compressor._find_encoding_token("adabrarrarrad", "cabrac").length + 1 + >>> lz77_compressor._find_encoding_token("abc", "xyz").offset + 0 + >>> lz77_compressor._find_encoding_token("", "xyz").offset + Traceback (most recent call last): + ... + ValueError: We need some text to work with. + >>> lz77_compressor._find_encoding_token("abc", "").offset + 0 + """ + + if not text: + raise ValueError("We need some text to work with.") + + # Initialise result parameters to default values + length, offset = 0, 0 + + if not search_buffer: + return Token(offset, length, text[length]) + + for i, character in enumerate(search_buffer): + found_offset = len(search_buffer) - i + if character == text[0]: + found_length = self._match_length_from_index(text, search_buffer, 0, i) + # if the found length is bigger than the current or if it's equal, + # which means it's offset is smaller: update offset and length + if found_length >= length: + offset, length = found_offset, found_length + + return Token(offset, length, text[length]) + + def _match_length_from_index( + self, text: str, window: str, text_index: int, window_index: int + ) -> int: + """Calculate the longest possible match of text and window characters from + text_index in text and window_index in window. + + Args: + text: _description_ + window: sliding window + text_index: index of character in text + window_index: index of character in sliding window + + Returns: + The maximum match between text and window, from given indexes. + + Tests: + >>> lz77_compressor = LZ77Compressor(13, 6) + >>> lz77_compressor._match_length_from_index("rarrad", "adabrar", 0, 4) + 5 + >>> lz77_compressor._match_length_from_index("adabrarrarrad", + ... "cabrac", 0, 1) + 1 + """ + if not text or text[text_index] != window[window_index]: + return 0 + return 1 + self._match_length_from_index( + text, window + text[text_index], text_index + 1, window_index + 1 + ) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + # Initialize compressor class + lz77_compressor = LZ77Compressor(window_size=13, lookahead_buffer_size=6) + + # Example + TEXT = "cabracadabrarrarrad" + compressed_text = lz77_compressor.compress(TEXT) + print(lz77_compressor.compress("ababcbababaa")) + decompressed_text = lz77_compressor.decompress(compressed_text) + assert decompressed_text == TEXT, "The LZ77 algorithm returned the invalid result." 
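The patch above is self-checking via its doctests, but a minimal round-trip sketch may help when trying it out interactively. It assumes the new compression/lz77.py is importable (for example when run from the repository root); the class name, the window sizes and the expected token list are taken from the patch itself, not from any released API:

    from compression.lz77 import LZ77Compressor

    lz77_compressor = LZ77Compressor(window_size=13, lookahead_buffer_size=6)

    text = "cabracadabrarrarrad"
    tokens = lz77_compressor.compress(text)
    # Per the module docstring, this prints:
    # [(0, 0, c), (0, 0, a), (0, 0, b), (0, 0, r), (3, 1, c), (2, 1, d), (7, 4, r), (3, 5, d)]
    print(tokens)

    # A token's length may exceed its offset, e.g. (3, 5, d): decompress() copies
    # one character at a time and appends it before reading the next, so a match
    # is allowed to overlap the text it is still producing.
    assert lz77_compressor.decompress(tokens) == text

The window sizes used here are the defaults from the file's own __main__ block; a larger search buffer allows matches farther back at the cost of a longer search.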
From b72d0681ec8fd6c02ee10ba04bae3fe97ffaebc6 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 29 Dec 2022 09:06:26 -0800 Subject: [PATCH 240/368] Remove extra imports in gamma.py doctests (#8060) * Refactor bottom-up function to be class method * Add type hints * Update convolve function namespace * Remove depreciated np.float * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Renamed function for consistency * updating DIRECTORY.md * Remove extra imports in gamma.py doctests Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- maths/gamma.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/maths/gamma.py b/maths/gamma.py index 69cd819ef..d5debc587 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -11,42 +11,27 @@ def gamma(num: float) -> float: used extension of the factorial function to complex numbers. The gamma function is defined for all complex numbers except the non-positive integers - - >>> gamma(-1) Traceback (most recent call last): ... ValueError: math domain error - - - >>> gamma(0) Traceback (most recent call last): ... ValueError: math domain error - - >>> gamma(9) 40320.0 - >>> from math import gamma as math_gamma >>> all(.99999999 < gamma(i) / math_gamma(i) <= 1.000000001 ... for i in range(1, 50)) True - - - >>> from math import gamma as math_gamma >>> gamma(-1)/math_gamma(-1) <= 1.000000001 Traceback (most recent call last): ... ValueError: math domain error - - - >>> from math import gamma as math_gamma >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001 True """ - if num <= 0: raise ValueError("math domain error") From c6223c71d82c7ba57f3de9eed23963ec96de01bb Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Fri, 30 Dec 2022 09:47:40 +0400 Subject: [PATCH 241/368] add word_break dynamic approach up -> down. (#8039) * add word_break dynamic approach up -> down. * updating DIRECTORY.md * Update word_break.py fix review notes. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update word_break.py fix review notes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix review notes * add trie type * Update word_break.py add typing Any to trie. 
* Update dynamic_programming/word_break.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * fix review notes Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + dynamic_programming/word_break.py | 111 ++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 dynamic_programming/word_break.py diff --git a/DIRECTORY.md b/DIRECTORY.md index bec857a38..3437df12c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -328,6 +328,7 @@ * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) * [Viterbi](dynamic_programming/viterbi.py) + * [Word Break](dynamic_programming/word_break.py) ## Electronics * [Builtin Voltage](electronics/builtin_voltage.py) diff --git a/dynamic_programming/word_break.py b/dynamic_programming/word_break.py new file mode 100644 index 000000000..642ea0edf --- /dev/null +++ b/dynamic_programming/word_break.py @@ -0,0 +1,111 @@ +""" +Author : Alexander Pantyukhin +Date : December 12, 2022 + +Task: +Given a string and a list of words, return true if the string can be +segmented into a space-separated sequence of one or more words. + +Note that the same word may be reused +multiple times in the segmentation. + +Implementation notes: Trie + Dynamic programming up -> down. +The Trie will be used to store the words. It will be useful for scanning +available words for the current position in the string. + +Leetcode: +https://leetcode.com/problems/word-break/description/ + +Runtime: O(n * n) +Space: O(n) +""" + +from functools import lru_cache +from typing import Any + + +def word_break(string: str, words: list[str]) -> bool: + """ + Return True if numbers have opposite signs False otherwise. + + >>> word_break("applepenapple", ["apple","pen"]) + True + >>> word_break("catsandog", ["cats","dog","sand","and","cat"]) + False + >>> word_break("cars", ["car","ca","rs"]) + True + >>> word_break('abc', []) + False + >>> word_break(123, ['a']) + Traceback (most recent call last): + ... + ValueError: the string should be not empty string + >>> word_break('', ['a']) + Traceback (most recent call last): + ... + ValueError: the string should be not empty string + >>> word_break('abc', [123]) + Traceback (most recent call last): + ... + ValueError: the words should be a list of non-empty strings + >>> word_break('abc', ['']) + Traceback (most recent call last): + ... 
+ ValueError: the words should be a list of non-empty strings + """ + + # Validation + if not isinstance(string, str) or len(string) == 0: + raise ValueError("the string should be not empty string") + + if not isinstance(words, list) or not all( + isinstance(item, str) and len(item) > 0 for item in words + ): + raise ValueError("the words should be a list of non-empty strings") + + # Build trie + trie: dict[str, Any] = {} + word_keeper_key = "WORD_KEEPER" + + for word in words: + trie_node = trie + for c in word: + if c not in trie_node: + trie_node[c] = {} + + trie_node = trie_node[c] + + trie_node[word_keeper_key] = True + + len_string = len(string) + + # Dynamic programming method + @lru_cache(maxsize=None) + def is_breakable(index: int) -> bool: + """ + >>> string = 'a' + >>> is_breakable(1) + True + """ + if index == len_string: + return True + + trie_node = trie + for i in range(index, len_string): + trie_node = trie_node.get(string[i], None) + + if trie_node is None: + return False + + if trie_node.get(word_keeper_key, False) and is_breakable(i + 1): + return True + + return False + + return is_breakable(0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d29afca93b278e7885f2395c1640aa90d109cc12 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 1 Jan 2023 05:30:14 -0800 Subject: [PATCH 242/368] Fix get_top_billioners.py file name typo (#8066) --- .../{get_top_billioners.py => get_top_billionaires.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename web_programming/{get_top_billioners.py => get_top_billionaires.py} (100%) diff --git a/web_programming/get_top_billioners.py b/web_programming/get_top_billionaires.py similarity index 100% rename from web_programming/get_top_billioners.py rename to web_programming/get_top_billionaires.py From 7c1d23d4485904634a6755d5978d406be534421d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 1 Jan 2023 17:10:59 -0800 Subject: [PATCH 243/368] Change prime_sieve_eratosthenes.py to return list (#8062) --- maths/prime_sieve_eratosthenes.py | 35 +++++++++++++++++++------------ 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py index 3a3c55085..32eef9165 100644 --- a/maths/prime_sieve_eratosthenes.py +++ b/maths/prime_sieve_eratosthenes.py @@ -1,10 +1,10 @@ """ Sieve of Eratosthenes -Input : n =10 +Input: n = 10 Output: 2 3 5 7 -Input : n = 20 +Input: n = 20 Output: 2 3 5 7 11 13 17 19 you can read in detail about this at @@ -12,34 +12,43 @@ https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes """ -def prime_sieve_eratosthenes(num): +def prime_sieve_eratosthenes(num: int) -> list[int]: """ - print the prime numbers up to n + Print the prime numbers up to n >>> prime_sieve_eratosthenes(10) - 2,3,5,7, + [2, 3, 5, 7] >>> prime_sieve_eratosthenes(20) - 2,3,5,7,11,13,17,19, + [2, 3, 5, 7, 11, 13, 17, 19] + >>> prime_sieve_eratosthenes(2) + [2] + >>> prime_sieve_eratosthenes(1) + [] + >>> prime_sieve_eratosthenes(-1) + Traceback (most recent call last): + ... 
+ ValueError: Input must be a positive integer """ - primes = [True for i in range(num + 1)] - p = 2 + if num <= 0: + raise ValueError("Input must be a positive integer") + primes = [True] * (num + 1) + + p = 2 while p * p <= num: if primes[p]: for i in range(p * p, num + 1, p): primes[i] = False p += 1 - for prime in range(2, num + 1): - if primes[prime]: - print(prime, end=",") + return [prime for prime in range(2, num + 1) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() - num = int(input()) - prime_sieve_eratosthenes(num) + user_num = int(input("Enter a positive integer: ").strip()) + print(prime_sieve_eratosthenes(user_num)) From 725731c8d289f742bfde3f159a538a47d19c27dc Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 2 Jan 2023 05:07:39 -0800 Subject: [PATCH 244/368] Refactor `local_weighted_learning.py` to use `np.array` (#8069) * updating DIRECTORY.md * Format local_weighted_learning.py doctests for clarity * Refactor local_weighted_learning.py to use np.array instead of np.mat The np.matrix class is planned to be eventually depreciated in favor of np.array, and current use of the class raises warnings in pytest * Update local_weighted_learning.py documentation Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 +- .../local_weighted_learning.py | 116 ++++++++++-------- 2 files changed, 68 insertions(+), 51 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3437df12c..5ce9dca74 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -123,6 +123,7 @@ * [Huffman](compression/huffman.py) * [Lempel Ziv](compression/lempel_ziv.py) * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py) + * [Lz77](compression/lz77.py) * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py) * [Run Length Encoding](compression/run_length_encoding.py) @@ -1162,7 +1163,7 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) - * [Get Top Billioners](web_programming/get_top_billioners.py) + * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index df03fe0a1..6260e9ac6 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -1,76 +1,86 @@ -# Required imports to run this file import matplotlib.pyplot as plt import numpy as np -# weighted matrix -def weighted_matrix(point: np.mat, training_data_x: np.mat, bandwidth: float) -> np.mat: +def weighted_matrix( + point: np.array, training_data_x: np.array, bandwidth: float +) -> np.array: """ - Calculate the weight for every point in the - data set. It takes training_point , query_point, and tau - Here Tau is not a fixed value it can be varied depends on output. - tau --> bandwidth - xmat -->Training data - point --> the x where we want to make predictions - >>> weighted_matrix(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68], - ... 
[24.59,25.69]]), 0.6) - matrix([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000], - [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], - [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) + Calculate the weight for every point in the data set. + point --> the x value at which we want to make predictions + >>> weighted_matrix( + ... np.array([1., 1.]), + ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), + ... 0.6 + ... ) + array([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000], + [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], + [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) """ - # m is the number of training samples - m, n = np.shape(training_data_x) - # Initializing weights as identity matrix - weights = np.mat(np.eye(m)) + m, _ = np.shape(training_data_x) # m is the number of training samples + weights = np.eye(m) # Initializing weights as identity matrix + # calculating weights for all training examples [x(i)'s] for j in range(m): diff = point - training_data_x[j] - weights[j, j] = np.exp(diff * diff.T / (-2.0 * bandwidth**2)) + weights[j, j] = np.exp(diff @ diff.T / (-2.0 * bandwidth**2)) return weights def local_weight( - point: np.mat, training_data_x: np.mat, training_data_y: np.mat, bandwidth: float -) -> np.mat: + point: np.array, + training_data_x: np.array, + training_data_y: np.array, + bandwidth: float, +) -> np.array: """ Calculate the local weights using the weight_matrix function on training data. Return the weighted matrix. - >>> local_weight(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68], - ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) - matrix([[0.00873174], - [0.08272556]]) + >>> local_weight( + ... np.array([1., 1.]), + ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), + ... np.array([[1.01, 1.66, 3.5]]), + ... 0.6 + ... ) + array([[0.00873174], + [0.08272556]]) """ weight = weighted_matrix(point, training_data_x, bandwidth) - w = (training_data_x.T * (weight * training_data_x)).I * ( - training_data_x.T * weight * training_data_y.T + w = np.linalg.inv(training_data_x.T @ (weight @ training_data_x)) @ ( + training_data_x.T @ weight @ training_data_y.T ) return w def local_weight_regression( - training_data_x: np.mat, training_data_y: np.mat, bandwidth: float -) -> np.mat: + training_data_x: np.array, training_data_y: np.array, bandwidth: float +) -> np.array: """ - Calculate predictions for each data point on axis. - >>> local_weight_regression(np.mat([[16.99, 10.34], [21.01,23.68], - ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + Calculate predictions for each data point on axis + >>> local_weight_regression( + ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), + ... np.array([[1.01, 1.66, 3.5]]), + ... 0.6 + ... 
) array([1.07173261, 1.65970737, 3.50160179]) """ - m, n = np.shape(training_data_x) + m, _ = np.shape(training_data_x) ypred = np.zeros(m) for i, item in enumerate(training_data_x): - ypred[i] = item * local_weight( + ypred[i] = item @ local_weight( item, training_data_x, training_data_y, bandwidth ) return ypred -def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: +def load_data( + dataset_name: str, cola_name: str, colb_name: str +) -> tuple[np.array, np.array, np.array, np.array]: """ - Function used for loading data from the seaborn splitting into x and y points + Load data from seaborn and split it into x and y points """ import seaborn as sns @@ -78,23 +88,25 @@ def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: col_a = np.array(data[cola_name]) # total_bill col_b = np.array(data[colb_name]) # tip - mcol_a = np.mat(col_a) - mcol_b = np.mat(col_b) + mcol_a = col_a.copy() + mcol_b = col_b.copy() - m = np.shape(mcol_b)[1] - one = np.ones((1, m), dtype=int) + one = np.ones(np.shape(mcol_b)[0], dtype=int) - # horizontal stacking - training_data_x = np.hstack((one.T, mcol_a.T)) + # pairing elements of one and mcol_a + training_data_x = np.column_stack((one, mcol_a)) return training_data_x, mcol_b, col_a, col_b -def get_preds(training_data_x: np.mat, mcol_b: np.mat, tau: float) -> np.ndarray: +def get_preds(training_data_x: np.array, mcol_b: np.array, tau: float) -> np.array: """ Get predictions with minimum error for each training data - >>> get_preds(np.mat([[16.99, 10.34], [21.01,23.68], - ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + >>> get_preds( + ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), + ... np.array([[1.01, 1.66, 3.5]]), + ... 0.6 + ... ) array([1.07173261, 1.65970737, 3.50160179]) """ ypred = local_weight_regression(training_data_x, mcol_b, tau) @@ -102,15 +114,15 @@ def get_preds(training_data_x: np.mat, mcol_b: np.mat, tau: float) -> np.ndarray def plot_preds( - training_data_x: np.mat, - predictions: np.ndarray, - col_x: np.ndarray, - col_y: np.ndarray, + training_data_x: np.array, + predictions: np.array, + col_x: np.array, + col_y: np.array, cola_name: str, colb_name: str, ) -> plt.plot: """ - This function used to plot predictions and display the graph + Plot predictions and display the graph """ xsort = training_data_x.copy() xsort.sort(axis=0) @@ -128,6 +140,10 @@ def plot_preds( if __name__ == "__main__": + import doctest + + doctest.testmod() + training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip") predictions = get_preds(training_data_x, mcol_b, 0.5) plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip") From 9f041e9cc82dab21401359d4cfa1b966fc30ddc4 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 2 Jan 2023 05:15:14 -0800 Subject: [PATCH 245/368] Refactor `sierpinski_triangle.py` (#8068) * updating DIRECTORY.md * Update sierpinski_triangle.py header doc * Remove unused PROGNAME var in sierpinski_triangle.py The PROGNAME var was used to print an image description in the reference code that this implementation was taken from, but it's entirely unused here * Refactor triangle() function to not use list of vertices Since the number of vertices is always fixed at 3, there's no need to pass in the vertices as a list, and it's clearer to give the vertices distinct names rather than index them from the list * Refactor sierpinski_triangle.py to use tuples Tuples make more sense than lists for storing coordinate pairs * Flip if-statement 
condition in sierpinski_triangle.py to avoid nesting * Add type hints to sierpinski_triangle.py * Add doctests to sierpinski_triangle.py * Fix return types in doctests * Update fractals/sierpinski_triangle.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- fractals/sierpinski_triangle.py | 110 +++++++++++++++++--------------- 1 file changed, 59 insertions(+), 51 deletions(-) diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index 084f6661f..c28ec00b2 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -1,76 +1,84 @@ -#!/usr/bin/python +""" +Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95 -"""Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95 +Simple example of fractal generation using recursion. -Simple example of Fractal generation using recursive function. +What is the Sierpiński Triangle? + The Sierpiński triangle (sometimes spelled Sierpinski), also called the +Sierpiński gasket or Sierpiński sieve, is a fractal attractive fixed set with +the overall shape of an equilateral triangle, subdivided recursively into +smaller equilateral triangles. Originally constructed as a curve, this is one of +the basic examples of self-similar sets—that is, it is a mathematically +generated pattern that is reproducible at any magnification or reduction. It is +named after the Polish mathematician Wacław Sierpiński, but appeared as a +decorative pattern many centuries before the work of Sierpiński. -What is Sierpinski Triangle? ->>The Sierpinski triangle (also with the original orthography Sierpinski), also called -the Sierpinski gasket or the Sierpinski Sieve, is a fractal and attractive fixed set -with the overall shape of an equilateral triangle, subdivided recursively into smaller -equilateral triangles. Originally constructed as a curve, this is one of the basic -examples of self-similar sets, i.e., it is a mathematically generated pattern that can -be reproducible at any magnification or reduction. It is named after the Polish -mathematician Wacław Sierpinski, but appeared as a decorative pattern many centuries -prior to the work of Sierpinski. 
-Requirements(pip): - - turtle - -Python: - - 2.6 - -Usage: - - $python sierpinski_triangle.py - -Credits: This code was written by editing the code from -https://www.riannetrujillo.com/blog/python-fractal/ +Usage: python sierpinski_triangle.py +Credits: + The above description is taken from + https://en.wikipedia.org/wiki/Sierpi%C5%84ski_triangle + This code was written by editing the code from + https://www.riannetrujillo.com/blog/python-fractal/ """ import sys import turtle -PROGNAME = "Sierpinski Triangle" -points = [[-175, -125], [0, 175], [175, -125]] # size of triangle +def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]: + """ + Find the midpoint of two points + + >>> get_mid((0, 0), (2, 2)) + (1.0, 1.0) + >>> get_mid((-3, -3), (3, 3)) + (0.0, 0.0) + >>> get_mid((1, 0), (3, 2)) + (2.0, 1.0) + >>> get_mid((0, 0), (1, 1)) + (0.5, 0.5) + >>> get_mid((0, 0), (0, 0)) + (0.0, 0.0) + """ + return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2 -def get_mid(p1, p2): - return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) # find midpoint - - -def triangle(points, depth): - +def triangle( + vertex1: tuple[float, float], + vertex2: tuple[float, float], + vertex3: tuple[float, float], + depth: int, +) -> None: + """ + Recursively draw the Sierpinski triangle given the vertices of the triangle + and the recursion depth + """ my_pen.up() - my_pen.goto(points[0][0], points[0][1]) + my_pen.goto(vertex1[0], vertex1[1]) my_pen.down() - my_pen.goto(points[1][0], points[1][1]) - my_pen.goto(points[2][0], points[2][1]) - my_pen.goto(points[0][0], points[0][1]) + my_pen.goto(vertex2[0], vertex2[1]) + my_pen.goto(vertex3[0], vertex3[1]) + my_pen.goto(vertex1[0], vertex1[1]) - if depth > 0: - triangle( - [points[0], get_mid(points[0], points[1]), get_mid(points[0], points[2])], - depth - 1, - ) - triangle( - [points[1], get_mid(points[0], points[1]), get_mid(points[1], points[2])], - depth - 1, - ) - triangle( - [points[2], get_mid(points[2], points[1]), get_mid(points[0], points[2])], - depth - 1, - ) + if depth == 0: + return + + triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1) + triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1) + triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( - "right format for using this script: " - "$python fractals.py " + "Correct format for using this script: " + "python fractals.py " ) my_pen = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") - triangle(points, int(sys.argv[1])) + + vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle + triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1])) From 32a1ff9359b4de80b94ef26c55a5b24204d35382 Mon Sep 17 00:00:00 2001 From: Abhishek Mulik Date: Wed, 4 Jan 2023 06:17:15 +0530 Subject: [PATCH 246/368] Update is_palindrome.py (#8022) --- strings/is_palindrome.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py index 5758af0ce..9bf2abd98 100644 --- a/strings/is_palindrome.py +++ b/strings/is_palindrome.py @@ -16,7 +16,24 @@ def is_palindrome(s: str) -> bool: # Since punctuation, capitalization, and spaces are often ignored while checking # palindromes, we first remove them from our string. 
s = "".join(character for character in s.lower() if character.isalnum()) - return s == s[::-1] + # return s == s[::-1] the slicing method + # uses extra spaces we can + # better with iteration method. + + end = len(s) // 2 + n = len(s) + + # We need to traverse till half of the length of string + # as we can get access of the i'th last element from + # i'th index. + # eg: [0,1,2,3,4,5] => 4th index can be accessed + # with the help of 1st index (i==n-i-1) + # where n is length of string + + for i in range(end): + if s[i] != s[n - i - 1]: + return False + return True if __name__ == "__main__": From 4939e8463fc34c936a309d513cfe8153343cb9d5 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 7 Jan 2023 16:56:39 +0000 Subject: [PATCH 247/368] Create cached fibonacci algorithm (#8084) * feat: Add `fib_recursive_cached` func * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doc: Show difference in time when caching algorithm Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/fibonacci.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index e0da66ee5..d58c9fc68 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -16,6 +16,7 @@ fib_memoization runtime: 0.0107 ms fib_binet runtime: 0.0174 ms """ +from functools import lru_cache from math import sqrt from time import time @@ -92,6 +93,39 @@ def fib_recursive(n: int) -> list[int]: return [fib_recursive_term(i) for i in range(n + 1)] +def fib_recursive_cached(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using recursion + >>> fib_iterative(0) + [0] + >>> fib_iterative(1) + [0, 1] + >>> fib_iterative(5) + [0, 1, 1, 2, 3, 5] + >>> fib_iterative(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_iterative(-1) + Traceback (most recent call last): + ... 
+ Exception: n is negative + """ + + @lru_cache(maxsize=None) + def fib_recursive_term(i: int) -> int: + """ + Calculates the i-th (0-indexed) Fibonacci number using recursion + """ + if i < 0: + raise Exception("n is negative") + if i < 2: + return i + return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) + + if n < 0: + raise Exception("n is negative") + return [fib_recursive_term(i) for i in range(n + 1)] + + def fib_memoization(n: int) -> list[int]: """ Calculates the first n (0-indexed) Fibonacci numbers using memoization @@ -163,8 +197,9 @@ def fib_binet(n: int) -> list[int]: if __name__ == "__main__": - num = 20 + num = 30 time_func(fib_iterative, num) - time_func(fib_recursive, num) + time_func(fib_recursive, num) # Around 3s runtime + time_func(fib_recursive_cached, num) # Around 0ms runtime time_func(fib_memoization, num) time_func(fib_binet, num) From 1a27258bd6c3a35a403629b4ea7fc0228bcc892d Mon Sep 17 00:00:00 2001 From: MohammadReza Balakhaniyan <51448587+balakhaniyan@users.noreply.github.com> Date: Wed, 11 Jan 2023 02:17:02 +0330 Subject: [PATCH 248/368] gcd_of_n_numbers (#8057) * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add maths/Gcd of N Numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * more pythonic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * more pythonic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merged * merged * more readable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/gcd_of_n_numbers.py | 109 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 maths/gcd_of_n_numbers.py diff --git a/maths/gcd_of_n_numbers.py b/maths/gcd_of_n_numbers.py new file mode 100644 index 000000000..63236c236 --- /dev/null +++ b/maths/gcd_of_n_numbers.py @@ -0,0 +1,109 @@ +""" +Gcd of N Numbers +Reference: https://en.wikipedia.org/wiki/Greatest_common_divisor +""" + +from collections import Counter + + +def get_factors( + number: int, factors: Counter | None = None, factor: int = 2 +) -> Counter: + """ + this is a recursive function for get all factors of number + >>> get_factors(45) + Counter({3: 2, 5: 1}) + >>> get_factors(2520) + Counter({2: 3, 3: 2, 5: 1, 7: 1}) + >>> get_factors(23) + Counter({23: 1}) + >>> get_factors(0) + Traceback (most recent call last): + ... + TypeError: number must be integer and greater than zero + >>> get_factors(-1) + Traceback (most recent call last): + ... + TypeError: number must be integer and greater than zero + >>> get_factors(1.5) + Traceback (most recent call last): + ... 
+ TypeError: number must be integer and greater than zero + + factor can be all numbers from 2 to number that we check if number % factor == 0 + if it is equal to zero, we check again with number // factor + else we increase factor by one + """ + + match number: + case int(number) if number == 1: + return Counter({1: 1}) + case int(num) if number > 0: + number = num + case _: + raise TypeError("number must be integer and greater than zero") + + factors = factors or Counter() + + if number == factor: # break condition + # all numbers are factors of itself + factors[factor] += 1 + return factors + + if number % factor > 0: + # if it is greater than zero + # so it is not a factor of number and we check next number + return get_factors(number, factors, factor + 1) + + factors[factor] += 1 + # else we update factors (that is Counter(dict-like) type) and check again + return get_factors(number // factor, factors, factor) + + +def get_greatest_common_divisor(*numbers: int) -> int: + """ + get gcd of n numbers: + >>> get_greatest_common_divisor(18, 45) + 9 + >>> get_greatest_common_divisor(23, 37) + 1 + >>> get_greatest_common_divisor(2520, 8350) + 10 + >>> get_greatest_common_divisor(-10, 20) + Traceback (most recent call last): + ... + Exception: numbers must be integer and greater than zero + >>> get_greatest_common_divisor(1.5, 2) + Traceback (most recent call last): + ... + Exception: numbers must be integer and greater than zero + >>> get_greatest_common_divisor(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) + 1 + >>> get_greatest_common_divisor("1", 2, 3, 4, 5, 6, 7, 8, 9, 10) + Traceback (most recent call last): + ... + Exception: numbers must be integer and greater than zero + """ + + # we just need factors, not numbers itself + try: + same_factors, *factors = map(get_factors, numbers) + except TypeError as e: + raise Exception("numbers must be integer and greater than zero") from e + + for factor in factors: + same_factors &= factor + # get common factor between all + # `&` return common elements with smaller value (for Counter type) + + # now, same_factors is something like {2: 2, 3: 4} that means 2 * 2 * 3 * 3 * 3 * 3 + mult = 1 + # power each factor and multiply + # for {2: 2, 3: 4}, it is [4, 81] and then 324 + for m in [factor**power for factor, power in same_factors.items()]: + mult *= m + return mult + + +if __name__ == "__main__": + print(get_greatest_common_divisor(18, 45)) # 9 From c00af459fe0a18ae6adca2aec5ca8c7ff64864c8 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 26 Jan 2023 07:12:11 +0000 Subject: [PATCH 249/368] feat: Concatenate both factorial implementations (#8099) * feat: Concatenate both factorial implementations * fix: Rename factorial recursive method --- .../{factorial_iterative.py => factorial.py} | 24 ++++++++++++++++ maths/factorial_recursive.py | 28 ------------------- 2 files changed, 24 insertions(+), 28 deletions(-) rename maths/{factorial_iterative.py => factorial.py} (58%) delete mode 100644 maths/factorial_recursive.py diff --git a/maths/factorial_iterative.py b/maths/factorial.py similarity index 58% rename from maths/factorial_iterative.py rename to maths/factorial.py index c6cf7de57..bbf0efc01 100644 --- a/maths/factorial_iterative.py +++ b/maths/factorial.py @@ -34,6 +34,30 @@ def factorial(number: int) -> int: return value +def factorial_recursive(n: int) -> int: + """ + Calculate the factorial of a positive integer + https://en.wikipedia.org/wiki/Factorial + + >>> import math + >>> all(factorial(i) == math.factorial(i) for i in range(20)) + True 
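
# A quick standalone illustration of the Counter-intersection step described in
# the gcd_of_n_numbers patch above (an editorial sketch, not part of this
# factorial patch): `&` on two Counters keeps each key with the smaller count,
# which is exactly "take the minimum power of every shared prime factor";
# multiplying those prime powers back together yields the gcd, in agreement
# with math.gcd.
from collections import Counter
from functools import reduce
from math import gcd

factors_2520 = Counter({2: 3, 3: 2, 5: 1, 7: 1})  # 2520 = 2^3 * 3^2 * 5 * 7
factors_8350 = Counter({2: 1, 5: 2, 167: 1})  # 8350 = 2 * 5^2 * 167
common = factors_2520 & factors_8350  # Counter({2: 1, 5: 1})

product = 1
for prime, power in common.items():
    product *= prime**power

assert product == 10 == reduce(gcd, (2520, 8350))
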
+ >>> factorial(0.1) + Traceback (most recent call last): + ... + ValueError: factorial() only accepts integral values + >>> factorial(-1) + Traceback (most recent call last): + ... + ValueError: factorial() not defined for negative values + """ + if not isinstance(n, int): + raise ValueError("factorial() only accepts integral values") + if n < 0: + raise ValueError("factorial() not defined for negative values") + return 1 if n == 0 or n == 1 else n * factorial(n - 1) + + if __name__ == "__main__": import doctest diff --git a/maths/factorial_recursive.py b/maths/factorial_recursive.py deleted file mode 100644 index 137112738..000000000 --- a/maths/factorial_recursive.py +++ /dev/null @@ -1,28 +0,0 @@ -def factorial(n: int) -> int: - """ - Calculate the factorial of a positive integer - https://en.wikipedia.org/wiki/Factorial - - >>> import math - >>> all(factorial(i) == math.factorial(i) for i in range(20)) - True - >>> factorial(0.1) - Traceback (most recent call last): - ... - ValueError: factorial() only accepts integral values - >>> factorial(-1) - Traceback (most recent call last): - ... - ValueError: factorial() not defined for negative values - """ - if not isinstance(n, int): - raise ValueError("factorial() only accepts integral values") - if n < 0: - raise ValueError("factorial() not defined for negative values") - return 1 if n == 0 or n == 1 else n * factorial(n - 1) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 57c12fab2822df33b8da5a1fd9b95f2f7d64f130 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 26 Jan 2023 02:13:03 -0500 Subject: [PATCH 250/368] Fix `mypy` errors in `lorentz_transformation_four_vector.py` (#8075) * updating DIRECTORY.md * Fix mypy errors in lorentz_transformation_four_vector.py * Remove unused symbol vars * Add function documentation and rewrite algorithm explanation Previous explanation was misleading, as the code only calculates Lorentz transformations for movement in the x direction (0 velocity in the y and z directions) and not movement in any direction * updating DIRECTORY.md * Update error message for speed Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + physics/lorentz_transformation_four_vector.py | 138 ++++++++---------- 2 files changed, 62 insertions(+), 77 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5ce9dca74..31e86ea59 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -557,6 +557,7 @@ * [Gamma Recursive](maths/gamma_recursive.py) * [Gaussian](maths/gaussian.py) * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) + * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index f58b40e59..64be97245 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -1,39 +1,33 @@ """ -Lorentz transformation describes the transition from a reference frame P -to another reference frame P', each of which is moving in a direction with -respect to the other. 
The Lorentz transformation implemented in this code -is the relativistic version using a four vector described by Minkowsky Space: -x0 = ct, x1 = x, x2 = y, and x3 = z +Lorentz transformations describe the transition between two inertial reference +frames F and F', each of which is moving in some direction with respect to the +other. This code only calculates Lorentz transformations for movement in the x +direction with no spacial rotation (i.e., a Lorentz boost in the x direction). +The Lorentz transformations are calculated here as linear transformations of +four-vectors [ct, x, y, z] described by Minkowski space. Note that t (time) is +multiplied by c (the speed of light) in the first entry of each four-vector. -NOTE: Please note that x0 is c (speed of light) times t (time). +Thus, if X = [ct; x; y; z] and X' = [ct'; x'; y'; z'] are the four-vectors for +two inertial reference frames and X' moves in the x direction with velocity v +with respect to X, then the Lorentz transformation from X to X' is X' = BX, +where -So, the Lorentz transformation using a four vector is defined as: + | γ -γβ 0 0| +B = |-γβ γ 0 0| + | 0 0 1 0| + | 0 0 0 1| -|ct'| | γ -γβ 0 0| |ct| -|x' | = |-γβ γ 0 0| *|x | -|y' | | 0 0 1 0| |y | -|z' | | 0 0 0 1| |z | - -Where: - 1 -γ = --------------- - ----------- - / v^2 | - /(1 - --- - -/ c^2 - - v -β = ----- - c +is the matrix describing the Lorentz boost between X and X', +γ = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as +a fraction of c. Reference: https://en.wikipedia.org/wiki/Lorentz_transformation """ -from __future__ import annotations from math import sqrt -import numpy as np # type: ignore -from sympy import symbols # type: ignore +import numpy as np +from sympy import symbols # Coefficient # Speed of light (m/s) @@ -41,79 +35,77 @@ c = 299792458 # Symbols ct, x, y, z = symbols("ct x y z") -ct_p, x_p, y_p, z_p = symbols("ct' x' y' z'") # Vehicle's speed divided by speed of light (no units) def beta(velocity: float) -> float: """ + Calculates β = v/c, the given velocity as a fraction of c >>> beta(c) 1.0 - >>> beta(199792458) 0.666435904801848 - >>> beta(1e5) 0.00033356409519815205 - >>> beta(0.2) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! + ValueError: Speed must be greater than or equal to 1! """ if velocity > c: - raise ValueError("Speed must not exceed Light Speed 299,792,458 [m/s]!") - - # Usually the speed u should be much higher than 1 (c order of magnitude) + raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!") elif velocity < 1: - raise ValueError("Speed must be greater than 1!") + # Usually the speed should be much higher than 1 (c order of magnitude) + raise ValueError("Speed must be greater than or equal to 1!") + return velocity / c def gamma(velocity: float) -> float: """ + Calculate the Lorentz factor γ = 1 / √(1 - v²/c²) for a given velocity >>> gamma(4) 1.0000000000000002 - >>> gamma(1e5) 1.0000000556325075 - >>> gamma(3e7) 1.005044845777813 - >>> gamma(2.8e8) 2.7985595722318277 - >>> gamma(299792451) 4627.49902669495 - >>> gamma(0.3) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! - - >>> gamma(2*c) + ValueError: Speed must be greater than or equal to 1! + >>> gamma(2 * c) Traceback (most recent call last): ... - ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! + ValueError: Speed must not exceed light speed 299,792,458 [m/s]! 
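
# A small numeric sanity check on the formulas in the rewritten module
# docstring above (a standalone sketch, independent of the beta() and gamma()
# helpers in this patch): at v = 0.6c the velocity fraction is beta = 0.6 and
# the Lorentz factor is gamma = 1 / sqrt(1 - 0.36) = 1.25.
from math import isclose, sqrt

c = 299792458  # speed of light in m/s, as defined in this module
v = 0.6 * c
beta_val = v / c
gamma_val = 1 / sqrt(1 - beta_val**2)

assert isclose(beta_val, 0.6)
assert isclose(gamma_val, 1.25)
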
""" - return 1 / (sqrt(1 - beta(velocity) ** 2)) + return 1 / sqrt(1 - beta(velocity) ** 2) -def transformation_matrix(velocity: float) -> np.array: +def transformation_matrix(velocity: float) -> np.ndarray: """ + Calculate the Lorentz transformation matrix for movement in the x direction: + + | γ -γβ 0 0| + |-γβ γ 0 0| + | 0 0 1 0| + | 0 0 0 1| + + where γ is the Lorentz factor and β is the velocity as a fraction of c >>> transformation_matrix(29979245) array([[ 1.00503781, -0.10050378, 0. , 0. ], [-0.10050378, 1.00503781, 0. , 0. ], [ 0. , 0. , 1. , 0. ], [ 0. , 0. , 0. , 1. ]]) - >>> transformation_matrix(19979245.2) array([[ 1.00222811, -0.06679208, 0. , 0. ], [-0.06679208, 1.00222811, 0. , 0. ], [ 0. , 0. , 1. , 0. ], [ 0. , 0. , 0. , 1. ]]) - >>> transformation_matrix(1) array([[ 1.00000000e+00, -3.33564095e-09, 0.00000000e+00, 0.00000000e+00], @@ -123,16 +115,14 @@ def transformation_matrix(velocity: float) -> np.array: 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) - >>> transformation_matrix(0) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! - + ValueError: Speed must be greater than or equal to 1! >>> transformation_matrix(c * 1.5) Traceback (most recent call last): ... - ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! + ValueError: Speed must not exceed light speed 299,792,458 [m/s]! """ return np.array( [ @@ -144,44 +134,39 @@ def transformation_matrix(velocity: float) -> np.array: ) -def transform( - velocity: float, event: np.array = np.zeros(4), symbolic: bool = True # noqa: B008 -) -> np.array: +def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray: """ - >>> transform(29979245,np.array([1,2,3,4]), False) - array([ 3.01302757e+08, -3.01302729e+07, 3.00000000e+00, 4.00000000e+00]) + Calculate a Lorentz transformation for movement in the x direction given a + velocity and a four-vector for an inertial reference frame + If no four-vector is given, then calculate the transformation symbolically + with variables + >>> transform(29979245, np.array([1, 2, 3, 4])) + array([ 3.01302757e+08, -3.01302729e+07, 3.00000000e+00, 4.00000000e+00]) >>> transform(29979245) array([1.00503781498831*ct - 0.100503778816875*x, -0.100503778816875*ct + 1.00503781498831*x, 1.0*y, 1.0*z], dtype=object) - >>> transform(19879210.2) array([1.0022057787097*ct - 0.066456172618675*x, -0.066456172618675*ct + 1.0022057787097*x, 1.0*y, 1.0*z], dtype=object) - - >>> transform(299792459, np.array([1,1,1,1])) + >>> transform(299792459, np.array([1, 1, 1, 1])) Traceback (most recent call last): ... - ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! - - >>> transform(-1, np.array([1,1,1,1])) + ValueError: Speed must not exceed light speed 299,792,458 [m/s]! + >>> transform(-1, np.array([1, 1, 1, 1])) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! + ValueError: Speed must be greater than or equal to 1! 
""" - # Ensure event is not a vector of zeros - if not symbolic: - - # x0 is ct (speed of ligt * time) - event[0] = event[0] * c + # Ensure event is not empty + if event is None: + event = np.array([ct, x, y, z]) # Symbolic four vector else: + event[0] *= c # x0 is ct (speed of light * time) - # Symbolic four vector - event = np.array([ct, x, y, z]) - - return transformation_matrix(velocity).dot(event) + return transformation_matrix(velocity) @ event if __name__ == "__main__": @@ -197,9 +182,8 @@ if __name__ == "__main__": print(f"y' = {four_vector[2]}") print(f"z' = {four_vector[3]}") - # Substitute symbols with numerical values: - values = np.array([1, 1, 1, 1]) - sub_dict = {ct: c * values[0], x: values[1], y: values[2], z: values[3]} - numerical_vector = [four_vector[i].subs(sub_dict) for i in range(0, 4)] + # Substitute symbols with numerical values + sub_dict = {ct: c, x: 1, y: 1, z: 1} + numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)] print(f"\n{numerical_vector}") From ed0a581f9347b8fddc1928e52232eea250108573 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 23:42:15 +0100 Subject: [PATCH 251/368] [pre-commit.ci] pre-commit autoupdate (#8107) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/PyCQA/isort: 5.11.4 → 5.12.0](https://github.com/PyCQA/isort/compare/5.11.4...5.12.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8eb6d297e..b97ef2889 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,7 @@ repos: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.11.4 + rev: 5.12.0 hooks: - id: isort args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 31e86ea59..a8786cc25 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -543,8 +543,7 @@ * [Euler Modified](maths/euler_modified.py) * [Eulers Totient](maths/eulers_totient.py) * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py) - * [Factorial Iterative](maths/factorial_iterative.py) - * [Factorial Recursive](maths/factorial_recursive.py) + * [Factorial](maths/factorial.py) * [Factors](maths/factors.py) * [Fermat Little Theorem](maths/fermat_little_theorem.py) * [Fibonacci](maths/fibonacci.py) From c909da9b085957fcd16b6b30b6bdc0cf2855a150 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 1 Feb 2023 14:14:54 +0100 Subject: [PATCH 252/368] pre-commit: Upgrade psf/black for stable style 2023 (#8110) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * pre-commit: Upgrade psf/black for stable style 2023 Updating https://github.com/psf/black ... updating 22.12.0 -> 23.1.0 for their `2023 stable style`. * https://github.com/psf/black/blob/main/CHANGES.md#2310 > This is the first [psf/black] release of 2023, and following our stability policy, it comes with a number of improvements to our stable style… Also, add https://github.com/tox-dev/pyproject-fmt and https://github.com/abravalheri/validate-pyproject to pre-commit. I only modified `.pre-commit-config.yaml` and all other files were modified by pre-commit.ci and psf/black. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 12 +++++++++- arithmetic_analysis/newton_raphson_new.py | 1 - backtracking/n_queens_math.py | 1 - blockchain/chinese_remainder_theorem.py | 1 + ciphers/enigma_machine2.py | 1 - ciphers/playfair_cipher.py | 1 - ciphers/polybius.py | 1 - ciphers/xor_cipher.py | 2 -- compression/lz77.py | 1 - computer_vision/cnn_classification.py | 1 - computer_vision/harris_corner.py | 3 --- conversions/decimal_to_binary.py | 1 - conversions/molecular_chemistry.py | 1 - conversions/roman_numerals.py | 2 +- conversions/temperature_conversions.py | 1 - conversions/weight_conversion.py | 1 - .../binary_tree/binary_tree_traversals.py | 1 - .../inorder_tree_traversal_2022.py | 1 - data_structures/hashing/double_hash.py | 1 - data_structures/hashing/hash_table.py | 2 -- data_structures/heap/binomial_heap.py | 2 -- data_structures/heap/skew_heap.py | 1 - .../linked_list/doubly_linked_list_two.py | 2 -- data_structures/stacks/prefix_evaluation.py | 1 - .../stacks/stack_with_doubly_linked_list.py | 1 - data_structures/stacks/stock_span_problem.py | 2 -- .../filters/bilateral_filter.py | 1 - .../filters/local_binary_pattern.py | 1 - dynamic_programming/bitmask.py | 5 ---- .../iterating_through_submasks.py | 1 - electronics/coulombs_law.py | 1 - fractals/julia_sets.py | 1 - fractals/mandelbrot.py | 1 - geodesy/lamberts_ellipsoidal_distance.py | 1 - graphs/a_star.py | 1 - graphs/check_bipartite_graph_bfs.py | 1 - graphs/graph_matrix.py | 1 - graphs/karger.py | 1 - graphs/minimum_spanning_tree_boruvka.py | 1 - graphs/multi_heuristic_astar.py | 4 ++-- knapsack/recursive_approach_knapsack.py | 1 - linear_algebra/src/conjugate_gradient.py | 1 - machine_learning/k_means_clust.py | 3 --- machine_learning/self_organizing_map.py | 1 - .../sequential_minimum_optimization.py | 2 -- machine_learning/xgboost_classifier.py | 1 - maths/armstrong_numbers.py | 2 +- maths/binary_exponentiation.py | 1 - maths/combinations.py | 1 - maths/decimal_isolate.py | 1 - maths/fermat_little_theorem.py | 1 - maths/greedy_coin_change.py | 2 -- maths/integration_by_simpson_approx.py | 1 - maths/jaccard_similarity.py | 2 -- maths/least_common_multiple.py | 1 - maths/line_length.py | 2 -- maths/monte_carlo.py | 1 + maths/newton_raphson.py | 1 - maths/numerical_integration.py | 2 -- maths/primelib.py | 23 ------------------- maths/segmented_sieve.py | 1 - maths/two_pointer.py | 1 - maths/zellers_congruence.py | 1 - matrix/largest_square_area_in_matrix.py | 3 --- other/activity_selection.py | 1 - other/nested_brackets.py | 2 -- other/scoring_algorithm.py | 1 - other/sdes.py | 1 - physics/casimir_effect.py | 1 - physics/hubble_parameter.py | 1 - physics/newtons_law_of_gravitation.py | 1 - project_euler/problem_004/sol1.py | 2 -- project_euler/problem_074/sol2.py | 1 - project_euler/problem_089/sol1.py | 1 - project_euler/problem_092/sol1.py | 2 -- quantum/q_fourier_transform.py | 1 - quantum/quantum_teleportation.py | 1 - scheduling/highest_response_ratio_next.py | 2 -- searches/binary_search.py | 1 - searches/interpolation_search.py | 1 - searches/tabu_search.py | 1 - searches/ternary_search.py | 1 - sorts/comb_sort.py | 1 - sorts/odd_even_sort.py | 1 - sorts/odd_even_transposition_parallel.py | 1 - sorts/random_normal_distribution_quicksort.py | 2 -- sorts/shrink_shell_sort.py | 1 - sorts/stooge_sort.py | 1 - 
sorts/tim_sort.py | 1 - strings/dna.py | 1 - strings/hamming_distance.py | 1 - strings/levenshtein_distance.py | 2 -- strings/prefix_function.py | 1 - strings/text_justification.py | 1 - web_programming/fetch_anime_and_play.py | 9 ++------ web_programming/fetch_well_rx_price.py | 5 ---- web_programming/get_user_tweets.py | 1 - 97 files changed, 19 insertions(+), 154 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b97ef2889..f8d1a65db 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: - id: auto-walrus - repo: https://github.com/psf/black - rev: 22.12.0 + rev: 23.1.0 hooks: - id: black @@ -26,6 +26,16 @@ repos: args: - --profile=black + - repo: https://github.com/tox-dev/pyproject-fmt + rev: "0.6.0" + hooks: + - id: pyproject-fmt + + - repo: https://github.com/abravalheri/validate-pyproject + rev: v0.12.1 + hooks: + - id: validate-pyproject + - repo: https://github.com/asottile/pyupgrade rev: v3.3.1 hooks: diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py index dd1d7e092..472cb5b5a 100644 --- a/arithmetic_analysis/newton_raphson_new.py +++ b/arithmetic_analysis/newton_raphson_new.py @@ -59,7 +59,6 @@ def newton_raphson( # Let's Execute if __name__ == "__main__": - # Find root of trigonometric function # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index 2de784ded..23bd15906 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -107,7 +107,6 @@ def depth_first_search( # We iterate each column in the row to find all possible results in each row for col in range(n): - # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. 
Then we apply the two formulas we diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py index 54d861dd9..d3e75e779 100644 --- a/blockchain/chinese_remainder_theorem.py +++ b/blockchain/chinese_remainder_theorem.py @@ -53,6 +53,7 @@ def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int: # ----------SAME SOLUTION USING InvertModulo instead ExtendedEuclid---------------- + # This function find the inverses of a i.e., a^(-1) def invert_modulo(a: int, n: int) -> int: """ diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index a877256eb..07d21893f 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -230,7 +230,6 @@ def enigma( # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: - # 1st plugboard -------------------------- if symbol in plugboard: symbol = plugboard[symbol] diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 89aedb7af..7279fb23e 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -39,7 +39,6 @@ def prepare_input(dirty: str) -> str: def generate_table(key: str) -> list[str]: - # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ" diff --git a/ciphers/polybius.py b/ciphers/polybius.py index c81c1d395..3539ab70c 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -19,7 +19,6 @@ SQUARE = [ class PolybiusCipher: def __init__(self) -> None: - self.SQUARE = np.array(SQUARE) def letter_to_numbers(self, letter: str) -> np.ndarray: diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index ca9dfe20f..379ef0ef7 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -130,7 +130,6 @@ class XORCipher: try: with open(file) as fin: with open("encrypt.out", "w+") as fout: - # actual encrypt-process for line in fin: fout.write(self.encrypt_string(line, key)) @@ -155,7 +154,6 @@ class XORCipher: try: with open(file) as fin: with open("decrypt.out", "w+") as fout: - # actual encrypt-process for line in fin: fout.write(self.decrypt_string(line, key)) diff --git a/compression/lz77.py b/compression/lz77.py index 7c1a6f6a4..1b201c59f 100644 --- a/compression/lz77.py +++ b/compression/lz77.py @@ -89,7 +89,6 @@ class LZ77Compressor: # while there are still characters in text to compress while text: - # find the next encoding phrase # - triplet with offset, length, indicator (the next encoding character) token = self._find_encoding_token(text, search_buffer) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 59e4556e0..1c193fcbb 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -28,7 +28,6 @@ import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": - # Initialising the CNN # (Sequential- Building the model layer by layer) classifier = models.Sequential() diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index c8905bb6a..0cc7522bc 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -9,7 +9,6 @@ https://en.wikipedia.org/wiki/Harris_Corner_Detector class HarrisCorner: def __init__(self, k: float, window_size: int): - """ k : is an empirically determined constant in [0.04,0.06] window_size : neighbourhoods considered @@ -25,7 +24,6 @@ class HarrisCorner: return str(self.k) def detect(self, img_path: str) -> tuple[cv2.Mat, 
list[list[int]]]: - """ Returns the image with corners identified img_path : path of the image @@ -68,7 +66,6 @@ class HarrisCorner: if __name__ == "__main__": - edge_detect = HarrisCorner(0.04, 3) color_img, _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img) diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py index cfda57ca7..973c47c8a 100644 --- a/conversions/decimal_to_binary.py +++ b/conversions/decimal_to_binary.py @@ -2,7 +2,6 @@ def decimal_to_binary(num: int) -> str: - """ Convert an Integer Decimal Number to a Binary Number as str. >>> decimal_to_binary(0) diff --git a/conversions/molecular_chemistry.py b/conversions/molecular_chemistry.py index 0024eb5cb..51ffe534d 100644 --- a/conversions/molecular_chemistry.py +++ b/conversions/molecular_chemistry.py @@ -86,7 +86,6 @@ def pressure_and_volume_to_temperature( if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py index 61215a0c0..75af2ac72 100644 --- a/conversions/roman_numerals.py +++ b/conversions/roman_numerals.py @@ -47,7 +47,7 @@ def int_to_roman(number: int) -> str: True """ result = [] - for (arabic, roman) in ROMAN: + for arabic, roman in ROMAN: (factor, number) = divmod(number, arabic) result.append(roman * factor) if number == 0: diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py index e5af46556..f7af6c8f1 100644 --- a/conversions/temperature_conversions.py +++ b/conversions/temperature_conversions.py @@ -380,7 +380,6 @@ def reaumur_to_rankine(reaumur: float, ndigits: int = 2) -> float: if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index 18c403731..5c032a497 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -307,7 +307,6 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 54b1dc536..24dd1bd8c 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -105,7 +105,6 @@ def get_nodes_from_left_to_right( if not root: return if level == 1: - output.append(root.data) elif level > 1: populate_output(root.left, level - 1) diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py index 08001738f..e94ba7013 100644 --- a/data_structures/binary_tree/inorder_tree_traversal_2022.py +++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py @@ -58,7 +58,6 @@ def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return def make_tree() -> BinaryTreeNode | None: - root = insert(None, 15) insert(root, 10) insert(root, 25) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index 453e0d131..be21e74ca 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -24,7 +24,6 @@ class DoubleHash(HashTable): super().__init__(*args, **kwargs) def __hash_function_2(self, value, data): - next_prime_gt = ( next_prime(value % self.size_table) if not is_prime(value % self.size_table) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 
607454c82..7ca2f7c40 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -32,7 +32,6 @@ class HashTable: return key % self.size_table def _step_by_step(self, step_ord): - print(f"step {step_ord}") print(list(range(len(self.values)))) print(self.values) @@ -53,7 +52,6 @@ class HashTable: new_key = self.hash_function(key + 1) while self.values[new_key] is not None and self.values[new_key] != key: - if self.values.count(None) > 0: new_key = self.hash_function(new_key + 1) else: diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index d79fac7a9..2e05c5c80 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -174,7 +174,6 @@ class BinomialHeap: i.left_tree_size == i.parent.left_tree_size and i.left_tree_size != i.parent.parent.left_tree_size ): - # Neighbouring Nodes previous_node = i.left next_node = i.parent.parent @@ -233,7 +232,6 @@ class BinomialHeap: and self.bottom_root.left_tree_size == self.bottom_root.parent.left_tree_size ): - # Next node next_node = self.bottom_root.parent.parent diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index 490db061d..c4c13b082 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -71,7 +71,6 @@ class SkewHeap(Generic[T]): """ def __init__(self, data: Iterable[T] | None = ()) -> None: - """ >>> sh = SkewHeap([3, 1, 3, 7]) >>> list(sh) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 94b916a62..c19309c9f 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -80,7 +80,6 @@ class LinkedList: return None def set_head(self, node: Node) -> None: - if self.head is None: self.head = node self.tail = node @@ -143,7 +142,6 @@ class LinkedList: raise Exception("Node not found") def delete_value(self, value): - if (node := self.get_node(value)) is not None: if node == self.head: self.head = self.head.get_next() diff --git a/data_structures/stacks/prefix_evaluation.py b/data_structures/stacks/prefix_evaluation.py index 00df2c1e6..f48eca23d 100644 --- a/data_structures/stacks/prefix_evaluation.py +++ b/data_structures/stacks/prefix_evaluation.py @@ -36,7 +36,6 @@ def evaluate(expression): # iterate over the string in reverse order for c in expression.split()[::-1]: - # push operand to stack if is_operand(c): stack.append(int(c)) diff --git a/data_structures/stacks/stack_with_doubly_linked_list.py b/data_structures/stacks/stack_with_doubly_linked_list.py index a129665f2..50c5236e0 100644 --- a/data_structures/stacks/stack_with_doubly_linked_list.py +++ b/data_structures/stacks/stack_with_doubly_linked_list.py @@ -92,7 +92,6 @@ class Stack(Generic[T]): # Code execution starts here if __name__ == "__main__": - # Start with the empty stack stack: Stack[int] = Stack() diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py index 19a81bd36..de423c1eb 100644 --- a/data_structures/stacks/stock_span_problem.py +++ b/data_structures/stacks/stock_span_problem.py @@ -9,7 +9,6 @@ on the current day is less than or equal to its price on the given day. 
def calculation_span(price, s): - n = len(price) # Create a stack and push index of fist element to it st = [] @@ -20,7 +19,6 @@ def calculation_span(price, s): # Calculate span values for rest of the elements for i in range(1, n): - # Pop elements from stack while stack is not # empty and top of stack is smaller than price[i] while len(st) > 0 and price[st[0]] <= price[i]: diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 1afa01d3f..565da73f6 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -50,7 +50,6 @@ def bilateral_filter( size_x, size_y = img.shape for i in range(kernel_size // 2, size_x - kernel_size // 2): for j in range(kernel_size // 2, size_y - kernel_size // 2): - img_s = get_slice(img, i, j, kernel_size) img_i = img_s - img_s[kernel_size // 2, kernel_size // 2] img_ig = vec_gaussian(img_i, intensity_variance) diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py index e92e554a3..907fe2cb0 100644 --- a/digital_image_processing/filters/local_binary_pattern.py +++ b/digital_image_processing/filters/local_binary_pattern.py @@ -61,7 +61,6 @@ def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) if __name__ == "__main__": - # Reading the image and converting it to grayscale. image = cv2.imread( "digital_image_processing/image_data/lena.jpg", cv2.IMREAD_GRAYSCALE diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index f45250c9c..56bb8e96b 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -13,7 +13,6 @@ from collections import defaultdict class AssignmentUsingBitmask: def __init__(self, task_performed, total): - self.total_tasks = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N @@ -29,7 +28,6 @@ class AssignmentUsingBitmask: self.final_mask = (1 << len(task_performed)) - 1 def count_ways_until(self, mask, task_no): - # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 @@ -49,7 +47,6 @@ class AssignmentUsingBitmask: # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: - # if p is already given a task if mask & (1 << p): continue @@ -64,7 +61,6 @@ class AssignmentUsingBitmask: return self.dp[mask][task_no] def count_no_of_ways(self, task_performed): - # Store the list of persons for each task for i in range(len(task_performed)): for j in task_performed[i]: @@ -75,7 +71,6 @@ class AssignmentUsingBitmask: if __name__ == "__main__": - total_tasks = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. 
diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 21c64dba4..4d0a250e8 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -9,7 +9,6 @@ from __future__ import annotations def list_of_submasks(mask: int) -> list[int]: - """ Args: mask : number which shows mask ( always integer > 0, zero does not have any diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py index e41c0410c..18c1a8179 100644 --- a/electronics/coulombs_law.py +++ b/electronics/coulombs_law.py @@ -8,7 +8,6 @@ COULOMBS_CONSTANT = 8.988e9 # units = N * m^s * C^-2 def couloumbs_law( force: float, charge1: float, charge2: float, distance: float ) -> dict[str, float]: - """ Apply Coulomb's Law on any three given values. These can be force, charge1, charge2, or distance, and then in a Python dict return name/value pair of diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 77d1d7c04..482e1eddf 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -170,7 +170,6 @@ def ignore_overflow_warnings() -> None: if __name__ == "__main__": - z_0 = prepare_grid(window_size, nb_pixels) ignore_overflow_warnings() # See file header for explanations diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index f97bcd170..84dbda997 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -114,7 +114,6 @@ def get_image( # loop through the image-coordinates for image_x in range(image_width): for image_y in range(image_height): - # determine the figure-coordinates based on the image-coordinates figure_height = figure_width / image_width * image_height figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index 62ce59bb4..4805674e5 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -10,7 +10,6 @@ EQUATORIAL_RADIUS = 6378137 def lamberts_ellipsoidal_distance( lat1: float, lon1: float, lat2: float, lon2: float ) -> float: - """ Calculate the shortest distance along the surface of an ellipsoid between two points on the surface of earth given longitudes and latitudes diff --git a/graphs/a_star.py b/graphs/a_star.py index 793ba3bda..e8735179e 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -16,7 +16,6 @@ def search( cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]: - closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the reference grid diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index 552b7eee2..7fc57cbc7 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -20,7 +20,6 @@ def check_bipartite(graph): visited[u] = True for neighbour in graph[u]: - if neighbour == u: return False diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py index 987168426..4adc6c0bb 100644 --- a/graphs/graph_matrix.py +++ b/graphs/graph_matrix.py @@ -8,7 +8,6 @@ class Graph: self.graph[v - 1][u - 1] = 1 def show(self): - for i in self.graph: for j in i: print(j, end=" ") diff --git a/graphs/karger.py b/graphs/karger.py index f72128c81..3ef65c0d6 100644 --- a/graphs/karger.py +++ b/graphs/karger.py @@ -47,7 +47,6 @@ def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]: graph_copy = {node: graph[node][:] for node in graph} while 
len(graph_copy) > 2: - # Choose a random edge. u = random.choice(list(graph_copy.keys())) v = random.choice(graph_copy[u]) diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 6c72615cc..663d8e26c 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -4,7 +4,6 @@ class Graph: """ def __init__(self): - self.num_vertices = 0 self.num_edges = 0 self.adjacency = {} diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index cd8e37b00..0a18ede6e 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -33,7 +33,7 @@ class PriorityQueue: temp.append((pri, x)) (pri, x) = heapq.heappop(self.elements) temp.append((priority, item)) - for (pro, xxx) in temp: + for pro, xxx in temp: heapq.heappush(self.elements, (pro, xxx)) def remove_element(self, item): @@ -44,7 +44,7 @@ class PriorityQueue: while x != item: temp.append((pro, x)) (pro, x) = heapq.heappop(self.elements) - for (prito, yyy) in temp: + for prito, yyy in temp: heapq.heappush(self.elements, (prito, yyy)) def top_show(self): diff --git a/knapsack/recursive_approach_knapsack.py b/knapsack/recursive_approach_knapsack.py index d813981cb..9a8ed1886 100644 --- a/knapsack/recursive_approach_knapsack.py +++ b/knapsack/recursive_approach_knapsack.py @@ -46,7 +46,6 @@ def knapsack( if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 418ae88a5..4cf566ec9 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -115,7 +115,6 @@ def conjugate_gradient( iterations = 0 while error > tol: - # Save this value so we only calculate the matrix-vector product once. w = np.dot(spd_matrix, p0) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 5dc2b7118..b6305469e 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -74,7 +74,6 @@ def centroid_pairwise_dist(x, centroids): def assign_clusters(data, centroids): - # Compute distances between each data point and the set of centroids: # Fill in the blank (RHS only) distances_from_centroids = centroid_pairwise_dist(data, centroids) @@ -100,10 +99,8 @@ def revise_centroids(data, k, cluster_assignment): def compute_heterogeneity(data, k, centroids, cluster_assignment): - heterogeneity = 0.0 for i in range(k): - # Select all data points that belong to cluster i. 
Fill in the blank (RHS only) member_data_points = data[cluster_assignment == i, :] diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py index 057c2a76b..32fdf1d2b 100644 --- a/machine_learning/self_organizing_map.py +++ b/machine_learning/self_organizing_map.py @@ -49,7 +49,6 @@ def main() -> None: for _ in range(epochs): for j in range(len(training_samples)): - # training sample sample = training_samples[j] diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index f5185e1d9..9c45c3512 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -82,7 +82,6 @@ class SmoSVM: k = self._k state = None while True: - # 1: Find alpha1, alpha2 try: i1, i2 = self.choose_alpha.send(state) @@ -146,7 +145,6 @@ class SmoSVM: # Predict test samples def predict(self, test_samples, classify=True): - if test_samples.shape[1] > self.samples.shape[1]: raise ValueError( "Test samples' feature length does not equal to that of train samples" diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index 08967f171..1da933cf6 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -41,7 +41,6 @@ def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: def main() -> None: - """ >>> main() diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index f62991b74..26709b428 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -62,7 +62,7 @@ def pluperfect_number(n: int) -> bool: digit_histogram[rem] += 1 digit_total += 1 - for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): + for cnt, i in zip(digit_histogram, range(len(digit_histogram))): total += cnt * i**digit_total return n == total diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 8dda5245c..147b4285f 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -5,7 +5,6 @@ def binary_exponentiation(a, n): - if n == 0: return 1 diff --git a/maths/combinations.py b/maths/combinations.py index 6db1d773f..a2324012c 100644 --- a/maths/combinations.py +++ b/maths/combinations.py @@ -39,7 +39,6 @@ def combinations(n: int, k: int) -> int: if __name__ == "__main__": - print( "The number of five-card hands possible from a standard", f"fifty-two card deck is: {combinations(52, 5)}\n", diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py index cdf43ea5d..058ed1bb9 100644 --- a/maths/decimal_isolate.py +++ b/maths/decimal_isolate.py @@ -5,7 +5,6 @@ https://stackoverflow.com/questions/3886402/how-to-get-numbers-after-decimal-poi def decimal_isolate(number: float, digit_amount: int) -> float: - """ Isolates the decimal part of a number. If digitAmount > 0 round to that decimal place, else print the entire decimal. 
diff --git a/maths/fermat_little_theorem.py b/maths/fermat_little_theorem.py index 73af3e28c..eea03be24 100644 --- a/maths/fermat_little_theorem.py +++ b/maths/fermat_little_theorem.py @@ -6,7 +6,6 @@ def binary_exponentiation(a, n, mod): - if n == 0: return 1 diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py index 29c2f1803..7cf669bcb 100644 --- a/maths/greedy_coin_change.py +++ b/maths/greedy_coin_change.py @@ -62,7 +62,6 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: # Traverse through all denomination for denomination in reversed(denominations): - # Find denominations while int(total_value) >= int(denomination): total_value -= int(denomination) @@ -73,7 +72,6 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: # Driver Code if __name__ == "__main__": - denominations = [] value = "0" diff --git a/maths/integration_by_simpson_approx.py b/maths/integration_by_simpson_approx.py index 408041de9..f77ae7613 100644 --- a/maths/integration_by_simpson_approx.py +++ b/maths/integration_by_simpson_approx.py @@ -35,7 +35,6 @@ xn = b def simpson_integration(function, a: float, b: float, precision: int = 4) -> float: - """ Args: function : the function which's integration is desired diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index b299a8147..eab25188b 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -51,7 +51,6 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): """ if isinstance(set_a, set) and isinstance(set_b, set): - intersection = len(set_a.intersection(set_b)) if alternative_union: @@ -62,7 +61,6 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): return intersection / union if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)): - intersection = [element for element in set_a if element in set_b] if alternative_union: diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 0d087643e..621d93720 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,7 +67,6 @@ def benchmark(): class TestLeastCommonMultiple(unittest.TestCase): - test_inputs = [ (10, 20), (13, 15), diff --git a/maths/line_length.py b/maths/line_length.py index ea27ee904..b810f2d9a 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -10,7 +10,6 @@ def line_length( x_end: int | float, steps: int = 100, ) -> float: - """ Approximates the arc length of a line segment by treating the curve as a sequence of linear lines and summing their lengths @@ -41,7 +40,6 @@ def line_length( length = 0.0 for _ in range(steps): - # Approximates curve as a sequence of linear lines and sums their length x2 = (x_end - x_start) / steps + x1 fx2 = fnc(x2) diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index c13b8d0a4..474f1f65d 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -18,6 +18,7 @@ def pi_estimator(iterations: int): 5. Multiply this value by 4 to get your estimate of pi. 6. Print the estimated and numpy value of pi """ + # A local function to see if a dot lands in the circle. 
def is_in_circle(x: float, y: float) -> bool: distance_from_centre = sqrt((x**2) + (y**2)) diff --git a/maths/newton_raphson.py b/maths/newton_raphson.py index f2b7cb976..2c9cd1de9 100644 --- a/maths/newton_raphson.py +++ b/maths/newton_raphson.py @@ -19,7 +19,6 @@ def calc_derivative(f, a, h=0.001): def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=False): - a = x0 # set the initial guess steps = [a] error = abs(f(a)) diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index 8f32fd356..f2d65f89e 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -12,7 +12,6 @@ def trapezoidal_area( x_end: int | float, steps: int = 100, ) -> float: - """ Treats curve as a collection of linear lines and sums the area of the trapezium shape they form @@ -40,7 +39,6 @@ def trapezoidal_area( area = 0.0 for _ in range(steps): - # Approximates small segments of curve as linear and solve # for trapezoidal area x2 = (x_end - x_start) / steps + x1 diff --git a/maths/primelib.py b/maths/primelib.py index 9586227ea..81d573706 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -59,7 +59,6 @@ def is_prime(number: int) -> bool: status = False for divisor in range(2, int(round(sqrt(number))) + 1): - # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: @@ -95,9 +94,7 @@ def sieve_er(n): # actual sieve of erathostenes for i in range(len(begin_list)): - for j in range(i + 1, len(begin_list)): - if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): begin_list[j] = 0 @@ -128,9 +125,7 @@ def get_prime_numbers(n): # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): - if is_prime(number): - ans.append(number) # precondition @@ -160,14 +155,11 @@ def prime_factorization(number): quotient = number if number == 0 or number == 1: - ans.append(number) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(number): - while quotient != 1: - if is_prime(factor) and (quotient % factor == 0): ans.append(factor) quotient /= factor @@ -298,11 +290,9 @@ def goldbach(number): loop = True while i < len_pn and loop: - j = i + 1 while j < len_pn and loop: - if prime_numbers[i] + prime_numbers[j] == number: loop = False ans.append(prime_numbers[i]) @@ -345,7 +335,6 @@ def gcd(number1, number2): rest = 0 while number2 != 0: - rest = number1 % number2 number1 = number2 number2 = rest @@ -380,13 +369,11 @@ def kg_v(number1, number2): # for kgV (x,1) if number1 > 1 and number2 > 1: - # builds the prime factorization of 'number1' and 'number2' prime_fac_1 = prime_factorization(number1) prime_fac_2 = prime_factorization(number2) elif number1 == 1 or number2 == 1: - prime_fac_1 = [] prime_fac_2 = [] ans = max(number1, number2) @@ -398,11 +385,8 @@ def kg_v(number1, number2): # iterates through primeFac1 for n in prime_fac_1: - if n not in done: - if n in prime_fac_2: - count1 = prime_fac_1.count(n) count2 = prime_fac_2.count(n) @@ -410,7 +394,6 @@ def kg_v(number1, number2): ans *= n else: - count1 = prime_fac_1.count(n) for _ in range(count1): @@ -420,9 +403,7 @@ def kg_v(number1, number2): # iterates through primeFac2 for n in prime_fac_2: - if n not in done: - count2 = prime_fac_2.count(n) for _ in range(count2): @@ -455,7 +436,6 @@ def get_prime(n): ans = 2 # this variable holds the answer while index < n: - index += 1 ans += 1 # counts to the next number @@ -499,7 +479,6 @@ 
def get_primes_between(p_number_1, p_number_2): number += 1 while number < p_number_2: - ans.append(number) number += 1 @@ -534,7 +513,6 @@ def get_divisors(n): ans = [] # will be returned. for divisor in range(1, n + 1): - if n % divisor == 0: ans.append(divisor) @@ -638,7 +616,6 @@ def fib(n): ans = 1 # this will be return for _ in range(n - 1): - tmp = ans ans += fib1 fib1 = tmp diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index 35ed9702b..e950a83b7 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -25,7 +25,6 @@ def sieve(n: int) -> list[int]: while low <= n: temp = [True] * (high - low + 1) for each in in_prime: - t = math.floor(low / each) * each if t < low: t += each diff --git a/maths/two_pointer.py b/maths/two_pointer.py index ff234cddc..d0fb0fc9c 100644 --- a/maths/two_pointer.py +++ b/maths/two_pointer.py @@ -43,7 +43,6 @@ def two_pointer(nums: list[int], target: int) -> list[int]: j = len(nums) - 1 while i < j: - if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py index 624bbfe10..483fb000f 100644 --- a/maths/zellers_congruence.py +++ b/maths/zellers_congruence.py @@ -3,7 +3,6 @@ import datetime def zeller(date_input: str) -> str: - """ Zellers Congruence Algorithm Find the day of the week for nearly any Gregorian or Julian calendar date diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py index cf975cb7c..a93369c56 100644 --- a/matrix/largest_square_area_in_matrix.py +++ b/matrix/largest_square_area_in_matrix.py @@ -59,7 +59,6 @@ def largest_square_area_in_matrix_top_down_approch( """ def update_area_of_max_square(row: int, col: int) -> int: - # BASE CASE if row >= rows or col >= cols: return 0 @@ -138,7 +137,6 @@ def largest_square_area_in_matrix_bottom_up( largest_square_area = 0 for row in range(rows - 1, -1, -1): for col in range(cols - 1, -1, -1): - right = dp_array[row][col + 1] diagonal = dp_array[row + 1][col + 1] bottom = dp_array[row + 1][col] @@ -169,7 +167,6 @@ def largest_square_area_in_matrix_bottom_up_space_optimization( largest_square_area = 0 for row in range(rows - 1, -1, -1): for col in range(cols - 1, -1, -1): - right = current_row[col + 1] diagonal = next_row[col + 1] bottom = next_row[col] diff --git a/other/activity_selection.py b/other/activity_selection.py index 18ff6a24c..2cc08d959 100644 --- a/other/activity_selection.py +++ b/other/activity_selection.py @@ -25,7 +25,6 @@ def print_max_activities(start: list[int], finish: list[int]) -> None: # Consider rest of the activities for j in range(n): - # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 9dd9a0f04..3f61a4e70 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -15,14 +15,12 @@ brackets and returns true if S is nested and false otherwise. 
def is_balanced(s): - stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) for i in range(len(s)): - if s[i] in open_brackets: stack.append(s[i]) diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 1e6293f84..00d87cfc0 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -26,7 +26,6 @@ Thus the weights for each column are as follows: def procentual_proximity( source_data: list[list[float]], weights: list[int] ) -> list[list[float]]: - """ weights - int list possible values - 0 / 1 diff --git a/other/sdes.py b/other/sdes.py index 695675000..31105984b 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -54,7 +54,6 @@ def function(expansion, s0, s1, key, message): if __name__ == "__main__": - key = input("Enter 10 bit key: ") message = input("Enter 8 bit message: ") diff --git a/physics/casimir_effect.py b/physics/casimir_effect.py index ee8a6c1eb..e4a77e5b5 100644 --- a/physics/casimir_effect.py +++ b/physics/casimir_effect.py @@ -47,7 +47,6 @@ SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1 def casimir_force(force: float, area: float, distance: float) -> dict[str, float]: - """ Input Parameters ---------------- diff --git a/physics/hubble_parameter.py b/physics/hubble_parameter.py index 798564722..6bc62e713 100644 --- a/physics/hubble_parameter.py +++ b/physics/hubble_parameter.py @@ -34,7 +34,6 @@ def hubble_parameter( dark_energy: float, redshift: float, ) -> float: - """ Input Parameters ---------------- diff --git a/physics/newtons_law_of_gravitation.py b/physics/newtons_law_of_gravitation.py index 0bb27bb24..4bbeddd61 100644 --- a/physics/newtons_law_of_gravitation.py +++ b/physics/newtons_law_of_gravitation.py @@ -28,7 +28,6 @@ GRAVITATIONAL_CONSTANT = 6.6743e-11 # unit of G : m^3 * kg^-1 * s^-2 def gravitational_law( force: float, mass_1: float, mass_2: float, distance: float ) -> dict[str, float]: - """ Input Parameters ---------------- diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index b1e229289..f237afdd9 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -32,12 +32,10 @@ def solution(n: int = 998001) -> int: # fetches the next number for number in range(n - 1, 9999, -1): - str_number = str(number) # checks whether 'str_number' is a palindrome. 
if str_number == str_number[::-1]: - divisor = 999 # if 'number' is a product of two 3-digit numbers diff --git a/project_euler/problem_074/sol2.py b/project_euler/problem_074/sol2.py index d76bb014d..b54bc023e 100644 --- a/project_euler/problem_074/sol2.py +++ b/project_euler/problem_074/sol2.py @@ -111,7 +111,6 @@ def solution(chain_length: int = 60, number_limit: int = 1000000) -> int: chain_sets_lengths: dict[int, int] = {} for start_chain_element in range(1, number_limit): - # The temporary set will contain the elements of the chain chain_set = set() chain_set_length = 0 diff --git a/project_euler/problem_089/sol1.py b/project_euler/problem_089/sol1.py index 83609cd23..123159bdc 100644 --- a/project_euler/problem_089/sol1.py +++ b/project_euler/problem_089/sol1.py @@ -138,5 +138,4 @@ def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int: if __name__ == "__main__": - print(f"{solution() = }") diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index 33a6c0694..8d3f0c9dd 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -15,7 +15,6 @@ DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10000 def next_number(number: int) -> int: - """ Returns the next number of the chain by adding the square of each digit to form a new number. @@ -31,7 +30,6 @@ def next_number(number: int) -> int: sum_of_digits_squared = 0 while number: - # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100000] number //= 100000 diff --git a/quantum/q_fourier_transform.py b/quantum/q_fourier_transform.py index 07a257579..762ac4081 100644 --- a/quantum/q_fourier_transform.py +++ b/quantum/q_fourier_transform.py @@ -72,7 +72,6 @@ def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts counter = number_of_qubits for i in range(counter): - quantum_circuit.h(number_of_qubits - i - 1) counter -= 1 for j in range(counter): diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py index d04b44d15..5da79ed20 100644 --- a/quantum/quantum_teleportation.py +++ b/quantum/quantum_teleportation.py @@ -18,7 +18,6 @@ from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, exec def quantum_teleportation( theta: float = np.pi / 2, phi: float = np.pi / 2, lam: float = np.pi / 2 ) -> qiskit.result.counts.Counts: - """ # >>> quantum_teleportation() #{'00': 500, '11': 500} # ideally diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index a5c62ddbe..9c999ec65 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -37,7 +37,6 @@ def calculate_turn_around_time( arrival_time.sort() while no_of_process > finished_process_count: - """ If the current time is less than the arrival time of the process that arrives first among the processes that have not been performed, @@ -94,7 +93,6 @@ def calculate_waiting_time( if __name__ == "__main__": - no_of_process = 5 process_name = ["A", "B", "C", "D", "E"] arrival_time = [1, 2, 3, 4, 5] diff --git a/searches/binary_search.py b/searches/binary_search.py index 88fee4715..05dadd4fe 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -261,7 +261,6 @@ def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None def binary_search_by_recursion( sorted_collection: list[int], item: int, left: int, right: int ) -> int | None: - """Pure 
implementation of binary search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 35e6bc506..49194c260 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -49,7 +49,6 @@ def interpolation_search(sorted_collection, item): def interpolation_search_by_recursion(sorted_collection, item, left, right): - """Pure implementation of interpolation search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be unpredictable diff --git a/searches/tabu_search.py b/searches/tabu_search.py index 3e1728286..d998ddc55 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -220,7 +220,6 @@ def tabu_search( while not found: i = 0 while i < len(best_solution): - if best_solution[i] != solution[i]: first_exchange_node = best_solution[i] second_exchange_node = solution[i] diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 9830cce36..cb36e72fa 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -103,7 +103,6 @@ def ite_ternary_search(array: list[int], target: int) -> int: left = two_third + 1 else: - left = one_third + 1 right = two_third - 1 else: diff --git a/sorts/comb_sort.py b/sorts/comb_sort.py index 16bd10c78..3c8b1e99a 100644 --- a/sorts/comb_sort.py +++ b/sorts/comb_sort.py @@ -37,7 +37,6 @@ def comb_sort(data: list) -> list: completed = False while not completed: - # Update the gap value for a next comb gap = int(gap / shrink_factor) if gap <= 1: diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py index 9ef4462c7..7dfe03054 100644 --- a/sorts/odd_even_sort.py +++ b/sorts/odd_even_sort.py @@ -30,7 +30,6 @@ def odd_even_sort(input_list: list) -> list: is_sorted = True for i in range(0, len(input_list) - 1, 2): # iterating over all even indices if input_list[i] > input_list[i + 1]: - input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] # swapping if elements not in order is_sorted = False diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index b656df3a3..87b0e4d1e 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -34,7 +34,6 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0, 10): - if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py index 5777d5cb2..f7f60903c 100644 --- a/sorts/random_normal_distribution_quicksort.py +++ b/sorts/random_normal_distribution_quicksort.py @@ -19,7 +19,6 @@ def _in_place_quick_sort(a, start, end): def _in_place_partition(a, start, end): - count = 0 pivot = randint(start, end) temp = a[end] @@ -27,7 +26,6 @@ def _in_place_partition(a, start, end): a[pivot] = temp new_pivot_index = start - 1 for index in range(start, end): - count += 1 if a[index] < a[end]: # check if current val is less than pivot value new_pivot_index = new_pivot_index + 1 diff --git a/sorts/shrink_shell_sort.py b/sorts/shrink_shell_sort.py index 69992bfb7..f77b73d01 100644 --- a/sorts/shrink_shell_sort.py +++ b/sorts/shrink_shell_sort.py @@ 
-44,7 +44,6 @@ def shell_sort(collection: list) -> list: # Continue sorting until the gap is 1 while gap > 1: - # Decrease the gap value gap = int(gap / shrink) diff --git a/sorts/stooge_sort.py b/sorts/stooge_sort.py index de997a85d..9a5bedeae 100644 --- a/sorts/stooge_sort.py +++ b/sorts/stooge_sort.py @@ -12,7 +12,6 @@ def stooge_sort(arr): def stooge(arr, i, h): - if i >= h: return diff --git a/sorts/tim_sort.py b/sorts/tim_sort.py index b95ff34cf..c90c7e803 100644 --- a/sorts/tim_sort.py +++ b/sorts/tim_sort.py @@ -73,7 +73,6 @@ def tim_sort(lst): def main(): - lst = [5, 9, 10, 3, -4, 5, 178, 92, 46, -18, 0, 7] sorted_lst = tim_sort(lst) print(sorted_lst) diff --git a/strings/dna.py b/strings/dna.py index c2b96110b..33e1063f4 100644 --- a/strings/dna.py +++ b/strings/dna.py @@ -2,7 +2,6 @@ import re def dna(dna: str) -> str: - """ https://en.wikipedia.org/wiki/DNA Returns the second side of a DNA strand diff --git a/strings/hamming_distance.py b/strings/hamming_distance.py index 5de27dc77..a28949172 100644 --- a/strings/hamming_distance.py +++ b/strings/hamming_distance.py @@ -35,7 +35,6 @@ def hamming_distance(string1: str, string2: str) -> int: if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/strings/levenshtein_distance.py b/strings/levenshtein_distance.py index 9f7a7e3e6..7be4074dc 100644 --- a/strings/levenshtein_distance.py +++ b/strings/levenshtein_distance.py @@ -44,11 +44,9 @@ def levenshtein_distance(first_word: str, second_word: str) -> int: previous_row = list(range(len(second_word) + 1)) for i, c1 in enumerate(first_word): - current_row = [i + 1] for j, c2 in enumerate(second_word): - # Calculate insertions, deletions and substitutions insertions = previous_row[j + 1] + 1 deletions = current_row[j] + 1 diff --git a/strings/prefix_function.py b/strings/prefix_function.py index 6eca01635..65bbe9100 100644 --- a/strings/prefix_function.py +++ b/strings/prefix_function.py @@ -29,7 +29,6 @@ def prefix_function(input_string: str) -> list: prefix_result = [0] * len(input_string) for i in range(1, len(input_string)): - # use last results for better performance - dynamic programming j = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: diff --git a/strings/text_justification.py b/strings/text_justification.py index 5e86456c2..b0ef12231 100644 --- a/strings/text_justification.py +++ b/strings/text_justification.py @@ -33,7 +33,6 @@ def text_justification(word: str, max_width: int) -> list: words = word.split() def justify(line: list, width: int, max_width: int) -> str: - overall_spaces_count = max_width - width words_count = len(line) if len(line) == 1: diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index e11948d0a..3bd4f704d 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -8,7 +8,6 @@ BASE_URL = "https://ww1.gogoanime2.org" def search_scraper(anime_name: str) -> list: - """[summary] Take an url and @@ -66,7 +65,6 @@ def search_scraper(anime_name: str) -> list: def search_anime_episode_list(episode_endpoint: str) -> list: - """[summary] Take an url and @@ -116,7 +114,6 @@ def search_anime_episode_list(episode_endpoint: str) -> list: def get_anime_episode(episode_endpoint: str) -> list: - """[summary] Get click url and download url from episode url @@ -153,7 +150,6 @@ def get_anime_episode(episode_endpoint: str) -> list: if __name__ == "__main__": - anime_name = input("Enter anime name: ").strip() anime_list = 
search_scraper(anime_name) print("\n") @@ -161,9 +157,8 @@ if __name__ == "__main__": if len(anime_list) == 0: print("No anime found with this name") else: - print(f"Found {len(anime_list)} results: ") - for (i, anime) in enumerate(anime_list): + for i, anime in enumerate(anime_list): anime_title = anime["title"] print(f"{i+1}. {anime_title}") @@ -176,7 +171,7 @@ if __name__ == "__main__": print("No episode found for this anime") else: print(f"Found {len(episode_list)} results: ") - for (i, episode) in enumerate(episode_list): + for i, episode in enumerate(episode_list): print(f"{i+1}. {episode['title']}") episode_choice = int(input("\nChoose an episode by serial no: ").strip()) diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py index 5174f39f9..ee51b9a50 100644 --- a/web_programming/fetch_well_rx_price.py +++ b/web_programming/fetch_well_rx_price.py @@ -37,7 +37,6 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: """ try: - # Has user provided both inputs? if not drug_name or not zip_code: return None @@ -58,7 +57,6 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: grid_list = soup.find_all("div", {"class": "grid-x pharmCard"}) if grid_list and len(grid_list) > 0: for grid in grid_list: - # Get the pharmacy price. pharmacy_name = grid.find("p", {"class": "list-title"}).text @@ -79,7 +77,6 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: if __name__ == "__main__": - # Enter a drug name and a zip code drug_name = input("Enter drug name: ").strip() zip_code = input("Enter zip code: ").strip() @@ -89,10 +86,8 @@ if __name__ == "__main__": ) if pharmacy_price_list: - print(f"\nSearch results for {drug_name} at location {zip_code}:") for pharmacy_price in pharmacy_price_list: - name = pharmacy_price["pharmacy_name"] price = pharmacy_price["price"] diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py index 28cf85541..3abc69715 100644 --- a/web_programming/get_user_tweets.py +++ b/web_programming/get_user_tweets.py @@ -10,7 +10,6 @@ access_secret = "" def get_all_tweets(screen_name: str) -> None: - # authorize twitter, initialize tweepy auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_key, access_secret) From 77b4fa8b3f2070ff708405cca1381b7860e316ab Mon Sep 17 00:00:00 2001 From: Damon Gregory <46330424+SheriffHobo@users.noreply.github.com> Date: Sun, 12 Feb 2023 07:55:25 -0800 Subject: [PATCH 253/368] fix_ci_badge (#8134) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index da80c012b..68a6e5e6f 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@
- GitHub Workflow Status + GitHub Workflow Status pre-commit From 126e89d8a3983c1ffc9b3eefa1fbaff0f6fe4ead Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 22:05:56 +0100 Subject: [PATCH 254/368] [pre-commit.ci] pre-commit autoupdate (#8141) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/tox-dev/pyproject-fmt: 0.6.0 → 0.8.0](https://github.com/tox-dev/pyproject-fmt/compare/0.6.0...0.8.0) - [github.com/pre-commit/mirrors-mypy: v0.991 → v1.0.0](https://github.com/pre-commit/mirrors-mypy/compare/v0.991...v1.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f8d1a65db..a1496984f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.6.0" + rev: "0.8.0" hooks: - id: pyproject-fmt @@ -62,7 +62,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.991 + rev: v1.0.0 hooks: - id: mypy args: From 1bf03889c5e34420001e72b5d26cc0846dcd122a Mon Sep 17 00:00:00 2001 From: Jan Wojciechowski <96974442+yanvoi@users.noreply.github.com> Date: Sun, 19 Feb 2023 23:14:01 +0100 Subject: [PATCH 255/368] Update bogo_sort.py (#8144) --- sorts/bogo_sort.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/sorts/bogo_sort.py b/sorts/bogo_sort.py index b72f2089f..9c133f0d8 100644 --- a/sorts/bogo_sort.py +++ b/sorts/bogo_sort.py @@ -31,8 +31,6 @@ def bogo_sort(collection): """ def is_sorted(collection): - if len(collection) < 2: - return True for i in range(len(collection) - 1): if collection[i] > collection[i + 1]: return False From 67676c3b790d9631ea99c89f71dc2bf65e9aa2ca Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 08:33:44 +0100 Subject: [PATCH 256/368] [pre-commit.ci] pre-commit autoupdate (#8149) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 0.8.0 → 0.9.1](https://github.com/tox-dev/pyproject-fmt/compare/0.8.0...0.9.1) - [github.com/pre-commit/mirrors-mypy: v1.0.0 → v1.0.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.0.0...v1.0.1) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- pyproject.toml | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a1496984f..93064949e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.8.0" + rev: "0.9.1" hooks: - id: pyproject-fmt @@ -62,7 +62,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.0.0 + rev: v1.0.1 hooks: - id: mypy args: diff --git a/pyproject.toml b/pyproject.toml index 410e7655b..5f9b1aa06 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,6 @@ addopts = [ "--showlocals", ] - [tool.coverage.report] omit = [".env/*"] sort = "Cover" From 
1c15cdff70893bc27ced2b390959e1d9cc493628 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:08:40 +0100 Subject: [PATCH 257/368] [pre-commit.ci] pre-commit autoupdate (#8160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 0.9.1 → 0.9.2](https://github.com/tox-dev/pyproject-fmt/compare/0.9.1...0.9.2) * pre-commit: Add ruff --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 93064949e..9f27f985b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.9.1" + rev: "0.9.2" hooks: - id: pyproject-fmt @@ -43,6 +43,13 @@ repos: args: - --py311-plus + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.253 + hooks: + - id: ruff + args: + - --ignore=E741 + - repo: https://github.com/PyCQA/flake8 rev: 6.0.0 hooks: From 64543faa980b526f79d287a073ebb7554749faf9 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 1 Mar 2023 17:23:33 +0100 Subject: [PATCH 258/368] Make some ruff fixes (#8154) * Make some ruff fixes * Undo manual fix * Undo manual fix * Updates from ruff=0.0.251 --- audio_filters/iir_filter.py | 2 +- backtracking/n_queens_math.py | 6 +++--- backtracking/sum_of_subsets.py | 2 +- ciphers/bifid.py | 2 +- ciphers/diffie_hellman.py | 16 ++++++++-------- ciphers/polybius.py | 2 +- ciphers/xor_cipher.py | 18 ++++++++---------- computer_vision/mosaic_augmentation.py | 2 +- .../binary_tree/binary_search_tree.py | 2 +- .../binary_tree/binary_tree_traversals.py | 4 ++-- .../binary_tree/inorder_tree_traversal_2022.py | 2 +- data_structures/binary_tree/red_black_tree.py | 5 ++--- .../hashing/number_theory/prime_numbers.py | 2 +- data_structures/heap/binomial_heap.py | 4 ++-- .../linked_list/doubly_linked_list_two.py | 2 +- .../linked_list/singly_linked_list.py | 1 + data_structures/linked_list/skip_list.py | 5 +---- .../queue/circular_queue_linked_list.py | 2 +- .../dilation_operation.py | 2 +- .../erosion_operation.py | 2 +- dynamic_programming/all_construct.py | 2 +- dynamic_programming/fizz_buzz.py | 2 +- .../longest_common_subsequence.py | 10 ++-------- .../longest_increasing_subsequence.py | 2 +- graphs/basic_graphs.py | 14 ++++++-------- graphs/check_cycle.py | 9 ++++----- graphs/connected_components.py | 2 +- graphs/dijkstra_algorithm.py | 2 +- .../edmonds_karp_multiple_source_and_sink.py | 5 ++--- graphs/frequent_pattern_graph_miner.py | 6 +++--- graphs/minimum_spanning_tree_boruvka.py | 1 + graphs/minimum_spanning_tree_prims.py | 5 +---- graphs/minimum_spanning_tree_prims2.py | 16 +++++++--------- hashes/hamming_code.py | 5 ++--- linear_algebra/src/lib.py | 7 ++++--- machine_learning/gradient_descent.py | 2 ++ machine_learning/k_means_clust.py | 4 ++-- .../sequential_minimum_optimization.py | 9 ++++----- maths/abs.py | 6 +++--- maths/binary_exp_mod.py | 2 +- maths/jaccard_similarity.py | 1 + maths/largest_of_very_large_numbers.py | 1 + maths/radix2_fft.py | 5 +---- .../back_propagation_neural_network.py | 1 + other/graham_scan.py | 7 +++---- other/nested_brackets.py | 9 ++++----- physics/hubble_parameter.py | 4 ++-- 
project_euler/problem_005/sol1.py | 1 + project_euler/problem_009/sol1.py | 5 ++--- project_euler/problem_014/sol2.py | 5 +---- project_euler/problem_018/solution.py | 10 ++-------- project_euler/problem_019/sol1.py | 2 +- project_euler/problem_033/sol1.py | 8 +++----- project_euler/problem_064/sol1.py | 5 ++--- project_euler/problem_067/sol1.py | 10 ++-------- project_euler/problem_109/sol1.py | 2 +- project_euler/problem_203/sol1.py | 4 ++-- scheduling/shortest_job_first.py | 11 +++++------ scripts/build_directory_md.py | 5 ++--- searches/binary_tree_traversal.py | 1 + sorts/circle_sort.py | 13 ++++++------- sorts/counting_sort.py | 2 +- sorts/msd_radix_sort.py | 2 +- sorts/quick_sort.py | 2 +- sorts/recursive_quick_sort.py | 10 +++++----- sorts/tim_sort.py | 4 ++-- strings/autocomplete_using_trie.py | 5 +---- strings/check_anagrams.py | 5 +---- strings/is_palindrome.py | 5 +---- strings/snake_case_to_camel_pascal_case.py | 2 +- web_programming/convert_number_to_words.py | 6 +++--- web_programming/instagram_crawler.py | 2 +- web_programming/open_google_results.py | 5 +---- 73 files changed, 151 insertions(+), 203 deletions(-) diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py index aae320365..bd448175f 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -47,7 +47,7 @@ class IIRFilter: >>> filt.set_coefficients(a_coeffs, b_coeffs) """ if len(a_coeffs) < self.order: - a_coeffs = [1.0] + a_coeffs + a_coeffs = [1.0, *a_coeffs] if len(a_coeffs) != self.order + 1: raise ValueError( diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index 23bd15906..f3b08ab0a 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -129,9 +129,9 @@ def depth_first_search( # If it is False we call dfs function again and we update the inputs depth_first_search( - possible_board + [col], - diagonal_right_collisions + [row - col], - diagonal_left_collisions + [row + col], + [*possible_board, col], + [*diagonal_right_collisions, row - col], + [*diagonal_left_collisions, row + col], boards, n, ) diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index 128e29071..c5e23321c 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -44,7 +44,7 @@ def create_state_space_tree( nums, max_sum, index + 1, - path + [nums[index]], + [*path, nums[index]], result, remaining_nums_sum - nums[index], ) diff --git a/ciphers/bifid.py b/ciphers/bifid.py index c005e051a..a15b38164 100644 --- a/ciphers/bifid.py +++ b/ciphers/bifid.py @@ -33,7 +33,7 @@ class BifidCipher: >>> np.array_equal(BifidCipher().letter_to_numbers('u'), [4,5]) True """ - index1, index2 = np.where(self.SQUARE == letter) + index1, index2 = np.where(letter == self.SQUARE) indexes = np.concatenate([index1 + 1, index2 + 1]) return indexes diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py index 072f4aaaa..cd40a6b9c 100644 --- a/ciphers/diffie_hellman.py +++ b/ciphers/diffie_hellman.py @@ -228,10 +228,10 @@ class DiffieHellman: def is_valid_public_key(self, key: int) -> bool: # check if the other public key is valid based on NIST SP800-56 - if 2 <= key and key <= self.prime - 2: - if pow(key, (self.prime - 1) // 2, self.prime) == 1: - return True - return False + return ( + 2 <= key <= self.prime - 2 + and pow(key, (self.prime - 1) // 2, self.prime) == 1 + ) def generate_shared_key(self, other_key_str: str) -> str: other_key = int(other_key_str, base=16) @@ -243,10 +243,10 @@ class DiffieHellman: @staticmethod 
def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool: # check if the other public key is valid based on NIST SP800-56 - if 2 <= remote_public_key_str and remote_public_key_str <= prime - 2: - if pow(remote_public_key_str, (prime - 1) // 2, prime) == 1: - return True - return False + return ( + 2 <= remote_public_key_str <= prime - 2 + and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1 + ) @staticmethod def generate_shared_key_static( diff --git a/ciphers/polybius.py b/ciphers/polybius.py index 3539ab70c..d83badf4a 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -31,7 +31,7 @@ class PolybiusCipher: >>> np.array_equal(PolybiusCipher().letter_to_numbers('u'), [4,5]) True """ - index1, index2 = np.where(self.SQUARE == letter) + index1, index2 = np.where(letter == self.SQUARE) indexes = np.concatenate([index1 + 1, index2 + 1]) return indexes diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 379ef0ef7..0f369e38f 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -128,11 +128,10 @@ class XORCipher: assert isinstance(file, str) and isinstance(key, int) try: - with open(file) as fin: - with open("encrypt.out", "w+") as fout: - # actual encrypt-process - for line in fin: - fout.write(self.encrypt_string(line, key)) + with open(file) as fin, open("encrypt.out", "w+") as fout: + # actual encrypt-process + for line in fin: + fout.write(self.encrypt_string(line, key)) except OSError: return False @@ -152,11 +151,10 @@ class XORCipher: assert isinstance(file, str) and isinstance(key, int) try: - with open(file) as fin: - with open("decrypt.out", "w+") as fout: - # actual encrypt-process - for line in fin: - fout.write(self.decrypt_string(line, key)) + with open(file) as fin, open("decrypt.out", "w+") as fout: + # actual encrypt-process + for line in fin: + fout.write(self.decrypt_string(line, key)) except OSError: return False diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index e29537497..c150126d6 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -159,7 +159,7 @@ def update_image_and_anno( new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) # Remove bounding box small than scale of filter - if 0 < filter_scale: + if filter_scale > 0: new_anno = [ anno for anno in new_anno diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index fc512944e..cd88cc10e 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -60,7 +60,7 @@ class BinarySearchTree: else: # Tree is not empty parent_node = self.root # from root if parent_node is None: - return None + return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 24dd1bd8c..71a895e76 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -37,7 +37,7 @@ def preorder(root: Node | None) -> list[int]: >>> preorder(make_tree()) [1, 2, 4, 5, 3] """ - return [root.data] + preorder(root.left) + preorder(root.right) if root else [] + return [root.data, *preorder(root.left), *preorder(root.right)] if root else [] def postorder(root: Node | None) -> list[int]: @@ -55,7 +55,7 @@ def inorder(root: Node | None) -> 
list[int]: >>> inorder(make_tree()) [4, 2, 5, 1, 3] """ - return inorder(root.left) + [root.data] + inorder(root.right) if root else [] + return [*inorder(root.left), root.data, *inorder(root.right)] if root else [] def height(root: Node | None) -> int: diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py index e94ba7013..1357527d2 100644 --- a/data_structures/binary_tree/inorder_tree_traversal_2022.py +++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py @@ -50,7 +50,7 @@ def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return """ if node: inorder_array = inorder(node.left_child) - inorder_array = inorder_array + [node.data] + inorder_array = [*inorder_array, node.data] inorder_array = inorder_array + inorder(node.right_child) else: inorder_array = [] diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index a9dbd699c..b50d75d33 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -319,9 +319,8 @@ class RedBlackTree: """A helper function to recursively check Property 4 of a Red-Black Tree. See check_color_properties for more info. """ - if self.color == 1: - if color(self.left) == 1 or color(self.right) == 1: - return False + if self.color == 1 and 1 in (color(self.left), color(self.right)): + return False if self.left and not self.left.check_coloring(): return False if self.right and not self.right.check_coloring(): diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index b88ab76ec..0c25896f9 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -52,7 +52,7 @@ def next_prime(value, factor=1, **kwargs): first_value_val = value while not is_prime(value): - value += 1 if not ("desc" in kwargs.keys() and kwargs["desc"] is True) else -1 + value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1, **kwargs) diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 2e05c5c80..099bd2871 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -136,12 +136,12 @@ class BinomialHeap: # Empty heaps corner cases if other.size == 0: - return + return None if self.size == 0: self.size = other.size self.bottom_root = other.bottom_root self.min_node = other.min_node - return + return None # Update size self.size = self.size + other.size diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index c19309c9f..e993cc5a2 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -128,7 +128,7 @@ class LinkedList: while node: if current_position == position: self.insert_before_node(node, new_node) - return None + return current_position += 1 node = node.next self.insert_after_node(self.tail, new_node) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 3e52c7e43..bdeb5922a 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -107,6 +107,7 @@ class LinkedList: for i, node in enumerate(self): if i == index: return 
node + return None # Used to change the data of a particular node def __setitem__(self, index: int, data: Any) -> None: diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 96b0db7c8..4413c53e5 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -388,10 +388,7 @@ def test_delete_doesnt_leave_dead_nodes(): def test_iter_always_yields_sorted_values(): def is_sorted(lst): - for item, next_item in zip(lst, lst[1:]): - if next_item < item: - return False - return True + return all(next_item >= item for item, next_item in zip(lst, lst[1:])) skip_list = SkipList() for i in range(10): diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py index e8c2b8bff..62042c4bc 100644 --- a/data_structures/queue/circular_queue_linked_list.py +++ b/data_structures/queue/circular_queue_linked_list.py @@ -127,7 +127,7 @@ class CircularQueueLinkedList: """ self.check_can_perform_operation() if self.rear is None or self.front is None: - return + return None if self.front == self.rear: data = self.front.data self.front.data = None diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py index 274880b0a..c8380737d 100644 --- a/digital_image_processing/morphological_operations/dilation_operation.py +++ b/digital_image_processing/morphological_operations/dilation_operation.py @@ -32,7 +32,7 @@ def gray2binary(gray: np.array) -> np.array: [False, True, False], [False, True, False]]) """ - return (127 < gray) & (gray <= 255) + return (gray > 127) & (gray <= 255) def dilation(image: np.array, kernel: np.array) -> np.array: diff --git a/digital_image_processing/morphological_operations/erosion_operation.py b/digital_image_processing/morphological_operations/erosion_operation.py index 4b0a5eee8..c2cde2ea6 100644 --- a/digital_image_processing/morphological_operations/erosion_operation.py +++ b/digital_image_processing/morphological_operations/erosion_operation.py @@ -32,7 +32,7 @@ def gray2binary(gray: np.array) -> np.array: [False, True, False], [False, True, False]]) """ - return (127 < gray) & (gray <= 255) + return (gray > 127) & (gray <= 255) def erosion(image: np.array, kernel: np.array) -> np.array: diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 3839d01e6..6e53a702c 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -34,7 +34,7 @@ def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[ # slice condition if target[i : i + len(word)] == word: new_combinations: list[list[str]] = [ - [word] + way for way in table[i] + [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py index e77ab3de7..e29116437 100644 --- a/dynamic_programming/fizz_buzz.py +++ b/dynamic_programming/fizz_buzz.py @@ -49,7 +49,7 @@ def fizz_buzz(number: int, iterations: int) -> str: out += "Fizz" if number % 5 == 0: out += "Buzz" - if not number % 3 == 0 and not number % 5 == 0: + if 0 not in (number % 3, number % 5): out += str(number) # print(out) diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 3468fd87d..178b4169b 100644 --- 
a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -42,20 +42,14 @@ def longest_common_subsequence(x: str, y: str): for i in range(1, m + 1): for j in range(1, n + 1): - if x[i - 1] == y[j - 1]: - match = 1 - else: - match = 0 + match = 1 if x[i - 1] == y[j - 1] else 0 l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) seq = "" i, j = m, n while i > 0 and j > 0: - if x[i - 1] == y[j - 1]: - match = 1 - else: - match = 0 + match = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 6feed2352..d82789376 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -48,7 +48,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu i += 1 temp_array = [element for element in array[1:] if element >= pivot] - temp_array = [pivot] + longest_subsequence(temp_array) + temp_array = [pivot, *longest_subsequence(temp_array)] if len(temp_array) > len(longest_subseq): return temp_array else: diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 298a97bf0..065b6185c 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -139,10 +139,9 @@ def dijk(g, s): u = i known.add(u) for v in g[u]: - if v[0] not in known: - if dist[u] + v[1] < dist.get(v[0], 100000): - dist[v[0]] = dist[u] + v[1] - path[v[0]] = u + if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000): + dist[v[0]] = dist[u] + v[1] + path[v[0]] = u for i in dist: if i != s: print(dist[i]) @@ -243,10 +242,9 @@ def prim(g, s): u = i known.add(u) for v in g[u]: - if v[0] not in known: - if v[1] < dist.get(v[0], 100000): - dist[v[0]] = v[1] - path[v[0]] = u + if v[0] not in known and v[1] < dist.get(v[0], 100000): + dist[v[0]] = v[1] + path[v[0]] = u return dist diff --git a/graphs/check_cycle.py b/graphs/check_cycle.py index dcc864988..9fd1cd80f 100644 --- a/graphs/check_cycle.py +++ b/graphs/check_cycle.py @@ -15,11 +15,10 @@ def check_cycle(graph: dict) -> bool: visited: set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack rec_stk: set[int] = set() - for node in graph: - if node not in visited: - if depth_first_search(graph, node, visited, rec_stk): - return True - return False + return any( + node not in visited and depth_first_search(graph, node, visited, rec_stk) + for node in graph + ) def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool: diff --git a/graphs/connected_components.py b/graphs/connected_components.py index 4af7803d7..15c7633e1 100644 --- a/graphs/connected_components.py +++ b/graphs/connected_components.py @@ -27,7 +27,7 @@ def dfs(graph: dict, vert: int, visited: list) -> list: if not visited[neighbour]: connected_verts += dfs(graph, neighbour, visited) - return [vert] + connected_verts + return [vert, *connected_verts] def connected_components(graph: dict) -> list: diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 1845dad05..452138fe9 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -112,7 +112,7 @@ class Graph: self.dist[src] = 0 q = PriorityQueue() q.insert((0, src)) # (dist from src, node) - for u in self.adjList.keys(): + for u in self.adjList: if u != src: self.dist[u] = sys.maxsize # Infinity self.par[u] = -1 diff --git 
a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py index 070d758e6..d06108041 100644 --- a/graphs/edmonds_karp_multiple_source_and_sink.py +++ b/graphs/edmonds_karp_multiple_source_and_sink.py @@ -163,9 +163,8 @@ class PushRelabelExecutor(MaximumFlowAlgorithmExecutor): self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 - ): - if min_height is None or self.heights[to_index] < min_height: - min_height = self.heights[to_index] + ) and (min_height is None or self.heights[to_index] < min_height): + min_height = self.heights[to_index] if min_height is not None: self.heights[vertex_index] = min_height + 1 diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 87d5605a0..208e57f9b 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -130,11 +130,11 @@ def create_edge(nodes, graph, cluster, c1): """ create edge between the nodes """ - for i in cluster[c1].keys(): + for i in cluster[c1]: count = 0 c2 = c1 + 1 while c2 < max(cluster.keys()): - for j in cluster[c2].keys(): + for j in cluster[c2]: """ creates edge only if the condition satisfies """ @@ -185,7 +185,7 @@ def find_freq_subgraph_given_support(s, cluster, graph): find edges of multiple frequent subgraphs """ k = int(s / 100 * (len(cluster) - 1)) - for i in cluster[k].keys(): + for i in cluster[k]: my_dfs(graph, tuple(cluster[k][i]), (["Header"],)) diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 663d8e26c..3c6888037 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -144,6 +144,7 @@ class Graph: self.rank[root1] += 1 self.parent[root2] = root1 return root1 + return None @staticmethod def boruvka_mst(graph): diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index f577866f0..5a08ec57f 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -44,10 +44,7 @@ class Heap: temp = position[index] while index != 0: - if index % 2 == 0: - parent = int((index - 2) / 2) - else: - parent = int((index - 1) / 2) + parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: heap[index] = heap[parent] diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index 707be783d..81f30ef61 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -135,14 +135,14 @@ class MinPriorityQueue(Generic[T]): # only] curr_pos = self.position_map[elem] if curr_pos == 0: - return + return None parent_position = get_parent_position(curr_pos) _, weight = self.heap[curr_pos] _, parent_weight = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(parent_position, curr_pos) return self._bubble_up(elem) - return + return None def _bubble_down(self, elem: T) -> None: # Place a node at the proper position (downward movement) [to be used @@ -154,24 +154,22 @@ class MinPriorityQueue(Generic[T]): if child_left_position < self.elements and child_right_position < self.elements: _, child_left_weight = self.heap[child_left_position] _, child_right_weight = self.heap[child_right_position] - if child_right_weight < child_left_weight: - if child_right_weight < weight: - self._swap_nodes(child_right_position, curr_pos) - return self._bubble_down(elem) + if child_right_weight < child_left_weight and child_right_weight 
< weight: + self._swap_nodes(child_right_position, curr_pos) + return self._bubble_down(elem) if child_left_position < self.elements: _, child_left_weight = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(child_left_position, curr_pos) return self._bubble_down(elem) else: - return + return None if child_right_position < self.elements: _, child_right_weight = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(child_right_position, curr_pos) return self._bubble_down(elem) - else: - return + return None def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None: # Swap the nodes at the given positions diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 481a67507..dc9303218 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -126,9 +126,8 @@ def emitter_converter(size_par, data): aux = (bin_pos[cont_loop])[-1 * (bp)] except IndexError: aux = "0" - if aux == "1": - if x == "1": - cont_bo += 1 + if aux == "1" and x == "1": + cont_bo += 1 cont_loop += 1 parity.append(cont_bo % 2) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index ac0398a31..e3556e74c 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -108,7 +108,7 @@ class Vector: mul implements the scalar multiplication and the dot-product """ - if isinstance(other, float) or isinstance(other, int): + if isinstance(other, (float, int)): ans = [c * other for c in self.__components] return Vector(ans) elif isinstance(other, Vector) and len(self) == len(other): @@ -216,7 +216,7 @@ def axpy(scalar: float, x: Vector, y: Vector) -> Vector: assert ( isinstance(x, Vector) and isinstance(y, Vector) - and (isinstance(scalar, int) or isinstance(scalar, float)) + and (isinstance(scalar, (int, float))) ) return x * scalar + y @@ -337,12 +337,13 @@ class Matrix: "vector must have the same size as the " "number of columns of the matrix!" 
) - elif isinstance(other, int) or isinstance(other, float): # matrix-scalar + elif isinstance(other, (int, float)): # matrix-scalar matrix = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(matrix, self.__width, self.__height) + return None def height(self) -> int: """ diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index 9fa460a07..5b74dad08 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -55,6 +55,7 @@ def output(example_no, data_set): return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] + return None def calculate_hypothesis_value(example_no, data_set): @@ -68,6 +69,7 @@ def calculate_hypothesis_value(example_no, data_set): return _hypothesis_value(train_data[example_no][0]) elif data_set == "test": return _hypothesis_value(test_data[example_no][0]) + return None def summation_of_cost_derivative(index, end=m): diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index b6305469e..7c8142aab 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -229,7 +229,7 @@ def report_generator( """ # Fill missing values with given rules if fill_missing_report: - df.fillna(value=fill_missing_report, inplace=True) + df = df.fillna(value=fill_missing_report) df["dummy"] = 1 numeric_cols = df.select_dtypes(np.number).columns report = ( @@ -338,7 +338,7 @@ def report_generator( ) report.columns.name = "" report = report.reset_index() - report.drop(columns=["index"], inplace=True) + report = report.drop(columns=["index"]) return report diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 9c45c3512..37172c8e9 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -129,7 +129,7 @@ class SmoSVM: # error self._unbound = [i for i in self._all_samples if self._is_unbound(i)] for s in self.unbound: - if s == i1 or s == i2: + if s in (i1, i2): continue self._error[s] += ( y1 * (a1_new - a1) * k(i1, s) @@ -225,7 +225,7 @@ class SmoSVM: def _choose_alphas(self): locis = yield from self._choose_a1() if not locis: - return + return None return locis def _choose_a1(self): @@ -423,9 +423,8 @@ class Kernel: return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2)) def _check(self): - if self._kernel == self._rbf: - if self.gamma < 0: - raise ValueError("gamma value must greater than 0") + if self._kernel == self._rbf and self.gamma < 0: + raise ValueError("gamma value must greater than 0") def _get_kernel(self, kernel_name): maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} diff --git a/maths/abs.py b/maths/abs.py index cb0ffc8a5..b357e98d8 100644 --- a/maths/abs.py +++ b/maths/abs.py @@ -75,9 +75,9 @@ def test_abs_val(): """ >>> test_abs_val() """ - assert 0 == abs_val(0) - assert 34 == abs_val(34) - assert 100000000000 == abs_val(-100000000000) + assert abs_val(0) == 0 + assert abs_val(34) == 34 + assert abs_val(-100000000000) == 100000000000 a = [-3, -1, 2, -11] assert abs_max(a) == -11 diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py index 67dd1e728..df688892d 100644 --- a/maths/binary_exp_mod.py +++ b/maths/binary_exp_mod.py @@ -6,7 +6,7 @@ def bin_exp_mod(a, n, b): 7 """ # mod b - assert not (b == 0), "This cannot accept modulo that is == 0" + assert b != 0, "This cannot accept modulo 
that is == 0" if n == 0: return 1 diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index eab25188b..32054414c 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -71,6 +71,7 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): return len(intersection) / len(union) return len(intersection) / len(union) + return None if __name__ == "__main__": diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index d2dc0af18..7e7fea004 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -12,6 +12,7 @@ def res(x, y): return 0 elif y == 0: return 1 # any number raised to 0 is 1 + raise AssertionError("This should never happen") if __name__ == "__main__": # Main function diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 1def58e1f..af98f24f9 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -80,10 +80,7 @@ class FFT: # Discrete fourier transform of A and B def __dft(self, which): - if which == "A": - dft = [[x] for x in self.polyA] - else: - dft = [[x] for x in self.polyB] + dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB] # Corner case if len(dft) <= 1: return dft[0] diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index cb47b8290..9dd112115 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -153,6 +153,7 @@ class BPNN: if mse < self.accuracy: print("----达到精度----") return mse + return None def cal_loss(self, ydata, ydata_): self.loss = np.sum(np.power((ydata - ydata_), 2)) diff --git a/other/graham_scan.py b/other/graham_scan.py index 8e83bfcf4..2eadb4e56 100644 --- a/other/graham_scan.py +++ b/other/graham_scan.py @@ -125,10 +125,9 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: miny = y minx = x minidx = i - if y == miny: - if x < minx: - minx = x - minidx = i + if y == miny and x < minx: + minx = x + minidx = i # remove the lowest and the most left point from points for preparing for sort points.pop(minidx) diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 3f61a4e70..ea48c0a5f 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -24,11 +24,10 @@ def is_balanced(s): if s[i] in open_brackets: stack.append(s[i]) - elif s[i] in closed_brackets: - if len(stack) == 0 or ( - len(stack) > 0 and open_to_closed[stack.pop()] != s[i] - ): - return False + elif s[i] in closed_brackets and ( + len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i]) + ): + return False return len(stack) == 0 diff --git a/physics/hubble_parameter.py b/physics/hubble_parameter.py index 6bc62e713..f7b2d28a6 100644 --- a/physics/hubble_parameter.py +++ b/physics/hubble_parameter.py @@ -70,10 +70,10 @@ def hubble_parameter( 68.3 """ parameters = [redshift, radiation_density, matter_density, dark_energy] - if any(0 > p for p in parameters): + if any(p < 0 for p in parameters): raise ValueError("All input parameters must be positive") - if any(1 < p for p in parameters[1:4]): + if any(p > 1 for p in parameters[1:4]): raise ValueError("Relative densities cannot be greater than one") else: curvature = 1 - (matter_density + radiation_density + dark_energy) diff --git a/project_euler/problem_005/sol1.py b/project_euler/problem_005/sol1.py index f272c102d..01cbd0e15 100644 --- a/project_euler/problem_005/sol1.py +++ 
b/project_euler/problem_005/sol1.py @@ -63,6 +63,7 @@ def solution(n: int = 20) -> int: if i == 0: i = 1 return i + return None if __name__ == "__main__": diff --git a/project_euler/problem_009/sol1.py b/project_euler/problem_009/sol1.py index 1d908402b..e65c9b857 100644 --- a/project_euler/problem_009/sol1.py +++ b/project_euler/problem_009/sol1.py @@ -32,9 +32,8 @@ def solution() -> int: for a in range(300): for b in range(a + 1, 400): for c in range(b + 1, 500): - if (a + b + c) == 1000: - if (a**2) + (b**2) == (c**2): - return a * b * c + if (a + b + c) == 1000 and (a**2) + (b**2) == (c**2): + return a * b * c return -1 diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index d2a1d9f0e..2448e652c 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -34,10 +34,7 @@ def collatz_sequence_length(n: int) -> int: """Returns the Collatz sequence length for n.""" if n in COLLATZ_SEQUENCE_LENGTHS: return COLLATZ_SEQUENCE_LENGTHS[n] - if n % 2 == 0: - next_n = n // 2 - else: - next_n = 3 * n + 1 + next_n = n // 2 if n % 2 == 0 else 3 * n + 1 sequence_length = collatz_sequence_length(next_n) + 1 COLLATZ_SEQUENCE_LENGTHS[n] = sequence_length return sequence_length diff --git a/project_euler/problem_018/solution.py b/project_euler/problem_018/solution.py index 82fc3ce3c..70306148b 100644 --- a/project_euler/problem_018/solution.py +++ b/project_euler/problem_018/solution.py @@ -48,14 +48,8 @@ def solution(): for i in range(1, len(a)): for j in range(len(a[i])): - if j != len(a[i - 1]): - number1 = a[i - 1][j] - else: - number1 = 0 - if j > 0: - number2 = a[i - 1][j - 1] - else: - number2 = 0 + number1 = a[i - 1][j] if j != len(a[i - 1]) else 0 + number2 = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(number1, number2) return max(a[-1]) diff --git a/project_euler/problem_019/sol1.py b/project_euler/problem_019/sol1.py index ab5936584..0e38137d4 100644 --- a/project_euler/problem_019/sol1.py +++ b/project_euler/problem_019/sol1.py @@ -39,7 +39,7 @@ def solution(): while year < 2001: day += 7 - if (year % 4 == 0 and not year % 100 == 0) or (year % 400 == 0): + if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 day = day - days_per_month[month - 2] diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index e0c9a058a..32be424b6 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -20,11 +20,9 @@ from fractions import Fraction def is_digit_cancelling(num: int, den: int) -> bool: - if num != den: - if num % 10 == den // 10: - if (num // 10) / (den % 10) == num / den: - return True - return False + return ( + num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den + ) def fraction_list(digit_len: int) -> list[str]: diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 81ebcc7b7..12769decc 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -67,9 +67,8 @@ def solution(n: int = 10000) -> int: count_odd_periods = 0 for i in range(2, n + 1): sr = sqrt(i) - if sr - floor(sr) != 0: - if continuous_fraction_period(i) % 2 == 1: - count_odd_periods += 1 + if sr - floor(sr) != 0 and continuous_fraction_period(i) % 2 == 1: + count_odd_periods += 1 return count_odd_periods diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index f20c206cc..2b41fedc6 100644 --- 
a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -37,14 +37,8 @@ def solution(): for i in range(1, len(a)): for j in range(len(a[i])): - if j != len(a[i - 1]): - number1 = a[i - 1][j] - else: - number1 = 0 - if j > 0: - number2 = a[i - 1][j - 1] - else: - number2 = 0 + number1 = a[i - 1][j] if j != len(a[i - 1]) else 0 + number2 = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(number1, number2) return max(a[-1]) diff --git a/project_euler/problem_109/sol1.py b/project_euler/problem_109/sol1.py index 852f001d3..ef145dda5 100644 --- a/project_euler/problem_109/sol1.py +++ b/project_euler/problem_109/sol1.py @@ -65,7 +65,7 @@ def solution(limit: int = 100) -> int: >>> solution(50) 12577 """ - singles: list[int] = list(range(1, 21)) + [25] + singles: list[int] = [*list(range(1, 21)), 25] doubles: list[int] = [2 * x for x in range(1, 21)] + [50] triples: list[int] = [3 * x for x in range(1, 21)] all_values: list[int] = singles + doubles + triples + [0] diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index 713b530b6..da9436246 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -50,8 +50,8 @@ def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: coefficients = {1} previous_coefficients = [1] for _ in range(2, depth + 1): - coefficients_begins_one = previous_coefficients + [0] - coefficients_ends_one = [0] + previous_coefficients + coefficients_begins_one = [*previous_coefficients, 0] + coefficients_ends_one = [0, *previous_coefficients] previous_coefficients = [] for x, y in zip(coefficients_begins_one, coefficients_ends_one): coefficients.add(x + y) diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index b3f81bfd1..871de8207 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -36,12 +36,11 @@ def calculate_waitingtime( # Process until all processes are completed while complete != no_of_processes: for j in range(no_of_processes): - if arrival_time[j] <= increment_time: - if remaining_time[j] > 0: - if remaining_time[j] < minm: - minm = remaining_time[j] - short = j - check = True + if arrival_time[j] <= increment_time and remaining_time[j] > 0: + if remaining_time[j] < minm: + minm = remaining_time[j] + short = j + check = True if not check: increment_time += 1 diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index 7572ce342..b95be9ebc 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -21,9 +21,8 @@ def md_prefix(i): def print_path(old_path: str, new_path: str) -> str: old_parts = old_path.split(os.sep) for i, new_part in enumerate(new_path.split(os.sep)): - if i + 1 > len(old_parts) or old_parts[i] != new_part: - if new_part: - print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}") + if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part: + print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}") return new_path diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 66814b478..76e80df25 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -37,6 +37,7 @@ def build_tree(): right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) + return None def pre_order(node: TreeNode) -> None: diff --git a/sorts/circle_sort.py b/sorts/circle_sort.py index da3c59059..271fa1e8d 100644 --- a/sorts/circle_sort.py +++ b/sorts/circle_sort.py @@ -58,14 
+58,13 @@ def circle_sort(collection: list) -> list: left += 1 right -= 1 - if left == right: - if collection[left] > collection[right + 1]: - collection[left], collection[right + 1] = ( - collection[right + 1], - collection[left], - ) + if left == right and collection[left] > collection[right + 1]: + collection[left], collection[right + 1] = ( + collection[right + 1], + collection[left], + ) - swapped = True + swapped = True mid = low + int((high - low) / 2) left_swap = circle_sort_util(collection, low, mid) diff --git a/sorts/counting_sort.py b/sorts/counting_sort.py index 892ec5d5f..18c4b0323 100644 --- a/sorts/counting_sort.py +++ b/sorts/counting_sort.py @@ -66,7 +66,7 @@ def counting_sort_string(string): if __name__ == "__main__": # Test string sort - assert "eghhiiinrsssttt" == counting_sort_string("thisisthestring") + assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt" user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 74ce21762..03f84c75b 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -147,7 +147,7 @@ def _msd_radix_sort_inplace( list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i] j -= 1 - if not j == i: + if j != i: i += 1 _msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index 70cd19d7a..b79d3eac3 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -39,7 +39,7 @@ def quick_sort(collection: list) -> list: for element in collection[pivot_index + 1 :]: (greater if element > pivot else lesser).append(element) - return quick_sort(lesser) + [pivot] + quick_sort(greater) + return [*quick_sort(lesser), pivot, *quick_sort(greater)] if __name__ == "__main__": diff --git a/sorts/recursive_quick_sort.py b/sorts/recursive_quick_sort.py index c28a14e37..c29009aca 100644 --- a/sorts/recursive_quick_sort.py +++ b/sorts/recursive_quick_sort.py @@ -9,11 +9,11 @@ def quick_sort(data: list) -> list: if len(data) <= 1: return data else: - return ( - quick_sort([e for e in data[1:] if e <= data[0]]) - + [data[0]] - + quick_sort([e for e in data[1:] if e > data[0]]) - ) + return [ + *quick_sort([e for e in data[1:] if e <= data[0]]), + data[0], + *quick_sort([e for e in data[1:] if e > data[0]]), + ] if __name__ == "__main__": diff --git a/sorts/tim_sort.py b/sorts/tim_sort.py index c90c7e803..138f11c71 100644 --- a/sorts/tim_sort.py +++ b/sorts/tim_sort.py @@ -32,9 +32,9 @@ def merge(left, right): return left if left[0] < right[0]: - return [left[0]] + merge(left[1:], right) + return [left[0], *merge(left[1:], right)] - return [right[0]] + merge(left, right[1:]) + return [right[0], *merge(left, right[1:])] def tim_sort(lst): diff --git a/strings/autocomplete_using_trie.py b/strings/autocomplete_using_trie.py index 758260292..77a3050ab 100644 --- a/strings/autocomplete_using_trie.py +++ b/strings/autocomplete_using_trie.py @@ -27,10 +27,7 @@ class Trie: def _elements(self, d: dict) -> tuple: result = [] for c, v in d.items(): - if c == END: - sub_result = [" "] - else: - sub_result = [c + s for s in self._elements(v)] + sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)] result.extend(sub_result) return tuple(result) diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index 0d2f8091a..a364b9821 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -38,10 +38,7 @@ def 
check_anagrams(first_str: str, second_str: str) -> bool: count[first_str[i]] += 1 count[second_str[i]] -= 1 - for _count in count.values(): - if _count != 0: - return False - return True + return all(_count == 0 for _count in count.values()) if __name__ == "__main__": diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py index 9bf2abd98..406aa2e8d 100644 --- a/strings/is_palindrome.py +++ b/strings/is_palindrome.py @@ -30,10 +30,7 @@ def is_palindrome(s: str) -> bool: # with the help of 1st index (i==n-i-1) # where n is length of string - for i in range(end): - if s[i] != s[n - i - 1]: - return False - return True + return all(s[i] == s[n - i - 1] for i in range(end)) if __name__ == "__main__": diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index eaabdcb87..28a28b517 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -43,7 +43,7 @@ def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: initial_word = "" if use_pascal else words[0] - return "".join([initial_word] + capitalized_words) + return "".join([initial_word, *capitalized_words]) if __name__ == "__main__": diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py index 50612dec2..1e293df96 100644 --- a/web_programming/convert_number_to_words.py +++ b/web_programming/convert_number_to_words.py @@ -63,7 +63,7 @@ def convert(number: int) -> str: current = temp_num % 10 if counter % 2 == 0: addition = "" - if counter in placevalue.keys() and current != 0: + if counter in placevalue and current != 0: addition = placevalue[counter] if counter == 2: words = singles[current] + addition + words @@ -84,12 +84,12 @@ def convert(number: int) -> str: words = teens[number % 10] + words else: addition = "" - if counter in placevalue.keys(): + if counter in placevalue: addition = placevalue[counter] words = doubles[current] + addition + words else: addition = "" - if counter in placevalue.keys(): + if counter in placevalue: if current == 0 and ((temp_num % 100) // 10) == 0: addition = "" else: diff --git a/web_programming/instagram_crawler.py b/web_programming/instagram_crawler.py index 4536257a9..0816cd181 100644 --- a/web_programming/instagram_crawler.py +++ b/web_programming/instagram_crawler.py @@ -105,7 +105,7 @@ def test_instagram_user(username: str = "github") -> None: import os if os.environ.get("CI"): - return None # test failing on GitHub Actions + return # test failing on GitHub Actions instagram_user = InstagramUser(username) assert instagram_user.user_data assert isinstance(instagram_user.user_data, dict) diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index 2685bf621..f61e3666d 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -7,10 +7,7 @@ from bs4 import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": - if len(argv) > 1: - query = "%20".join(argv[1:]) - else: - query = quote(str(input("Search: "))) + query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") From 069a14b1c55112bc4f4e08571fc3c2156bb69e5a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 2 Mar 2023 07:55:47 +0300 Subject: [PATCH 259/368] Add Project Euler problem 082 solution 1 (#6282) Update DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + 
project_euler/problem_082/__init__.py | 0 project_euler/problem_082/input.txt | 80 +++++++++++++++++++++++ project_euler/problem_082/sol1.py | 65 ++++++++++++++++++ project_euler/problem_082/test_matrix.txt | 5 ++ 5 files changed, 152 insertions(+) create mode 100644 project_euler/problem_082/__init__.py create mode 100644 project_euler/problem_082/input.txt create mode 100644 project_euler/problem_082/sol1.py create mode 100644 project_euler/problem_082/test_matrix.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index a8786cc25..3d1bc967e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -918,6 +918,8 @@ * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 * [Sol1](project_euler/problem_081/sol1.py) + * Problem 082 + * [Sol1](project_euler/problem_082/sol1.py) * Problem 085 * [Sol1](project_euler/problem_085/sol1.py) * Problem 086 diff --git a/project_euler/problem_082/__init__.py b/project_euler/problem_082/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_082/input.txt b/project_euler/problem_082/input.txt new file mode 100644 index 000000000..f65322a7e --- /dev/null +++ b/project_euler/problem_082/input.txt @@ -0,0 +1,80 @@ +4445,2697,5115,718,2209,2212,654,4348,3079,6821,7668,3276,8874,4190,3785,2752,9473,7817,9137,496,7338,3434,7152,4355,4552,7917,7827,2460,2350,691,3514,5880,3145,7633,7199,3783,5066,7487,3285,1084,8985,760,872,8609,8051,1134,9536,5750,9716,9371,7619,5617,275,9721,2997,2698,1887,8825,6372,3014,2113,7122,7050,6775,5948,2758,1219,3539,348,7989,2735,9862,1263,8089,6401,9462,3168,2758,3748,5870 +1096,20,1318,7586,5167,2642,1443,5741,7621,7030,5526,4244,2348,4641,9827,2448,6918,5883,3737,300,7116,6531,567,5997,3971,6623,820,6148,3287,1874,7981,8424,7672,7575,6797,6717,1078,5008,4051,8795,5820,346,1851,6463,2117,6058,3407,8211,117,4822,1317,4377,4434,5925,8341,4800,1175,4173,690,8978,7470,1295,3799,8724,3509,9849,618,3320,7068,9633,2384,7175,544,6583,1908,9983,481,4187,9353,9377 +9607,7385,521,6084,1364,8983,7623,1585,6935,8551,2574,8267,4781,3834,2764,2084,2669,4656,9343,7709,2203,9328,8004,6192,5856,3555,2260,5118,6504,1839,9227,1259,9451,1388,7909,5733,6968,8519,9973,1663,5315,7571,3035,4325,4283,2304,6438,3815,9213,9806,9536,196,5542,6907,2475,1159,5820,9075,9470,2179,9248,1828,4592,9167,3713,4640,47,3637,309,7344,6955,346,378,9044,8635,7466,5036,9515,6385,9230 +7206,3114,7760,1094,6150,5182,7358,7387,4497,955,101,1478,7777,6966,7010,8417,6453,4955,3496,107,449,8271,131,2948,6185,784,5937,8001,6104,8282,4165,3642,710,2390,575,715,3089,6964,4217,192,5949,7006,715,3328,1152,66,8044,4319,1735,146,4818,5456,6451,4113,1063,4781,6799,602,1504,6245,6550,1417,1343,2363,3785,5448,4545,9371,5420,5068,4613,4882,4241,5043,7873,8042,8434,3939,9256,2187 +3620,8024,577,9997,7377,7682,1314,1158,6282,6310,1896,2509,5436,1732,9480,706,496,101,6232,7375,2207,2306,110,6772,3433,2878,8140,5933,8688,1399,2210,7332,6172,6403,7333,4044,2291,1790,2446,7390,8698,5723,3678,7104,1825,2040,140,3982,4905,4160,2200,5041,2512,1488,2268,1175,7588,8321,8078,7312,977,5257,8465,5068,3453,3096,1651,7906,253,9250,6021,8791,8109,6651,3412,345,4778,5152,4883,7505 +1074,5438,9008,2679,5397,5429,2652,3403,770,9188,4248,2493,4361,8327,9587,707,9525,5913,93,1899,328,2876,3604,673,8576,6908,7659,2544,3359,3883,5273,6587,3065,1749,3223,604,9925,6941,2823,8767,7039,3290,3214,1787,7904,3421,7137,9560,8451,2669,9219,6332,1576,5477,6755,8348,4164,4307,2984,4012,6629,1044,2874,6541,4942,903,1404,9125,5160,8836,4345,2581,460,8438,1538,5507,668,3352,2678,6942 
+4295,1176,5596,1521,3061,9868,7037,7129,8933,6659,5947,5063,3653,9447,9245,2679,767,714,116,8558,163,3927,8779,158,5093,2447,5782,3967,1716,931,7772,8164,1117,9244,5783,7776,3846,8862,6014,2330,6947,1777,3112,6008,3491,1906,5952,314,4602,8994,5919,9214,3995,5026,7688,6809,5003,3128,2509,7477,110,8971,3982,8539,2980,4689,6343,5411,2992,5270,5247,9260,2269,7474,1042,7162,5206,1232,4556,4757 +510,3556,5377,1406,5721,4946,2635,7847,4251,8293,8281,6351,4912,287,2870,3380,3948,5322,3840,4738,9563,1906,6298,3234,8959,1562,6297,8835,7861,239,6618,1322,2553,2213,5053,5446,4402,6500,5182,8585,6900,5756,9661,903,5186,7687,5998,7997,8081,8955,4835,6069,2621,1581,732,9564,1082,1853,5442,1342,520,1737,3703,5321,4793,2776,1508,1647,9101,2499,6891,4336,7012,3329,3212,1442,9993,3988,4930,7706 +9444,3401,5891,9716,1228,7107,109,3563,2700,6161,5039,4992,2242,8541,7372,2067,1294,3058,1306,320,8881,5756,9326,411,8650,8824,5495,8282,8397,2000,1228,7817,2099,6473,3571,5994,4447,1299,5991,543,7874,2297,1651,101,2093,3463,9189,6872,6118,872,1008,1779,2805,9084,4048,2123,5877,55,3075,1737,9459,4535,6453,3644,108,5982,4437,5213,1340,6967,9943,5815,669,8074,1838,6979,9132,9315,715,5048 +3327,4030,7177,6336,9933,5296,2621,4785,2755,4832,2512,2118,2244,4407,2170,499,7532,9742,5051,7687,970,6924,3527,4694,5145,1306,2165,5940,2425,8910,3513,1909,6983,346,6377,4304,9330,7203,6605,3709,3346,970,369,9737,5811,4427,9939,3693,8436,5566,1977,3728,2399,3985,8303,2492,5366,9802,9193,7296,1033,5060,9144,2766,1151,7629,5169,5995,58,7619,7565,4208,1713,6279,3209,4908,9224,7409,1325,8540 +6882,1265,1775,3648,4690,959,5837,4520,5394,1378,9485,1360,4018,578,9174,2932,9890,3696,116,1723,1178,9355,7063,1594,1918,8574,7594,7942,1547,6166,7888,354,6932,4651,1010,7759,6905,661,7689,6092,9292,3845,9605,8443,443,8275,5163,7720,7265,6356,7779,1798,1754,5225,6661,1180,8024,5666,88,9153,1840,3508,1193,4445,2648,3538,6243,6375,8107,5902,5423,2520,1122,5015,6113,8859,9370,966,8673,2442 +7338,3423,4723,6533,848,8041,7921,8277,4094,5368,7252,8852,9166,2250,2801,6125,8093,5738,4038,9808,7359,9494,601,9116,4946,2702,5573,2921,9862,1462,1269,2410,4171,2709,7508,6241,7522,615,2407,8200,4189,5492,5649,7353,2590,5203,4274,710,7329,9063,956,8371,3722,4253,4785,1194,4828,4717,4548,940,983,2575,4511,2938,1827,2027,2700,1236,841,5760,1680,6260,2373,3851,1841,4968,1172,5179,7175,3509 +4420,1327,3560,2376,6260,2988,9537,4064,4829,8872,9598,3228,1792,7118,9962,9336,4368,9189,6857,1829,9863,6287,7303,7769,2707,8257,2391,2009,3975,4993,3068,9835,3427,341,8412,2134,4034,8511,6421,3041,9012,2983,7289,100,1355,7904,9186,6920,5856,2008,6545,8331,3655,5011,839,8041,9255,6524,3862,8788,62,7455,3513,5003,8413,3918,2076,7960,6108,3638,6999,3436,1441,4858,4181,1866,8731,7745,3744,1000 +356,8296,8325,1058,1277,4743,3850,2388,6079,6462,2815,5620,8495,5378,75,4324,3441,9870,1113,165,1544,1179,2834,562,6176,2313,6836,8839,2986,9454,5199,6888,1927,5866,8760,320,1792,8296,7898,6121,7241,5886,5814,2815,8336,1576,4314,3109,2572,6011,2086,9061,9403,3947,5487,9731,7281,3159,1819,1334,3181,5844,5114,9898,4634,2531,4412,6430,4262,8482,4546,4555,6804,2607,9421,686,8649,8860,7794,6672 +9870,152,1558,4963,8750,4754,6521,6256,8818,5208,5691,9659,8377,9725,5050,5343,2539,6101,1844,9700,7750,8114,5357,3001,8830,4438,199,9545,8496,43,2078,327,9397,106,6090,8181,8646,6414,7499,5450,4850,6273,5014,4131,7639,3913,6571,8534,9703,4391,7618,445,1320,5,1894,6771,7383,9191,4708,9706,6939,7937,8726,9382,5216,3685,2247,9029,8154,1738,9984,2626,9438,4167,6351,5060,29,1218,1239,4785 
+192,5213,8297,8974,4032,6966,5717,1179,6523,4679,9513,1481,3041,5355,9303,9154,1389,8702,6589,7818,6336,3539,5538,3094,6646,6702,6266,2759,4608,4452,617,9406,8064,6379,444,5602,4950,1810,8391,1536,316,8714,1178,5182,5863,5110,5372,4954,1978,2971,5680,4863,2255,4630,5723,2168,538,1692,1319,7540,440,6430,6266,7712,7385,5702,620,641,3136,7350,1478,3155,2820,9109,6261,1122,4470,14,8493,2095 +1046,4301,6082,474,4974,7822,2102,5161,5172,6946,8074,9716,6586,9962,9749,5015,2217,995,5388,4402,7652,6399,6539,1349,8101,3677,1328,9612,7922,2879,231,5887,2655,508,4357,4964,3554,5930,6236,7384,4614,280,3093,9600,2110,7863,2631,6626,6620,68,1311,7198,7561,1768,5139,1431,221,230,2940,968,5283,6517,2146,1646,869,9402,7068,8645,7058,1765,9690,4152,2926,9504,2939,7504,6074,2944,6470,7859 +4659,736,4951,9344,1927,6271,8837,8711,3241,6579,7660,5499,5616,3743,5801,4682,9748,8796,779,1833,4549,8138,4026,775,4170,2432,4174,3741,7540,8017,2833,4027,396,811,2871,1150,9809,2719,9199,8504,1224,540,2051,3519,7982,7367,2761,308,3358,6505,2050,4836,5090,7864,805,2566,2409,6876,3361,8622,5572,5895,3280,441,7893,8105,1634,2929,274,3926,7786,6123,8233,9921,2674,5340,1445,203,4585,3837 +5759,338,7444,7968,7742,3755,1591,4839,1705,650,7061,2461,9230,9391,9373,2413,1213,431,7801,4994,2380,2703,6161,6878,8331,2538,6093,1275,5065,5062,2839,582,1014,8109,3525,1544,1569,8622,7944,2905,6120,1564,1839,5570,7579,1318,2677,5257,4418,5601,7935,7656,5192,1864,5886,6083,5580,6202,8869,1636,7907,4759,9082,5854,3185,7631,6854,5872,5632,5280,1431,2077,9717,7431,4256,8261,9680,4487,4752,4286 +1571,1428,8599,1230,7772,4221,8523,9049,4042,8726,7567,6736,9033,2104,4879,4967,6334,6716,3994,1269,8995,6539,3610,7667,6560,6065,874,848,4597,1711,7161,4811,6734,5723,6356,6026,9183,2586,5636,1092,7779,7923,8747,6887,7505,9909,1792,3233,4526,3176,1508,8043,720,5212,6046,4988,709,5277,8256,3642,1391,5803,1468,2145,3970,6301,7767,2359,8487,9771,8785,7520,856,1605,8972,2402,2386,991,1383,5963 +1822,4824,5957,6511,9868,4113,301,9353,6228,2881,2966,6956,9124,9574,9233,1601,7340,973,9396,540,4747,8590,9535,3650,7333,7583,4806,3593,2738,8157,5215,8472,2284,9473,3906,6982,5505,6053,7936,6074,7179,6688,1564,1103,6860,5839,2022,8490,910,7551,7805,881,7024,1855,9448,4790,1274,3672,2810,774,7623,4223,4850,6071,9975,4935,1915,9771,6690,3846,517,463,7624,4511,614,6394,3661,7409,1395,8127 +8738,3850,9555,3695,4383,2378,87,6256,6740,7682,9546,4255,6105,2000,1851,4073,8957,9022,6547,5189,2487,303,9602,7833,1628,4163,6678,3144,8589,7096,8913,5823,4890,7679,1212,9294,5884,2972,3012,3359,7794,7428,1579,4350,7246,4301,7779,7790,3294,9547,4367,3549,1958,8237,6758,3497,3250,3456,6318,1663,708,7714,6143,6890,3428,6853,9334,7992,591,6449,9786,1412,8500,722,5468,1371,108,3939,4199,2535 +7047,4323,1934,5163,4166,461,3544,2767,6554,203,6098,2265,9078,2075,4644,6641,8412,9183,487,101,7566,5622,1975,5726,2920,5374,7779,5631,3753,3725,2672,3621,4280,1162,5812,345,8173,9785,1525,955,5603,2215,2580,5261,2765,2990,5979,389,3907,2484,1232,5933,5871,3304,1138,1616,5114,9199,5072,7442,7245,6472,4760,6359,9053,7876,2564,9404,3043,9026,2261,3374,4460,7306,2326,966,828,3274,1712,3446 +3975,4565,8131,5800,4570,2306,8838,4392,9147,11,3911,7118,9645,4994,2028,6062,5431,2279,8752,2658,7836,994,7316,5336,7185,3289,1898,9689,2331,5737,3403,1124,2679,3241,7748,16,2724,5441,6640,9368,9081,5618,858,4969,17,2103,6035,8043,7475,2181,939,415,1617,8500,8253,2155,7843,7974,7859,1746,6336,3193,2617,8736,4079,6324,6645,8891,9396,5522,6103,1857,8979,3835,2475,1310,7422,610,8345,7615 
+9248,5397,5686,2988,3446,4359,6634,9141,497,9176,6773,7448,1907,8454,916,1596,2241,1626,1384,2741,3649,5362,8791,7170,2903,2475,5325,6451,924,3328,522,90,4813,9737,9557,691,2388,1383,4021,1609,9206,4707,5200,7107,8104,4333,9860,5013,1224,6959,8527,1877,4545,7772,6268,621,4915,9349,5970,706,9583,3071,4127,780,8231,3017,9114,3836,7503,2383,1977,4870,8035,2379,9704,1037,3992,3642,1016,4303 +5093,138,4639,6609,1146,5565,95,7521,9077,2272,974,4388,2465,2650,722,4998,3567,3047,921,2736,7855,173,2065,4238,1048,5,6847,9548,8632,9194,5942,4777,7910,8971,6279,7253,2516,1555,1833,3184,9453,9053,6897,7808,8629,4877,1871,8055,4881,7639,1537,7701,2508,7564,5845,5023,2304,5396,3193,2955,1088,3801,6203,1748,3737,1276,13,4120,7715,8552,3047,2921,106,7508,304,1280,7140,2567,9135,5266 +6237,4607,7527,9047,522,7371,4883,2540,5867,6366,5301,1570,421,276,3361,527,6637,4861,2401,7522,5808,9371,5298,2045,5096,5447,7755,5115,7060,8529,4078,1943,1697,1764,5453,7085,960,2405,739,2100,5800,728,9737,5704,5693,1431,8979,6428,673,7540,6,7773,5857,6823,150,5869,8486,684,5816,9626,7451,5579,8260,3397,5322,6920,1879,2127,2884,5478,4977,9016,6165,6292,3062,5671,5968,78,4619,4763 +9905,7127,9390,5185,6923,3721,9164,9705,4341,1031,1046,5127,7376,6528,3248,4941,1178,7889,3364,4486,5358,9402,9158,8600,1025,874,1839,1783,309,9030,1843,845,8398,1433,7118,70,8071,2877,3904,8866,6722,4299,10,1929,5897,4188,600,1889,3325,2485,6473,4474,7444,6992,4846,6166,4441,2283,2629,4352,7775,1101,2214,9985,215,8270,9750,2740,8361,7103,5930,8664,9690,8302,9267,344,2077,1372,1880,9550 +5825,8517,7769,2405,8204,1060,3603,7025,478,8334,1997,3692,7433,9101,7294,7498,9415,5452,3850,3508,6857,9213,6807,4412,7310,854,5384,686,4978,892,8651,3241,2743,3801,3813,8588,6701,4416,6990,6490,3197,6838,6503,114,8343,5844,8646,8694,65,791,5979,2687,2621,2019,8097,1423,3644,9764,4921,3266,3662,5561,2476,8271,8138,6147,1168,3340,1998,9874,6572,9873,6659,5609,2711,3931,9567,4143,7833,8887 +6223,2099,2700,589,4716,8333,1362,5007,2753,2848,4441,8397,7192,8191,4916,9955,6076,3370,6396,6971,3156,248,3911,2488,4930,2458,7183,5455,170,6809,6417,3390,1956,7188,577,7526,2203,968,8164,479,8699,7915,507,6393,4632,1597,7534,3604,618,3280,6061,9793,9238,8347,568,9645,2070,5198,6482,5000,9212,6655,5961,7513,1323,3872,6170,3812,4146,2736,67,3151,5548,2781,9679,7564,5043,8587,1893,4531 +5826,3690,6724,2121,9308,6986,8106,6659,2142,1642,7170,2877,5757,6494,8026,6571,8387,9961,6043,9758,9607,6450,8631,8334,7359,5256,8523,2225,7487,1977,9555,8048,5763,2414,4948,4265,2427,8978,8088,8841,9208,9601,5810,9398,8866,9138,4176,5875,7212,3272,6759,5678,7649,4922,5422,1343,8197,3154,3600,687,1028,4579,2084,9467,4492,7262,7296,6538,7657,7134,2077,1505,7332,6890,8964,4879,7603,7400,5973,739 +1861,1613,4879,1884,7334,966,2000,7489,2123,4287,1472,3263,4726,9203,1040,4103,6075,6049,330,9253,4062,4268,1635,9960,577,1320,3195,9628,1030,4092,4979,6474,6393,2799,6967,8687,7724,7392,9927,2085,3200,6466,8702,265,7646,8665,7986,7266,4574,6587,612,2724,704,3191,8323,9523,3002,704,5064,3960,8209,2027,2758,8393,4875,4641,9584,6401,7883,7014,768,443,5490,7506,1852,2005,8850,5776,4487,4269 +4052,6687,4705,7260,6645,6715,3706,5504,8672,2853,1136,8187,8203,4016,871,1809,1366,4952,9294,5339,6872,2645,6083,7874,3056,5218,7485,8796,7401,3348,2103,426,8572,4163,9171,3176,948,7654,9344,3217,1650,5580,7971,2622,76,2874,880,2034,9929,1546,2659,5811,3754,7096,7436,9694,9960,7415,2164,953,2360,4194,2397,1047,2196,6827,575,784,2675,8821,6802,7972,5996,6699,2134,7577,2887,1412,4349,4380 
+4629,2234,6240,8132,7592,3181,6389,1214,266,1910,2451,8784,2790,1127,6932,1447,8986,2492,5476,397,889,3027,7641,5083,5776,4022,185,3364,5701,2442,2840,4160,9525,4828,6602,2614,7447,3711,4505,7745,8034,6514,4907,2605,7753,6958,7270,6936,3006,8968,439,2326,4652,3085,3425,9863,5049,5361,8688,297,7580,8777,7916,6687,8683,7141,306,9569,2384,1500,3346,4601,7329,9040,6097,2727,6314,4501,4974,2829 +8316,4072,2025,6884,3027,1808,5714,7624,7880,8528,4205,8686,7587,3230,1139,7273,6163,6986,3914,9309,1464,9359,4474,7095,2212,7302,2583,9462,7532,6567,1606,4436,8981,5612,6796,4385,5076,2007,6072,3678,8331,1338,3299,8845,4783,8613,4071,1232,6028,2176,3990,2148,3748,103,9453,538,6745,9110,926,3125,473,5970,8728,7072,9062,1404,1317,5139,9862,6496,6062,3338,464,1600,2532,1088,8232,7739,8274,3873 +2341,523,7096,8397,8301,6541,9844,244,4993,2280,7689,4025,4196,5522,7904,6048,2623,9258,2149,9461,6448,8087,7245,1917,8340,7127,8466,5725,6996,3421,5313,512,9164,9837,9794,8369,4185,1488,7210,1524,1016,4620,9435,2478,7765,8035,697,6677,3724,6988,5853,7662,3895,9593,1185,4727,6025,5734,7665,3070,138,8469,6748,6459,561,7935,8646,2378,462,7755,3115,9690,8877,3946,2728,8793,244,6323,8666,4271 +6430,2406,8994,56,1267,3826,9443,7079,7579,5232,6691,3435,6718,5698,4144,7028,592,2627,217,734,6194,8156,9118,58,2640,8069,4127,3285,694,3197,3377,4143,4802,3324,8134,6953,7625,3598,3584,4289,7065,3434,2106,7132,5802,7920,9060,7531,3321,1725,1067,3751,444,5503,6785,7937,6365,4803,198,6266,8177,1470,6390,1606,2904,7555,9834,8667,2033,1723,5167,1666,8546,8152,473,4475,6451,7947,3062,3281 +2810,3042,7759,1741,2275,2609,7676,8640,4117,1958,7500,8048,1757,3954,9270,1971,4796,2912,660,5511,3553,1012,5757,4525,6084,7198,8352,5775,7726,8591,7710,9589,3122,4392,6856,5016,749,2285,3356,7482,9956,7348,2599,8944,495,3462,3578,551,4543,7207,7169,7796,1247,4278,6916,8176,3742,8385,2310,1345,8692,2667,4568,1770,8319,3585,4920,3890,4928,7343,5385,9772,7947,8786,2056,9266,3454,2807,877,2660 +6206,8252,5928,5837,4177,4333,207,7934,5581,9526,8906,1498,8411,2984,5198,5134,2464,8435,8514,8674,3876,599,5327,826,2152,4084,2433,9327,9697,4800,2728,3608,3849,3861,3498,9943,1407,3991,7191,9110,5666,8434,4704,6545,5944,2357,1163,4995,9619,6754,4200,9682,6654,4862,4744,5953,6632,1054,293,9439,8286,2255,696,8709,1533,1844,6441,430,1999,6063,9431,7018,8057,2920,6266,6799,356,3597,4024,6665 +3847,6356,8541,7225,2325,2946,5199,469,5450,7508,2197,9915,8284,7983,6341,3276,3321,16,1321,7608,5015,3362,8491,6968,6818,797,156,2575,706,9516,5344,5457,9210,5051,8099,1617,9951,7663,8253,9683,2670,1261,4710,1068,8753,4799,1228,2621,3275,6188,4699,1791,9518,8701,5932,4275,6011,9877,2933,4182,6059,2930,6687,6682,9771,654,9437,3169,8596,1827,5471,8909,2352,123,4394,3208,8756,5513,6917,2056 +5458,8173,3138,3290,4570,4892,3317,4251,9699,7973,1163,1935,5477,6648,9614,5655,9592,975,9118,2194,7322,8248,8413,3462,8560,1907,7810,6650,7355,2939,4973,6894,3933,3784,3200,2419,9234,4747,2208,2207,1945,2899,1407,6145,8023,3484,5688,7686,2737,3828,3704,9004,5190,9740,8643,8650,5358,4426,1522,1707,3613,9887,6956,2447,2762,833,1449,9489,2573,1080,4167,3456,6809,2466,227,7125,2759,6250,6472,8089 +3266,7025,9756,3914,1265,9116,7723,9788,6805,5493,2092,8688,6592,9173,4431,4028,6007,7131,4446,4815,3648,6701,759,3312,8355,4485,4187,5188,8746,7759,3528,2177,5243,8379,3838,7233,4607,9187,7216,2190,6967,2920,6082,7910,5354,3609,8958,6949,7731,494,8753,8707,1523,4426,3543,7085,647,6771,9847,646,5049,824,8417,5260,2730,5702,2513,9275,4279,2767,8684,1165,9903,4518,55,9682,8963,6005,2102,6523 
+1998,8731,936,1479,5259,7064,4085,91,7745,7136,3773,3810,730,8255,2705,2653,9790,6807,2342,355,9344,2668,3690,2028,9679,8102,574,4318,6481,9175,5423,8062,2867,9657,7553,3442,3920,7430,3945,7639,3714,3392,2525,4995,4850,2867,7951,9667,486,9506,9888,781,8866,1702,3795,90,356,1483,4200,2131,6969,5931,486,6880,4404,1084,5169,4910,6567,8335,4686,5043,2614,3352,2667,4513,6472,7471,5720,1616 +8878,1613,1716,868,1906,2681,564,665,5995,2474,7496,3432,9491,9087,8850,8287,669,823,347,6194,2264,2592,7871,7616,8508,4827,760,2676,4660,4881,7572,3811,9032,939,4384,929,7525,8419,5556,9063,662,8887,7026,8534,3111,1454,2082,7598,5726,6687,9647,7608,73,3014,5063,670,5461,5631,3367,9796,8475,7908,5073,1565,5008,5295,4457,1274,4788,1728,338,600,8415,8535,9351,7750,6887,5845,1741,125 +3637,6489,9634,9464,9055,2413,7824,9517,7532,3577,7050,6186,6980,9365,9782,191,870,2497,8498,2218,2757,5420,6468,586,3320,9230,1034,1393,9886,5072,9391,1178,8464,8042,6869,2075,8275,3601,7715,9470,8786,6475,8373,2159,9237,2066,3264,5000,679,355,3069,4073,494,2308,5512,4334,9438,8786,8637,9774,1169,1949,6594,6072,4270,9158,7916,5752,6794,9391,6301,5842,3285,2141,3898,8027,4310,8821,7079,1307 +8497,6681,4732,7151,7060,5204,9030,7157,833,5014,8723,3207,9796,9286,4913,119,5118,7650,9335,809,3675,2597,5144,3945,5090,8384,187,4102,1260,2445,2792,4422,8389,9290,50,1765,1521,6921,8586,4368,1565,5727,7855,2003,4834,9897,5911,8630,5070,1330,7692,7557,7980,6028,5805,9090,8265,3019,3802,698,9149,5748,1965,9658,4417,5994,5584,8226,2937,272,5743,1278,5698,8736,2595,6475,5342,6596,1149,6920 +8188,8009,9546,6310,8772,2500,9846,6592,6872,3857,1307,8125,7042,1544,6159,2330,643,4604,7899,6848,371,8067,2062,3200,7295,1857,9505,6936,384,2193,2190,301,8535,5503,1462,7380,5114,4824,8833,1763,4974,8711,9262,6698,3999,2645,6937,7747,1128,2933,3556,7943,2885,3122,9105,5447,418,2899,5148,3699,9021,9501,597,4084,175,1621,1,1079,6067,5812,4326,9914,6633,5394,4233,6728,9084,1864,5863,1225 +9935,8793,9117,1825,9542,8246,8437,3331,9128,9675,6086,7075,319,1334,7932,3583,7167,4178,1726,7720,695,8277,7887,6359,5912,1719,2780,8529,1359,2013,4498,8072,1129,9998,1147,8804,9405,6255,1619,2165,7491,1,8882,7378,3337,503,5758,4109,3577,985,3200,7615,8058,5032,1080,6410,6873,5496,1466,2412,9885,5904,4406,3605,8770,4361,6205,9193,1537,9959,214,7260,9566,1685,100,4920,7138,9819,5637,976 +3466,9854,985,1078,7222,8888,5466,5379,3578,4540,6853,8690,3728,6351,7147,3134,6921,9692,857,3307,4998,2172,5783,3931,9417,2541,6299,13,787,2099,9131,9494,896,8600,1643,8419,7248,2660,2609,8579,91,6663,5506,7675,1947,6165,4286,1972,9645,3805,1663,1456,8853,5705,9889,7489,1107,383,4044,2969,3343,152,7805,4980,9929,5033,1737,9953,7197,9158,4071,1324,473,9676,3984,9680,3606,8160,7384,5432 +1005,4512,5186,3953,2164,3372,4097,3247,8697,3022,9896,4101,3871,6791,3219,2742,4630,6967,7829,5991,6134,1197,1414,8923,8787,1394,8852,5019,7768,5147,8004,8825,5062,9625,7988,1110,3992,7984,9966,6516,6251,8270,421,3723,1432,4830,6935,8095,9059,2214,6483,6846,3120,1587,6201,6691,9096,9627,6671,4002,3495,9939,7708,7465,5879,6959,6634,3241,3401,2355,9061,2611,7830,3941,2177,2146,5089,7079,519,6351 +7280,8586,4261,2831,7217,3141,9994,9940,5462,2189,4005,6942,9848,5350,8060,6665,7519,4324,7684,657,9453,9296,2944,6843,7499,7847,1728,9681,3906,6353,5529,2822,3355,3897,7724,4257,7489,8672,4356,3983,1948,6892,7415,4153,5893,4190,621,1736,4045,9532,7701,3671,1211,1622,3176,4524,9317,7800,5638,6644,6943,5463,3531,2821,1347,5958,3436,1438,2999,994,850,4131,2616,1549,3465,5946,690,9273,6954,7991 
+9517,399,3249,2596,7736,2142,1322,968,7350,1614,468,3346,3265,7222,6086,1661,5317,2582,7959,4685,2807,2917,1037,5698,1529,3972,8716,2634,3301,3412,8621,743,8001,4734,888,7744,8092,3671,8941,1487,5658,7099,2781,99,1932,4443,4756,4652,9328,1581,7855,4312,5976,7255,6480,3996,2748,1973,9731,4530,2790,9417,7186,5303,3557,351,7182,9428,1342,9020,7599,1392,8304,2070,9138,7215,2008,9937,1106,7110 +7444,769,9688,632,1571,6820,8743,4338,337,3366,3073,1946,8219,104,4210,6986,249,5061,8693,7960,6546,1004,8857,5997,9352,4338,6105,5008,2556,6518,6694,4345,3727,7956,20,3954,8652,4424,9387,2035,8358,5962,5304,5194,8650,8282,1256,1103,2138,6679,1985,3653,2770,2433,4278,615,2863,1715,242,3790,2636,6998,3088,1671,2239,957,5411,4595,6282,2881,9974,2401,875,7574,2987,4587,3147,6766,9885,2965 +3287,3016,3619,6818,9073,6120,5423,557,2900,2015,8111,3873,1314,4189,1846,4399,7041,7583,2427,2864,3525,5002,2069,748,1948,6015,2684,438,770,8367,1663,7887,7759,1885,157,7770,4520,4878,3857,1137,3525,3050,6276,5569,7649,904,4533,7843,2199,5648,7628,9075,9441,3600,7231,2388,5640,9096,958,3058,584,5899,8150,1181,9616,1098,8162,6819,8171,1519,1140,7665,8801,2632,1299,9192,707,9955,2710,7314 +1772,2963,7578,3541,3095,1488,7026,2634,6015,4633,4370,2762,1650,2174,909,8158,2922,8467,4198,4280,9092,8856,8835,5457,2790,8574,9742,5054,9547,4156,7940,8126,9824,7340,8840,6574,3547,1477,3014,6798,7134,435,9484,9859,3031,4,1502,4133,1738,1807,4825,463,6343,9701,8506,9822,9555,8688,8168,3467,3234,6318,1787,5591,419,6593,7974,8486,9861,6381,6758,194,3061,4315,2863,4665,3789,2201,1492,4416 +126,8927,6608,5682,8986,6867,1715,6076,3159,788,3140,4744,830,9253,5812,5021,7616,8534,1546,9590,1101,9012,9821,8132,7857,4086,1069,7491,2988,1579,2442,4321,2149,7642,6108,250,6086,3167,24,9528,7663,2685,1220,9196,1397,5776,1577,1730,5481,977,6115,199,6326,2183,3767,5928,5586,7561,663,8649,9688,949,5913,9160,1870,5764,9887,4477,6703,1413,4995,5494,7131,2192,8969,7138,3997,8697,646,1028 +8074,1731,8245,624,4601,8706,155,8891,309,2552,8208,8452,2954,3124,3469,4246,3352,1105,4509,8677,9901,4416,8191,9283,5625,7120,2952,8881,7693,830,4580,8228,9459,8611,4499,1179,4988,1394,550,2336,6089,6872,269,7213,1848,917,6672,4890,656,1478,6536,3165,4743,4990,1176,6211,7207,5284,9730,4738,1549,4986,4942,8645,3698,9429,1439,2175,6549,3058,6513,1574,6988,8333,3406,5245,5431,7140,7085,6407 +7845,4694,2530,8249,290,5948,5509,1588,5940,4495,5866,5021,4626,3979,3296,7589,4854,1998,5627,3926,8346,6512,9608,1918,7070,4747,4182,2858,2766,4606,6269,4107,8982,8568,9053,4244,5604,102,2756,727,5887,2566,7922,44,5986,621,1202,374,6988,4130,3627,6744,9443,4568,1398,8679,397,3928,9159,367,2917,6127,5788,3304,8129,911,2669,1463,9749,264,4478,8940,1109,7309,2462,117,4692,7724,225,2312 +4164,3637,2000,941,8903,39,3443,7172,1031,3687,4901,8082,4945,4515,7204,9310,9349,9535,9940,218,1788,9245,2237,1541,5670,6538,6047,5553,9807,8101,1925,8714,445,8332,7309,6830,5786,5736,7306,2710,3034,1838,7969,6318,7912,2584,2080,7437,6705,2254,7428,820,782,9861,7596,3842,3631,8063,5240,6666,394,4565,7865,4895,9890,6028,6117,4724,9156,4473,4552,602,470,6191,4927,5387,884,3146,1978,3000 +4258,6880,1696,3582,5793,4923,2119,1155,9056,9698,6603,3768,5514,9927,9609,6166,6566,4536,4985,4934,8076,9062,6741,6163,7399,4562,2337,5600,2919,9012,8459,1308,6072,1225,9306,8818,5886,7243,7365,8792,6007,9256,6699,7171,4230,7002,8720,7839,4533,1671,478,7774,1607,2317,5437,4705,7886,4760,6760,7271,3081,2997,3088,7675,6208,3101,6821,6840,122,9633,4900,2067,8546,4549,2091,7188,5605,8599,6758,5229 
+7854,5243,9155,3556,8812,7047,2202,1541,5993,4600,4760,713,434,7911,7426,7414,8729,322,803,7960,7563,4908,6285,6291,736,3389,9339,4132,8701,7534,5287,3646,592,3065,7582,2592,8755,6068,8597,1982,5782,1894,2900,6236,4039,6569,3037,5837,7698,700,7815,2491,7272,5878,3083,6778,6639,3589,5010,8313,2581,6617,5869,8402,6808,2951,2321,5195,497,2190,6187,1342,1316,4453,7740,4154,2959,1781,1482,8256 +7178,2046,4419,744,8312,5356,6855,8839,319,2962,5662,47,6307,8662,68,4813,567,2712,9931,1678,3101,8227,6533,4933,6656,92,5846,4780,6256,6361,4323,9985,1231,2175,7178,3034,9744,6155,9165,7787,5836,9318,7860,9644,8941,6480,9443,8188,5928,161,6979,2352,5628,6991,1198,8067,5867,6620,3778,8426,2994,3122,3124,6335,3918,8897,2655,9670,634,1088,1576,8935,7255,474,8166,7417,9547,2886,5560,3842 +6957,3111,26,7530,7143,1295,1744,6057,3009,1854,8098,5405,2234,4874,9447,2620,9303,27,7410,969,40,2966,5648,7596,8637,4238,3143,3679,7187,690,9980,7085,7714,9373,5632,7526,6707,3951,9734,4216,2146,3602,5371,6029,3039,4433,4855,4151,1449,3376,8009,7240,7027,4602,2947,9081,4045,8424,9352,8742,923,2705,4266,3232,2264,6761,363,2651,3383,7770,6730,7856,7340,9679,2158,610,4471,4608,910,6241 +4417,6756,1013,8797,658,8809,5032,8703,7541,846,3357,2920,9817,1745,9980,7593,4667,3087,779,3218,6233,5568,4296,2289,2654,7898,5021,9461,5593,8214,9173,4203,2271,7980,2983,5952,9992,8399,3468,1776,3188,9314,1720,6523,2933,621,8685,5483,8986,6163,3444,9539,4320,155,3992,2828,2150,6071,524,2895,5468,8063,1210,3348,9071,4862,483,9017,4097,6186,9815,3610,5048,1644,1003,9865,9332,2145,1944,2213 +9284,3803,4920,1927,6706,4344,7383,4786,9890,2010,5228,1224,3158,6967,8580,8990,8883,5213,76,8306,2031,4980,5639,9519,7184,5645,7769,3259,8077,9130,1317,3096,9624,3818,1770,695,2454,947,6029,3474,9938,3527,5696,4760,7724,7738,2848,6442,5767,6845,8323,4131,2859,7595,2500,4815,3660,9130,8580,7016,8231,4391,8369,3444,4069,4021,556,6154,627,2778,1496,4206,6356,8434,8491,3816,8231,3190,5575,1015 +3787,7572,1788,6803,5641,6844,1961,4811,8535,9914,9999,1450,8857,738,4662,8569,6679,2225,7839,8618,286,2648,5342,2294,3205,4546,176,8705,3741,6134,8324,8021,7004,5205,7032,6637,9442,5539,5584,4819,5874,5807,8589,6871,9016,983,1758,3786,1519,6241,185,8398,495,3370,9133,3051,4549,9674,7311,9738,3316,9383,2658,2776,9481,7558,619,3943,3324,6491,4933,153,9738,4623,912,3595,7771,7939,1219,4405 +2650,3883,4154,5809,315,7756,4430,1788,4451,1631,6461,7230,6017,5751,138,588,5282,2442,9110,9035,6349,2515,1570,6122,4192,4174,3530,1933,4186,4420,4609,5739,4135,2963,6308,1161,8809,8619,2796,3819,6971,8228,4188,1492,909,8048,2328,6772,8467,7671,9068,2226,7579,6422,7056,8042,3296,2272,3006,2196,7320,3238,3490,3102,37,1293,3212,4767,5041,8773,5794,4456,6174,7279,7054,2835,7053,9088,790,6640 +3101,1057,7057,3826,6077,1025,2955,1224,1114,6729,5902,4698,6239,7203,9423,1804,4417,6686,1426,6941,8071,1029,4985,9010,6122,6597,1622,1574,3513,1684,7086,5505,3244,411,9638,4150,907,9135,829,981,1707,5359,8781,9751,5,9131,3973,7159,1340,6955,7514,7993,6964,8198,1933,2797,877,3993,4453,8020,9349,8646,2779,8679,2961,3547,3374,3510,1129,3568,2241,2625,9138,5974,8206,7669,7678,1833,8700,4480 +4865,9912,8038,8238,782,3095,8199,1127,4501,7280,2112,2487,3626,2790,9432,1475,6312,8277,4827,2218,5806,7132,8752,1468,7471,6386,739,8762,8323,8120,5169,9078,9058,3370,9560,7987,8585,8531,5347,9312,1058,4271,1159,5286,5404,6925,8606,9204,7361,2415,560,586,4002,2644,1927,2824,768,4409,2942,3345,1002,808,4941,6267,7979,5140,8643,7553,9438,7320,4938,2666,4609,2778,8158,6730,3748,3867,1866,7181 
+171,3771,7134,8927,4778,2913,3326,2004,3089,7853,1378,1729,4777,2706,9578,1360,5693,3036,1851,7248,2403,2273,8536,6501,9216,613,9671,7131,7719,6425,773,717,8803,160,1114,7554,7197,753,4513,4322,8499,4533,2609,4226,8710,6627,644,9666,6260,4870,5744,7385,6542,6203,7703,6130,8944,5589,2262,6803,6381,7414,6888,5123,7320,9392,9061,6780,322,8975,7050,5089,1061,2260,3199,1150,1865,5386,9699,6501 +3744,8454,6885,8277,919,1923,4001,6864,7854,5519,2491,6057,8794,9645,1776,5714,9786,9281,7538,6916,3215,395,2501,9618,4835,8846,9708,2813,3303,1794,8309,7176,2206,1602,1838,236,4593,2245,8993,4017,10,8215,6921,5206,4023,5932,6997,7801,262,7640,3107,8275,4938,7822,2425,3223,3886,2105,8700,9526,2088,8662,8034,7004,5710,2124,7164,3574,6630,9980,4242,2901,9471,1491,2117,4562,1130,9086,4117,6698 +2810,2280,2331,1170,4554,4071,8387,1215,2274,9848,6738,1604,7281,8805,439,1298,8318,7834,9426,8603,6092,7944,1309,8828,303,3157,4638,4439,9175,1921,4695,7716,1494,1015,1772,5913,1127,1952,1950,8905,4064,9890,385,9357,7945,5035,7082,5369,4093,6546,5187,5637,2041,8946,1758,7111,6566,1027,1049,5148,7224,7248,296,6169,375,1656,7993,2816,3717,4279,4675,1609,3317,42,6201,3100,3144,163,9530,4531 +7096,6070,1009,4988,3538,5801,7149,3063,2324,2912,7911,7002,4338,7880,2481,7368,3516,2016,7556,2193,1388,3865,8125,4637,4096,8114,750,3144,1938,7002,9343,4095,1392,4220,3455,6969,9647,1321,9048,1996,1640,6626,1788,314,9578,6630,2813,6626,4981,9908,7024,4355,3201,3521,3864,3303,464,1923,595,9801,3391,8366,8084,9374,1041,8807,9085,1892,9431,8317,9016,9221,8574,9981,9240,5395,2009,6310,2854,9255 +8830,3145,2960,9615,8220,6061,3452,2918,6481,9278,2297,3385,6565,7066,7316,5682,107,7646,4466,68,1952,9603,8615,54,7191,791,6833,2560,693,9733,4168,570,9127,9537,1925,8287,5508,4297,8452,8795,6213,7994,2420,4208,524,5915,8602,8330,2651,8547,6156,1812,6271,7991,9407,9804,1553,6866,1128,2119,4691,9711,8315,5879,9935,6900,482,682,4126,1041,428,6247,3720,5882,7526,2582,4327,7725,3503,2631 +2738,9323,721,7434,1453,6294,2957,3786,5722,6019,8685,4386,3066,9057,6860,499,5315,3045,5194,7111,3137,9104,941,586,3066,755,4177,8819,7040,5309,3583,3897,4428,7788,4721,7249,6559,7324,825,7311,3760,6064,6070,9672,4882,584,1365,9739,9331,5783,2624,7889,1604,1303,1555,7125,8312,425,8936,3233,7724,1480,403,7440,1784,1754,4721,1569,652,3893,4574,5692,9730,4813,9844,8291,9199,7101,3391,8914 +6044,2928,9332,3328,8588,447,3830,1176,3523,2705,8365,6136,5442,9049,5526,8575,8869,9031,7280,706,2794,8814,5767,4241,7696,78,6570,556,5083,1426,4502,3336,9518,2292,1885,3740,3153,9348,9331,8051,2759,5407,9028,7840,9255,831,515,2612,9747,7435,8964,4971,2048,4900,5967,8271,1719,9670,2810,6777,1594,6367,6259,8316,3815,1689,6840,9437,4361,822,9619,3065,83,6344,7486,8657,8228,9635,6932,4864 +8478,4777,6334,4678,7476,4963,6735,3096,5860,1405,5127,7269,7793,4738,227,9168,2996,8928,765,733,1276,7677,6258,1528,9558,3329,302,8901,1422,8277,6340,645,9125,8869,5952,141,8141,1816,9635,4025,4184,3093,83,2344,2747,9352,7966,1206,1126,1826,218,7939,2957,2729,810,8752,5247,4174,4038,8884,7899,9567,301,5265,5752,7524,4381,1669,3106,8270,6228,6373,754,2547,4240,2313,5514,3022,1040,9738 +2265,8192,1763,1369,8469,8789,4836,52,1212,6690,5257,8918,6723,6319,378,4039,2421,8555,8184,9577,1432,7139,8078,5452,9628,7579,4161,7490,5159,8559,1011,81,478,5840,1964,1334,6875,8670,9900,739,1514,8692,522,9316,6955,1345,8132,2277,3193,9773,3923,4177,2183,1236,6747,6575,4874,6003,6409,8187,745,8776,9440,7543,9825,2582,7381,8147,7236,5185,7564,6125,218,7991,6394,391,7659,7456,5128,5294 
+2132,8992,8160,5782,4420,3371,3798,5054,552,5631,7546,4716,1332,6486,7892,7441,4370,6231,4579,2121,8615,1145,9391,1524,1385,2400,9437,2454,7896,7467,2928,8400,3299,4025,7458,4703,7206,6358,792,6200,725,4275,4136,7390,5984,4502,7929,5085,8176,4600,119,3568,76,9363,6943,2248,9077,9731,6213,5817,6729,4190,3092,6910,759,2682,8380,1254,9604,3011,9291,5329,9453,9746,2739,6522,3765,5634,1113,5789 +5304,5499,564,2801,679,2653,1783,3608,7359,7797,3284,796,3222,437,7185,6135,8571,2778,7488,5746,678,6140,861,7750,803,9859,9918,2425,3734,2698,9005,4864,9818,6743,2475,132,9486,3825,5472,919,292,4411,7213,7699,6435,9019,6769,1388,802,2124,1345,8493,9487,8558,7061,8777,8833,2427,2238,5409,4957,8503,3171,7622,5779,6145,2417,5873,5563,5693,9574,9491,1937,7384,4563,6842,5432,2751,3406,7981 diff --git a/project_euler/problem_082/sol1.py b/project_euler/problem_082/sol1.py new file mode 100644 index 000000000..7b50dc887 --- /dev/null +++ b/project_euler/problem_082/sol1.py @@ -0,0 +1,65 @@ +""" +Project Euler Problem 82: https://projecteuler.net/problem=82 + +The minimal path sum in the 5 by 5 matrix below, by starting in any cell +in the left column and finishing in any cell in the right column, +and only moving up, down, and right, is indicated in red and bold; +the sum is equal to 994. + + 131 673 [234] [103] [18] + [201] [96] [342] 965 150 + 630 803 746 422 111 + 537 699 497 121 956 + 805 732 524 37 331 + +Find the minimal path sum from the left column to the right column in matrix.txt +(https://projecteuler.net/project/resources/p082_matrix.txt) +(right click and "Save Link/Target As..."), +a 31K text file containing an 80 by 80 matrix. +""" + +import os + + +def solution(filename: str = "input.txt") -> int: + """ + Returns the minimal path sum in the matrix from the file, by starting in any cell + in the left column and finishing in any cell in the right column, + and only moving up, down, and right + + >>> solution("test_matrix.txt") + 994 + """ + + with open(os.path.join(os.path.dirname(__file__), filename)) as input_file: + matrix = [ + [int(element) for element in line.split(",")] + for line in input_file.readlines() + ] + + rows = len(matrix) + cols = len(matrix[0]) + + minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)] + for i in range(rows): + minimal_path_sums[i][0] = matrix[i][0] + + for j in range(1, cols): + for i in range(rows): + minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j] + + for i in range(1, rows): + minimal_path_sums[i][j] = min( + minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] + ) + + for i in range(rows - 2, -1, -1): + minimal_path_sums[i][j] = min( + minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] + ) + + return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums) + + +if __name__ == "__main__": + print(f"{solution() = }") diff --git a/project_euler/problem_082/test_matrix.txt b/project_euler/problem_082/test_matrix.txt new file mode 100644 index 000000000..76167d9e7 --- /dev/null +++ b/project_euler/problem_082/test_matrix.txt @@ -0,0 +1,5 @@ +131,673,234,103,18 +201,96,342,965,150 +630,803,746,422,111 +537,699,497,121,956 +805,732,524,37,331 From ee778128bdf8d4d6d386cfdc500f3b3173f56c06 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 2 Mar 2023 07:57:07 +0300 Subject: [PATCH 260/368] Reduce the complexity of other/scoring_algorithm.py (#8045) * Increase the --max-complexity threshold in the file .flake8 --- other/scoring_algorithm.py | 57 
++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 14 deletions(-) diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 00d87cfc0..8e04a8f30 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -23,29 +23,29 @@ Thus the weights for each column are as follows: """ -def procentual_proximity( - source_data: list[list[float]], weights: list[int] -) -> list[list[float]]: +def get_data(source_data: list[list[float]]) -> list[list[float]]: """ - weights - int list - possible values - 0 / 1 - 0 if lower values have higher weight in the data set - 1 if higher values have higher weight in the data set - - >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) - [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] + >>> get_data([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]]) + [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0], [2012.0, 2015.0, 2011.0]] """ - - # getting data data_lists: list[list[float]] = [] for data in source_data: for i, el in enumerate(data): if len(data_lists) < i + 1: data_lists.append([]) data_lists[i].append(float(el)) + return data_lists + +def calculate_each_score( + data_lists: list[list[float]], weights: list[int] +) -> list[list[float]]: + """ + >>> calculate_each_score([[20, 23, 22], [60, 90, 50], [2012, 2015, 2011]], + ... [0, 0, 1]) + [[1.0, 0.0, 0.33333333333333337], [0.75, 0.0, 1.0], [0.25, 1.0, 0.0]] + """ score_lists: list[list[float]] = [] - # calculating each score for dlist, weight in zip(data_lists, weights): mind = min(dlist) maxd = max(dlist) @@ -72,14 +72,43 @@ def procentual_proximity( score_lists.append(score) + return score_lists + + +def generate_final_scores(score_lists: list[list[float]]) -> list[float]: + """ + >>> generate_final_scores([[1.0, 0.0, 0.33333333333333337], + ... [0.75, 0.0, 1.0], + ... 
[0.25, 1.0, 0.0]]) + [2.0, 1.0, 1.3333333333333335] + """ # initialize final scores final_scores: list[float] = [0 for i in range(len(score_lists[0]))] - # generate final scores for slist in score_lists: for j, ele in enumerate(slist): final_scores[j] = final_scores[j] + ele + return final_scores + + +def procentual_proximity( + source_data: list[list[float]], weights: list[int] +) -> list[list[float]]: + """ + weights - int list + possible values - 0 / 1 + 0 if lower values have higher weight in the data set + 1 if higher values have higher weight in the data set + + >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) + [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] + """ + + data_lists = get_data(source_data) + score_lists = calculate_each_score(data_lists, weights) + final_scores = generate_final_scores(score_lists) + # append scores to source data for i, ele in enumerate(final_scores): source_data[i].append(ele) From 9720e6a6cf52e2395e2d7ef3ef6ae91a355d318e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 2 Mar 2023 19:51:48 +0300 Subject: [PATCH 261/368] Add Project Euler problem 117 solution 1 (#6872) Update DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_117/__init__.py | 0 project_euler/problem_117/sol1.py | 53 +++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) create mode 100644 project_euler/problem_117/__init__.py create mode 100644 project_euler/problem_117/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3d1bc967e..484484104 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -956,6 +956,8 @@ * [Sol1](project_euler/problem_115/sol1.py) * Problem 116 * [Sol1](project_euler/problem_116/sol1.py) + * Problem 117 + * [Sol1](project_euler/problem_117/sol1.py) * Problem 119 * [Sol1](project_euler/problem_119/sol1.py) * Problem 120 diff --git a/project_euler/problem_117/__init__.py b/project_euler/problem_117/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_117/sol1.py b/project_euler/problem_117/sol1.py new file mode 100644 index 000000000..e8214454f --- /dev/null +++ b/project_euler/problem_117/sol1.py @@ -0,0 +1,53 @@ +""" +Project Euler Problem 117: https://projecteuler.net/problem=117 + +Using a combination of grey square tiles and oblong tiles chosen from: +red tiles (measuring two units), green tiles (measuring three units), +and blue tiles (measuring four units), +it is possible to tile a row measuring five units in length +in exactly fifteen different ways. + + |grey|grey|grey|grey|grey| |red,red|grey|grey|grey| + + |grey|red,red|grey|grey| |grey|grey|red,red|grey| + + |grey|grey|grey|red,red| |red,red|red,red|grey| + + |red,red|grey|red,red| |grey|red,red|red,red| + + |green,green,green|grey|grey| |grey|green,green,green|grey| + + |grey|grey|green,green,green| |red,red|green,green,green| + + |green,green,green|red,red| |blue,blue,blue,blue|grey| + + |grey|blue,blue,blue,blue| + +How many ways can a row measuring fifty units in length be tiled? + +NOTE: This is related to Problem 116 (https://projecteuler.net/problem=116). 
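A quick aside on the count described above, before the solution code: the number of tilings of a row of length n with grey (1), red (2), green (3) and blue (4) tiles satisfies ways(n) = ways(n-1) + ways(n-2) + ways(n-3) + ways(n-4), because the rightmost tile has one of exactly four lengths. The sketch below is illustrative only and is not the code added by this patch; it should agree with the solution(5) == 15 doctest that follows.

from functools import lru_cache

@lru_cache(maxsize=None)
def tiling_ways(length: int) -> int:
    # Number of ways to tile a row of the given length with tiles of size 1 to 4.
    if length < 0:
        return 0
    if length == 0:
        return 1  # exactly one empty tiling
    return sum(tiling_ways(length - size) for size in range(1, 5))

assert tiling_ways(5) == 15  # the fifteen arrangements listed in the problem statement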
+""" + + +def solution(length: int = 50) -> int: + """ + Returns the number of ways can a row of the given length be tiled + + >>> solution(5) + 15 + """ + + ways_number = [1] * (length + 1) + + for row_length in range(length + 1): + for tile_length in range(2, 5): + for tile_start in range(row_length - tile_length + 1): + ways_number[row_length] += ways_number[ + row_length - tile_start - tile_length + ] + + return ways_number[length] + + +if __name__ == "__main__": + print(f"{solution() = }") From 41b633a841084acac5a640042d365c985e23b357 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 00:10:39 +0100 Subject: [PATCH 262/368] [pre-commit.ci] pre-commit autoupdate (#8168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.253 → v0.0.254](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.253...v0.0.254) * Rename get_top_billionaires.py to get_top_billionaires.py.disabled * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 - ...get_top_billionaires.py => get_top_billionaires.py.disabled} | 0 3 files changed, 1 insertion(+), 2 deletions(-) rename web_programming/{get_top_billionaires.py => get_top_billionaires.py.disabled} (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f27f985b..329407265 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -44,7 +44,7 @@ repos: - --py311-plus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.253 + rev: v0.0.254 hooks: - id: ruff args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 484484104..f25b0c6ff 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1167,7 +1167,6 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) - * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py.disabled similarity index 100% rename from web_programming/get_top_billionaires.py rename to web_programming/get_top_billionaires.py.disabled From 9e28ecca28176254c39bcc791733589c6091422e Mon Sep 17 00:00:00 2001 From: Subhendu Dash <71781104+subhendudash02@users.noreply.github.com> Date: Tue, 7 Mar 2023 21:46:25 +0530 Subject: [PATCH 263/368] Add circular convolution (#8158) * add circular convolution * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hint for __init__ * rounding off final values to 2 and minor changes * add test case for unequal signals * changes in list comprehension and enumeraton --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- electronics/circular_convolution.py | 99 +++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 electronics/circular_convolution.py diff --git a/electronics/circular_convolution.py 
b/electronics/circular_convolution.py new file mode 100644 index 000000000..f2e35742e --- /dev/null +++ b/electronics/circular_convolution.py @@ -0,0 +1,99 @@ +# https://en.wikipedia.org/wiki/Circular_convolution + +""" +Circular convolution, also known as cyclic convolution, +is a special case of periodic convolution, which is the convolution of two +periodic functions that have the same period. Periodic convolution arises, +for example, in the context of the discrete-time Fourier transform (DTFT). +In particular, the DTFT of the product of two discrete sequences is the periodic +convolution of the DTFTs of the individual sequences. And each DTFT is a periodic +summation of a continuous Fourier transform function. + +Source: https://en.wikipedia.org/wiki/Circular_convolution +""" + +import doctest +from collections import deque + +import numpy as np + + +class CircularConvolution: + """ + This class stores the first and second signal and performs the circular convolution + """ + + def __init__(self) -> None: + """ + First signal and second signal are stored as 1-D array + """ + + self.first_signal = [2, 1, 2, -1] + self.second_signal = [1, 2, 3, 4] + + def circular_convolution(self) -> list[float]: + """ + This function performs the circular convolution of the first and second signal + using matrix method + + Usage: + >>> import circular_convolution as cc + >>> convolution = cc.CircularConvolution() + >>> convolution.circular_convolution() + [10, 10, 6, 14] + + >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6] + >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5] + >>> convolution.circular_convolution() + [5.2, 6.0, 6.48, 6.64, 6.48, 6.0, 5.2, 4.08] + + >>> convolution.first_signal = [-1, 1, 2, -2] + >>> convolution.second_signal = [0.5, 1, -1, 2, 0.75] + >>> convolution.circular_convolution() + [6.25, -3.0, 1.5, -2.0, -2.75] + + >>> convolution.first_signal = [1, -1, 2, 3, -1] + >>> convolution.second_signal = [1, 2, 3] + >>> convolution.circular_convolution() + [8, -2, 3, 4, 11] + + """ + + length_first_signal = len(self.first_signal) + length_second_signal = len(self.second_signal) + + max_length = max(length_first_signal, length_second_signal) + + # create a zero matrix of max_length x max_length + matrix = [[0] * max_length for i in range(max_length)] + + # fills the smaller signal with zeros to make both signals of same length + if length_first_signal < length_second_signal: + self.first_signal += [0] * (max_length - length_first_signal) + elif length_first_signal > length_second_signal: + self.second_signal += [0] * (max_length - length_second_signal) + + """ + Fills the matrix in the following way assuming 'x' is the signal of length 4 + [ + [x[0], x[3], x[2], x[1]], + [x[1], x[0], x[3], x[2]], + [x[2], x[1], x[0], x[3]], + [x[3], x[2], x[1], x[0]] + ] + """ + for i in range(max_length): + rotated_signal = deque(self.second_signal) + rotated_signal.rotate(i) + for j, item in enumerate(rotated_signal): + matrix[i][j] += item + + # multiply the matrix with the first signal + final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal)) + + # rounding-off to two decimal places + return [round(i, 2) for i in final_signal] + + +if __name__ == "__main__": + doctest.testmod() From f9cc25221c1521a0da9ee27d6a9bea1f14f4c986 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 10 Mar 2023 12:48:05 +0300 Subject: [PATCH 264/368] Reduce the complexity of backtracking/word_search.py (#8166) * Lower the --max-complexity threshold 
in the file .flake8 --- backtracking/word_search.py | 112 +++++++++++++++++++----------------- 1 file changed, 60 insertions(+), 52 deletions(-) diff --git a/backtracking/word_search.py b/backtracking/word_search.py index 25d1436be..c9d52012b 100644 --- a/backtracking/word_search.py +++ b/backtracking/word_search.py @@ -33,6 +33,61 @@ leetcode: https://leetcode.com/problems/word-search/ """ +def get_point_key(len_board: int, len_board_column: int, row: int, column: int) -> int: + """ + Returns the hash key of matrix indexes. + + >>> get_point_key(10, 20, 1, 0) + 200 + """ + + return len_board * len_board_column * row + column + + +def exits_word( + board: list[list[str]], + word: str, + row: int, + column: int, + word_index: int, + visited_points_set: set[int], +) -> bool: + """ + Return True if it's possible to search the word suffix + starting from the word_index. + + >>> exits_word([["A"]], "B", 0, 0, 0, set()) + False + """ + + if board[row][column] != word[word_index]: + return False + + if word_index == len(word) - 1: + return True + + traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)] + len_board = len(board) + len_board_column = len(board[0]) + for direction in traverts_directions: + next_i = row + direction[0] + next_j = column + direction[1] + if not (0 <= next_i < len_board and 0 <= next_j < len_board_column): + continue + + key = get_point_key(len_board, len_board_column, next_i, next_j) + if key in visited_points_set: + continue + + visited_points_set.add(key) + if exits_word(board, word, next_i, next_j, word_index + 1, visited_points_set): + return True + + visited_points_set.remove(key) + + return False + + def word_exists(board: list[list[str]], word: str) -> bool: """ >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCCED") @@ -77,6 +132,8 @@ def word_exists(board: list[list[str]], word: str) -> bool: board_error_message = ( "The board should be a non empty matrix of single chars strings." ) + + len_board = len(board) if not isinstance(board, list) or len(board) == 0: raise ValueError(board_error_message) @@ -94,61 +151,12 @@ def word_exists(board: list[list[str]], word: str) -> bool: "The word parameter should be a string of length greater than 0." ) - traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)] - len_word = len(word) - len_board = len(board) len_board_column = len(board[0]) - - # Returns the hash key of matrix indexes. - def get_point_key(row: int, column: int) -> int: - """ - >>> len_board=10 - >>> len_board_column=20 - >>> get_point_key(0, 0) - 200 - """ - - return len_board * len_board_column * row + column - - # Return True if it's possible to search the word suffix - # starting from the word_index. 
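One note on the design choice behind the hoisted helper above (an illustrative check, not code from the patch): get_point_key(len_board, len_board_column, row, column) returns len_board * len_board_column * row + column, which assigns a distinct integer to every cell of the board, so the visited set can store plain ints instead of (row, column) tuples. The stride of len_board * len_board_column is larger than strictly necessary, but uniqueness is all the visited set needs.

rows, cols = 3, 4  # any hypothetical board shape
keys = {rows * cols * row + column for row in range(rows) for column in range(cols)}
assert len(keys) == rows * cols  # every cell maps to its own key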
- def exits_word( - row: int, column: int, word_index: int, visited_points_set: set[int] - ) -> bool: - """ - >>> board=[["A"]] - >>> word="B" - >>> exits_word(0, 0, 0, set()) - False - """ - - if board[row][column] != word[word_index]: - return False - - if word_index == len_word - 1: - return True - - for direction in traverts_directions: - next_i = row + direction[0] - next_j = column + direction[1] - if not (0 <= next_i < len_board and 0 <= next_j < len_board_column): - continue - - key = get_point_key(next_i, next_j) - if key in visited_points_set: - continue - - visited_points_set.add(key) - if exits_word(next_i, next_j, word_index + 1, visited_points_set): - return True - - visited_points_set.remove(key) - - return False - for i in range(len_board): for j in range(len_board_column): - if exits_word(i, j, 0, {get_point_key(i, j)}): + if exits_word( + board, word, i, j, 0, {get_point_key(len_board, len_board_column, i, j)} + ): return True return False From 8959211100ba7a612d42a6e7db4755303b78c5a7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 23:18:35 +0100 Subject: [PATCH 265/368] [pre-commit.ci] pre-commit autoupdate (#8177) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.254 → v0.0.255](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.254...v0.0.255) - [github.com/pre-commit/mirrors-mypy: v1.0.1 → v1.1.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.0.1...v1.1.1) - [github.com/codespell-project/codespell: v2.2.2 → v2.2.4](https://github.com/codespell-project/codespell/compare/v2.2.2...v2.2.4) * updating DIRECTORY.md * Fixes for new version of codespell --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- DIRECTORY.md | 1 + machine_learning/sequential_minimum_optimization.py | 2 +- physics/lorentz_transformation_four_vector.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 329407265..9aa965e42 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -44,7 +44,7 @@ repos: - --py311-plus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.254 + rev: v0.0.255 hooks: - id: ruff args: @@ -69,7 +69,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.0.1 + rev: v1.1.1 hooks: - id: mypy args: @@ -79,11 +79,11 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/codespell-project/codespell - rev: v2.2.2 + rev: v2.2.4 hooks: - id: codespell args: - - --ignore-words-list=ans,crate,damon,fo,followings,hist,iff,mater,secant,som,sur,tim,zar + - --ignore-words-list=3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar exclude: | (?x)^( ciphers/prehistoric_men.txt | diff --git a/DIRECTORY.md b/DIRECTORY.md index f25b0c6ff..b2daaaa9c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -334,6 +334,7 @@ ## Electronics * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) + * [Circular Convolution](electronics/circular_convolution.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric 
Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 37172c8e9..b68bd52f4 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -569,7 +569,7 @@ def plot_partition_boundary( """ We can not get the optimum w of our kernel svm model which is different from linear svm. For this reason, we generate randomly distributed points with high desity and - prediced values of these points are calculated by using our tained model. Then we + prediced values of these points are calculated by using our trained model. Then we could use this prediced values to draw contour map. And this contour map can represent svm's partition boundary. """ diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index 64be97245..f4fda4dff 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -2,7 +2,7 @@ Lorentz transformations describe the transition between two inertial reference frames F and F', each of which is moving in some direction with respect to the other. This code only calculates Lorentz transformations for movement in the x -direction with no spacial rotation (i.e., a Lorentz boost in the x direction). +direction with no spatial rotation (i.e., a Lorentz boost in the x direction). The Lorentz transformations are calculated here as linear transformations of four-vectors [ct, x, y, z] described by Minkowski space. Note that t (time) is multiplied by c (the speed of light) in the first entry of each four-vector. From b797e437aeadcac50556d6606a547dc634cf5329 Mon Sep 17 00:00:00 2001 From: Andrey Date: Tue, 14 Mar 2023 01:31:27 +0100 Subject: [PATCH 266/368] Add hashmap implementation (#7967) --- data_structures/hashing/hash_map.py | 162 ++++++++++++++++++ .../hashing/tests/test_hash_map.py | 97 +++++++++++ 2 files changed, 259 insertions(+) create mode 100644 data_structures/hashing/hash_map.py create mode 100644 data_structures/hashing/tests/test_hash_map.py diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py new file mode 100644 index 000000000..1dfcc8bbf --- /dev/null +++ b/data_structures/hashing/hash_map.py @@ -0,0 +1,162 @@ +""" +Hash map with open addressing. + +https://en.wikipedia.org/wiki/Hash_table + +Another hash map implementation, with a good explanation. +Modern Dictionaries by Raymond Hettinger +https://www.youtube.com/watch?v=p33CVV29OG8 +""" +from collections.abc import Iterator, MutableMapping +from dataclasses import dataclass +from typing import Generic, TypeVar + +KEY = TypeVar("KEY") +VAL = TypeVar("VAL") + + +@dataclass(frozen=True, slots=True) +class _Item(Generic[KEY, VAL]): + key: KEY + val: VAL + + +class _DeletedItem(_Item): + def __init__(self) -> None: + super().__init__(None, None) + + def __bool__(self) -> bool: + return False + + +_deleted = _DeletedItem() + + +class HashMap(MutableMapping[KEY, VAL]): + """ + Hash map with open addressing. 
+ """ + + def __init__( + self, initial_block_size: int = 8, capacity_factor: float = 0.75 + ) -> None: + self._initial_block_size = initial_block_size + self._buckets: list[_Item | None] = [None] * initial_block_size + assert 0.0 < capacity_factor < 1.0 + self._capacity_factor = capacity_factor + self._len = 0 + + def _get_bucket_index(self, key: KEY) -> int: + return hash(key) % len(self._buckets) + + def _get_next_ind(self, ind: int) -> int: + """ + Get next index. + + Implements linear open addressing. + """ + return (ind + 1) % len(self._buckets) + + def _try_set(self, ind: int, key: KEY, val: VAL) -> bool: + """ + Try to add value to the bucket. + + If bucket is empty or key is the same, does insert and return True. + + If bucket has another key or deleted placeholder, + that means that we need to check next bucket. + """ + stored = self._buckets[ind] + if not stored: + self._buckets[ind] = _Item(key, val) + self._len += 1 + return True + elif stored.key == key: + self._buckets[ind] = _Item(key, val) + return True + else: + return False + + def _is_full(self) -> bool: + """ + Return true if we have reached safe capacity. + + So we need to increase the number of buckets to avoid collisions. + """ + limit = len(self._buckets) * self._capacity_factor + return len(self) >= int(limit) + + def _is_sparse(self) -> bool: + """Return true if we need twice fewer buckets when we have now.""" + if len(self._buckets) <= self._initial_block_size: + return False + limit = len(self._buckets) * self._capacity_factor / 2 + return len(self) < limit + + def _resize(self, new_size: int) -> None: + old_buckets = self._buckets + self._buckets = [None] * new_size + self._len = 0 + for item in old_buckets: + if item: + self._add_item(item.key, item.val) + + def _size_up(self) -> None: + self._resize(len(self._buckets) * 2) + + def _size_down(self) -> None: + self._resize(len(self._buckets) // 2) + + def _iterate_buckets(self, key: KEY) -> Iterator[int]: + ind = self._get_bucket_index(key) + for _ in range(len(self._buckets)): + yield ind + ind = self._get_next_ind(ind) + + def _add_item(self, key: KEY, val: VAL) -> None: + for ind in self._iterate_buckets(key): + if self._try_set(ind, key, val): + break + + def __setitem__(self, key: KEY, val: VAL) -> None: + if self._is_full(): + self._size_up() + + self._add_item(key, val) + + def __delitem__(self, key: KEY) -> None: + for ind in self._iterate_buckets(key): + item = self._buckets[ind] + if item is None: + raise KeyError(key) + if item is _deleted: + continue + if item.key == key: + self._buckets[ind] = _deleted + self._len -= 1 + break + if self._is_sparse(): + self._size_down() + + def __getitem__(self, key: KEY) -> VAL: + for ind in self._iterate_buckets(key): + item = self._buckets[ind] + if item is None: + break + if item is _deleted: + continue + if item.key == key: + return item.val + raise KeyError(key) + + def __len__(self) -> int: + return self._len + + def __iter__(self) -> Iterator[KEY]: + yield from (item.key for item in self._buckets if item) + + def __repr__(self) -> str: + val_string = " ,".join( + f"{item.key}: {item.val}" for item in self._buckets if item + ) + return f"HashMap({val_string})" diff --git a/data_structures/hashing/tests/test_hash_map.py b/data_structures/hashing/tests/test_hash_map.py new file mode 100644 index 000000000..929e67311 --- /dev/null +++ b/data_structures/hashing/tests/test_hash_map.py @@ -0,0 +1,97 @@ +from operator import delitem, getitem, setitem + +import pytest + +from data_structures.hashing.hash_map import 
HashMap + + +def _get(k): + return getitem, k + + +def _set(k, v): + return setitem, k, v + + +def _del(k): + return delitem, k + + +def _run_operation(obj, fun, *args): + try: + return fun(obj, *args), None + except Exception as e: + return None, e + + +_add_items = ( + _set("key_a", "val_a"), + _set("key_b", "val_b"), +) + +_overwrite_items = [ + _set("key_a", "val_a"), + _set("key_a", "val_b"), +] + +_delete_items = [ + _set("key_a", "val_a"), + _set("key_b", "val_b"), + _del("key_a"), + _del("key_b"), + _set("key_a", "val_a"), + _del("key_a"), +] + +_access_absent_items = [ + _get("key_a"), + _del("key_a"), + _set("key_a", "val_a"), + _del("key_a"), + _del("key_a"), + _get("key_a"), +] + +_add_with_resize_up = [ + *[_set(x, x) for x in range(5)], # guaranteed upsize +] + +_add_with_resize_down = [ + *[_set(x, x) for x in range(5)], # guaranteed upsize + *[_del(x) for x in range(5)], + _set("key_a", "val_b"), +] + + +@pytest.mark.parametrize( + "operations", + ( + pytest.param(_add_items, id="add items"), + pytest.param(_overwrite_items, id="overwrite items"), + pytest.param(_delete_items, id="delete items"), + pytest.param(_access_absent_items, id="access absent items"), + pytest.param(_add_with_resize_up, id="add with resize up"), + pytest.param(_add_with_resize_down, id="add with resize down"), + ), +) +def test_hash_map_is_the_same_as_dict(operations): + my = HashMap(initial_block_size=4) + py = {} + for _, (fun, *args) in enumerate(operations): + my_res, my_exc = _run_operation(my, fun, *args) + py_res, py_exc = _run_operation(py, fun, *args) + assert my_res == py_res + assert str(my_exc) == str(py_exc) + assert set(py) == set(my) + assert len(py) == len(my) + assert set(my.items()) == set(py.items()) + + +def test_no_new_methods_was_added_to_api(): + def is_public(name: str) -> bool: + return not name.startswith("_") + + dict_public_names = {name for name in dir({}) if is_public(name)} + hash_public_names = {name for name in dir(HashMap()) if is_public(name)} + + assert dict_public_names > hash_public_names From 9701e459e884e883fc720277452ec592eae305d0 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 14 Mar 2023 08:39:36 +0300 Subject: [PATCH 267/368] Add Project Euler problem 100 solution 1 (#8175) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_100/__init__.py | 0 project_euler/problem_100/sol1.py | 48 +++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 project_euler/problem_100/__init__.py create mode 100644 project_euler/problem_100/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b2daaaa9c..e1ce44eed 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -937,6 +937,8 @@ * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 * [Sol1](project_euler/problem_099/sol1.py) + * Problem 100 + * [Sol1](project_euler/problem_100/sol1.py) * Problem 101 * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 diff --git a/project_euler/problem_100/__init__.py b/project_euler/problem_100/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_100/sol1.py b/project_euler/problem_100/sol1.py new file mode 100644 index 000000000..367378e7a --- /dev/null +++ b/project_euler/problem_100/sol1.py @@ -0,0 +1,48 @@ +""" +Project Euler Problem 100: https://projecteuler.net/problem=100 + +If a box contains twenty-one coloured discs, composed of fifteen blue 
discs and +six red discs, and two discs were taken at random, it can be seen that +the probability of taking two blue discs, P(BB) = (15/21) x (14/20) = 1/2. + +The next such arrangement, for which there is exactly 50% chance of taking two blue +discs at random, is a box containing eighty-five blue discs and thirty-five red discs. + +By finding the first arrangement to contain over 10^12 = 1,000,000,000,000 discs +in total, determine the number of blue discs that the box would contain. +""" + + +def solution(min_total: int = 10**12) -> int: + """ + Returns the number of blue discs for the first arrangement to contain + over min_total discs in total + + >>> solution(2) + 3 + + >>> solution(4) + 15 + + >>> solution(21) + 85 + """ + + prev_numerator = 1 + prev_denominator = 0 + + numerator = 1 + denominator = 1 + + while numerator <= 2 * min_total - 1: + prev_numerator += 2 * numerator + numerator += 2 * prev_numerator + + prev_denominator += 2 * denominator + denominator += 2 * prev_denominator + + return (denominator + 1) // 2 + + +if __name__ == "__main__": + print(f"{solution() = }") From 47b3c729826e864fb1d0a30b03cf95fa2adae591 Mon Sep 17 00:00:00 2001 From: David Leal Date: Mon, 13 Mar 2023 23:46:52 -0600 Subject: [PATCH 268/368] docs: add the other/miscellaneous form (#8163) Co-authored-by: Christian Clauss Co-authored-by: Dhruv Manilawala --- .github/ISSUE_TEMPLATE/other.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/other.yml diff --git a/.github/ISSUE_TEMPLATE/other.yml b/.github/ISSUE_TEMPLATE/other.yml new file mode 100644 index 000000000..44d6ff541 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/other.yml @@ -0,0 +1,19 @@ +name: Other +description: Use this for any other issues. PLEASE do not create blank issues +labels: ["awaiting triage"] +body: + - type: textarea + id: issuedescription + attributes: + label: What would you like to share? + description: Provide a clear and concise explanation of your issue. + validations: + required: true + + - type: textarea + id: extrainfo + attributes: + label: Additional information + description: Is there anything else we should know about this issue? 
+ validations: + required: false From adc3ccdabede375df5cff62c3c8f06d8a191a803 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 15 Mar 2023 15:56:03 +0300 Subject: [PATCH 269/368] Add Project Euler problem 131 solution 1 (#8179) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 5 +++ project_euler/problem_131/__init__.py | 0 project_euler/problem_131/sol1.py | 56 +++++++++++++++++++++++++++ 3 files changed, 61 insertions(+) create mode 100644 project_euler/problem_131/__init__.py create mode 100644 project_euler/problem_131/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e1ce44eed..1d3177801 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -196,11 +196,14 @@ * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py) * Hashing * [Double Hash](data_structures/hashing/double_hash.py) + * [Hash Map](data_structures/hashing/hash_map.py) * [Hash Table](data_structures/hashing/hash_table.py) * [Hash Table With Linked List](data_structures/hashing/hash_table_with_linked_list.py) * Number Theory * [Prime Numbers](data_structures/hashing/number_theory/prime_numbers.py) * [Quadratic Probing](data_structures/hashing/quadratic_probing.py) + * Tests + * [Test Hash Map](data_structures/hashing/tests/test_hash_map.py) * Heap * [Binomial Heap](data_structures/heap/binomial_heap.py) * [Heap](data_structures/heap/heap.py) @@ -973,6 +976,8 @@ * [Sol1](project_euler/problem_125/sol1.py) * Problem 129 * [Sol1](project_euler/problem_129/sol1.py) + * Problem 131 + * [Sol1](project_euler/problem_131/sol1.py) * Problem 135 * [Sol1](project_euler/problem_135/sol1.py) * Problem 144 diff --git a/project_euler/problem_131/__init__.py b/project_euler/problem_131/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_131/sol1.py b/project_euler/problem_131/sol1.py new file mode 100644 index 000000000..f5302aac8 --- /dev/null +++ b/project_euler/problem_131/sol1.py @@ -0,0 +1,56 @@ +""" +Project Euler Problem 131: https://projecteuler.net/problem=131 + +There are some prime values, p, for which there exists a positive integer, n, +such that the expression n^3 + n^2p is a perfect cube. + +For example, when p = 19, 8^3 + 8^2 x 19 = 12^3. + +What is perhaps most surprising is that for each prime with this property +the value of n is unique, and there are only four such primes below one-hundred. + +How many primes below one million have this remarkable property? 
+""" + +from math import isqrt + + +def is_prime(number: int) -> bool: + """ + Determines whether number is prime + + >>> is_prime(3) + True + + >>> is_prime(4) + False + """ + + for divisor in range(2, isqrt(number) + 1): + if number % divisor == 0: + return False + return True + + +def solution(max_prime: int = 10**6) -> int: + """ + Returns number of primes below max_prime with the property + + >>> solution(100) + 4 + """ + + primes_count = 0 + cube_index = 1 + prime_candidate = 7 + while prime_candidate < max_prime: + primes_count += is_prime(prime_candidate) + + cube_index += 1 + prime_candidate += 6 * cube_index + + return primes_count + + +if __name__ == "__main__": + print(f"{solution() = }") From c96241b5a5052af466894ef90c7a7c749ba872eb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 15 Mar 2023 13:58:25 +0100 Subject: [PATCH 270/368] Replace bandit, flake8, isort, and pyupgrade with ruff (#8178) * Replace bandit, flake8, isort, and pyupgrade with ruff * Comment on ruff rules * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .flake8 | 10 --- .github/workflows/ruff.yml | 16 ++++ .pre-commit-config.yaml | 78 +++++-------------- arithmetic_analysis/newton_raphson.py | 2 +- arithmetic_analysis/newton_raphson_new.py | 2 +- data_structures/heap/heap_generic.py | 1 - dynamic_programming/min_distance_up_bottom.py | 9 +-- dynamic_programming/minimum_tickets_cost.py | 4 +- dynamic_programming/word_break.py | 4 +- hashes/sha1.py | 12 +-- machine_learning/support_vector_machines.py | 4 +- maths/eulers_totient.py | 34 ++++---- maths/fibonacci.py | 4 +- maths/pythagoras.py | 6 +- other/quine.py | 1 + project_euler/problem_075/sol1.py | 3 +- pyproject.toml | 59 ++++++++++++-- sorts/external_sort.py | 2 +- strings/check_anagrams.py | 3 +- strings/word_occurrence.py | 3 +- web_programming/currency_converter.py | 2 +- 21 files changed, 127 insertions(+), 132 deletions(-) delete mode 100644 .flake8 create mode 100644 .github/workflows/ruff.yml diff --git a/.flake8 b/.flake8 deleted file mode 100644 index b68ee8533..000000000 --- a/.flake8 +++ /dev/null @@ -1,10 +0,0 @@ -[flake8] -max-line-length = 88 -# max-complexity should be 10 -max-complexity = 19 -extend-ignore = - # Formatting style for `black` - # E203 is whitespace before ':' - E203, - # W503 is line break occurred before a binary operator - W503 diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml new file mode 100644 index 000000000..ca2d5be47 --- /dev/null +++ b/.github/workflows/ruff.yml @@ -0,0 +1,16 @@ +# https://beta.ruff.rs +name: ruff +on: + push: + branches: + - master + pull_request: + branches: + - master +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: pip install --user ruff + - run: ruff --format=github . 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9aa965e42..82aad6c65 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,7 @@ repos: rev: v4.4.0 hooks: - id: check-executables-have-shebangs + - id: check-toml - id: check-yaml - id: end-of-file-fixer types: [python] @@ -14,60 +15,41 @@ repos: hooks: - id: auto-walrus + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.255 + hooks: + - id: ruff + - repo: https://github.com/psf/black rev: 23.1.0 hooks: - id: black - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 + - repo: https://github.com/codespell-project/codespell + rev: v2.2.4 hooks: - - id: isort - args: - - --profile=black + - id: codespell + additional_dependencies: + - tomli - repo: https://github.com/tox-dev/pyproject-fmt rev: "0.9.2" hooks: - id: pyproject-fmt + - repo: local + hooks: + - id: validate-filenames + name: Validate filenames + entry: ./scripts/validate_filenames.py + language: script + pass_filenames: false + - repo: https://github.com/abravalheri/validate-pyproject rev: v0.12.1 hooks: - id: validate-pyproject - - repo: https://github.com/asottile/pyupgrade - rev: v3.3.1 - hooks: - - id: pyupgrade - args: - - --py311-plus - - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.255 - hooks: - - id: ruff - args: - - --ignore=E741 - - - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 - hooks: - - id: flake8 # See .flake8 for args - additional_dependencies: &flake8-plugins - - flake8-bugbear - - flake8-builtins - # - flake8-broken-line - - flake8-comprehensions - - pep8-naming - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa - additional_dependencies: - *flake8-plugins - - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.1.1 hooks: @@ -77,25 +59,3 @@ repos: - --install-types # See mirrors-mypy README.md - --non-interactive additional_dependencies: [types-requests] - - - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 - hooks: - - id: codespell - args: - - --ignore-words-list=3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar - exclude: | - (?x)^( - ciphers/prehistoric_men.txt | - strings/dictionary.txt | - strings/words.txt | - project_euler/problem_022/p022_names.txt - )$ - - - repo: local - hooks: - - id: validate-filenames - name: Validate filenames - entry: ./scripts/validate_filenames.py - language: script - pass_filenames: false diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index 86ff9d350..aee2f07e5 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -5,7 +5,7 @@ from __future__ import annotations from decimal import Decimal -from math import * # noqa: F401, F403 +from math import * # noqa: F403 from sympy import diff diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py index 472cb5b5a..f61841e2e 100644 --- a/arithmetic_analysis/newton_raphson_new.py +++ b/arithmetic_analysis/newton_raphson_new.py @@ -8,7 +8,7 @@ # Newton's Method - https://en.wikipedia.org/wiki/Newton's_method from sympy import diff, lambdify, symbols -from sympy.functions import * # noqa: F401, F403 +from sympy.functions import * # noqa: F403 def newton_raphson( diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py index b4d7019f4..ee92149e2 100644 --- a/data_structures/heap/heap_generic.py +++ b/data_structures/heap/heap_generic.py @@ -166,7 +166,6 
@@ def test_heap() -> None: >>> h.get_top() [9, -40] """ - pass if __name__ == "__main__": diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py index 49c361f24..4870c7ef4 100644 --- a/dynamic_programming/min_distance_up_bottom.py +++ b/dynamic_programming/min_distance_up_bottom.py @@ -6,13 +6,13 @@ to find edit distance. The aim is to demonstate up bottom approach for solving the task. The implementation was tested on the leetcode: https://leetcode.com/problems/edit-distance/ -""" -""" Levinstein distance Dynamic Programming: up -> down. """ +import functools + def min_distance_up_bottom(word1: str, word2: str) -> int: """ @@ -25,13 +25,10 @@ def min_distance_up_bottom(word1: str, word2: str) -> int: >>> min_distance_up_bottom("zooicoarchaeologist", "zoologist") 10 """ - - from functools import lru_cache - len_word1 = len(word1) len_word2 = len(word2) - @lru_cache(maxsize=None) + @functools.cache def min_distance(index1: int, index2: int) -> int: # if first word index is overflow - delete all from the second word if index1 >= len_word1: diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py index d07056d92..6790c21f1 100644 --- a/dynamic_programming/minimum_tickets_cost.py +++ b/dynamic_programming/minimum_tickets_cost.py @@ -22,7 +22,7 @@ Minimum Cost For Tickets Dynamic Programming: up -> down. """ -from functools import lru_cache +import functools def mincost_tickets(days: list[int], costs: list[int]) -> int: @@ -106,7 +106,7 @@ def mincost_tickets(days: list[int], costs: list[int]) -> int: days_set = set(days) - @lru_cache(maxsize=None) + @functools.cache def dynamic_programming(index: int) -> int: if index > 365: return 0 diff --git a/dynamic_programming/word_break.py b/dynamic_programming/word_break.py index 642ea0edf..4d7ac8690 100644 --- a/dynamic_programming/word_break.py +++ b/dynamic_programming/word_break.py @@ -20,7 +20,7 @@ Runtime: O(n * n) Space: O(n) """ -from functools import lru_cache +import functools from typing import Any @@ -80,7 +80,7 @@ def word_break(string: str, words: list[str]) -> bool: len_string = len(string) # Dynamic programming method - @lru_cache(maxsize=None) + @functools.cache def is_breakable(index: int) -> bool: """ >>> string = 'a' diff --git a/hashes/sha1.py b/hashes/sha1.py index b19e0cfaf..9f0437f20 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -26,7 +26,6 @@ Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/ import argparse import hashlib # hashlib is only used inside the Test class import struct -import unittest class SHA1Hash: @@ -128,14 +127,9 @@ class SHA1Hash: return "%08x%08x%08x%08x%08x" % tuple(self.h) -class SHA1HashTest(unittest.TestCase): - """ - Test class for the SHA1Hash class. 
Inherits the TestCase class from unittest - """ - - def testMatchHashes(self): # noqa: N802 - msg = bytes("Test String", "utf-8") - self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest()) +def test_sha1_hash(): + msg = b"Test String" + assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest() # noqa: S324 def main(): diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index caec10175..df854cc85 100644 --- a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -56,7 +56,7 @@ class SVC: *, regularization: float = np.inf, kernel: str = "linear", - gamma: float = 0, + gamma: float = 0.0, ) -> None: self.regularization = regularization self.gamma = gamma @@ -65,7 +65,7 @@ class SVC: elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma") - if not (isinstance(self.gamma, float) or isinstance(self.gamma, int)): + if not isinstance(self.gamma, (float, int)): raise ValueError("gamma must be float or int") if not self.gamma > 0: raise ValueError("gamma must be > 0") diff --git a/maths/eulers_totient.py b/maths/eulers_totient.py index 6a35e69bd..a15664703 100644 --- a/maths/eulers_totient.py +++ b/maths/eulers_totient.py @@ -1,5 +1,20 @@ # Eulers Totient function finds the number of relative primes of a number n from 1 to n def totient(n: int) -> list: + """ + >>> n = 10 + >>> totient_calculation = totient(n) + >>> for i in range(1, n): + ... print(f"{i} has {totient_calculation[i]} relative primes.") + 1 has 0 relative primes. + 2 has 1 relative primes. + 3 has 2 relative primes. + 4 has 2 relative primes. + 5 has 4 relative primes. + 6 has 2 relative primes. + 7 has 6 relative primes. + 8 has 4 relative primes. + 9 has 6 relative primes. + """ is_prime = [True for i in range(n + 1)] totients = [i - 1 for i in range(n + 1)] primes = [] @@ -20,25 +35,6 @@ def totient(n: int) -> list: return totients -def test_totient() -> None: - """ - >>> n = 10 - >>> totient_calculation = totient(n) - >>> for i in range(1, n): - ... print(f"{i} has {totient_calculation[i]} relative primes.") - 1 has 0 relative primes. - 2 has 1 relative primes. - 3 has 2 relative primes. - 4 has 2 relative primes. - 5 has 4 relative primes. - 6 has 2 relative primes. - 7 has 6 relative primes. - 8 has 4 relative primes. - 9 has 6 relative primes. 
- """ - pass - - if __name__ == "__main__": import doctest diff --git a/maths/fibonacci.py b/maths/fibonacci.py index d58c9fc68..e810add69 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -16,7 +16,7 @@ fib_memoization runtime: 0.0107 ms fib_binet runtime: 0.0174 ms """ -from functools import lru_cache +import functools from math import sqrt from time import time @@ -110,7 +110,7 @@ def fib_recursive_cached(n: int) -> list[int]: Exception: n is negative """ - @lru_cache(maxsize=None) + @functools.cache def fib_recursive_term(i: int) -> int: """ Calculates the i-th (0-indexed) Fibonacci number using recursion diff --git a/maths/pythagoras.py b/maths/pythagoras.py index 69a17731a..7770e981d 100644 --- a/maths/pythagoras.py +++ b/maths/pythagoras.py @@ -14,17 +14,13 @@ class Point: def distance(a: Point, b: Point) -> float: - return math.sqrt(abs((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2)) - - -def test_distance() -> None: """ >>> point1 = Point(2, -1, 7) >>> point2 = Point(1, -3, 5) >>> print(f"Distance from {point1} to {point2} is {distance(point1, point2)}") Distance from Point(2, -1, 7) to Point(1, -3, 5) is 3.0 """ - pass + return math.sqrt(abs((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2)) if __name__ == "__main__": diff --git a/other/quine.py b/other/quine.py index 01e03bbb0..500a351d3 100644 --- a/other/quine.py +++ b/other/quine.py @@ -1,4 +1,5 @@ #!/bin/python3 +# ruff: noqa """ Quine: diff --git a/project_euler/problem_075/sol1.py b/project_euler/problem_075/sol1.py index b57604d76..0ccaf5dee 100644 --- a/project_euler/problem_075/sol1.py +++ b/project_euler/problem_075/sol1.py @@ -29,7 +29,6 @@ Reference: https://en.wikipedia.org/wiki/Pythagorean_triple#Generating_a_triple from collections import defaultdict from math import gcd -from typing import DefaultDict def solution(limit: int = 1500000) -> int: @@ -43,7 +42,7 @@ def solution(limit: int = 1500000) -> int: >>> solution(50000) 5502 """ - frequencies: DefaultDict = defaultdict(int) + frequencies: defaultdict = defaultdict(int) euclid_m = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2): diff --git a/pyproject.toml b/pyproject.toml index 5f9b1aa06..6552101d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,8 +12,57 @@ addopts = [ omit = [".env/*"] sort = "Cover" -#[report] -#sort = Cover -#omit = -# .env/* -# backtracking/* +[tool.codespell] +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" + +[tool.ruff] +ignore = [ # `ruff rule S101` for a description of that rule + "B904", # B904: Within an `except` clause, raise exceptions with `raise ... 
from err` + "B905", # B905: `zip()` without an explicit `strict=` parameter + "E741", # E741: Ambiguous variable name 'l' + "G004", # G004 Logging statement uses f-string + "N999", # N999: Invalid module name + "PLC1901", # PLC1901: `{}` can be simplified to `{}` as an empty string is falsey + "PLR2004", # PLR2004: Magic value used in comparison + "PLR5501", # PLR5501: Consider using `elif` instead of `else` + "PLW0120", # PLW0120: `else` clause on loop without a `break` statement + "PLW060", # PLW060: Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable + "RUF00", # RUF00: Ambiguous unicode character -- DO NOT FIX + "RUF100", # RUF100: Unused `noqa` directive + "S101", # S101: Use of `assert` detected -- DO NOT FIX + "S105", # S105: Possible hardcoded password: 'password' + "S113", # S113: Probable use of requests call without timeout + "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] +select = [ # https://beta.ruff.rs/docs/rules + "A", # A: builtins + "B", # B: bugbear + "C40", # C40: comprehensions + "C90", # C90: mccabe code complexity + "E", # E: pycodestyle errors + "F", # F: pyflakes + "G", # G: logging format + "I", # I: isort + "N", # N: pep8 naming + "PL", # PL: pylint + "PIE", # PIE: pie + "PYI", # PYI: type hinting stub files + "RUF", # RUF: ruff + "S", # S: bandit + "TID", # TID: tidy imports + "UP", # UP: pyupgrade + "W", # W: pycodestyle warnings + "YTT", # YTT: year 2020 +] +target-version = "py311" + +[tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE +max-complexity = 20 # default: 10 + +[tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +max-args = 10 # default: 5 +max-branches = 20 # default: 12 +max-returns = 8 # default: 6 +max-statements = 88 # default: 50 diff --git a/sorts/external_sort.py b/sorts/external_sort.py index 7af7dc0a6..e6b0d47f7 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -104,7 +104,7 @@ class FileMerger: files = {} for i in range(len(filenames)): - files[i] = open(filenames[i], "r", buffer_size) + files[i] = open(filenames[i], "r", buffer_size) # noqa: UP015 return files diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index a364b9821..9dcdffcfb 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -2,7 +2,6 @@ wiki: https://en.wikipedia.org/wiki/Anagram """ from collections import defaultdict -from typing import DefaultDict def check_anagrams(first_str: str, second_str: str) -> bool: @@ -30,7 +29,7 @@ def check_anagrams(first_str: str, second_str: str) -> bool: return False # Default values for count should be 0 - count: DefaultDict[str, int] = defaultdict(int) + count: defaultdict[str, int] = defaultdict(int) # For each character in input strings, # increment count in the corresponding diff --git a/strings/word_occurrence.py b/strings/word_occurrence.py index 8260620c3..5a18ebf77 100644 --- a/strings/word_occurrence.py +++ b/strings/word_occurrence.py @@ -1,7 +1,6 @@ # Created by sarathkaul on 17/11/19 # Modified by Arkadip Bhattacharya(@darkmatter18) on 20/04/2020 from collections import defaultdict -from typing import DefaultDict def word_occurrence(sentence: str) -> dict: @@ -15,7 +14,7 @@ def word_occurrence(sentence: str) -> dict: >>> dict(word_occurrence("Two spaces")) {'Two': 1, 'spaces': 1} """ - occurrence: DefaultDict[str, int] = defaultdict(int) + occurrence: defaultdict[str, int] = defaultdict(int) # Creating a dictionary containing count of each word for word in sentence.split(): 
occurrence[word] += 1 diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 6fcc60e8f..69f2a2c4d 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -8,7 +8,7 @@ import os import requests URL_BASE = "https://www.amdoren.com/api/currency.php" -TESTING = os.getenv("CI", False) +TESTING = os.getenv("CI", "") API_KEY = os.getenv("AMDOREN_API_KEY", "") if not API_KEY and not TESTING: From 521fbca61c6bdb84746564eb58c2ef2131260187 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 16 Mar 2023 13:31:29 +0100 Subject: [PATCH 271/368] Replace flake8 with ruff (#8184) --- CONTRIBUTING.md | 6 +++--- audio_filters/equal_loudness_filter.py.broken.txt | 2 +- data_structures/binary_tree/red_black_tree.py | 4 ++-- digital_image_processing/change_contrast.py | 4 ++-- maths/is_square_free.py | 4 ++-- maths/mobius_function.py | 4 ++-- other/linear_congruential_generator.py | 8 ++++---- pyproject.toml | 1 + quantum/ripple_adder_classic.py | 6 +++--- 9 files changed, 20 insertions(+), 19 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3ce5bd1ed..6b6e4d21b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -81,11 +81,11 @@ We want your work to be readable by others; therefore, we encourage you to note black . ``` -- All submissions will need to pass the test `flake8 . --ignore=E203,W503 --max-line-length=88` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. +- All submissions will need to pass the test `ruff .` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. ```bash - python3 -m pip install flake8 # only required the first time - flake8 . --ignore=E203,W503 --max-line-length=88 --show-source + python3 -m pip install ruff # only required the first time + ruff . ``` - Original code submission require docstrings or comments to describe your work. diff --git a/audio_filters/equal_loudness_filter.py.broken.txt b/audio_filters/equal_loudness_filter.py.broken.txt index b9a3c50e1..88cba8533 100644 --- a/audio_filters/equal_loudness_filter.py.broken.txt +++ b/audio_filters/equal_loudness_filter.py.broken.txt @@ -20,7 +20,7 @@ class EqualLoudnessFilter: samplerate, use with caution. Code based on matlab implementation at https://bit.ly/3eqh2HU - (url shortened for flake8) + (url shortened for ruff) Target curve: https://i.imgur.com/3g2VfaM.png Yulewalk response: https://i.imgur.com/J9LnJ4C.png diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index b50d75d33..3ebc8d639 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -1,6 +1,6 @@ """ -python/black : true -flake8 : passed +psf/black : true +ruff : passed """ from __future__ import annotations diff --git a/digital_image_processing/change_contrast.py b/digital_image_processing/change_contrast.py index 6a1504002..7e4969470 100644 --- a/digital_image_processing/change_contrast.py +++ b/digital_image_processing/change_contrast.py @@ -4,8 +4,8 @@ Changing contrast with PIL This algorithm is used in https://noivce.pythonanywhere.com/ Python web app. 
-python/black: True -flake8 : True +psf/black: True +ruff : True """ from PIL import Image diff --git a/maths/is_square_free.py b/maths/is_square_free.py index 4134398d2..08c70dc32 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -1,7 +1,7 @@ """ References: wikipedia:square free number -python/black : True -flake8 : True +psf/black : True +ruff : True """ from __future__ import annotations diff --git a/maths/mobius_function.py b/maths/mobius_function.py index 4fcf35f21..8abdc4caf 100644 --- a/maths/mobius_function.py +++ b/maths/mobius_function.py @@ -1,8 +1,8 @@ """ References: https://en.wikipedia.org/wiki/M%C3%B6bius_function References: wikipedia:square free number -python/black : True -flake8 : True +psf/black : True +ruff : True """ from maths.is_square_free import is_square_free diff --git a/other/linear_congruential_generator.py b/other/linear_congruential_generator.py index 777ee6355..c016310f9 100644 --- a/other/linear_congruential_generator.py +++ b/other/linear_congruential_generator.py @@ -9,10 +9,10 @@ class LinearCongruentialGenerator: """ # The default value for **seed** is the result of a function call which is not - # normally recommended and causes flake8-bugbear to raise a B008 error. However, - # in this case, it is accptable because `LinearCongruentialGenerator.__init__()` - # will only be called once per instance and it ensures that each instance will - # generate a unique sequence of numbers. + # normally recommended and causes ruff to raise a B008 error. However, in this case, + # it is accptable because `LinearCongruentialGenerator.__init__()` will only be + # called once per instance and it ensures that each instance will generate a unique + # sequence of numbers. def __init__(self, multiplier, increment, modulo, seed=int(time())): # noqa: B008 """ diff --git a/pyproject.toml b/pyproject.toml index 6552101d2..169c3a71b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,7 @@ select = [ # https://beta.ruff.rs/docs/rules "W", # W: pycodestyle warnings "YTT", # YTT: year 2020 ] +show-source = true target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index c07757af7..b604395bc 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -54,9 +54,9 @@ def full_adder( # The default value for **backend** is the result of a function call which is not -# normally recommended and causes flake8-bugbear to raise a B008 error. However, -# in this case, this is acceptable because `Aer.get_backend()` is called when the -# function is defined and that same backend is then reused for all function calls. +# normally recommended and causes ruff to raise a B008 error. However, in this case, +# this is acceptable because `Aer.get_backend()` is called when the function is defined +# and that same backend is then reused for all function calls. 
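The point this comment makes — that a call placed in a default-argument position runs once, when the `def` statement executes, and not on every call — is easy to demonstrate in isolation. A minimal sketch, with names that are illustrative rather than taken from this repository:

from time import time


def stamp(label: str, created_at: float = time()) -> tuple[str, float]:
    # time() was evaluated a single time, when this def ran, so every
    # call that omits created_at receives the same timestamp.
    return (label, created_at)


first = stamp("a")
second = stamp("b")
assert first[1] == second[1]  # the one default value is shared by both calls

That sharing is exactly what ruff's B008 rule warns about; both files touched here keep the pattern deliberately and explain why.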
def ripple_adder( From 3f9150c1b2dd15808a4962e03a1455f8d825512c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 22:16:13 +0100 Subject: [PATCH 272/368] [pre-commit.ci] pre-commit autoupdate (#8294) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.255 → v0.0.257](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.255...v0.0.257) * Fix PLR1711 Useless statement at end of function --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- data_structures/binary_tree/avl_tree.py | 4 ---- machine_learning/polymonial_regression.py | 1 - 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82aad6c65..58cec4ff6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.255 + rev: v0.0.257 hooks: - id: ruff diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 320e7ed0d..4c1fb17af 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -60,19 +60,15 @@ class MyNode: def set_data(self, data: Any) -> None: self.data = data - return def set_left(self, node: MyNode | None) -> None: self.left = node - return def set_right(self, node: MyNode | None) -> None: self.right = node - return def set_height(self, height: int) -> None: self.height = height - return def get_height(node: MyNode | None) -> int: diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py index 374c35f7f..487fb8145 100644 --- a/machine_learning/polymonial_regression.py +++ b/machine_learning/polymonial_regression.py @@ -34,7 +34,6 @@ def viz_polymonial(): plt.xlabel("Position level") plt.ylabel("Salary") plt.show() - return if __name__ == "__main__": From 7cdb011ba440a07768179bfaea190bddefc890d8 Mon Sep 17 00:00:00 2001 From: Genesis <128913081+KaixLina@users.noreply.github.com> Date: Sun, 26 Mar 2023 20:49:18 +0530 Subject: [PATCH 273/368] New gitter link added or replaced (#8551) * New gitter link added * ruff==0.0.258 * noqa: S310 * noqa: S310 * Update ruff.yml * Add Ruff rule S311 * Ruff v0.0.259 * return ("{:08x}" * 5).format(*self.h) * pickle.load(f) # noqa: S301 --------- Co-authored-by: Christian Clauss --- .github/stale.yml | 4 ++-- .pre-commit-config.yaml | 2 +- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- hashes/sha1.py | 2 +- machine_learning/sequential_minimum_optimization.py | 2 +- neural_network/convolution_neural_network.py | 2 +- project_euler/README.md | 2 +- pyproject.toml | 1 + web_programming/download_images_from_google_query.py | 2 +- 10 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 36ca56266..813f68834 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ pulls: closeComment: > Please reopen this pull request once you commit the changes requested or make improvements on the code. 
If this is not the case and you need - some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms) + some help, feel free to seek help from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the reviewers. Thank you for your contributions! issues: @@ -59,5 +59,5 @@ issues: closeComment: > Please reopen this issue once you add more information and updates here. If this is not the case and you need some help, feel free to seek help - from our [Gitter](https://gitter.im/TheAlgorithms) or ping one of the + from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the reviewers. Thank you for your contributions! diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 58cec4ff6..72a878387 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.257 + rev: v0.0.259 hooks: - id: ruff diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6b6e4d21b..75e4fb893 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). ## Contributing @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. diff --git a/README.md b/README.md index 68a6e5e6f..3d2f1a110 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Discord chat - + Gitter chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im)! Community channels are a great way for you to ask questions and get help. Please join us! 
## List of Algorithms diff --git a/hashes/sha1.py b/hashes/sha1.py index 9f0437f20..b325ce3e4 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -124,7 +124,7 @@ class SHA1Hash: self.h[3] + d & 0xFFFFFFFF, self.h[4] + e & 0xFFFFFFFF, ) - return "%08x%08x%08x%08x%08x" % tuple(self.h) + return ("{:08x}" * 5).format(*self.h) def test_sha1_hash(): diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index b68bd52f4..b24f5669e 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -458,7 +458,7 @@ def test_cancel_data(): CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) - response = urllib.request.urlopen(request) + response = urllib.request.urlopen(request) # noqa: S310 content = response.read().decode("utf-8") with open(r"cancel_data.csv", "w") as f: f.write(content) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index bd0550212..f5ec156f3 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -77,7 +77,7 @@ class CNN: def read_model(cls, model_path): # read saved model with open(model_path, "rb") as f: - model_dic = pickle.load(f) + model_dic = pickle.load(f) # noqa: S301 conv_get = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) diff --git a/project_euler/README.md b/project_euler/README.md index e3dc035ee..4832d0078 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -10,7 +10,7 @@ The solutions will be checked by our [automated testing on GitHub Actions](https ## Solution Guidelines -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. 
### Coding Style diff --git a/pyproject.toml b/pyproject.toml index 169c3a71b..23fe45e97 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ ignore = [ # `ruff rule S101` for a description of that rule "S101", # S101: Use of `assert` detected -- DO NOT FIX "S105", # S105: Possible hardcoded password: 'password' "S113", # S113: Probable use of requests call without timeout + "S311", # S311: Standard pseudo-random generators are not suitable for cryptographic purposes "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] select = [ # https://beta.ruff.rs/docs/rules diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index 9c0c21dc8..441347459 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -86,7 +86,7 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) path_name = f"query_{query.replace(' ', '_')}" if not os.path.exists(path_name): os.makedirs(path_name) - urllib.request.urlretrieve( + urllib.request.urlretrieve( # noqa: S310 original_size_img, f"{path_name}/original_size_img_{index}.jpg" ) return index From 86b2ab09aab359ef1b4bea58ed3c1fdf5b989500 Mon Sep 17 00:00:00 2001 From: Christian Veenhuis Date: Sun, 26 Mar 2023 18:20:47 +0200 Subject: [PATCH 274/368] Fix broken links to Gitter Community (Fixes: #8197) (#8546) Co-authored-by: Christian Clauss --- .github/stale.yml | 4 ++-- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- project_euler/README.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 813f68834..0939e1f22 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ pulls: closeComment: > Please reopen this pull request once you commit the changes requested or make improvements on the code. If this is not the case and you need - some help, feel free to seek help from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) + some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the reviewers. Thank you for your contributions! issues: @@ -59,5 +59,5 @@ issues: closeComment: > Please reopen this issue once you add more information and updates here. If this is not the case and you need some help, feel free to seek help - from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the + from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the reviewers. Thank you for your contributions! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 75e4fb893..2bb0c2e39 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. 
If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). ## Contributing @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. diff --git a/README.md b/README.md index 3d2f1a110..bf6e0ed3c 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Discord chat - + Gitter chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community)! Community channels are a great way for you to ask questions and get help. Please join us! ## List of Algorithms diff --git a/project_euler/README.md b/project_euler/README.md index 4832d0078..16865edf2 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -10,7 +10,7 @@ The solutions will be checked by our [automated testing on GitHub Actions](https ## Solution Guidelines -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. 
### Coding Style From ac111ee463065e372ad148dbafba630045ecf94c Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 29 Mar 2023 00:41:54 +0300 Subject: [PATCH 275/368] Reduce the complexity of graphs/bi_directional_dijkstra.py (#8165) * Reduce the complexity of graphs/bi_directional_dijkstra.py * Try to lower the --max-complexity threshold in the file .flake8 * Lower the --max-complexity threshold in the file .flake8 * updating DIRECTORY.md * updating DIRECTORY.md * Try to lower max-complexity * Try to lower max-complexity * Try to lower max-complexity --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- graphs/bi_directional_dijkstra.py | 93 +++++++++++++++++-------------- pyproject.toml | 2 +- 2 files changed, 52 insertions(+), 43 deletions(-) diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index fc53e2f0d..a4489026b 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -17,6 +17,32 @@ from typing import Any import numpy as np +def pass_and_relaxation( + graph: dict, + v: str, + visited_forward: set, + visited_backward: set, + cst_fwd: dict, + cst_bwd: dict, + queue: PriorityQueue, + parent: dict, + shortest_distance: float | int, +) -> float | int: + for nxt, d in graph[v]: + if nxt in visited_forward: + continue + old_cost_f = cst_fwd.get(nxt, np.inf) + new_cost_f = cst_fwd[v] + d + if new_cost_f < old_cost_f: + queue.put((new_cost_f, nxt)) + cst_fwd[nxt] = new_cost_f + parent[nxt] = v + if nxt in visited_backward: + if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: + shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] + return shortest_distance + + def bidirectional_dij( source: str, destination: str, graph_forward: dict, graph_backward: dict ) -> int: @@ -51,53 +77,36 @@ def bidirectional_dij( if source == destination: return 0 - while queue_forward and queue_backward: - while not queue_forward.empty(): - _, v_fwd = queue_forward.get() - - if v_fwd not in visited_forward: - break - else: - break + while not queue_forward.empty() and not queue_backward.empty(): + _, v_fwd = queue_forward.get() visited_forward.add(v_fwd) - while not queue_backward.empty(): - _, v_bwd = queue_backward.get() - - if v_bwd not in visited_backward: - break - else: - break + _, v_bwd = queue_backward.get() visited_backward.add(v_bwd) - # forward pass and relaxation - for nxt_fwd, d_forward in graph_forward[v_fwd]: - if nxt_fwd in visited_forward: - continue - old_cost_f = cst_fwd.get(nxt_fwd, np.inf) - new_cost_f = cst_fwd[v_fwd] + d_forward - if new_cost_f < old_cost_f: - queue_forward.put((new_cost_f, nxt_fwd)) - cst_fwd[nxt_fwd] = new_cost_f - parent_forward[nxt_fwd] = v_fwd - if nxt_fwd in visited_backward: - if cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] < shortest_distance: - shortest_distance = cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] + shortest_distance = pass_and_relaxation( + graph_forward, + v_fwd, + visited_forward, + visited_backward, + cst_fwd, + cst_bwd, + queue_forward, + parent_forward, + shortest_distance, + ) - # backward pass and relaxation - for nxt_bwd, d_backward in graph_backward[v_bwd]: - if nxt_bwd in visited_backward: - continue - old_cost_b = cst_bwd.get(nxt_bwd, np.inf) - new_cost_b = cst_bwd[v_bwd] + d_backward - if new_cost_b < old_cost_b: - queue_backward.put((new_cost_b, nxt_bwd)) - cst_bwd[nxt_bwd] = new_cost_b - parent_backward[nxt_bwd] = v_bwd - - if nxt_bwd in visited_forward: - if cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] < shortest_distance: - 
shortest_distance = cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] + shortest_distance = pass_and_relaxation( + graph_backward, + v_bwd, + visited_backward, + visited_forward, + cst_bwd, + cst_fwd, + queue_backward, + parent_backward, + shortest_distance, + ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break diff --git a/pyproject.toml b/pyproject.toml index 23fe45e97..48c3fbd40 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,7 +61,7 @@ show-source = true target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 20 # default: 10 +max-complexity = 17 # default: 10 [tool.ruff.pylint] # DO NOT INCREASE THESE VALUES max-args = 10 # default: 5 From a71f22dae54f830dbf68b3bd5e5e8d540e338a4c Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Thu, 30 Mar 2023 10:39:21 +0530 Subject: [PATCH 276/368] Update cnn_classification.py (#8570) --- computer_vision/cnn_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 1c193fcbb..9b5f8c95e 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -93,7 +93,7 @@ if __name__ == "__main__": test_image = tf.keras.preprocessing.image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis=0) result = classifier.predict(test_image) - training_set.class_indices + # training_set.class_indices if result[0][0] == 0: prediction = "Normal" if result[0][0] == 1: From a00492911a949a1e59072367bbabee22cd884106 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Fri, 31 Mar 2023 16:47:13 +0530 Subject: [PATCH 277/368] added a problem on kadane's algo and its solution. (#8569) * added kadane's algorithm directory with one problem's solution. 
* added type hints * Rename kaadne_algorithm/max_product_subarray.py to dynamic_programming/max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py * Update max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * Update max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/max_product_subarray.py | 53 +++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 dynamic_programming/max_product_subarray.py diff --git a/dynamic_programming/max_product_subarray.py b/dynamic_programming/max_product_subarray.py new file mode 100644 index 000000000..425859bc0 --- /dev/null +++ b/dynamic_programming/max_product_subarray.py @@ -0,0 +1,53 @@ +def max_product_subarray(numbers: list[int]) -> int: + """ + Returns the maximum product that can be obtained by multiplying a + contiguous subarray of the given integer list `nums`. + + Example: + >>> max_product_subarray([2, 3, -2, 4]) + 6 + >>> max_product_subarray((-2, 0, -1)) + 0 + >>> max_product_subarray([2, 3, -2, 4, -1]) + 48 + >>> max_product_subarray([-1]) + -1 + >>> max_product_subarray([0]) + 0 + >>> max_product_subarray([]) + 0 + >>> max_product_subarray("") + 0 + >>> max_product_subarray(None) + 0 + >>> max_product_subarray([2, 3, -2, 4.5, -1]) + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + >>> max_product_subarray("ABC") + Traceback (most recent call last): + ... 
+ ValueError: numbers must be an iterable of integers + """ + if not numbers: + return 0 + + if not isinstance(numbers, (list, tuple)) or not all( + isinstance(number, int) for number in numbers + ): + raise ValueError("numbers must be an iterable of integers") + + max_till_now = min_till_now = max_prod = numbers[0] + + for i in range(1, len(numbers)): + # update the maximum and minimum subarray products + number = numbers[i] + if number < 0: + max_till_now, min_till_now = min_till_now, max_till_now + max_till_now = max(number, max_till_now * number) + min_till_now = min(number, min_till_now * number) + + # update the maximum product found till now + max_prod = max(max_prod, max_till_now) + + return max_prod From 238fe8c494ab5be80c96441095d1c8958f95c04d Mon Sep 17 00:00:00 2001 From: NIKITA PANDEY <113332472+nikitapandeyy@users.noreply.github.com> Date: Fri, 31 Mar 2023 19:38:13 +0530 Subject: [PATCH 278/368] Update receive_file.py (#8541) * Update receive_file.py Here are the changes I made: Added the main() function and called it from if __name__ == "__main__" block. This makes it easier to test the code and import it into other programs. Added socket.AF_INET as the first argument to socket.socket(). This specifies the address family to be used, which is necessary when using connect(). Changed print(f"{data = }") to print("Received:", len(data), "bytes"). This makes it clearer what's happening and how much data is being received. Changed the final print statement to "Successfully received the file". This makes it more accurate and descriptive. Moved the import statement to the top of the file. This is a common convention in Python. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- file_transfer/receive_file.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/file_transfer/receive_file.py b/file_transfer/receive_file.py index 37a503036..f50ad9fe1 100644 --- a/file_transfer/receive_file.py +++ b/file_transfer/receive_file.py @@ -1,8 +1,9 @@ -if __name__ == "__main__": - import socket # Import socket module +import socket - sock = socket.socket() # Create a socket object - host = socket.gethostname() # Get local machine name + +def main(): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + host = socket.gethostname() port = 12312 sock.connect((host, port)) @@ -13,11 +14,14 @@ if __name__ == "__main__": print("Receiving data...") while True: data = sock.recv(1024) - print(f"{data = }") if not data: break - out_file.write(data) # Write data to a file + out_file.write(data) - print("Successfully got the file") + print("Successfully received the file") sock.close() print("Connection closed") + + +if __name__ == "__main__": + main() From 5ce63b5966b6ad9c7ce36c449fb31112c3e1d084 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 01:11:24 -0400 Subject: [PATCH 279/368] Fix `mypy` errors in `lu_decomposition.py` (attempt 2) (#8100) * updating DIRECTORY.md * Fix mypy errors in lu_decomposition.py * Replace for-loops with comprehensions * Add explanation of LU decomposition and extra doctests Add an explanation of LU decomposition with conditions for when an LU decomposition exists Add extra doctests to handle each of the possible conditions for when a decomposition exists/doesn't exist * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions 
<${GITHUB_ACTOR}@users.noreply.github.com> --- arithmetic_analysis/lu_decomposition.py | 91 ++++++++++++++++++------- 1 file changed, 65 insertions(+), 26 deletions(-) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 217719cf4..941c1dadf 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -1,62 +1,101 @@ -"""Lower-Upper (LU) Decomposition. +""" +Lower–upper (LU) decomposition factors a matrix as a product of a lower +triangular matrix and an upper triangular matrix. A square matrix has an LU +decomposition under the following conditions: + - If the matrix is invertible, then it has an LU decomposition if and only + if all of its leading principal minors are non-zero (see + https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of + leading principal minors of a matrix). + - If the matrix is singular (i.e., not invertible) and it has a rank of k + (i.e., it has k linearly independent columns), then it has an LU + decomposition if its first k leading principal minors are non-zero. -Reference: -- https://en.wikipedia.org/wiki/LU_decomposition +This algorithm will simply attempt to perform LU decomposition on any square +matrix and raise an error if no such decomposition exists. + +Reference: https://en.wikipedia.org/wiki/LU_decomposition """ from __future__ import annotations import numpy as np -from numpy import float64 -from numpy.typing import ArrayLike -def lower_upper_decomposition( - table: ArrayLike[float64], -) -> tuple[ArrayLike[float64], ArrayLike[float64]]: - """Lower-Upper (LU) Decomposition - - Example: - +def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """ + Perform LU decomposition on a given matrix and raises an error if the matrix + isn't square or if no such decomposition exists >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) - >>> outcome = lower_upper_decomposition(matrix) - >>> outcome[0] + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat array([[1. , 0. , 0. ], [0. , 1. , 0. ], [2.5, 8. , 1. ]]) - >>> outcome[1] + >>> upper_mat array([[ 2. , -2. , 1. ], [ 0. , 1. , 2. ], [ 0. , 0. , -17.5]]) + >>> matrix = np.array([[4, 3], [6, 3]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat + array([[1. , 0. ], + [1.5, 1. ]]) + >>> upper_mat + array([[ 4. , 3. ], + [ 0. , -1.5]]) + + # Matrix is not square >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) - >>> lower_upper_decomposition(matrix) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): ... ValueError: 'table' has to be of square shaped array but got a 2x3 array: [[ 2 -2 1] [ 0 1 2]] + + # Matrix is invertible, but its first leading principal minor is 0 + >>> matrix = np.array([[0, 1], [1, 0]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... + ArithmeticError: No LU decomposition exists + + # Matrix is singular, but its first leading principal minor is 1 + >>> matrix = np.array([[1, 0], [1, 0]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat + array([[1., 0.], + [1., 1.]]) + >>> upper_mat + array([[1., 0.], + [0., 0.]]) + + # Matrix is singular, but its first leading principal minor is 0 + >>> matrix = np.array([[0, 1], [0, 1]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... 
+ ArithmeticError: No LU decomposition exists """ - # Table that contains our data - # Table has to be a square array so we need to check first + # Ensure that table is a square array rows, columns = np.shape(table) if rows != columns: raise ValueError( - f"'table' has to be of square shaped array but got a {rows}x{columns} " - + f"array:\n{table}" + f"'table' has to be of square shaped array but got a " + f"{rows}x{columns} array:\n{table}" ) + lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) for i in range(columns): for j in range(i): - total = 0 - for k in range(j): - total += lower[i][k] * upper[k][j] + total = sum(lower[i][k] * upper[k][j] for k in range(j)) + if upper[j][j] == 0: + raise ArithmeticError("No LU decomposition exists") lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i, columns): - total = 0 - for k in range(i): - total += lower[i][k] * upper[k][j] + total = sum(lower[i][k] * upper[k][j] for k in range(j)) upper[i][j] = table[i][j] - total return lower, upper From dc4f603dad22eab31892855555999b552e97e9d8 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 08:47:24 +0300 Subject: [PATCH 280/368] Add Project Euler problem 187 solution 1 (#8182) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_187/__init__.py | 0 project_euler/problem_187/sol1.py | 58 +++++++++++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 project_euler/problem_187/__init__.py create mode 100644 project_euler/problem_187/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1d3177801..1a641d8ec 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -990,6 +990,8 @@ * [Sol1](project_euler/problem_174/sol1.py) * Problem 180 * [Sol1](project_euler/problem_180/sol1.py) + * Problem 187 + * [Sol1](project_euler/problem_187/sol1.py) * Problem 188 * [Sol1](project_euler/problem_188/sol1.py) * Problem 191 diff --git a/project_euler/problem_187/__init__.py b/project_euler/problem_187/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_187/sol1.py b/project_euler/problem_187/sol1.py new file mode 100644 index 000000000..12f03e2a7 --- /dev/null +++ b/project_euler/problem_187/sol1.py @@ -0,0 +1,58 @@ +""" +Project Euler Problem 187: https://projecteuler.net/problem=187 + +A composite is a number containing at least two prime factors. +For example, 15 = 3 x 5; 9 = 3 x 3; 12 = 2 x 2 x 3. + +There are ten composites below thirty containing precisely two, +not necessarily distinct, prime factors: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26. + +How many composite integers, n < 10^8, have precisely two, +not necessarily distinct, prime factors? 
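As a quick sanity check of the statement above, a minimal brute-force count (an illustrative sketch only, not part of the patch) reproduces the ten semiprimes below thirty listed in the problem:

def count_prime_factors(n: int) -> int:
    # counts prime factors with multiplicity by trial division
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            count += 1
            n //= d
        d += 1
    return count + (1 if n > 1 else 0)

semiprimes = [n for n in range(4, 30) if count_prime_factors(n) == 2]
print(semiprimes)       # [4, 6, 9, 10, 14, 15, 21, 22, 25, 26]
print(len(semiprimes))  # 10, matching the ten composites quoted in the statement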
+""" + +from math import isqrt + + +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + """ + + is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): + if is_prime[i]: + for j in range(i**2, max_number, i): + is_prime[j] = False + + return [i for i in range(2, max_number) if is_prime[i]] + + +def solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors + + >>> solution(30) + 10 + """ + + prime_numbers = calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left <= right: + while prime_numbers[left] * prime_numbers[right] >= max_number: + right -= 1 + semiprimes_count += right - left + 1 + left += 1 + + return semiprimes_count + + +if __name__ == "__main__": + print(f"{solution() = }") From e4d90e2d5b92fdcff558f1848843dfbe20d81035 Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 1 Apr 2023 09:26:43 +0300 Subject: [PATCH 281/368] change space complexity of linked list's __len__ from O(n) to O(1) (#8183) --- data_structures/linked_list/circular_linked_list.py | 2 +- data_structures/linked_list/doubly_linked_list.py | 2 +- data_structures/linked_list/merge_two_lists.py | 2 +- data_structures/linked_list/singly_linked_list.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 67a63cd55..9092fb29e 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -24,7 +24,7 @@ class CircularLinkedList: break def __len__(self) -> int: - return len(tuple(iter(self))) + return sum(1 for _ in self) def __repr__(self): return "->".join(str(item) for item in iter(self)) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 6c81493ff..41d07d63e 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -51,7 +51,7 @@ class DoublyLinkedList: >>> len(linked_list) == 5 True """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def insert_at_head(self, data): self.insert_at_nth(0, data) diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 61e2412aa..ca0d3bb48 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -44,7 +44,7 @@ class SortedLinkedList: >>> len(SortedLinkedList(test_data_odd)) 8 """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def __str__(self) -> str: """ diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index bdeb5922a..a8f9e8ebb 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -72,7 +72,7 @@ class LinkedList: >>> len(linked_list) 0 """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def __repr__(self) -> str: """ From 9e0c357a57f76abc354d704012040f3f5511a941 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 1 Apr 2023 11:59:26 +0530 Subject: [PATCH 282/368] chore: additional Project Euler solution hash (#8593) 
--- scripts/project_euler_answers.json | 109 ++++++++++++++++++++++++++++- 1 file changed, 108 insertions(+), 1 deletion(-) diff --git a/scripts/project_euler_answers.json b/scripts/project_euler_answers.json index 6d354363e..f2b876934 100644 --- a/scripts/project_euler_answers.json +++ b/scripts/project_euler_answers.json @@ -723,5 +723,112 @@ "722": "9687101dfe209fd65f57a10603baa38ba83c9152e43a8b802b96f1e07f568e0e", "723": "74832787e7d4e0cb7991256c8f6d02775dffec0684de234786f25f898003f2de", "724": "fa05e2b497e7eafa64574017a4c45aadef6b163d907b03d63ba3f4021096d329", - "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418" + "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418", + "726": "93e41c533136bf4b436e493090fd4e7b277234db2a69c62a871f775ff26681bf", + "727": "c366f7426ca9351dcdde2e3bea01181897cda4d9b44977678ea3828419b84851", + "728": "8de62a644511d27c7c23c7722f56112b3c1ab9b05a078a98a0891f09f92464c6", + "729": "0ae82177174eef99fc80a2ec921295f61a6ac4dfed86a1bf333a50c26d01955c", + "730": "78cd876a176c8fbf7c2155b80dccbdededdbc43c28ef17b5a6e554d649325d38", + "731": "54afb9f829be51d29f90eecbfe40e5ba91f3a3bf538de62f3e34674af15eb542", + "732": "c4dc4610dcafc806b30e5d3f5560b57f462218a04397809843a7110838f0ebac", + "733": "bdde7d98d057d6a6ae360fd2f872d8bccb7e7f2971df37a3c5f20712ea3c618f", + "734": "9a514875bd9af26fcc565337771f852d311cd77033186e4d957e7b6c7b8ce018", + "735": "8bbc5a27c0031d8c44f3f73c99622a202cd6ea9a080049d615a7ae80ce6024f9", + "736": "e0d4c78b9b3dae51940877aff28275d036eccfc641111c8e34227ff6015a0fab", + "737": "a600884bcaa01797310c83b198bad58c98530289305af29b0bf75f679af38d3a", + "738": "c85f15fdaafe7d5525acff960afef7e4b8ffded5a7ee0d1dc2b0e8d0c26b9b46", + "739": "8716e9302f0fb90153e2f522bd88a710361a897480e4ccc0542473c704793518", + "740": "6ff41ee34b263b742cda109aee3be9ad6c95eec2ce31d6a9fc5353bba1b41afd", + "741": "99ac0eb9589b895e5755895206bbad5febd6bc29b2912df1c7544c547e26bca3", + "742": "7d2761a240aa577348df4813ea248088d0d6d8d421142c712ed576cdc90d4df9", + "743": "d93c42a129c0961b4e36738efae3b7e8ffae3a4daeced20e85bb740d3d72522d", + "744": "211f76700a010461486dde6c723720be85e68c192cd8a8ed0a88860b8ae9b0f0", + "745": "2d32dc1fea2f1b8600c0ada927b057b566870ceb5362cce71ac3693dcb7136ae", + "746": "2df1c2a0181f0c25e8d13d2a1eadba55a6b06267a2b22075fcf6867fb2e10c02", + "747": "a8d8f93142e320c6f0dd386c7a3bfb011bbdc15b85291a9be8f0266b3608175e", + "748": "7de937e04c10386b240afb8bb2ff590009946df8b7850a0329ccdb59fca8955f", + "749": "1a55f5484ccf964aeb186faedefa01db05d87180891dc2280b6eb85b6efb4779", + "750": "fa4318c213179e6af1c949be7cf47210f4383e0a44d191e2bad44228d3192f14", + "751": "12fe650fcb3afc214b3d647c655070e8142cfd397441fc7636ad7e6ffcaefde2", + "752": "e416c0123bc6b82df8726b328494db31aa4781d938a0a6e2107b1e44c73c0434", + "753": "0ee3299bc89e1e4c2fc79285fb1cd84c887456358a825e56be92244b7115f5af", + "754": "1370574b16207c41d3dafb62aa898379ec101ac36843634b1633b7b509d4c35a", + "755": "78bb4b18b13f5254cfafe872c0e93791ab5206b2851960dc6aebea8f62b9580c", + "756": "6becaabbda2e9ea22373e62e989b6b70467efa24fbe2f0d124d7a99a53e93f74", + "757": "fbfee0a5c4fa57a1dd6cf0c9bb2423cf7e7bcb130e67114aa360e42234987314", + "758": "8e4dfc259cec9dfd89d4b4ac8c33c75af6e0f5f7926526ee22ad4d45f93d3c18", + "759": "40bac0ed2e4f7861a6d9a2d87191a9034e177c319aa40a43638cc1b69572e5f2", + "760": "7ab50386a211f0815593389ab05b57a1a5eb5cbf5b9a85fe4afc517dcab74e06", + "761": "1cdb0318ac16e11c8d2ae7b1d7ca7138f7b1a461e9d75bd69be0f9cdd3add0c5", + "762": 
"84c4662267d5809380a540dfc2881665b3019047d74d5ef0a01f86e45f4b5b59", + "763": "f0def5903139447fabe7d106db5fff660d94b45af7b8b48d789596cf65ab2514", + "764": "7b4131f4d1e13d091ca7dd4d32317a14a2a24e6e1abd214df1c14c215287b330", + "765": "7558b775727426bccd945f5aa6b3e131e6034a7b1ff8576332329ef65d6a1663", + "766": "23c309430fa9546adb617457dbfd30fb7432904595c8c000e9b67ea23f32a53b", + "767": "70aef22ac2db8a5bdfcc42ff8dafbd2901e85e268f5f3c45085aa40c590b1d42", + "768": "b69a808dfc654b037e2f47ace16f48fe3bb553b3c8eed3e2b6421942fbf521d0", + "769": "78537a30577e806c6d8d94725e54d2d52e56f7f39f89c133cd5d0a2aad7e46e4", + "770": "c9d80c19c4895d1498bf809fcc37c447fa961fb325e5667eb35d6aa992966b41", + "771": "9803ace30c0d90d422e703fdf25a10a9342d0178a277ebc20c7bd6feac4c7a15", + "772": "f5a1e391af815ea6453db58a1bd71790f433c44ed63e5e93d8f5c045dfd5a464", + "773": "e1b93fc323c4d9c383100603339548e1e56ce9c38bcdcc425024c12b862ea8cb", + "774": "3646cd098b213014fb7bbc9597871585e62ee0cf2770e141f1df771237cc09ab", + "775": "d9d7d515ce7350c9e5696d85f68bbb42daa74b9e171a601dd04c823b18bb7757", + "776": "83286074d3bc86a5b449facb5fe5eafc91eb4c8031e2fb5e716443402cd8ed0f", + "777": "e62616a387d05b619d47cee3d49d5d2db19393736bf54b6cdd20933c0531cb7e", + "778": "d4de958ba44d25353de5b380e04d06c7968794ad50dbf6231ad0049ff53e106b", + "779": "c08ce54a59afc4af62f28b80a9c9a5190822d124eed8d73fd6db3e19c81e2157", + "780": "fc7ba646c16482f0f4f5ce2b06d21183dba2bdeaf9469b36b55bc7bc2d87baf3", + "781": "8fa5733f06838fb61b55b3e9d59c5061d922147e59947fe52e566dd975b2199f", + "782": "9f757d92df401ee049bc066bb2625c6287e5e4bcd38c958396a77a578f036a24", + "783": "270ff37f60c267a673bd4b223e44941f01ae9cfbf6bbdf99ca57af89b1e9a66f", + "784": "388b17c4c7b829cef767f83b4686c903faeec1241edfe5f58ee91d2b0c7f8dfc", + "785": "77cf600204c5265e1d5d3d26bf28ba1e92e6f24def040c16977450bec8b1cb99", + "786": "fb14022b7edbc6c7bfde27f35b49f6acaa4f0fc383af27614cb9d4a1980e626b", + "787": "7516ba0ac1951665723dcc4adcc52764d9497e7b6ed30bdb9937ac9df82b7c4f", + "788": "adede1d30258bb0f353af11f559b67f8b823304c71e967f52db52d002760c24f", + "789": "0c82e744a1f9bc57fd8ae8b2f479998455bc45126de971c59b68541c254e303a", + "790": "319847122251afd20d4d650047c55981a509fa2be78abd7c9c3caa0555e60a05", + "791": "2e0bbdcd0a8460e1e33c55668d0dc9752379a78b9f3561d7a17b922a5541a3fb", + "792": "5f77834c5a509023dd95dd98411eae1dd4bafd125deca590632f409f92fd257b", + "793": "dbfd900a3b31eeec2f14b916f5151611541cb716d80b7b9a1229de12293a02ea", + "794": "d019fe415aba832c4c761140d60c466c9aaad52b504df3167c17f2d3f0b277a7", + "795": "617b259349da44c2af2664acde113673ab3bb03a85d31f1be8f01027d0ebd4d3", + "796": "cba6b30a818d073398e5802211987f0897523e4752987bb445b2bca079670e22", + "797": "61e42cac3d7858b8850111a8c64c56432a18dd058dfb6afd773f07d703703b1a", + "798": "ae8b155d6b77522af79f7e4017fefe92aaa5d45eff132c83dc4d4bcfc9686020", + "799": "a41cb14ddf8f1948a01f590fbe53d9ca4e2faf48375ce1c306f91acf7c94e005", + "800": "c6a47bc6f02cf06be16728fb308c83f2f2ae350325ef7016867f5bdaea849d71", + "801": "d14b358c76b55106613f9c0a2112393338dfd01513b0fd231b79fc8db20e41f0", + "802": "22ae33e67fb48accfaa3b36e70c5a19066b974194c3130680de0c7cdce2d0f2e", + "803": "d95b3f9bbb7054042c1fba4db02f7223a2dad94977a36f08c8aaf92f373f9e78", + "804": "b0b1cf7253593eb2334c75e66dbe22b4b4540347485f1ea24e80226b4b18171c", + "805": "41b1ff5db0e70984ad20c50d1a9ac2b5a53ccd5f42796c8e948ae8880005fbb9", + "806": "b9c813beb39671adb8e1530555cadca44c21ddc7127932274918df2091dbd9ca", + "807": "745fd9ba97970d85a29877942839e41fc192794420e86f3bde39fd26db7a8bff", + "808": 
"6c73b947eb603602a7e8afadc83eaaa381a46db8b82a6fb89c9c1d93cb023fce", + "809": "eebac7753da4c1230dfce0f15fc124ffff01b0e432f0b74623b60cff71bbc9a9", + "810": "42be7899672a1a0046823603ce60dbeda7250a56fcb8d0913093850c85394307", + "811": "8698cd28ae4d93db36631870c33e4a8a527d970050d994666115f54260b64138", + "812": "dc2495924f37353db8b846323b8085fae9db502e890c513ed2e64ed7281f567f", + "813": "92179dde05aa6557baca65699fda50ca024d33a77078d8e128caa3c5db84064b", + "814": "344ed8cb7684307c00b7f03d751729a7f9d2a5f4a4cb4574594113d69593c0c1", + "815": "f642cf15345af3feab60e26a02aee038f759914906a5b2b469b46fdeee50ff59", + "816": "058178444e85f2aedb2f75d824a469747381f0bd3235d8c72df4385fec86eb07", + "817": "582fdc2233298192b09ceaf1463d6be06a09894075532630aa9d9efcfcb31da4", + "818": "67f6964d6ff114a43371b8375c44db2f1362df4f110b4a7ce8d79cf1b76621a0", + "819": "c7a82513ad48dfc87f2c1e0f2915b71464b7f5a16501c71df4ae4a8741dceef3", + "820": "9b23ae0181f320aadda2637ac2179c8b41b00715630c3acb643c7aee3b81cf90", + "821": "0941e396ff15b98fd7827de8e33ef94996d48ba719a88ba8e2da7f2605df3e5c", + "822": "ed8ef7f568939b9df1b77ae58344940b91c7e154a4367fe2b179bc7b9484d4e6", + "823": "05139328571a86096032b57e3a6a02a61acad4fb0d8f8e1b5d0ffb0d063ba697", + "826": "7f40f14ca65e5c06dd9ec9bbb212adb4d97a503199cb3c30ed921a04373bbe1c", + "827": "80461f02c63654c642382a6ffb7a44d0a3554434dfcfcea00ba91537724c7106", + "828": "520c196175625a0230afb76579ea26033372de3ef4c78aceb146b84322bfa871", + "829": "ed0089e61cf5540dd4a8fef1c468b96cf57f1d2bb79968755ba856d547ddafdf", + "831": "8ec445084427419ca6da405e0ded9814a4b4e11a2be84d88a8dea421f8e49992", + "832": "cfcb9ebef9308823f64798b5e12a59bf77ff6f92b0eae3790a61c0a26f577010", + "833": "e6ff3a5b257eb53366a32bfc8ea410a00a78bafa63650c76ac2bceddfbb42ff5", + "834": "b0d2a7e7d629ef14db9e7352a9a06d6ca66f750429170bb169ca52c172b8cc96", + "835": "bdfa1b1eecbad79f5de48bc6daee4d2b07689d7fb172aa306dd6094172b396f0" } From d66e1e873288bf399559c9ca40310d4b031aec50 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 15:18:13 +0300 Subject: [PATCH 283/368] Add Project Euler problem 800 solution 1 (#8567) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 3 ++ project_euler/problem_800/__init__.py | 0 project_euler/problem_800/sol1.py | 65 +++++++++++++++++++++++++++ 3 files changed, 68 insertions(+) create mode 100644 project_euler/problem_800/__init__.py create mode 100644 project_euler/problem_800/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1a641d8ec..18c573909 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -317,6 +317,7 @@ * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) + * [Max Product Subarray](dynamic_programming/max_product_subarray.py) * [Max Sub Array](dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) @@ -1016,6 +1017,8 @@ * [Sol1](project_euler/problem_587/sol1.py) * Problem 686 * [Sol1](project_euler/problem_686/sol1.py) + * Problem 800 + * [Sol1](project_euler/problem_800/sol1.py) ## Quantum * [Bb84](quantum/bb84.py) diff --git a/project_euler/problem_800/__init__.py b/project_euler/problem_800/__init__.py new file mode 100644 index 000000000..e69de29bb diff 
--git a/project_euler/problem_800/sol1.py b/project_euler/problem_800/sol1.py new file mode 100644 index 000000000..f887787bc --- /dev/null +++ b/project_euler/problem_800/sol1.py @@ -0,0 +1,65 @@ +""" +Project Euler Problem 800: https://projecteuler.net/problem=800 + +An integer of the form p^q q^p with prime numbers p != q is called a hybrid-integer. +For example, 800 = 2^5 5^2 is a hybrid-integer. + +We define C(n) to be the number of hybrid-integers less than or equal to n. +You are given C(800) = 2 and C(800^800) = 10790 + +Find C(800800^800800) +""" + +from math import isqrt, log2 + + +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + """ + + is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): + if is_prime[i]: + for j in range(i**2, max_number, i): + is_prime[j] = False + + return [i for i in range(2, max_number) if is_prime[i]] + + +def solution(base: int = 800800, degree: int = 800800) -> int: + """ + Returns the number of hybrid-integers less than or equal to base^degree + + >>> solution(800, 1) + 2 + + >>> solution(800, 800) + 10790 + """ + + upper_bound = degree * log2(base) + max_prime = int(upper_bound) + prime_numbers = calculate_prime_numbers(max_prime) + + hybrid_integers_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left < right: + while ( + prime_numbers[right] * log2(prime_numbers[left]) + + prime_numbers[left] * log2(prime_numbers[right]) + > upper_bound + ): + right -= 1 + hybrid_integers_count += right - left + left += 1 + + return hybrid_integers_count + + +if __name__ == "__main__": + print(f"{solution() = }") From 3d2012c4ba3a9d9ddd80e518f0b5b9ba6c52df7d Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 15:20:08 +0300 Subject: [PATCH 284/368] Add Project Euler problem 94 solution 1 (#8599) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_094/__init__.py | 0 project_euler/problem_094/sol1.py | 44 +++++++++++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 project_euler/problem_094/__init__.py create mode 100644 project_euler/problem_094/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 18c573909..c781b17bf 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -937,6 +937,8 @@ * [Sol1](project_euler/problem_091/sol1.py) * Problem 092 * [Sol1](project_euler/problem_092/sol1.py) + * Problem 094 + * [Sol1](project_euler/problem_094/sol1.py) * Problem 097 * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 diff --git a/project_euler/problem_094/__init__.py b/project_euler/problem_094/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_094/sol1.py b/project_euler/problem_094/sol1.py new file mode 100644 index 000000000..a41292fe2 --- /dev/null +++ b/project_euler/problem_094/sol1.py @@ -0,0 +1,44 @@ +""" +Project Euler Problem 94: https://projecteuler.net/problem=94 + +It is easily proved that no equilateral triangle exists with integral length sides and +integral area. However, the almost equilateral triangle 5-5-6 has an area of 12 square +units. + +We shall define an almost equilateral triangle to be a triangle for which two sides are +equal and the third differs by no more than one unit. 
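As a cross-check on the Problem 800 solution above (a throwaway sketch, not part of the patch), the hybrid-integers not exceeding 800 can be enumerated directly, matching the given value C(800) = 2:

from itertools import combinations

small_primes = [2, 3, 5, 7, 11, 13]
hybrids = sorted(
    p**q * q**p
    for p, q in combinations(small_primes, 2)
    if p**q * q**p <= 800
)
print(hybrids)  # [72, 800] -> 72 = 2^3 * 3^2 and 800 = 2^5 * 5^2, so C(800) = 2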
+ +Find the sum of the perimeters of all almost equilateral triangles with integral side +lengths and area and whose perimeters do not exceed one billion (1,000,000,000). +""" + + +def solution(max_perimeter: int = 10**9) -> int: + """ + Returns the sum of the perimeters of all almost equilateral triangles with integral + side lengths and area and whose perimeters do not exceed max_perimeter + + >>> solution(20) + 16 + """ + + prev_value = 1 + value = 2 + + perimeters_sum = 0 + i = 0 + perimeter = 0 + while perimeter <= max_perimeter: + perimeters_sum += perimeter + + prev_value += 2 * value + value += prev_value + + perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 + i += 1 + + return perimeters_sum + + +if __name__ == "__main__": + print(f"{solution() = }") From 63710883c8634772fadf0145899cea4a1eadc31d Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 1 Apr 2023 15:23:21 +0300 Subject: [PATCH 285/368] Remove extra `len` calls in doubly-linked-list's methods (#8600) --- data_structures/linked_list/doubly_linked_list.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 41d07d63e..69763d12d 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -81,7 +81,9 @@ class DoublyLinkedList: .... IndexError: list index out of range """ - if not 0 <= index <= len(self): + length = len(self) + + if not 0 <= index <= length: raise IndexError("list index out of range") new_node = Node(data) if self.head is None: @@ -90,7 +92,7 @@ class DoublyLinkedList: self.head.previous = new_node new_node.next = self.head self.head = new_node - elif index == len(self): + elif index == length: self.tail.next = new_node new_node.previous = self.tail self.tail = new_node @@ -131,15 +133,17 @@ class DoublyLinkedList: .... 
IndexError: list index out of range """ - if not 0 <= index <= len(self) - 1: + length = len(self) + + if not 0 <= index <= length - 1: raise IndexError("list index out of range") delete_node = self.head # default first node - if len(self) == 1: + if length == 1: self.head = self.tail = None elif index == 0: self.head = self.head.next self.head.previous = None - elif index == len(self) - 1: + elif index == length - 1: delete_node = self.tail self.tail = self.tail.previous self.tail.next = None From 59cae167e0e6b830b7ff5c89f5f2b8c747fb84c2 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 19:22:33 +0300 Subject: [PATCH 286/368] Reduce the complexity of digital_image_processing/edge detection/canny.py (#8167) * Reduce the complexity of digital_image_processing/edge_detection/canny.py * Fix * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Fix review issues * Rename dst to destination --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .../edge_detection/canny.py | 127 ++++++++++-------- 1 file changed, 74 insertions(+), 53 deletions(-) diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index a83035526..f8cbeedb3 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -18,105 +18,126 @@ def gen_gaussian_kernel(k_size, sigma): return g -def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): - image_row, image_col = image.shape[0], image.shape[1] - # gaussian_filter - gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) - # get the gradient and degree by sobel_filter - sobel_grad, sobel_theta = sobel_filter(gaussian_out) - gradient_direction = np.rad2deg(sobel_theta) - gradient_direction += PI - - dst = np.zeros((image_row, image_col)) - +def suppress_non_maximum(image_shape, gradient_direction, sobel_grad): """ Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. 
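The rule just described can be seen on a single 3x3 patch (a minimal sketch with made-up gradient values, independent of the code that follows): when the gradient direction is roughly horizontal, the centre pixel survives only if it is at least as large as its east and west neighbours.

import numpy as np

sobel_patch = np.array([[1, 2, 1],
                        [3, 9, 4],
                        [2, 1, 2]])
row = col = 1
west, east = sobel_patch[row, col - 1], sobel_patch[row, col + 1]
centre = sobel_patch[row, col]
kept = centre if centre >= west and centre >= east else 0
print(kept)  # 9 -> a local maximum along its gradient direction, so it is preserved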
""" - for row in range(1, image_row - 1): - for col in range(1, image_col - 1): + destination = np.zeros(image_shape) + + for row in range(1, image_shape[0] - 1): + for col in range(1, image_shape[1] - 1): direction = gradient_direction[row, col] if ( - 0 <= direction < 22.5 + 0 <= direction < PI / 8 or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): w = sobel_grad[row, col - 1] e = sobel_grad[row, col + 1] if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (PI / 8 <= direction < 3 * PI / 8) or ( - 9 * PI / 8 <= direction < 11 * PI / 8 + elif ( + PI / 8 <= direction < 3 * PI / 8 + or 9 * PI / 8 <= direction < 11 * PI / 8 ): sw = sobel_grad[row + 1, col - 1] ne = sobel_grad[row - 1, col + 1] if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( - 11 * PI / 8 <= direction < 13 * PI / 8 + elif ( + 3 * PI / 8 <= direction < 5 * PI / 8 + or 11 * PI / 8 <= direction < 13 * PI / 8 ): n = sobel_grad[row - 1, col] s = sobel_grad[row + 1, col] if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( - 13 * PI / 8 <= direction < 15 * PI / 8 + elif ( + 5 * PI / 8 <= direction < 7 * PI / 8 + or 13 * PI / 8 <= direction < 15 * PI / 8 ): nw = sobel_grad[row - 1, col - 1] se = sobel_grad[row + 1, col + 1] if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - """ - High-Low threshold detection. If an edge pixel’s gradient value is higher - than the high threshold value, it is marked as a strong edge pixel. If an - edge pixel’s gradient value is smaller than the high threshold value and - larger than the low threshold value, it is marked as a weak edge pixel. If - an edge pixel's value is smaller than the low threshold value, it will be - suppressed. - """ - if dst[row, col] >= threshold_high: - dst[row, col] = strong - elif dst[row, col] <= threshold_low: - dst[row, col] = 0 + return destination + + +def detect_high_low_threshold( + image_shape, destination, threshold_low, threshold_high, weak, strong +): + """ + High-Low threshold detection. If an edge pixel’s gradient value is higher + than the high threshold value, it is marked as a strong edge pixel. If an + edge pixel’s gradient value is smaller than the high threshold value and + larger than the low threshold value, it is marked as a weak edge pixel. If + an edge pixel's value is smaller than the low threshold value, it will be + suppressed. + """ + for row in range(1, image_shape[0] - 1): + for col in range(1, image_shape[1] - 1): + if destination[row, col] >= threshold_high: + destination[row, col] = strong + elif destination[row, col] <= threshold_low: + destination[row, col] = 0 else: - dst[row, col] = weak + destination[row, col] = weak + +def track_edge(image_shape, destination, weak, strong): """ Edge tracking. Usually a weak edge pixel caused from true edges will be connected to a strong edge pixel while noise responses are unconnected. As long as there is one strong edge pixel that is involved in its 8-connected neighborhood, that weak edge point can be identified as one that should be preserved. 
""" - for row in range(1, image_row): - for col in range(1, image_col): - if dst[row, col] == weak: + for row in range(1, image_shape[0]): + for col in range(1, image_shape[1]): + if destination[row, col] == weak: if 255 in ( - dst[row, col + 1], - dst[row, col - 1], - dst[row - 1, col], - dst[row + 1, col], - dst[row - 1, col - 1], - dst[row + 1, col - 1], - dst[row - 1, col + 1], - dst[row + 1, col + 1], + destination[row, col + 1], + destination[row, col - 1], + destination[row - 1, col], + destination[row + 1, col], + destination[row - 1, col - 1], + destination[row + 1, col - 1], + destination[row - 1, col + 1], + destination[row + 1, col + 1], ): - dst[row, col] = strong + destination[row, col] = strong else: - dst[row, col] = 0 + destination[row, col] = 0 - return dst + +def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): + # gaussian_filter + gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) + # get the gradient and degree by sobel_filter + sobel_grad, sobel_theta = sobel_filter(gaussian_out) + gradient_direction = PI + np.rad2deg(sobel_theta) + + destination = suppress_non_maximum(image.shape, gradient_direction, sobel_grad) + + detect_high_low_threshold( + image.shape, destination, threshold_low, threshold_high, weak, strong + ) + + track_edge(image.shape, destination, weak, strong) + + return destination if __name__ == "__main__": # read original image in gray mode lena = cv2.imread(r"../image_data/lena.jpg", 0) # canny edge detection - canny_dst = canny(lena) - cv2.imshow("canny", canny_dst) + canny_destination = canny(lena) + cv2.imshow("canny", canny_destination) cv2.waitKey(0) From a213cea5f5a74e0a6b19240526779a3b0b1f270d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 12:39:22 -0400 Subject: [PATCH 287/368] Fix `mypy` errors in `dilation_operation.py` (#8595) * updating DIRECTORY.md * Fix mypy errors in dilation_operation.py * Rename functions to use snake case * updating DIRECTORY.md * updating DIRECTORY.md * Replace raw file string with pathlib Path * Update digital_image_processing/morphological_operations/dilation_operation.py Co-authored-by: Christian Clauss --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../dilation_operation.py | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py index c8380737d..e49b955c1 100644 --- a/digital_image_processing/morphological_operations/dilation_operation.py +++ b/digital_image_processing/morphological_operations/dilation_operation.py @@ -1,33 +1,35 @@ +from pathlib import Path + import numpy as np from PIL import Image -def rgb2gray(rgb: np.array) -> np.array: +def rgb_to_gray(rgb: np.ndarray) -> np.ndarray: """ Return gray image from rgb image - >>> rgb2gray(np.array([[[127, 255, 0]]])) + >>> rgb_to_gray(np.array([[[127, 255, 0]]])) array([[187.6453]]) - >>> rgb2gray(np.array([[[0, 0, 0]]])) + >>> rgb_to_gray(np.array([[[0, 0, 0]]])) array([[0.]]) - >>> rgb2gray(np.array([[[2, 4, 1]]])) + >>> rgb_to_gray(np.array([[[2, 4, 1]]])) array([[3.0598]]) - >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) + >>> rgb_to_gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) array([[159.0524, 90.0635, 117.6989]]) """ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * 
b -def gray2binary(gray: np.array) -> np.array: +def gray_to_binary(gray: np.ndarray) -> np.ndarray: """ Return binary image from gray image - >>> gray2binary(np.array([[127, 255, 0]])) + >>> gray_to_binary(np.array([[127, 255, 0]])) array([[False, True, False]]) - >>> gray2binary(np.array([[0]])) + >>> gray_to_binary(np.array([[0]])) array([[False]]) - >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]])) + >>> gray_to_binary(np.array([[26.2409, 4.9315, 1.4729]])) array([[False, False, False]]) - >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) + >>> gray_to_binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) array([[False, True, False], [False, True, False], [False, True, False]]) @@ -35,7 +37,7 @@ def gray2binary(gray: np.array) -> np.array: return (gray > 127) & (gray <= 255) -def dilation(image: np.array, kernel: np.array) -> np.array: +def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray: """ Return dilated image >>> dilation(np.array([[True, False, True]]), np.array([[0, 1, 0]])) @@ -61,14 +63,13 @@ def dilation(image: np.array, kernel: np.array) -> np.array: return output -# kernel to be applied -structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - - if __name__ == "__main__": # read original image - image = np.array(Image.open(r"..\image_data\lena.jpg")) - output = dilation(gray2binary(rgb2gray(image)), structuring_element) + lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg" + lena = np.array(Image.open(lena_path)) + # kernel to be applied + structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image pil_img = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png") From 84b6852de80bb51c185c30942bff47f9c451c74d Mon Sep 17 00:00:00 2001 From: Blake Reimer Date: Sat, 1 Apr 2023 10:43:07 -0600 Subject: [PATCH 288/368] Graham's Law (#8162) * grahams law * doctest and type hints * doctest formatting * peer review updates --- physics/grahams_law.py | 208 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 physics/grahams_law.py diff --git a/physics/grahams_law.py b/physics/grahams_law.py new file mode 100644 index 000000000..6e5d75127 --- /dev/null +++ b/physics/grahams_law.py @@ -0,0 +1,208 @@ +""" +Title: Graham's Law of Effusion + +Description: Graham's law of effusion states that the rate of effusion of a gas is +inversely proportional to the square root of the molar mass of its particles: + +r1/r2 = sqrt(m2/m1) + +r1 = Rate of effusion for the first gas. +r2 = Rate of effusion for the second gas. +m1 = Molar mass of the first gas. +m2 = Molar mass of the second gas. + +(Description adapted from https://en.wikipedia.org/wiki/Graham%27s_law) +""" + +from math import pow, sqrt + + +def validate(*values: float) -> bool: + """ + Input Parameters: + ----------------- + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) 
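A worked instance of the relation r1/r2 = sqrt(m2/m1) stated above (a sketch only, using the approximate molar masses from the doctests): hydrogen versus helium.

from math import sqrt

molar_mass_h2, molar_mass_he = 2.016, 4.002  # g/mol, approximate values
ratio = sqrt(molar_mass_he / molar_mass_h2)  # r_H2 / r_He
print(round(ratio, 6))  # 1.408943 -> hydrogen effuses about 1.41 times faster than helium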
+ + Returns: + -------- + >>> validate(2.016, 4.002) + True + >>> validate(-2.016, 4.002) + False + >>> validate() + False + """ + result = len(values) > 0 and all(value > 0.0 for value in values) + return result + + +def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> effusion_ratio(2.016, 4.002) + 1.408943 + >>> effusion_ratio(-2.016, 4.002) + ValueError('Input Error: Molar mass values must greater than 0.') + >>> effusion_ratio(2.016) + Traceback (most recent call last): + ... + TypeError: effusion_ratio() missing 1 required positional argument: 'molar_mass_2' + """ + return ( + round(sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(molar_mass_1, molar_mass_2) + else ValueError("Input Error: Molar mass values must greater than 0.") + ) + + +def first_effusion_rate( + effusion_rate: float, molar_mass_1: float, molar_mass_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + effusion_rate: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> first_effusion_rate(1, 2.016, 4.002) + 1.408943 + >>> first_effusion_rate(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> first_effusion_rate(1) + Traceback (most recent call last): + ... + TypeError: first_effusion_rate() missing 2 required positional arguments: \ +'molar_mass_1' and 'molar_mass_2' + >>> first_effusion_rate(1, 2.016) + Traceback (most recent call last): + ... + TypeError: first_effusion_rate() missing 1 required positional argument: \ +'molar_mass_2' + """ + return ( + round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(effusion_rate, molar_mass_1, molar_mass_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." + ) + ) + + +def second_effusion_rate( + effusion_rate: float, molar_mass_1: float, molar_mass_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + effusion_rate: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> second_effusion_rate(1, 2.016, 4.002) + 0.709752 + >>> second_effusion_rate(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> second_effusion_rate(1) + Traceback (most recent call last): + ... + TypeError: second_effusion_rate() missing 2 required positional arguments: \ +'molar_mass_1' and 'molar_mass_2' + >>> second_effusion_rate(1, 2.016) + Traceback (most recent call last): + ... + TypeError: second_effusion_rate() missing 1 required positional argument: \ +'molar_mass_2' + """ + return ( + round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(effusion_rate, molar_mass_1, molar_mass_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." 
+ ) + ) + + +def first_molar_mass( + molar_mass: float, effusion_rate_1: float, effusion_rate_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass: Molar mass of the first gas (g/mol, kg/kmol, etc.) + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + + Returns: + -------- + >>> first_molar_mass(2, 1.408943, 0.709752) + 0.507524 + >>> first_molar_mass(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> first_molar_mass(1) + Traceback (most recent call last): + ... + TypeError: first_molar_mass() missing 2 required positional arguments: \ +'effusion_rate_1' and 'effusion_rate_2' + >>> first_molar_mass(1, 2.016) + Traceback (most recent call last): + ... + TypeError: first_molar_mass() missing 1 required positional argument: \ +'effusion_rate_2' + """ + return ( + round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6) + if validate(molar_mass, effusion_rate_1, effusion_rate_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." + ) + ) + + +def second_molar_mass( + molar_mass: float, effusion_rate_1: float, effusion_rate_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass: Molar mass of the first gas (g/mol, kg/kmol, etc.) + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + + Returns: + -------- + >>> second_molar_mass(2, 1.408943, 0.709752) + 1.970351 + >>> second_molar_mass(-2, 1.408943, 0.709752) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> second_molar_mass(1) + Traceback (most recent call last): + ... + TypeError: second_molar_mass() missing 2 required positional arguments: \ +'effusion_rate_1' and 'effusion_rate_2' + >>> second_molar_mass(1, 2.016) + Traceback (most recent call last): + ... + TypeError: second_molar_mass() missing 1 required positional argument: \ +'effusion_rate_2' + """ + return ( + round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6) + if validate(molar_mass, effusion_rate_1, effusion_rate_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." 
+ ) + ) From 56a40eb3ee9aa151defd97597f4e67acf294089f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 20:43:11 +0300 Subject: [PATCH 289/368] Reenable files when TensorFlow supports the current Python (#8602) * Remove python_version < "3.11" for tensorflow * Reenable neural_network/input_data.py_tf * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix pre-commit * Try to fix * Fix * Fix * Reenable dynamic_programming/k_means_clustering_tensorflow.py_tf * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + ...py_tf => k_means_clustering_tensorflow.py} | 9 +- .../{input_data.py_tf => input_data.py} | 96 +++++++++---------- requirements.txt | 2 +- 4 files changed, 54 insertions(+), 55 deletions(-) rename dynamic_programming/{k_means_clustering_tensorflow.py_tf => k_means_clustering_tensorflow.py} (98%) rename neural_network/{input_data.py_tf => input_data.py} (83%) diff --git a/DIRECTORY.md b/DIRECTORY.md index c781b17bf..34967082b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -309,6 +309,7 @@ * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) + * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py) * [Knapsack](dynamic_programming/knapsack.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) @@ -685,6 +686,7 @@ * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) + * [Input Data](neural_network/input_data.py) * [Perceptron](neural_network/perceptron.py) * [Simple Neural Network](neural_network/simple_neural_network.py) diff --git a/dynamic_programming/k_means_clustering_tensorflow.py_tf b/dynamic_programming/k_means_clustering_tensorflow.py similarity index 98% rename from dynamic_programming/k_means_clustering_tensorflow.py_tf rename to dynamic_programming/k_means_clustering_tensorflow.py index 4fbcedeaa..8d3f6f0df 100644 --- a/dynamic_programming/k_means_clustering_tensorflow.py_tf +++ b/dynamic_programming/k_means_clustering_tensorflow.py @@ -1,9 +1,10 @@ -import tensorflow as tf from random import shuffle + +import tensorflow as tf from numpy import array -def TFKMeansCluster(vectors, noofclusters): +def tf_k_means_cluster(vectors, noofclusters): """ K-Means Clustering using TensorFlow. 'vectors' should be a n*k 2-D NumPy array, where n is the number @@ -30,7 +31,6 @@ def TFKMeansCluster(vectors, noofclusters): graph = tf.Graph() with graph.as_default(): - # SESSION OF COMPUTATION sess = tf.Session() @@ -95,8 +95,7 @@ def TFKMeansCluster(vectors, noofclusters): # iterations. 
To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. noofiterations = 100 - for iteration_n in range(noofiterations): - + for _ in range(noofiterations): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. diff --git a/neural_network/input_data.py_tf b/neural_network/input_data.py similarity index 83% rename from neural_network/input_data.py_tf rename to neural_network/input_data.py index 0e22ac0bc..2a32f0b82 100644 --- a/neural_network/input_data.py_tf +++ b/neural_network/input_data.py @@ -21,13 +21,10 @@ This module and all its submodules are deprecated. import collections import gzip import os +import urllib import numpy -from six.moves import urllib -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensorflow.python.framework import dtypes -from tensorflow.python.framework import random_seed +from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -46,16 +43,16 @@ def _read32(bytestream): def _extract_images(f): """Extract the images into a 4D uint8 numpy array [index, y, x, depth]. - Args: - f: A file object that can be passed into a gzip reader. + Args: + f: A file object that can be passed into a gzip reader. - Returns: - data: A 4D uint8 numpy array [index, y, x, depth]. + Returns: + data: A 4D uint8 numpy array [index, y, x, depth]. - Raises: - ValueError: If the bytestream does not start with 2051. + Raises: + ValueError: If the bytestream does not start with 2051. - """ + """ print("Extracting", f.name) with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) @@ -86,17 +83,17 @@ def _dense_to_one_hot(labels_dense, num_classes): def _extract_labels(f, one_hot=False, num_classes=10): """Extract the labels into a 1D uint8 numpy array [index]. - Args: - f: A file object that can be passed into a gzip reader. - one_hot: Does one hot encoding for the result. - num_classes: Number of classes for the one hot encoding. + Args: + f: A file object that can be passed into a gzip reader. + one_hot: Does one hot encoding for the result. + num_classes: Number of classes for the one hot encoding. - Returns: - labels: a 1D uint8 numpy array. + Returns: + labels: a 1D uint8 numpy array. - Raises: - ValueError: If the bystream doesn't start with 2049. - """ + Raises: + ValueError: If the bystream doesn't start with 2049. + """ print("Extracting", f.name) with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) @@ -115,8 +112,8 @@ def _extract_labels(f, one_hot=False, num_classes=10): class _DataSet: """Container class for a _DataSet (deprecated). - THIS CLASS IS DEPRECATED. - """ + THIS CLASS IS DEPRECATED. + """ @deprecated( None, @@ -135,21 +132,21 @@ class _DataSet: ): """Construct a _DataSet. - one_hot arg is used only if fake_data is true. `dtype` can be either - `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into - `[0, 1]`. Seed arg provides for convenient deterministic testing. + one_hot arg is used only if fake_data is true. `dtype` can be either + `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into + `[0, 1]`. Seed arg provides for convenient deterministic testing. - Args: - images: The images - labels: The labels - fake_data: Ignore inages and labels, use fake data. - one_hot: Bool, return the labels as one hot vectors (if True) or ints (if - False). 
- dtype: Output image dtype. One of [uint8, float32]. `uint8` output has - range [0,255]. float32 output has range [0,1]. - reshape: Bool. If True returned images are returned flattened to vectors. - seed: The random seed to use. - """ + Args: + images: The images + labels: The labels + fake_data: Ignore inages and labels, use fake data. + one_hot: Bool, return the labels as one hot vectors (if True) or ints (if + False). + dtype: Output image dtype. One of [uint8, float32]. `uint8` output has + range [0,255]. float32 output has range [0,1]. + reshape: Bool. If True returned images are returned flattened to vectors. + seed: The random seed to use. + """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seed1 if seed is None else seed2) @@ -206,8 +203,8 @@ class _DataSet: else: fake_label = 0 return ( - [fake_image for _ in xrange(batch_size)], - [fake_label for _ in xrange(batch_size)], + [fake_image for _ in range(batch_size)], + [fake_label for _ in range(batch_size)], ) start = self._index_in_epoch # Shuffle for the first epoch @@ -250,19 +247,19 @@ class _DataSet: def _maybe_download(filename, work_directory, source_url): """Download the data from source url, unless it's already here. - Args: - filename: string, name of the file in the directory. - work_directory: string, path to working directory. - source_url: url to download from if file doesn't exist. + Args: + filename: string, name of the file in the directory. + work_directory: string, path to working directory. + source_url: url to download from if file doesn't exist. - Returns: - Path to resulting file. - """ + Returns: + Path to resulting file. + """ if not gfile.Exists(work_directory): gfile.MakeDirs(work_directory) filepath = os.path.join(work_directory, filename) if not gfile.Exists(filepath): - urllib.request.urlretrieve(source_url, filepath) + urllib.request.urlretrieve(source_url, filepath) # noqa: S310 with gfile.GFile(filepath) as f: size = f.size() print("Successfully downloaded", filename, size, "bytes.") @@ -328,7 +325,8 @@ def read_data_sets( if not 0 <= validation_size <= len(train_images): raise ValueError( - f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}." + f"Validation size should be between 0 and {len(train_images)}. " + f"Received: {validation_size}." 
) validation_images = train_images[:validation_size] @@ -336,7 +334,7 @@ def read_data_sets( train_images = train_images[validation_size:] train_labels = train_labels[validation_size:] - options = dict(dtype=dtype, reshape=reshape, seed=seed) + options = {"dtype": dtype, "reshape": reshape, "seed": seed} train = _DataSet(train_images, train_labels, **options) validation = _DataSet(validation_images, validation_labels, **options) diff --git a/requirements.txt b/requirements.txt index a1d607df0..acfbc823e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ scikit-fuzzy scikit-learn statsmodels sympy -tensorflow; python_version < "3.11" +tensorflow texttable tweepy xgboost From 33114f0272bcc1fafa6ce0f40d92ded908747ce3 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 16:05:01 -0400 Subject: [PATCH 290/368] Revamp `md5.py` (#8065) * Add type hints to md5.py * Rename some vars to snake case * Specify functions imported from math * Rename vars and functions to be more descriptive * Make tests from test function into doctests * Clarify more var names * Refactor some MD5 code into preprocess function * Simplify loop indices in get_block_words * Add more detailed comments, docs, and doctests * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Add type hints to md5.py * Rename some vars to snake case * Specify functions imported from math * Rename vars and functions to be more descriptive * Make tests from test function into doctests * Clarify more var names * Refactor some MD5 code into preprocess function * Simplify loop indices in get_block_words * Add more detailed comments, docs, and doctests * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Convert str types to bytes * Add tests comparing md5_me to hashlib's md5 * Replace line-break backslashes with parentheses --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + hashes/md5.py | 376 ++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 292 insertions(+), 85 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 34967082b..b1adc23f6 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -717,6 +717,7 @@ * [Archimedes Principle](physics/archimedes_principle.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) + * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) diff --git a/hashes/md5.py b/hashes/md5.py index 2020bf2e5..2187006ec 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -1,91 +1,223 @@ -import math +""" +The MD5 algorithm is a hash function that's commonly used as a checksum to +detect data corruption. The algorithm works by processing a given message in +blocks of 512 bits, padding the message as needed. It uses the blocks to operate +a 128-bit state and performs a total of 64 such operations. Note that all values +are little-endian, so inputs are converted as needed. + +Although MD5 was used as a cryptographic hash function in the past, it's since +been cracked, so it shouldn't be used for security purposes. + +For more info, see https://en.wikipedia.org/wiki/MD5 +""" + +from collections.abc import Generator +from math import sin -def rearrange(bit_string_32): - """[summary] - Regroups the given binary string. 
+def to_little_endian(string_32: bytes) -> bytes: + """ + Converts the given string to little-endian in groups of 8 chars. Arguments: - bitString32 {[string]} -- [32 bit binary] + string_32 {[string]} -- [32-char string] Raises: - ValueError -- [if the given string not are 32 bit binary string] + ValueError -- [input is not 32 char] Returns: - [string] -- [32 bit binary string] - >>> rearrange('1234567890abcdfghijklmnopqrstuvw') - 'pqrstuvwhijklmno90abcdfg12345678' + 32-char little-endian string + >>> to_little_endian(b'1234567890abcdfghijklmnopqrstuvw') + b'pqrstuvwhijklmno90abcdfg12345678' + >>> to_little_endian(b'1234567890') + Traceback (most recent call last): + ... + ValueError: Input must be of length 32 """ + if len(string_32) != 32: + raise ValueError("Input must be of length 32") - if len(bit_string_32) != 32: - raise ValueError("Need length 32") - new_string = "" + little_endian = b"" for i in [3, 2, 1, 0]: - new_string += bit_string_32[8 * i : 8 * i + 8] - return new_string + little_endian += string_32[8 * i : 8 * i + 8] + return little_endian -def reformat_hex(i): - """[summary] - Converts the given integer into 8-digit hex number. +def reformat_hex(i: int) -> bytes: + """ + Converts the given non-negative integer to hex string. + + Example: Suppose the input is the following: + i = 1234 + + The input is 0x000004d2 in hex, so the little-endian hex string is + "d2040000". Arguments: - i {[int]} -- [integer] + i {[int]} -- [integer] + + Raises: + ValueError -- [input is negative] + + Returns: + 8-char little-endian hex string + + >>> reformat_hex(1234) + b'd2040000' >>> reformat_hex(666) - '9a020000' + b'9a020000' + >>> reformat_hex(0) + b'00000000' + >>> reformat_hex(1234567890) + b'd2029649' + >>> reformat_hex(1234567890987654321) + b'b11c6cb1' + >>> reformat_hex(-1) + Traceback (most recent call last): + ... + ValueError: Input must be non-negative """ + if i < 0: + raise ValueError("Input must be non-negative") - hexrep = format(i, "08x") - thing = "" + hex_rep = format(i, "08x")[-8:] + little_endian_hex = b"" for i in [3, 2, 1, 0]: - thing += hexrep[2 * i : 2 * i + 2] - return thing + little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8") + return little_endian_hex -def pad(bit_string): - """[summary] - Fills up the binary string to a 512 bit binary string +def preprocess(message: bytes) -> bytes: + """ + Preprocesses the message string: + - Convert message to bit string + - Pad bit string to a multiple of 512 chars: + - Append a 1 + - Append 0's until length = 448 (mod 512) + - Append length of original message (64 chars) + + Example: Suppose the input is the following: + message = "a" + + The message bit string is "01100001", which is 8 bits long. Thus, the + bit string needs 439 bits of padding so that + (bit_string + "1" + padding) = 448 (mod 512). + The message length is "000010000...0" in 64-bit little-endian binary. + The combined bit string is then 512 bits long. Arguments: - bitString {[string]} -- [binary string] + message {[string]} -- [message string] Returns: - [string] -- [binary string] + processed bit string padded to a multiple of 512 chars + + >>> preprocess(b"a") == (b"01100001" + b"1" + + ... 
(b"0" * 439) + b"00001000" + (b"0" * 56)) + True + >>> preprocess(b"") == b"1" + (b"0" * 447) + (b"0" * 64) + True """ - start_length = len(bit_string) - bit_string += "1" + bit_string = b"" + for char in message: + bit_string += format(char, "08b").encode("utf-8") + start_len = format(len(bit_string), "064b").encode("utf-8") + + # Pad bit_string to a multiple of 512 chars + bit_string += b"1" while len(bit_string) % 512 != 448: - bit_string += "0" - last_part = format(start_length, "064b") - bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32]) + bit_string += b"0" + bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32]) + return bit_string -def get_block(bit_string): - """[summary] - Iterator: - Returns by each call a list of length 16 with the 32 bit - integer blocks. +def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]: + """ + Splits bit string into blocks of 512 chars and yields each block as a list + of 32-bit words + + Example: Suppose the input is the following: + bit_string = + "000000000...0" + # 0x00 (32 bits, padded to the right) + "000000010...0" + # 0x01 (32 bits, padded to the right) + "000000100...0" + # 0x02 (32 bits, padded to the right) + "000000110...0" + # 0x03 (32 bits, padded to the right) + ... + "000011110...0" # 0x0a (32 bits, padded to the right) + + Then len(bit_string) == 512, so there'll be 1 block. The block is split + into 32-bit words, and each word is converted to little endian. The + first word is interpreted as 0 in decimal, the second word is + interpreted as 1 in decimal, etc. + + Thus, block_words == [[0, 1, 2, 3, ..., 15]]. Arguments: - bit_string {[string]} -- [binary string >= 512] + bit_string {[string]} -- [bit string with multiple of 512 as length] + + Raises: + ValueError -- [length of bit string isn't multiple of 512] + + Yields: + a list of 16 32-bit words + + >>> test_string = ("".join(format(n << 24, "032b") for n in range(16)) + ... .encode("utf-8")) + >>> list(get_block_words(test_string)) + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]] + >>> list(get_block_words(test_string * 4)) == [list(range(16))] * 4 + True + >>> list(get_block_words(b"1" * 512)) == [[4294967295] * 16] + True + >>> list(get_block_words(b"")) + [] + >>> list(get_block_words(b"1111")) + Traceback (most recent call last): + ... + ValueError: Input must have length that's a multiple of 512 """ + if len(bit_string) % 512 != 0: + raise ValueError("Input must have length that's a multiple of 512") - curr_pos = 0 - while curr_pos < len(bit_string): - curr_part = bit_string[curr_pos : curr_pos + 512] - my_splits = [] - for i in range(16): - my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2)) - yield my_splits - curr_pos += 512 + for pos in range(0, len(bit_string), 512): + block = bit_string[pos : pos + 512] + block_words = [] + for i in range(0, 512, 32): + block_words.append(int(to_little_endian(block[i : i + 32]), 2)) + yield block_words -def not32(i): +def not_32(i: int) -> int: """ - >>> not32(34) + Perform bitwise NOT on given int. + + Arguments: + i {[int]} -- [given int] + + Raises: + ValueError -- [input is negative] + + Returns: + Result of bitwise NOT on i + + >>> not_32(34) 4294967261 + >>> not_32(1234) + 4294966061 + >>> not_32(4294966061) + 1234 + >>> not_32(0) + 4294967295 + >>> not_32(1) + 4294967294 + >>> not_32(-1) + Traceback (most recent call last): + ... 
+ ValueError: Input must be non-negative """ + if i < 0: + raise ValueError("Input must be non-negative") + i_str = format(i, "032b") new_str = "" for c in i_str: @@ -93,35 +225,114 @@ def not32(i): return int(new_str, 2) -def sum32(a, b): +def sum_32(a: int, b: int) -> int: + """ + Add two numbers as 32-bit ints. + + Arguments: + a {[int]} -- [first given int] + b {[int]} -- [second given int] + + Returns: + (a + b) as an unsigned 32-bit int + + >>> sum_32(1, 1) + 2 + >>> sum_32(2, 3) + 5 + >>> sum_32(0, 0) + 0 + >>> sum_32(-1, -1) + 4294967294 + >>> sum_32(4294967295, 1) + 0 + """ return (a + b) % 2**32 -def leftrot32(i, s): - return (i << s) ^ (i >> (32 - s)) - - -def md5me(test_string): - """[summary] - Returns a 32-bit hash code of the string 'testString' +def left_rotate_32(i: int, shift: int) -> int: + """ + Rotate the bits of a given int left by a given amount. Arguments: - testString {[string]} -- [message] + i {[int]} -- [given int] + shift {[int]} -- [shift amount] + + Raises: + ValueError -- [either given int or shift is negative] + + Returns: + `i` rotated to the left by `shift` bits + + >>> left_rotate_32(1234, 1) + 2468 + >>> left_rotate_32(1111, 4) + 17776 + >>> left_rotate_32(2147483648, 1) + 1 + >>> left_rotate_32(2147483648, 3) + 4 + >>> left_rotate_32(4294967295, 4) + 4294967295 + >>> left_rotate_32(1234, 0) + 1234 + >>> left_rotate_32(0, 0) + 0 + >>> left_rotate_32(-1, 0) + Traceback (most recent call last): + ... + ValueError: Input must be non-negative + >>> left_rotate_32(0, -1) + Traceback (most recent call last): + ... + ValueError: Shift must be non-negative + """ + if i < 0: + raise ValueError("Input must be non-negative") + if shift < 0: + raise ValueError("Shift must be non-negative") + return ((i << shift) ^ (i >> (32 - shift))) % 2**32 + + +def md5_me(message: bytes) -> bytes: + """ + Returns the 32-char MD5 hash of a given message. + + Reference: https://en.wikipedia.org/wiki/MD5#Algorithm + + Arguments: + message {[string]} -- [message] + + Returns: + 32-char MD5 hash string + + >>> md5_me(b"") + b'd41d8cd98f00b204e9800998ecf8427e' + >>> md5_me(b"The quick brown fox jumps over the lazy dog") + b'9e107d9d372bb6826bd81d3542a419d6' + >>> md5_me(b"The quick brown fox jumps over the lazy dog.") + b'e4d909c290d0fb1ca068ffaddf22cbd0' + + >>> import hashlib + >>> from string import ascii_letters + >>> msgs = [b"", ascii_letters.encode("utf-8"), "Üñîçø∂é".encode("utf-8"), + ... 
b"The quick brown fox jumps over the lazy dog."] + >>> all(md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8") for msg in msgs) + True """ - bs = "" - for i in test_string: - bs += format(ord(i), "08b") - bs = pad(bs) + # Convert to bit string, add padding and append message length + bit_string = preprocess(message) - tvals = [int(2**32 * abs(math.sin(i + 1))) for i in range(64)] + added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)] + # Starting states a0 = 0x67452301 b0 = 0xEFCDAB89 c0 = 0x98BADCFE d0 = 0x10325476 - s = [ + shift_amounts = [ 7, 12, 17, @@ -188,51 +399,46 @@ def md5me(test_string): 21, ] - for m in get_block(bs): + # Process bit string in chunks, each with 16 32-char words + for block_words in get_block_words(bit_string): a = a0 b = b0 c = c0 d = d0 + + # Hash current chunk for i in range(64): if i <= 15: - # f = (B & C) | (not32(B) & D) + # f = (b & c) | (not_32(b) & d) # Alternate definition for f f = d ^ (b & (c ^ d)) g = i elif i <= 31: - # f = (D & B) | (not32(D) & C) + # f = (d & b) | (not_32(d) & c) # Alternate definition for f f = c ^ (d & (b ^ c)) g = (5 * i + 1) % 16 elif i <= 47: f = b ^ c ^ d g = (3 * i + 5) % 16 else: - f = c ^ (b | not32(d)) + f = c ^ (b | not_32(d)) g = (7 * i) % 16 - dtemp = d + f = (f + a + added_consts[i] + block_words[g]) % 2**32 + a = d d = c c = b - b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i])) - a = dtemp - a0 = sum32(a0, a) - b0 = sum32(b0, b) - c0 = sum32(c0, c) - d0 = sum32(d0, d) + b = sum_32(b, left_rotate_32(f, shift_amounts[i])) + + # Add hashed chunk to running total + a0 = sum_32(a0, a) + b0 = sum_32(b0, b) + c0 = sum_32(c0, c) + d0 = sum_32(d0, d) digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0) return digest -def test(): - assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e" - assert ( - md5me("The quick brown fox jumps over the lazy dog") - == "9e107d9d372bb6826bd81d3542a419d6" - ) - print("Success.") - - if __name__ == "__main__": - test() import doctest doctest.testmod() From 5ca71895630719cc41f8171aba8be461fb8cc9d2 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 2 Apr 2023 06:48:19 +0200 Subject: [PATCH 291/368] Rename quantum_random.py.DISABLED.txt to quantum_random.py (#8601) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + quantum/{quantum_random.py.DISABLED.txt => quantum_random.py} | 0 2 files changed, 1 insertion(+) rename quantum/{quantum_random.py.DISABLED.txt => quantum_random.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index b1adc23f6..8dd3fb5d9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1033,6 +1033,7 @@ * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum Random](quantum/quantum_random.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/quantum/quantum_random.py.DISABLED.txt b/quantum/quantum_random.py similarity index 100% rename from quantum/quantum_random.py.DISABLED.txt rename to quantum/quantum_random.py From ebc2d5d79f837931e80f7d5e7e1dece9ef48f760 Mon Sep 17 00:00:00 2001 From: Ishab Date: Sun, 2 Apr 2023 13:04:11 +0100 Subject: [PATCH 292/368] Add Project Euler problem 79 solution 1 (#8607) Co-authored-by: Dhruv Manilawala --- project_euler/problem_079/__init__.py | 0 
project_euler/problem_079/keylog.txt | 50 ++++++++++++++++ project_euler/problem_079/keylog_test.txt | 16 ++++++ project_euler/problem_079/sol1.py | 69 +++++++++++++++++++++++ 4 files changed, 135 insertions(+) create mode 100644 project_euler/problem_079/__init__.py create mode 100644 project_euler/problem_079/keylog.txt create mode 100644 project_euler/problem_079/keylog_test.txt create mode 100644 project_euler/problem_079/sol1.py diff --git a/project_euler/problem_079/__init__.py b/project_euler/problem_079/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/project_euler/problem_079/keylog.txt b/project_euler/problem_079/keylog.txt new file mode 100644 index 000000000..41f156732 --- /dev/null +++ b/project_euler/problem_079/keylog.txt @@ -0,0 +1,50 @@ +319 +680 +180 +690 +129 +620 +762 +689 +762 +318 +368 +710 +720 +710 +629 +168 +160 +689 +716 +731 +736 +729 +316 +729 +729 +710 +769 +290 +719 +680 +318 +389 +162 +289 +162 +718 +729 +319 +790 +680 +890 +362 +319 +760 +316 +729 +380 +319 +728 +716 diff --git a/project_euler/problem_079/keylog_test.txt b/project_euler/problem_079/keylog_test.txt new file mode 100644 index 000000000..2c7024bde --- /dev/null +++ b/project_euler/problem_079/keylog_test.txt @@ -0,0 +1,16 @@ +319 +680 +180 +690 +129 +620 +698 +318 +328 +310 +320 +610 +629 +198 +190 +631 diff --git a/project_euler/problem_079/sol1.py b/project_euler/problem_079/sol1.py new file mode 100644 index 000000000..d34adcd24 --- /dev/null +++ b/project_euler/problem_079/sol1.py @@ -0,0 +1,69 @@ +""" +Project Euler Problem 79: https://projecteuler.net/problem=79 + +Passcode derivation + +A common security method used for online banking is to ask the user for three +random characters from a passcode. For example, if the passcode was 531278, +they may ask for the 2nd, 3rd, and 5th characters; the expected reply would +be: 317. + +The text file, keylog.txt, contains fifty successful login attempts. + +Given that the three characters are always asked for in order, analyse the file +so as to determine the shortest possible secret passcode of unknown length. +""" +import itertools +from pathlib import Path + + +def find_secret_passcode(logins: list[str]) -> int: + """ + Returns the shortest possible secret passcode of unknown length. + + >>> find_secret_passcode(["135", "259", "235", "189", "690", "168", "120", + ... "136", "289", "589", "160", "165", "580", "369", "250", "280"]) + 12365890 + + >>> find_secret_passcode(["426", "281", "061", "819" "268", "406", "420", + ... "428", "209", "689", "019", "421", "469", "261", "681", "201"]) + 4206819 + """ + + # Split each login by character e.g. '319' -> ('3', '1', '9') + split_logins = [tuple(login) for login in logins] + + unique_chars = {char for login in split_logins for char in login} + + for permutation in itertools.permutations(unique_chars): + satisfied = True + for login in logins: + if not ( + permutation.index(login[0]) + < permutation.index(login[1]) + < permutation.index(login[2]) + ): + satisfied = False + break + + if satisfied: + return int("".join(permutation)) + + raise Exception("Unable to find the secret passcode") + + +def solution(input_file: str = "keylog.txt") -> int: + """ + Returns the shortest possible secret passcode of unknown length + for successful login attempts given by `input_file` text file. 
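# Why the brute-force search below stays cheap: the fifty logins contain at most
# eight distinct digits, so itertools.permutations examines at most 8! = 40320
# candidate orderings. An alternative that avoids the factorial scan is a
# topological sort of the "comes before" constraints; a minimal sketch, assuming
# the orderings are consistent (passcode_by_toposort is a name used here only
# for illustration, not part of this solution):
#
#     def passcode_by_toposort(logins: list[str]) -> str:
#         chars = {c for login in logins for c in login}
#         after = {c: set() for c in chars}  # digits known to come after c
#         for a, b, c in logins:
#             after[a] |= {b, c}
#             after[b].add(c)
#         order = []
#         while chars:
#             # pick a digit that no remaining digit must precede
#             head = next(x for x in chars if all(x not in after[y] for y in chars))
#             order.append(head)
#             chars.remove(head)
#         return "".join(order)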
+ + >>> solution("keylog_test.txt") + 6312980 + """ + logins = Path(__file__).parent.joinpath(input_file).read_text().splitlines() + + return find_secret_passcode(logins) + + +if __name__ == "__main__": + print(f"{solution() = }") From 740ecfb121009612310ab9e1bc9d6ffe22b62ae4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:00:31 +0530 Subject: [PATCH 293/368] [pre-commit.ci] pre-commit autoupdate (#8611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.259 → v0.0.260](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.259...v0.0.260) - [github.com/psf/black: 23.1.0 → 23.3.0](https://github.com/psf/black/compare/23.1.0...23.3.0) - [github.com/abravalheri/validate-pyproject: v0.12.1 → v0.12.2](https://github.com/abravalheri/validate-pyproject/compare/v0.12.1...v0.12.2) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 72a878387..d54ce5add 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.259 + rev: v0.0.260 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.1.0 + rev: 23.3.0 hooks: - id: black @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.1 + rev: v0.12.2 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 8dd3fb5d9..3764c471c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -922,6 +922,8 @@ * [Sol1](project_euler/problem_077/sol1.py) * Problem 078 * [Sol1](project_euler/problem_078/sol1.py) + * Problem 079 + * [Sol1](project_euler/problem_079/sol1.py) * Problem 080 * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 From b2b8585e63664a0c7aa18b95528e345c2738c4ae Mon Sep 17 00:00:00 2001 From: Ishan Dutta Date: Fri, 7 Apr 2023 21:21:25 +0530 Subject: [PATCH 294/368] Add LeNet Implementation in PyTorch (#7070) * add torch to requirements * add lenet architecture in pytorch * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hints * remove file * add type hints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update variable name * add fail test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add newline * reformatting --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- computer_vision/lenet_pytorch.py | 82 ++++++++++++++++++++++++++++++++ requirements.txt | 1 + 2 files changed, 83 insertions(+) create mode 100644 computer_vision/lenet_pytorch.py diff --git a/computer_vision/lenet_pytorch.py b/computer_vision/lenet_pytorch.py new file mode 100644 index 000000000..177a5ebfc --- /dev/null +++ b/computer_vision/lenet_pytorch.py @@ -0,0 +1,82 @@ +""" +LeNet Network + +Paper: http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf +""" + +import numpy +import torch +import torch.nn as nn + + +class 
LeNet(nn.Module): + def __init__(self) -> None: + super().__init__() + + self.tanh = nn.Tanh() + self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2) + + self.conv1 = nn.Conv2d( + in_channels=1, + out_channels=6, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + self.conv2 = nn.Conv2d( + in_channels=6, + out_channels=16, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + self.conv3 = nn.Conv2d( + in_channels=16, + out_channels=120, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + + self.linear1 = nn.Linear(120, 84) + self.linear2 = nn.Linear(84, 10) + + def forward(self, image_array: numpy.ndarray) -> numpy.ndarray: + image_array = self.tanh(self.conv1(image_array)) + image_array = self.avgpool(image_array) + image_array = self.tanh(self.conv2(image_array)) + image_array = self.avgpool(image_array) + image_array = self.tanh(self.conv3(image_array)) + + image_array = image_array.reshape(image_array.shape[0], -1) + image_array = self.tanh(self.linear1(image_array)) + image_array = self.linear2(image_array) + return image_array + + +def test_model(image_tensor: torch.tensor) -> bool: + """ + Test the model on an input batch of 64 images + + Args: + image_tensor (torch.tensor): Batch of Images for the model + + >>> test_model(torch.randn(64, 1, 32, 32)) + True + + """ + try: + model = LeNet() + output = model(image_tensor) + except RuntimeError: + return False + + return output.shape == torch.zeros([64, 10]).shape + + +if __name__ == "__main__": + random_image_1 = torch.randn(64, 1, 32, 32) + random_image_2 = torch.randn(1, 32, 32) + + print(f"random_image_1 Model Passed: {test_model(random_image_1)}") + print(f"\nrandom_image_2 Model Passed: {test_model(random_image_2)}") diff --git a/requirements.txt b/requirements.txt index acfbc823e..e159fe010 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,7 @@ statsmodels sympy tensorflow texttable +torch tweepy xgboost yulewalker From 179298e3a291470ef30e850f23d98c2fb9055202 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 8 Apr 2023 02:52:26 +0200 Subject: [PATCH 295/368] Revert "Add LeNet Implementation in PyTorch (#7070)" (#8621) This reverts commit b2b8585e63664a0c7aa18b95528e345c2738c4ae. 
--- computer_vision/lenet_pytorch.py | 82 -------------------------------- requirements.txt | 1 - 2 files changed, 83 deletions(-) delete mode 100644 computer_vision/lenet_pytorch.py diff --git a/computer_vision/lenet_pytorch.py b/computer_vision/lenet_pytorch.py deleted file mode 100644 index 177a5ebfc..000000000 --- a/computer_vision/lenet_pytorch.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -LeNet Network - -Paper: http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf -""" - -import numpy -import torch -import torch.nn as nn - - -class LeNet(nn.Module): - def __init__(self) -> None: - super().__init__() - - self.tanh = nn.Tanh() - self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2) - - self.conv1 = nn.Conv2d( - in_channels=1, - out_channels=6, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - self.conv2 = nn.Conv2d( - in_channels=6, - out_channels=16, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - self.conv3 = nn.Conv2d( - in_channels=16, - out_channels=120, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - - self.linear1 = nn.Linear(120, 84) - self.linear2 = nn.Linear(84, 10) - - def forward(self, image_array: numpy.ndarray) -> numpy.ndarray: - image_array = self.tanh(self.conv1(image_array)) - image_array = self.avgpool(image_array) - image_array = self.tanh(self.conv2(image_array)) - image_array = self.avgpool(image_array) - image_array = self.tanh(self.conv3(image_array)) - - image_array = image_array.reshape(image_array.shape[0], -1) - image_array = self.tanh(self.linear1(image_array)) - image_array = self.linear2(image_array) - return image_array - - -def test_model(image_tensor: torch.tensor) -> bool: - """ - Test the model on an input batch of 64 images - - Args: - image_tensor (torch.tensor): Batch of Images for the model - - >>> test_model(torch.randn(64, 1, 32, 32)) - True - - """ - try: - model = LeNet() - output = model(image_tensor) - except RuntimeError: - return False - - return output.shape == torch.zeros([64, 10]).shape - - -if __name__ == "__main__": - random_image_1 = torch.randn(64, 1, 32, 32) - random_image_2 = torch.randn(1, 32, 32) - - print(f"random_image_1 Model Passed: {test_model(random_image_1)}") - print(f"\nrandom_image_2 Model Passed: {test_model(random_image_2)}") diff --git a/requirements.txt b/requirements.txt index e159fe010..acfbc823e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,6 @@ statsmodels sympy tensorflow texttable -torch tweepy xgboost yulewalker From 5cb0a000c47398c6d8af1ac43e2f83ae018f7182 Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 8 Apr 2023 14:41:08 +0300 Subject: [PATCH 296/368] Queue implementation using two Stacks (#8617) * Queue implementation using two Stacks * fix typo in queue/queue_on_two_stacks.py * add 'iterable' to queue_on_two_stacks initializer * make queue_on_two_stacks.py generic class * fix ruff-UP007 in queue_on_two_stacks.py * enhance readability in queue_on_two_stacks.py * Create queue_by_two_stacks.py --------- Co-authored-by: Christian Clauss --- data_structures/queue/queue_by_two_stacks.py | 115 ++++++++++++++++ data_structures/queue/queue_on_two_stacks.py | 137 +++++++++++++++++++ 2 files changed, 252 insertions(+) create mode 100644 data_structures/queue/queue_by_two_stacks.py create mode 100644 data_structures/queue/queue_on_two_stacks.py diff --git a/data_structures/queue/queue_by_two_stacks.py b/data_structures/queue/queue_by_two_stacks.py new file mode 100644 index 000000000..cd62f155a 
--- /dev/null +++ b/data_structures/queue/queue_by_two_stacks.py @@ -0,0 +1,115 @@ +"""Queue implementation using two stacks""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByTwoStacks(Generic[_T]): + def __init__(self, iterable: Iterable[_T] | None = None) -> None: + """ + >>> QueueByTwoStacks() + Queue(()) + >>> QueueByTwoStacks([10, 20, 30]) + Queue((10, 20, 30)) + >>> QueueByTwoStacks((i**2 for i in range(1, 4))) + Queue((1, 4, 9)) + """ + self._stack1: list[_T] = list(iterable or []) + self._stack2: list[_T] = [] + + def __len__(self) -> int: + """ + >>> len(QueueByTwoStacks()) + 0 + >>> from string import ascii_lowercase + >>> len(QueueByTwoStacks(ascii_lowercase)) + 26 + >>> queue = QueueByTwoStacks() + >>> for i in range(1, 11): + ... queue.put(i) + ... + >>> len(queue) + 10 + >>> for i in range(2): + ... queue.get() + 1 + 2 + >>> len(queue) + 8 + """ + + return len(self._stack1) + len(self._stack2) + + def __repr__(self) -> str: + """ + >>> queue = QueueByTwoStacks() + >>> queue + Queue(()) + >>> str(queue) + 'Queue(())' + >>> queue.put(10) + >>> queue + Queue((10,)) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue((10, 20, 30)) + """ + return f"Queue({tuple(self._stack2[::-1] + self._stack1)})" + + def put(self, item: _T) -> None: + """ + Put `item` into the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) + 2 + >>> queue + Queue((10, 20)) + """ + + self._stack1.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByTwoStacks((10, 20, 30)) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) + 1 + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... + IndexError: Queue is empty + """ + + # To reduce number of attribute look-ups in `while` loop. + stack1_pop = self._stack1.pop + stack2_append = self._stack2.append + + if not self._stack2: + while self._stack1: + stack2_append(stack1_pop()) + + if not self._stack2: + raise IndexError("Queue is empty") + return self._stack2.pop() + + +if __name__ == "__main__": + from doctest import testmod + + testmod() diff --git a/data_structures/queue/queue_on_two_stacks.py b/data_structures/queue/queue_on_two_stacks.py new file mode 100644 index 000000000..61db2b512 --- /dev/null +++ b/data_structures/queue/queue_on_two_stacks.py @@ -0,0 +1,137 @@ +"""Queue implementation using two stacks""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByTwoStacks(Generic[_T]): + def __init__(self, iterable: Iterable[_T] | None = None) -> None: + """ + >>> queue1 = QueueByTwoStacks() + >>> str(queue1) + 'Queue([])' + >>> queue2 = QueueByTwoStacks([10, 20, 30]) + >>> str(queue2) + 'Queue([10, 20, 30])' + >>> queue3 = QueueByTwoStacks((i**2 for i in range(1, 4))) + >>> str(queue3) + 'Queue([1, 4, 9])' + """ + + self._stack1: list[_T] = [] if iterable is None else list(iterable) + self._stack2: list[_T] = [] + + def __len__(self) -> int: + """ + >>> queue = QueueByTwoStacks() + >>> for i in range(1, 11): + ... queue.put(i) + ... + >>> len(queue) == 10 + True + >>> for i in range(2): + ... 
queue.get() + 1 + 2 + >>> len(queue) == 8 + True + """ + + return len(self._stack1) + len(self._stack2) + + def __repr__(self) -> str: + """ + >>> queue = QueueByTwoStacks() + >>> queue + Queue([]) + >>> str(queue) + 'Queue([])' + >>> queue.put(10) + >>> queue + Queue([10]) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue([10, 20, 30]) + """ + + items = self._stack2[::-1] + self._stack1 + return f"Queue({items})" + + def put(self, item: _T) -> None: + """ + Put `item` into the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) == 2 + True + >>> str(queue) + 'Queue([10, 20])' + """ + + self._stack1.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByTwoStacks() + >>> for i in (10, 20, 30): + ... queue.put(i) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) == 1 + True + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... + IndexError: Queue is empty + """ + + # To reduce number of attribute look-ups in `while` loop. + stack1_pop = self._stack1.pop + stack2_append = self._stack2.append + + if not self._stack2: + while self._stack1: + stack2_append(stack1_pop()) + + if not self._stack2: + raise IndexError("Queue is empty") + return self._stack2.pop() + + def size(self) -> int: + """ + Returns the length of the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.size() + 0 + >>> queue.put(10) + >>> queue.put(20) + >>> queue.size() + 2 + >>> queue.get() + 10 + >>> queue.size() == 1 + True + """ + + return len(self) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2f9b03393c75f3ab14b491becae4ac5caf26de17 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 8 Apr 2023 14:16:19 +0200 Subject: [PATCH 297/368] Delete queue_on_two_stacks.py which duplicates queue_by_two_stacks.py (#8624) * Delete queue_on_two_stacks.py which duplicates queue_by_two_stacks.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/queue/queue_on_two_stacks.py | 137 ------------------- 2 files changed, 1 insertion(+), 137 deletions(-) delete mode 100644 data_structures/queue/queue_on_two_stacks.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3764c471c..e3e0748ec 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -232,6 +232,7 @@ * [Double Ended Queue](data_structures/queue/double_ended_queue.py) * [Linked Queue](data_structures/queue/linked_queue.py) * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) + * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py) * [Queue On List](data_structures/queue/queue_on_list.py) * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) * Stacks diff --git a/data_structures/queue/queue_on_two_stacks.py b/data_structures/queue/queue_on_two_stacks.py deleted file mode 100644 index 61db2b512..000000000 --- a/data_structures/queue/queue_on_two_stacks.py +++ /dev/null @@ -1,137 +0,0 @@ -"""Queue implementation using two stacks""" - -from collections.abc import Iterable -from typing import Generic, TypeVar - -_T = TypeVar("_T") - - -class QueueByTwoStacks(Generic[_T]): - def __init__(self, iterable: Iterable[_T] | None = None) -> None: - """ - >>> queue1 = QueueByTwoStacks() - >>> str(queue1) - 'Queue([])' - >>> queue2 = QueueByTwoStacks([10, 20, 30]) - >>> str(queue2) - 'Queue([10, 20, 30])' - >>> queue3 = 
QueueByTwoStacks((i**2 for i in range(1, 4))) - >>> str(queue3) - 'Queue([1, 4, 9])' - """ - - self._stack1: list[_T] = [] if iterable is None else list(iterable) - self._stack2: list[_T] = [] - - def __len__(self) -> int: - """ - >>> queue = QueueByTwoStacks() - >>> for i in range(1, 11): - ... queue.put(i) - ... - >>> len(queue) == 10 - True - >>> for i in range(2): - ... queue.get() - 1 - 2 - >>> len(queue) == 8 - True - """ - - return len(self._stack1) + len(self._stack2) - - def __repr__(self) -> str: - """ - >>> queue = QueueByTwoStacks() - >>> queue - Queue([]) - >>> str(queue) - 'Queue([])' - >>> queue.put(10) - >>> queue - Queue([10]) - >>> queue.put(20) - >>> queue.put(30) - >>> queue - Queue([10, 20, 30]) - """ - - items = self._stack2[::-1] + self._stack1 - return f"Queue({items})" - - def put(self, item: _T) -> None: - """ - Put `item` into the Queue - - >>> queue = QueueByTwoStacks() - >>> queue.put(10) - >>> queue.put(20) - >>> len(queue) == 2 - True - >>> str(queue) - 'Queue([10, 20])' - """ - - self._stack1.append(item) - - def get(self) -> _T: - """ - Get `item` from the Queue - - >>> queue = QueueByTwoStacks() - >>> for i in (10, 20, 30): - ... queue.put(i) - >>> queue.get() - 10 - >>> queue.put(40) - >>> queue.get() - 20 - >>> queue.get() - 30 - >>> len(queue) == 1 - True - >>> queue.get() - 40 - >>> queue.get() - Traceback (most recent call last): - ... - IndexError: Queue is empty - """ - - # To reduce number of attribute look-ups in `while` loop. - stack1_pop = self._stack1.pop - stack2_append = self._stack2.append - - if not self._stack2: - while self._stack1: - stack2_append(stack1_pop()) - - if not self._stack2: - raise IndexError("Queue is empty") - return self._stack2.pop() - - def size(self) -> int: - """ - Returns the length of the Queue - - >>> queue = QueueByTwoStacks() - >>> queue.size() - 0 - >>> queue.put(10) - >>> queue.put(20) - >>> queue.size() - 2 - >>> queue.get() - 10 - >>> queue.size() == 1 - True - """ - - return len(self) - - -if __name__ == "__main__": - from doctest import testmod - - testmod() From 14bdd174bba7828ac2bf476f3697aa13fa179492 Mon Sep 17 00:00:00 2001 From: isidroas Date: Sat, 8 Apr 2023 19:39:24 +0200 Subject: [PATCH 298/368] Bloom Filter (#8615) * Bloom filter with tests * has functions constant * fix type * isort * passing ruff * type hints * type hints * from fail to erro * captital leter * type hints requested by boot * descriptive name for m * more descriptibe arguments II * moved movies_test to doctest * commented doctest * removed test_probability * estimated error * added types * again hash_ * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * from b to bloom * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * syntax error in dict comprehension * from goodfather to godfather * removed Interestellar * forgot the last Godfather * Revert "removed Interestellar" This reverts commit 35fa5f5c4bf101d073aad43c37b0a423d8975071. 
* pretty dict * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bloom_filter.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/bloom_filter.py | 105 ++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 data_structures/hashing/bloom_filter.py diff --git a/data_structures/hashing/bloom_filter.py b/data_structures/hashing/bloom_filter.py new file mode 100644 index 000000000..7fd0985bd --- /dev/null +++ b/data_structures/hashing/bloom_filter.py @@ -0,0 +1,105 @@ +""" +See https://en.wikipedia.org/wiki/Bloom_filter + +The use of this data structure is to test membership in a set. +Compared to Python's built-in set() it is more space-efficient. +In the following example, only 8 bits of memory will be used: +>>> bloom = Bloom(size=8) + +Initially, the filter contains all zeros: +>>> bloom.bitstring +'00000000' + +When an element is added, two bits are set to 1 +since there are 2 hash functions in this implementation: +>>> "Titanic" in bloom +False +>>> bloom.add("Titanic") +>>> bloom.bitstring +'01100000' +>>> "Titanic" in bloom +True + +However, sometimes only one bit is added +because both hash functions return the same value +>>> bloom.add("Avatar") +>>> "Avatar" in bloom +True +>>> bloom.format_hash("Avatar") +'00000100' +>>> bloom.bitstring +'01100100' + +Not added elements should return False ... +>>> not_present_films = ("The Godfather", "Interstellar", "Parasite", "Pulp Fiction") +>>> { +... film: bloom.format_hash(film) for film in not_present_films +... } # doctest: +NORMALIZE_WHITESPACE +{'The Godfather': '00000101', + 'Interstellar': '00000011', + 'Parasite': '00010010', + 'Pulp Fiction': '10000100'} +>>> any(film in bloom for film in not_present_films) +False + +but sometimes there are false positives: +>>> "Ratatouille" in bloom +True +>>> bloom.format_hash("Ratatouille") +'01100000' + +The probability increases with the number of elements added. +The probability decreases with the number of bits in the bitarray. 
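# Background for the estimate shown below: this class reports the fraction of
# bits currently set raised to the number of hash functions, i.e.
# (bits_set / size) ** len(HASH_FUNCTIONS). For comparison, the standard
# Bloom-filter approximation (not used by this class) with m bits, k hash
# functions and n inserted items is (1 - exp(-k * n / m)) ** k; a rough check
# with the toy numbers of this example (m=8, k=2, n=2 after "Titanic" and
# "Avatar" were added):
#
#     from math import exp
#     round((1 - exp(-2 * 2 / 8)) ** 2, 3)  # ~0.155, same ballpark as 0.140625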
+>>> bloom.estimated_error_rate +0.140625 +>>> bloom.add("The Godfather") +>>> bloom.estimated_error_rate +0.25 +>>> bloom.bitstring +'01100101' +""" +from hashlib import md5, sha256 + +HASH_FUNCTIONS = (sha256, md5) + + +class Bloom: + def __init__(self, size: int = 8) -> None: + self.bitarray = 0b0 + self.size = size + + def add(self, value: str) -> None: + h = self.hash_(value) + self.bitarray |= h + + def exists(self, value: str) -> bool: + h = self.hash_(value) + return (h & self.bitarray) == h + + def __contains__(self, other: str) -> bool: + return self.exists(other) + + def format_bin(self, bitarray: int) -> str: + res = bin(bitarray)[2:] + return res.zfill(self.size) + + @property + def bitstring(self) -> str: + return self.format_bin(self.bitarray) + + def hash_(self, value: str) -> int: + res = 0b0 + for func in HASH_FUNCTIONS: + position = ( + int.from_bytes(func(value.encode()).digest(), "little") % self.size + ) + res |= 2**position + return res + + def format_hash(self, value: str) -> str: + return self.format_bin(self.hash_(value)) + + @property + def estimated_error_rate(self) -> float: + n_ones = bin(self.bitarray).count("1") + return (n_ones / self.size) ** len(HASH_FUNCTIONS) From d182f95646aa7c515afe0912a34e8c2a11a34ca3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 23:43:17 +0200 Subject: [PATCH 299/368] [pre-commit.ci] pre-commit autoupdate (#8634) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.260 → v0.0.261](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.260...v0.0.261) - [github.com/pre-commit/mirrors-mypy: v1.1.1 → v1.2.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.1.1...v1.2.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d54ce5add..55345a574 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.260 + rev: v0.0.261 hooks: - id: ruff @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.1.1 + rev: v1.2.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index e3e0748ec..36f5a752c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -195,6 +195,7 @@ * [Alternate Disjoint Set](data_structures/disjoint_set/alternate_disjoint_set.py) * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py) * Hashing + * [Bloom Filter](data_structures/hashing/bloom_filter.py) * [Double Hash](data_structures/hashing/double_hash.py) * [Hash Map](data_structures/hashing/hash_map.py) * [Hash Table](data_structures/hashing/hash_table.py) From 54dedf844a30d39bd42c66ebf9cd67ec186f47bb Mon Sep 17 00:00:00 2001 From: Diego Gasco <62801631+Diegomangasco@users.noreply.github.com> Date: Mon, 17 Apr 2023 00:34:22 +0200 Subject: [PATCH 300/368] Dimensionality reduction (#8590) --- machine_learning/dimensionality_reduction.py | 198 +++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 
machine_learning/dimensionality_reduction.py diff --git a/machine_learning/dimensionality_reduction.py b/machine_learning/dimensionality_reduction.py new file mode 100644 index 000000000..d2046f81a --- /dev/null +++ b/machine_learning/dimensionality_reduction.py @@ -0,0 +1,198 @@ +# Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub + +""" +Requirements: + - numpy version 1.21 + - scipy version 1.3.3 +Notes: + - Each column of the features matrix corresponds to a class item +""" + +import logging + +import numpy as np +import pytest +from scipy.linalg import eigh + +logging.basicConfig(level=logging.INFO, format="%(message)s") + + +def column_reshape(input_array: np.ndarray) -> np.ndarray: + """Function to reshape a row Numpy array into a column Numpy array + >>> input_array = np.array([1, 2, 3]) + >>> column_reshape(input_array) + array([[1], + [2], + [3]]) + """ + + return input_array.reshape((input_array.size, 1)) + + +def covariance_within_classes( + features: np.ndarray, labels: np.ndarray, classes: int +) -> np.ndarray: + """Function to compute the covariance matrix inside each class. + >>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> labels = np.array([0, 1, 0]) + >>> covariance_within_classes(features, labels, 2) + array([[0.66666667, 0.66666667, 0.66666667], + [0.66666667, 0.66666667, 0.66666667], + [0.66666667, 0.66666667, 0.66666667]]) + """ + + covariance_sum = np.nan + for i in range(classes): + data = features[:, labels == i] + data_mean = data.mean(1) + # Centralize the data of class i + centered_data = data - column_reshape(data_mean) + if i > 0: + # If covariance_sum is not None + covariance_sum += np.dot(centered_data, centered_data.T) + else: + # If covariance_sum is np.nan (i.e. first loop) + covariance_sum = np.dot(centered_data, centered_data.T) + + return covariance_sum / features.shape[1] + + +def covariance_between_classes( + features: np.ndarray, labels: np.ndarray, classes: int +) -> np.ndarray: + """Function to compute the covariance matrix between multiple classes + >>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]]) + >>> labels = np.array([0, 1, 0]) + >>> covariance_between_classes(features, labels, 2) + array([[ 3.55555556, 1.77777778, -2.66666667], + [ 1.77777778, 0.88888889, -1.33333333], + [-2.66666667, -1.33333333, 2. ]]) + """ + + general_data_mean = features.mean(1) + covariance_sum = np.nan + for i in range(classes): + data = features[:, labels == i] + device_data = data.shape[1] + data_mean = data.mean(1) + if i > 0: + # If covariance_sum is not None + covariance_sum += device_data * np.dot( + column_reshape(data_mean) - column_reshape(general_data_mean), + (column_reshape(data_mean) - column_reshape(general_data_mean)).T, + ) + else: + # If covariance_sum is np.nan (i.e. first loop) + covariance_sum = device_data * np.dot( + column_reshape(data_mean) - column_reshape(general_data_mean), + (column_reshape(data_mean) - column_reshape(general_data_mean)).T, + ) + + return covariance_sum / features.shape[1] + + +def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray: + """ + Principal Component Analysis. + + For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis. 
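# In brief, the steps implemented below: center the data, form the covariance
# matrix centered @ centered.T / n_samples, eigendecompose it with np.linalg.eigh
# (eigenvalues come back in ascending order, hence the [:, ::-1] reversal), and
# project onto the leading `dimensions` eigenvectors. A condensed sketch of the
# same idea, assuming X has shape (n_features, n_samples):
#
#     centered = X - X.mean(axis=1, keepdims=True)
#     _, vecs = np.linalg.eigh(centered @ centered.T / X.shape[1])
#     projected = vecs[:, ::-1][:, :dimensions].T @ X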
+ Parameters: + * features: the features extracted from the dataset + * dimensions: to filter the projected data for the desired dimension + + >>> test_principal_component_analysis() + """ + + # Check if the features have been loaded + if features.any(): + data_mean = features.mean(1) + # Center the dataset + centered_data = features - np.reshape(data_mean, (data_mean.size, 1)) + covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1] + _, eigenvectors = np.linalg.eigh(covariance_matrix) + # Take all the columns in the reverse order (-1), and then takes only the first + filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions] + # Project the database on the new space + projected_data = np.dot(filtered_eigenvectors.T, features) + logging.info("Principal Component Analysis computed") + + return projected_data + else: + logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True) + logging.error("Dataset empty") + raise AssertionError + + +def linear_discriminant_analysis( + features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int +) -> np.ndarray: + """ + Linear Discriminant Analysis. + + For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis. + Parameters: + * features: the features extracted from the dataset + * labels: the class labels of the features + * classes: the number of classes present in the dataset + * dimensions: to filter the projected data for the desired dimension + + >>> test_linear_discriminant_analysis() + """ + + # Check if the dimension desired is less than the number of classes + assert classes > dimensions + + # Check if features have been already loaded + if features.any: + _, eigenvectors = eigh( + covariance_between_classes(features, labels, classes), + covariance_within_classes(features, labels, classes), + ) + filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions] + svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors) + filtered_svd_matrix = svd_matrix[:, 0:dimensions] + projected_data = np.dot(filtered_svd_matrix.T, features) + logging.info("Linear Discriminant Analysis computed") + + return projected_data + else: + logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True) + logging.error("Dataset empty") + raise AssertionError + + +def test_linear_discriminant_analysis() -> None: + # Create dummy dataset with 2 classes and 3 features + features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]) + labels = np.array([0, 0, 0, 1, 1]) + classes = 2 + dimensions = 2 + + # Assert that the function raises an AssertionError if dimensions > classes + with pytest.raises(AssertionError) as error_info: + projected_data = linear_discriminant_analysis( + features, labels, classes, dimensions + ) + if isinstance(projected_data, np.ndarray): + raise AssertionError( + "Did not raise AssertionError for dimensions > classes" + ) + assert error_info.type is AssertionError + + +def test_principal_component_analysis() -> None: + features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + dimensions = 2 + expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]]) + + with pytest.raises(AssertionError) as error_info: + output = principal_component_analysis(features, dimensions) + if not np.allclose(expected_output, output): + raise AssertionError + assert error_info.type is AssertionError + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2b051a2de4adf711857f5453286dff47d1d87636 Mon Sep 17 00:00:00 2001 From: Rohan 
Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 03:47:48 +0530 Subject: [PATCH 301/368] Create real_and_reactive_power.py (#8665) --- electronics/real_and_reactive_power.py | 49 ++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 electronics/real_and_reactive_power.py diff --git a/electronics/real_and_reactive_power.py b/electronics/real_and_reactive_power.py new file mode 100644 index 000000000..81dcba800 --- /dev/null +++ b/electronics/real_and_reactive_power.py @@ -0,0 +1,49 @@ +import math + + +def real_power(apparent_power: float, power_factor: float) -> float: + """ + Calculate real power from apparent power and power factor. + + Examples: + >>> real_power(100, 0.9) + 90.0 + >>> real_power(0, 0.8) + 0.0 + >>> real_power(100, -0.9) + -90.0 + """ + if ( + not isinstance(power_factor, (int, float)) + or power_factor < -1 + or power_factor > 1 + ): + raise ValueError("power_factor must be a valid float value between -1 and 1.") + return apparent_power * power_factor + + +def reactive_power(apparent_power: float, power_factor: float) -> float: + """ + Calculate reactive power from apparent power and power factor. + + Examples: + >>> reactive_power(100, 0.9) + 43.58898943540673 + >>> reactive_power(0, 0.8) + 0.0 + >>> reactive_power(100, -0.9) + 43.58898943540673 + """ + if ( + not isinstance(power_factor, (int, float)) + or power_factor < -1 + or power_factor > 1 + ): + raise ValueError("power_factor must be a valid float value between -1 and 1.") + return apparent_power * math.sqrt(1 - power_factor**2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b5047cfa114c6343b92370419772b9cf0f13e634 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:00:01 +0530 Subject: [PATCH 302/368] Create apparent_power.py (#8664) * Create apparent_power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update apparent_power.py * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/apparent_power.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 electronics/apparent_power.py diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py new file mode 100644 index 000000000..a6f1a5082 --- /dev/null +++ b/electronics/apparent_power.py @@ -0,0 +1,35 @@ +import cmath +import math + + +def apparent_power( + voltage: float, current: float, voltage_angle: float, current_angle: float +) -> complex: + """ + Calculate the apparent power in a single-phase AC circuit. 
+ + >>> apparent_power(100, 5, 0, 0) + (500+0j) + >>> apparent_power(100, 5, 90, 0) + (3.061616997868383e-14+500j) + >>> apparent_power(100, 5, -45, -60) + (-129.40952255126027-482.9629131445341j) + >>> apparent_power(200, 10, -30, -90) + (-999.9999999999998-1732.0508075688776j) + """ + # Convert angles from degrees to radians + voltage_angle_rad = math.radians(voltage_angle) + current_angle_rad = math.radians(current_angle) + + # Convert voltage and current to rectangular form + voltage_rect = cmath.rect(voltage, voltage_angle_rad) + current_rect = cmath.rect(current, current_angle_rad) + + # Calculate apparent power + return voltage_rect * current_rect + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 93ce8cb75da2740089df8db23fa493ce104a011b Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:14:06 +0530 Subject: [PATCH 303/368] added reference link. (#8667) * added reference link. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- electronics/apparent_power.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py index a6f1a5082..0ce1c2aa9 100644 --- a/electronics/apparent_power.py +++ b/electronics/apparent_power.py @@ -8,6 +8,8 @@ def apparent_power( """ Calculate the apparent power in a single-phase AC circuit. + Reference: https://en.wikipedia.org/wiki/AC_power#Apparent_power + >>> apparent_power(100, 5, 0, 0) (500+0j) >>> apparent_power(100, 5, 90, 0) From 458debc237d41752c6c4223264a4bb23efb2ecec Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:32:20 +0530 Subject: [PATCH 304/368] added a problem with solution on sliding window. (#8566) * added a problem with solution on sliding window. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added hint for return type and parameter * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * removed un-necessary docs and added 2 test cases * Rename sliding_window/minimum_size_subarray_sum.py to dynamic_programming/minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../minimum_size_subarray_sum.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 dynamic_programming/minimum_size_subarray_sum.py diff --git a/dynamic_programming/minimum_size_subarray_sum.py b/dynamic_programming/minimum_size_subarray_sum.py new file mode 100644 index 000000000..3868d7353 --- /dev/null +++ b/dynamic_programming/minimum_size_subarray_sum.py @@ -0,0 +1,62 @@ +import sys + + +def minimum_subarray_sum(target: int, numbers: list[int]) -> int: + """ + Return the length of the shortest contiguous subarray in a list of numbers whose sum + is at least target. 
Reference: https://stackoverflow.com/questions/8269916 + + >>> minimum_subarray_sum(7, [2, 3, 1, 2, 4, 3]) + 2 + >>> minimum_subarray_sum(7, [2, 3, -1, 2, 4, -3]) + 4 + >>> minimum_subarray_sum(11, [1, 1, 1, 1, 1, 1, 1, 1]) + 0 + >>> minimum_subarray_sum(10, [1, 2, 3, 4, 5, 6, 7]) + 2 + >>> minimum_subarray_sum(5, [1, 1, 1, 1, 1, 5]) + 1 + >>> minimum_subarray_sum(0, []) + 0 + >>> minimum_subarray_sum(0, [1, 2, 3]) + 1 + >>> minimum_subarray_sum(10, [10, 20, 30]) + 1 + >>> minimum_subarray_sum(7, [1, 1, 1, 1, 1, 1, 10]) + 1 + >>> minimum_subarray_sum(6, []) + 0 + >>> minimum_subarray_sum(2, [1, 2, 3]) + 1 + >>> minimum_subarray_sum(-6, []) + 0 + >>> minimum_subarray_sum(-6, [3, 4, 5]) + 1 + >>> minimum_subarray_sum(8, None) + 0 + >>> minimum_subarray_sum(2, "ABC") + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + """ + if not numbers: + return 0 + if target == 0 and target in numbers: + return 0 + if not isinstance(numbers, (list, tuple)) or not all( + isinstance(number, int) for number in numbers + ): + raise ValueError("numbers must be an iterable of integers") + + left = right = curr_sum = 0 + min_len = sys.maxsize + + while right < len(numbers): + curr_sum += numbers[right] + while curr_sum >= target and left <= right: + min_len = min(min_len, right - left + 1) + curr_sum -= numbers[left] + left += 1 + right += 1 + + return 0 if min_len == sys.maxsize else min_len From 11582943a555ae3b6a22938df6d3645b0327562e Mon Sep 17 00:00:00 2001 From: JulianStiebler <68881884+JulianStiebler@users.noreply.github.com> Date: Tue, 18 Apr 2023 11:57:48 +0200 Subject: [PATCH 305/368] Create maths/pi_generator.py (#8666) * Create pi_generator.py * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated commentary on line 28, added math.pi comparison & math.isclose() test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed # noqa: E501 * printf() added as recommended by cclaus --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/pi_generator.py | 94 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 maths/pi_generator.py diff --git a/maths/pi_generator.py b/maths/pi_generator.py new file mode 100644 index 000000000..dcd218aae --- /dev/null +++ b/maths/pi_generator.py @@ -0,0 +1,94 @@ +def calculate_pi(limit: int) -> str: + """ + https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80 + Leibniz Formula for Pi + + The Leibniz formula is the special case arctan 1 = 1/4 Pi . + Leibniz's formula converges extremely slowly: it exhibits sublinear convergence. + + Convergence (https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Convergence) + + We cannot try to prove against an interrupted, uncompleted generation. 
+ https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Unusual_behaviour + The errors can in fact be predicted; + but those calculations also approach infinity for accuracy. + + Our output will always be a string since we can defintely store all digits in there. + For simplicity' sake, let's just compare against known values and since our outpit + is a string, we need to convert to float. + + >>> import math + >>> float(calculate_pi(15)) == math.pi + True + + Since we cannot predict errors or interrupt any infinite alternating + series generation since they approach infinity, + or interrupt any alternating series, we are going to need math.isclose() + + >>> math.isclose(float(calculate_pi(50)), math.pi) + True + + >>> math.isclose(float(calculate_pi(100)), math.pi) + True + + Since math.pi-constant contains only 16 digits, here some test with preknown values: + + >>> calculate_pi(50) + '3.14159265358979323846264338327950288419716939937510' + >>> calculate_pi(80) + '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899' + + To apply the Leibniz formula for calculating pi, + the variables q, r, t, k, n, and l are used for the iteration process. + """ + q = 1 + r = 0 + t = 1 + k = 1 + n = 3 + l = 3 + decimal = limit + counter = 0 + + result = "" + + """ + We will avoid using yield since we otherwise get a Generator-Object, + which we can't just compare against anything. We would have to make a list out of it + after the generation, so we will just stick to plain return logic: + """ + while counter != decimal + 1: + if 4 * q + r - t < n * t: + result += str(n) + if counter == 0: + result += "." + + if decimal == counter: + break + + counter += 1 + nr = 10 * (r - n * t) + n = ((10 * (3 * q + r)) // t) - 10 * n + q *= 10 + r = nr + else: + nr = (2 * q + r) * l + nn = (q * (7 * k) + 2 + (r * l)) // (t * l) + q *= k + t *= l + l += 2 + k += 1 + n = nn + r = nr + return result + + +def main() -> None: + print(f"{calculate_pi(50) = }") + import doctest + + doctest.testmod() + + +if __name__ == "__main__": + main() From bf30b18192dd7ff9a43523ee6efe5c015ae6b99c Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Mon, 24 Apr 2023 10:58:30 +0530 Subject: [PATCH 306/368] Update linear_discriminant_analysis.py and rsa_cipher.py (#8680) * Update rsa_cipher.py by replacing %s with {} * Update rsa_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update machine_learning/linear_discriminant_analysis.py Co-authored-by: Christian Clauss * Update linear_discriminant_analysis.py * updated --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/rsa_cipher.py | 14 ++++++++------ machine_learning/linear_discriminant_analysis.py | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py 
index de26992f5..9c41cdc5d 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -76,10 +76,11 @@ def encrypt_and_write_to_file( key_size, n, e = read_key_file(key_filename) if key_size < block_size * 8: sys.exit( - "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " + "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " - "Either decrease the block size or use different keys." - % (block_size * 8, key_size) + "Either decrease the block size or use different keys.".format( + block_size * 8, key_size + ) ) encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)] @@ -101,10 +102,11 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: if key_size < block_size * 8: sys.exit( - "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " + "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " - "Did you specify the correct key file and encrypted file?" - % (block_size * 8, key_size) + "Did you specify the correct key file and encrypted file?".format( + block_size * 8, key_size + ) ) encrypted_blocks = [] diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index f4fb5ba76..c0a477be1 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -399,7 +399,7 @@ def main(): if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q": print("\n" + "GoodBye!".center(100, "-") + "\n") break - system("cls" if name == "nt" else "clear") + system("clear" if name == "posix" else "cls") # noqa: S605 if __name__ == "__main__": From a650426350dc7833ff1110bc2e434763caed631e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Apr 2023 06:05:45 +0200 Subject: [PATCH 307/368] [pre-commit.ci] pre-commit autoupdate (#8691) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.261 → v0.0.262](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.261...v0.0.262) - [github.com/tox-dev/pyproject-fmt: 0.9.2 → 0.10.0](https://github.com/tox-dev/pyproject-fmt/compare/0.9.2...0.10.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 55345a574..288473ca3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.261 + rev: v0.0.262 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.9.2" + rev: "0.10.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 36f5a752c..8e67c85c6 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -327,6 +327,7 @@ * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) * [Minimum 
Partition](dynamic_programming/minimum_partition.py) + * [Minimum Size Subarray Sum](dynamic_programming/minimum_size_subarray_sum.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) @@ -339,6 +340,7 @@ * [Word Break](dynamic_programming/word_break.py) ## Electronics + * [Apparent Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Circular Convolution](electronics/circular_convolution.py) @@ -348,6 +350,7 @@ * [Electrical Impedance](electronics/electrical_impedance.py) * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) + * [Real And Reactive Power](electronics/real_and_reactive_power.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) @@ -483,6 +486,7 @@ * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) + * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) * [Gradient Descent](machine_learning/gradient_descent.py) @@ -604,6 +608,7 @@ * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * [Persistence](maths/persistence.py) + * [Pi Generator](maths/pi_generator.py) * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) From c1b3ea5355266bb47daba378ca10200c4d359453 Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Tue, 25 Apr 2023 21:36:14 +0530 Subject: [PATCH 308/368] The tanh activation function is added (#8689) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/tanh.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/tanh.py diff --git a/maths/tanh.py b/maths/tanh.py new file mode 100644 index 000000000..ddab3e1ab --- /dev/null +++ b/maths/tanh.py @@ -0,0 +1,42 @@ +""" +This script demonstrates the implementation of the tangent hyperbolic +or tanh function. + +The function takes a vector of K real numbers as input and +then (e^x - e^(-x))/(e^x + e^(-x)). After through tanh, the +element of the vector mostly -1 between 1. + +Script inspired from its corresponding Wikipedia article +https://en.wikipedia.org/wiki/Activation_function +""" +import numpy as np + + +def tangent_hyperbolic(vector: np.array) -> np.array: + """ + Implements the tanh function + + Parameters: + vector: np.array + + Returns: + tanh (np.array): The input numpy array after applying tanh. 
+ + mathematically (e^x - e^(-x))/(e^x + e^(-x)) can be written as (2/(1+e^(-2x))-1 + + Examples: + >>> tangent_hyperbolic(np.array([1,5,6,-0.67])) + array([ 0.76159416, 0.9999092 , 0.99998771, -0.58497988]) + + >>> tangent_hyperbolic(np.array([8,10,2,-0.98,13])) + array([ 0.99999977, 1. , 0.96402758, -0.7530659 , 1. ]) + + """ + + return (2 / (1 + np.exp(-2 * vector))) - 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4c1f876567673db0934ba65d662ea221465ec921 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 27 Apr 2023 19:32:07 +0200 Subject: [PATCH 309/368] Solving the `Top k most frequent words` problem using a max-heap (#8685) * Solving the `Top k most frequent words` problem using a max-heap * Mentioning Python standard library solution in `Top k most frequent words` docstring * ruff --fix . * updating DIRECTORY.md --------- Co-authored-by: Amos Paribocci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/heap/heap.py | 31 ++++-- .../linear_discriminant_analysis.py | 2 +- strings/top_k_frequent_words.py | 101 ++++++++++++++++++ 4 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 strings/top_k_frequent_words.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 8e67c85c6..681d252b2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1167,6 +1167,7 @@ * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [Text Justification](strings/text_justification.py) + * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index b14c55d9d..c1004f349 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -1,9 +1,28 @@ from __future__ import annotations +from abc import abstractmethod from collections.abc import Iterable +from typing import Generic, Protocol, TypeVar -class Heap: +class Comparable(Protocol): + @abstractmethod + def __lt__(self: T, other: T) -> bool: + pass + + @abstractmethod + def __gt__(self: T, other: T) -> bool: + pass + + @abstractmethod + def __eq__(self: T, other: object) -> bool: + pass + + +T = TypeVar("T", bound=Comparable) + + +class Heap(Generic[T]): """A Max Heap Implementation >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5] @@ -27,7 +46,7 @@ class Heap: """ def __init__(self) -> None: - self.h: list[float] = [] + self.h: list[T] = [] self.heap_size: int = 0 def __repr__(self) -> str: @@ -79,7 +98,7 @@ class Heap: # fix the subsequent violation recursively if any self.max_heapify(violation) - def build_max_heap(self, collection: Iterable[float]) -> None: + def build_max_heap(self, collection: Iterable[T]) -> None: """build max heap from an unsorted array""" self.h = list(collection) self.heap_size = len(self.h) @@ -88,7 +107,7 @@ class Heap: for i in range(self.heap_size // 2 - 1, -1, -1): self.max_heapify(i) - def extract_max(self) -> float: + def extract_max(self) -> T: """get and remove max from heap""" if self.heap_size >= 2: me = self.h[0] @@ -102,7 +121,7 @@ class Heap: else: raise Exception("Empty heap") - def insert(self, value: float) -> None: + def insert(self, value: T) -> None: """insert a new value into the max heap""" self.h.append(value) idx = (self.heap_size - 1) // 2 @@ -144,7 +163,7 @@ if __name__ == "__main__": ]: print(f"unsorted array: {unsorted}") - heap 
= Heap() + heap: Heap[int] = Heap() heap.build_max_heap(unsorted) print(f"after build heap: {heap}") diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index c0a477be1..88c047157 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -399,7 +399,7 @@ def main(): if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q": print("\n" + "GoodBye!".center(100, "-") + "\n") break - system("clear" if name == "posix" else "cls") # noqa: S605 + system("cls" if name == "nt" else "clear") # noqa: S605 if __name__ == "__main__": diff --git a/strings/top_k_frequent_words.py b/strings/top_k_frequent_words.py new file mode 100644 index 000000000..f3d1e0cd5 --- /dev/null +++ b/strings/top_k_frequent_words.py @@ -0,0 +1,101 @@ +""" +Finds the top K most frequent words from the provided word list. + +This implementation aims to show how to solve the problem using the Heap class +already present in this repository. +Computing order statistics is, in fact, a typical usage of heaps. + +This is mostly shown for educational purposes, since the problem can be solved +in a few lines using collections.Counter from the Python standard library: + +from collections import Counter +def top_k_frequent_words(words, k_value): + return [x[0] for x in Counter(words).most_common(k_value)] +""" + + +from collections import Counter +from functools import total_ordering + +from data_structures.heap.heap import Heap + + +@total_ordering +class WordCount: + def __init__(self, word: str, count: int) -> None: + self.word = word + self.count = count + + def __eq__(self, other: object) -> bool: + """ + >>> WordCount('a', 1).__eq__(WordCount('b', 1)) + True + >>> WordCount('a', 1).__eq__(WordCount('a', 1)) + True + >>> WordCount('a', 1).__eq__(WordCount('a', 2)) + False + >>> WordCount('a', 1).__eq__(WordCount('b', 2)) + False + >>> WordCount('a', 1).__eq__(1) + NotImplemented + """ + if not isinstance(other, WordCount): + return NotImplemented + return self.count == other.count + + def __lt__(self, other: object) -> bool: + """ + >>> WordCount('a', 1).__lt__(WordCount('b', 1)) + False + >>> WordCount('a', 1).__lt__(WordCount('a', 1)) + False + >>> WordCount('a', 1).__lt__(WordCount('a', 2)) + True + >>> WordCount('a', 1).__lt__(WordCount('b', 2)) + True + >>> WordCount('a', 2).__lt__(WordCount('a', 1)) + False + >>> WordCount('a', 2).__lt__(WordCount('b', 1)) + False + >>> WordCount('a', 1).__lt__(1) + NotImplemented + """ + if not isinstance(other, WordCount): + return NotImplemented + return self.count < other.count + + +def top_k_frequent_words(words: list[str], k_value: int) -> list[str]: + """ + Returns the `k_value` most frequently occurring words, + in non-increasing order of occurrence. + In this context, a word is defined as an element in the provided list. + + In case `k_value` is greater than the number of distinct words, a value of k equal + to the number of distinct words will be considered, instead. 
+ + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 3) + ['c', 'a', 'b'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 2) + ['c', 'a'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 1) + ['c'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 0) + [] + >>> top_k_frequent_words([], 1) + [] + >>> top_k_frequent_words(['a', 'a'], 2) + ['a'] + """ + heap: Heap[WordCount] = Heap() + count_by_word = Counter(words) + heap.build_max_heap( + [WordCount(word, count) for word, count in count_by_word.items()] + ) + return [heap.extract_max().word for _ in range(min(k_value, len(count_by_word)))] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c4dcc44dd44f7e3e7c65debc8e173080fc693150 Mon Sep 17 00:00:00 2001 From: Sahil Goel <55365655+sahilg13@users.noreply.github.com> Date: Sun, 30 Apr 2023 13:33:22 -0400 Subject: [PATCH 310/368] Added an algorithm to calculate the present value of cash flows (#8700) * Added an algorithm to calculate the present value of cash flows * added doctest and reference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolving deprecation issues with typing module * Fixing argument type checks and adding doctest case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixing failing doctest case by requiring less precision due to floating point inprecision * Updating return type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test cases for more coverage * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Make improvements based on Rohan's suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/present_value.py Committed first suggestion Co-authored-by: Christian Clauss * Update financial/present_value.py Committed second suggestion Co-authored-by: Christian Clauss * Update financial/present_value.py Committed third suggestion Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- financial/present_value.py | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 financial/present_value.py diff --git a/financial/present_value.py b/financial/present_value.py new file mode 100644 index 000000000..dc8191a6e --- /dev/null +++ b/financial/present_value.py @@ -0,0 +1,41 @@ +""" +Reference: https://www.investopedia.com/terms/p/presentvalue.asp + +An algorithm that calculates the present value of a stream of yearly cash flows given... +1. The discount rate (as a decimal, not a percent) +2. 
An array of cash flows, with the index of the cash flow being the associated year + +Note: This algorithm assumes that cash flows are paid at the end of the specified year + + +def present_value(discount_rate: float, cash_flows: list[float]) -> float: + """ + >>> present_value(0.13, [10, 20.70, -293, 297]) + 4.69 + >>> present_value(0.07, [-109129.39, 30923.23, 15098.93, 29734,39]) + -42739.63 + >>> present_value(0.07, [109129.39, 30923.23, 15098.93, 29734,39]) + 175519.15 + >>> present_value(-1, [109129.39, 30923.23, 15098.93, 29734,39]) + Traceback (most recent call last): + ... + ValueError: Discount rate cannot be negative + >>> present_value(0.03, []) + Traceback (most recent call last): + ... + ValueError: Cash flows list cannot be empty + """ + if discount_rate < 0: + raise ValueError("Discount rate cannot be negative") + if not cash_flows: + raise ValueError("Cash flows list cannot be empty") + present_value = sum( + cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) + ) + return round(present_value, ndigits=2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f6df26bf0f5c05d53b6fd24552de9e3eec2334aa Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 1 May 2023 02:59:42 +0200 Subject: [PATCH 311/368] Fix docstring in present_value.py (#8702) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ financial/present_value.py | 1 + 2 files changed, 3 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 681d252b2..167d062b4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -363,6 +363,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) * [Interest](financial/interest.py) + * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) ## Fractals @@ -655,6 +656,7 @@ * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) * [Sumset](maths/sumset.py) * [Sylvester Sequence](maths/sylvester_sequence.py) + * [Tanh](maths/tanh.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) * [Triplet Sum](maths/triplet_sum.py) diff --git a/financial/present_value.py b/financial/present_value.py index dc8191a6e..f74612b92 100644 --- a/financial/present_value.py +++ b/financial/present_value.py @@ -6,6 +6,7 @@ An algorithm that calculates the present value of a stream of yearly cash flows 2. 
An array of cash flows, with the index of the cash flow being the associated year Note: This algorithm assumes that cash flows are paid at the end of the specified year +""" def present_value(discount_rate: float, cash_flows: list[float]) -> float: From e966c5cc0f856afab11a8bb150ef3b48f0c63112 Mon Sep 17 00:00:00 2001 From: Himanshu Tomar Date: Mon, 1 May 2023 15:53:03 +0530 Subject: [PATCH 312/368] Added minimum waiting time problem solution using greedy algorithm (#8701) * Added minimum waiting time problem solution using greedy algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff --fix * Add type hints * Added two more doc test * Removed unnecessary comments * updated type hints * Updated the code as per the code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + greedy_methods/minimum_waiting_time.py | 48 ++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 greedy_methods/minimum_waiting_time.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 167d062b4..021669d13 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -450,6 +450,7 @@ * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) + * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py) ## Hashes * [Adler32](hashes/adler32.py) diff --git a/greedy_methods/minimum_waiting_time.py b/greedy_methods/minimum_waiting_time.py new file mode 100644 index 000000000..aaae8cf8f --- /dev/null +++ b/greedy_methods/minimum_waiting_time.py @@ -0,0 +1,48 @@ +""" +Calculate the minimum waiting time using a greedy algorithm. +reference: https://www.youtube.com/watch?v=Sf3eiO12eJs + +For doctests run following command: +python -m doctest -v minimum_waiting_time.py + +The minimum_waiting_time function uses a greedy algorithm to calculate the minimum +time for queries to complete. It sorts the list in non-decreasing order, calculates +the waiting time for each query by multiplying its position in the list with the +sum of all remaining query times, and returns the total waiting time. A doctest +ensures that the function produces the correct output. +""" + + +def minimum_waiting_time(queries: list[int]) -> int: + """ + This function takes a list of query times and returns the minimum waiting time + for all queries to be completed. 
+ + Args: + queries: A list of queries measured in picoseconds + + Returns: + total_waiting_time: Minimum waiting time measured in picoseconds + + Examples: + >>> minimum_waiting_time([3, 2, 1, 2, 6]) + 17 + >>> minimum_waiting_time([3, 2, 1]) + 4 + >>> minimum_waiting_time([1, 2, 3, 4]) + 10 + >>> minimum_waiting_time([5, 5, 5, 5]) + 30 + >>> minimum_waiting_time([]) + 0 + """ + n = len(queries) + if n in (0, 1): + return 0 + return sum(query * (n - i - 1) for i, query in enumerate(sorted(queries))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 777f966893d7042d350b44b05ce7f8431f561509 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 May 2023 23:48:56 +0200 Subject: [PATCH 313/368] [pre-commit.ci] pre-commit autoupdate (#8704) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.262 → v0.0.263](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.262...v0.0.263) - [github.com/tox-dev/pyproject-fmt: 0.10.0 → 0.11.1](https://github.com/tox-dev/pyproject-fmt/compare/0.10.0...0.11.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 288473ca3..accb57da3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.262 + rev: v0.0.263 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.10.0" + rev: "0.11.1" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 021669d13..826bd6fd3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -449,8 +449,8 @@ ## Greedy Methods * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) + * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) - * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py) ## Hashes * [Adler32](hashes/adler32.py) From 73105145090f0ce972f6fa29cc5d71f012dd8c92 Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Tue, 2 May 2023 20:06:28 +0530 Subject: [PATCH 314/368] The ELU activation is added (#8699) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review * ELU activation function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * elu activation is added * ELU activation is added * Update maths/elu_activation.py Co-authored-by: Christian Clauss * Exponential_linear_unit activation is added * Exponential_linear_unit activation is added --------- Co-authored-by: 
pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../exponential_linear_unit.py | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 neural_network/activation_functions/exponential_linear_unit.py diff --git a/neural_network/activation_functions/exponential_linear_unit.py b/neural_network/activation_functions/exponential_linear_unit.py new file mode 100644 index 000000000..7a3cf1d84 --- /dev/null +++ b/neural_network/activation_functions/exponential_linear_unit.py @@ -0,0 +1,40 @@ +""" +Implements the Exponential Linear Unit or ELU function. + +The function takes a vector of K real numbers and a real number alpha as +input and then applies the ELU function to each element of the vector. + +Script inspired from its corresponding Wikipedia article +https://en.wikipedia.org/wiki/Rectifier_(neural_networks) +""" + +import numpy as np + + +def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray: + """ + Implements the ELU activation function. + Parameters: + vector: the array containing input of elu activation + alpha: hyper-parameter + return: + elu (np.array): The input numpy array after applying elu. + + Mathematically, f(x) = x, x>0 else (alpha * (e^x -1)), x<=0, alpha >=0 + + Examples: + >>> exponential_linear_unit(vector=np.array([2.3,0.6,-2,-3.8]), alpha=0.3) + array([ 2.3 , 0.6 , -0.25939942, -0.29328877]) + + >>> exponential_linear_unit(vector=np.array([-9.2,-0.3,0.45,-4.56]), alpha=0.067) + array([-0.06699323, -0.01736518, 0.45 , -0.06629904]) + + + """ + return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 91cc3a240f05922024d4c5523422138857c48ae0 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Wed, 10 May 2023 15:04:36 +0530 Subject: [PATCH 315/368] Update game_of_life.py (#8703) Rectify spelling in docstring --- cellular_automata/game_of_life.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 8e5470251..3382af7b5 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -34,7 +34,7 @@ import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap -usage_doc = "Usage of script: script_nama " +usage_doc = "Usage of script: script_name " choice = [0] * 100 + [1] * 10 random.shuffle(choice) From 209a59ee562dd4b0358d8d1a12b112ec3f3e68ed Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Wed, 10 May 2023 15:08:52 +0530 Subject: [PATCH 316/368] Update and_gate.py (#8690) * Update and_gate.py addressing issue #8656 by calling `test_and_gate()` , ensuring that all the assertions are verified before the actual output is printed. 
* Update and_gate.py addressing issue #8632 --- boolean_algebra/and_gate.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index cbbcfde79..834116772 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -43,6 +43,8 @@ def test_and_gate() -> None: if __name__ == "__main__": + test_and_gate() + print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1)) From 44aa17fb86b0c04508580425b588c0f8a0cf4ce9 Mon Sep 17 00:00:00 2001 From: shricubed Date: Wed, 10 May 2023 14:50:32 -0400 Subject: [PATCH 317/368] Working binary insertion sort in Python (#8024) --- sorts/binary_insertion_sort.py | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 sorts/binary_insertion_sort.py diff --git a/sorts/binary_insertion_sort.py b/sorts/binary_insertion_sort.py new file mode 100644 index 000000000..8d4102558 --- /dev/null +++ b/sorts/binary_insertion_sort.py @@ -0,0 +1,61 @@ +""" +This is a pure Python implementation of the binary insertion sort algorithm + +For doctests run following command: +python -m doctest -v binary_insertion_sort.py +or +python3 -m doctest -v binary_insertion_sort.py + +For manual testing run: +python binary_insertion_sort.py +""" + + +def binary_insertion_sort(collection: list) -> list: + """Pure implementation of the binary insertion sort algorithm in Python + :param collection: some mutable ordered collection with heterogeneous + comparable items inside + :return: the same collection ordered by ascending + + Examples: + >>> binary_insertion_sort([0, 4, 1234, 4, 1]) + [0, 1, 4, 4, 1234] + >>> binary_insertion_sort([]) == sorted([]) + True + >>> binary_insertion_sort([-1, -2, -3]) == sorted([-1, -2, -3]) + True + >>> lst = ['d', 'a', 'b', 'e', 'c'] + >>> binary_insertion_sort(lst) == sorted(lst) + True + >>> import random + >>> collection = random.sample(range(-50, 50), 100) + >>> binary_insertion_sort(collection) == sorted(collection) + True + >>> import string + >>> collection = random.choices(string.ascii_letters + string.digits, k=100) + >>> binary_insertion_sort(collection) == sorted(collection) + True + """ + + n = len(collection) + for i in range(1, n): + val = collection[i] + low = 0 + high = i - 1 + + while low <= high: + mid = (low + high) // 2 + if val < collection[mid]: + high = mid - 1 + else: + low = mid + 1 + for j in range(i, low, -1): + collection[j] = collection[j - 1] + collection[low] = val + return collection + + +if __name__ == "__main__": + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + print(binary_insertion_sort(unsorted)) From 997d56fb633e3bd726c1fac32a2d37277361d5e9 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 10 May 2023 21:53:47 +0300 Subject: [PATCH 318/368] Switch case (#7995) --- strings/string_switch_case.py | 108 ++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 strings/string_switch_case.py diff --git a/strings/string_switch_case.py b/strings/string_switch_case.py new file mode 100644 index 000000000..9a07472df --- /dev/null +++ b/strings/string_switch_case.py @@ -0,0 +1,108 @@ +import re + +""" +general info: +https://en.wikipedia.org/wiki/Naming_convention_(programming)#Python_and_Ruby + +pascal case [ an upper Camel Case ]: https://en.wikipedia.org/wiki/Camel_case + +camel case: https://en.wikipedia.org/wiki/Camel_case + +kebab case [ 
can be found in general info ]: +https://en.wikipedia.org/wiki/Naming_convention_(programming)#Python_and_Ruby + +snake case: https://en.wikipedia.org/wiki/Snake_case +""" + + +# assistant functions +def split_input(str_: str) -> list: + """ + >>> split_input("one two 31235three4four") + [['one', 'two', '31235three4four']] + """ + return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)] + + +def to_simple_case(str_: str) -> str: + """ + >>> to_simple_case("one two 31235three4four") + 'OneTwo31235three4four' + """ + string_split = split_input(str_) + return "".join( + ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split] + ) + + +def to_complex_case(text: str, upper: bool, separator: str) -> str: + """ + >>> to_complex_case("one two 31235three4four", True, "_") + 'ONE_TWO_31235THREE4FOUR' + >>> to_complex_case("one two 31235three4four", False, "-") + 'one-two-31235three4four' + """ + try: + string_split = split_input(text) + if upper: + res_str = "".join( + [ + separator.join([char.upper() for char in sub_str]) + for sub_str in string_split + ] + ) + else: + res_str = "".join( + [ + separator.join([char.lower() for char in sub_str]) + for sub_str in string_split + ] + ) + return res_str + except IndexError: + return "not valid string" + + +# main content +def to_pascal_case(text: str) -> str: + """ + >>> to_pascal_case("one two 31235three4four") + 'OneTwo31235three4four' + """ + return to_simple_case(text) + + +def to_camel_case(text: str) -> str: + """ + >>> to_camel_case("one two 31235three4four") + 'oneTwo31235three4four' + """ + try: + res_str = to_simple_case(text) + return res_str[0].lower() + res_str[1:] + except IndexError: + return "not valid string" + + +def to_snake_case(text: str, upper: bool) -> str: + """ + >>> to_snake_case("one two 31235three4four", True) + 'ONE_TWO_31235THREE4FOUR' + >>> to_snake_case("one two 31235three4four", False) + 'one_two_31235three4four' + """ + return to_complex_case(text, upper, "_") + + +def to_kebab_case(text: str, upper: bool) -> str: + """ + >>> to_kebab_case("one two 31235three4four", True) + 'ONE-TWO-31235THREE4FOUR' + >>> to_kebab_case("one two 31235three4four", False) + 'one-two-31235three4four' + """ + return to_complex_case(text, upper, "-") + + +if __name__ == "__main__": + __import__("doctest").testmod() From 6939538a41202bf05f958c9c2d7c1c20e2f87430 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 10 May 2023 21:55:48 +0300 Subject: [PATCH 319/368] adding the remove digit algorithm (#6708) --- maths/remove_digit.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 maths/remove_digit.py diff --git a/maths/remove_digit.py b/maths/remove_digit.py new file mode 100644 index 000000000..db14ac902 --- /dev/null +++ b/maths/remove_digit.py @@ -0,0 +1,37 @@ +def remove_digit(num: int) -> int: + """ + + returns the biggest possible result + that can be achieved by removing + one digit from the given number + + >>> remove_digit(152) + 52 + >>> remove_digit(6385) + 685 + >>> remove_digit(-11) + 1 + >>> remove_digit(2222222) + 222222 + >>> remove_digit("2222222") + Traceback (most recent call last): + TypeError: only integers accepted as input + >>> remove_digit("string input") + Traceback (most recent call last): + TypeError: only integers accepted as input + """ + + if not isinstance(num, int): + raise TypeError("only integers accepted as input") + else: + num_str = str(abs(num)) + num_transpositions = [list(num_str) 
for char in range(len(num_str))] + for index in range(len(num_str)): + num_transpositions[index].pop(index) + return max( + int("".join(list(transposition))) for transposition in num_transpositions + ) + + +if __name__ == "__main__": + __import__("doctest").testmod() From 793e564e1d4bd6e00b6e2f80869c5fd1fd2872b3 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Thu, 11 May 2023 00:30:59 +0530 Subject: [PATCH 320/368] Create maximum_subsequence.py (#7811) --- DIRECTORY.md | 1 + other/maximum_subsequence.py | 42 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 other/maximum_subsequence.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 826bd6fd3..a70ad6861 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -716,6 +716,7 @@ * [Lru Cache](other/lru_cache.py) * [Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) + * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Password](other/password.py) * [Quine](other/quine.py) diff --git a/other/maximum_subsequence.py b/other/maximum_subsequence.py new file mode 100644 index 000000000..f81717596 --- /dev/null +++ b/other/maximum_subsequence.py @@ -0,0 +1,42 @@ +from collections.abc import Sequence + + +def max_subsequence_sum(nums: Sequence[int] | None = None) -> int: + """Return the maximum possible sum amongst all non - empty subsequences. + + Raises: + ValueError: when nums is empty. + + >>> max_subsequence_sum([1,2,3,4,-2]) + 10 + >>> max_subsequence_sum([-2, -3, -1, -4, -6]) + -1 + >>> max_subsequence_sum([]) + Traceback (most recent call last): + . . . + ValueError: Input sequence should not be empty + >>> max_subsequence_sum() + Traceback (most recent call last): + . . . 
+ ValueError: Input sequence should not be empty + """ + if nums is None or not nums: + raise ValueError("Input sequence should not be empty") + + ans = nums[0] + for i in range(1, len(nums)): + num = nums[i] + ans = max(ans, ans + num, num) + + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Try on a sample input from the user + n = int(input("Enter number of elements : ").strip()) + array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] + print(max_subsequence_sum(array)) From 1faf10b5c2dff8cef3f5d59f60a126bd19bb1c44 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 14 May 2023 22:03:13 +0100 Subject: [PATCH 321/368] Correct ruff failures (#8732) * fix: Correct ruff problems * updating DIRECTORY.md * fix: Fix pre-commit errors * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 6 +++++- conversions/prefix_conversions_string.py | 4 ++-- conversions/rgb_hsv_conversion.py | 4 ++-- .../test_digital_image_processing.py | 2 +- ...ion.py => strassen_matrix_multiplication.py.BROKEN} | 2 +- dynamic_programming/fibonacci.py | 2 +- maths/euclidean_distance.py | 6 +++--- physics/horizontal_projectile_motion.py | 6 +++--- searches/binary_tree_traversal.py | 10 ++++------ 9 files changed, 22 insertions(+), 20 deletions(-) rename divide_and_conquer/{strassen_matrix_multiplication.py => strassen_matrix_multiplication.py.BROKEN} (99%) diff --git a/DIRECTORY.md b/DIRECTORY.md index a70ad6861..fc6cbaf7f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,7 +294,6 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) - * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) @@ -632,6 +631,7 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Relu](maths/relu.py) + * [Remove Digit](maths/remove_digit.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series @@ -694,6 +694,8 @@ ## Neural Network * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) + * Activation Functions + * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) @@ -1080,6 +1082,7 @@ ## Sorts * [Bead Sort](sorts/bead_sort.py) + * [Binary Insertion Sort](sorts/binary_insertion_sort.py) * [Bitonic Sort](sorts/bitonic_sort.py) * [Bogo Sort](sorts/bogo_sort.py) * [Bubble Sort](sorts/bubble_sort.py) @@ -1170,6 +1173,7 @@ * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) + * [String Switch Case](strings/string_switch_case.py) * [Text Justification](strings/text_justification.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py index 3851d7c8b..9344c9672 100644 --- a/conversions/prefix_conversions_string.py +++ b/conversions/prefix_conversions_string.py @@ -96,7 +96,7 @@ def add_si_prefix(value: float) -> str: for name_prefix, value_prefix in prefixes.items(): 
numerical_part = value / (10**value_prefix) if numerical_part > 1: - return f"{str(numerical_part)} {name_prefix}" + return f"{numerical_part!s} {name_prefix}" return str(value) @@ -111,7 +111,7 @@ def add_binary_prefix(value: float) -> str: for prefix in BinaryUnit: numerical_part = value / (2**prefix.value) if numerical_part > 1: - return f"{str(numerical_part)} {prefix.name}" + return f"{numerical_part!s} {prefix.name}" return str(value) diff --git a/conversions/rgb_hsv_conversion.py b/conversions/rgb_hsv_conversion.py index 081cfe1d7..74b3d33e4 100644 --- a/conversions/rgb_hsv_conversion.py +++ b/conversions/rgb_hsv_conversion.py @@ -121,8 +121,8 @@ def rgb_to_hsv(red: int, green: int, blue: int) -> list[float]: float_red = red / 255 float_green = green / 255 float_blue = blue / 255 - value = max(max(float_red, float_green), float_blue) - chroma = value - min(min(float_red, float_green), float_blue) + value = max(float_red, float_green, float_blue) + chroma = value - min(float_red, float_green, float_blue) saturation = 0 if value == 0 else chroma / value if chroma == 0: diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index c999464ce..fee7ab247 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -96,7 +96,7 @@ def test_nearest_neighbour( def test_local_binary_pattern(): - file_path: str = "digital_image_processing/image_data/lena.jpg" + file_path = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. image = imread(file_path, 0) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN similarity index 99% rename from divide_and_conquer/strassen_matrix_multiplication.py rename to divide_and_conquer/strassen_matrix_multiplication.py.BROKEN index 371605d6d..2ca91c63b 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN @@ -122,7 +122,7 @@ def strassen(matrix1: list, matrix2: list) -> list: if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(max(dimension1), max(dimension2)) + maximum = max(dimension1, dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py index 7ec5993ef..c102493aa 100644 --- a/dynamic_programming/fibonacci.py +++ b/dynamic_programming/fibonacci.py @@ -24,7 +24,7 @@ class Fibonacci: return self.sequence[:index] -def main(): +def main() -> None: print( "Fibonacci Series Using Dynamic Programming\n", "Enter the index of the Fibonacci number you want to calculate ", diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 22012e92c..9b29b37b0 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -1,12 +1,12 @@ from __future__ import annotations +import typing from collections.abc import Iterable -from typing import Union import numpy as np -Vector = Union[Iterable[float], Iterable[int], np.ndarray] -VectorOut = Union[np.float64, int, float] +Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 +VectorOut = typing.Union[np.float64, int, float] # noqa: UP007 def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: diff --git 
a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index dbde3660f..80f85a1b7 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -147,6 +147,6 @@ if __name__ == "__main__": # Print results print() print("Results: ") - print(f"Horizontal Distance: {str(horizontal_distance(init_vel, angle))} [m]") - print(f"Maximum Height: {str(max_height(init_vel, angle))} [m]") - print(f"Total Time: {str(total_time(init_vel, angle))} [s]") + print(f"Horizontal Distance: {horizontal_distance(init_vel, angle)!s} [m]") + print(f"Maximum Height: {max_height(init_vel, angle)!s} [m]") + print(f"Total Time: {total_time(init_vel, angle)!s} [s]") diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 76e80df25..6fb841af4 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -13,11 +13,9 @@ class TreeNode: self.left = None -def build_tree(): +def build_tree() -> TreeNode: print("\n********Press N to stop entering at any point of time********\n") - check = input("Enter the value of the root node: ").strip().lower() or "n" - if check == "n": - return None + check = input("Enter the value of the root node: ").strip().lower() q: queue.Queue = queue.Queue() tree_node = TreeNode(int(check)) q.put(tree_node) @@ -37,7 +35,7 @@ def build_tree(): right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) - return None + raise def pre_order(node: TreeNode) -> None: @@ -272,7 +270,7 @@ if __name__ == "__main__": doctest.testmod() print(prompt("Binary Tree Traversals")) - node = build_tree() + node: TreeNode = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") From 2a57dafce096b51b4b28d1495116e79472c8a3f4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 22:27:59 +0100 Subject: [PATCH 322/368] [pre-commit.ci] pre-commit autoupdate (#8716) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.263 → v0.0.267](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.263...v0.0.267) - [github.com/tox-dev/pyproject-fmt: 0.11.1 → 0.11.2](https://github.com/tox-dev/pyproject-fmt/compare/0.11.1...0.11.2) - [github.com/pre-commit/mirrors-mypy: v1.2.0 → v1.3.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.2.0...v1.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index accb57da3..6bdbc7370 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.263 + rev: v0.0.267 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.11.1" + rev: "0.11.2" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.2.0 + rev: v1.3.0 hooks: - id: mypy args: From c0892a06515b8ea5030db2e8344dee2292bb10ad Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 16 May 2023 00:47:50 +0300 Subject: [PATCH 323/368] Reduce the complexity of genetic_algorithm/basic_string.py (#8606) --- genetic_algorithm/basic_string.py | 95 
++++++++++++++++--------------- 1 file changed, 50 insertions(+), 45 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 45b8be651..388e7219f 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -21,6 +21,54 @@ MUTATION_PROBABILITY = 0.4 random.seed(random.randint(0, 1000)) +def evaluate(item: str, main_target: str) -> tuple[str, float]: + """ + Evaluate how similar the item is with the target by just + counting each char in the right position + >>> evaluate("Helxo Worlx", "Hello World") + ('Helxo Worlx', 9.0) + """ + score = len([g for position, g in enumerate(item) if g == main_target[position]]) + return (item, float(score)) + + +def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: + """Slice and combine two string at a random point.""" + random_slice = random.randint(0, len(parent_1) - 1) + child_1 = parent_1[:random_slice] + parent_2[random_slice:] + child_2 = parent_2[:random_slice] + parent_1[random_slice:] + return (child_1, child_2) + + +def mutate(child: str, genes: list[str]) -> str: + """Mutate a random gene of a child with another one from the list.""" + child_list = list(child) + if random.uniform(0, 1) < MUTATION_PROBABILITY: + child_list[random.randint(0, len(child)) - 1] = random.choice(genes) + return "".join(child_list) + + +# Select, crossover and mutate a new population. +def select( + parent_1: tuple[str, float], + population_score: list[tuple[str, float]], + genes: list[str], +) -> list[str]: + """Select the second parent and generate new population""" + pop = [] + # Generate more children proportionally to the fitness score. + child_n = int(parent_1[1] * 100) + 1 + child_n = 10 if child_n >= 10 else child_n + for _ in range(child_n): + parent_2 = population_score[random.randint(0, N_SELECTED)][0] + + child_1, child_2 = crossover(parent_1[0], parent_2) + # Append new string to the population list. + pop.append(mutate(child_1, genes)) + pop.append(mutate(child_2, genes)) + return pop + + def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]: """ Verify that the target contains no genes besides the ones inside genes variable. @@ -70,17 +118,6 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, total_population += len(population) # Random population created. Now it's time to evaluate. - def evaluate(item: str, main_target: str = target) -> tuple[str, float]: - """ - Evaluate how similar the item is with the target by just - counting each char in the right position - >>> evaluate("Helxo Worlx", Hello World) - ["Helxo Worlx", 9] - """ - score = len( - [g for position, g in enumerate(item) if g == main_target[position]] - ) - return (item, float(score)) # Adding a bit of concurrency can make everything faster, # @@ -94,7 +131,7 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. - population_score = [evaluate(item) for item in population] + population_score = [evaluate(item, target) for item in population] # Check if there is a matching evolution. population_score = sorted(population_score, key=lambda x: x[1], reverse=True) @@ -121,41 +158,9 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, (item, score / len(target)) for item, score in population_score ] - # Select, crossover and mutate a new population. 
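A quick illustration (not part of the diff itself) of how the child_n formula in the new module-level select() shown earlier in this patch behaves: the scores it receives have already been divided by len(target), so they lie in [0, 1]; the +1 guarantees at least one crossover round, and the cap of 10 keeps a single strong parent from flooding the population (each round appends two mutated children):

    for fitness in (0.02, 0.15, 0.50, 0.95):
        child_n = int(fitness * 100) + 1
        child_n = 10 if child_n >= 10 else child_n
        print(f"normalized fitness {fitness:.2f} -> {child_n} crossover rounds")
    # 0.02 -> 3 rounds; 0.15, 0.50 and 0.95 all hit the cap of 10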
- def select(parent_1: tuple[str, float]) -> list[str]: - """Select the second parent and generate new population""" - pop = [] - # Generate more children proportionally to the fitness score. - child_n = int(parent_1[1] * 100) + 1 - child_n = 10 if child_n >= 10 else child_n - for _ in range(child_n): - parent_2 = population_score[ # noqa: B023 - random.randint(0, N_SELECTED) - ][0] - - child_1, child_2 = crossover(parent_1[0], parent_2) - # Append new string to the population list. - pop.append(mutate(child_1)) - pop.append(mutate(child_2)) - return pop - - def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string at a random point.""" - random_slice = random.randint(0, len(parent_1) - 1) - child_1 = parent_1[:random_slice] + parent_2[random_slice:] - child_2 = parent_2[:random_slice] + parent_1[random_slice:] - return (child_1, child_2) - - def mutate(child: str) -> str: - """Mutate a random gene of a child with another one from the list.""" - child_list = list(child) - if random.uniform(0, 1) < MUTATION_PROBABILITY: - child_list[random.randint(0, len(child)) - 1] = random.choice(genes) - return "".join(child_list) - # This is selection for i in range(N_SELECTED): - population.extend(select(population_score[int(i)])) + population.extend(select(population_score[int(i)], population_score, genes)) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in From 8102424950f2d3801eda7817d7f69288fd984a63 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 16 May 2023 17:05:55 -0700 Subject: [PATCH 324/368] `local_weighted_learning.py`: fix `mypy` errors and more (#8073) --- .../local_weighted_learning.py | 188 +++++++++++------- 1 file changed, 112 insertions(+), 76 deletions(-) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index 6260e9ac6..8dd0e55d4 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -1,14 +1,55 @@ +""" +Locally weighted linear regression, also called local regression, is a type of +non-parametric linear regression that prioritizes data closest to a given +prediction point. The algorithm estimates the vector of model coefficients β +using weighted least squares regression: + +β = (XᵀWX)⁻¹(XᵀWy), + +where X is the design matrix, y is the response vector, and W is the diagonal +weight matrix. + +This implementation calculates wᵢ, the weight of the ith training sample, using +the Gaussian weight: + +wᵢ = exp(-‖xᵢ - x‖²/(2τ²)), + +where xᵢ is the ith training sample, x is the prediction point, τ is the +"bandwidth", and ‖x‖ is the Euclidean norm (also called the 2-norm or the L² +norm). The bandwidth τ controls how quickly the weight of a training sample +decreases as its distance from the prediction point increases. One can think of +the Gaussian weight as a bell curve centered around the prediction point: a +training sample is weighted lower if it's farther from the center, and τ +controls the spread of the bell curve. + +Other types of locally weighted regression such as locally estimated scatterplot +smoothing (LOESS) typically use different weight functions. 
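As a concrete sketch of the closed-form update above (an illustration under the assumption that y is a flat 1-D array, not code from this module), the coefficient vector at a single prediction point can be computed directly with NumPy; point, x_train, y_train and tau play the same roles as the parameters introduced further down in this file:

    import numpy as np

    def lwlr_beta(
        point: np.ndarray, x_train: np.ndarray, y_train: np.ndarray, tau: float
    ) -> np.ndarray:
        # Gaussian weights w_i = exp(-||x_i - point||^2 / (2 tau^2)), one per sample
        diffs = x_train - point
        weights = np.exp(-np.sum(diffs * diffs, axis=1) / (2 * tau**2))
        weight_matrix = np.diag(weights)  # the diagonal matrix W
        # beta = (X^T W X)^-1 (X^T W y)
        return np.linalg.inv(x_train.T @ weight_matrix @ x_train) @ (
            x_train.T @ weight_matrix @ y_train
        )

The prediction at point is then point @ beta, which mirrors what the functions below do for every training sample.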
+ +References: + - https://en.wikipedia.org/wiki/Local_regression + - https://en.wikipedia.org/wiki/Weighted_least_squares + - https://cs229.stanford.edu/notes2022fall/main_notes.pdf +""" + import matplotlib.pyplot as plt import numpy as np -def weighted_matrix( - point: np.array, training_data_x: np.array, bandwidth: float -) -> np.array: +def weight_matrix(point: np.ndarray, x_train: np.ndarray, tau: float) -> np.ndarray: """ - Calculate the weight for every point in the data set. - point --> the x value at which we want to make predictions - >>> weighted_matrix( + Calculate the weight of every point in the training data around a given + prediction point + + Args: + point: x-value at which the prediction is being made + x_train: ndarray of x-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + + Returns: + m x m weight matrix around the prediction point, where m is the size of + the training set + >>> weight_matrix( ... np.array([1., 1.]), ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), ... 0.6 @@ -17,25 +58,30 @@ def weighted_matrix( [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) """ - m, _ = np.shape(training_data_x) # m is the number of training samples - weights = np.eye(m) # Initializing weights as identity matrix - - # calculating weights for all training examples [x(i)'s] + m = len(x_train) # Number of training samples + weights = np.eye(m) # Initialize weights as identity matrix for j in range(m): - diff = point - training_data_x[j] - weights[j, j] = np.exp(diff @ diff.T / (-2.0 * bandwidth**2)) + diff = point - x_train[j] + weights[j, j] = np.exp(diff @ diff.T / (-2.0 * tau**2)) + return weights def local_weight( - point: np.array, - training_data_x: np.array, - training_data_y: np.array, - bandwidth: float, -) -> np.array: + point: np.ndarray, x_train: np.ndarray, y_train: np.ndarray, tau: float +) -> np.ndarray: """ - Calculate the local weights using the weight_matrix function on training data. - Return the weighted matrix. + Calculate the local weights at a given prediction point using the weight + matrix for that point + + Args: + point: x-value at which the prediction is being made + x_train: ndarray of x-values for training + y_train: ndarray of y-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + Returns: + ndarray of local weights >>> local_weight( ... np.array([1., 1.]), ... 
np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), @@ -45,19 +91,28 @@ def local_weight( array([[0.00873174], [0.08272556]]) """ - weight = weighted_matrix(point, training_data_x, bandwidth) - w = np.linalg.inv(training_data_x.T @ (weight @ training_data_x)) @ ( - training_data_x.T @ weight @ training_data_y.T + weight_mat = weight_matrix(point, x_train, tau) + weight = np.linalg.inv(x_train.T @ weight_mat @ x_train) @ ( + x_train.T @ weight_mat @ y_train.T ) - return w + return weight def local_weight_regression( - training_data_x: np.array, training_data_y: np.array, bandwidth: float -) -> np.array: + x_train: np.ndarray, y_train: np.ndarray, tau: float +) -> np.ndarray: """ - Calculate predictions for each data point on axis + Calculate predictions for each point in the training data + + Args: + x_train: ndarray of x-values for training + y_train: ndarray of y-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + + Returns: + ndarray of predictions >>> local_weight_regression( ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), ... np.array([[1.01, 1.66, 3.5]]), @@ -65,77 +120,57 @@ def local_weight_regression( ... ) array([1.07173261, 1.65970737, 3.50160179]) """ - m, _ = np.shape(training_data_x) - ypred = np.zeros(m) + y_pred = np.zeros(len(x_train)) # Initialize array of predictions + for i, item in enumerate(x_train): + y_pred[i] = item @ local_weight(item, x_train, y_train, tau) - for i, item in enumerate(training_data_x): - ypred[i] = item @ local_weight( - item, training_data_x, training_data_y, bandwidth - ) - - return ypred + return y_pred def load_data( - dataset_name: str, cola_name: str, colb_name: str -) -> tuple[np.array, np.array, np.array, np.array]: + dataset_name: str, x_name: str, y_name: str +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Load data from seaborn and split it into x and y points + >>> pass # No doctests, function is for demo purposes only """ import seaborn as sns data = sns.load_dataset(dataset_name) - col_a = np.array(data[cola_name]) # total_bill - col_b = np.array(data[colb_name]) # tip + x_data = np.array(data[x_name]) + y_data = np.array(data[y_name]) - mcol_a = col_a.copy() - mcol_b = col_b.copy() + one = np.ones(len(y_data)) - one = np.ones(np.shape(mcol_b)[0], dtype=int) + # pairing elements of one and x_data + x_train = np.column_stack((one, x_data)) - # pairing elements of one and mcol_a - training_data_x = np.column_stack((one, mcol_a)) - - return training_data_x, mcol_b, col_a, col_b - - -def get_preds(training_data_x: np.array, mcol_b: np.array, tau: float) -> np.array: - """ - Get predictions with minimum error for each training data - >>> get_preds( - ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), - ... np.array([[1.01, 1.66, 3.5]]), - ... 0.6 - ... 
) - array([1.07173261, 1.65970737, 3.50160179]) - """ - ypred = local_weight_regression(training_data_x, mcol_b, tau) - return ypred + return x_train, x_data, y_data def plot_preds( - training_data_x: np.array, - predictions: np.array, - col_x: np.array, - col_y: np.array, - cola_name: str, - colb_name: str, -) -> plt.plot: + x_train: np.ndarray, + preds: np.ndarray, + x_data: np.ndarray, + y_data: np.ndarray, + x_name: str, + y_name: str, +) -> None: """ Plot predictions and display the graph + >>> pass # No doctests, function is for demo purposes only """ - xsort = training_data_x.copy() - xsort.sort(axis=0) - plt.scatter(col_x, col_y, color="blue") + x_train_sorted = np.sort(x_train, axis=0) + plt.scatter(x_data, y_data, color="blue") plt.plot( - xsort[:, 1], - predictions[training_data_x[:, 1].argsort(0)], + x_train_sorted[:, 1], + preds[x_train[:, 1].argsort(0)], color="yellow", linewidth=5, ) plt.title("Local Weighted Regression") - plt.xlabel(cola_name) - plt.ylabel(colb_name) + plt.xlabel(x_name) + plt.ylabel(y_name) plt.show() @@ -144,6 +179,7 @@ if __name__ == "__main__": doctest.testmod() - training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip") - predictions = get_preds(training_data_x, mcol_b, 0.5) - plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip") + # Demo with a dataset from the seaborn module + training_data_x, total_bill, tip = load_data("tips", "total_bill", "tip") + predictions = local_weight_regression(training_data_x, tip, 5) + plot_preds(training_data_x, predictions, total_bill, tip, "total_bill", "tip") From 3dc143f7218a1221f346c0fccb516d1199850e18 Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Wed, 17 May 2023 05:38:56 +0530 Subject: [PATCH 325/368] Added odd_sieve.py (#8740) --- maths/odd_sieve.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/odd_sieve.py diff --git a/maths/odd_sieve.py b/maths/odd_sieve.py new file mode 100644 index 000000000..60e92921a --- /dev/null +++ b/maths/odd_sieve.py @@ -0,0 +1,42 @@ +from itertools import compress, repeat +from math import ceil, sqrt + + +def odd_sieve(num: int) -> list[int]: + """ + Returns the prime numbers < `num`. The prime numbers are calculated using an + odd sieve implementation of the Sieve of Eratosthenes algorithm + (see for reference https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes). 
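For comparison, here is a minimal unoptimised sieve over every integer below num (an illustration only, not part of this module); it should return the same list as odd_sieve for any num:

    def plain_sieve(num: int) -> list[int]:
        # Classic Sieve of Eratosthenes: mark composites, keep what survives
        if num < 2:
            return []
        is_prime = [False, False] + [True] * (num - 2)
        for i in range(2, int(num**0.5) + 1):
            if is_prime[i]:
                for multiple in range(i * i, num, i):
                    is_prime[multiple] = False
        return [n for n in range(num) if is_prime[n]]

    # plain_sieve(20) == odd_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]

The odd-only variant below roughly halves the work and memory by keeping one bytearray entry per odd candidate and striking out odd multiples with a strided slice assignment.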
+ + >>> odd_sieve(2) + [] + >>> odd_sieve(3) + [2] + >>> odd_sieve(10) + [2, 3, 5, 7] + >>> odd_sieve(20) + [2, 3, 5, 7, 11, 13, 17, 19] + """ + + if num <= 2: + return [] + if num == 3: + return [2] + + # Odd sieve for numbers in range [3, num - 1] + sieve = bytearray(b"\x01") * ((num >> 1) - 1) + + for i in range(3, int(sqrt(num)) + 1, 2): + if sieve[(i >> 1) - 1]: + i_squared = i**2 + sieve[(i_squared >> 1) - 1 :: i] = repeat( + 0, ceil((num - i_squared) / (i << 1)) + ) + + return [2] + list(compress(range(3, num, 2), sieve)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 61cfb43d2b9246d1e2019ce7f03cb91f452ed2ba Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Wed, 17 May 2023 04:21:16 +0400 Subject: [PATCH 326/368] Add h index (#8036) --- DIRECTORY.md | 1 + other/h_index.py | 71 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 other/h_index.py diff --git a/DIRECTORY.md b/DIRECTORY.md index fc6cbaf7f..46bd51ce9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -712,6 +712,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) * [Linear Congruential Generator](other/linear_congruential_generator.py) diff --git a/other/h_index.py b/other/h_index.py new file mode 100644 index 000000000..e91389675 --- /dev/null +++ b/other/h_index.py @@ -0,0 +1,71 @@ +""" +Task: +Given an array of integers citations where citations[i] is the number of +citations a researcher received for their ith paper, return compute the +researcher's h-index. + +According to the definition of h-index on Wikipedia: A scientist has an +index h if h of their n papers have at least h citations each, and the other +n - h papers have no more than h citations each. + +If there are several possible values for h, the maximum one is taken as the +h-index. + +H-Index link: https://en.wikipedia.org/wiki/H-index + +Implementation notes: +Use sorting of array + +Leetcode link: https://leetcode.com/problems/h-index/description/ + +n = len(citations) +Runtime Complexity: O(n * log(n)) +Space Complexity: O(1) + +""" + + +def h_index(citations: list[int]) -> int: + """ + Return H-index of citations + + >>> h_index([3, 0, 6, 1, 5]) + 3 + >>> h_index([1, 3, 1]) + 1 + >>> h_index([1, 2, 3]) + 2 + >>> h_index('test') + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. + >>> h_index([1,2,'3']) + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. + >>> h_index([1,2,-3]) + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. 
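A short trace of the sorted-array check that follows (illustration only): for the first doctest, [3, 0, 6, 1, 5] sorts to [0, 1, 3, 5, 6]; walking i upward, the (i + 1)-th largest count stays above i for i = 0, 1, 2 (counts 6, 5, 3) and first drops to i or below at i = 3 (count 1), so three papers have at least 3 citations and the rest have at most 3:

    citations = sorted([3, 0, 6, 1, 5])  # [0, 1, 3, 5, 6]
    for i in range(len(citations)):
        # citations[len - 1 - i] is the (i + 1)-th largest citation count
        if citations[len(citations) - 1 - i] <= i:
            print(f"h-index = {i}")  # prints: h-index = 3
            break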
+ """ + + # validate: + if not isinstance(citations, list) or not all( + isinstance(item, int) and item >= 0 for item in citations + ): + raise ValueError("The citations should be a list of non negative integers.") + + citations.sort() + len_citations = len(citations) + + for i in range(len_citations): + if citations[len_citations - 1 - i] <= i: + return i + + return len_citations + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a2783c6597a154a87f60bb5878770d2f152a1d09 Mon Sep 17 00:00:00 2001 From: Harkishan Khuva <78949167+hakiKhuva@users.noreply.github.com> Date: Wed, 17 May 2023 05:52:24 +0530 Subject: [PATCH 327/368] Create guess_the_number_search.py (#7937) --- other/guess_the_number_search.py | 165 +++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 other/guess_the_number_search.py diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py new file mode 100644 index 000000000..0439223f2 --- /dev/null +++ b/other/guess_the_number_search.py @@ -0,0 +1,165 @@ +""" +guess the number using lower,higher and the value to find or guess + +solution works by dividing lower and higher of number guessed + +suppose lower is 0, higher is 1000 and the number to guess is 355 + +>>> guess_the_number(10, 1000, 17) +started... +guess the number : 17 +details : [505, 257, 133, 71, 40, 25, 17] + +""" + + +def temp_input_value( + min_val: int = 10, max_val: int = 1000, option: bool = True +) -> int: + """ + Temporary input values for tests + + >>> temp_input_value(option=True) + 10 + + >>> temp_input_value(option=False) + 1000 + + >>> temp_input_value(min_val=100, option=True) + 100 + + >>> temp_input_value(min_val=100, max_val=50) + Traceback (most recent call last): + ... + ValueError: Invalid value for min_val or max_val (min_value < max_value) + + >>> temp_input_value("ten","fifty",1) + Traceback (most recent call last): + ... + AssertionError: Invalid type of value(s) specified to function! + + >>> temp_input_value(min_val=-100, max_val=500) + -100 + + >>> temp_input_value(min_val=-5100, max_val=-100) + -5100 + """ + assert ( + isinstance(min_val, int) + and isinstance(max_val, int) + and isinstance(option, bool) + ), "Invalid type of value(s) specified to function!" + + if min_val > max_val: + raise ValueError("Invalid value for min_val or max_val (min_value < max_value)") + return min_val if option else max_val + + +def get_avg(number_1: int, number_2: int) -> int: + """ + Return the mid-number(whole) of two integers a and b + + >>> get_avg(10, 15) + 12 + + >>> get_avg(20, 300) + 160 + + >>> get_avg("abcd", 300) + Traceback (most recent call last): + ... + TypeError: can only concatenate str (not "int") to str + + >>> get_avg(10.5,50.25) + 30 + """ + return int((number_1 + number_2) / 2) + + +def guess_the_number(lower: int, higher: int, to_guess: int) -> None: + """ + The `guess_the_number` function that guess the number by some operations + and using inner functions + + >>> guess_the_number(10, 1000, 17) + started... + guess the number : 17 + details : [505, 257, 133, 71, 40, 25, 17] + + >>> guess_the_number(-10000, 10000, 7) + started... + guess the number : 7 + details : [0, 5000, 2500, 1250, 625, 312, 156, 78, 39, 19, 9, 4, 6, 7] + + >>> guess_the_number(10, 1000, "a") + Traceback (most recent call last): + ... + AssertionError: argument values must be type of "int" + + >>> guess_the_number(10, 1000, 5) + Traceback (most recent call last): + ... 
+ ValueError: guess value must be within the range of lower and higher value + + >>> guess_the_number(10000, 100, 5) + Traceback (most recent call last): + ... + ValueError: argument value for lower and higher must be(lower > higher) + """ + assert ( + isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int) + ), 'argument values must be type of "int"' + + if lower > higher: + raise ValueError("argument value for lower and higher must be(lower > higher)") + + if not lower < to_guess < higher: + raise ValueError( + "guess value must be within the range of lower and higher value" + ) + + def answer(number: int) -> str: + """ + Returns value by comparing with entered `to_guess` number + """ + if number > to_guess: + return "high" + elif number < to_guess: + return "low" + else: + return "same" + + print("started...") + + last_lowest = lower + last_highest = higher + + last_numbers = [] + + while True: + number = get_avg(last_lowest, last_highest) + last_numbers.append(number) + + if answer(number) == "low": + last_lowest = number + elif answer(number) == "high": + last_highest = number + else: + break + + print(f"guess the number : {last_numbers[-1]}") + print(f"details : {str(last_numbers)}") + + +def main() -> None: + """ + starting point or function of script + """ + lower = int(input("Enter lower value : ").strip()) + higher = int(input("Enter high value : ").strip()) + guess = int(input("Enter value to guess : ").strip()) + guess_the_number(lower, higher, guess) + + +if __name__ == "__main__": + main() From 9b3e4028c6927a17656e590e878c2a101bc4e951 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Wed, 17 May 2023 07:47:23 +0100 Subject: [PATCH 328/368] Fixes broken "Create guess_the_number_search.py" (#8746) --- DIRECTORY.md | 2 ++ other/guess_the_number_search.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 46bd51ce9..82791cde1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -605,6 +605,7 @@ * [Newton Raphson](maths/newton_raphson.py) * [Number Of Digits](maths/number_of_digits.py) * [Numerical Integration](maths/numerical_integration.py) + * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) @@ -712,6 +713,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [Guess The Number Search](other/guess_the_number_search.py) * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py index 0439223f2..01e8898bb 100644 --- a/other/guess_the_number_search.py +++ b/other/guess_the_number_search.py @@ -148,7 +148,7 @@ def guess_the_number(lower: int, higher: int, to_guess: int) -> None: break print(f"guess the number : {last_numbers[-1]}") - print(f"details : {str(last_numbers)}") + print(f"details : {last_numbers!s}") def main() -> None: From cf5e34d4794fbba04d18c98d5d09854029c83466 Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Fri, 19 May 2023 05:18:22 +0530 Subject: [PATCH 329/368] Added is_palindrome.py (#8748) --- maths/is_palindrome.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 maths/is_palindrome.py diff --git a/maths/is_palindrome.py b/maths/is_palindrome.py new file mode 100644 index 
000000000..ba60573ab --- /dev/null +++ b/maths/is_palindrome.py @@ -0,0 +1,34 @@ +def is_palindrome(num: int) -> bool: + """ + Returns whether `num` is a palindrome or not + (see for reference https://en.wikipedia.org/wiki/Palindromic_number). + + >>> is_palindrome(-121) + False + >>> is_palindrome(0) + True + >>> is_palindrome(10) + False + >>> is_palindrome(11) + True + >>> is_palindrome(101) + True + >>> is_palindrome(120) + False + """ + if num < 0: + return False + + num_copy: int = num + rev_num: int = 0 + while num > 0: + rev_num = rev_num * 10 + (num % 10) + num //= 10 + + return num_copy == rev_num + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From edc17b60e00e704cb4109a0e6b18c6ad43234c26 Mon Sep 17 00:00:00 2001 From: Daniel Luo <103051750+DanielLuo7@users.noreply.github.com> Date: Thu, 18 May 2023 20:40:52 -0400 Subject: [PATCH 330/368] add __main__ around print (#8747) --- ciphers/mixed_keyword_cypher.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 806004faa..93a0e3acb 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -65,4 +65,5 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: return cypher -print(mixed_keyword("college", "UNIVERSITY")) +if __name__ == "__main__": + print(mixed_keyword("college", "UNIVERSITY")) From ce43a8ac4ad14e1639014d374b1137906218cfe3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 05:54:30 +0200 Subject: [PATCH 331/368] [pre-commit.ci] pre-commit autoupdate (#8759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.267 → v0.0.269](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.267...v0.0.269) - [github.com/abravalheri/validate-pyproject: v0.12.2 → v0.13](https://github.com/abravalheri/validate-pyproject/compare/v0.12.2...v0.13) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6bdbc7370..bd5bca8f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.2 + rev: v0.13 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 82791cde1..3181a93f3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -577,6 +577,7 @@ * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) + * [Is Palindrome](maths/is_palindrome.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) From df88771905e68c0639069a92144d6b7af1d491ce Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 25 May 2023 06:59:15 +0100 Subject: [PATCH 332/368] Mark 
fetch anime and play as broken (#8763) * updating DIRECTORY.md * updating DIRECTORY.md * fix: Correct ruff errors * fix: Mark anime algorithm as broken * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - .../{fetch_anime_and_play.py => fetch_anime_and_play.py.BROKEN} | 0 2 files changed, 1 deletion(-) rename web_programming/{fetch_anime_and_play.py => fetch_anime_and_play.py.BROKEN} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3181a93f3..71bdf30b2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1199,7 +1199,6 @@ * [Daily Horoscope](web_programming/daily_horoscope.py) * [Download Images From Google Query](web_programming/download_images_from_google_query.py) * [Emails From Url](web_programming/emails_from_url.py) - * [Fetch Anime And Play](web_programming/fetch_anime_and_play.py) * [Fetch Bbc News](web_programming/fetch_bbc_news.py) * [Fetch Github Info](web_programming/fetch_github_info.py) * [Fetch Jobs](web_programming/fetch_jobs.py) diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py.BROKEN similarity index 100% rename from web_programming/fetch_anime_and_play.py rename to web_programming/fetch_anime_and_play.py.BROKEN From 200429fc4739c3757180635016614b984cfd2206 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Thu, 25 May 2023 18:04:42 +1200 Subject: [PATCH 333/368] Dual Number Automatic Differentiation (#8760) * Added dual_number_automatic_differentiation.py * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/dual_number_automatic_differentiation.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + .../dual_number_automatic_differentiation.py | 141 ++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 maths/dual_number_automatic_differentiation.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 71bdf30b2..a75723369 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -549,6 +549,7 @@ * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) + * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py) * [Entropy](maths/entropy.py) * [Euclidean Distance](maths/euclidean_distance.py) * [Euclidean Gcd](maths/euclidean_gcd.py) diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py new file mode 100644 index 000000000..9aa75830c --- /dev/null +++ b/maths/dual_number_automatic_differentiation.py @@ -0,0 +1,141 @@ +from math import factorial + +""" +https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers +https://blog.jliszka.org/2013/10/24/exact-numeric-nth-derivatives.html + +Note this only works for basic functions, f(x) where the power of x is positive. 
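A quick worked illustration of the idea (not drawn from the module itself): to obtain f'(3) for f(x) = x**2, evaluate f at the dual number 3 + E, where E is a formal symbol with E**2 = 0:

    f(3 + E) = (3 + E)**2 = 9 + 6*E + E**2 = 9 + 6*E

The real part 9 is f(3) and the coefficient of E, namely 6, is f'(3). Keeping the higher powers E**2, ..., E**n as well stores the k-th Taylor coefficient f^(k)(x) / k! next to the value, which is why differentiate() below multiplies the stored coefficient by factorial(order).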
+""" + + +class Dual: + def __init__(self, real, rank): + self.real = real + if isinstance(rank, int): + self.duals = [1] * rank + else: + self.duals = rank + + def __repr__(self): + return ( + f"{self.real}+" + f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}" + ) + + def reduce(self): + cur = self.duals.copy() + while cur[-1] == 0: + cur.pop(-1) + return Dual(self.real, cur) + + def __add__(self, other): + if not isinstance(other, Dual): + return Dual(self.real + other, self.duals) + s_dual = self.duals.copy() + o_dual = other.duals.copy() + if len(s_dual) > len(o_dual): + o_dual.extend([1] * (len(s_dual) - len(o_dual))) + elif len(s_dual) < len(o_dual): + s_dual.extend([1] * (len(o_dual) - len(s_dual))) + new_duals = [] + for i in range(len(s_dual)): + new_duals.append(s_dual[i] + o_dual[i]) + return Dual(self.real + other.real, new_duals) + + __radd__ = __add__ + + def __sub__(self, other): + return self + other * -1 + + def __mul__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i * other) + return Dual(self.real * other, new_duals) + new_duals = [0] * (len(self.duals) + len(other.duals) + 1) + for i, item in enumerate(self.duals): + for j, jtem in enumerate(other.duals): + new_duals[i + j + 1] += item * jtem + for k in range(len(self.duals)): + new_duals[k] += self.duals[k] * other.real + for index in range(len(other.duals)): + new_duals[index] += other.duals[index] * self.real + return Dual(self.real * other.real, new_duals) + + __rmul__ = __mul__ + + def __truediv__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i / other) + return Dual(self.real / other, new_duals) + raise ValueError() + + def __floordiv__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i // other) + return Dual(self.real // other, new_duals) + raise ValueError() + + def __pow__(self, n): + if n < 0 or isinstance(n, float): + raise ValueError("power must be a positive integer") + if n == 0: + return 1 + if n == 1: + return self + x = self + for _ in range(n - 1): + x *= self + return x + + +def differentiate(func, position, order): + """ + >>> differentiate(lambda x: x**2, 2, 2) + 2 + >>> differentiate(lambda x: x**2 * x**4, 9, 2) + 196830 + >>> differentiate(lambda y: 0.5 * (y + 3) ** 6, 3.5, 4) + 7605.0 + >>> differentiate(lambda y: y ** 2, 4, 3) + 0 + >>> differentiate(8, 8, 8) + Traceback (most recent call last): + ... + ValueError: differentiate() requires a function as input for func + >>> differentiate(lambda x: x **2, "", 1) + Traceback (most recent call last): + ... + ValueError: differentiate() requires a float as input for position + >>> differentiate(lambda x: x**2, 3, "") + Traceback (most recent call last): + ... 
+ ValueError: differentiate() requires an int as input for order + """ + if not callable(func): + raise ValueError("differentiate() requires a function as input for func") + if not isinstance(position, (float, int)): + raise ValueError("differentiate() requires a float as input for position") + if not isinstance(order, int): + raise ValueError("differentiate() requires an int as input for order") + d = Dual(position, 1) + result = func(d) + if order == 0: + return result.real + return result.duals[order - 1] * factorial(order) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + def f(y): + return y**2 * y**4 + + print(differentiate(f, 9, 2)) From cfbbfd9896cc96379f7374a68ff04b245bb3527c Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 25 May 2023 11:56:23 +0100 Subject: [PATCH 335/368] Merge and add benchmarks to palindrome algorithms in the strings/ directory (#8749) * refactor: Merge and add benchmarks to palindrome * updating DIRECTORY.md * chore: Fix failing tests * Update strings/palindrome.py Co-authored-by: Christian Clauss * Update palindrome.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 - strings/is_palindrome.py | 41 ---------------------------------------- strings/palindrome.py | 40 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 39 insertions(+), 43 deletions(-) delete mode 100644 strings/is_palindrome.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a75723369..fe4baac86 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1156,7 +1156,6 @@ * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Isogram](strings/is_isogram.py) - * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py deleted file mode 100644 index 406aa2e8d..000000000 --- a/strings/is_palindrome.py +++ /dev/null @@ -1,41 +0,0 @@ -def is_palindrome(s: str) -> bool: - """ - Determine if the string s is a palindrome. - - >>> is_palindrome("A man, A plan, A canal -- Panama!") - True - >>> is_palindrome("Hello") - False - >>> is_palindrome("Able was I ere I saw Elba") - True - >>> is_palindrome("racecar") - True - >>> is_palindrome("Mr. Owl ate my metal worm?") - True - """ - # Since punctuation, capitalization, and spaces are often ignored while checking - # palindromes, we first remove them from our string. - s = "".join(character for character in s.lower() if character.isalnum()) - # return s == s[::-1] the slicing method - # uses extra spaces we can - # better with iteration method. - - end = len(s) // 2 - n = len(s) - - # We need to traverse till half of the length of string - # as we can get access of the i'th last element from - # i'th index. 
- # eg: [0,1,2,3,4,5] => 4th index can be accessed - # with the help of 1st index (i==n-i-1) - # where n is length of string - - return all(s[i] == s[n - i - 1] for i in range(end)) - - -if __name__ == "__main__": - s = input("Please enter a string to see if it is a palindrome: ") - if is_palindrome(s): - print(f"'{s}' is a palindrome.") - else: - print(f"'{s}' is not a palindrome.") diff --git a/strings/palindrome.py b/strings/palindrome.py index dd1fe316f..bfdb3ddcf 100644 --- a/strings/palindrome.py +++ b/strings/palindrome.py @@ -1,5 +1,7 @@ # Algorithms to determine if a string is palindrome +from timeit import timeit + test_data = { "MALAYALAM": True, "String": False, @@ -33,6 +35,25 @@ def is_palindrome(s: str) -> bool: return True +def is_palindrome_traversal(s: str) -> bool: + """ + Return True if s is a palindrome otherwise return False. + + >>> all(is_palindrome_traversal(key) is value for key, value in test_data.items()) + True + """ + end = len(s) // 2 + n = len(s) + + # We need to traverse till half of the length of string + # as we can get access of the i'th last element from + # i'th index. + # eg: [0,1,2,3,4,5] => 4th index can be accessed + # with the help of 1st index (i==n-i-1) + # where n is length of string + return all(s[i] == s[n - i - 1] for i in range(end)) + + def is_palindrome_recursive(s: str) -> bool: """ Return True if s is a palindrome otherwise return False. @@ -40,7 +61,7 @@ def is_palindrome_recursive(s: str) -> bool: >>> all(is_palindrome_recursive(key) is value for key, value in test_data.items()) True """ - if len(s) <= 1: + if len(s) <= 2: return True if s[0] == s[len(s) - 1]: return is_palindrome_recursive(s[1:-1]) @@ -58,9 +79,26 @@ def is_palindrome_slice(s: str) -> bool: return s == s[::-1] +def benchmark_function(name: str) -> None: + stmt = f"all({name}(key) is value for key, value in test_data.items())" + setup = f"from __main__ import test_data, {name}" + number = 500000 + result = timeit(stmt=stmt, setup=setup, number=number) + print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds") + + if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print("a man a plan a canal panama") + + # finished 500,000 runs in 0.46793 seconds + benchmark_function("is_palindrome_slice") + # finished 500,000 runs in 0.85234 seconds + benchmark_function("is_palindrome") + # finished 500,000 runs in 1.32028 seconds + benchmark_function("is_palindrome_recursive") + # finished 500,000 runs in 2.08679 seconds + benchmark_function("is_palindrome_traversal") From a17791d022bdc942c8badabc52307c354069a7ae Mon Sep 17 00:00:00 2001 From: Juyoung Kim <61103343+JadeKim042386@users.noreply.github.com> Date: Thu, 25 May 2023 21:54:18 +0900 Subject: [PATCH 336/368] fix: graphs/greedy_best_first typo (#8766) #8764 --- graphs/greedy_best_first.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py index d49e65b9d..35f7ca9fe 100644 --- a/graphs/greedy_best_first.py +++ b/graphs/greedy_best_first.py @@ -58,8 +58,8 @@ class Node: The heuristic here is the Manhattan Distance Could elaborate to offer more than one choice """ - dy = abs(self.pos_x - self.goal_x) - dx = abs(self.pos_y - self.goal_y) + dx = abs(self.pos_x - self.goal_x) + dy = abs(self.pos_y - self.goal_y) return dx + dy def __lt__(self, other) -> bool: From 
dd3b499bfa972507759d0705b77e2e1946f42596 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 26 May 2023 08:50:33 +0200 Subject: [PATCH 337/368] Rename is_palindrome.py to is_int_palindrome.py (#8768) * Rename is_palindrome.py to is_int_palindrome.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- maths/{is_palindrome.py => is_int_palindrome.py} | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) rename maths/{is_palindrome.py => is_int_palindrome.py} (67%) diff --git a/DIRECTORY.md b/DIRECTORY.md index fe4baac86..11ff93c91 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -577,8 +577,8 @@ * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) + * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) - * [Is Palindrome](maths/is_palindrome.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) diff --git a/maths/is_palindrome.py b/maths/is_int_palindrome.py similarity index 67% rename from maths/is_palindrome.py rename to maths/is_int_palindrome.py index ba60573ab..63dc9e213 100644 --- a/maths/is_palindrome.py +++ b/maths/is_int_palindrome.py @@ -1,19 +1,19 @@ -def is_palindrome(num: int) -> bool: +def is_int_palindrome(num: int) -> bool: """ Returns whether `num` is a palindrome or not (see for reference https://en.wikipedia.org/wiki/Palindromic_number). - >>> is_palindrome(-121) + >>> is_int_palindrome(-121) False - >>> is_palindrome(0) + >>> is_int_palindrome(0) True - >>> is_palindrome(10) + >>> is_int_palindrome(10) False - >>> is_palindrome(11) + >>> is_int_palindrome(11) True - >>> is_palindrome(101) + >>> is_int_palindrome(101) True - >>> is_palindrome(120) + >>> is_int_palindrome(120) False """ if num < 0: From 4b79d771cd81a820c195e62430100c416a1618ea Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 26 May 2023 09:34:17 +0200 Subject: [PATCH 338/368] Add more ruff rules (#8767) * Add more ruff rules * Add more ruff rules * pre-commit: Update ruff v0.0.269 -> v0.0.270 * Apply suggestions from code review * Fix doctest * Fix doctest (ignore whitespace) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Dhruv Manilawala Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- .../jacobi_iteration_method.py | 30 ++-- arithmetic_analysis/lu_decomposition.py | 5 +- audio_filters/iir_filter.py | 14 +- backtracking/knight_tour.py | 3 +- bit_manipulation/reverse_bits.py | 3 +- ciphers/base64.py | 12 +- ciphers/beaufort_cipher.py | 2 +- ciphers/cryptomath_module.py | 3 +- ciphers/enigma_machine2.py | 30 ++-- ciphers/hill_cipher.py | 7 +- .../astronomical_length_scale_conversion.py | 6 +- conversions/length_conversion.py | 6 +- conversions/speed_conversions.py | 3 +- conversions/weight_conversion.py | 3 +- .../binary_search_tree_recursive.py | 6 +- .../binary_tree/binary_tree_mirror.py | 3 +- data_structures/disjoint_set/disjoint_set.py | 3 +- .../linked_list/circular_linked_list.py | 8 +- .../linked_list/doubly_linked_list.py | 4 +- .../linked_list/singly_linked_list.py | 4 +- data_structures/stacks/stack.py | 6 +- digital_image_processing/dithering/burkes.py | 3 +- 
divide_and_conquer/convex_hull.py | 8 +- dynamic_programming/knapsack.py | 15 +- dynamic_programming/minimum_steps_to_one.py | 3 +- dynamic_programming/rod_cutting.py | 10 +- dynamic_programming/viterbi.py | 17 ++- electronics/resistor_equivalence.py | 6 +- genetic_algorithm/basic_string.py | 8 +- graphics/vector3_for_2d_rendering.py | 8 +- graphs/breadth_first_search_shortest_path.py | 3 +- linear_algebra/src/schur_complement.py | 14 +- machine_learning/similarity_search.py | 21 +-- machine_learning/support_vector_machines.py | 3 +- maths/3n_plus_1.py | 6 +- maths/automorphic_number.py | 3 +- maths/catalan_number.py | 6 +- .../dual_number_automatic_differentiation.py | 4 +- maths/hexagonal_number.py | 3 +- maths/juggler_sequence.py | 6 +- maths/liouville_lambda.py | 3 +- maths/manhattan_distance.py | 18 +-- maths/pronic_number.py | 3 +- maths/proth_number.py | 6 +- maths/radix2_fft.py | 2 +- maths/sieve_of_eratosthenes.py | 3 +- maths/sylvester_sequence.py | 3 +- maths/twin_prime.py | 3 +- matrix/matrix_operation.py | 12 +- matrix/sherman_morrison.py | 3 +- neural_network/input_data.py | 12 +- other/nested_brackets.py | 2 +- other/scoring_algorithm.py | 3 +- project_euler/problem_054/sol1.py | 6 +- project_euler/problem_068/sol1.py | 3 +- project_euler/problem_131/sol1.py | 5 +- pyproject.toml | 139 +++++++++++++----- scripts/build_directory_md.py | 2 +- sorts/dutch_national_flag_sort.py | 5 +- strings/barcode_validator.py | 3 +- strings/capitalize.py | 2 +- strings/is_spain_national_id.py | 3 +- strings/snake_case_to_camel_pascal_case.py | 8 +- web_programming/reddit.py | 3 +- web_programming/search_books_by_isbn.py | 3 +- web_programming/slack_message.py | 7 +- 67 files changed, 349 insertions(+), 223 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bd5bca8f0..4c70ae219 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.269 + rev: v0.0.270 hooks: - id: ruff diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index fe506a94a..17edf4bf4 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -49,7 +49,9 @@ def jacobi_iteration_method( >>> constant = np.array([[2], [-6]]) >>> init_val = [0.5, -0.5, -0.5] >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but @@ -59,7 +61,9 @@ def jacobi_iteration_method( >>> constant = np.array([[2], [-6], [-4]]) >>> init_val = [0.5, -0.5] >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... 
ValueError: Number of initial values must be equal to number of rows in coefficient @@ -79,24 +83,26 @@ def jacobi_iteration_method( rows2, cols2 = constant_matrix.shape if rows1 != cols1: - raise ValueError( - f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" - ) + msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" + raise ValueError(msg) if cols2 != 1: - raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}") + msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" + raise ValueError(msg) if rows1 != rows2: - raise ValueError( - f"""Coefficient and constant matrices dimensions must be nxn and nx1 but - received {rows1}x{cols1} and {rows2}x{cols2}""" + msg = ( + "Coefficient and constant matrices dimensions must be nxn and nx1 but " + f"received {rows1}x{cols1} and {rows2}x{cols2}" ) + raise ValueError(msg) if len(init_val) != rows1: - raise ValueError( - f"""Number of initial values must be equal to number of rows in coefficient - matrix but received {len(init_val)} and {rows1}""" + msg = ( + "Number of initial values must be equal to number of rows in coefficient " + f"matrix but received {len(init_val)} and {rows1}" ) + raise ValueError(msg) if iterations <= 0: raise ValueError("Iterations must be at least 1") diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 941c1dadf..eaabce544 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -80,10 +80,11 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray # Ensure that table is a square array rows, columns = np.shape(table) if rows != columns: - raise ValueError( - f"'table' has to be of square shaped array but got a " + msg = ( + "'table' has to be of square shaped array but got a " f"{rows}x{columns} array:\n{table}" ) + raise ValueError(msg) lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py index bd448175f..f3c1ad43b 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -50,16 +50,18 @@ class IIRFilter: a_coeffs = [1.0, *a_coeffs] if len(a_coeffs) != self.order + 1: - raise ValueError( - f"Expected a_coeffs to have {self.order + 1} elements for {self.order}" - f"-order filter, got {len(a_coeffs)}" + msg = ( + f"Expected a_coeffs to have {self.order + 1} elements " + f"for {self.order}-order filter, got {len(a_coeffs)}" ) + raise ValueError(msg) if len(b_coeffs) != self.order + 1: - raise ValueError( - f"Expected b_coeffs to have {self.order + 1} elements for {self.order}" - f"-order filter, got {len(a_coeffs)}" + msg = ( + f"Expected b_coeffs to have {self.order + 1} elements " + f"for {self.order}-order filter, got {len(a_coeffs)}" ) + raise ValueError(msg) self.a_coeffs = a_coeffs self.b_coeffs = b_coeffs diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index bb650ece3..cc88307b7 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -91,7 +91,8 @@ def open_knight_tour(n: int) -> list[list[int]]: return board board[i][j] = 0 - raise ValueError(f"Open Kight Tour cannot be performed on a board of size {n}") + msg = f"Open Kight Tour cannot be performed on a board of size {n}" + raise ValueError(msg) if __name__ == "__main__": diff --git a/bit_manipulation/reverse_bits.py b/bit_manipulation/reverse_bits.py index 55608ae12..a8c77c11b 100644 --- 
a/bit_manipulation/reverse_bits.py +++ b/bit_manipulation/reverse_bits.py @@ -14,10 +14,11 @@ def get_reverse_bit_string(number: int) -> str: TypeError: operation can not be conducted on a object of type str """ if not isinstance(number, int): - raise TypeError( + msg = ( "operation can not be conducted on a object of type " f"{type(number).__name__}" ) + raise TypeError(msg) bit_string = "" for _ in range(0, 32): bit_string += str(number % 2) diff --git a/ciphers/base64.py b/ciphers/base64.py index 38a952acc..2b950b1be 100644 --- a/ciphers/base64.py +++ b/ciphers/base64.py @@ -34,9 +34,8 @@ def base64_encode(data: bytes) -> bytes: """ # Make sure the supplied data is a bytes-like object if not isinstance(data, bytes): - raise TypeError( - f"a bytes-like object is required, not '{data.__class__.__name__}'" - ) + msg = f"a bytes-like object is required, not '{data.__class__.__name__}'" + raise TypeError(msg) binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data) @@ -88,10 +87,11 @@ def base64_decode(encoded_data: str) -> bytes: """ # Make sure encoded_data is either a string or a bytes-like object if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str): - raise TypeError( - "argument should be a bytes-like object or ASCII string, not " - f"'{encoded_data.__class__.__name__}'" + msg = ( + "argument should be a bytes-like object or ASCII string, " + f"not '{encoded_data.__class__.__name__}'" ) + raise TypeError(msg) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object diff --git a/ciphers/beaufort_cipher.py b/ciphers/beaufort_cipher.py index 8eae847a7..788fc72b8 100644 --- a/ciphers/beaufort_cipher.py +++ b/ciphers/beaufort_cipher.py @@ -5,7 +5,7 @@ Author: Mohit Radadiya from string import ascii_uppercase dict1 = {char: i for i, char in enumerate(ascii_uppercase)} -dict2 = {i: char for i, char in enumerate(ascii_uppercase)} +dict2 = dict(enumerate(ascii_uppercase)) # This function generates the key in diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py index be8764ff3..6f15f7b73 100644 --- a/ciphers/cryptomath_module.py +++ b/ciphers/cryptomath_module.py @@ -6,7 +6,8 @@ def gcd(a: int, b: int) -> int: def find_mod_inverse(a: int, m: int) -> int: if gcd(a, m) != 1: - raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist") + msg = f"mod inverse of {a!r} and {m!r} does not exist" + raise ValueError(msg) u1, u2, u3 = 1, 0, a v1, v2, v3 = 0, 1, m while v3 != 0: diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 07d21893f..ec0d44e4a 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -87,22 +87,20 @@ def _validator( # Checks if there are 3 unique rotors if (unique_rotsel := len(set(rotsel))) < 3: - raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})") + msg = f"Please use 3 unique rotors (not {unique_rotsel})" + raise Exception(msg) # Checks if rotor positions are valid rotorpos1, rotorpos2, rotorpos3 = rotpos if not 0 < rotorpos1 <= len(abc): - raise ValueError( - "First rotor position is not within range of 1..26 (" f"{rotorpos1}" - ) + msg = f"First rotor position is not within range of 1..26 ({rotorpos1}" + raise ValueError(msg) if not 0 < rotorpos2 <= len(abc): - raise ValueError( - "Second rotor position is not within range of 1..26 (" f"{rotorpos2})" - ) + msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})" + raise ValueError(msg) if not 0 < rotorpos3 <= len(abc): - 
raise ValueError( - "Third rotor position is not within range of 1..26 (" f"{rotorpos3})" - ) + msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})" + raise ValueError(msg) # Validates string and returns dict pbdict = _plugboard(pb) @@ -130,9 +128,11 @@ def _plugboard(pbstring: str) -> dict[str, str]: # a) is type string # b) has even length (so pairs can be made) if not isinstance(pbstring, str): - raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})") + msg = f"Plugboard setting isn't type string ({type(pbstring)})" + raise TypeError(msg) elif len(pbstring) % 2 != 0: - raise Exception(f"Odd number of symbols ({len(pbstring)})") + msg = f"Odd number of symbols ({len(pbstring)})" + raise Exception(msg) elif pbstring == "": return {} @@ -142,9 +142,11 @@ def _plugboard(pbstring: str) -> dict[str, str]: tmppbl = set() for i in pbstring: if i not in abc: - raise Exception(f"'{i}' not in list of symbols") + msg = f"'{i}' not in list of symbols" + raise Exception(msg) elif i in tmppbl: - raise Exception(f"Duplicate symbol ({i})") + msg = f"Duplicate symbol ({i})" + raise Exception(msg) else: tmppbl.add(i) del tmppbl diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index f646d567b..b4424e822 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -104,10 +104,11 @@ class HillCipher: req_l = len(self.key_string) if greatest_common_divisor(det, len(self.key_string)) != 1: - raise ValueError( - f"determinant modular {req_l} of encryption key({det}) is not co prime " - f"w.r.t {req_l}.\nTry another key." + msg = ( + f"determinant modular {req_l} of encryption key({det}) " + f"is not co prime w.r.t {req_l}.\nTry another key." ) + raise ValueError(msg) def process_text(self, text: str) -> str: """ diff --git a/conversions/astronomical_length_scale_conversion.py b/conversions/astronomical_length_scale_conversion.py index 804d82487..0f4136449 100644 --- a/conversions/astronomical_length_scale_conversion.py +++ b/conversions/astronomical_length_scale_conversion.py @@ -77,15 +77,17 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized) if from_sanitized not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) if to_sanitized not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) from_exponent = METRIC_CONVERSION[from_sanitized] to_exponent = METRIC_CONVERSION[to_sanitized] exponent = 1 diff --git a/conversions/length_conversion.py b/conversions/length_conversion.py index 790d9c116..d8f395152 100644 --- a/conversions/length_conversion.py +++ b/conversions/length_conversion.py @@ -104,15 +104,17 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: new_to = to_type.lower().rstrip("s") new_to = TYPE_CONVERSION.get(new_to, new_to) if new_from not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) if new_to not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) return value * METRIC_CONVERSION[new_from].from_ 
* METRIC_CONVERSION[new_to].to diff --git a/conversions/speed_conversions.py b/conversions/speed_conversions.py index 62da9e137..ba497119d 100644 --- a/conversions/speed_conversions.py +++ b/conversions/speed_conversions.py @@ -57,10 +57,11 @@ def convert_speed(speed: float, unit_from: str, unit_to: str) -> float: 115.078 """ if unit_to not in speed_chart or unit_from not in speed_chart_inverse: - raise ValueError( + msg = ( f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n" f"Valid values are: {', '.join(speed_chart_inverse)}" ) + raise ValueError(msg) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index 5c032a497..e8326e0b6 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -299,10 +299,11 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 1.999999998903455 """ if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: - raise ValueError( + msg = ( f"Invalid 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" f"Supported values are: {', '.join(WEIGHT_TYPE_CHART)}" ) + raise ValueError(msg) return value * KILOGRAM_CHART[to_type] * WEIGHT_TYPE_CHART[from_type] diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 97eb8e25b..b5b983b9b 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -77,7 +77,8 @@ class BinarySearchTree: elif label > node.label: node.right = self._put(node.right, label, node) else: - raise Exception(f"Node with label {label} already exists") + msg = f"Node with label {label} already exists" + raise Exception(msg) return node @@ -100,7 +101,8 @@ class BinarySearchTree: def _search(self, node: Node | None, label: int) -> Node: if node is None: - raise Exception(f"Node with label {label} does not exist") + msg = f"Node with label {label} does not exist" + raise Exception(msg) else: if label < node.label: node = self._search(node.left, label) diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py index 1ef950ad6..b8548f4ec 100644 --- a/data_structures/binary_tree/binary_tree_mirror.py +++ b/data_structures/binary_tree/binary_tree_mirror.py @@ -31,7 +31,8 @@ def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict: if not binary_tree: raise ValueError("binary tree cannot be empty") if root not in binary_tree: - raise ValueError(f"root {root} is not present in the binary_tree") + msg = f"root {root} is not present in the binary_tree" + raise ValueError(msg) binary_tree_mirror_dictionary = dict(binary_tree) binary_tree_mirror_dict(binary_tree_mirror_dictionary, root) return binary_tree_mirror_dictionary diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index f8500bf2c..12dafb2d9 100644 --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -56,7 +56,8 @@ def find_python_set(node: Node) -> set: for s in sets: if node.data in s: return s - raise ValueError(f"{node.data} is not in {sets}") + msg = f"{node.data} is not in {sets}" + raise ValueError(msg) def test_disjoint_set() -> None: diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 
9092fb29e..325d91026 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -94,25 +94,25 @@ def test_circular_linked_list() -> None: try: circular_linked_list.delete_front() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1) - raise AssertionError() + raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0) - raise AssertionError() + raise AssertionError except IndexError: assert True diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 69763d12d..1a6c48191 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -198,13 +198,13 @@ def test_doubly_linked_list() -> None: try: linked_list.delete_head() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index a8f9e8ebb..890e21c9b 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -353,13 +353,13 @@ def test_singly_linked_list() -> None: try: linked_list.delete_head() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
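A note on the pattern this patch applies file after file: every hunk replaces an f-string passed directly to an exception constructor with a named msg variable that is raised on the following line. This is the style required by the flake8-errmsg ("EM") rules that the pyproject.toml changes later in this same patch add to ruff's select list. A minimal sketch of the before/after shape, using a hypothetical validate_age helper that is not part of the repository:

    def validate_age(age: int) -> int:
        # Hypothetical example only; it mirrors the EM102-style refactor seen
        # in the surrounding hunks.
        #
        # Before (flagged by ruff EM102):
        #     raise ValueError(f"age must be non-negative, got {age}")
        #
        # After: build the message first, then raise the variable.
        if age < 0:
            msg = f"age must be non-negative, got {age}"
            raise ValueError(msg)
        return age

    validate_age(5)    # returns 5
    validate_age(-1)   # raises ValueError: age must be non-negative, got -1

The runtime behaviour is unchanged; the intermediate variable only satisfies the lint rule and keeps the raise statement itself short.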
diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 55d424d50..a14f4648a 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -92,13 +92,13 @@ def test_stack() -> None: try: _ = stack.pop() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackUnderflowError: assert True # This should happen try: _ = stack.peek() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackUnderflowError: assert True # This should happen @@ -118,7 +118,7 @@ def test_stack() -> None: try: stack.push(200) - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackOverflowError: assert True # This should happen diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 2bf0bbe03..0804104ab 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -21,7 +21,8 @@ class Burkes: self.max_threshold = int(self.get_greyscale(255, 255, 255)) if not self.min_threshold < threshold < self.max_threshold: - raise ValueError(f"Factor value should be from 0 to {self.max_threshold}") + msg = f"Factor value should be from 0 to {self.max_threshold}" + raise ValueError(msg) self.input_img = input_img self.threshold = threshold diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 39e78be04..1ad933417 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -174,12 +174,12 @@ def _validate_input(points: list[Point] | list[list[float]]) -> list[Point]: """ if not hasattr(points, "__iter__"): - raise ValueError( - f"Expecting an iterable object but got an non-iterable type {points}" - ) + msg = f"Expecting an iterable object but got an non-iterable type {points}" + raise ValueError(msg) if not points: - raise ValueError(f"Expecting a list of points but got {points}") + msg = f"Expecting a list of points but got {points}" + raise ValueError(msg) return _construct_points(points) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index b12d30313..489b5ada4 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -78,17 +78,18 @@ def knapsack_with_example_solution(w: int, wt: list, val: list): num_items = len(wt) if num_items != len(val): - raise ValueError( - "The number of weights must be the " - "same as the number of values.\nBut " - f"got {num_items} weights and {len(val)} values" + msg = ( + "The number of weights must be the same as the number of values.\n" + f"But got {num_items} weights and {len(val)} values" ) + raise ValueError(msg) for i in range(num_items): if not isinstance(wt[i], int): - raise TypeError( - "All weights must be integers but " - f"got weight of type {type(wt[i])} at index {i}" + msg = ( + "All weights must be integers but got weight of " + f"type {type(wt[i])} at index {i}" ) + raise TypeError(msg) optimal_val, dp_table = knapsack(w, wt, val, num_items) example_optional_set: set = set() diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py index f4eb7033d..8785027fb 100644 --- a/dynamic_programming/minimum_steps_to_one.py +++ b/dynamic_programming/minimum_steps_to_one.py @@ -42,7 +42,8 @@ def min_steps_to_one(number: int) -> int: """ if number <= 0: - raise ValueError(f"n must be greater than 0. 
Got n = {number}") + msg = f"n must be greater than 0. Got n = {number}" + raise ValueError(msg) table = [number + 1] * (number + 1) diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py index 79104d8f4..f80fa440a 100644 --- a/dynamic_programming/rod_cutting.py +++ b/dynamic_programming/rod_cutting.py @@ -177,13 +177,15 @@ def _enforce_args(n: int, prices: list): the rod """ if n < 0: - raise ValueError(f"n must be greater than or equal to 0. Got n = {n}") + msg = f"n must be greater than or equal to 0. Got n = {n}" + raise ValueError(msg) if n > len(prices): - raise ValueError( - "Each integral piece of rod must have a corresponding " - f"price. Got n = {n} but length of prices = {len(prices)}" + msg = ( + "Each integral piece of rod must have a corresponding price. " + f"Got n = {n} but length of prices = {len(prices)}" ) + raise ValueError(msg) def main(): diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py index 93ab845e2..764d45dc2 100644 --- a/dynamic_programming/viterbi.py +++ b/dynamic_programming/viterbi.py @@ -297,11 +297,13 @@ def _validate_list(_object: Any, var_name: str) -> None: """ if not isinstance(_object, list): - raise ValueError(f"{var_name} must be a list") + msg = f"{var_name} must be a list" + raise ValueError(msg) else: for x in _object: if not isinstance(x, str): - raise ValueError(f"{var_name} must be a list of strings") + msg = f"{var_name} must be a list of strings" + raise ValueError(msg) def _validate_dicts( @@ -384,14 +386,15 @@ def _validate_dict( ValueError: mock_name nested dictionary all values must be float """ if not isinstance(_object, dict): - raise ValueError(f"{var_name} must be a dict") + msg = f"{var_name} must be a dict" + raise ValueError(msg) if not all(isinstance(x, str) for x in _object): - raise ValueError(f"{var_name} all keys must be strings") + msg = f"{var_name} all keys must be strings" + raise ValueError(msg) if not all(isinstance(x, value_type) for x in _object.values()): nested_text = "nested dictionary " if nested else "" - raise ValueError( - f"{var_name} {nested_text}all values must be {value_type.__name__}" - ) + msg = f"{var_name} {nested_text}all values must be {value_type.__name__}" + raise ValueError(msg) if __name__ == "__main__": diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py index 7142f838a..55e7f2d6b 100644 --- a/electronics/resistor_equivalence.py +++ b/electronics/resistor_equivalence.py @@ -23,7 +23,8 @@ def resistor_parallel(resistors: list[float]) -> float: index = 0 for resistor in resistors: if resistor <= 0: - raise ValueError(f"Resistor at index {index} has a negative or zero value!") + msg = f"Resistor at index {index} has a negative or zero value!" + raise ValueError(msg) first_sum += 1 / float(resistor) index += 1 return 1 / first_sum @@ -47,7 +48,8 @@ def resistor_series(resistors: list[float]) -> float: for resistor in resistors: sum_r += resistor if resistor < 0: - raise ValueError(f"Resistor at index {index} has a negative value!") + msg = f"Resistor at index {index} has a negative value!" 
+ raise ValueError(msg) index += 1 return sum_r diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 388e7219f..089c5c99a 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -96,13 +96,13 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: - raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}") + msg = f"{N_POPULATION} must be bigger than {N_SELECTED}" + raise ValueError(msg) # Verify that the target contains no genes besides the ones inside genes variable. not_in_genes_list = sorted({c for c in target if c not in genes}) if not_in_genes_list: - raise ValueError( - f"{not_in_genes_list} is not in genes list, evolution cannot converge" - ) + msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge" + raise ValueError(msg) # Generate random starting population. population = [] diff --git a/graphics/vector3_for_2d_rendering.py b/graphics/vector3_for_2d_rendering.py index dfa22262a..a332206e6 100644 --- a/graphics/vector3_for_2d_rendering.py +++ b/graphics/vector3_for_2d_rendering.py @@ -28,9 +28,8 @@ def convert_to_2d( TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10] """ if not all(isinstance(val, (float, int)) for val in locals().values()): - raise TypeError( - "Input values must either be float or int: " f"{list(locals().values())}" - ) + msg = f"Input values must either be float or int: {list(locals().values())}" + raise TypeError(msg) projected_x = ((x * distance) / (z + distance)) * scale projected_y = ((y * distance) / (z + distance)) * scale return projected_x, projected_y @@ -71,10 +70,11 @@ def rotate( input_variables = locals() del input_variables["axis"] if not all(isinstance(val, (float, int)) for val in input_variables.values()): - raise TypeError( + msg = ( "Input values except axis must either be float or int: " f"{list(input_variables.values())}" ) + raise TypeError(msg) angle = (angle % 360) / 450 * 180 / math.pi if axis == "z": new_x = x * math.cos(angle) - y * math.sin(angle) diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index cb21076f9..d489b110b 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -73,9 +73,10 @@ class Graph: target_vertex_parent = self.parent.get(target_vertex) if target_vertex_parent is None: - raise ValueError( + msg = ( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) + raise ValueError(msg) return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 3a5f4443a..750f4de5e 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -31,16 +31,18 @@ def schur_complement( shape_c = np.shape(mat_c) if shape_a[0] != shape_b[0]: - raise ValueError( - f"Expected the same number of rows for A and B. \ - Instead found A of size {shape_a} and B of size {shape_b}" + msg = ( + "Expected the same number of rows for A and B. " + f"Instead found A of size {shape_a} and B of size {shape_b}" ) + raise ValueError(msg) if shape_b[1] != shape_c[1]: - raise ValueError( - f"Expected the same number of columns for B and C. \ - Instead found B of size {shape_b} and C of size {shape_c}" + msg = ( + "Expected the same number of columns for B and C. 
" + f"Instead found B of size {shape_b} and C of size {shape_c}" ) + raise ValueError(msg) a_inv = pseudo_inv if a_inv is None: diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 72979181f..7a23ec463 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -97,26 +97,29 @@ def similarity_search( """ if dataset.ndim != value_array.ndim: - raise ValueError( - f"Wrong input data's dimensions... dataset : {dataset.ndim}, " - f"value_array : {value_array.ndim}" + msg = ( + "Wrong input data's dimensions... " + f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) + raise ValueError(msg) try: if dataset.shape[1] != value_array.shape[1]: - raise ValueError( - f"Wrong input data's shape... dataset : {dataset.shape[1]}, " - f"value_array : {value_array.shape[1]}" + msg = ( + "Wrong input data's shape... " + f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) + raise ValueError(msg) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("Wrong shape") if dataset.dtype != value_array.dtype: - raise TypeError( - f"Input data have different datatype... dataset : {dataset.dtype}, " - f"value_array : {value_array.dtype}" + msg = ( + "Input data have different datatype... " + f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) + raise TypeError(msg) answer = [] diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index df854cc85..24046115e 100644 --- a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -74,7 +74,8 @@ class SVC: # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: - raise ValueError(f"Unknown kernel: {kernel}") + msg = f"Unknown kernel: {kernel}" + raise ValueError(msg) # kernels def __linear(self, vector1: ndarray, vector2: ndarray) -> float: diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py index 59fdec48e..f9f6dfeb9 100644 --- a/maths/3n_plus_1.py +++ b/maths/3n_plus_1.py @@ -9,9 +9,11 @@ def n31(a: int) -> tuple[list[int], int]: """ if not isinstance(a, int): - raise TypeError(f"Must be int, not {type(a).__name__}") + msg = f"Must be int, not {type(a).__name__}" + raise TypeError(msg) if a < 1: - raise ValueError(f"Given integer must be positive, not {a}") + msg = f"Given integer must be positive, not {a}" + raise ValueError(msg) path = [a] while a != 1: diff --git a/maths/automorphic_number.py b/maths/automorphic_number.py index 103fc7301..8ed937563 100644 --- a/maths/automorphic_number.py +++ b/maths/automorphic_number.py @@ -40,7 +40,8 @@ def is_automorphic_number(number: int) -> bool: TypeError: Input value of [number=5.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 0: return False number_square = number * number diff --git a/maths/catalan_number.py b/maths/catalan_number.py index 85607dc1e..20c2cfb17 100644 --- a/maths/catalan_number.py +++ b/maths/catalan_number.py @@ -31,10 +31,12 @@ def catalan(number: int) -> int: """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be > 0") + msg = f"Input value of 
[number={number}] must be > 0" + raise ValueError(msg) current_number = 1 diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py index 9aa75830c..f98997c8b 100644 --- a/maths/dual_number_automatic_differentiation.py +++ b/maths/dual_number_automatic_differentiation.py @@ -71,7 +71,7 @@ class Dual: for i in self.duals: new_duals.append(i / other) return Dual(self.real / other, new_duals) - raise ValueError() + raise ValueError def __floordiv__(self, other): if not isinstance(other, Dual): @@ -79,7 +79,7 @@ class Dual: for i in self.duals: new_duals.append(i // other) return Dual(self.real // other, new_duals) - raise ValueError() + raise ValueError def __pow__(self, n): if n < 0 or isinstance(n, float): diff --git a/maths/hexagonal_number.py b/maths/hexagonal_number.py index 28735c638..3677ab95e 100644 --- a/maths/hexagonal_number.py +++ b/maths/hexagonal_number.py @@ -36,7 +36,8 @@ def hexagonal(number: int) -> int: TypeError: Input value of [number=11.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: raise ValueError("Input must be a positive integer") return number * (2 * number - 1) diff --git a/maths/juggler_sequence.py b/maths/juggler_sequence.py index 9daba8bc0..7f65d1dff 100644 --- a/maths/juggler_sequence.py +++ b/maths/juggler_sequence.py @@ -40,9 +40,11 @@ def juggler_sequence(number: int) -> list[int]: ValueError: Input value of [number=-1] must be a positive integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be a positive integer") + msg = f"Input value of [number={number}] must be a positive integer" + raise ValueError(msg) sequence = [number] while number != 1: if number % 2 == 0: diff --git a/maths/liouville_lambda.py b/maths/liouville_lambda.py index 5993efa42..1ed228dd5 100644 --- a/maths/liouville_lambda.py +++ b/maths/liouville_lambda.py @@ -33,7 +33,8 @@ def liouville_lambda(number: int) -> int: TypeError: Input value of [number=11.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: raise ValueError("Input must be a positive integer") return -1 if len(prime_factors(number)) % 2 else 1 diff --git a/maths/manhattan_distance.py b/maths/manhattan_distance.py index 2711d4c8c..413991468 100644 --- a/maths/manhattan_distance.py +++ b/maths/manhattan_distance.py @@ -15,15 +15,15 @@ def manhattan_distance(point_a: list, point_b: list) -> float: 9.0 >>> manhattan_distance([1,1], None) Traceback (most recent call last): - ... + ... ValueError: Missing an input >>> manhattan_distance([1,1], [2, 2, 2]) Traceback (most recent call last): - ... + ... ValueError: Both points must be in the same n-dimensional space >>> manhattan_distance([1,"one"], [2, 2, 2]) Traceback (most recent call last): - ... + ... 
TypeError: Expected a list of numbers as input, found str >>> manhattan_distance(1, [2, 2, 2]) Traceback (most recent call last): @@ -66,14 +66,14 @@ def _validate_point(point: list[float]) -> None: if isinstance(point, list): for item in point: if not isinstance(item, (int, float)): - raise TypeError( - f"Expected a list of numbers as input, " - f"found {type(item).__name__}" + msg = ( + "Expected a list of numbers as input, found " + f"{type(item).__name__}" ) + raise TypeError(msg) else: - raise TypeError( - f"Expected a list of numbers as input, found {type(point).__name__}" - ) + msg = f"Expected a list of numbers as input, found {type(point).__name__}" + raise TypeError(msg) else: raise ValueError("Missing an input") diff --git a/maths/pronic_number.py b/maths/pronic_number.py index 8b554dbbd..cf4d3d2eb 100644 --- a/maths/pronic_number.py +++ b/maths/pronic_number.py @@ -41,7 +41,8 @@ def is_pronic(number: int) -> bool: TypeError: Input value of [number=6.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 0 or number % 2 == 1: return False number_sqrt = int(number**0.5) diff --git a/maths/proth_number.py b/maths/proth_number.py index ce911473a..47747ed26 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -29,10 +29,12 @@ def proth(number: int) -> int: """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be > 0") + msg = f"Input value of [number={number}] must be > 0" + raise ValueError(msg) elif number == 1: return 3 elif number == 2: diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index af98f24f9..2c5cdc004 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -167,7 +167,7 @@ class FFT: f"{coef}*x^{i}" for coef, i in enumerate(self.product) ) - return "\n".join((a, b, c)) + return f"{a}\n{b}\n{c}" # Unit tests diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index 3cd6ce0b4..a0520aa5c 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -34,7 +34,8 @@ def prime_sieve(num: int) -> list[int]: """ if num <= 0: - raise ValueError(f"{num}: Invalid input, please enter a positive integer.") + msg = f"{num}: Invalid input, please enter a positive integer." 
+ raise ValueError(msg) sieve = [True] * (num + 1) prime = [] diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py index 114c9dd58..607424c6a 100644 --- a/maths/sylvester_sequence.py +++ b/maths/sylvester_sequence.py @@ -31,7 +31,8 @@ def sylvester(number: int) -> int: if number == 1: return 2 elif number < 1: - raise ValueError(f"The input value of [n={number}] has to be > 0") + msg = f"The input value of [n={number}] has to be > 0" + raise ValueError(msg) else: num = sylvester(number - 1) lower = num - 1 diff --git a/maths/twin_prime.py b/maths/twin_prime.py index e6ac0cc78..912b10b36 100644 --- a/maths/twin_prime.py +++ b/maths/twin_prime.py @@ -32,7 +32,8 @@ def twin_prime(number: int) -> int: TypeError: Input value of [number=6.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if is_prime(number) and is_prime(number + 2): return number + 2 else: diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index 576094902..f189f1898 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -70,10 +70,11 @@ def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[ rows, cols = _verify_matrix_sizes(matrix_a, matrix_b) if cols[0] != rows[1]: - raise ValueError( - f"Cannot multiply matrix of dimensions ({rows[0]},{cols[0]}) " - f"and ({rows[1]},{cols[1]})" + msg = ( + "Cannot multiply matrix of dimensions " + f"({rows[0]},{cols[0]}) and ({rows[1]},{cols[1]})" ) + raise ValueError(msg) return [ [sum(m * n for m, n in zip(i, j)) for j in zip(*matrix_b)] for i in matrix_a ] @@ -174,10 +175,11 @@ def _verify_matrix_sizes( ) -> tuple[tuple[int, int], tuple[int, int]]: shape = _shape(matrix_a) + _shape(matrix_b) if shape[0] != shape[3] or shape[1] != shape[2]: - raise ValueError( - f"operands could not be broadcast together with shape " + msg = ( + "operands could not be broadcast together with shape " f"({shape[0], shape[1]}), ({shape[2], shape[3]})" ) + raise ValueError(msg) return (shape[0], shape[2]), (shape[1], shape[3]) diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 39eddfed8..256271e8a 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -173,7 +173,8 @@ class Matrix: result[r, c] += self[r, i] * another[i, c] return result else: - raise TypeError(f"Unsupported type given for another ({type(another)})") + msg = f"Unsupported type given for another ({type(another)})" + raise TypeError(msg) def transpose(self) -> Matrix: """ diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 2a32f0b82..94c018ece 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -198,10 +198,7 @@ class _DataSet: """Return the next `batch_size` examples from this data set.""" if fake_data: fake_image = [1] * 784 - if self.one_hot: - fake_label = [1] + [0] * 9 - else: - fake_label = 0 + fake_label = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)], @@ -324,10 +321,11 @@ def read_data_sets( test_labels = _extract_labels(f, one_hot=one_hot) if not 0 <= validation_size <= len(train_images): - raise ValueError( - f"Validation size should be between 0 and {len(train_images)}. " - f"Received: {validation_size}." + msg = ( + "Validation size should be between 0 and " + f"{len(train_images)}. 
Received: {validation_size}." ) + raise ValueError(msg) validation_images = train_images[:validation_size] validation_labels = train_labels[:validation_size] diff --git a/other/nested_brackets.py b/other/nested_brackets.py index ea48c0a5f..19c6dd53c 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -18,7 +18,7 @@ def is_balanced(s): stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) - open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) + open_to_closed = {"{": "}", "[": "]", "(": ")"} for i in range(len(s)): if s[i] in open_brackets: diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 8e04a8f30..af04f432e 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -68,7 +68,8 @@ def calculate_each_score( # weight not 0 or 1 else: - raise ValueError(f"Invalid weight of {weight:f} provided") + msg = f"Invalid weight of {weight:f} provided" + raise ValueError(msg) score_lists.append(score) diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 9af7aef5a..74409f32c 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -119,10 +119,12 @@ class PokerHand: For example: "6S 4C KC AS TH" """ if not isinstance(hand, str): - raise TypeError(f"Hand should be of type 'str': {hand!r}") + msg = f"Hand should be of type 'str': {hand!r}" + raise TypeError(msg) # split removes duplicate whitespaces so no need of strip if len(hand.split(" ")) != 5: - raise ValueError(f"Hand should contain only 5 cards: {hand!r}") + msg = f"Hand should contain only 5 cards: {hand!r}" + raise ValueError(msg) self._hand = hand self._first_pair = 0 self._second_pair = 0 diff --git a/project_euler/problem_068/sol1.py b/project_euler/problem_068/sol1.py index 772be359f..cf814b001 100644 --- a/project_euler/problem_068/sol1.py +++ b/project_euler/problem_068/sol1.py @@ -73,7 +73,8 @@ def solution(gon_side: int = 5) -> int: if is_magic_gon(numbers): return int("".join(str(n) for n in numbers)) - raise ValueError(f"Magic {gon_side}-gon ring is impossible") + msg = f"Magic {gon_side}-gon ring is impossible" + raise ValueError(msg) def generate_gon_ring(gon_side: int, perm: list[int]) -> list[int]: diff --git a/project_euler/problem_131/sol1.py b/project_euler/problem_131/sol1.py index f5302aac8..be3ea9c81 100644 --- a/project_euler/problem_131/sol1.py +++ b/project_euler/problem_131/sol1.py @@ -26,10 +26,7 @@ def is_prime(number: int) -> bool: False """ - for divisor in range(2, isqrt(number) + 1): - if number % divisor == 0: - return False - return True + return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1)) def solution(max_prime: int = 10**6) -> int: diff --git a/pyproject.toml b/pyproject.toml index 48c3fbd40..a52619668 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,45 +17,88 @@ ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,sec skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.ruff] -ignore = [ # `ruff rule S101` for a description of that rule - "B904", # B904: Within an `except` clause, raise exceptions with `raise ... 
from err` - "B905", # B905: `zip()` without an explicit `strict=` parameter - "E741", # E741: Ambiguous variable name 'l' - "G004", # G004 Logging statement uses f-string - "N999", # N999: Invalid module name - "PLC1901", # PLC1901: `{}` can be simplified to `{}` as an empty string is falsey - "PLR2004", # PLR2004: Magic value used in comparison - "PLR5501", # PLR5501: Consider using `elif` instead of `else` - "PLW0120", # PLW0120: `else` clause on loop without a `break` statement - "PLW060", # PLW060: Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable - "RUF00", # RUF00: Ambiguous unicode character -- DO NOT FIX - "RUF100", # RUF100: Unused `noqa` directive - "S101", # S101: Use of `assert` detected -- DO NOT FIX - "S105", # S105: Possible hardcoded password: 'password' - "S113", # S113: Probable use of requests call without timeout - "S311", # S311: Standard pseudo-random generators are not suitable for cryptographic purposes - "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +ignore = [ # `ruff rule S101` for a description of that rule + "ARG001", # Unused function argument `amount` -- FIX ME? + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME + "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME + "E741", # Ambiguous variable name 'l' -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable" -- FIX ME + "G004", # Logging statement uses f-string + "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME + "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME + "N999", # Invalid module name -- FIX ME + "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME + "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLR5501", # Consider using `elif` instead of `else` -- FIX ME + "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "RUF00", # Ambiguous unicode character and other rules + "RUF100", # Unused `noqa` directive -- FIX ME + "S101", # Use of `assert` detected -- DO NOT FIX + "S105", # Possible hardcoded password: 'password' + "S113", # Probable use of requests call without timeout -- FIX ME + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SIM102", # Use a single `if` statement instead of nested `if` statements -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -select = [ # https://beta.ruff.rs/docs/rules - "A", # A: builtins - "B", # B: bugbear - "C40", # C40: comprehensions - "C90", # C90: mccabe code complexity - "E", # E: pycodestyle errors - "F", # F: pyflakes - "G", # G: logging format - "I", # I: isort - "N", # N: pep8 naming - "PL", # PL: pylint - "PIE", # PIE: pie - "PYI", # PYI: type hinting stub files - "RUF", # RUF: ruff - "S", # S: bandit - "TID", # TID: tidy imports - "UP", # UP: pyupgrade - "W", # W: pycodestyle warnings - "YTT", # YTT: year 2020 +select = [ # https://beta.ruff.rs/docs/rules + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "ASYNC", # flake8-async + "B", # flake8-bugbear + "BLE", # flake8-blind-except + "C4", # flake8-comprehensions + "C90", # McCabe cyclomatic complexity + "DTZ", # flake8-datetimez + "E", # pycodestyle + "EM", # flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "FA", # flake8-future-annotations + "FLY", # flynt + "G", # flake8-logging-format + "I", # isort + "ICN", # flake8-import-conventions + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + "PYI", # flake8-pyi + "RSE", # flake8-raise + "RUF", # Ruff-specific rules + "S", # flake8-bandit + "SIM", # flake8-simplify + "SLF", # flake8-self + "T10", # flake8-debugger + "TD", # flake8-todos + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle + "YTT", # flake8-2020 + # "ANN", # flake8-annotations # FIX ME? + # "COM", # flake8-commas + # "D", # pydocstyle -- FIX ME? + # "DJ", # flake8-django + # "ERA", # eradicate -- DO NOT FIX + # "FBT", # flake8-boolean-trap # FIX ME + # "ISC", # flake8-implicit-str-concat # FIX ME + # "PD", # pandas-vet + # "PT", # flake8-pytest-style + # "PTH", # flake8-use-pathlib # FIX ME + # "Q", # flake8-quotes + # "RET", # flake8-return # FIX ME? 
+ # "T20", # flake8-print + # "TCH", # flake8-type-checking + # "TRY", # tryceratops ] show-source = true target-version = "py311" @@ -63,7 +106,27 @@ target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE max-complexity = 17 # default: 10 +[tool.ruff.per-file-ignores] +"arithmetic_analysis/newton_raphson.py" = ["PGH001"] +"audio_filters/show_response.py" = ["ARG002"] +"data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] +"data_structures/binary_tree/treap.py" = ["SIM114"] +"data_structures/hashing/hash_table.py" = ["ARG002"] +"data_structures/hashing/quadratic_probing.py" = ["ARG002"] +"data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] +"data_structures/heap/max_heap.py" = ["SIM114"] +"graphs/minimum_spanning_tree_prims.py" = ["SIM114"] +"hashes/enigma_machine.py" = ["BLE001"] +"machine_learning/decision_tree.py" = ["SIM114"] +"machine_learning/linear_discriminant_analysis.py" = ["ARG005"] +"machine_learning/sequential_minimum_optimization.py" = ["SIM115"] +"matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"physics/newtons_second_law_of_motion.py" = ["BLE001"] +"project_euler/problem_099/sol1.py" = ["SIM115"] +"sorts/external_sort.py" = ["SIM115"] + [tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +allow-magic-value-types = ["float", "int", "str"] max-args = 10 # default: 5 max-branches = 20 # default: 12 max-returns = 8 # default: 6 diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index b95be9ebc..24bc00cd0 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -33,7 +33,7 @@ def print_directory_md(top_dir: str = ".") -> None: if filepath != old_path: old_path = print_path(old_path, filepath) indent = (filepath.count(os.sep) + 1) if filepath else 0 - url = "/".join((filepath, filename)).replace(" ", "%20") + url = f"{filepath}/{filename}".replace(" ", "%20") filename = os.path.splitext(filename.replace("_", " ").title())[0] print(f"{md_prefix(indent)} [{filename}]({url})") diff --git a/sorts/dutch_national_flag_sort.py b/sorts/dutch_national_flag_sort.py index 79afefa73..758e3a887 100644 --- a/sorts/dutch_national_flag_sort.py +++ b/sorts/dutch_national_flag_sort.py @@ -84,9 +84,8 @@ def dutch_national_flag_sort(sequence: list) -> list: sequence[mid], sequence[high] = sequence[high], sequence[mid] high -= 1 else: - raise ValueError( - f"The elements inside the sequence must contains only {colors} values" - ) + msg = f"The elements inside the sequence must contains only {colors} values" + raise ValueError(msg) return sequence diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index e050cd337..b4f3864e2 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -65,7 +65,8 @@ def get_barcode(barcode: str) -> int: ValueError: Barcode 'dwefgiweuf' has alphabetic characters. """ if str(barcode).isalpha(): - raise ValueError(f"Barcode '{barcode}' has alphabetic characters.") + msg = f"Barcode '{barcode}' has alphabetic characters." + raise ValueError(msg) elif int(barcode) < 0: raise ValueError("The entered barcode has a negative value. 
Try again.") else: diff --git a/strings/capitalize.py b/strings/capitalize.py index 63603aa07..e7e97c2be 100644 --- a/strings/capitalize.py +++ b/strings/capitalize.py @@ -17,7 +17,7 @@ def capitalize(sentence: str) -> str: """ if not sentence: return "" - lower_to_upper = {lc: uc for lc, uc in zip(ascii_lowercase, ascii_uppercase)} + lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase)) return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:] diff --git a/strings/is_spain_national_id.py b/strings/is_spain_national_id.py index 67f49755f..60d06e123 100644 --- a/strings/is_spain_national_id.py +++ b/strings/is_spain_national_id.py @@ -48,7 +48,8 @@ def is_spain_national_id(spanish_id: str) -> bool: """ if not isinstance(spanish_id, str): - raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}") + msg = f"Expected string as input, found {type(spanish_id).__name__}" + raise TypeError(msg) spanish_id_clean = spanish_id.replace("-", "").upper() if len(spanish_id_clean) != 9: diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index 28a28b517..8219337a6 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -27,11 +27,11 @@ def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: """ if not isinstance(input_str, str): - raise ValueError(f"Expected string as input, found {type(input_str)}") + msg = f"Expected string as input, found {type(input_str)}" + raise ValueError(msg) if not isinstance(use_pascal, bool): - raise ValueError( - f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" - ) + msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" + raise ValueError(msg) words = input_str.split("_") diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 6a31c81c3..5ca5f828c 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -26,7 +26,8 @@ def get_subreddit_data( """ wanted_data = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)): - raise ValueError(f"Invalid search term: {invalid_search_terms}") + msg = f"Invalid search term: {invalid_search_terms}" + raise ValueError(msg) response = requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}, diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index abac3c70b..d5d4cfe92 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -22,7 +22,8 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: """ new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes if new_olid.count("/") != 1: - raise ValueError(f"{olid} is not a valid Open Library olid") + msg = f"{olid} is not a valid Open Library olid" + raise ValueError(msg) return requests.get(f"https://openlibrary.org/{new_olid}.json").json() diff --git a/web_programming/slack_message.py b/web_programming/slack_message.py index f35aa3ca5..5e97d6b64 100644 --- a/web_programming/slack_message.py +++ b/web_programming/slack_message.py @@ -7,10 +7,11 @@ def send_slack_message(message_body: str, slack_url: str) -> None: headers = {"Content-Type": "application/json"} response = requests.post(slack_url, json={"text": message_body}, headers=headers) if response.status_code != 200: - raise ValueError( - f"Request to slack returned an error 
{response.status_code}, " - f"the response is:\n{response.text}" + msg = ( + "Request to slack returned an error " + f"{response.status_code}, the response is:\n{response.text}" ) + raise ValueError(msg) if __name__ == "__main__": From c93659d7ce65e3717f06333e3d049ebaa888e597 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 29 May 2023 17:37:54 -0700 Subject: [PATCH 339/368] Fix type error in `strassen_matrix_multiplication.py` (#8784) * Fix type error in strassen_matrix_multiplication.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ...ion.py.BROKEN => strassen_matrix_multiplication.py} | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) rename divide_and_conquer/{strassen_matrix_multiplication.py.BROKEN => strassen_matrix_multiplication.py} (97%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 11ff93c91..231b0e2f1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,6 +294,7 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) + * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN b/divide_and_conquer/strassen_matrix_multiplication.py similarity index 97% rename from divide_and_conquer/strassen_matrix_multiplication.py.BROKEN rename to divide_and_conquer/strassen_matrix_multiplication.py index 2ca91c63b..cbfc7e565 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -112,17 +112,19 @@ def strassen(matrix1: list, matrix2: list) -> list: [[139, 163], [121, 134], [100, 121]] """ if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]: - raise Exception( - "Unable to multiply these matrices, please check the dimensions. \n" - f"Matrix A:{matrix1} \nMatrix B:{matrix2}" + msg = ( + "Unable to multiply these matrices, please check the dimensions.\n" + f"Matrix A: {matrix1}\n" + f"Matrix B: {matrix2}" ) + raise Exception(msg) dimension1 = matrix_dimensions(matrix1) dimension2 = matrix_dimensions(matrix2) if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(dimension1, dimension2) + maximum = max(*dimension1, *dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 From 4a27b544303e6bab90ed57b72fa3acf3d785429e Mon Sep 17 00:00:00 2001 From: Sundaram Kumar Jha Date: Wed, 31 May 2023 06:26:59 +0530 Subject: [PATCH 340/368] Update permutations.py (#8102) --- data_structures/arrays/permutations.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py index eb3f26517..4558bd8d4 100644 --- a/data_structures/arrays/permutations.py +++ b/data_structures/arrays/permutations.py @@ -1,7 +1,6 @@ def permute(nums: list[int]) -> list[list[int]]: """ Return all permutations. - >>> from itertools import permutations >>> numbers= [1,2,3] >>> all(list(nums) in permute(numbers) for nums in permutations(numbers)) @@ -20,7 +19,32 @@ def permute(nums: list[int]) -> list[list[int]]: return result +def permute2(nums): + """ + Return all permutations of the given list. 
+ + >>> permute2([1, 2, 3]) + [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]] + """ + + def backtrack(start): + if start == len(nums) - 1: + output.append(nums[:]) + else: + for i in range(start, len(nums)): + nums[start], nums[i] = nums[i], nums[start] + backtrack(start + 1) + nums[start], nums[i] = nums[i], nums[start] # backtrack + + output = [] + backtrack(0) + return output + + if __name__ == "__main__": import doctest + # use res to print the data in permute2 function + res = permute2([1, 2, 3]) + print(res) doctest.testmod() From e871540e37b834673f9e6650b8e2281d7d36a8c3 Mon Sep 17 00:00:00 2001 From: Rudransh Bhardwaj <115872354+rudransh61@users.noreply.github.com> Date: Wed, 31 May 2023 20:33:02 +0530 Subject: [PATCH 341/368] Added rank of matrix in linear algebra (#8687) * Added rank of matrix in linear algebra * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Corrected name of function * Corrected Rank_of_Matrix.py * Completed rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * delete to rename Rank_of_Matrix.py * created rank_of_matrix * added more doctests in rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed some issues in rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added moreeee doctestsss in rank_of_mtrix.py and fixed some bugss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update rank_of_matrix.py * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Caeden Perelli-Harris --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- linear_algebra/src/rank_of_matrix.py | 89 ++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 linear_algebra/src/rank_of_matrix.py diff --git a/linear_algebra/src/rank_of_matrix.py b/linear_algebra/src/rank_of_matrix.py new file mode 100644 index 000000000..7ff3c1699 --- /dev/null +++ b/linear_algebra/src/rank_of_matrix.py @@ -0,0 +1,89 @@ +""" +Calculate the rank of a matrix. + +See: https://en.wikipedia.org/wiki/Rank_(linear_algebra) +""" + + +def rank_of_matrix(matrix: list[list[int | float]]) -> int: + """ + Finds the rank of a matrix. + Args: + matrix: The matrix as a list of lists. + Returns: + The rank of the matrix. + Example: + >>> matrix1 = [[1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9]] + >>> rank_of_matrix(matrix1) + 2 + >>> matrix2 = [[1, 0, 0], + ... [0, 1, 0], + ... [0, 0, 0]] + >>> rank_of_matrix(matrix2) + 2 + >>> matrix3 = [[1, 2, 3, 4], + ... [5, 6, 7, 8], + ... [9, 10, 11, 12]] + >>> rank_of_matrix(matrix3) + 2 + >>> rank_of_matrix([[2,3,-1,-1], + ... [1,-1,-2,4], + ... [3,1,3,-2], + ... [6,3,0,-7]]) + 4 + >>> rank_of_matrix([[2,1,-3,-6], + ... [3,-3,1,2], + ... [1,1,1,2]]) + 3 + >>> rank_of_matrix([[2,-1,0], + ... [1,3,4], + ... [4,1,-3]]) + 3 + >>> rank_of_matrix([[3,2,1], + ... 
[-6,-4,-2]]) + 1 + >>> rank_of_matrix([[],[]]) + 0 + >>> rank_of_matrix([[1]]) + 1 + >>> rank_of_matrix([[]]) + 0 + """ + + rows = len(matrix) + columns = len(matrix[0]) + rank = min(rows, columns) + + for row in range(rank): + # Check if diagonal element is not zero + if matrix[row][row] != 0: + # Eliminate all the elements below the diagonal + for col in range(row + 1, rows): + multiplier = matrix[col][row] / matrix[row][row] + for i in range(row, columns): + matrix[col][i] -= multiplier * matrix[row][i] + else: + # Find a non-zero diagonal element to swap rows + reduce = True + for i in range(row + 1, rows): + if matrix[i][row] != 0: + matrix[row], matrix[i] = matrix[i], matrix[row] + reduce = False + break + if reduce: + rank -= 1 + for i in range(rows): + matrix[i][row] = matrix[i][rank] + + # Reduce the row pointer by one to stay on the same row + row -= 1 + + return rank + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4621b0bb4f5d3fff2fa4f0e53d6cb862fe002c60 Mon Sep 17 00:00:00 2001 From: nith2001 <75632283+nith2001@users.noreply.github.com> Date: Wed, 31 May 2023 13:06:12 -0700 Subject: [PATCH 342/368] Improved Graph Implementations (#8730) * Improved Graph Implementations Provides new implementation for graph_list.py and graph_matrix.py along with pytest suites for each. Fixes #8709 * Graph implementation style fixes, corrections, and refactored tests * Helpful docs about graph implementation * Refactored code to separate files and applied enumerate() * Renamed files and refactored code to fail fast * Error handling style fix * Fixed f-string code quality issue * Last f-string fix * Added return types to test functions and more style fixes * Added more function return types * Added more function return types pt2 * Fixed error messages --- graphs/graph_adjacency_list.py | 589 ++++++++++++++++++++++++++++++ graphs/graph_adjacency_matrix.py | 608 +++++++++++++++++++++++++++++++ graphs/graph_matrix.py | 24 -- graphs/tests/__init__.py | 0 4 files changed, 1197 insertions(+), 24 deletions(-) create mode 100644 graphs/graph_adjacency_list.py create mode 100644 graphs/graph_adjacency_matrix.py delete mode 100644 graphs/graph_matrix.py create mode 100644 graphs/tests/__init__.py diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py new file mode 100644 index 000000000..76f34f845 --- /dev/null +++ b/graphs/graph_adjacency_list.py @@ -0,0 +1,589 @@ +#!/usr/bin/env python3 +""" +Author: Vikram Nithyanandam + +Description: +The following implementation is a robust unweighted Graph data structure +implemented using an adjacency list. This vertices and edges of this graph can be +effectively initialized and modified while storing your chosen generic +value in each vertex. + +Adjacency List: https://en.wikipedia.org/wiki/Adjacency_list + +Potential Future Ideas: +- Add a flag to set edge weights on and set edge weights +- Make edge weights and vertex values customizable to store whatever the client wants +- Support multigraph functionality if the client wants it +""" +from __future__ import annotations + +import random +import unittest +from pprint import pformat +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class GraphAdjacencyList(Generic[T]): + def __init__( + self, vertices: list[T], edges: list[list[T]], directed: bool = True + ) -> None: + """ + Parameters: + - vertices: (list[T]) The list of vertex names the client wants to + pass in. Default is empty. 
+ - edges: (list[list[T]]) The list of edges the client wants to + pass in. Each edge is a 2-element list. Default is empty. + - directed: (bool) Indicates if graph is directed or undirected. + Default is True. + """ + self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T + self.directed = directed + + # Falsey checks + edges = edges or [] + vertices = vertices or [] + + for vertex in vertices: + self.add_vertex(vertex) + + for edge in edges: + if len(edge) != 2: + msg = f"Invalid input: {edge} is the wrong length." + raise ValueError(msg) + self.add_edge(edge[0], edge[1]) + + def add_vertex(self, vertex: T) -> None: + """ + Adds a vertex to the graph. If the given vertex already exists, + a ValueError will be thrown. + """ + if self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} is already in the graph." + raise ValueError(msg) + self.adj_list[vertex] = [] + + def add_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Creates an edge from source vertex to destination vertex. If any + given vertex doesn't exist or the edge already exists, a ValueError + will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge already exists between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # add the destination vertex to the list associated with the source vertex + # and vice versa if not directed + self.adj_list[source_vertex].append(destination_vertex) + if not self.directed: + self.adj_list[destination_vertex].append(source_vertex) + + def remove_vertex(self, vertex: T) -> None: + """ + Removes the given vertex from the graph and deletes all incoming and + outgoing edges from the given vertex as well. If the given vertex + does not exist, a ValueError will be thrown. + """ + if not self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} does not exist in this graph." + raise ValueError(msg) + + if not self.directed: + # If not directed, find all neighboring vertices and delete all references + # of edges connecting to the given vertex + for neighbor in self.adj_list[vertex]: + self.adj_list[neighbor].remove(vertex) + else: + # If directed, search all neighbors of all vertices and delete all + # references of edges connecting to the given vertex + for edge_list in self.adj_list.values(): + if vertex in edge_list: + edge_list.remove(vertex) + + # Finally, delete the given vertex and all of its outgoing edge references + self.adj_list.pop(vertex) + + def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Removes the edge between the two vertices. If any given vertex + doesn't exist or the edge does not exist, a ValueError will be thrown. 
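Both mutators fail loudly rather than silently: unknown vertices and missing or duplicate edges all surface as ValueError, so callers can wrap mutations in try/except. A small sketch of that contract (it assumes the repository root is on sys.path so that the module path from the diff header, graphs/graph_adjacency_list.py, is importable):

from graphs.graph_adjacency_list import GraphAdjacencyList

graph = GraphAdjacencyList(vertices=["a", "b"], edges=[["a", "b"]], directed=False)

try:
    graph.remove_edge("a", "z")  # "z" was never added as a vertex
except ValueError as error:
    print(error)  # Incorrect input: Either a or z does not exist

try:
    graph.add_edge("a", "b")  # this edge is already present
except ValueError as error:
    print(error)  # Incorrect input: The edge already exists between a and b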
+ """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if not self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge does NOT exist between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # remove the destination vertex from the list associated with the source + # vertex and vice versa if not directed + self.adj_list[source_vertex].remove(destination_vertex) + if not self.directed: + self.adj_list[destination_vertex].remove(source_vertex) + + def contains_vertex(self, vertex: T) -> bool: + """ + Returns True if the graph contains the vertex, False otherwise. + """ + return vertex in self.adj_list + + def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: + """ + Returns True if the graph contains the edge from the source_vertex to the + destination_vertex, False otherwise. If any given vertex doesn't exist, a + ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} " + f"or {destination_vertex} does not exist." + ) + raise ValueError(msg) + + return destination_vertex in self.adj_list[source_vertex] + + def clear_graph(self) -> None: + """ + Clears all vertices and edges. + """ + self.adj_list = {} + + def __repr__(self) -> str: + return pformat(self.adj_list) + + +class TestGraphAdjacencyList(unittest.TestCase): + def __assert_graph_edge_exists_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + edge: list[int], + ) -> None: + self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_edge_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + edge: list[int], + ) -> None: + self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_vertex_exists_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + vertex: int, + ) -> None: + self.assertTrue(undirected_graph.contains_vertex(vertex)) + self.assertTrue(directed_graph.contains_vertex(vertex)) + + def __assert_graph_vertex_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + vertex: int, + ) -> None: + self.assertFalse(undirected_graph.contains_vertex(vertex)) + self.assertFalse(directed_graph.contains_vertex(vertex)) + + def __generate_random_edges( + self, vertices: list[int], edge_pick_count: int + ) -> list[list[int]]: + self.assertTrue(edge_pick_count <= len(vertices)) + + random_source_vertices: list[int] = random.sample( + vertices[0 : int(len(vertices) / 2)], edge_pick_count + ) + random_destination_vertices: list[int] = random.sample( + vertices[int(len(vertices) / 2) :], edge_pick_count + ) + random_edges: list[list[int]] = [] + + for source in random_source_vertices: + for dest in random_destination_vertices: + random_edges.append([source, dest]) + + return random_edges + + def __generate_graphs( + self, 
vertex_count: int, min_val: int, max_val: int, edge_pick_count: int + ) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]: + if max_val - min_val + 1 < vertex_count: + raise ValueError( + "Will result in duplicate vertices. Either increase range " + "between min_val and max_val or decrease vertex count." + ) + + # generate graph input + random_vertices: list[int] = random.sample( + range(min_val, max_val + 1), vertex_count + ) + random_edges: list[list[int]] = self.__generate_random_edges( + random_vertices, edge_pick_count + ) + + # build graphs + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=random_edges, directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=random_edges, directed=True + ) + + return undirected_graph, directed_graph, random_vertices, random_edges + + def test_init_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # test graph initialization with vertices and edges + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + self.assertFalse(undirected_graph.directed) + self.assertTrue(directed_graph.directed) + + def test_contains_vertex(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # Build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # Test contains_vertex + for num in range(101): + self.assertEqual( + num in random_vertices, undirected_graph.contains_vertex(num) + ) + self.assertEqual( + num in random_vertices, directed_graph.contains_vertex(num) + ) + + def test_add_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build empty graphs + undirected_graph: GraphAdjacencyList = GraphAdjacencyList( + vertices=[], edges=[], directed=False + ) + directed_graph: GraphAdjacencyList = GraphAdjacencyList( + vertices=[], edges=[], directed=True + ) + + # run add_vertex + for num in random_vertices: + undirected_graph.add_vertex(num) + + for num in random_vertices: + directed_graph.add_vertex(num) + + # test add_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + def test_remove_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # test remove_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + undirected_graph.remove_vertex(num) + directed_graph.remove_vertex(num) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, num + ) + + def test_add_and_remove_vertices_repeatedly(self) -> None: + random_vertices1: list[int] = random.sample(range(51), 20) + random_vertices2: list[int] = random.sample(range(51, 101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices1, edges=[], 
directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices1, edges=[], directed=True + ) + + # test adding and removing vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.add_vertex(random_vertices2[i]) + directed_graph.add_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + undirected_graph.remove_vertex(random_vertices1[i]) + directed_graph.remove_vertex(random_vertices1[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices1[i] + ) + + # remove all vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.remove_vertex(random_vertices2[i]) + directed_graph.remove_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + def test_contains_edge(self) -> None: + # generate graphs and graph input + vertex_count = 20 + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(vertex_count, 0, 100, 4) + + # generate all possible edges for testing + all_possible_edges: list[list[int]] = [] + for i in range(vertex_count - 1): + for j in range(i + 1, vertex_count): + all_possible_edges.append([random_vertices[i], random_vertices[j]]) + all_possible_edges.append([random_vertices[j], random_vertices[i]]) + + # test contains_edge function + for edge in all_possible_edges: + if edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + elif [edge[1], edge[0]] in random_edges: + # since this edge exists for undirected but the reverse + # may not exist for directed + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, [edge[1], edge[0]] + ) + else: + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_edge(self) -> None: + # generate graph input + random_vertices: list[int] = random.sample(range(101), 15) + random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # run and test add_edge + for edge in random_edges: + undirected_graph.add_edge(edge[0], edge[1]) + directed_graph.add_edge(edge[0], edge[1]) + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + def test_remove_edge(self) -> None: + # generate graph input and graphs + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # run and test remove_edge + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + undirected_graph.remove_edge(edge[0], edge[1]) + directed_graph.remove_edge(edge[0], edge[1]) + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_and_remove_edges_repeatedly(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # make some more edge options! 
+ more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for i, _ in enumerate(random_edges): + undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, more_random_edges[i] + ) + + undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, random_edges[i] + ) + + def test_add_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.add_vertex(vertex) + with self.assertRaises(ValueError): + directed_graph.add_vertex(vertex) + + def test_remove_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for i in range(101): + if i not in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.remove_vertex(i) + with self.assertRaises(ValueError): + directed_graph.remove_vertex(i) + + def test_add_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for edge in random_edges: + with self.assertRaises(ValueError): + undirected_graph.add_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.add_edge(edge[0], edge[1]) + + def test_remove_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for edge in more_random_edges: + with self.assertRaises(ValueError): + undirected_graph.remove_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.remove_edge(edge[0], edge[1]) + + def test_contains_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.contains_edge(vertex, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(vertex, 102) + + with self.assertRaises(ValueError): + undirected_graph.contains_edge(103, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(103, 102) + + +if __name__ == "__main__": + unittest.main() diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py new file mode 100644 index 000000000..4d2e02f73 --- /dev/null +++ b/graphs/graph_adjacency_matrix.py @@ -0,0 +1,608 @@ 
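Before the adjacency-matrix file body starts, a minimal usage sketch of the GraphAdjacencyList class added above (the import path mirrors the diff header and assumes the repository root is on sys.path):

from graphs.graph_adjacency_list import GraphAdjacencyList

# Undirected graph on three vertices with two initial edges
graph = GraphAdjacencyList(vertices=[1, 2, 3], edges=[[1, 2], [2, 3]], directed=False)
graph.add_vertex(4)
graph.add_edge(3, 4)

print(graph.contains_edge(1, 2))  # True
print(graph.contains_edge(2, 1))  # True as well, because the graph is undirected
print(graph)                      # {1: [2], 2: [1, 3], 3: [2, 4], 4: [3]}

graph.remove_vertex(2)            # also drops the 1-2 and 2-3 edges
print(graph.contains_vertex(2))   # False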
+#!/usr/bin/env python3 +""" +Author: Vikram Nithyanandam + +Description: +The following implementation is a robust unweighted Graph data structure +implemented using an adjacency matrix. This vertices and edges of this graph can be +effectively initialized and modified while storing your chosen generic +value in each vertex. + +Adjacency Matrix: https://mathworld.wolfram.com/AdjacencyMatrix.html + +Potential Future Ideas: +- Add a flag to set edge weights on and set edge weights +- Make edge weights and vertex values customizable to store whatever the client wants +- Support multigraph functionality if the client wants it +""" +from __future__ import annotations + +import random +import unittest +from pprint import pformat +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class GraphAdjacencyMatrix(Generic[T]): + def __init__( + self, vertices: list[T], edges: list[list[T]], directed: bool = True + ) -> None: + """ + Parameters: + - vertices: (list[T]) The list of vertex names the client wants to + pass in. Default is empty. + - edges: (list[list[T]]) The list of edges the client wants to + pass in. Each edge is a 2-element list. Default is empty. + - directed: (bool) Indicates if graph is directed or undirected. + Default is True. + """ + self.directed = directed + self.vertex_to_index: dict[T, int] = {} + self.adj_matrix: list[list[int]] = [] + + # Falsey checks + edges = edges or [] + vertices = vertices or [] + + for vertex in vertices: + self.add_vertex(vertex) + + for edge in edges: + if len(edge) != 2: + msg = f"Invalid input: {edge} must have length 2." + raise ValueError(msg) + self.add_edge(edge[0], edge[1]) + + def add_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Creates an edge from source vertex to destination vertex. If any + given vertex doesn't exist or the edge already exists, a ValueError + will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge already exists between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # Get the indices of the corresponding vertices and set their edge value to 1. + u: int = self.vertex_to_index[source_vertex] + v: int = self.vertex_to_index[destination_vertex] + self.adj_matrix[u][v] = 1 + if not self.directed: + self.adj_matrix[v][u] = 1 + + def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Removes the edge between the two vertices. If any given vertex + doesn't exist or the edge does not exist, a ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if not self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge does NOT exist between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # Get the indices of the corresponding vertices and set their edge value to 0. 
+ u: int = self.vertex_to_index[source_vertex] + v: int = self.vertex_to_index[destination_vertex] + self.adj_matrix[u][v] = 0 + if not self.directed: + self.adj_matrix[v][u] = 0 + + def add_vertex(self, vertex: T) -> None: + """ + Adds a vertex to the graph. If the given vertex already exists, + a ValueError will be thrown. + """ + if self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} already exists in this graph." + raise ValueError(msg) + + # build column for vertex + for row in self.adj_matrix: + row.append(0) + + # build row for vertex and update other data structures + self.adj_matrix.append([0] * (len(self.adj_matrix) + 1)) + self.vertex_to_index[vertex] = len(self.adj_matrix) - 1 + + def remove_vertex(self, vertex: T) -> None: + """ + Removes the given vertex from the graph and deletes all incoming and + outgoing edges from the given vertex as well. If the given vertex + does not exist, a ValueError will be thrown. + """ + if not self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} does not exist in this graph." + raise ValueError(msg) + + # first slide up the rows by deleting the row corresponding to + # the vertex being deleted. + start_index = self.vertex_to_index[vertex] + self.adj_matrix.pop(start_index) + + # next, slide the columns to the left by deleting the values in + # the column corresponding to the vertex being deleted + for lst in self.adj_matrix: + lst.pop(start_index) + + # final clean up + self.vertex_to_index.pop(vertex) + + # decrement indices for vertices shifted by the deleted vertex in the adj matrix + for vertex in self.vertex_to_index: + if self.vertex_to_index[vertex] >= start_index: + self.vertex_to_index[vertex] = self.vertex_to_index[vertex] - 1 + + def contains_vertex(self, vertex: T) -> bool: + """ + Returns True if the graph contains the vertex, False otherwise. + """ + return vertex in self.vertex_to_index + + def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: + """ + Returns True if the graph contains the edge from the source_vertex to the + destination_vertex, False otherwise. If any given vertex doesn't exist, a + ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} " + f"or {destination_vertex} does not exist." + ) + raise ValueError(msg) + + u = self.vertex_to_index[source_vertex] + v = self.vertex_to_index[destination_vertex] + return self.adj_matrix[u][v] == 1 + + def clear_graph(self) -> None: + """ + Clears all vertices and edges. 
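The interplay between vertex_to_index and adj_matrix is easiest to see on a tiny graph; a short sketch of the internal state after a few operations (attribute access is for illustration only, and the import assumes the repository root is on sys.path):

from graphs.graph_adjacency_matrix import GraphAdjacencyMatrix

graph = GraphAdjacencyMatrix(vertices=["a", "b", "c"], edges=[["a", "b"]], directed=False)
print(graph.vertex_to_index)  # {'a': 0, 'b': 1, 'c': 2}
print(graph.adj_matrix)       # [[0, 1, 0], [1, 0, 0], [0, 0, 0]]

graph.remove_vertex("a")      # row 0 and column 0 disappear; remaining indices shift down
print(graph.vertex_to_index)  # {'b': 0, 'c': 1}
print(graph.adj_matrix)       # [[0, 0], [0, 0]]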
+ """ + self.vertex_to_index = {} + self.adj_matrix = [] + + def __repr__(self) -> str: + first = "Adj Matrix:\n" + pformat(self.adj_matrix) + second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index) + return first + second + + +class TestGraphMatrix(unittest.TestCase): + def __assert_graph_edge_exists_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + edge: list[int], + ) -> None: + self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_edge_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + edge: list[int], + ) -> None: + self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_vertex_exists_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + vertex: int, + ) -> None: + self.assertTrue(undirected_graph.contains_vertex(vertex)) + self.assertTrue(directed_graph.contains_vertex(vertex)) + + def __assert_graph_vertex_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + vertex: int, + ) -> None: + self.assertFalse(undirected_graph.contains_vertex(vertex)) + self.assertFalse(directed_graph.contains_vertex(vertex)) + + def __generate_random_edges( + self, vertices: list[int], edge_pick_count: int + ) -> list[list[int]]: + self.assertTrue(edge_pick_count <= len(vertices)) + + random_source_vertices: list[int] = random.sample( + vertices[0 : int(len(vertices) / 2)], edge_pick_count + ) + random_destination_vertices: list[int] = random.sample( + vertices[int(len(vertices) / 2) :], edge_pick_count + ) + random_edges: list[list[int]] = [] + + for source in random_source_vertices: + for dest in random_destination_vertices: + random_edges.append([source, dest]) + + return random_edges + + def __generate_graphs( + self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int + ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]: + if max_val - min_val + 1 < vertex_count: + raise ValueError( + "Will result in duplicate vertices. 
Either increase " + "range between min_val and max_val or decrease vertex count" + ) + + # generate graph input + random_vertices: list[int] = random.sample( + range(min_val, max_val + 1), vertex_count + ) + random_edges: list[list[int]] = self.__generate_random_edges( + random_vertices, edge_pick_count + ) + + # build graphs + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=random_edges, directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=random_edges, directed=True + ) + + return undirected_graph, directed_graph, random_vertices, random_edges + + def test_init_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # test graph initialization with vertices and edges + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + self.assertFalse(undirected_graph.directed) + self.assertTrue(directed_graph.directed) + + def test_contains_vertex(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # Build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # Test contains_vertex + for num in range(101): + self.assertEqual( + num in random_vertices, undirected_graph.contains_vertex(num) + ) + self.assertEqual( + num in random_vertices, directed_graph.contains_vertex(num) + ) + + def test_add_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build empty graphs + undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( + vertices=[], edges=[], directed=False + ) + directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( + vertices=[], edges=[], directed=True + ) + + # run add_vertex + for num in random_vertices: + undirected_graph.add_vertex(num) + + for num in random_vertices: + directed_graph.add_vertex(num) + + # test add_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + def test_remove_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # test remove_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + undirected_graph.remove_vertex(num) + directed_graph.remove_vertex(num) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, num + ) + + def test_add_and_remove_vertices_repeatedly(self) -> None: + random_vertices1: list[int] = random.sample(range(51), 20) + random_vertices2: list[int] = random.sample(range(51, 101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices1, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices1, edges=[], directed=True + ) + + # test adding and removing vertices + for i, _ in enumerate(random_vertices1): + 
undirected_graph.add_vertex(random_vertices2[i]) + directed_graph.add_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + undirected_graph.remove_vertex(random_vertices1[i]) + directed_graph.remove_vertex(random_vertices1[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices1[i] + ) + + # remove all vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.remove_vertex(random_vertices2[i]) + directed_graph.remove_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + def test_contains_edge(self) -> None: + # generate graphs and graph input + vertex_count = 20 + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(vertex_count, 0, 100, 4) + + # generate all possible edges for testing + all_possible_edges: list[list[int]] = [] + for i in range(vertex_count - 1): + for j in range(i + 1, vertex_count): + all_possible_edges.append([random_vertices[i], random_vertices[j]]) + all_possible_edges.append([random_vertices[j], random_vertices[i]]) + + # test contains_edge function + for edge in all_possible_edges: + if edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + elif [edge[1], edge[0]] in random_edges: + # since this edge exists for undirected but the reverse may + # not exist for directed + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, [edge[1], edge[0]] + ) + else: + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_edge(self) -> None: + # generate graph input + random_vertices: list[int] = random.sample(range(101), 15) + random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # run and test add_edge + for edge in random_edges: + undirected_graph.add_edge(edge[0], edge[1]) + directed_graph.add_edge(edge[0], edge[1]) + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + def test_remove_edge(self) -> None: + # generate graph input and graphs + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # run and test remove_edge + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + undirected_graph.remove_edge(edge[0], edge[1]) + directed_graph.remove_edge(edge[0], edge[1]) + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_and_remove_edges_repeatedly(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # make some more edge options! 
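Both classes expose the same interface, so choosing between them is mostly a question of access pattern: the matrix answers contains_edge with a single cell read but always stores |V| x |V| entries, while the list stores only the edges that exist and scans one neighbour list per query. A rough way to see the difference, sketched under the assumption that both modules are importable from the repository root (the star graph is deliberately degree-heavy):

import timeit

setup = """
from graphs.graph_adjacency_list import GraphAdjacencyList
from graphs.graph_adjacency_matrix import GraphAdjacencyMatrix

vertices = list(range(2000))
edges = [[0, i] for i in range(1, 2000)]  # a star centred on vertex 0
as_list = GraphAdjacencyList(vertices, edges, directed=False)
as_matrix = GraphAdjacencyMatrix(vertices, edges, directed=False)
"""

# The list scans vertex 0's ~2000-entry neighbour list; the matrix reads one cell.
print(timeit.timeit("as_list.contains_edge(0, 1999)", setup=setup, number=10_000))
print(timeit.timeit("as_matrix.contains_edge(0, 1999)", setup=setup, number=10_000))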
+ more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for i, _ in enumerate(random_edges): + undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, more_random_edges[i] + ) + + undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, random_edges[i] + ) + + def test_add_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.add_vertex(vertex) + with self.assertRaises(ValueError): + directed_graph.add_vertex(vertex) + + def test_remove_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for i in range(101): + if i not in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.remove_vertex(i) + with self.assertRaises(ValueError): + directed_graph.remove_vertex(i) + + def test_add_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for edge in random_edges: + with self.assertRaises(ValueError): + undirected_graph.add_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.add_edge(edge[0], edge[1]) + + def test_remove_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for edge in more_random_edges: + with self.assertRaises(ValueError): + undirected_graph.remove_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.remove_edge(edge[0], edge[1]) + + def test_contains_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.contains_edge(vertex, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(vertex, 102) + + with self.assertRaises(ValueError): + undirected_graph.contains_edge(103, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(103, 102) + + +if __name__ == "__main__": + unittest.main() diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py deleted file mode 100644 index 4adc6c0bb..000000000 --- a/graphs/graph_matrix.py +++ /dev/null @@ -1,24 +0,0 @@ -class Graph: - def __init__(self, 
vertex): - self.vertex = vertex - self.graph = [[0] * vertex for i in range(vertex)] - - def add_edge(self, u, v): - self.graph[u - 1][v - 1] = 1 - self.graph[v - 1][u - 1] = 1 - - def show(self): - for i in self.graph: - for j in i: - print(j, end=" ") - print(" ") - - -g = Graph(100) - -g.add_edge(1, 4) -g.add_edge(4, 2) -g.add_edge(4, 5) -g.add_edge(2, 5) -g.add_edge(5, 3) -g.show() diff --git a/graphs/tests/__init__.py b/graphs/tests/__init__.py new file mode 100644 index 000000000..e69de29bb From 3a9e5fa5ecea0df54ed3ffdcb74f46171199f552 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 2 Jun 2023 17:14:25 +1200 Subject: [PATCH 343/368] Create a Simultaneous Equation Solver Algorithm (#8773) * Added simultaneous_linear_equation_solver.py * Removed Augment class, replaced with recursive functions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed edge cases * Update settings.json --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .vscode/settings.json | 5 + maths/simultaneous_linear_equation_solver.py | 142 +++++++++++++++++++ 2 files changed, 147 insertions(+) create mode 100644 .vscode/settings.json create mode 100644 maths/simultaneous_linear_equation_solver.py diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..ef16fa1aa --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "githubPullRequests.ignoredPullRequestBranches": [ + "master" + ] +} diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py new file mode 100644 index 000000000..1287b2002 --- /dev/null +++ b/maths/simultaneous_linear_equation_solver.py @@ -0,0 +1,142 @@ +""" +https://en.wikipedia.org/wiki/Augmented_matrix + +This algorithm solves simultaneous linear equations of the form +λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ] +Where λ & γ are individual coefficients, the no. of equations = no. 
of coefficients - 1 + +Note in order to work there must exist 1 equation where all instances of λ and γ != 0 +""" + + +def simplify(current_set: list[list]) -> list[list]: + """ + >>> simplify([[1, 2, 3], [4, 5, 6]]) + [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]] + >>> simplify([[5, 2, 5], [5, 1, 10]]) + [[1.0, 0.4, 1.0], [0.0, 0.2, -1.0]] + """ + # Divide each row by magnitude of first term --> creates 'unit' matrix + duplicate_set = current_set.copy() + for row_index, row in enumerate(duplicate_set): + magnitude = row[0] + for column_index, column in enumerate(row): + if magnitude == 0: + current_set[row_index][column_index] = column + continue + current_set[row_index][column_index] = column / magnitude + # Subtract to cancel term + first_row = current_set[0] + final_set = [first_row] + current_set = current_set[1::] + for row in current_set: + temp_row = [] + # If first term is 0, it is already in form we want, so we preserve it + if row[0] == 0: + final_set.append(row) + continue + for column_index in range(len(row)): + temp_row.append(first_row[column_index] - row[column_index]) + final_set.append(temp_row) + # Create next recursion iteration set + if len(final_set[0]) != 3: + current_first_row = final_set[0] + current_first_column = [] + next_iteration = [] + for row in final_set[1::]: + current_first_column.append(row[0]) + next_iteration.append(row[1::]) + resultant = simplify(next_iteration) + for i in range(len(resultant)): + resultant[i].insert(0, current_first_column[i]) + resultant.insert(0, current_first_row) + final_set = resultant + return final_set + + +def solve_simultaneous(equations: list[list]) -> list: + """ + >>> solve_simultaneous([[1, 2, 3],[4, 5, 6]]) + [-1.0, 2.0] + >>> solve_simultaneous([[0, -3, 1, 7],[3, 2, -1, 11],[5, 1, -2, 12]]) + [6.4, 1.2, 10.6] + >>> solve_simultaneous([]) + Traceback (most recent call last): + ... + IndexError: solve_simultaneous() requires n lists of length n+1 + >>> solve_simultaneous([[1, 2, 3],[1, 2]]) + Traceback (most recent call last): + ... + IndexError: solve_simultaneous() requires n lists of length n+1 + >>> solve_simultaneous([[1, 2, 3],["a", 7, 8]]) + Traceback (most recent call last): + ... + ValueError: solve_simultaneous() requires lists of integers + >>> solve_simultaneous([[0, 2, 3],[4, 0, 6]]) + Traceback (most recent call last): + ... 
+ ValueError: solve_simultaneous() requires at least 1 full equation + """ + if len(equations) == 0: + raise IndexError("solve_simultaneous() requires n lists of length n+1") + _length = len(equations) + 1 + if any(len(item) != _length for item in equations): + raise IndexError("solve_simultaneous() requires n lists of length n+1") + for row in equations: + if any(not isinstance(column, (int, float)) for column in row): + raise ValueError("solve_simultaneous() requires lists of integers") + if len(equations) == 1: + return [equations[0][-1] / equations[0][0]] + data_set = equations.copy() + if any(0 in row for row in data_set): + temp_data = data_set.copy() + full_row = [] + for row_index, row in enumerate(temp_data): + if 0 not in row: + full_row = data_set.pop(row_index) + break + if not full_row: + raise ValueError("solve_simultaneous() requires at least 1 full equation") + data_set.insert(0, full_row) + useable_form = data_set.copy() + simplified = simplify(useable_form) + simplified = simplified[::-1] + solutions: list = [] + for row in simplified: + current_solution = row[-1] + if not solutions: + if row[-2] == 0: + solutions.append(0) + continue + solutions.append(current_solution / row[-2]) + continue + temp_row = row.copy()[: len(row) - 1 :] + while temp_row[0] == 0: + temp_row.pop(0) + if len(temp_row) == 0: + solutions.append(0) + continue + temp_row = temp_row[1::] + temp_row = temp_row[::-1] + for column_index, column in enumerate(temp_row): + current_solution -= column * solutions[column_index] + solutions.append(current_solution) + final = [] + for item in solutions: + final.append(float(round(item, 5))) + return final[::-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + eq = [ + [2, 1, 1, 1, 1, 4], + [1, 2, 1, 1, 1, 5], + [1, 1, 2, 1, 1, 6], + [1, 1, 1, 2, 1, 7], + [1, 1, 1, 1, 2, 8], + ] + print(solve_simultaneous(eq)) + print(solve_simultaneous([[4, 2]])) From 80d95fccc390d366a9f617d8628a546a7be7b2a3 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 3 Jun 2023 17:16:33 +0100 Subject: [PATCH 344/368] Pytest locally fails due to API_KEY env variable (#8738) * fix: Pytest locally fails due to API_KEY env variable (#8737) * chore: Fix ruff errors --- web_programming/currency_converter.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 69f2a2c4d..3bbcafa8f 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -8,13 +8,7 @@ import os import requests URL_BASE = "https://www.amdoren.com/api/currency.php" -TESTING = os.getenv("CI", "") -API_KEY = os.getenv("AMDOREN_API_KEY", "") -if not API_KEY and not TESTING: - raise KeyError( - "API key must be provided in the 'AMDOREN_API_KEY' environment variable." 
- )  # Currency and their description list_of_currencies = """ @@ -175,20 +169,31 @@ ZMW Zambian Kwacha def convert_currency( - from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY + from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = "" ) -> str: """https://www.amdoren.com/currency-api/""" + # Instead of manually generating parameters params = locals() + # from is a reserved keyword params["from"] = params.pop("from_") res = requests.get(URL_BASE, params=params).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] if __name__ == "__main__": + TESTING = os.getenv("CI", "") + API_KEY = os.getenv("AMDOREN_API_KEY", "") + + if not API_KEY and not TESTING: + raise KeyError( + "API key must be provided in the 'AMDOREN_API_KEY' environment variable." + ) + print( convert_currency( input("Enter from currency: ").strip(), input("Enter to currency: ").strip(), float(input("Enter the amount: ").strip()), + API_KEY, ) ) From fa12b9a286bf42d250b30a772e8f226dc14953f4 Mon Sep 17 00:00:00 2001 From: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Date: Wed, 7 Jun 2023 23:47:27 +0200 Subject: [PATCH 345/368] Speed of sound (#8803) * Create TestShiva * Delete TestShiva * Add speed of sound * Update physics/speed_of_sound.py Co-authored-by: Christian Clauss * Update physics/speed_of_sound.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_of_sound.py * Update speed_of_sound.py --------- Co-authored-by: jlhuhn <134317018+jlhuhn@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speed_of_sound.py | 52 +++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 physics/speed_of_sound.py diff --git a/physics/speed_of_sound.py b/physics/speed_of_sound.py new file mode 100644 index 000000000..a4658366a --- /dev/null +++ b/physics/speed_of_sound.py @@ -0,0 +1,52 @@ +""" +Title : Calculating the speed of sound + +Description : + The speed of sound (c) is the distance that a sound wave travels + per unit time (m/s). During propagation, the sound wave propagates + through an elastic medium. Its SI unit is meter per second (m/s). + + Only longitudinal waves can propagate in liquids and gases; in + solids they also travel as transverse waves. The following algorithm + calculates the speed of sound in a fluid depending on the bulk + modulus and the density of the fluid. 
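Since the function below evaluates c = (K_s / p)**0.5, the relation can be inverted, K_s = c**2 * p, which gives a quick plain-Python sanity check of the water doctest value quoted further down (only those two numbers are assumed):

speed_in_water = 1467.7563207952705  # m/s, the doctest result below
density_of_water = 998               # kg/m^3
print(f"{speed_in_water**2 * density_of_water:.3e}")  # 2.150e+09 Pa, i.e. the 2.15 GPa input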
+ + Equation for calculating the speed of sound in a fluid: + c_fluid = (K_s / p)**0.5 + + c_fluid: speed of sound in fluid + K_s: isentropic bulk modulus + p: density of fluid + + + +Source : https://en.wikipedia.org/wiki/Speed_of_sound +""" + + +def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float: + """ + This method calculates the speed of sound in a fluid - + it is calculated from the other two provided values + Examples: + Example 1 --> Water at 20°C: bulk_modulus = 2.15 GPa, density = 998 kg/m³ + Example 2 --> Mercury at 20°C: bulk_modulus = 28.5 GPa, density = 13600 kg/m³ + + >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15*10**9, density=998) + 1467.7563207952705 + >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5*10**9, density=13600) + 1447.614670861731 + """ + + if density <= 0: + raise ValueError("Impossible fluid density") + if bulk_modulus <= 0: + raise ValueError("Impossible bulk modulus") + + return (bulk_modulus / density) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7775de0ef779a28cec7d9f28af97a89b2bc29d7e Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 8 Jun 2023 13:40:38 +0100 Subject: [PATCH 346/368] Create number container system algorithm (#8808) * feat: Create number container system algorithm * updating DIRECTORY.md * chore: Fix failing tests * Update other/number_container_system.py Co-authored-by: Christian Clauss * Update other/number_container_system.py Co-authored-by: Christian Clauss * Update other/number_container_system.py Co-authored-by: Christian Clauss * chore: Add more tests * chore: Create binary_search_insert failing test * type: Update typehints to accept str, list and range --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 +- other/number_container_system.py | 180 +++++++++++++++++++++++++++ 2 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 other/number_container_system.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 231b0e2f1..6dac4a9a5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -419,8 +419,9 @@ * [Frequent Pattern Graph Miner](graphs/frequent_pattern_graph_miner.py) * [G Topological Sort](graphs/g_topological_sort.py) * [Gale Shapley Bigraph](graphs/gale_shapley_bigraph.py) + * [Graph Adjacency List](graphs/graph_adjacency_list.py) + * [Graph Adjacency Matrix](graphs/graph_adjacency_matrix.py) * [Graph List](graphs/graph_list.py) - * [Graph Matrix](graphs/graph_matrix.py) * [Graphs Floyd Warshall](graphs/graphs_floyd_warshall.py) * [Greedy Best First](graphs/greedy_best_first.py) * [Greedy Min Vertex Cover](graphs/greedy_min_vertex_cover.py) @@ -479,6 +480,7 @@ * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) + * [Rank Of Matrix](linear_algebra/src/rank_of_matrix.py) * [Rayleigh Quotient](linear_algebra/src/rayleigh_quotient.py) * [Schur Complement](linear_algebra/src/schur_complement.py) * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) @@ -651,6 +653,7 @@ * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) + * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py) * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) @@ -726,6 +729,7 @@ * [Maximum Subarray](other/maximum_subarray.py) * [Maximum 
Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) + * [Number Container System](other/number_container_system.py) * [Password](other/password.py) * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) diff --git a/other/number_container_system.py b/other/number_container_system.py new file mode 100644 index 000000000..f547bc8a2 --- /dev/null +++ b/other/number_container_system.py @@ -0,0 +1,180 @@ +""" +A number container system that uses binary search to delete and insert values into +arrays with O(n logn) write times and O(1) read times. + +This container system holds integers at indexes. + +Further explained in this leetcode problem +> https://leetcode.com/problems/minimum-cost-tree-from-leaf-values +""" + + +class NumberContainer: + def __init__(self) -> None: + # numbermap keys are the number and its values are lists of indexes sorted + # in ascending order + self.numbermap: dict[int, list[int]] = {} + # indexmap keys are an index and it's values are the number at that index + self.indexmap: dict[int, int] = {} + + def binary_search_delete(self, array: list | str | range, item: int) -> list[int]: + """ + Removes the item from the sorted array and returns + the new array. + + >>> NumberContainer().binary_search_delete([1,2,3], 2) + [1, 3] + >>> NumberContainer().binary_search_delete([0, 0, 0], 0) + [0, 0] + >>> NumberContainer().binary_search_delete([-1, -1, -1], -1) + [-1, -1] + >>> NumberContainer().binary_search_delete([-1, 0], 0) + [-1] + >>> NumberContainer().binary_search_delete([-1, 0], -1) + [0] + >>> NumberContainer().binary_search_delete(range(7), 3) + [0, 1, 2, 4, 5, 6] + >>> NumberContainer().binary_search_delete([1.1, 2.2, 3.3], 2.2) + [1.1, 3.3] + >>> NumberContainer().binary_search_delete("abcde", "c") + ['a', 'b', 'd', 'e'] + >>> NumberContainer().binary_search_delete([0, -1, 2, 4], 0) + Traceback (most recent call last): + ... + ValueError: Either the item is not in the array or the array was unsorted + >>> NumberContainer().binary_search_delete([2, 0, 4, -1, 11], -1) + Traceback (most recent call last): + ... + ValueError: Either the item is not in the array or the array was unsorted + >>> NumberContainer().binary_search_delete(125, 1) + Traceback (most recent call last): + ... + TypeError: binary_search_delete() only accepts either a list, range or str + """ + if isinstance(array, (range, str)): + array = list(array) + elif not isinstance(array, list): + raise TypeError( + "binary_search_delete() only accepts either a list, range or str" + ) + + low = 0 + high = len(array) - 1 + + while low <= high: + mid = (low + high) // 2 + if array[mid] == item: + array.pop(mid) + return array + elif array[mid] < item: + low = mid + 1 + else: + high = mid - 1 + raise ValueError( + "Either the item is not in the array or the array was unsorted" + ) + + def binary_search_insert(self, array: list | str | range, index: int) -> list[int]: + """ + Inserts the index into the sorted array + at the correct position. 
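These two helpers hand-roll what the standard library's bisect module already provides; a functionally similar sketch using bisect, shown only for comparison (the patch keeps its own binary search, and duplicate handling matters little here because each index list holds unique indices):

import bisect


def bisect_insert(array: list[int], item: int) -> list[int]:
    # insert to the right of any equal items, mirroring binary_search_insert
    bisect.insort_right(array, item)
    return array


def bisect_delete(array: list[int], item: int) -> list[int]:
    index = bisect.bisect_left(array, item)
    if index == len(array) or array[index] != item:
        raise ValueError("Either the item is not in the array or the array was unsorted")
    array.pop(index)
    return array


print(bisect_insert([0, 1, 3], 2))  # [0, 1, 2, 3]
print(bisect_delete([1, 2, 3], 2))  # [1, 3]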
+ + >>> NumberContainer().binary_search_insert([1,2,3], 2) + [1, 2, 2, 3] + >>> NumberContainer().binary_search_insert([0,1,3], 2) + [0, 1, 2, 3] + >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 103], 51) + [-5, -3, 0, 0, 11, 51, 103] + >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 100, 103], 101) + [-5, -3, 0, 0, 11, 100, 101, 103] + >>> NumberContainer().binary_search_insert(range(10), 4) + [0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9] + >>> NumberContainer().binary_search_insert("abd", "c") + ['a', 'b', 'c', 'd'] + >>> NumberContainer().binary_search_insert(131, 23) + Traceback (most recent call last): + ... + TypeError: binary_search_insert() only accepts either a list, range or str + """ + if isinstance(array, (range, str)): + array = list(array) + elif not isinstance(array, list): + raise TypeError( + "binary_search_insert() only accepts either a list, range or str" + ) + + low = 0 + high = len(array) - 1 + + while low <= high: + mid = (low + high) // 2 + if array[mid] == index: + # If the item already exists in the array, + # insert it after the existing item + array.insert(mid + 1, index) + return array + elif array[mid] < index: + low = mid + 1 + else: + high = mid - 1 + + # If the item doesn't exist in the array, insert it at the appropriate position + array.insert(low, index) + return array + + def change(self, index: int, number: int) -> None: + """ + Changes (sets) the index as number + + >>> cont = NumberContainer() + >>> cont.change(0, 10) + >>> cont.change(0, 20) + >>> cont.change(-13, 20) + >>> cont.change(-100030, 20032903290) + """ + # Remove previous index + if index in self.indexmap: + n = self.indexmap[index] + if len(self.numbermap[n]) == 1: + del self.numbermap[n] + else: + self.numbermap[n] = self.binary_search_delete(self.numbermap[n], index) + + # Set new index + self.indexmap[index] = number + + # Number not seen before or empty so insert number value + if number not in self.numbermap: + self.numbermap[number] = [index] + + # Here we need to perform a binary search insertion in order to insert + # The item in the correct place + else: + self.numbermap[number] = self.binary_search_insert( + self.numbermap[number], index + ) + + def find(self, number: int) -> int: + """ + Returns the smallest index where the number is. 
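Putting the pieces together, change() keeps indexmap and numbermap in sync and find() is then a single dictionary lookup. A short end-to-end sketch of the semantics (the import path comes from the diff header and assumes the repository root is on sys.path):

from other.number_container_system import NumberContainer

containers = NumberContainer()
containers.change(10, 5)    # index 10 now holds the number 5
containers.change(20, 5)    # index 20 holds 5 as well
print(containers.find(5))   # 10 -> the smallest index currently storing 5
containers.change(10, 7)    # index 10 is reassigned, so it leaves 5's index list
print(containers.find(5))   # 20
print(containers.find(42))  # -1 -> 42 was never stored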
+ + >>> cont = NumberContainer() + >>> cont.find(10) + -1 + >>> cont.change(0, 10) + >>> cont.find(10) + 0 + >>> cont.change(0, 20) + >>> cont.find(10) + -1 + >>> cont.find(20) + 0 + """ + # Simply return the 0th index (smallest) of the indexes found (or -1) + return self.numbermap.get(number, [-1])[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9c9da8ebf1d35ae40ac5438c05cc273f7c6d4473 Mon Sep 17 00:00:00 2001 From: Jan Wojciechowski <96974442+yanvoi@users.noreply.github.com> Date: Fri, 9 Jun 2023 11:06:37 +0200 Subject: [PATCH 347/368] Improve readability of ciphers/mixed_keyword_cypher.py (#8626) * refactored the code * the code will now pass the test * looked more into it and fixed the logic * made the code easier to read, added comments and fixed the logic * got rid of redundant code + plaintext can contain chars that are not in the alphabet * fixed the reduntant conversion of ascii_uppercase to a list * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * keyword and plaintext won't have default values * ran the ruff command * Update linear_discriminant_analysis.py and rsa_cipher.py (#8680) * Update rsa_cipher.py by replacing %s with {} * Update rsa_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update machine_learning/linear_discriminant_analysis.py Co-authored-by: Christian Clauss * Update linear_discriminant_analysis.py * updated --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss * fixed some difficulties * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added comments, made printing mapping optional, added 1 test * shortened the line that was too long * Update ciphers/mixed_keyword_cypher.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- ciphers/mixed_keyword_cypher.py | 100 +++++++++++++++++--------------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 93a0e3acb..b984808fc 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -1,7 +1,11 @@ -def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: - """ +from string import ascii_uppercase - For key:hello + +def mixed_keyword( + keyword: str, plaintext: str, verbose: bool = False, alphabet: str = ascii_uppercase +) -> str: + """ + For keyword: hello H E L O A B C D @@ -12,58 +16,60 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: Y Z and map vertically - >>> mixed_keyword("college", "UNIVERSITY") # doctest: +NORMALIZE_WHITESPACE 
+ >>> mixed_keyword("college", "UNIVERSITY", True) # doctest: +NORMALIZE_WHITESPACE {'A': 'C', 'B': 'A', 'C': 'I', 'D': 'P', 'E': 'U', 'F': 'Z', 'G': 'O', 'H': 'B', 'I': 'J', 'J': 'Q', 'K': 'V', 'L': 'L', 'M': 'D', 'N': 'K', 'O': 'R', 'P': 'W', 'Q': 'E', 'R': 'F', 'S': 'M', 'T': 'S', 'U': 'X', 'V': 'G', 'W': 'H', 'X': 'N', 'Y': 'T', 'Z': 'Y'} 'XKJGUFMJST' + + >>> mixed_keyword("college", "UNIVERSITY", False) # doctest: +NORMALIZE_WHITESPACE + 'XKJGUFMJST' """ - key = key.upper() - pt = pt.upper() - temp = [] - for i in key: - if i not in temp: - temp.append(i) - len_temp = len(temp) - # print(temp) - alpha = [] - modalpha = [] - for j in range(65, 91): - t = chr(j) - alpha.append(t) - if t not in temp: - temp.append(t) - # print(temp) - r = int(26 / 4) - # print(r) - k = 0 - for _ in range(r): - s = [] - for _ in range(len_temp): - s.append(temp[k]) - if k >= 25: + keyword = keyword.upper() + plaintext = plaintext.upper() + alphabet_set = set(alphabet) + + # create a list of unique characters in the keyword - their order matters + # it determines how we will map plaintext characters to the ciphertext + unique_chars = [] + for char in keyword: + if char in alphabet_set and char not in unique_chars: + unique_chars.append(char) + # the number of those unique characters will determine the number of rows + num_unique_chars_in_keyword = len(unique_chars) + + # create a shifted version of the alphabet + shifted_alphabet = unique_chars + [ + char for char in alphabet if char not in unique_chars + ] + + # create a modified alphabet by splitting the shifted alphabet into rows + modified_alphabet = [ + shifted_alphabet[k : k + num_unique_chars_in_keyword] + for k in range(0, 26, num_unique_chars_in_keyword) + ] + + # map the alphabet characters to the modified alphabet characters + # going 'vertically' through the modified alphabet - consider columns first + mapping = {} + letter_index = 0 + for column in range(num_unique_chars_in_keyword): + for row in modified_alphabet: + # if current row (the last one) is too short, break out of loop + if len(row) <= column: break - k += 1 - modalpha.append(s) - # print(modalpha) - d = {} - j = 0 - k = 0 - for j in range(len_temp): - for m in modalpha: - if not len(m) - 1 >= j: - break - d[alpha[k]] = m[j] - if not k < 25: - break - k += 1 - print(d) - cypher = "" - for i in pt: - cypher += d[i] - return cypher + + # map current letter to letter in modified alphabet + mapping[alphabet[letter_index]] = row[column] + letter_index += 1 + + if verbose: + print(mapping) + # create the encrypted text by mapping the plaintext to the modified alphabet + return "".join(mapping[char] if char in mapping else char for char in plaintext) if __name__ == "__main__": + # example use print(mixed_keyword("college", "UNIVERSITY")) From daa0c8f3d340485ce295570e6d76b38891e371bd Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 10 Jun 2023 13:21:49 +0100 Subject: [PATCH 348/368] Create count negative numbers in matrix algorithm (#8813) * updating DIRECTORY.md * feat: Count negative numbers in sorted matrix * updating DIRECTORY.md * chore: Fix pre-commit * refactor: Combine functions into iteration * style: Reformat reference * feat: Add timings of each implementation * chore: Fix problems with algorithms-keeper bot * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * test: Remove doctest from benchmark function * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update 
matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * refactor: Use sum instead of large iteration * refactor: Use len not sum * Update count_negative_numbers_in_sorted_matrix.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 2 + ...count_negative_numbers_in_sorted_matrix.py | 151 ++++++++++++++++++ 2 files changed, 153 insertions(+) create mode 100644 matrix/count_negative_numbers_in_sorted_matrix.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6dac4a9a5..8511c261a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -679,6 +679,7 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Count Negative Numbers In Sorted Matrix](matrix/count_negative_numbers_in_sorted_matrix.py) * [Count Paths](matrix/count_paths.py) * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) @@ -753,6 +754,7 @@ * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) + * [Speed Of Sound](physics/speed_of_sound.py) ## Project Euler * Problem 001 diff --git a/matrix/count_negative_numbers_in_sorted_matrix.py b/matrix/count_negative_numbers_in_sorted_matrix.py new file mode 100644 index 000000000..2799ff3b4 --- /dev/null +++ b/matrix/count_negative_numbers_in_sorted_matrix.py @@ -0,0 +1,151 @@ +""" +Given an matrix of numbers in which all rows and all columns are sorted in decreasing +order, return the number of negative numbers in grid. + +Reference: https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix +""" + + +def generate_large_matrix() -> list[list[int]]: + """ + >>> generate_large_matrix() # doctest: +ELLIPSIS + [[1000, ..., -999], [999, ..., -1001], ..., [2, ..., -1998]] + """ + return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)] + + +grid = generate_large_matrix() +test_grids = ( + [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], + [[3, 2], [1, 0]], + [[7, 7, 6]], + [[7, 7, 6], [-1, -2, -3]], + grid, +) + + +def validate_grid(grid: list[list[int]]) -> None: + """ + Validate that the rows and columns of the grid is sorted in decreasing order. + >>> for grid in test_grids: + ... 
validate_grid(grid) + """ + assert all(row == sorted(row, reverse=True) for row in grid) + assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid)) + + +def find_negative_index(array: list[int]) -> int: + """ + Find the smallest negative index + + >>> find_negative_index([0,0,0,0]) + 4 + >>> find_negative_index([4,3,2,-1]) + 3 + >>> find_negative_index([1,0,-1,-10]) + 2 + >>> find_negative_index([0,0,0,-1]) + 3 + >>> find_negative_index([11,8,7,-3,-5,-9]) + 3 + >>> find_negative_index([-1,-1,-2,-3]) + 0 + >>> find_negative_index([5,1,0]) + 3 + >>> find_negative_index([-5,-5,-5]) + 0 + >>> find_negative_index([0]) + 1 + >>> find_negative_index([]) + 0 + """ + left = 0 + right = len(array) - 1 + + # Edge cases such as no values or all numbers are negative. + if not array or array[0] < 0: + return 0 + + while right + 1 > left: + mid = (left + right) // 2 + num = array[mid] + + # Num must be negative and the index must be greater than or equal to 0. + if num < 0 and array[mid - 1] >= 0: + return mid + + if num >= 0: + left = mid + 1 + else: + right = mid - 1 + # No negative numbers so return the last index of the array + 1 which is the length. + return len(array) + + +def count_negatives_binary_search(grid: list[list[int]]) -> int: + """ + An O(m logn) solution that uses binary search in order to find the boundary between + positive and negative numbers + + >>> [count_negatives_binary_search(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + total = 0 + bound = len(grid[0]) + + for i in range(len(grid)): + bound = find_negative_index(grid[i][:bound]) + total += bound + return (len(grid) * len(grid[0])) - total + + +def count_negatives_brute_force(grid: list[list[int]]) -> int: + """ + This solution is O(n^2) because it iterates through every column and row. + + >>> [count_negatives_brute_force(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + return len([number for row in grid for number in row if number < 0]) + + +def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int: + """ + Similar to the brute force solution above but uses break in order to reduce the + number of iterations. 
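+    For example, in the single row [7, 7, 6, -1, -2] the scan stops at the first
+    negative value (index 3) and adds len(row) - 3 == 2 for that row:
+
+    >>> count_negatives_brute_force_with_break([[7, 7, 6, -1, -2]])
+    2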
+ + >>> [count_negatives_brute_force_with_break(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + total = 0 + for row in grid: + for i, number in enumerate(row): + if number < 0: + total += len(row) - i + break + return total + + +def benchmark() -> None: + """Benchmark our functions next to each other""" + from timeit import timeit + + print("Running benchmarks") + setup = ( + "from __main__ import count_negatives_binary_search, " + "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" + ) + for func in ( + "count_negatives_binary_search", # took 0.7727 seconds + "count_negatives_brute_force_with_break", # took 4.6505 seconds + "count_negatives_brute_force", # took 12.8160 seconds + ): + time = timeit(f"{func}(grid=grid)", setup=setup, number=500) + print(f"{func}() took {time:0.4f} seconds") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + benchmark() From 46379861257d43bb7140d261094bf17dc414950f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 00:09:33 +0200 Subject: [PATCH 349/368] [pre-commit.ci] pre-commit autoupdate (#8817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.270 → v0.0.272](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.270...v0.0.272) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4c70ae219..1d4b73681 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.272 hooks: - id: ruff From e6f89a6b89941ffed911e96362be3611a45420e7 Mon Sep 17 00:00:00 2001 From: Ilkin Mengusoglu <113149540+imengus@users.noreply.github.com> Date: Sun, 18 Jun 2023 17:00:02 +0100 Subject: [PATCH 350/368] Simplex algorithm (#8825) * feat: added simplex.py * added docstrings * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff fix Co-authored by: CaedenPH * removed README to add in separate PR * Update linear_programming/simplex.py Co-authored-by: Tianyi Zheng * Update linear_programming/simplex.py Co-authored-by: Tianyi Zheng * fix class docstring * add comments --------- Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- linear_programming/simplex.py | 311 ++++++++++++++++++++++++++++++++++ 1 file changed, 311 insertions(+) create mode 100644 linear_programming/simplex.py diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py new file mode 100644 index 000000000..ba64add40 --- /dev/null +++ b/linear_programming/simplex.py @@ -0,0 +1,311 @@ +""" +Python implementation of the simplex algorithm for solving linear programs in +tabular form with +- `>=`, `<=`, and `=` constraints and +- each variable `x1, x2, ...>= 
0`. + +See https://gist.github.com/imengus/f9619a568f7da5bc74eaf20169a24d98 for how to +convert linear programs to simplex tableaus, and the steps taken in the simplex +algorithm. + +Resources: +https://en.wikipedia.org/wiki/Simplex_algorithm +https://tinyurl.com/simplex4beginners +""" +from typing import Any + +import numpy as np + + +class Tableau: + """Operate on simplex tableaus + + >>> t = Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + Traceback (most recent call last): + ... + ValueError: RHS must be > 0 + """ + + def __init__(self, tableau: np.ndarray, n_vars: int) -> None: + # Check if RHS is negative + if np.any(tableau[:, -1], where=tableau[:, -1] < 0): + raise ValueError("RHS must be > 0") + + self.tableau = tableau + self.n_rows, _ = tableau.shape + + # Number of decision variables x1, x2, x3... + self.n_vars = n_vars + + # Number of artificial variables to be minimised + self.n_art_vars = len(np.where(tableau[self.n_vars : -1] == -1)[0]) + + # 2 if there are >= or == constraints (nonstandard), 1 otherwise (std) + self.n_stages = (self.n_art_vars > 0) + 1 + + # Number of slack variables added to make inequalities into equalities + self.n_slack = self.n_rows - self.n_stages + + # Objectives for each stage + self.objectives = ["max"] + + # In two stage simplex, first minimise then maximise + if self.n_art_vars: + self.objectives.append("min") + + self.col_titles = [""] + + # Index of current pivot row and column + self.row_idx = None + self.col_idx = None + + # Does objective row only contain (non)-negative values? + self.stop_iter = False + + @staticmethod + def generate_col_titles(*args: int) -> list[str]: + """Generate column titles for tableau of specific dimensions + + >>> Tableau.generate_col_titles(2, 3, 1) + ['x1', 'x2', 's1', 's2', 's3', 'a1', 'RHS'] + + >>> Tableau.generate_col_titles() + Traceback (most recent call last): + ... + ValueError: Must provide n_vars, n_slack, and n_art_vars + >>> Tableau.generate_col_titles(-2, 3, 1) + Traceback (most recent call last): + ... + ValueError: All arguments must be non-negative integers + """ + if len(args) != 3: + raise ValueError("Must provide n_vars, n_slack, and n_art_vars") + + if not all(x >= 0 and isinstance(x, int) for x in args): + raise ValueError("All arguments must be non-negative integers") + + # decision | slack | artificial + string_starts = ["x", "s", "a"] + titles = [] + for i in range(3): + for j in range(args[i]): + titles.append(string_starts[i] + str(j + 1)) + titles.append("RHS") + return titles + + def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]: + """Finds the pivot row and column. 
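+        The pivot column is the objective-row coefficient with the most room for
+        improvement (most negative when maximising, most positive when minimising);
+        the pivot row is then chosen by the minimum-ratio test over rows whose
+        pivot-column entry is positive.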
+ >>> t = Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), 2) + >>> t.find_pivot(t.tableau) + (1, 0) + """ + objective = self.objectives[-1] + + # Find entries of highest magnitude in objective rows + sign = (objective == "min") - (objective == "max") + col_idx = np.argmax(sign * tableau[0, : self.n_vars]) + + # Choice is only valid if below 0 for maximise, and above for minimise + if sign * self.tableau[0, col_idx] <= 0: + self.stop_iter = True + return 0, 0 + + # Pivot row is chosen as having the lowest quotient when elements of + # the pivot column divide the right-hand side + + # Slice excluding the objective rows + s = slice(self.n_stages, self.n_rows) + + # RHS + dividend = tableau[s, -1] + + # Elements of pivot column within slice + divisor = tableau[s, col_idx] + + # Array filled with nans + nans = np.full(self.n_rows - self.n_stages, np.nan) + + # If element in pivot column is greater than zeron_stages, return + # quotient or nan otherwise + quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0) + + # Arg of minimum quotient excluding the nan values. n_stages is added + # to compensate for earlier exclusion of objective columns + row_idx = np.nanargmin(quotients) + self.n_stages + return row_idx, col_idx + + def pivot(self, tableau: np.ndarray, row_idx: int, col_idx: int) -> np.ndarray: + """Pivots on value on the intersection of pivot row and column. + + >>> t = Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + >>> t.pivot(t.tableau, 1, 0).tolist() + ... # doctest: +NORMALIZE_WHITESPACE + [[0.0, 3.0, 2.0, 0.0, 8.0], + [1.0, 3.0, 1.0, 0.0, 4.0], + [0.0, -8.0, -3.0, 1.0, -8.0]] + """ + # Avoid changes to original tableau + piv_row = tableau[row_idx].copy() + + piv_val = piv_row[col_idx] + + # Entry becomes 1 + piv_row *= 1 / piv_val + + # Variable in pivot column becomes basic, ie the only non-zero entry + for idx, coeff in enumerate(tableau[:, col_idx]): + tableau[idx] += -coeff * piv_row + tableau[row_idx] = piv_row + return tableau + + def change_stage(self, tableau: np.ndarray) -> np.ndarray: + """Exits first phase of the two-stage method by deleting artificial + rows and columns, or completes the algorithm if exiting the standard + case. + + >>> t = Tableau(np.array([ + ... [3, 3, -1, -1, 0, 0, 4], + ... [2, 1, 0, 0, 0, 0, 0.], + ... [1, 2, -1, 0, 1, 0, 2], + ... [2, 1, 0, -1, 0, 1, 2] + ... ]), 2) + >>> t.change_stage(t.tableau).tolist() + ... # doctest: +NORMALIZE_WHITESPACE + [[2.0, 1.0, 0.0, 0.0, 0.0, 0.0], + [1.0, 2.0, -1.0, 0.0, 1.0, 2.0], + [2.0, 1.0, 0.0, -1.0, 0.0, 2.0]] + """ + # Objective of original objective row remains + self.objectives.pop() + + if not self.objectives: + return tableau + + # Slice containing ids for artificial columns + s = slice(-self.n_art_vars - 1, -1) + + # Delete the artificial variable columns + tableau = np.delete(tableau, s, axis=1) + + # Delete the objective row of the first stage + tableau = np.delete(tableau, 0, axis=0) + + self.n_stages = 1 + self.n_rows -= 1 + self.n_art_vars = 0 + self.stop_iter = False + return tableau + + def run_simplex(self) -> dict[Any, Any]: + """Operate on tableau until objective function cannot be + improved further. + + # Standard linear program: + Max: x1 + x2 + ST: x1 + 3x2 <= 4 + 3x1 + x2 <= 4 + >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), + ... 2).run_simplex() + {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + + # Optimal tableau input: + >>> Tableau(np.array([ + ... [0, 0, 0.25, 0.25, 2], + ... [0, 1, 0.375, -0.125, 1], + ... 
[1, 0, -0.125, 0.375, 1] + ... ]), 2).run_simplex() + {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + + # Non-standard: >= constraints + Max: 2x1 + 3x2 + x3 + ST: x1 + x2 + x3 <= 40 + 2x1 + x2 - x3 >= 10 + - x2 + x3 >= 10 + >>> Tableau(np.array([ + ... [2, 0, 0, 0, -1, -1, 0, 0, 20], + ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], + ... [1, 1, 1, 1, 0, 0, 0, 0, 40], + ... [2, 1, -1, 0, -1, 0, 1, 0, 10], + ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] + ... ]), 3).run_simplex() + {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} + + # Non standard: minimisation and equalities + Min: x1 + x2 + ST: 2x1 + x2 = 12 + 6x1 + 5x2 = 40 + >>> Tableau(np.array([ + ... [8, 6, 0, -1, 0, -1, 0, 0, 52], + ... [1, 1, 0, 0, 0, 0, 0, 0, 0], + ... [2, 1, 1, 0, 0, 0, 0, 0, 12], + ... [2, 1, 0, -1, 0, 0, 1, 0, 12], + ... [6, 5, 0, 0, 1, 0, 0, 0, 40], + ... [6, 5, 0, 0, 0, -1, 0, 1, 40.] + ... ]), 2).run_simplex() + {'P': 7.0, 'x1': 5.0, 'x2': 2.0} + """ + # Stop simplex algorithm from cycling. + for _ in range(100): + # Completion of each stage removes an objective. If both stages + # are complete, then no objectives are left + if not self.objectives: + self.col_titles = self.generate_col_titles( + self.n_vars, self.n_slack, self.n_art_vars + ) + + # Find the values of each variable at optimal solution + return self.interpret_tableau(self.tableau, self.col_titles) + + row_idx, col_idx = self.find_pivot(self.tableau) + + # If there are no more negative values in objective row + if self.stop_iter: + # Delete artificial variable columns and rows. Update attributes + self.tableau = self.change_stage(self.tableau) + else: + self.tableau = self.pivot(self.tableau, row_idx, col_idx) + return {} + + def interpret_tableau( + self, tableau: np.ndarray, col_titles: list[str] + ) -> dict[str, float]: + """Given the final tableau, add the corresponding values of the basic + decision variables to the `output_dict` + >>> tableau = np.array([ + ... [0,0,0.875,0.375,5], + ... [0,1,0.375,-0.125,1], + ... [1,0,-0.125,0.375,1] + ... 
]) + >>> t = Tableau(tableau, 2) + >>> t.interpret_tableau(tableau, ["x1", "x2", "s1", "s2", "RHS"]) + {'P': 5.0, 'x1': 1.0, 'x2': 1.0} + """ + # P = RHS of final tableau + output_dict = {"P": abs(tableau[0, -1])} + + for i in range(self.n_vars): + # Gives ids of nonzero entries in the ith column + nonzero = np.nonzero(tableau[:, i]) + n_nonzero = len(nonzero[0]) + + # First entry in the nonzero ids + nonzero_rowidx = nonzero[0][0] + nonzero_val = tableau[nonzero_rowidx, i] + + # If there is only one nonzero value in column, which is one + if n_nonzero == nonzero_val == 1: + rhs_val = tableau[nonzero_rowidx, -1] + output_dict[col_titles[i]] = rhs_val + + # Check for basic variables + for title in col_titles: + # Don't add RHS or slack variables to output dict + if title[0] not in "R-s-a": + output_dict.setdefault(title, 0) + return output_dict + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b0f871032e78dd1d2f2214acbaae2fac88fa55b0 Mon Sep 17 00:00:00 2001 From: Frank-1998 <77809242+Frank-1998@users.noreply.github.com> Date: Sun, 18 Jun 2023 10:30:06 -0600 Subject: [PATCH 351/368] Fix removing the root node in binary_search_tree.py removes the whole tree (#8752) * fix issue #8715 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/binary_tree/binary_search_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index cd88cc10e..c72195424 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -40,7 +40,7 @@ class BinarySearchTree: else: node.parent.left = new_children else: - self.root = None + self.root = new_children def is_right(self, node: Node) -> bool: if node.parent and node.parent.right: From ea6c6056cf2215358834710bf89422310f831178 Mon Sep 17 00:00:00 2001 From: Turro <42980188+smturro2@users.noreply.github.com> Date: Mon, 19 Jun 2023 06:46:29 -0500 Subject: [PATCH 352/368] Added apr_interest function to financial (#6025) * Added apr_interest function to financial * Update interest.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/interest.py * float --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- financial/interest.py | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/financial/interest.py b/financial/interest.py index c69c73045..33d02e27c 100644 --- a/financial/interest.py +++ b/financial/interest.py @@ -4,7 +4,7 @@ from __future__ import annotations def simple_interest( - principal: float, daily_interest_rate: float, days_between_payments: int + principal: float, daily_interest_rate: float, days_between_payments: float ) -> float: """ >>> simple_interest(18000.0, 0.06, 3) @@ -42,7 +42,7 @@ def simple_interest( def compound_interest( principal: float, nominal_annual_interest_rate_percentage: float, - number_of_compounding_periods: int, + number_of_compounding_periods: float, ) -> float: """ >>> compound_interest(10000.0, 0.05, 3) @@ -77,6 +77,43 @@ def compound_interest( ) +def apr_interest( + principal: float, + nominal_annual_percentage_rate: float, + number_of_years: float, +) -> 
float: + """ + >>> apr_interest(10000.0, 0.05, 3) + 1618.223072263547 + >>> apr_interest(10000.0, 0.05, 1) + 512.6749646744732 + >>> apr_interest(0.5, 0.05, 3) + 0.08091115361317736 + >>> apr_interest(10000.0, 0.06, -4) + Traceback (most recent call last): + ... + ValueError: number_of_years must be > 0 + >>> apr_interest(10000.0, -3.5, 3.0) + Traceback (most recent call last): + ... + ValueError: nominal_annual_percentage_rate must be >= 0 + >>> apr_interest(-5500.0, 0.01, 5) + Traceback (most recent call last): + ... + ValueError: principal must be > 0 + """ + if number_of_years <= 0: + raise ValueError("number_of_years must be > 0") + if nominal_annual_percentage_rate < 0: + raise ValueError("nominal_annual_percentage_rate must be >= 0") + if principal <= 0: + raise ValueError("principal must be > 0") + + return compound_interest( + principal, nominal_annual_percentage_rate / 365, number_of_years * 365 + ) + + if __name__ == "__main__": import doctest From 0dee4a402c85981af0c2d4c53af27a69a7eb91bf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:56:14 +0200 Subject: [PATCH 353/368] [pre-commit.ci] pre-commit autoupdate (#8827) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/codespell-project/codespell: v2.2.4 → v2.2.5](https://github.com/codespell-project/codespell/compare/v2.2.4...v2.2.5) - [github.com/tox-dev/pyproject-fmt: 0.11.2 → 0.12.0](https://github.com/tox-dev/pyproject-fmt/compare/0.11.2...0.12.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d4b73681..591fd7819 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,14 +26,14 @@ repos: - id: black - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell additional_dependencies: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.11.2" + rev: "0.12.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 8511c261a..6ec8d5111 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -486,6 +486,9 @@ * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) * [Transformations 2D](linear_algebra/src/transformations_2d.py) +## Linear Programming + * [Simplex](linear_programming/simplex.py) + ## Machine Learning * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) From 07e68128883b84fb7e342c6bce88863a05fbbf62 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 20 Jun 2023 18:03:16 +0200 Subject: [PATCH 354/368] Update .pre-commit-config.yaml (#8828) * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- pyproject.toml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a52619668..1dcce044a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,3 @@ -[tool.pytest.ini_options] -markers = [ - 
"mat_ops: mark a test as utilizing matrix operations.", -] -addopts = [ - "--durations=10", - "--doctest-modules", - "--showlocals", -] - -[tool.coverage.report] -omit = [".env/*"] -sort = "Cover" - -[tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" - [tool.ruff] ignore = [ # `ruff rule S101` for a description of that rule "ARG001", # Unused function argument `amount` -- FIX ME? @@ -131,3 +113,21 @@ max-args = 10 # default: 5 max-branches = 20 # default: 12 max-returns = 8 # default: 6 max-statements = 88 # default: 50 + +[tool.pytest.ini_options] +markers = [ + "mat_ops: mark a test as utilizing matrix operations.", +] +addopts = [ + "--durations=10", + "--doctest-modules", + "--showlocals", +] + +[tool.coverage.report] +omit = [".env/*"] +sort = "Cover" + +[tool.codespell] +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 5b0890bd833eb85c58fae9afc4984d520e7e2ad6 Mon Sep 17 00:00:00 2001 From: "Linus M. Henkel" <86628476+linushenkel@users.noreply.github.com> Date: Thu, 22 Jun 2023 13:49:09 +0200 Subject: [PATCH 355/368] Dijkstra algorithm with binary grid (#8802) * Create TestShiva * Delete TestShiva * Implementation of the Dijkstra-Algorithm in a binary grid * Update double_ended_queue.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update least_common_multiple.py * Update sol1.py * Update pyproject.toml * Update pyproject.toml * https://github.com/astral-sh/ruff-pre-commit v0.0.274 --------- Co-authored-by: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Co-authored-by: jlhuhn <134317018+jlhuhn@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +- data_structures/queue/double_ended_queue.py | 4 +- graphs/dijkstra_binary_grid.py | 89 +++++++++++++++++++++ maths/least_common_multiple.py | 6 +- project_euler/problem_054/sol1.py | 18 ++--- pyproject.toml | 1 + 6 files changed, 106 insertions(+), 16 deletions(-) create mode 100644 graphs/dijkstra_binary_grid.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 591fd7819..3d4cc4084 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,8 +15,8 @@ repos: hooks: - id: auto-walrus - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.272 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.274 hooks: - id: ruff diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 637b7f62f..2472371b4 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -32,7 +32,7 @@ class Deque: the number of nodes """ - __slots__ = ["_front", "_back", "_len"] + __slots__ = ("_front", "_back", "_len") @dataclass class _Node: @@ -54,7 +54,7 @@ class Deque: the current node of the iteration. 
""" - __slots__ = ["_cur"] + __slots__ = "_cur" def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py new file mode 100644 index 000000000..c23d82343 --- /dev/null +++ b/graphs/dijkstra_binary_grid.py @@ -0,0 +1,89 @@ +""" +This script implements the Dijkstra algorithm on a binary grid. +The grid consists of 0s and 1s, where 1 represents +a walkable node and 0 represents an obstacle. +The algorithm finds the shortest path from a start node to a destination node. +Diagonal movement can be allowed or disallowed. +""" + +from heapq import heappop, heappush + +import numpy as np + + +def dijkstra( + grid: np.ndarray, + source: tuple[int, int], + destination: tuple[int, int], + allow_diagonal: bool, +) -> tuple[float | int, list[tuple[int, int]]]: + """ + Implements Dijkstra's algorithm on a binary grid. + + Args: + grid (np.ndarray): A 2D numpy array representing the grid. + 1 represents a walkable node and 0 represents an obstacle. + source (Tuple[int, int]): A tuple representing the start node. + destination (Tuple[int, int]): A tuple representing the + destination node. + allow_diagonal (bool): A boolean determining whether + diagonal movements are allowed. + + Returns: + Tuple[Union[float, int], List[Tuple[int, int]]]: + The shortest distance from the start node to the destination node + and the shortest path as a list of nodes. + + >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False) + (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]) + + >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), True) + (2.0, [(0, 0), (1, 1), (2, 2)]) + + >>> dijkstra(np.array([[1, 1, 1], [0, 0, 1], [0, 1, 1]]), (0, 0), (2, 2), False) + (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]) + """ + rows, cols = grid.shape + dx = [-1, 1, 0, 0] + dy = [0, 0, -1, 1] + if allow_diagonal: + dx += [-1, -1, 1, 1] + dy += [-1, 1, -1, 1] + + queue, visited = [(0, source)], set() + matrix = np.full((rows, cols), np.inf) + matrix[source] = 0 + predecessors = np.empty((rows, cols), dtype=object) + predecessors[source] = None + + while queue: + (dist, (x, y)) = heappop(queue) + if (x, y) in visited: + continue + visited.add((x, y)) + + if (x, y) == destination: + path = [] + while (x, y) != source: + path.append((x, y)) + x, y = predecessors[x, y] + path.append(source) # add the source manually + path.reverse() + return matrix[destination], path + + for i in range(len(dx)): + nx, ny = x + dx[i], y + dy[i] + if 0 <= nx < rows and 0 <= ny < cols: + next_node = grid[nx][ny] + if next_node == 1 and matrix[nx, ny] > dist + 1: + heappush(queue, (dist + 1, (nx, ny))) + matrix[nx, ny] = dist + 1 + predecessors[nx, ny] = (x, y) + + return np.inf, [] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 621d93720..10cc63ac7 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,7 +67,7 @@ def benchmark(): class TestLeastCommonMultiple(unittest.TestCase): - test_inputs = [ + test_inputs = ( (10, 20), (13, 15), (4, 31), @@ -77,8 +77,8 @@ class TestLeastCommonMultiple(unittest.TestCase): (12, 25), (10, 25), (6, 9), - ] - expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18] + ) + expected_results = (20, 195, 124, 210, 1462, 60, 300, 50, 18) def test_lcm_function(self): for i, (first_num, second_num) in enumerate(self.test_inputs): diff --git 
a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 74409f32c..86dfa5edd 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -47,18 +47,18 @@ import os class PokerHand: """Create an object representing a Poker Hand based on an input of a - string which represents the best 5 card combination from the player's hand + string which represents the best 5-card combination from the player's hand and board cards. Attributes: (read-only) - hand: string representing the hand consisting of five cards + hand: a string representing the hand consisting of five cards Methods: compare_with(opponent): takes in player's hand (self) and opponent's hand (opponent) and compares both hands according to the rules of Texas Hold'em. Returns one of 3 strings (Win, Loss, Tie) based on whether - player's hand is better than opponent's hand. + player's hand is better than the opponent's hand. hand_name(): Returns a string made up of two parts: hand name and high card. @@ -66,11 +66,11 @@ class PokerHand: Supported operators: Rich comparison operators: <, >, <=, >=, ==, != - Supported builtin methods and functions: + Supported built-in methods and functions: list.sort(), sorted() """ - _HAND_NAME = [ + _HAND_NAME = ( "High card", "One pair", "Two pairs", @@ -81,10 +81,10 @@ class PokerHand: "Four of a kind", "Straight flush", "Royal flush", - ] + ) - _CARD_NAME = [ - "", # placeholder as lists are zero indexed + _CARD_NAME = ( + "", # placeholder as tuples are zero-indexed "One", "Two", "Three", @@ -99,7 +99,7 @@ class PokerHand: "Queen", "King", "Ace", - ] + ) def __init__(self, hand: str) -> None: """ diff --git a/pyproject.toml b/pyproject.toml index 1dcce044a..4f21a9519 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -103,6 +103,7 @@ max-complexity = 17 # default: 10 "machine_learning/linear_discriminant_analysis.py" = ["ARG005"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] "matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"other/l*u_cache.py" = ["RUF012"] "physics/newtons_second_law_of_motion.py" = ["BLE001"] "project_euler/problem_099/sol1.py" = ["SIM115"] "sorts/external_sort.py" = ["SIM115"] From 5ffe601c86a9b44691a4dce37480c6d904102d49 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 22 Jun 2023 05:24:34 -0700 Subject: [PATCH 356/368] Fix `mypy` errors in `maths/sigmoid_linear_unit.py` (#8786) * updating DIRECTORY.md * Fix mypy errors in sigmoid_linear_unit.py * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/sigmoid_linear_unit.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/maths/sigmoid_linear_unit.py b/maths/sigmoid_linear_unit.py index a8ada10dd..0ee09bf82 100644 --- a/maths/sigmoid_linear_unit.py +++ b/maths/sigmoid_linear_unit.py @@ -17,7 +17,7 @@ This script is inspired by a corresponding research paper. import numpy as np -def sigmoid(vector: np.array) -> np.array: +def sigmoid(vector: np.ndarray) -> np.ndarray: """ Mathematical function sigmoid takes a vector x of K real numbers as input and returns 1/ (1 + e^-x). @@ -29,17 +29,15 @@ def sigmoid(vector: np.array) -> np.array: return 1 / (1 + np.exp(-vector)) -def sigmoid_linear_unit(vector: np.array) -> np.array: +def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray: """ Implements the Sigmoid Linear Unit (SiLU) or swish function Parameters: - vector (np.array): A numpy array consisting of real - values. 
+ vector (np.ndarray): A numpy array consisting of real values Returns: - swish_vec (np.array): The input numpy array, after applying - swish. + swish_vec (np.ndarray): The input numpy array, after applying swish Examples: >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])) From f54a9668103e560f20b50559fb54ac38a74d1fe8 Mon Sep 17 00:00:00 2001 From: Jan-Lukas Huhn <134317018+jlhuhn@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:31:48 +0200 Subject: [PATCH 357/368] Energy conversions (#8801) * Create TestShiva * Delete TestShiva * Create energy_conversions.py * Update conversions/energy_conversions.py Co-authored-by: Caeden Perelli-Harris --------- Co-authored-by: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- conversions/energy_conversions.py | 114 ++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 conversions/energy_conversions.py diff --git a/conversions/energy_conversions.py b/conversions/energy_conversions.py new file mode 100644 index 000000000..51de6b313 --- /dev/null +++ b/conversions/energy_conversions.py @@ -0,0 +1,114 @@ +""" +Conversion of energy units. + +Available units: joule, kilojoule, megajoule, gigajoule,\ + wattsecond, watthour, kilowatthour, newtonmeter, calorie_nutr,\ + kilocalorie_nutr, electronvolt, britishthermalunit_it, footpound + +USAGE : +-> Import this file into their respective project. +-> Use the function energy_conversion() for conversion of energy units. +-> Parameters : + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert + -> value : the value which you want to convert + +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Units_of_energy +-> Wikipedia reference: https://en.wikipedia.org/wiki/Joule +-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilowatt-hour +-> Wikipedia reference: https://en.wikipedia.org/wiki/Newton-metre +-> Wikipedia reference: https://en.wikipedia.org/wiki/Calorie +-> Wikipedia reference: https://en.wikipedia.org/wiki/Electronvolt +-> Wikipedia reference: https://en.wikipedia.org/wiki/British_thermal_unit +-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot-pound_(energy) +-> Unit converter reference: https://www.unitconverters.net/energy-converter.html +""" + +ENERGY_CONVERSION: dict[str, float] = { + "joule": 1.0, + "kilojoule": 1_000, + "megajoule": 1_000_000, + "gigajoule": 1_000_000_000, + "wattsecond": 1.0, + "watthour": 3_600, + "kilowatthour": 3_600_000, + "newtonmeter": 1.0, + "calorie_nutr": 4_186.8, + "kilocalorie_nutr": 4_186_800.00, + "electronvolt": 1.602_176_634e-19, + "britishthermalunit_it": 1_055.055_85, + "footpound": 1.355_818, +} + + +def energy_conversion(from_type: str, to_type: str, value: float) -> float: + """ + Conversion of energy units. 
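+    Values are converted via their joule equivalents:
+    value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type].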
+ >>> energy_conversion("joule", "joule", 1) + 1.0 + >>> energy_conversion("joule", "kilojoule", 1) + 0.001 + >>> energy_conversion("joule", "megajoule", 1) + 1e-06 + >>> energy_conversion("joule", "gigajoule", 1) + 1e-09 + >>> energy_conversion("joule", "wattsecond", 1) + 1.0 + >>> energy_conversion("joule", "watthour", 1) + 0.0002777777777777778 + >>> energy_conversion("joule", "kilowatthour", 1) + 2.7777777777777776e-07 + >>> energy_conversion("joule", "newtonmeter", 1) + 1.0 + >>> energy_conversion("joule", "calorie_nutr", 1) + 0.00023884589662749592 + >>> energy_conversion("joule", "kilocalorie_nutr", 1) + 2.388458966274959e-07 + >>> energy_conversion("joule", "electronvolt", 1) + 6.241509074460763e+18 + >>> energy_conversion("joule", "britishthermalunit_it", 1) + 0.0009478171226670134 + >>> energy_conversion("joule", "footpound", 1) + 0.7375621211696556 + >>> energy_conversion("joule", "megajoule", 1000) + 0.001 + >>> energy_conversion("calorie_nutr", "kilocalorie_nutr", 1000) + 1.0 + >>> energy_conversion("kilowatthour", "joule", 10) + 36000000.0 + >>> energy_conversion("britishthermalunit_it", "footpound", 1) + 778.1692306784539 + >>> energy_conversion("watthour", "joule", "a") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for /: 'str' and 'float' + >>> energy_conversion("wrongunit", "joule", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: 'wrongunit', 'joule' + Valid values are: joule, ... footpound + >>> energy_conversion("joule", "wrongunit", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: 'joule', 'wrongunit' + Valid values are: joule, ... footpound + >>> energy_conversion("123", "abc", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: '123', 'abc' + Valid values are: joule, ... 
footpound + """ + if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: + msg = ( + f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" + f"Valid values are: {', '.join(ENERGY_CONVERSION)}" + ) + raise ValueError(msg) + return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 331585f3f866e210e23d11700b09a8770a1c2490 Mon Sep 17 00:00:00 2001 From: Himanshu Tomar Date: Fri, 23 Jun 2023 13:56:05 +0530 Subject: [PATCH 358/368] Algorithm: Calculating Product Sum from a Special Array with Nested Structures (#8761) * Added minimum waiting time problem solution using greedy algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff --fix * Add type hints * Added two more doc test * Removed unnecessary comments * updated type hints * Updated the code as per the code review * Added recursive algo to calculate product sum from an array * Added recursive algo to calculate product sum from an array * Update doc string * Added doctest for product_sum function * Updated the code and added more doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added more test coverage for product_sum method * Update product_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + data_structures/arrays/product_sum.py | 98 +++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 data_structures/arrays/product_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6ec8d5111..83389dab1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -166,6 +166,7 @@ * Arrays * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) + * [Product Sum Array](data_structures/arrays/product_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) diff --git a/data_structures/arrays/product_sum.py b/data_structures/arrays/product_sum.py new file mode 100644 index 000000000..4fb906f36 --- /dev/null +++ b/data_structures/arrays/product_sum.py @@ -0,0 +1,98 @@ +""" +Calculate the Product Sum from a Special Array. +reference: https://dev.to/sfrasica/algorithms-product-sum-from-an-array-dc6 + +Python doctests can be run with the following command: +python -m doctest -v product_sum.py + +Calculate the product sum of a "special" array which can contain integers or nested +arrays. The product sum is obtained by adding all elements and multiplying by their +respective depths. + +For example, in the array [x, y], the product sum is (x + y). In the array [x, [y, z]], +the product sum is x + 2 * (y + z). In the array [x, [y, [z]]], +the product sum is x + 2 * (y + 3z). + +Example Input: +[5, 2, [-7, 1], 3, [6, [-13, 8], 4]] +Output: 12 + +""" + + +def product_sum(arr: list[int | list], depth: int) -> int: + """ + Recursively calculates the product sum of an array. + + The product sum of an array is defined as the sum of its elements multiplied by + their respective depths. If an element is a list, its product sum is calculated + recursively by multiplying the sum of its elements with its depth plus one. + + Args: + arr: The array of integers and nested lists. + depth: The current depth level. 
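+            (the top-level call uses depth=1; a list nested directly inside it
+            is processed with depth=2, and so on)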
+ + Returns: + int: The product sum of the array. + + Examples: + >>> product_sum([1, 2, 3], 1) + 6 + >>> product_sum([-1, 2, [-3, 4]], 2) + 8 + >>> product_sum([1, 2, 3], -1) + -6 + >>> product_sum([1, 2, 3], 0) + 0 + >>> product_sum([1, 2, 3], 7) + 42 + >>> product_sum((1, 2, 3), 7) + 42 + >>> product_sum({1, 2, 3}, 7) + 42 + >>> product_sum([1, -1], 1) + 0 + >>> product_sum([1, -2], 1) + -1 + >>> product_sum([-3.5, [1, [0.5]]], 1) + 1.5 + + """ + total_sum = 0 + for ele in arr: + total_sum += product_sum(ele, depth + 1) if isinstance(ele, list) else ele + return total_sum * depth + + +def product_sum_array(array: list[int | list]) -> int: + """ + Calculates the product sum of an array. + + Args: + array (List[Union[int, List]]): The array of integers and nested lists. + + Returns: + int: The product sum of the array. + + Examples: + >>> product_sum_array([1, 2, 3]) + 6 + >>> product_sum_array([1, [2, 3]]) + 11 + >>> product_sum_array([1, [2, [3, 4]]]) + 47 + >>> product_sum_array([0]) + 0 + >>> product_sum_array([-3.5, [1, [0.5]]]) + 1.5 + >>> product_sum_array([1, -2]) + -1 + + """ + return product_sum(array, 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 267a8b72f97762383e7c313ed20df859115e2815 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 23 Jun 2023 06:56:58 -0700 Subject: [PATCH 359/368] Clarify how to add issue numbers in PR template and CONTRIBUTING.md (#8833) * updating DIRECTORY.md * Clarify wording in PR template * Clarify CONTRIBUTING.md wording about adding issue numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add suggested change from review to CONTRIBUTING.md Co-authored-by: Christian Clauss * Incorporate review edit to CONTRIBUTING.md Co-authored-by: Christian Clauss --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/pull_request_template.md | 2 +- CONTRIBUTING.md | 7 ++++++- DIRECTORY.md | 2 ++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b3ba8baf9..1f9797fae 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. -* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. +* [ ] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER". diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2bb0c2e39..618cca868 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,7 +25,12 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. 
After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. -Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto-close the issue when the PR is merged. +Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. +For example, if your pull request fixes issue #10, then please add the following to its description: +``` +Fixes #10 +``` +GitHub will use this tag to [auto-close the issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) if and when the PR is merged. #### What is an Algorithm? diff --git a/DIRECTORY.md b/DIRECTORY.md index 83389dab1..1414aacf9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -146,6 +146,7 @@ * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py) * [Decimal To Hexadecimal](conversions/decimal_to_hexadecimal.py) * [Decimal To Octal](conversions/decimal_to_octal.py) + * [Energy Conversions](conversions/energy_conversions.py) * [Excel Title To Column](conversions/excel_title_to_column.py) * [Hex To Bin](conversions/hex_to_bin.py) * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) @@ -411,6 +412,7 @@ * [Dijkstra 2](graphs/dijkstra_2.py) * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py) * [Dijkstra Alternate](graphs/dijkstra_alternate.py) + * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py) * [Dinic](graphs/dinic.py) * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) From 3bfa89dacf877b1d7a62b14f82d54e8de99a838e Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 25 Jun 2023 18:28:01 +0200 Subject: [PATCH 360/368] GitHub Actions build: Add more tests (#8837) * GitHub Actions build: Add more tests Re-enable some tests that were disabled in #6591. 
Fixes #8818 * updating DIRECTORY.md * TODO: Re-enable quantum tests * fails: pytest quantum/bb84.py quantum/q_fourier_transform.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 7 +++---- DIRECTORY.md | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6b9cc890b..5229edaf8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,11 +22,10 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - # See: #6591 for re-enabling tests on Python v3.11 + # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=computer_vision/cnn_classification.py - --ignore=machine_learning/lstm/lstm_prediction.py - --ignore=quantum/ + --ignore=quantum/bb84.py + --ignore=quantum/q_fourier_transform.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 1414aacf9..0c21b9537 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -167,7 +167,7 @@ * Arrays * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) - * [Product Sum Array](data_structures/arrays/product_sum.py) + * [Product Sum](data_structures/arrays/product_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) From d764eec655c1c51f5ef3490d27ea72430191a000 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 26 Jun 2023 05:24:50 +0200 Subject: [PATCH 361/368] Fix failing pytest quantum/bb84.py (#8838) * Fix failing pytest quantum/bb84.py * Update bb84.py test results to match current qiskit --- .github/workflows/build.yml | 1 - quantum/bb84.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5229edaf8..fc8cb6369 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,6 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/bb84.py --ignore=quantum/q_fourier_transform.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py diff --git a/quantum/bb84.py b/quantum/bb84.py index 60d64371f..e90a11c2a 100644 --- a/quantum/bb84.py +++ b/quantum/bb84.py @@ -64,10 +64,10 @@ def bb84(key_len: int = 8, seed: int | None = None) -> str: key: The key generated using BB84 protocol. >>> bb84(16, seed=0) - '1101101100010000' + '0111110111010010' >>> bb84(8, seed=0) - '01011011' + '10110001' """ # Set up the random number generator. 
rng = np.random.default_rng(seed=seed) From 62dcbea943e8cc4ea4d83eff115c4e6f6a4808af Mon Sep 17 00:00:00 2001 From: duongoku Date: Mon, 26 Jun 2023 14:39:18 +0700 Subject: [PATCH 362/368] Add power sum problem (#8832) * Add powersum problem * Add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add more doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add more doctests * Improve paramater name * Fix line too long * Remove global variables * Apply suggestions from code review * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/power_sum.py | 93 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 backtracking/power_sum.py diff --git a/backtracking/power_sum.py b/backtracking/power_sum.py new file mode 100644 index 000000000..fcf1429f8 --- /dev/null +++ b/backtracking/power_sum.py @@ -0,0 +1,93 @@ +""" +Problem source: https://www.hackerrank.com/challenges/the-power-sum/problem +Find the number of ways that a given integer X, can be expressed as the sum +of the Nth powers of unique, natural numbers. For example, if X=13 and N=2. +We have to find all combinations of unique squares adding up to 13. +The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10. +""" + +from math import pow + + +def backtrack( + needed_sum: int, + power: int, + current_number: int, + current_sum: int, + solutions_count: int, +) -> tuple[int, int]: + """ + >>> backtrack(13, 2, 1, 0, 0) + (0, 1) + >>> backtrack(100, 2, 1, 0, 0) + (0, 3) + >>> backtrack(100, 3, 1, 0, 0) + (0, 1) + >>> backtrack(800, 2, 1, 0, 0) + (0, 561) + >>> backtrack(1000, 10, 1, 0, 0) + (0, 0) + >>> backtrack(400, 2, 1, 0, 0) + (0, 55) + >>> backtrack(50, 1, 1, 0, 0) + (0, 3658) + """ + if current_sum == needed_sum: + # If the sum of the powers is equal to needed_sum, then we have a solution. + solutions_count += 1 + return current_sum, solutions_count + + i_to_n = int(pow(current_number, power)) + if current_sum + i_to_n <= needed_sum: + # If the sum of the powers is less than needed_sum, then continue adding powers. + current_sum += i_to_n + current_sum, solutions_count = backtrack( + needed_sum, power, current_number + 1, current_sum, solutions_count + ) + current_sum -= i_to_n + if i_to_n < needed_sum: + # If the power of i is less than needed_sum, then try with the next power. + current_sum, solutions_count = backtrack( + needed_sum, power, current_number + 1, current_sum, solutions_count + ) + return current_sum, solutions_count + + +def solve(needed_sum: int, power: int) -> int: + """ + >>> solve(13, 2) + 1 + >>> solve(100, 2) + 3 + >>> solve(100, 3) + 1 + >>> solve(800, 2) + 561 + >>> solve(1000, 10) + 0 + >>> solve(400, 2) + 55 + >>> solve(50, 1) + Traceback (most recent call last): + ... + ValueError: Invalid input + needed_sum must be between 1 and 1000, power between 2 and 10. + >>> solve(-10, 5) + Traceback (most recent call last): + ... + ValueError: Invalid input + needed_sum must be between 1 and 1000, power between 2 and 10. + """ + if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): + raise ValueError( + "Invalid input\n" + "needed_sum must be between 1 and 1000, power between 2 and 10." 
+ ) + + return backtrack(needed_sum, power, 1, 0, 0)[1] # Return the solutions_count + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69f20033e55ae62c337e2fb2146aea5fabf3e5a0 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 26 Jun 2023 02:15:31 -0700 Subject: [PATCH 363/368] Remove duplicate implementation of Collatz sequence (#8836) * updating DIRECTORY.md * Remove duplicate implementation of Collatz sequence * updating DIRECTORY.md * Add suggestions from PR review --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - maths/3n_plus_1.py | 151 -------------------------------------- maths/collatz_sequence.py | 69 +++++++++++------ 3 files changed, 46 insertions(+), 175 deletions(-) delete mode 100644 maths/3n_plus_1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0c21b9537..1e0e450bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -522,7 +522,6 @@ * [Xgboost Regressor](machine_learning/xgboost_regressor.py) ## Maths - * [3N Plus 1](maths/3n_plus_1.py) * [Abs](maths/abs.py) * [Add](maths/add.py) * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py deleted file mode 100644 index f9f6dfeb9..000000000 --- a/maths/3n_plus_1.py +++ /dev/null @@ -1,151 +0,0 @@ -from __future__ import annotations - - -def n31(a: int) -> tuple[list[int], int]: - """ - Returns the Collatz sequence and its length of any positive integer. - >>> n31(4) - ([4, 2, 1], 3) - """ - - if not isinstance(a, int): - msg = f"Must be int, not {type(a).__name__}" - raise TypeError(msg) - if a < 1: - msg = f"Given integer must be positive, not {a}" - raise ValueError(msg) - - path = [a] - while a != 1: - if a % 2 == 0: - a //= 2 - else: - a = 3 * a + 1 - path.append(a) - return path, len(path) - - -def test_n31(): - """ - >>> test_n31() - """ - assert n31(4) == ([4, 2, 1], 3) - assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15) - assert n31(31) == ( - [ - 31, - 94, - 47, - 142, - 71, - 214, - 107, - 322, - 161, - 484, - 242, - 121, - 364, - 182, - 91, - 274, - 137, - 412, - 206, - 103, - 310, - 155, - 466, - 233, - 700, - 350, - 175, - 526, - 263, - 790, - 395, - 1186, - 593, - 1780, - 890, - 445, - 1336, - 668, - 334, - 167, - 502, - 251, - 754, - 377, - 1132, - 566, - 283, - 850, - 425, - 1276, - 638, - 319, - 958, - 479, - 1438, - 719, - 2158, - 1079, - 3238, - 1619, - 4858, - 2429, - 7288, - 3644, - 1822, - 911, - 2734, - 1367, - 4102, - 2051, - 6154, - 3077, - 9232, - 4616, - 2308, - 1154, - 577, - 1732, - 866, - 433, - 1300, - 650, - 325, - 976, - 488, - 244, - 122, - 61, - 184, - 92, - 46, - 23, - 70, - 35, - 106, - 53, - 160, - 80, - 40, - 20, - 10, - 5, - 16, - 8, - 4, - 2, - 1, - ], - 107, - ) - - -if __name__ == "__main__": - num = 4 - path, length = n31(num) - print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}") diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index 7b3636de6..4f3aa5582 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -1,43 +1,66 @@ +""" +The Collatz conjecture is a famous unsolved problem in mathematics. Given a starting +positive integer, define the following sequence: +- If the current term n is even, then the next term is n/2. +- If the current term n is odd, then the next term is 3n + 1. +The conjecture claims that this sequence will always reach 1 for any starting number. 
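For instance, starting from 6 the two rules give the sequence 6, 3, 10, 5, 16, 8, 4, 2, 1, which reaches 1 after eight steps.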
+ +Other names for this problem include the 3n + 1 problem, the Ulam conjecture, Kakutani's +problem, the Thwaites conjecture, Hasse's algorithm, the Syracuse problem, and the +hailstone sequence. + +Reference: https://en.wikipedia.org/wiki/Collatz_conjecture +""" + from __future__ import annotations +from collections.abc import Generator -def collatz_sequence(n: int) -> list[int]: + +def collatz_sequence(n: int) -> Generator[int, None, None]: """ - Collatz conjecture: start with any positive integer n. The next term is - obtained as follows: - If n term is even, the next term is: n / 2 . - If n is odd, the next term is: 3 * n + 1. - - The conjecture states the sequence will always reach 1 for any starting value n. - Example: - >>> collatz_sequence(2.1) + Generate the Collatz sequence starting at n. + >>> tuple(collatz_sequence(2.1)) Traceback (most recent call last): ... - Exception: Sequence only defined for natural numbers - >>> collatz_sequence(0) + Exception: Sequence only defined for positive integers + >>> tuple(collatz_sequence(0)) Traceback (most recent call last): ... - Exception: Sequence only defined for natural numbers - >>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE - [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, - 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1] + Exception: Sequence only defined for positive integers + >>> tuple(collatz_sequence(4)) + (4, 2, 1) + >>> tuple(collatz_sequence(11)) + (11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1) + >>> tuple(collatz_sequence(31)) # doctest: +NORMALIZE_WHITESPACE + (31, 94, 47, 142, 71, 214, 107, 322, 161, 484, 242, 121, 364, 182, 91, 274, 137, + 412, 206, 103, 310, 155, 466, 233, 700, 350, 175, 526, 263, 790, 395, 1186, 593, + 1780, 890, 445, 1336, 668, 334, 167, 502, 251, 754, 377, 1132, 566, 283, 850, 425, + 1276, 638, 319, 958, 479, 1438, 719, 2158, 1079, 3238, 1619, 4858, 2429, 7288, 3644, + 1822, 911, 2734, 1367, 4102, 2051, 6154, 3077, 9232, 4616, 2308, 1154, 577, 1732, + 866, 433, 1300, 650, 325, 976, 488, 244, 122, 61, 184, 92, 46, 23, 70, 35, 106, 53, + 160, 80, 40, 20, 10, 5, 16, 8, 4, 2, 1) + >>> tuple(collatz_sequence(43)) # doctest: +NORMALIZE_WHITESPACE + (43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, + 13, 40, 20, 10, 5, 16, 8, 4, 2, 1) """ - if not isinstance(n, int) or n < 1: - raise Exception("Sequence only defined for natural numbers") + raise Exception("Sequence only defined for positive integers") - sequence = [n] + yield n while n != 1: - n = 3 * n + 1 if n & 1 else n // 2 - sequence.append(n) - return sequence + if n % 2 == 0: + n //= 2 + else: + n = 3 * n + 1 + yield n def main(): n = 43 - sequence = collatz_sequence(n) + sequence = tuple(collatz_sequence(n)) print(sequence) - print(f"collatz sequence from {n} took {len(sequence)} steps.") + print(f"Collatz sequence from {n} took {len(sequence)} steps.") if __name__ == "__main__": From 929d3d9219020d2978d5560e3b931df69a6f2d50 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 07:23:54 +0200 Subject: [PATCH 364/368] [pre-commit.ci] pre-commit autoupdate (#8842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.274 → v0.0.275](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.274...v0.0.275) - [github.com/tox-dev/pyproject-fmt: 0.12.0 → 
0.12.1](https://github.com/tox-dev/pyproject-fmt/compare/0.12.0...0.12.1) - [github.com/pre-commit/mirrors-mypy: v1.3.0 → v1.4.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.3.0...v1.4.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3d4cc4084..1d92d2ff3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.274 + rev: v0.0.275 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.12.0" + rev: "0.12.1" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.3.0 + rev: v1.4.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 1e0e450bc..d25d665ef 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -29,6 +29,7 @@ * [Minmax](backtracking/minmax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) + * [Power Sum](backtracking/power_sum.py) * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) From c9ee6ed1887fadd25c1c43c31ed55a99b2be5f24 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 00:20:35 +0200 Subject: [PATCH 365/368] [pre-commit.ci] pre-commit autoupdate (#8853) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.275 → v0.0.276](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.275...v0.0.276) * Update double_ended_queue.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update double_ended_queue.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- data_structures/queue/double_ended_queue.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d92d2ff3..42ebeed14 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.276 hooks: - id: ruff diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 2472371b4..44dc863b9 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -54,7 +54,7 @@ class Deque: the current node of the iteration. 
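For context on the __slots__ change just below: when __slots__ is a plain string such as "_cur", Python already treats it as a single slot name (it is not split into characters), so behaviour is unchanged; the one-element tuple only makes the "sequence of names" intent explicit. A minimal sketch with a hypothetical class, not taken from this file:

    class _Sketch:
        __slots__ = ("_cur",)  # equivalent in effect to __slots__ = "_cur", but explicitly a sequence of names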
""" - __slots__ = "_cur" + __slots__ = ("_cur",) def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur From a0eec90466beeb3b6ce0f7afd905f96454e9b14c Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 11 Jul 2023 02:44:12 -0700 Subject: [PATCH 366/368] Consolidate duplicate implementations of max subarray (#8849) * Remove max subarray sum duplicate implementations * updating DIRECTORY.md * Rename max_sum_contiguous_subsequence.py * Fix typo in dynamic_programming/max_subarray_sum.py * Remove duplicate divide and conquer max subarray * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +- divide_and_conquer/max_subarray.py | 112 ++++++++++++++++++ divide_and_conquer/max_subarray_sum.py | 78 ------------ dynamic_programming/max_sub_array.py | 93 --------------- dynamic_programming/max_subarray_sum.py | 60 ++++++++++ .../max_sum_contiguous_subsequence.py | 20 ---- maths/kadanes.py | 63 ---------- maths/largest_subarray_sum.py | 21 ---- other/maximum_subarray.py | 32 ----- 9 files changed, 174 insertions(+), 313 deletions(-) create mode 100644 divide_and_conquer/max_subarray.py delete mode 100644 divide_and_conquer/max_subarray_sum.py delete mode 100644 dynamic_programming/max_sub_array.py create mode 100644 dynamic_programming/max_subarray_sum.py delete mode 100644 dynamic_programming/max_sum_contiguous_subsequence.py delete mode 100644 maths/kadanes.py delete mode 100644 maths/largest_subarray_sum.py delete mode 100644 other/maximum_subarray.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d25d665ef..77938f450 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -293,7 +293,7 @@ * [Inversions](divide_and_conquer/inversions.py) * [Kth Order Statistic](divide_and_conquer/kth_order_statistic.py) * [Max Difference Pair](divide_and_conquer/max_difference_pair.py) - * [Max Subarray Sum](divide_and_conquer/max_subarray_sum.py) + * [Max Subarray](divide_and_conquer/max_subarray.py) * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) @@ -324,8 +324,7 @@ * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Product Subarray](dynamic_programming/max_product_subarray.py) - * [Max Sub Array](dynamic_programming/max_sub_array.py) - * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) + * [Max Subarray Sum](dynamic_programming/max_subarray_sum.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) @@ -591,12 +590,10 @@ * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) - * [Kadanes](maths/kadanes.py) * [Karatsuba](maths/karatsuba.py) * [Krishnamurthy Number](maths/krishnamurthy_number.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py) - * [Largest Subarray Sum](maths/largest_subarray_sum.py) * [Least Common Multiple](maths/least_common_multiple.py) * [Line Length](maths/line_length.py) * [Liouville Lambda](maths/liouville_lambda.py) @@ -733,7 +730,6 @@ * [Linear Congruential Generator](other/linear_congruential_generator.py) * [Lru Cache](other/lru_cache.py) * 
[Magicdiamondpattern](other/magicdiamondpattern.py) - * [Maximum Subarray](other/maximum_subarray.py) * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Number Container System](other/number_container_system.py) diff --git a/divide_and_conquer/max_subarray.py b/divide_and_conquer/max_subarray.py new file mode 100644 index 000000000..851ef621a --- /dev/null +++ b/divide_and_conquer/max_subarray.py @@ -0,0 +1,112 @@ +""" +The maximum subarray problem is the task of finding the continuous subarray that has the +maximum sum within a given array of numbers. For example, given the array +[-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum is +[4, -1, 2, 1], which has a sum of 6. + +This divide-and-conquer algorithm finds the maximum subarray in O(n log n) time. +""" +from __future__ import annotations + +import time +from collections.abc import Sequence +from random import randint + +from matplotlib import pyplot as plt + + +def max_subarray( + arr: Sequence[float], low: int, high: int +) -> tuple[int | None, int | None, float]: + """ + Solves the maximum subarray problem using divide and conquer. + :param arr: the given array of numbers + :param low: the start index + :param high: the end index + :return: the start index of the maximum subarray, the end index of the + maximum subarray, and the maximum subarray sum + + >>> nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] + >>> max_subarray(nums, 0, len(nums) - 1) + (3, 6, 6) + >>> nums = [2, 8, 9] + >>> max_subarray(nums, 0, len(nums) - 1) + (0, 2, 19) + >>> nums = [0, 0] + >>> max_subarray(nums, 0, len(nums) - 1) + (0, 0, 0) + >>> nums = [-1.0, 0.0, 1.0] + >>> max_subarray(nums, 0, len(nums) - 1) + (2, 2, 1.0) + >>> nums = [-2, -3, -1, -4, -6] + >>> max_subarray(nums, 0, len(nums) - 1) + (2, 2, -1) + >>> max_subarray([], 0, 0) + (None, None, 0) + """ + if not arr: + return None, None, 0 + if low == high: + return low, high, arr[low] + + mid = (low + high) // 2 + left_low, left_high, left_sum = max_subarray(arr, low, mid) + right_low, right_high, right_sum = max_subarray(arr, mid + 1, high) + cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high) + if left_sum >= right_sum and left_sum >= cross_sum: + return left_low, left_high, left_sum + elif right_sum >= left_sum and right_sum >= cross_sum: + return right_low, right_high, right_sum + return cross_left, cross_right, cross_sum + + +def max_cross_sum( + arr: Sequence[float], low: int, mid: int, high: int +) -> tuple[int, int, float]: + left_sum, max_left = float("-inf"), -1 + right_sum, max_right = float("-inf"), -1 + + summ: int | float = 0 + for i in range(mid, low - 1, -1): + summ += arr[i] + if summ > left_sum: + left_sum = summ + max_left = i + + summ = 0 + for i in range(mid + 1, high + 1): + summ += arr[i] + if summ > right_sum: + right_sum = summ + max_right = i + + return max_left, max_right, (left_sum + right_sum) + + +def time_max_subarray(input_size: int) -> float: + arr = [randint(1, input_size) for _ in range(input_size)] + start = time.time() + max_subarray(arr, 0, input_size - 1) + end = time.time() + return end - start + + +def plot_runtimes() -> None: + input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] + runtimes = [time_max_subarray(input_size) for input_size in input_sizes] + print("No of Inputs\t\tTime Taken") + for input_size, runtime in zip(input_sizes, runtimes): + print(input_size, "\t\t", runtime) + plt.plot(input_sizes, runtimes) + plt.xlabel("Number 
of Inputs") + plt.ylabel("Time taken in seconds") + plt.show() + + +if __name__ == "__main__": + """ + A random simulation of this algorithm. + """ + from doctest import testmod + + testmod() diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py deleted file mode 100644 index f23e81719..000000000 --- a/divide_and_conquer/max_subarray_sum.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Given a array of length n, max_subarray_sum() finds -the maximum of sum of contiguous sub-array using divide and conquer method. - -Time complexity : O(n log n) - -Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION -(section : 4, sub-section : 4.1, page : 70) - -""" - - -def max_sum_from_start(array): - """This function finds the maximum contiguous sum of array from 0 index - - Parameters : - array (list[int]) : given array - - Returns : - max_sum (int) : maximum contiguous sum of array from 0 index - - """ - array_sum = 0 - max_sum = float("-inf") - for num in array: - array_sum += num - if array_sum > max_sum: - max_sum = array_sum - return max_sum - - -def max_cross_array_sum(array, left, mid, right): - """This function finds the maximum contiguous sum of left and right arrays - - Parameters : - array, left, mid, right (list[int], int, int, int) - - Returns : - (int) : maximum of sum of contiguous sum of left and right arrays - - """ - - max_sum_of_left = max_sum_from_start(array[left : mid + 1][::-1]) - max_sum_of_right = max_sum_from_start(array[mid + 1 : right + 1]) - return max_sum_of_left + max_sum_of_right - - -def max_subarray_sum(array, left, right): - """Maximum contiguous sub-array sum, using divide and conquer method - - Parameters : - array, left, right (list[int], int, int) : - given array, current left index and current right index - - Returns : - int : maximum of sum of contiguous sub-array - - """ - - # base case: array has only one element - if left == right: - return array[right] - - # Recursion - mid = (left + right) // 2 - left_half_sum = max_subarray_sum(array, left, mid) - right_half_sum = max_subarray_sum(array, mid + 1, right) - cross_sum = max_cross_array_sum(array, left, mid, right) - return max(left_half_sum, right_half_sum, cross_sum) - - -if __name__ == "__main__": - array = [-2, -5, 6, -2, -3, 1, 5, -6] - array_length = len(array) - print( - "Maximum sum of contiguous subarray:", - max_subarray_sum(array, 0, array_length - 1), - ) diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py deleted file mode 100644 index 07717fba4..000000000 --- a/dynamic_programming/max_sub_array.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -author : Mayank Kumar Jha (mk9440) -""" -from __future__ import annotations - - -def find_max_sub_array(a, low, high): - if low == high: - return low, high, a[low] - else: - mid = (low + high) // 2 - left_low, left_high, left_sum = find_max_sub_array(a, low, mid) - right_low, right_high, right_sum = find_max_sub_array(a, mid + 1, high) - cross_left, cross_right, cross_sum = find_max_cross_sum(a, low, mid, high) - if left_sum >= right_sum and left_sum >= cross_sum: - return left_low, left_high, left_sum - elif right_sum >= left_sum and right_sum >= cross_sum: - return right_low, right_high, right_sum - else: - return cross_left, cross_right, cross_sum - - -def find_max_cross_sum(a, low, mid, high): - left_sum, max_left = -999999999, -1 - right_sum, max_right = -999999999, -1 - summ = 0 - for i in range(mid, low - 1, -1): - summ += a[i] - if summ > left_sum: - left_sum = summ - max_left = i - summ = 0 - for i 
in range(mid + 1, high + 1): - summ += a[i] - if summ > right_sum: - right_sum = summ - max_right = i - return max_left, max_right, (left_sum + right_sum) - - -def max_sub_array(nums: list[int]) -> int: - """ - Finds the contiguous subarray which has the largest sum and return its sum. - - >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) - 6 - - An empty (sub)array has sum 0. - >>> max_sub_array([]) - 0 - - If all elements are negative, the largest subarray would be the empty array, - having the sum 0. - >>> max_sub_array([-1, -2, -3]) - 0 - >>> max_sub_array([5, -2, -3]) - 5 - >>> max_sub_array([31, -41, 59, 26, -53, 58, 97, -93, -23, 84]) - 187 - """ - best = 0 - current = 0 - for i in nums: - current += i - current = max(current, 0) - best = max(best, current) - return best - - -if __name__ == "__main__": - """ - A random simulation of this algorithm. - """ - import time - from random import randint - - from matplotlib import pyplot as plt - - inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] - tim = [] - for i in inputs: - li = [randint(1, i) for j in range(i)] - strt = time.time() - (find_max_sub_array(li, 0, len(li) - 1)) - end = time.time() - tim.append(end - strt) - print("No of Inputs Time Taken") - for i in range(len(inputs)): - print(inputs[i], "\t\t", tim[i]) - plt.plot(inputs, tim) - plt.xlabel("Number of Inputs") - plt.ylabel("Time taken in seconds ") - plt.show() diff --git a/dynamic_programming/max_subarray_sum.py b/dynamic_programming/max_subarray_sum.py new file mode 100644 index 000000000..c76943472 --- /dev/null +++ b/dynamic_programming/max_subarray_sum.py @@ -0,0 +1,60 @@ +""" +The maximum subarray sum problem is the task of finding the maximum sum that can be +obtained from a contiguous subarray within a given array of numbers. For example, given +the array [-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum +is [4, -1, 2, 1], so the maximum subarray sum is 6. + +Kadane's algorithm is a simple dynamic programming algorithm that solves the maximum +subarray sum problem in O(n) time and O(1) space. + +Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem +""" +from collections.abc import Sequence + + +def max_subarray_sum( + arr: Sequence[float], allow_empty_subarrays: bool = False +) -> float: + """ + Solves the maximum subarray sum problem using Kadane's algorithm. 
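As a quick trace of Kadane's recurrence (best sum ending at i = max(arr[i], best ending at i-1 + arr[i])): for [2, 3, -9, 8, -2] the running values are 2, 5, -4, 8, 6, and their maximum, 8, matches the doctest below.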
+ :param arr: the given array of numbers + :param allow_empty_subarrays: if True, then the algorithm considers empty subarrays + + >>> max_subarray_sum([2, 8, 9]) + 19 + >>> max_subarray_sum([0, 0]) + 0 + >>> max_subarray_sum([-1.0, 0.0, 1.0]) + 1.0 + >>> max_subarray_sum([1, 2, 3, 4, -2]) + 10 + >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) + 6 + >>> max_subarray_sum([2, 3, -9, 8, -2]) + 8 + >>> max_subarray_sum([-2, -3, -1, -4, -6]) + -1 + >>> max_subarray_sum([-2, -3, -1, -4, -6], allow_empty_subarrays=True) + 0 + >>> max_subarray_sum([]) + 0 + """ + if not arr: + return 0 + + max_sum = 0 if allow_empty_subarrays else float("-inf") + curr_sum = 0.0 + for num in arr: + curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num) + max_sum = max(max_sum, curr_sum) + + return max_sum + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] + print(f"{max_subarray_sum(nums) = }") diff --git a/dynamic_programming/max_sum_contiguous_subsequence.py b/dynamic_programming/max_sum_contiguous_subsequence.py deleted file mode 100644 index bac592370..000000000 --- a/dynamic_programming/max_sum_contiguous_subsequence.py +++ /dev/null @@ -1,20 +0,0 @@ -def max_subarray_sum(nums: list) -> int: - """ - >>> max_subarray_sum([6 , 9, -1, 3, -7, -5, 10]) - 17 - """ - if not nums: - return 0 - n = len(nums) - - res, s, s_pre = nums[0], nums[0], nums[0] - for i in range(1, n): - s = max(nums[i], s_pre + nums[i]) - s_pre = s - res = max(res, s) - return res - - -if __name__ == "__main__": - nums = [6, 9, -1, 3, -7, -5, 10] - print(max_subarray_sum(nums)) diff --git a/maths/kadanes.py b/maths/kadanes.py deleted file mode 100644 index c2ea53a6c..000000000 --- a/maths/kadanes.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Kadane's algorithm to get maximum subarray sum -https://medium.com/@rsinghal757/kadanes-algorithm-dynamic-programming-how-and-why-does-it-work-3fd8849ed73d -https://en.wikipedia.org/wiki/Maximum_subarray_problem -""" -test_data: tuple = ([-2, -8, -9], [2, 8, 9], [-1, 0, 1], [0, 0], []) - - -def negative_exist(arr: list) -> int: - """ - >>> negative_exist([-2,-8,-9]) - -2 - >>> [negative_exist(arr) for arr in test_data] - [-2, 0, 0, 0, 0] - """ - arr = arr or [0] - max_number = arr[0] - for i in arr: - if i >= 0: - return 0 - elif max_number <= i: - max_number = i - return max_number - - -def kadanes(arr: list) -> int: - """ - If negative_exist() returns 0 than this function will execute - else it will return the value return by negative_exist function - - For example: arr = [2, 3, -9, 8, -2] - Initially we set value of max_sum to 0 and max_till_element to 0 than when - max_sum is less than max_till particular element it will assign that value to - max_sum and when value of max_till_sum is less than 0 it will assign 0 to i - and after that whole process, return the max_sum - So the output for above arr is 8 - - >>> kadanes([2, 3, -9, 8, -2]) - 8 - >>> [kadanes(arr) for arr in test_data] - [-2, 19, 1, 0, 0] - """ - max_sum = negative_exist(arr) - if max_sum < 0: - return max_sum - - max_sum = 0 - max_till_element = 0 - - for i in arr: - max_till_element += i - max_sum = max(max_sum, max_till_element) - max_till_element = max(max_till_element, 0) - return max_sum - - -if __name__ == "__main__": - try: - print("Enter integer values sepatated by spaces") - arr = [int(x) for x in input().split()] - print(f"Maximum subarray sum of {arr} is {kadanes(arr)}") - except ValueError: - print("Please enter integer values.") diff --git 
a/maths/largest_subarray_sum.py b/maths/largest_subarray_sum.py deleted file mode 100644 index 90f92c712..000000000 --- a/maths/largest_subarray_sum.py +++ /dev/null @@ -1,21 +0,0 @@ -from sys import maxsize - - -def max_sub_array_sum(a: list, size: int = 0): - """ - >>> max_sub_array_sum([-13, -3, -25, -20, -3, -16, -23, -12, -5, -22, -15, -4, -7]) - -3 - """ - size = size or len(a) - max_so_far = -maxsize - 1 - max_ending_here = 0 - for i in range(0, size): - max_ending_here = max_ending_here + a[i] - max_so_far = max(max_so_far, max_ending_here) - max_ending_here = max(max_ending_here, 0) - return max_so_far - - -if __name__ == "__main__": - a = [-13, -3, -25, -20, 1, -16, -23, -12, -5, -22, -15, -4, -7] - print(("Maximum contiguous sum is", max_sub_array_sum(a, len(a)))) diff --git a/other/maximum_subarray.py b/other/maximum_subarray.py deleted file mode 100644 index 1c8c8cabc..000000000 --- a/other/maximum_subarray.py +++ /dev/null @@ -1,32 +0,0 @@ -from collections.abc import Sequence - - -def max_subarray_sum(nums: Sequence[int]) -> int: - """Return the maximum possible sum amongst all non - empty subarrays. - - Raises: - ValueError: when nums is empty. - - >>> max_subarray_sum([1,2,3,4,-2]) - 10 - >>> max_subarray_sum([-2,1,-3,4,-1,2,1,-5,4]) - 6 - """ - if not nums: - raise ValueError("Input sequence should not be empty") - - curr_max = ans = nums[0] - nums_len = len(nums) - - for i in range(1, nums_len): - num = nums[i] - curr_max = max(curr_max + num, num) - ans = max(curr_max, ans) - - return ans - - -if __name__ == "__main__": - n = int(input("Enter number of elements : ").strip()) - array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] - print(max_subarray_sum(array)) From 44b1bcc7c7e0f15385530bf54c59ad4eb86fef0b Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 11 Jul 2023 10:51:21 +0100 Subject: [PATCH 367/368] Fix failing tests from ruff/newton_raphson (ignore S307 "possibly insecure function") (#8862) * chore: Fix failing tests (ignore S307 "possibly insecure function") * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: Move noqa back to right line --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- arithmetic_analysis/newton_raphson.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index aee2f07e5..1b90ad417 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -25,9 +25,11 @@ def newton_raphson( """ x = a while True: - x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func))))) + x = Decimal(x) - ( + Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307 + ) # This number dictates the accuracy of the answer - if abs(eval(func)) < precision: + if abs(eval(func)) < precision: # noqa: S307 return float(x) From f614ed72170011d2d439f7901e1c8daa7deac8c4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 11:55:32 +0200 Subject: [PATCH 368/368] [pre-commit.ci] pre-commit autoupdate (#8860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.276 → v0.0.277](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.276...v0.0.277) - [github.com/tox-dev/pyproject-fmt: 0.12.1 → 
0.13.0](https://github.com/tox-dev/pyproject-fmt/compare/0.12.1...0.13.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 42ebeed14..bf30703bd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.276 + rev: v0.0.277 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.12.1" + rev: "0.13.0" hooks: - id: pyproject-fmt
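The two `# noqa: S307` markers in the newton_raphson change above silence ruff's warning about calling `eval` on a string inside the loop. A possible alternative, shown here only as a hedged sketch (the function name, the example expression, and the use of sympy's lambdify are illustrative assumptions, not the repository's implementation), is to parse the expression once with sympy and compile it and its derivative into plain callables, so the iteration itself never evaluates raw strings:

from sympy import diff, lambdify, symbols, sympify


def newton_raphson_sketch(func: str, start: float, precision: float = 1e-10) -> float:
    """Return an approximate root of func (an expression in x) near start."""
    x = symbols("x")
    expr = sympify(func)  # parse the string once, outside the loop
    f = lambdify(x, expr, "math")  # plain callable for f(x)
    f_prime = lambdify(x, diff(expr, x), "math")  # plain callable for f'(x)
    guess = start
    while abs(f(guess)) >= precision:  # same stopping rule as the original
        guess -= f(guess) / f_prime(guess)  # Newton step: x_(n+1) = x_n - f/f'
    return guess


if __name__ == "__main__":
    print(newton_raphson_sketch("x**2 - 5", 3.0))  # ~2.23606797749979 (sqrt(5))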