diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index f7e6eb212..000000000
--- a/.coveragerc
+++ /dev/null
@@ -1,4 +0,0 @@
-[report]
-sort = Cover
-omit =
- .env/*
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 000000000..b5a5347c6
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,8 @@
+# https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md
+ARG VARIANT=3.11-bookworm
+FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT}
+COPY requirements.txt /tmp/pip-tmp/
+RUN python3 -m pip install --upgrade pip \
+ && python3 -m pip install --no-cache-dir -r /tmp/pip-tmp/requirements.txt \
+ && pipx install pre-commit ruff \
+ && pre-commit install
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..c5a855b25
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,42 @@
+{
+ "name": "Python 3",
+ "build": {
+ "dockerfile": "Dockerfile",
+ "context": "..",
+ "args": {
+ // Update 'VARIANT' to pick a Python version: 3, 3.10, 3.9, 3.8, 3.7, 3.6
+ // Append -bullseye or -buster to pin to an OS version.
+ // Use -bullseye variants on local on arm64/Apple Silicon.
+ "VARIANT": "3.11-bookworm",
+ }
+ },
+
+ // Configure tool-specific properties.
+ "customizations": {
+ // Configure properties specific to VS Code.
+ "vscode": {
+ // Set *default* container specific settings.json values on container create.
+ "settings": {
+ "python.defaultInterpreterPath": "/usr/local/bin/python",
+ "python.linting.enabled": true,
+ "python.formatting.blackPath": "/usr/local/py-utils/bin/black",
+ "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy"
+ },
+
+ // Add the IDs of extensions you want installed when the container is created.
+ "extensions": [
+ "ms-python.python",
+ "ms-python.vscode-pylance"
+ ]
+ }
+ },
+
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
+ // "forwardPorts": [],
+
+ // Use 'postCreateCommand' to run commands after the container is created.
+ // "postCreateCommand": "pip3 install --user -r requirements.txt",
+
+ // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
+ "remoteUser": "vscode"
+}
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 260b9704e..abf99ab22 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -31,11 +31,11 @@
# /data_structures/ @cclauss # TODO: Uncomment this line after Hacktoberfest
-/digital_image_processing/ @mateuszz0000
+# /digital_image_processing/
# /divide_and_conquer/
-/dynamic_programming/ @Kush1101
+# /dynamic_programming/
# /file_transfer/
@@ -59,7 +59,7 @@
# /machine_learning/
-/maths/ @Kush1101
+# /maths/
# /matrix/
@@ -69,7 +69,7 @@
# /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest
-/project_euler/ @dhruvmanila @Kush1101
+/project_euler/ @dhruvmanila
# /quantum/
@@ -79,7 +79,7 @@
# /searches/
-/sorts/ @mateuszz0000
+# /sorts/
# /strings/ @cclauss # TODO: Uncomment this line after Hacktoberfest
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..4ccdb52ca
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,54 @@
+name: Bug report
+description: Create a bug report to help us address errors in the repository
+labels: [bug]
+body:
+ - type: markdown
+ attributes:
+ value: >
+ Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/bug).
+ Usage questions such as "How do I...?" belong on the
+ [Discord](https://discord.gg/c7MnfGFGa6) and will be closed.
+
+ - type: input
+ attributes:
+ label: "Repository commit"
+ description: >
+ The commit hash for `TheAlgorithms/Python` repository. You can get this
+ by running the command `git rev-parse HEAD` locally.
+ placeholder: "a0b0f414ae134aa1772d33bb930e5a960f9979e8"
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: "Python version (python --version)"
+ placeholder: "Python 3.10.7"
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: "Dependencies version (pip freeze)"
+ description: >
+ This is the output of the command `pip freeze --all`. Note that the
+ actual output might be different as compared to the placeholder text.
+ placeholder: |
+ appnope==0.1.3
+ asttokens==2.0.8
+ backcall==0.2.0
+ ...
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: "Expected behavior"
+ description: "Describe the behavior you expect. May include images or videos."
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: "Actual behavior"
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..62019bb08
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: Discord community
+ url: https://discord.gg/c7MnfGFGa6
+ about: Have any questions or need any help? Please contact us via Discord
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..09a159b21
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,19 @@
+name: Feature request
+description: Suggest features, propose improvements, discuss new ideas.
+labels: [enhancement]
+body:
+ - type: markdown
+ attributes:
+ value: >
+ Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/enhancement).
+ Usage questions such as "How do I...?" belong on the
+ [Discord](https://discord.gg/c7MnfGFGa6) and will be closed.
+
+ - type: textarea
+ attributes:
+ label: "Feature description"
+ description: >
+ This could be new algorithms, data structures or improving any existing
+ implementations.
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/other.yml b/.github/ISSUE_TEMPLATE/other.yml
new file mode 100644
index 000000000..44d6ff541
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/other.yml
@@ -0,0 +1,19 @@
+name: Other
+description: Use this for any other issues. PLEASE do not create blank issues
+labels: ["awaiting triage"]
+body:
+ - type: textarea
+ id: issuedescription
+ attributes:
+ label: What would you like to share?
+ description: Provide a clear and concise explanation of your issue.
+ validations:
+ required: true
+
+ - type: textarea
+ id: extrainfo
+ attributes:
+ label: Additional information
+ description: Is there anything else we should know about this issue?
+ validations:
+ required: false
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 103ecf7c2..1f9797fae 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,4 +1,4 @@
-### **Describe your change:**
+### Describe your change:
@@ -6,7 +6,7 @@
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
-### **Checklist:**
+### Checklist:
* [ ] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [ ] This pull request is all my own work -- I have not plagiarized.
* [ ] I know that pull requests will not be merged if they fail the automated tests.
@@ -16,5 +16,5 @@
* [ ] All functions and variable names follow Python naming conventions.
* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
-* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
-* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
+* [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
+* [ ] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER".
diff --git a/.github/stale.yml b/.github/stale.yml
index 36ca56266..0939e1f22 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -45,7 +45,7 @@ pulls:
closeComment: >
Please reopen this pull request once you commit the changes requested
or make improvements on the code. If this is not the case and you need
- some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms)
+ some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms/community)
or ping one of the reviewers. Thank you for your contributions!
issues:
@@ -59,5 +59,5 @@ issues:
closeComment: >
Please reopen this issue once you add more information and updates here.
If this is not the case and you need some help, feel free to seek help
- from our [Gitter](https://gitter.im/TheAlgorithms) or ping one of the
+ from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the
reviewers. Thank you for your contributions!
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 2ffc2aa29..fc8cb6369 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -9,20 +9,25 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
with:
- python-version: "3.9"
- - uses: actions/cache@v2
+ python-version: 3.11
+ - uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools six wheel
- python -m pip install mypy pytest-cov -r requirements.txt
- - run: mypy .
+ python -m pip install pytest-cov -r requirements.txt
- name: Run tests
- run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. .
+ # TODO: #8818 Re-enable quantum tests
+ run: pytest
+ --ignore=quantum/q_fourier_transform.py
+ --ignore=project_euler/
+ --ignore=scripts/validate_solutions.py
+ --cov-report=term-missing:skip-covered
+ --cov=. .
- if: ${{ success() }}
run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml
index be8154a32..331962cef 100644
--- a/.github/workflows/directory_writer.yml
+++ b/.github/workflows/directory_writer.yml
@@ -6,8 +6,10 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v1 # v1, NOT v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v1 # v1, NOT v2 or v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.x
- name: Write DIRECTORY.md
run: |
scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
deleted file mode 100644
index 27a5a97c0..000000000
--- a/.github/workflows/pre-commit.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: pre-commit
-
-on: [push, pull_request]
-
-jobs:
- pre-commit:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - uses: actions/cache@v2
- with:
- path: |
- ~/.cache/pre-commit
- ~/.cache/pip
- key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- - uses: actions/setup-python@v2
- - uses: psf/black@21.4b0
- - name: Install pre-commit
- run: |
- python -m pip install --upgrade pip
- python -m pip install --upgrade pre-commit
- - run: pre-commit run --verbose --all-files --show-diff-on-failure
diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml
index 995295fca..460938219 100644
--- a/.github/workflows/project_euler.yml
+++ b/.github/workflows/project_euler.yml
@@ -14,8 +14,10 @@ jobs:
project-euler:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.x
- name: Install pytest and pytest-cov
run: |
python -m pip install --upgrade pip
@@ -24,8 +26,10 @@ jobs:
validate-solutions:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.x
- name: Install pytest and requests
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
new file mode 100644
index 000000000..ca2d5be47
--- /dev/null
+++ b/.github/workflows/ruff.yml
@@ -0,0 +1,16 @@
+# https://beta.ruff.rs
+name: ruff
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+jobs:
+ ruff:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - run: pip install --user ruff
+ - run: ruff --format=github .
diff --git a/.gitignore b/.gitignore
index 574cdf312..baea84b8d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -107,3 +107,4 @@ venv.bak/
.idea
.try
.vscode/
+.vs/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b666e88aa..e158bd8d6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,56 +1,42 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v3.4.0
+ rev: v4.4.0
hooks:
- id: check-executables-have-shebangs
+ - id: check-toml
- id: check-yaml
- id: end-of-file-fixer
types: [python]
- id: trailing-whitespace
- exclude: |
- (?x)^(
- data_structures/heap/binomial_heap.py
- )$
- id: requirements-txt-fixer
+
+ - repo: https://github.com/MarcoGorelli/auto-walrus
+ rev: v0.2.2
+ hooks:
+ - id: auto-walrus
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.0.281
+ hooks:
+ - id: ruff
+
- repo: https://github.com/psf/black
- rev: 21.4b0
+ rev: 23.7.0
hooks:
- id: black
- - repo: https://github.com/PyCQA/isort
- rev: 5.8.0
- hooks:
- - id: isort
- args:
- - --profile=black
- - repo: https://gitlab.com/pycqa/flake8
- rev: 3.9.1
- hooks:
- - id: flake8
- args:
- - --ignore=E203,W503
- - --max-complexity=25
- - --max-line-length=88
-# FIXME: fix mypy errors and then uncomment this
-# - repo: https://github.com/pre-commit/mirrors-mypy
-# rev: v0.782
-# hooks:
-# - id: mypy
-# args:
-# - --ignore-missing-imports
+
- repo: https://github.com/codespell-project/codespell
- rev: v2.0.0
+ rev: v2.2.5
hooks:
- id: codespell
- args:
- - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,tim
- - --skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt"
- - --quiet-level=2
- exclude: |
- (?x)^(
- strings/dictionary.txt |
- strings/words.txt |
- project_euler/problem_022/p022_names.txt
- )$
+ additional_dependencies:
+ - tomli
+
+ - repo: https://github.com/tox-dev/pyproject-fmt
+ rev: "0.13.0"
+ hooks:
+ - id: pyproject-fmt
+
- repo: local
hooks:
- id: validate-filenames
@@ -58,3 +44,18 @@ repos:
entry: ./scripts/validate_filenames.py
language: script
pass_filenames: false
+
+ - repo: https://github.com/abravalheri/validate-pyproject
+ rev: v0.13
+ hooks:
+ - id: validate-pyproject
+
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.4.1
+ hooks:
+ - id: mypy
+ args:
+ - --ignore-missing-imports
+ - --install-types # See mirrors-mypy README.md
+ - --non-interactive
+ additional_dependencies: [types-requests]
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 000000000..ef16fa1aa
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,5 @@
+{
+ "githubPullRequests.ignoredPullRequestBranches": [
+ "master"
+ ]
+}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 13d330a90..4a1bb6527 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,18 +2,18 @@
## Before contributing
-Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms).
+Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community).
## Contributing
### Contributor
-We are very happy that you consider implementing algorithms and data structure for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that:
+We are very happy that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that:
- You did your work - no plagiarism allowed
- Any plagiarized work will not be merged.
- Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged
-- You submitted work fulfils or mostly fulfils our styles and standards
+- Your submitted work fulfils or mostly fulfils our styles and standards
__New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but __identical implementation__ of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request.
@@ -23,9 +23,16 @@ __Improving comments__ and __writing proper tests__ are also highly welcome.
We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex algorithms. Please read this section if you are contributing your work.
-Your contribution will be tested by our [automated testing on Travis CI](https://travis-ci.org/TheAlgorithms/Python/pull_requests) to save time and mental energy. After you have submitted your pull request, you should see the Travis tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the Travis output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help.
+Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help.
-Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto close the issue when the PR is merged.
+If you are interested in resolving an [open issue](https://github.com/TheAlgorithms/Python/issues), simply make a pull request with your proposed fix. __We do not assign issues in this repo__ so please do not ask for permission to work on an issue.
+
+Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues.
+For example, if your pull request fixes issue #10, then please add the following to its description:
+```
+Fixes #10
+```
+GitHub will use this tag to [auto-close the issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) if and when the PR is merged.
#### What is an Algorithm?
@@ -53,7 +60,7 @@ Algorithms in this repo should not be how-to examples for existing Python packag
Use [pre-commit](https://pre-commit.com/#installation) to automatically format your code to match our coding style:
```bash
-python3 -m pip install pre-commit # required only once
+python3 -m pip install pre-commit # only required the first time
pre-commit install
```
That's it! The plugin will run every time you commit any changes. If there are any errors found during the run, fix them and commit those changes. You can even run the plugin manually on all files:
@@ -66,8 +73,8 @@ pre-commit run --all-files --show-diff-on-failure
We want your work to be readable by others; therefore, we encourage you to note the following:
-- Please write in Python 3.9+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will.
-- Please focus hard on naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments.
+- Please write in Python 3.11+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will.
+- Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments.
- Single letter variable names are *old school* so please avoid them unless their life only spans a few lines.
- Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not.
- Please follow the [Python Naming Conventions](https://pep8.org/#prescriptive-naming-conventions) so variable_names and function_names should be lower_case, CONSTANTS in UPPERCASE, ClassNames should be CamelCase, etc.
@@ -81,11 +88,11 @@ We want your work to be readable by others; therefore, we encourage you to note
black .
```
-- All submissions will need to pass the test `flake8 . --ignore=E203,W503 --max-line-length=88` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request.
+- All submissions will need to pass the test `ruff .` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request.
```bash
- python3 -m pip install flake8 # only required the first time
- flake8 . --ignore=E203,W503 --max-line-length=88 --show-source
+ python3 -m pip install ruff # only required the first time
+ ruff .
```
- Original code submission require docstrings or comments to describe your work.
@@ -102,7 +109,7 @@ We want your work to be readable by others; therefore, we encourage you to note
This is too trivial. Comments are expected to be explanatory. For comments, you can write them above, on or below a line of code, as long as you are consistent within the same piece of code.
- We encourage you to put docstrings inside your functions but please pay attention to indentation of docstrings. The following is a good example:
+ We encourage you to put docstrings inside your functions but please pay attention to the indentation of docstrings. The following is a good example:
```python
def sum_ab(a, b):
@@ -160,7 +167,7 @@ We want your work to be readable by others; therefore, we encourage you to note
- [__List comprehensions and generators__](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) are preferred over the use of `lambda`, `map`, `filter`, `reduce` but the important thing is to demonstrate the power of Python in code that is easy to read and maintain.
- Avoid importing external libraries for basic algorithms. Only use those libraries for complicated algorithms.
-- If you need a third party module that is not in the file __requirements.txt__, please add it to that file as part of your submission.
+- If you need a third-party module that is not in the file __requirements.txt__, please add it to that file as part of your submission.
#### Other Requirements for Submissions
- If you are submitting code in the `project_euler/` directory, please also read [the dedicated Guideline](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md) before contributing to our Project Euler library.
@@ -170,13 +177,13 @@ We want your work to be readable by others; therefore, we encourage you to note
- If possible, follow the standard *within* the folder you are submitting to.
- If you have modified/added code work, make sure the code compiles before submitting.
- If you have modified/added documentation work, ensure your language is concise and contains no grammar errors.
-- Do not update the README.md or DIRECTORY.md file which will be periodically autogenerated by our Travis CI processes.
+- Do not update the README.md or DIRECTORY.md file which will be periodically autogenerated by our GitHub Actions processes.
- Add a corresponding explanation to [Algorithms-Explanation](https://github.com/TheAlgorithms/Algorithms-Explanation) (Optional but recommended).
-- All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so.
+- All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage you to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so.
- Most importantly,
- __Be consistent in the use of these guidelines when submitting.__
- - __Join__ [Gitter](https://gitter.im/TheAlgorithms) __now!__
+ - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community) __now!__
- Happy coding!
Writer [@poyea](https://github.com/poyea), Jun 2019.
diff --git a/DIRECTORY.md b/DIRECTORY.md
index adc9bb9e4..fdcf0ceed 100644
--- a/DIRECTORY.md
+++ b/DIRECTORY.md
@@ -1,947 +1,1234 @@
## Arithmetic Analysis
- * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/bisection.py)
- * [Gaussian Elimination](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/gaussian_elimination.py)
- * [In Static Equilibrium](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/in_static_equilibrium.py)
- * [Intersection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/intersection.py)
- * [Lu Decomposition](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/lu_decomposition.py)
- * [Newton Forward Interpolation](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_forward_interpolation.py)
- * [Newton Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_method.py)
- * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_raphson.py)
- * [Secant Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/secant_method.py)
+ * [Bisection](arithmetic_analysis/bisection.py)
+ * [Gaussian Elimination](arithmetic_analysis/gaussian_elimination.py)
+ * [In Static Equilibrium](arithmetic_analysis/in_static_equilibrium.py)
+ * [Intersection](arithmetic_analysis/intersection.py)
+ * [Jacobi Iteration Method](arithmetic_analysis/jacobi_iteration_method.py)
+ * [Lu Decomposition](arithmetic_analysis/lu_decomposition.py)
+ * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py)
+ * [Newton Method](arithmetic_analysis/newton_method.py)
+ * [Newton Raphson](arithmetic_analysis/newton_raphson.py)
+ * [Newton Raphson New](arithmetic_analysis/newton_raphson_new.py)
+ * [Secant Method](arithmetic_analysis/secant_method.py)
+
+## Audio Filters
+ * [Butterworth Filter](audio_filters/butterworth_filter.py)
+ * [Iir Filter](audio_filters/iir_filter.py)
+ * [Show Response](audio_filters/show_response.py)
## Backtracking
- * [All Combinations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_combinations.py)
- * [All Permutations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_permutations.py)
- * [All Subsequences](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_subsequences.py)
- * [Coloring](https://github.com/TheAlgorithms/Python/blob/master/backtracking/coloring.py)
- * [Hamiltonian Cycle](https://github.com/TheAlgorithms/Python/blob/master/backtracking/hamiltonian_cycle.py)
- * [Knight Tour](https://github.com/TheAlgorithms/Python/blob/master/backtracking/knight_tour.py)
- * [Minimax](https://github.com/TheAlgorithms/Python/blob/master/backtracking/minimax.py)
- * [N Queens](https://github.com/TheAlgorithms/Python/blob/master/backtracking/n_queens.py)
- * [N Queens Math](https://github.com/TheAlgorithms/Python/blob/master/backtracking/n_queens_math.py)
- * [Rat In Maze](https://github.com/TheAlgorithms/Python/blob/master/backtracking/rat_in_maze.py)
- * [Sudoku](https://github.com/TheAlgorithms/Python/blob/master/backtracking/sudoku.py)
- * [Sum Of Subsets](https://github.com/TheAlgorithms/Python/blob/master/backtracking/sum_of_subsets.py)
+ * [All Combinations](backtracking/all_combinations.py)
+ * [All Permutations](backtracking/all_permutations.py)
+ * [All Subsequences](backtracking/all_subsequences.py)
+ * [Coloring](backtracking/coloring.py)
+ * [Combination Sum](backtracking/combination_sum.py)
+ * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py)
+ * [Knight Tour](backtracking/knight_tour.py)
+ * [Minimax](backtracking/minimax.py)
+ * [Minmax](backtracking/minmax.py)
+ * [N Queens](backtracking/n_queens.py)
+ * [N Queens Math](backtracking/n_queens_math.py)
+ * [Power Sum](backtracking/power_sum.py)
+ * [Rat In Maze](backtracking/rat_in_maze.py)
+ * [Sudoku](backtracking/sudoku.py)
+ * [Sum Of Subsets](backtracking/sum_of_subsets.py)
+ * [Word Search](backtracking/word_search.py)
## Bit Manipulation
- * [Binary And Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_and_operator.py)
- * [Binary Count Setbits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_setbits.py)
- * [Binary Count Trailing Zeros](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_trailing_zeros.py)
- * [Binary Or Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_or_operator.py)
- * [Binary Shifts](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_shifts.py)
- * [Binary Twos Complement](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_twos_complement.py)
- * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py)
- * [Count Number Of One Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_number_of_one_bits.py)
- * [Reverse Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/reverse_bits.py)
- * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py)
+ * [Binary And Operator](bit_manipulation/binary_and_operator.py)
+ * [Binary Count Setbits](bit_manipulation/binary_count_setbits.py)
+ * [Binary Count Trailing Zeros](bit_manipulation/binary_count_trailing_zeros.py)
+ * [Binary Or Operator](bit_manipulation/binary_or_operator.py)
+ * [Binary Shifts](bit_manipulation/binary_shifts.py)
+ * [Binary Twos Complement](bit_manipulation/binary_twos_complement.py)
+ * [Binary Xor Operator](bit_manipulation/binary_xor_operator.py)
+ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py)
+ * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py)
+ * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py)
+ * [Highest Set Bit](bit_manipulation/highest_set_bit.py)
+ * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py)
+ * [Is Even](bit_manipulation/is_even.py)
+ * [Is Power Of Two](bit_manipulation/is_power_of_two.py)
+ * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py)
+ * [Reverse Bits](bit_manipulation/reverse_bits.py)
+ * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py)
## Blockchain
- * [Chinese Remainder Theorem](https://github.com/TheAlgorithms/Python/blob/master/blockchain/chinese_remainder_theorem.py)
- * [Diophantine Equation](https://github.com/TheAlgorithms/Python/blob/master/blockchain/diophantine_equation.py)
- * [Modular Division](https://github.com/TheAlgorithms/Python/blob/master/blockchain/modular_division.py)
+ * [Chinese Remainder Theorem](blockchain/chinese_remainder_theorem.py)
+ * [Diophantine Equation](blockchain/diophantine_equation.py)
+ * [Modular Division](blockchain/modular_division.py)
## Boolean Algebra
- * [Quine Mc Cluskey](https://github.com/TheAlgorithms/Python/blob/master/boolean_algebra/quine_mc_cluskey.py)
+ * [And Gate](boolean_algebra/and_gate.py)
+ * [Nand Gate](boolean_algebra/nand_gate.py)
+ * [Norgate](boolean_algebra/norgate.py)
+ * [Not Gate](boolean_algebra/not_gate.py)
+ * [Or Gate](boolean_algebra/or_gate.py)
+ * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py)
+ * [Xnor Gate](boolean_algebra/xnor_gate.py)
+ * [Xor Gate](boolean_algebra/xor_gate.py)
## Cellular Automata
- * [Conways Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/conways_game_of_life.py)
- * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/game_of_life.py)
- * [One Dimensional](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/one_dimensional.py)
+ * [Conways Game Of Life](cellular_automata/conways_game_of_life.py)
+ * [Game Of Life](cellular_automata/game_of_life.py)
+ * [Nagel Schrekenberg](cellular_automata/nagel_schrekenberg.py)
+ * [One Dimensional](cellular_automata/one_dimensional.py)
## Ciphers
- * [A1Z26](https://github.com/TheAlgorithms/Python/blob/master/ciphers/a1z26.py)
- * [Affine Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/affine_cipher.py)
- * [Atbash](https://github.com/TheAlgorithms/Python/blob/master/ciphers/atbash.py)
- * [Base16](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base16.py)
- * [Base32](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base32.py)
- * [Base64 Encoding](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_encoding.py)
- * [Base85](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base85.py)
- * [Beaufort Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/beaufort_cipher.py)
- * [Brute Force Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/brute_force_caesar_cipher.py)
- * [Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/caesar_cipher.py)
- * [Cryptomath Module](https://github.com/TheAlgorithms/Python/blob/master/ciphers/cryptomath_module.py)
- * [Decrypt Caesar With Chi Squared](https://github.com/TheAlgorithms/Python/blob/master/ciphers/decrypt_caesar_with_chi_squared.py)
- * [Deterministic Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/ciphers/deterministic_miller_rabin.py)
- * [Diffie](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie.py)
- * [Diffie Hellman](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie_hellman.py)
- * [Elgamal Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/elgamal_key_generator.py)
- * [Enigma Machine2](https://github.com/TheAlgorithms/Python/blob/master/ciphers/enigma_machine2.py)
- * [Hill Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/hill_cipher.py)
- * [Mixed Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mixed_keyword_cypher.py)
- * [Mono Alphabetic Ciphers](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mono_alphabetic_ciphers.py)
- * [Morse Code Implementation](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code_implementation.py)
- * [Onepad Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/onepad_cipher.py)
- * [Playfair Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/playfair_cipher.py)
- * [Porta Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/porta_cipher.py)
- * [Rabin Miller](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rabin_miller.py)
- * [Rail Fence Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rail_fence_cipher.py)
- * [Rot13](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rot13.py)
- * [Rsa Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_cipher.py)
- * [Rsa Factorization](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_factorization.py)
- * [Rsa Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_key_generator.py)
- * [Shuffled Shift Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/shuffled_shift_cipher.py)
- * [Simple Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/simple_keyword_cypher.py)
- * [Simple Substitution Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/simple_substitution_cipher.py)
- * [Trafid Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/trafid_cipher.py)
- * [Transposition Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/transposition_cipher.py)
- * [Transposition Cipher Encrypt Decrypt File](https://github.com/TheAlgorithms/Python/blob/master/ciphers/transposition_cipher_encrypt_decrypt_file.py)
- * [Vigenere Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/vigenere_cipher.py)
- * [Xor Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/xor_cipher.py)
+ * [A1Z26](ciphers/a1z26.py)
+ * [Affine Cipher](ciphers/affine_cipher.py)
+ * [Atbash](ciphers/atbash.py)
+ * [Autokey](ciphers/autokey.py)
+ * [Baconian Cipher](ciphers/baconian_cipher.py)
+ * [Base16](ciphers/base16.py)
+ * [Base32](ciphers/base32.py)
+ * [Base64](ciphers/base64.py)
+ * [Base85](ciphers/base85.py)
+ * [Beaufort Cipher](ciphers/beaufort_cipher.py)
+ * [Bifid](ciphers/bifid.py)
+ * [Brute Force Caesar Cipher](ciphers/brute_force_caesar_cipher.py)
+ * [Caesar Cipher](ciphers/caesar_cipher.py)
+ * [Cryptomath Module](ciphers/cryptomath_module.py)
+ * [Decrypt Caesar With Chi Squared](ciphers/decrypt_caesar_with_chi_squared.py)
+ * [Deterministic Miller Rabin](ciphers/deterministic_miller_rabin.py)
+ * [Diffie](ciphers/diffie.py)
+ * [Diffie Hellman](ciphers/diffie_hellman.py)
+ * [Elgamal Key Generator](ciphers/elgamal_key_generator.py)
+ * [Enigma Machine2](ciphers/enigma_machine2.py)
+ * [Hill Cipher](ciphers/hill_cipher.py)
+ * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py)
+ * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py)
+ * [Morse Code](ciphers/morse_code.py)
+ * [Onepad Cipher](ciphers/onepad_cipher.py)
+ * [Playfair Cipher](ciphers/playfair_cipher.py)
+ * [Polybius](ciphers/polybius.py)
+ * [Porta Cipher](ciphers/porta_cipher.py)
+ * [Rabin Miller](ciphers/rabin_miller.py)
+ * [Rail Fence Cipher](ciphers/rail_fence_cipher.py)
+ * [Rot13](ciphers/rot13.py)
+ * [Rsa Cipher](ciphers/rsa_cipher.py)
+ * [Rsa Factorization](ciphers/rsa_factorization.py)
+ * [Rsa Key Generator](ciphers/rsa_key_generator.py)
+ * [Shuffled Shift Cipher](ciphers/shuffled_shift_cipher.py)
+ * [Simple Keyword Cypher](ciphers/simple_keyword_cypher.py)
+ * [Simple Substitution Cipher](ciphers/simple_substitution_cipher.py)
+ * [Trafid Cipher](ciphers/trafid_cipher.py)
+ * [Transposition Cipher](ciphers/transposition_cipher.py)
+ * [Transposition Cipher Encrypt Decrypt File](ciphers/transposition_cipher_encrypt_decrypt_file.py)
+ * [Vigenere Cipher](ciphers/vigenere_cipher.py)
+ * [Xor Cipher](ciphers/xor_cipher.py)
## Compression
- * [Burrows Wheeler](https://github.com/TheAlgorithms/Python/blob/master/compression/burrows_wheeler.py)
- * [Huffman](https://github.com/TheAlgorithms/Python/blob/master/compression/huffman.py)
- * [Lempel Ziv](https://github.com/TheAlgorithms/Python/blob/master/compression/lempel_ziv.py)
- * [Lempel Ziv Decompress](https://github.com/TheAlgorithms/Python/blob/master/compression/lempel_ziv_decompress.py)
- * [Peak Signal To Noise Ratio](https://github.com/TheAlgorithms/Python/blob/master/compression/peak_signal_to_noise_ratio.py)
+ * [Burrows Wheeler](compression/burrows_wheeler.py)
+ * [Huffman](compression/huffman.py)
+ * [Lempel Ziv](compression/lempel_ziv.py)
+ * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py)
+ * [Lz77](compression/lz77.py)
+ * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py)
+ * [Run Length Encoding](compression/run_length_encoding.py)
## Computer Vision
- * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py)
- * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py)
+ * [Cnn Classification](computer_vision/cnn_classification.py)
+ * [Flip Augmentation](computer_vision/flip_augmentation.py)
+ * [Harris Corner](computer_vision/harris_corner.py)
+ * [Horn Schunck](computer_vision/horn_schunck.py)
+ * [Mean Threshold](computer_vision/mean_threshold.py)
+ * [Mosaic Augmentation](computer_vision/mosaic_augmentation.py)
+ * [Pooling Functions](computer_vision/pooling_functions.py)
## Conversions
- * [Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py)
- * [Binary To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_octal.py)
- * [Decimal To Any](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_any.py)
- * [Decimal To Binary](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary.py)
- * [Decimal To Binary Recursion](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary_recursion.py)
- * [Decimal To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_hexadecimal.py)
- * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py)
- * [Hex To Bin](https://github.com/TheAlgorithms/Python/blob/master/conversions/hex_to_bin.py)
- * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py)
- * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py)
- * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py)
- * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py)
- * [Rgb Hsv Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/rgb_hsv_conversion.py)
- * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py)
- * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py)
- * [Weight Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py)
+ * [Astronomical Length Scale Conversion](conversions/astronomical_length_scale_conversion.py)
+ * [Binary To Decimal](conversions/binary_to_decimal.py)
+ * [Binary To Hexadecimal](conversions/binary_to_hexadecimal.py)
+ * [Binary To Octal](conversions/binary_to_octal.py)
+ * [Decimal To Any](conversions/decimal_to_any.py)
+ * [Decimal To Binary](conversions/decimal_to_binary.py)
+ * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py)
+ * [Decimal To Hexadecimal](conversions/decimal_to_hexadecimal.py)
+ * [Decimal To Octal](conversions/decimal_to_octal.py)
+ * [Energy Conversions](conversions/energy_conversions.py)
+ * [Excel Title To Column](conversions/excel_title_to_column.py)
+ * [Hex To Bin](conversions/hex_to_bin.py)
+ * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py)
+ * [Length Conversion](conversions/length_conversion.py)
+ * [Molecular Chemistry](conversions/molecular_chemistry.py)
+ * [Octal To Decimal](conversions/octal_to_decimal.py)
+ * [Prefix Conversions](conversions/prefix_conversions.py)
+ * [Prefix Conversions String](conversions/prefix_conversions_string.py)
+ * [Pressure Conversions](conversions/pressure_conversions.py)
+ * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py)
+ * [Roman Numerals](conversions/roman_numerals.py)
+ * [Speed Conversions](conversions/speed_conversions.py)
+ * [Temperature Conversions](conversions/temperature_conversions.py)
+ * [Volume Conversions](conversions/volume_conversions.py)
+ * [Weight Conversion](conversions/weight_conversion.py)
## Data Structures
+ * Arrays
+ * [Permutations](data_structures/arrays/permutations.py)
+ * [Prefix Sum](data_structures/arrays/prefix_sum.py)
+ * [Product Sum](data_structures/arrays/product_sum.py)
* Binary Tree
- * [Avl Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/avl_tree.py)
- * [Basic Binary Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/basic_binary_tree.py)
- * [Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_search_tree.py)
- * [Binary Search Tree Recursive](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_search_tree_recursive.py)
- * [Binary Tree Mirror](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_mirror.py)
- * [Binary Tree Traversals](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py)
- * [Fenwick Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/fenwick_tree.py)
- * [Lazy Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lazy_segment_tree.py)
- * [Lowest Common Ancestor](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lowest_common_ancestor.py)
- * [Merge Two Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/merge_two_binary_trees.py)
- * [Non Recursive Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/non_recursive_segment_tree.py)
- * [Number Of Possible Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/number_of_possible_binary_trees.py)
- * [Red Black Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/red_black_tree.py)
- * [Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py)
- * [Segment Tree Other](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree_other.py)
- * [Treap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/treap.py)
- * [Wavelet Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/wavelet_tree.py)
+ * [Avl Tree](data_structures/binary_tree/avl_tree.py)
+ * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py)
+ * [Binary Search Tree](data_structures/binary_tree/binary_search_tree.py)
+ * [Binary Search Tree Recursive](data_structures/binary_tree/binary_search_tree_recursive.py)
+ * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py)
+ * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py)
+ * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py)
+ * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py)
+ * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py)
+ * [Distribute Coins](data_structures/binary_tree/distribute_coins.py)
+ * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py)
+ * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py)
+ * [Is Bst](data_structures/binary_tree/is_bst.py)
+ * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py)
+ * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py)
+ * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py)
+ * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py)
+ * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py)
+ * [Number Of Possible Binary Trees](data_structures/binary_tree/number_of_possible_binary_trees.py)
+ * [Red Black Tree](data_structures/binary_tree/red_black_tree.py)
+ * [Segment Tree](data_structures/binary_tree/segment_tree.py)
+ * [Segment Tree Other](data_structures/binary_tree/segment_tree_other.py)
+ * [Treap](data_structures/binary_tree/treap.py)
+ * [Wavelet Tree](data_structures/binary_tree/wavelet_tree.py)
* Disjoint Set
- * [Alternate Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/alternate_disjoint_set.py)
- * [Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py)
+ * [Alternate Disjoint Set](data_structures/disjoint_set/alternate_disjoint_set.py)
+ * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py)
* Hashing
- * [Double Hash](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/double_hash.py)
- * [Hash Table](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/hash_table.py)
- * [Hash Table With Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/hash_table_with_linked_list.py)
+ * [Bloom Filter](data_structures/hashing/bloom_filter.py)
+ * [Double Hash](data_structures/hashing/double_hash.py)
+ * [Hash Map](data_structures/hashing/hash_map.py)
+ * [Hash Table](data_structures/hashing/hash_table.py)
+ * [Hash Table With Linked List](data_structures/hashing/hash_table_with_linked_list.py)
* Number Theory
- * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/number_theory/prime_numbers.py)
- * [Quadratic Probing](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/quadratic_probing.py)
+ * [Prime Numbers](data_structures/hashing/number_theory/prime_numbers.py)
+ * [Quadratic Probing](data_structures/hashing/quadratic_probing.py)
+ * Tests
+ * [Test Hash Map](data_structures/hashing/tests/test_hash_map.py)
* Heap
- * [Binomial Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/binomial_heap.py)
- * [Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/heap.py)
- * [Heap Generic](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/heap_generic.py)
- * [Max Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/max_heap.py)
- * [Min Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/min_heap.py)
- * [Randomized Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/randomized_heap.py)
- * [Skew Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/skew_heap.py)
+ * [Binomial Heap](data_structures/heap/binomial_heap.py)
+ * [Heap](data_structures/heap/heap.py)
+ * [Heap Generic](data_structures/heap/heap_generic.py)
+ * [Max Heap](data_structures/heap/max_heap.py)
+ * [Min Heap](data_structures/heap/min_heap.py)
+ * [Randomized Heap](data_structures/heap/randomized_heap.py)
+ * [Skew Heap](data_structures/heap/skew_heap.py)
* Linked List
- * [Circular Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/circular_linked_list.py)
- * [Deque Doubly](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/deque_doubly.py)
- * [Doubly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/doubly_linked_list.py)
- * [Doubly Linked List Two](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/doubly_linked_list_two.py)
- * [From Sequence](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/from_sequence.py)
- * [Has Loop](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/has_loop.py)
- * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/is_palindrome.py)
- * [Merge Two Lists](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/merge_two_lists.py)
- * [Middle Element Of Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/middle_element_of_linked_list.py)
- * [Print Reverse](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/print_reverse.py)
- * [Singly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/singly_linked_list.py)
- * [Skip List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/skip_list.py)
- * [Swap Nodes](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/swap_nodes.py)
+ * [Circular Linked List](data_structures/linked_list/circular_linked_list.py)
+ * [Deque Doubly](data_structures/linked_list/deque_doubly.py)
+ * [Doubly Linked List](data_structures/linked_list/doubly_linked_list.py)
+ * [Doubly Linked List Two](data_structures/linked_list/doubly_linked_list_two.py)
+ * [From Sequence](data_structures/linked_list/from_sequence.py)
+ * [Has Loop](data_structures/linked_list/has_loop.py)
+ * [Is Palindrome](data_structures/linked_list/is_palindrome.py)
+ * [Merge Two Lists](data_structures/linked_list/merge_two_lists.py)
+ * [Middle Element Of Linked List](data_structures/linked_list/middle_element_of_linked_list.py)
+ * [Print Reverse](data_structures/linked_list/print_reverse.py)
+ * [Singly Linked List](data_structures/linked_list/singly_linked_list.py)
+ * [Skip List](data_structures/linked_list/skip_list.py)
+ * [Swap Nodes](data_structures/linked_list/swap_nodes.py)
* Queue
- * [Circular Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/circular_queue.py)
- * [Double Ended Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/double_ended_queue.py)
- * [Linked Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/linked_queue.py)
- * [Priority Queue Using List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/priority_queue_using_list.py)
- * [Queue On List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/queue_on_list.py)
- * [Queue On Pseudo Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/queue_on_pseudo_stack.py)
+ * [Circular Queue](data_structures/queue/circular_queue.py)
+ * [Circular Queue Linked List](data_structures/queue/circular_queue_linked_list.py)
+ * [Double Ended Queue](data_structures/queue/double_ended_queue.py)
+ * [Linked Queue](data_structures/queue/linked_queue.py)
+ * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py)
+ * [Queue By List](data_structures/queue/queue_by_list.py)
+ * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py)
+ * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py)
* Stacks
- * [Balanced Parentheses](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/balanced_parentheses.py)
- * [Dijkstras Two Stack Algorithm](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/dijkstras_two_stack_algorithm.py)
- * [Evaluate Postfix Notations](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/evaluate_postfix_notations.py)
- * [Infix To Postfix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py)
- * [Infix To Prefix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_prefix_conversion.py)
- * [Linked Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/linked_stack.py)
- * [Next Greater Element](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/next_greater_element.py)
- * [Postfix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/postfix_evaluation.py)
- * [Prefix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/prefix_evaluation.py)
- * [Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack.py)
- * [Stack Using Dll](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_using_dll.py)
- * [Stock Span Problem](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stock_span_problem.py)
+ * [Balanced Parentheses](data_structures/stacks/balanced_parentheses.py)
+ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py)
+ * [Evaluate Postfix Notations](data_structures/stacks/evaluate_postfix_notations.py)
+ * [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py)
+ * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py)
+ * [Next Greater Element](data_structures/stacks/next_greater_element.py)
+ * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py)
+ * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py)
+ * [Stack](data_structures/stacks/stack.py)
+ * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py)
+ * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py)
+ * [Stock Span Problem](data_structures/stacks/stock_span_problem.py)
* Trie
- * [Trie](https://github.com/TheAlgorithms/Python/blob/master/data_structures/trie/trie.py)
+ * [Radix Tree](data_structures/trie/radix_tree.py)
+ * [Trie](data_structures/trie/trie.py)
## Digital Image Processing
- * [Change Brightness](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/change_brightness.py)
- * [Change Contrast](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/change_contrast.py)
- * [Convert To Negative](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/convert_to_negative.py)
+ * [Change Brightness](digital_image_processing/change_brightness.py)
+ * [Change Contrast](digital_image_processing/change_contrast.py)
+ * [Convert To Negative](digital_image_processing/convert_to_negative.py)
* Dithering
- * [Burkes](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/dithering/burkes.py)
+ * [Burkes](digital_image_processing/dithering/burkes.py)
* Edge Detection
- * [Canny](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/edge_detection/canny.py)
+ * [Canny](digital_image_processing/edge_detection/canny.py)
* Filters
- * [Bilateral Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/bilateral_filter.py)
- * [Convolve](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/convolve.py)
- * [Gaussian Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/gaussian_filter.py)
- * [Median Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/median_filter.py)
- * [Sobel Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/sobel_filter.py)
+ * [Bilateral Filter](digital_image_processing/filters/bilateral_filter.py)
+ * [Convolve](digital_image_processing/filters/convolve.py)
+ * [Gabor Filter](digital_image_processing/filters/gabor_filter.py)
+ * [Gaussian Filter](digital_image_processing/filters/gaussian_filter.py)
+ * [Local Binary Pattern](digital_image_processing/filters/local_binary_pattern.py)
+ * [Median Filter](digital_image_processing/filters/median_filter.py)
+ * [Sobel Filter](digital_image_processing/filters/sobel_filter.py)
* Histogram Equalization
- * [Histogram Stretch](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/histogram_equalization/histogram_stretch.py)
- * [Index Calculation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/index_calculation.py)
+ * [Histogram Stretch](digital_image_processing/histogram_equalization/histogram_stretch.py)
+ * [Index Calculation](digital_image_processing/index_calculation.py)
+ * Morphological Operations
+ * [Dilation Operation](digital_image_processing/morphological_operations/dilation_operation.py)
+ * [Erosion Operation](digital_image_processing/morphological_operations/erosion_operation.py)
* Resize
- * [Resize](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/resize/resize.py)
+ * [Resize](digital_image_processing/resize/resize.py)
* Rotation
- * [Rotation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/rotation/rotation.py)
- * [Sepia](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/sepia.py)
- * [Test Digital Image Processing](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/test_digital_image_processing.py)
+ * [Rotation](digital_image_processing/rotation/rotation.py)
+ * [Sepia](digital_image_processing/sepia.py)
+ * [Test Digital Image Processing](digital_image_processing/test_digital_image_processing.py)
## Divide And Conquer
- * [Closest Pair Of Points](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/closest_pair_of_points.py)
- * [Convex Hull](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/convex_hull.py)
- * [Heaps Algorithm](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm.py)
- * [Heaps Algorithm Iterative](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm_iterative.py)
- * [Inversions](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/inversions.py)
- * [Kth Order Statistic](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/kth_order_statistic.py)
- * [Max Difference Pair](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_difference_pair.py)
- * [Max Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_subarray_sum.py)
- * [Mergesort](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/mergesort.py)
- * [Peak](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/peak.py)
- * [Power](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/power.py)
- * [Strassen Matrix Multiplication](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/strassen_matrix_multiplication.py)
+ * [Closest Pair Of Points](divide_and_conquer/closest_pair_of_points.py)
+ * [Convex Hull](divide_and_conquer/convex_hull.py)
+ * [Heaps Algorithm](divide_and_conquer/heaps_algorithm.py)
+ * [Heaps Algorithm Iterative](divide_and_conquer/heaps_algorithm_iterative.py)
+ * [Inversions](divide_and_conquer/inversions.py)
+ * [Kth Order Statistic](divide_and_conquer/kth_order_statistic.py)
+ * [Max Difference Pair](divide_and_conquer/max_difference_pair.py)
+ * [Max Subarray](divide_and_conquer/max_subarray.py)
+ * [Mergesort](divide_and_conquer/mergesort.py)
+ * [Peak](divide_and_conquer/peak.py)
+ * [Power](divide_and_conquer/power.py)
+ * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py)
## Dynamic Programming
- * [Abbreviation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/abbreviation.py)
- * [Bitmask](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/bitmask.py)
- * [Catalan Numbers](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/catalan_numbers.py)
- * [Climbing Stairs](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/climbing_stairs.py)
- * [Edit Distance](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/edit_distance.py)
- * [Factorial](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/factorial.py)
- * [Fast Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fast_fibonacci.py)
- * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fibonacci.py)
- * [Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/floyd_warshall.py)
- * [Fractional Knapsack](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fractional_knapsack.py)
- * [Fractional Knapsack 2](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fractional_knapsack_2.py)
- * [Integer Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/integer_partition.py)
- * [Iterating Through Submasks](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/iterating_through_submasks.py)
- * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/knapsack.py)
- * [Longest Common Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_common_subsequence.py)
- * [Longest Increasing Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_increasing_subsequence.py)
- * [Longest Increasing Subsequence O(Nlogn)](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_increasing_subsequence_o(nlogn).py)
- * [Longest Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_sub_array.py)
- * [Matrix Chain Order](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/matrix_chain_order.py)
- * [Max Non Adjacent Sum](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_non_adjacent_sum.py)
- * [Max Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sub_array.py)
- * [Max Sum Contiguous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contiguous_subsequence.py)
- * [Minimum Coin Change](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_coin_change.py)
- * [Minimum Cost Path](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_cost_path.py)
- * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py)
- * [Minimum Steps To One](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_steps_to_one.py)
- * [Optimal Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/optimal_binary_search_tree.py)
- * [Rod Cutting](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/rod_cutting.py)
- * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py)
- * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py)
+ * [Abbreviation](dynamic_programming/abbreviation.py)
+ * [All Construct](dynamic_programming/all_construct.py)
+ * [Bitmask](dynamic_programming/bitmask.py)
+ * [Catalan Numbers](dynamic_programming/catalan_numbers.py)
+ * [Climbing Stairs](dynamic_programming/climbing_stairs.py)
+ * [Combination Sum Iv](dynamic_programming/combination_sum_iv.py)
+ * [Edit Distance](dynamic_programming/edit_distance.py)
+ * [Factorial](dynamic_programming/factorial.py)
+ * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py)
+ * [Fibonacci](dynamic_programming/fibonacci.py)
+ * [Fizz Buzz](dynamic_programming/fizz_buzz.py)
+ * [Floyd Warshall](dynamic_programming/floyd_warshall.py)
+ * [Integer Partition](dynamic_programming/integer_partition.py)
+ * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py)
+ * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py)
+ * [Knapsack](dynamic_programming/knapsack.py)
+ * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py)
+ * [Longest Common Substring](dynamic_programming/longest_common_substring.py)
+ * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py)
+ * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py)
+ * [Longest Sub Array](dynamic_programming/longest_sub_array.py)
+ * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py)
+ * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py)
+ * [Max Product Subarray](dynamic_programming/max_product_subarray.py)
+ * [Max Subarray Sum](dynamic_programming/max_subarray_sum.py)
+ * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py)
+ * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py)
+ * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py)
+ * [Minimum Partition](dynamic_programming/minimum_partition.py)
+ * [Minimum Size Subarray Sum](dynamic_programming/minimum_size_subarray_sum.py)
+ * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py)
+ * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py)
+ * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py)
+ * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py)
+ * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py)
+ * [Rod Cutting](dynamic_programming/rod_cutting.py)
+ * [Subset Generation](dynamic_programming/subset_generation.py)
+ * [Sum Of Subset](dynamic_programming/sum_of_subset.py)
+ * [Viterbi](dynamic_programming/viterbi.py)
+ * [Word Break](dynamic_programming/word_break.py)
## Electronics
- * [Electric Power](https://github.com/TheAlgorithms/Python/blob/master/electronics/electric_power.py)
- * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py)
+ * [Apparent Power](electronics/apparent_power.py)
+ * [Builtin Voltage](electronics/builtin_voltage.py)
+ * [Carrier Concentration](electronics/carrier_concentration.py)
+ * [Circular Convolution](electronics/circular_convolution.py)
+ * [Coulombs Law](electronics/coulombs_law.py)
+ * [Electric Conductivity](electronics/electric_conductivity.py)
+ * [Electric Power](electronics/electric_power.py)
+ * [Electrical Impedance](electronics/electrical_impedance.py)
+ * [Ind Reactance](electronics/ind_reactance.py)
+ * [Ohms Law](electronics/ohms_law.py)
+ * [Real And Reactive Power](electronics/real_and_reactive_power.py)
+ * [Resistor Equivalence](electronics/resistor_equivalence.py)
+ * [Resonant Frequency](electronics/resonant_frequency.py)
## File Transfer
- * [Receive File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/receive_file.py)
- * [Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/send_file.py)
+ * [Receive File](file_transfer/receive_file.py)
+ * [Send File](file_transfer/send_file.py)
* Tests
- * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py)
+ * [Test Send File](file_transfer/tests/test_send_file.py)
+
+## Financial
+ * [Equated Monthly Installments](financial/equated_monthly_installments.py)
+ * [Interest](financial/interest.py)
+ * [Present Value](financial/present_value.py)
+ * [Price Plus Tax](financial/price_plus_tax.py)
## Fractals
- * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/fractals/koch_snowflake.py)
- * [Mandelbrot](https://github.com/TheAlgorithms/Python/blob/master/fractals/mandelbrot.py)
- * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/fractals/sierpinski_triangle.py)
+ * [Julia Sets](fractals/julia_sets.py)
+ * [Koch Snowflake](fractals/koch_snowflake.py)
+ * [Mandelbrot](fractals/mandelbrot.py)
+ * [Sierpinski Triangle](fractals/sierpinski_triangle.py)
## Fuzzy Logic
- * [Fuzzy Operations](https://github.com/TheAlgorithms/Python/blob/master/fuzzy_logic/fuzzy_operations.py)
+ * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py)
## Genetic Algorithm
- * [Basic String](https://github.com/TheAlgorithms/Python/blob/master/genetic_algorithm/basic_string.py)
+ * [Basic String](genetic_algorithm/basic_string.py)
## Geodesy
- * [Haversine Distance](https://github.com/TheAlgorithms/Python/blob/master/geodesy/haversine_distance.py)
- * [Lamberts Ellipsoidal Distance](https://github.com/TheAlgorithms/Python/blob/master/geodesy/lamberts_ellipsoidal_distance.py)
+ * [Haversine Distance](geodesy/haversine_distance.py)
+ * [Lamberts Ellipsoidal Distance](geodesy/lamberts_ellipsoidal_distance.py)
## Graphics
- * [Bezier Curve](https://github.com/TheAlgorithms/Python/blob/master/graphics/bezier_curve.py)
- * [Vector3 For 2D Rendering](https://github.com/TheAlgorithms/Python/blob/master/graphics/vector3_for_2d_rendering.py)
+ * [Bezier Curve](graphics/bezier_curve.py)
+ * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py)
## Graphs
- * [A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/a_star.py)
- * [Articulation Points](https://github.com/TheAlgorithms/Python/blob/master/graphs/articulation_points.py)
- * [Basic Graphs](https://github.com/TheAlgorithms/Python/blob/master/graphs/basic_graphs.py)
- * [Bellman Ford](https://github.com/TheAlgorithms/Python/blob/master/graphs/bellman_ford.py)
- * [Bfs Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_shortest_path.py)
- * [Bfs Zero One Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_zero_one_shortest_path.py)
- * [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py)
- * [Bidirectional Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_breadth_first_search.py)
- * [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py)
- * [Breadth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_2.py)
- * [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py)
- * [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py)
- * [Check Bipartite Graph Dfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_dfs.py)
- * [Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/connected_components.py)
- * [Depth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search.py)
- * [Depth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search_2.py)
- * [Dijkstra](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra.py)
- * [Dijkstra 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra_2.py)
- * [Dijkstra Algorithm](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra_algorithm.py)
- * [Dinic](https://github.com/TheAlgorithms/Python/blob/master/graphs/dinic.py)
- * [Directed And Undirected (Weighted) Graph](https://github.com/TheAlgorithms/Python/blob/master/graphs/directed_and_undirected_(weighted)_graph.py)
- * [Edmonds Karp Multiple Source And Sink](https://github.com/TheAlgorithms/Python/blob/master/graphs/edmonds_karp_multiple_source_and_sink.py)
- * [Eulerian Path And Circuit For Undirected Graph](https://github.com/TheAlgorithms/Python/blob/master/graphs/eulerian_path_and_circuit_for_undirected_graph.py)
- * [Even Tree](https://github.com/TheAlgorithms/Python/blob/master/graphs/even_tree.py)
- * [Finding Bridges](https://github.com/TheAlgorithms/Python/blob/master/graphs/finding_bridges.py)
- * [Frequent Pattern Graph Miner](https://github.com/TheAlgorithms/Python/blob/master/graphs/frequent_pattern_graph_miner.py)
- * [G Topological Sort](https://github.com/TheAlgorithms/Python/blob/master/graphs/g_topological_sort.py)
- * [Gale Shapley Bigraph](https://github.com/TheAlgorithms/Python/blob/master/graphs/gale_shapley_bigraph.py)
- * [Graph List](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_list.py)
- * [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py)
- * [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py)
- * [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py)
- * [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py)
- * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py)
- * [Karger](https://github.com/TheAlgorithms/Python/blob/master/graphs/karger.py)
- * [Markov Chain](https://github.com/TheAlgorithms/Python/blob/master/graphs/markov_chain.py)
- * [Minimum Spanning Tree Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_boruvka.py)
- * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py)
- * [Minimum Spanning Tree Kruskal2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal2.py)
- * [Minimum Spanning Tree Prims](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims.py)
- * [Minimum Spanning Tree Prims2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims2.py)
- * [Multi Heuristic Astar](https://github.com/TheAlgorithms/Python/blob/master/graphs/multi_heuristic_astar.py)
- * [Page Rank](https://github.com/TheAlgorithms/Python/blob/master/graphs/page_rank.py)
- * [Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/prim.py)
- * [Scc Kosaraju](https://github.com/TheAlgorithms/Python/blob/master/graphs/scc_kosaraju.py)
- * [Strongly Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/strongly_connected_components.py)
- * [Tarjans Scc](https://github.com/TheAlgorithms/Python/blob/master/graphs/tarjans_scc.py)
+ * [A Star](graphs/a_star.py)
+ * [Articulation Points](graphs/articulation_points.py)
+ * [Basic Graphs](graphs/basic_graphs.py)
+ * [Bellman Ford](graphs/bellman_ford.py)
+ * [Bi Directional Dijkstra](graphs/bi_directional_dijkstra.py)
+ * [Bidirectional A Star](graphs/bidirectional_a_star.py)
+ * [Bidirectional Breadth First Search](graphs/bidirectional_breadth_first_search.py)
+ * [Boruvka](graphs/boruvka.py)
+ * [Breadth First Search](graphs/breadth_first_search.py)
+ * [Breadth First Search 2](graphs/breadth_first_search_2.py)
+ * [Breadth First Search Shortest Path](graphs/breadth_first_search_shortest_path.py)
+ * [Breadth First Search Shortest Path 2](graphs/breadth_first_search_shortest_path_2.py)
+ * [Breadth First Search Zero One Shortest Path](graphs/breadth_first_search_zero_one_shortest_path.py)
+ * [Check Bipartite Graph Bfs](graphs/check_bipartite_graph_bfs.py)
+ * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py)
+ * [Check Cycle](graphs/check_cycle.py)
+ * [Connected Components](graphs/connected_components.py)
+ * [Depth First Search](graphs/depth_first_search.py)
+ * [Depth First Search 2](graphs/depth_first_search_2.py)
+ * [Dijkstra](graphs/dijkstra.py)
+ * [Dijkstra 2](graphs/dijkstra_2.py)
+ * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py)
+ * [Dijkstra Alternate](graphs/dijkstra_alternate.py)
+ * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py)
+ * [Dinic](graphs/dinic.py)
+ * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py)
+ * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py)
+ * [Eulerian Path And Circuit For Undirected Graph](graphs/eulerian_path_and_circuit_for_undirected_graph.py)
+ * [Even Tree](graphs/even_tree.py)
+ * [Finding Bridges](graphs/finding_bridges.py)
+ * [Frequent Pattern Graph Miner](graphs/frequent_pattern_graph_miner.py)
+ * [G Topological Sort](graphs/g_topological_sort.py)
+ * [Gale Shapley Bigraph](graphs/gale_shapley_bigraph.py)
+ * [Graph Adjacency List](graphs/graph_adjacency_list.py)
+ * [Graph Adjacency Matrix](graphs/graph_adjacency_matrix.py)
+ * [Graph List](graphs/graph_list.py)
+ * [Graphs Floyd Warshall](graphs/graphs_floyd_warshall.py)
+ * [Greedy Best First](graphs/greedy_best_first.py)
+ * [Greedy Min Vertex Cover](graphs/greedy_min_vertex_cover.py)
+ * [Kahns Algorithm Long](graphs/kahns_algorithm_long.py)
+ * [Kahns Algorithm Topo](graphs/kahns_algorithm_topo.py)
+ * [Karger](graphs/karger.py)
+ * [Markov Chain](graphs/markov_chain.py)
+ * [Matching Min Vertex Cover](graphs/matching_min_vertex_cover.py)
+ * [Minimum Path Sum](graphs/minimum_path_sum.py)
+ * [Minimum Spanning Tree Boruvka](graphs/minimum_spanning_tree_boruvka.py)
+ * [Minimum Spanning Tree Kruskal](graphs/minimum_spanning_tree_kruskal.py)
+ * [Minimum Spanning Tree Kruskal2](graphs/minimum_spanning_tree_kruskal2.py)
+ * [Minimum Spanning Tree Prims](graphs/minimum_spanning_tree_prims.py)
+ * [Minimum Spanning Tree Prims2](graphs/minimum_spanning_tree_prims2.py)
+ * [Multi Heuristic Astar](graphs/multi_heuristic_astar.py)
+ * [Page Rank](graphs/page_rank.py)
+ * [Prim](graphs/prim.py)
+ * [Random Graph Generator](graphs/random_graph_generator.py)
+ * [Scc Kosaraju](graphs/scc_kosaraju.py)
+ * [Strongly Connected Components](graphs/strongly_connected_components.py)
+ * [Tarjans Scc](graphs/tarjans_scc.py)
* Tests
- * [Test Min Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_kruskal.py)
- * [Test Min Spanning Tree Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_prim.py)
+ * [Test Min Spanning Tree Kruskal](graphs/tests/test_min_spanning_tree_kruskal.py)
+ * [Test Min Spanning Tree Prim](graphs/tests/test_min_spanning_tree_prim.py)
+
+## Greedy Methods
+ * [Fractional Knapsack](greedy_methods/fractional_knapsack.py)
+ * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py)
+ * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py)
+ * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py)
## Hashes
- * [Adler32](https://github.com/TheAlgorithms/Python/blob/master/hashes/adler32.py)
- * [Chaos Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/chaos_machine.py)
- * [Djb2](https://github.com/TheAlgorithms/Python/blob/master/hashes/djb2.py)
- * [Enigma Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/enigma_machine.py)
- * [Hamming Code](https://github.com/TheAlgorithms/Python/blob/master/hashes/hamming_code.py)
- * [Md5](https://github.com/TheAlgorithms/Python/blob/master/hashes/md5.py)
- * [Sdbm](https://github.com/TheAlgorithms/Python/blob/master/hashes/sdbm.py)
- * [Sha1](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha1.py)
+ * [Adler32](hashes/adler32.py)
+ * [Chaos Machine](hashes/chaos_machine.py)
+ * [Djb2](hashes/djb2.py)
+ * [Elf](hashes/elf.py)
+ * [Enigma Machine](hashes/enigma_machine.py)
+ * [Hamming Code](hashes/hamming_code.py)
+ * [Luhn](hashes/luhn.py)
+ * [Md5](hashes/md5.py)
+ * [Sdbm](hashes/sdbm.py)
+ * [Sha1](hashes/sha1.py)
+ * [Sha256](hashes/sha256.py)
## Knapsack
- * [Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/greedy_knapsack.py)
- * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/knapsack.py)
+ * [Greedy Knapsack](knapsack/greedy_knapsack.py)
+ * [Knapsack](knapsack/knapsack.py)
+ * [Recursive Approach Knapsack](knapsack/recursive_approach_knapsack.py)
* Tests
- * [Test Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_greedy_knapsack.py)
- * [Test Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_knapsack.py)
+ * [Test Greedy Knapsack](knapsack/tests/test_greedy_knapsack.py)
+ * [Test Knapsack](knapsack/tests/test_knapsack.py)
## Linear Algebra
* Src
- * [Conjugate Gradient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/conjugate_gradient.py)
- * [Lib](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/lib.py)
- * [Polynom For Points](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/polynom_for_points.py)
- * [Power Iteration](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/power_iteration.py)
- * [Rayleigh Quotient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/rayleigh_quotient.py)
- * [Test Linear Algebra](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/test_linear_algebra.py)
- * [Transformations 2D](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/transformations_2d.py)
+ * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py)
+ * [Lib](linear_algebra/src/lib.py)
+ * [Polynom For Points](linear_algebra/src/polynom_for_points.py)
+ * [Power Iteration](linear_algebra/src/power_iteration.py)
+ * [Rank Of Matrix](linear_algebra/src/rank_of_matrix.py)
+ * [Rayleigh Quotient](linear_algebra/src/rayleigh_quotient.py)
+ * [Schur Complement](linear_algebra/src/schur_complement.py)
+ * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py)
+ * [Transformations 2D](linear_algebra/src/transformations_2d.py)
+
+## Linear Programming
+ * [Simplex](linear_programming/simplex.py)
## Machine Learning
- * [Astar](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/astar.py)
- * [Data Transformations](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/data_transformations.py)
- * [Decision Tree](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/decision_tree.py)
+ * [Astar](machine_learning/astar.py)
+ * [Data Transformations](machine_learning/data_transformations.py)
+ * [Decision Tree](machine_learning/decision_tree.py)
+ * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py)
* Forecasting
- * [Run](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/forecasting/run.py)
- * [Gaussian Naive Bayes](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gaussian_naive_bayes.py)
- * [Gradient Boosting Regressor](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gradient_boosting_regressor.py)
- * [Gradient Descent](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gradient_descent.py)
- * [K Means Clust](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/k_means_clust.py)
- * [K Nearest Neighbours](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/k_nearest_neighbours.py)
- * [Knn Sklearn](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/knn_sklearn.py)
- * [Linear Discriminant Analysis](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_discriminant_analysis.py)
- * [Linear Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_regression.py)
- * [Logistic Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/logistic_regression.py)
+ * [Run](machine_learning/forecasting/run.py)
+ * [Gradient Descent](machine_learning/gradient_descent.py)
+ * [K Means Clust](machine_learning/k_means_clust.py)
+ * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py)
+ * [Knn Sklearn](machine_learning/knn_sklearn.py)
+ * [Linear Discriminant Analysis](machine_learning/linear_discriminant_analysis.py)
+ * [Linear Regression](machine_learning/linear_regression.py)
+ * Local Weighted Learning
+ * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py)
+ * [Logistic Regression](machine_learning/logistic_regression.py)
* Lstm
- * [Lstm Prediction](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/lstm/lstm_prediction.py)
- * [Multilayer Perceptron Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/multilayer_perceptron_classifier.py)
- * [Polymonial Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/polymonial_regression.py)
- * [Random Forest Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_classifier.py)
- * [Random Forest Regressor](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_regressor.py)
- * [Scoring Functions](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/scoring_functions.py)
- * [Sequential Minimum Optimization](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/sequential_minimum_optimization.py)
- * [Similarity Search](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/similarity_search.py)
- * [Support Vector Machines](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/support_vector_machines.py)
- * [Word Frequency Functions](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/word_frequency_functions.py)
+ * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py)
+ * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py)
+ * [Polynomial Regression](machine_learning/polynomial_regression.py)
+ * [Scoring Functions](machine_learning/scoring_functions.py)
+ * [Self Organizing Map](machine_learning/self_organizing_map.py)
+ * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py)
+ * [Similarity Search](machine_learning/similarity_search.py)
+ * [Support Vector Machines](machine_learning/support_vector_machines.py)
+ * [Word Frequency Functions](machine_learning/word_frequency_functions.py)
+ * [Xgboost Classifier](machine_learning/xgboost_classifier.py)
+ * [Xgboost Regressor](machine_learning/xgboost_regressor.py)
## Maths
- * [3N Plus 1](https://github.com/TheAlgorithms/Python/blob/master/maths/3n_plus_1.py)
- * [Abs](https://github.com/TheAlgorithms/Python/blob/master/maths/abs.py)
- * [Abs Max](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_max.py)
- * [Abs Min](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_min.py)
- * [Add](https://github.com/TheAlgorithms/Python/blob/master/maths/add.py)
- * [Aliquot Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/aliquot_sum.py)
- * [Allocation Number](https://github.com/TheAlgorithms/Python/blob/master/maths/allocation_number.py)
- * [Area](https://github.com/TheAlgorithms/Python/blob/master/maths/area.py)
- * [Area Under Curve](https://github.com/TheAlgorithms/Python/blob/master/maths/area_under_curve.py)
- * [Armstrong Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/armstrong_numbers.py)
- * [Average Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mean.py)
- * [Average Median](https://github.com/TheAlgorithms/Python/blob/master/maths/average_median.py)
- * [Average Mode](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mode.py)
- * [Bailey Borwein Plouffe](https://github.com/TheAlgorithms/Python/blob/master/maths/bailey_borwein_plouffe.py)
- * [Basic Maths](https://github.com/TheAlgorithms/Python/blob/master/maths/basic_maths.py)
- * [Binary Exp Mod](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exp_mod.py)
- * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation.py)
- * [Binary Exponentiation 2](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation_2.py)
- * [Binary Exponentiation 3](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation_3.py)
- * [Binomial Coefficient](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_coefficient.py)
- * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py)
- * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py)
- * [Ceil](https://github.com/TheAlgorithms/Python/blob/master/maths/ceil.py)
- * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py)
- * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py)
- * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py)
- * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py)
- * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py)
- * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py)
- * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py)
- * [Euler Method](https://github.com/TheAlgorithms/Python/blob/master/maths/euler_method.py)
- * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py)
- * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py)
- * [Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_iterative.py)
- * [Factorial Python](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_python.py)
- * [Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_recursive.py)
- * [Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/factors.py)
- * [Fermat Little Theorem](https://github.com/TheAlgorithms/Python/blob/master/maths/fermat_little_theorem.py)
- * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci.py)
- * [Fibonacci Sequence Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci_sequence_recursion.py)
- * [Find Max](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max.py)
- * [Find Max Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max_recursion.py)
- * [Find Min](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min.py)
- * [Find Min Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min_recursion.py)
- * [Floor](https://github.com/TheAlgorithms/Python/blob/master/maths/floor.py)
- * [Gamma](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma.py)
- * [Gaussian](https://github.com/TheAlgorithms/Python/blob/master/maths/gaussian.py)
- * [Greatest Common Divisor](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py)
- * [Greedy Coin Change](https://github.com/TheAlgorithms/Python/blob/master/maths/greedy_coin_change.py)
- * [Hardy Ramanujanalgo](https://github.com/TheAlgorithms/Python/blob/master/maths/hardy_ramanujanalgo.py)
- * [Integration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/maths/integration_by_simpson_approx.py)
- * [Is Square Free](https://github.com/TheAlgorithms/Python/blob/master/maths/is_square_free.py)
- * [Jaccard Similarity](https://github.com/TheAlgorithms/Python/blob/master/maths/jaccard_similarity.py)
- * [Kadanes](https://github.com/TheAlgorithms/Python/blob/master/maths/kadanes.py)
- * [Karatsuba](https://github.com/TheAlgorithms/Python/blob/master/maths/karatsuba.py)
- * [Krishnamurthy Number](https://github.com/TheAlgorithms/Python/blob/master/maths/krishnamurthy_number.py)
- * [Kth Lexicographic Permutation](https://github.com/TheAlgorithms/Python/blob/master/maths/kth_lexicographic_permutation.py)
- * [Largest Of Very Large Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_of_very_large_numbers.py)
- * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_subarray_sum.py)
- * [Least Common Multiple](https://github.com/TheAlgorithms/Python/blob/master/maths/least_common_multiple.py)
- * [Line Length](https://github.com/TheAlgorithms/Python/blob/master/maths/line_length.py)
- * [Lucas Lehmer Primality Test](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_lehmer_primality_test.py)
- * [Lucas Series](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_series.py)
- * [Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/matrix_exponentiation.py)
- * [Max Sum Sliding Window](https://github.com/TheAlgorithms/Python/blob/master/maths/max_sum_sliding_window.py)
- * [Median Of Two Arrays](https://github.com/TheAlgorithms/Python/blob/master/maths/median_of_two_arrays.py)
- * [Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/maths/miller_rabin.py)
- * [Mobius Function](https://github.com/TheAlgorithms/Python/blob/master/maths/mobius_function.py)
- * [Modular Exponential](https://github.com/TheAlgorithms/Python/blob/master/maths/modular_exponential.py)
- * [Monte Carlo](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo.py)
- * [Monte Carlo Dice](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo_dice.py)
- * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/maths/newton_raphson.py)
- * [Number Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)
- * [Numerical Integration](https://github.com/TheAlgorithms/Python/blob/master/maths/numerical_integration.py)
- * [Perfect Cube](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_cube.py)
- * [Perfect Number](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_number.py)
- * [Perfect Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py)
- * [Pi Monte Carlo Estimation](https://github.com/TheAlgorithms/Python/blob/master/maths/pi_monte_carlo_estimation.py)
- * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py)
- * [Power Using Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/power_using_recursion.py)
- * [Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_check.py)
- * [Prime Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_factors.py)
- * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_numbers.py)
- * [Prime Sieve Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_sieve_eratosthenes.py)
- * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/maths/primelib.py)
- * [Pythagoras](https://github.com/TheAlgorithms/Python/blob/master/maths/pythagoras.py)
- * [Qr Decomposition](https://github.com/TheAlgorithms/Python/blob/master/maths/qr_decomposition.py)
- * [Quadratic Equations Complex Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/quadratic_equations_complex_numbers.py)
- * [Radians](https://github.com/TheAlgorithms/Python/blob/master/maths/radians.py)
- * [Radix2 Fft](https://github.com/TheAlgorithms/Python/blob/master/maths/radix2_fft.py)
- * [Relu](https://github.com/TheAlgorithms/Python/blob/master/maths/relu.py)
- * [Runge Kutta](https://github.com/TheAlgorithms/Python/blob/master/maths/runge_kutta.py)
- * [Segmented Sieve](https://github.com/TheAlgorithms/Python/blob/master/maths/segmented_sieve.py)
+ * [Abs](maths/abs.py)
+ * [Add](maths/add.py)
+ * [Addition Without Arithmetic](maths/addition_without_arithmetic.py)
+ * [Aliquot Sum](maths/aliquot_sum.py)
+ * [Allocation Number](maths/allocation_number.py)
+ * [Arc Length](maths/arc_length.py)
+ * [Area](maths/area.py)
+ * [Area Under Curve](maths/area_under_curve.py)
+ * [Armstrong Numbers](maths/armstrong_numbers.py)
+ * [Automorphic Number](maths/automorphic_number.py)
+ * [Average Absolute Deviation](maths/average_absolute_deviation.py)
+ * [Average Mean](maths/average_mean.py)
+ * [Average Median](maths/average_median.py)
+ * [Average Mode](maths/average_mode.py)
+ * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py)
+ * [Basic Maths](maths/basic_maths.py)
+ * [Binary Exp Mod](maths/binary_exp_mod.py)
+ * [Binary Exponentiation](maths/binary_exponentiation.py)
+ * [Binary Exponentiation 2](maths/binary_exponentiation_2.py)
+ * [Binary Exponentiation 3](maths/binary_exponentiation_3.py)
+ * [Binomial Coefficient](maths/binomial_coefficient.py)
+ * [Binomial Distribution](maths/binomial_distribution.py)
+ * [Bisection](maths/bisection.py)
+ * [Carmichael Number](maths/carmichael_number.py)
+ * [Catalan Number](maths/catalan_number.py)
+ * [Ceil](maths/ceil.py)
+ * [Check Polygon](maths/check_polygon.py)
+ * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py)
+ * [Collatz Sequence](maths/collatz_sequence.py)
+ * [Combinations](maths/combinations.py)
+ * [Decimal Isolate](maths/decimal_isolate.py)
+ * [Decimal To Fraction](maths/decimal_to_fraction.py)
+ * [Dodecahedron](maths/dodecahedron.py)
+ * [Double Factorial Iterative](maths/double_factorial_iterative.py)
+ * [Double Factorial Recursive](maths/double_factorial_recursive.py)
+ * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py)
+ * [Entropy](maths/entropy.py)
+ * [Euclidean Distance](maths/euclidean_distance.py)
+ * [Euclidean Gcd](maths/euclidean_gcd.py)
+ * [Euler Method](maths/euler_method.py)
+ * [Euler Modified](maths/euler_modified.py)
+ * [Eulers Totient](maths/eulers_totient.py)
+ * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py)
+ * [Factorial](maths/factorial.py)
+ * [Factors](maths/factors.py)
+ * [Fermat Little Theorem](maths/fermat_little_theorem.py)
+ * [Fibonacci](maths/fibonacci.py)
+ * [Find Max](maths/find_max.py)
+ * [Find Max Recursion](maths/find_max_recursion.py)
+ * [Find Min](maths/find_min.py)
+ * [Find Min Recursion](maths/find_min_recursion.py)
+ * [Floor](maths/floor.py)
+ * [Gamma](maths/gamma.py)
+ * [Gamma Recursive](maths/gamma_recursive.py)
+ * [Gaussian](maths/gaussian.py)
+ * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py)
+ * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py)
+ * [Greatest Common Divisor](maths/greatest_common_divisor.py)
+ * [Greedy Coin Change](maths/greedy_coin_change.py)
+ * [Hamming Numbers](maths/hamming_numbers.py)
+ * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py)
+ * [Hexagonal Number](maths/hexagonal_number.py)
+ * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py)
+ * [Is Int Palindrome](maths/is_int_palindrome.py)
+ * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py)
+ * [Is Square Free](maths/is_square_free.py)
+ * [Jaccard Similarity](maths/jaccard_similarity.py)
+ * [Juggler Sequence](maths/juggler_sequence.py)
+ * [Karatsuba](maths/karatsuba.py)
+ * [Krishnamurthy Number](maths/krishnamurthy_number.py)
+ * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py)
+ * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py)
+ * [Least Common Multiple](maths/least_common_multiple.py)
+ * [Line Length](maths/line_length.py)
+ * [Liouville Lambda](maths/liouville_lambda.py)
+ * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py)
+ * [Lucas Series](maths/lucas_series.py)
+ * [Maclaurin Series](maths/maclaurin_series.py)
+ * [Manhattan Distance](maths/manhattan_distance.py)
+ * [Matrix Exponentiation](maths/matrix_exponentiation.py)
+ * [Max Sum Sliding Window](maths/max_sum_sliding_window.py)
+ * [Median Of Two Arrays](maths/median_of_two_arrays.py)
+ * [Miller Rabin](maths/miller_rabin.py)
+ * [Mobius Function](maths/mobius_function.py)
+ * [Modular Exponential](maths/modular_exponential.py)
+ * [Monte Carlo](maths/monte_carlo.py)
+ * [Monte Carlo Dice](maths/monte_carlo_dice.py)
+ * [Nevilles Method](maths/nevilles_method.py)
+ * [Newton Raphson](maths/newton_raphson.py)
+ * [Number Of Digits](maths/number_of_digits.py)
+ * [Numerical Integration](maths/numerical_integration.py)
+ * [Odd Sieve](maths/odd_sieve.py)
+ * [Perfect Cube](maths/perfect_cube.py)
+ * [Perfect Number](maths/perfect_number.py)
+ * [Perfect Square](maths/perfect_square.py)
+ * [Persistence](maths/persistence.py)
+ * [Pi Generator](maths/pi_generator.py)
+ * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py)
+ * [Points Are Collinear 3D](maths/points_are_collinear_3d.py)
+ * [Pollard Rho](maths/pollard_rho.py)
+ * [Polynomial Evaluation](maths/polynomial_evaluation.py)
+ * Polynomials
+ * [Single Indeterminate Operations](maths/polynomials/single_indeterminate_operations.py)
+ * [Power Using Recursion](maths/power_using_recursion.py)
+ * [Prime Check](maths/prime_check.py)
+ * [Prime Factors](maths/prime_factors.py)
+ * [Prime Numbers](maths/prime_numbers.py)
+ * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py)
+ * [Primelib](maths/primelib.py)
+ * [Print Multiplication Table](maths/print_multiplication_table.py)
+ * [Pronic Number](maths/pronic_number.py)
+ * [Proth Number](maths/proth_number.py)
+ * [Pythagoras](maths/pythagoras.py)
+ * [Qr Decomposition](maths/qr_decomposition.py)
+ * [Quadratic Equations Complex Numbers](maths/quadratic_equations_complex_numbers.py)
+ * [Radians](maths/radians.py)
+ * [Radix2 Fft](maths/radix2_fft.py)
+ * [Relu](maths/relu.py)
+ * [Remove Digit](maths/remove_digit.py)
+ * [Runge Kutta](maths/runge_kutta.py)
+ * [Segmented Sieve](maths/segmented_sieve.py)
* Series
- * [Arithmetic Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/arithmetic_mean.py)
- * [Geometric Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_mean.py)
- * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py)
- * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py)
- * [P Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/p_series.py)
- * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py)
- * [Sigmoid](https://github.com/TheAlgorithms/Python/blob/master/maths/sigmoid.py)
- * [Simpson Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/simpson_rule.py)
- * [Softmax](https://github.com/TheAlgorithms/Python/blob/master/maths/softmax.py)
- * [Square Root](https://github.com/TheAlgorithms/Python/blob/master/maths/square_root.py)
- * [Sum Of Arithmetic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_arithmetic_series.py)
- * [Sum Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_digits.py)
- * [Sum Of Geometric Progression](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_geometric_progression.py)
- * [Test Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/test_prime_check.py)
- * [Trapezoidal Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/trapezoidal_rule.py)
- * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/triplet_sum.py)
- * [Two Pointer](https://github.com/TheAlgorithms/Python/blob/master/maths/two_pointer.py)
- * [Two Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/two_sum.py)
- * [Ugly Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/ugly_numbers.py)
- * [Volume](https://github.com/TheAlgorithms/Python/blob/master/maths/volume.py)
- * [Zellers Congruence](https://github.com/TheAlgorithms/Python/blob/master/maths/zellers_congruence.py)
+ * [Arithmetic](maths/series/arithmetic.py)
+ * [Geometric](maths/series/geometric.py)
+ * [Geometric Series](maths/series/geometric_series.py)
+ * [Harmonic](maths/series/harmonic.py)
+ * [Harmonic Series](maths/series/harmonic_series.py)
+ * [Hexagonal Numbers](maths/series/hexagonal_numbers.py)
+ * [P Series](maths/series/p_series.py)
+ * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py)
+ * [Sigmoid](maths/sigmoid.py)
+ * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py)
+ * [Signum](maths/signum.py)
+ * [Simpson Rule](maths/simpson_rule.py)
+ * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py)
+ * [Sin](maths/sin.py)
+ * [Sock Merchant](maths/sock_merchant.py)
+ * [Softmax](maths/softmax.py)
+ * [Square Root](maths/square_root.py)
+ * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py)
+ * [Sum Of Digits](maths/sum_of_digits.py)
+ * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py)
+ * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py)
+ * [Sumset](maths/sumset.py)
+ * [Sylvester Sequence](maths/sylvester_sequence.py)
+ * [Tanh](maths/tanh.py)
+ * [Test Prime Check](maths/test_prime_check.py)
+ * [Trapezoidal Rule](maths/trapezoidal_rule.py)
+ * [Triplet Sum](maths/triplet_sum.py)
+ * [Twin Prime](maths/twin_prime.py)
+ * [Two Pointer](maths/two_pointer.py)
+ * [Two Sum](maths/two_sum.py)
+ * [Ugly Numbers](maths/ugly_numbers.py)
+ * [Volume](maths/volume.py)
+ * [Weird Number](maths/weird_number.py)
+ * [Zellers Congruence](maths/zellers_congruence.py)
## Matrix
- * [Count Islands In Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/count_islands_in_matrix.py)
- * [Inverse Of Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/inverse_of_matrix.py)
- * [Matrix Class](https://github.com/TheAlgorithms/Python/blob/master/matrix/matrix_class.py)
- * [Matrix Operation](https://github.com/TheAlgorithms/Python/blob/master/matrix/matrix_operation.py)
- * [Nth Fibonacci Using Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/matrix/nth_fibonacci_using_matrix_exponentiation.py)
- * [Rotate Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/rotate_matrix.py)
- * [Searching In Sorted Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/searching_in_sorted_matrix.py)
- * [Sherman Morrison](https://github.com/TheAlgorithms/Python/blob/master/matrix/sherman_morrison.py)
- * [Spiral Print](https://github.com/TheAlgorithms/Python/blob/master/matrix/spiral_print.py)
+ * [Binary Search Matrix](matrix/binary_search_matrix.py)
+ * [Count Islands In Matrix](matrix/count_islands_in_matrix.py)
+ * [Count Negative Numbers In Sorted Matrix](matrix/count_negative_numbers_in_sorted_matrix.py)
+ * [Count Paths](matrix/count_paths.py)
+ * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py)
+ * [Inverse Of Matrix](matrix/inverse_of_matrix.py)
+ * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py)
+ * [Matrix Class](matrix/matrix_class.py)
+ * [Matrix Operation](matrix/matrix_operation.py)
+ * [Max Area Of Island](matrix/max_area_of_island.py)
+ * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py)
+ * [Pascal Triangle](matrix/pascal_triangle.py)
+ * [Rotate Matrix](matrix/rotate_matrix.py)
+ * [Searching In Sorted Matrix](matrix/searching_in_sorted_matrix.py)
+ * [Sherman Morrison](matrix/sherman_morrison.py)
+ * [Spiral Print](matrix/spiral_print.py)
* Tests
- * [Test Matrix Operation](https://github.com/TheAlgorithms/Python/blob/master/matrix/tests/test_matrix_operation.py)
+ * [Test Matrix Operation](matrix/tests/test_matrix_operation.py)
## Networking Flow
- * [Ford Fulkerson](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/ford_fulkerson.py)
- * [Minimum Cut](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/minimum_cut.py)
+ * [Ford Fulkerson](networking_flow/ford_fulkerson.py)
+ * [Minimum Cut](networking_flow/minimum_cut.py)
## Neural Network
- * [2 Hidden Layers Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/2_hidden_layers_neural_network.py)
- * [Back Propagation Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/back_propagation_neural_network.py)
- * [Convolution Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/convolution_neural_network.py)
- * [Perceptron](https://github.com/TheAlgorithms/Python/blob/master/neural_network/perceptron.py)
+ * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py)
+ * Activation Functions
+ * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py)
+ * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py)
+ * [Convolution Neural Network](neural_network/convolution_neural_network.py)
+ * [Input Data](neural_network/input_data.py)
+ * [Perceptron](neural_network/perceptron.py)
+ * [Simple Neural Network](neural_network/simple_neural_network.py)
## Other
- * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py)
- * [Davis–Putnam–Logemann–Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davis–putnam–logemann–loveland.py)
- * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py)
- * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py)
- * [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py)
- * [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py)
- * [Graham Scan](https://github.com/TheAlgorithms/Python/blob/master/other/graham_scan.py)
- * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py)
- * [Least Recently Used](https://github.com/TheAlgorithms/Python/blob/master/other/least_recently_used.py)
- * [Lfu Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lfu_cache.py)
- * [Linear Congruential Generator](https://github.com/TheAlgorithms/Python/blob/master/other/linear_congruential_generator.py)
- * [Lru Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lru_cache.py)
- * [Magicdiamondpattern](https://github.com/TheAlgorithms/Python/blob/master/other/magicdiamondpattern.py)
- * [Nested Brackets](https://github.com/TheAlgorithms/Python/blob/master/other/nested_brackets.py)
- * [Password Generator](https://github.com/TheAlgorithms/Python/blob/master/other/password_generator.py)
- * [Scoring Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/scoring_algorithm.py)
- * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py)
- * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py)
+ * [Activity Selection](other/activity_selection.py)
+ * [Alternative List Arrange](other/alternative_list_arrange.py)
+ * [Davisb Putnamb Logemannb Loveland](other/davisb_putnamb_logemannb_loveland.py)
+ * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py)
+ * [Doomsday](other/doomsday.py)
+ * [Fischer Yates Shuffle](other/fischer_yates_shuffle.py)
+ * [Gauss Easter](other/gauss_easter.py)
+ * [Graham Scan](other/graham_scan.py)
+ * [Greedy](other/greedy.py)
+ * [Guess The Number Search](other/guess_the_number_search.py)
+ * [H Index](other/h_index.py)
+ * [Least Recently Used](other/least_recently_used.py)
+ * [Lfu Cache](other/lfu_cache.py)
+ * [Linear Congruential Generator](other/linear_congruential_generator.py)
+ * [Lru Cache](other/lru_cache.py)
+ * [Magicdiamondpattern](other/magicdiamondpattern.py)
+ * [Maximum Subsequence](other/maximum_subsequence.py)
+ * [Nested Brackets](other/nested_brackets.py)
+ * [Number Container System](other/number_container_system.py)
+ * [Password](other/password.py)
+ * [Quine](other/quine.py)
+ * [Scoring Algorithm](other/scoring_algorithm.py)
+ * [Sdes](other/sdes.py)
+ * [Tower Of Hanoi](other/tower_of_hanoi.py)
## Physics
- * [N Body Simulation](https://github.com/TheAlgorithms/Python/blob/master/physics/n_body_simulation.py)
+ * [Altitude Pressure](physics/altitude_pressure.py)
+ * [Archimedes Principle](physics/archimedes_principle.py)
+ * [Basic Orbital Capture](physics/basic_orbital_capture.py)
+ * [Casimir Effect](physics/casimir_effect.py)
+ * [Centripetal Force](physics/centripetal_force.py)
+ * [Grahams Law](physics/grahams_law.py)
+ * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py)
+ * [Hubble Parameter](physics/hubble_parameter.py)
+ * [Ideal Gas Law](physics/ideal_gas_law.py)
+ * [Kinetic Energy](physics/kinetic_energy.py)
+ * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py)
+ * [Malus Law](physics/malus_law.py)
+ * [N Body Simulation](physics/n_body_simulation.py)
+ * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py)
+ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py)
+ * [Potential Energy](physics/potential_energy.py)
+ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py)
+ * [Shear Stress](physics/shear_stress.py)
+ * [Speed Of Sound](physics/speed_of_sound.py)
## Project Euler
* Problem 001
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol3.py)
- * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol4.py)
- * [Sol5](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol5.py)
- * [Sol6](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol6.py)
- * [Sol7](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol7.py)
+ * [Sol1](project_euler/problem_001/sol1.py)
+ * [Sol2](project_euler/problem_001/sol2.py)
+ * [Sol3](project_euler/problem_001/sol3.py)
+ * [Sol4](project_euler/problem_001/sol4.py)
+ * [Sol5](project_euler/problem_001/sol5.py)
+ * [Sol6](project_euler/problem_001/sol6.py)
+ * [Sol7](project_euler/problem_001/sol7.py)
* Problem 002
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol3.py)
- * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol4.py)
- * [Sol5](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol5.py)
+ * [Sol1](project_euler/problem_002/sol1.py)
+ * [Sol2](project_euler/problem_002/sol2.py)
+ * [Sol3](project_euler/problem_002/sol3.py)
+ * [Sol4](project_euler/problem_002/sol4.py)
+ * [Sol5](project_euler/problem_002/sol5.py)
* Problem 003
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol3.py)
+ * [Sol1](project_euler/problem_003/sol1.py)
+ * [Sol2](project_euler/problem_003/sol2.py)
+ * [Sol3](project_euler/problem_003/sol3.py)
* Problem 004
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_004/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_004/sol2.py)
+ * [Sol1](project_euler/problem_004/sol1.py)
+ * [Sol2](project_euler/problem_004/sol2.py)
* Problem 005
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_005/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_005/sol2.py)
+ * [Sol1](project_euler/problem_005/sol1.py)
+ * [Sol2](project_euler/problem_005/sol2.py)
* Problem 006
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol3.py)
- * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol4.py)
+ * [Sol1](project_euler/problem_006/sol1.py)
+ * [Sol2](project_euler/problem_006/sol2.py)
+ * [Sol3](project_euler/problem_006/sol3.py)
+ * [Sol4](project_euler/problem_006/sol4.py)
* Problem 007
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol3.py)
+ * [Sol1](project_euler/problem_007/sol1.py)
+ * [Sol2](project_euler/problem_007/sol2.py)
+ * [Sol3](project_euler/problem_007/sol3.py)
* Problem 008
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol3.py)
+ * [Sol1](project_euler/problem_008/sol1.py)
+ * [Sol2](project_euler/problem_008/sol2.py)
+ * [Sol3](project_euler/problem_008/sol3.py)
* Problem 009
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol3.py)
+ * [Sol1](project_euler/problem_009/sol1.py)
+ * [Sol2](project_euler/problem_009/sol2.py)
+ * [Sol3](project_euler/problem_009/sol3.py)
* Problem 010
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol3.py)
+ * [Sol1](project_euler/problem_010/sol1.py)
+ * [Sol2](project_euler/problem_010/sol2.py)
+ * [Sol3](project_euler/problem_010/sol3.py)
* Problem 011
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_011/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_011/sol2.py)
+ * [Sol1](project_euler/problem_011/sol1.py)
+ * [Sol2](project_euler/problem_011/sol2.py)
* Problem 012
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_012/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_012/sol2.py)
+ * [Sol1](project_euler/problem_012/sol1.py)
+ * [Sol2](project_euler/problem_012/sol2.py)
* Problem 013
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_013/sol1.py)
+ * [Sol1](project_euler/problem_013/sol1.py)
* Problem 014
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_014/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_014/sol2.py)
+ * [Sol1](project_euler/problem_014/sol1.py)
+ * [Sol2](project_euler/problem_014/sol2.py)
* Problem 015
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_015/sol1.py)
+ * [Sol1](project_euler/problem_015/sol1.py)
* Problem 016
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_016/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_016/sol2.py)
+ * [Sol1](project_euler/problem_016/sol1.py)
+ * [Sol2](project_euler/problem_016/sol2.py)
* Problem 017
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_017/sol1.py)
+ * [Sol1](project_euler/problem_017/sol1.py)
* Problem 018
- * [Solution](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_018/solution.py)
+ * [Solution](project_euler/problem_018/solution.py)
* Problem 019
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_019/sol1.py)
+ * [Sol1](project_euler/problem_019/sol1.py)
* Problem 020
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol3.py)
- * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol4.py)
+ * [Sol1](project_euler/problem_020/sol1.py)
+ * [Sol2](project_euler/problem_020/sol2.py)
+ * [Sol3](project_euler/problem_020/sol3.py)
+ * [Sol4](project_euler/problem_020/sol4.py)
* Problem 021
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_021/sol1.py)
+ * [Sol1](project_euler/problem_021/sol1.py)
* Problem 022
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_022/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_022/sol2.py)
+ * [Sol1](project_euler/problem_022/sol1.py)
+ * [Sol2](project_euler/problem_022/sol2.py)
* Problem 023
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_023/sol1.py)
+ * [Sol1](project_euler/problem_023/sol1.py)
* Problem 024
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_024/sol1.py)
+ * [Sol1](project_euler/problem_024/sol1.py)
* Problem 025
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol2.py)
- * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol3.py)
+ * [Sol1](project_euler/problem_025/sol1.py)
+ * [Sol2](project_euler/problem_025/sol2.py)
+ * [Sol3](project_euler/problem_025/sol3.py)
* Problem 026
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_026/sol1.py)
+ * [Sol1](project_euler/problem_026/sol1.py)
* Problem 027
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_027/sol1.py)
+ * [Sol1](project_euler/problem_027/sol1.py)
* Problem 028
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_028/sol1.py)
+ * [Sol1](project_euler/problem_028/sol1.py)
* Problem 029
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_029/sol1.py)
+ * [Sol1](project_euler/problem_029/sol1.py)
* Problem 030
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_030/sol1.py)
+ * [Sol1](project_euler/problem_030/sol1.py)
* Problem 031
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_031/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_031/sol2.py)
+ * [Sol1](project_euler/problem_031/sol1.py)
+ * [Sol2](project_euler/problem_031/sol2.py)
* Problem 032
- * [Sol32](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_032/sol32.py)
+ * [Sol32](project_euler/problem_032/sol32.py)
* Problem 033
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_033/sol1.py)
+ * [Sol1](project_euler/problem_033/sol1.py)
* Problem 034
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_034/sol1.py)
+ * [Sol1](project_euler/problem_034/sol1.py)
* Problem 035
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_035/sol1.py)
+ * [Sol1](project_euler/problem_035/sol1.py)
* Problem 036
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_036/sol1.py)
+ * [Sol1](project_euler/problem_036/sol1.py)
* Problem 037
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_037/sol1.py)
+ * [Sol1](project_euler/problem_037/sol1.py)
* Problem 038
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_038/sol1.py)
+ * [Sol1](project_euler/problem_038/sol1.py)
* Problem 039
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_039/sol1.py)
+ * [Sol1](project_euler/problem_039/sol1.py)
* Problem 040
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_040/sol1.py)
+ * [Sol1](project_euler/problem_040/sol1.py)
* Problem 041
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_041/sol1.py)
+ * [Sol1](project_euler/problem_041/sol1.py)
* Problem 042
- * [Solution42](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_042/solution42.py)
+ * [Solution42](project_euler/problem_042/solution42.py)
* Problem 043
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_043/sol1.py)
+ * [Sol1](project_euler/problem_043/sol1.py)
* Problem 044
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_044/sol1.py)
+ * [Sol1](project_euler/problem_044/sol1.py)
* Problem 045
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_045/sol1.py)
+ * [Sol1](project_euler/problem_045/sol1.py)
* Problem 046
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_046/sol1.py)
+ * [Sol1](project_euler/problem_046/sol1.py)
* Problem 047
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_047/sol1.py)
+ * [Sol1](project_euler/problem_047/sol1.py)
* Problem 048
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_048/sol1.py)
+ * [Sol1](project_euler/problem_048/sol1.py)
* Problem 049
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_049/sol1.py)
+ * [Sol1](project_euler/problem_049/sol1.py)
* Problem 050
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_050/sol1.py)
+ * [Sol1](project_euler/problem_050/sol1.py)
* Problem 051
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_051/sol1.py)
+ * [Sol1](project_euler/problem_051/sol1.py)
* Problem 052
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_052/sol1.py)
+ * [Sol1](project_euler/problem_052/sol1.py)
* Problem 053
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_053/sol1.py)
+ * [Sol1](project_euler/problem_053/sol1.py)
* Problem 054
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_054/sol1.py)
- * [Test Poker Hand](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_054/test_poker_hand.py)
+ * [Sol1](project_euler/problem_054/sol1.py)
+ * [Test Poker Hand](project_euler/problem_054/test_poker_hand.py)
* Problem 055
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_055/sol1.py)
+ * [Sol1](project_euler/problem_055/sol1.py)
* Problem 056
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_056/sol1.py)
+ * [Sol1](project_euler/problem_056/sol1.py)
* Problem 057
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_057/sol1.py)
+ * [Sol1](project_euler/problem_057/sol1.py)
* Problem 058
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_058/sol1.py)
+ * [Sol1](project_euler/problem_058/sol1.py)
* Problem 059
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_059/sol1.py)
+ * [Sol1](project_euler/problem_059/sol1.py)
* Problem 062
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_062/sol1.py)
+ * [Sol1](project_euler/problem_062/sol1.py)
* Problem 063
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_063/sol1.py)
+ * [Sol1](project_euler/problem_063/sol1.py)
* Problem 064
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_064/sol1.py)
+ * [Sol1](project_euler/problem_064/sol1.py)
* Problem 065
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_065/sol1.py)
+ * [Sol1](project_euler/problem_065/sol1.py)
* Problem 067
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol1.py)
+ * [Sol1](project_euler/problem_067/sol1.py)
+ * [Sol2](project_euler/problem_067/sol2.py)
+ * Problem 068
+ * [Sol1](project_euler/problem_068/sol1.py)
* Problem 069
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_069/sol1.py)
+ * [Sol1](project_euler/problem_069/sol1.py)
* Problem 070
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_070/sol1.py)
+ * [Sol1](project_euler/problem_070/sol1.py)
* Problem 071
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_071/sol1.py)
+ * [Sol1](project_euler/problem_071/sol1.py)
* Problem 072
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol2.py)
+ * [Sol1](project_euler/problem_072/sol1.py)
+ * [Sol2](project_euler/problem_072/sol2.py)
+ * Problem 073
+ * [Sol1](project_euler/problem_073/sol1.py)
* Problem 074
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol1.py)
- * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol2.py)
+ * [Sol1](project_euler/problem_074/sol1.py)
+ * [Sol2](project_euler/problem_074/sol2.py)
* Problem 075
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_075/sol1.py)
+ * [Sol1](project_euler/problem_075/sol1.py)
* Problem 076
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_076/sol1.py)
+ * [Sol1](project_euler/problem_076/sol1.py)
* Problem 077
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_077/sol1.py)
+ * [Sol1](project_euler/problem_077/sol1.py)
+ * Problem 078
+ * [Sol1](project_euler/problem_078/sol1.py)
+ * Problem 079
+ * [Sol1](project_euler/problem_079/sol1.py)
* Problem 080
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_080/sol1.py)
+ * [Sol1](project_euler/problem_080/sol1.py)
* Problem 081
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_081/sol1.py)
+ * [Sol1](project_euler/problem_081/sol1.py)
+ * Problem 082
+ * [Sol1](project_euler/problem_082/sol1.py)
* Problem 085
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_085/sol1.py)
+ * [Sol1](project_euler/problem_085/sol1.py)
* Problem 086
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_086/sol1.py)
+ * [Sol1](project_euler/problem_086/sol1.py)
* Problem 087
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_087/sol1.py)
+ * [Sol1](project_euler/problem_087/sol1.py)
* Problem 089
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_089/sol1.py)
+ * [Sol1](project_euler/problem_089/sol1.py)
* Problem 091
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_091/sol1.py)
+ * [Sol1](project_euler/problem_091/sol1.py)
+ * Problem 092
+ * [Sol1](project_euler/problem_092/sol1.py)
+ * Problem 094
+ * [Sol1](project_euler/problem_094/sol1.py)
* Problem 097
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_097/sol1.py)
+ * [Sol1](project_euler/problem_097/sol1.py)
* Problem 099
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_099/sol1.py)
+ * [Sol1](project_euler/problem_099/sol1.py)
+ * Problem 100
+ * [Sol1](project_euler/problem_100/sol1.py)
* Problem 101
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py)
+ * [Sol1](project_euler/problem_101/sol1.py)
* Problem 102
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py)
+ * [Sol1](project_euler/problem_102/sol1.py)
+ * Problem 104
+ * [Sol1](project_euler/problem_104/sol1.py)
* Problem 107
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_107/sol1.py)
+ * [Sol1](project_euler/problem_107/sol1.py)
* Problem 109
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_109/sol1.py)
+ * [Sol1](project_euler/problem_109/sol1.py)
* Problem 112
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py)
+ * [Sol1](project_euler/problem_112/sol1.py)
* Problem 113
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_113/sol1.py)
+ * [Sol1](project_euler/problem_113/sol1.py)
+ * Problem 114
+ * [Sol1](project_euler/problem_114/sol1.py)
+ * Problem 115
+ * [Sol1](project_euler/problem_115/sol1.py)
+ * Problem 116
+ * [Sol1](project_euler/problem_116/sol1.py)
+ * Problem 117
+ * [Sol1](project_euler/problem_117/sol1.py)
* Problem 119
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_119/sol1.py)
+ * [Sol1](project_euler/problem_119/sol1.py)
* Problem 120
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_120/sol1.py)
+ * [Sol1](project_euler/problem_120/sol1.py)
* Problem 121
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_121/sol1.py)
+ * [Sol1](project_euler/problem_121/sol1.py)
* Problem 123
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_123/sol1.py)
+ * [Sol1](project_euler/problem_123/sol1.py)
* Problem 125
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_125/sol1.py)
+ * [Sol1](project_euler/problem_125/sol1.py)
* Problem 129
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_129/sol1.py)
+ * [Sol1](project_euler/problem_129/sol1.py)
+ * Problem 131
+ * [Sol1](project_euler/problem_131/sol1.py)
* Problem 135
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py)
+ * [Sol1](project_euler/problem_135/sol1.py)
* Problem 144
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_144/sol1.py)
+ * [Sol1](project_euler/problem_144/sol1.py)
+ * Problem 145
+ * [Sol1](project_euler/problem_145/sol1.py)
* Problem 173
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py)
+ * [Sol1](project_euler/problem_173/sol1.py)
* Problem 174
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_174/sol1.py)
+ * [Sol1](project_euler/problem_174/sol1.py)
* Problem 180
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_180/sol1.py)
+ * [Sol1](project_euler/problem_180/sol1.py)
+ * Problem 187
+ * [Sol1](project_euler/problem_187/sol1.py)
* Problem 188
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_188/sol1.py)
+ * [Sol1](project_euler/problem_188/sol1.py)
* Problem 191
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_191/sol1.py)
+ * [Sol1](project_euler/problem_191/sol1.py)
* Problem 203
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_203/sol1.py)
+ * [Sol1](project_euler/problem_203/sol1.py)
+ * Problem 205
+ * [Sol1](project_euler/problem_205/sol1.py)
* Problem 206
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_206/sol1.py)
+ * [Sol1](project_euler/problem_206/sol1.py)
* Problem 207
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_207/sol1.py)
+ * [Sol1](project_euler/problem_207/sol1.py)
* Problem 234
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_234/sol1.py)
+ * [Sol1](project_euler/problem_234/sol1.py)
* Problem 301
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_301/sol1.py)
+ * [Sol1](project_euler/problem_301/sol1.py)
+ * Problem 493
+ * [Sol1](project_euler/problem_493/sol1.py)
* Problem 551
- * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_551/sol1.py)
+ * [Sol1](project_euler/problem_551/sol1.py)
+ * Problem 587
+ * [Sol1](project_euler/problem_587/sol1.py)
+ * Problem 686
+ * [Sol1](project_euler/problem_686/sol1.py)
+ * Problem 800
+ * [Sol1](project_euler/problem_800/sol1.py)
## Quantum
- * [Deutsch Jozsa](https://github.com/TheAlgorithms/Python/blob/master/quantum/deutsch_jozsa.py)
- * [Half Adder](https://github.com/TheAlgorithms/Python/blob/master/quantum/half_adder.py)
- * [Not Gate](https://github.com/TheAlgorithms/Python/blob/master/quantum/not_gate.py)
- * [Quantum Entanglement](https://github.com/TheAlgorithms/Python/blob/master/quantum/quantum_entanglement.py)
- * [Ripple Adder Classic](https://github.com/TheAlgorithms/Python/blob/master/quantum/ripple_adder_classic.py)
- * [Single Qubit Measure](https://github.com/TheAlgorithms/Python/blob/master/quantum/single_qubit_measure.py)
+ * [Bb84](quantum/bb84.py)
+ * [Deutsch Jozsa](quantum/deutsch_jozsa.py)
+ * [Half Adder](quantum/half_adder.py)
+ * [Not Gate](quantum/not_gate.py)
+ * [Q Fourier Transform](quantum/q_fourier_transform.py)
+ * [Q Full Adder](quantum/q_full_adder.py)
+ * [Quantum Entanglement](quantum/quantum_entanglement.py)
+ * [Quantum Teleportation](quantum/quantum_teleportation.py)
+ * [Ripple Adder Classic](quantum/ripple_adder_classic.py)
+ * [Single Qubit Measure](quantum/single_qubit_measure.py)
+ * [Superdense Coding](quantum/superdense_coding.py)
## Scheduling
- * [First Come First Served](https://github.com/TheAlgorithms/Python/blob/master/scheduling/first_come_first_served.py)
- * [Round Robin](https://github.com/TheAlgorithms/Python/blob/master/scheduling/round_robin.py)
- * [Shortest Job First](https://github.com/TheAlgorithms/Python/blob/master/scheduling/shortest_job_first.py)
+ * [First Come First Served](scheduling/first_come_first_served.py)
+ * [Highest Response Ratio Next](scheduling/highest_response_ratio_next.py)
+ * [Job Sequencing With Deadline](scheduling/job_sequencing_with_deadline.py)
+ * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py)
+ * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py)
+ * [Round Robin](scheduling/round_robin.py)
+ * [Shortest Job First](scheduling/shortest_job_first.py)
## Searches
- * [Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_search.py)
- * [Binary Tree Traversal](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_tree_traversal.py)
- * [Double Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search.py)
- * [Double Linear Search Recursion](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search_recursion.py)
- * [Fibonacci Search](https://github.com/TheAlgorithms/Python/blob/master/searches/fibonacci_search.py)
- * [Hill Climbing](https://github.com/TheAlgorithms/Python/blob/master/searches/hill_climbing.py)
- * [Interpolation Search](https://github.com/TheAlgorithms/Python/blob/master/searches/interpolation_search.py)
- * [Jump Search](https://github.com/TheAlgorithms/Python/blob/master/searches/jump_search.py)
- * [Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/linear_search.py)
- * [Quick Select](https://github.com/TheAlgorithms/Python/blob/master/searches/quick_select.py)
- * [Sentinel Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/sentinel_linear_search.py)
- * [Simple Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/simple_binary_search.py)
- * [Simulated Annealing](https://github.com/TheAlgorithms/Python/blob/master/searches/simulated_annealing.py)
- * [Tabu Search](https://github.com/TheAlgorithms/Python/blob/master/searches/tabu_search.py)
- * [Ternary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/ternary_search.py)
+ * [Binary Search](searches/binary_search.py)
+ * [Binary Tree Traversal](searches/binary_tree_traversal.py)
+ * [Double Linear Search](searches/double_linear_search.py)
+ * [Double Linear Search Recursion](searches/double_linear_search_recursion.py)
+ * [Fibonacci Search](searches/fibonacci_search.py)
+ * [Hill Climbing](searches/hill_climbing.py)
+ * [Interpolation Search](searches/interpolation_search.py)
+ * [Jump Search](searches/jump_search.py)
+ * [Linear Search](searches/linear_search.py)
+ * [Quick Select](searches/quick_select.py)
+ * [Sentinel Linear Search](searches/sentinel_linear_search.py)
+ * [Simple Binary Search](searches/simple_binary_search.py)
+ * [Simulated Annealing](searches/simulated_annealing.py)
+ * [Tabu Search](searches/tabu_search.py)
+ * [Ternary Search](searches/ternary_search.py)
## Sorts
- * [Bead Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bead_sort.py)
- * [Bitonic Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bitonic_sort.py)
- * [Bogo Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bogo_sort.py)
- * [Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bubble_sort.py)
- * [Bucket Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bucket_sort.py)
- * [Cocktail Shaker Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cocktail_shaker_sort.py)
- * [Comb Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/comb_sort.py)
- * [Counting Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/counting_sort.py)
- * [Cycle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cycle_sort.py)
- * [Double Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/double_sort.py)
- * [External Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/external_sort.py)
- * [Gnome Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/gnome_sort.py)
- * [Heap Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/heap_sort.py)
- * [Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py)
- * [Intro Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/intro_sort.py)
- * [Iterative Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/iterative_merge_sort.py)
- * [Merge Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_insertion_sort.py)
- * [Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_sort.py)
- * [Msd Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/msd_radix_sort.py)
- * [Natural Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/natural_sort.py)
- * [Odd Even Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_sort.py)
- * [Odd Even Transposition Parallel](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_parallel.py)
- * [Odd Even Transposition Single Threaded](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_single_threaded.py)
- * [Pancake Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pancake_sort.py)
- * [Patience Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/patience_sort.py)
- * [Pigeon Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pigeon_sort.py)
- * [Pigeonhole Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pigeonhole_sort.py)
- * [Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort.py)
- * [Quick Sort 3 Partition](https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort_3_partition.py)
- * [Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py)
- * [Random Normal Distribution Quicksort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_normal_distribution_quicksort.py)
- * [Random Pivot Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_pivot_quick_sort.py)
- * [Recursive Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_bubble_sort.py)
- * [Recursive Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_insertion_sort.py)
- * [Recursive Mergesort Array](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_mergesort_array.py)
- * [Recursive Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_quick_sort.py)
- * [Selection Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/selection_sort.py)
- * [Shell Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/shell_sort.py)
- * [Slowsort](https://github.com/TheAlgorithms/Python/blob/master/sorts/slowsort.py)
- * [Stooge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/stooge_sort.py)
- * [Strand Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/strand_sort.py)
- * [Tim Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tim_sort.py)
- * [Topological Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py)
- * [Tree Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tree_sort.py)
- * [Unknown Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/unknown_sort.py)
- * [Wiggle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/wiggle_sort.py)
+ * [Bead Sort](sorts/bead_sort.py)
+ * [Binary Insertion Sort](sorts/binary_insertion_sort.py)
+ * [Bitonic Sort](sorts/bitonic_sort.py)
+ * [Bogo Sort](sorts/bogo_sort.py)
+ * [Bubble Sort](sorts/bubble_sort.py)
+ * [Bucket Sort](sorts/bucket_sort.py)
+ * [Circle Sort](sorts/circle_sort.py)
+ * [Cocktail Shaker Sort](sorts/cocktail_shaker_sort.py)
+ * [Comb Sort](sorts/comb_sort.py)
+ * [Counting Sort](sorts/counting_sort.py)
+ * [Cycle Sort](sorts/cycle_sort.py)
+ * [Double Sort](sorts/double_sort.py)
+ * [Dutch National Flag Sort](sorts/dutch_national_flag_sort.py)
+ * [Exchange Sort](sorts/exchange_sort.py)
+ * [External Sort](sorts/external_sort.py)
+ * [Gnome Sort](sorts/gnome_sort.py)
+ * [Heap Sort](sorts/heap_sort.py)
+ * [Insertion Sort](sorts/insertion_sort.py)
+ * [Intro Sort](sorts/intro_sort.py)
+ * [Iterative Merge Sort](sorts/iterative_merge_sort.py)
+ * [Merge Insertion Sort](sorts/merge_insertion_sort.py)
+ * [Merge Sort](sorts/merge_sort.py)
+ * [Msd Radix Sort](sorts/msd_radix_sort.py)
+ * [Natural Sort](sorts/natural_sort.py)
+ * [Odd Even Sort](sorts/odd_even_sort.py)
+ * [Odd Even Transposition Parallel](sorts/odd_even_transposition_parallel.py)
+ * [Odd Even Transposition Single Threaded](sorts/odd_even_transposition_single_threaded.py)
+ * [Pancake Sort](sorts/pancake_sort.py)
+ * [Patience Sort](sorts/patience_sort.py)
+ * [Pigeon Sort](sorts/pigeon_sort.py)
+ * [Pigeonhole Sort](sorts/pigeonhole_sort.py)
+ * [Quick Sort](sorts/quick_sort.py)
+ * [Quick Sort 3 Partition](sorts/quick_sort_3_partition.py)
+ * [Radix Sort](sorts/radix_sort.py)
+ * [Random Normal Distribution Quicksort](sorts/random_normal_distribution_quicksort.py)
+ * [Random Pivot Quick Sort](sorts/random_pivot_quick_sort.py)
+ * [Recursive Bubble Sort](sorts/recursive_bubble_sort.py)
+ * [Recursive Insertion Sort](sorts/recursive_insertion_sort.py)
+ * [Recursive Mergesort Array](sorts/recursive_mergesort_array.py)
+ * [Recursive Quick Sort](sorts/recursive_quick_sort.py)
+ * [Selection Sort](sorts/selection_sort.py)
+ * [Shell Sort](sorts/shell_sort.py)
+ * [Shrink Shell Sort](sorts/shrink_shell_sort.py)
+ * [Slowsort](sorts/slowsort.py)
+ * [Stooge Sort](sorts/stooge_sort.py)
+ * [Strand Sort](sorts/strand_sort.py)
+ * [Tim Sort](sorts/tim_sort.py)
+ * [Topological Sort](sorts/topological_sort.py)
+ * [Tree Sort](sorts/tree_sort.py)
+ * [Unknown Sort](sorts/unknown_sort.py)
+ * [Wiggle Sort](sorts/wiggle_sort.py)
## Strings
- * [Aho Corasick](https://github.com/TheAlgorithms/Python/blob/master/strings/aho_corasick.py)
- * [Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/anagrams.py)
- * [Autocomplete Using Trie](https://github.com/TheAlgorithms/Python/blob/master/strings/autocomplete_using_trie.py)
- * [Boyer Moore Search](https://github.com/TheAlgorithms/Python/blob/master/strings/boyer_moore_search.py)
- * [Can String Be Rearranged As Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/can_string_be_rearranged_as_palindrome.py)
- * [Capitalize](https://github.com/TheAlgorithms/Python/blob/master/strings/capitalize.py)
- * [Check Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/check_anagrams.py)
- * [Check Pangram](https://github.com/TheAlgorithms/Python/blob/master/strings/check_pangram.py)
- * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/strings/detecting_english_programmatically.py)
- * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/strings/frequency_finder.py)
- * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py)
- * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py)
- * [Knuth Morris Pratt](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py)
- * [Levenshtein Distance](https://github.com/TheAlgorithms/Python/blob/master/strings/levenshtein_distance.py)
- * [Lower](https://github.com/TheAlgorithms/Python/blob/master/strings/lower.py)
- * [Manacher](https://github.com/TheAlgorithms/Python/blob/master/strings/manacher.py)
- * [Min Cost String Conversion](https://github.com/TheAlgorithms/Python/blob/master/strings/min_cost_string_conversion.py)
- * [Naive String Search](https://github.com/TheAlgorithms/Python/blob/master/strings/naive_string_search.py)
- * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/palindrome.py)
- * [Prefix Function](https://github.com/TheAlgorithms/Python/blob/master/strings/prefix_function.py)
- * [Rabin Karp](https://github.com/TheAlgorithms/Python/blob/master/strings/rabin_karp.py)
- * [Remove Duplicate](https://github.com/TheAlgorithms/Python/blob/master/strings/remove_duplicate.py)
- * [Reverse Letters](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_letters.py)
- * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py)
- * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py)
- * [Swap Case](https://github.com/TheAlgorithms/Python/blob/master/strings/swap_case.py)
- * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py)
- * [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py)
- * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/strings/word_patterns.py)
- * [Z Function](https://github.com/TheAlgorithms/Python/blob/master/strings/z_function.py)
+ * [Aho Corasick](strings/aho_corasick.py)
+ * [Alternative String Arrange](strings/alternative_string_arrange.py)
+ * [Anagrams](strings/anagrams.py)
+ * [Autocomplete Using Trie](strings/autocomplete_using_trie.py)
+ * [Barcode Validator](strings/barcode_validator.py)
+ * [Boyer Moore Search](strings/boyer_moore_search.py)
+ * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py)
+ * [Capitalize](strings/capitalize.py)
+ * [Check Anagrams](strings/check_anagrams.py)
+ * [Credit Card Validator](strings/credit_card_validator.py)
+ * [Detecting English Programmatically](strings/detecting_english_programmatically.py)
+ * [Dna](strings/dna.py)
+ * [Frequency Finder](strings/frequency_finder.py)
+ * [Hamming Distance](strings/hamming_distance.py)
+ * [Indian Phone Validator](strings/indian_phone_validator.py)
+ * [Is Contains Unique Chars](strings/is_contains_unique_chars.py)
+ * [Is Isogram](strings/is_isogram.py)
+ * [Is Pangram](strings/is_pangram.py)
+ * [Is Spain National Id](strings/is_spain_national_id.py)
+ * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py)
+ * [Jaro Winkler](strings/jaro_winkler.py)
+ * [Join](strings/join.py)
+ * [Knuth Morris Pratt](strings/knuth_morris_pratt.py)
+ * [Levenshtein Distance](strings/levenshtein_distance.py)
+ * [Lower](strings/lower.py)
+ * [Manacher](strings/manacher.py)
+ * [Min Cost String Conversion](strings/min_cost_string_conversion.py)
+ * [Naive String Search](strings/naive_string_search.py)
+ * [Ngram](strings/ngram.py)
+ * [Palindrome](strings/palindrome.py)
+ * [Prefix Function](strings/prefix_function.py)
+ * [Rabin Karp](strings/rabin_karp.py)
+ * [Remove Duplicate](strings/remove_duplicate.py)
+ * [Reverse Letters](strings/reverse_letters.py)
+ * [Reverse Long Words](strings/reverse_long_words.py)
+ * [Reverse Words](strings/reverse_words.py)
+ * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py)
+ * [Split](strings/split.py)
+ * [String Switch Case](strings/string_switch_case.py)
+ * [Text Justification](strings/text_justification.py)
+ * [Top K Frequent Words](strings/top_k_frequent_words.py)
+ * [Upper](strings/upper.py)
+ * [Wave](strings/wave.py)
+ * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py)
+ * [Word Occurrence](strings/word_occurrence.py)
+ * [Word Patterns](strings/word_patterns.py)
+ * [Z Function](strings/z_function.py)
## Web Programming
- * [Co2 Emission](https://github.com/TheAlgorithms/Python/blob/master/web_programming/co2_emission.py)
- * [Covid Stats Via Xpath](https://github.com/TheAlgorithms/Python/blob/master/web_programming/covid_stats_via_xpath.py)
- * [Crawl Google Results](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_results.py)
- * [Crawl Google Scholar Citation](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_scholar_citation.py)
- * [Currency Converter](https://github.com/TheAlgorithms/Python/blob/master/web_programming/currency_converter.py)
- * [Current Stock Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_stock_price.py)
- * [Current Weather](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_weather.py)
- * [Daily Horoscope](https://github.com/TheAlgorithms/Python/blob/master/web_programming/daily_horoscope.py)
- * [Emails From Url](https://github.com/TheAlgorithms/Python/blob/master/web_programming/emails_from_url.py)
- * [Fetch Bbc News](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_bbc_news.py)
- * [Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_github_info.py)
- * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py)
- * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py)
- * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py)
- * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py)
- * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py)
- * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py)
- * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py)
- * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py)
- * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py)
- * [World Covid19 Stats](https://github.com/TheAlgorithms/Python/blob/master/web_programming/world_covid19_stats.py)
+ * [Co2 Emission](web_programming/co2_emission.py)
+ * [Convert Number To Words](web_programming/convert_number_to_words.py)
+ * [Covid Stats Via Xpath](web_programming/covid_stats_via_xpath.py)
+ * [Crawl Google Results](web_programming/crawl_google_results.py)
+ * [Crawl Google Scholar Citation](web_programming/crawl_google_scholar_citation.py)
+ * [Currency Converter](web_programming/currency_converter.py)
+ * [Current Stock Price](web_programming/current_stock_price.py)
+ * [Current Weather](web_programming/current_weather.py)
+ * [Daily Horoscope](web_programming/daily_horoscope.py)
+ * [Download Images From Google Query](web_programming/download_images_from_google_query.py)
+ * [Emails From Url](web_programming/emails_from_url.py)
+ * [Fetch Bbc News](web_programming/fetch_bbc_news.py)
+ * [Fetch Github Info](web_programming/fetch_github_info.py)
+ * [Fetch Jobs](web_programming/fetch_jobs.py)
+ * [Fetch Quotes](web_programming/fetch_quotes.py)
+ * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py)
+ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py)
+ * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py)
+ * [Get Imdbtop](web_programming/get_imdbtop.py)
+ * [Get Top Hn Posts](web_programming/get_top_hn_posts.py)
+ * [Get User Tweets](web_programming/get_user_tweets.py)
+ * [Giphy](web_programming/giphy.py)
+ * [Instagram Crawler](web_programming/instagram_crawler.py)
+ * [Instagram Pic](web_programming/instagram_pic.py)
+ * [Instagram Video](web_programming/instagram_video.py)
+ * [Nasa Data](web_programming/nasa_data.py)
+ * [Open Google Results](web_programming/open_google_results.py)
+ * [Random Anime Character](web_programming/random_anime_character.py)
+ * [Recaptcha Verification](web_programming/recaptcha_verification.py)
+ * [Reddit](web_programming/reddit.py)
+ * [Search Books By Isbn](web_programming/search_books_by_isbn.py)
+ * [Slack Message](web_programming/slack_message.py)
+ * [Test Fetch Github Info](web_programming/test_fetch_github_info.py)
+ * [World Covid19 Stats](web_programming/world_covid19_stats.py)
diff --git a/LICENSE.md b/LICENSE.md
index c3c2857cd..2897d02e2 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2016-2021 The Algorithms
+Copyright (c) 2016-2022 TheAlgorithms and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 1e85ed0da..d8eba4e01 100644
--- a/README.md
+++ b/README.md
@@ -1,28 +1,49 @@
-# The Algorithms - Python
-[](https://gitpod.io/#https://github.com/TheAlgorithms/Python)
-[](https://discord.gg/c7MnfGFGa6)
-[](https://gitter.im/TheAlgorithms)
-[](https://github.com/TheAlgorithms/Python/actions)
-[](https://lgtm.com/projects/g/TheAlgorithms/Python/alerts)
-[](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md)
-[](https://www.paypal.me/TheAlgorithms/100)
-
-[](https://github.com/pre-commit/pre-commit)
-[](https://github.com/psf/black)
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
All algorithms implemented in Python - for education
+
-### All algorithms implemented in Python (for education)
+Implementations are for learning purposes only. They may be less efficient than the implementations in the Python standard library. Use them at your discretion.
-These implementations are for learning purposes only. Therefore they may be less efficient than the implementations in the Python standard library.
+## Getting Started
-## Contribution Guidelines
+Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribute.
-Read our [Contribution Guidelines](CONTRIBUTING.md) before you contribute.
+## Community Channels
-## Community Channel
-
-We're on [Gitter](https://gitter.im/TheAlgorithms)! Please join us.
+We are on [Discord](https://the-algorithms.com/discord) and [Gitter](https://gitter.im/TheAlgorithms/community)! Community channels are a great way for you to ask questions and get help. Please join us!
## List of Algorithms
-See our [directory](DIRECTORY.md).
+See our [directory](DIRECTORY.md) for easier navigation and a better overview of the project.
diff --git a/arithmetic_analysis/README.md b/arithmetic_analysis/README.md
new file mode 100644
index 000000000..45cf321eb
--- /dev/null
+++ b/arithmetic_analysis/README.md
@@ -0,0 +1,7 @@
+# Arithmetic analysis
+
+Arithmetic analysis is a branch of mathematics that deals with solving linear equations.
+
+*
+*
+*
diff --git a/arithmetic_analysis/bisection.py b/arithmetic_analysis/bisection.py
index 0ef691678..e359cc170 100644
--- a/arithmetic_analysis/bisection.py
+++ b/arithmetic_analysis/bisection.py
@@ -1,4 +1,4 @@
-from typing import Callable
+from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
@@ -8,7 +8,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float:
1.0000000149011612
>>> bisection(lambda x: x ** 3 - 1, 2, 1000)
Traceback (most recent call last):
- ...
+ ...
ValueError: could not find root in given interval.
>>> bisection(lambda x: x ** 2 - 4 * x + 3, 0, 2)
1.0
@@ -16,7 +16,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float:
3.0
>>> bisection(lambda x: x ** 2 - 4 * x + 3, 4, 1000)
Traceback (most recent call last):
- ...
+ ...
ValueError: could not find root in given interval.
"""
start: float = a
@@ -32,7 +32,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float:
raise ValueError("could not find root in given interval.")
else:
mid: float = start + (end - start) / 2.0
- while abs(start - mid) > 10 ** -7: # until precisely equals to 10^-7
+ while abs(start - mid) > 10**-7: # until precisely equals to 10^-7
if function(mid) == 0:
return mid
elif function(mid) * function(start) < 0:
@@ -44,7 +44,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float:
def f(x: float) -> float:
- return x ** 3 - 2 * x - 5
+ return x**3 - 2 * x - 5
if __name__ == "__main__":
diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py
index 2dada4fbf..f0f20af8e 100644
--- a/arithmetic_analysis/gaussian_elimination.py
+++ b/arithmetic_analysis/gaussian_elimination.py
@@ -5,9 +5,13 @@ Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination
import numpy as np
+from numpy import float64
+from numpy.typing import NDArray
-def retroactive_resolution(coefficients: np.matrix, vector: np.ndarray) -> np.ndarray:
+def retroactive_resolution(
+ coefficients: NDArray[float64], vector: NDArray[float64]
+) -> NDArray[float64]:
"""
This function performs a retroactive linear system resolution
for triangular matrix
@@ -27,18 +31,20 @@ def retroactive_resolution(coefficients: np.matrix, vector: np.ndarray) -> np.nd
rows, columns = np.shape(coefficients)
- x = np.zeros((rows, 1), dtype=float)
+ x: NDArray[float64] = np.zeros((rows, 1), dtype=float)
for row in reversed(range(rows)):
- sum = 0
+ total = 0
for col in range(row + 1, columns):
- sum += coefficients[row, col] * x[col]
+ total += coefficients[row, col] * x[col]
- x[row, 0] = (vector[row] - sum) / coefficients[row, row]
+ x[row, 0] = (vector[row] - total) / coefficients[row, row]
return x
-def gaussian_elimination(coefficients: np.matrix, vector: np.ndarray) -> np.ndarray:
+def gaussian_elimination(
+ coefficients: NDArray[float64], vector: NDArray[float64]
+) -> NDArray[float64]:
"""
This function performs Gaussian elimination method
@@ -60,7 +66,7 @@ def gaussian_elimination(coefficients: np.matrix, vector: np.ndarray) -> np.ndar
return np.array((), dtype=float)
# augmented matrix
- augmented_mat = np.concatenate((coefficients, vector), axis=1)
+ augmented_mat: NDArray[float64] = np.concatenate((coefficients, vector), axis=1)
augmented_mat = augmented_mat.astype("float64")
# scale the matrix leaving it triangular
diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py
index 7b5006a1a..7aaecf174 100644
--- a/arithmetic_analysis/in_static_equilibrium.py
+++ b/arithmetic_analysis/in_static_equilibrium.py
@@ -1,21 +1,29 @@
"""
Checks if a system of forces is in static equilibrium.
"""
-from typing import List
+from __future__ import annotations
-from numpy import array, cos, cross, ndarray, radians, sin
+from numpy import array, cos, cross, float64, radians, sin
+from numpy.typing import NDArray
def polar_force(
magnitude: float, angle: float, radian_mode: bool = False
-) -> List[float]:
+) -> list[float]:
"""
Resolves force along rectangular components.
(force, angle) => (force_x, force_y)
- >>> polar_force(10, 45)
- [7.0710678118654755, 7.071067811865475]
- >>> polar_force(10, 3.14, radian_mode=True)
- [-9.999987317275394, 0.01592652916486828]
+ >>> import math
+ >>> force = polar_force(10, 45)
+ >>> math.isclose(force[0], 7.071067811865477)
+ True
+ >>> math.isclose(force[1], 7.0710678118654755)
+ True
+ >>> force = polar_force(10, 3.14, radian_mode=True)
+ >>> math.isclose(force[0], -9.999987317275396)
+ True
+ >>> math.isclose(force[1], 0.01592652916486828)
+ True
"""
if radian_mode:
return [magnitude * cos(angle), magnitude * sin(angle)]
@@ -23,7 +31,7 @@ def polar_force(
def in_static_equilibrium(
- forces: ndarray, location: ndarray, eps: float = 10 ** -1
+ forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
"""
Check if a system is in equilibrium.
@@ -42,7 +50,7 @@ def in_static_equilibrium(
False
"""
# summation of moments is zero
- moments: ndarray = cross(location, forces)
+ moments: NDArray[float64] = cross(location, forces)
sum_moments: float = sum(moments)
return abs(sum_moments) < eps
@@ -50,10 +58,14 @@ def in_static_equilibrium(
if __name__ == "__main__":
# Test to check if it works
forces = array(
- [polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90)]
+ [
+ polar_force(718.4, 180 - 30),
+ polar_force(879.54, 45),
+ polar_force(100, -90),
+ ]
)
- location = array([[0, 0], [0, 0], [0, 0]])
+ location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
diff --git a/arithmetic_analysis/intersection.py b/arithmetic_analysis/intersection.py
index 204dd5d8a..826c0ead0 100644
--- a/arithmetic_analysis/intersection.py
+++ b/arithmetic_analysis/intersection.py
@@ -1,5 +1,5 @@
import math
-from typing import Callable
+from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
@@ -10,7 +10,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl
0.9999999999954654
>>> intersection(lambda x: x ** 3 - 1, 5, 5)
Traceback (most recent call last):
- ...
+ ...
ZeroDivisionError: float division by zero, could not find root
>>> intersection(lambda x: x ** 3 - 1, 100, 200)
1.0000000000003888
@@ -24,7 +24,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl
0.0
>>> intersection(math.cos, -math.pi, math.pi)
Traceback (most recent call last):
- ...
+ ...
ZeroDivisionError: float division by zero, could not find root
"""
x_n: float = x0
@@ -35,7 +35,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl
x_n2: float = x_n1 - (
function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
)
- if abs(x_n2 - x_n1) < 10 ** -5:
+ if abs(x_n2 - x_n1) < 10**-5:
return x_n2
x_n = x_n1
x_n1 = x_n2
diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py
new file mode 100644
index 000000000..17edf4bf4
--- /dev/null
+++ b/arithmetic_analysis/jacobi_iteration_method.py
@@ -0,0 +1,173 @@
+"""
+Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method
+"""
+from __future__ import annotations
+
+import numpy as np
+from numpy import float64
+from numpy.typing import NDArray
+
+
+# Method to find solution of system of linear equations
+def jacobi_iteration_method(
+ coefficient_matrix: NDArray[float64],
+ constant_matrix: NDArray[float64],
+ init_val: list[int],
+ iterations: int,
+) -> list[float]:
+ """
+ Jacobi Iteration Method:
+ An iterative algorithm to determine the solutions of strictly diagonally dominant
+ system of linear equations
+
+ 4x1 + x2 + x3 = 2
+ x1 + 5x2 + 2x3 = -6
+ x1 + 2x2 + 4x3 = -4
+
+ x_init = [0.5, -0.5 , -0.5]
+
+ Examples:
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(coefficient, constant, init_val, iterations)
+ [0.909375, -1.14375, -0.7484375]
+
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(coefficient, constant, init_val, iterations)
+ Traceback (most recent call last):
+ ...
+ ValueError: Coefficient matrix dimensions must be nxn but received 2x3
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(
+ ... coefficient, constant, init_val, iterations
+ ... ) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but
+ received 3x3 and 2x1
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(
+ ... coefficient, constant, init_val, iterations
+ ... ) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ ValueError: Number of initial values must be equal to number of rows in coefficient
+ matrix but received 2 and 3
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 0
+ >>> jacobi_iteration_method(coefficient, constant, init_val, iterations)
+ Traceback (most recent call last):
+ ...
+ ValueError: Iterations must be at least 1
+ """
+
+ rows1, cols1 = coefficient_matrix.shape
+ rows2, cols2 = constant_matrix.shape
+
+ if rows1 != cols1:
+ msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
+ raise ValueError(msg)
+
+ if cols2 != 1:
+ msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
+ raise ValueError(msg)
+
+ if rows1 != rows2:
+ msg = (
+ "Coefficient and constant matrices dimensions must be nxn and nx1 but "
+ f"received {rows1}x{cols1} and {rows2}x{cols2}"
+ )
+ raise ValueError(msg)
+
+ if len(init_val) != rows1:
+ msg = (
+ "Number of initial values must be equal to number of rows in coefficient "
+ f"matrix but received {len(init_val)} and {rows1}"
+ )
+ raise ValueError(msg)
+
+ if iterations <= 0:
+ raise ValueError("Iterations must be at least 1")
+
+ table: NDArray[float64] = np.concatenate(
+ (coefficient_matrix, constant_matrix), axis=1
+ )
+
+ rows, cols = table.shape
+
+ strictly_diagonally_dominant(table)
+
+ # Iterates the whole matrix for given number of times
+ for _ in range(iterations):
+ new_val = []
+ for row in range(rows):
+ temp = 0
+ for col in range(cols):
+ if col == row:
+ denom = table[row][col]
+ elif col == cols - 1:
+ val = table[row][col]
+ else:
+ temp += (-1) * table[row][col] * init_val[col]
+ temp = (temp + val) / denom
+ new_val.append(temp)
+ init_val = new_val
+
+ return [float(i) for i in new_val]
+
+
+# Checks if the given matrix is strictly diagonally dominant
+def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
+ """
+ >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]])
+ >>> strictly_diagonally_dominant(table)
+ True
+
+ >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]])
+ >>> strictly_diagonally_dominant(table)
+ Traceback (most recent call last):
+ ...
+ ValueError: Coefficient matrix is not strictly diagonally dominant
+ """
+
+ rows, cols = table.shape
+
+ is_diagonally_dominant = True
+
+ for i in range(0, rows):
+ total = 0
+ for j in range(0, cols - 1):
+ if i == j:
+ continue
+ else:
+ total += table[i][j]
+
+ if table[i][i] <= total:
+ raise ValueError("Coefficient matrix is not strictly diagonally dominant")
+
+ return is_diagonally_dominant
+
+
+# Test Cases
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py
index 5bb631758..eaabce544 100644
--- a/arithmetic_analysis/lu_decomposition.py
+++ b/arithmetic_analysis/lu_decomposition.py
@@ -1,58 +1,102 @@
-"""Lower-Upper (LU) Decomposition.
-
-Reference:
-- https://en.wikipedia.org/wiki/LU_decomposition
"""
-from typing import Tuple
+Lower–upper (LU) decomposition factors a matrix as a product of a lower
+triangular matrix and an upper triangular matrix. A square matrix has an LU
+decomposition under the following conditions:
+ - If the matrix is invertible, then it has an LU decomposition if and only
+ if all of its leading principal minors are non-zero (see
+ https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of
+ leading principal minors of a matrix).
+ - If the matrix is singular (i.e., not invertible) and it has a rank of k
+ (i.e., it has k linearly independent columns), then it has an LU
+ decomposition if its first k leading principal minors are non-zero.
+
+This algorithm will simply attempt to perform LU decomposition on any square
+matrix and raise an error if no such decomposition exists.
+
+Reference: https://en.wikipedia.org/wiki/LU_decomposition
+"""
+from __future__ import annotations
import numpy as np
-def lower_upper_decomposition(table: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
- """Lower-Upper (LU) Decomposition
-
- Example:
-
+def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+ """
+ Perform LU decomposition on a given matrix and raises an error if the matrix
+ isn't square or if no such decomposition exists
>>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
- >>> outcome = lower_upper_decomposition(matrix)
- >>> outcome[0]
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ >>> lower_mat
array([[1. , 0. , 0. ],
[0. , 1. , 0. ],
[2.5, 8. , 1. ]])
- >>> outcome[1]
+ >>> upper_mat
array([[ 2. , -2. , 1. ],
[ 0. , 1. , 2. ],
[ 0. , 0. , -17.5]])
+ >>> matrix = np.array([[4, 3], [6, 3]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ >>> lower_mat
+ array([[1. , 0. ],
+ [1.5, 1. ]])
+ >>> upper_mat
+ array([[ 4. , 3. ],
+ [ 0. , -1.5]])
+
+ # Matrix is not square
>>> matrix = np.array([[2, -2, 1], [0, 1, 2]])
- >>> lower_upper_decomposition(matrix)
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
Traceback (most recent call last):
- ...
+ ...
ValueError: 'table' has to be of square shaped array but got a 2x3 array:
[[ 2 -2 1]
[ 0 1 2]]
+
+ # Matrix is invertible, but its first leading principal minor is 0
+ >>> matrix = np.array([[0, 1], [1, 0]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: No LU decomposition exists
+
+ # Matrix is singular, but its first leading principal minor is 1
+ >>> matrix = np.array([[1, 0], [1, 0]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ >>> lower_mat
+ array([[1., 0.],
+ [1., 1.]])
+ >>> upper_mat
+ array([[1., 0.],
+ [0., 0.]])
+
+ # Matrix is singular, but its first leading principal minor is 0
+ >>> matrix = np.array([[0, 1], [0, 1]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: No LU decomposition exists
"""
- # Table that contains our data
- # Table has to be a square array so we need to check first
+ # Ensure that table is a square array
rows, columns = np.shape(table)
if rows != columns:
- raise ValueError(
- f"'table' has to be of square shaped array but got a {rows}x{columns} "
- + f"array:\n{table}"
+ msg = (
+ "'table' has to be of square shaped array but got a "
+ f"{rows}x{columns} array:\n{table}"
)
+ raise ValueError(msg)
+
lower = np.zeros((rows, columns))
upper = np.zeros((rows, columns))
for i in range(columns):
for j in range(i):
- total = 0
- for k in range(j):
- total += lower[i][k] * upper[k][j]
+ total = sum(lower[i][k] * upper[k][j] for k in range(j))
+ if upper[j][j] == 0:
+ raise ArithmeticError("No LU decomposition exists")
lower[i][j] = (table[i][j] - total) / upper[j][j]
lower[i][i] = 1
for j in range(i, columns):
- total = 0
- for k in range(i):
- total += lower[i][k] * upper[k][j]
+ total = sum(lower[i][k] * upper[k][j] for k in range(j))
upper[i][j] = table[i][j] - total
return lower, upper
diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py
index 66cde4b73..466f6c18c 100644
--- a/arithmetic_analysis/newton_forward_interpolation.py
+++ b/arithmetic_analysis/newton_forward_interpolation.py
@@ -1,7 +1,7 @@
# https://www.geeksforgeeks.org/newton-forward-backward-interpolation/
+from __future__ import annotations
import math
-from typing import List
# for calculating u value
@@ -22,8 +22,8 @@ def ucal(u: float, p: int) -> float:
def main() -> None:
n = int(input("enter the numbers of values: "))
- y: List[List[float]] = []
- for i in range(n):
+ y: list[list[float]] = []
+ for _ in range(n):
y.append([])
for i in range(n):
for j in range(n):
diff --git a/arithmetic_analysis/newton_method.py b/arithmetic_analysis/newton_method.py
index a9a943726..5127bfcaf 100644
--- a/arithmetic_analysis/newton_method.py
+++ b/arithmetic_analysis/newton_method.py
@@ -1,7 +1,7 @@
"""Newton's Method."""
# Newton's Method - https://en.wikipedia.org/wiki/Newton%27s_method
-from typing import Callable
+from collections.abc import Callable
RealFunc = Callable[[float], float] # type alias for a real -> real function
@@ -28,7 +28,7 @@ def newton(
1.5707963267948966
>>> newton(math.cos, lambda x: -math.sin(x), 0)
Traceback (most recent call last):
- ...
+ ...
ZeroDivisionError: Could not find root
"""
prev_guess = float(starting_int)
@@ -37,17 +37,17 @@ def newton(
next_guess = prev_guess - function(prev_guess) / derivative(prev_guess)
except ZeroDivisionError:
raise ZeroDivisionError("Could not find root") from None
- if abs(prev_guess - next_guess) < 10 ** -5:
+ if abs(prev_guess - next_guess) < 10**-5:
return next_guess
prev_guess = next_guess
def f(x: float) -> float:
- return (x ** 3) - (2 * x) - 5
+ return (x**3) - (2 * x) - 5
def f1(x: float) -> float:
- return 3 * (x ** 2) - 2
+ return 3 * (x**2) - 2
if __name__ == "__main__":
diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py
index 146bb0aa5..1b90ad417 100644
--- a/arithmetic_analysis/newton_raphson.py
+++ b/arithmetic_analysis/newton_raphson.py
@@ -2,15 +2,16 @@
# Author: Syed Haseeb Shah (github.com/QuantumNovice)
# The Newton-Raphson method (also known as Newton's method) is a way to
# quickly find a good approximation for the root of a real-valued function
+from __future__ import annotations
+
from decimal import Decimal
-from math import * # noqa: F401, F403
-from typing import Union
+from math import * # noqa: F403
from sympy import diff
def newton_raphson(
- func: str, a: Union[float, Decimal], precision: float = 10 ** -10
+ func: str, a: float | Decimal, precision: float = 10**-10
) -> float:
"""Finds root from the point 'a' onwards by Newton-Raphson method
>>> newton_raphson("sin(x)", 2)
@@ -24,9 +25,11 @@ def newton_raphson(
"""
x = a
while True:
- x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))
+ x = Decimal(x) - (
+ Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307
+ )
# This number dictates the accuracy of the answer
- if abs(eval(func)) < precision:
+ if abs(eval(func)) < precision: # noqa: S307
return float(x)
diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py
new file mode 100644
index 000000000..f61841e2e
--- /dev/null
+++ b/arithmetic_analysis/newton_raphson_new.py
@@ -0,0 +1,83 @@
+# Implementing Newton Raphson method in Python
+# Author: Saksham Gupta
+#
+# The Newton-Raphson method (also known as Newton's method) is a way to
+# quickly find a good approximation for the root of a real-valued function
+# The method can also be extended to complex functions
+#
+# Newton's Method - https://en.wikipedia.org/wiki/Newton's_method
+
+from sympy import diff, lambdify, symbols
+from sympy.functions import * # noqa: F403
+
+
+def newton_raphson(
+ function: str,
+ starting_point: complex,
+ variable: str = "x",
+ precision: float = 10**-10,
+ multiplicity: int = 1,
+) -> complex:
+ """Finds root from the 'starting_point' onwards by Newton-Raphson method
+ Refer to https://docs.sympy.org/latest/modules/functions/index.html
+ for usable mathematical functions
+
+ >>> newton_raphson("sin(x)", 2)
+ 3.141592653589793
+ >>> newton_raphson("x**4 -5", 0.4 + 5j)
+ (-7.52316384526264e-37+1.4953487812212207j)
+ >>> newton_raphson('log(y) - 1', 2, variable='y')
+ 2.7182818284590455
+ >>> newton_raphson('exp(x) - 1', 10, precision=0.005)
+ 1.2186556186174883e-10
+ >>> newton_raphson('cos(x)', 0)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError: Could not find root
+ """
+
+ x = symbols(variable)
+ func = lambdify(x, function)
+ diff_function = lambdify(x, diff(function, x))
+
+ prev_guess = starting_point
+
+ while True:
+ if diff_function(prev_guess) != 0:
+ next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
+ prev_guess
+ )
+ else:
+ raise ZeroDivisionError("Could not find root") from None
+
+ # Precision is checked by comparing the difference of consecutive guesses
+ if abs(next_guess - prev_guess) < precision:
+ return next_guess
+
+ prev_guess = next_guess
+
+
+# Let's Execute
+if __name__ == "__main__":
+ # Find root of trigonometric function
+ # Find value of pi
+ print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
+
+ # Find root of polynomial
+ # Find fourth Root of 5
+ print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}")
+
+ # Find value of e
+ print(
+ "The root of log(y) - 1 = 0 is ",
+ f"{newton_raphson('log(y) - 1', 2, variable='y')}",
+ )
+
+ # Exponential Roots
+ print(
+ "The root of exp(x) - 1 = 0 is",
+ f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
+ )
+
+ # Find root of cos(x)
+ print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py
index 45bcb185f..d28a46206 100644
--- a/arithmetic_analysis/secant_method.py
+++ b/arithmetic_analysis/secant_method.py
@@ -20,7 +20,7 @@ def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float
"""
x0 = lower_bound
x1 = upper_bound
- for i in range(0, repeats):
+ for _ in range(0, repeats):
x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0))
return x1
diff --git a/audio_filters/README.md b/audio_filters/README.md
new file mode 100644
index 000000000..4419bd8bd
--- /dev/null
+++ b/audio_filters/README.md
@@ -0,0 +1,9 @@
+# Audio Filter
+
+Audio filters work on the frequency of an audio signal to attenuate unwanted frequency and amplify wanted ones.
+They are used within anything related to sound, whether it is radio communication or a hi-fi system.
+
+*
+*
+*
+*
diff --git a/audio_filters/__init__.py b/audio_filters/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py
new file mode 100644
index 000000000..cffedb7a6
--- /dev/null
+++ b/audio_filters/butterworth_filter.py
@@ -0,0 +1,226 @@
+from math import cos, sin, sqrt, tau
+
+from audio_filters.iir_filter import IIRFilter
+
+"""
+Create 2nd-order IIR filters with Butterworth design.
+
+Code based on https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html
+Alternatively you can use scipy.signal.butter, which should yield the same results.
+"""
+
+
+def make_lowpass(
+ frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008
+) -> IIRFilter:
+ """
+ Creates a low-pass filter
+
+ >>> filter = make_lowpass(1000, 48000)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.004277569313094809,
+ 0.008555138626189618, 0.004277569313094809]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+
+ b0 = (1 - _cos) / 2
+ b1 = 1 - _cos
+
+ a0 = 1 + alpha
+ a1 = -2 * _cos
+ a2 = 1 - alpha
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
+ return filt
+
+
+def make_highpass(
+ frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008
+) -> IIRFilter:
+ """
+ Creates a high-pass filter
+
+ >>> filter = make_highpass(1000, 48000)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.9957224306869052,
+ -1.9914448613738105, 0.9957224306869052]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+
+ b0 = (1 + _cos) / 2
+ b1 = -1 - _cos
+
+ a0 = 1 + alpha
+ a1 = -2 * _cos
+ a2 = 1 - alpha
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
+ return filt
+
+
+def make_bandpass(
+ frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008
+) -> IIRFilter:
+ """
+ Creates a band-pass filter
+
+ >>> filter = make_bandpass(1000, 48000)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.06526309611002579,
+ 0, -0.06526309611002579]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+
+ b0 = _sin / 2
+ b1 = 0
+ b2 = -b0
+
+ a0 = 1 + alpha
+ a1 = -2 * _cos
+ a2 = 1 - alpha
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
+ return filt
+
+
+def make_allpass(
+ frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008
+) -> IIRFilter:
+ """
+ Creates an all-pass filter
+
+ >>> filter = make_allpass(1000, 48000)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.9077040443587427,
+ -1.9828897227476208, 1.0922959556412573]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+
+ b0 = 1 - alpha
+ b1 = -2 * _cos
+ b2 = 1 + alpha
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
+ return filt
+
+
+def make_peak(
+ frequency: int,
+ samplerate: int,
+ gain_db: float,
+ q_factor: float = 1 / sqrt(2), # noqa: B008
+) -> IIRFilter:
+ """
+ Creates a peak filter
+
+ >>> filter = make_peak(1000, 48000, 6)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [1.0653405327119334, -1.9828897227476208, 0.9346594672880666, 1.1303715025601122,
+ -1.9828897227476208, 0.8696284974398878]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+ big_a = 10 ** (gain_db / 40)
+
+ b0 = 1 + alpha * big_a
+ b1 = -2 * _cos
+ b2 = 1 - alpha * big_a
+ a0 = 1 + alpha / big_a
+ a1 = -2 * _cos
+ a2 = 1 - alpha / big_a
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
+ return filt
+
+
+def make_lowshelf(
+ frequency: int,
+ samplerate: int,
+ gain_db: float,
+ q_factor: float = 1 / sqrt(2), # noqa: B008
+) -> IIRFilter:
+ """
+ Creates a low-shelf filter
+
+ >>> filter = make_lowshelf(1000, 48000, 6)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [3.0409336710888786, -5.608870992220748, 2.602157875636628, 3.139954022810743,
+ -5.591841778072785, 2.5201667380627257]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+ big_a = 10 ** (gain_db / 40)
+ pmc = (big_a + 1) - (big_a - 1) * _cos
+ ppmc = (big_a + 1) + (big_a - 1) * _cos
+ mpc = (big_a - 1) - (big_a + 1) * _cos
+ pmpc = (big_a - 1) + (big_a + 1) * _cos
+ aa2 = 2 * sqrt(big_a) * alpha
+
+ b0 = big_a * (pmc + aa2)
+ b1 = 2 * big_a * mpc
+ b2 = big_a * (pmc - aa2)
+ a0 = ppmc + aa2
+ a1 = -2 * pmpc
+ a2 = ppmc - aa2
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
+ return filt
+
+
+def make_highshelf(
+ frequency: int,
+ samplerate: int,
+ gain_db: float,
+ q_factor: float = 1 / sqrt(2), # noqa: B008
+) -> IIRFilter:
+ """
+ Creates a high-shelf filter
+
+ >>> filter = make_highshelf(1000, 48000, 6)
+ >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE
+ [2.2229172136088806, -3.9587208137297303, 1.7841414181566304, 4.295432981120543,
+ -7.922740859457287, 3.6756456963725253]
+ """
+ w0 = tau * frequency / samplerate
+ _sin = sin(w0)
+ _cos = cos(w0)
+ alpha = _sin / (2 * q_factor)
+ big_a = 10 ** (gain_db / 40)
+ pmc = (big_a + 1) - (big_a - 1) * _cos
+ ppmc = (big_a + 1) + (big_a - 1) * _cos
+ mpc = (big_a - 1) - (big_a + 1) * _cos
+ pmpc = (big_a - 1) + (big_a + 1) * _cos
+ aa2 = 2 * sqrt(big_a) * alpha
+
+ b0 = big_a * (ppmc + aa2)
+ b1 = -2 * big_a * pmpc
+ b2 = big_a * (ppmc - aa2)
+ a0 = pmc + aa2
+ a1 = 2 * mpc
+ a2 = pmc - aa2
+
+ filt = IIRFilter(2)
+ filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
+ return filt
diff --git a/audio_filters/equal_loudness_filter.py.broken.txt b/audio_filters/equal_loudness_filter.py.broken.txt
new file mode 100644
index 000000000..88cba8533
--- /dev/null
+++ b/audio_filters/equal_loudness_filter.py.broken.txt
@@ -0,0 +1,61 @@
+from json import loads
+from pathlib import Path
+
+import numpy as np
+from yulewalker import yulewalk
+
+from audio_filters.butterworth_filter import make_highpass
+from audio_filters.iir_filter import IIRFilter
+
+data = loads((Path(__file__).resolve().parent / "loudness_curve.json").read_text())
+
+
+class EqualLoudnessFilter:
+ r"""
+ An equal-loudness filter which compensates for the human ear's non-linear response
+ to sound.
+ This filter corrects this by cascading a yulewalk filter and a butterworth filter.
+
+ Designed for use with samplerate of 44.1kHz and above. If you're using a lower
+ samplerate, use with caution.
+
+ Code based on matlab implementation at https://bit.ly/3eqh2HU
+ (url shortened for ruff)
+
+ Target curve: https://i.imgur.com/3g2VfaM.png
+ Yulewalk response: https://i.imgur.com/J9LnJ4C.png
+ Butterworth and overall response: https://i.imgur.com/3g2VfaM.png
+
+ Images and original matlab implementation by David Robinson, 2001
+ """
+
+ def __init__(self, samplerate: int = 44100) -> None:
+ self.yulewalk_filter = IIRFilter(10)
+ self.butterworth_filter = make_highpass(150, samplerate)
+
+ # pad the data to nyquist
+ curve_freqs = np.array(data["frequencies"] + [max(20000.0, samplerate / 2)])
+ curve_gains = np.array(data["gains"] + [140])
+
+ # Convert to angular frequency
+ freqs_normalized = curve_freqs / samplerate * 2
+ # Invert the curve and normalize to 0dB
+ gains_normalized = np.power(10, (np.min(curve_gains) - curve_gains) / 20)
+
+ # Scipy's `yulewalk` function is a stub, so we're using the
+ # `yulewalker` library instead.
+ # This function computes the coefficients using a least-squares
+ # fit to the specified curve.
+ ya, yb = yulewalk(10, freqs_normalized, gains_normalized)
+ self.yulewalk_filter.set_coefficients(ya, yb)
+
+ def process(self, sample: float) -> float:
+ """
+ Process a single sample through both filters
+
+ >>> filt = EqualLoudnessFilter()
+ >>> filt.process(0.0)
+ 0.0
+ """
+ tmp = self.yulewalk_filter.process(sample)
+ return self.butterworth_filter.process(tmp)
diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py
new file mode 100644
index 000000000..f3c1ad43b
--- /dev/null
+++ b/audio_filters/iir_filter.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+
+class IIRFilter:
+ r"""
+ N-Order IIR filter
+ Assumes working with float samples normalized on [-1, 1]
+
+ ---
+
+ Implementation details:
+ Based on the 2nd-order function from
+ https://en.wikipedia.org/wiki/Digital_biquad_filter,
+ this generalized N-order function was made.
+
+ Using the following transfer function
+ H(z)=\frac{b_{0}+b_{1}z^{-1}+b_{2}z^{-2}+...+b_{k}z^{-k}}{a_{0}+a_{1}z^{-1}+a_{2}z^{-2}+...+a_{k}z^{-k}}
+ we can rewrite this to
+ y[n]={\frac{1}{a_{0}}}\left(\left(b_{0}x[n]+b_{1}x[n-1]+b_{2}x[n-2]+...+b_{k}x[n-k]\right)-\left(a_{1}y[n-1]+a_{2}y[n-2]+...+a_{k}y[n-k]\right)\right)
+ """
+
+ def __init__(self, order: int) -> None:
+ self.order = order
+
+ # a_{0} ... a_{k}
+ self.a_coeffs = [1.0] + [0.0] * order
+ # b_{0} ... b_{k}
+ self.b_coeffs = [1.0] + [0.0] * order
+
+ # x[n-1] ... x[n-k]
+ self.input_history = [0.0] * self.order
+ # y[n-1] ... y[n-k]
+ self.output_history = [0.0] * self.order
+
+ def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
+ """
+ Set the coefficients for the IIR filter. These should both be of size order + 1.
+ a_0 may be left out, and it will use 1.0 as default value.
+
+ This method works well with scipy's filter design functions
+ >>> # Make a 2nd-order 1000Hz butterworth lowpass filter
+ >>> import scipy.signal
+ >>> b_coeffs, a_coeffs = scipy.signal.butter(2, 1000,
+ ... btype='lowpass',
+ ... fs=48000)
+ >>> filt = IIRFilter(2)
+ >>> filt.set_coefficients(a_coeffs, b_coeffs)
+ """
+ if len(a_coeffs) < self.order:
+ a_coeffs = [1.0, *a_coeffs]
+
+ if len(a_coeffs) != self.order + 1:
+ msg = (
+ f"Expected a_coeffs to have {self.order + 1} elements "
+ f"for {self.order}-order filter, got {len(a_coeffs)}"
+ )
+ raise ValueError(msg)
+
+ if len(b_coeffs) != self.order + 1:
+ msg = (
+ f"Expected b_coeffs to have {self.order + 1} elements "
+ f"for {self.order}-order filter, got {len(a_coeffs)}"
+ )
+ raise ValueError(msg)
+
+ self.a_coeffs = a_coeffs
+ self.b_coeffs = b_coeffs
+
+ def process(self, sample: float) -> float:
+ """
+ Calculate y[n]
+
+ >>> filt = IIRFilter(2)
+ >>> filt.process(0)
+ 0.0
+ """
+ result = 0.0
+
+ # Start at index 1 and do index 0 at the end.
+ for i in range(1, self.order + 1):
+ result += (
+ self.b_coeffs[i] * self.input_history[i - 1]
+ - self.a_coeffs[i] * self.output_history[i - 1]
+ )
+
+ result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
+
+ self.input_history[1:] = self.input_history[:-1]
+ self.output_history[1:] = self.output_history[:-1]
+
+ self.input_history[0] = sample
+ self.output_history[0] = result
+
+ return result
diff --git a/audio_filters/loudness_curve.json b/audio_filters/loudness_curve.json
new file mode 100644
index 000000000..fc066a081
--- /dev/null
+++ b/audio_filters/loudness_curve.json
@@ -0,0 +1,76 @@
+{
+ "_comment": "The following is a representative average of the Equal Loudness Contours as measured by Robinson and Dadson, 1956",
+ "_doi": "10.1088/0508-3443/7/5/302",
+ "frequencies": [
+ 0,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1500,
+ 2000,
+ 2500,
+ 3000,
+ 3700,
+ 4000,
+ 5000,
+ 6000,
+ 7000,
+ 8000,
+ 9000,
+ 10000,
+ 12000,
+ 15000,
+ 20000
+ ],
+ "gains": [
+ 120,
+ 113,
+ 103,
+ 97,
+ 93,
+ 91,
+ 89,
+ 87,
+ 86,
+ 85,
+ 78,
+ 76,
+ 76,
+ 76,
+ 76,
+ 77,
+ 78,
+ 79.5,
+ 80,
+ 79,
+ 77,
+ 74,
+ 71.5,
+ 70,
+ 70.5,
+ 74,
+ 79,
+ 84,
+ 86,
+ 86,
+ 85,
+ 95,
+ 110,
+ 125
+ ]
+}
diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py
new file mode 100644
index 000000000..097b8152b
--- /dev/null
+++ b/audio_filters/show_response.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+from math import pi
+from typing import Protocol
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+class FilterType(Protocol):
+ def process(self, sample: float) -> float:
+ """
+ Calculate y[n]
+
+ >>> issubclass(FilterType, Protocol)
+ True
+ """
+ return 0.0
+
+
+def get_bounds(
+ fft_results: np.ndarray, samplerate: int
+) -> tuple[int | float, int | float]:
+ """
+ Get bounds for printing fft results
+
+ >>> import numpy
+ >>> array = numpy.linspace(-20.0, 20.0, 1000)
+ >>> get_bounds(array, 1000)
+ (-20, 20)
+ """
+ lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
+ highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
+ return lowest, highest
+
+
+def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
+ """
+ Show frequency response of a filter
+
+ >>> from audio_filters.iir_filter import IIRFilter
+ >>> filt = IIRFilter(4)
+ >>> show_frequency_response(filt, 48000)
+ """
+
+ size = 512
+ inputs = [1] + [0] * (size - 1)
+ outputs = [filter_type.process(item) for item in inputs]
+
+ filler = [0] * (samplerate - size) # zero-padding
+ outputs += filler
+ fft_out = np.abs(np.fft.fft(outputs))
+ fft_db = 20 * np.log10(fft_out)
+
+ # Frequencies on log scale from 24 to nyquist frequency
+ plt.xlim(24, samplerate / 2 - 1)
+ plt.xlabel("Frequency (Hz)")
+ plt.xscale("log")
+
+ # Display within reasonable bounds
+ bounds = get_bounds(fft_db, samplerate)
+ plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
+ plt.ylabel("Gain (dB)")
+
+ plt.plot(fft_db)
+ plt.show()
+
+
+def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
+ """
+ Show phase response of a filter
+
+ >>> from audio_filters.iir_filter import IIRFilter
+ >>> filt = IIRFilter(4)
+ >>> show_phase_response(filt, 48000)
+ """
+
+ size = 512
+ inputs = [1] + [0] * (size - 1)
+ outputs = [filter_type.process(item) for item in inputs]
+
+ filler = [0] * (samplerate - size) # zero-padding
+ outputs += filler
+ fft_out = np.angle(np.fft.fft(outputs))
+
+ # Frequencies on log scale from 24 to nyquist frequency
+ plt.xlim(24, samplerate / 2 - 1)
+ plt.xlabel("Frequency (Hz)")
+ plt.xscale("log")
+
+ plt.ylim(-2 * pi, 2 * pi)
+ plt.ylabel("Phase shift (Radians)")
+ plt.plot(np.unwrap(fft_out, -2 * pi))
+ plt.show()
diff --git a/backtracking/README.md b/backtracking/README.md
new file mode 100644
index 000000000..d4975dfb5
--- /dev/null
+++ b/backtracking/README.md
@@ -0,0 +1,8 @@
+# Backtracking
+
+Backtracking is a way to speed up the search process by removing candidates when they can't be the solution of a problem.
+
+*
+*
+*
+*
diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py
index 76462837c..bde60f032 100644
--- a/backtracking/all_combinations.py
+++ b/backtracking/all_combinations.py
@@ -3,16 +3,16 @@
numbers out of 1 ... n. We use backtracking to solve this problem.
Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!)))
"""
-from typing import List
+from __future__ import annotations
-def generate_all_combinations(n: int, k: int) -> List[List[int]]:
+def generate_all_combinations(n: int, k: int) -> list[list[int]]:
"""
>>> generate_all_combinations(n=4, k=2)
[[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
"""
- result: List[List[int]] = []
+ result: list[list[int]] = []
create_all_state(1, n, k, [], result)
return result
@@ -21,8 +21,8 @@ def create_all_state(
increment: int,
total_number: int,
level: int,
- current_list: List[int],
- total_list: List[List[int]],
+ current_list: list[int],
+ total_list: list[list[int]],
) -> None:
if level == 0:
total_list.append(current_list[:])
@@ -34,7 +34,7 @@ def create_all_state(
current_list.pop()
-def print_all_state(total_list: List[List[int]]) -> None:
+def print_all_state(total_list: list[list[int]]) -> None:
for i in total_list:
print(*i)
diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py
index a0032c5ca..ff8a53e0d 100644
--- a/backtracking/all_permutations.py
+++ b/backtracking/all_permutations.py
@@ -5,18 +5,18 @@
Time complexity: O(n! * n),
where n denotes the length of the given sequence.
"""
-from typing import List, Union
+from __future__ import annotations
-def generate_all_permutations(sequence: List[Union[int, str]]) -> None:
+def generate_all_permutations(sequence: list[int | str]) -> None:
create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(
- sequence: List[Union[int, str]],
- current_sequence: List[Union[int, str]],
+ sequence: list[int | str],
+ current_sequence: list[int | str],
index: int,
- index_used: List[int],
+ index_used: list[int],
) -> None:
"""
Creates a state space tree to iterate through each branch using DFS.
@@ -44,8 +44,8 @@ print("Enter the elements")
sequence = list(map(int, input().split()))
"""
-sequence: List[Union[int, str]] = [3, 1, 2, 4]
+sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
-sequence_2: List[Union[int, str]] = ["A", "B", "C"]
+sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py
index 99db4ea46..c465fc542 100644
--- a/backtracking/all_subsequences.py
+++ b/backtracking/all_subsequences.py
@@ -5,15 +5,17 @@ of the given sequence. We use backtracking to solve this problem.
Time complexity: O(2^n),
where n denotes the length of the given sequence.
"""
-from typing import Any, List
+from __future__ import annotations
+
+from typing import Any
-def generate_all_subsequences(sequence: List[Any]) -> None:
+def generate_all_subsequences(sequence: list[Any]) -> None:
create_state_space_tree(sequence, [], 0)
def create_state_space_tree(
- sequence: List[Any], current_subsequence: List[Any], index: int
+ sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
"""
Creates a state space tree to iterate through each branch using DFS.
@@ -32,7 +34,7 @@ def create_state_space_tree(
if __name__ == "__main__":
- seq: List[Any] = [3, 1, 2, 4]
+ seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
diff --git a/backtracking/coloring.py b/backtracking/coloring.py
index 3956b21a9..9d539de8a 100644
--- a/backtracking/coloring.py
+++ b/backtracking/coloring.py
@@ -1,20 +1,19 @@
"""
Graph Coloring also called "m coloring problem"
- consists of coloring given graph with at most m colors
- such that no adjacent vertices are assigned same color
+ consists of coloring a given graph with at most m colors
+ such that no adjacent vertices are assigned the same color
Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring
"""
-from typing import List
def valid_coloring(
- neighbours: List[int], colored_vertices: List[int], color: int
+ neighbours: list[int], colored_vertices: list[int], color: int
) -> bool:
"""
- For each neighbour check if coloring constraint is satisfied
+ For each neighbour check if the coloring constraint is satisfied
If any of the neighbours fail the constraint return False
- If all neighbours validate constraint return True
+ If all neighbours validate the constraint return True
>>> neighbours = [0,1,0,1,0]
>>> colored_vertices = [0, 2, 1, 2, 0]
@@ -35,21 +34,21 @@ def valid_coloring(
def util_color(
- graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int
+ graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
"""
Pseudo-Code
Base Case:
1. Check if coloring is complete
- 1.1 If complete return True (meaning that we successfully colored graph)
+ 1.1 If complete return True (meaning that we successfully colored the graph)
Recursive Step:
- 2. Itterates over each color:
- Check if current coloring is valid:
+ 2. Iterates over each color:
+ Check if the current coloring is valid:
2.1. Color given vertex
- 2.2. Do recursive call check if this coloring leads to solving problem
- 2.4. if current coloring leads to solution return
+ 2.2. Do recursive call, check if this coloring leads to a solution
+ 2.4. if current coloring leads to a solution return
2.5. Uncolor given vertex
>>> graph = [[0, 1, 0, 0, 0],
@@ -86,7 +85,7 @@ def util_color(
return False
-def color(graph: List[List[int]], max_colors: int) -> List[int]:
+def color(graph: list[list[int]], max_colors: int) -> list[int]:
"""
Wrapper function to call subroutine called util_color
which will either return True or False.
diff --git a/backtracking/combination_sum.py b/backtracking/combination_sum.py
new file mode 100644
index 000000000..f555adb75
--- /dev/null
+++ b/backtracking/combination_sum.py
@@ -0,0 +1,66 @@
+"""
+In the Combination Sum problem, we are given a list consisting of distinct integers.
+We need to find all the combinations whose sum equals the given target.
+We can use an element more than once.
+
+Time complexity(Average Case): O(n!)
+
+Constraints:
+1 <= candidates.length <= 30
+2 <= candidates[i] <= 40
+All elements of candidates are distinct.
+1 <= target <= 40
+"""
+
+
+def backtrack(
+ candidates: list, path: list, answer: list, target: int, previous_index: int
+) -> None:
+ """
+ A recursive function that searches for possible combinations. Backtracks in case
+ of a bigger current combination value than the target value.
+
+ Parameters
+ ----------
+ previous_index: Last index from the previous search
+ target: The value we need to obtain by summing our integers in the path list.
+ answer: A list of possible combinations
+ path: Current combination
+ candidates: A list of integers we can use.
+ """
+ if target == 0:
+ answer.append(path.copy())
+ else:
+ for index in range(previous_index, len(candidates)):
+ if target >= candidates[index]:
+ path.append(candidates[index])
+ backtrack(candidates, path, answer, target - candidates[index], index)
+ path.pop(len(path) - 1)
+
+
+def combination_sum(candidates: list, target: int) -> list:
+ """
+ >>> combination_sum([2, 3, 5], 8)
+ [[2, 2, 2, 2], [2, 3, 3], [3, 5]]
+ >>> combination_sum([2, 3, 6, 7], 7)
+ [[2, 2, 3], [7]]
+ >>> combination_sum([-8, 2.3, 0], 1)
+ Traceback (most recent call last):
+ ...
+ RecursionError: maximum recursion depth exceeded in comparison
+ """
+ path = [] # type: list[int]
+ answer = [] # type: list[int]
+ backtrack(candidates, path, answer, target, 0)
+ return answer
+
+
+def main() -> None:
+ print(combination_sum([-8, 2.3, 0], 1))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py
index 7be1ea350..4a4156d70 100644
--- a/backtracking/hamiltonian_cycle.py
+++ b/backtracking/hamiltonian_cycle.py
@@ -6,18 +6,17 @@
Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path
"""
-from typing import List
def valid_connection(
- graph: List[List[int]], next_ver: int, curr_ind: int, path: List[int]
+ graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
"""
Checks whether it is possible to add next into path by validating 2 statements
1. There should be path between current and next vertex
2. Next vertex should not be in path
- If both validations succeeds we return True saying that it is possible to connect
- this vertices either we return False
+ If both validations succeed we return True, saying that it is possible to connect
+    these vertices, otherwise we return False
Case 1:Use exact graph as in main function, with initialized values
>>> graph = [[0, 1, 0, 1, 0],
@@ -47,7 +46,7 @@ def valid_connection(
return not any(vertex == next_ver for vertex in path)
-def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int) -> bool:
+def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
"""
Pseudo-Code
Base Case:
@@ -72,7 +71,7 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
>>> curr_ind = 1
>>> util_hamilton_cycle(graph, path, curr_ind)
True
- >>> print(path)
+ >>> path
[0, 1, 2, 4, 3, 0]
Case 2: Use exact graph as in previous case, but in the properties taken from
@@ -86,7 +85,7 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
>>> curr_ind = 3
>>> util_hamilton_cycle(graph, path, curr_ind)
True
- >>> print(path)
+ >>> path
[0, 1, 2, 4, 3, 0]
"""
@@ -96,10 +95,10 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
- for next in range(0, len(graph)):
- if valid_connection(graph, next, curr_ind, path):
+ for next_ver in range(0, len(graph)):
+ if valid_connection(graph, next_ver, curr_ind, path):
# Insert current vertex into path as next transition
- path[curr_ind] = next
+ path[curr_ind] = next_ver
# Validate created path
if util_hamilton_cycle(graph, path, curr_ind + 1):
return True
@@ -108,7 +107,7 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
return False
-def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]:
+def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
r"""
Wrapper function to call subroutine called util_hamilton_cycle,
which will either return array of vertices indicating hamiltonian cycle
diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py
index 8e6613e07..cc88307b7 100644
--- a/backtracking/knight_tour.py
+++ b/backtracking/knight_tour.py
@@ -1,9 +1,9 @@
# Knight Tour Intro: https://www.youtube.com/watch?v=ab_dY3dZFHM
-from typing import List, Tuple
+from __future__ import annotations
-def get_valid_pos(position: Tuple[int, int], n: int) -> List[Tuple[int, int]]:
+def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
"""
Find all the valid positions a knight can move to from the current position.
@@ -32,7 +32,7 @@ def get_valid_pos(position: Tuple[int, int], n: int) -> List[Tuple[int, int]]:
return permissible_positions
-def is_complete(board: List[List[int]]) -> bool:
+def is_complete(board: list[list[int]]) -> bool:
"""
Check if the board (matrix) has been completely filled with non-zero values.
@@ -47,7 +47,7 @@ def is_complete(board: List[List[int]]) -> bool:
def open_knight_tour_helper(
- board: List[List[int]], pos: Tuple[int, int], curr: int
+ board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
"""
Helper function to solve knight tour problem.
@@ -68,7 +68,7 @@ def open_knight_tour_helper(
return False
-def open_knight_tour(n: int) -> List[List[int]]:
+def open_knight_tour(n: int) -> list[list[int]]:
"""
Find the solution for the knight tour problem for a board of size n. Raises
ValueError if the tour cannot be performed for the given size.
@@ -78,7 +78,7 @@ def open_knight_tour(n: int) -> List[List[int]]:
>>> open_knight_tour(2)
Traceback (most recent call last):
- ...
+ ...
ValueError: Open Kight Tour cannot be performed on a board of size 2
"""
@@ -91,7 +91,8 @@ def open_knight_tour(n: int) -> List[List[int]]:
return board
board[i][j] = 0
- raise ValueError(f"Open Kight Tour cannot be performed on a board of size {n}")
+ msg = f"Open Kight Tour cannot be performed on a board of size {n}"
+ raise ValueError(msg)
if __name__ == "__main__":
diff --git a/backtracking/minimax.py b/backtracking/minimax.py
index dda29b47d..6e310131e 100644
--- a/backtracking/minimax.py
+++ b/backtracking/minimax.py
@@ -7,12 +7,13 @@ if move is of maximizer return true else false
leaves of game tree is stored in scores[]
height is maximum height of Game tree
"""
+from __future__ import annotations
+
import math
-from typing import List
def minimax(
- depth: int, node_index: int, is_max: bool, scores: List[int], height: float
+ depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
"""
>>> import math
diff --git a/backtracking/minmax.py b/backtracking/minmax.py
new file mode 100644
index 000000000..9b87183cf
--- /dev/null
+++ b/backtracking/minmax.py
@@ -0,0 +1,69 @@
+"""
+Minimax helps to achieve maximum score in a game by checking all possible moves.
+
+"""
+from __future__ import annotations
+
+import math
+
+
+def minimax(
+ depth: int, node_index: int, is_max: bool, scores: list[int], height: float
+) -> int:
+ """
+ depth is current depth in game tree.
+ node_index is index of current node in scores[].
+ scores[] contains the leaves of game tree.
+ height is maximum height of game tree.
+
+ >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
+ >>> height = math.log(len(scores), 2)
+ >>> minimax(0, 0, True, scores, height)
+ 65
+ >>> minimax(-1, 0, True, scores, height)
+ Traceback (most recent call last):
+ ...
+ ValueError: Depth cannot be less than 0
+ >>> minimax(0, 0, True, [], 2)
+ Traceback (most recent call last):
+ ...
+ ValueError: Scores cannot be empty
+ >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
+ >>> height = math.log(len(scores), 2)
+ >>> minimax(0, 0, True, scores, height)
+ 12
+ """
+
+ if depth < 0:
+ raise ValueError("Depth cannot be less than 0")
+
+ if not scores:
+ raise ValueError("Scores cannot be empty")
+
+ if depth == height:
+ return scores[node_index]
+
+ return (
+ max(
+ minimax(depth + 1, node_index * 2, False, scores, height),
+ minimax(depth + 1, node_index * 2 + 1, False, scores, height),
+ )
+ if is_max
+ else min(
+ minimax(depth + 1, node_index * 2, True, scores, height),
+ minimax(depth + 1, node_index * 2 + 1, True, scores, height),
+ )
+ )
+
+
+def main() -> None:
+ scores = [90, 23, 6, 33, 21, 65, 123, 34423]
+ height = math.log(len(scores), 2)
+ print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py
index 29b8d819a..bbf0ce44f 100644
--- a/backtracking/n_queens.py
+++ b/backtracking/n_queens.py
@@ -7,12 +7,12 @@
diagonal lines.
"""
-from typing import List
+from __future__ import annotations
solution = []
-def isSafe(board: List[List[int]], row: int, column: int) -> bool:
+def is_safe(board: list[list[int]], row: int, column: int) -> bool:
"""
This function returns a boolean value True if it is safe to place a queen there
considering the current state of the board.
@@ -40,7 +40,7 @@ def isSafe(board: List[List[int]], row: int, column: int) -> bool:
return True
-def solve(board: List[List[int]], row: int) -> bool:
+def solve(board: list[list[int]], row: int) -> bool:
"""
It creates a state space tree and calls the safe function until it receives a
False Boolean and terminates that branch and backtracks to the next
@@ -63,14 +63,14 @@ def solve(board: List[List[int]], row: int) -> bool:
If all the combinations for that particular branch are successful the board is
reinitialized for the next possible combination.
"""
- if isSafe(board, row, i):
+ if is_safe(board, row, i):
board[row][i] = 1
solve(board, row + 1)
board[row][i] = 0
return False
-def printboard(board: List[List[int]]) -> None:
+def printboard(board: list[list[int]]) -> None:
"""
Prints the boards that have a successful combination.
"""
diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py
index a8651c5c3..f3b08ab0a 100644
--- a/backtracking/n_queens_math.py
+++ b/backtracking/n_queens_math.py
@@ -1,7 +1,7 @@
r"""
Problem:
-The n queens problem is of placing N queens on a N * N chess board such that no queen
+The n queens problem is: placing N queens on a N * N chess board such that no queen
can attack any other queens placed on that chess board. This means that one queen
cannot have any other queen on its horizontal, vertical and diagonal lines.
@@ -31,7 +31,7 @@ So if we use an array and we verify that each value in the array is different to
other we know that at least the queens can't attack each other in horizontal and
vertical.
-At this point we have that halfway completed and we will treat the chessboard as a
+At this point we have it halfway completed and we will treat the chessboard as a
Cartesian plane. Hereinafter we are going to remember basic math, so in the school we
learned this formula:
@@ -47,7 +47,7 @@ This formula allow us to get the slope. For the angles 45º (right diagonal) and
See::
https://www.enotes.com/homework-help/write-equation-line-that-hits-origin-45-degree-1474860
-Then we have this another formula:
+Then we have this other formula:
Slope intercept:
@@ -59,7 +59,7 @@ we would have:
y - mx = b
-And like we already have the m values for the angles 45º and 135º, this formula would
+And since we already have the m values for the angles 45º and 135º, this formula would
look like this:
45º: y - (1)x = b
@@ -71,18 +71,18 @@ look like this:
y = row
x = column
-Applying this two formulas we can check if a queen in some position is being attacked
+Applying these two formulas we can check if a queen in some position is being attacked
for another one or vice versa.
"""
-from typing import List
+from __future__ import annotations
def depth_first_search(
- possible_board: List[int],
- diagonal_right_collisions: List[int],
- diagonal_left_collisions: List[int],
- boards: List[List[str]],
+ possible_board: list[int],
+ diagonal_right_collisions: list[int],
+ diagonal_left_collisions: list[int],
+ boards: list[list[str]],
n: int,
) -> None:
"""
@@ -107,7 +107,6 @@ def depth_first_search(
# We iterate each column in the row to find all possible results in each row
for col in range(n):
-
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
@@ -130,16 +129,16 @@ def depth_first_search(
# If it is False we call dfs function again and we update the inputs
depth_first_search(
- possible_board + [col],
- diagonal_right_collisions + [row - col],
- diagonal_left_collisions + [row + col],
+ [*possible_board, col],
+ [*diagonal_right_collisions, row - col],
+ [*diagonal_left_collisions, row + col],
boards,
n,
)
def n_queens_solution(n: int) -> None:
- boards: List[List[str]] = []
+ boards: list[list[str]] = []
depth_first_search([], [], [], boards, n)
# Print all the boards
diff --git a/backtracking/power_sum.py b/backtracking/power_sum.py
new file mode 100644
index 000000000..fcf1429f8
--- /dev/null
+++ b/backtracking/power_sum.py
@@ -0,0 +1,93 @@
+"""
+Problem source: https://www.hackerrank.com/challenges/the-power-sum/problem
+Find the number of ways that a given integer X, can be expressed as the sum
+of the Nth powers of unique, natural numbers. For example, if X=13 and N=2.
+We have to find all combinations of unique squares adding up to 13.
+The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10.
+"""
+
+from math import pow
+
+
+def backtrack(
+ needed_sum: int,
+ power: int,
+ current_number: int,
+ current_sum: int,
+ solutions_count: int,
+) -> tuple[int, int]:
+ """
+ >>> backtrack(13, 2, 1, 0, 0)
+ (0, 1)
+ >>> backtrack(100, 2, 1, 0, 0)
+ (0, 3)
+ >>> backtrack(100, 3, 1, 0, 0)
+ (0, 1)
+ >>> backtrack(800, 2, 1, 0, 0)
+ (0, 561)
+ >>> backtrack(1000, 10, 1, 0, 0)
+ (0, 0)
+ >>> backtrack(400, 2, 1, 0, 0)
+ (0, 55)
+ >>> backtrack(50, 1, 1, 0, 0)
+ (0, 3658)
+ """
+ if current_sum == needed_sum:
+ # If the sum of the powers is equal to needed_sum, then we have a solution.
+ solutions_count += 1
+ return current_sum, solutions_count
+
+ i_to_n = int(pow(current_number, power))
+ if current_sum + i_to_n <= needed_sum:
+ # If the sum of the powers is less than needed_sum, then continue adding powers.
+ current_sum += i_to_n
+ current_sum, solutions_count = backtrack(
+ needed_sum, power, current_number + 1, current_sum, solutions_count
+ )
+ current_sum -= i_to_n
+ if i_to_n < needed_sum:
+ # If the power of i is less than needed_sum, then try with the next power.
+ current_sum, solutions_count = backtrack(
+ needed_sum, power, current_number + 1, current_sum, solutions_count
+ )
+ return current_sum, solutions_count
+
+
+def solve(needed_sum: int, power: int) -> int:
+ """
+ >>> solve(13, 2)
+ 1
+ >>> solve(100, 2)
+ 3
+ >>> solve(100, 3)
+ 1
+ >>> solve(800, 2)
+ 561
+ >>> solve(1000, 10)
+ 0
+ >>> solve(400, 2)
+ 55
+ >>> solve(50, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Invalid input
+ needed_sum must be between 1 and 1000, power between 2 and 10.
+ >>> solve(-10, 5)
+ Traceback (most recent call last):
+ ...
+ ValueError: Invalid input
+ needed_sum must be between 1 and 1000, power between 2 and 10.
+ """
+ if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
+ raise ValueError(
+ "Invalid input\n"
+ "needed_sum must be between 1 and 1000, power between 2 and 10."
+ )
+
+ return backtrack(needed_sum, power, 1, 0, 0)[1] # Return the solutions_count
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py
index cd2a8f41d..7bde886dd 100644
--- a/backtracking/rat_in_maze.py
+++ b/backtracking/rat_in_maze.py
@@ -1,7 +1,7 @@
-from typing import List
+from __future__ import annotations
-def solve_maze(maze: List[List[int]]) -> bool:
+def solve_maze(maze: list[list[int]]) -> bool:
"""
This method solves the "rat in maze" problem.
In this problem we have some n by n matrix, a start point and an end point.
@@ -70,7 +70,7 @@ def solve_maze(maze: List[List[int]]) -> bool:
return solved
-def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]]) -> bool:
+def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
"""
This method is recursive starting from (i, j) and going in one of four directions:
up, down, left, right.
@@ -88,12 +88,12 @@ def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]])
solutions[i][j] = 1
return True
- lower_flag = (not (i < 0)) and (not (j < 0)) # Check lower bounds
+ lower_flag = (not i < 0) and (not j < 0) # Check lower bounds
upper_flag = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
- block_flag = (not (solutions[i][j])) and (not (maze[i][j]))
+ block_flag = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
solutions[i][j] = 1
diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py
index 593fa52d6..698dedcc2 100644
--- a/backtracking/sudoku.py
+++ b/backtracking/sudoku.py
@@ -9,9 +9,9 @@ function on the next column to see if it returns True. if yes, we
have solved the puzzle. else, we backtrack and place another number
in that cell and repeat this process.
"""
-from typing import List, Optional, Tuple
+from __future__ import annotations
-Matrix = List[List[int]]
+Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
@@ -59,7 +59,7 @@ def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
return True
-def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]:
+def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
"""
This function finds an empty location so that we can assign a number
for that particular row and column.
@@ -71,7 +71,7 @@ def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]:
return None
-def sudoku(grid: Matrix) -> Optional[Matrix]:
+def sudoku(grid: Matrix) -> Matrix | None:
"""
Takes a partially filled-in grid and attempts to assign values to
all unassigned locations in such a way to meet the requirements
diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py
index f695b8f7a..c5e23321c 100644
--- a/backtracking/sum_of_subsets.py
+++ b/backtracking/sum_of_subsets.py
@@ -6,12 +6,12 @@
Summation of the chosen numbers must be equal to given number M and one number
can be used only once.
"""
-from typing import List
+from __future__ import annotations
-def generate_sum_of_subsets_soln(nums: List[int], max_sum: int) -> List[List[int]]:
- result: List[List[int]] = []
- path: List[int] = []
+def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
+ result: list[list[int]] = []
+ path: list[int] = []
num_index = 0
remaining_nums_sum = sum(nums)
create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
@@ -19,11 +19,11 @@ def generate_sum_of_subsets_soln(nums: List[int], max_sum: int) -> List[List[int
def create_state_space_tree(
- nums: List[int],
+ nums: list[int],
max_sum: int,
num_index: int,
- path: List[int],
- result: List[List[int]],
+ path: list[int],
+ result: list[list[int]],
remaining_nums_sum: int,
) -> None:
"""
@@ -39,14 +39,14 @@ def create_state_space_tree(
if sum(path) == max_sum:
result.append(path)
return
- for num_index in range(num_index, len(nums)):
+ for index in range(num_index, len(nums)):
create_state_space_tree(
nums,
max_sum,
- num_index + 1,
- path + [nums[num_index]],
+ index + 1,
+ [*path, nums[index]],
result,
- remaining_nums_sum - nums[num_index],
+ remaining_nums_sum - nums[index],
)
diff --git a/backtracking/word_search.py b/backtracking/word_search.py
new file mode 100644
index 000000000..c9d52012b
--- /dev/null
+++ b/backtracking/word_search.py
@@ -0,0 +1,168 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 24, 2022
+
+Task:
+Given an m x n grid of characters board and a string word,
+return true if word exists in the grid.
+
+The word can be constructed from letters of sequentially adjacent cells,
+where adjacent cells are horizontally or vertically neighboring.
+The same letter cell may not be used more than once.
+
+Example:
+
+Matrix:
+---------
+|A|B|C|E|
+|S|F|C|S|
+|A|D|E|E|
+---------
+
+Word:
+"ABCCED"
+
+Result:
+True
+
+Implementation notes: Use backtracking approach.
+At each point, check all neighbors to try to find the next letter of the word.
+
+leetcode: https://leetcode.com/problems/word-search/
+
+"""
+
+
+def get_point_key(len_board: int, len_board_column: int, row: int, column: int) -> int:
+ """
+ Returns the hash key of matrix indexes.
+
+ >>> get_point_key(10, 20, 1, 0)
+ 200
+ """
+
+ return len_board * len_board_column * row + column
+
+
+def exits_word(
+ board: list[list[str]],
+ word: str,
+ row: int,
+ column: int,
+ word_index: int,
+ visited_points_set: set[int],
+) -> bool:
+ """
+ Return True if it's possible to search the word suffix
+ starting from the word_index.
+
+ >>> exits_word([["A"]], "B", 0, 0, 0, set())
+ False
+ """
+
+ if board[row][column] != word[word_index]:
+ return False
+
+ if word_index == len(word) - 1:
+ return True
+
+ traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)]
+ len_board = len(board)
+ len_board_column = len(board[0])
+ for direction in traverts_directions:
+ next_i = row + direction[0]
+ next_j = column + direction[1]
+ if not (0 <= next_i < len_board and 0 <= next_j < len_board_column):
+ continue
+
+ key = get_point_key(len_board, len_board_column, next_i, next_j)
+ if key in visited_points_set:
+ continue
+
+ visited_points_set.add(key)
+ if exits_word(board, word, next_i, next_j, word_index + 1, visited_points_set):
+ return True
+
+ visited_points_set.remove(key)
+
+ return False
+
+
+def word_exists(board: list[list[str]], word: str) -> bool:
+ """
+ >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCCED")
+ True
+ >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "SEE")
+ True
+ >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCB")
+ False
+ >>> word_exists([["A"]], "A")
+ True
+ >>> word_exists([["A","A","A","A","A","A"],
+ ... ["A","A","A","A","A","A"],
+ ... ["A","A","A","A","A","A"],
+ ... ["A","A","A","A","A","A"],
+ ... ["A","A","A","A","A","B"],
+ ... ["A","A","A","A","B","A"]],
+ ... "AAAAAAAAAAAAABB")
+ False
+ >>> word_exists([["A"]], 123)
+ Traceback (most recent call last):
+ ...
+ ValueError: The word parameter should be a string of length greater than 0.
+ >>> word_exists([["A"]], "")
+ Traceback (most recent call last):
+ ...
+ ValueError: The word parameter should be a string of length greater than 0.
+ >>> word_exists([[]], "AB")
+ Traceback (most recent call last):
+ ...
+ ValueError: The board should be a non empty matrix of single chars strings.
+ >>> word_exists([], "AB")
+ Traceback (most recent call last):
+ ...
+ ValueError: The board should be a non empty matrix of single chars strings.
+ >>> word_exists([["A"], [21]], "AB")
+ Traceback (most recent call last):
+ ...
+ ValueError: The board should be a non empty matrix of single chars strings.
+ """
+
+ # Validate board
+ board_error_message = (
+ "The board should be a non empty matrix of single chars strings."
+ )
+
+ len_board = len(board)
+ if not isinstance(board, list) or len(board) == 0:
+ raise ValueError(board_error_message)
+
+ for row in board:
+ if not isinstance(row, list) or len(row) == 0:
+ raise ValueError(board_error_message)
+
+ for item in row:
+ if not isinstance(item, str) or len(item) != 1:
+ raise ValueError(board_error_message)
+
+ # Validate word
+ if not isinstance(word, str) or len(word) == 0:
+ raise ValueError(
+ "The word parameter should be a string of length greater than 0."
+ )
+
+ len_board_column = len(board[0])
+ for i in range(len_board):
+ for j in range(len_board_column):
+ if exits_word(
+ board, word, i, j, 0, {get_point_key(len_board, len_board_column, i, j)}
+ ):
+ return True
+
+ return False
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/README.md b/bit_manipulation/README.md
index 2ef166152..3f5e028be 100644
--- a/bit_manipulation/README.md
+++ b/bit_manipulation/README.md
@@ -1,7 +1,11 @@
-https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations
-https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations
-https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types
+# Bit manipulation
-https://wiki.python.org/moin/BitManipulation
-https://wiki.python.org/moin/BitwiseOperators
-https://www.tutorialspoint.com/python3/bitwise_operators_example.htm
+Bit manipulation is the act of manipulating bits to detect errors (Hamming code), encrypt and decrypt messages (more on that in the 'ciphers' folder), or just do anything at the lowest level of your computer.
+
+*
+*
+*
+*
+*
+*
+*
diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py
index 191ff8eb4..36f6c668d 100644
--- a/bit_manipulation/binary_and_operator.py
+++ b/bit_manipulation/binary_and_operator.py
@@ -22,7 +22,7 @@ def binary_and(a: int, b: int) -> str:
>>> binary_and(0, -1)
Traceback (most recent call last):
...
- ValueError: the value of both input must be positive
+ ValueError: the value of both inputs must be positive
>>> binary_and(0, 1.1)
Traceback (most recent call last):
...
@@ -33,7 +33,7 @@ def binary_and(a: int, b: int) -> str:
TypeError: '<' not supported between instances of 'str' and 'int'
"""
if a < 0 or b < 0:
- raise ValueError("the value of both input must be positive")
+ raise ValueError("the value of both inputs must be positive")
a_binary = str(bin(a))[2:] # remove the leading "0b"
b_binary = str(bin(b))[2:] # remove the leading "0b"
diff --git a/bit_manipulation/binary_or_operator.py b/bit_manipulation/binary_or_operator.py
index dabf5bcb0..95f61f1da 100644
--- a/bit_manipulation/binary_or_operator.py
+++ b/bit_manipulation/binary_or_operator.py
@@ -21,7 +21,7 @@ def binary_or(a: int, b: int) -> str:
>>> binary_or(0, -1)
Traceback (most recent call last):
...
- ValueError: the value of both input must be positive
+ ValueError: the value of both inputs must be positive
>>> binary_or(0, 1.1)
Traceback (most recent call last):
...
@@ -32,7 +32,7 @@ def binary_or(a: int, b: int) -> str:
TypeError: '<' not supported between instances of 'str' and 'int'
"""
if a < 0 or b < 0:
- raise ValueError("the value of both input must be positive")
+ raise ValueError("the value of both inputs must be positive")
a_binary = str(bin(a))[2:] # remove the leading "0b"
b_binary = str(bin(b))[2:]
max_len = max(len(a_binary), len(b_binary))
diff --git a/bit_manipulation/binary_xor_operator.py b/bit_manipulation/binary_xor_operator.py
index 6f8962192..6206c70a9 100644
--- a/bit_manipulation/binary_xor_operator.py
+++ b/bit_manipulation/binary_xor_operator.py
@@ -22,7 +22,7 @@ def binary_xor(a: int, b: int) -> str:
>>> binary_xor(0, -1)
Traceback (most recent call last):
...
- ValueError: the value of both input must be positive
+ ValueError: the value of both inputs must be positive
>>> binary_xor(0, 1.1)
Traceback (most recent call last):
...
@@ -33,7 +33,7 @@ def binary_xor(a: int, b: int) -> str:
TypeError: '<' not supported between instances of 'str' and 'int'
"""
if a < 0 or b < 0:
- raise ValueError("the value of both input must be positive")
+ raise ValueError("the value of both inputs must be positive")
a_binary = str(bin(a))[2:] # remove the leading "0b"
b_binary = str(bin(b))[2:] # remove the leading "0b"
diff --git a/bit_manipulation/count_1s_brian_kernighan_method.py b/bit_manipulation/count_1s_brian_kernighan_method.py
new file mode 100644
index 000000000..2ed81b09d
--- /dev/null
+++ b/bit_manipulation/count_1s_brian_kernighan_method.py
@@ -0,0 +1,46 @@
+def get_1s_count(number: int) -> int:
+ """
+ Count the number of set bits in a 32 bit integer using Brian Kernighan's way.
+ Ref - https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan
+ >>> get_1s_count(25)
+ 3
+ >>> get_1s_count(37)
+ 3
+ >>> get_1s_count(21)
+ 3
+ >>> get_1s_count(58)
+ 4
+ >>> get_1s_count(0)
+ 0
+ >>> get_1s_count(256)
+ 1
+ >>> get_1s_count(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a non-negative integer
+ >>> get_1s_count(0.8)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a non-negative integer
+ >>> get_1s_count("25")
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a non-negative integer
+ """
+ if not isinstance(number, int) or number < 0:
+ raise ValueError("Input must be a non-negative integer")
+
+ count = 0
+ while number:
+ # This way we arrive at next set bit (next 1) instead of looping
+ # through each bit and checking for 1s hence the
+ # loop won't run 32 times it will only run the number of `1` times
+ number &= number - 1
+ count += 1
+ return count
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/count_number_of_one_bits.py b/bit_manipulation/count_number_of_one_bits.py
index 51fd2b630..a1687503a 100644
--- a/bit_manipulation/count_number_of_one_bits.py
+++ b/bit_manipulation/count_number_of_one_bits.py
@@ -1,34 +1,91 @@
-def get_set_bits_count(number: int) -> int:
+from timeit import timeit
+
+
+def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
"""
Count the number of set bits in a 32 bit integer
- >>> get_set_bits_count(25)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
3
- >>> get_set_bits_count(37)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(37)
3
- >>> get_set_bits_count(21)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(21)
3
- >>> get_set_bits_count(58)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(58)
4
- >>> get_set_bits_count(0)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(0)
0
- >>> get_set_bits_count(256)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(256)
1
- >>> get_set_bits_count(-1)
+ >>> get_set_bits_count_using_brian_kernighans_algorithm(-1)
Traceback (most recent call last):
...
- ValueError: the value of input must be positive
+ ValueError: the value of input must not be negative
"""
if number < 0:
- raise ValueError("the value of input must be positive")
+ raise ValueError("the value of input must not be negative")
+ result = 0
+ while number:
+ number &= number - 1
+ result += 1
+ return result
+
+
+def get_set_bits_count_using_modulo_operator(number: int) -> int:
+ """
+ Count the number of set bits in a 32 bit integer
+ >>> get_set_bits_count_using_modulo_operator(25)
+ 3
+ >>> get_set_bits_count_using_modulo_operator(37)
+ 3
+ >>> get_set_bits_count_using_modulo_operator(21)
+ 3
+ >>> get_set_bits_count_using_modulo_operator(58)
+ 4
+ >>> get_set_bits_count_using_modulo_operator(0)
+ 0
+ >>> get_set_bits_count_using_modulo_operator(256)
+ 1
+ >>> get_set_bits_count_using_modulo_operator(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: the value of input must not be negative
+ """
+ if number < 0:
+ raise ValueError("the value of input must not be negative")
result = 0
while number:
if number % 2 == 1:
result += 1
- number = number >> 1
+ number >>= 1
return result
+def benchmark() -> None:
+ """
+ Benchmark code for comparing 2 functions, with different length int values.
+ Brian Kernighan's algorithm is consistently faster than using modulo_operator.
+ """
+
+ def do_benchmark(number: int) -> None:
+ setup = "import __main__ as z"
+ print(f"Benchmark when {number = }:")
+ print(f"{get_set_bits_count_using_modulo_operator(number) = }")
+ timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
+ print(f"timeit() runs in {timing} seconds")
+ print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
+ timing = timeit(
+ "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
+ setup=setup,
+ )
+ print(f"timeit() runs in {timing} seconds")
+
+ for number in (25, 37, 58, 0):
+ do_benchmark(number)
+ print()
+
+
if __name__ == "__main__":
import doctest
doctest.testmod()
+ benchmark()
diff --git a/bit_manipulation/gray_code_sequence.py b/bit_manipulation/gray_code_sequence.py
new file mode 100644
index 000000000..636578d89
--- /dev/null
+++ b/bit_manipulation/gray_code_sequence.py
@@ -0,0 +1,94 @@
+def gray_code(bit_count: int) -> list:
+ """
+ Takes in an integer n and returns an n-bit
+ gray code sequence.
+ An n-bit gray code sequence is a sequence of 2^n
+ integers where:
+
+ a) Every integer is between [0,2^n -1] inclusive
+ b) The sequence begins with 0
+ c) An integer appears at most one time in the sequence
+ d) The binary representations of every pair of adjacent integers differ
+ by exactly one bit
+ e) The binary representations of the first and last integers also
+ differ by exactly one bit
+
+ >>> gray_code(2)
+ [0, 1, 3, 2]
+
+ >>> gray_code(1)
+ [0, 1]
+
+ >>> gray_code(3)
+ [0, 1, 3, 2, 6, 7, 5, 4]
+
+ >>> gray_code(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: The given input must be positive
+
+ >>> gray_code(10.6)
+ Traceback (most recent call last):
+ ...
+ TypeError: unsupported operand type(s) for <<: 'int' and 'float'
+ """
+
+ # bit count represents no. of bits in the gray code
+ if bit_count < 0:
+ raise ValueError("The given input must be positive")
+
+ # get the generated string sequence
+ sequence = gray_code_sequence_string(bit_count)
+ #
+ # convert them to integers
+ for i in range(len(sequence)):
+ sequence[i] = int(sequence[i], 2)
+
+ return sequence
+
+
+def gray_code_sequence_string(bit_count: int) -> list:
+ """
+ Will output the n-bit gray code sequence as a
+ string of bits
+
+ >>> gray_code_sequence_string(2)
+ ['00', '01', '11', '10']
+
+ >>> gray_code_sequence_string(1)
+ ['0', '1']
+ """
+
+ # The approach is a recursive one
+ # Base case achieved when either n = 0 or n=1
+ if bit_count == 0:
+ return ["0"]
+
+ if bit_count == 1:
+ return ["0", "1"]
+
+ seq_len = 1 << bit_count # defines the length of the sequence
+ # 1<< n is equivalent to 2^n
+
+ # recursive answer will generate answer for n-1 bits
+ smaller_sequence = gray_code_sequence_string(bit_count - 1)
+
+ sequence = []
+
+ # append 0 to first half of the smaller sequence generated
+ for i in range(seq_len // 2):
+ generated_no = "0" + smaller_sequence[i]
+ sequence.append(generated_no)
+
+ # append 1 to second half ... start from the end of the list
+ for i in reversed(range(seq_len // 2)):
+ generated_no = "1" + smaller_sequence[i]
+ sequence.append(generated_no)
+
+ return sequence
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/highest_set_bit.py b/bit_manipulation/highest_set_bit.py
new file mode 100644
index 000000000..21d92dcb9
--- /dev/null
+++ b/bit_manipulation/highest_set_bit.py
@@ -0,0 +1,34 @@
+def get_highest_set_bit_position(number: int) -> int:
+ """
+ Returns position of the highest set bit of a number.
+ Ref - https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogObvious
+ >>> get_highest_set_bit_position(25)
+ 5
+ >>> get_highest_set_bit_position(37)
+ 6
+ >>> get_highest_set_bit_position(1)
+ 1
+ >>> get_highest_set_bit_position(4)
+ 3
+ >>> get_highest_set_bit_position(0)
+ 0
+ >>> get_highest_set_bit_position(0.8)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value must be an 'int' type
+ """
+ if not isinstance(number, int):
+ raise TypeError("Input value must be an 'int' type")
+
+ position = 0
+ while number:
+ position += 1
+ number >>= 1
+
+ return position
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/index_of_rightmost_set_bit.py b/bit_manipulation/index_of_rightmost_set_bit.py
new file mode 100644
index 000000000..c9c911660
--- /dev/null
+++ b/bit_manipulation/index_of_rightmost_set_bit.py
@@ -0,0 +1,51 @@
+# Reference: https://www.geeksforgeeks.org/position-of-rightmost-set-bit/
+
+
+def get_index_of_rightmost_set_bit(number: int) -> int:
+ """
+ Take in a positive integer 'number'.
+ Returns the zero-based index of the first set bit in that 'number' from the right.
+ Returns -1 if no set bit is found.
+
+ >>> get_index_of_rightmost_set_bit(0)
+ -1
+ >>> get_index_of_rightmost_set_bit(5)
+ 0
+ >>> get_index_of_rightmost_set_bit(36)
+ 2
+ >>> get_index_of_rightmost_set_bit(8)
+ 3
+ >>> get_index_of_rightmost_set_bit(-18)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a non-negative integer
+ >>> get_index_of_rightmost_set_bit('test')
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a non-negative integer
+ >>> get_index_of_rightmost_set_bit(1.25)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a non-negative integer
+ """
+
+ if not isinstance(number, int) or number < 0:
+ raise ValueError("Input must be a non-negative integer")
+
+ intermediate = number & ~(number - 1)
+ index = 0
+ while intermediate:
+ intermediate >>= 1
+ index += 1
+ return index - 1
+
+
+if __name__ == "__main__":
+ """
+ Finding the index of rightmost set bit has some very peculiar use-cases,
+ especially in finding missing or/and repeating numbers in a list of
+ positive integers.
+ """
+ import doctest
+
+ doctest.testmod(verbose=True)
diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py
new file mode 100644
index 000000000..ba036f35a
--- /dev/null
+++ b/bit_manipulation/is_even.py
@@ -0,0 +1,37 @@
+def is_even(number: int) -> bool:
+ """
+ return true if the input integer is even
+ Explanation: Let's take a look at the following decimal to binary conversions
+ 2 => 10
+ 14 => 1110
+ 100 => 1100100
+ 3 => 11
+ 13 => 1101
+ 101 => 1100101
+ from the above examples we can observe that
+ for all the odd integers there is always 1 set bit at the end
+ also, 1 in binary can be represented as 001, 00001, or 0000001
+ so for any odd integer n => n&1 always equals 1, else the integer is even
+
+ >>> is_even(1)
+ False
+ >>> is_even(4)
+ True
+ >>> is_even(9)
+ False
+ >>> is_even(15)
+ False
+ >>> is_even(40)
+ True
+ >>> is_even(100)
+ True
+ >>> is_even(101)
+ False
+ """
+ return number & 1 == 0
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/is_power_of_two.py b/bit_manipulation/is_power_of_two.py
new file mode 100644
index 000000000..023e979fe
--- /dev/null
+++ b/bit_manipulation/is_power_of_two.py
@@ -0,0 +1,57 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 1, 2022
+
+Task:
+Given a positive int number. Return True if this number is a power of 2
+or False otherwise.
+
+Implementation notes: Use bit manipulation.
+For example if the number is the power of two it's bits representation:
+n = 0..100..00
+n - 1 = 0..011..11
+
+n & (n - 1) - no intersections = 0
+"""
+
+
+def is_power_of_two(number: int) -> bool:
+ """
+ Return True if this number is a power of 2 or False otherwise.
+
+ >>> is_power_of_two(0)
+ True
+ >>> is_power_of_two(1)
+ True
+ >>> is_power_of_two(2)
+ True
+ >>> is_power_of_two(4)
+ True
+ >>> is_power_of_two(6)
+ False
+ >>> is_power_of_two(8)
+ True
+ >>> is_power_of_two(17)
+ False
+ >>> is_power_of_two(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: number must not be negative
+ >>> is_power_of_two(1.2)
+ Traceback (most recent call last):
+ ...
+ TypeError: unsupported operand type(s) for &: 'float' and 'float'
+
+ # Test all powers of 2 from 0 to 10,000
+ >>> all(is_power_of_two(int(2 ** i)) for i in range(10000))
+ True
+ """
+ if number < 0:
+ raise ValueError("number must not be negative")
+ return number & (number - 1) == 0
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/numbers_different_signs.py b/bit_manipulation/numbers_different_signs.py
new file mode 100644
index 000000000..cf8b6d86f
--- /dev/null
+++ b/bit_manipulation/numbers_different_signs.py
@@ -0,0 +1,39 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 30, 2022
+
+Task:
+Given two int numbers. Return True if these numbers have opposite signs
+or False otherwise.
+
+Implementation notes: Use bit manipulation.
+Use XOR for two numbers.
+"""
+
+
+def different_signs(num1: int, num2: int) -> bool:
+ """
+ Return True if numbers have opposite signs False otherwise.
+
+ >>> different_signs(1, -1)
+ True
+ >>> different_signs(1, 1)
+ False
+ >>> different_signs(1000000000000000000000000000, -1000000000000000000000000000)
+ True
+ >>> different_signs(-1000000000000000000000000000, 1000000000000000000000000000)
+ True
+ >>> different_signs(50, 278)
+ False
+ >>> different_signs(0, 2)
+ False
+ >>> different_signs(2, 0)
+ False
+ """
+ return num1 ^ num2 < 0
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/bit_manipulation/reverse_bits.py b/bit_manipulation/reverse_bits.py
index 55608ae12..a8c77c11b 100644
--- a/bit_manipulation/reverse_bits.py
+++ b/bit_manipulation/reverse_bits.py
@@ -14,10 +14,11 @@ def get_reverse_bit_string(number: int) -> str:
TypeError: operation can not be conducted on a object of type str
"""
if not isinstance(number, int):
- raise TypeError(
+ msg = (
"operation can not be conducted on a object of type "
f"{type(number).__name__}"
)
+ raise TypeError(msg)
bit_string = ""
for _ in range(0, 32):
bit_string += str(number % 2)
diff --git a/blockchain/README.md b/blockchain/README.md
new file mode 100644
index 000000000..b5fab7b36
--- /dev/null
+++ b/blockchain/README.md
@@ -0,0 +1,45 @@
+# Blockchain
+
+A Blockchain is a type of **distributed ledger** technology (DLT) that consists of growing list of records, called **blocks**, that are securely linked together using **cryptography**.
+
+Let's breakdown the terminologies in the above definition. We find below terminologies,
+
+- Digital Ledger Technology (DLT)
+- Blocks
+- Cryptography
+
+## Digital Ledger Technology
+
+ It is otherwise called as distributed ledger technology. It is simply the opposite of centralized database. Firstly, what is a **ledger**? A ledger is a book or collection of accounts that records account transactions.
+
+ *Why is Blockchain addressed as digital ledger if it can record more than account transactions? What other transaction details and information can it hold?*
+
+Digital Ledger Technology is just a ledger which is shared among multiple nodes. This way there exists no need for a central authority to hold the info. Okay, how is it differentiated from a central database and what are their benefits?
+
+There is an organization which has 4 branches whose data are stored in a centralized database. So even if one branch needs any data from ledger they need an approval from database in charge. And if one hacks the central database he gets to tamper and control all the data.
+
+Now let's assume every branch has a copy of the ledger, and once anything is added to the ledger by any branch it will automatically be reflected in all other ledgers available in the other branches. This is done using a peer-to-peer network.
+
+So this means even if information is tampered with in one branch, we can find out. If one branch is hacked we can be alerted, so we can safeguard the other branches. Now, assume these branches are computers or nodes and the ledger is a transaction record or digital receipt. If one ledger is hacked in a node we can detect it, since there will be a mismatch in comparison with the other nodes' information. So this is the concept of Digital Ledger Technology.
+
+*Is it required for all nodes to have access to all information in other nodes? Wouldn't this require enormous storage space in each node?*
+
+## Blocks
+
+In short a block is nothing but collections of records with a labelled header. These are connected cryptographically. Once a new block is added to a chain, the previous block is connected, more precisely said as locked and hence, will remain unaltered. We can understand this concept once we get a clear understanding of working mechanism of blockchain.
+
+## Cryptography
+
+It is the practice and study of secure communication techniques in the midst of adversarial behavior. More broadly, cryptography is the creation and analysis of protocols that prevent third parties or the general public from accessing private messages.
+
+*Which cryptography technology is most widely used in blockchain and why?*
+
+So, in general, blockchain technology is a distributed record holder which records the information about ownership of an asset. To define precisely,
+> Blockchain is a distributed, immutable ledger that makes it easier to record transactions and track assets in a corporate network.
+An asset can be tangible (such as a house, car, cash, or land) or intangible (such as intellectual property, patents, copyrights, or branding). A blockchain network can track and sell almost anything of value, lowering risk and costs for everyone involved.
+
+So this is all about introduction to blockchain technology. To learn more about the topic refer below links....
+*
+*
+*
+*
diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py
index b50147ac1..d3e75e779 100644
--- a/blockchain/chinese_remainder_theorem.py
+++ b/blockchain/chinese_remainder_theorem.py
@@ -11,11 +11,11 @@ Algorithm :
1. Use extended euclid algorithm to find x,y such that a*x + b*y = 1
2. Take n = ra*by + rb*ax
"""
-from typing import Tuple
+from __future__ import annotations
# Extended Euclid
-def extended_euclid(a: int, b: int) -> Tuple[int, int]:
+def extended_euclid(a: int, b: int) -> tuple[int, int]:
"""
>>> extended_euclid(10, 6)
(-1, 2)
@@ -53,6 +53,7 @@ def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
# ----------SAME SOLUTION USING InvertModulo instead ExtendedEuclid----------------
+
# This function find the inverses of a i.e., a^(-1)
def invert_modulo(a: int, n: int) -> int:
"""
diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py
index 7df674cb1..22b0cad75 100644
--- a/blockchain/diophantine_equation.py
+++ b/blockchain/diophantine_equation.py
@@ -1,7 +1,7 @@
-from typing import Tuple
+from __future__ import annotations
-def diophantine(a: int, b: int, c: int) -> Tuple[float, float]:
+def diophantine(a: int, b: int, c: int) -> tuple[float, float]:
"""
Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the
diophantine equation a*x + b*y = c has a solution (where x and y are integers)
@@ -95,7 +95,7 @@ def greatest_common_divisor(a: int, b: int) -> int:
return b
-def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
+def extended_gcd(a: int, b: int) -> tuple[int, int, int]:
"""
Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers
x and y, then d = gcd(a,b)
diff --git a/blockchain/modular_division.py b/blockchain/modular_division.py
index 4f7f50a92..a9d0f65c5 100644
--- a/blockchain/modular_division.py
+++ b/blockchain/modular_division.py
@@ -1,4 +1,4 @@
-from typing import Tuple
+from __future__ import annotations
def modular_division(a: int, b: int, n: int) -> int:
@@ -73,7 +73,7 @@ def modular_division2(a: int, b: int, n: int) -> int:
return x
-def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
+def extended_gcd(a: int, b: int) -> tuple[int, int, int]:
"""
Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x
and y, then d = gcd(a,b)
@@ -101,7 +101,7 @@ def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
return (d, x, y)
-def extended_euclid(a: int, b: int) -> Tuple[int, int]:
+def extended_euclid(a: int, b: int) -> tuple[int, int]:
"""
Extended Euclid
>>> extended_euclid(10, 6)
diff --git a/boolean_algebra/README.md b/boolean_algebra/README.md
new file mode 100644
index 000000000..45969c855
--- /dev/null
+++ b/boolean_algebra/README.md
@@ -0,0 +1,7 @@
+# Boolean Algebra
+
+Boolean algebra is used to do arithmetic with bits of values True (1) or False (0).
+There are three basic operations: 'and', 'or' and 'not'.
+
+*
+*
diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py
new file mode 100644
index 000000000..834116772
--- /dev/null
+++ b/boolean_algebra/and_gate.py
@@ -0,0 +1,50 @@
+"""
+An AND Gate is a logic gate in boolean algebra which results to 1 (True) if both the
+inputs are 1, and 0 (False) otherwise.
+
+Following is the truth table of an AND Gate:
+ ------------------------------
+ | Input 1 | Input 2 | Output |
+ ------------------------------
+ | 0 | 0 | 0 |
+ | 0 | 1 | 0 |
+ | 1 | 0 | 0 |
+ | 1 | 1 | 1 |
+ ------------------------------
+
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def and_gate(input_1: int, input_2: int) -> int:
+ """
+ Calculate AND of the input values
+
+ >>> and_gate(0, 0)
+ 0
+ >>> and_gate(0, 1)
+ 0
+ >>> and_gate(1, 0)
+ 0
+ >>> and_gate(1, 1)
+ 1
+ """
+ return int((input_1, input_2).count(0) == 0)
+
+
+def test_and_gate() -> None:
+ """
+ Tests the and_gate function
+ """
+ assert and_gate(0, 0) == 0
+ assert and_gate(0, 1) == 0
+ assert and_gate(1, 0) == 0
+ assert and_gate(1, 1) == 1
+
+
+if __name__ == "__main__":
+ test_and_gate()
+ print(and_gate(1, 0))
+ print(and_gate(0, 0))
+ print(and_gate(0, 1))
+ print(and_gate(1, 1))
diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py
new file mode 100644
index 000000000..ea3303d16
--- /dev/null
+++ b/boolean_algebra/nand_gate.py
@@ -0,0 +1,47 @@
+"""
+A NAND Gate is a logic gate in boolean algebra which results to 0 (False) if both
+the inputs are 1, and 1 (True) otherwise. It's similar to adding
+a NOT gate along with an AND gate.
+Following is the truth table of a NAND Gate:
+ ------------------------------
+ | Input 1 | Input 2 | Output |
+ ------------------------------
+ | 0 | 0 | 1 |
+ | 0 | 1 | 1 |
+ | 1 | 0 | 1 |
+ | 1 | 1 | 0 |
+ ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def nand_gate(input_1: int, input_2: int) -> int:
+ """
+ Calculate NAND of the input values
+ >>> nand_gate(0, 0)
+ 1
+ >>> nand_gate(0, 1)
+ 1
+ >>> nand_gate(1, 0)
+ 1
+ >>> nand_gate(1, 1)
+ 0
+ """
+ return int((input_1, input_2).count(0) != 0)
+
+
+def test_nand_gate() -> None:
+ """
+ Tests the nand_gate function
+ """
+ assert nand_gate(0, 0) == 1
+ assert nand_gate(0, 1) == 1
+ assert nand_gate(1, 0) == 1
+ assert nand_gate(1, 1) == 0
+
+
+if __name__ == "__main__":
+ print(nand_gate(0, 0))
+ print(nand_gate(0, 1))
+ print(nand_gate(1, 0))
+ print(nand_gate(1, 1))
diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py
new file mode 100644
index 000000000..2c27b80af
--- /dev/null
+++ b/boolean_algebra/norgate.py
@@ -0,0 +1,48 @@
+"""
+A NOR Gate is a logic gate in boolean algebra which results to false(0)
+if any of the input is 1, and True(1) if both the inputs are 0.
+Following is the truth table of a NOR Gate:
+ | Input 1 | Input 2 | Output |
+ | 0 | 0 | 1 |
+ | 0 | 1 | 0 |
+ | 1 | 0 | 0 |
+ | 1 | 1 | 0 |
+
+Following is the code implementation of the NOR Gate
+"""
+
+
+def nor_gate(input_1: int, input_2: int) -> int:
+ """
+ >>> nor_gate(0, 0)
+ 1
+ >>> nor_gate(0, 1)
+ 0
+ >>> nor_gate(1, 0)
+ 0
+ >>> nor_gate(1, 1)
+ 0
+ >>> nor_gate(0.0, 0.0)
+ 1
+ >>> nor_gate(0, -7)
+ 0
+ """
+ return int(input_1 == input_2 == 0)
+
+
+def main() -> None:
+ print("Truth Table of NOR Gate:")
+ print("| Input 1 | Input 2 | Output |")
+ print(f"| 0 | 0 | {nor_gate(0, 0)} |")
+ print(f"| 0 | 1 | {nor_gate(0, 1)} |")
+ print(f"| 1 | 0 | {nor_gate(1, 0)} |")
+ print(f"| 1 | 1 | {nor_gate(1, 1)} |")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
+"""Code provided by Akshaj Vishwanathan"""
+"""Reference: https://www.geeksforgeeks.org/logic-gates-in-python/"""
diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py
new file mode 100644
index 000000000..eb85e9e44
--- /dev/null
+++ b/boolean_algebra/not_gate.py
@@ -0,0 +1,37 @@
+"""
+A NOT Gate is a logic gate in boolean algebra which results to 0 (False) if the
+input is high, and 1 (True) if the input is low.
+Following is the truth table of a NOT Gate:
+ ------------------------------
+ | Input | Output |
+ ------------------------------
+ | 0 | 1 |
+ | 1 | 0 |
+ ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def not_gate(input_1: int) -> int:
+ """
+ Calculate NOT of the input values
+ >>> not_gate(0)
+ 1
+ >>> not_gate(1)
+ 0
+ """
+
+ return 1 if input_1 == 0 else 0
+
+
+def test_not_gate() -> None:
+ """
+ Tests the not_gate function
+ """
+ assert not_gate(0) == 1
+ assert not_gate(1) == 0
+
+
+if __name__ == "__main__":
+ print(not_gate(0))
+ print(not_gate(1))
diff --git a/boolean_algebra/or_gate.py b/boolean_algebra/or_gate.py
new file mode 100644
index 000000000..aa7e6645e
--- /dev/null
+++ b/boolean_algebra/or_gate.py
@@ -0,0 +1,46 @@
+"""
+An OR Gate is a logic gate in boolean algebra which results to 0 (False) if both the
+inputs are 0, and 1 (True) otherwise.
+Following is the truth table of an OR Gate:
+ ------------------------------
+ | Input 1 | Input 2 | Output |
+ ------------------------------
+ | 0 | 0 | 0 |
+ | 0 | 1 | 1 |
+ | 1 | 0 | 1 |
+ | 1 | 1 | 1 |
+ ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def or_gate(input_1: int, input_2: int) -> int:
+ """
+ Calculate OR of the input values
+ >>> or_gate(0, 0)
+ 0
+ >>> or_gate(0, 1)
+ 1
+ >>> or_gate(1, 0)
+ 1
+ >>> or_gate(1, 1)
+ 1
+ """
+ return int((input_1, input_2).count(1) != 0)
+
+
+def test_or_gate() -> None:
+ """
+ Tests the or_gate function
+ """
+ assert or_gate(0, 0) == 0
+ assert or_gate(0, 1) == 1
+ assert or_gate(1, 0) == 1
+ assert or_gate(1, 1) == 1
+
+
+if __name__ == "__main__":
+ print(or_gate(0, 1))
+ print(or_gate(1, 0))
+ print(or_gate(0, 0))
+ print(or_gate(1, 1))
diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py
index 70cdf25a7..6788dfb28 100644
--- a/boolean_algebra/quine_mc_cluskey.py
+++ b/boolean_algebra/quine_mc_cluskey.py
@@ -1,43 +1,46 @@
-from typing import List
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Literal
-def compare_string(string1: str, string2: str) -> str:
+def compare_string(string1: str, string2: str) -> str | Literal[False]:
"""
>>> compare_string('0010','0110')
'0_10'
>>> compare_string('0110','1101')
- 'X'
+ False
"""
- l1 = list(string1)
- l2 = list(string2)
+ list1 = list(string1)
+ list2 = list(string2)
count = 0
- for i in range(len(l1)):
- if l1[i] != l2[i]:
+ for i in range(len(list1)):
+ if list1[i] != list2[i]:
count += 1
- l1[i] = "_"
+ list1[i] = "_"
if count > 1:
- return "X"
+ return False
else:
- return "".join(l1)
+ return "".join(list1)
-def check(binary: List[str]) -> List[str]:
+def check(binary: list[str]) -> list[str]:
"""
>>> check(['0.00.01.5'])
['0.00.01.5']
"""
pi = []
- while 1:
+ while True:
check1 = ["$"] * len(binary)
temp = []
for i in range(len(binary)):
for j in range(i + 1, len(binary)):
k = compare_string(binary[i], binary[j])
- if k != "X":
+ if k is False:
check1[i] = "*"
check1[j] = "*"
- temp.append(k)
+ temp.append("X")
for i in range(len(binary)):
if check1[i] == "$":
pi.append(binary[i])
@@ -46,19 +49,18 @@ def check(binary: List[str]) -> List[str]:
binary = list(set(temp))
-def decimal_to_binary(no_of_variable: int, minterms: List[float]) -> List[str]:
+def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
"""
>>> decimal_to_binary(3,[1.5])
['0.00.01.5']
"""
temp = []
- s = ""
- for m in minterms:
- for i in range(no_of_variable):
- s = str(m % 2) + s
- m //= 2
- temp.append(s)
- s = ""
+ for minterm in minterms:
+ string = ""
+ for _ in range(no_of_variable):
+ string = str(minterm % 2) + string
+ minterm //= 2
+ temp.append(string)
return temp
@@ -70,19 +72,16 @@ def is_for_table(string1: str, string2: str, count: int) -> bool:
>>> is_for_table('01_','001',1)
False
"""
- l1 = list(string1)
- l2 = list(string2)
+ list1 = list(string1)
+ list2 = list(string2)
count_n = 0
- for i in range(len(l1)):
- if l1[i] != l2[i]:
+ for i in range(len(list1)):
+ if list1[i] != list2[i]:
count_n += 1
- if count_n == count:
- return True
- else:
- return False
+ return count_n == count
-def selection(chart: List[List[int]], prime_implicants: List[str]) -> List[str]:
+def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
"""
>>> selection([[1]],['0.00.01.5'])
['0.00.01.5']
@@ -108,7 +107,7 @@ def selection(chart: List[List[int]], prime_implicants: List[str]) -> List[str]:
for k in range(len(chart)):
chart[k][j] = 0
temp.append(prime_implicants[i])
- while 1:
+ while True:
max_n = 0
rem = -1
count_n = 0
@@ -130,8 +129,8 @@ def selection(chart: List[List[int]], prime_implicants: List[str]) -> List[str]:
def prime_implicant_chart(
- prime_implicants: List[str], binary: List[str]
-) -> List[List[int]]:
+ prime_implicants: list[str], binary: list[str]
+) -> list[list[int]]:
"""
>>> prime_implicant_chart(['0.00.01.5'],['0.00.01.5'])
[[1]]
@@ -146,10 +145,10 @@ def prime_implicant_chart(
return chart
-def main():
+def main() -> None:
no_of_variable = int(input("Enter the no. of variables\n"))
minterms = [
- int(x)
+ float(x)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n"
).split()
diff --git a/boolean_algebra/xnor_gate.py b/boolean_algebra/xnor_gate.py
new file mode 100644
index 000000000..45ab2700e
--- /dev/null
+++ b/boolean_algebra/xnor_gate.py
@@ -0,0 +1,48 @@
+"""
+A XNOR Gate is a logic gate in boolean algebra which results to 0 (False) if both the
+inputs are different, and 1 (True), if the inputs are same.
+It's similar to adding a NOT gate to an XOR gate
+
+Following is the truth table of a XNOR Gate:
+ ------------------------------
+ | Input 1 | Input 2 | Output |
+ ------------------------------
+ | 0 | 0 | 1 |
+ | 0 | 1 | 0 |
+ | 1 | 0 | 0 |
+ | 1 | 1 | 1 |
+ ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def xnor_gate(input_1: int, input_2: int) -> int:
+ """
+ Calculate XNOR of the input values
+ >>> xnor_gate(0, 0)
+ 1
+ >>> xnor_gate(0, 1)
+ 0
+ >>> xnor_gate(1, 0)
+ 0
+ >>> xnor_gate(1, 1)
+ 1
+ """
+ return 1 if input_1 == input_2 else 0
+
+
+def test_xnor_gate() -> None:
+ """
+ Tests the xnor_gate function
+ """
+ assert xnor_gate(0, 0) == 1
+ assert xnor_gate(0, 1) == 0
+ assert xnor_gate(1, 0) == 0
+ assert xnor_gate(1, 1) == 1
+
+
+if __name__ == "__main__":
+ print(xnor_gate(0, 0))
+ print(xnor_gate(0, 1))
+ print(xnor_gate(1, 0))
+ print(xnor_gate(1, 1))
diff --git a/boolean_algebra/xor_gate.py b/boolean_algebra/xor_gate.py
new file mode 100644
index 000000000..db4f5b45c
--- /dev/null
+++ b/boolean_algebra/xor_gate.py
@@ -0,0 +1,46 @@
+"""
+A XOR Gate is a logic gate in boolean algebra which results to 1 (True) if only one of
+the two inputs is 1, and 0 (False) if both inputs are the same.
+Following is the truth table of a XOR Gate:
+ ------------------------------
+ | Input 1 | Input 2 | Output |
+ ------------------------------
+ | 0 | 0 | 0 |
+ | 0 | 1 | 1 |
+ | 1 | 0 | 1 |
+ | 1 | 1 | 0 |
+ ------------------------------
+
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def xor_gate(input_1: int, input_2: int) -> int:
+ """
+ calculate xor of the input values
+
+ >>> xor_gate(0, 0)
+ 0
+ >>> xor_gate(0, 1)
+ 1
+ >>> xor_gate(1, 0)
+ 1
+ >>> xor_gate(1, 1)
+ 0
+ """
+ return (input_1, input_2).count(0) % 2
+
+
+def test_xor_gate() -> None:
+ """
+ Tests the xor_gate function
+ """
+ assert xor_gate(0, 0) == 0
+ assert xor_gate(0, 1) == 1
+ assert xor_gate(1, 0) == 1
+ assert xor_gate(1, 1) == 0
+
+
+if __name__ == "__main__":
+ print(xor_gate(0, 0))
+ print(xor_gate(0, 1))
diff --git a/cellular_automata/README.md b/cellular_automata/README.md
index c3fa0516f..c5681b339 100644
--- a/cellular_automata/README.md
+++ b/cellular_automata/README.md
@@ -1,4 +1,8 @@
# Cellular Automata
-* https://en.wikipedia.org/wiki/Cellular_automaton
-* https://mathworld.wolfram.com/ElementaryCellularAutomaton.html
+Cellular automata are a way to simulate the behavior of "life", no matter if it is a robot or cell.
+They usually follow simple rules but can lead to the creation of complex forms.
+The most popular cellular automaton is Conway's [Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life).
+
+*
+*
diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py
index 321baa3a3..84f4d5be4 100644
--- a/cellular_automata/conways_game_of_life.py
+++ b/cellular_automata/conways_game_of_life.py
@@ -2,11 +2,8 @@
Conway's Game of Life implemented in Python.
https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
"""
-
from __future__ import annotations
-from typing import List
-
from PIL import Image
# Define glider example
@@ -25,7 +22,7 @@ GLIDER = [
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
-def new_generation(cells: List[List[int]]) -> List[List[int]]:
+def new_generation(cells: list[list[int]]) -> list[list[int]]:
"""
Generates the next generation for a given state of Conway's Game of Life.
>>> new_generation(BLINKER)
@@ -73,7 +70,7 @@ def new_generation(cells: List[List[int]]) -> List[List[int]]:
return next_generation
-def generate_images(cells: list[list[int]], frames) -> list[Image.Image]:
+def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
"""
Generates a list of images of subsequent Game of Life states.
"""
diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py
index 09863993d..d691a2b73 100644
--- a/cellular_automata/game_of_life.py
+++ b/cellular_automata/game_of_life.py
@@ -10,7 +10,7 @@ Python:
- 3.5
Usage:
- - $python3 game_o_life
+ - $python3 game_of_life
Game-Of-Life Rules:
@@ -34,25 +34,26 @@ import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
-usage_doc = "Usage of script: script_nama "
+usage_doc = "Usage of script: script_name "
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
-def create_canvas(size):
+def create_canvas(size: int) -> list[list[bool]]:
canvas = [[False for i in range(size)] for j in range(size)]
return canvas
-def seed(canvas):
+def seed(canvas: list[list[bool]]) -> None:
for i, row in enumerate(canvas):
for j, _ in enumerate(row):
canvas[i][j] = bool(random.getrandbits(1))
-def run(canvas):
- """This function runs the rules of game through all points, and changes their
+def run(canvas: list[list[bool]]) -> list[list[bool]]:
+ """
+ This function runs the rules of game through all points, and changes their
status accordingly.(in the same canvas)
@Args:
--
@@ -60,23 +61,20 @@ def run(canvas):
@returns:
--
- None
+ canvas of population after one step
"""
- canvas = np.array(canvas)
- next_gen_canvas = np.array(create_canvas(canvas.shape[0]))
- for r, row in enumerate(canvas):
+ current_canvas = np.array(canvas)
+ next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
+ for r, row in enumerate(current_canvas):
for c, pt in enumerate(row):
- # print(r-1,r+2,c-1,c+2)
next_gen_canvas[r][c] = __judge_point(
- pt, canvas[r - 1 : r + 2, c - 1 : c + 2]
+ pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
)
- canvas = next_gen_canvas
- del next_gen_canvas # cleaning memory as we move on.
- return canvas.tolist()
+ return next_gen_canvas.tolist()
-def __judge_point(pt, neighbours):
+def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
dead = 0
alive = 0
# finding dead or alive neighbours count.
@@ -98,7 +96,7 @@ def __judge_point(pt, neighbours):
if pt:
if alive < 2:
state = False
- elif alive == 2 or alive == 3:
+ elif alive in {2, 3}:
state = True
elif alive > 3:
state = False
diff --git a/cellular_automata/nagel_schrekenberg.py b/cellular_automata/nagel_schrekenberg.py
new file mode 100644
index 000000000..3fd6afca0
--- /dev/null
+++ b/cellular_automata/nagel_schrekenberg.py
@@ -0,0 +1,139 @@
+"""
+Simulate the evolution of a highway with only one road that is a loop.
+The highway is divided in cells, each cell can have at most one car in it.
+The highway is a loop so when a car comes to one end, it will come out on the other.
+Each car is represented by its speed (from 0 to 5).
+
+Some information about speed:
+ -1 means that the cell on the highway is empty
+ 0 to 5 are the speed of the cars with 0 being the lowest and 5 the highest
+
+highway: list[int] Where every position and speed of every car will be stored
+probability The probability that a driver will slow down
+initial_speed The speed of the cars a the start
+frequency How many cells there are between two cars at the start
+max_speed The maximum speed a car can go to
+number_of_cells How many cell are there in the highway
+number_of_update How many times will the position be updated
+
+More information here: https://en.wikipedia.org/wiki/Nagel%E2%80%93Schreckenberg_model
+
+Examples for doctest:
+>>> simulate(construct_highway(6, 3, 0), 2, 0, 2)
+[[0, -1, -1, 0, -1, -1], [-1, 1, -1, -1, 1, -1], [-1, -1, 1, -1, -1, 1]]
+>>> simulate(construct_highway(5, 2, -2), 3, 0, 2)
+[[0, -1, 0, -1, 0], [0, -1, 0, -1, -1], [0, -1, -1, 1, -1], [-1, 1, -1, 0, -1]]
+"""
+from random import randint, random
+
+
+def construct_highway(
+ number_of_cells: int,
+ frequency: int,
+ initial_speed: int,
+ random_frequency: bool = False,
+ random_speed: bool = False,
+ max_speed: int = 5,
+) -> list:
+ """
+ Build the highway following the parameters given
+ >>> construct_highway(10, 2, 6)
+ [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
+ >>> construct_highway(10, 10, 2)
+ [[2, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
+ """
+
+ highway = [[-1] * number_of_cells] # Create a highway without any car
+ i = 0
+ initial_speed = max(initial_speed, 0)
+ while i < number_of_cells:
+ highway[0][i] = (
+ randint(0, max_speed) if random_speed else initial_speed
+ ) # Place the cars
+ i += (
+ randint(1, max_speed * 2) if random_frequency else frequency
+ ) # Arbitrary number, may need tuning
+ return highway
+
+
+def get_distance(highway_now: list, car_index: int) -> int:
+ """
+ Get the distance between a car (at index car_index) and the next car
+ >>> get_distance([6, -1, 6, -1, 6], 2)
+ 1
+ >>> get_distance([2, -1, -1, -1, 3, 1, 0, 1, 3, 2], 0)
+ 3
+ >>> get_distance([-1, -1, -1, -1, 2, -1, -1, -1, 3], -1)
+ 4
+ """
+
+ distance = 0
+ cells = highway_now[car_index + 1 :]
+ for cell in range(len(cells)): # May need a better name for this
+ if cells[cell] != -1: # If the cell is not empty then
+ return distance # we have the distance we wanted
+ distance += 1
+ # Here if the car is near the end of the highway
+ return distance + get_distance(highway_now, -1)
+
+
+def update(highway_now: list, probability: float, max_speed: int) -> list:
+ """
+ Update the speed of the cars
+ >>> update([-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, 3], 0.0, 5)
+ [-1, -1, -1, -1, -1, 3, -1, -1, -1, -1, 4]
+ >>> update([-1, -1, 2, -1, -1, -1, -1, 3], 0.0, 5)
+ [-1, -1, 3, -1, -1, -1, -1, 1]
+ """
+
+ number_of_cells = len(highway_now)
+ # Before calculations, the highway is empty
+ next_highway = [-1] * number_of_cells
+
+ for car_index in range(number_of_cells):
+ if highway_now[car_index] != -1:
+ # Add 1 to the current speed of the car and cap the speed
+ next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
+ # Number of empty cell before the next car
+ dn = get_distance(highway_now, car_index) - 1
+ # We can't have the car causing an accident
+ next_highway[car_index] = min(next_highway[car_index], dn)
+ if random() < probability:
+ # Randomly, a driver will slow down
+ next_highway[car_index] = max(next_highway[car_index] - 1, 0)
+ return next_highway
+
+
+def simulate(
+ highway: list, number_of_update: int, probability: float, max_speed: int
+) -> list:
+ """
+ The main function, it will simulate the evolution of the highway
+ >>> simulate([[-1, 2, -1, -1, -1, 3]], 2, 0.0, 3)
+ [[-1, 2, -1, -1, -1, 3], [-1, -1, -1, 2, -1, 0], [1, -1, -1, 0, -1, -1]]
+ >>> simulate([[-1, 2, -1, 3]], 4, 0.0, 3)
+ [[-1, 2, -1, 3], [-1, 0, -1, 0], [-1, 0, -1, 0], [-1, 0, -1, 0], [-1, 0, -1, 0]]
+ """
+
+ number_of_cells = len(highway[0])
+
+ for i in range(number_of_update):
+ next_speeds_calculated = update(highway[i], probability, max_speed)
+ real_next_speeds = [-1] * number_of_cells
+
+ for car_index in range(number_of_cells):
+ speed = next_speeds_calculated[car_index]
+ if speed != -1:
+ # Change the position based on the speed (with % to create the loop)
+ index = (car_index + speed) % number_of_cells
+ # Commit the change of position
+ real_next_speeds[index] = speed
+ highway.append(real_next_speeds)
+
+ return highway
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/README.md b/ciphers/README.md
new file mode 100644
index 000000000..fa09874f3
--- /dev/null
+++ b/ciphers/README.md
@@ -0,0 +1,7 @@
+# Ciphers
+
+Ciphers are used to protect data from people that are not allowed to have it. They are everywhere on the internet to protect your connections.
+
+*
+*
+*
diff --git a/ciphers/a1z26.py b/ciphers/a1z26.py
index e6684fb1e..0f0eb7c5c 100644
--- a/ciphers/a1z26.py
+++ b/ciphers/a1z26.py
@@ -5,6 +5,7 @@ corresponding to the character's position in the alphabet.
https://www.dcode.fr/letter-number-cipher
http://bestcodes.weebly.com/a1z26.html
"""
+from __future__ import annotations
def encode(plain: str) -> list[int]:
diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py
index d3b806ba1..cd1e33b88 100644
--- a/ciphers/affine_cipher.py
+++ b/ciphers/affine_cipher.py
@@ -9,26 +9,26 @@ SYMBOLS = (
)
-def check_keys(keyA: int, keyB: int, mode: str) -> None:
+def check_keys(key_a: int, key_b: int, mode: str) -> None:
if mode == "encrypt":
- if keyA == 1:
+ if key_a == 1:
sys.exit(
"The affine cipher becomes weak when key "
"A is set to 1. Choose different key"
)
- if keyB == 0:
+ if key_b == 0:
sys.exit(
"The affine cipher becomes weak when key "
"B is set to 0. Choose different key"
)
- if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:
+ if key_a < 0 or key_b < 0 or key_b > len(SYMBOLS) - 1:
sys.exit(
"Key A must be greater than 0 and key B must "
f"be between 0 and {len(SYMBOLS) - 1}."
)
- if cryptomath.gcd(keyA, len(SYMBOLS)) != 1:
+ if cryptomath.gcd(key_a, len(SYMBOLS)) != 1:
sys.exit(
- f"Key A {keyA} and the symbol set size {len(SYMBOLS)} "
+ f"Key A {key_a} and the symbol set size {len(SYMBOLS)} "
"are not relatively prime. Choose a different key."
)
@@ -39,16 +39,16 @@ def encrypt_message(key: int, message: str) -> str:
... 'substitution cipher.')
'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi'
"""
- keyA, keyB = divmod(key, len(SYMBOLS))
- check_keys(keyA, keyB, "encrypt")
- cipherText = ""
+ key_a, key_b = divmod(key, len(SYMBOLS))
+ check_keys(key_a, key_b, "encrypt")
+ cipher_text = ""
for symbol in message:
if symbol in SYMBOLS:
- symIndex = SYMBOLS.find(symbol)
- cipherText += SYMBOLS[(symIndex * keyA + keyB) % len(SYMBOLS)]
+ sym_index = SYMBOLS.find(symbol)
+ cipher_text += SYMBOLS[(sym_index * key_a + key_b) % len(SYMBOLS)]
else:
- cipherText += symbol
- return cipherText
+ cipher_text += symbol
+ return cipher_text
def decrypt_message(key: int, message: str) -> str:
@@ -57,25 +57,27 @@ def decrypt_message(key: int, message: str) -> str:
... '{xIp~{HL}Gi')
'The affine cipher is a type of monoalphabetic substitution cipher.'
"""
- keyA, keyB = divmod(key, len(SYMBOLS))
- check_keys(keyA, keyB, "decrypt")
- plainText = ""
- modInverseOfkeyA = cryptomath.find_mod_inverse(keyA, len(SYMBOLS))
+ key_a, key_b = divmod(key, len(SYMBOLS))
+ check_keys(key_a, key_b, "decrypt")
+ plain_text = ""
+ mod_inverse_of_key_a = cryptomath.find_mod_inverse(key_a, len(SYMBOLS))
for symbol in message:
if symbol in SYMBOLS:
- symIndex = SYMBOLS.find(symbol)
- plainText += SYMBOLS[(symIndex - keyB) * modInverseOfkeyA % len(SYMBOLS)]
+ sym_index = SYMBOLS.find(symbol)
+ plain_text += SYMBOLS[
+ (sym_index - key_b) * mod_inverse_of_key_a % len(SYMBOLS)
+ ]
else:
- plainText += symbol
- return plainText
+ plain_text += symbol
+ return plain_text
def get_random_key() -> int:
while True:
- keyA = random.randint(2, len(SYMBOLS))
- keyB = random.randint(2, len(SYMBOLS))
- if cryptomath.gcd(keyA, len(SYMBOLS)) == 1 and keyB % len(SYMBOLS) != 0:
- return keyA * len(SYMBOLS) + keyB
+        key_a = random.randint(2, len(SYMBOLS))
+        key_b = random.randint(2, len(SYMBOLS))
+        if cryptomath.gcd(key_a, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0:
+            return key_a * len(SYMBOLS) + key_b
def main() -> None:
diff --git a/ciphers/atbash.py b/ciphers/atbash.py
index 5c2aea610..0a86a800c 100644
--- a/ciphers/atbash.py
+++ b/ciphers/atbash.py
@@ -38,26 +38,13 @@ def atbash(sequence: str) -> str:
def benchmark() -> None:
- """Let's benchmark them side-by-side..."""
+ """Let's benchmark our functions side-by-side..."""
from timeit import timeit
print("Running performance benchmarks...")
- print(
- "> atbash_slow()",
- timeit(
- "atbash_slow(printable)",
- setup="from string import printable ; from __main__ import atbash_slow",
- ),
- "seconds",
- )
- print(
- "> atbash()",
- timeit(
- "atbash(printable)",
- setup="from string import printable ; from __main__ import atbash",
- ),
- "seconds",
- )
+ setup = "from string import printable ; from __main__ import atbash, atbash_slow"
+ print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
+ print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
diff --git a/ciphers/autokey.py b/ciphers/autokey.py
new file mode 100644
index 000000000..8683e6d37
--- /dev/null
+++ b/ciphers/autokey.py
@@ -0,0 +1,131 @@
+"""
+https://en.wikipedia.org/wiki/Autokey_cipher
+An autokey cipher (also known as the autoclave cipher) is a cipher that
+incorporates the message (the plaintext) into the key.
+The key is generated from the message in some automated fashion,
+sometimes by selecting certain letters from the text or, more commonly,
+by adding a short primer key to the front of the message.
+"""
+
+
+def encrypt(plaintext: str, key: str) -> str:
+ """
+ Encrypt a given plaintext (string) and key (string), returning the
+ encrypted ciphertext.
+ >>> encrypt("hello world", "coffee")
+ 'jsqqs avvwo'
+ >>> encrypt("coffee is good as python", "TheAlgorithms")
+ 'vvjfpk wj ohvp su ddylsv'
+ >>> encrypt("coffee is good as python", 2)
+ Traceback (most recent call last):
+ ...
+ TypeError: key must be a string
+ >>> encrypt("", "TheAlgorithms")
+ Traceback (most recent call last):
+ ...
+ ValueError: plaintext is empty
+ """
+ if not isinstance(plaintext, str):
+ raise TypeError("plaintext must be a string")
+ if not isinstance(key, str):
+ raise TypeError("key must be a string")
+
+ if not plaintext:
+ raise ValueError("plaintext is empty")
+ if not key:
+ raise ValueError("key is empty")
+
+ key += plaintext
+ plaintext = plaintext.lower()
+ key = key.lower()
+ plaintext_iterator = 0
+ key_iterator = 0
+ ciphertext = ""
+ while plaintext_iterator < len(plaintext):
+ if (
+ ord(plaintext[plaintext_iterator]) < 97
+ or ord(plaintext[plaintext_iterator]) > 122
+ ):
+ ciphertext += plaintext[plaintext_iterator]
+ plaintext_iterator += 1
+ elif ord(key[key_iterator]) < 97 or ord(key[key_iterator]) > 122:
+ key_iterator += 1
+ else:
+ ciphertext += chr(
+ (
+ (ord(plaintext[plaintext_iterator]) - 97 + ord(key[key_iterator]))
+ - 97
+ )
+ % 26
+ + 97
+ )
+ key_iterator += 1
+ plaintext_iterator += 1
+ return ciphertext
+
+
+def decrypt(ciphertext: str, key: str) -> str:
+ """
+ Decrypt a given ciphertext (string) and key (string), returning the decrypted
+ ciphertext.
+ >>> decrypt("jsqqs avvwo", "coffee")
+ 'hello world'
+ >>> decrypt("vvjfpk wj ohvp su ddylsv", "TheAlgorithms")
+ 'coffee is good as python'
+ >>> decrypt("vvjfpk wj ohvp su ddylsv", "")
+ Traceback (most recent call last):
+ ...
+ ValueError: key is empty
+ >>> decrypt(527.26, "TheAlgorithms")
+ Traceback (most recent call last):
+ ...
+ TypeError: ciphertext must be a string
+ """
+ if not isinstance(ciphertext, str):
+ raise TypeError("ciphertext must be a string")
+ if not isinstance(key, str):
+ raise TypeError("key must be a string")
+
+ if not ciphertext:
+ raise ValueError("ciphertext is empty")
+ if not key:
+ raise ValueError("key is empty")
+
+ key = key.lower()
+ ciphertext_iterator = 0
+ key_iterator = 0
+ plaintext = ""
+ while ciphertext_iterator < len(ciphertext):
+ if (
+ ord(ciphertext[ciphertext_iterator]) < 97
+ or ord(ciphertext[ciphertext_iterator]) > 122
+ ):
+ plaintext += ciphertext[ciphertext_iterator]
+ else:
+ plaintext += chr(
+ (ord(ciphertext[ciphertext_iterator]) - ord(key[key_iterator])) % 26
+ + 97
+ )
+ key += chr(
+ (ord(ciphertext[ciphertext_iterator]) - ord(key[key_iterator])) % 26
+ + 97
+ )
+ key_iterator += 1
+ ciphertext_iterator += 1
+ return plaintext
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+    operation = int(input("Type 1 to encrypt or 2 to decrypt:"))
+    if operation == 1:
+        plaintext = input("Type the plaintext to be encrypted:\n")
+        key = input("Type the key:\n")
+        print(encrypt(plaintext, key))
+ elif operation == 2:
+ ciphertext = input("Type the ciphertext to be decrypted:\n")
+ key = input("Type the key:\n")
+ print(decrypt(ciphertext, key))
+ decrypt("jsqqs avvwo", "coffee")
diff --git a/ciphers/baconian_cipher.py b/ciphers/baconian_cipher.py
new file mode 100644
index 000000000..f146ba91b
--- /dev/null
+++ b/ciphers/baconian_cipher.py
@@ -0,0 +1,89 @@
+"""
+Program to encode and decode Baconian or Bacon's Cipher
+Wikipedia reference : https://en.wikipedia.org/wiki/Bacon%27s_cipher
+"""
+
+encode_dict = {
+ "a": "AAAAA",
+ "b": "AAAAB",
+ "c": "AAABA",
+ "d": "AAABB",
+ "e": "AABAA",
+ "f": "AABAB",
+ "g": "AABBA",
+ "h": "AABBB",
+ "i": "ABAAA",
+ "j": "BBBAA",
+ "k": "ABAAB",
+ "l": "ABABA",
+ "m": "ABABB",
+ "n": "ABBAA",
+ "o": "ABBAB",
+ "p": "ABBBA",
+ "q": "ABBBB",
+ "r": "BAAAA",
+ "s": "BAAAB",
+ "t": "BAABA",
+ "u": "BAABB",
+ "v": "BBBAB",
+ "w": "BABAA",
+ "x": "BABAB",
+ "y": "BABBA",
+ "z": "BABBB",
+ " ": " ",
+}
+
+
+decode_dict = {value: key for key, value in encode_dict.items()}
+
+
+def encode(word: str) -> str:
+ """
+ Encodes to Baconian cipher
+
+ >>> encode("hello")
+ 'AABBBAABAAABABAABABAABBAB'
+ >>> encode("hello world")
+ 'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
+ >>> encode("hello world!")
+ Traceback (most recent call last):
+ ...
+ Exception: encode() accepts only letters of the alphabet and spaces
+ """
+ encoded = ""
+ for letter in word.lower():
+ if letter.isalpha() or letter == " ":
+ encoded += encode_dict[letter]
+ else:
+ raise Exception("encode() accepts only letters of the alphabet and spaces")
+ return encoded
+
+
+def decode(coded: str) -> str:
+ """
+ Decodes from Baconian cipher
+
+ >>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB")
+ 'hello world'
+ >>> decode("AABBBAABAAABABAABABAABBAB")
+ 'hello'
+ >>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB!")
+ Traceback (most recent call last):
+ ...
+ Exception: decode() accepts only 'A', 'B' and spaces
+ """
+ if set(coded) - {"A", "B", " "} != set():
+ raise Exception("decode() accepts only 'A', 'B' and spaces")
+ decoded = ""
+ for word in coded.split():
+ while len(word) != 0:
+ decoded += decode_dict[word[:5]]
+ word = word[5:]
+ decoded += " "
+ return decoded.strip()
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/ciphers/base16.py b/ciphers/base16.py
index f27ea4628..6cd62846f 100644
--- a/ciphers/base16.py
+++ b/ciphers/base16.py
@@ -1,19 +1,63 @@
-import base64
-
-
-def encode_to_b16(inp: str) -> bytes:
+def base16_encode(data: bytes) -> str:
"""
- Encodes a given utf-8 string into base-16.
- >>> encode_to_b16('Hello World!')
- b'48656C6C6F20576F726C6421'
- >>> encode_to_b16('HELLO WORLD!')
- b'48454C4C4F20574F524C4421'
- >>> encode_to_b16('')
+ Encodes the given bytes into base16.
+
+ >>> base16_encode(b'Hello World!')
+ '48656C6C6F20576F726C6421'
+ >>> base16_encode(b'HELLO WORLD!')
+ '48454C4C4F20574F524C4421'
+ >>> base16_encode(b'')
+ ''
+ """
+ # Turn the data into a list of integers (where each integer is a byte),
+ # Then turn each byte into its hexadecimal representation, make sure
+ # it is uppercase, and then join everything together and return it.
+ return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
+
+
+def base16_decode(data: str) -> bytes:
+ """
+ Decodes the given base16 encoded data into bytes.
+
+ >>> base16_decode('48656C6C6F20576F726C6421')
+ b'Hello World!'
+ >>> base16_decode('48454C4C4F20574F524C4421')
+ b'HELLO WORLD!'
+ >>> base16_decode('')
b''
+ >>> base16_decode('486')
+ Traceback (most recent call last):
+ ...
+ ValueError: Base16 encoded data is invalid:
+ Data does not have an even number of hex digits.
+ >>> base16_decode('48656c6c6f20576f726c6421')
+ Traceback (most recent call last):
+ ...
+ ValueError: Base16 encoded data is invalid:
+ Data is not uppercase hex or it contains invalid characters.
+ >>> base16_decode('This is not base64 encoded data.')
+ Traceback (most recent call last):
+ ...
+ ValueError: Base16 encoded data is invalid:
+ Data is not uppercase hex or it contains invalid characters.
"""
- encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object)
- b16encoded = base64.b16encode(encoded) # b16encoded the encoded string
- return b16encoded
+ # Check data validity, following RFC3548
+ # https://www.ietf.org/rfc/rfc3548.txt
+ if (len(data) % 2) != 0:
+ raise ValueError(
+ """Base16 encoded data is invalid:
+Data does not have an even number of hex digits."""
+ )
+ # Check the character set - the standard base16 alphabet
+ # is uppercase according to RFC3548 section 6
+ if not set(data) <= set("0123456789ABCDEF"):
+ raise ValueError(
+ """Base16 encoded data is invalid:
+Data is not uppercase hex or it contains invalid characters."""
+ )
+ # For every two hexadecimal digits (= a byte), turn it into an integer.
+ # Then, string the result together into bytes, and return it.
+ return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
diff --git a/ciphers/base32.py b/ciphers/base32.py
index da289a721..fee53ccaf 100644
--- a/ciphers/base32.py
+++ b/ciphers/base32.py
@@ -1,13 +1,42 @@
import base64
-def main() -> None:
- inp = input("->")
- encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object)
- b32encoded = base64.b32encode(encoded) # b32encoded the encoded string
- print(b32encoded)
- print(base64.b32decode(b32encoded).decode("utf-8")) # decoded it
+def base32_encode(string: str) -> bytes:
+ """
+ Encodes a given string to base32, returning a bytes-like object
+ >>> base32_encode("Hello World!")
+ b'JBSWY3DPEBLW64TMMQQQ===='
+ >>> base32_encode("123456")
+ b'GEZDGNBVGY======'
+ >>> base32_encode("some long complex string")
+ b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY='
+ """
+
+ # encoded the input (we need a bytes like object)
+ # then, b32encoded the bytes-like object
+ return base64.b32encode(string.encode("utf-8"))
+
+
+def base32_decode(encoded_bytes: bytes) -> str:
+ """
+ Decodes a given bytes-like object to a string, returning a string
+ >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')
+ 'Hello World!'
+ >>> base32_decode(b'GEZDGNBVGY======')
+ '123456'
+ >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')
+ 'some long complex string'
+ """
+
+ # decode the bytes from base32
+ # then, decode the bytes-like object to return as a string
+ return base64.b32decode(encoded_bytes).decode("utf-8")
if __name__ == "__main__":
- main()
+ test = "Hello World!"
+ encoded = base32_encode(test)
+ print(encoded)
+
+ decoded = base32_decode(encoded)
+ print(decoded)
diff --git a/ciphers/base64_encoding.py b/ciphers/base64.py
similarity index 94%
rename from ciphers/base64_encoding.py
rename to ciphers/base64.py
index 634afcb89..2b950b1be 100644
--- a/ciphers/base64_encoding.py
+++ b/ciphers/base64.py
@@ -7,7 +7,7 @@ def base64_encode(data: bytes) -> bytes:
The data is first transformed to binary and appended with binary digits so that its
length becomes a multiple of 6, then each 6 binary digits will match a character in
the B64_CHARSET string. The number of appended binary digits would later determine
- how many "=" sign should be added, the padding.
+ how many "=" signs should be added, the padding.
For every 2 binary digits added, a "=" sign is added in the output.
We can add any binary digits to make it a multiple of 6, for instance, consider the
following example:
@@ -34,9 +34,8 @@ def base64_encode(data: bytes) -> bytes:
"""
# Make sure the supplied data is a bytes-like object
if not isinstance(data, bytes):
- raise TypeError(
- f"a bytes-like object is required, not '{data.__class__.__name__}'"
- )
+ msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
+ raise TypeError(msg)
binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
@@ -88,10 +87,11 @@ def base64_decode(encoded_data: str) -> bytes:
"""
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
- raise TypeError(
- "argument should be a bytes-like object or ASCII string, not "
- f"'{encoded_data.__class__.__name__}'"
+ msg = (
+ "argument should be a bytes-like object or ASCII string, "
+ f"not '{encoded_data.__class__.__name__}'"
)
+ raise TypeError(msg)
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
diff --git a/ciphers/base85.py b/ciphers/base85.py
index 9740299b9..afd1aff79 100644
--- a/ciphers/base85.py
+++ b/ciphers/base85.py
@@ -1,13 +1,33 @@
import base64
-def main() -> None:
- inp = input("->")
- encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object)
- a85encoded = base64.a85encode(encoded) # a85encoded the encoded string
- print(a85encoded)
- print(base64.a85decode(a85encoded).decode("utf-8")) # decoded it
+def base85_encode(string: str) -> bytes:
+ """
+ >>> base85_encode("")
+ b''
+ >>> base85_encode("12345")
+ b'0etOA2#'
+ >>> base85_encode("base 85")
+ b'@UX=h+?24'
+ """
+ # encoded the input to a bytes-like object and then a85encode that
+ return base64.a85encode(string.encode("utf-8"))
+
+
+def base85_decode(a85encoded: bytes) -> str:
+ """
+ >>> base85_decode(b"")
+ ''
+ >>> base85_decode(b"0etOA2#")
+ '12345'
+ >>> base85_decode(b"@UX=h+?24")
+ 'base 85'
+ """
+ # a85decode the input into bytes and decode that into a human readable string
+ return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
- main()
+ import doctest
+
+ doctest.testmod()
diff --git a/ciphers/beaufort_cipher.py b/ciphers/beaufort_cipher.py
index 8eae847a7..788fc72b8 100644
--- a/ciphers/beaufort_cipher.py
+++ b/ciphers/beaufort_cipher.py
@@ -5,7 +5,7 @@ Author: Mohit Radadiya
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
-dict2 = {i: char for i, char in enumerate(ascii_uppercase)}
+dict2 = dict(enumerate(ascii_uppercase))
# This function generates the key in
diff --git a/ciphers/bifid.py b/ciphers/bifid.py
new file mode 100644
index 000000000..a15b38164
--- /dev/null
+++ b/ciphers/bifid.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+
+"""
+The Bifid Cipher uses a Polybius Square to encipher a message in a way that
+makes it fairly difficult to decipher without knowing the secret.
+
+https://www.braingle.com/brainteasers/codes/bifid.php
+"""
+
+import numpy as np
+
+SQUARE = [
+ ["a", "b", "c", "d", "e"],
+ ["f", "g", "h", "i", "k"],
+ ["l", "m", "n", "o", "p"],
+ ["q", "r", "s", "t", "u"],
+ ["v", "w", "x", "y", "z"],
+]
+
+
+class BifidCipher:
+ def __init__(self) -> None:
+ self.SQUARE = np.array(SQUARE)
+
+ def letter_to_numbers(self, letter: str) -> np.ndarray:
+ """
+ Return the pair of numbers that represents the given letter in the
+ polybius square
+
+ >>> np.array_equal(BifidCipher().letter_to_numbers('a'), [1,1])
+ True
+
+ >>> np.array_equal(BifidCipher().letter_to_numbers('u'), [4,5])
+ True
+ """
+ index1, index2 = np.where(letter == self.SQUARE)
+ indexes = np.concatenate([index1 + 1, index2 + 1])
+ return indexes
+
+ def numbers_to_letter(self, index1: int, index2: int) -> str:
+ """
+ Return the letter corresponding to the position [index1, index2] in
+ the polybius square
+
+ >>> BifidCipher().numbers_to_letter(4, 5) == "u"
+ True
+
+ >>> BifidCipher().numbers_to_letter(1, 1) == "a"
+ True
+ """
+ letter = self.SQUARE[index1 - 1, index2 - 1]
+ return letter
+
+ def encode(self, message: str) -> str:
+ """
+ Return the encoded version of message according to the polybius cipher
+
+ >>> BifidCipher().encode('testmessage') == 'qtltbdxrxlk'
+ True
+
+ >>> BifidCipher().encode('Test Message') == 'qtltbdxrxlk'
+ True
+
+ >>> BifidCipher().encode('test j') == BifidCipher().encode('test i')
+ True
+ """
+ message = message.lower()
+ message = message.replace(" ", "")
+ message = message.replace("j", "i")
+
+ first_step = np.empty((2, len(message)))
+ for letter_index in range(len(message)):
+ numbers = self.letter_to_numbers(message[letter_index])
+
+ first_step[0, letter_index] = numbers[0]
+ first_step[1, letter_index] = numbers[1]
+
+ second_step = first_step.reshape(2 * len(message))
+ encoded_message = ""
+ for numbers_index in range(len(message)):
+ index1 = int(second_step[numbers_index * 2])
+ index2 = int(second_step[(numbers_index * 2) + 1])
+ letter = self.numbers_to_letter(index1, index2)
+ encoded_message = encoded_message + letter
+
+ return encoded_message
+
+    def decode(self, message: str) -> str:
+        """
+        Return the decoded version of message according to the polybius cipher
+
+        >>> BifidCipher().decode('qtltbdxrxlk') == 'testmessage'
+        True
+        """
+        message = message.lower()
+        message = message.replace(" ", "")
+        first_step = np.empty(2 * len(message))
+        for letter_index in range(len(message)):
+            numbers = self.letter_to_numbers(message[letter_index])
+            first_step[letter_index * 2] = numbers[0]
+            first_step[letter_index * 2 + 1] = numbers[1]
+
+        second_step = first_step.reshape((2, len(message)))
+        decoded_message = ""
+        for numbers_index in range(len(message)):
+            index1 = int(second_step[0, numbers_index])
+            index2 = int(second_step[1, numbers_index])
+            letter = self.numbers_to_letter(index1, index2)
+            decoded_message = decoded_message + letter
+
+        return decoded_message
diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py
index 8ab6e7730..458d08db2 100644
--- a/ciphers/brute_force_caesar_cipher.py
+++ b/ciphers/brute_force_caesar_cipher.py
@@ -1,3 +1,6 @@
+import string
+
+
def decrypt(message: str) -> None:
"""
>>> decrypt('TMDETUX PMDVU')
@@ -28,16 +31,15 @@ def decrypt(message: str) -> None:
Decryption using Key #24: VOFGVWZ ROFXW
Decryption using Key #25: UNEFUVY QNEWV
"""
- LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- for key in range(len(LETTERS)):
+ for key in range(len(string.ascii_uppercase)):
translated = ""
for symbol in message:
- if symbol in LETTERS:
- num = LETTERS.find(symbol)
+ if symbol in string.ascii_uppercase:
+ num = string.ascii_uppercase.find(symbol)
num = num - key
if num < 0:
- num = num + len(LETTERS)
- translated = translated + LETTERS[num]
+ num = num + len(string.ascii_uppercase)
+ translated = translated + string.ascii_uppercase[num]
else:
translated = translated + symbol
print(f"Decryption using Key #{key}: {translated}")
diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py
index 4b2f76c7d..d19b9a337 100644
--- a/ciphers/caesar_cipher.py
+++ b/ciphers/caesar_cipher.py
@@ -1,8 +1,9 @@
+from __future__ import annotations
+
from string import ascii_letters
-from typing import Dict, Optional
-def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
+def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str:
"""
encrypt
=======
@@ -26,7 +27,7 @@ def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
=========================
The caesar cipher is named after Julius Caesar who used it when sending
secret military messages to his troops. This is a simple substitution cipher
- where very character in the plain-text is shifted by a certain number known
+ where every character in the plain-text is shifted by a certain number known
as the "key" or "shift".
Example:
@@ -80,7 +81,7 @@ def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
return result
-def decrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
+def decrypt(input_string: str, key: int, alphabet: str | None = None) -> str:
"""
decrypt
=======
@@ -145,7 +146,7 @@ def decrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str:
return encrypt(input_string, key, alphabet)
-def brute_force(input_string: str, alphabet: Optional[str] = None) -> Dict[int, str]:
+def brute_force(input_string: str, alphabet: str | None = None) -> dict[int, str]:
"""
brute_force
===========
diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py
index be8764ff3..6f15f7b73 100644
--- a/ciphers/cryptomath_module.py
+++ b/ciphers/cryptomath_module.py
@@ -6,7 +6,8 @@ def gcd(a: int, b: int) -> int:
def find_mod_inverse(a: int, m: int) -> int:
if gcd(a, m) != 1:
- raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
+ msg = f"mod inverse of {a!r} and {m!r} does not exist"
+ raise ValueError(msg)
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py
index 7e3705b8f..6c3686020 100644
--- a/ciphers/decrypt_caesar_with_chi_squared.py
+++ b/ciphers/decrypt_caesar_with_chi_squared.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
-
-from typing import Optional
+from __future__ import annotations
def decrypt_caesar_with_chi_squared(
ciphertext: str,
- cipher_alphabet: Optional[list[str]] = None,
- frequencies_dict: Optional[dict[str, float]] = None,
- case_sensetive: bool = False,
+ cipher_alphabet: list[str] | None = None,
+ frequencies_dict: dict[str, float] | None = None,
+ case_sensitive: bool = False,
) -> tuple[int, float, str]:
"""
Basic Usage
@@ -21,7 +20,7 @@ def decrypt_caesar_with_chi_squared(
* frequencies_dict (dict): a dictionary of word frequencies where keys are
the letters and values are a percentage representation of the frequency as
a decimal/float
- * case_sensetive (bool): a boolean value: True if the case matters during
+ * case_sensitive (bool): a boolean value: True if the case matters during
decryption, False if it doesn't
Returns:
@@ -118,6 +117,9 @@ def decrypt_caesar_with_chi_squared(
>>> decrypt_caesar_with_chi_squared('crybd cdbsxq')
(10, 233.35343938980898, 'short string')
+ >>> decrypt_caesar_with_chi_squared('Crybd Cdbsxq', case_sensitive=True)
+ (10, 233.35343938980898, 'Short String')
+
>>> decrypt_caesar_with_chi_squared(12)
Traceback (most recent call last):
AttributeError: 'int' object has no attribute 'lower'
@@ -159,7 +161,7 @@ def decrypt_caesar_with_chi_squared(
# Custom frequencies dictionary
frequencies = frequencies_dict
- if not case_sensetive:
+ if not case_sensitive:
ciphertext = ciphertext.lower()
# Chi squared statistic values
@@ -173,10 +175,14 @@ def decrypt_caesar_with_chi_squared(
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
- new_key = (alphabet_letters.index(letter) - shift) % len(
+ new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
alphabet_letters
)
- decrypted_with_shift += alphabet_letters[new_key]
+ decrypted_with_shift += (
+ alphabet_letters[new_key].upper()
+ if case_sensitive and letter.isupper()
+ else alphabet_letters[new_key]
+ )
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
@@ -185,10 +191,11 @@ def decrypt_caesar_with_chi_squared(
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
- if case_sensetive:
+ if case_sensitive:
+ letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
- occurrences = decrypted_with_shift.count(letter)
+ occurrences = decrypted_with_shift.lower().count(letter)
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
@@ -222,10 +229,13 @@ def decrypt_caesar_with_chi_squared(
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
- most_likely_cipher: int = min( # type: ignore
- chi_squared_statistic_values, # type: ignore
- key=chi_squared_statistic_values.get, # type: ignore
- ) # type: ignore
+ def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
+ return chi_squared_statistic_values[key]
+
+ most_likely_cipher: int = min(
+ chi_squared_statistic_values,
+ key=chi_squared_statistic_values_sorting_key,
+ )
# Get all the data from the most likely cipher (key, decoded message)
(
diff --git a/ciphers/deterministic_miller_rabin.py b/ciphers/deterministic_miller_rabin.py
index d7fcb67e9..2191caf63 100644
--- a/ciphers/deterministic_miller_rabin.py
+++ b/ciphers/deterministic_miller_rabin.py
@@ -73,7 +73,7 @@ def miller_rabin(n: int, allow_probable: bool = False) -> bool:
for prime in plist:
pr = False
for r in range(s):
- m = pow(prime, d * 2 ** r, n)
+ m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
pr = True
diff --git a/ciphers/diffie.py b/ciphers/diffie.py
index a23a8104a..4ff90be00 100644
--- a/ciphers/diffie.py
+++ b/ciphers/diffie.py
@@ -1,7 +1,7 @@
-from typing import Optional
+from __future__ import annotations
-def find_primitive(n: int) -> Optional[int]:
+def find_primitive(n: int) -> int | None:
for r in range(1, n):
li = []
for x in range(n - 1):
diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py
index 072f4aaaa..aec7fb3ea 100644
--- a/ciphers/diffie_hellman.py
+++ b/ciphers/diffie_hellman.py
@@ -10,13 +10,13 @@ primes = {
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
- + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
- + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
- + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
- + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
- + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
- + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
@@ -25,16 +25,16 @@ primes = {
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
- + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
- + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
- + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
- + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
- + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
- + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
- + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
- + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
- + "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
@@ -43,21 +43,21 @@ primes = {
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
- + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
- + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
- + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
- + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
- + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
- + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
- + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
- + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
- + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
- + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
- + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
- + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
- + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
- + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
@@ -66,27 +66,27 @@ primes = {
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
- + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
- + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
- + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
- + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
- + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
- + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
- + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
- + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
- + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
- + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
- + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
- + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
- + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
- + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
- + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
- + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
- + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
- + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
- + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
- + "FFFFFFFFFFFFFFFF",
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
@@ -95,33 +95,33 @@ primes = {
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
- + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
- + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
- + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
- + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
- + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
- + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
- + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
- + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
- + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
- + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
- + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
- + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
- + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
- + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
- + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
- + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
- + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
- + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
- + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
- + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
- + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
- + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
- + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
- + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
- + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
- + "6DCC4024FFFFFFFFFFFFFFFF",
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
@@ -130,48 +130,48 @@ primes = {
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
- + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
- + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
- + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
- + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
- + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
- + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
- + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
- + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
- + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
- + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
- + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
- + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
- + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
- + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
- + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
- + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
- + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
- + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
- + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
- + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
- + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
- + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
- + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
- + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
- + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
- + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
- + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
- + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
- + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
- + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
- + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
- + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
- + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
- + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
- + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
- + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
- + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
- + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
- + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
- + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
- + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
- + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
@@ -228,10 +228,10 @@ class DiffieHellman:
def is_valid_public_key(self, key: int) -> bool:
# check if the other public key is valid based on NIST SP800-56
- if 2 <= key and key <= self.prime - 2:
- if pow(key, (self.prime - 1) // 2, self.prime) == 1:
- return True
- return False
+ return (
+ 2 <= key <= self.prime - 2
+ and pow(key, (self.prime - 1) // 2, self.prime) == 1
+ )
def generate_shared_key(self, other_key_str: str) -> str:
other_key = int(other_key_str, base=16)
@@ -243,10 +243,10 @@ class DiffieHellman:
@staticmethod
def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
# check if the other public key is valid based on NIST SP800-56
- if 2 <= remote_public_key_str and remote_public_key_str <= prime - 2:
- if pow(remote_public_key_str, (prime - 1) // 2, prime) == 1:
- return True
- return False
+ return (
+ 2 <= remote_public_key_str <= prime - 2
+ and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
+ )
@staticmethod
def generate_shared_key_static(
diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py
index f557b0e0d..17ba55c0d 100644
--- a/ciphers/elgamal_key_generator.py
+++ b/ciphers/elgamal_key_generator.py
@@ -26,7 +26,7 @@ def primitive_root(p_val: int) -> int:
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("Generating prime p...")
- p = rabin_miller.generateLargePrime(key_size) # select large prime number.
+ p = rabin_miller.generate_large_prime(key_size) # select large prime number.
e_1 = primitive_root(p) # one primitive root on modulo p.
d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety.
e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
@@ -37,28 +37,23 @@ def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, i
return public_key, private_key
-def make_key_files(name: str, keySize: int) -> None:
- if os.path.exists("%s_pubkey.txt" % name) or os.path.exists(
- "%s_privkey.txt" % name
- ):
+def make_key_files(name: str, key_size: int) -> None:
+ if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
print("\nWARNING:")
print(
- '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
+ f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
"Use a different name or delete these files and re-run this program."
- % (name, name)
)
sys.exit()
- publicKey, privateKey = generate_key(keySize)
- print("\nWriting public key to file %s_pubkey.txt..." % name)
- with open("%s_pubkey.txt" % name, "w") as fo:
- fo.write(
- "%d,%d,%d,%d" % (publicKey[0], publicKey[1], publicKey[2], publicKey[3])
- )
+ public_key, private_key = generate_key(key_size)
+ print(f"\nWriting public key to file {name}_pubkey.txt...")
+ with open(f"{name}_pubkey.txt", "w") as fo:
+ fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
- print("Writing private key to file %s_privkey.txt..." % name)
- with open("%s_privkey.txt" % name, "w") as fo:
- fo.write("%d,%d" % (privateKey[0], privateKey[1]))
+ print(f"Writing private key to file {name}_privkey.txt...")
+ with open(f"{name}_privkey.txt", "w") as fo:
+ fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py
index f4ce5a075..ec0d44e4a 100644
--- a/ciphers/enigma_machine2.py
+++ b/ciphers/enigma_machine2.py
@@ -8,12 +8,13 @@ the famous Enigma machine from WWII.
Module includes:
- enigma function
- showcase of function usage
-- 9 randnomly generated rotors
+- 9 randomly generated rotors
- reflector (aka static rotor)
- original alphabet
Created by TrapinchO
"""
+from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
@@ -85,24 +86,21 @@ def _validator(
"""
# Checks if there are 3 unique rotors
- unique_rotsel = len(set(rotsel))
- if unique_rotsel < 3:
- raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")
+ if (unique_rotsel := len(set(rotsel))) < 3:
+ msg = f"Please use 3 unique rotors (not {unique_rotsel})"
+ raise Exception(msg)
# Checks if rotor positions are valid
rotorpos1, rotorpos2, rotorpos3 = rotpos
if not 0 < rotorpos1 <= len(abc):
- raise ValueError(
- f"First rotor position is not within range of 1..26 (" f"{rotorpos1}"
- )
+ msg = f"First rotor position is not within range of 1..26 ({rotorpos1}"
+ raise ValueError(msg)
if not 0 < rotorpos2 <= len(abc):
- raise ValueError(
- f"Second rotor position is not within range of 1..26 (" f"{rotorpos2})"
- )
+ msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
+ raise ValueError(msg)
if not 0 < rotorpos3 <= len(abc):
- raise ValueError(
- f"Third rotor position is not within range of 1..26 (" f"{rotorpos3})"
- )
+ msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
+ raise ValueError(msg)
# Validates string and returns dict
pbdict = _plugboard(pb)
@@ -130,9 +128,11 @@ def _plugboard(pbstring: str) -> dict[str, str]:
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(pbstring, str):
- raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
+ msg = f"Plugboard setting isn't type string ({type(pbstring)})"
+ raise TypeError(msg)
elif len(pbstring) % 2 != 0:
- raise Exception(f"Odd number of symbols ({len(pbstring)})")
+ msg = f"Odd number of symbols ({len(pbstring)})"
+ raise Exception(msg)
elif pbstring == "":
return {}
@@ -142,9 +142,11 @@ def _plugboard(pbstring: str) -> dict[str, str]:
tmppbl = set()
for i in pbstring:
if i not in abc:
- raise Exception(f"'{i}' not in list of symbols")
+ msg = f"'{i}' not in list of symbols"
+ raise Exception(msg)
elif i in tmppbl:
- raise Exception(f"Duplicate symbol ({i})")
+ msg = f"Duplicate symbol ({i})"
+ raise Exception(msg)
else:
tmppbl.add(i)
del tmppbl
@@ -230,7 +232,6 @@ def enigma(
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
-
# 1st plugboard --------------------------
if symbol in plugboard:
symbol = plugboard[symbol]
diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py
index bc8f5b41b..b4424e822 100644
--- a/ciphers/hill_cipher.py
+++ b/ciphers/hill_cipher.py
@@ -62,7 +62,7 @@ class HillCipher:
# take x and return x % len(key_string)
modulus = numpy.vectorize(lambda x: x % 36)
- to_int = numpy.vectorize(lambda x: round(x))
+ to_int = numpy.vectorize(round)
def __init__(self, encrypt_key: numpy.ndarray) -> None:
"""
@@ -104,10 +104,11 @@ class HillCipher:
req_l = len(self.key_string)
if greatest_common_divisor(det, len(self.key_string)) != 1:
- raise ValueError(
- f"determinant modular {req_l} of encryption key({det}) is not co prime "
- f"w.r.t {req_l}.\nTry another key."
+ msg = (
+ f"determinant modular {req_l} of encryption key({det}) "
+ f"is not co prime w.r.t {req_l}.\nTry another key."
)
+ raise ValueError(msg)
def process_text(self, text: str) -> str:
"""
@@ -201,11 +202,11 @@ class HillCipher:
def main() -> None:
- N = int(input("Enter the order of the encryption key: "))
+ n = int(input("Enter the order of the encryption key: "))
hill_matrix = []
print("Enter each row of the encryption key with space separated integers")
- for _ in range(N):
+ for _ in range(n):
row = [int(x) for x in input().split()]
hill_matrix.append(row)
diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py
index 178902173..b984808fc 100644
--- a/ciphers/mixed_keyword_cypher.py
+++ b/ciphers/mixed_keyword_cypher.py
@@ -1,7 +1,11 @@
-def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str:
- """
+from string import ascii_uppercase
- For key:hello
+
+def mixed_keyword(
+ keyword: str, plaintext: str, verbose: bool = False, alphabet: str = ascii_uppercase
+) -> str:
+ """
+ For keyword: hello
H E L O
A B C D
@@ -12,57 +16,60 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str:
Y Z
and map vertically
- >>> mixed_keyword("college", "UNIVERSITY") # doctest: +NORMALIZE_WHITESPACE
+ >>> mixed_keyword("college", "UNIVERSITY", True) # doctest: +NORMALIZE_WHITESPACE
{'A': 'C', 'B': 'A', 'C': 'I', 'D': 'P', 'E': 'U', 'F': 'Z', 'G': 'O', 'H': 'B',
'I': 'J', 'J': 'Q', 'K': 'V', 'L': 'L', 'M': 'D', 'N': 'K', 'O': 'R', 'P': 'W',
'Q': 'E', 'R': 'F', 'S': 'M', 'T': 'S', 'U': 'X', 'V': 'G', 'W': 'H', 'X': 'N',
'Y': 'T', 'Z': 'Y'}
'XKJGUFMJST'
+
+ >>> mixed_keyword("college", "UNIVERSITY", False) # doctest: +NORMALIZE_WHITESPACE
+ 'XKJGUFMJST'
"""
- key = key.upper()
- pt = pt.upper()
- temp = []
- for i in key:
- if i not in temp:
- temp.append(i)
- len_temp = len(temp)
- # print(temp)
- alpha = []
- modalpha = []
- for j in range(65, 91):
- t = chr(j)
- alpha.append(t)
- if t not in temp:
- temp.append(t)
- # print(temp)
- r = int(26 / 4)
- # print(r)
- k = 0
- for _ in range(r):
- s = []
- for j in range(len_temp):
- s.append(temp[k])
- if not (k < 25):
+ keyword = keyword.upper()
+ plaintext = plaintext.upper()
+ alphabet_set = set(alphabet)
+
+ # create a list of unique characters in the keyword - their order matters
+ # it determines how we will map plaintext characters to the ciphertext
+ unique_chars = []
+ for char in keyword:
+ if char in alphabet_set and char not in unique_chars:
+ unique_chars.append(char)
+ # the number of those unique characters will determine the number of rows
+ num_unique_chars_in_keyword = len(unique_chars)
+
+ # create a shifted version of the alphabet
+ shifted_alphabet = unique_chars + [
+ char for char in alphabet if char not in unique_chars
+ ]
+
+ # create a modified alphabet by splitting the shifted alphabet into rows
+ modified_alphabet = [
+ shifted_alphabet[k : k + num_unique_chars_in_keyword]
+ for k in range(0, 26, num_unique_chars_in_keyword)
+ ]
+
+ # map the alphabet characters to the modified alphabet characters
+ # going 'vertically' through the modified alphabet - consider columns first
+ mapping = {}
+ letter_index = 0
+ for column in range(num_unique_chars_in_keyword):
+ for row in modified_alphabet:
+ # if current row (the last one) is too short, break out of loop
+ if len(row) <= column:
break
- k += 1
- modalpha.append(s)
- # print(modalpha)
- d = {}
- j = 0
- k = 0
- for j in range(len_temp):
- for m in modalpha:
- if not (len(m) - 1 >= j):
- break
- d[alpha[k]] = m[j]
- if not k < 25:
- break
- k += 1
- print(d)
- cypher = ""
- for i in pt:
- cypher += d[i]
- return cypher
+
+ # map current letter to letter in modified alphabet
+ mapping[alphabet[letter_index]] = row[column]
+ letter_index += 1
+
+ if verbose:
+ print(mapping)
+ # create the encrypted text by mapping the plaintext to the modified alphabet
+ return "".join(mapping[char] if char in mapping else char for char in plaintext)
-print(mixed_keyword("college", "UNIVERSITY"))
+if __name__ == "__main__":
+ # example use
+ print(mixed_keyword("college", "UNIVERSITY"))
diff --git a/ciphers/morse_code.py b/ciphers/morse_code.py
new file mode 100644
index 000000000..0370c26fe
--- /dev/null
+++ b/ciphers/morse_code.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+"""
+Python program to translate to and from Morse code.
+
+https://en.wikipedia.org/wiki/Morse_code
+"""
+
+# fmt: off
+MORSE_CODE_DICT = {
+ "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
+ "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
+ "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
+ "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
+ "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
+ "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
+ ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
+ "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
+ "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
+} # Exclamation mark is not in ITU-R recommendation
+# fmt: on
+REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
+
+
+def encrypt(message: str) -> str:
+ """
+ >>> encrypt("Sos!")
+ '... --- ... -.-.--'
+ >>> encrypt("SOS!") == encrypt("sos!")
+ True
+ """
+ return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
+
+
+def decrypt(message: str) -> str:
+ """
+ >>> decrypt('... --- ... -.-.--')
+ 'SOS!'
+ """
+ return "".join(REVERSE_DICT[char] for char in message.split())
+
+
+def main() -> None:
+ """
+ >>> s = "".join(MORSE_CODE_DICT)
+ >>> decrypt(encrypt(s)) == s
+ True
+ """
+ message = "Morse code here!"
+ print(message)
+ message = encrypt(message)
+ print(message)
+ message = decrypt(message)
+ print(message)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ciphers/morse_code_implementation.py b/ciphers/morse_code_implementation.py
deleted file mode 100644
index eec4183fa..000000000
--- a/ciphers/morse_code_implementation.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Python program to implement Morse Code Translator
-
-# Dictionary representing the morse code chart
-MORSE_CODE_DICT = {
- "A": ".-",
- "B": "-...",
- "C": "-.-.",
- "D": "-..",
- "E": ".",
- "F": "..-.",
- "G": "--.",
- "H": "....",
- "I": "..",
- "J": ".---",
- "K": "-.-",
- "L": ".-..",
- "M": "--",
- "N": "-.",
- "O": "---",
- "P": ".--.",
- "Q": "--.-",
- "R": ".-.",
- "S": "...",
- "T": "-",
- "U": "..-",
- "V": "...-",
- "W": ".--",
- "X": "-..-",
- "Y": "-.--",
- "Z": "--..",
- "1": ".----",
- "2": "..---",
- "3": "...--",
- "4": "....-",
- "5": ".....",
- "6": "-....",
- "7": "--...",
- "8": "---..",
- "9": "----.",
- "0": "-----",
- "&": ".-...",
- "@": ".--.-.",
- ":": "---...",
- ",": "--..--",
- ".": ".-.-.-",
- "'": ".----.",
- '"': ".-..-.",
- "?": "..--..",
- "/": "-..-.",
- "=": "-...-",
- "+": ".-.-.",
- "-": "-....-",
- "(": "-.--.",
- ")": "-.--.-",
- # Exclamation mark is not in ITU-R recommendation
- "!": "-.-.--",
-}
-
-
-def encrypt(message: str) -> str:
- cipher = ""
- for letter in message:
- if letter != " ":
- cipher += MORSE_CODE_DICT[letter] + " "
- else:
- cipher += "/ "
-
- # Remove trailing space added on line 64
- return cipher[:-1]
-
-
-def decrypt(message: str) -> str:
- decipher = ""
- letters = message.split(" ")
- for letter in letters:
- if letter != "/":
- decipher += list(MORSE_CODE_DICT.keys())[
- list(MORSE_CODE_DICT.values()).index(letter)
- ]
- else:
- decipher += " "
-
- return decipher
-
-
-def main() -> None:
- message = "Morse code here"
- result = encrypt(message.upper())
- print(result)
-
- message = result
- result = decrypt(message)
- print(result)
-
-
-if __name__ == "__main__":
- main()
diff --git a/ciphers/onepad_cipher.py b/ciphers/onepad_cipher.py
index 3ace9b098..4bfe35b71 100644
--- a/ciphers/onepad_cipher.py
+++ b/ciphers/onepad_cipher.py
@@ -22,7 +22,7 @@ class Onepad:
for i in range(len(key)):
p = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(p))
- return "".join([i for i in plain])
+ return "".join(plain)
if __name__ == "__main__":
diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py
index 7c0ee5bd5..7279fb23e 100644
--- a/ciphers/playfair_cipher.py
+++ b/ciphers/playfair_cipher.py
@@ -1,6 +1,6 @@
import itertools
import string
-from typing import Generator, Iterable
+from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
@@ -39,7 +39,6 @@ def prepare_input(dirty: str) -> str:
def generate_table(key: str) -> list[str]:
-
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
diff --git a/ciphers/polybius.py b/ciphers/polybius.py
new file mode 100644
index 000000000..d83badf4a
--- /dev/null
+++ b/ciphers/polybius.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+
+"""
+A Polybius Square is a table that allows someone to translate letters into numbers.
+
+https://www.braingle.com/brainteasers/codes/polybius.php
+"""
+
+import numpy as np
+
+SQUARE = [
+ ["a", "b", "c", "d", "e"],
+ ["f", "g", "h", "i", "k"],
+ ["l", "m", "n", "o", "p"],
+ ["q", "r", "s", "t", "u"],
+ ["v", "w", "x", "y", "z"],
+]
+
+
+class PolybiusCipher:
+ def __init__(self) -> None:
+ self.SQUARE = np.array(SQUARE)
+
+ def letter_to_numbers(self, letter: str) -> np.ndarray:
+ """
+ Return the pair of numbers that represents the given letter in the
+ polybius square
+ >>> np.array_equal(PolybiusCipher().letter_to_numbers('a'), [1,1])
+ True
+
+ >>> np.array_equal(PolybiusCipher().letter_to_numbers('u'), [4,5])
+ True
+ """
+ index1, index2 = np.where(letter == self.SQUARE)
+ indexes = np.concatenate([index1 + 1, index2 + 1])
+ return indexes
+
+ def numbers_to_letter(self, index1: int, index2: int) -> str:
+ """
+ Return the letter corresponding to the position [index1, index2] in
+ the polybius square
+
+ >>> PolybiusCipher().numbers_to_letter(4, 5) == "u"
+ True
+
+ >>> PolybiusCipher().numbers_to_letter(1, 1) == "a"
+ True
+ """
+ return self.SQUARE[index1 - 1, index2 - 1]
+
+ def encode(self, message: str) -> str:
+ """
+ Return the encoded version of message according to the polybius cipher
+
+ >>> PolybiusCipher().encode("test message") == "44154344 32154343112215"
+ True
+
+ >>> PolybiusCipher().encode("Test Message") == "44154344 32154343112215"
+ True
+ """
+ message = message.lower()
+ message = message.replace("j", "i")
+
+ encoded_message = ""
+ for letter_index in range(len(message)):
+ if message[letter_index] != " ":
+ numbers = self.letter_to_numbers(message[letter_index])
+ encoded_message = encoded_message + str(numbers[0]) + str(numbers[1])
+ elif message[letter_index] == " ":
+ encoded_message = encoded_message + " "
+
+ return encoded_message
+
+ def decode(self, message: str) -> str:
+ """
+ Return the decoded version of message according to the polybius cipher
+
+ >>> PolybiusCipher().decode("44154344 32154343112215") == "test message"
+ True
+
+ >>> PolybiusCipher().decode("4415434432154343112215") == "testmessage"
+ True
+ """
+ message = message.replace(" ", " ")
+ decoded_message = ""
+ for numbers_index in range(int(len(message) / 2)):
+ if message[numbers_index * 2] != " ":
+ index1 = message[numbers_index * 2]
+ index2 = message[numbers_index * 2 + 1]
+
+ letter = self.numbers_to_letter(int(index1), int(index2))
+ decoded_message = decoded_message + letter
+ elif message[numbers_index * 2] == " ":
+ decoded_message = decoded_message + " "
+
+ return decoded_message
diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py
index 65c162984..410d559d4 100644
--- a/ciphers/rabin_miller.py
+++ b/ciphers/rabin_miller.py
@@ -3,7 +3,7 @@
import random
-def rabinMiller(num: int) -> bool:
+def rabin_miller(num: int) -> bool:
s = num - 1
t = 0
@@ -11,7 +11,7 @@ def rabinMiller(num: int) -> bool:
s = s // 2
t += 1
- for trials in range(5):
+ for _ in range(5):
a = random.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
@@ -21,15 +21,15 @@ def rabinMiller(num: int) -> bool:
return False
else:
i = i + 1
- v = (v ** 2) % num
+ v = (v**2) % num
return True
-def isPrime(num: int) -> bool:
+def is_prime_low_num(num: int) -> bool:
if num < 2:
return False
- lowPrimes = [
+ low_primes = [
2,
3,
5,
@@ -200,24 +200,24 @@ def isPrime(num: int) -> bool:
997,
]
- if num in lowPrimes:
+ if num in low_primes:
return True
- for prime in lowPrimes:
+ for prime in low_primes:
if (num % prime) == 0:
return False
- return rabinMiller(num)
+ return rabin_miller(num)
-def generateLargePrime(keysize: int = 1024) -> int:
+def generate_large_prime(keysize: int = 1024) -> int:
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
- if isPrime(num):
+ if is_prime_low_num(num):
return num
if __name__ == "__main__":
- num = generateLargePrime()
+ num = generate_large_prime()
print(("Prime number:", num))
- print(("isPrime:", isPrime(num)))
+ print(("is_prime_low_num:", is_prime_low_num(num)))
diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py
index cba593ca7..47ee7db89 100644
--- a/ciphers/rail_fence_cipher.py
+++ b/ciphers/rail_fence_cipher.py
@@ -72,7 +72,7 @@ def decrypt(input_string: str, key: int) -> str:
counter = 0
for row in temp_grid: # fills in the characters
splice = input_string[counter : counter + len(row)]
- grid.append([character for character in splice])
+ grid.append(list(splice))
counter += len(row)
output_string = "" # reads as zigzag
diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py
index b1e8a73f3..9c41cdc5d 100644
--- a/ciphers/rsa_cipher.py
+++ b/ciphers/rsa_cipher.py
@@ -29,20 +29,20 @@ def get_text_from_blocks(
block_message: list[str] = []
for i in range(block_size - 1, -1, -1):
if len(message) + i < message_length:
- ascii_number = block_int // (BYTE_SIZE ** i)
- block_int = block_int % (BYTE_SIZE ** i)
+ ascii_number = block_int // (BYTE_SIZE**i)
+ block_int = block_int % (BYTE_SIZE**i)
block_message.insert(0, chr(ascii_number))
message.extend(block_message)
return "".join(message)
def encrypt_message(
- message: str, key: tuple[int, int], blockSize: int = DEFAULT_BLOCK_SIZE
+ message: str, key: tuple[int, int], block_size: int = DEFAULT_BLOCK_SIZE
) -> list[int]:
encrypted_blocks = []
n, e = key
- for block in get_blocks_from_text(message, blockSize):
+ for block in get_blocks_from_text(message, block_size):
encrypted_blocks.append(pow(block, e, n))
return encrypted_blocks
@@ -63,8 +63,8 @@ def decrypt_message(
def read_key_file(key_filename: str) -> tuple[int, int, int]:
with open(key_filename) as fo:
content = fo.read()
- key_size, n, EorD = content.split(",")
- return (int(key_size), int(n), int(EorD))
+ key_size, n, eor_d = content.split(",")
+ return (int(key_size), int(n), int(eor_d))
def encrypt_and_write_to_file(
@@ -76,10 +76,11 @@ def encrypt_and_write_to_file(
key_size, n, e = read_key_file(key_filename)
if key_size < block_size * 8:
sys.exit(
- "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher "
+ "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher "
"requires the block size to be equal to or greater than the key size. "
- "Either decrease the block size or use different keys."
- % (block_size * 8, key_size)
+ "Either decrease the block size or use different keys.".format(
+ block_size * 8, key_size
+ )
)
encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)]
@@ -101,10 +102,11 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str:
if key_size < block_size * 8:
sys.exit(
- "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher "
+ "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher "
"requires the block size to be equal to or greater than the key size. "
- "Did you specify the correct key file and encrypted file?"
- % (block_size * 8, key_size)
+ "Did you specify the correct key file and encrypted file?".format(
+ block_size * 8, key_size
+ )
)
encrypted_blocks = []
@@ -125,19 +127,19 @@ def main() -> None:
if mode == "encrypt":
if not os.path.exists("rsa_pubkey.txt"):
- rkg.makeKeyFiles("rsa", 1024)
+ rkg.make_key_files("rsa", 1024)
message = input("\nEnter message: ")
pubkey_filename = "rsa_pubkey.txt"
- print("Encrypting and writing to %s..." % (filename))
- encryptedText = encrypt_and_write_to_file(filename, pubkey_filename, message)
+ print(f"Encrypting and writing to {filename}...")
+ encrypted_text = encrypt_and_write_to_file(filename, pubkey_filename, message)
print("\nEncrypted text:")
- print(encryptedText)
+ print(encrypted_text)
elif mode == "decrypt":
privkey_filename = "rsa_privkey.txt"
- print("Reading from %s and decrypting..." % (filename))
+ print(f"Reading from {filename} and decrypting...")
decrypted_text = read_from_file_and_decrypt(filename, privkey_filename)
print("writing decryption to rsa_decryption.txt...")
with open("rsa_decryption.txt", "w") as dec:
diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py
index 6df32b6cc..9ee52777e 100644
--- a/ciphers/rsa_factorization.py
+++ b/ciphers/rsa_factorization.py
@@ -13,7 +13,7 @@ import math
import random
-def rsafactor(d: int, e: int, N: int) -> list[int]:
+def rsafactor(d: int, e: int, n: int) -> list[int]:
"""
This function returns the factors of N, where p*q=N
Return: [p, q]
@@ -35,16 +35,16 @@ def rsafactor(d: int, e: int, N: int) -> list[int]:
p = 0
q = 0
while p == 0:
- g = random.randint(2, N - 1)
+ g = random.randint(2, n - 1)
t = k
while True:
if t % 2 == 0:
t = t // 2
- x = (g ** t) % N
- y = math.gcd(x - 1, N)
+ x = (g**t) % n
+ y = math.gcd(x - 1, n)
if x > 1 and y > 1:
p = y
- q = N // y
+ q = n // y
break # find the correct factors
else:
break # t is not divisible by 2, break and choose another g
diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py
index 584066d89..2573ed013 100644
--- a/ciphers/rsa_key_generator.py
+++ b/ciphers/rsa_key_generator.py
@@ -2,57 +2,54 @@ import os
import random
import sys
-from . import cryptomath_module as cryptoMath
-from . import rabin_miller as rabinMiller
+from . import cryptomath_module as cryptoMath # noqa: N812
+from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
print("Making key files...")
- makeKeyFiles("rsa", 1024)
+ make_key_files("rsa", 1024)
print("Key files generation successful.")
-def generateKey(keySize: int) -> tuple[tuple[int, int], tuple[int, int]]:
+def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
print("Generating prime p...")
- p = rabinMiller.generateLargePrime(keySize)
+ p = rabinMiller.generate_large_prime(key_size)
print("Generating prime q...")
- q = rabinMiller.generateLargePrime(keySize)
+ q = rabinMiller.generate_large_prime(key_size)
n = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
while True:
- e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
+ e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
break
print("Calculating d that is mod inverse of e...")
d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
- publicKey = (n, e)
- privateKey = (n, d)
- return (publicKey, privateKey)
+ public_key = (n, e)
+ private_key = (n, d)
+ return (public_key, private_key)
-def makeKeyFiles(name: str, keySize: int) -> None:
- if os.path.exists("%s_pubkey.txt" % (name)) or os.path.exists(
- "%s_privkey.txt" % (name)
- ):
+def make_key_files(name: str, key_size: int) -> None:
+ if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
print("\nWARNING:")
print(
- '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
+ f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
"Use a different name or delete these files and re-run this program."
- % (name, name)
)
sys.exit()
- publicKey, privateKey = generateKey(keySize)
- print("\nWriting public key to file %s_pubkey.txt..." % name)
- with open("%s_pubkey.txt" % name, "w") as out_file:
- out_file.write(f"{keySize},{publicKey[0]},{publicKey[1]}")
+ public_key, private_key = generate_key(key_size)
+ print(f"\nWriting public key to file {name}_pubkey.txt...")
+ with open(f"{name}_pubkey.txt", "w") as out_file:
+ out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
- print("Writing private key to file %s_privkey.txt..." % name)
- with open("%s_privkey.txt" % name, "w") as out_file:
- out_file.write(f"{keySize},{privateKey[0]},{privateKey[1]}")
+ print(f"Writing private key to file {name}_privkey.txt...")
+ with open(f"{name}_privkey.txt", "w") as out_file:
+ out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py
index 01d099641..08b2cab97 100644
--- a/ciphers/shuffled_shift_cipher.py
+++ b/ciphers/shuffled_shift_cipher.py
@@ -1,6 +1,7 @@
+from __future__ import annotations
+
import random
import string
-from typing import Optional
class ShuffledShiftCipher:
@@ -8,7 +9,7 @@ class ShuffledShiftCipher:
This algorithm uses the Caesar Cipher algorithm but removes the option to
use brute force to decrypt the message.
- The passcode is a a random password from the selection buffer of
+ The passcode is a random password from the selection buffer of
1. uppercase letters of the English alphabet
2. lowercase letters of the English alphabet
3. digits from 0 to 9
@@ -27,7 +28,7 @@ class ShuffledShiftCipher:
cip2 = ShuffledShiftCipher()
"""
- def __init__(self, passcode: Optional[str] = None) -> None:
+ def __init__(self, passcode: str | None = None) -> None:
"""
Initializes a cipher object with a passcode as it's entity
Note: No new passcode is generated if user provides a passcode
@@ -41,7 +42,7 @@ class ShuffledShiftCipher:
"""
:return: passcode of the cipher object
"""
- return "Passcode is: " + "".join(self.__passcode)
+ return "".join(self.__passcode)
def __neg_pos(self, iterlist: list[int]) -> list[int]:
"""
diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py
index 447bacfc2..1635471ae 100644
--- a/ciphers/simple_keyword_cypher.py
+++ b/ciphers/simple_keyword_cypher.py
@@ -21,7 +21,7 @@ def create_cipher_map(key: str) -> dict[str, str]:
:param key: keyword to use
:return: dictionary cipher map
"""
- # Create alphabet list
+ # Create a list of the letters in the alphabet
alphabet = [chr(i + 65) for i in range(26)]
# Remove duplicate characters from key
key = remove_duplicates(key.upper())
diff --git a/ciphers/simple_substitution_cipher.py b/ciphers/simple_substitution_cipher.py
index a763bd6b6..291a9bccd 100644
--- a/ciphers/simple_substitution_cipher.py
+++ b/ciphers/simple_substitution_cipher.py
@@ -9,66 +9,66 @@ def main() -> None:
key = "LFWOAYUISVKMNXPBDCRJTQEGHZ"
resp = input("Encrypt/Decrypt [e/d]: ")
- checkValidKey(key)
+ check_valid_key(key)
if resp.lower().startswith("e"):
mode = "encrypt"
- translated = encryptMessage(key, message)
+ translated = encrypt_message(key, message)
elif resp.lower().startswith("d"):
mode = "decrypt"
- translated = decryptMessage(key, message)
+ translated = decrypt_message(key, message)
print(f"\n{mode.title()}ion: \n{translated}")
-def checkValidKey(key: str) -> None:
- keyList = list(key)
- lettersList = list(LETTERS)
- keyList.sort()
- lettersList.sort()
+def check_valid_key(key: str) -> None:
+ key_list = list(key)
+ letters_list = list(LETTERS)
+ key_list.sort()
+ letters_list.sort()
- if keyList != lettersList:
+ if key_list != letters_list:
sys.exit("Error in the key or symbol set.")
-def encryptMessage(key: str, message: str) -> str:
+def encrypt_message(key: str, message: str) -> str:
"""
- >>> encryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji')
+ >>> encrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji')
'Ilcrism Olcvs'
"""
- return translateMessage(key, message, "encrypt")
+ return translate_message(key, message, "encrypt")
-def decryptMessage(key: str, message: str) -> str:
+def decrypt_message(key: str, message: str) -> str:
"""
- >>> decryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs')
+ >>> decrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs')
'Harshil Darji'
"""
- return translateMessage(key, message, "decrypt")
+ return translate_message(key, message, "decrypt")
-def translateMessage(key: str, message: str, mode: str) -> str:
+def translate_message(key: str, message: str, mode: str) -> str:
translated = ""
- charsA = LETTERS
- charsB = key
+ chars_a = LETTERS
+ chars_b = key
if mode == "decrypt":
- charsA, charsB = charsB, charsA
+ chars_a, chars_b = chars_b, chars_a
for symbol in message:
- if symbol.upper() in charsA:
- symIndex = charsA.find(symbol.upper())
+ if symbol.upper() in chars_a:
+ sym_index = chars_a.find(symbol.upper())
if symbol.isupper():
- translated += charsB[symIndex].upper()
+ translated += chars_b[sym_index].upper()
else:
- translated += charsB[symIndex].lower()
+ translated += chars_b[sym_index].lower()
else:
translated += symbol
return translated
-def getRandomKey() -> str:
+def get_random_key() -> str:
key = list(LETTERS)
random.shuffle(key)
return "".join(key)
diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py
index 1c8ea3024..108ac652f 100644
--- a/ciphers/trafid_cipher.py
+++ b/ciphers/trafid_cipher.py
@@ -1,12 +1,13 @@
# https://en.wikipedia.org/wiki/Trifid_cipher
+from __future__ import annotations
-def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str:
+def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str:
one, two, three = "", "", ""
tmp = []
- for character in messagePart:
- tmp.append(character2Number[character])
+ for character in message_part:
+ tmp.append(character_to_number[character])
for each in tmp:
one += each[0]
@@ -16,18 +17,18 @@ def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str:
return one + two + three
-def __decryptPart(
- messagePart: str, character2Number: dict[str, str]
+def __decrypt_part(
+ message_part: str, character_to_number: dict[str, str]
) -> tuple[str, str, str]:
- tmp, thisPart = "", ""
+ tmp, this_part = "", ""
result = []
- for character in messagePart:
- thisPart += character2Number[character]
+ for character in message_part:
+ this_part += character_to_number[character]
- for digit in thisPart:
+ for digit in this_part:
tmp += digit
- if len(tmp) == len(messagePart):
+ if len(tmp) == len(message_part):
result.append(tmp)
tmp = ""
@@ -78,51 +79,57 @@ def __prepare(
"332",
"333",
)
- character2Number = {}
- number2Character = {}
+ character_to_number = {}
+ number_to_character = {}
for letter, number in zip(alphabet, numbers):
- character2Number[letter] = number
- number2Character[number] = letter
+ character_to_number[letter] = number
+ number_to_character[number] = letter
- return message, alphabet, character2Number, number2Character
+ return message, alphabet, character_to_number, number_to_character
-def encryptMessage(
+def encrypt_message(
message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5
) -> str:
- message, alphabet, character2Number, number2Character = __prepare(message, alphabet)
+ message, alphabet, character_to_number, number_to_character = __prepare(
+ message, alphabet
+ )
encrypted, encrypted_numeric = "", ""
for i in range(0, len(message) + 1, period):
- encrypted_numeric += __encryptPart(message[i : i + period], character2Number)
+ encrypted_numeric += __encrypt_part(
+ message[i : i + period], character_to_number
+ )
for i in range(0, len(encrypted_numeric), 3):
- encrypted += number2Character[encrypted_numeric[i : i + 3]]
+ encrypted += number_to_character[encrypted_numeric[i : i + 3]]
return encrypted
-def decryptMessage(
+def decrypt_message(
message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5
) -> str:
- message, alphabet, character2Number, number2Character = __prepare(message, alphabet)
+ message, alphabet, character_to_number, number_to_character = __prepare(
+ message, alphabet
+ )
decrypted_numeric = []
decrypted = ""
for i in range(0, len(message) + 1, period):
- a, b, c = __decryptPart(message[i : i + period], character2Number)
+ a, b, c = __decrypt_part(message[i : i + period], character_to_number)
for j in range(0, len(a)):
decrypted_numeric.append(a[j] + b[j] + c[j])
for each in decrypted_numeric:
- decrypted += number2Character[each]
+ decrypted += number_to_character[each]
return decrypted
if __name__ == "__main__":
msg = "DEFEND THE EAST WALL OF THE CASTLE."
- encrypted = encryptMessage(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
- decrypted = decryptMessage(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
+ encrypted = encrypt_message(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
+ decrypted = decrypt_message(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
print(f"Encrypted: {encrypted}\nDecrypted: {decrypted}")
diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py
index 589bb8cb5..f1f07ddc3 100644
--- a/ciphers/transposition_cipher.py
+++ b/ciphers/transposition_cipher.py
@@ -10,57 +10,57 @@ text. The type of transposition cipher demonstrated under is the ROUTE cipher.
def main() -> None:
message = input("Enter message: ")
- key = int(input("Enter key [2-%s]: " % (len(message) - 1)))
+ key = int(input(f"Enter key [2-{len(message) - 1}]: "))
mode = input("Encryption/Decryption [e/d]: ")
if mode.lower().startswith("e"):
- text = encryptMessage(key, message)
+ text = encrypt_message(key, message)
elif mode.lower().startswith("d"):
- text = decryptMessage(key, message)
+ text = decrypt_message(key, message)
# Append pipe symbol (vertical bar) to identify spaces at the end.
- print("Output:\n%s" % (text + "|"))
+ print(f"Output:\n{text + '|'}")
-def encryptMessage(key: int, message: str) -> str:
+def encrypt_message(key: int, message: str) -> str:
"""
- >>> encryptMessage(6, 'Harshil Darji')
+ >>> encrypt_message(6, 'Harshil Darji')
'Hlia rDsahrij'
"""
- cipherText = [""] * key
+ cipher_text = [""] * key
for col in range(key):
pointer = col
while pointer < len(message):
- cipherText[col] += message[pointer]
+ cipher_text[col] += message[pointer]
pointer += key
- return "".join(cipherText)
+ return "".join(cipher_text)
-def decryptMessage(key: int, message: str) -> str:
+def decrypt_message(key: int, message: str) -> str:
"""
- >>> decryptMessage(6, 'Hlia rDsahrij')
+ >>> decrypt_message(6, 'Hlia rDsahrij')
'Harshil Darji'
"""
- numCols = math.ceil(len(message) / key)
- numRows = key
- numShadedBoxes = (numCols * numRows) - len(message)
- plainText = [""] * numCols
+ num_cols = math.ceil(len(message) / key)
+ num_rows = key
+ num_shaded_boxes = (num_cols * num_rows) - len(message)
+ plain_text = [""] * num_cols
col = 0
row = 0
for symbol in message:
- plainText[col] += symbol
+ plain_text[col] += symbol
col += 1
if (
- (col == numCols)
- or (col == numCols - 1)
- and (row >= numRows - numShadedBoxes)
+ (col == num_cols)
+ or (col == num_cols - 1)
+ and (row >= num_rows - num_shaded_boxes)
):
col = 0
row += 1
- return "".join(plainText)
+ return "".join(plain_text)
if __name__ == "__main__":
diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py
index b91c73c9f..6296b1e6d 100644
--- a/ciphers/transposition_cipher_encrypt_decrypt_file.py
+++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py
@@ -2,39 +2,39 @@ import os
import sys
import time
-from . import transposition_cipher as transCipher
+from . import transposition_cipher as trans_cipher
def main() -> None:
- inputFile = "Prehistoric Men.txt"
- outputFile = "Output.txt"
+ input_file = "Prehistoric Men.txt"
+ output_file = "Output.txt"
key = int(input("Enter key: "))
mode = input("Encrypt/Decrypt [e/d]: ")
- if not os.path.exists(inputFile):
- print("File %s does not exist. Quitting..." % inputFile)
+ if not os.path.exists(input_file):
+ print(f"File {input_file} does not exist. Quitting...")
sys.exit()
- if os.path.exists(outputFile):
- print("Overwrite %s? [y/n]" % outputFile)
+ if os.path.exists(output_file):
+ print(f"Overwrite {output_file}? [y/n]")
response = input("> ")
if not response.lower().startswith("y"):
sys.exit()
- startTime = time.time()
+ start_time = time.time()
if mode.lower().startswith("e"):
- with open(inputFile) as f:
+ with open(input_file) as f:
content = f.read()
- translated = transCipher.encryptMessage(key, content)
+ translated = trans_cipher.encrypt_message(key, content)
elif mode.lower().startswith("d"):
- with open(outputFile) as f:
+ with open(output_file) as f:
content = f.read()
- translated = transCipher.decryptMessage(key, content)
+ translated = trans_cipher.decrypt_message(key, content)
- with open(outputFile, "w") as outputObj:
- outputObj.write(translated)
+ with open(output_file, "w") as output_obj:
+ output_obj.write(translated)
- totalTime = round(time.time() - startTime, 2)
- print(("Done (", totalTime, "seconds )"))
+ total_time = round(time.time() - start_time, 2)
+ print(("Done (", total_time, "seconds )"))
if __name__ == "__main__":
diff --git a/ciphers/vigenere_cipher.py b/ciphers/vigenere_cipher.py
index d97a96949..e76161351 100644
--- a/ciphers/vigenere_cipher.py
+++ b/ciphers/vigenere_cipher.py
@@ -8,43 +8,43 @@ def main() -> None:
if mode.lower().startswith("e"):
mode = "encrypt"
- translated = encryptMessage(key, message)
+ translated = encrypt_message(key, message)
elif mode.lower().startswith("d"):
mode = "decrypt"
- translated = decryptMessage(key, message)
+ translated = decrypt_message(key, message)
- print("\n%sed message:" % mode.title())
+ print(f"\n{mode.title()}ed message:")
print(translated)
-def encryptMessage(key: str, message: str) -> str:
+def encrypt_message(key: str, message: str) -> str:
"""
- >>> encryptMessage('HDarji', 'This is Harshil Darji from Dharmaj.')
+ >>> encrypt_message('HDarji', 'This is Harshil Darji from Dharmaj.')
'Akij ra Odrjqqs Gaisq muod Mphumrs.'
"""
- return translateMessage(key, message, "encrypt")
+ return translate_message(key, message, "encrypt")
-def decryptMessage(key: str, message: str) -> str:
+def decrypt_message(key: str, message: str) -> str:
"""
- >>> decryptMessage('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.')
+ >>> decrypt_message('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.')
'This is Harshil Darji from Dharmaj.'
"""
- return translateMessage(key, message, "decrypt")
+ return translate_message(key, message, "decrypt")
-def translateMessage(key: str, message: str, mode: str) -> str:
+def translate_message(key: str, message: str, mode: str) -> str:
translated = []
- keyIndex = 0
+ key_index = 0
key = key.upper()
for symbol in message:
num = LETTERS.find(symbol.upper())
if num != -1:
if mode == "encrypt":
- num += LETTERS.find(key[keyIndex])
+ num += LETTERS.find(key[key_index])
elif mode == "decrypt":
- num -= LETTERS.find(key[keyIndex])
+ num -= LETTERS.find(key[key_index])
num %= len(LETTERS)
@@ -53,9 +53,9 @@ def translateMessage(key: str, message: str, mode: str) -> str:
elif symbol.islower():
translated.append(LETTERS[num].lower())
- keyIndex += 1
- if keyIndex == len(key):
- keyIndex = 0
+ key_index += 1
+ if key_index == len(key):
+ key_index = 0
else:
translated.append(symbol)
return "".join(translated)
diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py
index 12d580e72..0f369e38f 100644
--- a/ciphers/xor_cipher.py
+++ b/ciphers/xor_cipher.py
@@ -16,6 +16,7 @@
- encrypt_file : boolean
- decrypt_file : boolean
"""
+from __future__ import annotations
class XORCipher:
@@ -41,17 +42,10 @@ class XORCipher:
key = key or self.__key or 1
- # make sure key can be any size
- while key > 255:
- key -= 255
+ # make sure key is an appropriate size
+ key %= 255
- # This will be returned
- ans = []
-
- for ch in content:
- ans.append(chr(ord(ch) ^ key))
-
- return ans
+ return [chr(ord(ch) ^ key) for ch in content]
def decrypt(self, content: str, key: int) -> list[str]:
"""
@@ -66,17 +60,10 @@ class XORCipher:
key = key or self.__key or 1
- # make sure key can be any size
- while key > 255:
- key -= 255
+ # make sure key is an appropriate size
+ key %= 255
- # This will be returned
- ans = []
-
- for ch in content:
- ans.append(chr(ord(ch) ^ key))
-
- return ans
+ return [chr(ord(ch) ^ key) for ch in content]
def encrypt_string(self, content: str, key: int = 0) -> str:
"""
@@ -141,12 +128,10 @@ class XORCipher:
assert isinstance(file, str) and isinstance(key, int)
try:
- with open(file) as fin:
- with open("encrypt.out", "w+") as fout:
-
- # actual encrypt-process
- for line in fin:
- fout.write(self.encrypt_string(line, key))
+ with open(file) as fin, open("encrypt.out", "w+") as fout:
+ # actual encrypt-process
+ for line in fin:
+ fout.write(self.encrypt_string(line, key))
except OSError:
return False
@@ -166,12 +151,10 @@ class XORCipher:
assert isinstance(file, str) and isinstance(key, int)
try:
- with open(file) as fin:
- with open("decrypt.out", "w+") as fout:
-
- # actual encrypt-process
- for line in fin:
- fout.write(self.decrypt_string(line, key))
+ with open(file) as fin, open("decrypt.out", "w+") as fout:
+ # actual encrypt-process
+ for line in fin:
+ fout.write(self.decrypt_string(line, key))
except OSError:
return False
diff --git a/compression/README.md b/compression/README.md
new file mode 100644
index 000000000..cf54ea986
--- /dev/null
+++ b/compression/README.md
@@ -0,0 +1,10 @@
+# Compression
+
+Data compression is everywhere; you need it to store data without taking up too much space.
+Either the compression loses some data (then we talk about lossy compression, such as .jpg) or it does not (and then it is lossless compression, such as .png).
+
+Lossless compression is mainly used for archival purposes, as it allows storing data without losing any information about the archived file. On the other hand, lossy compression is used for transferring files where quality isn't necessarily required (e.g. images on Twitter).
+
+*
+*
+*
diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py
index 7d705af74..52bb045d9 100644
--- a/compression/burrows_wheeler.py
+++ b/compression/burrows_wheeler.py
@@ -12,6 +12,13 @@ of text compression algorithms, costing only some extra computation.
"""
from __future__ import annotations
+from typing import TypedDict
+
+
+class BWTTransformDict(TypedDict):
+ bwt_string: str
+ idx_original_string: int
+
def all_rotations(s: str) -> list[str]:
"""
@@ -43,7 +50,7 @@ def all_rotations(s: str) -> list[str]:
return [s[i:] + s[:i] for i in range(len(s))]
-def bwt_transform(s: str) -> dict:
+def bwt_transform(s: str) -> BWTTransformDict:
"""
:param s: The string that will be used at bwt algorithm
:return: the string composed of the last char of each row of the ordered
@@ -75,10 +82,11 @@ def bwt_transform(s: str) -> dict:
rotations = all_rotations(s)
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
- return {
+ response: BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(s),
}
+ return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
@@ -142,11 +150,11 @@ def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
raise ValueError("The parameter idx_original_string must not be lower than 0.")
if idx_original_string >= len(bwt_string):
raise ValueError(
- "The parameter idx_original_string must be lower than" " len(bwt_string)."
+ "The parameter idx_original_string must be lower than len(bwt_string)."
)
ordered_rotations = [""] * len(bwt_string)
- for x in range(len(bwt_string)):
+ for _ in range(len(bwt_string)):
for i in range(len(bwt_string)):
ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
diff --git a/compression/huffman.py b/compression/huffman.py
index b6cc4de1e..65e5c2f25 100644
--- a/compression/huffman.py
+++ b/compression/huffman.py
@@ -1,68 +1,72 @@
+from __future__ import annotations
+
import sys
class Letter:
- def __init__(self, letter, freq):
- self.letter = letter
- self.freq = freq
- self.bitstring = {}
+ def __init__(self, letter: str, freq: int):
+ self.letter: str = letter
+ self.freq: int = freq
+ self.bitstring: dict[str, str] = {}
- def __repr__(self):
+ def __repr__(self) -> str:
return f"{self.letter}:{self.freq}"
class TreeNode:
- def __init__(self, freq, left, right):
- self.freq = freq
- self.left = left
- self.right = right
+ def __init__(self, freq: int, left: Letter | TreeNode, right: Letter | TreeNode):
+ self.freq: int = freq
+ self.left: Letter | TreeNode = left
+ self.right: Letter | TreeNode = right
-def parse_file(file_path):
+def parse_file(file_path: str) -> list[Letter]:
"""
Read the file and build a dict of all letters and their
frequencies, then convert the dict into a list of Letters.
"""
- chars = {}
+ chars: dict[str, int] = {}
with open(file_path) as f:
while True:
c = f.read(1)
if not c:
break
- chars[c] = chars[c] + 1 if c in chars.keys() else 1
- return sorted([Letter(c, f) for c, f in chars.items()], key=lambda l: l.freq)
+ chars[c] = chars[c] + 1 if c in chars else 1
+ return sorted((Letter(c, f) for c, f in chars.items()), key=lambda x: x.freq)
-def build_tree(letters):
+def build_tree(letters: list[Letter]) -> Letter | TreeNode:
"""
Run through the list of Letters and build the min heap
for the Huffman Tree.
"""
- while len(letters) > 1:
- left = letters.pop(0)
- right = letters.pop(0)
+ response: list[Letter | TreeNode] = letters # type: ignore
+ while len(response) > 1:
+ left = response.pop(0)
+ right = response.pop(0)
total_freq = left.freq + right.freq
node = TreeNode(total_freq, left, right)
- letters.append(node)
- letters.sort(key=lambda l: l.freq)
- return letters[0]
+ response.append(node)
+ response.sort(key=lambda x: x.freq)
+ return response[0]
-def traverse_tree(root, bitstring):
+def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]:
"""
Recursively traverse the Huffman Tree to set each
Letter's bitstring dictionary, and return the list of Letters
"""
- if type(root) is Letter:
+ if isinstance(root, Letter):
root.bitstring[root.letter] = bitstring
return [root]
+ treenode: TreeNode = root # type: ignore
letters = []
- letters += traverse_tree(root.left, bitstring + "0")
- letters += traverse_tree(root.right, bitstring + "1")
+ letters += traverse_tree(treenode.left, bitstring + "0")
+ letters += traverse_tree(treenode.right, bitstring + "1")
return letters
-def huffman(file_path):
+def huffman(file_path: str) -> None:
"""
Parse the file, build the tree, then run through the file
again, using the letters dictionary to find and print out the
diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py
index 6743dc42d..ea6f33944 100644
--- a/compression/lempel_ziv.py
+++ b/compression/lempel_ziv.py
@@ -26,7 +26,7 @@ def read_file_binary(file_path: str) -> str:
def add_key_to_lexicon(
- lexicon: dict, curr_string: str, index: int, last_match_id: str
+ lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
"""
Adds new strings (curr_string + "0", curr_string + "1") to the lexicon
@@ -110,7 +110,7 @@ def write_file_binary(file_path: str, to_write: str) -> None:
sys.exit()
-def compress(source_path, destination_path: str) -> None:
+def compress(source_path: str, destination_path: str) -> None:
"""
Reads source file, compresses it and writes the compressed result in destination
file
diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py
index 4d3c2c0d2..ddedc3d6d 100644
--- a/compression/lempel_ziv_decompress.py
+++ b/compression/lempel_ziv_decompress.py
@@ -43,10 +43,10 @@ def decompress_data(data_bits: str) -> str:
lexicon[curr_string] = last_match_id + "0"
if math.log2(index).is_integer():
- newLex = {}
+ new_lex = {}
for curr_key in list(lexicon):
- newLex["0" + curr_key] = lexicon.pop(curr_key)
- lexicon = newLex
+ new_lex["0" + curr_key] = lexicon.pop(curr_key)
+ lexicon = new_lex
lexicon[bin(index)[2:]] = last_match_id + "1"
index += 1
diff --git a/compression/lz77.py b/compression/lz77.py
new file mode 100644
index 000000000..1b201c59f
--- /dev/null
+++ b/compression/lz77.py
@@ -0,0 +1,226 @@
+"""
+LZ77 compression algorithm
+- lossless data compression published in papers by Abraham Lempel and Jacob Ziv in 1977
+- also known as LZ1 or sliding-window compression
+- form the basis for many variations including LZW, LZSS, LZMA and others
+
+It uses a “sliding window” method. Within the sliding window we have:
+ - search buffer
+ - look ahead buffer
+len(sliding_window) = len(search_buffer) + len(look_ahead_buffer)
+
+LZ77 manages a dictionary that uses triples composed of:
+ - Offset into search buffer, it's the distance between the start of a phrase and
+ the beginning of a file.
+ - Length of the match, it's the number of characters that make up a phrase.
+ - The indicator is represented by a character that is going to be encoded next.
+
+As a file is parsed, the dictionary is dynamically updated to reflect the compressed
+data contents and size.
+
+Examples:
+"cabracadabrarrarrad" <-> [(0, 0, 'c'), (0, 0, 'a'), (0, 0, 'b'), (0, 0, 'r'),
+ (3, 1, 'c'), (2, 1, 'd'), (7, 4, 'r'), (3, 5, 'd')]
+"ababcbababaa" <-> [(0, 0, 'a'), (0, 0, 'b'), (2, 2, 'c'), (4, 3, 'a'), (2, 2, 'a')]
+"aacaacabcabaaac" <-> [(0, 0, 'a'), (1, 1, 'c'), (3, 4, 'b'), (3, 3, 'a'), (1, 2, 'c')]
+
+Sources:
+en.wikipedia.org/wiki/LZ77_and_LZ78
+"""
+
+
+from dataclasses import dataclass
+
+__version__ = "0.1"
+__author__ = "Lucia Harcekova"
+
+
+@dataclass
+class Token:
+ """
+ Dataclass representing triplet called token consisting of length, offset
+ and indicator. This triplet is used during LZ77 compression.
+ """
+
+ offset: int
+ length: int
+ indicator: str
+
+ def __repr__(self) -> str:
+ """
+ >>> token = Token(1, 2, "c")
+ >>> repr(token)
+ '(1, 2, c)'
+ >>> str(token)
+ '(1, 2, c)'
+ """
+ return f"({self.offset}, {self.length}, {self.indicator})"
+
+
+class LZ77Compressor:
+ """
+ Class containing compress and decompress methods using LZ77 compression algorithm.
+ """
+
+ def __init__(self, window_size: int = 13, lookahead_buffer_size: int = 6) -> None:
+ self.window_size = window_size
+ self.lookahead_buffer_size = lookahead_buffer_size
+ self.search_buffer_size = self.window_size - self.lookahead_buffer_size
+
+ def compress(self, text: str) -> list[Token]:
+ """
+ Compress the given string text using LZ77 compression algorithm.
+
+ Args:
+ text: string to be compressed
+
+ Returns:
+ output: the compressed text as a list of Tokens
+
+ >>> lz77_compressor = LZ77Compressor()
+ >>> str(lz77_compressor.compress("ababcbababaa"))
+ '[(0, 0, a), (0, 0, b), (2, 2, c), (4, 3, a), (2, 2, a)]'
+ >>> str(lz77_compressor.compress("aacaacabcabaaac"))
+ '[(0, 0, a), (1, 1, c), (3, 4, b), (3, 3, a), (1, 2, c)]'
+ """
+
+ output = []
+ search_buffer = ""
+
+ # while there are still characters in text to compress
+ while text:
+ # find the next encoding phrase
+ # - triplet with offset, length, indicator (the next encoding character)
+ token = self._find_encoding_token(text, search_buffer)
+
+ # update the search buffer:
+ # - add new characters from text into it
+ # - check if size exceed the max search buffer size, if so, drop the
+ # oldest elements
+ search_buffer += text[: token.length + 1]
+ if len(search_buffer) > self.search_buffer_size:
+ search_buffer = search_buffer[-self.search_buffer_size :]
+
+ # update the text
+ text = text[token.length + 1 :]
+
+ # append the token to output
+ output.append(token)
+
+ return output
+
+ def decompress(self, tokens: list[Token]) -> str:
+ """
+ Convert the list of tokens into an output string.
+
+ Args:
+ tokens: list containing triplets (offset, length, char)
+
+ Returns:
+ output: decompressed text
+
+ Tests:
+ >>> lz77_compressor = LZ77Compressor()
+ >>> lz77_compressor.decompress([Token(0, 0, 'c'), Token(0, 0, 'a'),
+ ... Token(0, 0, 'b'), Token(0, 0, 'r'), Token(3, 1, 'c'),
+ ... Token(2, 1, 'd'), Token(7, 4, 'r'), Token(3, 5, 'd')])
+ 'cabracadabrarrarrad'
+ >>> lz77_compressor.decompress([Token(0, 0, 'a'), Token(0, 0, 'b'),
+ ... Token(2, 2, 'c'), Token(4, 3, 'a'), Token(2, 2, 'a')])
+ 'ababcbababaa'
+ >>> lz77_compressor.decompress([Token(0, 0, 'a'), Token(1, 1, 'c'),
+ ... Token(3, 4, 'b'), Token(3, 3, 'a'), Token(1, 2, 'c')])
+ 'aacaacabcabaaac'
+ """
+
+ output = ""
+
+ for token in tokens:
+ for _ in range(token.length):
+ output += output[-token.offset]
+ output += token.indicator
+
+ return output
+
+ def _find_encoding_token(self, text: str, search_buffer: str) -> Token:
+ """Finds the encoding token for the first character in the text.
+
+ Tests:
+ >>> lz77_compressor = LZ77Compressor()
+ >>> lz77_compressor._find_encoding_token("abrarrarrad", "abracad").offset
+ 7
+ >>> lz77_compressor._find_encoding_token("adabrarrarrad", "cabrac").length
+ 1
+ >>> lz77_compressor._find_encoding_token("abc", "xyz").offset
+ 0
+ >>> lz77_compressor._find_encoding_token("", "xyz").offset
+ Traceback (most recent call last):
+ ...
+ ValueError: We need some text to work with.
+ >>> lz77_compressor._find_encoding_token("abc", "").offset
+ 0
+ """
+
+ if not text:
+ raise ValueError("We need some text to work with.")
+
+ # Initialise result parameters to default values
+ length, offset = 0, 0
+
+ if not search_buffer:
+ return Token(offset, length, text[length])
+
+ for i, character in enumerate(search_buffer):
+ found_offset = len(search_buffer) - i
+ if character == text[0]:
+ found_length = self._match_length_from_index(text, search_buffer, 0, i)
+ # if the found length is bigger than the current or if it's equal,
+ # which means its offset is smaller: update offset and length
+ if found_length >= length:
+ offset, length = found_offset, found_length
+
+ return Token(offset, length, text[length])
+
+ def _match_length_from_index(
+ self, text: str, window: str, text_index: int, window_index: int
+ ) -> int:
+ """Calculate the longest possible match of text and window characters from
+ text_index in text and window_index in window.
+
+ Args:
+ text: _description_
+ window: sliding window
+ text_index: index of character in text
+ window_index: index of character in sliding window
+
+ Returns:
+ The maximum match between text and window, from given indexes.
+
+ Tests:
+ >>> lz77_compressor = LZ77Compressor(13, 6)
+ >>> lz77_compressor._match_length_from_index("rarrad", "adabrar", 0, 4)
+ 5
+ >>> lz77_compressor._match_length_from_index("adabrarrarrad",
+ ... "cabrac", 0, 1)
+ 1
+ """
+ if not text or text[text_index] != window[window_index]:
+ return 0
+ return 1 + self._match_length_from_index(
+ text, window + text[text_index], text_index + 1, window_index + 1
+ )
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
+ # Initialize compressor class
+ lz77_compressor = LZ77Compressor(window_size=13, lookahead_buffer_size=6)
+
+ # Example
+ TEXT = "cabracadabrarrarrad"
+ compressed_text = lz77_compressor.compress(TEXT)
+ print(lz77_compressor.compress("ababcbababaa"))
+ decompressed_text = lz77_compressor.decompress(compressed_text)
+ assert decompressed_text == TEXT, "The LZ77 algorithm returned the invalid result."
diff --git a/compression/peak_signal_to_noise_ratio.py b/compression/peak_signal_to_noise_ratio.py
index 6c6c4c38a..284f2904a 100644
--- a/compression/peak_signal_to_noise_ratio.py
+++ b/compression/peak_signal_to_noise_ratio.py
@@ -11,17 +11,18 @@ import os
import cv2
import numpy as np
+PIXEL_MAX = 255.0
-def psnr(original, contrast):
+
+def peak_signal_to_noise_ratio(original: float, contrast: float) -> float:
mse = np.mean((original - contrast) ** 2)
if mse == 0:
return 100
- PIXEL_MAX = 255.0
- PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
- return PSNR
+
+ return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
-def main():
+def main() -> None:
dir_path = os.path.dirname(os.path.realpath(__file__))
# Loading images (original image and compressed image)
original = cv2.imread(os.path.join(dir_path, "image_data/original_image.png"))
@@ -34,11 +35,11 @@ def main():
# Value expected: 29.73dB
print("-- First Test --")
- print(f"PSNR value is {psnr(original, contrast)} dB")
+ print(f"PSNR value is {peak_signal_to_noise_ratio(original, contrast)} dB")
# # Value expected: 31.53dB (Wikipedia Example)
print("\n-- Second Test --")
- print(f"PSNR value is {psnr(original2, contrast2)} dB")
+ print(f"PSNR value is {peak_signal_to_noise_ratio(original2, contrast2)} dB")
if __name__ == "__main__":
diff --git a/compression/run_length_encoding.py b/compression/run_length_encoding.py
new file mode 100644
index 000000000..691e19095
--- /dev/null
+++ b/compression/run_length_encoding.py
@@ -0,0 +1,48 @@
+# https://en.wikipedia.org/wiki/Run-length_encoding
+
+
+def run_length_encode(text: str) -> list:
+ """
+ Performs Run Length Encoding
+ >>> run_length_encode("AAAABBBCCDAA")
+ [('A', 4), ('B', 3), ('C', 2), ('D', 1), ('A', 2)]
+ >>> run_length_encode("A")
+ [('A', 1)]
+ >>> run_length_encode("AA")
+ [('A', 2)]
+ >>> run_length_encode("AAADDDDDDFFFCCCAAVVVV")
+ [('A', 3), ('D', 6), ('F', 3), ('C', 3), ('A', 2), ('V', 4)]
+ """
+ encoded = []
+ count = 1
+
+ for i in range(len(text)):
+ if i + 1 < len(text) and text[i] == text[i + 1]:
+ count += 1
+ else:
+ encoded.append((text[i], count))
+ count = 1
+
+ return encoded
+
+
+def run_length_decode(encoded: list) -> str:
+ """
+ Performs Run Length Decoding
+ >>> run_length_decode([('A', 4), ('B', 3), ('C', 2), ('D', 1), ('A', 2)])
+ 'AAAABBBCCDAA'
+ >>> run_length_decode([('A', 1)])
+ 'A'
+ >>> run_length_decode([('A', 2)])
+ 'AA'
+ >>> run_length_decode([('A', 3), ('D', 6), ('F', 3), ('C', 3), ('A', 2), ('V', 4)])
+ 'AAADDDDDDFFFCCCAAVVVV'
+ """
+ return "".join(char * length for char, length in encoded)
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod(name="run_length_encode", verbose=True)
+ testmod(name="run_length_decode", verbose=True)
diff --git a/computer_vision/README.md b/computer_vision/README.md
index 94ee49308..8d2f4a130 100644
--- a/computer_vision/README.md
+++ b/computer_vision/README.md
@@ -1,7 +1,11 @@
-### Computer Vision
+# Computer Vision
+
+Computer vision is a field of computer science that works on enabling computers to see, identify and process images in the same way that humans do, and provide appropriate output.
-Computer vision is a field of computer science that works on enabling computers to see,
-identify and process images in the same way that human vision does, and then provide appropriate output.
It is like imparting human intelligence and instincts to a computer.
Image processing and computer vision are a little different from each other. Image processing means applying some algorithms for transforming image from one form to the other like smoothing, contrasting, stretching, etc.
+
While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision).
+
+*
+*
diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py
index 6d4f19639..9b5f8c95e 100644
--- a/computer_vision/cnn_classification.py
+++ b/computer_vision/cnn_classification.py
@@ -28,11 +28,13 @@ import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
-
# Initialising the CNN
+ # (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
+ # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
+ # (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
@@ -91,7 +93,7 @@ if __name__ == "__main__":
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
- training_set.class_indices
+ # training_set.class_indices
if result[0][0] == 0:
prediction = "Normal"
if result[0][0] == 1:
diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py
new file mode 100644
index 000000000..93b4e3f6d
--- /dev/null
+++ b/computer_vision/flip_augmentation.py
@@ -0,0 +1,128 @@
+import glob
+import os
+import random
+from string import ascii_lowercase, digits
+
+import cv2
+
+"""
+Flip image and bounding box for computer vision task
+https://paperswithcode.com/method/randomhorizontalflip
+"""
+
+# Params
+LABEL_DIR = ""
+IMAGE_DIR = ""
+OUTPUT_DIR = ""
+FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal)
+
+
+def main() -> None:
+ """
+ Get images list and annotations list from input dir.
+ Update new images and annotations.
+ Save images and annotations in output dir.
+ """
+ img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
+ print("Processing...")
+ new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
+
+ for index, image in enumerate(new_images):
+ # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
+ letter_code = random_chars(32)
+ file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
+ file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
+ cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
+ print(f"Success {index+1}/{len(new_images)} with {file_name}")
+ annos_list = []
+ for anno in new_annos[index]:
+ obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
+ annos_list.append(obj)
+ with open(f"/{file_root}.txt", "w") as outfile:
+ outfile.write("\n".join(line for line in annos_list))
+
+
+def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
+ """
+ - label_dir : Path to label include annotation of images
+ - img_dir : Path to folder contain images
+ Return : List of images path and labels
+ """
+ img_paths = []
+ labels = []
+ for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
+ label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
+ with open(label_file) as in_file:
+ obj_lists = in_file.readlines()
+ img_path = os.path.join(img_dir, f"{label_name}.jpg")
+
+ boxes = []
+ for obj_list in obj_lists:
+ obj = obj_list.rstrip("\n").split(" ")
+ boxes.append(
+ [
+ int(obj[0]),
+ float(obj[1]),
+ float(obj[2]),
+ float(obj[3]),
+ float(obj[4]),
+ ]
+ )
+ if not boxes:
+ continue
+ img_paths.append(img_path)
+ labels.append(boxes)
+ return img_paths, labels
+
+
+def update_image_and_anno(
+ img_list: list, anno_list: list, flip_type: int = 1
+) -> tuple[list, list, list]:
+ """
+ - img_list : list of all images
+ - anno_list : list of all annotations of specific image
+ - flip_type : 0 is vertical, 1 is horizontal
+ Return:
+ - new_imgs_list : image after resize
+ - new_annos_lists : list of new annotation after scale
+ - path_list : list the name of image file
+ """
+ new_annos_lists = []
+ path_list = []
+ new_imgs_list = []
+ for idx in range(len(img_list)):
+ new_annos = []
+ path = img_list[idx]
+ path_list.append(path)
+ img_annos = anno_list[idx]
+ img = cv2.imread(path)
+ if flip_type == 1:
+ new_img = cv2.flip(img, flip_type)
+ for bbox in img_annos:
+ x_center_new = 1 - bbox[1]
+ new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
+ elif flip_type == 0:
+ new_img = cv2.flip(img, flip_type)
+ for bbox in img_annos:
+ y_center_new = 1 - bbox[2]
+ new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
+ new_annos_lists.append(new_annos)
+ new_imgs_list.append(new_img)
+ return new_imgs_list, new_annos_lists, path_list
+
+
+def random_chars(number_char: int = 32) -> str:
+ """
+ Automatically generate a random string of 32 characters.
+ Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
+ >>> len(random_chars(32))
+ 32
+ """
+ assert number_char > 1, "The number of character should greater than 1"
+ letter_code = ascii_lowercase + digits
+ return "".join(random.choice(letter_code) for _ in range(number_char))
+
+
+if __name__ == "__main__":
+ main()
+ print("DONE ✅")
diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py
index fb7f560f7..0cc7522bc 100644
--- a/computer_vision/harris_corner.py
+++ b/computer_vision/harris_corner.py
@@ -7,9 +7,8 @@ https://en.wikipedia.org/wiki/Harris_Corner_Detector
"""
-class Harris_Corner:
+class HarrisCorner:
def __init__(self, k: float, window_size: int):
-
"""
k : is an empirically determined constant in [0.04,0.06]
window_size : neighbourhoods considered
@@ -21,12 +20,10 @@ class Harris_Corner:
else:
raise ValueError("invalid k value")
- def __str__(self):
-
- return f"Harris Corner detection with k : {self.k}"
-
- def detect(self, img_path: str):
+ def __str__(self) -> str:
+ return str(self.k)
+ def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
"""
Returns the image with corners identified
img_path : path of the image
@@ -35,12 +32,12 @@ class Harris_Corner:
img = cv2.imread(img_path, 0)
h, w = img.shape
- corner_list = []
+ corner_list: list[list[int]] = []
color_img = img.copy()
color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
dy, dx = np.gradient(img)
- ixx = dx ** 2
- iyy = dy ** 2
+ ixx = dx**2
+ iyy = dy**2
ixy = dx * dy
k = 0.04
offset = self.window_size // 2
@@ -56,9 +53,9 @@ class Harris_Corner:
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
- det = (wxx * wyy) - (wxy ** 2)
+ det = (wxx * wyy) - (wxy**2)
trace = wxx + wyy
- r = det - k * (trace ** 2)
+ r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
@@ -69,7 +66,6 @@ class Harris_Corner:
if __name__ == "__main__":
-
- edge_detect = Harris_Corner(0.04, 3)
+ edge_detect = HarrisCorner(0.04, 3)
color_img, _ = edge_detect.detect("path_to_image")
cv2.imwrite("detect.png", color_img)
diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py
new file mode 100644
index 000000000..b63e02682
--- /dev/null
+++ b/computer_vision/horn_schunck.py
@@ -0,0 +1,131 @@
+"""
+ The Horn-Schunck method estimates the optical flow for every single pixel of
+ a sequence of images.
+ It works by assuming brightness constancy between two consecutive frames
+ and smoothness in the optical flow.
+
+ Useful resources:
+ Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method
+ Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf
+"""
+
+from typing import SupportsIndex
+
+import numpy as np
+from scipy.ndimage import convolve
+
+
+def warp(
+ image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray
+) -> np.ndarray:
+ """
+ Warps the pixels of an image into a new image using the horizontal and vertical
+ flows.
+ Pixels that are warped from an invalid location are set to 0.
+
+ Parameters:
+ image: Grayscale image
+ horizontal_flow: Horizontal flow
+ vertical_flow: Vertical flow
+
+ Returns: Warped image
+
+ >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \
+ np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \
+ np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]))
+ array([[0, 0, 0],
+ [3, 1, 0],
+ [0, 2, 3]])
+ """
+ flow = np.stack((horizontal_flow, vertical_flow), 2)
+
+ # Create a grid of all pixel coordinates and subtract the flow to get the
+ # target pixels coordinates
+ grid = np.stack(
+ np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2
+ )
+ grid = np.round(grid - flow).astype(np.int32)
+
+ # Find the locations outside of the original image
+ invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]]))
+ grid[invalid] = 0
+
+ warped = image[grid[:, :, 1], grid[:, :, 0]]
+
+ # Set pixels at invalid locations to 0
+ warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0
+
+ return warped
+
+
+def horn_schunck(
+ image0: np.ndarray,
+ image1: np.ndarray,
+ num_iter: SupportsIndex,
+ alpha: float | None = None,
+) -> tuple[np.ndarray, np.ndarray]:
+ """
+ This function performs the Horn-Schunck algorithm and returns the estimated
+ optical flow. It is assumed that the input images are grayscale and
+ normalized to be in [0, 1].
+
+ Parameters:
+ image0: First image of the sequence
+ image1: Second image of the sequence
+ alpha: Regularization constant
+ num_iter: Number of iterations performed
+
+ Returns: estimated horizontal & vertical flow
+
+ >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \
+ np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\
+ astype(np.int32)
+ array([[[ 0, -1, -1],
+ [ 0, -1, -1]],
+
+ [[ 0, 0, 0],
+ [ 0, 0, 0]]], dtype=int32)
+ """
+ if alpha is None:
+ alpha = 0.1
+
+ # Initialize flow
+ horizontal_flow = np.zeros_like(image0)
+ vertical_flow = np.zeros_like(image0)
+
+ # Prepare kernels for the calculation of the derivatives and the average velocity
+ kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25
+ kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25
+ kernel_t = np.array([[1, 1], [1, 1]]) * 0.25
+ kernel_laplacian = np.array(
+ [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]]
+ )
+
+ # Iteratively refine the flow
+ for _ in range(num_iter):
+ warped_image = warp(image0, horizontal_flow, vertical_flow)
+ derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x)
+ derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y)
+ derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t)
+
+ avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian)
+ avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian)
+
+ # This updates the flow as proposed in the paper (Step 12)
+ update = (
+ derivative_x * avg_horizontal_velocity
+ + derivative_y * avg_vertical_velocity
+ + derivative_t
+ )
+ update = update / (alpha**2 + derivative_x**2 + derivative_y**2)
+
+ horizontal_flow = avg_horizontal_velocity - derivative_x * update
+ vertical_flow = avg_vertical_velocity - derivative_y * update
+
+ return horizontal_flow, vertical_flow
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py
new file mode 100644
index 000000000..c150126d6
--- /dev/null
+++ b/computer_vision/mosaic_augmentation.py
@@ -0,0 +1,186 @@
+"""Source: https://github.com/jason9075/opencv-mosaic-data-aug"""
+
+import glob
+import os
+import random
+from string import ascii_lowercase, digits
+
+import cv2
+import numpy as np
+
+# Parameters
+OUTPUT_SIZE = (720, 1280) # Height, Width
+SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
+FILTER_TINY_SCALE = 1 / 100
+LABEL_DIR = ""
+IMG_DIR = ""
+OUTPUT_DIR = ""
+NUMBER_IMAGES = 250
+
+
+def main() -> None:
+ """
+ Get images list and annotations list from input dir.
+ Update new images and annotations.
+ Save images and annotations in output dir.
+ """
+ img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
+ for index in range(NUMBER_IMAGES):
+ idxs = random.sample(range(len(annos)), 4)
+ new_image, new_annos, path = update_image_and_anno(
+ img_paths,
+ annos,
+ idxs,
+ OUTPUT_SIZE,
+ SCALE_RANGE,
+ filter_scale=FILTER_TINY_SCALE,
+ )
+
+ # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
+ letter_code = random_chars(32)
+ file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
+ file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
+ cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
+ print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
+ annos_list = []
+ for anno in new_annos:
+ width = anno[3] - anno[1]
+ height = anno[4] - anno[2]
+ x_center = anno[1] + width / 2
+ y_center = anno[2] + height / 2
+ obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
+ annos_list.append(obj)
+ with open(f"{file_root}.txt", "w") as outfile:
+ outfile.write("\n".join(line for line in annos_list))
+
+
+def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
+ """
+ - label_dir : Path to label include annotation of images
+ - img_dir : Path to folder contain images
+ Return : List of images path and labels
+ """
+ img_paths = []
+ labels = []
+ for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
+ label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
+ with open(label_file) as in_file:
+ obj_lists = in_file.readlines()
+ img_path = os.path.join(img_dir, f"{label_name}.jpg")
+
+ boxes = []
+ for obj_list in obj_lists:
+ obj = obj_list.rstrip("\n").split(" ")
+ xmin = float(obj[1]) - float(obj[3]) / 2
+ ymin = float(obj[2]) - float(obj[4]) / 2
+ xmax = float(obj[1]) + float(obj[3]) / 2
+ ymax = float(obj[2]) + float(obj[4]) / 2
+
+ boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
+ if not boxes:
+ continue
+ img_paths.append(img_path)
+ labels.append(boxes)
+ return img_paths, labels
+
+
+def update_image_and_anno(
+ all_img_list: list,
+ all_annos: list,
+ idxs: list[int],
+ output_size: tuple[int, int],
+ scale_range: tuple[float, float],
+ filter_scale: float = 0.0,
+) -> tuple[list, list, str]:
+ """
+ - all_img_list : list of all images
+ - all_annos : list of all annotations of specific image
+ - idxs : index of image in list
+ - output_size : size of output image (Height, Width)
+ - scale_range : range of scale image
+ - filter_scale : the condition of downscale image and bounding box
+ Return:
+ - output_img : image after resize
+ - new_anno : list of new annotation after scale
+ - path[0] : get the name of image file
+ """
+ output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
+ scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
+ scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
+ divid_point_x = int(scale_x * output_size[1])
+ divid_point_y = int(scale_y * output_size[0])
+
+ new_anno = []
+ path_list = []
+ for i, index in enumerate(idxs):
+ path = all_img_list[index]
+ path_list.append(path)
+ img_annos = all_annos[index]
+ img = cv2.imread(path)
+ if i == 0: # top-left
+ img = cv2.resize(img, (divid_point_x, divid_point_y))
+ output_img[:divid_point_y, :divid_point_x, :] = img
+ for bbox in img_annos:
+ xmin = bbox[1] * scale_x
+ ymin = bbox[2] * scale_y
+ xmax = bbox[3] * scale_x
+ ymax = bbox[4] * scale_y
+ new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
+ elif i == 1: # top-right
+ img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
+ output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
+ for bbox in img_annos:
+ xmin = scale_x + bbox[1] * (1 - scale_x)
+ ymin = bbox[2] * scale_y
+ xmax = scale_x + bbox[3] * (1 - scale_x)
+ ymax = bbox[4] * scale_y
+ new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
+ elif i == 2: # bottom-left
+ img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
+ output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
+ for bbox in img_annos:
+ xmin = bbox[1] * scale_x
+ ymin = scale_y + bbox[2] * (1 - scale_y)
+ xmax = bbox[3] * scale_x
+ ymax = scale_y + bbox[4] * (1 - scale_y)
+ new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
+ else: # bottom-right
+ img = cv2.resize(
+ img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
+ )
+ output_img[
+ divid_point_y : output_size[0], divid_point_x : output_size[1], :
+ ] = img
+ for bbox in img_annos:
+ xmin = scale_x + bbox[1] * (1 - scale_x)
+ ymin = scale_y + bbox[2] * (1 - scale_y)
+ xmax = scale_x + bbox[3] * (1 - scale_x)
+ ymax = scale_y + bbox[4] * (1 - scale_y)
+ new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
+
+ # Remove bounding box small than scale of filter
+ if filter_scale > 0:
+ new_anno = [
+ anno
+ for anno in new_anno
+ if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
+ ]
+
+ return output_img, new_anno, path_list[0]
+
+
+def random_chars(number_char: int) -> str:
+ """
+ Automatically generate a random string of 32 characters.
+ Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
+ >>> len(random_chars(32))
+ 32
+ """
+ assert number_char > 1, "The number of character should greater than 1"
+ letter_code = ascii_lowercase + digits
+ return "".join(random.choice(letter_code) for _ in range(number_char))
+
+
+if __name__ == "__main__":
+ main()
+ print("DONE ✅")
diff --git a/computer_vision/pooling_functions.py b/computer_vision/pooling_functions.py
new file mode 100644
index 000000000..09beabcba
--- /dev/null
+++ b/computer_vision/pooling_functions.py
@@ -0,0 +1,135 @@
+# Source : https://computersciencewiki.org/index.php/Max-pooling_/_Pooling
+# Importing the libraries
+import numpy as np
+from PIL import Image
+
+
+# Maxpooling Function
+def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
+ """
+ This function is used to perform maxpooling on the input 2D matrix (image)
+ Args:
+ arr: numpy array
+ size: size of pooling matrix
+ stride: the number of pixels shifts over the input matrix
+ Returns:
+ numpy array of maxpooled matrix
+ Sample Input Output:
+ >>> maxpooling([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], 2, 2)
+ array([[ 6., 8.],
+ [14., 16.]])
+ >>> maxpooling([[147, 180, 122],[241, 76, 32],[126, 13, 157]], 2, 1)
+ array([[241., 180.],
+ [241., 157.]])
+ """
+ arr = np.array(arr)
+ if arr.shape[0] != arr.shape[1]:
+ raise ValueError("The input array is not a square matrix")
+ i = 0
+ j = 0
+ mat_i = 0
+ mat_j = 0
+
+ # compute the shape of the output matrix
+ maxpool_shape = (arr.shape[0] - size) // stride + 1
+ # initialize the output matrix with zeros of shape maxpool_shape
+ updated_arr = np.zeros((maxpool_shape, maxpool_shape))
+
+ while i < arr.shape[0]:
+ if i + size > arr.shape[0]:
+ # if the end of the matrix is reached, break
+ break
+ while j < arr.shape[1]:
+ # if the end of the matrix is reached, break
+ if j + size > arr.shape[1]:
+ break
+ # compute the maximum of the pooling matrix
+ updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
+ # shift the pooling matrix by stride of column pixels
+ j += stride
+ mat_j += 1
+
+ # shift the pooling matrix by stride of row pixels
+ i += stride
+ mat_i += 1
+
+ # reset the column index to 0
+ j = 0
+ mat_j = 0
+
+ return updated_arr
+
+
+# Averagepooling Function
+def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
+ """
+ This function is used to perform avgpooling on the input 2D matrix (image)
+ Args:
+ arr: numpy array
+ size: size of pooling matrix
+ stride: the number of pixels shifts over the input matrix
+ Returns:
+ numpy array of avgpooled matrix
+ Sample Input Output:
+ >>> avgpooling([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], 2, 2)
+ array([[ 3., 5.],
+ [11., 13.]])
+ >>> avgpooling([[147, 180, 122],[241, 76, 32],[126, 13, 157]], 2, 1)
+ array([[161., 102.],
+ [114., 69.]])
+ """
+ arr = np.array(arr)
+ if arr.shape[0] != arr.shape[1]:
+ raise ValueError("The input array is not a square matrix")
+ i = 0
+ j = 0
+ mat_i = 0
+ mat_j = 0
+
+ # compute the shape of the output matrix
+ avgpool_shape = (arr.shape[0] - size) // stride + 1
+ # initialize the output matrix with zeros of shape avgpool_shape
+ updated_arr = np.zeros((avgpool_shape, avgpool_shape))
+
+ while i < arr.shape[0]:
+ # if the end of the matrix is reached, break
+ if i + size > arr.shape[0]:
+ break
+ while j < arr.shape[1]:
+ # if the end of the matrix is reached, break
+ if j + size > arr.shape[1]:
+ break
+ # compute the average of the pooling matrix
+ updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
+ # shift the pooling matrix by stride of column pixels
+ j += stride
+ mat_j += 1
+
+ # shift the pooling matrix by stride of row pixels
+ i += stride
+ mat_i += 1
+ # reset the column index to 0
+ j = 0
+ mat_j = 0
+
+ return updated_arr
+
+
+# Main Function
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod(name="avgpooling", verbose=True)
+
+ # Loading the image
+ image = Image.open("path_to_image")
+
+ # Converting the image to numpy array and maxpooling, displaying the result
+ # Ensure that the image is a square matrix
+
+ Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
+
+ # Converting the image to numpy array and averagepooling, displaying the result
+ # Ensure that the image is a square matrix
+
+ Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
diff --git a/conversions/README.md b/conversions/README.md
new file mode 100644
index 000000000..ec3d931fd
--- /dev/null
+++ b/conversions/README.md
@@ -0,0 +1,6 @@
+# Conversion
+
+Conversion programs convert a type of data, a number from a numerical base or unit into one of another type, base or unit, e.g. binary to decimal, integer to string or foot to meters.
+
+*
+*
diff --git a/conversions/astronomical_length_scale_conversion.py b/conversions/astronomical_length_scale_conversion.py
new file mode 100644
index 000000000..0f4136449
--- /dev/null
+++ b/conversions/astronomical_length_scale_conversion.py
@@ -0,0 +1,106 @@
+"""
+Conversion of length units.
+Available Units:
+Metre, Kilometre, Megametre, Gigametre,
+Terametre, Petametre, Exametre, Zettametre, Yottametre
+
+USAGE :
+-> Import this file into their respective project.
+-> Use the function length_conversion() for conversion of length units.
+-> Parameters :
+ -> value : The number of from units you want to convert
+ -> from_type : From which type you want to convert
+ -> to_type : To which type you want to convert
+
+REFERENCES :
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Orders_of_magnitude_(length)
+"""
+
# Full unit names mapped to their standard abbreviations.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter) for each abbreviation (10**exponent meters).
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Conversion between astronomical length units.

    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(1, "meter", "megametre")
    1e-06
    >>> length_conversion(1, "gigametre", "meter")
    1000000000
    >>> length_conversion(1, "gigametre", "terametre")
    0.001
    >>> length_conversion(1, "petametre", "terametre")
    1000
    >>> length_conversion(1, "petametre", "exametre")
    0.001
    >>> length_conversion(1, "terametre", "zettametre")
    1e-09
    >>> length_conversion(1, "yottametre", "zettametre")
    1000
    >>> length_conversion(4, "wrongUnit", "inch")
    Traceback (most recent call last):
        ...
    ValueError: Invalid 'from_type' value: 'wrongUnit'.
    Conversion abbreviations are: m, km, Mm, Gm, Tm, Pm, Em, Zm, Ym
    """
    # Normalise unit names: lowercase and drop a trailing plural "s"
    # ("meters" -> "meter").  rstrip, not strip: strip("s") would also
    # remove a *leading* "s" from the unit name.
    from_sanitized = from_type.lower().rstrip("s")
    to_sanitized = to_type.lower().rstrip("s")

    # Map full names to abbreviations; unknown strings pass through
    # unchanged and are rejected by the membership checks below.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    # The difference of base-10 exponents is the conversion exponent; the
    # previous if/else computed exactly this.  An int exponent keeps pow()
    # exact (int result when the exponent is non-negative).
    exponent = METRIC_CONVERSION[from_sanitized] - METRIC_CONVERSION[to_sanitized]
    return value * pow(10, exponent)
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
diff --git a/conversions/binary_to_decimal.py b/conversions/binary_to_decimal.py
index a7625e475..914a9318c 100644
--- a/conversions/binary_to_decimal.py
+++ b/conversions/binary_to_decimal.py
@@ -12,15 +12,15 @@ def bin_to_decimal(bin_string: str) -> int:
0
>>> bin_to_decimal("a")
Traceback (most recent call last):
- ...
+ ...
ValueError: Non-binary value was passed to the function
>>> bin_to_decimal("")
Traceback (most recent call last):
- ...
+ ...
ValueError: Empty string was passed to the function
>>> bin_to_decimal("39")
Traceback (most recent call last):
- ...
+ ...
ValueError: Non-binary value was passed to the function
"""
bin_string = str(bin_string).strip()
diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py
new file mode 100644
index 000000000..a3855bb70
--- /dev/null
+++ b/conversions/binary_to_hexadecimal.py
@@ -0,0 +1,66 @@
# Four-bit groups mapped to their hexadecimal digit.
BITS_TO_HEX = {
    "0000": "0",
    "0001": "1",
    "0010": "2",
    "0011": "3",
    "0100": "4",
    "0101": "5",
    "0110": "6",
    "0111": "7",
    "1000": "8",
    "1001": "9",
    "1010": "a",
    "1011": "b",
    "1100": "c",
    "1101": "d",
    "1110": "e",
    "1111": "f",
}


def bin_to_hexadecimal(binary_str: str) -> str:
    """
    Convert a binary string into hexadecimal using the grouping method.

    >>> bin_to_hexadecimal('101011111')
    '0x15f'
    >>> bin_to_hexadecimal(' 1010 ')
    '0x0a'
    >>> bin_to_hexadecimal('-11101')
    '-0x1d'
    >>> bin_to_hexadecimal('a')
    Traceback (most recent call last):
        ...
    ValueError: Non-binary value was passed to the function
    >>> bin_to_hexadecimal('')
    Traceback (most recent call last):
        ...
    ValueError: Empty string was passed to the function
    """
    digits = str(binary_str).strip()
    if not digits:
        raise ValueError("Empty string was passed to the function")

    is_negative = digits.startswith("-")
    if is_negative:
        digits = digits[1:]
    if set(digits) - set("01"):
        raise ValueError("Non-binary value was passed to the function")

    # Left-pad to the next multiple of four bits.  A length already
    # divisible by four gains one extra all-zero group, preserving the
    # original "0x0a"-style output for inputs such as "1010".
    digits = "0" * (4 - len(digits) % 4) + digits

    body = "".join(
        BITS_TO_HEX[digits[pos : pos + 4]] for pos in range(0, len(digits), 4)
    )
    return ("-" if is_negative else "") + "0x" + body
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
diff --git a/conversions/binary_to_octal.py b/conversions/binary_to_octal.py
index 35ede95b1..82f81e062 100644
--- a/conversions/binary_to_octal.py
+++ b/conversions/binary_to_octal.py
@@ -9,11 +9,11 @@ The function below will convert any binary string to the octal equivalent.
>>> bin_to_octal("")
Traceback (most recent call last):
-...
+ ...
ValueError: Empty string was passed to the function
>>> bin_to_octal("a-1")
Traceback (most recent call last):
-...
+ ...
ValueError: Non-binary value was passed to the function
"""
diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py
index 3c72a7732..c9c2e9a5f 100644
--- a/conversions/decimal_to_any.py
+++ b/conversions/decimal_to_any.py
@@ -1,5 +1,9 @@
"""Convert a positive Decimal Number to Any Other Representation"""
+from string import ascii_uppercase
+
+ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
+
def decimal_to_any(num: int, base: int) -> str:
"""
@@ -25,32 +29,32 @@ def decimal_to_any(num: int, base: int) -> str:
>>> # negatives will error
>>> decimal_to_any(-45, 8) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
ValueError: parameter must be positive int
>>> # floats will error
>>> decimal_to_any(34.4, 6) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
TypeError: int() can't convert non-string with explicit base
>>> # a float base will error
>>> decimal_to_any(5, 2.5) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
TypeError: 'float' object cannot be interpreted as an integer
>>> # a str base will error
>>> decimal_to_any(10, '16') # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
TypeError: 'str' object cannot be interpreted as an integer
>>> # a base less than 2 will error
>>> decimal_to_any(7, 0) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
ValueError: base must be >= 2
>>> # a base greater than 36 will error
>>> decimal_to_any(34, 37) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
ValueError: base must be <= 36
"""
if isinstance(num, float):
@@ -65,13 +69,6 @@ def decimal_to_any(num: int, base: int) -> str:
raise ValueError("base must be >= 2")
if base > 36:
raise ValueError("base must be <= 36")
- # fmt: off
- ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F',
- '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L',
- '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R',
- '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X',
- '34': 'Y', '35': 'Z'}
- # fmt: on
new_value = ""
mod = 0
div = 0
@@ -79,8 +76,9 @@ def decimal_to_any(num: int, base: int) -> str:
div, mod = divmod(num, base)
if base >= 11 and 9 < mod < 36:
actual_value = ALPHABET_VALUES[str(mod)]
- mod = actual_value
- new_value += str(mod)
+ else:
+ actual_value = str(mod)
+ new_value += actual_value
div = num // base
num = div
if div == 0:
diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py
index c21cdbcae..973c47c8a 100644
--- a/conversions/decimal_to_binary.py
+++ b/conversions/decimal_to_binary.py
@@ -2,7 +2,6 @@
def decimal_to_binary(num: int) -> str:
-
"""
Convert an Integer Decimal Number to a Binary Number as str.
>>> decimal_to_binary(0)
@@ -19,12 +18,12 @@ def decimal_to_binary(num: int) -> str:
>>> # other floats will error
>>> decimal_to_binary(16.16) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
TypeError: 'float' object cannot be interpreted as an integer
>>> # strings will error as well
>>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
TypeError: 'str' object cannot be interpreted as an integer
"""
diff --git a/conversions/decimal_to_binary_recursion.py b/conversions/decimal_to_binary_recursion.py
index c149ea865..05833ca67 100644
--- a/conversions/decimal_to_binary_recursion.py
+++ b/conversions/decimal_to_binary_recursion.py
@@ -7,7 +7,7 @@ def binary_recursive(decimal: int) -> str:
'1001000'
>>> binary_recursive("number")
Traceback (most recent call last):
- ...
+ ...
ValueError: invalid literal for int() with base 10: 'number'
"""
decimal = int(decimal)
@@ -30,11 +30,11 @@ def main(number: str) -> str:
'-0b101000'
>>> main(40.8)
Traceback (most recent call last):
- ...
+ ...
ValueError: Input value is not an integer
>>> main("forty")
Traceback (most recent call last):
- ...
+ ...
ValueError: Input value is not an integer
"""
number = str(number).strip()
diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py
index 2389c6d1f..5ea48401f 100644
--- a/conversions/decimal_to_hexadecimal.py
+++ b/conversions/decimal_to_hexadecimal.py
@@ -46,12 +46,12 @@ def decimal_to_hexadecimal(decimal: float) -> str:
>>> # other floats will error
>>> decimal_to_hexadecimal(16.16) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
AssertionError
>>> # strings will error as well
>>> decimal_to_hexadecimal('0xfffff') # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
AssertionError
>>> # results are the same when compared to Python's default hex function
>>> decimal_to_hexadecimal(-256) == hex(-256)
diff --git a/conversions/energy_conversions.py b/conversions/energy_conversions.py
new file mode 100644
index 000000000..51de6b313
--- /dev/null
+++ b/conversions/energy_conversions.py
@@ -0,0 +1,114 @@
+"""
+Conversion of energy units.
+
+Available units: joule, kilojoule, megajoule, gigajoule,\
+ wattsecond, watthour, kilowatthour, newtonmeter, calorie_nutr,\
+ kilocalorie_nutr, electronvolt, britishthermalunit_it, footpound
+
+USAGE :
+-> Import this file into their respective project.
+-> Use the function energy_conversion() for conversion of energy units.
+-> Parameters :
+ -> from_type : From which type you want to convert
+ -> to_type : To which type you want to convert
+ -> value : the value which you want to convert
+
+REFERENCES :
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Units_of_energy
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Joule
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilowatt-hour
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Newton-metre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Calorie
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Electronvolt
+-> Wikipedia reference: https://en.wikipedia.org/wiki/British_thermal_unit
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot-pound_(energy)
+-> Unit converter reference: https://www.unitconverters.net/energy-converter.html
+"""
+
# Factor converting one of each unit into joules.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Convert *value* from one energy unit to another.

    Both unit names must be keys of ENERGY_CONVERSION (joule factors).

    >>> energy_conversion("joule", "joule", 1)
    1.0
    >>> energy_conversion("joule", "kilojoule", 1)
    0.001
    >>> energy_conversion("kilowatthour", "joule", 10)
    36000000.0
    >>> energy_conversion("calorie_nutr", "kilocalorie_nutr", 1000)
    1.0
    >>> energy_conversion("britishthermalunit_it", "footpound", 1)
    778.1692306784539
    >>> energy_conversion("wrongunit", "joule", 1)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    ValueError: Incorrect 'from_type' or 'to_type' value: 'wrongunit', 'joule'
    Valid values are: joule, ... footpound
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        raise ValueError(
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
    # Convert to joules first, then to the target unit.
    joules = value * ENERGY_CONVERSION[from_type]
    return joules / ENERGY_CONVERSION[to_type]
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    from doctest import testmod

    testmod()
diff --git a/conversions/excel_title_to_column.py b/conversions/excel_title_to_column.py
new file mode 100644
index 000000000..d77031ec2
--- /dev/null
+++ b/conversions/excel_title_to_column.py
@@ -0,0 +1,33 @@
def excel_title_to_column(column_title: str) -> int:
    """
    Return the 1-based column number for an Excel-style column title.

    The title is read as a base-26-style numeral where "A" is 1 and the
    rightmost letter carries power 0.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    >>> excel_title_to_column("AA")
    27
    """
    # Validation kept as an assert to preserve the original failure mode
    # (AssertionError) for non-uppercase or empty input.
    assert column_title.isupper()
    # enumerate(reversed(...)) replaces the manual index/power bookkeeping.
    return sum(
        (ord(letter) - 64) * 26**power
        for power, letter in enumerate(reversed(column_title))
    )
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
diff --git a/conversions/hex_to_bin.py b/conversions/hex_to_bin.py
index e358d810b..b872ab5cb 100644
--- a/conversions/hex_to_bin.py
+++ b/conversions/hex_to_bin.py
@@ -21,11 +21,11 @@ def hex_to_bin(hex_num: str) -> int:
-1111111111111111
>>> hex_to_bin("F-f")
Traceback (most recent call last):
- ...
+ ...
ValueError: Invalid value was passed to the function
>>> hex_to_bin("")
Traceback (most recent call last):
- ...
+ ...
ValueError: No value was passed to the function
"""
diff --git a/conversions/hexadecimal_to_decimal.py b/conversions/hexadecimal_to_decimal.py
index beb1c2c3d..209e4aebb 100644
--- a/conversions/hexadecimal_to_decimal.py
+++ b/conversions/hexadecimal_to_decimal.py
@@ -18,15 +18,15 @@ def hex_to_decimal(hex_string: str) -> int:
-255
>>> hex_to_decimal("F-f")
Traceback (most recent call last):
- ...
+ ...
ValueError: Non-hexadecimal value was passed to the function
>>> hex_to_decimal("")
Traceback (most recent call last):
- ...
+ ...
ValueError: Empty string was passed to the function
>>> hex_to_decimal("12m")
Traceback (most recent call last):
- ...
+ ...
ValueError: Non-hexadecimal value was passed to the function
"""
hex_string = hex_string.strip().lower()
diff --git a/conversions/length_conversion.py b/conversions/length_conversion.py
new file mode 100644
index 000000000..d8f395152
--- /dev/null
+++ b/conversions/length_conversion.py
@@ -0,0 +1,124 @@
+"""
+Conversion of length units.
Available Units: Metre, Kilometre, Feet, Inch, Centimeter, Yard, Foot, Mile, Millimeter
+
+USAGE :
+-> Import this file into their respective project.
+-> Use the function length_conversion() for conversion of length units.
+-> Parameters :
+ -> value : The number of from units you want to convert
+ -> from_type : From which type you want to convert
+ -> to_type : To which type you want to convert
+
+REFERENCES :
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Feet
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Centimeter
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Yard
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Mile
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimeter
+"""
+
+from collections import namedtuple
+
from_to = namedtuple("from_to", "from_ to")

# Full unit names (singular, lowercase) mapped to their abbreviations.
TYPE_CONVERSION = {
    "millimeter": "mm",
    "centimeter": "cm",
    "meter": "m",
    "kilometer": "km",
    "inch": "in",
    "inche": "in",  # Trailing 's' has been stripped off
    "feet": "ft",
    "foot": "ft",
    "yard": "yd",
    "mile": "mi",
}

# Per-unit factors: `from_` converts one of the unit into meters,
# `to` converts one meter into the unit.
METRIC_CONVERSION = {
    "mm": from_to(0.001, 1000),
    "cm": from_to(0.01, 100),
    "m": from_to(1, 1),
    "km": from_to(1000, 0.001),
    "in": from_to(0.0254, 39.3701),
    "ft": from_to(0.3048, 3.28084),
    "yd": from_to(0.9144, 1.09361),
    "mi": from_to(1609.34, 0.000621371),
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Conversion between length units (case-insensitive, plurals accepted).

    >>> length_conversion(4, "METER", "FEET")
    13.12336
    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(2, "centimeter", "millimeter")
    20.0
    >>> length_conversion(2, "miles", "InChEs")
    126719.753468
    >>> length_conversion(3, "mm", "in")
    0.1181103
    >>> length_conversion(4, "wrongUnit", "inch")
    Traceback (most recent call last):
        ...
    ValueError: Invalid 'from_type' value: 'wrongUnit'.
    Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi
    """
    # Lowercase, drop a trailing plural "s", then map full names to
    # abbreviations; unknown strings pass through and fail the checks below.
    source_unit = from_type.lower().rstrip("s")
    source_unit = TYPE_CONVERSION.get(source_unit, source_unit)
    target_unit = to_type.lower().rstrip("s")
    target_unit = TYPE_CONVERSION.get(target_unit, target_unit)

    if source_unit not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    if target_unit not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    # value -> meters -> target unit (same multiplication order as before,
    # so the floating-point results are unchanged).
    return (
        value
        * METRIC_CONVERSION[source_unit].from_
        * METRIC_CONVERSION[target_unit].to
    )
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    from doctest import testmod

    testmod()
diff --git a/conversions/molecular_chemistry.py b/conversions/molecular_chemistry.py
index 8c6845996..51ffe534d 100644
--- a/conversions/molecular_chemistry.py
+++ b/conversions/molecular_chemistry.py
@@ -20,7 +20,7 @@ def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
>>> molarity_to_normality(4, 11.4, 5.7)
8
"""
- return round((float(moles / volume) * nfactor))
+ return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
@@ -86,7 +86,6 @@ def pressure_and_volume_to_temperature(
if __name__ == "__main__":
-
import doctest
doctest.testmod()
diff --git a/conversions/octal_to_decimal.py b/conversions/octal_to_decimal.py
index 5a7373fef..7f006f20e 100644
--- a/conversions/octal_to_decimal.py
+++ b/conversions/octal_to_decimal.py
@@ -2,23 +2,59 @@ def oct_to_decimal(oct_string: str) -> int:
"""
Convert a octal value to its decimal equivalent
+ >>> oct_to_decimal("")
+ Traceback (most recent call last):
+ ...
+ ValueError: Empty string was passed to the function
+ >>> oct_to_decimal("-")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-octal value was passed to the function
+ >>> oct_to_decimal("e")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-octal value was passed to the function
+ >>> oct_to_decimal("8")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-octal value was passed to the function
+ >>> oct_to_decimal("-e")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-octal value was passed to the function
+ >>> oct_to_decimal("-8")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-octal value was passed to the function
+ >>> oct_to_decimal("1")
+ 1
+ >>> oct_to_decimal("-1")
+ -1
>>> oct_to_decimal("12")
10
>>> oct_to_decimal(" 12 ")
10
>>> oct_to_decimal("-45")
-37
+ >>> oct_to_decimal("-")
+ Traceback (most recent call last):
+ ...
+ ValueError: Non-octal value was passed to the function
+ >>> oct_to_decimal("0")
+ 0
+ >>> oct_to_decimal("-4055")
+ -2093
>>> oct_to_decimal("2-0Fm")
Traceback (most recent call last):
- ...
+ ...
ValueError: Non-octal value was passed to the function
>>> oct_to_decimal("")
Traceback (most recent call last):
- ...
+ ...
ValueError: Empty string was passed to the function
>>> oct_to_decimal("19")
Traceback (most recent call last):
- ...
+ ...
ValueError: Non-octal value was passed to the function
"""
oct_string = str(oct_string).strip()
diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py
index 78db4a917..06b759e35 100644
--- a/conversions/prefix_conversions.py
+++ b/conversions/prefix_conversions.py
@@ -1,11 +1,12 @@
"""
Convert International System of Units (SI) and Binary prefixes
"""
+from __future__ import annotations
+
from enum import Enum
-from typing import Union
-class SI_Unit(Enum):
+class SIUnit(Enum):
yotta = 24
zetta = 21
exa = 18
@@ -28,7 +29,7 @@ class SI_Unit(Enum):
yocto = -24
-class Binary_Unit(Enum):
+class BinaryUnit(Enum):
yotta = 8
zetta = 7
exa = 6
@@ -41,17 +42,17 @@ class Binary_Unit(Enum):
def convert_si_prefix(
known_amount: float,
- known_prefix: Union[str, SI_Unit],
- unknown_prefix: Union[str, SI_Unit],
+ known_prefix: str | SIUnit,
+ unknown_prefix: str | SIUnit,
) -> float:
"""
Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix
Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units
- >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega)
+ >>> convert_si_prefix(1, SIUnit.giga, SIUnit.mega)
1000
- >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga)
+ >>> convert_si_prefix(1, SIUnit.mega, SIUnit.giga)
0.001
- >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo)
+ >>> convert_si_prefix(1, SIUnit.kilo, SIUnit.kilo)
1
>>> convert_si_prefix(1, 'giga', 'mega')
1000
@@ -59,9 +60,9 @@ def convert_si_prefix(
1000
"""
if isinstance(known_prefix, str):
- known_prefix = SI_Unit[known_prefix.lower()]
+ known_prefix = SIUnit[known_prefix.lower()]
if isinstance(unknown_prefix, str):
- unknown_prefix = SI_Unit[unknown_prefix.lower()]
+ unknown_prefix = SIUnit[unknown_prefix.lower()]
unknown_amount: float = known_amount * (
10 ** (known_prefix.value - unknown_prefix.value)
)
@@ -70,16 +71,16 @@ def convert_si_prefix(
def convert_binary_prefix(
known_amount: float,
- known_prefix: Union[str, Binary_Unit],
- unknown_prefix: Union[str, Binary_Unit],
+ known_prefix: str | BinaryUnit,
+ unknown_prefix: str | BinaryUnit,
) -> float:
"""
Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix
- >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega)
+ >>> convert_binary_prefix(1, BinaryUnit.giga, BinaryUnit.mega)
1024
- >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga)
+ >>> convert_binary_prefix(1, BinaryUnit.mega, BinaryUnit.giga)
0.0009765625
- >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo)
+ >>> convert_binary_prefix(1, BinaryUnit.kilo, BinaryUnit.kilo)
1
>>> convert_binary_prefix(1, 'giga', 'mega')
1024
@@ -87,9 +88,9 @@ def convert_binary_prefix(
1024
"""
if isinstance(known_prefix, str):
- known_prefix = Binary_Unit[known_prefix.lower()]
+ known_prefix = BinaryUnit[known_prefix.lower()]
if isinstance(unknown_prefix, str):
- unknown_prefix = Binary_Unit[unknown_prefix.lower()]
+ unknown_prefix = BinaryUnit[unknown_prefix.lower()]
unknown_amount: float = known_amount * (
2 ** ((known_prefix.value - unknown_prefix.value) * 10)
)
diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py
new file mode 100644
index 000000000..9344c9672
--- /dev/null
+++ b/conversions/prefix_conversions_string.py
@@ -0,0 +1,121 @@
+"""
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Convert a number to use the correct SI or Binary unit prefix.
+
+Inspired by prefix_conversion.py file in this repository by lance-pyles
+
+URL: https://en.wikipedia.org/wiki/Metric_prefix#List_of_SI_prefixes
+URL: https://en.wikipedia.org/wiki/Binary_prefix
+"""
+
+from __future__ import annotations
+
+from enum import Enum, unique
+from typing import TypeVar
+
+# Create a generic variable that can be 'Enum', or any subclass.
+T = TypeVar("T", bound="Enum")
+
+
@unique
class BinaryUnit(Enum):
    """Binary prefixes mapped to their power-of-two exponent (2**value)."""

    yotta = 80
    zetta = 70
    exa = 60
    peta = 50
    tera = 40
    giga = 30
    mega = 20
    kilo = 10
+
+
@unique
class SIUnit(Enum):
    """SI prefixes mapped to their base-ten exponent (10**value)."""

    yotta = 24
    zetta = 21
    exa = 18
    peta = 15
    tera = 12
    giga = 9
    mega = 6
    kilo = 3
    hecto = 2
    deca = 1
    deci = -1
    centi = -2
    milli = -3
    micro = -6
    nano = -9
    pico = -12
    femto = -15
    atto = -18
    zepto = -21
    yocto = -24

    @classmethod
    def get_positive(cls: type[T]) -> dict[str, int]:
        """
        Returns a dictionary with only the elements of this enum
        that has a positive value (name -> exponent, declaration order)
        >>> from itertools import islice
        >>> positive = SIUnit.get_positive()
        >>> inc = iter(positive.items())
        >>> dict(islice(inc, len(positive) // 2))
        {'yotta': 24, 'zetta': 21, 'exa': 18, 'peta': 15, 'tera': 12}
        >>> dict(inc)
        {'giga': 9, 'mega': 6, 'kilo': 3, 'hecto': 2, 'deca': 1}
        """
        return {unit.name: unit.value for unit in cls if unit.value > 0}

    @classmethod
    def get_negative(cls: type[T]) -> dict[str, int]:
        """
        Returns a dictionary with only the elements of this enum
        that has a negative value (name -> exponent, declaration order)
        @example
        >>> from itertools import islice
        >>> negative = SIUnit.get_negative()
        >>> inc = iter(negative.items())
        >>> dict(islice(inc, len(negative) // 2))
        {'deci': -1, 'centi': -2, 'milli': -3, 'micro': -6, 'nano': -9}
        >>> dict(inc)
        {'pico': -12, 'femto': -15, 'atto': -18, 'zepto': -21, 'yocto': -24}
        """
        return {unit.name: unit.value for unit in cls if unit.value < 0}
+
+
def add_si_prefix(value: float) -> str:
    """
    Render *value* with the largest SI prefix whose scaled value exceeds 1;
    fall back to ``str(value)`` when no prefix fits.

    >>> add_si_prefix(10000)
    '10.0 kilo'
    """
    # Positive values search the positive prefixes (largest first); zero and
    # negative values search the negative ones.
    # NOTE(review): a negative value is never > 1 after scaling, so negative
    # inputs always fall through to str(value) -- confirm this is intended.
    prefixes = SIUnit.get_positive() if value > 0 else SIUnit.get_negative()
    for prefix_name, exponent in prefixes.items():
        scaled = value / (10**exponent)
        if scaled > 1:
            return f"{scaled!s} {prefix_name}"
    return str(value)
+
+
def add_binary_prefix(value: float) -> str:
    """
    Render *value* with the largest binary prefix whose scaled value exceeds
    1; fall back to ``str(value)`` when no prefix fits.

    >>> add_binary_prefix(65536)
    '64.0 kilo'
    """
    # BinaryUnit iterates largest exponent first (declaration order).
    for unit in BinaryUnit:
        scaled = value / (2**unit.value)
        if scaled > 1:
            return f"{scaled!s} {unit.name}"
    return str(value)
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    from doctest import testmod

    testmod()
diff --git a/conversions/pressure_conversions.py b/conversions/pressure_conversions.py
new file mode 100644
index 000000000..e0cd18d23
--- /dev/null
+++ b/conversions/pressure_conversions.py
@@ -0,0 +1,81 @@
+"""
+Conversion of pressure units.
Available Units: Pascal, Bar, Kilopascal, Megapascal, psi (pound per square inch),
inHg (inch of mercury column), torr, atm
+USAGE :
+-> Import this file into their respective project.
+-> Use the function pressure_conversion() for conversion of pressure units.
+-> Parameters :
+ -> value : The number of from units you want to convert
+ -> from_type : From which type you want to convert
+ -> to_type : To which type you want to convert
+REFERENCES :
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit)
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Pound_per_square_inch
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch_of_mercury
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Torr
+-> https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
+-> https://msestudent.com/what-are-the-units-of-pressure/
+-> https://www.unitconverters.net/pressure-converter.html
+"""
+
+from collections import namedtuple
+
from_to = namedtuple("from_to", "from_ to")

# Per-unit factors: `from_` converts one of the unit into atmospheres,
# `to` converts one atmosphere into the unit.
PRESSURE_CONVERSION = {
    "atm": from_to(1, 1),
    "pascal": from_to(0.0000098, 101325),
    "bar": from_to(0.986923, 1.01325),
    "kilopascal": from_to(0.00986923, 101.325),
    "megapascal": from_to(9.86923, 0.101325),
    "psi": from_to(0.068046, 14.6959),
    "inHg": from_to(0.0334211, 29.9213),
    "torr": from_to(0.00131579, 760),
}


def pressure_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Conversion between pressure units.
    >>> pressure_conversion(4, "atm", "pascal")
    405300
    >>> pressure_conversion(1, "pascal", "psi")
    0.00014401981999999998
    >>> pressure_conversion(1, "bar", "atm")
    0.986923
    >>> pressure_conversion(3, "kilopascal", "bar")
    0.029999991892499998
    >>> pressure_conversion(2, "megapascal", "psi")
    290.074434314
    >>> pressure_conversion(4, "psi", "torr")
    206.85984
    >>> pressure_conversion(1, "inHg", "atm")
    0.0334211
    >>> pressure_conversion(1, "torr", "psi")
    0.019336718261000002
    >>> pressure_conversion(4, "wrongUnit", "atm")
    Traceback (most recent call last):
        ...
    ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are:
    atm, pascal, bar, kilopascal, megapascal, psi, inHg, torr
    """
    if from_type not in PRESSURE_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(PRESSURE_CONVERSION)
        )
        raise ValueError(msg)
    if to_type not in PRESSURE_CONVERSION:
        # Wording now matches the 'from_type' error above (the stray "."
        # after the value was dropped for consistency).
        msg = (
            f"Invalid 'to_type' value: {to_type!r} Supported values are:\n"
            + ", ".join(PRESSURE_CONVERSION)
        )
        raise ValueError(msg)
    # value -> atmospheres -> target unit.
    return (
        value * PRESSURE_CONVERSION[from_type].from_ * PRESSURE_CONVERSION[to_type].to
    )
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    from doctest import testmod

    testmod()
diff --git a/conversions/rgb_hsv_conversion.py b/conversions/rgb_hsv_conversion.py
index 081cfe1d7..74b3d33e4 100644
--- a/conversions/rgb_hsv_conversion.py
+++ b/conversions/rgb_hsv_conversion.py
@@ -121,8 +121,8 @@ def rgb_to_hsv(red: int, green: int, blue: int) -> list[float]:
float_red = red / 255
float_green = green / 255
float_blue = blue / 255
- value = max(max(float_red, float_green), float_blue)
- chroma = value - min(min(float_red, float_green), float_blue)
+ value = max(float_red, float_green, float_blue)
+ chroma = value - min(float_red, float_green, float_blue)
saturation = 0 if value == 0 else chroma / value
if chroma == 0:
diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py
index 9933e6a78..75af2ac72 100644
--- a/conversions/roman_numerals.py
+++ b/conversions/roman_numerals.py
@@ -1,3 +1,20 @@
+ROMAN = [
+ (1000, "M"),
+ (900, "CM"),
+ (500, "D"),
+ (400, "CD"),
+ (100, "C"),
+ (90, "XC"),
+ (50, "L"),
+ (40, "XL"),
+ (10, "X"),
+ (9, "IX"),
+ (5, "V"),
+ (4, "IV"),
+ (1, "I"),
+]
+
+
def roman_to_int(roman: str) -> int:
"""
LeetCode No. 13 Roman to Integer
@@ -29,23 +46,8 @@ def int_to_roman(number: int) -> str:
>>> all(int_to_roman(value) == key for key, value in tests.items())
True
"""
- ROMAN = [
- (1000, "M"),
- (900, "CM"),
- (500, "D"),
- (400, "CD"),
- (100, "C"),
- (90, "XC"),
- (50, "L"),
- (40, "XL"),
- (10, "X"),
- (9, "IX"),
- (5, "V"),
- (4, "IV"),
- (1, "I"),
- ]
result = []
- for (arabic, roman) in ROMAN:
+ for arabic, roman in ROMAN:
(factor, number) = divmod(number, arabic)
result.append(roman * factor)
if number == 0:
diff --git a/conversions/speed_conversions.py b/conversions/speed_conversions.py
new file mode 100644
index 000000000..ba497119d
--- /dev/null
+++ b/conversions/speed_conversions.py
@@ -0,0 +1,71 @@
+"""
+Convert speed units
+
+https://en.wikipedia.org/wiki/Kilometres_per_hour
+https://en.wikipedia.org/wiki/Miles_per_hour
+https://en.wikipedia.org/wiki/Knot_(unit)
+https://en.wikipedia.org/wiki/Metre_per_second
+"""
+
+speed_chart: dict[str, float] = {
+ "km/h": 1.0,
+ "m/s": 3.6,
+ "mph": 1.609344,
+ "knot": 1.852,
+}
+
+speed_chart_inverse: dict[str, float] = {
+ "km/h": 1.0,
+ "m/s": 0.277777778,
+ "mph": 0.621371192,
+ "knot": 0.539956803,
+}
+
+
+def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
+ """
+ Convert speed from one unit to another using the speed_chart above.
+
+ "km/h": 1.0,
+ "m/s": 3.6,
+ "mph": 1.609344,
+ "knot": 1.852,
+
+ >>> convert_speed(100, "km/h", "m/s")
+ 27.778
+ >>> convert_speed(100, "km/h", "mph")
+ 62.137
+ >>> convert_speed(100, "km/h", "knot")
+ 53.996
+ >>> convert_speed(100, "m/s", "km/h")
+ 360.0
+ >>> convert_speed(100, "m/s", "mph")
+ 223.694
+ >>> convert_speed(100, "m/s", "knot")
+ 194.384
+ >>> convert_speed(100, "mph", "km/h")
+ 160.934
+ >>> convert_speed(100, "mph", "m/s")
+ 44.704
+ >>> convert_speed(100, "mph", "knot")
+ 86.898
+ >>> convert_speed(100, "knot", "km/h")
+ 185.2
+ >>> convert_speed(100, "knot", "m/s")
+ 51.444
+ >>> convert_speed(100, "knot", "mph")
+ 115.078
+ """
+ if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
+ msg = (
+ f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
+ f"Valid values are: {', '.join(speed_chart_inverse)}"
+ )
+ raise ValueError(msg)
+ return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py
index 167c9dc64..f7af6c8f1 100644
--- a/conversions/temperature_conversions.py
+++ b/conversions/temperature_conversions.py
@@ -23,7 +23,7 @@ def celsius_to_fahrenheit(celsius: float, ndigits: int = 2) -> float:
104.0
>>> celsius_to_fahrenheit("celsius")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'celsius'
"""
return round((float(celsius) * 9 / 5) + 32, ndigits)
@@ -47,7 +47,7 @@ def celsius_to_kelvin(celsius: float, ndigits: int = 2) -> float:
313.15
>>> celsius_to_kelvin("celsius")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'celsius'
"""
return round(float(celsius) + 273.15, ndigits)
@@ -71,7 +71,7 @@ def celsius_to_rankine(celsius: float, ndigits: int = 2) -> float:
563.67
>>> celsius_to_rankine("celsius")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'celsius'
"""
return round((float(celsius) * 9 / 5) + 491.67, ndigits)
@@ -101,7 +101,7 @@ def fahrenheit_to_celsius(fahrenheit: float, ndigits: int = 2) -> float:
37.78
>>> fahrenheit_to_celsius("fahrenheit")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'fahrenheit'
"""
return round((float(fahrenheit) - 32) * 5 / 9, ndigits)
@@ -131,7 +131,7 @@ def fahrenheit_to_kelvin(fahrenheit: float, ndigits: int = 2) -> float:
310.93
>>> fahrenheit_to_kelvin("fahrenheit")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'fahrenheit'
"""
return round(((float(fahrenheit) - 32) * 5 / 9) + 273.15, ndigits)
@@ -161,7 +161,7 @@ def fahrenheit_to_rankine(fahrenheit: float, ndigits: int = 2) -> float:
559.67
>>> fahrenheit_to_rankine("fahrenheit")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'fahrenheit'
"""
return round(float(fahrenheit) + 459.67, ndigits)
@@ -185,7 +185,7 @@ def kelvin_to_celsius(kelvin: float, ndigits: int = 2) -> float:
42.35
>>> kelvin_to_celsius("kelvin")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'kelvin'
"""
return round(float(kelvin) - 273.15, ndigits)
@@ -209,7 +209,7 @@ def kelvin_to_fahrenheit(kelvin: float, ndigits: int = 2) -> float:
108.23
>>> kelvin_to_fahrenheit("kelvin")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'kelvin'
"""
return round(((float(kelvin) - 273.15) * 9 / 5) + 32, ndigits)
@@ -233,7 +233,7 @@ def kelvin_to_rankine(kelvin: float, ndigits: int = 2) -> float:
72.0
>>> kelvin_to_rankine("kelvin")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'kelvin'
"""
return round((float(kelvin) * 9 / 5), ndigits)
@@ -257,7 +257,7 @@ def rankine_to_celsius(rankine: float, ndigits: int = 2) -> float:
-97.87
>>> rankine_to_celsius("rankine")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'rankine'
"""
return round((float(rankine) - 491.67) * 5 / 9, ndigits)
@@ -277,7 +277,7 @@ def rankine_to_fahrenheit(rankine: float, ndigits: int = 2) -> float:
-144.17
>>> rankine_to_fahrenheit("rankine")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'rankine'
"""
return round(float(rankine) - 459.67, ndigits)
@@ -297,7 +297,7 @@ def rankine_to_kelvin(rankine: float, ndigits: int = 2) -> float:
22.22
>>> rankine_to_kelvin("rankine")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'rankine'
"""
return round((float(rankine) * 5 / 9), ndigits)
@@ -316,7 +316,7 @@ def reaumur_to_kelvin(reaumur: float, ndigits: int = 2) -> float:
323.15
>>> reaumur_to_kelvin("reaumur")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'reaumur'
"""
return round((float(reaumur) * 1.25 + 273.15), ndigits)
@@ -335,7 +335,7 @@ def reaumur_to_fahrenheit(reaumur: float, ndigits: int = 2) -> float:
122.0
>>> reaumur_to_fahrenheit("reaumur")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'reaumur'
"""
return round((float(reaumur) * 2.25 + 32), ndigits)
@@ -354,7 +354,7 @@ def reaumur_to_celsius(reaumur: float, ndigits: int = 2) -> float:
50.0
>>> reaumur_to_celsius("reaumur")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'reaumur'
"""
return round((float(reaumur) * 1.25), ndigits)
@@ -373,14 +373,13 @@ def reaumur_to_rankine(reaumur: float, ndigits: int = 2) -> float:
581.67
>>> reaumur_to_rankine("reaumur")
Traceback (most recent call last):
- ...
+ ...
ValueError: could not convert string to float: 'reaumur'
"""
return round((float(reaumur) * 2.25 + 32 + 459.67), ndigits)
if __name__ == "__main__":
-
import doctest
doctest.testmod()
diff --git a/conversions/volume_conversions.py b/conversions/volume_conversions.py
new file mode 100644
index 000000000..44d290091
--- /dev/null
+++ b/conversions/volume_conversions.py
@@ -0,0 +1,75 @@
+"""
+Conversion of volume units.
+Available Units: Cubic metre, Litre, Kilolitre, Gallon, Cubic yard, Cubic foot, Cup
+USAGE :
+-> Import this file into your project.
+-> Use the function volume_conversion() for conversion of volume units.
+-> Parameters :
+ -> value : The number of from units you want to convert
+ -> from_type : From which type you want to convert
+ -> to_type : To which type you want to convert
+REFERENCES :
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cubic_metre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Litre
+-> Wikipedia reference: https://en.wiktionary.org/wiki/kilolitre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Gallon
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cubic_yard
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cubic_foot
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cup_(unit)
+"""
+
+from collections import namedtuple
+
+from_to = namedtuple("from_to", "from_ to")
+
+METRIC_CONVERSION = {
+ "cubicmeter": from_to(1, 1),
+ "litre": from_to(0.001, 1000),
+ "kilolitre": from_to(1, 1),
+ "gallon": from_to(0.00454, 264.172),
+ "cubicyard": from_to(0.76455, 1.30795),
+ "cubicfoot": from_to(0.028, 35.3147),
+ "cup": from_to(0.000236588, 4226.75),
+}
+
+
+def volume_conversion(value: float, from_type: str, to_type: str) -> float:
+ """
+ Conversion between volume units.
+ >>> volume_conversion(4, "cubicmeter", "litre")
+ 4000
+ >>> volume_conversion(1, "litre", "gallon")
+ 0.264172
+ >>> volume_conversion(1, "kilolitre", "cubicmeter")
+ 1
+ >>> volume_conversion(3, "gallon", "cubicyard")
+ 0.017814279
+ >>> volume_conversion(2, "cubicyard", "litre")
+ 1529.1
+ >>> volume_conversion(4, "cubicfoot", "cup")
+ 473.396
+ >>> volume_conversion(1, "cup", "kilolitre")
+ 0.000236588
+ >>> volume_conversion(4, "wrongUnit", "litre")
+ Traceback (most recent call last):
+ ...
+ ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are:
+ cubicmeter, litre, kilolitre, gallon, cubicyard, cubicfoot, cup
+ """
+ if from_type not in METRIC_CONVERSION:
+ raise ValueError(
+ f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ + ", ".join(METRIC_CONVERSION)
+ )
+ if to_type not in METRIC_CONVERSION:
+ raise ValueError(
+ f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ + ", ".join(METRIC_CONVERSION)
+ )
+ return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py
index c344416be..e8326e0b6 100644
--- a/conversions/weight_conversion.py
+++ b/conversions/weight_conversion.py
@@ -3,7 +3,7 @@ Conversion of weight units.
__author__ = "Anubhav Solanki"
__license__ = "MIT"
-__version__ = "1.0.0"
+__version__ = "1.1.0"
__maintainer__ = "Anubhav Solanki"
__email__ = "anubhavsolanki0@gmail.com"
@@ -27,6 +27,7 @@ REFERENCES :
-> Wikipedia reference: https://en.wikipedia.org/wiki/Ounce
-> Wikipedia reference: https://en.wikipedia.org/wiki/Fineness#Karat
-> Wikipedia reference: https://en.wikipedia.org/wiki/Dalton_(unit)
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Stone_(unit)
"""
KILOGRAM_CHART: dict[str, float] = {
@@ -37,6 +38,7 @@ KILOGRAM_CHART: dict[str, float] = {
"long-ton": 0.0009842073,
"short-ton": 0.0011023122,
"pound": 2.2046244202,
+ "stone": 0.1574731728,
"ounce": 35.273990723,
"carrat": 5000,
"atomic-mass-unit": 6.022136652e26,
@@ -50,6 +52,7 @@ WEIGHT_TYPE_CHART: dict[str, float] = {
"long-ton": 1016.04608,
"short-ton": 907.184,
"pound": 0.453592,
+ "stone": 6.35029,
"ounce": 0.0283495,
"carrat": 0.0002,
"atomic-mass-unit": 1.660540199e-27,
@@ -67,6 +70,7 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float:
"long-ton" : 0.0009842073,
"short-ton" : 0.0011023122,
"pound" : 2.2046244202,
+ "stone": 0.1574731728,
"ounce" : 35.273990723,
"carrat" : 5000,
"atomic-mass-unit" : 6.022136652E+26
@@ -85,6 +89,8 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float:
0.0011023122
>>> weight_conversion("kilogram","pound",4)
8.8184976808
+ >>> weight_conversion("kilogram","stone",5)
+ 0.7873658640000001
>>> weight_conversion("kilogram","ounce",4)
141.095962892
>>> weight_conversion("kilogram","carrat",3)
@@ -105,6 +111,8 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float:
3.3069366000000003e-06
>>> weight_conversion("gram","pound",3)
0.0066138732606
+ >>> weight_conversion("gram","stone",4)
+ 0.0006298926912000001
>>> weight_conversion("gram","ounce",1)
0.035273990723
>>> weight_conversion("gram","carrat",2)
@@ -211,6 +219,24 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float:
2267.96
>>> weight_conversion("pound","atomic-mass-unit",4)
1.0926372033015936e+27
+ >>> weight_conversion("stone","kilogram",5)
+ 31.751450000000002
+ >>> weight_conversion("stone","gram",2)
+ 12700.58
+ >>> weight_conversion("stone","milligram",3)
+ 19050870.0
+ >>> weight_conversion("stone","metric-ton",3)
+ 0.01905087
+ >>> weight_conversion("stone","long-ton",3)
+ 0.018750005325351003
+ >>> weight_conversion("stone","short-ton",3)
+ 0.021000006421614002
+ >>> weight_conversion("stone","pound",2)
+ 28.00000881870372
+ >>> weight_conversion("stone","ounce",1)
+ 224.00007054835967
+ >>> weight_conversion("stone","carrat",2)
+ 63502.9
>>> weight_conversion("ounce","kilogram",3)
0.0850485
>>> weight_conversion("ounce","gram",3)
@@ -273,15 +299,15 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float:
1.999999998903455
"""
if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART:
- raise ValueError(
+ msg = (
f"Invalid 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
f"Supported values are: {', '.join(WEIGHT_TYPE_CHART)}"
)
+ raise ValueError(msg)
return value * KILOGRAM_CHART[to_type] * WEIGHT_TYPE_CHART[from_type]
if __name__ == "__main__":
-
import doctest
doctest.testmod()
diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py
new file mode 100644
index 000000000..4558bd8d4
--- /dev/null
+++ b/data_structures/arrays/permutations.py
@@ -0,0 +1,50 @@
+def permute(nums: list[int]) -> list[list[int]]:
+ """
+ Return all permutations.
+ >>> from itertools import permutations
+ >>> numbers= [1,2,3]
+ >>> all(list(nums) in permute(numbers) for nums in permutations(numbers))
+ True
+ """
+ result = []
+ if len(nums) == 1:
+ return [nums.copy()]
+ for _ in range(len(nums)):
+ n = nums.pop(0)
+ permutations = permute(nums)
+ for perm in permutations:
+ perm.append(n)
+ result.extend(permutations)
+ nums.append(n)
+ return result
+
+
+def permute2(nums):
+ """
+ Return all permutations of the given list.
+
+ >>> permute2([1, 2, 3])
+ [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
+ """
+
+ def backtrack(start):
+ if start == len(nums) - 1:
+ output.append(nums[:])
+ else:
+ for i in range(start, len(nums)):
+ nums[start], nums[i] = nums[i], nums[start]
+ backtrack(start + 1)
+ nums[start], nums[i] = nums[i], nums[start] # backtrack
+
+ output = []
+ backtrack(0)
+ return output
+
+
+if __name__ == "__main__":
+ import doctest
+
+    # Demonstrate permute2 by printing its output for a sample input
+ res = permute2([1, 2, 3])
+ print(res)
+ doctest.testmod()
diff --git a/data_structures/arrays/prefix_sum.py b/data_structures/arrays/prefix_sum.py
new file mode 100644
index 000000000..2243a5308
--- /dev/null
+++ b/data_structures/arrays/prefix_sum.py
@@ -0,0 +1,78 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 3, 2022
+
+Implement the class of prefix sum with useful functions based on it.
+
+"""
+
+
+class PrefixSum:
+ def __init__(self, array: list[int]) -> None:
+ len_array = len(array)
+ self.prefix_sum = [0] * len_array
+
+ if len_array > 0:
+ self.prefix_sum[0] = array[0]
+
+ for i in range(1, len_array):
+ self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
+
+ def get_sum(self, start: int, end: int) -> int:
+ """
+        Return the sum of the array elements from index start to end (inclusive).
+ Runtime : O(1)
+ Space: O(1)
+
+ >>> PrefixSum([1,2,3]).get_sum(0, 2)
+ 6
+ >>> PrefixSum([1,2,3]).get_sum(1, 2)
+ 5
+ >>> PrefixSum([1,2,3]).get_sum(2, 2)
+ 3
+ >>> PrefixSum([1,2,3]).get_sum(2, 3)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ """
+ if start == 0:
+ return self.prefix_sum[end]
+
+ return self.prefix_sum[end] - self.prefix_sum[start - 1]
+
+ def contains_sum(self, target_sum: int) -> bool:
+ """
+        Return True if some contiguous subarray of the array sums to
+        target_sum, False otherwise.
+
+ Runtime : O(n)
+ Space: O(n)
+
+ >>> PrefixSum([1,2,3]).contains_sum(6)
+ True
+ >>> PrefixSum([1,2,3]).contains_sum(5)
+ True
+ >>> PrefixSum([1,2,3]).contains_sum(3)
+ True
+ >>> PrefixSum([1,2,3]).contains_sum(4)
+ False
+ >>> PrefixSum([1,2,3]).contains_sum(7)
+ False
+ >>> PrefixSum([1,-2,3]).contains_sum(2)
+ True
+ """
+
+ sums = {0}
+ for sum_item in self.prefix_sum:
+ if sum_item - target_sum in sums:
+ return True
+
+ sums.add(sum_item)
+
+ return False
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/arrays/product_sum.py b/data_structures/arrays/product_sum.py
new file mode 100644
index 000000000..4fb906f36
--- /dev/null
+++ b/data_structures/arrays/product_sum.py
@@ -0,0 +1,98 @@
+"""
+Calculate the Product Sum from a Special Array.
+reference: https://dev.to/sfrasica/algorithms-product-sum-from-an-array-dc6
+
+Python doctests can be run with the following command:
+python -m doctest -v product_sum.py
+
+Calculate the product sum of a "special" array which can contain integers or nested
+arrays. The product sum is obtained by adding all elements and multiplying by their
+respective depths.
+
+For example, in the array [x, y], the product sum is (x + y). In the array [x, [y, z]],
+the product sum is x + 2 * (y + z). In the array [x, [y, [z]]],
+the product sum is x + 2 * (y + 3z).
+
+Example Input:
+[5, 2, [-7, 1], 3, [6, [-13, 8], 4]]
+Output: 12
+
+"""
+
+
+def product_sum(arr: list[int | list], depth: int) -> int:
+ """
+ Recursively calculates the product sum of an array.
+
+ The product sum of an array is defined as the sum of its elements multiplied by
+ their respective depths. If an element is a list, its product sum is calculated
+ recursively by multiplying the sum of its elements with its depth plus one.
+
+ Args:
+ arr: The array of integers and nested lists.
+ depth: The current depth level.
+
+ Returns:
+ int: The product sum of the array.
+
+ Examples:
+ >>> product_sum([1, 2, 3], 1)
+ 6
+ >>> product_sum([-1, 2, [-3, 4]], 2)
+ 8
+ >>> product_sum([1, 2, 3], -1)
+ -6
+ >>> product_sum([1, 2, 3], 0)
+ 0
+ >>> product_sum([1, 2, 3], 7)
+ 42
+ >>> product_sum((1, 2, 3), 7)
+ 42
+ >>> product_sum({1, 2, 3}, 7)
+ 42
+ >>> product_sum([1, -1], 1)
+ 0
+ >>> product_sum([1, -2], 1)
+ -1
+ >>> product_sum([-3.5, [1, [0.5]]], 1)
+ 1.5
+
+ """
+ total_sum = 0
+ for ele in arr:
+ total_sum += product_sum(ele, depth + 1) if isinstance(ele, list) else ele
+ return total_sum * depth
+
+
+def product_sum_array(array: list[int | list]) -> int:
+ """
+ Calculates the product sum of an array.
+
+ Args:
+ array (List[Union[int, List]]): The array of integers and nested lists.
+
+ Returns:
+ int: The product sum of the array.
+
+ Examples:
+ >>> product_sum_array([1, 2, 3])
+ 6
+ >>> product_sum_array([1, [2, 3]])
+ 11
+ >>> product_sum_array([1, [2, [3, 4]]])
+ 47
+ >>> product_sum_array([0])
+ 0
+ >>> product_sum_array([-3.5, [1, [0.5]]])
+ 1.5
+ >>> product_sum_array([1, -2])
+ -1
+
+ """
+ return product_sum(array, 1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py
index e0d3e4d43..4c1fb17af 100644
--- a/data_structures/binary_tree/avl_tree.py
+++ b/data_structures/binary_tree/avl_tree.py
@@ -5,15 +5,16 @@ python3 -m doctest -v avl_tree.py
For testing run:
python avl_tree.py
"""
+from __future__ import annotations
import math
import random
-from typing import Any, List, Optional
+from typing import Any
-class my_queue:
+class MyQueue:
def __init__(self) -> None:
- self.data: List[Any] = []
+ self.data: list[Any] = []
self.head: int = 0
self.tail: int = 0
@@ -32,26 +33,26 @@ class my_queue:
def count(self) -> int:
return self.tail - self.head
- def print(self) -> None:
+ def print_queue(self) -> None:
print(self.data)
print("**************")
print(self.data[self.head : self.tail])
-class my_node:
+class MyNode:
def __init__(self, data: Any) -> None:
self.data = data
- self.left: Optional[my_node] = None
- self.right: Optional[my_node] = None
+ self.left: MyNode | None = None
+ self.right: MyNode | None = None
self.height: int = 1
def get_data(self) -> Any:
return self.data
- def get_left(self) -> Optional["my_node"]:
+ def get_left(self) -> MyNode | None:
return self.left
- def get_right(self) -> Optional["my_node"]:
+ def get_right(self) -> MyNode | None:
return self.right
def get_height(self) -> int:
@@ -59,22 +60,18 @@ class my_node:
def set_data(self, data: Any) -> None:
self.data = data
- return
- def set_left(self, node: Optional["my_node"]) -> None:
+ def set_left(self, node: MyNode | None) -> None:
self.left = node
- return
- def set_right(self, node: Optional["my_node"]) -> None:
+ def set_right(self, node: MyNode | None) -> None:
self.right = node
- return
def set_height(self, height: int) -> None:
self.height = height
- return
-def get_height(node: Optional["my_node"]) -> int:
+def get_height(node: MyNode | None) -> int:
if node is None:
return 0
return node.get_height()
@@ -86,7 +83,7 @@ def my_max(a: int, b: int) -> int:
return b
-def right_rotation(node: my_node) -> my_node:
+def right_rotation(node: MyNode) -> MyNode:
r"""
A B
/ \ / \
@@ -109,7 +106,7 @@ def right_rotation(node: my_node) -> my_node:
return ret
-def left_rotation(node: my_node) -> my_node:
+def left_rotation(node: MyNode) -> MyNode:
"""
a mirror symmetry rotation of the left_rotation
"""
@@ -125,7 +122,7 @@ def left_rotation(node: my_node) -> my_node:
return ret
-def lr_rotation(node: my_node) -> my_node:
+def lr_rotation(node: MyNode) -> MyNode:
r"""
A A Br
/ \ / \ / \
@@ -142,16 +139,16 @@ def lr_rotation(node: my_node) -> my_node:
return right_rotation(node)
-def rl_rotation(node: my_node) -> my_node:
+def rl_rotation(node: MyNode) -> MyNode:
right_child = node.get_right()
assert right_child is not None
node.set_right(right_rotation(right_child))
return left_rotation(node)
-def insert_node(node: Optional["my_node"], data: Any) -> Optional["my_node"]:
+def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
if node is None:
- return my_node(data)
+ return MyNode(data)
if data < node.get_data():
node.set_left(insert_node(node.get_left(), data))
if (
@@ -179,7 +176,7 @@ def insert_node(node: Optional["my_node"], data: Any) -> Optional["my_node"]:
return node
-def get_rightMost(root: my_node) -> Any:
+def get_right_most(root: MyNode) -> Any:
while True:
right_child = root.get_right()
if right_child is None:
@@ -188,7 +185,7 @@ def get_rightMost(root: my_node) -> Any:
return root.get_data()
-def get_leftMost(root: my_node) -> Any:
+def get_left_most(root: MyNode) -> Any:
while True:
left_child = root.get_left()
if left_child is None:
@@ -197,12 +194,12 @@ def get_leftMost(root: my_node) -> Any:
return root.get_data()
-def del_node(root: my_node, data: Any) -> Optional["my_node"]:
+def del_node(root: MyNode, data: Any) -> MyNode | None:
left_child = root.get_left()
right_child = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
- temp_data = get_leftMost(right_child)
+ temp_data = get_left_most(right_child)
root.set_data(temp_data)
root.set_right(del_node(right_child, temp_data))
elif left_child is not None:
@@ -275,7 +272,7 @@ class AVLtree:
"""
def __init__(self) -> None:
- self.root: Optional[my_node] = None
+ self.root: MyNode | None = None
def get_height(self) -> int:
return get_height(self.root)
@@ -295,7 +292,7 @@ class AVLtree:
self,
) -> str: # a level traversale, gives a more intuitive look on the tree
output = ""
- q = my_queue()
+ q = MyQueue()
q.push(self.root)
layer = self.get_height()
if layer == 0:
diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py
index 575b157ee..65dccf247 100644
--- a/data_structures/binary_tree/basic_binary_tree.py
+++ b/data_structures/binary_tree/basic_binary_tree.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from __future__ import annotations
class Node:
@@ -8,11 +8,11 @@ class Node:
def __init__(self, data: int) -> None:
self.data = data
- self.left: Optional[Node] = None
- self.right: Optional[Node] = None
+ self.left: Node | None = None
+ self.right: Node | None = None
-def display(tree: Optional[Node]) -> None: # In Order traversal of the tree
+def display(tree: Node | None) -> None: # In Order traversal of the tree
"""
>>> root = Node(1)
>>> root.left = Node(0)
@@ -30,7 +30,7 @@ def display(tree: Optional[Node]) -> None: # In Order traversal of the tree
display(tree.right)
-def depth_of_tree(tree: Optional[Node]) -> int:
+def depth_of_tree(tree: Node | None) -> int:
"""
Recursive function that returns the depth of a binary tree.
diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py
index a1ed1d0ac..c72195424 100644
--- a/data_structures/binary_tree/binary_search_tree.py
+++ b/data_structures/binary_tree/binary_search_tree.py
@@ -2,33 +2,36 @@
A binary search Tree
"""
+from collections.abc import Iterable
+from typing import Any
+
class Node:
- def __init__(self, value, parent):
+ def __init__(self, value: int | None = None):
self.value = value
- self.parent = parent # Added in order to delete a node easier
- self.left = None
- self.right = None
+ self.parent: Node | None = None # Added in order to delete a node easier
+ self.left: Node | None = None
+ self.right: Node | None = None
- def __repr__(self):
+ def __repr__(self) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
- return pformat({"%s" % (self.value): (self.left, self.right)}, indent=1)
+ return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
- def __init__(self, root=None):
+ def __init__(self, root: Node | None = None):
self.root = root
- def __str__(self):
+ def __str__(self) -> str:
"""
Return a string of all the Nodes using in order traversal
"""
return str(self.root)
- def __reassign_nodes(self, node, new_children):
+ def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
if new_children is not None: # reset its kids
new_children.parent = node.parent
if node.parent is not None: # reset its parent
@@ -39,21 +42,25 @@ class BinarySearchTree:
else:
self.root = new_children
- def is_right(self, node):
- return node == node.parent.right
+ def is_right(self, node: Node) -> bool:
+ if node.parent and node.parent.right:
+ return node == node.parent.right
+ return False
- def empty(self):
+ def empty(self) -> bool:
return self.root is None
- def __insert(self, value):
+ def __insert(self, value) -> None:
"""
Insert a new node in Binary Search Tree with value label
"""
- new_node = Node(value, None) # create a new Node
+ new_node = Node(value) # create a new Node
if self.empty(): # if Tree is empty
self.root = new_node # set its root
else: # Tree is not empty
parent_node = self.root # from root
+ if parent_node is None:
+ return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
@@ -69,12 +76,11 @@ class BinarySearchTree:
parent_node = parent_node.right
new_node.parent = parent_node
- def insert(self, *values):
+ def insert(self, *values) -> None:
for value in values:
self.__insert(value)
- return self
- def search(self, value):
+ def search(self, value) -> Node | None:
if self.empty():
raise IndexError("Warning: Tree is empty! please use another.")
else:
@@ -84,30 +90,35 @@ class BinarySearchTree:
node = node.left if value < node.value else node.right
return node
- def get_max(self, node=None):
+ def get_max(self, node: Node | None = None) -> Node | None:
"""
We go deep on the right branch
"""
if node is None:
+ if self.root is None:
+ return None
node = self.root
+
if not self.empty():
while node.right is not None:
node = node.right
return node
- def get_min(self, node=None):
+ def get_min(self, node: Node | None = None) -> Node | None:
"""
We go deep on the left branch
"""
if node is None:
node = self.root
+ if self.root is None:
+ return None
if not self.empty():
node = self.root
while node.left is not None:
node = node.left
return node
- def remove(self, value):
+ def remove(self, value: int) -> None:
node = self.search(value) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
@@ -120,18 +131,18 @@ class BinarySearchTree:
tmp_node = self.get_max(
node.left
) # Gets the max value of the left branch
- self.remove(tmp_node.value)
+ self.remove(tmp_node.value) # type: ignore
node.value = (
- tmp_node.value
+ tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
- def preorder_traverse(self, node):
+ def preorder_traverse(self, node: Node | None) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
- def traversal_tree(self, traversal_function=None):
+ def traversal_tree(self, traversal_function=None) -> Any:
"""
This function traversal the tree.
You can pass a function to traversal the tree as needed by client code
@@ -141,7 +152,7 @@ class BinarySearchTree:
else:
return traversal_function(self.root)
- def inorder(self, arr: list, node: Node):
+ def inorder(self, arr: list, node: Node | None) -> None:
"""Perform an inorder traversal and append values of the nodes to
a list named arr"""
if node:
@@ -151,22 +162,22 @@ class BinarySearchTree:
def find_kth_smallest(self, k: int, node: Node) -> int:
"""Return the kth smallest element in a binary search tree"""
- arr = []
+ arr: list[int] = []
self.inorder(arr, node) # append all values to list using inorder traversal
return arr[k - 1]
-def postorder(curr_node):
+def postorder(curr_node: Node | None) -> list[Node]:
"""
postOrder (left, right, self)
"""
- node_list = list()
+ node_list = []
if curr_node is not None:
node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
-def binary_search_tree():
+def binary_search_tree() -> None:
r"""
Example
8
@@ -177,14 +188,15 @@ def binary_search_tree():
/ \ /
4 7 13
- >>> t = BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
+ >>> t = BinarySearchTree()
+ >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
>>> print(" ".join(repr(i.value) for i in t.traversal_tree()))
8 3 1 6 4 7 10 14 13
>>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder)))
1 4 7 6 3 13 14 10 8
>>> BinarySearchTree().search(6)
Traceback (most recent call last):
- ...
+ ...
IndexError: Warning: Tree is empty! please use another.
"""
testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
@@ -206,8 +218,8 @@ def binary_search_tree():
print("The value -1 doesn't exist")
if not t.empty():
- print("Max Value: ", t.get_max().value)
- print("Min Value: ", t.get_min().value)
+ print("Max Value: ", t.get_max().value) # type: ignore
+ print("Min Value: ", t.get_min().value) # type: ignore
for i in testlist:
t.remove(i)
@@ -217,5 +229,4 @@ def binary_search_tree():
if __name__ == "__main__":
import doctest
- doctest.testmod()
- # binary_search_tree()
+ doctest.testmod(verbose=True)
diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py
index a05e28a7b..b5b983b9b 100644
--- a/data_structures/binary_tree/binary_search_tree_recursive.py
+++ b/data_structures/binary_tree/binary_search_tree_recursive.py
@@ -7,21 +7,23 @@ python -m unittest binary_search_tree_recursive.py
To run an example:
python binary_search_tree_recursive.py
"""
+from __future__ import annotations
+
import unittest
-from typing import Iterator, Optional
+from collections.abc import Iterator
class Node:
- def __init__(self, label: int, parent: Optional["Node"]) -> None:
+ def __init__(self, label: int, parent: Node | None) -> None:
self.label = label
self.parent = parent
- self.left: Optional[Node] = None
- self.right: Optional[Node] = None
+ self.left: Node | None = None
+ self.right: Node | None = None
class BinarySearchTree:
def __init__(self) -> None:
- self.root: Optional[Node] = None
+ self.root: Node | None = None
def empty(self) -> None:
"""
@@ -66,9 +68,7 @@ class BinarySearchTree:
"""
self.root = self._put(self.root, label)
- def _put(
- self, node: Optional[Node], label: int, parent: Optional[Node] = None
- ) -> Node:
+ def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Node:
if node is None:
node = Node(label, parent)
else:
@@ -77,7 +77,8 @@ class BinarySearchTree:
elif label > node.label:
node.right = self._put(node.right, label, node)
else:
- raise Exception(f"Node with label {label} already exists")
+ msg = f"Node with label {label} already exists"
+ raise Exception(msg)
return node
@@ -98,9 +99,10 @@ class BinarySearchTree:
"""
return self._search(self.root, label)
- def _search(self, node: Optional[Node], label: int) -> Node:
+ def _search(self, node: Node | None, label: int) -> Node:
if node is None:
- raise Exception(f"Node with label {label} does not exist")
+ msg = f"Node with label {label} does not exist"
+ raise Exception(msg)
else:
if label < node.label:
node = self._search(node.left, label)
@@ -140,7 +142,7 @@ class BinarySearchTree:
else:
self._reassign_nodes(node, None)
- def _reassign_nodes(self, node: Node, new_children: Optional[Node]) -> None:
+ def _reassign_nodes(self, node: Node, new_children: Node | None) -> None:
if new_children:
new_children.parent = node.parent
@@ -244,7 +246,7 @@ class BinarySearchTree:
"""
return self._inorder_traversal(self.root)
- def _inorder_traversal(self, node: Optional[Node]) -> Iterator[Node]:
+ def _inorder_traversal(self, node: Node | None) -> Iterator[Node]:
if node is not None:
yield from self._inorder_traversal(node.left)
yield node
@@ -266,7 +268,7 @@ class BinarySearchTree:
"""
return self._preorder_traversal(self.root)
- def _preorder_traversal(self, node: Optional[Node]) -> Iterator[Node]:
+ def _preorder_traversal(self, node: Node | None) -> Iterator[Node]:
if node is not None:
yield node
yield from self._preorder_traversal(node.left)
@@ -357,7 +359,7 @@ class BinarySearchTreeTest(unittest.TestCase):
assert t.root.left.left.parent == t.root.left
assert t.root.left.left.label == 1
- with self.assertRaises(Exception):
+ with self.assertRaises(Exception): # noqa: B017
t.put(1)
def test_search(self) -> None:
@@ -369,7 +371,7 @@ class BinarySearchTreeTest(unittest.TestCase):
node = t.search(13)
assert node.label == 13
- with self.assertRaises(Exception):
+ with self.assertRaises(Exception): # noqa: B017
t.search(2)
def test_remove(self) -> None:
@@ -515,7 +517,7 @@ class BinarySearchTreeTest(unittest.TestCase):
assert t.get_max_label() == 14
t.empty()
- with self.assertRaises(Exception):
+ with self.assertRaises(Exception): # noqa: B017
t.get_max_label()
def test_get_min_label(self) -> None:
@@ -524,7 +526,7 @@ class BinarySearchTreeTest(unittest.TestCase):
assert t.get_min_label() == 1
t.empty()
- with self.assertRaises(Exception):
+ with self.assertRaises(Exception): # noqa: B017
t.get_min_label()
def test_inorder_traversal(self) -> None:
diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py
index dc7f657b3..b8548f4ec 100644
--- a/data_structures/binary_tree/binary_tree_mirror.py
+++ b/data_structures/binary_tree/binary_tree_mirror.py
@@ -1,6 +1,6 @@
"""
Problem Description:
-Given a binary tree, return it's mirror.
+Given a binary tree, return its mirror.
"""
@@ -21,17 +21,18 @@ def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict:
{1: [3, 2], 2: [5, 4], 3: [7, 6], 4: [11, 10]}
>>> binary_tree_mirror({ 1: [2,3], 2: [4,5], 3: [6,7], 4: [10,11]}, 5)
Traceback (most recent call last):
- ...
+ ...
ValueError: root 5 is not present in the binary_tree
>>> binary_tree_mirror({}, 5)
Traceback (most recent call last):
- ...
+ ...
ValueError: binary tree cannot be empty
"""
if not binary_tree:
raise ValueError("binary tree cannot be empty")
if root not in binary_tree:
- raise ValueError(f"root {root} is not present in the binary_tree")
+ msg = f"root {root} is not present in the binary_tree"
+ raise ValueError(msg)
binary_tree_mirror_dictionary = dict(binary_tree)
binary_tree_mirror_dict(binary_tree_mirror_dictionary, root)
return binary_tree_mirror_dictionary
diff --git a/data_structures/binary_tree/binary_tree_node_sum.py b/data_structures/binary_tree/binary_tree_node_sum.py
new file mode 100644
index 000000000..5a13e74e3
--- /dev/null
+++ b/data_structures/binary_tree/binary_tree_node_sum.py
@@ -0,0 +1,76 @@
+"""
+Sum of all nodes in a binary tree.
+
+Python implementation:
+ O(n) time complexity - Recurses through :meth:`depth_first_search`
+ with each element.
+ O(n) space complexity - At any point in time maximum number of stack
+ frames that could be in memory is `n`
+"""
+
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+
+
+class Node:
+ """
+ A Node has a value variable and pointers to Nodes to its left and right.
+ """
+
+ def __init__(self, value: int) -> None:
+ self.value = value
+ self.left: Node | None = None
+ self.right: Node | None = None
+
+
+class BinaryTreeNodeSum:
+ r"""
+ The below tree looks like this
+ 10
+ / \
+ 5 -3
+ / / \
+ 12 8 0
+
+ >>> tree = Node(10)
+ >>> sum(BinaryTreeNodeSum(tree))
+ 10
+
+ >>> tree.left = Node(5)
+ >>> sum(BinaryTreeNodeSum(tree))
+ 15
+
+ >>> tree.right = Node(-3)
+ >>> sum(BinaryTreeNodeSum(tree))
+ 12
+
+ >>> tree.left.left = Node(12)
+ >>> sum(BinaryTreeNodeSum(tree))
+ 24
+
+ >>> tree.right.left = Node(8)
+ >>> tree.right.right = Node(0)
+ >>> sum(BinaryTreeNodeSum(tree))
+ 32
+ """
+
+ def __init__(self, tree: Node) -> None:
+ self.tree = tree
+
+ def depth_first_search(self, node: Node | None) -> int:
+ if node is None:
+ return 0
+ return node.value + (
+ self.depth_first_search(node.left) + self.depth_first_search(node.right)
+ )
+
+ def __iter__(self) -> Iterator[int]:
+ yield self.depth_first_search(self.tree)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/binary_tree_path_sum.py b/data_structures/binary_tree/binary_tree_path_sum.py
new file mode 100644
index 000000000..a3fe9ca7a
--- /dev/null
+++ b/data_structures/binary_tree/binary_tree_path_sum.py
@@ -0,0 +1,88 @@
+"""
+Given the root of a binary tree and an integer target,
+find the number of paths where the sum of the values
+along the path equals target.
+
+
+Leetcode reference: https://leetcode.com/problems/path-sum-iii/
+"""
+
+from __future__ import annotations
+
+
+class Node:
+ """
+ A Node has value variable and pointers to Nodes to its left and right.
+ """
+
+ def __init__(self, value: int) -> None:
+ self.value = value
+ self.left: Node | None = None
+ self.right: Node | None = None
+
+
+class BinaryTreePathSum:
+ r"""
+ The below tree looks like this
+ 10
+ / \
+ 5 -3
+ / \ \
+ 3 2 11
+ / \ \
+ 3 -2 1
+
+
+ >>> tree = Node(10)
+ >>> tree.left = Node(5)
+ >>> tree.right = Node(-3)
+ >>> tree.left.left = Node(3)
+ >>> tree.left.right = Node(2)
+ >>> tree.right.right = Node(11)
+ >>> tree.left.left.left = Node(3)
+ >>> tree.left.left.right = Node(-2)
+ >>> tree.left.right.right = Node(1)
+
+ >>> BinaryTreePathSum().path_sum(tree, 8)
+ 3
+ >>> BinaryTreePathSum().path_sum(tree, 7)
+ 2
+ >>> tree.right.right = Node(10)
+ >>> BinaryTreePathSum().path_sum(tree, 8)
+ 2
+ """
+
+ target: int
+
+ def __init__(self) -> None:
+ self.paths = 0
+
+ def depth_first_search(self, node: Node | None, path_sum: int) -> None:
+ if node is None:
+ return
+
+ if path_sum == self.target:
+ self.paths += 1
+
+ if node.left:
+ self.depth_first_search(node.left, path_sum + node.left.value)
+ if node.right:
+ self.depth_first_search(node.right, path_sum + node.right.value)
+
+ def path_sum(self, node: Node | None, target: int | None = None) -> int:
+ if node is None:
+ return 0
+ if target is not None:
+ self.target = target
+
+ self.depth_first_search(node, node.value)
+ self.path_sum(node.left)
+ self.path_sum(node.right)
+
+ return self.paths
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/binary_tree_traversals.md b/data_structures/binary_tree/binary_tree_traversals.md
new file mode 100644
index 000000000..ebe727b65
--- /dev/null
+++ b/data_structures/binary_tree/binary_tree_traversals.md
@@ -0,0 +1,111 @@
+# Binary Tree Traversal
+
+## Overview
+
+The combination of binary trees being data structures and traversal being an algorithm relates to classic problems, either directly or indirectly.
+
+> If you can grasp the traversal of binary trees, the traversal of other complicated trees will be easy for you.
+
+The following are some common ways to traverse trees.
+
+- Depth First Traversals (DFS): In-order, Pre-order, Post-order
+
+- Level Order Traversal or Breadth First Traversal (BFS)
+
+There are applications for both DFS and BFS.
+
+Stack can be used to simplify the process of DFS traversal. Besides, since tree is a recursive data structure, recursion and stack are two key points for DFS.
+
+Graph for DFS:
+
+
+
+The key point of BFS is how to determine whether the traversal of each level has been completed. The answer is to use a variable as a flag to represent the end of the traversal of current level.
+
+## Pre-order Traversal
+
+The traversal order of pre-order traversal is `root-left-right`.
+
+Algorithm Pre-order
+
+1. Visit the root node and push it into a stack.
+
+2. Pop a node from the stack, and push its right and left child node into the stack respectively.
+
+3. Repeat step 2.
+
+Conclusion: This problem involves the classic recursive data structure (i.e. a binary tree), and the algorithm above demonstrates how a simplified solution can be reached by using a stack.
+
+If you look at the bigger picture, you'll find that the process of traversal is as follows. `Visit the left subtrees respectively from top to bottom, and visit the right subtrees respectively from bottom to top`. If we are to implement it from this perspective, things will be somewhat different. For the `top to bottom` part we can simply use recursion, and for the `bottom to top` part we can turn to stack.
+
+## In-order Traversal
+
+The traversal order of in-order traversal is `left-root-right`.
+
+So the root node is not printed first. Things are getting a bit complicated here.
+
+Algorithm In-order
+
+1. Visit the root and push it into a stack.
+
+2. If there is a left child node, push it into the stack. Repeat this process until a leaf node reached.
+
+ > At this point the root node and all the left nodes are in the stack.
+
+3. Start popping nodes from the stack. If a node has a right child node, push the child node into the stack. Repeat step 2.
+
+It's worth pointing out that the in-order traversal of a binary search tree (BST) is a sorted array, which is helpful for coming up with simplified solutions for some problems.
+
+## Post-order Traversal
+
+The traversal order of post-order traversal is `left-right-root`.
+
+This one is a bit of a challenge. It deserves the `hard` tag of LeetCode.
+
+In this case, the root node is printed not as the first but the last one. A cunning way to do it is to:
+
+Record whether the current node has been visited. If 1) it's a leaf node or 2) both its left and right subtrees have been traversed, then it can be popped from the stack.
+
+As for `1) it's a leaf node`, you can easily tell whether a node is a leaf if both its left and right are `null`.
+
+As for `2) both its left and right subtrees have been traversed`, we only need a variable to record whether a node has been visited or not. In the worst case, we need to record the status for every single node and the space complexity is `O(n)`. But if you come to think about it, as we are using a stack and start printing the result from the leaf nodes, it makes sense that we only record the status for the current node popping from the stack, reducing the space complexity to `O(1)`.
+
+## Level Order Traversal
+
+The key point of level order traversal is how do we know whether the traversal of each level is done. The answer is that we use a variable as a flag representing the end of the traversal of the current level.
+
+
+
+Algorithm Level-order
+
+1. Visit the root node, put it in a FIFO queue, put in the queue a special flag (we are using `null` here).
+
+2. Dequeue a node.
+
+3. If the node equals `null`, it means that all nodes of the current level have been visited. If the queue is empty, we do nothing. Or else we put in another `null`.
+
+4. If the node is not `null`, meaning the traversal of current level has not finished yet, we enqueue its left subtree and right subtree respectively.
+
+## Bi-color marking
+
+We know that there is a tri-color marking in garbage collection algorithm, which works as described below.
+
+- The white color represents "not visited".
+
+- The gray color represents "not all child nodes visited".
+
+- The black color represents "all child nodes visited".
+
+Enlightened by tri-color marking, a bi-color marking method can be invented to solve all three traversal problems with one solution.
+
+The core idea is as follows.
+
+- Use a color to mark whether a node has been visited or not. Nodes yet to be visited are marked as white and visited nodes are marked as gray.
+
+- If we are visiting a white node, turn it into gray, and push its right child node, itself, and its left child node into the stack respectively.
+
+- If we are visiting a gray node, print it.
+
+Implementation of pre-order and post-order traversal algorithms can be easily done by changing the order of pushing the child nodes into the stack.
+
+Reference: [LeetCode](https://github.com/azl397985856/leetcode/blob/master/thinkings/binary-tree-traversal.en.md)
diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py
index 7857880da..2afb7604f 100644
--- a/data_structures/binary_tree/binary_tree_traversals.py
+++ b/data_structures/binary_tree/binary_tree_traversals.py
@@ -1,29 +1,46 @@
# https://en.wikipedia.org/wiki/Tree_traversal
+from __future__ import annotations
+
+from collections import deque
+from collections.abc import Sequence
from dataclasses import dataclass
-from typing import Optional
+from typing import Any
@dataclass
class Node:
data: int
- left: Optional["Node"] = None
- right: Optional["Node"] = None
+ left: Node | None = None
+ right: Node | None = None
-def make_tree() -> Node:
- return Node(1, Node(2, Node(4), Node(5)), Node(3))
+def make_tree() -> Node | None:
+ r"""
+ The below tree
+ 1
+ / \
+ 2 3
+ / \
+ 4 5
+ """
+ tree = Node(1)
+ tree.left = Node(2)
+ tree.right = Node(3)
+ tree.left.left = Node(4)
+ tree.left.right = Node(5)
+ return tree
-def preorder(root: Node):
+def preorder(root: Node | None) -> list[int]:
"""
Pre-order traversal visits root node, left subtree, right subtree.
>>> preorder(make_tree())
[1, 2, 4, 5, 3]
"""
- return [root.data] + preorder(root.left) + preorder(root.right) if root else []
+ return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
-def postorder(root: Node):
+def postorder(root: Node | None) -> list[int]:
"""
Post-order traversal visits left subtree, right subtree, root node.
>>> postorder(make_tree())
@@ -32,16 +49,29 @@ def postorder(root: Node):
return postorder(root.left) + postorder(root.right) + [root.data] if root else []
-def inorder(root: Node):
+def inorder(root: Node | None) -> list[int]:
"""
In-order traversal visits left subtree, root node, right subtree.
>>> inorder(make_tree())
[4, 2, 5, 1, 3]
"""
- return inorder(root.left) + [root.data] + inorder(root.right) if root else []
+ return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
-def height(root: Node):
+def reverse_inorder(root: Node | None) -> list[int]:
+ """
+ Reverse in-order traversal visits right subtree, root node, left subtree.
+ >>> reverse_inorder(make_tree())
+ [3, 1, 5, 2, 4]
+ """
+ return (
+ [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]
+ if root
+ else []
+ )
+
+
+def height(root: Node | None) -> int:
"""
Recursive function for calculating the height of the binary tree.
>>> height(None)
@@ -52,99 +82,119 @@ def height(root: Node):
return (max(height(root.left), height(root.right)) + 1) if root else 0
-def level_order_1(root: Node):
+def level_order(root: Node | None) -> Sequence[Node | None]:
"""
- Print whole binary tree in Level Order Traverse.
+ Returns a list of nodes value from a whole binary tree in Level Order Traverse.
Level Order traverse: Visit nodes of the tree level-by-level.
"""
- if not root:
- return
- temp = root
- que = [temp]
- while len(que) > 0:
- print(que[0].data, end=" ")
- temp = que.pop(0)
- if temp.left:
- que.append(temp.left)
- if temp.right:
- que.append(temp.right)
- return que
+ output: list[Any] = []
+
+ if root is None:
+ return output
+
+ process_queue = deque([root])
+
+ while process_queue:
+ node = process_queue.popleft()
+ output.append(node.data)
+
+ if node.left:
+ process_queue.append(node.left)
+ if node.right:
+ process_queue.append(node.right)
+ return output
-def level_order_2(root: Node, level: int):
+def get_nodes_from_left_to_right(
+ root: Node | None, level: int
+) -> Sequence[Node | None]:
"""
- Level-wise traversal: Print all nodes present at the given level of the binary tree
+ Returns a list of nodes value from a particular level:
+ Left to right direction of the binary tree.
"""
- if not root:
- return root
- if level == 1:
- print(root.data, end=" ")
- elif level > 1:
- level_order_2(root.left, level - 1)
- level_order_2(root.right, level - 1)
+ output: list[Any] = []
+
+ def populate_output(root: Node | None, level: int) -> None:
+ if not root:
+ return
+ if level == 1:
+ output.append(root.data)
+ elif level > 1:
+ populate_output(root.left, level - 1)
+ populate_output(root.right, level - 1)
+
+ populate_output(root, level)
+ return output
-def print_left_to_right(root: Node, level: int):
+def get_nodes_from_right_to_left(
+ root: Node | None, level: int
+) -> Sequence[Node | None]:
"""
- Print elements on particular level from left to right direction of the binary tree.
+ Returns a list of nodes value from a particular level:
+ Right to left direction of the binary tree.
"""
- if not root:
- return
- if level == 1:
- print(root.data, end=" ")
- elif level > 1:
- print_left_to_right(root.left, level - 1)
- print_left_to_right(root.right, level - 1)
+ output: list[Any] = []
+
+ def populate_output(root: Node | None, level: int) -> None:
+ if root is None:
+ return
+ if level == 1:
+ output.append(root.data)
+ elif level > 1:
+ populate_output(root.right, level - 1)
+ populate_output(root.left, level - 1)
+
+ populate_output(root, level)
+ return output
-def print_right_to_left(root: Node, level: int):
+def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
"""
- Print elements on particular level from right to left direction of the binary tree.
+ ZigZag traverse:
+ Returns a list of nodes value from left to right and right to left, alternatively.
"""
- if not root:
- return
- if level == 1:
- print(root.data, end=" ")
- elif level > 1:
- print_right_to_left(root.right, level - 1)
- print_right_to_left(root.left, level - 1)
+ if root is None:
+ return []
+ output: list[Sequence[Node | None]] = []
-def zigzag(root: Node):
- """
- ZigZag traverse: Print node left to right and right to left, alternatively.
- """
flag = 0
height_tree = height(root)
+
for h in range(1, height_tree + 1):
- if flag == 0:
- print_left_to_right(root, h)
+ if not flag:
+ output.append(get_nodes_from_left_to_right(root, h))
flag = 1
else:
- print_right_to_left(root, h)
+ output.append(get_nodes_from_right_to_left(root, h))
flag = 0
+ return output
-def main(): # Main function for testing.
- """
- Create binary tree.
- """
+
+def main() -> None: # Main function for testing.
+ # Create binary tree.
root = make_tree()
- """
- All Traversals of the binary are as follows:
- """
- print(f" In-order Traversal is {inorder(root)}")
- print(f" Pre-order Traversal is {preorder(root)}")
- print(f"Post-order Traversal is {postorder(root)}")
- print(f"Height of Tree is {height(root)}")
- print("Complete Level Order Traversal is : ")
- level_order_1(root)
- print("\nLevel-wise order Traversal is : ")
- for h in range(1, height(root) + 1):
- level_order_2(root, h)
- print("\nZigZag order Traversal is : ")
- zigzag(root)
- print()
+
+ # All Traversals of the binary are as follows:
+ print(f"In-order Traversal: {inorder(root)}")
+ print(f"Reverse In-order Traversal: {reverse_inorder(root)}")
+ print(f"Pre-order Traversal: {preorder(root)}")
+ print(f"Post-order Traversal: {postorder(root)}", "\n")
+
+ print(f"Height of Tree: {height(root)}", "\n")
+
+ print("Complete Level Order Traversal: ")
+ print(level_order(root), "\n")
+
+ print("Level-wise order Traversal: ")
+
+ for level in range(1, height(root) + 1):
+ print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
+
+ print("\nZigZag order Traversal: ")
+ print(zigzag(root))
if __name__ == "__main__":
diff --git a/data_structures/binary_tree/diff_views_of_binary_tree.py b/data_structures/binary_tree/diff_views_of_binary_tree.py
new file mode 100644
index 000000000..3198d8065
--- /dev/null
+++ b/data_structures/binary_tree/diff_views_of_binary_tree.py
@@ -0,0 +1,210 @@
+r"""
+Problem: Given root of a binary tree, return the:
+1. binary-tree-right-side-view
+2. binary-tree-left-side-view
+3. binary-tree-top-side-view
+4. binary-tree-bottom-side-view
+"""
+
+from __future__ import annotations
+
+from collections import defaultdict
+from dataclasses import dataclass
+
+
+@dataclass
+class TreeNode:
+ val: int
+ left: TreeNode | None = None
+ right: TreeNode | None = None
+
+
+def make_tree() -> TreeNode:
+ """
+ >>> make_tree().val
+ 3
+ """
+ return TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
+
+
+def binary_tree_right_side_view(root: TreeNode) -> list[int]:
+ r"""
+ Function returns the right side view of binary tree.
+
+ 3 <- 3
+ / \
+ 9 20 <- 20
+ / \
+ 15 7 <- 7
+
+ >>> binary_tree_right_side_view(make_tree())
+ [3, 20, 7]
+ >>> binary_tree_right_side_view(None)
+ []
+ """
+
+ def depth_first_search(
+ root: TreeNode | None, depth: int, right_view: list[int]
+ ) -> None:
+ """
+ A depth first search preorder traversal to append the values at
+ right side of tree.
+ """
+ if not root:
+ return
+
+ if depth == len(right_view):
+ right_view.append(root.val)
+
+ depth_first_search(root.right, depth + 1, right_view)
+ depth_first_search(root.left, depth + 1, right_view)
+
+ right_view: list = []
+ if not root:
+ return right_view
+
+ depth_first_search(root, 0, right_view)
+ return right_view
+
+
+def binary_tree_left_side_view(root: TreeNode) -> list[int]:
+ r"""
+ Function returns the left side view of binary tree.
+
+ 3 -> 3
+ / \
+ 9 -> 9 20
+ / \
+ 15 -> 15 7
+
+ >>> binary_tree_left_side_view(make_tree())
+ [3, 9, 15]
+ >>> binary_tree_left_side_view(None)
+ []
+ """
+
+ def depth_first_search(
+ root: TreeNode | None, depth: int, left_view: list[int]
+ ) -> None:
+ """
+ A depth first search preorder traversal to append the values
+ at left side of tree.
+ """
+ if not root:
+ return
+
+ if depth == len(left_view):
+ left_view.append(root.val)
+
+ depth_first_search(root.left, depth + 1, left_view)
+ depth_first_search(root.right, depth + 1, left_view)
+
+ left_view: list = []
+ if not root:
+ return left_view
+
+ depth_first_search(root, 0, left_view)
+ return left_view
+
+
+def binary_tree_top_side_view(root: TreeNode) -> list[int]:
+ r"""
+ Function returns the top side view of binary tree.
+
+ 9 3 20 7
+ ⬇ ⬇ ⬇ ⬇
+
+ 3
+ / \
+ 9 20
+ / \
+ 15 7
+
+ >>> binary_tree_top_side_view(make_tree())
+ [9, 3, 20, 7]
+ >>> binary_tree_top_side_view(None)
+ []
+ """
+
+ def breadth_first_search(root: TreeNode, top_view: list[int]) -> None:
+ """
+ A breadth first search traversal with defaultdict ds to append
+ the values of tree from top view
+ """
+ queue = [(root, 0)]
+ lookup = defaultdict(list)
+
+ while queue:
+ first = queue.pop(0)
+ node, hd = first
+
+ lookup[hd].append(node.val)
+
+ if node.left:
+ queue.append((node.left, hd - 1))
+ if node.right:
+ queue.append((node.right, hd + 1))
+
+ for pair in sorted(lookup.items(), key=lambda each: each[0]):
+ top_view.append(pair[1][0])
+
+ top_view: list = []
+ if not root:
+ return top_view
+
+ breadth_first_search(root, top_view)
+ return top_view
+
+
+def binary_tree_bottom_side_view(root: TreeNode) -> list[int]:
+ r"""
+ Function returns the bottom side view of binary tree
+
+ 3
+ / \
+ 9 20
+ / \
+ 15 7
+ ↑ ↑ ↑ ↑
+ 9 15 20 7
+
+ >>> binary_tree_bottom_side_view(make_tree())
+ [9, 15, 20, 7]
+ >>> binary_tree_bottom_side_view(None)
+ []
+ """
+ from collections import defaultdict
+
+ def breadth_first_search(root: TreeNode, bottom_view: list[int]) -> None:
+ """
+ A breadth first search traversal with defaultdict ds to append
+ the values of tree from bottom view
+ """
+ queue = [(root, 0)]
+ lookup = defaultdict(list)
+
+ while queue:
+ first = queue.pop(0)
+ node, hd = first
+ lookup[hd].append(node.val)
+
+ if node.left:
+ queue.append((node.left, hd - 1))
+ if node.right:
+ queue.append((node.right, hd + 1))
+
+ for pair in sorted(lookup.items(), key=lambda each: each[0]):
+ bottom_view.append(pair[1][-1])
+
+ bottom_view: list = []
+ if not root:
+ return bottom_view
+
+ breadth_first_search(root, bottom_view)
+ return bottom_view
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/distribute_coins.py b/data_structures/binary_tree/distribute_coins.py
new file mode 100644
index 000000000..ea02afc2c
--- /dev/null
+++ b/data_structures/binary_tree/distribute_coins.py
@@ -0,0 +1,135 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 7, 2022
+
+Task:
+You are given a tree root of a binary tree with n nodes, where each node has
+node.data coins. There are exactly n coins in whole tree.
+
+In one move, we may choose two adjacent nodes and move one coin from one node
+to another. A move may be from parent to child, or from child to parent.
+
+Return the minimum number of moves required to make every node have exactly one coin.
+
+Example 1:
+
+ 3
+ / \
+ 0 0
+
+Result: 2
+
+Example 2:
+
+ 0
+ / \
+ 3 0
+
+Result 3
+
+leetcode: https://leetcode.com/problems/distribute-coins-in-binary-tree/
+
+Implementation notes:
+Uses a depth-first search approach.
+
+Let n be the number of nodes in the tree
+Runtime: O(n)
+Space: O(1)
+"""
+
+from __future__ import annotations
+
+from collections import namedtuple
+from dataclasses import dataclass
+
+
+@dataclass
+class TreeNode:
+ data: int
+ left: TreeNode | None = None
+ right: TreeNode | None = None
+
+
+CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
+
+
+def distribute_coins(root: TreeNode | None) -> int:
+ """
+ >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
+ 2
+ >>> distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0)))
+ 3
+ >>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3)))
+ 3
+ >>> distribute_coins(None)
+ 0
+ >>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(0)))
+ Traceback (most recent call last):
+ ...
+ ValueError: The nodes number should be same as the number of coins
+ >>> distribute_coins(TreeNode(0, TreeNode(1), TreeNode(1)))
+ Traceback (most recent call last):
+ ...
+ ValueError: The nodes number should be same as the number of coins
+ """
+
+ if root is None:
+ return 0
+
+ # Validation
+ def count_nodes(node: TreeNode | None) -> int:
+ """
+ >>> count_nodes(None)
+ 0
+ """
+ if node is None:
+ return 0
+
+ return count_nodes(node.left) + count_nodes(node.right) + 1
+
+ def count_coins(node: TreeNode | None) -> int:
+ """
+ >>> count_coins(None)
+ 0
+ """
+ if node is None:
+ return 0
+
+ return count_coins(node.left) + count_coins(node.right) + node.data
+
+ if count_nodes(root) != count_coins(root):
+ raise ValueError("The nodes number should be same as the number of coins")
+
+ # Main calculation
+ def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
+ """
+ >>> get_distrib(None)
+ namedtuple("CoinsDistribResult", "0 2")
+ """
+
+ if node is None:
+ return CoinsDistribResult(0, 1)
+
+ left_distrib_moves, left_distrib_excess = get_distrib(node.left)
+ right_distrib_moves, right_distrib_excess = get_distrib(node.right)
+
+ coins_to_left = 1 - left_distrib_excess
+ coins_to_right = 1 - right_distrib_excess
+
+ result_moves = (
+ left_distrib_moves
+ + right_distrib_moves
+ + abs(coins_to_left)
+ + abs(coins_to_right)
+ )
+ result_excess = node.data - coins_to_left - coins_to_right
+
+ return CoinsDistribResult(result_moves, result_excess)
+
+ return get_distrib(root)[0]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py
index 54f0f07ac..88b0873a1 100644
--- a/data_structures/binary_tree/fenwick_tree.py
+++ b/data_structures/binary_tree/fenwick_tree.py
@@ -1,28 +1,247 @@
+from copy import deepcopy
+
+
class FenwickTree:
- def __init__(self, SIZE): # create fenwick tree with size SIZE
- self.Size = SIZE
- self.ft = [0 for i in range(0, SIZE)]
+ """
+ Fenwick Tree
- def update(self, i, val): # update data (adding) in index i in O(lg N)
- while i < self.Size:
- self.ft[i] += val
- i += i & (-i)
+ More info: https://en.wikipedia.org/wiki/Fenwick_tree
+ """
- def query(self, i): # query cumulative data from index 0 to i in O(lg N)
- ret = 0
- while i > 0:
- ret += self.ft[i]
- i -= i & (-i)
- return ret
+ def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
+ """
+ Constructor for the Fenwick tree
+
+ Parameters:
+ arr (list): list of elements to initialize the tree with (optional)
+ size (int): size of the Fenwick tree (if arr is None)
+ """
+
+ if arr is None and size is not None:
+ self.size = size
+ self.tree = [0] * size
+ elif arr is not None:
+ self.init(arr)
+ else:
+ raise ValueError("Either arr or size must be specified")
+
+ def init(self, arr: list[int]) -> None:
+ """
+ Initialize the Fenwick tree with arr in O(N)
+
+ Parameters:
+ arr (list): list of elements to initialize the tree with
+
+ Returns:
+ None
+
+ >>> a = [1, 2, 3, 4, 5]
+ >>> f1 = FenwickTree(a)
+ >>> f2 = FenwickTree(size=len(a))
+ >>> for index, value in enumerate(a):
+ ... f2.add(index, value)
+ >>> f1.tree == f2.tree
+ True
+ """
+ self.size = len(arr)
+ self.tree = deepcopy(arr)
+ for i in range(1, self.size):
+ j = self.next_(i)
+ if j < self.size:
+ self.tree[j] += self.tree[i]
+
+ def get_array(self) -> list[int]:
+ """
+ Get the Normal Array of the Fenwick tree in O(N)
+
+ Returns:
+ list: Normal Array of the Fenwick tree
+
+ >>> a = [i for i in range(128)]
+ >>> f = FenwickTree(a)
+ >>> f.get_array() == a
+ True
+ """
+ arr = self.tree[:]
+ for i in range(self.size - 1, 0, -1):
+ j = self.next_(i)
+ if j < self.size:
+ arr[j] -= arr[i]
+ return arr
+
+ @staticmethod
+ def next_(index: int) -> int:
+ return index + (index & (-index))
+
+ @staticmethod
+ def prev(index: int) -> int:
+ return index - (index & (-index))
+
+ def add(self, index: int, value: int) -> None:
+ """
+ Add a value to index in O(lg N)
+
+ Parameters:
+ index (int): index to add value to
+ value (int): value to add to index
+
+ Returns:
+ None
+
+ >>> f = FenwickTree([1, 2, 3, 4, 5])
+ >>> f.add(0, 1)
+ >>> f.add(1, 2)
+ >>> f.add(2, 3)
+ >>> f.add(3, 4)
+ >>> f.add(4, 5)
+ >>> f.get_array()
+ [2, 4, 6, 8, 10]
+ """
+ if index == 0:
+ self.tree[0] += value
+ return
+ while index < self.size:
+ self.tree[index] += value
+ index = self.next_(index)
+
+ def update(self, index: int, value: int) -> None:
+ """
+ Set the value of index in O(lg N)
+
+ Parameters:
+ index (int): index to set value to
+ value (int): value to set in index
+
+ Returns:
+ None
+
+ >>> f = FenwickTree([5, 4, 3, 2, 1])
+ >>> f.update(0, 1)
+ >>> f.update(1, 2)
+ >>> f.update(2, 3)
+ >>> f.update(3, 4)
+ >>> f.update(4, 5)
+ >>> f.get_array()
+ [1, 2, 3, 4, 5]
+ """
+ self.add(index, value - self.get(index))
+
+ def prefix(self, right: int) -> int:
+ """
+ Prefix sum of all elements in [0, right) in O(lg N)
+
+ Parameters:
+ right (int): right bound of the query (exclusive)
+
+ Returns:
+ int: sum of all elements in [0, right)
+
+ >>> a = [i for i in range(128)]
+ >>> f = FenwickTree(a)
+ >>> res = True
+ >>> for i in range(len(a)):
+ ... res = res and f.prefix(i) == sum(a[:i])
+ >>> res
+ True
+ """
+ if right == 0:
+ return 0
+ result = self.tree[0]
+ right -= 1 # make right inclusive
+ while right > 0:
+ result += self.tree[right]
+ right = self.prev(right)
+ return result
+
+ def query(self, left: int, right: int) -> int:
+ """
+ Query the sum of all elements in [left, right) in O(lg N)
+
+ Parameters:
+ left (int): left bound of the query (inclusive)
+ right (int): right bound of the query (exclusive)
+
+ Returns:
+ int: sum of all elements in [left, right)
+
+ >>> a = [i for i in range(128)]
+ >>> f = FenwickTree(a)
+ >>> res = True
+ >>> for i in range(len(a)):
+ ... for j in range(i + 1, len(a)):
+ ... res = res and f.query(i, j) == sum(a[i:j])
+ >>> res
+ True
+ """
+ return self.prefix(right) - self.prefix(left)
+
+ def get(self, index: int) -> int:
+ """
+ Get value at index in O(lg N)
+
+ Parameters:
+ index (int): index to get the value
+
+ Returns:
+ int: Value of element at index
+
+ >>> a = [i for i in range(128)]
+ >>> f = FenwickTree(a)
+ >>> res = True
+ >>> for i in range(len(a)):
+ ... res = res and f.get(i) == a[i]
+ >>> res
+ True
+ """
+ return self.query(index, index + 1)
+
+ def rank_query(self, value: int) -> int:
+ """
+ Find the largest index with prefix(i) <= value in O(lg N)
+ NOTE: Requires that all values are non-negative!
+
+ Parameters:
+ value (int): value to find the largest index of
+
+ Returns:
+ -1: if value is smaller than all elements in prefix sum
+ int: largest index with prefix(i) <= value
+
+ >>> f = FenwickTree([1, 2, 0, 3, 0, 5])
+ >>> f.rank_query(0)
+ -1
+ >>> f.rank_query(2)
+ 0
+ >>> f.rank_query(1)
+ 0
+ >>> f.rank_query(3)
+ 2
+ >>> f.rank_query(5)
+ 2
+ >>> f.rank_query(6)
+ 4
+ >>> f.rank_query(11)
+ 5
+ """
+ value -= self.tree[0]
+ if value < 0:
+ return -1
+
+        j = 1  # Largest power of 2 less than size
+ while j * 2 < self.size:
+ j *= 2
+
+ i = 0
+
+ while j > 0:
+ if i + j < self.size and self.tree[i + j] <= value:
+ value -= self.tree[i + j]
+ i += j
+ j //= 2
+ return i
if __name__ == "__main__":
- f = FenwickTree(100)
- f.update(1, 20)
- f.update(4, 4)
- print(f.query(1))
- print(f.query(3))
- print(f.query(4))
- f.update(2, -5)
- print(f.query(1))
- print(f.query(3))
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py
new file mode 100644
index 000000000..1357527d2
--- /dev/null
+++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py
@@ -0,0 +1,82 @@
+"""
+Illustrate how to implement inorder traversal in binary search tree.
+Author: Gurneet Singh
+https://www.geeksforgeeks.org/tree-traversals-inorder-preorder-and-postorder/
+"""
+
+
+class BinaryTreeNode:
+ """Defining the structure of BinaryTreeNode"""
+
+ def __init__(self, data: int) -> None:
+ self.data = data
+ self.left_child: BinaryTreeNode | None = None
+ self.right_child: BinaryTreeNode | None = None
+
+
+def insert(node: BinaryTreeNode | None, new_value: int) -> BinaryTreeNode | None:
+ """
+ If the binary search tree is empty, make a new node and declare it as root.
+ >>> node_a = BinaryTreeNode(12345)
+ >>> node_b = insert(node_a, 67890)
+ >>> node_a.left_child == node_b.left_child
+ True
+ >>> node_a.right_child == node_b.right_child
+ True
+ >>> node_a.data == node_b.data
+ True
+ """
+ if node is None:
+ node = BinaryTreeNode(new_value)
+ return node
+
+ # binary search tree is not empty,
+ # so we will insert it into the tree
+ # if new_value is less than value of data in node,
+ # add it to left subtree and proceed recursively
+ if new_value < node.data:
+ node.left_child = insert(node.left_child, new_value)
+ else:
+ # if new_value is greater than value of data in node,
+ # add it to right subtree and proceed recursively
+ node.right_child = insert(node.right_child, new_value)
+ return node
+
+
+def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return
+ """
+ >>> inorder(make_tree())
+ [6, 10, 14, 15, 20, 25, 60]
+ """
+ if node:
+ inorder_array = inorder(node.left_child)
+ inorder_array = [*inorder_array, node.data]
+ inorder_array = inorder_array + inorder(node.right_child)
+ else:
+ inorder_array = []
+ return inorder_array
+
+
+def make_tree() -> BinaryTreeNode | None:
+ root = insert(None, 15)
+ insert(root, 10)
+ insert(root, 25)
+ insert(root, 6)
+ insert(root, 14)
+ insert(root, 20)
+ insert(root, 60)
+ return root
+
+
+def main() -> None:
+ # main function
+ root = make_tree()
+ print("Printing values of binary search tree in Inorder Traversal.")
+    print(inorder(root))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ main()
diff --git a/data_structures/binary_tree/is_bst.py b/data_structures/binary_tree/is_bst.py
new file mode 100644
index 000000000..0b2ef8c9f
--- /dev/null
+++ b/data_structures/binary_tree/is_bst.py
@@ -0,0 +1,131 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 2, 2022
+
+Task:
+Given the root of a binary tree, determine if it is a valid binary search
+tree (BST).
+
+A valid binary search tree is defined as follows:
+
+- The left subtree of a node contains only nodes with keys less than the node's key.
+- The right subtree of a node contains only nodes with keys greater than the node's key.
+- Both the left and right subtrees must also be binary search trees.
+
+Implementation notes:
+Depth-first search approach.
+
+leetcode: https://leetcode.com/problems/validate-binary-search-tree/
+
+Let n is the number of nodes in tree
+Runtime: O(n)
+Space: O(n) in the worst case (recursion stack)
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+
+@dataclass
+class TreeNode:
+ data: float
+ left: TreeNode | None = None
+ right: TreeNode | None = None
+
+
+def is_binary_search_tree(root: TreeNode | None) -> bool:
+ """
+ >>> is_binary_search_tree(TreeNode(data=2,
+ ... left=TreeNode(data=1),
+ ... right=TreeNode(data=3))
+ ... )
+ True
+
+ >>> is_binary_search_tree(TreeNode(data=0,
+ ... left=TreeNode(data=-11),
+ ... right=TreeNode(data=3))
+ ... )
+ True
+
+ >>> is_binary_search_tree(TreeNode(data=5,
+ ... left=TreeNode(data=1),
+ ... right=TreeNode(data=4, left=TreeNode(data=3)))
+ ... )
+ False
+
+ >>> is_binary_search_tree(TreeNode(data='a',
+ ... left=TreeNode(data=1),
+ ... right=TreeNode(data=4, left=TreeNode(data=3)))
+ ... )
+ Traceback (most recent call last):
+ ...
+ ValueError: Each node should be type of TreeNode and data should be float.
+
+ >>> is_binary_search_tree(TreeNode(data=2,
+ ... left=TreeNode([]),
+ ... right=TreeNode(data=4, left=TreeNode(data=3)))
+ ... )
+ Traceback (most recent call last):
+ ...
+ ValueError: Each node should be type of TreeNode and data should be float.
+ """
+
+ # Validation
+ def is_valid_tree(node: TreeNode | None) -> bool:
+ """
+ >>> is_valid_tree(None)
+ True
+ >>> is_valid_tree('abc')
+ False
+ >>> is_valid_tree(TreeNode(data='not a float'))
+ False
+ >>> is_valid_tree(TreeNode(data=1, left=TreeNode('123')))
+ False
+ """
+ if node is None:
+ return True
+
+ if not isinstance(node, TreeNode):
+ return False
+
+ try:
+ float(node.data)
+ except (TypeError, ValueError):
+ return False
+
+ return is_valid_tree(node.left) and is_valid_tree(node.right)
+
+ if not is_valid_tree(root):
+ raise ValueError(
+ "Each node should be type of TreeNode and data should be float."
+ )
+
+ def is_binary_search_tree_recursive_check(
+ node: TreeNode | None, left_bound: float, right_bound: float
+ ) -> bool:
+ """
+        >>> is_binary_search_tree_recursive_check(None, 0, 0)
+ True
+ >>> is_binary_search_tree_recursive_check(TreeNode(data=1), 10, 20)
+ False
+ """
+
+ if node is None:
+ return True
+
+ return (
+ left_bound < node.data < right_bound
+ and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
+ and is_binary_search_tree_recursive_check(
+ node.right, node.data, right_bound
+ )
+ )
+
+ return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py
index 9066db294..050dfe0a6 100644
--- a/data_structures/binary_tree/lazy_segment_tree.py
+++ b/data_structures/binary_tree/lazy_segment_tree.py
@@ -1,7 +1,6 @@
from __future__ import annotations
import math
-from typing import List, Union
class SegmentTree:
@@ -38,14 +37,14 @@ class SegmentTree:
return idx * 2 + 1
def build(
- self, idx: int, left_element: int, right_element: int, A: List[int]
+ self, idx: int, left_element: int, right_element: int, a: list[int]
) -> None:
if left_element == right_element:
- self.segment_tree[idx] = A[left_element - 1]
+ self.segment_tree[idx] = a[left_element - 1]
else:
mid = (left_element + right_element) // 2
- self.build(self.left(idx), left_element, mid, A)
- self.build(self.right(idx), mid + 1, right_element, A)
+ self.build(self.left(idx), left_element, mid, a)
+ self.build(self.right(idx), mid + 1, right_element, a)
self.segment_tree[idx] = max(
self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
)
@@ -89,7 +88,7 @@ class SegmentTree:
# query with O(lg n)
def query(
self, idx: int, left_element: int, right_element: int, a: int, b: int
- ) -> Union[int, float]:
+ ) -> int | float:
"""
query(1, 1, size, a, b) for query max of [a,b]
>>> A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
diff --git a/data_structures/binary_tree/lowest_common_ancestor.py b/data_structures/binary_tree/lowest_common_ancestor.py
index 2f1e893fc..651037703 100644
--- a/data_structures/binary_tree/lowest_common_ancestor.py
+++ b/data_structures/binary_tree/lowest_common_ancestor.py
@@ -3,7 +3,7 @@
from __future__ import annotations
-import queue
+from queue import Queue
def swap(a: int, b: int) -> tuple[int, int]:
@@ -37,7 +37,7 @@ def create_sparse(max_node: int, parent: list[list[int]]) -> list[list[int]]:
# returns lca of node u,v
def lowest_common_ancestor(
u: int, v: int, level: list[int], parent: list[list[int]]
-) -> list[list[int]]:
+) -> int:
# u must be deeper in the tree than v
if level[u] < level[v]:
u, v = swap(u, v)
@@ -50,7 +50,7 @@ def lowest_common_ancestor(
return u
# moving both nodes upwards till lca in found
for i in range(18, -1, -1):
- if parent[i][u] != 0 and parent[i][u] != parent[i][v]:
+ if parent[i][u] not in [0, parent[i][v]]:
u, v = parent[i][u], parent[i][v]
# returning longest common ancestor of u,v
return parent[0][u]
@@ -61,8 +61,8 @@ def breadth_first_search(
level: list[int],
parent: list[list[int]],
max_node: int,
- graph: dict[int, int],
- root=1,
+ graph: dict[int, list[int]],
+ root: int = 1,
) -> tuple[list[int], list[list[int]]]:
"""
sets every nodes direct parent
@@ -70,7 +70,7 @@ def breadth_first_search(
calculates depth of each node from root node
"""
level[root] = 0
- q = queue.Queue(maxsize=max_node)
+ q: Queue[int] = Queue(maxsize=max_node)
q.put(root)
while q.qsize() != 0:
u = q.get()
@@ -88,7 +88,7 @@ def main() -> None:
parent = [[0 for _ in range(max_node + 10)] for _ in range(20)]
# initializing with -1 which means every node is unvisited
level = [-1 for _ in range(max_node + 10)]
- graph = {
+ graph: dict[int, list[int]] = {
1: [2, 3, 4],
2: [5],
3: [6, 7],
diff --git a/data_structures/binary_tree/maximum_fenwick_tree.py b/data_structures/binary_tree/maximum_fenwick_tree.py
new file mode 100644
index 000000000..84967a70c
--- /dev/null
+++ b/data_structures/binary_tree/maximum_fenwick_tree.py
@@ -0,0 +1,114 @@
+class MaxFenwickTree:
+ """
+ Maximum Fenwick Tree
+
+ More info: https://cp-algorithms.com/data_structures/fenwick.html
+ ---------
+ >>> ft = MaxFenwickTree(5)
+ >>> ft.query(0, 5)
+ 0
+ >>> ft.update(4, 100)
+ >>> ft.query(0, 5)
+ 100
+ >>> ft.update(4, 0)
+ >>> ft.update(2, 20)
+ >>> ft.query(0, 5)
+ 20
+ >>> ft.update(4, 10)
+ >>> ft.query(2, 5)
+ 20
+ >>> ft.query(1, 5)
+ 20
+ >>> ft.update(2, 0)
+ >>> ft.query(0, 5)
+ 10
+ >>> ft = MaxFenwickTree(10000)
+ >>> ft.update(255, 30)
+ >>> ft.query(0, 10000)
+ 30
+ >>> ft = MaxFenwickTree(6)
+ >>> ft.update(5, 1)
+ >>> ft.query(5, 6)
+ 1
+ >>> ft = MaxFenwickTree(6)
+ >>> ft.update(0, 1000)
+ >>> ft.query(0, 1)
+ 1000
+ """
+
+ def __init__(self, size: int) -> None:
+ """
+ Create empty Maximum Fenwick Tree with specified size
+
+ Parameters:
+ size: size of Array
+
+ Returns:
+ None
+ """
+ self.size = size
+ self.arr = [0] * size
+ self.tree = [0] * size
+
+ @staticmethod
+ def get_next(index: int) -> int:
+ """
+ Get next index in O(1)
+ """
+ return index | (index + 1)
+
+ @staticmethod
+ def get_prev(index: int) -> int:
+ """
+ Get previous index in O(1)
+ """
+ return (index & (index + 1)) - 1
+
+ def update(self, index: int, value: int) -> None:
+ """
+ Set index to value in O(lg^2 N)
+
+ Parameters:
+ index: index to update
+ value: value to set
+
+ Returns:
+ None
+ """
+ self.arr[index] = value
+ while index < self.size:
+ current_left_border = self.get_prev(index) + 1
+ if current_left_border == index:
+ self.tree[index] = value
+ else:
+                self.tree[index] = max(value, self.query(current_left_border, index + 1))
+ index = self.get_next(index)
+
+ def query(self, left: int, right: int) -> int:
+ """
+ Answer the query of maximum range [l, r) in O(lg^2 N)
+
+ Parameters:
+ left: left index of query range (inclusive)
+ right: right index of query range (exclusive)
+
+ Returns:
+ Maximum value of range [left, right)
+ """
+ right -= 1 # Because of right is exclusive
+ result = 0
+ while left <= right:
+ current_left = self.get_prev(right)
+ if left <= current_left:
+ result = max(result, self.tree[right])
+ right = current_left
+ else:
+ result = max(result, self.arr[right])
+ right -= 1
+ return result
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py
index 6b202adb3..3380f8c5f 100644
--- a/data_structures/binary_tree/merge_two_binary_trees.py
+++ b/data_structures/binary_tree/merge_two_binary_trees.py
@@ -5,7 +5,7 @@ The rule for merging is that if two nodes overlap, then put the value sum of
both nodes to the new value of the merged node. Otherwise, the NOT null node
will be used as the node of new tree.
"""
-from typing import Optional
+from __future__ import annotations
class Node:
@@ -15,11 +15,11 @@ class Node:
def __init__(self, value: int = 0) -> None:
self.value = value
- self.left: Optional[Node] = None
- self.right: Optional[Node] = None
+ self.left: Node | None = None
+ self.right: Node | None = None
-def merge_two_binary_trees(tree1: Optional[Node], tree2: Optional[Node]) -> Node:
+def merge_two_binary_trees(tree1: Node | None, tree2: Node | None) -> Node | None:
"""
Returns root node of the merged tree.
@@ -52,7 +52,7 @@ def merge_two_binary_trees(tree1: Optional[Node], tree2: Optional[Node]) -> Node
return tree1
-def print_preorder(root: Optional[Node]) -> None:
+def print_preorder(root: Node | None) -> None:
"""
Print pre-order traversal of the tree.
diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py
index c914079e0..04164e5cb 100644
--- a/data_structures/binary_tree/non_recursive_segment_tree.py
+++ b/data_structures/binary_tree/non_recursive_segment_tree.py
@@ -37,12 +37,13 @@ https://www.geeksforgeeks.org/segment-tree-efficient-implementation/
"""
from __future__ import annotations
-from typing import Callable, TypeVar
+from collections.abc import Callable
+from typing import Any, Generic, TypeVar
T = TypeVar("T")
-class SegmentTree:
+class SegmentTree(Generic[T]):
def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
"""
Segment Tree constructor, it works just with commutative combiner.
@@ -55,8 +56,10 @@ class SegmentTree:
... lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
(6, 9)
"""
- self.N = len(arr)
- self.st = [None for _ in range(len(arr))] + arr
+ any_type: Any | T = None
+
+ self.N: int = len(arr)
+ self.st: list[T] = [any_type for _ in range(self.N)] + arr
self.fn = fnc
self.build()
@@ -83,7 +86,7 @@ class SegmentTree:
p = p // 2
self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
- def query(self, l: int, r: int) -> T: # noqa: E741
+ def query(self, l: int, r: int) -> T | None: # noqa: E741
"""
Get range query value in log(N) time
:param l: left element index
@@ -100,9 +103,10 @@ class SegmentTree:
>>> st.query(2, 3)
7
"""
- l, r = l + self.N, r + self.N # noqa: E741
- res = None
- while l <= r: # noqa: E741
+ l, r = l + self.N, r + self.N
+
+ res: T | None = None
+ while l <= r:
if l % 2 == 1:
res = self.st[l] if res is None else self.fn(res, self.st[l])
if r % 2 == 0:
@@ -135,7 +139,7 @@ if __name__ == "__main__":
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
- def test_all_segments():
+ def test_all_segments() -> None:
"""
Test all possible segments
"""
diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py
index 1ad8f2ed4..684c518b1 100644
--- a/data_structures/binary_tree/number_of_possible_binary_trees.py
+++ b/data_structures/binary_tree/number_of_possible_binary_trees.py
@@ -67,7 +67,7 @@ def factorial(n: int) -> int:
True
>>> factorial(-5) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ...
+ ...
ValueError: factorial() not defined for negative values
"""
if n < 0:
diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py
index de971a712..4ebe0e927 100644
--- a/data_structures/binary_tree/red_black_tree.py
+++ b/data_structures/binary_tree/red_black_tree.py
@@ -1,8 +1,10 @@
"""
-python/black : true
-flake8 : passed
+psf/black : true
+ruff : passed
"""
-from typing import Iterator, Optional
+from __future__ import annotations
+
+from collections.abc import Iterator
class RedBlackTree:
@@ -21,11 +23,11 @@ class RedBlackTree:
def __init__(
self,
- label: Optional[int] = None,
+ label: int | None = None,
color: int = 0,
- parent: Optional["RedBlackTree"] = None,
- left: Optional["RedBlackTree"] = None,
- right: Optional["RedBlackTree"] = None,
+ parent: RedBlackTree | None = None,
+ left: RedBlackTree | None = None,
+ right: RedBlackTree | None = None,
) -> None:
"""Initialize a new Red-Black Tree node with the given values:
label: The value associated with this node
@@ -42,13 +44,15 @@ class RedBlackTree:
# Here are functions which are specific to red-black trees
- def rotate_left(self) -> "RedBlackTree":
+ def rotate_left(self) -> RedBlackTree:
"""Rotate the subtree rooted at this node to the left and
returns the new root to this subtree.
Performing one rotation can be done in O(1).
"""
parent = self.parent
right = self.right
+ if right is None:
+ return self
self.right = right.left
if self.right:
self.right.parent = self
@@ -62,11 +66,13 @@ class RedBlackTree:
right.parent = parent
return right
- def rotate_right(self) -> "RedBlackTree":
+ def rotate_right(self) -> RedBlackTree:
"""Rotate the subtree rooted at this node to the right and
returns the new root to this subtree.
Performing one rotation can be done in O(1).
"""
+ if self.left is None:
+ return self
parent = self.parent
left = self.left
self.left = left.right
@@ -82,7 +88,7 @@ class RedBlackTree:
left.parent = parent
return left
- def insert(self, label: int) -> "RedBlackTree":
+ def insert(self, label: int) -> RedBlackTree:
"""Inserts label into the subtree rooted at self, performs any
rotations necessary to maintain balance, and then returns the
new root to this subtree (likely self).
@@ -121,25 +127,32 @@ class RedBlackTree:
if color(uncle) == 0:
if self.is_left() and self.parent.is_right():
self.parent.rotate_right()
- self.right._insert_repair()
+ if self.right:
+ self.right._insert_repair()
elif self.is_right() and self.parent.is_left():
self.parent.rotate_left()
- self.left._insert_repair()
+ if self.left:
+ self.left._insert_repair()
elif self.is_left():
- self.grandparent.rotate_right()
- self.parent.color = 0
- self.parent.right.color = 1
+ if self.grandparent:
+ self.grandparent.rotate_right()
+ self.parent.color = 0
+ if self.parent.right:
+ self.parent.right.color = 1
else:
- self.grandparent.rotate_left()
- self.parent.color = 0
- self.parent.left.color = 1
+ if self.grandparent:
+ self.grandparent.rotate_left()
+ self.parent.color = 0
+ if self.parent.left:
+ self.parent.left.color = 1
else:
self.parent.color = 0
- uncle.color = 0
- self.grandparent.color = 1
- self.grandparent._insert_repair()
+ if uncle and self.grandparent:
+ uncle.color = 0
+ self.grandparent.color = 1
+ self.grandparent._insert_repair()
- def remove(self, label: int) -> "RedBlackTree":
+ def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912
"""Remove label from this tree."""
if self.label == label:
if self.left and self.right:
@@ -147,8 +160,9 @@ class RedBlackTree:
# so we replace this node with the greatest one less than
# it and remove that.
value = self.left.get_max()
- self.label = value
- self.left.remove(value)
+ if value is not None:
+ self.label = value
+ self.left.remove(value)
else:
# This node has at most one non-None child, so we don't
# need to replace
@@ -158,10 +172,11 @@ class RedBlackTree:
# The only way this happens to a node with one child
# is if both children are None leaves.
# We can just remove this node and call it a day.
- if self.is_left():
- self.parent.left = None
- else:
- self.parent.right = None
+ if self.parent:
+ if self.is_left():
+ self.parent.left = None
+ else:
+ self.parent.right = None
else:
# The node is black
if child is None:
@@ -186,7 +201,7 @@ class RedBlackTree:
self.left.parent = self
if self.right:
self.right.parent = self
- elif self.label > label:
+ elif self.label is not None and self.label > label:
if self.left:
self.left.remove(label)
else:
@@ -196,6 +211,13 @@ class RedBlackTree:
def _remove_repair(self) -> None:
"""Repair the coloring of the tree that may have been messed up."""
+ if (
+ self.parent is None
+ or self.sibling is None
+ or self.parent.sibling is None
+ or self.grandparent is None
+ ):
+ return
if color(self.sibling) == 1:
self.sibling.color = 0
self.parent.color = 1
@@ -229,7 +251,8 @@ class RedBlackTree:
):
self.sibling.rotate_right()
self.sibling.color = 0
- self.sibling.right.color = 1
+ if self.sibling.right:
+ self.sibling.right.color = 1
if (
self.is_right()
and color(self.sibling) == 0
@@ -238,7 +261,8 @@ class RedBlackTree:
):
self.sibling.rotate_left()
self.sibling.color = 0
- self.sibling.left.color = 1
+ if self.sibling.left:
+ self.sibling.left.color = 1
if (
self.is_left()
and color(self.sibling) == 0
@@ -273,21 +297,17 @@ class RedBlackTree:
"""
# I assume property 1 to hold because there is nothing that can
# make the color be anything other than 0 or 1.
-
# Property 2
if self.color:
# The root was red
print("Property 2")
return False
-
# Property 3 does not need to be checked, because None is assumed
# to be black and is all the leaves.
-
# Property 4
if not self.check_coloring():
print("Property 4")
return False
-
# Property 5
if self.black_height() is None:
print("Property 5")
@@ -295,25 +315,24 @@ class RedBlackTree:
# All properties were met
return True
- def check_coloring(self) -> None:
+ def check_coloring(self) -> bool:
"""A helper function to recursively check Property 4 of a
Red-Black Tree. See check_color_properties for more info.
"""
- if self.color == 1:
- if color(self.left) == 1 or color(self.right) == 1:
- return False
+ if self.color == 1 and 1 in (color(self.left), color(self.right)):
+ return False
if self.left and not self.left.check_coloring():
return False
if self.right and not self.right.check_coloring():
return False
return True
- def black_height(self) -> int:
+ def black_height(self) -> int | None:
"""Returns the number of black nodes from this node to the
leaves of the tree, or None if there isn't one such value (the
tree is color incorrectly).
"""
- if self is None:
+ if self is None or self.left is None or self.right is None:
# If we're already at a leaf, there is no path
return 1
left = RedBlackTree.black_height(self.left)
@@ -330,21 +349,21 @@ class RedBlackTree:
# Here are functions which are general to all binary search trees
- def __contains__(self, label) -> bool:
+ def __contains__(self, label: int) -> bool:
"""Search through the tree for label, returning True iff it is
found somewhere in the tree.
Guaranteed to run in O(log(n)) time.
"""
return self.search(label) is not None
- def search(self, label: int) -> "RedBlackTree":
+ def search(self, label: int) -> RedBlackTree | None:
"""Search through the tree for label, returning its node if
it's found, and None otherwise.
This method is guaranteed to run in O(log(n)) time.
"""
if self.label == label:
return self
- elif label > self.label:
+ elif self.label is not None and label > self.label:
if self.right is None:
return None
else:
@@ -355,12 +374,12 @@ class RedBlackTree:
else:
return self.left.search(label)
- def floor(self, label: int) -> int:
+ def floor(self, label: int) -> int | None:
"""Returns the largest element in this tree which is at most label.
This method is guaranteed to run in O(log(n)) time."""
if self.label == label:
return self.label
- elif self.label > label:
+ elif self.label is not None and self.label > label:
if self.left:
return self.left.floor(label)
else:
@@ -372,13 +391,13 @@ class RedBlackTree:
return attempt
return self.label
- def ceil(self, label: int) -> int:
+ def ceil(self, label: int) -> int | None:
"""Returns the smallest element in this tree which is at least label.
This method is guaranteed to run in O(log(n)) time.
"""
if self.label == label:
return self.label
- elif self.label < label:
+ elif self.label is not None and self.label < label:
if self.right:
return self.right.ceil(label)
else:
@@ -390,7 +409,7 @@ class RedBlackTree:
return attempt
return self.label
- def get_max(self) -> int:
+ def get_max(self) -> int | None:
"""Returns the largest element in this tree.
This method is guaranteed to run in O(log(n)) time.
"""
@@ -400,7 +419,7 @@ class RedBlackTree:
else:
return self.label
- def get_min(self) -> int:
+ def get_min(self) -> int | None:
"""Returns the smallest element in this tree.
This method is guaranteed to run in O(log(n)) time.
"""
@@ -411,7 +430,7 @@ class RedBlackTree:
return self.label
@property
- def grandparent(self) -> "RedBlackTree":
+ def grandparent(self) -> RedBlackTree | None:
"""Get the current node's grandparent, or None if it doesn't exist."""
if self.parent is None:
return None
@@ -419,7 +438,7 @@ class RedBlackTree:
return self.parent.parent
@property
- def sibling(self) -> "RedBlackTree":
+ def sibling(self) -> RedBlackTree | None:
"""Get the current node's sibling, or None if it doesn't exist."""
if self.parent is None:
return None
@@ -430,11 +449,15 @@ class RedBlackTree:
def is_left(self) -> bool:
"""Returns true iff this node is the left child of its parent."""
- return self.parent and self.parent.left is self
+ if self.parent is None:
+ return False
+        return self.parent.left is self
def is_right(self) -> bool:
"""Returns true iff this node is the right child of its parent."""
- return self.parent and self.parent.right is self
+ if self.parent is None:
+ return False
+ return self.parent.right is self
def __bool__(self) -> bool:
return True
@@ -450,21 +473,21 @@ class RedBlackTree:
ln += len(self.right)
return ln
- def preorder_traverse(self) -> Iterator[int]:
+ def preorder_traverse(self) -> Iterator[int | None]:
yield self.label
if self.left:
yield from self.left.preorder_traverse()
if self.right:
yield from self.right.preorder_traverse()
- def inorder_traverse(self) -> Iterator[int]:
+ def inorder_traverse(self) -> Iterator[int | None]:
if self.left:
yield from self.left.inorder_traverse()
yield self.label
if self.right:
yield from self.right.inorder_traverse()
- def postorder_traverse(self) -> Iterator[int]:
+ def postorder_traverse(self) -> Iterator[int | None]:
if self.left:
yield from self.left.postorder_traverse()
if self.right:
@@ -486,15 +509,17 @@ class RedBlackTree:
indent=1,
)
- def __eq__(self, other) -> bool:
+ def __eq__(self, other: object) -> bool:
"""Test if two trees are equal."""
+ if not isinstance(other, RedBlackTree):
+ return NotImplemented
if self.label == other.label:
return self.left == other.left and self.right == other.right
else:
return False
-def color(node) -> int:
+def color(node: RedBlackTree | None) -> int:
"""Returns the color of a node, allowing for None leaves."""
if node is None:
return 0
@@ -697,19 +722,12 @@ def main() -> None:
>>> pytests()
"""
print_results("Rotating right and left", test_rotations())
-
print_results("Inserting", test_insert())
-
print_results("Searching", test_insert_and_search())
-
print_results("Deleting", test_insert_delete())
-
print_results("Floor and ceil", test_floor_ceil())
-
print_results("Tree traversal", test_tree_traversal())
-
print_results("Tree traversal", test_tree_chaining())
-
print("Testing tree balancing...")
print("This should only be a few seconds.")
test_insertion_speed()
diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py
index 10451ae68..5f822407d 100644
--- a/data_structures/binary_tree/segment_tree.py
+++ b/data_structures/binary_tree/segment_tree.py
@@ -2,12 +2,13 @@ import math
class SegmentTree:
- def __init__(self, A):
- self.N = len(A)
+ def __init__(self, a):
+ self.N = len(a)
self.st = [0] * (
4 * self.N
) # approximate the overall size of segment tree with array N
- self.build(1, 0, self.N - 1)
+ if self.N:
+ self.build(1, 0, self.N - 1)
def left(self, idx):
return idx * 2
@@ -16,7 +17,7 @@ class SegmentTree:
return idx * 2 + 1
def build(self, idx, l, r): # noqa: E741
- if l == r: # noqa: E741
+ if l == r:
self.st[idx] = A[l]
else:
mid = (l + r) // 2
@@ -33,7 +34,7 @@ class SegmentTree:
"""
if r < a or l > b:
return True
- if l == r: # noqa: E741
+ if l == r:
self.st[idx] = val
return True
mid = (l + r) // 2
@@ -51,18 +52,18 @@ class SegmentTree:
"""
if r < a or l > b:
return -math.inf
- if l >= a and r <= b: # noqa: E741
+ if l >= a and r <= b:
return self.st[idx]
mid = (l + r) // 2
q1 = self.query_recursive(self.left(idx), l, mid, a, b)
q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)
return max(q1, q2)
- def showData(self):
- showList = []
+ def show_data(self):
+ show_list = []
for i in range(1, N + 1):
- showList += [self.query(i, i)]
- print(showList)
+ show_list += [self.query(i, i)]
+ print(show_list)
if __name__ == "__main__":
@@ -75,4 +76,4 @@ if __name__ == "__main__":
segt.update(1, 3, 111)
print(segt.query(1, 15))
segt.update(7, 8, 235)
- segt.showData()
+ segt.show_data()
diff --git a/data_structures/binary_tree/segment_tree_other.py b/data_structures/binary_tree/segment_tree_other.py
index 90afd7ca8..cc77c4951 100644
--- a/data_structures/binary_tree/segment_tree_other.py
+++ b/data_structures/binary_tree/segment_tree_other.py
@@ -16,40 +16,36 @@ class SegmentTreeNode:
self.left = left
self.right = right
- def __str__(self):
- return f"val: {self.val}, start: {self.start}, end: {self.end}"
+ def __repr__(self):
+ return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
"""
>>> import operator
>>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add)
- >>> for node in num_arr.traverse():
- ... print(node)
- ...
- val: 15, start: 0, end: 4
- val: 8, start: 0, end: 2
- val: 7, start: 3, end: 4
- val: 3, start: 0, end: 1
- val: 5, start: 2, end: 2
- val: 3, start: 3, end: 3
- val: 4, start: 4, end: 4
- val: 2, start: 0, end: 0
- val: 1, start: 1, end: 1
+ >>> tuple(num_arr.traverse()) # doctest: +NORMALIZE_WHITESPACE
+ (SegmentTreeNode(start=0, end=4, val=15),
+ SegmentTreeNode(start=0, end=2, val=8),
+ SegmentTreeNode(start=3, end=4, val=7),
+ SegmentTreeNode(start=0, end=1, val=3),
+ SegmentTreeNode(start=2, end=2, val=5),
+ SegmentTreeNode(start=3, end=3, val=3),
+ SegmentTreeNode(start=4, end=4, val=4),
+ SegmentTreeNode(start=0, end=0, val=2),
+ SegmentTreeNode(start=1, end=1, val=1))
>>>
>>> num_arr.update(1, 5)
- >>> for node in num_arr.traverse():
- ... print(node)
- ...
- val: 19, start: 0, end: 4
- val: 12, start: 0, end: 2
- val: 7, start: 3, end: 4
- val: 7, start: 0, end: 1
- val: 5, start: 2, end: 2
- val: 3, start: 3, end: 3
- val: 4, start: 4, end: 4
- val: 2, start: 0, end: 0
- val: 5, start: 1, end: 1
+ >>> tuple(num_arr.traverse()) # doctest: +NORMALIZE_WHITESPACE
+ (SegmentTreeNode(start=0, end=4, val=19),
+ SegmentTreeNode(start=0, end=2, val=12),
+ SegmentTreeNode(start=3, end=4, val=7),
+ SegmentTreeNode(start=0, end=1, val=7),
+ SegmentTreeNode(start=2, end=2, val=5),
+ SegmentTreeNode(start=3, end=3, val=3),
+ SegmentTreeNode(start=4, end=4, val=4),
+ SegmentTreeNode(start=0, end=0, val=2),
+ SegmentTreeNode(start=1, end=1, val=5))
>>>
>>> num_arr.query_range(3, 4)
7
@@ -62,29 +58,29 @@ class SegmentTree:
>>> for node in max_arr.traverse():
... print(node)
...
- val: 5, start: 0, end: 4
- val: 5, start: 0, end: 2
- val: 4, start: 3, end: 4
- val: 2, start: 0, end: 1
- val: 5, start: 2, end: 2
- val: 3, start: 3, end: 3
- val: 4, start: 4, end: 4
- val: 2, start: 0, end: 0
- val: 1, start: 1, end: 1
+ SegmentTreeNode(start=0, end=4, val=5)
+ SegmentTreeNode(start=0, end=2, val=5)
+ SegmentTreeNode(start=3, end=4, val=4)
+ SegmentTreeNode(start=0, end=1, val=2)
+ SegmentTreeNode(start=2, end=2, val=5)
+ SegmentTreeNode(start=3, end=3, val=3)
+ SegmentTreeNode(start=4, end=4, val=4)
+ SegmentTreeNode(start=0, end=0, val=2)
+ SegmentTreeNode(start=1, end=1, val=1)
>>>
>>> max_arr.update(1, 5)
>>> for node in max_arr.traverse():
... print(node)
...
- val: 5, start: 0, end: 4
- val: 5, start: 0, end: 2
- val: 4, start: 3, end: 4
- val: 5, start: 0, end: 1
- val: 5, start: 2, end: 2
- val: 3, start: 3, end: 3
- val: 4, start: 4, end: 4
- val: 2, start: 0, end: 0
- val: 5, start: 1, end: 1
+ SegmentTreeNode(start=0, end=4, val=5)
+ SegmentTreeNode(start=0, end=2, val=5)
+ SegmentTreeNode(start=3, end=4, val=4)
+ SegmentTreeNode(start=0, end=1, val=5)
+ SegmentTreeNode(start=2, end=2, val=5)
+ SegmentTreeNode(start=3, end=3, val=3)
+ SegmentTreeNode(start=4, end=4, val=4)
+ SegmentTreeNode(start=0, end=0, val=2)
+ SegmentTreeNode(start=1, end=1, val=5)
>>>
>>> max_arr.query_range(3, 4)
4
@@ -97,29 +93,29 @@ class SegmentTree:
>>> for node in min_arr.traverse():
... print(node)
...
- val: 1, start: 0, end: 4
- val: 1, start: 0, end: 2
- val: 3, start: 3, end: 4
- val: 1, start: 0, end: 1
- val: 5, start: 2, end: 2
- val: 3, start: 3, end: 3
- val: 4, start: 4, end: 4
- val: 2, start: 0, end: 0
- val: 1, start: 1, end: 1
+ SegmentTreeNode(start=0, end=4, val=1)
+ SegmentTreeNode(start=0, end=2, val=1)
+ SegmentTreeNode(start=3, end=4, val=3)
+ SegmentTreeNode(start=0, end=1, val=1)
+ SegmentTreeNode(start=2, end=2, val=5)
+ SegmentTreeNode(start=3, end=3, val=3)
+ SegmentTreeNode(start=4, end=4, val=4)
+ SegmentTreeNode(start=0, end=0, val=2)
+ SegmentTreeNode(start=1, end=1, val=1)
>>>
>>> min_arr.update(1, 5)
>>> for node in min_arr.traverse():
... print(node)
...
- val: 2, start: 0, end: 4
- val: 2, start: 0, end: 2
- val: 3, start: 3, end: 4
- val: 2, start: 0, end: 1
- val: 5, start: 2, end: 2
- val: 3, start: 3, end: 3
- val: 4, start: 4, end: 4
- val: 2, start: 0, end: 0
- val: 5, start: 1, end: 1
+ SegmentTreeNode(start=0, end=4, val=2)
+ SegmentTreeNode(start=0, end=2, val=2)
+ SegmentTreeNode(start=3, end=4, val=3)
+ SegmentTreeNode(start=0, end=1, val=2)
+ SegmentTreeNode(start=2, end=2, val=5)
+ SegmentTreeNode(start=3, end=3, val=3)
+ SegmentTreeNode(start=4, end=4, val=4)
+ SegmentTreeNode(start=0, end=0, val=2)
+ SegmentTreeNode(start=1, end=1, val=5)
>>>
>>> min_arr.query_range(3, 4)
3
@@ -128,7 +124,6 @@ class SegmentTree:
>>> min_arr.query_range(1, 3)
3
>>>
-
"""
def __init__(self, collection: Sequence, function):
diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py
index a09dcc928..a53ac566e 100644
--- a/data_structures/binary_tree/treap.py
+++ b/data_structures/binary_tree/treap.py
@@ -1,9 +1,6 @@
-# flake8: noqa
-
from __future__ import annotations
from random import random
-from typing import Optional, Tuple
class Node:
@@ -12,11 +9,11 @@ class Node:
Treap is a binary tree by value and heap by priority
"""
- def __init__(self, value: Optional[int] = None):
+ def __init__(self, value: int | None = None):
self.value = value
self.prior = random()
- self.left: Optional[Node] = None
- self.right: Optional[Node] = None
+ self.left: Node | None = None
+ self.right: Node | None = None
def __repr__(self) -> str:
from pprint import pformat
@@ -35,7 +32,7 @@ class Node:
return value + left + right
-def split(root: Optional[Node], value: int) -> Tuple[Optional[Node], Optional[Node]]:
+def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
"""
We split current tree into 2 trees with value:
@@ -64,7 +61,7 @@ def split(root: Optional[Node], value: int) -> Tuple[Optional[Node], Optional[No
return root, right
-def merge(left: Optional[Node], right: Optional[Node]) -> Optional[Node]:
+def merge(left: Node | None, right: Node | None) -> Node | None:
"""
We merge 2 trees into one.
Note: all left tree's values must be less than all right tree's
@@ -86,7 +83,7 @@ def merge(left: Optional[Node], right: Optional[Node]) -> Optional[Node]:
return right
-def insert(root: Optional[Node], value: int) -> Optional[Node]:
+def insert(root: Node | None, value: int) -> Node | None:
"""
Insert element
@@ -99,7 +96,7 @@ def insert(root: Optional[Node], value: int) -> Optional[Node]:
return merge(merge(left, node), right)
-def erase(root: Optional[Node], value: int) -> Optional[Node]:
+def erase(root: Node | None, value: int) -> Node | None:
"""
Erase element
@@ -112,7 +109,7 @@ def erase(root: Optional[Node], value: int) -> Optional[Node]:
return merge(left, right)
-def inorder(root: Optional[Node]) -> None:
+def inorder(root: Node | None) -> None:
"""
Just recursive print of a tree
"""
@@ -124,28 +121,28 @@ def inorder(root: Optional[Node]) -> None:
inorder(root.right)
-def interactTreap(root: Optional[Node], args: str) -> Optional[Node]:
+def interact_treap(root: Node | None, args: str) -> Node | None:
"""
Commands:
+ value to add value into treap
- value to erase all nodes with value
- >>> root = interactTreap(None, "+1")
+ >>> root = interact_treap(None, "+1")
>>> inorder(root)
1,
- >>> root = interactTreap(root, "+3 +5 +17 +19 +2 +16 +4 +0")
+ >>> root = interact_treap(root, "+3 +5 +17 +19 +2 +16 +4 +0")
>>> inorder(root)
0,1,2,3,4,5,16,17,19,
- >>> root = interactTreap(root, "+4 +4 +4")
+ >>> root = interact_treap(root, "+4 +4 +4")
>>> inorder(root)
0,1,2,3,4,4,4,4,5,16,17,19,
- >>> root = interactTreap(root, "-0")
+ >>> root = interact_treap(root, "-0")
>>> inorder(root)
1,2,3,4,4,4,4,5,16,17,19,
- >>> root = interactTreap(root, "-4")
+ >>> root = interact_treap(root, "-4")
>>> inorder(root)
1,2,3,5,16,17,19,
- >>> root = interactTreap(root, "=0")
+ >>> root = interact_treap(root, "=0")
Unknown command
"""
for arg in args.split():
@@ -171,7 +168,7 @@ def main() -> None:
args = input()
while args != "q":
- root = interactTreap(root, args)
+ root = interact_treap(root, args)
print(root)
args = input()
diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py
index 1607244f7..041e140f5 100644
--- a/data_structures/binary_tree/wavelet_tree.py
+++ b/data_structures/binary_tree/wavelet_tree.py
@@ -7,8 +7,7 @@ such as the with segment trees or fenwick trees. You can read more about them he
2. https://www.youtube.com/watch?v=4aSv9PcecDw&t=811s
3. https://www.youtube.com/watch?v=CybAgVF-MMc&t=1178s
"""
-
-from typing import Optional
+from __future__ import annotations
test_array = [2, 1, 4, 5, 6, 0, 8, 9, 1, 2, 0, 6, 4, 2, 0, 6, 5, 3, 2, 7]
@@ -18,27 +17,27 @@ class Node:
self.minn: int = -1
self.maxx: int = -1
self.map_left: list[int] = [-1] * length
- self.left: Optional[Node] = None
- self.right: Optional[Node] = None
+ self.left: Node | None = None
+ self.right: Node | None = None
def __repr__(self) -> str:
"""
>>> node = Node(length=27)
>>> repr(node)
- 'min_value: -1, max_value: -1'
+ 'Node(min_value=-1 max_value=-1)'
>>> repr(node) == str(node)
True
"""
- return f"min_value: {self.minn}, max_value: {self.maxx}"
+ return f"Node(min_value={self.minn} max_value={self.maxx})"
-def build_tree(arr: list[int]) -> Node:
+def build_tree(arr: list[int]) -> Node | None:
"""
Builds the tree for arr and returns the root
of the constructed tree
>>> build_tree(test_array)
- min_value: 0, max_value: 9
+ Node(min_value=0 max_value=9)
"""
root = Node(len(arr))
root.minn, root.maxx = min(arr), max(arr)
@@ -52,7 +51,10 @@ def build_tree(arr: list[int]) -> Node:
then recursively build trees for left_arr and right_arr
"""
pivot = (root.minn + root.maxx) // 2
- left_arr, right_arr = [], []
+
+ left_arr: list[int] = []
+ right_arr: list[int] = []
+
for index, num in enumerate(arr):
if num <= pivot:
left_arr.append(num)
@@ -64,7 +66,7 @@ def build_tree(arr: list[int]) -> Node:
return root
-def rank_till_index(node: Node, num: int, index: int) -> int:
+def rank_till_index(node: Node | None, num: int, index: int) -> int:
"""
Returns the number of occurrences of num in interval [0, index] in the list
@@ -80,7 +82,7 @@ def rank_till_index(node: Node, num: int, index: int) -> int:
>>> rank_till_index(root, 0, 9)
1
"""
- if index < 0:
+ if index < 0 or node is None:
return 0
# Leaf node cases
if node.minn == node.maxx:
@@ -94,7 +96,7 @@ def rank_till_index(node: Node, num: int, index: int) -> int:
return rank_till_index(node.right, num, index - node.map_left[index])
-def rank(node: Node, num: int, start: int, end: int) -> int:
+def rank(node: Node | None, num: int, start: int, end: int) -> int:
"""
Returns the number of occurrences of num in interval [start, end] in the list
@@ -115,7 +117,7 @@ def rank(node: Node, num: int, start: int, end: int) -> int:
return rank_till_end - rank_before_start
-def quantile(node: Node, index: int, start: int, end: int) -> int:
+def quantile(node: Node | None, index: int, start: int, end: int) -> int:
"""
Returns the index'th smallest element in interval [start, end] in the list
index is 0-indexed
@@ -130,7 +132,7 @@ def quantile(node: Node, index: int, start: int, end: int) -> int:
>>> quantile(root, 4, 2, 5)
-1
"""
- if index > (end - start) or start > end:
+ if index > (end - start) or start > end or node is None:
return -1
# Leaf node case
if node.minn == node.maxx:
@@ -156,10 +158,10 @@ def quantile(node: Node, index: int, start: int, end: int) -> int:
def range_counting(
- node: Node, start: int, end: int, start_num: int, end_num: int
+ node: Node | None, start: int, end: int, start_num: int, end_num: int
) -> int:
"""
- Returns the number of elememts in range [start_num, end_num]
+ Returns the number of elements in range [start_num, end_num]
in interval [start, end] in the list
>>> root = build_tree(test_array)
@@ -176,6 +178,7 @@ def range_counting(
"""
if (
start > end
+ or node is None
or start_num > end_num
or node.minn > end_num
or node.maxx < start_num
diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py
index a93b89621..12dafb2d9 100644
--- a/data_structures/disjoint_set/disjoint_set.py
+++ b/data_structures/disjoint_set/disjoint_set.py
@@ -1,17 +1,19 @@
"""
- disjoint set
+ Disjoint set.
Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure
"""
class Node:
- def __init__(self, data):
+ def __init__(self, data: int) -> None:
self.data = data
+ self.rank: int
+ self.parent: Node
-def make_set(x):
+def make_set(x: Node) -> None:
"""
- make x as a set.
+ Make x a set.
"""
# rank is the distance from x to its' parent
# root's rank is 0
@@ -19,14 +21,17 @@ def make_set(x):
x.parent = x
-def union_set(x, y):
+def union_set(x: Node, y: Node) -> None:
"""
- union two sets.
+ Union of two sets.
set with bigger rank should be parent, so that the
disjoint set tree will be more flat.
"""
x, y = find_set(x), find_set(y)
- if x.rank > y.rank:
+ if x == y:
+ return
+
+ elif x.rank > y.rank:
y.parent = x
else:
x.parent = y
@@ -34,9 +39,9 @@ def union_set(x, y):
y.rank += 1
-def find_set(x):
+def find_set(x: Node) -> Node:
"""
- return the parent of x
+ Return the parent of x
"""
if x != x.parent:
x.parent = find_set(x.parent)
@@ -51,10 +56,11 @@ def find_python_set(node: Node) -> set:
for s in sets:
if node.data in s:
return s
- raise ValueError(f"{node.data} is not in {sets}")
+ msg = f"{node.data} is not in {sets}"
+ raise ValueError(msg)
-def test_disjoint_set():
+def test_disjoint_set() -> None:
"""
>>> test_disjoint_set()
"""
diff --git a/data_structures/hashing/bloom_filter.py b/data_structures/hashing/bloom_filter.py
new file mode 100644
index 000000000..7fd0985bd
--- /dev/null
+++ b/data_structures/hashing/bloom_filter.py
@@ -0,0 +1,105 @@
+"""
+See https://en.wikipedia.org/wiki/Bloom_filter
+
+The use of this data structure is to test membership in a set.
+Compared to Python's built-in set() it is more space-efficient.
+In the following example, only 8 bits of memory will be used:
+>>> bloom = Bloom(size=8)
+
+Initially, the filter contains all zeros:
+>>> bloom.bitstring
+'00000000'
+
+When an element is added, two bits are set to 1
+since there are 2 hash functions in this implementation:
+>>> "Titanic" in bloom
+False
+>>> bloom.add("Titanic")
+>>> bloom.bitstring
+'01100000'
+>>> "Titanic" in bloom
+True
+
+However, sometimes only one bit is added
+because both hash functions return the same value
+>>> bloom.add("Avatar")
+>>> "Avatar" in bloom
+True
+>>> bloom.format_hash("Avatar")
+'00000100'
+>>> bloom.bitstring
+'01100100'
+
+Not added elements should return False ...
+>>> not_present_films = ("The Godfather", "Interstellar", "Parasite", "Pulp Fiction")
+>>> {
+... film: bloom.format_hash(film) for film in not_present_films
+... } # doctest: +NORMALIZE_WHITESPACE
+{'The Godfather': '00000101',
+ 'Interstellar': '00000011',
+ 'Parasite': '00010010',
+ 'Pulp Fiction': '10000100'}
+>>> any(film in bloom for film in not_present_films)
+False
+
+but sometimes there are false positives:
+>>> "Ratatouille" in bloom
+True
+>>> bloom.format_hash("Ratatouille")
+'01100000'
+
+The probability increases with the number of elements added.
+The probability decreases with the number of bits in the bitarray.
+>>> bloom.estimated_error_rate
+0.140625
+>>> bloom.add("The Godfather")
+>>> bloom.estimated_error_rate
+0.25
+>>> bloom.bitstring
+'01100101'
+"""
+from hashlib import md5, sha256
+
+HASH_FUNCTIONS = (sha256, md5)
+
+
+class Bloom:
+ def __init__(self, size: int = 8) -> None:
+ self.bitarray = 0b0
+ self.size = size
+
+ def add(self, value: str) -> None:
+ h = self.hash_(value)
+ self.bitarray |= h
+
+ def exists(self, value: str) -> bool:
+ h = self.hash_(value)
+ return (h & self.bitarray) == h
+
+ def __contains__(self, other: str) -> bool:
+ return self.exists(other)
+
+ def format_bin(self, bitarray: int) -> str:
+ res = bin(bitarray)[2:]
+ return res.zfill(self.size)
+
+ @property
+ def bitstring(self) -> str:
+ return self.format_bin(self.bitarray)
+
+ def hash_(self, value: str) -> int:
+ res = 0b0
+ for func in HASH_FUNCTIONS:
+ position = (
+ int.from_bytes(func(value.encode()).digest(), "little") % self.size
+ )
+ res |= 2**position
+ return res
+
+ def format_hash(self, value: str) -> str:
+ return self.format_bin(self.hash_(value))
+
+ @property
+ def estimated_error_rate(self) -> float:
+ n_ones = bin(self.bitarray).count("1")
+ return (n_ones / self.size) ** len(HASH_FUNCTIONS)
diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py
index 57b1ffff4..be21e74ca 100644
--- a/data_structures/hashing/double_hash.py
+++ b/data_structures/hashing/double_hash.py
@@ -1,6 +1,18 @@
#!/usr/bin/env python3
+"""
+Double hashing is a collision resolving technique in Open Addressed Hash tables.
+Double hashing uses the idea of applying a second hash function to key when a collision
+occurs. The advantage of Double hashing is that it is one of the best forms of probing,
+producing a uniform distribution of records throughout a hash table. This technique
+does not yield any clusters. It is one of the most effective methods for resolving collisions.
+
+Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE
+Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table.
+
+Reference: https://en.wikipedia.org/wiki/Double_hashing
+"""
from .hash_table import HashTable
-from .number_theory.prime_numbers import check_prime, next_prime
+from .number_theory.prime_numbers import is_prime, next_prime
class DoubleHash(HashTable):
@@ -12,10 +24,9 @@ class DoubleHash(HashTable):
super().__init__(*args, **kwargs)
def __hash_function_2(self, value, data):
-
next_prime_gt = (
next_prime(value % self.size_table)
- if not check_prime(value % self.size_table)
+ if not is_prime(value % self.size_table)
else value % self.size_table
) # gt = bigger than
return next_prime_gt - (data % next_prime_gt)
diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py
new file mode 100644
index 000000000..1dfcc8bbf
--- /dev/null
+++ b/data_structures/hashing/hash_map.py
@@ -0,0 +1,162 @@
+"""
+Hash map with open addressing.
+
+https://en.wikipedia.org/wiki/Hash_table
+
+Another hash map implementation, with a good explanation.
+Modern Dictionaries by Raymond Hettinger
+https://www.youtube.com/watch?v=p33CVV29OG8
+"""
+from collections.abc import Iterator, MutableMapping
+from dataclasses import dataclass
+from typing import Generic, TypeVar
+
+KEY = TypeVar("KEY")
+VAL = TypeVar("VAL")
+
+
+@dataclass(frozen=True, slots=True)
+class _Item(Generic[KEY, VAL]):
+ key: KEY
+ val: VAL
+
+
+class _DeletedItem(_Item):
+ def __init__(self) -> None:
+ super().__init__(None, None)
+
+ def __bool__(self) -> bool:
+ return False
+
+
+_deleted = _DeletedItem()
+
+
+class HashMap(MutableMapping[KEY, VAL]):
+ """
+ Hash map with open addressing.
+ """
+
+ def __init__(
+ self, initial_block_size: int = 8, capacity_factor: float = 0.75
+ ) -> None:
+ self._initial_block_size = initial_block_size
+ self._buckets: list[_Item | None] = [None] * initial_block_size
+ assert 0.0 < capacity_factor < 1.0
+ self._capacity_factor = capacity_factor
+ self._len = 0
+
+ def _get_bucket_index(self, key: KEY) -> int:
+ return hash(key) % len(self._buckets)
+
+ def _get_next_ind(self, ind: int) -> int:
+ """
+ Get next index.
+
+ Implements linear open addressing.
+ """
+ return (ind + 1) % len(self._buckets)
+
+ def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
+ """
+ Try to add value to the bucket.
+
+ If bucket is empty or key is the same, does insert and return True.
+
+ If bucket has another key or deleted placeholder,
+ that means that we need to check next bucket.
+ """
+ stored = self._buckets[ind]
+ if not stored:
+ self._buckets[ind] = _Item(key, val)
+ self._len += 1
+ return True
+ elif stored.key == key:
+ self._buckets[ind] = _Item(key, val)
+ return True
+ else:
+ return False
+
+ def _is_full(self) -> bool:
+ """
+ Return true if we have reached safe capacity.
+
+ So we need to increase the number of buckets to avoid collisions.
+ """
+ limit = len(self._buckets) * self._capacity_factor
+ return len(self) >= int(limit)
+
+ def _is_sparse(self) -> bool:
+ """Return true if we could use half as many buckets as we have now."""
+ if len(self._buckets) <= self._initial_block_size:
+ return False
+ limit = len(self._buckets) * self._capacity_factor / 2
+ return len(self) < limit
+
+ def _resize(self, new_size: int) -> None:
+ old_buckets = self._buckets
+ self._buckets = [None] * new_size
+ self._len = 0
+ for item in old_buckets:
+ if item:
+ self._add_item(item.key, item.val)
+
+ def _size_up(self) -> None:
+ self._resize(len(self._buckets) * 2)
+
+ def _size_down(self) -> None:
+ self._resize(len(self._buckets) // 2)
+
+ def _iterate_buckets(self, key: KEY) -> Iterator[int]:
+ ind = self._get_bucket_index(key)
+ for _ in range(len(self._buckets)):
+ yield ind
+ ind = self._get_next_ind(ind)
+
+ def _add_item(self, key: KEY, val: VAL) -> None:
+ for ind in self._iterate_buckets(key):
+ if self._try_set(ind, key, val):
+ break
+
+ def __setitem__(self, key: KEY, val: VAL) -> None:
+ if self._is_full():
+ self._size_up()
+
+ self._add_item(key, val)
+
+ def __delitem__(self, key: KEY) -> None:
+ for ind in self._iterate_buckets(key):
+ item = self._buckets[ind]
+ if item is None:
+ raise KeyError(key)
+ if item is _deleted:
+ continue
+ if item.key == key:
+ self._buckets[ind] = _deleted
+ self._len -= 1
+ break
+ if self._is_sparse():
+ self._size_down()
+
+ def __getitem__(self, key: KEY) -> VAL:
+ for ind in self._iterate_buckets(key):
+ item = self._buckets[ind]
+ if item is None:
+ break
+ if item is _deleted:
+ continue
+ if item.key == key:
+ return item.val
+ raise KeyError(key)
+
+ def __len__(self) -> int:
+ return self._len
+
+ def __iter__(self) -> Iterator[KEY]:
+ yield from (item.key for item in self._buckets if item)
+
+ def __repr__(self) -> str:
+ val_string = " ,".join(
+ f"{item.key}: {item.val}" for item in self._buckets if item
+ )
+ return f"HashMap({val_string})"
diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py
index fd9e6eec1..7ca2f7c40 100644
--- a/data_structures/hashing/hash_table.py
+++ b/data_structures/hashing/hash_table.py
@@ -7,19 +7,24 @@ class HashTable:
Basic Hash Table example with open addressing and linear probing
"""
- def __init__(self, size_table, charge_factor=None, lim_charge=None):
+ def __init__(
+ self,
+ size_table: int,
+ charge_factor: int | None = None,
+ lim_charge: float | None = None,
+ ) -> None:
self.size_table = size_table
self.values = [None] * self.size_table
self.lim_charge = 0.75 if lim_charge is None else lim_charge
self.charge_factor = 1 if charge_factor is None else charge_factor
- self.__aux_list = []
- self._keys = {}
+ self.__aux_list: list = []
+ self._keys: dict = {}
def keys(self):
return self._keys
def balanced_factor(self):
- return sum([1 for slot in self.values if slot is not None]) / (
+ return sum(1 for slot in self.values if slot is not None) / (
self.size_table * self.charge_factor
)
@@ -27,9 +32,8 @@ class HashTable:
return key % self.size_table
def _step_by_step(self, step_ord):
-
print(f"step {step_ord}")
- print([i for i in range(len(self.values))])
+ print(list(range(len(self.values))))
print(self.values)
def bulk_insert(self, values):
@@ -48,7 +52,6 @@ class HashTable:
new_key = self.hash_function(key + 1)
while self.values[new_key] is not None and self.values[new_key] != key:
-
if self.values.count(None) > 0:
new_key = self.hash_function(new_key + 1)
else:
diff --git a/data_structures/hashing/hash_table_with_linked_list.py b/data_structures/hashing/hash_table_with_linked_list.py
index fe838268f..f404c5251 100644
--- a/data_structures/hashing/hash_table_with_linked_list.py
+++ b/data_structures/hashing/hash_table_with_linked_list.py
@@ -14,7 +14,7 @@ class HashTableWithLinkedList(HashTable):
def balanced_factor(self):
return (
- sum([self.charge_factor - len(slot) for slot in self.values])
+ sum(self.charge_factor - len(slot) for slot in self.values)
/ self.size_table
* self.charge_factor
)
diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py
index db4d40f47..0c25896f9 100644
--- a/data_structures/hashing/number_theory/prime_numbers.py
+++ b/data_structures/hashing/number_theory/prime_numbers.py
@@ -3,26 +3,56 @@
module to operations with prime numbers
"""
+import math
-def check_prime(number):
- """
- it's not the best solution
- """
- special_non_primes = [0, 1, 2]
- if number in special_non_primes[:2]:
- return 2
- elif number == special_non_primes[-1]:
- return 3
- return all([number % i for i in range(2, number)])
+def is_prime(number: int) -> bool:
+ """Checks to see if a number is a prime in O(sqrt(n)).
+
+ A number is prime if it has exactly two factors: 1 and itself.
+
+ >>> is_prime(0)
+ False
+ >>> is_prime(1)
+ False
+ >>> is_prime(2)
+ True
+ >>> is_prime(3)
+ True
+ >>> is_prime(27)
+ False
+ >>> is_prime(87)
+ False
+ >>> is_prime(563)
+ True
+ >>> is_prime(2999)
+ True
+ >>> is_prime(67483)
+ False
+ """
+
+ # precondition
+ assert isinstance(number, int) and (
+ number >= 0
+ ), "'number' must been an int and positive"
+
+ if 1 < number < 4:
+ # 2 and 3 are primes
+ return True
+ elif number < 2 or not number % 2:
+ # Negatives, 0, 1 and all even numbers are not primes
+ return False
+
+ odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
+ return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
value = factor * value
first_value_val = value
- while not check_prime(value):
- value += 1 if not ("desc" in kwargs.keys() and kwargs["desc"] is True) else -1
+ while not is_prime(value):
+ value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1, **kwargs)
diff --git a/data_structures/hashing/tests/test_hash_map.py b/data_structures/hashing/tests/test_hash_map.py
new file mode 100644
index 000000000..929e67311
--- /dev/null
+++ b/data_structures/hashing/tests/test_hash_map.py
@@ -0,0 +1,97 @@
+from operator import delitem, getitem, setitem
+
+import pytest
+
+from data_structures.hashing.hash_map import HashMap
+
+
+def _get(k):
+ return getitem, k
+
+
+def _set(k, v):
+ return setitem, k, v
+
+
+def _del(k):
+ return delitem, k
+
+
+def _run_operation(obj, fun, *args):
+ try:
+ return fun(obj, *args), None
+ except Exception as e:
+ return None, e
+
+
+_add_items = (
+ _set("key_a", "val_a"),
+ _set("key_b", "val_b"),
+)
+
+_overwrite_items = [
+ _set("key_a", "val_a"),
+ _set("key_a", "val_b"),
+]
+
+_delete_items = [
+ _set("key_a", "val_a"),
+ _set("key_b", "val_b"),
+ _del("key_a"),
+ _del("key_b"),
+ _set("key_a", "val_a"),
+ _del("key_a"),
+]
+
+_access_absent_items = [
+ _get("key_a"),
+ _del("key_a"),
+ _set("key_a", "val_a"),
+ _del("key_a"),
+ _del("key_a"),
+ _get("key_a"),
+]
+
+_add_with_resize_up = [
+ *[_set(x, x) for x in range(5)], # guaranteed upsize
+]
+
+_add_with_resize_down = [
+ *[_set(x, x) for x in range(5)], # guaranteed upsize
+ *[_del(x) for x in range(5)],
+ _set("key_a", "val_b"),
+]
+
+
+@pytest.mark.parametrize(
+ "operations",
+ (
+ pytest.param(_add_items, id="add items"),
+ pytest.param(_overwrite_items, id="overwrite items"),
+ pytest.param(_delete_items, id="delete items"),
+ pytest.param(_access_absent_items, id="access absent items"),
+ pytest.param(_add_with_resize_up, id="add with resize up"),
+ pytest.param(_add_with_resize_down, id="add with resize down"),
+ ),
+)
+def test_hash_map_is_the_same_as_dict(operations):
+ my = HashMap(initial_block_size=4)
+ py = {}
+ for _, (fun, *args) in enumerate(operations):
+ my_res, my_exc = _run_operation(my, fun, *args)
+ py_res, py_exc = _run_operation(py, fun, *args)
+ assert my_res == py_res
+ assert str(my_exc) == str(py_exc)
+ assert set(py) == set(my)
+ assert len(py) == len(my)
+ assert set(my.items()) == set(py.items())
+
+
+def test_no_new_methods_was_added_to_api():
+ def is_public(name: str) -> bool:
+ return not name.startswith("_")
+
+ dict_public_names = {name for name in dir({}) if is_public(name)}
+ hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
+
+ assert dict_public_names > hash_public_names
diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py
index 334b444ea..099bd2871 100644
--- a/data_structures/heap/binomial_heap.py
+++ b/data_structures/heap/binomial_heap.py
@@ -1,5 +1,3 @@
-# flake8: noqa
-
"""
Binomial Heap
Reference: Advanced Data Structures, Peter Brass
@@ -22,7 +20,7 @@ class Node:
self.right = None
self.parent = None
- def mergeTrees(self, other):
+ def merge_trees(self, other):
"""
In-place merge of two binomial trees of equal size.
Returns the root of the resulting tree
@@ -71,13 +69,12 @@ class BinomialHeap:
... first_heap.insert(number)
Size test
- >>> print(first_heap.size)
+ >>> first_heap.size
30
Deleting - delete() test
- >>> for i in range(25):
- ... print(first_heap.deleteMin(), end=" ")
- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
+ >>> [first_heap.delete_min() for _ in range(20)]
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
Create a new Heap
>>> second_heap = BinomialHeap()
@@ -97,8 +94,8 @@ class BinomialHeap:
# # # #
preOrder() test
- >>> print(second_heap.preOrder())
- [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)]
+ >>> " ".join(str(x) for x in second_heap.pre_order())
+ "(17, 0) ('#', 1) (31, 1) (20, 2) ('#', 3) ('#', 3) (34, 2) ('#', 3) ('#', 3)"
printing Heap - __str__() test
>>> print(second_heap)
@@ -113,14 +110,17 @@ class BinomialHeap:
---#
mergeHeaps() test
- >>> merged = second_heap.mergeHeaps(first_heap)
+ >>>
+ >>> merged = second_heap.merge_heaps(first_heap)
>>> merged.peek()
17
values in merged heap; (merge is inplace)
- >>> while not first_heap.isEmpty():
- ... print(first_heap.deleteMin(), end=" ")
- 17 20 25 26 27 28 29 31 34
+ >>> results = []
+ >>> while not first_heap.is_empty():
+ ... results.append(first_heap.delete_min())
+ >>> results
+ [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34]
"""
def __init__(self, bottom_root=None, min_node=None, heap_size=0):
@@ -128,7 +128,7 @@ class BinomialHeap:
self.bottom_root = bottom_root
self.min_node = min_node
- def mergeHeaps(self, other):
+ def merge_heaps(self, other):
"""
In-place merge of two binomial heaps.
Both of them become the resulting merged heap
@@ -136,12 +136,12 @@ class BinomialHeap:
# Empty heaps corner cases
if other.size == 0:
- return
+ return None
if self.size == 0:
self.size = other.size
self.bottom_root = other.bottom_root
self.min_node = other.min_node
- return
+ return None
# Update size
self.size = self.size + other.size
@@ -174,13 +174,12 @@ class BinomialHeap:
i.left_tree_size == i.parent.left_tree_size
and i.left_tree_size != i.parent.parent.left_tree_size
):
-
# Neighbouring Nodes
previous_node = i.left
next_node = i.parent.parent
# Merging trees
- i = i.mergeTrees(i.parent)
+ i = i.merge_trees(i.parent)
# Updating links
i.left = previous_node
@@ -233,12 +232,11 @@ class BinomialHeap:
and self.bottom_root.left_tree_size
== self.bottom_root.parent.left_tree_size
):
-
# Next node
next_node = self.bottom_root.parent.parent
# Merge
- self.bottom_root = self.bottom_root.mergeTrees(self.bottom_root.parent)
+ self.bottom_root = self.bottom_root.merge_trees(self.bottom_root.parent)
# Update Links
self.bottom_root.parent = next_node
@@ -252,10 +250,10 @@ class BinomialHeap:
"""
return self.min_node.val
- def isEmpty(self):
+ def is_empty(self):
return self.size == 0
- def deleteMin(self):
+ def delete_min(self):
"""
delete min element and return it
"""
@@ -317,7 +315,7 @@ class BinomialHeap:
return min_value
# Remaining cases
# Construct heap of right subtree
- newHeap = BinomialHeap(
+ new_heap = BinomialHeap(
bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new
)
@@ -354,11 +352,11 @@ class BinomialHeap:
self.min_node = i
i = i.parent
# Merge heaps
- self.mergeHeaps(newHeap)
+ self.merge_heaps(new_heap)
return min_value
- def preOrder(self):
+ def pre_order(self):
"""
Returns the Pre-order representation of the heap including
values of nodes plus their level distance from the root;
@@ -369,9 +367,9 @@ class BinomialHeap:
while top_root.parent:
top_root = top_root.parent
# preorder
- heap_preOrder = []
- self.__traversal(top_root, heap_preOrder)
- return heap_preOrder
+ heap_pre_order = []
+ self.__traversal(top_root, heap_pre_order)
+ return heap_pre_order
def __traversal(self, curr_node, preorder, level=0):
"""
@@ -389,9 +387,9 @@ class BinomialHeap:
Overwriting str for a pre-order print of nodes in heap;
Performance is poor, so use only for small examples
"""
- if self.isEmpty():
+ if self.is_empty():
return ""
- preorder_heap = self.preOrder()
+ preorder_heap = self.pre_order()
return "\n".join(("-" * level + str(value)) for value, level in preorder_heap)
diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py
index 65a70e468..c1004f349 100644
--- a/data_structures/heap/heap.py
+++ b/data_structures/heap/heap.py
@@ -1,43 +1,64 @@
-from typing import Iterable, List, Optional
+from __future__ import annotations
+
+from abc import abstractmethod
+from collections.abc import Iterable
+from typing import Generic, Protocol, TypeVar
-class Heap:
+class Comparable(Protocol):
+ @abstractmethod
+ def __lt__(self: T, other: T) -> bool:
+ pass
+
+ @abstractmethod
+ def __gt__(self: T, other: T) -> bool:
+ pass
+
+ @abstractmethod
+ def __eq__(self: T, other: object) -> bool:
+ pass
+
+
+T = TypeVar("T", bound=Comparable)
+
+
+class Heap(Generic[T]):
"""A Max Heap Implementation
>>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]
>>> h = Heap()
>>> h.build_max_heap(unsorted)
- >>> print(h)
+ >>> h
[209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
>>>
>>> h.extract_max()
209
- >>> print(h)
+ >>> h
[201, 107, 25, 103, 11, 15, 1, 9, 7, 5]
>>>
>>> h.insert(100)
- >>> print(h)
+ >>> h
[201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11]
>>>
>>> h.heap_sort()
- >>> print(h)
+ >>> h
[1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
"""
def __init__(self) -> None:
- self.h: List[float] = []
+ self.h: list[T] = []
self.heap_size: int = 0
def __repr__(self) -> str:
return str(self.h)
- def parent_index(self, child_idx: int) -> Optional[int]:
+ def parent_index(self, child_idx: int) -> int | None:
"""return the parent index of given child"""
if child_idx > 0:
return (child_idx - 1) // 2
return None
- def left_child_idx(self, parent_idx: int) -> Optional[int]:
+ def left_child_idx(self, parent_idx: int) -> int | None:
"""
return the left child index if the left child exists.
if not, return None.
@@ -47,7 +68,7 @@ class Heap:
return left_child_index
return None
- def right_child_idx(self, parent_idx: int) -> Optional[int]:
+ def right_child_idx(self, parent_idx: int) -> int | None:
"""
return the right child index if the right child exists.
if not, return None.
@@ -77,7 +98,7 @@ class Heap:
# fix the subsequent violation recursively if any
self.max_heapify(violation)
- def build_max_heap(self, collection: Iterable[float]) -> None:
+ def build_max_heap(self, collection: Iterable[T]) -> None:
"""build max heap from an unsorted array"""
self.h = list(collection)
self.heap_size = len(self.h)
@@ -86,14 +107,7 @@ class Heap:
for i in range(self.heap_size // 2 - 1, -1, -1):
self.max_heapify(i)
- def max(self) -> float:
- """return the max in the heap"""
- if self.heap_size >= 1:
- return self.h[0]
- else:
- raise Exception("Empty heap")
-
- def extract_max(self) -> float:
+ def extract_max(self) -> T:
"""get and remove max from heap"""
if self.heap_size >= 2:
me = self.h[0]
@@ -107,7 +121,7 @@ class Heap:
else:
raise Exception("Empty heap")
- def insert(self, value: float) -> None:
+ def insert(self, value: T) -> None:
"""insert a new value into the max heap"""
self.h.append(value)
idx = (self.heap_size - 1) // 2
@@ -149,7 +163,7 @@ if __name__ == "__main__":
]:
print(f"unsorted array: {unsorted}")
- heap = Heap()
+ heap: Heap[int] = Heap()
heap.build_max_heap(unsorted)
print(f"after build heap: {heap}")
diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py
index 553cb9451..ee92149e2 100644
--- a/data_structures/heap/heap_generic.py
+++ b/data_structures/heap/heap_generic.py
@@ -1,35 +1,38 @@
+from collections.abc import Callable
+
+
class Heap:
"""
A generic Heap class, can be used as min or max by passing the key function
accordingly.
"""
- def __init__(self, key=None):
+ def __init__(self, key: Callable | None = None) -> None:
# Stores actual heap items.
- self.arr = list()
+ self.arr: list = []
# Stores indexes of each item for supporting updates and deletion.
- self.pos_map = {}
+ self.pos_map: dict = {}
# Stores current size of heap.
self.size = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
self.key = key or (lambda x: x)
- def _parent(self, i):
+ def _parent(self, i: int) -> int | None:
"""Returns parent index of given index if exists else None"""
return int((i - 1) / 2) if i > 0 else None
- def _left(self, i):
+ def _left(self, i: int) -> int | None:
"""Returns left-child-index of given index if exists else None"""
left = int(2 * i + 1)
return left if 0 < left < self.size else None
- def _right(self, i):
+ def _right(self, i: int) -> int | None:
"""Returns right-child-index of given index if exists else None"""
right = int(2 * i + 2)
return right if 0 < right < self.size else None
- def _swap(self, i, j):
+ def _swap(self, i: int, j: int) -> None:
"""Performs changes required for swapping two elements in the heap"""
# First update the indexes of the items in index map.
self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
@@ -39,11 +42,11 @@ class Heap:
# Then swap the items in the list.
self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
- def _cmp(self, i, j):
+ def _cmp(self, i: int, j: int) -> bool:
"""Compares the two items using default comparison"""
return self.arr[i][1] < self.arr[j][1]
- def _get_valid_parent(self, i):
+ def _get_valid_parent(self, i: int) -> int:
"""
Returns index of valid parent as per desired ordering among given index and
both it's children
@@ -59,21 +62,21 @@ class Heap:
return valid_parent
- def _heapify_up(self, index):
+ def _heapify_up(self, index: int) -> None:
"""Fixes the heap in upward direction of given index"""
parent = self._parent(index)
while parent is not None and not self._cmp(index, parent):
self._swap(index, parent)
index, parent = parent, self._parent(parent)
- def _heapify_down(self, index):
+ def _heapify_down(self, index: int) -> None:
"""Fixes the heap in downward direction of given index"""
valid_parent = self._get_valid_parent(index)
while valid_parent != index:
self._swap(index, valid_parent)
index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
- def update_item(self, item, item_value):
+ def update_item(self, item: int, item_value: int) -> None:
"""Updates given item value in heap if present"""
if item not in self.pos_map:
return
@@ -84,7 +87,7 @@ class Heap:
self._heapify_up(index)
self._heapify_down(index)
- def delete_item(self, item):
+ def delete_item(self, item: int) -> None:
"""Deletes given item from heap if present"""
if item not in self.pos_map:
return
@@ -99,7 +102,7 @@ class Heap:
self._heapify_up(index)
self._heapify_down(index)
- def insert_item(self, item, item_value):
+ def insert_item(self, item: int, item_value: int) -> None:
"""Inserts given item with given value in heap"""
arr_len = len(self.arr)
if arr_len == self.size:
@@ -110,11 +113,11 @@ class Heap:
self.size += 1
self._heapify_up(self.size - 1)
- def get_top(self):
+ def get_top(self) -> tuple | None:
"""Returns top item tuple (Calculated value, item) from heap if present"""
return self.arr[0] if self.size else None
- def extract_top(self):
+ def extract_top(self) -> tuple | None:
"""
Return top item tuple (Calculated value, item) from heap and removes it as well
if present
@@ -163,7 +166,6 @@ def test_heap() -> None:
>>> h.get_top()
[9, -40]
"""
- pass
if __name__ == "__main__":
diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py
index 9265c4839..ecb187649 100644
--- a/data_structures/heap/min_heap.py
+++ b/data_structures/heap/min_heap.py
@@ -27,7 +27,7 @@ class MinHeap:
>>> myMinHeap.decrease_key(b, -17)
>>> print(b)
Node(B, -17)
- >>> print(myMinHeap["B"])
+ >>> myMinHeap["B"]
-17
"""
@@ -52,14 +52,14 @@ class MinHeap:
return self.heap_dict[key]
def build_heap(self, array):
- lastIdx = len(array) - 1
- startFrom = self.get_parent_idx(lastIdx)
+ last_idx = len(array) - 1
+ start_from = self.get_parent_idx(last_idx)
for idx, i in enumerate(array):
self.idx_of_element[i] = idx
self.heap_dict[i.name] = i.val
- for i in range(startFrom, -1, -1):
+ for i in range(start_from, -1, -1):
self.sift_down(i, array)
return array
@@ -121,14 +121,14 @@ class MinHeap:
self.sift_up(len(self.heap) - 1)
def is_empty(self):
- return True if len(self.heap) == 0 else False
+ return len(self.heap) == 0
- def decrease_key(self, node, newValue):
+ def decrease_key(self, node, new_value):
assert (
- self.heap[self.idx_of_element[node]].val > newValue
+ self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
- node.val = newValue
- self.heap_dict[node.name] = newValue
+ node.val = new_value
+ self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node])
@@ -143,7 +143,7 @@ e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
-myMinHeap = MinHeap([r, b, a, x, e])
+my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
@@ -154,14 +154,14 @@ myMinHeap = MinHeap([r, b, a, x, e])
# Before
print("Min Heap - before decrease key")
-for i in myMinHeap.heap:
+for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
-myMinHeap.decrease_key(b, -17)
+my_min_heap.decrease_key(b, -17)
# After
-for i in myMinHeap.heap:
+for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py
index 0ddc2272e..c0f9888f8 100644
--- a/data_structures/heap/randomized_heap.py
+++ b/data_structures/heap/randomized_heap.py
@@ -3,9 +3,10 @@
from __future__ import annotations
import random
-from typing import Generic, Iterable, List, Optional, TypeVar
+from collections.abc import Iterable
+from typing import Any, Generic, TypeVar
-T = TypeVar("T")
+T = TypeVar("T", bound=bool)  # NOTE(review): bool is a stand-in comparable bound — TODO: use a Comparable protocol
class RandomizedHeapNode(Generic[T]):
@@ -16,8 +17,8 @@ class RandomizedHeapNode(Generic[T]):
def __init__(self, value: T) -> None:
self._value: T = value
- self.left: Optional[RandomizedHeapNode[T]] = None
- self.right: Optional[RandomizedHeapNode[T]] = None
+ self.left: RandomizedHeapNode[T] | None = None
+ self.right: RandomizedHeapNode[T] | None = None
@property
def value(self) -> T:
@@ -26,8 +27,8 @@ class RandomizedHeapNode(Generic[T]):
@staticmethod
def merge(
- root1: Optional[RandomizedHeapNode[T]], root2: Optional[RandomizedHeapNode[T]]
- ) -> Optional[RandomizedHeapNode[T]]:
+ root1: RandomizedHeapNode[T] | None, root2: RandomizedHeapNode[T] | None
+ ) -> RandomizedHeapNode[T] | None:
"""Merge 2 nodes together."""
if not root1:
return root2
@@ -69,15 +70,17 @@ class RandomizedHeap(Generic[T]):
[-1, 0, 1]
"""
- def __init__(self, data: Optional[Iterable[T]] = ()) -> None:
+ def __init__(self, data: Iterable[T] | None = ()) -> None:
"""
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
- self._root: Optional[RandomizedHeapNode[T]] = None
- for item in data:
- self.insert(item)
+ self._root: RandomizedHeapNode[T] | None = None
+
+ if data:
+ for item in data:
+ self.insert(item)
def insert(self, value: T) -> None:
"""
@@ -93,7 +96,7 @@ class RandomizedHeap(Generic[T]):
"""
self._root = RandomizedHeapNode.merge(self._root, RandomizedHeapNode(value))
- def pop(self) -> T:
+ def pop(self) -> T | None:
"""
Pop the smallest value from the heap and return it.
@@ -111,7 +114,12 @@ class RandomizedHeap(Generic[T]):
...
IndexError: Can't get top element for the empty heap.
"""
+
result = self.top()
+
+ if self._root is None:
+ return None
+
self._root = RandomizedHeapNode.merge(self._root.left, self._root.right)
return result
@@ -138,7 +146,7 @@ class RandomizedHeap(Generic[T]):
raise IndexError("Can't get top element for the empty heap.")
return self._root.value
- def clear(self):
+ def clear(self) -> None:
"""
Clear the heap.
@@ -151,7 +159,7 @@ class RandomizedHeap(Generic[T]):
"""
self._root = None
- def to_sorted_list(self) -> List[T]:
+ def to_sorted_list(self) -> list[Any]:
"""
Returns sorted list containing all the values in the heap.
diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py
index 417a383f7..c4c13b082 100644
--- a/data_structures/heap/skew_heap.py
+++ b/data_structures/heap/skew_heap.py
@@ -2,9 +2,10 @@
from __future__ import annotations
-from typing import Generic, Iterable, Iterator, Optional, TypeVar
+from collections.abc import Iterable, Iterator
+from typing import Any, Generic, TypeVar
-T = TypeVar("T")
+T = TypeVar("T", bound=bool)  # NOTE(review): bool is a stand-in comparable bound — TODO: use a Comparable protocol
class SkewNode(Generic[T]):
@@ -15,8 +16,8 @@ class SkewNode(Generic[T]):
def __init__(self, value: T) -> None:
self._value: T = value
- self.left: Optional[SkewNode[T]] = None
- self.right: Optional[SkewNode[T]] = None
+ self.left: SkewNode[T] | None = None
+ self.right: SkewNode[T] | None = None
@property
def value(self) -> T:
@@ -25,8 +26,8 @@ class SkewNode(Generic[T]):
@staticmethod
def merge(
- root1: Optional[SkewNode[T]], root2: Optional[SkewNode[T]]
- ) -> Optional[SkewNode[T]]:
+ root1: SkewNode[T] | None, root2: SkewNode[T] | None
+ ) -> SkewNode[T] | None:
"""Merge 2 nodes together."""
if not root1:
return root2
@@ -51,7 +52,7 @@ class SkewHeap(Generic[T]):
values. Both operations take O(logN) time where N is the size of the
structure.
Wiki: https://en.wikipedia.org/wiki/Skew_heap
- Visualisation: https://www.cs.usfca.edu/~galles/visualization/SkewHeap.html
+ Visualization: https://www.cs.usfca.edu/~galles/visualization/SkewHeap.html
>>> list(SkewHeap([2, 3, 1, 5, 1, 7]))
[1, 1, 2, 3, 5, 7]
@@ -69,15 +70,16 @@ class SkewHeap(Generic[T]):
[-1, 0, 1]
"""
- def __init__(self, data: Optional[Iterable[T]] = ()) -> None:
+ def __init__(self, data: Iterable[T] | None = ()) -> None:
"""
>>> sh = SkewHeap([3, 1, 3, 7])
>>> list(sh)
[1, 3, 3, 7]
"""
- self._root: Optional[SkewNode[T]] = None
- for item in data:
- self.insert(item)
+ self._root: SkewNode[T] | None = None
+ if data:
+ for item in data:
+ self.insert(item)
def __bool__(self) -> bool:
"""
@@ -103,7 +105,7 @@ class SkewHeap(Generic[T]):
>>> list(sh)
[1, 3, 3, 7]
"""
- result = []
+ result: list[Any] = []
while self:
result.append(self.pop())
@@ -127,7 +129,7 @@ class SkewHeap(Generic[T]):
"""
self._root = SkewNode.merge(self._root, SkewNode(value))
- def pop(self) -> T:
+ def pop(self) -> T | None:
"""
Pop the smallest value from the heap and return it.
@@ -146,7 +148,9 @@ class SkewHeap(Generic[T]):
IndexError: Can't get top element for the empty heap.
"""
result = self.top()
- self._root = SkewNode.merge(self._root.left, self._root.right)
+ self._root = (
+ SkewNode.merge(self._root.left, self._root.right) if self._root else None
+ )
return result
@@ -172,7 +176,7 @@ class SkewHeap(Generic[T]):
raise IndexError("Can't get top element for the empty heap.")
return self._root.value
- def clear(self):
+ def clear(self) -> None:
"""
Clear the heap.
diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py
index a5f5537b1..56b0e51ba 100644
--- a/data_structures/linked_list/__init__.py
+++ b/data_structures/linked_list/__init__.py
@@ -5,19 +5,20 @@ Nodes contain data and also may link to other nodes:
head node gives us access of the complete list
- Last node: points to null
"""
+from __future__ import annotations
from typing import Any
class Node:
- def __init__(self, item: Any, next: Any) -> None:
+ def __init__(self, item: Any, next: Any) -> None: # noqa: A002
self.item = item
self.next = next
class LinkedList:
def __init__(self) -> None:
- self.head = None
+ self.head: Node | None = None
self.size = 0
def add(self, item: Any) -> None:
@@ -25,7 +26,10 @@ class LinkedList:
self.size += 1
def remove(self) -> Any:
- if self.is_empty():
+        # Check 'self.head is None' directly instead of 'self.is_empty()'
+        # so that mypy can narrow 'self.head' to a non-None Node in the
+        # else branch below and not report an optional-access error.
+ if self.head is None:
return None
else:
item = self.head.item
@@ -45,12 +49,12 @@ class LinkedList:
>>> print(linked_list)
9 --> 14 --> 23
"""
- if not self.is_empty:
+ if self.is_empty():
return ""
else:
iterate = self.head
item_str = ""
- item_list = []
+ item_list: list[str] = []
while iterate:
item_list.append(str(iterate.item))
iterate = iterate.next
diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py
index f67c1e8f2..325d91026 100644
--- a/data_structures/linked_list/circular_linked_list.py
+++ b/data_structures/linked_list/circular_linked_list.py
@@ -1,10 +1,13 @@
+from __future__ import annotations
+
+from collections.abc import Iterator
from typing import Any
class Node:
def __init__(self, data: Any):
- self.data = data
- self.next = None
+ self.data: Any = data
+ self.next: Node | None = None
class CircularLinkedList:
@@ -12,7 +15,7 @@ class CircularLinkedList:
self.head = None
self.tail = None
- def __iter__(self):
+ def __iter__(self) -> Iterator[Any]:
node = self.head
while self.head:
yield node.data
@@ -21,7 +24,7 @@ class CircularLinkedList:
break
def __len__(self) -> int:
- return len(tuple(iter(self)))
+ return sum(1 for _ in self)
def __repr__(self):
return "->".join(str(item) for item in iter(self))
@@ -54,10 +57,10 @@ class CircularLinkedList:
def delete_front(self):
return self.delete_nth(0)
- def delete_tail(self) -> None:
+ def delete_tail(self) -> Any:
return self.delete_nth(len(self) - 1)
- def delete_nth(self, index: int = 0):
+ def delete_nth(self, index: int = 0) -> Any:
if not 0 <= index < len(self):
raise IndexError("list index out of range.")
delete_node = self.head
@@ -76,7 +79,7 @@ class CircularLinkedList:
self.tail = temp
return delete_node.data
- def is_empty(self):
+ def is_empty(self) -> bool:
return len(self) == 0
@@ -91,25 +94,25 @@ def test_circular_linked_list() -> None:
try:
circular_linked_list.delete_front()
- assert False # This should not happen
+ raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
- assert False # This should not happen
+ raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1)
- assert False
+ raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0)
- assert False
+ raise AssertionError
except IndexError:
assert True
diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py
index 0eb3cf101..1a6c48191 100644
--- a/data_structures/linked_list/doubly_linked_list.py
+++ b/data_structures/linked_list/doubly_linked_list.py
@@ -51,7 +51,7 @@ class DoublyLinkedList:
>>> len(linked_list) == 5
True
"""
- return len(tuple(iter(self)))
+ return sum(1 for _ in self)
def insert_at_head(self, data):
self.insert_at_nth(0, data)
@@ -64,11 +64,11 @@ class DoublyLinkedList:
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_nth(-1, 666)
Traceback (most recent call last):
- ....
+ ....
IndexError: list index out of range
>>> linked_list.insert_at_nth(1, 666)
Traceback (most recent call last):
- ....
+ ....
IndexError: list index out of range
>>> linked_list.insert_at_nth(0, 2)
>>> linked_list.insert_at_nth(0, 1)
@@ -78,10 +78,12 @@ class DoublyLinkedList:
'1->2->3->4'
>>> linked_list.insert_at_nth(5, 5)
Traceback (most recent call last):
- ....
+ ....
IndexError: list index out of range
"""
- if not 0 <= index <= len(self):
+ length = len(self)
+
+ if not 0 <= index <= length:
raise IndexError("list index out of range")
new_node = Node(data)
if self.head is None:
@@ -90,13 +92,13 @@ class DoublyLinkedList:
self.head.previous = new_node
new_node.next = self.head
self.head = new_node
- elif index == len(self):
+ elif index == length:
self.tail.next = new_node
new_node.previous = self.tail
self.tail = new_node
else:
temp = self.head
- for i in range(0, index):
+ for _ in range(0, index):
temp = temp.next
temp.previous.next = new_node
new_node.previous = temp.previous
@@ -114,7 +116,7 @@ class DoublyLinkedList:
>>> linked_list = DoublyLinkedList()
>>> linked_list.delete_at_nth(0)
Traceback (most recent call last):
- ....
+ ....
IndexError: list index out of range
>>> for i in range(0, 5):
... linked_list.insert_at_nth(i, i + 1)
@@ -128,24 +130,26 @@ class DoublyLinkedList:
'2->4'
>>> linked_list.delete_at_nth(2)
Traceback (most recent call last):
- ....
+ ....
IndexError: list index out of range
"""
- if not 0 <= index <= len(self) - 1:
+ length = len(self)
+
+ if not 0 <= index <= length - 1:
raise IndexError("list index out of range")
delete_node = self.head # default first node
- if len(self) == 1:
+ if length == 1:
self.head = self.tail = None
elif index == 0:
self.head = self.head.next
self.head.previous = None
- elif index == len(self) - 1:
+ elif index == length - 1:
delete_node = self.tail
self.tail = self.tail.previous
self.tail.next = None
else:
temp = self.head
- for i in range(0, index):
+ for _ in range(0, index):
temp = temp.next
delete_node = temp
temp.next.previous = temp.previous
@@ -159,7 +163,7 @@ class DoublyLinkedList:
if current.next:
current = current.next
else: # We have reached the end an no value matches
- return "No data matching given value"
+ raise ValueError("No data matching given value")
if current == self.head:
self.delete_head()
@@ -194,13 +198,13 @@ def test_doubly_linked_list() -> None:
try:
linked_list.delete_head()
- assert False # This should not happen.
+ raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
- assert False # This should not happen.
+ raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py
index 184b6966b..e993cc5a2 100644
--- a/data_structures/linked_list/doubly_linked_list_two.py
+++ b/data_structures/linked_list/doubly_linked_list_two.py
@@ -80,7 +80,6 @@ class LinkedList:
return None
def set_head(self, node: Node) -> None:
-
if self.head is None:
self.head = node
self.tail = node
@@ -129,7 +128,7 @@ class LinkedList:
while node:
if current_position == position:
self.insert_before_node(node, new_node)
- return None
+ return
current_position += 1
node = node.next
self.insert_after_node(self.tail, new_node)
@@ -143,9 +142,7 @@ class LinkedList:
raise Exception("Node not found")
def delete_value(self, value):
- node = self.get_node(value)
-
- if node is not None:
+ if (node := self.get_node(value)) is not None:
if node == self.head:
self.head = self.head.get_next()
diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py
index 405ece7e2..bc06ffe15 100644
--- a/data_structures/linked_list/has_loop.py
+++ b/data_structures/linked_list/has_loop.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import Any
@@ -7,8 +9,8 @@ class ContainsLoopError(Exception):
class Node:
def __init__(self, data: Any) -> None:
- self.data = data
- self.next_node = None
+ self.data: Any = data
+ self.next_node: Node | None = None
def __iter__(self):
node = self
diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py
index acc87c1c2..ec19e99f7 100644
--- a/data_structures/linked_list/is_palindrome.py
+++ b/data_structures/linked_list/is_palindrome.py
@@ -55,7 +55,7 @@ def is_palindrome_dict(head):
d = {}
pos = 0
while head:
- if head.val in d.keys():
+ if head.val in d:
d[head.val].append(pos)
else:
d[head.val] = [pos]
diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py
index 96ec6b8ab..ca0d3bb48 100644
--- a/data_structures/linked_list/merge_two_lists.py
+++ b/data_structures/linked_list/merge_two_lists.py
@@ -5,7 +5,6 @@ from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
-from typing import Optional
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@@ -14,13 +13,13 @@ test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
data: int
- next: Optional[Node]
+ next_node: Node | None
class SortedLinkedList:
def __init__(self, ints: Iterable[int]) -> None:
- self.head: Optional[Node] = None
- for i in reversed(sorted(ints)):
+ self.head: Node | None = None
+ for i in sorted(ints, reverse=True):
self.head = Node(i, self.head)
def __iter__(self) -> Iterator[int]:
@@ -33,7 +32,7 @@ class SortedLinkedList:
node = self.head
while node:
yield node.data
- node = node.next
+ node = node.next_node
def __len__(self) -> int:
"""
@@ -45,7 +44,7 @@ class SortedLinkedList:
>>> len(SortedLinkedList(test_data_odd))
8
"""
- return len(tuple(iter(self)))
+ return sum(1 for _ in self)
def __str__(self) -> str:
"""
diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py
index 185c4ccbb..86dad6b41 100644
--- a/data_structures/linked_list/middle_element_of_linked_list.py
+++ b/data_structures/linked_list/middle_element_of_linked_list.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
+
class Node:
- def __init__(self, data: int) -> int:
+ def __init__(self, data: int) -> None:
self.data = data
self.next = None
@@ -14,7 +17,7 @@ class LinkedList:
self.head = new_node
return self.head.data
- def middle_element(self) -> int:
+ def middle_element(self) -> int | None:
"""
>>> link = LinkedList()
>>> link.middle_element()
@@ -54,11 +57,12 @@ class LinkedList:
return slow_pointer.data
else:
print("No element found.")
+ return None
if __name__ == "__main__":
link = LinkedList()
- for i in range(int(input().strip())):
+ for _ in range(int(input().strip())):
data = int(input().strip())
link.push(data)
print(link.middle_element())
diff --git a/data_structures/linked_list/print_reverse.py b/data_structures/linked_list/print_reverse.py
index c46f228e7..f83d5607f 100644
--- a/data_structures/linked_list/print_reverse.py
+++ b/data_structures/linked_list/print_reverse.py
@@ -1,4 +1,4 @@
-from typing import List
+from __future__ import annotations
class Node:
@@ -16,7 +16,7 @@ class Node:
return "->".join(string_rep)
-def make_linked_list(elements_list: List):
+def make_linked_list(elements_list: list):
"""Creates a Linked List from the elements of the given sequence
(list/tuple) and returns the head of the Linked List.
>>> make_linked_list([])
diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py
index e45a210a1..890e21c9b 100644
--- a/data_structures/linked_list/singly_linked_list.py
+++ b/data_structures/linked_list/singly_linked_list.py
@@ -1,17 +1,53 @@
+from typing import Any
+
+
class Node:
- def __init__(self, data):
+ def __init__(self, data: Any):
+ """
+ Create and initialize Node class instance.
+ >>> Node(20)
+ Node(20)
+ >>> Node("Hello, world!")
+ Node(Hello, world!)
+ >>> Node(None)
+ Node(None)
+ >>> Node(True)
+ Node(True)
+ """
self.data = data
self.next = None
- def __repr__(self):
+ def __repr__(self) -> str:
+ """
+ Get the string representation of this node.
+ >>> Node(10).__repr__()
+ 'Node(10)'
+ """
return f"Node({self.data})"
class LinkedList:
def __init__(self):
+ """
+ Create and initialize LinkedList class instance.
+ >>> linked_list = LinkedList()
+ """
self.head = None
- def __iter__(self):
+ def __iter__(self) -> Any:
+ """
+ This function is intended for iterators to access
+ and iterate through data inside linked list.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("tail")
+ >>> linked_list.insert_tail("tail_1")
+ >>> linked_list.insert_tail("tail_2")
+ >>> for node in linked_list: # __iter__ used here.
+ ... node
+ 'tail'
+ 'tail_1'
+ 'tail_2'
+ """
node = self.head
while node:
yield node.data
@@ -23,7 +59,7 @@ class LinkedList:
>>> linked_list = LinkedList()
>>> len(linked_list)
0
- >>> linked_list.insert_tail("head")
+ >>> linked_list.insert_tail("tail")
>>> len(linked_list)
1
>>> linked_list.insert_head("head")
@@ -36,15 +72,20 @@ class LinkedList:
>>> len(linked_list)
0
"""
- return len(tuple(iter(self)))
+ return sum(1 for _ in self)
- def __repr__(self):
+ def __repr__(self) -> str:
"""
String representation/visualization of a Linked Lists
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail(1)
+ >>> linked_list.insert_tail(3)
+ >>> linked_list.__repr__()
+ '1->3'
"""
return "->".join([str(item) for item in self])
- def __getitem__(self, index):
+ def __getitem__(self, index: int) -> Any:
"""
Indexing Support. Used to get a node at particular position
>>> linked_list = LinkedList()
@@ -54,11 +95,11 @@ class LinkedList:
True
>>> linked_list[-10]
Traceback (most recent call last):
- ...
+ ...
ValueError: list index out of range.
>>> linked_list[len(linked_list)]
Traceback (most recent call last):
- ...
+ ...
ValueError: list index out of range.
"""
if not 0 <= index < len(self):
@@ -66,9 +107,10 @@ class LinkedList:
for i, node in enumerate(self):
if i == index:
return node
+ return None
# Used to change the data of a particular node
- def __setitem__(self, index, data):
+ def __setitem__(self, index: int, data: Any) -> None:
"""
>>> linked_list = LinkedList()
>>> for i in range(0, 10):
@@ -81,27 +123,68 @@ class LinkedList:
-666
>>> linked_list[-10] = 666
Traceback (most recent call last):
- ...
+ ...
ValueError: list index out of range.
>>> linked_list[len(linked_list)] = 666
Traceback (most recent call last):
- ...
+ ...
ValueError: list index out of range.
"""
if not 0 <= index < len(self):
raise ValueError("list index out of range.")
current = self.head
- for i in range(index):
+ for _ in range(index):
current = current.next
current.data = data
- def insert_tail(self, data) -> None:
+ def insert_tail(self, data: Any) -> None:
+ """
+ Insert data to the end of linked list.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("tail")
+ >>> linked_list
+ tail
+ >>> linked_list.insert_tail("tail_2")
+ >>> linked_list
+ tail->tail_2
+ >>> linked_list.insert_tail("tail_3")
+ >>> linked_list
+ tail->tail_2->tail_3
+ """
self.insert_nth(len(self), data)
- def insert_head(self, data) -> None:
+ def insert_head(self, data: Any) -> None:
+ """
+ Insert data to the beginning of linked list.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_head("head")
+ >>> linked_list
+ head
+ >>> linked_list.insert_head("head_2")
+ >>> linked_list
+ head_2->head
+ >>> linked_list.insert_head("head_3")
+ >>> linked_list
+ head_3->head_2->head
+ """
self.insert_nth(0, data)
- def insert_nth(self, index: int, data) -> None:
+ def insert_nth(self, index: int, data: Any) -> None:
+ """
+ Insert data at given index.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("first")
+ >>> linked_list.insert_tail("second")
+ >>> linked_list.insert_tail("third")
+ >>> linked_list
+ first->second->third
+ >>> linked_list.insert_nth(1, "fourth")
+ >>> linked_list
+ first->fourth->second->third
+ >>> linked_list.insert_nth(3, "fifth")
+ >>> linked_list
+ first->fourth->second->fifth->third
+ """
if not 0 <= index <= len(self):
raise IndexError("list index out of range")
new_node = Node(data)
@@ -118,17 +201,96 @@ class LinkedList:
temp.next = new_node
def print_list(self) -> None: # print every node data
+ """
+ This method prints every node data.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("first")
+ >>> linked_list.insert_tail("second")
+ >>> linked_list.insert_tail("third")
+ >>> linked_list
+ first->second->third
+ """
print(self)
- def delete_head(self):
+ def delete_head(self) -> Any:
+ """
+ Delete the first node and return the
+ node's data.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("first")
+ >>> linked_list.insert_tail("second")
+ >>> linked_list.insert_tail("third")
+ >>> linked_list
+ first->second->third
+ >>> linked_list.delete_head()
+ 'first'
+ >>> linked_list
+ second->third
+ >>> linked_list.delete_head()
+ 'second'
+ >>> linked_list
+ third
+ >>> linked_list.delete_head()
+ 'third'
+ >>> linked_list.delete_head()
+ Traceback (most recent call last):
+ ...
+ IndexError: List index out of range.
+ """
return self.delete_nth(0)
- def delete_tail(self): # delete from tail
+ def delete_tail(self) -> Any: # delete from tail
+ """
+ Delete the tail end node and return the
+ node's data.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("first")
+ >>> linked_list.insert_tail("second")
+ >>> linked_list.insert_tail("third")
+ >>> linked_list
+ first->second->third
+ >>> linked_list.delete_tail()
+ 'third'
+ >>> linked_list
+ first->second
+ >>> linked_list.delete_tail()
+ 'second'
+ >>> linked_list
+ first
+ >>> linked_list.delete_tail()
+ 'first'
+ >>> linked_list.delete_tail()
+ Traceback (most recent call last):
+ ...
+ IndexError: List index out of range.
+ """
return self.delete_nth(len(self) - 1)
- def delete_nth(self, index: int = 0):
+ def delete_nth(self, index: int = 0) -> Any:
+ """
+ Delete node at given index and return the
+ node's data.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("first")
+ >>> linked_list.insert_tail("second")
+ >>> linked_list.insert_tail("third")
+ >>> linked_list
+ first->second->third
+ >>> linked_list.delete_nth(1) # delete middle
+ 'second'
+ >>> linked_list
+ first->third
+ >>> linked_list.delete_nth(5) # this raises error
+ Traceback (most recent call last):
+ ...
+ IndexError: List index out of range.
+ >>> linked_list.delete_nth(-1) # this also raises error
+ Traceback (most recent call last):
+ ...
+ IndexError: List index out of range.
+ """
if not 0 <= index <= len(self) - 1: # test if index is valid
- raise IndexError("list index out of range")
+ raise IndexError("List index out of range.")
delete_node = self.head # default first node
if index == 0:
self.head = self.head.next
@@ -141,9 +303,30 @@ class LinkedList:
return delete_node.data
def is_empty(self) -> bool:
+ """
+ Check if linked list is empty.
+ >>> linked_list = LinkedList()
+ >>> linked_list.is_empty()
+ True
+ >>> linked_list.insert_head("first")
+ >>> linked_list.is_empty()
+ False
+ """
return self.head is None
- def reverse(self):
+ def reverse(self) -> None:
+ """
+ This reverses the linked list order.
+ >>> linked_list = LinkedList()
+ >>> linked_list.insert_tail("first")
+ >>> linked_list.insert_tail("second")
+ >>> linked_list.insert_tail("third")
+ >>> linked_list
+ first->second->third
+ >>> linked_list.reverse()
+ >>> linked_list
+ third->second->first
+ """
prev = None
current = self.head
@@ -170,13 +353,13 @@ def test_singly_linked_list() -> None:
try:
linked_list.delete_head()
- assert False # This should not happen.
+ raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
- assert False # This should not happen.
+ raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
@@ -201,6 +384,91 @@ def test_singly_linked_list() -> None:
linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0, 9)) is True
+ linked_list.reverse()
+ assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
+
+
+def test_singly_linked_list_2() -> None:
+ """
+ This section of the test used varying data types for input.
+ >>> test_singly_linked_list_2()
+ """
+ test_input = [
+ -9,
+ 100,
+ Node(77345112),
+ "dlrow olleH",
+ 7,
+ 5555,
+ 0,
+ -192.55555,
+ "Hello, world!",
+ 77.9,
+ Node(10),
+ None,
+ None,
+ 12.20,
+ ]
+ linked_list = LinkedList()
+
+ for i in test_input:
+ linked_list.insert_tail(i)
+
+ # Check if it's empty or not
+ assert linked_list.is_empty() is False
+ assert (
+ str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
+ "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
+ )
+
+ # Delete the head
+ result = linked_list.delete_head()
+ assert result == -9
+ assert (
+ str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
+ "Hello, world!->77.9->Node(10)->None->None->12.2"
+ )
+
+ # Delete the tail
+ result = linked_list.delete_tail()
+ assert result == 12.2
+ assert (
+ str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
+ "Hello, world!->77.9->Node(10)->None->None"
+ )
+
+ # Delete a node in specific location in linked list
+ result = linked_list.delete_nth(10)
+ assert result is None
+ assert (
+ str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
+ "Hello, world!->77.9->Node(10)->None"
+ )
+
+ # Add a Node instance to its head
+ linked_list.insert_head(Node("Hello again, world!"))
+ assert (
+ str(linked_list)
+ == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
+ "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
+ )
+
+ # Add None to its tail
+ linked_list.insert_tail(None)
+ assert (
+ str(linked_list)
+ == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
+ "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
+ )
+
+ # Reverse the linked list
+ linked_list.reverse()
+ assert (
+ str(linked_list)
+ == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
+ "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
+ )
+
def main():
from doctest import testmod
diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py
index 8f06e6193..4413c53e5 100644
--- a/data_structures/linked_list/skip_list.py
+++ b/data_structures/linked_list/skip_list.py
@@ -2,18 +2,17 @@
Based on "Skip Lists: A Probabilistic Alternative to Balanced Trees" by William Pugh
https://epaperpress.com/sortsearch/download/skiplist.pdf
"""
-
from __future__ import annotations
from random import random
-from typing import Generic, Optional, TypeVar
+from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
- def __init__(self, key: KT, value: VT):
+ def __init__(self, key: KT | str = "root", value: VT | None = None):
self.key = key
self.value = value
self.forward: list[Node[KT, VT]] = []
@@ -50,7 +49,7 @@ class Node(Generic[KT, VT]):
class SkipList(Generic[KT, VT]):
def __init__(self, p: float = 0.5, max_level: int = 16):
- self.head = Node("root", None)
+ self.head: Node[KT, VT] = Node[KT, VT]()
self.level = 0
self.p = p
self.max_level = max_level
@@ -124,7 +123,7 @@ class SkipList(Generic[KT, VT]):
return level
- def _locate_node(self, key) -> tuple[Optional[Node[KT, VT]], list[Node[KT, VT]]]:
+ def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""
:param key: Searched key,
:return: Tuple with searched node (or None if given key is not present)
@@ -206,7 +205,7 @@ class SkipList(Generic[KT, VT]):
if level > self.level:
# After level increase we have to add additional nodes to head.
- for i in range(self.level - 1, level):
+ for _ in range(self.level - 1, level):
update_vector.append(self.head)
self.level = level
@@ -222,7 +221,7 @@ class SkipList(Generic[KT, VT]):
else:
update_node.forward[i] = new_node
- def find(self, key: VT) -> Optional[VT]:
+ def find(self, key: VT) -> VT | None:
"""
:param key: Search key.
:return: Value associated with given key or None if given key is not present.
@@ -389,10 +388,7 @@ def test_delete_doesnt_leave_dead_nodes():
def test_iter_always_yields_sorted_values():
def is_sorted(lst):
- for item, next_item in zip(lst, lst[1:]):
- if next_item < item:
- return False
- return True
+ return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
skip_list = SkipList()
for i in range(10):
@@ -408,7 +404,7 @@ def test_iter_always_yields_sorted_values():
def pytests():
- for i in range(100):
+ for _ in range(100):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
@@ -444,4 +440,7 @@ def main():
if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
main()
diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py
new file mode 100644
index 000000000..62042c4bc
--- /dev/null
+++ b/data_structures/queue/circular_queue_linked_list.py
@@ -0,0 +1,161 @@
+# Implementation of Circular Queue using linked lists
+# https://en.wikipedia.org/wiki/Circular_buffer
+
+from __future__ import annotations
+
+from typing import Any
+
+
+class CircularQueueLinkedList:
+ """
+ Circular FIFO list with the given capacity (default queue length : 6)
+
+ >>> cq = CircularQueueLinkedList(2)
+ >>> cq.enqueue('a')
+ >>> cq.enqueue('b')
+ >>> cq.enqueue('c')
+ Traceback (most recent call last):
+ ...
+ Exception: Full Queue
+ """
+
+ def __init__(self, initial_capacity: int = 6) -> None:
+ self.front: Node | None = None
+ self.rear: Node | None = None
+ self.create_linked_list(initial_capacity)
+
+ def create_linked_list(self, initial_capacity: int) -> None:
+ current_node = Node()
+ self.front = current_node
+ self.rear = current_node
+ previous_node = current_node
+ for _ in range(1, initial_capacity):
+ current_node = Node()
+ previous_node.next = current_node
+ current_node.prev = previous_node
+ previous_node = current_node
+ previous_node.next = self.front
+ self.front.prev = previous_node
+
+ def is_empty(self) -> bool:
+ """
+        Checks whether the queue is empty or not
+ >>> cq = CircularQueueLinkedList()
+ >>> cq.is_empty()
+ True
+ >>> cq.enqueue('a')
+ >>> cq.is_empty()
+ False
+ >>> cq.dequeue()
+ 'a'
+ >>> cq.is_empty()
+ True
+ """
+
+ return (
+ self.front == self.rear
+ and self.front is not None
+ and self.front.data is None
+ )
+
+ def first(self) -> Any | None:
+ """
+ Returns the first element of the queue
+ >>> cq = CircularQueueLinkedList()
+ >>> cq.first()
+ Traceback (most recent call last):
+ ...
+ Exception: Empty Queue
+ >>> cq.enqueue('a')
+ >>> cq.first()
+ 'a'
+ >>> cq.dequeue()
+ 'a'
+ >>> cq.first()
+ Traceback (most recent call last):
+ ...
+ Exception: Empty Queue
+ >>> cq.enqueue('b')
+ >>> cq.enqueue('c')
+ >>> cq.first()
+ 'b'
+ """
+ self.check_can_perform_operation()
+ return self.front.data if self.front else None
+
+ def enqueue(self, data: Any) -> None:
+ """
+ Saves data at the end of the queue
+
+ >>> cq = CircularQueueLinkedList()
+ >>> cq.enqueue('a')
+ >>> cq.enqueue('b')
+ >>> cq.dequeue()
+ 'a'
+ >>> cq.dequeue()
+ 'b'
+ >>> cq.dequeue()
+ Traceback (most recent call last):
+ ...
+ Exception: Empty Queue
+ """
+ if self.rear is None:
+ return
+
+ self.check_is_full()
+ if not self.is_empty():
+ self.rear = self.rear.next
+ if self.rear:
+ self.rear.data = data
+
+ def dequeue(self) -> Any:
+ """
+ Removes and retrieves the first element of the queue
+
+ >>> cq = CircularQueueLinkedList()
+ >>> cq.dequeue()
+ Traceback (most recent call last):
+ ...
+ Exception: Empty Queue
+ >>> cq.enqueue('a')
+ >>> cq.dequeue()
+ 'a'
+ >>> cq.dequeue()
+ Traceback (most recent call last):
+ ...
+ Exception: Empty Queue
+ """
+ self.check_can_perform_operation()
+ if self.rear is None or self.front is None:
+ return None
+ if self.front == self.rear:
+ data = self.front.data
+ self.front.data = None
+ return data
+
+ old_front = self.front
+ self.front = old_front.next
+ data = old_front.data
+ old_front.data = None
+ return data
+
+ def check_can_perform_operation(self) -> None:
+ if self.is_empty():
+ raise Exception("Empty Queue")
+
+ def check_is_full(self) -> None:
+ if self.rear and self.rear.next == self.front:
+ raise Exception("Full Queue")
+
+
+class Node:
+ def __init__(self) -> None:
+ self.data: Any | None = None
+ self.next: Node | None = None
+ self.prev: Node | None = None
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py
index dd003b7c9..44dc863b9 100644
--- a/data_structures/queue/double_ended_queue.py
+++ b/data_structures/queue/double_ended_queue.py
@@ -1,57 +1,434 @@
-# Python code to demonstrate working of
-# extend(), extendleft(), rotate(), reverse()
+"""
+Implementation of double ended queue.
+"""
+from __future__ import annotations
-# importing "collections" for deque operations
-import collections
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Any
-# initializing deque
-de = collections.deque([1, 2, 3])
-# using extend() to add numbers to right end
-# adds 4,5,6 to right end
-de.extend([4, 5, 6])
+class Deque:
+ """
+ Deque data structure.
+ Operations
+ ----------
+ append(val: Any) -> None
+ appendleft(val: Any) -> None
+ extend(iterable: Iterable) -> None
+ extendleft(iterable: Iterable) -> None
+ pop() -> Any
+ popleft() -> Any
+ Observers
+ ---------
+ is_empty() -> bool
+ Attributes
+ ----------
+ _front: _Node
+ front of the deque a.k.a. the first element
+ _back: _Node
+ back of the element a.k.a. the last element
+ _len: int
+ the number of nodes
+ """
-# printing modified deque
-print("The deque after extending deque at end is : ")
-print(de)
+ __slots__ = ("_front", "_back", "_len")
-# using extendleft() to add numbers to left end
-# adds 7,8,9 to right end
-de.extendleft([7, 8, 9])
+ @dataclass
+ class _Node:
+ """
+ Representation of a node.
+ Contains a value and a pointer to the next node as well as to the previous one.
+ """
-# printing modified deque
-print("The deque after extending deque at beginning is : ")
-print(de)
+ val: Any = None
+ next_node: Deque._Node | None = None
+ prev_node: Deque._Node | None = None
-# using rotate() to rotate the deque
-# rotates by 3 to left
-de.rotate(-3)
+ class _Iterator:
+ """
+ Helper class for iteration. Will be used to implement iteration.
+ Attributes
+ ----------
+ _cur: _Node
+ the current node of the iteration.
+ """
-# printing modified deque
-print("The deque after rotating deque is : ")
-print(de)
+ __slots__ = ("_cur",)
-# using reverse() to reverse the deque
-de.reverse()
+ def __init__(self, cur: Deque._Node | None) -> None:
+ self._cur = cur
-# printing modified deque
-print("The deque after reversing deque is : ")
-print(de)
+ def __iter__(self) -> Deque._Iterator:
+ """
+ >>> our_deque = Deque([1, 2, 3])
+ >>> iterator = iter(our_deque)
+ """
+ return self
-# get right-end value and eliminate
-startValue = de.pop()
+ def __next__(self) -> Any:
+ """
+ >>> our_deque = Deque([1, 2, 3])
+ >>> iterator = iter(our_deque)
+ >>> next(iterator)
+ 1
+ >>> next(iterator)
+ 2
+ >>> next(iterator)
+ 3
+ """
+ if self._cur is None:
+ # finished iterating
+ raise StopIteration
+ val = self._cur.val
+ self._cur = self._cur.next_node
-print("The deque after popping value at end is : ")
-print(de)
+ return val
-# get left-end value and eliminate
-endValue = de.popleft()
+ def __init__(self, iterable: Iterable[Any] | None = None) -> None:
+ self._front: Any = None
+ self._back: Any = None
+ self._len: int = 0
-print("The deque after popping value at start is : ")
-print(de)
+ if iterable is not None:
+ # append every value to the deque
+ for val in iterable:
+ self.append(val)
-# eliminate element searched by value
-de.remove(5)
+ def append(self, val: Any) -> None:
+ """
+ Adds val to the end of the deque.
+ Time complexity: O(1)
+ >>> our_deque_1 = Deque([1, 2, 3])
+ >>> our_deque_1.append(4)
+ >>> our_deque_1
+ [1, 2, 3, 4]
+ >>> our_deque_2 = Deque('ab')
+ >>> our_deque_2.append('c')
+ >>> our_deque_2
+ ['a', 'b', 'c']
+ >>> from collections import deque
+ >>> deque_collections_1 = deque([1, 2, 3])
+ >>> deque_collections_1.append(4)
+ >>> deque_collections_1
+ deque([1, 2, 3, 4])
+ >>> deque_collections_2 = deque('ab')
+ >>> deque_collections_2.append('c')
+ >>> deque_collections_2
+ deque(['a', 'b', 'c'])
+ >>> list(our_deque_1) == list(deque_collections_1)
+ True
+ >>> list(our_deque_2) == list(deque_collections_2)
+ True
+ """
+ node = self._Node(val, None, None)
+ if self.is_empty():
+ # front = back
+ self._front = self._back = node
+ self._len = 1
+ else:
+ # connect nodes
+ self._back.next_node = node
+ node.prev_node = self._back
+ self._back = node # assign new back to the new node
-print("The deque after eliminating element searched by value : ")
-print(de)
+ self._len += 1
+
+ # make sure there were no errors
+ assert not self.is_empty(), "Error on appending value."
+
+ def appendleft(self, val: Any) -> None:
+ """
+ Adds val to the beginning of the deque.
+ Time complexity: O(1)
+ >>> our_deque_1 = Deque([2, 3])
+ >>> our_deque_1.appendleft(1)
+ >>> our_deque_1
+ [1, 2, 3]
+ >>> our_deque_2 = Deque('bc')
+ >>> our_deque_2.appendleft('a')
+ >>> our_deque_2
+ ['a', 'b', 'c']
+ >>> from collections import deque
+ >>> deque_collections_1 = deque([2, 3])
+ >>> deque_collections_1.appendleft(1)
+ >>> deque_collections_1
+ deque([1, 2, 3])
+ >>> deque_collections_2 = deque('bc')
+ >>> deque_collections_2.appendleft('a')
+ >>> deque_collections_2
+ deque(['a', 'b', 'c'])
+ >>> list(our_deque_1) == list(deque_collections_1)
+ True
+ >>> list(our_deque_2) == list(deque_collections_2)
+ True
+ """
+ node = self._Node(val, None, None)
+ if self.is_empty():
+ # front = back
+ self._front = self._back = node
+ self._len = 1
+ else:
+ # connect nodes
+ node.next_node = self._front
+ self._front.prev_node = node
+ self._front = node # assign new front to the new node
+
+ self._len += 1
+
+ # make sure there were no errors
+ assert not self.is_empty(), "Error on appending value."
+
+ def extend(self, iterable: Iterable[Any]) -> None:
+ """
+ Appends every value of iterable to the end of the deque.
+ Time complexity: O(n)
+ >>> our_deque_1 = Deque([1, 2, 3])
+ >>> our_deque_1.extend([4, 5])
+ >>> our_deque_1
+ [1, 2, 3, 4, 5]
+ >>> our_deque_2 = Deque('ab')
+ >>> our_deque_2.extend('cd')
+ >>> our_deque_2
+ ['a', 'b', 'c', 'd']
+ >>> from collections import deque
+ >>> deque_collections_1 = deque([1, 2, 3])
+ >>> deque_collections_1.extend([4, 5])
+ >>> deque_collections_1
+ deque([1, 2, 3, 4, 5])
+ >>> deque_collections_2 = deque('ab')
+ >>> deque_collections_2.extend('cd')
+ >>> deque_collections_2
+ deque(['a', 'b', 'c', 'd'])
+ >>> list(our_deque_1) == list(deque_collections_1)
+ True
+ >>> list(our_deque_2) == list(deque_collections_2)
+ True
+ """
+ for val in iterable:
+ self.append(val)
+
+ def extendleft(self, iterable: Iterable[Any]) -> None:
+ """
+ Appends every value of iterable to the beginning of the deque.
+ Time complexity: O(n)
+ >>> our_deque_1 = Deque([1, 2, 3])
+ >>> our_deque_1.extendleft([0, -1])
+ >>> our_deque_1
+ [-1, 0, 1, 2, 3]
+ >>> our_deque_2 = Deque('cd')
+ >>> our_deque_2.extendleft('ba')
+ >>> our_deque_2
+ ['a', 'b', 'c', 'd']
+ >>> from collections import deque
+ >>> deque_collections_1 = deque([1, 2, 3])
+ >>> deque_collections_1.extendleft([0, -1])
+ >>> deque_collections_1
+ deque([-1, 0, 1, 2, 3])
+ >>> deque_collections_2 = deque('cd')
+ >>> deque_collections_2.extendleft('ba')
+ >>> deque_collections_2
+ deque(['a', 'b', 'c', 'd'])
+ >>> list(our_deque_1) == list(deque_collections_1)
+ True
+ >>> list(our_deque_2) == list(deque_collections_2)
+ True
+ """
+ for val in iterable:
+ self.appendleft(val)
+
+ def pop(self) -> Any:
+ """
+ Removes the last element of the deque and returns it.
+ Time complexity: O(1)
+ @returns topop.val: the value of the node to pop.
+ >>> our_deque = Deque([1, 2, 3, 15182])
+ >>> our_popped = our_deque.pop()
+ >>> our_popped
+ 15182
+ >>> our_deque
+ [1, 2, 3]
+ >>> from collections import deque
+ >>> deque_collections = deque([1, 2, 3, 15182])
+ >>> collections_popped = deque_collections.pop()
+ >>> collections_popped
+ 15182
+ >>> deque_collections
+ deque([1, 2, 3])
+ >>> list(our_deque) == list(deque_collections)
+ True
+ >>> our_popped == collections_popped
+ True
+ """
+ # make sure the deque has elements to pop
+ assert not self.is_empty(), "Deque is empty."
+
+ topop = self._back
+ self._back = self._back.prev_node # set new back
+ # drop the last node - python will deallocate memory automatically
+ self._back.next_node = None
+
+ self._len -= 1
+
+ return topop.val
+
+ def popleft(self) -> Any:
+ """
+ Removes the first element of the deque and returns it.
+ Time complexity: O(1)
+ @returns topop.val: the value of the node to pop.
+ >>> our_deque = Deque([15182, 1, 2, 3])
+ >>> our_popped = our_deque.popleft()
+ >>> our_popped
+ 15182
+ >>> our_deque
+ [1, 2, 3]
+ >>> from collections import deque
+ >>> deque_collections = deque([15182, 1, 2, 3])
+ >>> collections_popped = deque_collections.popleft()
+ >>> collections_popped
+ 15182
+ >>> deque_collections
+ deque([1, 2, 3])
+ >>> list(our_deque) == list(deque_collections)
+ True
+ >>> our_popped == collections_popped
+ True
+ """
+ # make sure the deque has elements to pop
+ assert not self.is_empty(), "Deque is empty."
+
+ topop = self._front
+ self._front = self._front.next_node # set new front and drop the first node
+ self._front.prev_node = None
+
+ self._len -= 1
+
+ return topop.val
+
+ def is_empty(self) -> bool:
+ """
+ Checks if the deque is empty.
+ Time complexity: O(1)
+ >>> our_deque = Deque([1, 2, 3])
+ >>> our_deque.is_empty()
+ False
+ >>> our_empty_deque = Deque()
+ >>> our_empty_deque.is_empty()
+ True
+ >>> from collections import deque
+ >>> empty_deque_collections = deque()
+ >>> list(our_empty_deque) == list(empty_deque_collections)
+ True
+ """
+ return self._front is None
+
+ def __len__(self) -> int:
+ """
+ Implements len() function. Returns the length of the deque.
+ Time complexity: O(1)
+ >>> our_deque = Deque([1, 2, 3])
+ >>> len(our_deque)
+ 3
+ >>> our_empty_deque = Deque()
+ >>> len(our_empty_deque)
+ 0
+ >>> from collections import deque
+ >>> deque_collections = deque([1, 2, 3])
+ >>> len(deque_collections)
+ 3
+ >>> empty_deque_collections = deque()
+ >>> len(empty_deque_collections)
+ 0
+ >>> len(our_empty_deque) == len(empty_deque_collections)
+ True
+ """
+ return self._len
+
+ def __eq__(self, other: object) -> bool:
+ """
+ Implements "==" operator. Returns if *self* is equal to *other*.
+ Time complexity: O(n)
+ >>> our_deque_1 = Deque([1, 2, 3])
+ >>> our_deque_2 = Deque([1, 2, 3])
+ >>> our_deque_1 == our_deque_2
+ True
+ >>> our_deque_3 = Deque([1, 2])
+ >>> our_deque_1 == our_deque_3
+ False
+ >>> from collections import deque
+ >>> deque_collections_1 = deque([1, 2, 3])
+ >>> deque_collections_2 = deque([1, 2, 3])
+ >>> deque_collections_1 == deque_collections_2
+ True
+ >>> deque_collections_3 = deque([1, 2])
+ >>> deque_collections_1 == deque_collections_3
+ False
+ >>> (our_deque_1 == our_deque_2) == (deque_collections_1 == deque_collections_2)
+ True
+ >>> (our_deque_1 == our_deque_3) == (deque_collections_1 == deque_collections_3)
+ True
+ """
+
+ if not isinstance(other, Deque):
+ return NotImplemented
+
+ me = self._front
+ oth = other._front
+
+ # if the length of the dequeues are not the same, they are not equal
+ if len(self) != len(other):
+ return False
+
+ while me is not None and oth is not None:
+ # compare every value
+ if me.val != oth.val:
+ return False
+ me = me.next_node
+ oth = oth.next_node
+
+ return True
+
+ def __iter__(self) -> Deque._Iterator:
+ """
+ Implements iteration.
+ Time complexity: O(1)
+ >>> our_deque = Deque([1, 2, 3])
+ >>> for v in our_deque:
+ ... print(v)
+ 1
+ 2
+ 3
+ >>> from collections import deque
+ >>> deque_collections = deque([1, 2, 3])
+ >>> for v in deque_collections:
+ ... print(v)
+ 1
+ 2
+ 3
+ """
+ return Deque._Iterator(self._front)
+
+ def __repr__(self) -> str:
+ """
+ Implements representation of the deque.
+ Represents it as a list, with its values between '[' and ']'.
+ Time complexity: O(n)
+ >>> our_deque = Deque([1, 2, 3])
+ >>> our_deque
+ [1, 2, 3]
+ """
+ values_list = []
+ aux = self._front
+ while aux is not None:
+ # append the values in a list to display
+ values_list.append(aux.val)
+ aux = aux.next_node
+
+ return f"[{', '.join(repr(val) for val in values_list)}]"
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py
index 8526ad311..3af97d28e 100644
--- a/data_structures/queue/linked_queue.py
+++ b/data_structures/queue/linked_queue.py
@@ -1,11 +1,14 @@
""" A Queue using a linked list like structure """
+from __future__ import annotations
+
+from collections.abc import Iterator
from typing import Any
class Node:
def __init__(self, data: Any) -> None:
- self.data = data
- self.next = None
+ self.data: Any = data
+ self.next: Node | None = None
def __str__(self) -> str:
return f"{self.data}"
@@ -19,7 +22,7 @@ class LinkedQueue:
>>> queue.put(5)
>>> queue.put(9)
>>> queue.put('python')
- >>> queue.is_empty();
+ >>> queue.is_empty()
False
>>> queue.get()
5
@@ -39,9 +42,10 @@ class LinkedQueue:
"""
def __init__(self) -> None:
- self.front = self.rear = None
+ self.front: Node | None = None
+ self.rear: Node | None = None
- def __iter__(self):
+ def __iter__(self) -> Iterator[Any]:
node = self.front
while node:
yield node.data
@@ -87,12 +91,12 @@ class LinkedQueue:
"""
return len(self) == 0
- def put(self, item) -> None:
+ def put(self, item: Any) -> None:
"""
>>> queue = LinkedQueue()
>>> queue.get()
Traceback (most recent call last):
- ...
+ ...
IndexError: dequeue from empty queue
>>> for i in range(1, 6):
... queue.put(i)
@@ -112,7 +116,7 @@ class LinkedQueue:
>>> queue = LinkedQueue()
>>> queue.get()
Traceback (most recent call last):
- ...
+ ...
IndexError: dequeue from empty queue
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
diff --git a/data_structures/queue/priority_queue_using_list.py b/data_structures/queue/priority_queue_using_list.py
index c5cf26433..f61b5e8e6 100644
--- a/data_structures/queue/priority_queue_using_list.py
+++ b/data_structures/queue/priority_queue_using_list.py
@@ -58,7 +58,7 @@ class FixedPriorityQueue:
4
>>> fpq.dequeue()
Traceback (most recent call last):
- ...
+ ...
data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty
>>> print(fpq)
Priority 0: []
diff --git a/data_structures/queue/queue_by_list.py b/data_structures/queue/queue_by_list.py
new file mode 100644
index 000000000..4b05be9fd
--- /dev/null
+++ b/data_structures/queue/queue_by_list.py
@@ -0,0 +1,141 @@
+"""Queue represented by a Python list"""
+
+from collections.abc import Iterable
+from typing import Generic, TypeVar
+
+_T = TypeVar("_T")
+
+
+class QueueByList(Generic[_T]):
+ def __init__(self, iterable: Iterable[_T] | None = None) -> None:
+ """
+ >>> QueueByList()
+ Queue(())
+ >>> QueueByList([10, 20, 30])
+ Queue((10, 20, 30))
+ >>> QueueByList((i**2 for i in range(1, 4)))
+ Queue((1, 4, 9))
+ """
+ self.entries: list[_T] = list(iterable or [])
+
+ def __len__(self) -> int:
+ """
+ >>> len(QueueByList())
+ 0
+ >>> from string import ascii_lowercase
+ >>> len(QueueByList(ascii_lowercase))
+ 26
+ >>> queue = QueueByList()
+ >>> for i in range(1, 11):
+ ... queue.put(i)
+ >>> len(queue)
+ 10
+ >>> for i in range(2):
+ ... queue.get()
+ 1
+ 2
+ >>> len(queue)
+ 8
+ """
+
+ return len(self.entries)
+
+ def __repr__(self) -> str:
+ """
+ >>> queue = QueueByList()
+ >>> queue
+ Queue(())
+ >>> str(queue)
+ 'Queue(())'
+ >>> queue.put(10)
+ >>> queue
+ Queue((10,))
+ >>> queue.put(20)
+ >>> queue.put(30)
+ >>> queue
+ Queue((10, 20, 30))
+ """
+
+ return f"Queue({tuple(self.entries)})"
+
+ def put(self, item: _T) -> None:
+ """Put `item` to the Queue
+
+ >>> queue = QueueByList()
+ >>> queue.put(10)
+ >>> queue.put(20)
+ >>> len(queue)
+ 2
+ >>> queue
+ Queue((10, 20))
+ """
+
+ self.entries.append(item)
+
+ def get(self) -> _T:
+ """
+ Get `item` from the Queue
+
+ >>> queue = QueueByList((10, 20, 30))
+ >>> queue.get()
+ 10
+ >>> queue.put(40)
+ >>> queue.get()
+ 20
+ >>> queue.get()
+ 30
+ >>> len(queue)
+ 1
+ >>> queue.get()
+ 40
+ >>> queue.get()
+ Traceback (most recent call last):
+ ...
+ IndexError: Queue is empty
+ """
+
+ if not self.entries:
+ raise IndexError("Queue is empty")
+ return self.entries.pop(0)
+
+ def rotate(self, rotation: int) -> None:
+ """Rotate the items of the Queue `rotation` times
+
+ >>> queue = QueueByList([10, 20, 30, 40])
+ >>> queue
+ Queue((10, 20, 30, 40))
+ >>> queue.rotate(1)
+ >>> queue
+ Queue((20, 30, 40, 10))
+ >>> queue.rotate(2)
+ >>> queue
+ Queue((40, 10, 20, 30))
+ """
+
+ put = self.entries.append
+ get = self.entries.pop
+
+ for _ in range(rotation):
+ put(get(0))
+
+ def get_front(self) -> _T:
+ """Get the front item from the Queue
+
+ >>> queue = QueueByList((10, 20, 30))
+ >>> queue.get_front()
+ 10
+ >>> queue
+ Queue((10, 20, 30))
+ >>> queue.get()
+ 10
+ >>> queue.get_front()
+ 20
+ """
+
+ return self.entries[0]
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/data_structures/queue/queue_by_two_stacks.py b/data_structures/queue/queue_by_two_stacks.py
new file mode 100644
index 000000000..cd62f155a
--- /dev/null
+++ b/data_structures/queue/queue_by_two_stacks.py
@@ -0,0 +1,115 @@
+"""Queue implementation using two stacks"""
+
+from collections.abc import Iterable
+from typing import Generic, TypeVar
+
+_T = TypeVar("_T")
+
+
+class QueueByTwoStacks(Generic[_T]):
+ def __init__(self, iterable: Iterable[_T] | None = None) -> None:
+ """
+ >>> QueueByTwoStacks()
+ Queue(())
+ >>> QueueByTwoStacks([10, 20, 30])
+ Queue((10, 20, 30))
+ >>> QueueByTwoStacks((i**2 for i in range(1, 4)))
+ Queue((1, 4, 9))
+ """
+ self._stack1: list[_T] = list(iterable or [])
+ self._stack2: list[_T] = []
+
+ def __len__(self) -> int:
+ """
+ >>> len(QueueByTwoStacks())
+ 0
+ >>> from string import ascii_lowercase
+ >>> len(QueueByTwoStacks(ascii_lowercase))
+ 26
+ >>> queue = QueueByTwoStacks()
+ >>> for i in range(1, 11):
+ ... queue.put(i)
+ ...
+ >>> len(queue)
+ 10
+ >>> for i in range(2):
+ ... queue.get()
+ 1
+ 2
+ >>> len(queue)
+ 8
+ """
+
+ return len(self._stack1) + len(self._stack2)
+
+ def __repr__(self) -> str:
+ """
+ >>> queue = QueueByTwoStacks()
+ >>> queue
+ Queue(())
+ >>> str(queue)
+ 'Queue(())'
+ >>> queue.put(10)
+ >>> queue
+ Queue((10,))
+ >>> queue.put(20)
+ >>> queue.put(30)
+ >>> queue
+ Queue((10, 20, 30))
+ """
+ return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"
+
+ def put(self, item: _T) -> None:
+ """
+ Put `item` into the Queue
+
+ >>> queue = QueueByTwoStacks()
+ >>> queue.put(10)
+ >>> queue.put(20)
+ >>> len(queue)
+ 2
+ >>> queue
+ Queue((10, 20))
+ """
+
+ self._stack1.append(item)
+
+ def get(self) -> _T:
+ """
+ Get `item` from the Queue
+
+ >>> queue = QueueByTwoStacks((10, 20, 30))
+ >>> queue.get()
+ 10
+ >>> queue.put(40)
+ >>> queue.get()
+ 20
+ >>> queue.get()
+ 30
+ >>> len(queue)
+ 1
+ >>> queue.get()
+ 40
+ >>> queue.get()
+ Traceback (most recent call last):
+ ...
+ IndexError: Queue is empty
+ """
+
+ # To reduce number of attribute look-ups in `while` loop.
+ stack1_pop = self._stack1.pop
+ stack2_append = self._stack2.append
+
+ if not self._stack2:
+ while self._stack1:
+ stack2_append(stack1_pop())
+
+ if not self._stack2:
+ raise IndexError("Queue is empty")
+ return self._stack2.pop()
+
+
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py
deleted file mode 100644
index 485cf0b6f..000000000
--- a/data_structures/queue/queue_on_list.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Queue represented by a Python list"""
-
-
-class Queue:
- def __init__(self):
- self.entries = []
- self.length = 0
- self.front = 0
-
- def __str__(self):
- printed = "<" + str(self.entries)[1:-1] + ">"
- return printed
-
- """Enqueues {@code item}
- @param item
- item to enqueue"""
-
- def put(self, item):
- self.entries.append(item)
- self.length = self.length + 1
-
- """Dequeues {@code item}
- @requirement: |self.length| > 0
- @return dequeued
- item that was dequeued"""
-
- def get(self):
- self.length = self.length - 1
- dequeued = self.entries[self.front]
- # self.front-=1
- # self.entries = self.entries[self.front:]
- self.entries = self.entries[1:]
- return dequeued
-
- """Rotates the queue {@code rotation} times
- @param rotation
- number of times to rotate queue"""
-
- def rotate(self, rotation):
- for i in range(rotation):
- self.put(self.get())
-
- """Enqueues {@code item}
- @return item at front of self.entries"""
-
- def get_front(self):
- return self.entries[0]
-
- """Returns the length of this.entries"""
-
- def size(self):
- return self.length
diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py
index 7fa2fb256..d98451000 100644
--- a/data_structures/queue/queue_on_pseudo_stack.py
+++ b/data_structures/queue/queue_on_pseudo_stack.py
@@ -1,4 +1,5 @@
"""Queue represented by a pseudo stack (represented by a list with pop and append)"""
+from typing import Any
class Queue:
@@ -14,7 +15,7 @@ class Queue:
@param item
item to enqueue"""
- def put(self, item):
+ def put(self, item: Any) -> None:
self.stack.append(item)
self.length = self.length + 1
@@ -23,7 +24,7 @@ class Queue:
@return dequeued
item that was dequeued"""
- def get(self):
+ def get(self) -> Any:
self.rotate(1)
dequeued = self.stack[self.length - 1]
self.stack = self.stack[:-1]
@@ -35,8 +36,8 @@ class Queue:
@param rotation
number of times to rotate queue"""
- def rotate(self, rotation):
- for i in range(rotation):
+ def rotate(self, rotation: int) -> None:
+ for _ in range(rotation):
temp = self.stack[0]
self.stack = self.stack[1:]
self.put(temp)
@@ -45,7 +46,7 @@ class Queue:
"""Reports item at the front of self
@return item at front of self.stack"""
- def front(self):
+ def front(self) -> Any:
front = self.get()
self.put(front)
self.rotate(self.length - 1)
@@ -53,5 +54,5 @@ class Queue:
"""Returns the length of this.stack"""
- def size(self):
+ def size(self) -> int:
return self.length
diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py
index 674f7ea43..3c036c220 100644
--- a/data_structures/stacks/balanced_parentheses.py
+++ b/data_structures/stacks/balanced_parentheses.py
@@ -14,7 +14,7 @@ def balanced_parentheses(parentheses: str) -> bool:
>>> balanced_parentheses("")
True
"""
- stack = Stack()
+ stack: Stack[str] = Stack()
bracket_pairs = {"(": ")", "[": "]", "{": "}"}
for bracket in parentheses:
if bracket in bracket_pairs:
diff --git a/data_structures/stacks/dijkstras_two_stack_algorithm.py b/data_structures/stacks/dijkstras_two_stack_algorithm.py
index 8b4668f9f..976c9a53c 100644
--- a/data_structures/stacks/dijkstras_two_stack_algorithm.py
+++ b/data_structures/stacks/dijkstras_two_stack_algorithm.py
@@ -10,7 +10,7 @@ such as: (5 + ((4 * 2) * (2 + 3)))
THESE ARE THE ALGORITHM'S RULES:
RULE 1: Scan the expression from left to right. When an operand is encountered,
- push it onto the the operand stack.
+ push it onto the operand stack.
RULE 2: When an operator is encountered in the expression,
push it onto the operator stack.
@@ -51,8 +51,8 @@ def dijkstras_two_stack_algorithm(equation: str) -> int:
"""
operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
- operand_stack = Stack()
- operator_stack = Stack()
+ operand_stack: Stack[int] = Stack()
+ operator_stack: Stack[str] = Stack()
for i in equation:
if i.isdigit():
diff --git a/data_structures/stacks/evaluate_postfix_notations.py b/data_structures/stacks/evaluate_postfix_notations.py
index 2a4baf9d6..51ea353b1 100644
--- a/data_structures/stacks/evaluate_postfix_notations.py
+++ b/data_structures/stacks/evaluate_postfix_notations.py
@@ -1,5 +1,3 @@
-from typing import Any, List
-
"""
The Reverse Polish Nation also known as Polish postfix notation
or simply postfix notation.
@@ -8,6 +6,9 @@ Classic examples of simple stack implementations
Valid operators are +, -, *, /.
Each operand may be an integer or another expression.
"""
+from __future__ import annotations
+
+from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
@@ -23,7 +24,7 @@ def evaluate_postfix(postfix_notation: list) -> int:
return 0
operations = {"+", "-", "*", "/"}
- stack: List[Any] = []
+ stack: list[Any] = []
for token in postfix_notation:
if token in operations:
diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py
index dedba8479..e69706193 100644
--- a/data_structures/stacks/infix_to_postfix_conversion.py
+++ b/data_structures/stacks/infix_to_postfix_conversion.py
@@ -4,9 +4,26 @@ https://en.wikipedia.org/wiki/Reverse_Polish_notation
https://en.wikipedia.org/wiki/Shunting-yard_algorithm
"""
+from typing import Literal
+
from .balanced_parentheses import balanced_parentheses
from .stack import Stack
+PRECEDENCES: dict[str, int] = {
+ "+": 1,
+ "-": 1,
+ "*": 2,
+ "/": 2,
+ "^": 3,
+}
+ASSOCIATIVITIES: dict[str, Literal["LR", "RL"]] = {
+ "+": "LR",
+ "-": "LR",
+ "*": "LR",
+ "/": "LR",
+ "^": "RL",
+}
+
def precedence(char: str) -> int:
"""
@@ -14,14 +31,22 @@ def precedence(char: str) -> int:
order of operation.
https://en.wikipedia.org/wiki/Order_of_operations
"""
- return {"+": 1, "-": 1, "*": 2, "/": 2, "^": 3}.get(char, -1)
+ return PRECEDENCES.get(char, -1)
+
+
+def associativity(char: str) -> Literal["LR", "RL"]:
+ """
+ Return the associativity of the operator `char`.
+ https://en.wikipedia.org/wiki/Operator_associativity
+ """
+ return ASSOCIATIVITIES[char]
def infix_to_postfix(expression_str: str) -> str:
"""
>>> infix_to_postfix("(1*(2+3)+4))")
Traceback (most recent call last):
- ...
+ ...
ValueError: Mismatched parentheses
>>> infix_to_postfix("")
''
@@ -35,10 +60,12 @@ def infix_to_postfix(expression_str: str) -> str:
'a b c * + d e * f + g * +'
>>> infix_to_postfix("x^y/(5*z)+2")
'x y ^ 5 z * / 2 +'
+ >>> infix_to_postfix("2^3^2")
+ '2 3 2 ^ ^'
"""
if not balanced_parentheses(expression_str):
raise ValueError("Mismatched parentheses")
- stack = Stack()
+ stack: Stack[str] = Stack()
postfix = []
for char in expression_str:
if char.isalpha() or char.isdigit():
@@ -50,9 +77,26 @@ def infix_to_postfix(expression_str: str) -> str:
postfix.append(stack.pop())
stack.pop()
else:
- while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):
+ while True:
+ if stack.is_empty():
+ stack.push(char)
+ break
+
+ char_precedence = precedence(char)
+ tos_precedence = precedence(stack.peek())
+
+ if char_precedence > tos_precedence:
+ stack.push(char)
+ break
+ if char_precedence < tos_precedence:
+ postfix.append(stack.pop())
+ continue
+ # Precedences are equal
+ if associativity(char) == "RL":
+ stack.push(char)
+ break
postfix.append(stack.pop())
- stack.push(char)
+
while not stack.is_empty():
postfix.append(stack.pop())
return " ".join(postfix)
diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py
index d3dc9e3e9..6f6d5d57e 100644
--- a/data_structures/stacks/infix_to_prefix_conversion.py
+++ b/data_structures/stacks/infix_to_prefix_conversion.py
@@ -15,9 +15,9 @@ Enter an Infix Equation = a + b ^c
"""
-def infix_2_postfix(Infix):
- Stack = []
- Postfix = []
+def infix_2_postfix(infix):
+ stack = []
+ post_fix = []
priority = {
"^": 3,
"*": 2,
@@ -26,7 +26,7 @@ def infix_2_postfix(Infix):
"+": 1,
"-": 1,
} # Priority of each operator
- print_width = len(Infix) if (len(Infix) > 7) else 7
+ print_width = len(infix) if (len(infix) > 7) else 7
# Print table header for output
print(
@@ -37,52 +37,52 @@ def infix_2_postfix(Infix):
)
print("-" * (print_width * 3 + 7))
- for x in Infix:
+ for x in infix:
if x.isalpha() or x.isdigit():
- Postfix.append(x) # if x is Alphabet / Digit, add it to Postfix
+ post_fix.append(x) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
- Stack.append(x) # if x is "(" push to Stack
+ stack.append(x) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
- while Stack[-1] != "(":
- Postfix.append(Stack.pop()) # Pop stack & add the content to Postfix
- Stack.pop()
+ while stack[-1] != "(":
+ post_fix.append(stack.pop()) # Pop stack & add the content to Postfix
+ stack.pop()
else:
- if len(Stack) == 0:
- Stack.append(x) # If stack is empty, push x to stack
+ if len(stack) == 0:
+ stack.append(x) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
- while len(Stack) > 0 and priority[x] <= priority[Stack[-1]]:
- Postfix.append(Stack.pop()) # pop stack & add to Postfix
- Stack.append(x) # push x to stack
+ while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
+ post_fix.append(stack.pop()) # pop stack & add to Postfix
+ stack.append(x) # push x to stack
print(
x.center(8),
- ("".join(Stack)).ljust(print_width),
- ("".join(Postfix)).ljust(print_width),
+ ("".join(stack)).ljust(print_width),
+ ("".join(post_fix)).ljust(print_width),
sep=" | ",
) # Output in tabular format
- while len(Stack) > 0: # while stack is not empty
- Postfix.append(Stack.pop()) # pop stack & add to Postfix
+ while len(stack) > 0: # while stack is not empty
+ post_fix.append(stack.pop()) # pop stack & add to Postfix
print(
" ".center(8),
- ("".join(Stack)).ljust(print_width),
- ("".join(Postfix)).ljust(print_width),
+ ("".join(stack)).ljust(print_width),
+ ("".join(post_fix)).ljust(print_width),
sep=" | ",
) # Output in tabular format
- return "".join(Postfix) # return Postfix as str
+ return "".join(post_fix) # return Postfix as str
-def infix_2_prefix(Infix):
- Infix = list(Infix[::-1]) # reverse the infix equation
+def infix_2_prefix(infix):
+ infix = list(infix[::-1]) # reverse the infix equation
- for i in range(len(Infix)):
- if Infix[i] == "(":
- Infix[i] = ")" # change "(" to ")"
- elif Infix[i] == ")":
- Infix[i] = "(" # change ")" to "("
+ for i in range(len(infix)):
+ if infix[i] == "(":
+ infix[i] = ")" # change "(" to ")"
+ elif infix[i] == ")":
+ infix[i] = "(" # change ")" to "("
- return (infix_2_postfix("".join(Infix)))[
+ return (infix_2_postfix("".join(infix)))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py
index d8c7ed173..7d76d1f47 100644
--- a/data_structures/stacks/next_greater_element.py
+++ b/data_structures/stacks/next_greater_element.py
@@ -1,8 +1,10 @@
+from __future__ import annotations
+
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
-def next_greatest_element_slow(arr: list) -> list:
+def next_greatest_element_slow(arr: list[float]) -> list[float]:
"""
Get the Next Greatest Element (NGE) for all elements in a list.
Maximum element present after the current one which is also greater than the
@@ -10,18 +12,21 @@ def next_greatest_element_slow(arr: list) -> list:
>>> next_greatest_element_slow(arr) == expect
True
"""
+
result = []
- for i in range(0, len(arr), 1):
- next = -1
- for j in range(i + 1, len(arr), 1):
+ arr_size = len(arr)
+
+ for i in range(arr_size):
+ next_element: float = -1
+ for j in range(i + 1, arr_size):
if arr[i] < arr[j]:
- next = arr[j]
+ next_element = arr[j]
break
- result.append(next)
+ result.append(next_element)
return result
-def next_greatest_element_fast(arr: list) -> list:
+def next_greatest_element_fast(arr: list[float]) -> list[float]:
"""
Like next_greatest_element_slow() but changes the loops to use
enumerate() instead of range(len()) for the outer loop and
@@ -31,16 +36,16 @@ def next_greatest_element_fast(arr: list) -> list:
"""
result = []
for i, outer in enumerate(arr):
- next = -1
+ next_item: float = -1
for inner in arr[i + 1 :]:
if outer < inner:
- next = inner
+ next_item = inner
break
- result.append(next)
+ result.append(next_item)
return result
-def next_greatest_element(arr: list) -> list:
+def next_greatest_element(arr: list[float]) -> list[float]:
"""
Get the Next Greatest Element (NGE) for all elements in a list.
Maximum element present after the current one which is also greater than the
@@ -53,21 +58,19 @@ def next_greatest_element(arr: list) -> list:
>>> next_greatest_element(arr) == expect
True
"""
- stack = []
- result = [-1] * len(arr)
+ arr_size = len(arr)
+ stack: list[float] = []
+ result: list[float] = [-1] * arr_size
- for index in reversed(range(len(arr))):
- if len(stack):
+ for index in reversed(range(arr_size)):
+ if stack:
while stack[-1] <= arr[index]:
stack.pop()
- if len(stack) == 0:
+ if not stack:
break
-
- if len(stack) != 0:
+ if stack:
result[index] = stack[-1]
-
stack.append(arr[index])
-
return result
diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py
index 574acac71..28128f82e 100644
--- a/data_structures/stacks/postfix_evaluation.py
+++ b/data_structures/stacks/postfix_evaluation.py
@@ -20,49 +20,49 @@ Enter a Postfix Equation (space separated) = 5 6 9 * +
import operator as op
-def Solve(Postfix):
- Stack = []
- Div = lambda x, y: int(x / y) # noqa: E731 integer division operation
- Opr = {
+def solve(post_fix):
+ stack = []
+ div = lambda x, y: int(x / y) # noqa: E731 integer division operation
+ opr = {
"^": op.pow,
"*": op.mul,
- "/": Div,
+ "/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
- print("-" * (30 + len(Postfix)))
+ print("-" * (30 + len(post_fix)))
- for x in Postfix:
+ for x in post_fix:
if x.isdigit(): # if x in digit
- Stack.append(x) # append x to stack
+ stack.append(x) # append x to stack
# output in tabular format
- print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ")
+ print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
else:
- B = Stack.pop() # pop stack
+ b = stack.pop() # pop stack
# output in tabular format
- print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ")
+ print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
- A = Stack.pop() # pop stack
+ a = stack.pop() # pop stack
# output in tabular format
- print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ")
+ print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
- Stack.append(
- str(Opr[x](int(A), int(B)))
+ stack.append(
+ str(opr[x](int(a), int(b)))
) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8),
- ("push(" + A + x + B + ")").ljust(12),
- ",".join(Stack),
+ ("push(" + a + x + b + ")").ljust(12),
+ ",".join(stack),
sep=" | ",
)
- return int(Stack[0])
+ return int(stack[0])
if __name__ == "__main__":
Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
- print("\n\tResult = ", Solve(Postfix))
+ print("\n\tResult = ", solve(Postfix))
diff --git a/data_structures/stacks/prefix_evaluation.py b/data_structures/stacks/prefix_evaluation.py
index 00df2c1e6..f48eca23d 100644
--- a/data_structures/stacks/prefix_evaluation.py
+++ b/data_structures/stacks/prefix_evaluation.py
@@ -36,7 +36,6 @@ def evaluate(expression):
# iterate over the string in reverse order
for c in expression.split()[::-1]:
-
# push operand to stack
if is_operand(c):
stack.append(int(c))
diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py
index 245d39b32..a14f4648a 100644
--- a/data_structures/stacks/stack.py
+++ b/data_structures/stacks/stack.py
@@ -1,11 +1,19 @@
-from typing import List
+from __future__ import annotations
+
+from typing import Generic, TypeVar
+
+T = TypeVar("T")
class StackOverflowError(BaseException):
pass
-class Stack:
+class StackUnderflowError(BaseException):
+ pass
+
+
+class Stack(Generic[T]):
"""A stack is an abstract data type that serves as a collection of
elements with two principal operations: push() and pop(). push() adds an
element to the top of the stack, and pop() removes an element from the top
@@ -15,7 +23,7 @@ class Stack:
"""
def __init__(self, limit: int = 10):
- self.stack: List[int] = []
+ self.stack: list[T] = []
self.limit = limit
def __bool__(self) -> bool:
@@ -24,18 +32,36 @@ class Stack:
def __str__(self) -> str:
return str(self.stack)
- def push(self, data):
+ def push(self, data: T) -> None:
"""Push an element to the top of the stack."""
if len(self.stack) >= self.limit:
raise StackOverflowError
self.stack.append(data)
- def pop(self):
- """Pop an element off of the top of the stack."""
+ def pop(self) -> T:
+ """
+ Pop an element off of the top of the stack.
+
+ >>> Stack().pop()
+ Traceback (most recent call last):
+ ...
+ data_structures.stacks.stack.StackUnderflowError
+ """
+ if not self.stack:
+ raise StackUnderflowError
return self.stack.pop()
- def peek(self):
- """Peek at the top-most element of the stack."""
+ def peek(self) -> T:
+ """
+ Peek at the top-most element of the stack.
+
+ >>> Stack().pop()
+ Traceback (most recent call last):
+ ...
+ data_structures.stacks.stack.StackUnderflowError
+ """
+ if not self.stack:
+ raise StackUnderflowError
return self.stack[-1]
def is_empty(self) -> bool:
@@ -49,7 +75,7 @@ class Stack:
"""Return the size of the stack."""
return len(self.stack)
- def __contains__(self, item) -> bool:
+ def __contains__(self, item: T) -> bool:
"""Check if item is in stack"""
return item in self.stack
@@ -58,7 +84,7 @@ def test_stack() -> None:
"""
>>> test_stack()
"""
- stack = Stack(10)
+ stack: Stack[int] = Stack(10)
assert bool(stack) is False
assert stack.is_empty() is True
assert stack.is_full() is False
@@ -66,23 +92,23 @@ def test_stack() -> None:
try:
_ = stack.pop()
- assert False # This should not happen
- except IndexError:
+ raise AssertionError # This should not happen
+ except StackUnderflowError:
assert True # This should happen
try:
_ = stack.peek()
- assert False # This should not happen
- except IndexError:
+ raise AssertionError # This should not happen
+ except StackUnderflowError:
assert True # This should happen
for i in range(10):
assert stack.size() == i
stack.push(i)
- assert bool(stack) is True
- assert stack.is_empty() is False
- assert stack.is_full() is True
+ assert bool(stack)
+ assert not stack.is_empty()
+ assert stack.is_full()
assert str(stack) == str(list(range(10)))
assert stack.pop() == 9
assert stack.peek() == 8
@@ -92,11 +118,11 @@ def test_stack() -> None:
try:
stack.push(200)
- assert False # This should not happen
+ raise AssertionError # This should not happen
except StackOverflowError:
assert True # This should happen
- assert stack.is_empty() is False
+ assert not stack.is_empty()
assert stack.size() == 10
assert 5 in stack
diff --git a/data_structures/stacks/stack_using_dll.py b/data_structures/stacks/stack_with_doubly_linked_list.py
similarity index 72%
rename from data_structures/stacks/stack_using_dll.py
rename to data_structures/stacks/stack_with_doubly_linked_list.py
index 75e0cd206..50c5236e0 100644
--- a/data_structures/stacks/stack_using_dll.py
+++ b/data_structures/stacks/stack_with_doubly_linked_list.py
@@ -1,15 +1,21 @@
# A complete working Python program to demonstrate all
# stack operations using a doubly linked list
+from __future__ import annotations
-class Node:
- def __init__(self, data):
+from typing import Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class Node(Generic[T]):
+ def __init__(self, data: T):
self.data = data # Assign data
- self.next = None # Initialize next as null
- self.prev = None # Initialize prev as null
+ self.next: Node[T] | None = None # Initialize next as null
+ self.prev: Node[T] | None = None # Initialize prev as null
-class Stack:
+class Stack(Generic[T]):
"""
>>> stack = Stack()
>>> stack.is_empty()
@@ -35,10 +41,10 @@ class Stack:
2->1->0->
"""
- def __init__(self):
- self.head = None
+ def __init__(self) -> None:
+ self.head: Node[T] | None = None
- def push(self, data):
+ def push(self, data: T) -> None:
"""add a Node to the stack"""
if self.head is None:
self.head = Node(data)
@@ -49,21 +55,23 @@ class Stack:
new_node.prev = None
self.head = new_node
- def pop(self):
+ def pop(self) -> T | None:
"""pop the top element off the stack"""
if self.head is None:
return None
else:
+ assert self.head is not None
temp = self.head.data
self.head = self.head.next
- self.head.prev = None
+ if self.head is not None:
+ self.head.prev = None
return temp
- def top(self):
+ def top(self) -> T | None:
"""return the top element of the stack"""
- return self.head.data
+ return self.head.data if self.head is not None else None
- def __len__(self):
+ def __len__(self) -> int:
temp = self.head
count = 0
while temp is not None:
@@ -71,10 +79,10 @@ class Stack:
temp = temp.next
return count
- def is_empty(self):
+ def is_empty(self) -> bool:
return self.head is None
- def print_stack(self):
+ def print_stack(self) -> None:
print("stack elements are:")
temp = self.head
while temp is not None:
@@ -84,9 +92,8 @@ class Stack:
# Code execution starts here
if __name__ == "__main__":
-
# Start with the empty stack
- stack = Stack()
+ stack: Stack[int] = Stack()
# Insert 4 at the beginning. So stack becomes 4->None
print("Stack operations using Doubly LinkedList")
diff --git a/data_structures/stacks/linked_stack.py b/data_structures/stacks/stack_with_singly_linked_list.py
similarity index 82%
rename from data_structures/stacks/linked_stack.py
rename to data_structures/stacks/stack_with_singly_linked_list.py
index 0b9c9d45e..f5ce83b86 100644
--- a/data_structures/stacks/linked_stack.py
+++ b/data_structures/stacks/stack_with_singly_linked_list.py
@@ -1,17 +1,22 @@
""" A Stack using a linked list like structure """
-from typing import Any, Optional
+from __future__ import annotations
+
+from collections.abc import Iterator
+from typing import Generic, TypeVar
+
+T = TypeVar("T")
-class Node:
- def __init__(self, data):
+class Node(Generic[T]):
+ def __init__(self, data: T):
self.data = data
- self.next = None
+ self.next: Node[T] | None = None
- def __str__(self):
+ def __str__(self) -> str:
return f"{self.data}"
-class LinkedStack:
+class LinkedStack(Generic[T]):
"""
Linked List Stack implementing push (to top),
pop (from top) and is_empty
@@ -42,15 +47,15 @@ class LinkedStack:
"""
def __init__(self) -> None:
- self.top: Optional[Node] = None
+ self.top: Node[T] | None = None
- def __iter__(self):
+ def __iter__(self) -> Iterator[T]:
node = self.top
while node:
yield node.data
node = node.next
- def __str__(self):
+ def __str__(self) -> str:
"""
>>> stack = LinkedStack()
>>> stack.push("c")
@@ -61,7 +66,7 @@ class LinkedStack:
"""
return "->".join([str(item) for item in self])
- def __len__(self):
+ def __len__(self) -> int:
"""
>>> stack = LinkedStack()
>>> len(stack) == 0
@@ -85,7 +90,7 @@ class LinkedStack:
"""
return self.top is None
- def push(self, item: Any) -> None:
+ def push(self, item: T) -> None:
"""
>>> stack = LinkedStack()
>>> stack.push("Python")
@@ -99,12 +104,12 @@ class LinkedStack:
node.next = self.top
self.top = node
- def pop(self) -> Any:
+ def pop(self) -> T:
"""
>>> stack = LinkedStack()
>>> stack.pop()
Traceback (most recent call last):
- ...
+ ...
IndexError: pop from empty stack
>>> stack.push("c")
>>> stack.push("b")
@@ -123,7 +128,7 @@ class LinkedStack:
self.top = self.top.next
return pop_node.data
- def peek(self) -> Any:
+ def peek(self) -> T:
"""
>>> stack = LinkedStack()
>>> stack.push("Java")
diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py
index cc2adfdd6..de423c1eb 100644
--- a/data_structures/stacks/stock_span_problem.py
+++ b/data_structures/stacks/stock_span_problem.py
@@ -8,19 +8,17 @@ on the current day is less than or equal to its price on the given day.
"""
-def calculateSpan(price, S):
-
+def calculation_span(price, s):
n = len(price)
# Create a stack and push index of fist element to it
st = []
st.append(0)
# Span value of first element is always 1
- S[0] = 1
+ s[0] = 1
# Calculate span values for rest of the elements
for i in range(1, n):
-
# Pop elements from stack while stack is not
# empty and top of stack is smaller than price[i]
while len(st) > 0 and price[st[0]] <= price[i]:
@@ -30,14 +28,14 @@ def calculateSpan(price, S):
# than all elements on left of it, i.e. price[0],
# price[1], ..price[i-1]. Else the price[i] is
# greater than elements after top of stack
- S[i] = i + 1 if len(st) <= 0 else (i - st[0])
+ s[i] = i + 1 if len(st) <= 0 else (i - st[0])
# Push this element to stack
st.append(i)
# A utility function to print elements of array
-def printArray(arr, n):
+def print_array(arr, n):
for i in range(0, n):
print(arr[i], end=" ")
@@ -47,7 +45,7 @@ price = [10, 4, 5, 90, 120, 80]
S = [0 for i in range(len(price) + 1)]
# Fill the span values in array S[]
-calculateSpan(price, S)
+calculation_span(price, S)
# Print the calculated span values
-printArray(S, len(price))
+print_array(S, len(price))
diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py
new file mode 100644
index 000000000..fadc50cb4
--- /dev/null
+++ b/data_structures/trie/radix_tree.py
@@ -0,0 +1,230 @@
+"""
+A Radix Tree is a data structure that represents a space-optimized
+trie (prefix tree) in which each node that is the only child is merged
+with its parent [https://en.wikipedia.org/wiki/Radix_tree]
+"""
+
+
class RadixNode:
    """One node of a radix tree.

    Each outgoing edge is keyed by the first character of the child's
    prefix, and a node is a leaf iff the tree contains the word spelled
    out by the prefixes on the path to it.
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the prefix of the node and a word

        Args:
            word (str): word to compare

        Returns:
            (str, str, str): common substring, remaining prefix, remaining word

        >>> RadixNode("myprefix").match("mystring")
        ('my', 'prefix', 'string')
        """
        # x counts how many leading characters self.prefix and word share.
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break

            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree

        Args:
            words (list[str]): list of words

        >>> RadixNode("myprefix").insert_many(["mystring", "hello"])
        """
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree

        NOTE: assumes a non-empty word; ``word[0]`` raises IndexError for "".

        Args:
            word (str): word to insert

        >>> RadixNode("myprefix").insert("mystring")

        >>> root = RadixNode()
        >>> root.insert_many(['myprefix', 'myprefixA', 'myprefixAA'])
        >>> root.print_tree()
        - myprefix (leaf)
        -- A (leaf)
        --- A (leaf)
        """
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word and not self.is_leaf:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                # Split the edge: the new intermediate node keeps the shared
                # prefix and the old child hangs below it.
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Returns if the word is on the tree

        NOTE: assumes a non-empty word; ``word[0]`` raises IndexError for "".

        Args:
            word (str): word to check

        Returns:
            bool: True if the word appears on the tree

        >>> RadixNode("myprefix").find("mystring")
        False
        """
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if it exists

        NOTE: assumes a non-empty word; ``word[0]`` raises IndexError for "".

        Args:
            word (str): word to be deleted

        Returns:
            bool: True if the word was found and deleted. False if word is not found

        >>> RadixNode("myprefix").delete("mystring")
        False
        """
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = next(iter(self.nodes.values()))
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = next(iter(incoming_node.nodes.values()))
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree

        Args:
            height (int, optional): Height of the printed node
        """
        # The root (empty prefix) is skipped; children print one dash deeper.
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
+
+
def test_trie() -> bool:
    """Build a small tree and check find/delete behaviour end to end."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    for word in words:
        root.insert(word)

    for word in words:
        assert root.find(word)
    for missing in ("bandanas", "apps"):
        assert not root.find(missing)

    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True
+
+
def pytests() -> None:
    """Entry point for the doctest/pytest self-check."""
    passed = test_trie()
    assert passed
+
+
def main() -> None:
    """Demonstrate the radix tree on a small word list.

    >>> pytests()
    """
    words = "banana bananas bandanas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
+
+
if __name__ == "__main__":
    # Run the demo when executed as a script.
    main()
diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py
index 6582be24f..46b93a499 100644
--- a/data_structures/trie/trie.py
+++ b/data_structures/trie/trie.py
@@ -7,11 +7,11 @@ longest word)) lookup time making it an optimal approach when space is not an is
class TrieNode:
- def __init__(self):
- self.nodes = dict() # Mapping from char to TrieNode
+ def __init__(self) -> None:
+ self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode
self.is_leaf = False
- def insert_many(self, words: [str]):
+ def insert_many(self, words: list[str]) -> None:
"""
Inserts a list of words into the Trie
:param words: list of string words
@@ -20,7 +20,7 @@ class TrieNode:
for word in words:
self.insert(word)
- def insert(self, word: str):
+ def insert(self, word: str) -> None:
"""
Inserts a word into the Trie
:param word: word to be inserted
@@ -46,14 +46,14 @@ class TrieNode:
curr = curr.nodes[char]
return curr.is_leaf
- def delete(self, word: str):
+ def delete(self, word: str) -> None:
"""
Deletes a word in a Trie
:param word: word to delete
:return: None
"""
- def _delete(curr: TrieNode, word: str, index: int):
+ def _delete(curr: TrieNode, word: str, index: int) -> bool:
if index == len(word):
# If word does not exist
if not curr.is_leaf:
@@ -75,7 +75,7 @@ class TrieNode:
_delete(self, word, 0)
-def print_words(node: TrieNode, word: str):
+def print_words(node: TrieNode, word: str) -> None:
"""
Prints all the words in a Trie
:param node: root node of Trie
@@ -89,7 +89,7 @@ def print_words(node: TrieNode, word: str):
print_words(value, word + key)
-def test_trie():
+def test_trie() -> bool:
words = "banana bananas bandana band apple all beast".split()
root = TrieNode()
root.insert_many(words)
@@ -112,11 +112,11 @@ def print_results(msg: str, passes: bool) -> None:
print(str(msg), "works!" if passes else "doesn't work :(")
-def pytests():
+def pytests() -> None:
assert test_trie()
-def main():
+def main() -> None:
"""
>>> pytests()
"""
diff --git a/digital_image_processing/change_contrast.py b/digital_image_processing/change_contrast.py
index 6a1504002..7e4969470 100644
--- a/digital_image_processing/change_contrast.py
+++ b/digital_image_processing/change_contrast.py
@@ -4,8 +4,8 @@ Changing contrast with PIL
This algorithm is used in
https://noivce.pythonanywhere.com/ Python web app.
-python/black: True
-flake8 : True
+psf/black: True
+ruff : True
"""
from PIL import Image
diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py
index 2bf0bbe03..35aedc16d 100644
--- a/digital_image_processing/dithering/burkes.py
+++ b/digital_image_processing/dithering/burkes.py
@@ -21,7 +21,8 @@ class Burkes:
self.max_threshold = int(self.get_greyscale(255, 255, 255))
if not self.min_threshold < threshold < self.max_threshold:
- raise ValueError(f"Factor value should be from 0 to {self.max_threshold}")
+ msg = f"Factor value should be from 0 to {self.max_threshold}"
+ raise ValueError(msg)
self.input_img = input_img
self.threshold = threshold
@@ -38,9 +39,18 @@ class Burkes:
def get_greyscale(cls, blue: int, green: int, red: int) -> float:
"""
>>> Burkes.get_greyscale(3, 4, 5)
- 3.753
+ 4.185
+ >>> Burkes.get_greyscale(0, 0, 0)
+ 0.0
+ >>> Burkes.get_greyscale(255, 255, 255)
+ 255.0
"""
- return 0.114 * blue + 0.587 * green + 0.2126 * red
+ """
+ Formula from https://en.wikipedia.org/wiki/HSL_and_HSV
+ cf Lightness section, and Fig 13c.
+ We use the first of four possible.
+ """
+ return 0.114 * blue + 0.587 * green + 0.299 * red
def process(self) -> None:
for y in range(self.height):
@@ -48,10 +58,10 @@ class Burkes:
greyscale = int(self.get_greyscale(*self.input_img[y][x]))
if self.threshold > greyscale + self.error_table[y][x]:
self.output_img[y][x] = (0, 0, 0)
- current_error = greyscale + self.error_table[x][y]
+ current_error = greyscale + self.error_table[y][x]
else:
self.output_img[y][x] = (255, 255, 255)
- current_error = greyscale + self.error_table[x][y] - 255
+ current_error = greyscale + self.error_table[y][x] - 255
"""
Burkes error propagation (`*` is current pixel):
diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py
index 295b4d825..f8cbeedb3 100644
--- a/digital_image_processing/edge_detection/canny.py
+++ b/digital_image_processing/edge_detection/canny.py
@@ -18,105 +18,126 @@ def gen_gaussian_kernel(k_size, sigma):
return g
-def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
- image_row, image_col = image.shape[0], image.shape[1]
- # gaussian_filter
- gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4))
- # get the gradient and degree by sobel_filter
- sobel_grad, sobel_theta = sobel_filter(gaussian_out)
- gradient_direction = np.rad2deg(sobel_theta)
- gradient_direction += PI
-
- dst = np.zeros((image_row, image_col))
-
+def suppress_non_maximum(image_shape, gradient_direction, sobel_grad):
"""
Non-maximum suppression. If the edge strength of the current pixel is the largest
compared to the other pixels in the mask with the same direction, the value will be
preserved. Otherwise, the value will be suppressed.
"""
- for row in range(1, image_row - 1):
- for col in range(1, image_col - 1):
+ destination = np.zeros(image_shape)
+
+ for row in range(1, image_shape[0] - 1):
+ for col in range(1, image_shape[1] - 1):
direction = gradient_direction[row, col]
if (
- 0 <= direction < 22.5
+ 0 <= direction < PI / 8
or 15 * PI / 8 <= direction <= 2 * PI
or 7 * PI / 8 <= direction <= 9 * PI / 8
):
- W = sobel_grad[row, col - 1]
- E = sobel_grad[row, col + 1]
- if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E:
- dst[row, col] = sobel_grad[row, col]
+ w = sobel_grad[row, col - 1]
+ e = sobel_grad[row, col + 1]
+ if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e:
+ destination[row, col] = sobel_grad[row, col]
- elif (PI / 8 <= direction < 3 * PI / 8) or (
- 9 * PI / 8 <= direction < 11 * PI / 8
+ elif (
+ PI / 8 <= direction < 3 * PI / 8
+ or 9 * PI / 8 <= direction < 11 * PI / 8
):
- SW = sobel_grad[row + 1, col - 1]
- NE = sobel_grad[row - 1, col + 1]
- if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE:
- dst[row, col] = sobel_grad[row, col]
+ sw = sobel_grad[row + 1, col - 1]
+ ne = sobel_grad[row - 1, col + 1]
+ if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne:
+ destination[row, col] = sobel_grad[row, col]
- elif (3 * PI / 8 <= direction < 5 * PI / 8) or (
- 11 * PI / 8 <= direction < 13 * PI / 8
+ elif (
+ 3 * PI / 8 <= direction < 5 * PI / 8
+ or 11 * PI / 8 <= direction < 13 * PI / 8
):
- N = sobel_grad[row - 1, col]
- S = sobel_grad[row + 1, col]
- if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S:
- dst[row, col] = sobel_grad[row, col]
+ n = sobel_grad[row - 1, col]
+ s = sobel_grad[row + 1, col]
+ if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s:
+ destination[row, col] = sobel_grad[row, col]
- elif (5 * PI / 8 <= direction < 7 * PI / 8) or (
- 13 * PI / 8 <= direction < 15 * PI / 8
+ elif (
+ 5 * PI / 8 <= direction < 7 * PI / 8
+ or 13 * PI / 8 <= direction < 15 * PI / 8
):
- NW = sobel_grad[row - 1, col - 1]
- SE = sobel_grad[row + 1, col + 1]
- if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE:
- dst[row, col] = sobel_grad[row, col]
+ nw = sobel_grad[row - 1, col - 1]
+ se = sobel_grad[row + 1, col + 1]
+ if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se:
+ destination[row, col] = sobel_grad[row, col]
- """
- High-Low threshold detection. If an edge pixel’s gradient value is higher
- than the high threshold value, it is marked as a strong edge pixel. If an
- edge pixel’s gradient value is smaller than the high threshold value and
- larger than the low threshold value, it is marked as a weak edge pixel. If
- an edge pixel's value is smaller than the low threshold value, it will be
- suppressed.
- """
- if dst[row, col] >= threshold_high:
- dst[row, col] = strong
- elif dst[row, col] <= threshold_low:
- dst[row, col] = 0
+ return destination
+
+
+def detect_high_low_threshold(
+ image_shape, destination, threshold_low, threshold_high, weak, strong
+):
+ """
+ High-Low threshold detection. If an edge pixel’s gradient value is higher
+ than the high threshold value, it is marked as a strong edge pixel. If an
+ edge pixel’s gradient value is smaller than the high threshold value and
+ larger than the low threshold value, it is marked as a weak edge pixel. If
+ an edge pixel's value is smaller than the low threshold value, it will be
+ suppressed.
+ """
+ for row in range(1, image_shape[0] - 1):
+ for col in range(1, image_shape[1] - 1):
+ if destination[row, col] >= threshold_high:
+ destination[row, col] = strong
+ elif destination[row, col] <= threshold_low:
+ destination[row, col] = 0
else:
- dst[row, col] = weak
+ destination[row, col] = weak
+
+def track_edge(image_shape, destination, weak, strong):
"""
Edge tracking. Usually a weak edge pixel caused from true edges will be connected
to a strong edge pixel while noise responses are unconnected. As long as there is
one strong edge pixel that is involved in its 8-connected neighborhood, that weak
edge point can be identified as one that should be preserved.
"""
- for row in range(1, image_row):
- for col in range(1, image_col):
- if dst[row, col] == weak:
+ for row in range(1, image_shape[0]):
+ for col in range(1, image_shape[1]):
+ if destination[row, col] == weak:
if 255 in (
- dst[row, col + 1],
- dst[row, col - 1],
- dst[row - 1, col],
- dst[row + 1, col],
- dst[row - 1, col - 1],
- dst[row + 1, col - 1],
- dst[row - 1, col + 1],
- dst[row + 1, col + 1],
+ destination[row, col + 1],
+ destination[row, col - 1],
+ destination[row - 1, col],
+ destination[row + 1, col],
+ destination[row - 1, col - 1],
+ destination[row + 1, col - 1],
+ destination[row - 1, col + 1],
+ destination[row + 1, col + 1],
):
- dst[row, col] = strong
+ destination[row, col] = strong
else:
- dst[row, col] = 0
+ destination[row, col] = 0
- return dst
+
+def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
+ # gaussian_filter
+ gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4))
+ # get the gradient and degree by sobel_filter
+ sobel_grad, sobel_theta = sobel_filter(gaussian_out)
+ gradient_direction = PI + np.rad2deg(sobel_theta)
+
+ destination = suppress_non_maximum(image.shape, gradient_direction, sobel_grad)
+
+ detect_high_low_threshold(
+ image.shape, destination, threshold_low, threshold_high, weak, strong
+ )
+
+ track_edge(image.shape, destination, weak, strong)
+
+ return destination
if __name__ == "__main__":
# read original image in gray mode
lena = cv2.imread(r"../image_data/lena.jpg", 0)
# canny edge detection
- canny_dst = canny(lena)
- cv2.imshow("canny", canny_dst)
+ canny_destination = canny(lena)
+ cv2.imshow("canny", canny_destination)
cv2.waitKey(0)
diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py
index 76ae4dd20..565da73f6 100644
--- a/digital_image_processing/filters/bilateral_filter.py
+++ b/digital_image_processing/filters/bilateral_filter.py
@@ -46,16 +46,15 @@ def bilateral_filter(
kernel_size: int,
) -> np.ndarray:
img2 = np.zeros(img.shape)
- gaussKer = get_gauss_kernel(kernel_size, spatial_variance)
- sizeX, sizeY = img.shape
- for i in range(kernel_size // 2, sizeX - kernel_size // 2):
- for j in range(kernel_size // 2, sizeY - kernel_size // 2):
-
- imgS = get_slice(img, i, j, kernel_size)
- imgI = imgS - imgS[kernel_size // 2, kernel_size // 2]
- imgIG = vec_gaussian(imgI, intensity_variance)
- weights = np.multiply(gaussKer, imgIG)
- vals = np.multiply(imgS, weights)
+ gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
+ size_x, size_y = img.shape
+ for i in range(kernel_size // 2, size_x - kernel_size // 2):
+ for j in range(kernel_size // 2, size_y - kernel_size // 2):
+ img_s = get_slice(img, i, j, kernel_size)
+ img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
+ img_ig = vec_gaussian(img_i, intensity_variance)
+ weights = np.multiply(gauss_ker, img_ig)
+ vals = np.multiply(img_s, weights)
val = np.sum(vals) / np.sum(weights)
img2[i, j] = val
return img2
diff --git a/digital_image_processing/filters/gabor_filter.py b/digital_image_processing/filters/gabor_filter.py
new file mode 100644
index 000000000..8f9212a35
--- /dev/null
+++ b/digital_image_processing/filters/gabor_filter.py
@@ -0,0 +1,85 @@
+# Implementation of the Gabor filter
+# https://en.wikipedia.org/wiki/Gabor_filter
+import numpy as np
+from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
+
+
+def gabor_filter_kernel(
+ ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
+) -> np.ndarray:
+ """
+ :param ksize: The kernel size of the convolutional filter (ksize x ksize)
+ :param sigma: standard deviation of the gaussian bell curve
+ :param theta: The orientation of the normal to the parallel stripes
+ of Gabor function.
+ :param lambd: Wavelength of the sinusoidal component.
+ :param gamma: The spatial aspect ratio, which specifies the ellipticity
+ of the support of Gabor function.
+ :param psi: The phase offset of the sinusoidal function.
+
+ >>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).tolist()
+ [[0.8027212023735046, 1.0, 0.8027212023735046], [0.8027212023735046, 1.0, \
+0.8027212023735046], [0.8027212023735046, 1.0, 0.8027212023735046]]
+
+ """
+
+ # prepare kernel
+ # the kernel size has to be odd
+ if (ksize % 2) == 0:
+ ksize = ksize + 1
+ gabor = np.zeros((ksize, ksize), dtype=np.float32)
+
+ # each value
+ for y in range(ksize):
+ for x in range(ksize):
+ # distance from center
+ px = x - ksize // 2
+ py = y - ksize // 2
+
+ # degrees to radians
+ _theta = theta / 180 * np.pi
+ cos_theta = np.cos(_theta)
+ sin_theta = np.sin(_theta)
+
+ # get kernel x
+ _x = cos_theta * px + sin_theta * py
+
+ # get kernel y
+ _y = -sin_theta * px + cos_theta * py
+
+ # fill kernel
+ gabor[y, x] = np.exp(
+ -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
+ ) * np.cos(2 * np.pi * _x / lambd + psi)
+
+ return gabor
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ # read original image
+ img = imread("../image_data/lena.jpg")
+ # turn image in gray scale value
+ gray = cvtColor(img, COLOR_BGR2GRAY)
+
+ # Apply multiple Kernel to detect edges
+ out = np.zeros(gray.shape[:2])
+ for theta in [0, 30, 60, 90, 120, 150]:
+ """
+ ksize = 10
+ sigma = 8
+ lambd = 10
+ gamma = 0
+ psi = 0
+ """
+ kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
+ out += filter2D(gray, CV_8UC3, kernel_10)
+ out = out / out.max() * 255
+ out = out.astype(np.uint8)
+
+ imshow("Original", gray)
+ imshow("Gabor filter with 20x20 mask and 6 directions", out)
+
+ waitKey(0)
diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py
new file mode 100644
index 000000000..907fe2cb0
--- /dev/null
+++ b/digital_image_processing/filters/local_binary_pattern.py
@@ -0,0 +1,80 @@
+import cv2
+import numpy as np
+
+
+def get_neighbors_pixel(
+ image: np.ndarray, x_coordinate: int, y_coordinate: int, center: int
+) -> int:
+ """
+ Comparing local neighborhood pixel value with threshold value of centre pixel.
+ An exception is required when a neighborhood value of the center pixel is null,
+ i.e. values present at boundaries.
+
+ :param image: The image we're working with
+ :param x_coordinate: x-coordinate of the pixel
+ :param y_coordinate: The y coordinate of the pixel
+ :param center: center pixel value
+ :return: The value of the pixel is being returned.
+ """
+
+ try:
+ return int(image[x_coordinate][y_coordinate] >= center)
+ except (IndexError, TypeError):
+ return 0
+
+
+def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) -> int:
+ """
+ It takes an image, an x and y coordinate, and returns the
+ decimal value of the local binary pattern of the pixel
+ at that coordinate
+
+ :param image: the image to be processed
+ :param x_coordinate: x coordinate of the pixel
+ :param y_coordinate: the y coordinate of the pixel
+ :return: The decimal value of the binary value of the pixels
+ around the center pixel.
+ """
+ center = image[x_coordinate][y_coordinate]
+ powers = [1, 2, 4, 8, 16, 32, 64, 128]
+
+ # skip get_neighbors_pixel if center is null
+ if center is None:
+ return 0
+
+ # Starting from the top right, assigning value to pixels clockwise
+ binary_values = [
+ get_neighbors_pixel(image, x_coordinate - 1, y_coordinate + 1, center),
+ get_neighbors_pixel(image, x_coordinate, y_coordinate + 1, center),
+ get_neighbors_pixel(image, x_coordinate - 1, y_coordinate, center),
+ get_neighbors_pixel(image, x_coordinate + 1, y_coordinate + 1, center),
+ get_neighbors_pixel(image, x_coordinate + 1, y_coordinate, center),
+ get_neighbors_pixel(image, x_coordinate + 1, y_coordinate - 1, center),
+ get_neighbors_pixel(image, x_coordinate, y_coordinate - 1, center),
+ get_neighbors_pixel(image, x_coordinate - 1, y_coordinate - 1, center),
+ ]
+
+ # Converting the binary value to decimal.
+ return sum(
+ binary_value * power for binary_value, power in zip(binary_values, powers)
+ )
+
+
+if __name__ == "__main__":
+ # Reading the image and converting it to grayscale.
+ image = cv2.imread(
+ "digital_image_processing/image_data/lena.jpg", cv2.IMREAD_GRAYSCALE
+ )
+
+ # Create a numpy array as the same height and width of read image
+ lbp_image = np.zeros((image.shape[0], image.shape[1]))
+
+ # Iterating through the image and calculating the
+ # local binary pattern value for each pixel.
+ for i in range(0, image.shape[0]):
+ for j in range(0, image.shape[1]):
+ lbp_image[i][j] = local_binary_value(image, i, j)
+
+ cv2.imshow("local binary pattern", lbp_image)
+ cv2.waitKey(0)
+ cv2.destroyAllWindows()
diff --git a/digital_image_processing/histogram_equalization/histogram_stretch.py b/digital_image_processing/histogram_equalization/histogram_stretch.py
index 0288a2c1f..5ea7773e3 100644
--- a/digital_image_processing/histogram_equalization/histogram_stretch.py
+++ b/digital_image_processing/histogram_equalization/histogram_stretch.py
@@ -11,7 +11,7 @@ import numpy as np
from matplotlib import pyplot as plt
-class contrastStretch:
+class ConstantStretch:
def __init__(self):
self.img = ""
self.original_image = ""
@@ -45,10 +45,10 @@ class contrastStretch:
self.img[j][i] = self.last_list[num]
cv2.imwrite("output_data/output.jpg", self.img)
- def plotHistogram(self):
+ def plot_histogram(self):
plt.hist(self.img.ravel(), 256, [0, 256])
- def showImage(self):
+ def show_image(self):
cv2.imshow("Output-Image", self.img)
cv2.imshow("Input-Image", self.original_image)
cv2.waitKey(5000)
@@ -57,7 +57,7 @@ class contrastStretch:
if __name__ == "__main__":
file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
- stretcher = contrastStretch()
+ stretcher = ConstantStretch()
stretcher.stretch(file_path)
- stretcher.plotHistogram()
- stretcher.showImage()
+ stretcher.plot_histogram()
+ stretcher.show_image()
diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py
index 4350b8603..67830668b 100644
--- a/digital_image_processing/index_calculation.py
+++ b/digital_image_processing/index_calculation.py
@@ -104,72 +104,71 @@ class IndexCalculation:
#RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"]
"""
- def __init__(self, red=None, green=None, blue=None, redEdge=None, nir=None):
- # print("Numpy version: " + np.__version__)
- self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
+ def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
+ self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
- def setMatrices(self, red=None, green=None, blue=None, redEdge=None, nir=None):
+ def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
if red is not None:
self.red = red
if green is not None:
self.green = green
if blue is not None:
self.blue = blue
- if redEdge is not None:
- self.redEdge = redEdge
+ if red_edge is not None:
+ self.redEdge = red_edge
if nir is not None:
self.nir = nir
return True
def calculation(
- self, index="", red=None, green=None, blue=None, redEdge=None, nir=None
+ self, index="", red=None, green=None, blue=None, red_edge=None, nir=None
):
"""
performs the calculation of the index with the values instantiated in the class
:str index: abbreviation of index name to perform
"""
- self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
+ self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
funcs = {
- "ARVI2": self.ARVI2,
- "CCCI": self.CCCI,
- "CVI": self.CVI,
- "GLI": self.GLI,
- "NDVI": self.NDVI,
- "BNDVI": self.BNDVI,
- "redEdgeNDVI": self.redEdgeNDVI,
- "GNDVI": self.GNDVI,
- "GBNDVI": self.GBNDVI,
- "GRNDVI": self.GRNDVI,
- "RBNDVI": self.RBNDVI,
- "PNDVI": self.PNDVI,
- "ATSAVI": self.ATSAVI,
- "BWDRVI": self.BWDRVI,
- "CIgreen": self.CIgreen,
- "CIrededge": self.CIrededge,
- "CI": self.CI,
- "CTVI": self.CTVI,
- "GDVI": self.GDVI,
- "EVI": self.EVI,
- "GEMI": self.GEMI,
- "GOSAVI": self.GOSAVI,
- "GSAVI": self.GSAVI,
- "Hue": self.Hue,
- "IVI": self.IVI,
- "IPVI": self.IPVI,
- "I": self.I,
- "RVI": self.RVI,
- "MRVI": self.MRVI,
- "MSAVI": self.MSAVI,
- "NormG": self.NormG,
- "NormNIR": self.NormNIR,
- "NormR": self.NormR,
- "NGRDI": self.NGRDI,
- "RI": self.RI,
- "S": self.S,
- "IF": self.IF,
- "DVI": self.DVI,
- "TVI": self.TVI,
- "NDRE": self.NDRE,
+ "ARVI2": self.arv12,
+ "CCCI": self.ccci,
+ "CVI": self.cvi,
+ "GLI": self.gli,
+ "NDVI": self.ndvi,
+ "BNDVI": self.bndvi,
+ "redEdgeNDVI": self.red_edge_ndvi,
+ "GNDVI": self.gndvi,
+ "GBNDVI": self.gbndvi,
+ "GRNDVI": self.grndvi,
+ "RBNDVI": self.rbndvi,
+ "PNDVI": self.pndvi,
+ "ATSAVI": self.atsavi,
+ "BWDRVI": self.bwdrvi,
+ "CIgreen": self.ci_green,
+ "CIrededge": self.ci_rededge,
+ "CI": self.ci,
+ "CTVI": self.ctvi,
+ "GDVI": self.gdvi,
+ "EVI": self.evi,
+ "GEMI": self.gemi,
+ "GOSAVI": self.gosavi,
+ "GSAVI": self.gsavi,
+ "Hue": self.hue,
+ "IVI": self.ivi,
+ "IPVI": self.ipvi,
+ "I": self.i,
+ "RVI": self.rvi,
+ "MRVI": self.mrvi,
+ "MSAVI": self.m_savi,
+ "NormG": self.norm_g,
+ "NormNIR": self.norm_nir,
+ "NormR": self.norm_r,
+ "NGRDI": self.ngrdi,
+ "RI": self.ri,
+ "S": self.s,
+ "IF": self._if,
+ "DVI": self.dvi,
+ "TVI": self.tvi,
+ "NDRE": self.ndre,
}
try:
@@ -178,7 +177,7 @@ class IndexCalculation:
print("Index not in the list!")
return False
- def ARVI2(self):
+ def arv12(self):
"""
Atmospherically Resistant Vegetation Index 2
https://www.indexdatabase.de/db/i-single.php?id=396
@@ -187,7 +186,7 @@ class IndexCalculation:
"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
- def CCCI(self):
+ def ccci(self):
"""
Canopy Chlorophyll Content Index
https://www.indexdatabase.de/db/i-single.php?id=224
@@ -197,15 +196,15 @@ class IndexCalculation:
(self.nir - self.red) / (self.nir + self.red)
)
- def CVI(self):
+ def cvi(self):
"""
Chlorophyll vegetation index
https://www.indexdatabase.de/db/i-single.php?id=391
:return: index
"""
- return self.nir * (self.red / (self.green ** 2))
+ return self.nir * (self.red / (self.green**2))
- def GLI(self):
+ def gli(self):
"""
self.green leaf index
https://www.indexdatabase.de/db/i-single.php?id=375
@@ -215,7 +214,7 @@ class IndexCalculation:
2 * self.green + self.red + self.blue
)
- def NDVI(self):
+ def ndvi(self):
"""
Normalized Difference self.nir/self.red Normalized Difference Vegetation
Index, Calibrated NDVI - CDVI
@@ -224,7 +223,7 @@ class IndexCalculation:
"""
return (self.nir - self.red) / (self.nir + self.red)
- def BNDVI(self):
+ def bndvi(self):
"""
Normalized Difference self.nir/self.blue self.blue-normalized difference
vegetation index
@@ -233,7 +232,7 @@ class IndexCalculation:
"""
return (self.nir - self.blue) / (self.nir + self.blue)
- def redEdgeNDVI(self):
+ def red_edge_ndvi(self):
"""
Normalized Difference self.rededge/self.red
https://www.indexdatabase.de/db/i-single.php?id=235
@@ -241,7 +240,7 @@ class IndexCalculation:
"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
- def GNDVI(self):
+ def gndvi(self):
"""
Normalized Difference self.nir/self.green self.green NDVI
https://www.indexdatabase.de/db/i-single.php?id=401
@@ -249,7 +248,7 @@ class IndexCalculation:
"""
return (self.nir - self.green) / (self.nir + self.green)
- def GBNDVI(self):
+ def gbndvi(self):
"""
self.green-self.blue NDVI
https://www.indexdatabase.de/db/i-single.php?id=186
@@ -259,7 +258,7 @@ class IndexCalculation:
self.nir + (self.green + self.blue)
)
- def GRNDVI(self):
+ def grndvi(self):
"""
self.green-self.red NDVI
https://www.indexdatabase.de/db/i-single.php?id=185
@@ -269,7 +268,7 @@ class IndexCalculation:
self.nir + (self.green + self.red)
)
- def RBNDVI(self):
+ def rbndvi(self):
"""
self.red-self.blue NDVI
https://www.indexdatabase.de/db/i-single.php?id=187
@@ -277,7 +276,7 @@ class IndexCalculation:
"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
- def PNDVI(self):
+ def pndvi(self):
"""
Pan NDVI
https://www.indexdatabase.de/db/i-single.php?id=188
@@ -287,7 +286,7 @@ class IndexCalculation:
self.nir + (self.green + self.red + self.blue)
)
- def ATSAVI(self, X=0.08, a=1.22, b=0.03):
+ def atsavi(self, x=0.08, a=1.22, b=0.03):
"""
Adjusted transformed soil-adjusted VI
https://www.indexdatabase.de/db/i-single.php?id=209
@@ -295,10 +294,10 @@ class IndexCalculation:
"""
return a * (
(self.nir - a * self.red - b)
- / (a * self.nir + self.red - a * b + X * (1 + a ** 2))
+ / (a * self.nir + self.red - a * b + x * (1 + a**2))
)
- def BWDRVI(self):
+ def bwdrvi(self):
"""
self.blue-wide dynamic range vegetation index
https://www.indexdatabase.de/db/i-single.php?id=136
@@ -306,7 +305,7 @@ class IndexCalculation:
"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
- def CIgreen(self):
+ def ci_green(self):
"""
Chlorophyll Index self.green
https://www.indexdatabase.de/db/i-single.php?id=128
@@ -314,7 +313,7 @@ class IndexCalculation:
"""
return (self.nir / self.green) - 1
- def CIrededge(self):
+ def ci_rededge(self):
"""
Chlorophyll Index self.redEdge
https://www.indexdatabase.de/db/i-single.php?id=131
@@ -322,7 +321,7 @@ class IndexCalculation:
"""
return (self.nir / self.redEdge) - 1
- def CI(self):
+ def ci(self):
"""
Coloration Index
https://www.indexdatabase.de/db/i-single.php?id=11
@@ -330,16 +329,16 @@ class IndexCalculation:
"""
return (self.red - self.blue) / self.red
- def CTVI(self):
+ def ctvi(self):
"""
Corrected Transformed Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=244
:return: index
"""
- ndvi = self.NDVI()
+ ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
- def GDVI(self):
+ def gdvi(self):
"""
Difference self.nir/self.green self.green Difference Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=27
@@ -347,7 +346,7 @@ class IndexCalculation:
"""
return self.nir - self.green
- def EVI(self):
+ def evi(self):
"""
Enhanced Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=16
@@ -357,36 +356,36 @@ class IndexCalculation:
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
- def GEMI(self):
+ def gemi(self):
"""
Global Environment Monitoring Index
https://www.indexdatabase.de/db/i-single.php?id=25
:return: index
"""
- n = (2 * (self.nir ** 2 - self.red ** 2) + 1.5 * self.nir + 0.5 * self.red) / (
+ n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
- def GOSAVI(self, Y=0.16):
+ def gosavi(self, y=0.16):
"""
self.green Optimized Soil Adjusted Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=29
mit Y = 0,16
:return: index
"""
- return (self.nir - self.green) / (self.nir + self.green + Y)
+ return (self.nir - self.green) / (self.nir + self.green + y)
- def GSAVI(self, L=0.5):
+ def gsavi(self, n=0.5):
"""
self.green Soil Adjusted Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=31
- mit L = 0,5
+ mit N = 0,5
:return: index
"""
- return ((self.nir - self.green) / (self.nir + self.green + L)) * (1 + L)
+ return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
- def Hue(self):
+ def hue(self):
"""
Hue
https://www.indexdatabase.de/db/i-single.php?id=34
@@ -396,7 +395,7 @@ class IndexCalculation:
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
)
- def IVI(self, a=None, b=None):
+ def ivi(self, a=None, b=None):
"""
Ideal vegetation index
https://www.indexdatabase.de/db/i-single.php?id=276
@@ -406,15 +405,15 @@ class IndexCalculation:
"""
return (self.nir - b) / (a * self.red)
- def IPVI(self):
+ def ipvi(self):
"""
Infraself.red percentage vegetation index
https://www.indexdatabase.de/db/i-single.php?id=35
:return: index
"""
- return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1)
+ return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
- def I(self): # noqa: E741,E743
+ def i(self):
"""
Intensity
https://www.indexdatabase.de/db/i-single.php?id=36
@@ -422,7 +421,7 @@ class IndexCalculation:
"""
return (self.red + self.green + self.blue) / 30.5
- def RVI(self):
+ def rvi(self):
"""
Ratio-Vegetation-Index
http://www.seos-project.eu/modules/remotesensing/remotesensing-c03-s01-p01.html
@@ -430,15 +429,15 @@ class IndexCalculation:
"""
return self.nir / self.red
- def MRVI(self):
+ def mrvi(self):
"""
Modified Normalized Difference Vegetation Index RVI
https://www.indexdatabase.de/db/i-single.php?id=275
:return: index
"""
- return (self.RVI() - 1) / (self.RVI() + 1)
+ return (self.rvi() - 1) / (self.rvi() + 1)
- def MSAVI(self):
+ def m_savi(self):
"""
Modified Soil Adjusted Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=44
@@ -449,7 +448,7 @@ class IndexCalculation:
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
- def NormG(self):
+ def norm_g(self):
"""
Norm G
https://www.indexdatabase.de/db/i-single.php?id=50
@@ -457,7 +456,7 @@ class IndexCalculation:
"""
return self.green / (self.nir + self.red + self.green)
- def NormNIR(self):
+ def norm_nir(self):
"""
Norm self.nir
https://www.indexdatabase.de/db/i-single.php?id=51
@@ -465,7 +464,7 @@ class IndexCalculation:
"""
return self.nir / (self.nir + self.red + self.green)
- def NormR(self):
+ def norm_r(self):
"""
Norm R
https://www.indexdatabase.de/db/i-single.php?id=52
@@ -473,7 +472,7 @@ class IndexCalculation:
"""
return self.red / (self.nir + self.red + self.green)
- def NGRDI(self):
+ def ngrdi(self):
"""
Normalized Difference self.green/self.red Normalized self.green self.red
difference index, Visible Atmospherically Resistant Indices self.green
@@ -483,7 +482,7 @@ class IndexCalculation:
"""
return (self.green - self.red) / (self.green + self.red)
- def RI(self):
+ def ri(self):
"""
Normalized Difference self.red/self.green self.redness Index
https://www.indexdatabase.de/db/i-single.php?id=74
@@ -491,17 +490,17 @@ class IndexCalculation:
"""
return (self.red - self.green) / (self.red + self.green)
- def S(self):
+ def s(self):
"""
Saturation
https://www.indexdatabase.de/db/i-single.php?id=77
:return: index
"""
- max = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
- min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
- return (max - min) / max
+ max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
+ min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
+ return (max_value - min_value) / max_value
- def IF(self):
+ def _if(self):
"""
Shape Index
https://www.indexdatabase.de/db/i-single.php?id=79
@@ -509,7 +508,7 @@ class IndexCalculation:
"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
- def DVI(self):
+ def dvi(self):
"""
Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index
Number (VIN)
@@ -518,15 +517,15 @@ class IndexCalculation:
"""
return self.nir / self.red
- def TVI(self):
+ def tvi(self):
"""
Transformed Vegetation Index
https://www.indexdatabase.de/db/i-single.php?id=98
:return: index
"""
- return (self.NDVI() + 0.5) ** (1 / 2)
+ return (self.ndvi() + 0.5) ** (1 / 2)
- def NDRE(self):
+ def ndre(self):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py
new file mode 100644
index 000000000..e49b955c1
--- /dev/null
+++ b/digital_image_processing/morphological_operations/dilation_operation.py
@@ -0,0 +1,75 @@
+from pathlib import Path
+
+import numpy as np
+from PIL import Image
+
+
+def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
+ """
+ Return gray image from rgb image
+ >>> rgb_to_gray(np.array([[[127, 255, 0]]]))
+ array([[187.6453]])
+ >>> rgb_to_gray(np.array([[[0, 0, 0]]]))
+ array([[0.]])
+ >>> rgb_to_gray(np.array([[[2, 4, 1]]]))
+ array([[3.0598]])
+ >>> rgb_to_gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]]))
+ array([[159.0524, 90.0635, 117.6989]])
+ """
+ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
+ return 0.2989 * r + 0.5870 * g + 0.1140 * b
+
+
+def gray_to_binary(gray: np.ndarray) -> np.ndarray:
+ """
+ Return binary image from gray image
+ >>> gray_to_binary(np.array([[127, 255, 0]]))
+ array([[False, True, False]])
+ >>> gray_to_binary(np.array([[0]]))
+ array([[False]])
+ >>> gray_to_binary(np.array([[26.2409, 4.9315, 1.4729]]))
+ array([[False, False, False]])
+ >>> gray_to_binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]]))
+ array([[False, True, False],
+ [False, True, False],
+ [False, True, False]])
+ """
+ return (gray > 127) & (gray <= 255)
+
+
+def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
+ """
+ Return dilated image
+ >>> dilation(np.array([[True, False, True]]), np.array([[0, 1, 0]]))
+ array([[False, False, False]])
+ >>> dilation(np.array([[False, False, True]]), np.array([[1, 0, 1]]))
+ array([[False, False, False]])
+ """
+ output = np.zeros_like(image)
+ image_padded = np.zeros(
+ (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
+ )
+
+ # Copy image to padded image
+ image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
+
+ # Iterate over image & apply kernel
+ for x in range(image.shape[1]):
+ for y in range(image.shape[0]):
+ summation = (
+ kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
+ ).sum()
+ output[y, x] = int(summation > 0)
+ return output
+
+
+if __name__ == "__main__":
+ # read original image
+ lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
+ lena = np.array(Image.open(lena_path))
+ # kernel to be applied
+ structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+ output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
+ # Save the output image
+ pil_img = Image.fromarray(output).convert("RGB")
+ pil_img.save("result_dilation.png")
diff --git a/digital_image_processing/morphological_operations/erosion_operation.py b/digital_image_processing/morphological_operations/erosion_operation.py
new file mode 100644
index 000000000..c2cde2ea6
--- /dev/null
+++ b/digital_image_processing/morphological_operations/erosion_operation.py
@@ -0,0 +1,74 @@
+import numpy as np
+from PIL import Image
+
+
+def rgb2gray(rgb: np.array) -> np.array:
+ """
+ Return gray image from rgb image
+ >>> rgb2gray(np.array([[[127, 255, 0]]]))
+ array([[187.6453]])
+ >>> rgb2gray(np.array([[[0, 0, 0]]]))
+ array([[0.]])
+ >>> rgb2gray(np.array([[[2, 4, 1]]]))
+ array([[3.0598]])
+ >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]]))
+ array([[159.0524, 90.0635, 117.6989]])
+ """
+ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
+ return 0.2989 * r + 0.5870 * g + 0.1140 * b
+
+
+def gray2binary(gray: np.array) -> np.array:
+ """
+ Return binary image from gray image
+ >>> gray2binary(np.array([[127, 255, 0]]))
+ array([[False, True, False]])
+ >>> gray2binary(np.array([[0]]))
+ array([[False]])
+ >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]]))
+ array([[False, False, False]])
+ >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]]))
+ array([[False, True, False],
+ [False, True, False],
+ [False, True, False]])
+ """
+ return (gray > 127) & (gray <= 255)
+
+
+def erosion(image: np.array, kernel: np.array) -> np.array:
+ """
+ Return eroded image
+ >>> erosion(np.array([[True, True, False]]), np.array([[0, 1, 0]]))
+ array([[False, False, False]])
+ >>> erosion(np.array([[True, False, False]]), np.array([[1, 1, 0]]))
+ array([[False, False, False]])
+ """
+ output = np.zeros_like(image)
+ image_padded = np.zeros(
+ (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
+ )
+
+ # Copy image to padded image
+ image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
+
+ # Iterate over image & apply kernel
+ for x in range(image.shape[1]):
+ for y in range(image.shape[0]):
+ summation = (
+ kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
+ ).sum()
+ output[y, x] = int(summation == 5)
+ return output
+
+
+# kernel to be applied
+structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+
+if __name__ == "__main__":
+ # read original image
+ image = np.array(Image.open(r"..\image_data\lena.jpg"))
+ # Apply erosion operation to a binary image
+ output = erosion(gray2binary(rgb2gray(image)), structuring_element)
+ # Save the output image
+ pil_img = Image.fromarray(output).convert("RGB")
+ pil_img.save("result_erosion.png")
diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py
index 40f2f7b83..fee7ab247 100644
--- a/digital_image_processing/test_digital_image_processing.py
+++ b/digital_image_processing/test_digital_image_processing.py
@@ -1,6 +1,7 @@
"""
PyTest's for Digital Image Processing
"""
+import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
@@ -9,9 +10,10 @@ from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
-from digital_image_processing.edge_detection import canny as canny
+from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
+from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
@@ -60,8 +62,8 @@ def test_gen_gaussian_kernel_filter():
def test_convolve_filter():
# laplace diagonals
- Laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
- res = conv.img_convolve(gray, Laplace).astype(uint8)
+ laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
+ res = conv.img_convolve(gray, laplace).astype(uint8)
assert res.any()
@@ -91,3 +93,33 @@ def test_nearest_neighbour(
nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
nn.process()
assert nn.output.any()
+
+
+def test_local_binary_pattern():
+ file_path = "digital_image_processing/image_data/lena.jpg"
+
+ # Reading the image and converting it to grayscale.
+ image = imread(file_path, 0)
+
+ # Test for get_neighbors_pixel function() return not None
+ x_coordinate = 0
+ y_coordinate = 0
+ center = image[x_coordinate][y_coordinate]
+
+ neighbors_pixels = lbp.get_neighbors_pixel(
+ image, x_coordinate, y_coordinate, center
+ )
+
+ assert neighbors_pixels is not None
+
+ # Test for local_binary_pattern function()
+ # Create a numpy array as the same height and width of read image
+ lbp_image = np.zeros((image.shape[0], image.shape[1]))
+
+ # Iterating through the image and calculating the local binary pattern value
+ # for each pixel.
+ for i in range(0, image.shape[0]):
+ for j in range(0, image.shape[1]):
+ lbp_image[i][j] = lbp.local_binary_value(image, i, j)
+
+ assert lbp_image.any()
diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py
index 9c096f671..1d1bf301d 100644
--- a/divide_and_conquer/convex_hull.py
+++ b/divide_and_conquer/convex_hull.py
@@ -12,8 +12,9 @@ There are other several other algorithms for the convex hull problem
which have not been implemented here, yet.
"""
+from __future__ import annotations
-from typing import Iterable, List, Set, Union
+from collections.abc import Iterable
class Point:
@@ -84,8 +85,8 @@ class Point:
def _construct_points(
- list_of_tuples: Union[List[Point], List[List[float]], Iterable[List[float]]]
-) -> List[Point]:
+ list_of_tuples: list[Point] | list[list[float]] | Iterable[list[float]],
+) -> list[Point]:
"""
constructs a list of points from an array-like object of numbers
@@ -114,7 +115,7 @@ def _construct_points(
[]
"""
- points: List[Point] = []
+ points: list[Point] = []
if list_of_tuples:
for p in list_of_tuples:
if isinstance(p, Point):
@@ -130,7 +131,7 @@ def _construct_points(
return points
-def _validate_input(points: Union[List[Point], List[List[float]]]) -> List[Point]:
+def _validate_input(points: list[Point] | list[list[float]]) -> list[Point]:
"""
validates an input instance before a convex-hull algorithms uses it
@@ -173,12 +174,12 @@ def _validate_input(points: Union[List[Point], List[List[float]]]) -> List[Point
"""
if not hasattr(points, "__iter__"):
- raise ValueError(
- f"Expecting an iterable object but got an non-iterable type {points}"
- )
+ msg = f"Expecting an iterable object but got an non-iterable type {points}"
+ raise ValueError(msg)
if not points:
- raise ValueError(f"Expecting a list of points but got {points}")
+ msg = f"Expecting a list of points but got {points}"
+ raise ValueError(msg)
return _construct_points(points)
@@ -218,7 +219,7 @@ def _det(a: Point, b: Point, c: Point) -> float:
return det
-def convex_hull_bf(points: List[Point]) -> List[Point]:
+def convex_hull_bf(points: list[Point]) -> list[Point]:
"""
Constructs the convex hull of a set of 2D points using a brute force algorithm.
The algorithm basically considers all combinations of points (i, j) and uses the
@@ -265,7 +266,7 @@ def convex_hull_bf(points: List[Point]) -> List[Point]:
points_left_of_ij = points_right_of_ij = False
ij_part_of_convex_hull = True
for k in range(n):
- if k != i and k != j:
+ if k not in {i, j}:
det_k = _det(points[i], points[j], points[k])
if det_k > 0:
@@ -291,7 +292,7 @@ def convex_hull_bf(points: List[Point]) -> List[Point]:
return sorted(convex_set)
-def convex_hull_recursive(points: List[Point]) -> List[Point]:
+def convex_hull_recursive(points: list[Point]) -> list[Point]:
"""
Constructs the convex hull of a set of 2D points using a divide-and-conquer strategy
The algorithm exploits the geometric properties of the problem by repeatedly
@@ -362,7 +363,7 @@ def convex_hull_recursive(points: List[Point]) -> List[Point]:
def _construct_hull(
- points: List[Point], left: Point, right: Point, convex_set: Set[Point]
+ points: list[Point], left: Point, right: Point, convex_set: set[Point]
) -> None:
"""
@@ -405,7 +406,7 @@ def _construct_hull(
_construct_hull(candidate_points, extreme_point, right, convex_set)
-def convex_hull_melkman(points: List[Point]) -> List[Point]:
+def convex_hull_melkman(points: list[Point]) -> list[Point]:
"""
Constructs the convex hull of a set of 2D points using the melkman algorithm.
The algorithm works by iteratively inserting points of a simple polygonal chain
@@ -457,16 +458,16 @@ def convex_hull_melkman(points: List[Point]) -> List[Point]:
convex_hull[1] = points[i]
i += 1
- for i in range(i, n):
+ for j in range(i, n):
if (
- _det(convex_hull[0], convex_hull[-1], points[i]) > 0
+ _det(convex_hull[0], convex_hull[-1], points[j]) > 0
and _det(convex_hull[-1], convex_hull[0], points[1]) < 0
):
# The point lies within the convex hull
continue
- convex_hull.insert(0, points[i])
- convex_hull.append(points[i])
+ convex_hull.insert(0, points[j])
+ convex_hull.append(points[j])
while _det(convex_hull[0], convex_hull[1], convex_hull[2]) >= 0:
del convex_hull[1]
while _det(convex_hull[-1], convex_hull[-2], convex_hull[-3]) <= 0:
diff --git a/divide_and_conquer/inversions.py b/divide_and_conquer/inversions.py
index 9bb656229..35f78fe5c 100644
--- a/divide_and_conquer/inversions.py
+++ b/divide_and_conquer/inversions.py
@@ -2,31 +2,25 @@
Given an array-like data structure A[1..n], how many pairs
(i, j) for all 1 <= i < j <= n such that A[i] > A[j]? These pairs are
called inversions. Counting the number of such inversions in an array-like
-object is the important. Among other things, counting inversions can help
-us determine how close a given array is to being sorted
-
+object is important. Among other things, counting inversions can help
+us determine how close a given array is to being sorted.
In this implementation, I provide two algorithms, a divide-and-conquer
algorithm which runs in nlogn and the brute-force n^2 algorithm.
-
"""
def count_inversions_bf(arr):
"""
- Counts the number of inversions using a a naive brute-force algorithm
-
+ Counts the number of inversions using a naive brute-force algorithm
Parameters
----------
arr: arr: array-like, the list containing the items for which the number
of inversions is desired. The elements of `arr` must be comparable.
-
Returns
-------
num_inversions: The total number of inversions in `arr`
-
Examples
---------
-
>>> count_inversions_bf([1, 4, 2, 4, 1])
4
>>> count_inversions_bf([1, 1, 2, 4, 4])
@@ -49,20 +43,16 @@ def count_inversions_bf(arr):
def count_inversions_recursive(arr):
"""
Counts the number of inversions using a divide-and-conquer algorithm
-
Parameters
-----------
arr: array-like, the list containing the items for which the number
of inversions is desired. The elements of `arr` must be comparable.
-
Returns
-------
C: a sorted copy of `arr`.
num_inversions: int, the total number of inversions in 'arr'
-
Examples
--------
-
>>> count_inversions_recursive([1, 4, 2, 4, 1])
([1, 1, 2, 4, 4], 4)
>>> count_inversions_recursive([1, 1, 2, 4, 4])
@@ -72,66 +62,60 @@ def count_inversions_recursive(arr):
"""
if len(arr) <= 1:
return arr, 0
- else:
- mid = len(arr) // 2
- P = arr[0:mid]
- Q = arr[mid:]
+ mid = len(arr) // 2
+ p = arr[0:mid]
+ q = arr[mid:]
- A, inversion_p = count_inversions_recursive(P)
- B, inversions_q = count_inversions_recursive(Q)
- C, cross_inversions = _count_cross_inversions(A, B)
+ a, inversion_p = count_inversions_recursive(p)
+ b, inversions_q = count_inversions_recursive(q)
+ c, cross_inversions = _count_cross_inversions(a, b)
- num_inversions = inversion_p + inversions_q + cross_inversions
- return C, num_inversions
+ num_inversions = inversion_p + inversions_q + cross_inversions
+ return c, num_inversions
-def _count_cross_inversions(P, Q):
+def _count_cross_inversions(p, q):
"""
Counts the inversions across two sorted arrays.
And combine the two arrays into one sorted array
-
For all 1<= i<=len(P) and for all 1 <= j <= len(Q),
if P[i] > Q[j], then (i, j) is a cross inversion
-
Parameters
----------
P: array-like, sorted in non-decreasing order
Q: array-like, sorted in non-decreasing order
-
Returns
------
R: array-like, a sorted array of the elements of `P` and `Q`
num_inversion: int, the number of inversions across `P` and `Q`
-
Examples
--------
-
>>> _count_cross_inversions([1, 2, 3], [0, 2, 5])
([0, 1, 2, 2, 3, 5], 4)
>>> _count_cross_inversions([1, 2, 3], [3, 4, 5])
([1, 2, 3, 3, 4, 5], 0)
"""
- R = []
+ r = []
i = j = num_inversion = 0
- while i < len(P) and j < len(Q):
- if P[i] > Q[j]:
+ while i < len(p) and j < len(q):
+ if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
- num_inversion += len(P) - i
- R.append(Q[j])
+ num_inversion += len(p) - i
+ r.append(q[j])
j += 1
else:
- R.append(P[i])
+ r.append(p[i])
i += 1
- if i < len(P):
- R.extend(P[i:])
+ if i < len(p):
+ r.extend(p[i:])
else:
- R.extend(Q[j:])
+ r.extend(q[j:])
- return R, num_inversion
+ return r, num_inversion
def main():
diff --git a/divide_and_conquer/kth_order_statistic.py b/divide_and_conquer/kth_order_statistic.py
index f6e81a306..666ad1a39 100644
--- a/divide_and_conquer/kth_order_statistic.py
+++ b/divide_and_conquer/kth_order_statistic.py
@@ -8,8 +8,9 @@ This is a divide and conquer algorithm that can find a solution in O(n) time.
For more information of this algorithm:
https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf
"""
+from __future__ import annotations
+
from random import choice
-from typing import List
def random_pivot(lst):
@@ -21,7 +22,7 @@ def random_pivot(lst):
return choice(lst)
-def kth_number(lst: List[int], k: int) -> int:
+def kth_number(lst: list[int], k: int) -> int:
"""
Return the kth smallest number in lst.
>>> kth_number([2, 1, 3, 4, 5], 3)
diff --git a/divide_and_conquer/max_subarray.py b/divide_and_conquer/max_subarray.py
new file mode 100644
index 000000000..851ef621a
--- /dev/null
+++ b/divide_and_conquer/max_subarray.py
@@ -0,0 +1,112 @@
+"""
+The maximum subarray problem is the task of finding the contiguous subarray that has the
+maximum sum within a given array of numbers. For example, given the array
+[-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum is
+[4, -1, 2, 1], which has a sum of 6.
+
+This divide-and-conquer algorithm finds the maximum subarray in O(n log n) time.
+"""
+from __future__ import annotations
+
+import time
+from collections.abc import Sequence
+from random import randint
+
+from matplotlib import pyplot as plt
+
+
+def max_subarray(
+ arr: Sequence[float], low: int, high: int
+) -> tuple[int | None, int | None, float]:
+ """
+ Solves the maximum subarray problem using divide and conquer.
+ :param arr: the given array of numbers
+ :param low: the start index
+ :param high: the end index
+ :return: the start index of the maximum subarray, the end index of the
+ maximum subarray, and the maximum subarray sum
+
+ >>> nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
+ >>> max_subarray(nums, 0, len(nums) - 1)
+ (3, 6, 6)
+ >>> nums = [2, 8, 9]
+ >>> max_subarray(nums, 0, len(nums) - 1)
+ (0, 2, 19)
+ >>> nums = [0, 0]
+ >>> max_subarray(nums, 0, len(nums) - 1)
+ (0, 0, 0)
+ >>> nums = [-1.0, 0.0, 1.0]
+ >>> max_subarray(nums, 0, len(nums) - 1)
+ (2, 2, 1.0)
+ >>> nums = [-2, -3, -1, -4, -6]
+ >>> max_subarray(nums, 0, len(nums) - 1)
+ (2, 2, -1)
+ >>> max_subarray([], 0, 0)
+ (None, None, 0)
+ """
+ if not arr:
+ return None, None, 0
+ if low == high:
+ return low, high, arr[low]
+
+ mid = (low + high) // 2
+ left_low, left_high, left_sum = max_subarray(arr, low, mid)
+ right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
+ cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
+ if left_sum >= right_sum and left_sum >= cross_sum:
+ return left_low, left_high, left_sum
+ elif right_sum >= left_sum and right_sum >= cross_sum:
+ return right_low, right_high, right_sum
+ return cross_left, cross_right, cross_sum
+
+
+def max_cross_sum(
+ arr: Sequence[float], low: int, mid: int, high: int
+) -> tuple[int, int, float]:
+ left_sum, max_left = float("-inf"), -1
+ right_sum, max_right = float("-inf"), -1
+
+ summ: int | float = 0
+ for i in range(mid, low - 1, -1):
+ summ += arr[i]
+ if summ > left_sum:
+ left_sum = summ
+ max_left = i
+
+ summ = 0
+ for i in range(mid + 1, high + 1):
+ summ += arr[i]
+ if summ > right_sum:
+ right_sum = summ
+ max_right = i
+
+ return max_left, max_right, (left_sum + right_sum)
+
+
+def time_max_subarray(input_size: int) -> float:
+ arr = [randint(1, input_size) for _ in range(input_size)]
+ start = time.time()
+ max_subarray(arr, 0, input_size - 1)
+ end = time.time()
+ return end - start
+
+
+def plot_runtimes() -> None:
+ input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
+ runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
+ print("No of Inputs\t\tTime Taken")
+ for input_size, runtime in zip(input_sizes, runtimes):
+ print(input_size, "\t\t", runtime)
+ plt.plot(input_sizes, runtimes)
+ plt.xlabel("Number of Inputs")
+ plt.ylabel("Time taken in seconds")
+ plt.show()
+
+
+if __name__ == "__main__":
+ """
+ A random simulation of this algorithm.
+ """
+ from doctest import testmod
+
+ testmod()
diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py
deleted file mode 100644
index 43f58086e..000000000
--- a/divide_and_conquer/max_subarray_sum.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""
-Given a array of length n, max_subarray_sum() finds
-the maximum of sum of contiguous sub-array using divide and conquer method.
-
-Time complexity : O(n log n)
-
-Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION
-(section : 4, sub-section : 4.1, page : 70)
-
-"""
-
-
-def max_sum_from_start(array):
- """This function finds the maximum contiguous sum of array from 0 index
-
- Parameters :
- array (list[int]) : given array
-
- Returns :
- max_sum (int) : maximum contiguous sum of array from 0 index
-
- """
- array_sum = 0
- max_sum = float("-inf")
- for num in array:
- array_sum += num
- if array_sum > max_sum:
- max_sum = array_sum
- return max_sum
-
-
-def max_cross_array_sum(array, left, mid, right):
- """This function finds the maximum contiguous sum of left and right arrays
-
- Parameters :
- array, left, mid, right (list[int], int, int, int)
-
- Returns :
- (int) : maximum of sum of contiguous sum of left and right arrays
-
- """
-
- max_sum_of_left = max_sum_from_start(array[left : mid + 1][::-1])
- max_sum_of_right = max_sum_from_start(array[mid + 1 : right + 1])
- return max_sum_of_left + max_sum_of_right
-
-
-def max_subarray_sum(array, left, right):
- """Maximum contiguous sub-array sum, using divide and conquer method
-
- Parameters :
- array, left, right (list[int], int, int) :
- given array, current left index and current right index
-
- Returns :
- int : maximum of sum of contiguous sub-array
-
- """
-
- # base case: array has only one element
- if left == right:
- return array[right]
-
- # Recursion
- mid = (left + right) // 2
- left_half_sum = max_subarray_sum(array, left, mid)
- right_half_sum = max_subarray_sum(array, mid + 1, right)
- cross_sum = max_cross_array_sum(array, left, mid, right)
- return max(left_half_sum, right_half_sum, cross_sum)
-
-
-array = [-2, -5, 6, -2, -3, 1, 5, -6]
-array_length = len(array)
-print(
- "Maximum sum of contiguous subarray:", max_subarray_sum(array, 0, array_length - 1)
-)
diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py
index 46a46941c..628080cef 100644
--- a/divide_and_conquer/mergesort.py
+++ b/divide_and_conquer/mergesort.py
@@ -1,7 +1,7 @@
-from typing import List
+from __future__ import annotations
-def merge(left_half: List, right_half: List) -> List:
+def merge(left_half: list, right_half: list) -> list:
"""Helper function for mergesort.
>>> left_half = [-2]
@@ -57,7 +57,7 @@ def merge(left_half: List, right_half: List) -> List:
return sorted_array
-def merge_sort(array: List) -> List:
+def merge_sort(array: list) -> list:
"""Returns a list of sorted array elements using merge sort.
>>> from random import shuffle
diff --git a/divide_and_conquer/peak.py b/divide_and_conquer/peak.py
index f94f83ed3..e60f28bfb 100644
--- a/divide_and_conquer/peak.py
+++ b/divide_and_conquer/peak.py
@@ -7,10 +7,10 @@ to find the maximum of the array.
(From Kleinberg and Tardos. Algorithm Design.
Addison Wesley 2006: Chapter 5 Solved Exercise 1)
"""
-from typing import List
+from __future__ import annotations
-def peak(lst: List[int]) -> int:
+def peak(lst: list[int]) -> int:
"""
Return the peak value of `lst`.
>>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py
index ca10e04ab..cbfc7e565 100644
--- a/divide_and_conquer/strassen_matrix_multiplication.py
+++ b/divide_and_conquer/strassen_matrix_multiplication.py
@@ -68,8 +68,7 @@ def matrix_dimensions(matrix: list) -> tuple[int, int]:
def print_matrix(matrix: list) -> None:
- for i in range(len(matrix)):
- print(matrix[i])
+ print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
@@ -113,17 +112,19 @@ def strassen(matrix1: list, matrix2: list) -> list:
[[139, 163], [121, 134], [100, 121]]
"""
if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
- raise Exception(
- f"Unable to multiply these matrices, please check the dimensions. \n"
- f"Matrix A:{matrix1} \nMatrix B:{matrix2}"
+ msg = (
+ "Unable to multiply these matrices, please check the dimensions.\n"
+ f"Matrix A: {matrix1}\n"
+ f"Matrix B: {matrix2}"
)
+ raise Exception(msg)
dimension1 = matrix_dimensions(matrix1)
dimension2 = matrix_dimensions(matrix2)
if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
return [matrix1, matrix2]
- maximum = max(max(dimension1), max(dimension2))
+ maximum = max(*dimension1, *dimension2)
maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
new_matrix1 = matrix1
new_matrix2 = matrix2
@@ -132,12 +133,12 @@ def strassen(matrix1: list, matrix2: list) -> list:
# power of 2
for i in range(0, maxim):
if i < dimension1[0]:
- for j in range(dimension1[1], maxim):
+ for _ in range(dimension1[1], maxim):
new_matrix1[i].append(0)
else:
new_matrix1.append([0] * maxim)
if i < dimension2[0]:
- for j in range(dimension2[1], maxim):
+ for _ in range(dimension2[1], maxim):
new_matrix2[i].append(0)
else:
new_matrix2.append([0] * maxim)
@@ -147,7 +148,7 @@ def strassen(matrix1: list, matrix2: list) -> list:
# Removing the additional zeros
for i in range(0, maxim):
if i < dimension1[0]:
- for j in range(dimension2[1], maxim):
+ for _ in range(dimension2[1], maxim):
final_matrix[i].pop()
else:
final_matrix.pop()
diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py
new file mode 100644
index 000000000..6e53a702c
--- /dev/null
+++ b/dynamic_programming/all_construct.py
@@ -0,0 +1,58 @@
+"""
+Program to list all the ways a target string can be
+constructed from the given list of substrings
+"""
+from __future__ import annotations
+
+
+def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
+ """
+ returns the list containing all the possible
+ combinations a string(target) can be constructed from
+ the given list of substrings(word_bank)
+ >>> all_construct("hello", ["he", "l", "o"])
+ [['he', 'l', 'l', 'o']]
+ >>> all_construct("purple",["purp","p","ur","le","purpl"])
+ [['purp', 'le'], ['p', 'ur', 'p', 'le']]
+ """
+
+ word_bank = word_bank or []
+ # create a table
+ table_size: int = len(target) + 1
+
+ table: list[list[list[str]]] = []
+ for _ in range(table_size):
+ table.append([])
+ # seed value
+ table[0] = [[]] # because empty string has empty combination
+
+ # iterate through the indices
+ for i in range(table_size):
+ # condition
+ if table[i] != []:
+ for word in word_bank:
+ # slice condition
+ if target[i : i + len(word)] == word:
+ new_combinations: list[list[str]] = [
+ [word, *way] for way in table[i]
+ ]
+ # adds the word to every combination the current position holds
+                    # now, push that combination to the table[i+len(word)]
+ table[i + len(word)] += new_combinations
+
+ # combinations are in reverse order so reverse for better output
+ for combination in table[len(target)]:
+ combination.reverse()
+
+ return table[len(target)]
+
+
+if __name__ == "__main__":
+ print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
+ print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
+ print(
+ all_construct(
+ "hexagonosaurus",
+ ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
+ )
+ )
diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py
index 2994db5b5..56bb8e96b 100644
--- a/dynamic_programming/bitmask.py
+++ b/dynamic_programming/bitmask.py
@@ -13,7 +13,6 @@ from collections import defaultdict
class AssignmentUsingBitmask:
def __init__(self, task_performed, total):
-
self.total_tasks = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
@@ -28,8 +27,7 @@ class AssignmentUsingBitmask:
# to 1
self.final_mask = (1 << len(task_performed)) - 1
- def CountWaysUtil(self, mask, task_no):
-
+ def count_ways_until(self, mask, task_no):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
@@ -43,45 +41,42 @@ class AssignmentUsingBitmask:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
- total_ways_util = self.CountWaysUtil(mask, task_no + 1)
+ total_ways_util = self.count_ways_until(mask, task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
-
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
- total_ways_util += self.CountWaysUtil(mask | (1 << p), task_no + 1)
+ total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
# save the value.
self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
- def countNoOfWays(self, task_performed):
-
+ def count_no_of_ways(self, task_performed):
# Store the list of persons for each task
for i in range(len(task_performed)):
for j in task_performed[i]:
self.task[j].append(i)
# call the function to fill the DP table, final answer is stored in dp[0][1]
- return self.CountWaysUtil(0, 1)
+ return self.count_ways_until(0, 1)
if __name__ == "__main__":
-
total_tasks = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
- AssignmentUsingBitmask(task_performed, total_tasks).countNoOfWays(
+ AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
diff --git a/dynamic_programming/climbing_stairs.py b/dynamic_programming/climbing_stairs.py
index 048d57aed..d6273d025 100644
--- a/dynamic_programming/climbing_stairs.py
+++ b/dynamic_programming/climbing_stairs.py
@@ -1,20 +1,20 @@
#!/usr/bin/env python3
-def climb_stairs(n: int) -> int:
+def climb_stairs(number_of_steps: int) -> int:
"""
LeetCdoe No.70: Climbing Stairs
- Distinct ways to climb a n step staircase where
- each time you can either climb 1 or 2 steps.
+ Distinct ways to climb a number_of_steps staircase where each time you can either
+ climb 1 or 2 steps.
Args:
- n: number of steps of staircase
+ number_of_steps: number of steps on the staircase
Returns:
- Distinct ways to climb a n step staircase
+ Distinct ways to climb a number_of_steps staircase
Raises:
- AssertionError: n not positive integer
+ AssertionError: number_of_steps not positive integer
>>> climb_stairs(3)
3
@@ -23,18 +23,17 @@ def climb_stairs(n: int) -> int:
>>> climb_stairs(-7) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
- AssertionError: n needs to be positive integer, your input -7
+ AssertionError: number_of_steps needs to be positive integer, your input -7
"""
assert (
- isinstance(n, int) and n > 0
- ), f"n needs to be positive integer, your input {n}"
- if n == 1:
+ isinstance(number_of_steps, int) and number_of_steps > 0
+ ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
+ if number_of_steps == 1:
return 1
- dp = [0] * (n + 1)
- dp[0], dp[1] = (1, 1)
- for i in range(2, n + 1):
- dp[i] = dp[i - 1] + dp[i - 2]
- return dp[n]
+ previous, current = 1, 1
+ for _ in range(number_of_steps - 1):
+ current, previous = current + previous, current
+ return current
if __name__ == "__main__":
diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py
new file mode 100644
index 000000000..b2aeb0824
--- /dev/null
+++ b/dynamic_programming/combination_sum_iv.py
@@ -0,0 +1,102 @@
+"""
+Question:
+You are given an array of distinct integers and you have to tell how many
+different ways of selecting the elements from the array are there such that
+the sum of chosen elements is equal to the target number tar.
+
+Example
+
+Input:
+N = 3
+target = 5
+array = [1, 2, 5]
+
+Output:
+9
+
+Approach:
+The basic idea is to recurse over the elements to count the ways in which the
+sum of chosen elements equals the target. For every element, we have two choices:
+    1. Include the element in our set of chosen elements.
+    2. Don't include the element in our set of chosen elements.
+"""
+
+
+def combination_sum_iv(n: int, array: list[int], target: int) -> int:
+ """
+    Checks all possible combinations and returns the count of
+    possible combinations, in exponential time complexity.
+
+ >>> combination_sum_iv(3, [1,2,5], 5)
+ 9
+ """
+
+ def count_of_possible_combinations(target: int) -> int:
+ if target < 0:
+ return 0
+ if target == 0:
+ return 1
+ return sum(count_of_possible_combinations(target - item) for item in array)
+
+ return count_of_possible_combinations(target)
+
+
+def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
+ """
+    Checks all possible combinations and returns the count of possible
+    combinations, in O(N^2) time complexity since we are using a dynamic
+    programming array here.
+
+ >>> combination_sum_iv_dp_array(3, [1,2,5], 5)
+ 9
+ """
+
+ def count_of_possible_combinations_with_dp_array(
+ target: int, dp_array: list[int]
+ ) -> int:
+ if target < 0:
+ return 0
+ if target == 0:
+ return 1
+ if dp_array[target] != -1:
+ return dp_array[target]
+ answer = sum(
+ count_of_possible_combinations_with_dp_array(target - item, dp_array)
+ for item in array
+ )
+ dp_array[target] = answer
+ return answer
+
+ dp_array = [-1] * (target + 1)
+ return count_of_possible_combinations_with_dp_array(target, dp_array)
+
+
+def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
+ """
+    Checks all possible combinations using a bottom-up approach, and
+    returns the count of possible combinations in O(N^2) time complexity
+    since we are using a dynamic programming array here.
+
+ >>> combination_sum_iv_bottom_up(3, [1,2,5], 5)
+ 9
+ """
+
+ dp_array = [0] * (target + 1)
+ dp_array[0] = 1
+
+ for i in range(1, target + 1):
+ for j in range(n):
+ if i - array[j] >= 0:
+ dp_array[i] += dp_array[i - array[j]]
+
+ return dp_array[target]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ n = 3
+ target = 5
+ array = [1, 2, 5]
+ print(combination_sum_iv(n, array, target))
diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py
index 56877e0c5..774aa0473 100644
--- a/dynamic_programming/edit_distance.py
+++ b/dynamic_programming/edit_distance.py
@@ -19,74 +19,72 @@ class EditDistance:
"""
def __init__(self):
- self.__prepare__()
+ self.word1 = ""
+ self.word2 = ""
+ self.dp = []
- def __prepare__(self, N=0, M=0):
- self.dp = [[-1 for y in range(0, M)] for x in range(0, N)]
-
- def __solveDP(self, x, y):
- if x == -1:
- return y + 1
- elif y == -1:
- return x + 1
- elif self.dp[x][y] > -1:
- return self.dp[x][y]
+ def __min_dist_top_down_dp(self, m: int, n: int) -> int:
+ if m == -1:
+ return n + 1
+ elif n == -1:
+ return m + 1
+ elif self.dp[m][n] > -1:
+ return self.dp[m][n]
else:
- if self.A[x] == self.B[y]:
- self.dp[x][y] = self.__solveDP(x - 1, y - 1)
+ if self.word1[m] == self.word2[n]:
+ self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
else:
- self.dp[x][y] = 1 + min(
- self.__solveDP(x, y - 1),
- self.__solveDP(x - 1, y),
- self.__solveDP(x - 1, y - 1),
- )
+ insert = self.__min_dist_top_down_dp(m, n - 1)
+ delete = self.__min_dist_top_down_dp(m - 1, n)
+ replace = self.__min_dist_top_down_dp(m - 1, n - 1)
+ self.dp[m][n] = 1 + min(insert, delete, replace)
- return self.dp[x][y]
+ return self.dp[m][n]
- def solve(self, A, B):
- if isinstance(A, bytes):
- A = A.decode("ascii")
+ def min_dist_top_down(self, word1: str, word2: str) -> int:
+ """
+ >>> EditDistance().min_dist_top_down("intention", "execution")
+ 5
+ >>> EditDistance().min_dist_top_down("intention", "")
+ 9
+ >>> EditDistance().min_dist_top_down("", "")
+ 0
+ """
+ self.word1 = word1
+ self.word2 = word2
+ self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
- if isinstance(B, bytes):
- B = B.decode("ascii")
+ return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
- self.A = str(A)
- self.B = str(B)
+ def min_dist_bottom_up(self, word1: str, word2: str) -> int:
+ """
+ >>> EditDistance().min_dist_bottom_up("intention", "execution")
+ 5
+ >>> EditDistance().min_dist_bottom_up("intention", "")
+ 9
+ >>> EditDistance().min_dist_bottom_up("", "")
+ 0
+ """
+ self.word1 = word1
+ self.word2 = word2
+ m = len(word1)
+ n = len(word2)
+ self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
- self.__prepare__(len(A), len(B))
-
- return self.__solveDP(len(A) - 1, len(B) - 1)
-
-
-def min_distance_bottom_up(word1: str, word2: str) -> int:
- """
- >>> min_distance_bottom_up("intention", "execution")
- 5
- >>> min_distance_bottom_up("intention", "")
- 9
- >>> min_distance_bottom_up("", "")
- 0
- """
- m = len(word1)
- n = len(word2)
- dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
- for i in range(m + 1):
- for j in range(n + 1):
-
- if i == 0: # first string is empty
- dp[i][j] = j
- elif j == 0: # second string is empty
- dp[i][j] = i
- elif (
- word1[i - 1] == word2[j - 1]
- ): # last character of both substing is equal
- dp[i][j] = dp[i - 1][j - 1]
- else:
- insert = dp[i][j - 1]
- delete = dp[i - 1][j]
- replace = dp[i - 1][j - 1]
- dp[i][j] = 1 + min(insert, delete, replace)
- return dp[m][n]
+ for i in range(m + 1):
+ for j in range(n + 1):
+ if i == 0: # first string is empty
+ self.dp[i][j] = j
+ elif j == 0: # second string is empty
+ self.dp[i][j] = i
+ elif word1[i - 1] == word2[j - 1]: # last characters are equal
+ self.dp[i][j] = self.dp[i - 1][j - 1]
+ else:
+ insert = self.dp[i][j - 1]
+ delete = self.dp[i - 1][j]
+ replace = self.dp[i - 1][j - 1]
+ self.dp[i][j] = 1 + min(insert, delete, replace)
+ return self.dp[m][n]
if __name__ == "__main__":
@@ -99,7 +97,7 @@ if __name__ == "__main__":
S2 = input("Enter the second string: ").strip()
print()
- print("The minimum Edit Distance is: %d" % (solver.solve(S1, S2)))
- print("The minimum Edit Distance is: %d" % (min_distance_bottom_up(S1, S2)))
+ print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
+ print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py
index cab1358dd..c102493aa 100644
--- a/dynamic_programming/fibonacci.py
+++ b/dynamic_programming/fibonacci.py
@@ -5,61 +5,47 @@ sequence problem.
class Fibonacci:
- def __init__(self, N=None):
- self.fib_array = []
- if N:
- N = int(N)
- self.fib_array.append(0)
- self.fib_array.append(1)
- for i in range(2, N + 1):
- self.fib_array.append(self.fib_array[i - 1] + self.fib_array[i - 2])
- elif N == 0:
- self.fib_array.append(0)
- print(self.fib_array)
+ def __init__(self) -> None:
+ self.sequence = [0, 1]
- def get(self, sequence_no=None):
+ def get(self, index: int) -> list:
"""
- >>> Fibonacci(5).get(3)
- [0, 1, 1, 2, 3, 5]
- [0, 1, 1, 2]
- >>> Fibonacci(5).get(6)
- [0, 1, 1, 2, 3, 5]
- Out of bound.
- >>> Fibonacci(5).get(-1)
- [0, 1, 1, 2, 3, 5]
- []
+        Get the Fibonacci sequence up to (but not including) `index`. If the
+        required numbers have not been computed yet, extend the cached sequence.
+
+ >>> Fibonacci().get(10)
+ [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
+ >>> Fibonacci().get(5)
+ [0, 1, 1, 2, 3]
"""
- if sequence_no is not None:
- if sequence_no < len(self.fib_array):
- return print(self.fib_array[: sequence_no + 1])
- else:
- print("Out of bound.")
- else:
- print("Please specify a value")
+ if (difference := index - (len(self.sequence) - 2)) >= 1:
+ for _ in range(difference):
+ self.sequence.append(self.sequence[-1] + self.sequence[-2])
+ return self.sequence[:index]
+
+
+def main() -> None:
+ print(
+ "Fibonacci Series Using Dynamic Programming\n",
+ "Enter the index of the Fibonacci number you want to calculate ",
+ "in the prompt below. (To exit enter exit or Ctrl-C)\n",
+ sep="",
+ )
+ fibonacci = Fibonacci()
+
+ while True:
+ prompt: str = input(">> ")
+ if prompt in {"exit", "quit"}:
+ break
+
+ try:
+ index: int = int(prompt)
+ except ValueError:
+ print("Enter a number or 'exit'")
+ continue
+
+ print(fibonacci.get(index))
if __name__ == "__main__":
- print("\n********* Fibonacci Series Using Dynamic Programming ************\n")
- print("\n Enter the upper limit for the fibonacci sequence: ", end="")
- try:
- N = int(input().strip())
- fib = Fibonacci(N)
- print(
- "\n********* Enter different values to get the corresponding fibonacci "
- "sequence, enter any negative number to exit. ************\n"
- )
- while True:
- try:
- i = int(input("Enter value: ").strip())
- if i < 0:
- print("\n********* Good Bye!! ************\n")
- break
- fib.get(i)
- except NameError:
- print("\nInvalid input, please try again.")
- except NameError:
- print("\n********* Invalid input, good bye!! ************\n")
-
- import doctest
-
- doctest.testmod()
+ main()
diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py
new file mode 100644
index 000000000..e29116437
--- /dev/null
+++ b/dynamic_programming/fizz_buzz.py
@@ -0,0 +1,64 @@
+# https://en.wikipedia.org/wiki/Fizz_buzz#Programming
+
+
+def fizz_buzz(number: int, iterations: int) -> str:
+ """
+ Plays FizzBuzz.
+ Prints Fizz if number is a multiple of 3.
+    Prints Buzz if it's a multiple of 5.
+    Prints FizzBuzz if it's a multiple of both 3 and 5 (i.e. of 15).
+ Else Prints The Number Itself.
+ >>> fizz_buzz(1,7)
+ '1 2 Fizz 4 Buzz Fizz 7 '
+ >>> fizz_buzz(1,0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Iterations must be done more than 0 times to play FizzBuzz
+ >>> fizz_buzz(-5,5)
+ Traceback (most recent call last):
+ ...
+ ValueError: starting number must be
+ and integer and be more than 0
+ >>> fizz_buzz(10,-5)
+ Traceback (most recent call last):
+ ...
+ ValueError: Iterations must be done more than 0 times to play FizzBuzz
+ >>> fizz_buzz(1.5,5)
+ Traceback (most recent call last):
+ ...
+ ValueError: starting number must be
+ and integer and be more than 0
+ >>> fizz_buzz(1,5.5)
+ Traceback (most recent call last):
+ ...
+ ValueError: iterations must be defined as integers
+ """
+ if not isinstance(iterations, int):
+ raise ValueError("iterations must be defined as integers")
+ if not isinstance(number, int) or not number >= 1:
+ raise ValueError(
+ """starting number must be
+ and integer and be more than 0"""
+ )
+ if not iterations >= 1:
+ raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
+
+ out = ""
+ while number <= iterations:
+ if number % 3 == 0:
+ out += "Fizz"
+ if number % 5 == 0:
+ out += "Buzz"
+ if 0 not in (number % 3, number % 5):
+ out += str(number)
+
+ # print(out)
+ number += 1
+ out += " "
+ return out
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py
index a4b6c6a82..614a3c72a 100644
--- a/dynamic_programming/floyd_warshall.py
+++ b/dynamic_programming/floyd_warshall.py
@@ -2,41 +2,41 @@ import math
class Graph:
- def __init__(self, N=0): # a graph with Node 0,1,...,N-1
- self.N = N
- self.W = [
- [math.inf for j in range(0, N)] for i in range(0, N)
+ def __init__(self, n=0): # a graph with Node 0,1,...,N-1
+ self.n = n
+ self.w = [
+ [math.inf for j in range(0, n)] for i in range(0, n)
] # adjacency matrix for weight
self.dp = [
- [math.inf for j in range(0, N)] for i in range(0, N)
+ [math.inf for j in range(0, n)] for i in range(0, n)
] # dp[i][j] stores minimum distance from i to j
- def addEdge(self, u, v, w):
+ def add_edge(self, u, v, w):
self.dp[u][v] = w
def floyd_warshall(self):
- for k in range(0, self.N):
- for i in range(0, self.N):
- for j in range(0, self.N):
+ for k in range(0, self.n):
+ for i in range(0, self.n):
+ for j in range(0, self.n):
self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
- def showMin(self, u, v):
+ def show_min(self, u, v):
return self.dp[u][v]
if __name__ == "__main__":
graph = Graph(5)
- graph.addEdge(0, 2, 9)
- graph.addEdge(0, 4, 10)
- graph.addEdge(1, 3, 5)
- graph.addEdge(2, 3, 7)
- graph.addEdge(3, 0, 10)
- graph.addEdge(3, 1, 2)
- graph.addEdge(3, 2, 1)
- graph.addEdge(3, 4, 6)
- graph.addEdge(4, 1, 3)
- graph.addEdge(4, 2, 4)
- graph.addEdge(4, 3, 9)
+ graph.add_edge(0, 2, 9)
+ graph.add_edge(0, 4, 10)
+ graph.add_edge(1, 3, 5)
+ graph.add_edge(2, 3, 7)
+ graph.add_edge(3, 0, 10)
+ graph.add_edge(3, 1, 2)
+ graph.add_edge(3, 2, 1)
+ graph.add_edge(3, 4, 6)
+ graph.add_edge(4, 1, 3)
+ graph.add_edge(4, 2, 4)
+ graph.add_edge(4, 3, 9)
graph.floyd_warshall()
- graph.showMin(1, 4)
- graph.showMin(0, 3)
+ graph.show_min(1, 4)
+ graph.show_min(0, 3)
diff --git a/dynamic_programming/integer_partition.py b/dynamic_programming/integer_partition.py
index 4eb06348c..8ed2e51bd 100644
--- a/dynamic_programming/integer_partition.py
+++ b/dynamic_programming/integer_partition.py
@@ -6,8 +6,8 @@ into k parts. These two facts together are used for this algorithm.
"""
-def partition(m):
- memo = [[0 for _ in range(m)] for _ in range(m + 1)]
+def partition(m: int) -> int:
+ memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
for i in range(m + 1):
memo[i][0] = 1
diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py
index 21c64dba4..4d0a250e8 100644
--- a/dynamic_programming/iterating_through_submasks.py
+++ b/dynamic_programming/iterating_through_submasks.py
@@ -9,7 +9,6 @@ from __future__ import annotations
def list_of_submasks(mask: int) -> list[int]:
-
"""
Args:
mask : number which shows mask ( always integer > 0, zero does not have any
diff --git a/dynamic_programming/k_means_clustering_tensorflow.py_tf b/dynamic_programming/k_means_clustering_tensorflow.py
similarity index 98%
rename from dynamic_programming/k_means_clustering_tensorflow.py_tf
rename to dynamic_programming/k_means_clustering_tensorflow.py
index 4fbcedeaa..8d3f6f0df 100644
--- a/dynamic_programming/k_means_clustering_tensorflow.py_tf
+++ b/dynamic_programming/k_means_clustering_tensorflow.py
@@ -1,9 +1,10 @@
-import tensorflow as tf
from random import shuffle
+
+import tensorflow as tf
from numpy import array
-def TFKMeansCluster(vectors, noofclusters):
+def tf_k_means_cluster(vectors, noofclusters):
"""
K-Means Clustering using TensorFlow.
'vectors' should be a n*k 2-D NumPy array, where n is the number
@@ -30,7 +31,6 @@ def TFKMeansCluster(vectors, noofclusters):
graph = tf.Graph()
with graph.as_default():
-
# SESSION OF COMPUTATION
sess = tf.Session()
@@ -95,8 +95,7 @@ def TFKMeansCluster(vectors, noofclusters):
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
noofiterations = 100
- for iteration_n in range(noofiterations):
-
+ for _ in range(noofiterations):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py
index 69e54c00a..489b5ada4 100644
--- a/dynamic_programming/knapsack.py
+++ b/dynamic_programming/knapsack.py
@@ -1,45 +1,45 @@
"""
Given weights and values of n items, put these items in a knapsack of
- capacity W to get the maximum total value in the knapsack.
+capacity W to get the maximum total value in the knapsack.
Note that only the integer weights 0-1 knapsack problem is solvable
- using dynamic programming.
+using dynamic programming.
"""
-def MF_knapsack(i, wt, val, j):
+def mf_knapsack(i, wt, val, j):
"""
This code involves the concept of memory functions. Here we solve the subproblems
which are needed unlike the below example
F is a 2D array with -1s filled up
"""
- global F # a global dp table for knapsack
- if F[i][j] < 0:
+ global f # a global dp table for knapsack
+ if f[i][j] < 0:
if j < wt[i - 1]:
- val = MF_knapsack(i - 1, wt, val, j)
+ val = mf_knapsack(i - 1, wt, val, j)
else:
val = max(
- MF_knapsack(i - 1, wt, val, j),
- MF_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
+ mf_knapsack(i - 1, wt, val, j),
+ mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
)
- F[i][j] = val
- return F[i][j]
+ f[i][j] = val
+ return f[i][j]
-def knapsack(W, wt, val, n):
- dp = [[0 for i in range(W + 1)] for j in range(n + 1)]
+def knapsack(w, wt, val, n):
+ dp = [[0] * (w + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
- for w in range(1, W + 1):
- if wt[i - 1] <= w:
- dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w])
+ for w_ in range(1, w + 1):
+ if wt[i - 1] <= w_:
+ dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
else:
- dp[i][w] = dp[i - 1][w]
+ dp[i][w_] = dp[i - 1][w_]
- return dp[n][W], dp
+ return dp[n][w_], dp
-def knapsack_with_example_solution(W: int, wt: list, val: list):
+def knapsack_with_example_solution(w: int, wt: list, val: list):
"""
Solves the integer weights knapsack problem returns one of
the several possible optimal subsets.
@@ -78,21 +78,22 @@ def knapsack_with_example_solution(W: int, wt: list, val: list):
num_items = len(wt)
if num_items != len(val):
- raise ValueError(
- "The number of weights must be the "
- "same as the number of values.\nBut "
- f"got {num_items} weights and {len(val)} values"
+ msg = (
+ "The number of weights must be the same as the number of values.\n"
+ f"But got {num_items} weights and {len(val)} values"
)
+ raise ValueError(msg)
for i in range(num_items):
if not isinstance(wt[i], int):
- raise TypeError(
- "All weights must be integers but "
- f"got weight of type {type(wt[i])} at index {i}"
+ msg = (
+ "All weights must be integers but got weight of "
+ f"type {type(wt[i])} at index {i}"
)
+ raise TypeError(msg)
- optimal_val, dp_table = knapsack(W, wt, val, num_items)
- example_optional_set = set()
- _construct_solution(dp_table, wt, num_items, W, example_optional_set)
+ optimal_val, dp_table = knapsack(w, wt, val, num_items)
+ example_optional_set: set = set()
+ _construct_solution(dp_table, wt, num_items, w, example_optional_set)
return optimal_val, example_optional_set
@@ -108,7 +109,7 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
dp: list of list, the table of a solved integer weight dynamic programming problem
wt: list or tuple, the vector of weights of the items
- i: int, the index of the item under consideration
+ i: int, the index of the item under consideration
j: int, the current possible maximum weight
optimal_set: set, the optimal subset so far. This gets modified by the function.
@@ -136,10 +137,10 @@ if __name__ == "__main__":
wt = [4, 3, 2, 3]
n = 4
w = 6
- F = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)]
+ f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
- print(MF_knapsack(n, wt, val, w)) # switched the n and w
+ print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py
index fdcf3311a..178b4169b 100644
--- a/dynamic_programming/longest_common_subsequence.py
+++ b/dynamic_programming/longest_common_subsequence.py
@@ -38,36 +38,30 @@ def longest_common_subsequence(x: str, y: str):
n = len(y)
# declaring the array for storing the dp values
- L = [[0] * (n + 1) for _ in range(m + 1)]
+ l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741
for i in range(1, m + 1):
for j in range(1, n + 1):
- if x[i - 1] == y[j - 1]:
- match = 1
- else:
- match = 0
+ match = 1 if x[i - 1] == y[j - 1] else 0
- L[i][j] = max(L[i - 1][j], L[i][j - 1], L[i - 1][j - 1] + match)
+ l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
seq = ""
i, j = m, n
while i > 0 and j > 0:
- if x[i - 1] == y[j - 1]:
- match = 1
- else:
- match = 0
+ match = 1 if x[i - 1] == y[j - 1] else 0
- if L[i][j] == L[i - 1][j - 1] + match:
+ if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
seq = x[i - 1] + seq
i -= 1
j -= 1
- elif L[i][j] == L[i - 1][j]:
+ elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
- return L[m][n], seq
+ return l[m][n], seq
if __name__ == "__main__":
diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py
new file mode 100644
index 000000000..e2f944a5e
--- /dev/null
+++ b/dynamic_programming/longest_common_substring.py
@@ -0,0 +1,63 @@
+"""
+Longest Common Substring Problem Statement: Given two sequences, find the
+longest common substring present in both of them. A substring is
+necessarily contiguous.
+Example: "abcdef" and "xabded" have two longest common substrings, "ab" or "de".
+Therefore, the algorithm should return any one of them.
+"""
+
+
+def longest_common_substring(text1: str, text2: str) -> str:
+ """
+ Finds the longest common substring between two strings.
+ >>> longest_common_substring("", "")
+ ''
+ >>> longest_common_substring("a","")
+ ''
+ >>> longest_common_substring("", "a")
+ ''
+ >>> longest_common_substring("a", "a")
+ 'a'
+ >>> longest_common_substring("abcdef", "bcd")
+ 'bcd'
+ >>> longest_common_substring("abcdef", "xabded")
+ 'ab'
+ >>> longest_common_substring("GeeksforGeeks", "GeeksQuiz")
+ 'Geeks'
+ >>> longest_common_substring("abcdxyz", "xyzabcd")
+ 'abcd'
+ >>> longest_common_substring("zxabcdezy", "yzabcdezx")
+ 'abcdez'
+ >>> longest_common_substring("OldSite:GeeksforGeeks.org", "NewSite:GeeksQuiz.com")
+ 'Site:Geeks'
+ >>> longest_common_substring(1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: longest_common_substring() takes two strings for inputs
+ """
+
+ if not (isinstance(text1, str) and isinstance(text2, str)):
+ raise ValueError("longest_common_substring() takes two strings for inputs")
+
+ text1_length = len(text1)
+ text2_length = len(text2)
+
+ dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
+ ans_index = 0
+ ans_length = 0
+
+ for i in range(1, text1_length + 1):
+ for j in range(1, text2_length + 1):
+ if text1[i - 1] == text2[j - 1]:
+ dp[i][j] = 1 + dp[i - 1][j - 1]
+ if dp[i][j] > ans_length:
+ ans_index = i
+ ans_length = dp[i][j]
+
+ return text1[ans_index - ans_length : ans_index]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py
index f5ca8a2b5..d82789376 100644
--- a/dynamic_programming/longest_increasing_subsequence.py
+++ b/dynamic_programming/longest_increasing_subsequence.py
@@ -34,12 +34,12 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu
return array
# Else
pivot = array[0]
- isFound = False
+ is_found = False
i = 1
- longest_subseq = []
- while not isFound and i < array_length:
+ longest_subseq: list[int] = []
+ while not is_found and i < array_length:
if array[i] < pivot:
- isFound = True
+ is_found = True
temp_array = [element for element in array[i:] if element >= array[i]]
temp_array = longest_subsequence(temp_array)
if len(temp_array) > len(longest_subseq):
@@ -48,7 +48,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu
i += 1
temp_array = [element for element in array[1:] if element >= pivot]
- temp_array = [pivot] + longest_subsequence(temp_array)
+ temp_array = [pivot, *longest_subsequence(temp_array)]
if len(temp_array) > len(longest_subseq):
return temp_array
else:
diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py
index af536f8bb..5e11d729f 100644
--- a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py
+++ b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py
@@ -7,7 +7,7 @@
from __future__ import annotations
-def CeilIndex(v, l, r, key): # noqa: E741
+def ceil_index(v, l, r, key): # noqa: E741
while r - l > 1:
m = (l + r) // 2
if v[m] >= key:
@@ -17,16 +17,16 @@ def CeilIndex(v, l, r, key): # noqa: E741
return r
-def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
+def longest_increasing_subsequence_length(v: list[int]) -> int:
"""
- >>> LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6])
+ >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
6
- >>> LongestIncreasingSubsequenceLength([])
+ >>> longest_increasing_subsequence_length([])
0
- >>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3,
- ... 11, 7, 15])
+ >>> longest_increasing_subsequence_length([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13,
+ ... 3, 11, 7, 15])
6
- >>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1])
+ >>> longest_increasing_subsequence_length([5, 4, 3, 2, 1])
1
"""
if len(v) == 0:
@@ -44,7 +44,7 @@ def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
tail[length] = v[i]
length += 1
else:
- tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]
+ tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
return length
diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py
index 30159a138..b477acf61 100644
--- a/dynamic_programming/longest_sub_array.py
+++ b/dynamic_programming/longest_sub_array.py
@@ -14,7 +14,6 @@ class SubArray:
def __init__(self, arr):
# we need a list not a string, so do something to change the type
self.array = arr.split(",")
- print(("the input array is:", self.array))
def solve_sub_array(self):
rear = [int(self.array[0])] * len(self.array)
diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py
index 9411bc704..d612aea7b 100644
--- a/dynamic_programming/matrix_chain_order.py
+++ b/dynamic_programming/matrix_chain_order.py
@@ -8,34 +8,34 @@ Space Complexity: O(n^2)
"""
-def MatrixChainOrder(array):
- N = len(array)
- Matrix = [[0 for x in range(N)] for x in range(N)]
- Sol = [[0 for x in range(N)] for x in range(N)]
+def matrix_chain_order(array):
+ n = len(array)
+ matrix = [[0 for x in range(n)] for x in range(n)]
+ sol = [[0 for x in range(n)] for x in range(n)]
- for ChainLength in range(2, N):
- for a in range(1, N - ChainLength + 1):
- b = a + ChainLength - 1
+ for chain_length in range(2, n):
+ for a in range(1, n - chain_length + 1):
+ b = a + chain_length - 1
- Matrix[a][b] = sys.maxsize
+ matrix[a][b] = sys.maxsize
for c in range(a, b):
cost = (
- Matrix[a][c] + Matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
+ matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
- if cost < Matrix[a][b]:
- Matrix[a][b] = cost
- Sol[a][b] = c
- return Matrix, Sol
+ if cost < matrix[a][b]:
+ matrix[a][b] = cost
+ sol[a][b] = c
+ return matrix, sol
# Print order of matrix with Ai as Matrix
-def PrintOptimalSolution(OptimalSolution, i, j):
+def print_optiomal_solution(optimal_solution, i, j):
if i == j:
print("A" + str(i), end=" ")
else:
print("(", end=" ")
- PrintOptimalSolution(OptimalSolution, i, OptimalSolution[i][j])
- PrintOptimalSolution(OptimalSolution, OptimalSolution[i][j] + 1, j)
+ print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
+ print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
print(")", end=" ")
@@ -44,10 +44,10 @@ def main():
n = len(array)
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
- Matrix, OptimalSolution = MatrixChainOrder(array)
+ matrix, optimal_solution = matrix_chain_order(array)
- print("No. of Operation required: " + str(Matrix[1][n - 1]))
- PrintOptimalSolution(OptimalSolution, 1, n - 1)
+ print("No. of Operation required: " + str(matrix[1][n - 1]))
+ print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
diff --git a/dynamic_programming/max_non_adjacent_sum.py b/dynamic_programming/max_non_adjacent_sum.py
index 5362b22ca..e3cc23f49 100644
--- a/dynamic_programming/max_non_adjacent_sum.py
+++ b/dynamic_programming/max_non_adjacent_sum.py
@@ -7,7 +7,7 @@ def maximum_non_adjacent_sum(nums: list[int]) -> int:
"""
Find the maximum non-adjacent sum of the integers in the nums input list
- >>> print(maximum_non_adjacent_sum([1, 2, 3]))
+ >>> maximum_non_adjacent_sum([1, 2, 3])
4
>>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
18
diff --git a/dynamic_programming/max_product_subarray.py b/dynamic_programming/max_product_subarray.py
new file mode 100644
index 000000000..425859bc0
--- /dev/null
+++ b/dynamic_programming/max_product_subarray.py
@@ -0,0 +1,53 @@
+def max_product_subarray(numbers: list[int]) -> int:
+ """
+ Returns the maximum product that can be obtained by multiplying a
+    contiguous subarray of the given integer list `numbers`.
+
+ Example:
+ >>> max_product_subarray([2, 3, -2, 4])
+ 6
+ >>> max_product_subarray((-2, 0, -1))
+ 0
+ >>> max_product_subarray([2, 3, -2, 4, -1])
+ 48
+ >>> max_product_subarray([-1])
+ -1
+ >>> max_product_subarray([0])
+ 0
+ >>> max_product_subarray([])
+ 0
+ >>> max_product_subarray("")
+ 0
+ >>> max_product_subarray(None)
+ 0
+ >>> max_product_subarray([2, 3, -2, 4.5, -1])
+ Traceback (most recent call last):
+ ...
+ ValueError: numbers must be an iterable of integers
+ >>> max_product_subarray("ABC")
+ Traceback (most recent call last):
+ ...
+ ValueError: numbers must be an iterable of integers
+ """
+ if not numbers:
+ return 0
+
+ if not isinstance(numbers, (list, tuple)) or not all(
+ isinstance(number, int) for number in numbers
+ ):
+ raise ValueError("numbers must be an iterable of integers")
+
+ max_till_now = min_till_now = max_prod = numbers[0]
+
+ for i in range(1, len(numbers)):
+ # update the maximum and minimum subarray products
+ number = numbers[i]
+ if number < 0:
+ max_till_now, min_till_now = min_till_now, max_till_now
+ max_till_now = max(number, max_till_now * number)
+ min_till_now = min(number, min_till_now * number)
+
+ # update the maximum product found till now
+ max_prod = max(max_prod, max_till_now)
+
+ return max_prod
diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py
deleted file mode 100644
index 3060010ef..000000000
--- a/dynamic_programming/max_sub_array.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""
-author : Mayank Kumar Jha (mk9440)
-"""
-from __future__ import annotations
-
-
-def find_max_sub_array(A, low, high):
- if low == high:
- return low, high, A[low]
- else:
- mid = (low + high) // 2
- left_low, left_high, left_sum = find_max_sub_array(A, low, mid)
- right_low, right_high, right_sum = find_max_sub_array(A, mid + 1, high)
- cross_left, cross_right, cross_sum = find_max_cross_sum(A, low, mid, high)
- if left_sum >= right_sum and left_sum >= cross_sum:
- return left_low, left_high, left_sum
- elif right_sum >= left_sum and right_sum >= cross_sum:
- return right_low, right_high, right_sum
- else:
- return cross_left, cross_right, cross_sum
-
-
-def find_max_cross_sum(A, low, mid, high):
- left_sum, max_left = -999999999, -1
- right_sum, max_right = -999999999, -1
- summ = 0
- for i in range(mid, low - 1, -1):
- summ += A[i]
- if summ > left_sum:
- left_sum = summ
- max_left = i
- summ = 0
- for i in range(mid + 1, high + 1):
- summ += A[i]
- if summ > right_sum:
- right_sum = summ
- max_right = i
- return max_left, max_right, (left_sum + right_sum)
-
-
-def max_sub_array(nums: list[int]) -> int:
- """
- Finds the contiguous subarray which has the largest sum and return its sum.
-
- >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4])
- 6
-
- An empty (sub)array has sum 0.
- >>> max_sub_array([])
- 0
-
- If all elements are negative, the largest subarray would be the empty array,
- having the sum 0.
- >>> max_sub_array([-1, -2, -3])
- 0
- >>> max_sub_array([5, -2, -3])
- 5
- >>> max_sub_array([31, -41, 59, 26, -53, 58, 97, -93, -23, 84])
- 187
- """
- best = 0
- current = 0
- for i in nums:
- current += i
- if current < 0:
- current = 0
- best = max(best, current)
- return best
-
-
-if __name__ == "__main__":
- """
- A random simulation of this algorithm.
- """
- import time
- from random import randint
-
- from matplotlib import pyplot as plt
-
- inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
- tim = []
- for i in inputs:
- li = [randint(1, i) for j in range(i)]
- strt = time.time()
- (find_max_sub_array(li, 0, len(li) - 1))
- end = time.time()
- tim.append(end - strt)
- print("No of Inputs Time Taken")
- for i in range(len(inputs)):
- print(inputs[i], "\t\t", tim[i])
- plt.plot(inputs, tim)
- plt.xlabel("Number of Inputs")
- plt.ylabel("Time taken in seconds ")
- plt.show()
diff --git a/dynamic_programming/max_subarray_sum.py b/dynamic_programming/max_subarray_sum.py
new file mode 100644
index 000000000..c76943472
--- /dev/null
+++ b/dynamic_programming/max_subarray_sum.py
@@ -0,0 +1,60 @@
+"""
+The maximum subarray sum problem is the task of finding the maximum sum that can be
+obtained from a contiguous subarray within a given array of numbers. For example, given
+the array [-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum
+is [4, -1, 2, 1], so the maximum subarray sum is 6.
+
+Kadane's algorithm is a simple dynamic programming algorithm that solves the maximum
+subarray sum problem in O(n) time and O(1) space.
+
+Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem
+"""
+from collections.abc import Sequence
+
+
+def max_subarray_sum(
+ arr: Sequence[float], allow_empty_subarrays: bool = False
+) -> float:
+ """
+ Solves the maximum subarray sum problem using Kadane's algorithm.
+ :param arr: the given array of numbers
+ :param allow_empty_subarrays: if True, then the algorithm considers empty subarrays
+
+ >>> max_subarray_sum([2, 8, 9])
+ 19
+ >>> max_subarray_sum([0, 0])
+ 0
+ >>> max_subarray_sum([-1.0, 0.0, 1.0])
+ 1.0
+ >>> max_subarray_sum([1, 2, 3, 4, -2])
+ 10
+ >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
+ 6
+ >>> max_subarray_sum([2, 3, -9, 8, -2])
+ 8
+ >>> max_subarray_sum([-2, -3, -1, -4, -6])
+ -1
+ >>> max_subarray_sum([-2, -3, -1, -4, -6], allow_empty_subarrays=True)
+ 0
+ >>> max_subarray_sum([])
+ 0
+ """
+ if not arr:
+ return 0
+
+ max_sum = 0 if allow_empty_subarrays else float("-inf")
+ curr_sum = 0.0
+ for num in arr:
+ curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
+ max_sum = max(max_sum, curr_sum)
+
+ return max_sum
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
+
+ nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
+ print(f"{max_subarray_sum(nums) = }")
diff --git a/dynamic_programming/max_sum_contiguous_subsequence.py b/dynamic_programming/max_sum_contiguous_subsequence.py
deleted file mode 100644
index bac592370..000000000
--- a/dynamic_programming/max_sum_contiguous_subsequence.py
+++ /dev/null
@@ -1,20 +0,0 @@
-def max_subarray_sum(nums: list) -> int:
- """
- >>> max_subarray_sum([6 , 9, -1, 3, -7, -5, 10])
- 17
- """
- if not nums:
- return 0
- n = len(nums)
-
- res, s, s_pre = nums[0], nums[0], nums[0]
- for i in range(1, n):
- s = max(nums[i], s_pre + nums[i])
- s_pre = s
- res = max(res, s)
- return res
-
-
-if __name__ == "__main__":
- nums = [6, 9, -1, 3, -7, -5, 10]
- print(max_subarray_sum(nums))
diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py
new file mode 100644
index 000000000..4870c7ef4
--- /dev/null
+++ b/dynamic_programming/min_distance_up_bottom.py
@@ -0,0 +1,52 @@
+"""
+Author : Alexander Pantyukhin
+Date : October 14, 2022
+This is an implementation of the dynamic programming top-down (memoized)
+approach to find the edit distance.
+The aim is to demonstrate the top-down approach for solving the task.
+The implementation was tested on the
+leetcode: https://leetcode.com/problems/edit-distance/
+
+Levenshtein distance
+Dynamic Programming: up -> down.
+"""
+
+import functools
+
+
+def min_distance_up_bottom(word1: str, word2: str) -> int:
+ """
+ >>> min_distance_up_bottom("intention", "execution")
+ 5
+ >>> min_distance_up_bottom("intention", "")
+ 9
+ >>> min_distance_up_bottom("", "")
+ 0
+ >>> min_distance_up_bottom("zooicoarchaeologist", "zoologist")
+ 10
+ """
+ len_word1 = len(word1)
+ len_word2 = len(word2)
+
+ @functools.cache
+ def min_distance(index1: int, index2: int) -> int:
+ # if first word index is overflow - delete all from the second word
+ if index1 >= len_word1:
+ return len_word2 - index2
+ # if second word index is overflow - delete all from the first word
+ if index2 >= len_word2:
+ return len_word1 - index1
+ diff = int(word1[index1] != word2[index2]) # current letters not identical
+ return min(
+ 1 + min_distance(index1 + 1, index2),
+ 1 + min_distance(index1, index2 + 1),
+ diff + min_distance(index1 + 1, index2 + 1),
+ )
+
+ return min_distance(0, 0)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/minimum_coin_change.py b/dynamic_programming/minimum_coin_change.py
index 2869b5857..848bd654d 100644
--- a/dynamic_programming/minimum_coin_change.py
+++ b/dynamic_programming/minimum_coin_change.py
@@ -7,7 +7,7 @@ https://www.hackerrank.com/challenges/coin-change/problem
"""
-def dp_count(S, n):
+def dp_count(s, n):
"""
>>> dp_count([1, 2, 3], 4)
4
@@ -33,7 +33,7 @@ def dp_count(S, n):
# Pick all coins one by one and update table[] values
# after the index greater than or equal to the value of the
# picked coin
- for coin_val in S:
+ for coin_val in s:
for j in range(coin_val, n + 1):
table[j] += table[j - coin_val]
diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py
index 8fad4ef30..3daa9767f 100644
--- a/dynamic_programming/minimum_partition.py
+++ b/dynamic_programming/minimum_partition.py
@@ -3,7 +3,7 @@ Partition a set into two subsets such that the difference of subset sums is mini
"""
-def findMin(arr):
+def find_min(arr):
n = len(arr)
s = sum(arr)
diff --git a/dynamic_programming/minimum_size_subarray_sum.py b/dynamic_programming/minimum_size_subarray_sum.py
new file mode 100644
index 000000000..3868d7353
--- /dev/null
+++ b/dynamic_programming/minimum_size_subarray_sum.py
@@ -0,0 +1,62 @@
+import sys
+
+
+def minimum_subarray_sum(target: int, numbers: list[int]) -> int:
+ """
+ Return the length of the shortest contiguous subarray in a list of numbers whose sum
+ is at least target. Reference: https://stackoverflow.com/questions/8269916
+
+ >>> minimum_subarray_sum(7, [2, 3, 1, 2, 4, 3])
+ 2
+ >>> minimum_subarray_sum(7, [2, 3, -1, 2, 4, -3])
+ 4
+ >>> minimum_subarray_sum(11, [1, 1, 1, 1, 1, 1, 1, 1])
+ 0
+ >>> minimum_subarray_sum(10, [1, 2, 3, 4, 5, 6, 7])
+ 2
+ >>> minimum_subarray_sum(5, [1, 1, 1, 1, 1, 5])
+ 1
+ >>> minimum_subarray_sum(0, [])
+ 0
+ >>> minimum_subarray_sum(0, [1, 2, 3])
+ 1
+ >>> minimum_subarray_sum(10, [10, 20, 30])
+ 1
+ >>> minimum_subarray_sum(7, [1, 1, 1, 1, 1, 1, 10])
+ 1
+ >>> minimum_subarray_sum(6, [])
+ 0
+ >>> minimum_subarray_sum(2, [1, 2, 3])
+ 1
+ >>> minimum_subarray_sum(-6, [])
+ 0
+ >>> minimum_subarray_sum(-6, [3, 4, 5])
+ 1
+ >>> minimum_subarray_sum(8, None)
+ 0
+ >>> minimum_subarray_sum(2, "ABC")
+ Traceback (most recent call last):
+ ...
+ ValueError: numbers must be an iterable of integers
+ """
+ if not numbers:
+ return 0
+ if target == 0 and target in numbers:
+ return 0
+ if not isinstance(numbers, (list, tuple)) or not all(
+ isinstance(number, int) for number in numbers
+ ):
+ raise ValueError("numbers must be an iterable of integers")
+
+ left = right = curr_sum = 0
+ min_len = sys.maxsize
+
+ while right < len(numbers):
+ curr_sum += numbers[right]
+ while curr_sum >= target and left <= right:
+ min_len = min(min_len, right - left + 1)
+ curr_sum -= numbers[left]
+ left += 1
+ right += 1
+
+ return 0 if min_len == sys.maxsize else min_len
diff --git a/dynamic_programming/minimum_squares_to_represent_a_number.py b/dynamic_programming/minimum_squares_to_represent_a_number.py
new file mode 100644
index 000000000..bf5849f5b
--- /dev/null
+++ b/dynamic_programming/minimum_squares_to_represent_a_number.py
@@ -0,0 +1,48 @@
+import math
+import sys
+
+
+def minimum_squares_to_represent_a_number(number: int) -> int:
+ """
+ Count the number of minimum squares to represent a number
+ >>> minimum_squares_to_represent_a_number(25)
+ 1
+ >>> minimum_squares_to_represent_a_number(37)
+ 2
+ >>> minimum_squares_to_represent_a_number(21)
+ 3
+ >>> minimum_squares_to_represent_a_number(58)
+ 2
+ >>> minimum_squares_to_represent_a_number(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: the value of input must not be a negative number
+ >>> minimum_squares_to_represent_a_number(0)
+ 1
+ >>> minimum_squares_to_represent_a_number(12.34)
+ Traceback (most recent call last):
+ ...
+ ValueError: the value of input must be a natural number
+ """
+ if number != int(number):
+ raise ValueError("the value of input must be a natural number")
+ if number < 0:
+ raise ValueError("the value of input must not be a negative number")
+ if number == 0:
+ return 1
+ answers = [-1] * (number + 1)
+ answers[0] = 0
+ for i in range(1, number + 1):
+ answer = sys.maxsize
+ root = int(math.sqrt(i))
+ for j in range(1, root + 1):
+ current_answer = 1 + answers[i - (j**2)]
+ answer = min(answer, current_answer)
+ answers[i] = answer
+ return answers[number]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py
index f4eb7033d..8785027fb 100644
--- a/dynamic_programming/minimum_steps_to_one.py
+++ b/dynamic_programming/minimum_steps_to_one.py
@@ -42,7 +42,8 @@ def min_steps_to_one(number: int) -> int:
"""
if number <= 0:
- raise ValueError(f"n must be greater than 0. Got n = {number}")
+ msg = f"n must be greater than 0. Got n = {number}"
+ raise ValueError(msg)
table = [number + 1] * (number + 1)
diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py
new file mode 100644
index 000000000..6790c21f1
--- /dev/null
+++ b/dynamic_programming/minimum_tickets_cost.py
@@ -0,0 +1,129 @@
+"""
+Author : Alexander Pantyukhin
+Date : November 1, 2022
+
+Task:
+Given a list of days when you need to travel. Each day is an integer from 1 to 365.
+You are able to use tickets for 1 day, 7 days and 30 days.
+Each ticket has a cost.
+
+Find the minimum cost you need to travel every day in the given list of days.
+
+Implementation notes:
+an implementation of the dynamic programming top-down (memoized) approach.
+
+Runtime complexity: O(n)
+
+The implementation was tested on the
+leetcode: https://leetcode.com/problems/minimum-cost-for-tickets/
+
+
+Minimum Cost For Tickets
+Dynamic Programming: up -> down.
+"""
+
+import functools
+
+
+def mincost_tickets(days: list[int], costs: list[int]) -> int:
+ """
+ >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
+ 11
+
+ >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
+ 17
+
+ >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150])
+ 24
+
+ >>> mincost_tickets([2], [2, 90, 150])
+ 2
+
+ >>> mincost_tickets([], [2, 90, 150])
+ 0
+
+ >>> mincost_tickets('hello', [2, 90, 150])
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter days should be a list of integers
+
+ >>> mincost_tickets([], 'world')
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter costs should be a list of three integers
+
+ >>> mincost_tickets([0.25, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150])
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter days should be a list of integers
+
+ >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 0.9, 150])
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter costs should be a list of three integers
+
+ >>> mincost_tickets([-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150])
+ Traceback (most recent call last):
+ ...
+ ValueError: All days elements should be greater than 0
+
+ >>> mincost_tickets([2, 367], [2, 90, 150])
+ Traceback (most recent call last):
+ ...
+ ValueError: All days elements should be less than 366
+
+ >>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [])
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter costs should be a list of three integers
+
+ >>> mincost_tickets([], [])
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter costs should be a list of three integers
+
+ >>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [1, 2, 3, 4])
+ Traceback (most recent call last):
+ ...
+ ValueError: The parameter costs should be a list of three integers
+ """
+
+ # Validation
+ if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
+ raise ValueError("The parameter days should be a list of integers")
+
+ if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
+ raise ValueError("The parameter costs should be a list of three integers")
+
+ if len(days) == 0:
+ return 0
+
+ if min(days) <= 0:
+ raise ValueError("All days elements should be greater than 0")
+
+ if max(days) >= 366:
+ raise ValueError("All days elements should be less than 366")
+
+ days_set = set(days)
+
+ @functools.cache
+ def dynamic_programming(index: int) -> int:
+ if index > 365:
+ return 0
+
+ if index not in days_set:
+ return dynamic_programming(index + 1)
+
+ return min(
+ costs[0] + dynamic_programming(index + 1),
+ costs[1] + dynamic_programming(index + 7),
+ costs[2] + dynamic_programming(index + 30),
+ )
+
+ return dynamic_programming(1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/dynamic_programming/optimal_binary_search_tree.py b/dynamic_programming/optimal_binary_search_tree.py
index 0d94c1b61..b4f1181ac 100644
--- a/dynamic_programming/optimal_binary_search_tree.py
+++ b/dynamic_programming/optimal_binary_search_tree.py
@@ -104,7 +104,7 @@ def find_optimal_binary_search_tree(nodes):
dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
# sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes
# array
- sum = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
+ total = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
# stores tree roots that will be used later for constructing binary search tree
root = [[i if i == j else 0 for j in range(n)] for i in range(n)]
@@ -113,14 +113,14 @@ def find_optimal_binary_search_tree(nodes):
j = i + interval_length - 1
dp[i][j] = sys.maxsize # set the value to "infinity"
- sum[i][j] = sum[i][j - 1] + freqs[j]
+ total[i][j] = total[i][j - 1] + freqs[j]
# Apply Knuth's optimization
# Loop without optimization: for r in range(i, j + 1):
for r in range(root[i][j - 1], root[i + 1][j] + 1): # r is a temporal root
left = dp[i][r - 1] if r != i else 0 # optimal cost for left subtree
right = dp[r + 1][j] if r != j else 0 # optimal cost for right subtree
- cost = left + sum[i][j] + right
+ cost = left + total[i][j] + right
if dp[i][j] > cost:
dp[i][j] = cost
diff --git a/dynamic_programming/palindrome_partitioning.py b/dynamic_programming/palindrome_partitioning.py
new file mode 100644
index 000000000..c1629440e
--- /dev/null
+++ b/dynamic_programming/palindrome_partitioning.py
@@ -0,0 +1,39 @@
+"""
+Given a string s, partition s such that every substring of the
+partition is a palindrome.
+Find the minimum cuts needed for a palindrome partitioning of s.
+
+Time Complexity: O(n^2)
+Space Complexity: O(n^2)
+For other explanations refer to: https://www.youtube.com/watch?v=_H8V5hJUGd0
+"""
+
+
+def find_minimum_partitions(string: str) -> int:
+ """
+ Returns the minimum cuts needed for a palindrome partitioning of string
+
+ >>> find_minimum_partitions("aab")
+ 1
+ >>> find_minimum_partitions("aaa")
+ 0
+ >>> find_minimum_partitions("ababbbabbababa")
+ 3
+ """
+ length = len(string)
+ cut = [0] * length
+ is_palindromic = [[False for i in range(length)] for j in range(length)]
+ for i, c in enumerate(string):
+ mincut = i
+ for j in range(i + 1):
+ if c == string[j] and (i - j < 2 or is_palindromic[j + 1][i - 1]):
+ is_palindromic[j][i] = True
+ mincut = min(mincut, 0 if j == 0 else (cut[j - 1] + 1))
+ cut[i] = mincut
+ return cut[length - 1]
+
+
+if __name__ == "__main__":
+ s = input("Enter the string: ").strip()
+ ans = find_minimum_partitions(s)
+ print(f"Minimum number of partitions required for the '{s}' is {ans}")
diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py
index 442a39cb1..f80fa440a 100644
--- a/dynamic_programming/rod_cutting.py
+++ b/dynamic_programming/rod_cutting.py
@@ -177,13 +177,15 @@ def _enforce_args(n: int, prices: list):
the rod
"""
if n < 0:
- raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
+ msg = f"n must be greater than or equal to 0. Got n = {n}"
+ raise ValueError(msg)
if n > len(prices):
- raise ValueError(
- f"Each integral piece of rod must have a corresponding "
- f"price. Got n = {n} but length of prices = {len(prices)}"
+ msg = (
+ "Each integral piece of rod must have a corresponding price. "
+ f"Got n = {n} but length of prices = {len(prices)}"
)
+ raise ValueError(msg)
def main():
diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py
index 4781b23b3..819fd8106 100644
--- a/dynamic_programming/subset_generation.py
+++ b/dynamic_programming/subset_generation.py
@@ -37,7 +37,8 @@ def print_combination(arr, n, r):
combination_util(arr, n, r, 0, data, 0)
-# Driver function to check for above function
-arr = [10, 20, 30, 40, 50]
-print_combination(arr, len(arr), 3)
-# This code is contributed by Ambuj sahu
+if __name__ == "__main__":
+ # Driver code to check the function above
+ arr = [10, 20, 30, 40, 50]
+ print_combination(arr, len(arr), 3)
+ # This code is contributed by Ambuj sahu
diff --git a/dynamic_programming/sum_of_subset.py b/dynamic_programming/sum_of_subset.py
index a12177b57..96ebcf583 100644
--- a/dynamic_programming/sum_of_subset.py
+++ b/dynamic_programming/sum_of_subset.py
@@ -1,34 +1,32 @@
-def isSumSubset(arr, arrLen, requiredSum):
+def is_sum_subset(arr: list[int], required_sum: int) -> bool:
"""
- >>> isSumSubset([2, 4, 6, 8], 4, 5)
+ >>> is_sum_subset([2, 4, 6, 8], 5)
False
- >>> isSumSubset([2, 4, 6, 8], 4, 14)
+ >>> is_sum_subset([2, 4, 6, 8], 14)
True
"""
# a subset value says 1 if that subset sum can be formed else 0
# initially no subsets can be formed hence False/0
- subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)]
+ arr_len = len(arr)
+ subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
- for i in range(arrLen + 1):
+ for i in range(arr_len + 1):
subset[i][0] = True
# sum is not zero and set is empty then false
- for i in range(1, requiredSum + 1):
+ for i in range(1, required_sum + 1):
subset[0][i] = False
- for i in range(1, arrLen + 1):
- for j in range(1, requiredSum + 1):
+ for i in range(1, arr_len + 1):
+ for j in range(1, required_sum + 1):
if arr[i - 1] > j:
subset[i][j] = subset[i - 1][j]
if arr[i - 1] <= j:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
- # uncomment to print the subset
- # for i in range(arrLen+1):
- # print(subset[i])
- print(subset[arrLen][requiredSum])
+ return subset[arr_len][required_sum]
if __name__ == "__main__":
diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py
new file mode 100644
index 000000000..764d45dc2
--- /dev/null
+++ b/dynamic_programming/viterbi.py
@@ -0,0 +1,403 @@
+from typing import Any
+
+
+def viterbi(
+ observations_space: list,
+ states_space: list,
+ initial_probabilities: dict,
+ transition_probabilities: dict,
+ emission_probabilities: dict,
+) -> list:
+ """
+ Viterbi Algorithm, to find the most likely path of
+ states from the start and the expected output.
+ https://en.wikipedia.org/wiki/Viterbi_algorithm
+
+ Wikipedia example
+ >>> observations = ["normal", "cold", "dizzy"]
+ >>> states = ["Healthy", "Fever"]
+ >>> start_p = {"Healthy": 0.6, "Fever": 0.4}
+ >>> trans_p = {
+ ... "Healthy": {"Healthy": 0.7, "Fever": 0.3},
+ ... "Fever": {"Healthy": 0.4, "Fever": 0.6},
+ ... }
+ >>> emit_p = {
+ ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
+ ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
+ ... }
+ >>> viterbi(observations, states, start_p, trans_p, emit_p)
+ ['Healthy', 'Healthy', 'Fever']
+
+ >>> viterbi((), states, start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+
+ >>> viterbi(observations, (), start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+
+ >>> viterbi(observations, states, {}, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+
+ >>> viterbi(observations, states, start_p, {}, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+
+ >>> viterbi(observations, states, start_p, trans_p, {})
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+
+ >>> viterbi("invalid", states, start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: observations_space must be a list
+
+ >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: observations_space must be a list of strings
+
+ >>> viterbi(observations, "invalid", start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: states_space must be a list
+
+ >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: states_space must be a list of strings
+
+ >>> viterbi(observations, states, "invalid", trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: initial_probabilities must be a dict
+
+ >>> viterbi(observations, states, {2:2}, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: initial_probabilities all keys must be strings
+
+ >>> viterbi(observations, states, {"a":2}, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: initial_probabilities all values must be float
+
+ >>> viterbi(observations, states, start_p, "invalid", emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: transition_probabilities must be a dict
+
+ >>> viterbi(observations, states, start_p, {"a":2}, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: transition_probabilities all values must be dict
+
+ >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: transition_probabilities all keys must be strings
+
+ >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: transition_probabilities all keys must be strings
+
+ >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: transition_probabilities nested dictionary all values must be float
+
+ >>> viterbi(observations, states, start_p, trans_p, "invalid")
+ Traceback (most recent call last):
+ ...
+ ValueError: emission_probabilities must be a dict
+
+ >>> viterbi(observations, states, start_p, trans_p, None)
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+
+ """
+ _validation(
+ observations_space,
+ states_space,
+ initial_probabilities,
+ transition_probabilities,
+ emission_probabilities,
+ )
+ # Creates data structures and fill initial step
+ probabilities: dict = {}
+ pointers: dict = {}
+ for state in states_space:
+ observation = observations_space[0]
+ probabilities[(state, observation)] = (
+ initial_probabilities[state] * emission_probabilities[state][observation]
+ )
+ pointers[(state, observation)] = None
+
+ # Fills the data structure with the probabilities of
+ # different transitions and pointers to previous states
+ for o in range(1, len(observations_space)):
+ observation = observations_space[o]
+ prior_observation = observations_space[o - 1]
+ for state in states_space:
+ # Calculates the argmax for probability function
+ arg_max = ""
+ max_probability = -1
+ for k_state in states_space:
+ probability = (
+ probabilities[(k_state, prior_observation)]
+ * transition_probabilities[k_state][state]
+ * emission_probabilities[state][observation]
+ )
+ if probability > max_probability:
+ max_probability = probability
+ arg_max = k_state
+
+ # Update probabilities and pointers dicts
+ probabilities[(state, observation)] = (
+ probabilities[(arg_max, prior_observation)]
+ * transition_probabilities[arg_max][state]
+ * emission_probabilities[state][observation]
+ )
+
+ pointers[(state, observation)] = arg_max
+
+ # The final observation
+ final_observation = observations_space[len(observations_space) - 1]
+
+ # argmax for given final observation
+ arg_max = ""
+ max_probability = -1
+ for k_state in states_space:
+ probability = probabilities[(k_state, final_observation)]
+ if probability > max_probability:
+ max_probability = probability
+ arg_max = k_state
+ last_state = arg_max
+
+ # Process pointers backwards
+ previous = last_state
+ result = []
+ for o in range(len(observations_space) - 1, -1, -1):
+ result.append(previous)
+ previous = pointers[previous, observations_space[o]]
+ result.reverse()
+
+ return result
+
+
+def _validation(
+ observations_space: Any,
+ states_space: Any,
+ initial_probabilities: Any,
+ transition_probabilities: Any,
+ emission_probabilities: Any,
+) -> None:
+ """
+ >>> observations = ["normal", "cold", "dizzy"]
+ >>> states = ["Healthy", "Fever"]
+ >>> start_p = {"Healthy": 0.6, "Fever": 0.4}
+ >>> trans_p = {
+ ... "Healthy": {"Healthy": 0.7, "Fever": 0.3},
+ ... "Fever": {"Healthy": 0.4, "Fever": 0.6},
+ ... }
+ >>> emit_p = {
+ ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
+ ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
+ ... }
+ >>> _validation(observations, states, start_p, trans_p, emit_p)
+
+ >>> _validation([], states, start_p, trans_p, emit_p)
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+ """
+ _validate_not_empty(
+ observations_space,
+ states_space,
+ initial_probabilities,
+ transition_probabilities,
+ emission_probabilities,
+ )
+ _validate_lists(observations_space, states_space)
+ _validate_dicts(
+ initial_probabilities, transition_probabilities, emission_probabilities
+ )
+
+
+def _validate_not_empty(
+ observations_space: Any,
+ states_space: Any,
+ initial_probabilities: Any,
+ transition_probabilities: Any,
+ emission_probabilities: Any,
+) -> None:
+ """
+ >>> _validate_not_empty(["a"], ["b"], {"c":0.5},
+ ... {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
+
+ >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, {}, {"f": {"g": 0.7}})
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+ >>> _validate_not_empty(["a"], ["b"], None, {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
+ Traceback (most recent call last):
+ ...
+ ValueError: There's an empty parameter
+ """
+ if not all(
+ [
+ observations_space,
+ states_space,
+ initial_probabilities,
+ transition_probabilities,
+ emission_probabilities,
+ ]
+ ):
+ raise ValueError("There's an empty parameter")
+
+
+def _validate_lists(observations_space: Any, states_space: Any) -> None:
+ """
+ >>> _validate_lists(["a"], ["b"])
+
+ >>> _validate_lists(1234, ["b"])
+ Traceback (most recent call last):
+ ...
+ ValueError: observations_space must be a list
+
+ >>> _validate_lists(["a"], [3])
+ Traceback (most recent call last):
+ ...
+ ValueError: states_space must be a list of strings
+ """
+ _validate_list(observations_space, "observations_space")
+ _validate_list(states_space, "states_space")
+
+
+def _validate_list(_object: Any, var_name: str) -> None:
+ """
+ >>> _validate_list(["a"], "mock_name")
+
+ >>> _validate_list("a", "mock_name")
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name must be a list
+ >>> _validate_list([0.5], "mock_name")
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name must be a list of strings
+
+ """
+ if not isinstance(_object, list):
+ msg = f"{var_name} must be a list"
+ raise ValueError(msg)
+ else:
+ for x in _object:
+ if not isinstance(x, str):
+ msg = f"{var_name} must be a list of strings"
+ raise ValueError(msg)
+
+
+def _validate_dicts(
+ initial_probabilities: Any,
+ transition_probabilities: Any,
+ emission_probabilities: Any,
+) -> None:
+ """
+ >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
+
+ >>> _validate_dicts("invalid", {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
+ Traceback (most recent call last):
+ ...
+ ValueError: initial_probabilities must be a dict
+ >>> _validate_dicts({"c":0.5}, {2: {"e": 0.6}}, {"f": {"g": 0.7}})
+ Traceback (most recent call last):
+ ...
+ ValueError: transition_probabilities all keys must be strings
+ >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {2: 0.7}})
+ Traceback (most recent call last):
+ ...
+ ValueError: emission_probabilities all keys must be strings
+ >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": "h"}})
+ Traceback (most recent call last):
+ ...
+ ValueError: emission_probabilities nested dictionary all values must be float
+ """
+ _validate_dict(initial_probabilities, "initial_probabilities", float)
+ _validate_nested_dict(transition_probabilities, "transition_probabilities")
+ _validate_nested_dict(emission_probabilities, "emission_probabilities")
+
+
+def _validate_nested_dict(_object: Any, var_name: str) -> None:
+ """
+ >>> _validate_nested_dict({"a":{"b": 0.5}}, "mock_name")
+
+ >>> _validate_nested_dict("invalid", "mock_name")
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name must be a dict
+ >>> _validate_nested_dict({"a": 8}, "mock_name")
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name all values must be dict
+ >>> _validate_nested_dict({"a":{2: 0.5}}, "mock_name")
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name all keys must be strings
+ >>> _validate_nested_dict({"a":{"b": 4}}, "mock_name")
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name nested dictionary all values must be float
+ """
+ _validate_dict(_object, var_name, dict)
+ for x in _object.values():
+ _validate_dict(x, var_name, float, True)
+
+
+def _validate_dict(
+ _object: Any, var_name: str, value_type: type, nested: bool = False
+) -> None:
+ """
+ >>> _validate_dict({"b": 0.5}, "mock_name", float)
+
+ >>> _validate_dict("invalid", "mock_name", float)
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name must be a dict
+ >>> _validate_dict({"a": 8}, "mock_name", dict)
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name all values must be dict
+ >>> _validate_dict({2: 0.5}, "mock_name",float, True)
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name all keys must be strings
+ >>> _validate_dict({"b": 4}, "mock_name", float,True)
+ Traceback (most recent call last):
+ ...
+ ValueError: mock_name nested dictionary all values must be float
+ """
+ if not isinstance(_object, dict):
+ msg = f"{var_name} must be a dict"
+ raise ValueError(msg)
+ if not all(isinstance(x, str) for x in _object):
+ msg = f"{var_name} all keys must be strings"
+ raise ValueError(msg)
+ if not all(isinstance(x, value_type) for x in _object.values()):
+ nested_text = "nested dictionary " if nested else ""
+ msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
+ raise ValueError(msg)
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/dynamic_programming/word_break.py b/dynamic_programming/word_break.py
new file mode 100644
index 000000000..4d7ac8690
--- /dev/null
+++ b/dynamic_programming/word_break.py
@@ -0,0 +1,111 @@
+"""
+Author : Alexander Pantyukhin
+Date : December 12, 2022
+
+Task:
+Given a string and a list of words, return true if the string can be
+segmented into a space-separated sequence of one or more words.
+
+Note that the same word may be reused
+multiple times in the segmentation.
+
+Implementation notes: Trie + Dynamic programming up -> down.
+The Trie will be used to store the words. It will be useful for scanning
+available words for the current position in the string.
+
+Leetcode:
+https://leetcode.com/problems/word-break/description/
+
+Runtime: O(n * n)
+Space: O(n)
+"""
+
+import functools
+from typing import Any
+
+
+def word_break(string: str, words: list[str]) -> bool:
+ """
+    Return True if the string can be segmented into words from the list, False otherwise.
+
+ >>> word_break("applepenapple", ["apple","pen"])
+ True
+ >>> word_break("catsandog", ["cats","dog","sand","and","cat"])
+ False
+ >>> word_break("cars", ["car","ca","rs"])
+ True
+ >>> word_break('abc', [])
+ False
+ >>> word_break(123, ['a'])
+ Traceback (most recent call last):
+ ...
+ ValueError: the string should be not empty string
+ >>> word_break('', ['a'])
+ Traceback (most recent call last):
+ ...
+ ValueError: the string should be not empty string
+ >>> word_break('abc', [123])
+ Traceback (most recent call last):
+ ...
+ ValueError: the words should be a list of non-empty strings
+ >>> word_break('abc', [''])
+ Traceback (most recent call last):
+ ...
+ ValueError: the words should be a list of non-empty strings
+ """
+
+ # Validation
+ if not isinstance(string, str) or len(string) == 0:
+ raise ValueError("the string should be not empty string")
+
+ if not isinstance(words, list) or not all(
+ isinstance(item, str) and len(item) > 0 for item in words
+ ):
+ raise ValueError("the words should be a list of non-empty strings")
+
+ # Build trie
+ trie: dict[str, Any] = {}
+ word_keeper_key = "WORD_KEEPER"
+
+ for word in words:
+ trie_node = trie
+ for c in word:
+ if c not in trie_node:
+ trie_node[c] = {}
+
+ trie_node = trie_node[c]
+
+ trie_node[word_keeper_key] = True
+
+ len_string = len(string)
+
+ # Dynamic programming method
+ @functools.cache
+ def is_breakable(index: int) -> bool:
+ """
+ >>> string = 'a'
+ >>> is_breakable(1)
+ True
+ """
+ if index == len_string:
+ return True
+
+ trie_node = trie
+ for i in range(index, len_string):
+ trie_node = trie_node.get(string[i], None)
+
+ if trie_node is None:
+ return False
+
+ if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
+ return True
+
+ return False
+
+ return is_breakable(0)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py
new file mode 100644
index 000000000..0ce1c2aa9
--- /dev/null
+++ b/electronics/apparent_power.py
@@ -0,0 +1,37 @@
+import cmath
+import math
+
+
+def apparent_power(
+ voltage: float, current: float, voltage_angle: float, current_angle: float
+) -> complex:
+ """
+ Calculate the apparent power in a single-phase AC circuit.
+
+ Reference: https://en.wikipedia.org/wiki/AC_power#Apparent_power
+
+ >>> apparent_power(100, 5, 0, 0)
+ (500+0j)
+ >>> apparent_power(100, 5, 90, 0)
+ (3.061616997868383e-14+500j)
+ >>> apparent_power(100, 5, -45, -60)
+ (-129.40952255126027-482.9629131445341j)
+ >>> apparent_power(200, 10, -30, -90)
+ (-999.9999999999998-1732.0508075688776j)
+ """
+ # Convert angles from degrees to radians
+ voltage_angle_rad = math.radians(voltage_angle)
+ current_angle_rad = math.radians(current_angle)
+
+ # Convert voltage and current to rectangular form
+ voltage_rect = cmath.rect(voltage, voltage_angle_rad)
+ current_rect = cmath.rect(current, current_angle_rad)
+
+ # Calculate apparent power
+ return voltage_rect * current_rect
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/builtin_voltage.py b/electronics/builtin_voltage.py
new file mode 100644
index 000000000..38fde4524
--- /dev/null
+++ b/electronics/builtin_voltage.py
@@ -0,0 +1,67 @@
+from math import log
+
+from scipy.constants import Boltzmann, physical_constants
+
+T = 300 # TEMPERATURE (unit = K)
+
+
+def builtin_voltage(
+ donor_conc: float, # donor concentration
+ acceptor_conc: float, # acceptor concentration
+ intrinsic_conc: float, # intrinsic concentration
+) -> float:
+ """
+ This function can calculate the Builtin Voltage of a pn junction diode.
+ This is calculated from the given three values.
+ Examples -
+ >>> builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
+ 0.833370010652644
+ >>> builtin_voltage(donor_conc=0, acceptor_conc=1600, intrinsic_conc=200)
+ Traceback (most recent call last):
+ ...
+ ValueError: Donor concentration should be positive
+ >>> builtin_voltage(donor_conc=1000, acceptor_conc=0, intrinsic_conc=1200)
+ Traceback (most recent call last):
+ ...
+ ValueError: Acceptor concentration should be positive
+ >>> builtin_voltage(donor_conc=1000, acceptor_conc=1000, intrinsic_conc=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Intrinsic concentration should be positive
+ >>> builtin_voltage(donor_conc=1000, acceptor_conc=3000, intrinsic_conc=2000)
+ Traceback (most recent call last):
+ ...
+ ValueError: Donor concentration should be greater than intrinsic concentration
+ >>> builtin_voltage(donor_conc=3000, acceptor_conc=1000, intrinsic_conc=2000)
+ Traceback (most recent call last):
+ ...
+ ValueError: Acceptor concentration should be greater than intrinsic concentration
+ """
+
+ if donor_conc <= 0:
+ raise ValueError("Donor concentration should be positive")
+ elif acceptor_conc <= 0:
+ raise ValueError("Acceptor concentration should be positive")
+ elif intrinsic_conc <= 0:
+ raise ValueError("Intrinsic concentration should be positive")
+ elif donor_conc <= intrinsic_conc:
+ raise ValueError(
+ "Donor concentration should be greater than intrinsic concentration"
+ )
+ elif acceptor_conc <= intrinsic_conc:
+ raise ValueError(
+ "Acceptor concentration should be greater than intrinsic concentration"
+ )
+ else:
+ return (
+ Boltzmann
+ * T
+ * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
+ / physical_constants["electron volt"][0]
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/carrier_concentration.py b/electronics/carrier_concentration.py
new file mode 100644
index 000000000..1fb9f2430
--- /dev/null
+++ b/electronics/carrier_concentration.py
@@ -0,0 +1,75 @@
+# https://en.wikipedia.org/wiki/Charge_carrier_density
+# https://www.pveducation.org/pvcdrom/pn-junctions/equilibrium-carrier-concentration
+# http://www.ece.utep.edu/courses/ee3329/ee3329/Studyguide/ToC/Fundamentals/Carriers/concentrations.html
+
+from __future__ import annotations
+
+
+def carrier_concentration(
+ electron_conc: float,
+ hole_conc: float,
+ intrinsic_conc: float,
+) -> tuple:
+ """
+ This function can calculate any one of the three -
+ 1. Electron Concentration
+    2. Hole Concentration
+ 3. Intrinsic Concentration
+ given the other two.
+ Examples -
+ >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
+ ('intrinsic_conc', 50.0)
+ >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
+ ('electron_conc', 25.0)
+ >>> carrier_concentration(electron_conc=1000, hole_conc=0, intrinsic_conc=1200)
+ ('hole_conc', 1440.0)
+ >>> carrier_concentration(electron_conc=1000, hole_conc=400, intrinsic_conc=1200)
+ Traceback (most recent call last):
+ ...
+ ValueError: You cannot supply more or less than 2 values
+ >>> carrier_concentration(electron_conc=-1000, hole_conc=0, intrinsic_conc=1200)
+ Traceback (most recent call last):
+ ...
+ ValueError: Electron concentration cannot be negative in a semiconductor
+ >>> carrier_concentration(electron_conc=0, hole_conc=-400, intrinsic_conc=1200)
+ Traceback (most recent call last):
+ ...
+ ValueError: Hole concentration cannot be negative in a semiconductor
+ >>> carrier_concentration(electron_conc=0, hole_conc=400, intrinsic_conc=-1200)
+ Traceback (most recent call last):
+ ...
+ ValueError: Intrinsic concentration cannot be negative in a semiconductor
+ """
+ if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
+ raise ValueError("You cannot supply more or less than 2 values")
+ elif electron_conc < 0:
+ raise ValueError("Electron concentration cannot be negative in a semiconductor")
+ elif hole_conc < 0:
+ raise ValueError("Hole concentration cannot be negative in a semiconductor")
+ elif intrinsic_conc < 0:
+ raise ValueError(
+ "Intrinsic concentration cannot be negative in a semiconductor"
+ )
+ elif electron_conc == 0:
+ return (
+ "electron_conc",
+ intrinsic_conc**2 / hole_conc,
+ )
+ elif hole_conc == 0:
+ return (
+ "hole_conc",
+ intrinsic_conc**2 / electron_conc,
+ )
+ elif intrinsic_conc == 0:
+ return (
+ "intrinsic_conc",
+ (electron_conc * hole_conc) ** 0.5,
+ )
+ else:
+ return (-1, -1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py
new file mode 100644
index 000000000..f2e35742e
--- /dev/null
+++ b/electronics/circular_convolution.py
@@ -0,0 +1,99 @@
+# https://en.wikipedia.org/wiki/Circular_convolution
+
+"""
+Circular convolution, also known as cyclic convolution,
+is a special case of periodic convolution, which is the convolution of two
+periodic functions that have the same period. Periodic convolution arises,
+for example, in the context of the discrete-time Fourier transform (DTFT).
+In particular, the DTFT of the product of two discrete sequences is the periodic
+convolution of the DTFTs of the individual sequences. And each DTFT is a periodic
+summation of a continuous Fourier transform function.
+
+Source: https://en.wikipedia.org/wiki/Circular_convolution
+"""
+
+import doctest
+from collections import deque
+
+import numpy as np
+
+
+class CircularConvolution:
+ """
+ This class stores the first and second signal and performs the circular convolution
+ """
+
+ def __init__(self) -> None:
+ """
+ First signal and second signal are stored as 1-D array
+ """
+
+ self.first_signal = [2, 1, 2, -1]
+ self.second_signal = [1, 2, 3, 4]
+
+ def circular_convolution(self) -> list[float]:
+ """
+ This function performs the circular convolution of the first and second signal
+ using matrix method
+
+ Usage:
+ >>> import circular_convolution as cc
+ >>> convolution = cc.CircularConvolution()
+ >>> convolution.circular_convolution()
+ [10, 10, 6, 14]
+
+ >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6]
+ >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5]
+ >>> convolution.circular_convolution()
+ [5.2, 6.0, 6.48, 6.64, 6.48, 6.0, 5.2, 4.08]
+
+ >>> convolution.first_signal = [-1, 1, 2, -2]
+ >>> convolution.second_signal = [0.5, 1, -1, 2, 0.75]
+ >>> convolution.circular_convolution()
+ [6.25, -3.0, 1.5, -2.0, -2.75]
+
+ >>> convolution.first_signal = [1, -1, 2, 3, -1]
+ >>> convolution.second_signal = [1, 2, 3]
+ >>> convolution.circular_convolution()
+ [8, -2, 3, 4, 11]
+
+ """
+
+ length_first_signal = len(self.first_signal)
+ length_second_signal = len(self.second_signal)
+
+ max_length = max(length_first_signal, length_second_signal)
+
+ # create a zero matrix of max_length x max_length
+ matrix = [[0] * max_length for i in range(max_length)]
+
+ # fills the smaller signal with zeros to make both signals of same length
+ if length_first_signal < length_second_signal:
+ self.first_signal += [0] * (max_length - length_first_signal)
+ elif length_first_signal > length_second_signal:
+ self.second_signal += [0] * (max_length - length_second_signal)
+
+ """
+ Fills the matrix in the following way assuming 'x' is the signal of length 4
+ [
+ [x[0], x[3], x[2], x[1]],
+ [x[1], x[0], x[3], x[2]],
+ [x[2], x[1], x[0], x[3]],
+ [x[3], x[2], x[1], x[0]]
+ ]
+ """
+ for i in range(max_length):
+ rotated_signal = deque(self.second_signal)
+ rotated_signal.rotate(i)
+ for j, item in enumerate(rotated_signal):
+ matrix[i][j] += item
+
+ # multiply the matrix with the first signal
+ final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
+
+ # rounding-off to two decimal places
+ return [round(i, 2) for i in final_signal]
+
+
+if __name__ == "__main__":
+ doctest.testmod()
diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py
new file mode 100644
index 000000000..18c1a8179
--- /dev/null
+++ b/electronics/coulombs_law.py
@@ -0,0 +1,85 @@
+# https://en.wikipedia.org/wiki/Coulomb%27s_law
+
+from __future__ import annotations
+
+COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
+
+
+def couloumbs_law(
+ force: float, charge1: float, charge2: float, distance: float
+) -> dict[str, float]:
+ """
+ Apply Coulomb's Law on any three given values. These can be force, charge1,
+ charge2, or distance, and then in a Python dict return name/value pair of
+ the zero value.
+
+ Coulomb's Law states that the magnitude of the electrostatic force of
+ attraction or repulsion between two point charges is directly proportional
+ to the product of the magnitudes of charges and inversely proportional to
+ the square of the distance between them.
+
+ Reference
+ ----------
+ Coulomb (1785) "Premier mémoire sur l’électricité et le magnétisme,"
+ Histoire de l’Académie Royale des Sciences, pp. 569–577.
+
+ Parameters
+ ----------
+ force : float with units in Newtons
+
+ charge1 : float with units in Coulombs
+
+ charge2 : float with units in Coulombs
+
+ distance : float with units in meters
+
+ Returns
+ -------
+ result : dict name/value pair of the zero value
+
+ >>> couloumbs_law(force=0, charge1=3, charge2=5, distance=2000)
+ {'force': 33705.0}
+
+ >>> couloumbs_law(force=10, charge1=3, charge2=5, distance=0)
+ {'distance': 116112.01488218177}
+
+ >>> couloumbs_law(force=10, charge1=0, charge2=5, distance=2000)
+ {'charge1': 0.0008900756564307966}
+
+ >>> couloumbs_law(force=0, charge1=0, charge2=5, distance=2000)
+ Traceback (most recent call last):
+ ...
+ ValueError: One and only one argument must be 0
+
+ >>> couloumbs_law(force=0, charge1=3, charge2=5, distance=-2000)
+ Traceback (most recent call last):
+ ...
+ ValueError: Distance cannot be negative
+
+ """
+
+ charge_product = abs(charge1 * charge2)
+
+ if (force, charge1, charge2, distance).count(0) != 1:
+ raise ValueError("One and only one argument must be 0")
+ if distance < 0:
+ raise ValueError("Distance cannot be negative")
+ if force == 0:
+ force = COULOMBS_CONSTANT * charge_product / (distance**2)
+ return {"force": force}
+ elif charge1 == 0:
+ charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
+ return {"charge1": charge1}
+ elif charge2 == 0:
+ charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
+ return {"charge2": charge2}
+ elif distance == 0:
+ distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
+ return {"distance": distance}
+ raise ValueError("Exactly one argument must be 0")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/electric_conductivity.py b/electronics/electric_conductivity.py
new file mode 100644
index 000000000..11f2a607d
--- /dev/null
+++ b/electronics/electric_conductivity.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+ELECTRON_CHARGE = 1.6021e-19 # units = C
+
+
+def electric_conductivity(
+ conductivity: float,
+ electron_conc: float,
+ mobility: float,
+) -> tuple[str, float]:
+ """
+ This function can calculate any one of the three -
+ 1. Conductivity
+ 2. Electron Concentration
+ 3. Electron Mobility
+ This is calculated from the other two provided values
+ Examples -
+ >>> electric_conductivity(conductivity=25, electron_conc=100, mobility=0)
+ ('mobility', 1.5604519068722301e+18)
+ >>> electric_conductivity(conductivity=0, electron_conc=1600, mobility=200)
+ ('conductivity', 5.12672e-14)
+ >>> electric_conductivity(conductivity=1000, electron_conc=0, mobility=1200)
+ ('electron_conc', 5.201506356240767e+18)
+ """
+ if (conductivity, electron_conc, mobility).count(0) != 1:
+ raise ValueError("You cannot supply more or less than 2 values")
+ elif conductivity < 0:
+ raise ValueError("Conductivity cannot be negative")
+ elif electron_conc < 0:
+ raise ValueError("Electron concentration cannot be negative")
+ elif mobility < 0:
+ raise ValueError("mobility cannot be negative")
+ elif conductivity == 0:
+ return (
+ "conductivity",
+ mobility * electron_conc * ELECTRON_CHARGE,
+ )
+ elif electron_conc == 0:
+ return (
+ "electron_conc",
+ conductivity / (mobility * ELECTRON_CHARGE),
+ )
+ else:
+ return (
+ "mobility",
+ conductivity / (electron_conc * ELECTRON_CHARGE),
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/electric_power.py b/electronics/electric_power.py
index e4e685bbd..e59795601 100644
--- a/electronics/electric_power.py
+++ b/electronics/electric_power.py
@@ -1,9 +1,10 @@
# https://en.m.wikipedia.org/wiki/Electric_power
+from __future__ import annotations
+
from collections import namedtuple
-from typing import Tuple
-def electric_power(voltage: float, current: float, power: float) -> Tuple:
+def electric_power(voltage: float, current: float, power: float) -> tuple:
"""
This function can calculate any one of the three (voltage, current, power),
fundamental value of electrical system.
@@ -16,15 +17,15 @@ def electric_power(voltage: float, current: float, power: float) -> Tuple:
result(name='power', value=6.0)
>>> electric_power(voltage=2, current=4, power=2)
Traceback (most recent call last):
- File "", line 15, in
+ ...
ValueError: Only one argument must be 0
>>> electric_power(voltage=0, current=0, power=2)
Traceback (most recent call last):
- File "", line 19, in
+ ...
ValueError: Only one argument must be 0
>>> electric_power(voltage=0, current=2, power=-4)
Traceback (most recent call last):
- File "", line 23, in >> electric_power(voltage=2.2, current=2.2, power=0)
result(name='power', value=4.84)
diff --git a/electronics/electrical_impedance.py b/electronics/electrical_impedance.py
new file mode 100644
index 000000000..44041ff79
--- /dev/null
+++ b/electronics/electrical_impedance.py
@@ -0,0 +1,46 @@
+"""Electrical impedance is the measure of the opposition that a
+circuit presents to a current when a voltage is applied.
+Impedance extends the concept of resistance to alternating current (AC) circuits.
+Source: https://en.wikipedia.org/wiki/Electrical_impedance
+"""
+
+from __future__ import annotations
+
+from math import pow, sqrt
+
+
+def electrical_impedance(
+ resistance: float, reactance: float, impedance: float
+) -> dict[str, float]:
+ """
+ Apply Electrical Impedance formula, on any two given electrical values,
+ which can be resistance, reactance, and impedance, and then in a Python dict
+ return name/value pair of the zero value.
+
+ >>> electrical_impedance(3,4,0)
+ {'impedance': 5.0}
+ >>> electrical_impedance(0,4,5)
+ {'resistance': 3.0}
+ >>> electrical_impedance(3,0,5)
+ {'reactance': 4.0}
+ >>> electrical_impedance(3,4,5)
+ Traceback (most recent call last):
+ ...
+ ValueError: One and only one argument must be 0
+ """
+ if (resistance, reactance, impedance).count(0) != 1:
+ raise ValueError("One and only one argument must be 0")
+ if resistance == 0:
+ return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
+ elif reactance == 0:
+ return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
+ elif impedance == 0:
+ return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
+ else:
+ raise ValueError("Exactly one argument must be 0")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/ind_reactance.py b/electronics/ind_reactance.py
new file mode 100644
index 000000000..3f77ef628
--- /dev/null
+++ b/electronics/ind_reactance.py
@@ -0,0 +1,69 @@
+# https://en.wikipedia.org/wiki/Electrical_reactance#Inductive_reactance
+from __future__ import annotations
+
+from math import pi
+
+
+def ind_reactance(
+ inductance: float, frequency: float, reactance: float
+) -> dict[str, float]:
+ """
+ Calculate inductive reactance, frequency or inductance from two given electrical
+ properties then return name/value pair of the zero value in a Python dict.
+
+ Parameters
+ ----------
+ inductance : float with units in Henries
+
+ frequency : float with units in Hertz
+
+ reactance : float with units in Ohms
+
+ >>> ind_reactance(-35e-6, 1e3, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Inductance cannot be negative
+
+ >>> ind_reactance(35e-6, -1e3, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Frequency cannot be negative
+
+ >>> ind_reactance(35e-6, 0, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Inductive reactance cannot be negative
+
+ >>> ind_reactance(0, 10e3, 50)
+ {'inductance': 0.0007957747154594767}
+
+ >>> ind_reactance(35e-3, 0, 50)
+ {'frequency': 227.36420441699332}
+
+ >>> ind_reactance(35e-6, 1e3, 0)
+ {'reactance': 0.2199114857512855}
+
+ """
+
+ if (inductance, frequency, reactance).count(0) != 1:
+ raise ValueError("One and only one argument must be 0")
+ if inductance < 0:
+ raise ValueError("Inductance cannot be negative")
+ if frequency < 0:
+ raise ValueError("Frequency cannot be negative")
+ if reactance < 0:
+ raise ValueError("Inductive reactance cannot be negative")
+ if inductance == 0:
+ return {"inductance": reactance / (2 * pi * frequency)}
+ elif frequency == 0:
+ return {"frequency": reactance / (2 * pi * inductance)}
+ elif reactance == 0:
+ return {"reactance": 2 * pi * frequency * inductance}
+ else:
+ raise ValueError("Exactly one argument must be 0")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/ohms_law.py b/electronics/ohms_law.py
index 41bffa9f8..66e737c1f 100644
--- a/electronics/ohms_law.py
+++ b/electronics/ohms_law.py
@@ -1,8 +1,8 @@
# https://en.wikipedia.org/wiki/Ohm%27s_law
-from typing import Dict
+from __future__ import annotations
-def ohms_law(voltage: float, current: float, resistance: float) -> Dict[str, float]:
+def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
"""
Apply Ohm's Law, on any two given electrical values, which can be voltage, current,
and resistance, and then in a Python dict return name/value pair of the zero value.
diff --git a/electronics/real_and_reactive_power.py b/electronics/real_and_reactive_power.py
new file mode 100644
index 000000000..81dcba800
--- /dev/null
+++ b/electronics/real_and_reactive_power.py
@@ -0,0 +1,49 @@
+import math
+
+
+def real_power(apparent_power: float, power_factor: float) -> float:
+ """
+ Calculate real power from apparent power and power factor.
+
+ Examples:
+ >>> real_power(100, 0.9)
+ 90.0
+ >>> real_power(0, 0.8)
+ 0.0
+ >>> real_power(100, -0.9)
+ -90.0
+ """
+ if (
+ not isinstance(power_factor, (int, float))
+ or power_factor < -1
+ or power_factor > 1
+ ):
+ raise ValueError("power_factor must be a valid float value between -1 and 1.")
+ return apparent_power * power_factor
+
+
+def reactive_power(apparent_power: float, power_factor: float) -> float:
+ """
+ Calculate reactive power from apparent power and power factor.
+
+ Examples:
+ >>> reactive_power(100, 0.9)
+ 43.58898943540673
+ >>> reactive_power(0, 0.8)
+ 0.0
+ >>> reactive_power(100, -0.9)
+ 43.58898943540673
+ """
+ if (
+ not isinstance(power_factor, (int, float))
+ or power_factor < -1
+ or power_factor > 1
+ ):
+ raise ValueError("power_factor must be a valid float value between -1 and 1.")
+ return apparent_power * math.sqrt(1 - power_factor**2)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py
new file mode 100644
index 000000000..55e7f2d6b
--- /dev/null
+++ b/electronics/resistor_equivalence.py
@@ -0,0 +1,60 @@
+# https://byjus.com/equivalent-resistance-formula/
+
+from __future__ import annotations
+
+
+def resistor_parallel(resistors: list[float]) -> float:
+ """
+ Req = 1/ (1/R1 + 1/R2 + ... + 1/Rn)
+
+ >>> resistor_parallel([3.21389, 2, 3])
+ 0.8737571620498019
+ >>> resistor_parallel([3.21389, 2, -3])
+ Traceback (most recent call last):
+ ...
+ ValueError: Resistor at index 2 has a negative or zero value!
+ >>> resistor_parallel([3.21389, 2, 0.000])
+ Traceback (most recent call last):
+ ...
+ ValueError: Resistor at index 2 has a negative or zero value!
+ """
+
+ first_sum = 0.00
+ index = 0
+ for resistor in resistors:
+ if resistor <= 0:
+ msg = f"Resistor at index {index} has a negative or zero value!"
+ raise ValueError(msg)
+ first_sum += 1 / float(resistor)
+ index += 1
+ return 1 / first_sum
+
+
+def resistor_series(resistors: list[float]) -> float:
+ """
+ Req = R1 + R2 + ... + Rn
+
+    Calculate the equivalent resistance for any number of resistors in series.
+
+ >>> resistor_series([3.21389, 2, 3])
+ 8.21389
+ >>> resistor_series([3.21389, 2, -3])
+ Traceback (most recent call last):
+ ...
+ ValueError: Resistor at index 2 has a negative value!
+ """
+ sum_r = 0.00
+ index = 0
+ for resistor in resistors:
+ sum_r += resistor
+ if resistor < 0:
+ msg = f"Resistor at index {index} has a negative value!"
+ raise ValueError(msg)
+ index += 1
+ return sum_r
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/resonant_frequency.py b/electronics/resonant_frequency.py
new file mode 100644
index 000000000..4f95043b6
--- /dev/null
+++ b/electronics/resonant_frequency.py
@@ -0,0 +1,50 @@
+# https://en.wikipedia.org/wiki/LC_circuit
+
+"""An LC circuit, also called a resonant circuit, tank circuit, or tuned circuit,
+is an electric circuit consisting of an inductor, represented by the letter L,
+and a capacitor, represented by the letter C, connected together.
+The circuit can act as an electrical resonator, an electrical analogue of a
+tuning fork, storing energy oscillating at the circuit's resonant frequency.
+Source: https://en.wikipedia.org/wiki/LC_circuit
+"""
+
+from __future__ import annotations
+
+from math import pi, sqrt
+
+
+def resonant_frequency(inductance: float, capacitance: float) -> tuple:
+ """
+ This function can calculate the resonant frequency of LC circuit,
+    for the given value of inductance and capacitance.
+
+ Examples are given below:
+ >>> resonant_frequency(inductance=10, capacitance=5)
+ ('Resonant frequency', 0.022507907903927652)
+ >>> resonant_frequency(inductance=0, capacitance=5)
+ Traceback (most recent call last):
+ ...
+ ValueError: Inductance cannot be 0 or negative
+ >>> resonant_frequency(inductance=10, capacitance=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Capacitance cannot be 0 or negative
+ """
+
+ if inductance <= 0:
+ raise ValueError("Inductance cannot be 0 or negative")
+
+ elif capacitance <= 0:
+ raise ValueError("Capacitance cannot be 0 or negative")
+
+ else:
+ return (
+ "Resonant frequency",
+ float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/file_transfer/receive_file.py b/file_transfer/receive_file.py
index 37a503036..f50ad9fe1 100644
--- a/file_transfer/receive_file.py
+++ b/file_transfer/receive_file.py
@@ -1,8 +1,9 @@
-if __name__ == "__main__":
- import socket # Import socket module
+import socket
- sock = socket.socket() # Create a socket object
- host = socket.gethostname() # Get local machine name
+
+def main():
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ host = socket.gethostname()
port = 12312
sock.connect((host, port))
@@ -13,11 +14,14 @@ if __name__ == "__main__":
print("Receiving data...")
while True:
data = sock.recv(1024)
- print(f"{data = }")
if not data:
break
- out_file.write(data) # Write data to a file
+ out_file.write(data)
- print("Successfully got the file")
+ print("Successfully received the file")
sock.close()
print("Connection closed")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/financial/ABOUT.md b/financial/ABOUT.md
new file mode 100644
index 000000000..f6b0647f8
--- /dev/null
+++ b/financial/ABOUT.md
@@ -0,0 +1,4 @@
+### Interest
+
+* Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/)
+* Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. " [Simple Interest](https://www.investopedia.com/)
diff --git a/financial/__init__.py b/financial/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/financial/equated_monthly_installments.py b/financial/equated_monthly_installments.py
new file mode 100644
index 000000000..3af922493
--- /dev/null
+++ b/financial/equated_monthly_installments.py
@@ -0,0 +1,61 @@
+"""
+Program to calculate the amortization amount per month, given
+- Principal borrowed
+- Rate of interest per annum
+- Years to repay the loan
+
+Wikipedia Reference: https://en.wikipedia.org/wiki/Equated_monthly_installment
+"""
+
+
+def equated_monthly_installments(
+ principal: float, rate_per_annum: float, years_to_repay: int
+) -> float:
+ """
+ Formula for amortization amount per month:
+ A = p * r * (1 + r)^n / ((1 + r)^n - 1)
+ where p is the principal, r is the rate of interest per month
+ and n is the number of payments
+
+ >>> equated_monthly_installments(25000, 0.12, 3)
+ 830.3577453212793
+ >>> equated_monthly_installments(25000, 0.12, 10)
+ 358.67737100646826
+ >>> equated_monthly_installments(0, 0.12, 3)
+ Traceback (most recent call last):
+ ...
+ Exception: Principal borrowed must be > 0
+ >>> equated_monthly_installments(25000, -1, 3)
+ Traceback (most recent call last):
+ ...
+ Exception: Rate of interest must be >= 0
+ >>> equated_monthly_installments(25000, 0.12, 0)
+ Traceback (most recent call last):
+ ...
+ Exception: Years to repay must be an integer > 0
+ """
+ if principal <= 0:
+ raise Exception("Principal borrowed must be > 0")
+ if rate_per_annum < 0:
+ raise Exception("Rate of interest must be >= 0")
+ if years_to_repay <= 0 or not isinstance(years_to_repay, int):
+ raise Exception("Years to repay must be an integer > 0")
+
+ # Yearly rate is divided by 12 to get monthly rate
+ rate_per_month = rate_per_annum / 12
+
+ # Years to repay is multiplied by 12 to get number of payments as payment is monthly
+ number_of_payments = years_to_repay * 12
+
+ return (
+ principal
+ * rate_per_month
+ * (1 + rate_per_month) ** number_of_payments
+ / ((1 + rate_per_month) ** number_of_payments - 1)
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/financial/interest.py b/financial/interest.py
new file mode 100644
index 000000000..33d02e27c
--- /dev/null
+++ b/financial/interest.py
@@ -0,0 +1,120 @@
+# https://www.investopedia.com
+
+from __future__ import annotations
+
+
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """
    Compute simple (non-compounding) interest accrued over a number of days.

    >>> simple_interest(18000.0, 0.06, 3)
    3240.0
    >>> simple_interest(0.5, 0.06, 3)
    0.09
    >>> simple_interest(18000.0, 0.01, 10)
    1800.0
    >>> simple_interest(18000.0, 0.0, 3)
    0.0
    >>> simple_interest(5500.0, 0.01, 100)
    5500.0
    >>> simple_interest(10000.0, -0.06, 3)
    Traceback (most recent call last):
        ...
    ValueError: daily_interest_rate must be >= 0
    >>> simple_interest(-10000.0, 0.06, 3)
    Traceback (most recent call last):
        ...
    ValueError: principal must be > 0
    >>> simple_interest(5500.0, 0.01, -5)
    Traceback (most recent call last):
        ...
    ValueError: days_between_payments must be > 0
    """
    # Validate arguments in the order the documented error cases expect.
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    # Interest grows linearly with time: principal * rate * days.
    daily_interest = principal * daily_interest_rate
    return daily_interest * days_between_payments
+
+
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """
    Compute interest compounded once per period over the given number of periods.

    >>> compound_interest(10000.0, 0.05, 3)
    1576.2500000000014
    >>> compound_interest(10000.0, 0.05, 1)
    500.00000000000045
    >>> compound_interest(0.5, 0.05, 3)
    0.07881250000000006
    >>> compound_interest(10000.0, 0.06, -4)
    Traceback (most recent call last):
        ...
    ValueError: number_of_compounding_periods must be > 0
    >>> compound_interest(10000.0, -3.5, 3.0)
    Traceback (most recent call last):
        ...
    ValueError: nominal_annual_interest_rate_percentage must be >= 0
    >>> compound_interest(-5500.0, 0.01, 5)
    Traceback (most recent call last):
        ...
    ValueError: principal must be > 0
    """
    # Validate arguments in the order the documented error cases expect.
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    # Total growth factor after all periods; the interest is the growth
    # beyond the original principal.
    growth_factor = (
        1 + nominal_annual_interest_rate_percentage
    ) ** number_of_compounding_periods
    return principal * (growth_factor - 1)
+
+
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """
    Compute interest for an annual percentage rate compounded daily.

    >>> apr_interest(10000.0, 0.05, 3)
    1618.223072263547
    >>> apr_interest(10000.0, 0.05, 1)
    512.6749646744732
    >>> apr_interest(0.5, 0.05, 3)
    0.08091115361317736
    >>> apr_interest(10000.0, 0.06, -4)
    Traceback (most recent call last):
        ...
    ValueError: number_of_years must be > 0
    >>> apr_interest(10000.0, -3.5, 3.0)
    Traceback (most recent call last):
        ...
    ValueError: nominal_annual_percentage_rate must be >= 0
    >>> apr_interest(-5500.0, 0.01, 5)
    Traceback (most recent call last):
        ...
    ValueError: principal must be > 0
    """
    # Validate arguments in the order the documented error cases expect.
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    # APR compounds daily: express the rate per day and the term in days,
    # then delegate to the generic compound-interest helper.
    daily_rate = nominal_annual_percentage_rate / 365
    total_days = number_of_years * 365
    return compound_interest(principal, daily_rate, total_days)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/financial/present_value.py b/financial/present_value.py
new file mode 100644
index 000000000..f74612b92
--- /dev/null
+++ b/financial/present_value.py
@@ -0,0 +1,42 @@
+"""
+Reference: https://www.investopedia.com/terms/p/presentvalue.asp
+
+An algorithm that calculates the present value of a stream of yearly cash flows given...
+1. The discount rate (as a decimal, not a percent)
+2. An array of cash flows, with the index of the cash flow being the associated year
+
+Note: This algorithm assumes that cash flows are paid at the end of the specified year
+"""
+
+
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Discount a stream of yearly cash flows back to today's value.

    >>> present_value(0.13, [10, 20.70, -293, 297])
    4.69
    >>> present_value(0.07, [-109129.39, 30923.23, 15098.93, 29734,39])
    -42739.63
    >>> present_value(0.07, [109129.39, 30923.23, 15098.93, 29734,39])
    175519.15
    >>> present_value(-1, [109129.39, 30923.23, 15098.93, 29734,39])
    Traceback (most recent call last):
        ...
    ValueError: Discount rate cannot be negative
    >>> present_value(0.03, [])
    Traceback (most recent call last):
        ...
    ValueError: Cash flows list cannot be empty
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    # Each flow at year i is worth flow / (1 + rate)^i today.
    # (Local renamed so it no longer shadows the function's own name.)
    total = sum(
        cash_flow / ((1 + discount_rate) ** year)
        for year, cash_flow in enumerate(cash_flows)
    )
    return round(total, ndigits=2)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/financial/price_plus_tax.py b/financial/price_plus_tax.py
new file mode 100644
index 000000000..43876d35e
--- /dev/null
+++ b/financial/price_plus_tax.py
@@ -0,0 +1,18 @@
+"""
+Calculate price plus tax of a good or service given its price and a tax rate.
+"""
+
+
def price_plus_tax(price: float, tax_rate: float) -> float:
    """
    Return the total cost of a good or service once tax is included.

    >>> price_plus_tax(100, 0.25)
    125.0
    >>> price_plus_tax(125.50, 0.05)
    131.775
    """
    # Single multiplication keeps float results identical to price * (1 + rate).
    tax_multiplier = 1 + tax_rate
    return price * tax_multiplier
+
+
+if __name__ == "__main__":
+ print(f"{price_plus_tax(100, 0.25) = }")
+ print(f"{price_plus_tax(125.50, 0.05) = }")
diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py
new file mode 100644
index 000000000..482e1eddf
--- /dev/null
+++ b/fractals/julia_sets.py
@@ -0,0 +1,219 @@
+"""Author Alexandre De Zotti
+
+Draws Julia sets of quadratic polynomials and exponential maps.
+ More specifically, this iterates the function a fixed number of times
+ then plots whether the absolute value of the last iterate is greater than
+ a fixed threshold (named "escape radius"). For the exponential map this is not
+ really an escape radius but rather a convenient way to approximate the Julia
+ set with bounded orbits.
+
+The examples presented here are:
+- The Cauliflower Julia set, see e.g.
+https://en.wikipedia.org/wiki/File:Julia_z2%2B0,25.png
+- Other examples from https://en.wikipedia.org/wiki/Julia_set
+- An exponential map Julia set, ambiently homeomorphic to the examples in
+https://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html
+ and
+https://ddd.uab.cat/pub/pubmat/02141493v43n1/02141493v43n1p27.pdf
+
+Remark: Some overflow runtime warnings are suppressed. This is because of the
+ way the iteration loop is implemented, using numpy's efficient computations.
+ Overflows and infinites are replaced after each step by a large number.
+"""
+
+import warnings
+from collections.abc import Callable
+from typing import Any
+
+import numpy
+from matplotlib import pyplot
+
+c_cauliflower = 0.25 + 0.0j
+c_polynomial_1 = -0.4 + 0.6j
+c_polynomial_2 = -0.1 + 0.651j
+c_exponential = -2.0
+nb_iterations = 56
+window_size = 2.0
+nb_pixels = 666
+
+
def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray:
    """
    Evaluate $e^z + c$.
    >>> eval_exponential(0, 0)
    1.0
    >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15
    True
    >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
    True
    """
    # Addition commutes, so c + e^z is numerically identical to e^z + c.
    return c_parameter + numpy.exp(z_values)
+
+
def eval_quadratic_polynomial(
    c_parameter: complex, z_values: numpy.ndarray
) -> numpy.ndarray:
    """
    Evaluate $z^2 + c$.

    >>> eval_quadratic_polynomial(0, 2)
    4
    >>> eval_quadratic_polynomial(-1, 1)
    0
    >>> round(eval_quadratic_polynomial(1.j, 0).imag)
    1
    >>> round(eval_quadratic_polynomial(1.j, 0).real)
    0
    """
    # z * z (not z ** 2) works elementwise for scalars and numpy arrays alike.
    z_squared = z_values * z_values
    return z_squared + c_parameter
+
+
def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray:
    """
    Create a grid of complex values of size nb_pixels*nb_pixels with real and
    imaginary parts ranging from -window_size to window_size (inclusive).
    Returns a numpy array.

    >>> prepare_grid(1,3)
    array([[-1.-1.j, -1.+0.j, -1.+1.j],
           [ 0.-1.j,  0.+0.j,  0.+1.j],
           [ 1.-1.j,  1.+0.j,  1.+1.j]])
    """
    # One evenly spaced axis, broadcast as a column (real part) against a
    # row (imaginary part) to fill the full complex grid.
    axis = numpy.linspace(-window_size, window_size, nb_pixels)
    real_parts = axis.reshape((nb_pixels, 1))
    imag_parts = axis.reshape((1, nb_pixels))
    return real_parts + 1.0j * imag_parts
+
+
+def iterate_function(
+ eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray],
+ function_params: Any,
+ nb_iterations: int,
+ z_0: numpy.ndarray,
+ infinity: float | None = None,
+) -> numpy.ndarray:
+ """
+ Iterate the function "eval_function" exactly nb_iterations times.
+ The first argument of the function is a parameter which is contained in
+ function_params. The variable z_0 is an array that contains the initial
+ values to iterate from.
+ This function returns the final iterates.
+
+ >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape
+ (3,)
+ >>> numpy.round(iterate_function(eval_quadratic_polynomial,
+ ... 0,
+ ... 3,
+ ... numpy.array([0,1,2]))[0])
+ 0j
+ >>> numpy.round(iterate_function(eval_quadratic_polynomial,
+ ... 0,
+ ... 3,
+ ... numpy.array([0,1,2]))[1])
+ (1+0j)
+ >>> numpy.round(iterate_function(eval_quadratic_polynomial,
+ ... 0,
+ ... 3,
+ ... numpy.array([0,1,2]))[2])
+ (256+0j)
+ """
+
+ z_n = z_0.astype("complex64")
+ for _ in range(nb_iterations):
+ z_n = eval_function(function_params, z_n)
+ if infinity is not None:
+ numpy.nan_to_num(z_n, copy=False, nan=infinity)
+ z_n[abs(z_n) == numpy.inf] = infinity
+ return z_n
+
+
def show_results(
    function_label: str,
    function_params: Any,
    escape_radius: float,
    z_final: numpy.ndarray,
) -> None:
    """
    Plots of whether the absolute value of z_final is greater than
    the value of escape_radius. Adds the function_label and function_params to
    the title.

    :param function_label: LaTeX-ready formula text inserted into the title
    :param function_params: the parameter c shown in the title
    :param escape_radius: threshold separating bounded from escaping points
    :param z_final: final iterates; one complex value per pixel

    >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]]))
    """

    # Transpose so the first grid axis (real part) becomes the horizontal axis.
    abs_z_final = (abs(z_final)).transpose()
    # Reverse the rows in place so the imaginary axis points upward on screen.
    abs_z_final[:, :] = abs_z_final[::-1, :]
    # Boolean image: True (one color) where the orbit stayed inside the radius.
    pyplot.matshow(abs_z_final < escape_radius)
    pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$")
    pyplot.show()
+
+
def ignore_overflow_warnings() -> None:
    """
    Ignore some overflow and invalid value warnings.

    >>> ignore_overflow_warnings()
    """
    # These RuntimeWarnings are expected: iterates are deliberately allowed to
    # overflow and are clamped afterwards (see the file header).
    suppressed_messages = (
        "overflow encountered in multiply",
        "invalid value encountered in multiply",
        "overflow encountered in absolute",
        "overflow encountered in exp",
    )
    for message in suppressed_messages:
        warnings.filterwarnings("ignore", category=RuntimeWarning, message=message)
+
+
+if __name__ == "__main__":
+ z_0 = prepare_grid(window_size, nb_pixels)
+
+ ignore_overflow_warnings() # See file header for explanations
+
+ nb_iterations = 24
+ escape_radius = 2 * abs(c_cauliflower) + 1
+ z_final = iterate_function(
+ eval_quadratic_polynomial,
+ c_cauliflower,
+ nb_iterations,
+ z_0,
+ infinity=1.1 * escape_radius,
+ )
+ show_results("z^2+c", c_cauliflower, escape_radius, z_final)
+
+ nb_iterations = 64
+ escape_radius = 2 * abs(c_polynomial_1) + 1
+ z_final = iterate_function(
+ eval_quadratic_polynomial,
+ c_polynomial_1,
+ nb_iterations,
+ z_0,
+ infinity=1.1 * escape_radius,
+ )
+ show_results("z^2+c", c_polynomial_1, escape_radius, z_final)
+
+ nb_iterations = 161
+ escape_radius = 2 * abs(c_polynomial_2) + 1
+ z_final = iterate_function(
+ eval_quadratic_polynomial,
+ c_polynomial_2,
+ nb_iterations,
+ z_0,
+ infinity=1.1 * escape_radius,
+ )
+ show_results("z^2+c", c_polynomial_2, escape_radius, z_final)
+
+ nb_iterations = 12
+ escape_radius = 10000.0
+ z_final = iterate_function(
+ eval_exponential,
+ c_exponential,
+ nb_iterations,
+ z_0 + 2,
+ infinity=1.0e10,
+ )
+ show_results("e^z+c", c_exponential, escape_radius, z_final)
diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py
index 07c1835b4..b0aaa86b1 100644
--- a/fractals/koch_snowflake.py
+++ b/fractals/koch_snowflake.py
@@ -46,7 +46,7 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar
0.28867513]), array([0.66666667, 0. ]), array([1, 0])]
"""
vectors = initial_vectors
- for i in range(steps):
+ for _ in range(steps):
vectors = iteration_step(vectors)
return vectors
diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py
index de795bb3f..84dbda997 100644
--- a/fractals/mandelbrot.py
+++ b/fractals/mandelbrot.py
@@ -36,7 +36,7 @@ def get_distance(x: float, y: float, max_step: int) -> float:
"""
a = x
b = y
- for step in range(max_step):
+ for step in range(max_step): # noqa: B007
a_new = a * a - b * b + x
b = 2 * a * b + y
a = a_new
@@ -101,9 +101,11 @@ def get_image(
of the Mandelbrot set is viewed. The main area of the Mandelbrot set is
roughly between "-1.5 < x < 0.5" and "-1 < y < 1" in the figure-coordinates.
- >>> get_image().load()[0,0]
+ Commenting out tests that slow down pytest...
+ # 13.35s call fractals/mandelbrot.py::mandelbrot.get_image
+ # >>> get_image().load()[0,0]
(255, 0, 0)
- >>> get_image(use_distance_color_coding = False).load()[0,0]
+ # >>> get_image(use_distance_color_coding = False).load()[0,0]
(255, 255, 255)
"""
img = Image.new("RGB", (image_width, image_height))
@@ -112,7 +114,6 @@ def get_image(
# loop through the image-coordinates
for image_x in range(image_width):
for image_y in range(image_height):
-
# determine the figure-coordinates based on the image-coordinates
figure_height = figure_width / image_width * image_height
figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py
index cf41ffa5f..45f7ab84c 100644
--- a/fractals/sierpinski_triangle.py
+++ b/fractals/sierpinski_triangle.py
@@ -1,76 +1,85 @@
-#!/usr/bin/python
+"""
+Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95
-"""Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95
+Simple example of fractal generation using recursion.
-Simple example of Fractal generation using recursive function.
+What is the Sierpiński Triangle?
+ The Sierpiński triangle (sometimes spelled Sierpinski), also called the
+Sierpiński gasket or Sierpiński sieve, is a fractal attractive fixed set with
+the overall shape of an equilateral triangle, subdivided recursively into
+smaller equilateral triangles. Originally constructed as a curve, this is one of
+the basic examples of self-similar sets—that is, it is a mathematically
+generated pattern that is reproducible at any magnification or reduction. It is
+named after the Polish mathematician Wacław Sierpiński, but appeared as a
+decorative pattern many centuries before the work of Sierpiński.
-What is Sierpinski Triangle?
->>The Sierpinski triangle (also with the original orthography Sierpinski), also called
-the Sierpinski gasket or the Sierpinski Sieve, is a fractal and attractive fixed set
-with the overall shape of an equilateral triangle, subdivided recursively into smaller
-equilateral triangles. Originally constructed as a curve, this is one of the basic
-examples of self-similar sets, i.e., it is a mathematically generated pattern that can
-be reproducible at any magnification or reduction. It is named after the Polish
-mathematician Wacław Sierpinski, but appeared as a decorative pattern many centuries
-prior to the work of Sierpinski.
-Requirements(pip):
- - turtle
-
-Python:
- - 2.6
-
-Usage:
- - $python sierpinski_triangle.py
-
-Credits: This code was written by editing the code from
-http://www.riannetrujillo.com/blog/python-fractal/
+Usage: python sierpinski_triangle.py
+Credits:
+ The above description is taken from
+ https://en.wikipedia.org/wiki/Sierpi%C5%84ski_triangle
+ This code was written by editing the code from
+ https://www.riannetrujillo.com/blog/python-fractal/
"""
import sys
import turtle
-PROGNAME = "Sierpinski Triangle"
-points = [[-175, -125], [0, 175], [175, -125]] # size of triangle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """
    Find the midpoint of two points

    >>> get_mid((0, 0), (2, 2))
    (1.0, 1.0)
    >>> get_mid((-3, -3), (3, 3))
    (0.0, 0.0)
    >>> get_mid((1, 0), (3, 2))
    (2.0, 1.0)
    >>> get_mid((0, 0), (1, 1))
    (0.5, 0.5)
    >>> get_mid((0, 0), (0, 0))
    (0.0, 0.0)
    """
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return mid_x, mid_y
-def getMid(p1, p2):
- return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) # find midpoint
+def triangle(
+ vertex1: tuple[float, float],
+ vertex2: tuple[float, float],
+ vertex3: tuple[float, float],
+ depth: int,
+) -> None:
+ """
+ Recursively draw the Sierpinski triangle given the vertices of the triangle
+ and the recursion depth
+ """
+ my_pen.up()
+ my_pen.goto(vertex1[0], vertex1[1])
+ my_pen.down()
+ my_pen.goto(vertex2[0], vertex2[1])
+ my_pen.goto(vertex3[0], vertex3[1])
+ my_pen.goto(vertex1[0], vertex1[1])
+ if depth == 0:
+ return
-def triangle(points, depth):
-
- myPen.up()
- myPen.goto(points[0][0], points[0][1])
- myPen.down()
- myPen.goto(points[1][0], points[1][1])
- myPen.goto(points[2][0], points[2][1])
- myPen.goto(points[0][0], points[0][1])
-
- if depth > 0:
- triangle(
- [points[0], getMid(points[0], points[1]), getMid(points[0], points[2])],
- depth - 1,
- )
- triangle(
- [points[1], getMid(points[0], points[1]), getMid(points[1], points[2])],
- depth - 1,
- )
- triangle(
- [points[2], getMid(points[2], points[1]), getMid(points[0], points[2])],
- depth - 1,
- )
+ triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
+ triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
+ triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
- "right format for using this script: "
- "$python fractals.py "
+ "Correct format for using this script: "
+ "python fractals.py "
)
- myPen = turtle.Turtle()
- myPen.ht()
- myPen.speed(5)
- myPen.pencolor("red")
- triangle(points, int(sys.argv[1]))
+ my_pen = turtle.Turtle()
+ my_pen.ht()
+ my_pen.speed(5)
+ my_pen.pencolor("red")
+
+ vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
+ triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
+ turtle.Screen().exitonclick()
diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py
index 0f573f158..0786ef8b0 100644
--- a/fuzzy_logic/fuzzy_operations.py
+++ b/fuzzy_logic/fuzzy_operations.py
@@ -1,4 +1,5 @@
-"""README, Author - Jigyasa Gandhi(mailto:jigsgandhi97@gmail.com)
+"""
+README, Author - Jigyasa Gandhi(mailto:jigsgandhi97@gmail.com)
Requirements:
- scikit-fuzzy
- numpy
diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py
index 97dbe182b..089c5c99a 100644
--- a/genetic_algorithm/basic_string.py
+++ b/genetic_algorithm/basic_string.py
@@ -9,18 +9,66 @@ from __future__ import annotations
import random
-# Maximum size of the population. bigger could be faster but is more memory expensive
+# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
-# Number of elements selected in every generation for evolution the selection takes
-# place from the best to the worst of that generation must be smaller than N_POPULATION
+# Number of elements selected in every generation of evolution. The selection takes
+# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
-# Probability that an element of a generation can mutate changing one of its genes this
-# guarantees that all genes will be used during evolution
+# Probability that an element of a generation can mutate, changing one of its genes.
+# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
-# just a seed to improve randomness required by the algorithm
+# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """
    Evaluate how similar the item is with the target by just
    counting each char in the right position
    >>> evaluate("Helxo Worlx", "Hello World")
    ('Helxo Worlx', 9.0)
    """
    matches = sum(
        1 for position, gene in enumerate(item) if gene == main_target[position]
    )
    return (item, float(matches))
+
+
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two string at a random point."""
    # Cut point is drawn once so both children use the same split.
    cut = random.randint(0, len(parent_1) - 1)
    first_child = parent_1[:cut] + parent_2[cut:]
    second_child = parent_2[:cut] + parent_1[cut:]
    return (first_child, second_child)
+
+
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    mutated = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # NOTE(review): randint(0, len(child)) - 1 yields indices -1..len-1,
        # so index -1 (the last char) is reachable two ways — preserved as-is
        # because changing it would alter the RNG stream.
        position = random.randint(0, len(child)) - 1
        mutated[position] = random.choice(genes)
    return "".join(mutated)
+
+
+# Select, crossover and mutate a new population.
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population"""
    new_population = []
    # Fitter parents get proportionally more children, capped at 10.
    children_count = int(parent_1[1] * 100) + 1
    if children_count >= 10:
        children_count = 10
    for _ in range(children_count):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        new_population.append(mutate(child_1, genes))
        new_population.append(mutate(child_2, genes))
    return new_population
+
+
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
"""
Verify that the target contains no genes besides the ones inside genes variable.
@@ -32,55 +80,44 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int,
>>> genes.remove("e")
>>> basic("test", genes)
Traceback (most recent call last):
- ...
+ ...
ValueError: ['e'] is not in genes list, evolution cannot converge
>>> genes.remove("s")
>>> basic("test", genes)
Traceback (most recent call last):
- ...
+ ...
ValueError: ['e', 's'] is not in genes list, evolution cannot converge
>>> genes.remove("t")
>>> basic("test", genes)
Traceback (most recent call last):
- ...
+ ...
ValueError: ['e', 's', 't'] is not in genes list, evolution cannot converge
"""
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
- raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
+ msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
+ raise ValueError(msg)
# Verify that the target contains no genes besides the ones inside genes variable.
not_in_genes_list = sorted({c for c in target if c not in genes})
if not_in_genes_list:
- raise ValueError(
- f"{not_in_genes_list} is not in genes list, evolution cannot converge"
- )
+ msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
+ raise ValueError(msg)
- # Generate random starting population
+ # Generate random starting population.
population = []
for _ in range(N_POPULATION):
population.append("".join([random.choice(genes) for i in range(len(target))]))
- # Just some logs to know what the algorithms is doing
+ # Just some logs to know what the algorithms is doing.
generation, total_population = 0, 0
- # This loop will end when we will find a perfect match for our target
+ # This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(population)
- # Random population created now it's time to evaluate
- def evaluate(item: str, main_target: str = target) -> tuple[str, float]:
- """
- Evaluate how similar the item is with the target by just
- counting each char in the right position
- >>> evaluate("Helxo Worlx", Hello World)
- ["Helxo Worlx", 9]
- """
- score = len(
- [g for position, g in enumerate(item) if g == main_target[position]]
- )
- return (item, float(score))
+ # Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
@@ -92,17 +129,17 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int,
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
- # but with a simple algorithm like this will probably be slower
- # we just need to call evaluate for every item inside population
- population_score = [evaluate(item) for item in population]
+ # but with a simple algorithm like this, it will probably be slower.
+ # We just need to call evaluate for every item inside the population.
+ population_score = [evaluate(item, target) for item in population]
- # Check if there is a matching evolution
+ # Check if there is a matching evolution.
population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
- # Print the Best result every 10 generation
- # just to know that the algorithm is working
+ # Print the best result every 10 generation.
+ # Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
@@ -111,52 +148,23 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int,
f"\nBest string: {population_score[0][0]}"
)
- # Flush the old population keeping some of the best evolutions
- # Keeping this avoid regression of evolution
+ # Flush the old population, keeping some of the best evolutions.
+ # Keeping this avoid regression of evolution.
population_best = population[: int(N_POPULATION / 3)]
population.clear()
population.extend(population_best)
- # Normalize population score from 0 to 1
+ # Normalize population score to be between 0 and 1.
population_score = [
(item, score / len(target)) for item, score in population_score
]
- # Select, Crossover and Mutate a new population
- def select(parent_1: tuple[str, float]) -> list[str]:
- """Select the second parent and generate new population"""
- pop = []
- # Generate more child proportionally to the fitness score
- child_n = int(parent_1[1] * 100) + 1
- child_n = 10 if child_n >= 10 else child_n
- for _ in range(child_n):
- parent_2 = population_score[random.randint(0, N_SELECTED)][0]
- child_1, child_2 = crossover(parent_1[0], parent_2)
- # Append new string to the population list
- pop.append(mutate(child_1))
- pop.append(mutate(child_2))
- return pop
-
- def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
- """Slice and combine two string in a random point"""
- random_slice = random.randint(0, len(parent_1) - 1)
- child_1 = parent_1[:random_slice] + parent_2[random_slice:]
- child_2 = parent_2[:random_slice] + parent_1[random_slice:]
- return (child_1, child_2)
-
- def mutate(child: str) -> str:
- """Mutate a random gene of a child with another one from the list"""
- child_list = list(child)
- if random.uniform(0, 1) < MUTATION_PROBABILITY:
- child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
- return "".join(child_list)
-
- # This is Selection
+ # This is selection
for i in range(N_SELECTED):
- population.extend(select(population_score[int(i)]))
+ population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
- # break the cycle. if this check is disabled the algorithm will take
- # forever to compute large strings but will also calculate small string in
- # a lot fewer generations
+ # break the cycle. If this check is disabled, the algorithm will take
+ # forever to compute large strings, but will also calculate small strings in
+ # a far fewer generations.
if len(population) > N_POPULATION:
break
@@ -169,7 +177,7 @@ if __name__ == "__main__":
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
+ generation, population, target = basic(target_str, genes_list)
print(
- "\nGeneration: %s\nTotal Population: %s\nTarget: %s"
- % basic(target_str, genes_list)
+ f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py
index de8ac7f88..93e625770 100644
--- a/geodesy/haversine_distance.py
+++ b/geodesy/haversine_distance.py
@@ -1,5 +1,9 @@
from math import asin, atan, cos, radians, sin, sqrt, tan
+AXIS_A = 6378137.0
+AXIS_B = 6356752.314245
+RADIUS = 6378137
+
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
"""
@@ -30,9 +34,6 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl
"""
# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
# Distance in metres(m)
- AXIS_A = 6378137.0
- AXIS_B = 6356752.314245
- RADIUS = 6378137
# Equation parameters
# Equation https://en.wikipedia.org/wiki/Haversine_formula#Formulation
flattening = (AXIS_A - AXIS_B) / AXIS_A
diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py
index bf8f1b9a5..4805674e5 100644
--- a/geodesy/lamberts_ellipsoidal_distance.py
+++ b/geodesy/lamberts_ellipsoidal_distance.py
@@ -2,11 +2,14 @@ from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
+AXIS_A = 6378137.0
+AXIS_B = 6356752.314245
+EQUATORIAL_RADIUS = 6378137
+
def lamberts_ellipsoidal_distance(
lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
-
"""
Calculate the shortest distance along the surface of an ellipsoid between
two points on the surface of earth given longitudes and latitudes
@@ -45,10 +48,6 @@ def lamberts_ellipsoidal_distance(
# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
# Distance in metres(m)
- AXIS_A = 6378137.0
- AXIS_B = 6356752.314245
- EQUATORIAL_RADIUS = 6378137
-
# Equation Parameters
# https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
flattening = (AXIS_A - AXIS_B) / AXIS_A
@@ -62,22 +61,22 @@ def lamberts_ellipsoidal_distance(
sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
# Intermediate P and Q values
- P_value = (b_lat1 + b_lat2) / 2
- Q_value = (b_lat2 - b_lat1) / 2
+ p_value = (b_lat1 + b_lat2) / 2
+ q_value = (b_lat2 - b_lat1) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
- X_numerator = (sin(P_value) ** 2) * (cos(Q_value) ** 2)
- X_demonimator = cos(sigma / 2) ** 2
- X_value = (sigma - sin(sigma)) * (X_numerator / X_demonimator)
+ x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
+ x_demonimator = cos(sigma / 2) ** 2
+ x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
- Y_numerator = (cos(P_value) ** 2) * (sin(Q_value) ** 2)
- Y_denominator = sin(sigma / 2) ** 2
- Y_value = (sigma + sin(sigma)) * (Y_numerator / Y_denominator)
+ y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
+ y_denominator = sin(sigma / 2) ** 2
+ y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
- return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (X_value + Y_value)))
+ return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py
index 2bb764fdc..7c22329ad 100644
--- a/graphics/bezier_curve.py
+++ b/graphics/bezier_curve.py
@@ -40,7 +40,7 @@ class BezierCurve:
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
- comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t ** i)
+ comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
)
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(output_values), 5) == 1
diff --git a/graphics/vector3_for_2d_rendering.py b/graphics/vector3_for_2d_rendering.py
index dfa22262a..a332206e6 100644
--- a/graphics/vector3_for_2d_rendering.py
+++ b/graphics/vector3_for_2d_rendering.py
@@ -28,9 +28,8 @@ def convert_to_2d(
TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10]
"""
if not all(isinstance(val, (float, int)) for val in locals().values()):
- raise TypeError(
- "Input values must either be float or int: " f"{list(locals().values())}"
- )
+ msg = f"Input values must either be float or int: {list(locals().values())}"
+ raise TypeError(msg)
projected_x = ((x * distance) / (z + distance)) * scale
projected_y = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
@@ -71,10 +70,11 @@ def rotate(
input_variables = locals()
del input_variables["axis"]
if not all(isinstance(val, (float, int)) for val in input_variables.values()):
- raise TypeError(
+ msg = (
"Input values except axis must either be float or int: "
f"{list(input_variables.values())}"
)
+ raise TypeError(msg)
angle = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
new_x = x * math.cos(angle) - y * math.sin(angle)
diff --git a/graphs/a_star.py b/graphs/a_star.py
index d3657cb19..e8735179e 100644
--- a/graphs/a_star.py
+++ b/graphs/a_star.py
@@ -1,38 +1,21 @@
-grid = [
- [0, 1, 0, 0, 0, 0],
- [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
- [0, 1, 0, 0, 0, 0],
- [0, 1, 0, 0, 1, 0],
- [0, 0, 0, 0, 1, 0],
+from __future__ import annotations
+
+DIRECTIONS = [
+ [-1, 0], # left
+ [0, -1], # down
+ [1, 0], # right
+ [0, 1], # up
]
-"""
-heuristic = [[9, 8, 7, 6, 5, 4],
- [8, 7, 6, 5, 4, 3],
- [7, 6, 5, 4, 3, 2],
- [6, 5, 4, 3, 2, 1],
- [5, 4, 3, 2, 1, 0]]"""
-
-init = [0, 0]
-goal = [len(grid) - 1, len(grid[0]) - 1] # all coordinates are given in format [y,x]
-cost = 1
-
-# the cost map which pushes the path closer to the goal
-heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
-for i in range(len(grid)):
- for j in range(len(grid[0])):
- heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
- if grid[i][j] == 1:
- heuristic[i][j] = 99 # added extra penalty in the heuristic map
-
-
-# the actions we can take
-delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # go up # go left # go down # go right
-
# function to search the path
-def search(grid, init, goal, cost, heuristic):
-
+def search(
+ grid: list[list[int]],
+ init: list[int],
+ goal: list[int],
+ cost: int,
+ heuristic: list[list[int]],
+) -> tuple[list[list[int]], list[list[int]]]:
closed = [
[0 for col in range(len(grid[0]))] for row in range(len(grid))
] # the reference grid
@@ -52,21 +35,21 @@ def search(grid, init, goal, cost, heuristic):
while not found and not resign:
if len(cell) == 0:
- return "FAIL"
+ raise ValueError("Algorithm is unable to find solution")
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
- next = cell.pop()
- x = next[2]
- y = next[3]
- g = next[1]
+ next_cell = cell.pop()
+ x = next_cell[2]
+ y = next_cell[3]
+ g = next_cell[1]
if x == goal[0] and y == goal[1]:
found = True
else:
- for i in range(len(delta)): # to try out different valid actions
- x2 = x + delta[i][0]
- y2 = y + delta[i][1]
+ for i in range(len(DIRECTIONS)): # to try out different valid actions
+ x2 = x + DIRECTIONS[i][0]
+ y2 = y + DIRECTIONS[i][1]
if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
if closed[x2][y2] == 0 and grid[x2][y2] == 0:
g2 = g + cost
@@ -79,8 +62,8 @@ def search(grid, init, goal, cost, heuristic):
y = goal[1]
invpath.append([x, y]) # we get the reverse path from here
while x != init[0] or y != init[1]:
- x2 = x - delta[action[x][y]][0]
- y2 = y - delta[action[x][y]][1]
+ x2 = x - DIRECTIONS[action[x][y]][0]
+ y2 = y - DIRECTIONS[action[x][y]][1]
x = x2
y = y2
invpath.append([x, y])
@@ -88,13 +71,37 @@ def search(grid, init, goal, cost, heuristic):
path = []
for i in range(len(invpath)):
path.append(invpath[len(invpath) - 1 - i])
+ return path, action
+
+
+if __name__ == "__main__":
+ grid = [
+ [0, 1, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
+ [0, 1, 0, 0, 0, 0],
+ [0, 1, 0, 0, 1, 0],
+ [0, 0, 0, 0, 1, 0],
+ ]
+
+ init = [0, 0]
+ # all coordinates are given in format [y,x]
+ goal = [len(grid) - 1, len(grid[0]) - 1]
+ cost = 1
+
+ # the cost map which pushes the path closer to the goal
+ heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
+ for i in range(len(grid)):
+ for j in range(len(grid[0])):
+ heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
+ if grid[i][j] == 1:
+ # added extra penalty in the heuristic map
+ heuristic[i][j] = 99
+
+ path, action = search(grid, init, goal, cost, heuristic)
+
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
- return path
-
-
-a = search(grid, init, goal, cost, heuristic)
-for i in range(len(a)):
- print(a[i])
+ for i in range(len(path)):
+ print(path[i])
diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py
index 7197369de..d28045282 100644
--- a/graphs/articulation_points.py
+++ b/graphs/articulation_points.py
@@ -1,14 +1,14 @@
# Finding Articulation Points in Undirected Graph
-def computeAP(l): # noqa: E741
+def compute_ap(l): # noqa: E741
n = len(l)
- outEdgeCount = 0
+ out_edge_count = 0
low = [0] * n
visited = [False] * n
- isArt = [False] * n
+ is_art = [False] * n
- def dfs(root, at, parent, outEdgeCount):
+ def dfs(root, at, parent, out_edge_count):
if parent == root:
- outEdgeCount += 1
+ out_edge_count += 1
visited[at] = True
low[at] = at
@@ -16,27 +16,27 @@ def computeAP(l): # noqa: E741
if to == parent:
pass
elif not visited[to]:
- outEdgeCount = dfs(root, to, at, outEdgeCount)
+ out_edge_count = dfs(root, to, at, out_edge_count)
low[at] = min(low[at], low[to])
# AP found via bridge
if at < low[to]:
- isArt[at] = True
+ is_art[at] = True
# AP found via cycle
if at == low[to]:
- isArt[at] = True
+ is_art[at] = True
else:
low[at] = min(low[at], to)
- return outEdgeCount
+ return out_edge_count
for i in range(n):
if not visited[i]:
- outEdgeCount = 0
- outEdgeCount = dfs(i, i, -1, outEdgeCount)
- isArt[i] = outEdgeCount > 1
+ out_edge_count = 0
+ out_edge_count = dfs(i, i, -1, out_edge_count)
+ is_art[i] = out_edge_count > 1
- for x in range(len(isArt)):
- if isArt[x] is True:
+ for x in range(len(is_art)):
+ if is_art[x] is True:
print(x)
@@ -52,4 +52,4 @@ data = {
7: [6, 8],
8: [5, 7],
}
-computeAP(data)
+compute_ap(data)
diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py
index 0f73d8d07..065b6185c 100644
--- a/graphs/basic_graphs.py
+++ b/graphs/basic_graphs.py
@@ -1,42 +1,69 @@
from collections import deque
+
+def _input(message):
+ return input(message).strip().split(" ")
+
+
+def initialize_unweighted_directed_graph(
+ node_count: int, edge_count: int
+) -> dict[int, list[int]]:
+ graph: dict[int, list[int]] = {}
+ for i in range(node_count):
+ graph[i + 1] = []
+
+ for e in range(edge_count):
+ x, y = (int(i) for i in _input(f"Edge {e + 1}: "))
+ graph[x].append(y)
+ return graph
+
+
+def initialize_unweighted_undirected_graph(
+ node_count: int, edge_count: int
+) -> dict[int, list[int]]:
+ graph: dict[int, list[int]] = {}
+ for i in range(node_count):
+ graph[i + 1] = []
+
+ for e in range(edge_count):
+ x, y = (int(i) for i in _input(f"Edge {e + 1}: "))
+ graph[x].append(y)
+ graph[y].append(x)
+ return graph
+
+
+def initialize_weighted_undirected_graph(
+ node_count: int, edge_count: int
+) -> dict[int, list[tuple[int, int]]]:
+ graph: dict[int, list[tuple[int, int]]] = {}
+ for i in range(node_count):
+ graph[i + 1] = []
+
+ for e in range(edge_count):
+ x, y, w = (int(i) for i in _input(f"Edge {e + 1}: "))
+ graph[x].append((y, w))
+ graph[y].append((x, w))
+ return graph
+
+
if __name__ == "__main__":
- # Accept No. of Nodes and edges
- n, m = map(int, input().split(" "))
+ n, m = (int(i) for i in _input("Number of nodes and edges: "))
- # Initialising Dictionary of edges
- g = {}
- for i in range(n):
- g[i + 1] = []
+ graph_choice = int(
+ _input(
+ "Press 1 or 2 or 3 \n"
+ "1. Unweighted directed \n"
+ "2. Unweighted undirected \n"
+ "3. Weighted undirected \n"
+ )[0]
+ )
- """
- ----------------------------------------------------------------------------
- Accepting edges of Unweighted Directed Graphs
- ----------------------------------------------------------------------------
- """
- for _ in range(m):
- x, y = map(int, input().strip().split(" "))
- g[x].append(y)
+ g = {
+ 1: initialize_unweighted_directed_graph,
+ 2: initialize_unweighted_undirected_graph,
+ 3: initialize_weighted_undirected_graph,
+ }[graph_choice](n, m)
- """
- ----------------------------------------------------------------------------
- Accepting edges of Unweighted Undirected Graphs
- ----------------------------------------------------------------------------
- """
- for _ in range(m):
- x, y = map(int, input().strip().split(" "))
- g[x].append(y)
- g[y].append(x)
-
- """
- ----------------------------------------------------------------------------
- Accepting edges of Weighted Undirected Graphs
- ----------------------------------------------------------------------------
- """
- for _ in range(m):
- x, y, r = map(int, input().strip().split(" "))
- g[x].append([y, r])
- g[y].append([x, r])
"""
--------------------------------------------------------------------------------
@@ -49,20 +76,20 @@ if __name__ == "__main__":
"""
-def dfs(G, s):
- vis, S = {s}, [s]
+def dfs(g, s):
+ vis, _s = {s}, [s]
print(s)
- while S:
+ while _s:
flag = 0
- for i in G[S[-1]]:
+ for i in g[_s[-1]]:
if i not in vis:
- S.append(i)
+ _s.append(i)
vis.add(i)
flag = 1
print(i)
break
if not flag:
- S.pop()
+ _s.pop()
"""
@@ -76,15 +103,15 @@ def dfs(G, s):
"""
-def bfs(G, s):
- vis, Q = {s}, deque([s])
+def bfs(g, s):
+ vis, q = {s}, deque([s])
print(s)
- while Q:
- u = Q.popleft()
- for v in G[u]:
+ while q:
+ u = q.popleft()
+ for v in g[u]:
if v not in vis:
vis.add(v)
- Q.append(v)
+ q.append(v)
print(v)
@@ -100,10 +127,10 @@ def bfs(G, s):
"""
-def dijk(G, s):
+def dijk(g, s):
dist, known, path = {s: 0}, set(), {s: 0}
while True:
- if len(known) == len(G) - 1:
+ if len(known) == len(g) - 1:
break
mini = 100000
for i in dist:
@@ -111,11 +138,10 @@ def dijk(G, s):
mini = dist[i]
u = i
known.add(u)
- for v in G[u]:
- if v[0] not in known:
- if dist[u] + v[1] < dist.get(v[0], 100000):
- dist[v[0]] = dist[u] + v[1]
- path[v[0]] = u
+ for v in g[u]:
+ if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000):
+ dist[v[0]] = dist[u] + v[1]
+ path[v[0]] = u
for i in dist:
if i != s:
print(dist[i])
@@ -128,27 +154,27 @@ def dijk(G, s):
"""
-def topo(G, ind=None, Q=None):
- if Q is None:
- Q = [1]
+def topo(g, ind=None, q=None):
+ if q is None:
+ q = [1]
if ind is None:
- ind = [0] * (len(G) + 1) # SInce oth Index is ignored
- for u in G:
- for v in G[u]:
+ ind = [0] * (len(g) + 1) # Since 0th index is ignored
+ for u in g:
+ for v in g[u]:
ind[v] += 1
- Q = deque()
- for i in G:
+ q = deque()
+ for i in g:
if ind[i] == 0:
- Q.append(i)
- if len(Q) == 0:
+ q.append(i)
+ if len(q) == 0:
return
- v = Q.popleft()
+ v = q.popleft()
print(v)
- for w in G[v]:
+ for w in g[v]:
ind[w] -= 1
if ind[w] == 0:
- Q.append(w)
- topo(G, ind, Q)
+ q.append(w)
+ topo(g, ind, q)
"""
@@ -161,7 +187,7 @@ def topo(G, ind=None, Q=None):
def adjm():
n = input().strip()
a = []
- for i in range(n):
+ for _ in range(n):
a.append(map(int, input().strip().split()))
return a, n
@@ -179,9 +205,9 @@ def adjm():
"""
-def floy(A_and_n):
- (A, n) = A_and_n
- dist = list(A)
+def floy(a_and_n):
+ (a, n) = a_and_n
+ dist = list(a)
path = [[0] * n for i in range(n)]
for k in range(n):
for i in range(n):
@@ -204,10 +230,10 @@ def floy(A_and_n):
"""
-def prim(G, s):
+def prim(g, s):
dist, known, path = {s: 0}, set(), {s: 0}
while True:
- if len(known) == len(G) - 1:
+ if len(known) == len(g) - 1:
break
mini = 100000
for i in dist:
@@ -215,11 +241,10 @@ def prim(G, s):
mini = dist[i]
u = i
known.add(u)
- for v in G[u]:
- if v[0] not in known:
- if v[1] < dist.get(v[0], 100000):
- dist[v[0]] = v[1]
- path[v[0]] = u
+ for v in g[u]:
+ if v[0] not in known and v[1] < dist.get(v[0], 100000):
+ dist[v[0]] = v[1]
+ path[v[0]] = u
return dist
@@ -237,7 +262,7 @@ def prim(G, s):
def edglist():
n, m = map(int, input().split(" "))
edges = []
- for i in range(m):
+ for _ in range(m):
edges.append(map(int, input().split(" ")))
return edges, n
@@ -252,16 +277,16 @@ def edglist():
"""
-def krusk(E_and_n):
+def krusk(e_and_n):
# Sort edges on the basis of distance
- (E, n) = E_and_n
- E.sort(reverse=True, key=lambda x: x[2])
+ (e, n) = e_and_n
+ e.sort(reverse=True, key=lambda x: x[2])
s = [{i} for i in range(1, n + 1)]
while True:
if len(s) == 1:
break
print(s)
- x = E.pop()
+ x = e.pop()
for i in range(len(s)):
if x[0] in s[i]:
break
diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py
index d6d6b2ac7..9ac8bae85 100644
--- a/graphs/bellman_ford.py
+++ b/graphs/bellman_ford.py
@@ -11,7 +11,7 @@ def check_negative_cycle(
graph: list[dict[str, int]], distance: list[float], edge_count: int
):
for j in range(edge_count):
- u, v, w = [graph[j][k] for k in ["src", "dst", "weight"]]
+ u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf") and distance[u] + w < distance[v]:
return True
return False
@@ -36,9 +36,9 @@ def bellman_ford(
distance = [float("inf")] * vertex_count
distance[src] = 0.0
- for i in range(vertex_count - 1):
+ for _ in range(vertex_count - 1):
for j in range(edge_count):
- u, v, w = [graph[j][k] for k in ["src", "dst", "weight"]]
+ u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf") and distance[u] + w < distance[v]:
distance[v] = distance[u] + w
@@ -58,14 +58,14 @@ if __name__ == "__main__":
V = int(input("Enter number of vertices: ").strip())
E = int(input("Enter number of edges: ").strip())
- graph: list[dict[str, int]] = [dict() for j in range(E)]
+ graph: list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
- src, dest, weight = [
+ src, dest, weight = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
- ]
+ )
graph[i] = {"src": src, "dst": dest, "weight": weight}
source = int(input("\nEnter shortest path source:").strip())
diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py
new file mode 100644
index 000000000..a4489026b
--- /dev/null
+++ b/graphs/bi_directional_dijkstra.py
@@ -0,0 +1,139 @@
+"""
+Bi-directional Dijkstra's algorithm.
+
+A bi-directional approach is an efficient and
+less time consuming optimization for Dijkstra's
+searching algorithm
+
+Reference: https://en.wikipedia.org/wiki/Bidirectional_search
+"""
+
+# Author: Swayam Singh (https://github.com/practice404)
+
+
+from queue import PriorityQueue
+from typing import Any
+
+import numpy as np
+
+
+def pass_and_relaxation(
+ graph: dict,
+ v: str,
+ visited_forward: set,
+ visited_backward: set,
+ cst_fwd: dict,
+ cst_bwd: dict,
+ queue: PriorityQueue,
+ parent: dict,
+ shortest_distance: float | int,
+) -> float | int:
+ for nxt, d in graph[v]:
+ if nxt in visited_forward:
+ continue
+ old_cost_f = cst_fwd.get(nxt, np.inf)
+ new_cost_f = cst_fwd[v] + d
+ if new_cost_f < old_cost_f:
+ queue.put((new_cost_f, nxt))
+ cst_fwd[nxt] = new_cost_f
+ parent[nxt] = v
+ if nxt in visited_backward:
+ if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
+ shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
+ return shortest_distance
+
+
+def bidirectional_dij(
+ source: str, destination: str, graph_forward: dict, graph_backward: dict
+) -> int:
+ """
+ Bi-directional Dijkstra's algorithm.
+
+ Returns:
+ shortest_path_distance (int): length of the shortest path.
+
+ Warnings:
+ If the destination is not reachable, function returns -1
+
+ >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
+ 3
+ """
+ shortest_path_distance = -1
+
+ visited_forward = set()
+ visited_backward = set()
+ cst_fwd = {source: 0}
+ cst_bwd = {destination: 0}
+ parent_forward = {source: None}
+ parent_backward = {destination: None}
+ queue_forward: PriorityQueue[Any] = PriorityQueue()
+ queue_backward: PriorityQueue[Any] = PriorityQueue()
+
+ shortest_distance = np.inf
+
+ queue_forward.put((0, source))
+ queue_backward.put((0, destination))
+
+ if source == destination:
+ return 0
+
+ while not queue_forward.empty() and not queue_backward.empty():
+ _, v_fwd = queue_forward.get()
+ visited_forward.add(v_fwd)
+
+ _, v_bwd = queue_backward.get()
+ visited_backward.add(v_bwd)
+
+ shortest_distance = pass_and_relaxation(
+ graph_forward,
+ v_fwd,
+ visited_forward,
+ visited_backward,
+ cst_fwd,
+ cst_bwd,
+ queue_forward,
+ parent_forward,
+ shortest_distance,
+ )
+
+ shortest_distance = pass_and_relaxation(
+ graph_backward,
+ v_bwd,
+ visited_backward,
+ visited_forward,
+ cst_bwd,
+ cst_fwd,
+ queue_backward,
+ parent_backward,
+ shortest_distance,
+ )
+
+ if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
+ break
+
+ if shortest_distance != np.inf:
+ shortest_path_distance = shortest_distance
+ return shortest_path_distance
+
+
+graph_fwd = {
+ "B": [["C", 1]],
+ "C": [["D", 1]],
+ "D": [["F", 1]],
+ "E": [["B", 1], ["G", 2]],
+ "F": [],
+ "G": [["F", 1]],
+}
+graph_bwd = {
+ "B": [["E", 1]],
+ "C": [["B", 1]],
+ "D": [["C", 1]],
+ "F": [["D", 1], ["G", 1]],
+ "E": [[None, np.inf]],
+ "G": [["E", 2]],
+}
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py
index 729d8957b..373d67142 100644
--- a/graphs/bidirectional_a_star.py
+++ b/graphs/bidirectional_a_star.py
@@ -1,15 +1,12 @@
"""
https://en.wikipedia.org/wiki/Bidirectional_search
"""
-
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
-from typing import Optional
-
HEURISTIC = 0
grid = [
@@ -50,7 +47,7 @@ class Node:
goal_x: int,
goal_y: int,
g_cost: int,
- parent: Optional[Node],
+ parent: Node | None,
) -> None:
self.pos_x = pos_x
self.pos_y = pos_y
@@ -71,7 +68,7 @@ class Node:
if HEURISTIC == 1:
return abs(dx) + abs(dy)
else:
- return sqrt(dy ** 2 + dx ** 2)
+ return sqrt(dy**2 + dx**2)
def __lt__(self, other: Node) -> bool:
return self.f_cost < other.f_cost
@@ -157,7 +154,7 @@ class AStar:
)
return successors
- def retrace_path(self, node: Optional[Node]) -> list[TPosition]:
+ def retrace_path(self, node: Node | None) -> list[TPosition]:
"""
Retrace the path from parents to parents until start node
"""
diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py
index 9b84ab21b..511b080a9 100644
--- a/graphs/bidirectional_breadth_first_search.py
+++ b/graphs/bidirectional_breadth_first_search.py
@@ -1,11 +1,9 @@
"""
https://en.wikipedia.org/wiki/Bidirectional_search
"""
-
from __future__ import annotations
import time
-from typing import Optional
Path = list[tuple[int, int]]
@@ -24,7 +22,7 @@ delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class Node:
def __init__(
- self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Optional[Node]
+ self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None
):
self.pos_x = pos_x
self.pos_y = pos_y
@@ -36,16 +34,19 @@ class Node:
class BreadthFirstSearch:
"""
- >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
- >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1])
+ # Comment out slow pytests...
+ # 9.15s call graphs/bidirectional_breadth_first_search.py:: \
+ # graphs.bidirectional_breadth_first_search.BreadthFirstSearch
+ # >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ # >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1])
(0, 1)
- >>> [x.pos for x in bfs.get_successors(bfs.start)]
+ # >>> [x.pos for x in bfs.get_successors(bfs.start)]
[(1, 0), (0, 1)]
- >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1])
+ # >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1])
(1, 0)
- >>> bfs.retrace_path(bfs.start)
+ # >>> bfs.retrace_path(bfs.start)
[(0, 0)]
- >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE
+ # >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE
[(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1),
(5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
"""
@@ -57,7 +58,7 @@ class BreadthFirstSearch:
self.node_queue = [self.start]
self.reached = False
- def search(self) -> Optional[Path]:
+ def search(self) -> Path | None:
while self.node_queue:
current_node = self.node_queue.pop(0)
@@ -93,7 +94,7 @@ class BreadthFirstSearch:
)
return successors
- def retrace_path(self, node: Optional[Node]) -> Path:
+ def retrace_path(self, node: Node | None) -> Path:
"""
Retrace the path from parents to parents until start node
"""
@@ -125,7 +126,7 @@ class BidirectionalBreadthFirstSearch:
self.bwd_bfs = BreadthFirstSearch(goal, start)
self.reached = False
- def search(self) -> Optional[Path]:
+ def search(self) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
current_fwd_node = self.fwd_bfs.node_queue.pop(0)
current_bwd_node = self.bwd_bfs.node_queue.pop(0)
diff --git a/graphs/boruvka.py b/graphs/boruvka.py
new file mode 100644
index 000000000..2715a3085
--- /dev/null
+++ b/graphs/boruvka.py
@@ -0,0 +1,175 @@
+"""Borůvka's algorithm.
+
+ Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm.
+ Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a
+ connected graph, or a minimum spanning forest of a graph that is not connected.
+
+ The time complexity of this algorithm is O(ELogV), where E represents the number
+ of edges, while V represents the number of nodes.
+ O(number_of_edges Log number_of_nodes)
+
+ The space complexity of this algorithm is O(V + E), since we have to keep a couple
+ of lists whose sizes are equal to the number of nodes, as well as keep all the
+ edges of a graph inside of the data structure itself.
+
+ Borůvka's algorithm gives us pretty much the same result as other MST Algorithms -
+ they all find the minimum spanning tree, and the time complexity is approximately
+ the same.
+
+ One advantage that Borůvka's algorithm has compared to the alternatives is that it
+ doesn't need to presort the edges or maintain a priority queue in order to find the
+ minimum spanning tree.
+ Even though that doesn't help its complexity, since it still passes the edges log V
+ times, it is a bit simpler to code.
+
+ Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm
+"""
+from __future__ import annotations
+
+from typing import Any
+
+
+class Graph:
+ def __init__(self, num_of_nodes: int) -> None:
+ """
+ Arguments:
+ num_of_nodes - the number of nodes in the graph
+ Attributes:
+ m_num_of_nodes - the number of nodes in the graph.
+ m_edges - the list of edges.
+ m_component - the dictionary which stores the index of the component which
+ a node belongs to.
+ """
+
+ self.m_num_of_nodes = num_of_nodes
+ self.m_edges: list[list[int]] = []
+ self.m_component: dict[int, int] = {}
+
+ def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
+ """Adds an edge in the format [first, second, edge weight] to graph."""
+
+ self.m_edges.append([u_node, v_node, weight])
+
+ def find_component(self, u_node: int) -> int:
+ """Propagates a new component throughout a given component."""
+
+ if self.m_component[u_node] == u_node:
+ return u_node
+ return self.find_component(self.m_component[u_node])
+
+ def set_component(self, u_node: int) -> None:
+ """Finds the component index of a given node"""
+
+ if self.m_component[u_node] != u_node:
+ for k in self.m_component:
+ self.m_component[k] = self.find_component(k)
+
+ def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
+ """Union finds the roots of components for two nodes, compares the components
+ in terms of size, and attaches the smaller one to the larger one to form
+ single component"""
+
+ if component_size[u_node] <= component_size[v_node]:
+ self.m_component[u_node] = v_node
+ component_size[v_node] += component_size[u_node]
+ self.set_component(u_node)
+
+ elif component_size[u_node] >= component_size[v_node]:
+ self.m_component[v_node] = self.find_component(u_node)
+ component_size[u_node] += component_size[v_node]
+ self.set_component(v_node)
+
+ def boruvka(self) -> None:
+ """Performs Borůvka's algorithm to find MST."""
+
+ # Initialize additional lists required to algorithm.
+ component_size = []
+ mst_weight = 0
+
+ minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
+
+ # A list of components (initialized to all of the nodes)
+ for node in range(self.m_num_of_nodes):
+ self.m_component.update({node: node})
+ component_size.append(1)
+
+ num_of_components = self.m_num_of_nodes
+
+ while num_of_components > 1:
+ for edge in self.m_edges:
+ u, v, w = edge
+
+ u_component = self.m_component[u]
+ v_component = self.m_component[v]
+
+ if u_component != v_component:
+ """If the current minimum weight edge of component u doesn't
+ exist (is -1), or if it's greater than the edge we're
+ observing right now, we will assign the value of the edge
+ we're observing to it.
+
+ If the current minimum weight edge of component v doesn't
+ exist (is -1), or if it's greater than the edge we're
+ observing right now, we will assign the value of the edge
+ we're observing to it"""
+
+ for component in (u_component, v_component):
+ if (
+ minimum_weight_edge[component] == -1
+ or minimum_weight_edge[component][2] > w
+ ):
+ minimum_weight_edge[component] = [u, v, w]
+
+ for edge in minimum_weight_edge:
+ if isinstance(edge, list):
+ u, v, w = edge
+
+ u_component = self.m_component[u]
+ v_component = self.m_component[v]
+
+ if u_component != v_component:
+ mst_weight += w
+ self.union(component_size, u_component, v_component)
+ print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
+ num_of_components -= 1
+
+ minimum_weight_edge = [-1] * self.m_num_of_nodes
+ print(f"The total weight of the minimal spanning tree is: {mst_weight}")
+
+
+def test_vector() -> None:
+ """
+ >>> g = Graph(8)
+ >>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
+ ... (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4)):
+ ... g.add_edge(*u_v_w)
+ >>> g.boruvka()
+ Added edge [0 - 3]
+ Added weight: 5
+
+ Added edge [0 - 1]
+ Added weight: 10
+
+ Added edge [2 - 3]
+ Added weight: 4
+
+ Added edge [4 - 7]
+ Added weight: 5
+
+ Added edge [4 - 5]
+ Added weight: 10
+
+ Added edge [6 - 7]
+ Added weight: 4
+
+ Added edge [3 - 4]
+ Added weight: 8
+
+ The total weight of the minimal spanning tree is: 46
+ """
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py
index 305db01e1..171d3875f 100644
--- a/graphs/breadth_first_search.py
+++ b/graphs/breadth_first_search.py
@@ -1,13 +1,14 @@
#!/usr/bin/python
""" Author: OMKAR PATHAK """
+from __future__ import annotations
-from typing import Dict, List, Set
+from queue import Queue
class Graph:
def __init__(self) -> None:
- self.vertices: Dict[int, List[int]] = {}
+ self.vertices: dict[int, list[int]] = {}
def print_graph(self) -> None:
"""
@@ -35,7 +36,7 @@ class Graph:
else:
self.vertices[from_vertex] = [to_vertex]
- def bfs(self, start_vertex: int) -> Set[int]:
+ def bfs(self, start_vertex: int) -> set[int]:
"""
>>> g = Graph()
>>> g.add_edge(0, 1)
@@ -52,19 +53,19 @@ class Graph:
visited = set()
# create a first in first out queue to store all the vertices for BFS
- queue = []
+ queue: Queue = Queue()
# mark the source node as visited and enqueue it
visited.add(start_vertex)
- queue.append(start_vertex)
+ queue.put(start_vertex)
- while queue:
- vertex = queue.pop(0)
+ while not queue.empty():
+ vertex = queue.get()
# loop through all adjacent vertex and enqueue it if not yet visited
for adjacent_vertex in self.vertices[vertex]:
if adjacent_vertex not in visited:
- queue.append(adjacent_vertex)
+ queue.put(adjacent_vertex)
visited.add(adjacent_vertex)
return visited
diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py
index a90e963a4..a0b92b90b 100644
--- a/graphs/breadth_first_search_2.py
+++ b/graphs/breadth_first_search_2.py
@@ -14,6 +14,10 @@ while Q is non-empty:
"""
from __future__ import annotations
+from collections import deque
+from queue import Queue
+from timeit import timeit
+
G = {
"A": ["B", "C"],
"B": ["A", "D", "E"],
@@ -24,21 +28,60 @@ G = {
}
-def breadth_first_search(graph: dict, start: str) -> set[str]:
+def breadth_first_search(graph: dict, start: str) -> list[str]:
"""
- >>> ''.join(sorted(breadth_first_search(G, 'A')))
+ Implementation of breadth first search using queue.Queue.
+
+ >>> ''.join(breadth_first_search(G, 'A'))
'ABCDEF'
"""
explored = {start}
- queue = [start]
- while queue:
- v = queue.pop(0) # queue.popleft()
+ result = [start]
+ queue: Queue = Queue()
+ queue.put(start)
+ while not queue.empty():
+ v = queue.get()
for w in graph[v]:
if w not in explored:
explored.add(w)
- queue.append(w)
- return explored
+ result.append(w)
+ queue.put(w)
+ return result
+
+
+def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]:
+ """
+ Implementation of breadth first search using collections.deque.
+
+ >>> ''.join(breadth_first_search_with_deque(G, 'A'))
+ 'ABCDEF'
+ """
+ visited = {start}
+ result = [start]
+ queue = deque([start])
+ while queue:
+ v = queue.popleft()
+ for child in graph[v]:
+ if child not in visited:
+ visited.add(child)
+ result.append(child)
+ queue.append(child)
+ return result
+
+
+def benchmark_function(name: str) -> None:
+ setup = f"from __main__ import G, {name}"
+ number = 10000
+ res = timeit(f"{name}(G, 'A')", setup=setup, number=number)
+ print(f"{name:<35} finished {number} runs in {res:.5f} seconds")
if __name__ == "__main__":
- print(breadth_first_search(G, "A"))
+ import doctest
+
+ doctest.testmod()
+
+ benchmark_function("breadth_first_search")
+ benchmark_function("breadth_first_search_with_deque")
+ # breadth_first_search finished 10000 runs in 0.20999 seconds
+ # breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds
diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py
index 48f8ab1a4..d489b110b 100644
--- a/graphs/breadth_first_search_shortest_path.py
+++ b/graphs/breadth_first_search_shortest_path.py
@@ -3,8 +3,6 @@ from a given source node to a target node in an unweighted graph.
"""
from __future__ import annotations
-from typing import Optional
-
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
@@ -24,7 +22,7 @@ class Graph:
"""
self.graph = graph
# mapping node to its parent in resulting breadth first tree
- self.parent: dict[str, Optional[str]] = {}
+ self.parent: dict[str, str | None] = {}
self.source_vertex = source_vertex
def breath_first_search(self) -> None:
@@ -60,7 +58,9 @@ class Graph:
Case 1 - No path is found.
>>> g.shortest_path("Foo")
- 'No path from vertex:G to vertex:Foo'
+ Traceback (most recent call last):
+ ...
+ ValueError: No path from vertex: G to vertex: Foo
Case 2 - The path is found.
>>> g.shortest_path("D")
@@ -73,7 +73,10 @@ class Graph:
target_vertex_parent = self.parent.get(target_vertex)
if target_vertex_parent is None:
- return f"No path from vertex:{self.source_vertex} to vertex:{target_vertex}"
+ msg = (
+ f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
+ )
+ raise ValueError(msg)
return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
diff --git a/graphs/bfs_shortest_path.py b/graphs/breadth_first_search_shortest_path_2.py
similarity index 100%
rename from graphs/bfs_shortest_path.py
rename to graphs/breadth_first_search_shortest_path_2.py
diff --git a/graphs/bfs_zero_one_shortest_path.py b/graphs/breadth_first_search_zero_one_shortest_path.py
similarity index 97%
rename from graphs/bfs_zero_one_shortest_path.py
rename to graphs/breadth_first_search_zero_one_shortest_path.py
index a68b5602c..78047c5d2 100644
--- a/graphs/bfs_zero_one_shortest_path.py
+++ b/graphs/breadth_first_search_zero_one_shortest_path.py
@@ -1,13 +1,13 @@
-from collections import deque
-from collections.abc import Iterator
-from dataclasses import dataclass
-from typing import Optional, Union
-
"""
Finding the shortest path in 0-1-graph in O(E + V) which is faster than dijkstra.
0-1-graph is the weighted graph with the weights equal to 0 or 1.
Link: https://codeforces.com/blog/entry/22276
"""
+from __future__ import annotations
+
+from collections import deque
+from collections.abc import Iterator
+from dataclasses import dataclass
@dataclass
@@ -59,7 +59,7 @@ class AdjacencyList:
self._graph[from_vertex].append(Edge(to_vertex, weight))
- def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> Optional[int]:
+ def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
"""
Return the shortest distance from start_vertex to finish_vertex in 0-1-graph.
1 1 1
@@ -107,7 +107,7 @@ class AdjacencyList:
ValueError: No path from start_vertex to finish_vertex.
"""
queue = deque([start_vertex])
- distances: list[Union[int, None]] = [None] * self.size
+ distances: list[int | None] = [None] * self.size
distances[start_vertex] = 0
while queue:
diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py
index 00b771649..7fc57cbc7 100644
--- a/graphs/check_bipartite_graph_bfs.py
+++ b/graphs/check_bipartite_graph_bfs.py
@@ -6,24 +6,26 @@
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
-def checkBipartite(graph):
- queue = []
+from queue import Queue
+
+
+def check_bipartite(graph):
+ queue = Queue()
visited = [False] * len(graph)
color = [-1] * len(graph)
def bfs():
- while queue:
- u = queue.pop(0)
+ while not queue.empty():
+ u = queue.get()
visited[u] = True
for neighbour in graph[u]:
-
if neighbour == u:
return False
if color[neighbour] == -1:
color[neighbour] = 1 - color[u]
- queue.append(neighbour)
+ queue.put(neighbour)
elif color[neighbour] == color[u]:
return False
@@ -32,7 +34,7 @@ def checkBipartite(graph):
for i in range(len(graph)):
if not visited[i]:
- queue.append(i)
+ queue.put(i)
color[i] = 0
if bfs() is False:
return False
@@ -42,4 +44,4 @@ def checkBipartite(graph):
if __name__ == "__main__":
# Adjacency List of graph
- print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
+ print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
diff --git a/graphs/check_cycle.py b/graphs/check_cycle.py
new file mode 100644
index 000000000..9fd1cd80f
--- /dev/null
+++ b/graphs/check_cycle.py
@@ -0,0 +1,52 @@
+"""
+Program to check if a cycle is present in a given graph
+"""
+
+
+def check_cycle(graph: dict) -> bool:
+ """
+ Returns True if graph is cyclic else False
+ >>> check_cycle(graph={0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]})
+ False
+ >>> check_cycle(graph={0:[1, 2], 1:[2], 2:[0, 3], 3:[3]})
+ True
+ """
+ # Keep track of visited nodes
+ visited: set[int] = set()
+ # To detect a back edge, keep track of vertices currently in the recursion stack
+ rec_stk: set[int] = set()
+ return any(
+ node not in visited and depth_first_search(graph, node, visited, rec_stk)
+ for node in graph
+ )
+
+
+def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
+ """
+ Recur for all neighbours.
+ If any neighbour is visited and in rec_stk then graph is cyclic.
+ >>> graph = {0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]}
+ >>> vertex, visited, rec_stk = 0, set(), set()
+ >>> depth_first_search(graph, vertex, visited, rec_stk)
+ False
+ """
+ # Mark current node as visited and add to recursion stack
+ visited.add(vertex)
+ rec_stk.add(vertex)
+
+ for node in graph[vertex]:
+ if node not in visited:
+ if depth_first_search(graph, node, visited, rec_stk):
+ return True
+ elif node in rec_stk:
+ return True
+
+ # The node needs to be removed from recursion stack before function ends
+ rec_stk.remove(vertex)
+ return False
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/graphs/connected_components.py b/graphs/connected_components.py
index 4af7803d7..15c7633e1 100644
--- a/graphs/connected_components.py
+++ b/graphs/connected_components.py
@@ -27,7 +27,7 @@ def dfs(graph: dict, vert: int, visited: list) -> list:
if not visited[neighbour]:
connected_verts += dfs(graph, neighbour, visited)
- return [vert] + connected_verts
+ return [vert, *connected_verts]
def connected_components(graph: dict) -> list:
diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py
index 5d74a6db9..f20a503ca 100644
--- a/graphs/depth_first_search.py
+++ b/graphs/depth_first_search.py
@@ -1,11 +1,8 @@
"""Non recursive implementation of a DFS algorithm."""
-
from __future__ import annotations
-from typing import Set
-
-def depth_first_search(graph: dict, start: str) -> Set[str]:
+def depth_first_search(graph: dict, start: str) -> set[str]:
"""Depth First Search on Graph
:param graph: directed graph in dictionary format
:param start: starting vertex as a string
diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py
index d15fcbbfe..b0bdfab60 100644
--- a/graphs/dijkstra.py
+++ b/graphs/dijkstra.py
@@ -56,8 +56,8 @@ def dijkstra(graph, start, end):
for v, c in graph[u]:
if v in visited:
continue
- next = cost + c
- heapq.heappush(heap, (next, v))
+ next_item = cost + c
+ heapq.heappush(heap, (next_item, v))
return -1
@@ -103,14 +103,14 @@ G3 = {
"G": [["F", 1]],
}
-shortDistance = dijkstra(G, "E", "C")
-print(shortDistance) # E -- 3 --> F -- 3 --> C == 6
+short_distance = dijkstra(G, "E", "C")
+print(short_distance) # E -- 3 --> F -- 3 --> C == 6
-shortDistance = dijkstra(G2, "E", "F")
-print(shortDistance) # E -- 3 --> F == 3
+short_distance = dijkstra(G2, "E", "F")
+print(short_distance) # E -- 3 --> F == 3
-shortDistance = dijkstra(G3, "E", "F")
-print(shortDistance) # E -- 2 --> G -- 1 --> F == 3
+short_distance = dijkstra(G3, "E", "F")
+print(short_distance) # E -- 2 --> G -- 1 --> F == 3
if __name__ == "__main__":
import doctest
diff --git a/graphs/dijkstra_2.py b/graphs/dijkstra_2.py
index 762884136..f548463ff 100644
--- a/graphs/dijkstra_2.py
+++ b/graphs/dijkstra_2.py
@@ -1,6 +1,6 @@
-def printDist(dist, V):
+def print_dist(dist, v):
print("\nVertex Distance")
- for i in range(V):
+ for i in range(v):
if dist[i] != float("inf"):
print(i, "\t", int(dist[i]), end="\t")
else:
@@ -8,34 +8,34 @@ def printDist(dist, V):
print()
-def minDist(mdist, vset, V):
- minVal = float("inf")
- minInd = -1
- for i in range(V):
- if (not vset[i]) and mdist[i] < minVal:
- minInd = i
- minVal = mdist[i]
- return minInd
+def min_dist(mdist, vset, v):
+ min_val = float("inf")
+ min_ind = -1
+ for i in range(v):
+ if (not vset[i]) and mdist[i] < min_val:
+ min_ind = i
+ min_val = mdist[i]
+ return min_ind
-def Dijkstra(graph, V, src):
- mdist = [float("inf") for i in range(V)]
- vset = [False for i in range(V)]
+def dijkstra(graph, v, src):
+ mdist = [float("inf") for _ in range(v)]
+ vset = [False for _ in range(v)]
mdist[src] = 0.0
- for i in range(V - 1):
- u = minDist(mdist, vset, V)
+ for _ in range(v - 1):
+ u = min_dist(mdist, vset, v)
vset[u] = True
- for v in range(V):
+ for i in range(v):
if (
- (not vset[v])
- and graph[u][v] != float("inf")
- and mdist[u] + graph[u][v] < mdist[v]
+ (not vset[i])
+ and graph[u][i] != float("inf")
+ and mdist[u] + graph[u][i] < mdist[i]
):
- mdist[v] = mdist[u] + graph[u][v]
+ mdist[i] = mdist[u] + graph[u][i]
- printDist(mdist, V)
+ print_dist(mdist, v)
if __name__ == "__main__":
@@ -55,4 +55,4 @@ if __name__ == "__main__":
graph[src][dst] = weight
gsrc = int(input("\nEnter shortest path source:").strip())
- Dijkstra(graph, V, gsrc)
+ dijkstra(graph, V, gsrc)
diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py
index 6b64834ac..452138fe9 100644
--- a/graphs/dijkstra_algorithm.py
+++ b/graphs/dijkstra_algorithm.py
@@ -15,7 +15,7 @@ class PriorityQueue:
self.array = []
self.pos = {} # To store the pos of node in array
- def isEmpty(self):
+ def is_empty(self):
return self.cur_size == 0
def min_heapify(self, idx):
@@ -89,13 +89,13 @@ class Graph:
# Edge going from node u to v and v to u with weight w
# u (w)-> v, v (w) -> u
# Check if u already in graph
- if u in self.adjList.keys():
+ if u in self.adjList:
self.adjList[u].append((v, w))
else:
self.adjList[u] = [(v, w)]
# Assuming undirected graph
- if v in self.adjList.keys():
+ if v in self.adjList:
self.adjList[v].append((u, w))
else:
self.adjList[v] = [(u, w)]
@@ -110,24 +110,24 @@ class Graph:
self.par = [-1] * self.num_nodes
# src is the source node
self.dist[src] = 0
- Q = PriorityQueue()
- Q.insert((0, src)) # (dist from src, node)
- for u in self.adjList.keys():
+ q = PriorityQueue()
+ q.insert((0, src)) # (dist from src, node)
+ for u in self.adjList:
if u != src:
self.dist[u] = sys.maxsize # Infinity
self.par[u] = -1
- while not Q.isEmpty():
- u = Q.extract_min() # Returns node with the min dist from source
+ while not q.is_empty():
+ u = q.extract_min() # Returns node with the min dist from source
# Update the distance of all the neighbours of u and
# if their prev dist was INFINITY then push them in Q
for v, w in self.adjList[u]:
new_dist = self.dist[u] + w
if self.dist[v] > new_dist:
if self.dist[v] == sys.maxsize:
- Q.insert((new_dist, v))
+ q.insert((new_dist, v))
else:
- Q.decrease_key((self.dist[v], v), new_dist)
+ q.decrease_key((self.dist[v], v), new_dist)
self.dist[v] = new_dist
self.par[v] = u
diff --git a/graphs/dijkstra_alternate.py b/graphs/dijkstra_alternate.py
new file mode 100644
index 000000000..7beef6b04
--- /dev/null
+++ b/graphs/dijkstra_alternate.py
@@ -0,0 +1,98 @@
+from __future__ import annotations
+
+
+class Graph:
+ def __init__(self, vertices: int) -> None:
+ """
+ >>> graph = Graph(2)
+ >>> graph.vertices
+ 2
+ >>> len(graph.graph)
+ 2
+ >>> len(graph.graph[0])
+ 2
+ """
+ self.vertices = vertices
+ self.graph = [[0] * vertices for _ in range(vertices)]
+
+ def print_solution(self, distances_from_source: list[int]) -> None:
+ """
+ >>> Graph(0).print_solution([]) # doctest: +NORMALIZE_WHITESPACE
+ Vertex Distance from Source
+ """
+ print("Vertex \t Distance from Source")
+ for vertex in range(self.vertices):
+ print(vertex, "\t\t", distances_from_source[vertex])
+
+ def minimum_distance(
+ self, distances_from_source: list[int], visited: list[bool]
+ ) -> int:
+ """
+ A utility function to find the vertex with minimum distance value, from the set
+ of vertices not yet included in shortest path tree.
+
+ >>> Graph(3).minimum_distance([1, 2, 3], [False, False, True])
+ 0
+ """
+
+ # Initialize minimum distance for next node
+ minimum = 1e7
+ min_index = 0
+
+ # Search not nearest vertex not in the shortest path tree
+ for vertex in range(self.vertices):
+ if distances_from_source[vertex] < minimum and visited[vertex] is False:
+ minimum = distances_from_source[vertex]
+ min_index = vertex
+ return min_index
+
+ def dijkstra(self, source: int) -> None:
+ """
+ Function that implements Dijkstra's single source shortest path algorithm for a
+ graph represented using adjacency matrix representation.
+
+ >>> Graph(4).dijkstra(1) # doctest: +NORMALIZE_WHITESPACE
+ Vertex Distance from Source
+ 0 10000000
+ 1 0
+ 2 10000000
+ 3 10000000
+ """
+
+ distances = [int(1e7)] * self.vertices # distances from the source
+ distances[source] = 0
+ visited = [False] * self.vertices
+
+ for _ in range(self.vertices):
+ u = self.minimum_distance(distances, visited)
+ visited[u] = True
+
+ # Update dist value of the adjacent vertices
+ # of the picked vertex only if the current
+ # distance is greater than new distance and
+ # the vertex in not in the shortest path tree
+ for v in range(self.vertices):
+ if (
+ self.graph[u][v] > 0
+ and visited[v] is False
+ and distances[v] > distances[u] + self.graph[u][v]
+ ):
+ distances[v] = distances[u] + self.graph[u][v]
+
+ self.print_solution(distances)
+
+
+if __name__ == "__main__":
+ graph = Graph(9)
+ graph.graph = [
+ [0, 4, 0, 0, 0, 0, 0, 8, 0],
+ [4, 0, 8, 0, 0, 0, 0, 11, 0],
+ [0, 8, 0, 7, 0, 4, 0, 0, 2],
+ [0, 0, 7, 0, 9, 14, 0, 0, 0],
+ [0, 0, 0, 9, 0, 10, 0, 0, 0],
+ [0, 0, 4, 14, 10, 0, 2, 0, 0],
+ [0, 0, 0, 0, 0, 2, 0, 1, 6],
+ [8, 11, 0, 0, 0, 0, 1, 0, 7],
+ [0, 0, 2, 0, 0, 0, 6, 7, 0],
+ ]
+ graph.dijkstra(0)
diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py
new file mode 100644
index 000000000..c23d82343
--- /dev/null
+++ b/graphs/dijkstra_binary_grid.py
@@ -0,0 +1,89 @@
+"""
+This script implements the Dijkstra algorithm on a binary grid.
+The grid consists of 0s and 1s, where 1 represents
+a walkable node and 0 represents an obstacle.
+The algorithm finds the shortest path from a start node to a destination node.
+Diagonal movement can be allowed or disallowed.
+"""
+
+from heapq import heappop, heappush
+
+import numpy as np
+
+
+def dijkstra(
+ grid: np.ndarray,
+ source: tuple[int, int],
+ destination: tuple[int, int],
+ allow_diagonal: bool,
+) -> tuple[float | int, list[tuple[int, int]]]:
+ """
+ Implements Dijkstra's algorithm on a binary grid.
+
+ Args:
+ grid (np.ndarray): A 2D numpy array representing the grid.
+ 1 represents a walkable node and 0 represents an obstacle.
+ source (Tuple[int, int]): A tuple representing the start node.
+ destination (Tuple[int, int]): A tuple representing the
+ destination node.
+ allow_diagonal (bool): A boolean determining whether
+ diagonal movements are allowed.
+
+ Returns:
+ Tuple[Union[float, int], List[Tuple[int, int]]]:
+ The shortest distance from the start node to the destination node
+ and the shortest path as a list of nodes.
+
+ >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False)
+ (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])
+
+ >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), True)
+ (2.0, [(0, 0), (1, 1), (2, 2)])
+
+ >>> dijkstra(np.array([[1, 1, 1], [0, 0, 1], [0, 1, 1]]), (0, 0), (2, 2), False)
+ (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)])
+ """
+ rows, cols = grid.shape
+ dx = [-1, 1, 0, 0]
+ dy = [0, 0, -1, 1]
+ if allow_diagonal:
+ dx += [-1, -1, 1, 1]
+ dy += [-1, 1, -1, 1]
+
+ queue, visited = [(0, source)], set()
+ matrix = np.full((rows, cols), np.inf)
+ matrix[source] = 0
+ predecessors = np.empty((rows, cols), dtype=object)
+ predecessors[source] = None
+
+ while queue:
+ (dist, (x, y)) = heappop(queue)
+ if (x, y) in visited:
+ continue
+ visited.add((x, y))
+
+ if (x, y) == destination:
+ path = []
+ while (x, y) != source:
+ path.append((x, y))
+ x, y = predecessors[x, y]
+ path.append(source) # add the source manually
+ path.reverse()
+ return matrix[destination], path
+
+ for i in range(len(dx)):
+ nx, ny = x + dx[i], y + dy[i]
+ if 0 <= nx < rows and 0 <= ny < cols:
+ next_node = grid[nx][ny]
+ if next_node == 1 and matrix[nx, ny] > dist + 1:
+ heappush(queue, (dist + 1, (nx, ny)))
+ matrix[nx, ny] = dist + 1
+ predecessors[nx, ny] = (x, y)
+
+ return np.inf, []
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py
index 5cfa9e13e..8ca645fda 100644
--- a/graphs/directed_and_undirected_(weighted)_graph.py
+++ b/graphs/directed_and_undirected_(weighted)_graph.py
@@ -39,7 +39,7 @@ class DirectedGraph:
stack = []
visited = []
if s == -2:
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
ss = s
@@ -87,7 +87,7 @@ class DirectedGraph:
d = deque()
visited = []
if s == -2:
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
d.append(s)
visited.append(s)
while d:
@@ -114,7 +114,7 @@ class DirectedGraph:
stack = []
visited = []
if s == -2:
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
ss = s
@@ -146,7 +146,7 @@ class DirectedGraph:
def cycle_nodes(self):
stack = []
visited = []
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
parent = -2
@@ -167,7 +167,7 @@ class DirectedGraph:
and not on_the_way_back
):
len_stack = len(stack) - 1
- while True and len_stack >= 0:
+ while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
@@ -199,7 +199,7 @@ class DirectedGraph:
def has_cycle(self):
stack = []
visited = []
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
parent = -2
@@ -220,15 +220,12 @@ class DirectedGraph:
and not on_the_way_back
):
len_stack_minus_one = len(stack) - 1
- while True and len_stack_minus_one >= 0:
+ while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
- # TODO:The following code is unreachable.
- anticipating_nodes.add(stack[len_stack_minus_one])
- len_stack_minus_one -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
@@ -308,7 +305,7 @@ class Graph:
stack = []
visited = []
if s == -2:
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
ss = s
@@ -356,7 +353,7 @@ class Graph:
d = deque()
visited = []
if s == -2:
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
d.append(s)
visited.append(s)
while d:
@@ -374,7 +371,7 @@ class Graph:
def cycle_nodes(self):
stack = []
visited = []
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
parent = -2
@@ -395,7 +392,7 @@ class Graph:
and not on_the_way_back
):
len_stack = len(stack) - 1
- while True and len_stack >= 0:
+ while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
@@ -427,7 +424,7 @@ class Graph:
def has_cycle(self):
stack = []
visited = []
- s = list(self.graph)[0]
+ s = next(iter(self.graph))
stack.append(s)
visited.append(s)
parent = -2
@@ -448,16 +445,12 @@ class Graph:
and not on_the_way_back
):
len_stack_minus_one = len(stack) - 1
- while True and len_stack_minus_one >= 0:
+ while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
- # TODO: the following code is unreachable
- # is this meant to be called in the else ?
- anticipating_nodes.add(stack[len_stack_minus_one])
- len_stack_minus_one -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
diff --git a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py
index 0f359ff1a..5c774f4b8 100644
--- a/graphs/edmonds_karp_multiple_source_and_sink.py
+++ b/graphs/edmonds_karp_multiple_source_and_sink.py
@@ -1,15 +1,15 @@
class FlowNetwork:
def __init__(self, graph, sources, sinks):
- self.sourceIndex = None
- self.sinkIndex = None
+ self.source_index = None
+ self.sink_index = None
self.graph = graph
- self._normalizeGraph(sources, sinks)
- self.verticesCount = len(graph)
- self.maximumFlowAlgorithm = None
+ self._normalize_graph(sources, sinks)
+ self.vertices_count = len(graph)
+ self.maximum_flow_algorithm = None
# make only one source and one sink
- def _normalizeGraph(self, sources, sinks):
+ def _normalize_graph(self, sources, sinks):
if sources is int:
sources = [sources]
if sinks is int:
@@ -18,54 +18,54 @@ class FlowNetwork:
if len(sources) == 0 or len(sinks) == 0:
return
- self.sourceIndex = sources[0]
- self.sinkIndex = sinks[0]
+ self.source_index = sources[0]
+ self.sink_index = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(sources) > 1 or len(sinks) > 1:
- maxInputFlow = 0
+ max_input_flow = 0
for i in sources:
- maxInputFlow += sum(self.graph[i])
+ max_input_flow += sum(self.graph[i])
size = len(self.graph) + 1
for room in self.graph:
room.insert(0, 0)
self.graph.insert(0, [0] * size)
for i in sources:
- self.graph[0][i + 1] = maxInputFlow
- self.sourceIndex = 0
+ self.graph[0][i + 1] = max_input_flow
+ self.source_index = 0
size = len(self.graph) + 1
for room in self.graph:
room.append(0)
self.graph.append([0] * size)
for i in sinks:
- self.graph[i + 1][size - 1] = maxInputFlow
- self.sinkIndex = size - 1
+ self.graph[i + 1][size - 1] = max_input_flow
+ self.sink_index = size - 1
- def findMaximumFlow(self):
- if self.maximumFlowAlgorithm is None:
+ def find_maximum_flow(self):
+ if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before.")
- if self.sourceIndex is None or self.sinkIndex is None:
+ if self.source_index is None or self.sink_index is None:
return 0
- self.maximumFlowAlgorithm.execute()
- return self.maximumFlowAlgorithm.getMaximumFlow()
+ self.maximum_flow_algorithm.execute()
+ return self.maximum_flow_algorithm.get_maximum_flow()
- def setMaximumFlowAlgorithm(self, Algorithm):
- self.maximumFlowAlgorithm = Algorithm(self)
+ def set_maximum_flow_algorithm(self, algorithm):
+ self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
- def __init__(self, flowNetwork):
- self.flowNetwork = flowNetwork
- self.verticesCount = flowNetwork.verticesCount
- self.sourceIndex = flowNetwork.sourceIndex
- self.sinkIndex = flowNetwork.sinkIndex
+ def __init__(self, flow_network):
+ self.flow_network = flow_network
+ self.verticies_count = flow_network.vertices_count
+ self.source_index = flow_network.source_index
+ self.sink_index = flow_network.sink_index
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
- self.graph = flowNetwork.graph
+ self.graph = flow_network.graph
self.executed = False
def execute(self):
@@ -79,95 +79,95 @@ class FlowNetworkAlgorithmExecutor:
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
- def __init__(self, flowNetwork):
- super().__init__(flowNetwork)
+ def __init__(self, flow_network):
+ super().__init__(flow_network)
# use this to save your result
- self.maximumFlow = -1
+ self.maximum_flow = -1
- def getMaximumFlow(self):
+ def get_maximum_flow(self):
if not self.executed:
raise Exception("You should execute algorithm before using its result!")
- return self.maximumFlow
+ return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
- def __init__(self, flowNetwork):
- super().__init__(flowNetwork)
+ def __init__(self, flow_network):
+ super().__init__(flow_network)
- self.preflow = [[0] * self.verticesCount for i in range(self.verticesCount)]
+ self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
- self.heights = [0] * self.verticesCount
- self.excesses = [0] * self.verticesCount
+ self.heights = [0] * self.verticies_count
+ self.excesses = [0] * self.verticies_count
def _algorithm(self):
- self.heights[self.sourceIndex] = self.verticesCount
+ self.heights[self.source_index] = self.verticies_count
# push some substance to graph
- for nextVertexIndex, bandwidth in enumerate(self.graph[self.sourceIndex]):
- self.preflow[self.sourceIndex][nextVertexIndex] += bandwidth
- self.preflow[nextVertexIndex][self.sourceIndex] -= bandwidth
- self.excesses[nextVertexIndex] += bandwidth
+ for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
+ self.preflow[self.source_index][nextvertex_index] += bandwidth
+ self.preflow[nextvertex_index][self.source_index] -= bandwidth
+ self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
- verticesList = [
+ vertices_list = [
i
- for i in range(self.verticesCount)
- if i != self.sourceIndex and i != self.sinkIndex
+ for i in range(self.verticies_count)
+ if i not in {self.source_index, self.sink_index}
]
# move through list
i = 0
- while i < len(verticesList):
- vertexIndex = verticesList[i]
- previousHeight = self.heights[vertexIndex]
- self.processVertex(vertexIndex)
- if self.heights[vertexIndex] > previousHeight:
+ while i < len(vertices_list):
+ vertex_index = vertices_list[i]
+ previous_height = self.heights[vertex_index]
+ self.process_vertex(vertex_index)
+ if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
- verticesList.insert(0, verticesList.pop(i))
+ vertices_list.insert(0, vertices_list.pop(i))
i = 0
else:
i += 1
- self.maximumFlow = sum(self.preflow[self.sourceIndex])
+ self.maximum_flow = sum(self.preflow[self.source_index])
- def processVertex(self, vertexIndex):
- while self.excesses[vertexIndex] > 0:
- for neighbourIndex in range(self.verticesCount):
+ def process_vertex(self, vertex_index):
+ while self.excesses[vertex_index] > 0:
+ for neighbour_index in range(self.verticies_count):
# if it's neighbour and current vertex is higher
if (
- self.graph[vertexIndex][neighbourIndex]
- - self.preflow[vertexIndex][neighbourIndex]
+ self.graph[vertex_index][neighbour_index]
+ - self.preflow[vertex_index][neighbour_index]
> 0
- and self.heights[vertexIndex] > self.heights[neighbourIndex]
+ and self.heights[vertex_index] > self.heights[neighbour_index]
):
- self.push(vertexIndex, neighbourIndex)
+ self.push(vertex_index, neighbour_index)
- self.relabel(vertexIndex)
+ self.relabel(vertex_index)
- def push(self, fromIndex, toIndex):
- preflowDelta = min(
- self.excesses[fromIndex],
- self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex],
+ def push(self, from_index, to_index):
+ preflow_delta = min(
+ self.excesses[from_index],
+ self.graph[from_index][to_index] - self.preflow[from_index][to_index],
)
- self.preflow[fromIndex][toIndex] += preflowDelta
- self.preflow[toIndex][fromIndex] -= preflowDelta
- self.excesses[fromIndex] -= preflowDelta
- self.excesses[toIndex] += preflowDelta
+ self.preflow[from_index][to_index] += preflow_delta
+ self.preflow[to_index][from_index] -= preflow_delta
+ self.excesses[from_index] -= preflow_delta
+ self.excesses[to_index] += preflow_delta
- def relabel(self, vertexIndex):
- minHeight = None
- for toIndex in range(self.verticesCount):
+ def relabel(self, vertex_index):
+ min_height = None
+ for to_index in range(self.verticies_count):
if (
- self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex]
+ self.graph[vertex_index][to_index]
+ - self.preflow[vertex_index][to_index]
> 0
- ):
- if minHeight is None or self.heights[toIndex] < minHeight:
- minHeight = self.heights[toIndex]
+ ) and (min_height is None or self.heights[to_index] < min_height):
+ min_height = self.heights[to_index]
- if minHeight is not None:
- self.heights[vertexIndex] = minHeight + 1
+ if min_height is not None:
+ self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
@@ -184,10 +184,10 @@ if __name__ == "__main__":
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
- flowNetwork = FlowNetwork(graph, entrances, exits)
+ flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
- flowNetwork.setMaximumFlowAlgorithm(PushRelabelExecutor)
+ flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
- maximumFlow = flowNetwork.findMaximumFlow()
+ maximum_flow = flow_network.find_maximum_flow()
- print(f"maximum flow is {maximumFlow}")
+ print(f"maximum flow is {maximum_flow}")
diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py
index 7850933b0..6b4ea8e21 100644
--- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py
+++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py
@@ -6,8 +6,8 @@
# using dfs for finding eulerian path traversal
-def dfs(u, graph, visited_edge, path=[]):
- path = path + [u]
+def dfs(u, graph, visited_edge, path=None):
+ path = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
visited_edge[u][v], visited_edge[v][u] = True, True
@@ -20,7 +20,7 @@ def check_circuit_or_path(graph, max_node):
odd_degree_nodes = 0
odd_node = -1
for i in range(max_node):
- if i not in graph.keys():
+ if i not in graph:
continue
if len(graph[i]) % 2 == 1:
odd_degree_nodes += 1
@@ -50,21 +50,21 @@ def check_euler(graph, max_node):
def main():
- G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
- G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
- G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
- G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
- G5 = {
+ g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
+ g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
+ g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
+ g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
+ g5 = {
1: [],
2: []
# all degree is zero
}
max_node = 10
- check_euler(G1, max_node)
- check_euler(G2, max_node)
- check_euler(G3, max_node)
- check_euler(G4, max_node)
- check_euler(G5, max_node)
+ check_euler(g1, max_node)
+ check_euler(g2, max_node)
+ check_euler(g3, max_node)
+ check_euler(g4, max_node)
+ check_euler(g5, max_node)
if __name__ == "__main__":
diff --git a/graphs/even_tree.py b/graphs/even_tree.py
index c9aef6e78..92ffb4b23 100644
--- a/graphs/even_tree.py
+++ b/graphs/even_tree.py
@@ -16,12 +16,12 @@ components containing an even number of nodes.
from collections import defaultdict
-def dfs(start):
+def dfs(start: int) -> int:
"""DFS traversal"""
# pylint: disable=redefined-outer-name
ret = 1
visited[start] = True
- for v in tree.get(start):
+ for v in tree[start]:
if v not in visited:
ret += dfs(v)
if ret % 2 == 0:
@@ -48,8 +48,8 @@ def even_tree():
if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
- visited = {}
- cuts = []
+ visited: dict[int, bool] = {}
+ cuts: list[int] = []
count = 0
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py
index 6555dd7bc..c17606745 100644
--- a/graphs/finding_bridges.py
+++ b/graphs/finding_bridges.py
@@ -1,42 +1,106 @@
-# Finding Bridges in Undirected Graph
-def computeBridges(graph):
- id = 0
+"""
+An edge is a bridge if, after removing it count of connected components in graph will
+be increased by one. Bridges represent vulnerabilities in a connected network and are
+useful for designing reliable networks. For example, in a wired computer network, an
+articulation point indicates the critical computers and a bridge indicates the critical
+wires or connections.
+
+For more details, refer this article:
+https://www.geeksforgeeks.org/bridge-in-a-graph/
+"""
+
+
+def __get_demo_graph(index):
+ return [
+ {
+ 0: [1, 2],
+ 1: [0, 2],
+ 2: [0, 1, 3, 5],
+ 3: [2, 4],
+ 4: [3],
+ 5: [2, 6, 8],
+ 6: [5, 7],
+ 7: [6, 8],
+ 8: [5, 7],
+ },
+ {
+ 0: [6],
+ 1: [9],
+ 2: [4, 5],
+ 3: [4],
+ 4: [2, 3],
+ 5: [2],
+ 6: [0, 7],
+ 7: [6],
+ 8: [],
+ 9: [1],
+ },
+ {
+ 0: [4],
+ 1: [6],
+ 2: [],
+ 3: [5, 6, 7],
+ 4: [0, 6],
+ 5: [3, 8, 9],
+ 6: [1, 3, 4, 7],
+ 7: [3, 6, 8, 9],
+ 8: [5, 7],
+ 9: [5, 7],
+ },
+ {
+ 0: [1, 3],
+ 1: [0, 2, 4],
+ 2: [1, 3, 4],
+ 3: [0, 2, 4],
+ 4: [1, 2, 3],
+ },
+ ][index]
+
+
+def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
+ """
+ Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi
+ >>> compute_bridges(__get_demo_graph(0))
+ [(3, 4), (2, 3), (2, 5)]
+ >>> compute_bridges(__get_demo_graph(1))
+ [(6, 7), (0, 6), (1, 9), (3, 4), (2, 4), (2, 5)]
+ >>> compute_bridges(__get_demo_graph(2))
+ [(1, 6), (4, 6), (0, 4)]
+ >>> compute_bridges(__get_demo_graph(3))
+ []
+ >>> compute_bridges({})
+ []
+ """
+
+ id_ = 0
n = len(graph) # No of vertices in graph
low = [0] * n
visited = [False] * n
- def dfs(at, parent, bridges, id):
+ def dfs(at, parent, bridges, id_):
visited[at] = True
- low[at] = id
- id += 1
+ low[at] = id_
+ id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
- dfs(to, at, bridges, id)
+ dfs(to, at, bridges, id_)
low[at] = min(low[at], low[to])
- if at < low[to]:
- bridges.append([at, to])
+ if id_ <= low[to]:
+ bridges.append((at, to) if at < to else (to, at))
else:
# This edge is a back edge and cannot be a bridge
- low[at] = min(low[at], to)
+ low[at] = min(low[at], low[to])
- bridges = []
+ bridges: list[tuple[int, int]] = []
for i in range(n):
if not visited[i]:
- dfs(i, -1, bridges, id)
- print(bridges)
+ dfs(i, -1, bridges, id_)
+ return bridges
-graph = {
- 0: [1, 2],
- 1: [0, 2],
- 2: [0, 1, 3, 5],
- 3: [2, 4],
- 4: [3],
- 5: [2, 6, 8],
- 6: [5, 7],
- 7: [6, 8],
- 8: [5, 7],
-}
-computeBridges(graph)
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py
index ff7063082..208e57f9b 100644
--- a/graphs/frequent_pattern_graph_miner.py
+++ b/graphs/frequent_pattern_graph_miner.py
@@ -54,7 +54,7 @@ def get_frequency_table(edge_array):
Returns Frequency Table
"""
distinct_edge = get_distinct_edge(edge_array)
- frequency_table = dict()
+ frequency_table = {}
for item in distinct_edge:
bit = get_bitcode(edge_array, item)
@@ -79,7 +79,7 @@ def get_nodes(frequency_table):
{'11111': ['ab', 'ac', 'df', 'bd', 'bc']}
"""
nodes = {}
- for i, item in enumerate(frequency_table):
+ for item in frequency_table:
nodes.setdefault(item[2], []).append(item[0])
return nodes
@@ -130,11 +130,11 @@ def create_edge(nodes, graph, cluster, c1):
"""
create edge between the nodes
"""
- for i in cluster[c1].keys():
+ for i in cluster[c1]:
count = 0
c2 = c1 + 1
while c2 < max(cluster.keys()):
- for j in cluster[c2].keys():
+ for j in cluster[c2]:
"""
creates edge only if the condition satisfies
"""
@@ -151,16 +151,16 @@ def create_edge(nodes, graph, cluster, c1):
def construct_graph(cluster, nodes):
- X = cluster[max(cluster.keys())]
+ x = cluster[max(cluster.keys())]
cluster[max(cluster.keys()) + 1] = "Header"
graph = {}
- for i in X:
- if tuple(["Header"]) in graph:
- graph[tuple(["Header"])].append(X[i])
+ for i in x:
+ if ("Header",) in graph:
+ graph[("Header",)].append(x[i])
else:
- graph[tuple(["Header"])] = [X[i]]
- for i in X:
- graph[tuple(X[i])] = [["Header"]]
+ graph[("Header",)] = [x[i]]
+ for i in x:
+ graph[tuple(x[i])] = [["Header"]]
i = 1
while i < max(cluster) - 1:
create_edge(nodes, graph, cluster, i)
@@ -168,16 +168,16 @@ def construct_graph(cluster, nodes):
return graph
-def myDFS(graph, start, end, path=[]):
+def my_dfs(graph, start, end, path=None):
"""
find different DFS walk from given node to Header node
"""
- path = path + [start]
+ path = (path or []) + [start]
if start == end:
paths.append(path)
for node in graph[start]:
if tuple(node) not in path:
- myDFS(graph, tuple(node), end, path)
+ my_dfs(graph, tuple(node), end, path)
def find_freq_subgraph_given_support(s, cluster, graph):
@@ -185,24 +185,24 @@ def find_freq_subgraph_given_support(s, cluster, graph):
find edges of multiple frequent subgraphs
"""
k = int(s / 100 * (len(cluster) - 1))
- for i in cluster[k].keys():
- myDFS(graph, tuple(cluster[k][i]), tuple(["Header"]))
+ for i in cluster[k]:
+ my_dfs(graph, tuple(cluster[k][i]), ("Header",))
def freq_subgraphs_edge_list(paths):
"""
returns Edge list for frequent subgraphs
"""
- freq_sub_EL = []
+ freq_sub_el = []
for edges in paths:
- EL = []
+ el = []
for j in range(len(edges) - 1):
temp = list(edges[j])
for e in temp:
edge = (e[0], e[1])
- EL.append(edge)
- freq_sub_EL.append(EL)
- return freq_sub_EL
+ el.append(edge)
+ freq_sub_el.append(el)
+ return freq_sub_el
def preprocess(edge_array):
@@ -227,6 +227,6 @@ if __name__ == "__main__":
support = get_support(cluster)
graph = construct_graph(cluster, nodes)
find_freq_subgraph_given_support(60, cluster, graph)
- paths = []
+ paths: list = []
freq_subgraph_edge_list = freq_subgraphs_edge_list(paths)
print_all()
diff --git a/graphs/gale_shapley_bigraph.py b/graphs/gale_shapley_bigraph.py
index 56b8c6c77..f4b315381 100644
--- a/graphs/gale_shapley_bigraph.py
+++ b/graphs/gale_shapley_bigraph.py
@@ -17,7 +17,7 @@ def stable_matching(
>>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]]
>>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]]
- >>> print(stable_matching(donor_pref, recipient_pref))
+ >>> stable_matching(donor_pref, recipient_pref)
[1, 2, 3, 0]
"""
assert len(donor_pref) == len(recipient_pref)
diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py
new file mode 100644
index 000000000..76f34f845
--- /dev/null
+++ b/graphs/graph_adjacency_list.py
@@ -0,0 +1,589 @@
+#!/usr/bin/env python3
+"""
+Author: Vikram Nithyanandam
+
+Description:
+The following implementation is a robust unweighted Graph data structure
+implemented using an adjacency list. The vertices and edges of this graph can be
+effectively initialized and modified while storing your chosen generic
+value in each vertex.
+
+Adjacency List: https://en.wikipedia.org/wiki/Adjacency_list
+
+Potential Future Ideas:
+- Add a flag to set edge weights on and set edge weights
+- Make edge weights and vertex values customizable to store whatever the client wants
+- Support multigraph functionality if the client wants it
+"""
+from __future__ import annotations
+
+import random
+import unittest
+from pprint import pformat
+from typing import Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class GraphAdjacencyList(Generic[T]):
+ def __init__(
+ self, vertices: list[T], edges: list[list[T]], directed: bool = True
+ ) -> None:
+ """
+ Parameters:
+ - vertices: (list[T]) The list of vertex names the client wants to
+ pass in. Default is empty.
+ - edges: (list[list[T]]) The list of edges the client wants to
+ pass in. Each edge is a 2-element list. Default is empty.
+ - directed: (bool) Indicates if graph is directed or undirected.
+ Default is True.
+ """
+ self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T
+ self.directed = directed
+
+ # Falsey checks
+ edges = edges or []
+ vertices = vertices or []
+
+ for vertex in vertices:
+ self.add_vertex(vertex)
+
+ for edge in edges:
+ if len(edge) != 2:
+ msg = f"Invalid input: {edge} is the wrong length."
+ raise ValueError(msg)
+ self.add_edge(edge[0], edge[1])
+
+ def add_vertex(self, vertex: T) -> None:
+ """
+ Adds a vertex to the graph. If the given vertex already exists,
+ a ValueError will be thrown.
+ """
+ if self.contains_vertex(vertex):
+ msg = f"Incorrect input: {vertex} is already in the graph."
+ raise ValueError(msg)
+ self.adj_list[vertex] = []
+
+ def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Creates an edge from source vertex to destination vertex. If any
+ given vertex doesn't exist or the edge already exists, a ValueError
+ will be thrown.
+ """
+ if not (
+ self.contains_vertex(source_vertex)
+ and self.contains_vertex(destination_vertex)
+ ):
+ msg = (
+ f"Incorrect input: Either {source_vertex} or "
+ f"{destination_vertex} does not exist"
+ )
+ raise ValueError(msg)
+ if self.contains_edge(source_vertex, destination_vertex):
+ msg = (
+ "Incorrect input: The edge already exists between "
+ f"{source_vertex} and {destination_vertex}"
+ )
+ raise ValueError(msg)
+
+ # add the destination vertex to the list associated with the source vertex
+ # and vice versa if not directed
+ self.adj_list[source_vertex].append(destination_vertex)
+ if not self.directed:
+ self.adj_list[destination_vertex].append(source_vertex)
+
+ def remove_vertex(self, vertex: T) -> None:
+ """
+ Removes the given vertex from the graph and deletes all incoming and
+ outgoing edges from the given vertex as well. If the given vertex
+ does not exist, a ValueError will be thrown.
+ """
+ if not self.contains_vertex(vertex):
+ msg = f"Incorrect input: {vertex} does not exist in this graph."
+ raise ValueError(msg)
+
+ if not self.directed:
+ # If not directed, find all neighboring vertices and delete all references
+ # of edges connecting to the given vertex
+ for neighbor in self.adj_list[vertex]:
+ self.adj_list[neighbor].remove(vertex)
+ else:
+ # If directed, search all neighbors of all vertices and delete all
+ # references of edges connecting to the given vertex
+ for edge_list in self.adj_list.values():
+ if vertex in edge_list:
+ edge_list.remove(vertex)
+
+ # Finally, delete the given vertex and all of its outgoing edge references
+ self.adj_list.pop(vertex)
+
+ def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Removes the edge between the two vertices. If any given vertex
+ doesn't exist or the edge does not exist, a ValueError will be thrown.
+ """
+ if not (
+ self.contains_vertex(source_vertex)
+ and self.contains_vertex(destination_vertex)
+ ):
+ msg = (
+ f"Incorrect input: Either {source_vertex} or "
+ f"{destination_vertex} does not exist"
+ )
+ raise ValueError(msg)
+ if not self.contains_edge(source_vertex, destination_vertex):
+ msg = (
+ "Incorrect input: The edge does NOT exist between "
+ f"{source_vertex} and {destination_vertex}"
+ )
+ raise ValueError(msg)
+
+ # remove the destination vertex from the list associated with the source
+ # vertex and vice versa if not directed
+ self.adj_list[source_vertex].remove(destination_vertex)
+ if not self.directed:
+ self.adj_list[destination_vertex].remove(source_vertex)
+
+ def contains_vertex(self, vertex: T) -> bool:
+ """
+ Returns True if the graph contains the vertex, False otherwise.
+ """
+ return vertex in self.adj_list
+
+ def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
+ """
+ Returns True if the graph contains the edge from the source_vertex to the
+ destination_vertex, False otherwise. If any given vertex doesn't exist, a
+ ValueError will be thrown.
+ """
+ if not (
+ self.contains_vertex(source_vertex)
+ and self.contains_vertex(destination_vertex)
+ ):
+ msg = (
+ f"Incorrect input: Either {source_vertex} "
+ f"or {destination_vertex} does not exist."
+ )
+ raise ValueError(msg)
+
+ return destination_vertex in self.adj_list[source_vertex]
+
+ def clear_graph(self) -> None:
+ """
+ Clears all vertices and edges.
+ """
+ self.adj_list = {}
+
+ def __repr__(self) -> str:
+ return pformat(self.adj_list)
+
+
+class TestGraphAdjacencyList(unittest.TestCase):
+ def __assert_graph_edge_exists_check(
+ self,
+ undirected_graph: GraphAdjacencyList,
+ directed_graph: GraphAdjacencyList,
+ edge: list[int],
+ ) -> None:
+ self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1]))
+ self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0]))
+ self.assertTrue(directed_graph.contains_edge(edge[0], edge[1]))
+
+ def __assert_graph_edge_does_not_exist_check(
+ self,
+ undirected_graph: GraphAdjacencyList,
+ directed_graph: GraphAdjacencyList,
+ edge: list[int],
+ ) -> None:
+ self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1]))
+ self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0]))
+ self.assertFalse(directed_graph.contains_edge(edge[0], edge[1]))
+
+ def __assert_graph_vertex_exists_check(
+ self,
+ undirected_graph: GraphAdjacencyList,
+ directed_graph: GraphAdjacencyList,
+ vertex: int,
+ ) -> None:
+ self.assertTrue(undirected_graph.contains_vertex(vertex))
+ self.assertTrue(directed_graph.contains_vertex(vertex))
+
+ def __assert_graph_vertex_does_not_exist_check(
+ self,
+ undirected_graph: GraphAdjacencyList,
+ directed_graph: GraphAdjacencyList,
+ vertex: int,
+ ) -> None:
+ self.assertFalse(undirected_graph.contains_vertex(vertex))
+ self.assertFalse(directed_graph.contains_vertex(vertex))
+
+ def __generate_random_edges(
+ self, vertices: list[int], edge_pick_count: int
+ ) -> list[list[int]]:
+ self.assertTrue(edge_pick_count <= len(vertices))
+
+ random_source_vertices: list[int] = random.sample(
+ vertices[0 : int(len(vertices) / 2)], edge_pick_count
+ )
+ random_destination_vertices: list[int] = random.sample(
+ vertices[int(len(vertices) / 2) :], edge_pick_count
+ )
+ random_edges: list[list[int]] = []
+
+ for source in random_source_vertices:
+ for dest in random_destination_vertices:
+ random_edges.append([source, dest])
+
+ return random_edges
+
+ def __generate_graphs(
+ self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
+ ) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]:
+ if max_val - min_val + 1 < vertex_count:
+ raise ValueError(
+ "Will result in duplicate vertices. Either increase range "
+ "between min_val and max_val or decrease vertex count."
+ )
+
+ # generate graph input
+ random_vertices: list[int] = random.sample(
+ range(min_val, max_val + 1), vertex_count
+ )
+ random_edges: list[list[int]] = self.__generate_random_edges(
+ random_vertices, edge_pick_count
+ )
+
+ # build graphs
+ undirected_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=random_edges, directed=False
+ )
+ directed_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=random_edges, directed=True
+ )
+
+ return undirected_graph, directed_graph, random_vertices, random_edges
+
+ def test_init_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ # test graph initialization with vertices and edges
+ for num in random_vertices:
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, num
+ )
+
+ for edge in random_edges:
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+ self.assertFalse(undirected_graph.directed)
+ self.assertTrue(directed_graph.directed)
+
+ def test_contains_vertex(self) -> None:
+ random_vertices: list[int] = random.sample(range(101), 20)
+
+ # Build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=[], directed=True
+ )
+
+ # Test contains_vertex
+ for num in range(101):
+ self.assertEqual(
+ num in random_vertices, undirected_graph.contains_vertex(num)
+ )
+ self.assertEqual(
+ num in random_vertices, directed_graph.contains_vertex(num)
+ )
+
+ def test_add_vertices(self) -> None:
+ random_vertices: list[int] = random.sample(range(101), 20)
+
+ # build empty graphs
+ undirected_graph: GraphAdjacencyList = GraphAdjacencyList(
+ vertices=[], edges=[], directed=False
+ )
+ directed_graph: GraphAdjacencyList = GraphAdjacencyList(
+ vertices=[], edges=[], directed=True
+ )
+
+ # run add_vertex
+ for num in random_vertices:
+ undirected_graph.add_vertex(num)
+
+ for num in random_vertices:
+ directed_graph.add_vertex(num)
+
+ # test add_vertex worked
+ for num in random_vertices:
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, num
+ )
+
+ def test_remove_vertices(self) -> None:
+ random_vertices: list[int] = random.sample(range(101), 20)
+
+ # build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=[], directed=True
+ )
+
+ # test remove_vertex worked
+ for num in random_vertices:
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, num
+ )
+
+ undirected_graph.remove_vertex(num)
+ directed_graph.remove_vertex(num)
+
+ self.__assert_graph_vertex_does_not_exist_check(
+ undirected_graph, directed_graph, num
+ )
+
+ def test_add_and_remove_vertices_repeatedly(self) -> None:
+ random_vertices1: list[int] = random.sample(range(51), 20)
+ random_vertices2: list[int] = random.sample(range(51, 101), 20)
+
+ # build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyList(
+ vertices=random_vertices1, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyList(
+ vertices=random_vertices1, edges=[], directed=True
+ )
+
+ # test adding and removing vertices
+ for i, _ in enumerate(random_vertices1):
+ undirected_graph.add_vertex(random_vertices2[i])
+ directed_graph.add_vertex(random_vertices2[i])
+
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, random_vertices2[i]
+ )
+
+ undirected_graph.remove_vertex(random_vertices1[i])
+ directed_graph.remove_vertex(random_vertices1[i])
+
+ self.__assert_graph_vertex_does_not_exist_check(
+ undirected_graph, directed_graph, random_vertices1[i]
+ )
+
+ # remove all vertices
+ for i, _ in enumerate(random_vertices1):
+ undirected_graph.remove_vertex(random_vertices2[i])
+ directed_graph.remove_vertex(random_vertices2[i])
+
+ self.__assert_graph_vertex_does_not_exist_check(
+ undirected_graph, directed_graph, random_vertices2[i]
+ )
+
+ def test_contains_edge(self) -> None:
+ # generate graphs and graph input
+ vertex_count = 20
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(vertex_count, 0, 100, 4)
+
+ # generate all possible edges for testing
+ all_possible_edges: list[list[int]] = []
+ for i in range(vertex_count - 1):
+ for j in range(i + 1, vertex_count):
+ all_possible_edges.append([random_vertices[i], random_vertices[j]])
+ all_possible_edges.append([random_vertices[j], random_vertices[i]])
+
+ # test contains_edge function
+ for edge in all_possible_edges:
+ if edge in random_edges:
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+ elif [edge[1], edge[0]] in random_edges:
+ # since this edge exists for undirected but the reverse
+ # may not exist for directed
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, [edge[1], edge[0]]
+ )
+ else:
+ self.__assert_graph_edge_does_not_exist_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ def test_add_edge(self) -> None:
+ # generate graph input
+ random_vertices: list[int] = random.sample(range(101), 15)
+ random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
+
+ # build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyList(
+ vertices=random_vertices, edges=[], directed=True
+ )
+
+ # run and test add_edge
+ for edge in random_edges:
+ undirected_graph.add_edge(edge[0], edge[1])
+ directed_graph.add_edge(edge[0], edge[1])
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ def test_remove_edge(self) -> None:
+ # generate graph input and graphs
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ # run and test remove_edge
+ for edge in random_edges:
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+ undirected_graph.remove_edge(edge[0], edge[1])
+ directed_graph.remove_edge(edge[0], edge[1])
+ self.__assert_graph_edge_does_not_exist_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ def test_add_and_remove_edges_repeatedly(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ # make some more edge options!
+ more_random_edges: list[list[int]] = []
+
+ while len(more_random_edges) != len(random_edges):
+ edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
+ for edge in edges:
+ if len(more_random_edges) == len(random_edges):
+ break
+ elif edge not in more_random_edges and edge not in random_edges:
+ more_random_edges.append(edge)
+
+ for i, _ in enumerate(random_edges):
+ undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
+ directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
+
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, more_random_edges[i]
+ )
+
+ undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
+ directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])
+
+ self.__assert_graph_edge_does_not_exist_check(
+ undirected_graph, directed_graph, random_edges[i]
+ )
+
+ def test_add_vertex_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for vertex in random_vertices:
+ with self.assertRaises(ValueError):
+ undirected_graph.add_vertex(vertex)
+ with self.assertRaises(ValueError):
+ directed_graph.add_vertex(vertex)
+
+ def test_remove_vertex_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for i in range(101):
+ if i not in random_vertices:
+ with self.assertRaises(ValueError):
+ undirected_graph.remove_vertex(i)
+ with self.assertRaises(ValueError):
+ directed_graph.remove_vertex(i)
+
+ def test_add_edge_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for edge in random_edges:
+ with self.assertRaises(ValueError):
+ undirected_graph.add_edge(edge[0], edge[1])
+ with self.assertRaises(ValueError):
+ directed_graph.add_edge(edge[0], edge[1])
+
+ def test_remove_edge_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ more_random_edges: list[list[int]] = []
+
+ while len(more_random_edges) != len(random_edges):
+ edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
+ for edge in edges:
+ if len(more_random_edges) == len(random_edges):
+ break
+ elif edge not in more_random_edges and edge not in random_edges:
+ more_random_edges.append(edge)
+
+ for edge in more_random_edges:
+ with self.assertRaises(ValueError):
+ undirected_graph.remove_edge(edge[0], edge[1])
+ with self.assertRaises(ValueError):
+ directed_graph.remove_edge(edge[0], edge[1])
+
+ def test_contains_edge_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for vertex in random_vertices:
+ with self.assertRaises(ValueError):
+ undirected_graph.contains_edge(vertex, 102)
+ with self.assertRaises(ValueError):
+ directed_graph.contains_edge(vertex, 102)
+
+ with self.assertRaises(ValueError):
+ undirected_graph.contains_edge(103, 102)
+ with self.assertRaises(ValueError):
+ directed_graph.contains_edge(103, 102)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py
new file mode 100644
index 000000000..4d2e02f73
--- /dev/null
+++ b/graphs/graph_adjacency_matrix.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python3
+"""
+Author: Vikram Nithyanandam
+
+Description:
+The following implementation is a robust unweighted Graph data structure
+implemented using an adjacency matrix. The vertices and edges of this graph can be
+effectively initialized and modified while storing your chosen generic
+value in each vertex.
+
+Adjacency Matrix: https://mathworld.wolfram.com/AdjacencyMatrix.html
+
+Potential Future Ideas:
+- Add a flag to set edge weights on and set edge weights
+- Make edge weights and vertex values customizable to store whatever the client wants
+- Support multigraph functionality if the client wants it
+"""
+from __future__ import annotations
+
+import random
+import unittest
+from pprint import pformat
+from typing import Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class GraphAdjacencyMatrix(Generic[T]):
+ def __init__(
+ self, vertices: list[T], edges: list[list[T]], directed: bool = True
+ ) -> None:
+ """
+ Parameters:
+ - vertices: (list[T]) The list of vertex names the client wants to
+ pass in. Default is empty.
+ - edges: (list[list[T]]) The list of edges the client wants to
+ pass in. Each edge is a 2-element list. Default is empty.
+ - directed: (bool) Indicates if graph is directed or undirected.
+ Default is True.
+ """
+ self.directed = directed
+ self.vertex_to_index: dict[T, int] = {}
+ self.adj_matrix: list[list[int]] = []
+
+ # Falsey checks
+ edges = edges or []
+ vertices = vertices or []
+
+ for vertex in vertices:
+ self.add_vertex(vertex)
+
+ for edge in edges:
+ if len(edge) != 2:
+ msg = f"Invalid input: {edge} must have length 2."
+ raise ValueError(msg)
+ self.add_edge(edge[0], edge[1])
+
+ def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Creates an edge from source vertex to destination vertex. If any
+ given vertex doesn't exist or the edge already exists, a ValueError
+ will be thrown.
+ """
+ if not (
+ self.contains_vertex(source_vertex)
+ and self.contains_vertex(destination_vertex)
+ ):
+ msg = (
+ f"Incorrect input: Either {source_vertex} or "
+ f"{destination_vertex} does not exist"
+ )
+ raise ValueError(msg)
+ if self.contains_edge(source_vertex, destination_vertex):
+ msg = (
+ "Incorrect input: The edge already exists between "
+ f"{source_vertex} and {destination_vertex}"
+ )
+ raise ValueError(msg)
+
+ # Get the indices of the corresponding vertices and set their edge value to 1.
+ u: int = self.vertex_to_index[source_vertex]
+ v: int = self.vertex_to_index[destination_vertex]
+ self.adj_matrix[u][v] = 1
+ if not self.directed:
+ self.adj_matrix[v][u] = 1
+
+ def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Removes the edge between the two vertices. If any given vertex
+ doesn't exist or the edge does not exist, a ValueError will be thrown.
+ """
+ if not (
+ self.contains_vertex(source_vertex)
+ and self.contains_vertex(destination_vertex)
+ ):
+ msg = (
+ f"Incorrect input: Either {source_vertex} or "
+ f"{destination_vertex} does not exist"
+ )
+ raise ValueError(msg)
+ if not self.contains_edge(source_vertex, destination_vertex):
+ msg = (
+ "Incorrect input: The edge does NOT exist between "
+ f"{source_vertex} and {destination_vertex}"
+ )
+ raise ValueError(msg)
+
+ # Get the indices of the corresponding vertices and set their edge value to 0.
+ u: int = self.vertex_to_index[source_vertex]
+ v: int = self.vertex_to_index[destination_vertex]
+ self.adj_matrix[u][v] = 0
+ if not self.directed:
+ self.adj_matrix[v][u] = 0
+
+ def add_vertex(self, vertex: T) -> None:
+ """
+ Adds a vertex to the graph. If the given vertex already exists,
+ a ValueError will be thrown.
+ """
+ if self.contains_vertex(vertex):
+ msg = f"Incorrect input: {vertex} already exists in this graph."
+ raise ValueError(msg)
+
+ # build column for vertex
+ for row in self.adj_matrix:
+ row.append(0)
+
+ # build row for vertex and update other data structures
+ self.adj_matrix.append([0] * (len(self.adj_matrix) + 1))
+ self.vertex_to_index[vertex] = len(self.adj_matrix) - 1
+
+ def remove_vertex(self, vertex: T) -> None:
+ """
+ Removes the given vertex from the graph and deletes all incoming and
+ outgoing edges from the given vertex as well. If the given vertex
+ does not exist, a ValueError will be thrown.
+ """
+ if not self.contains_vertex(vertex):
+ msg = f"Incorrect input: {vertex} does not exist in this graph."
+ raise ValueError(msg)
+
+ # first slide up the rows by deleting the row corresponding to
+ # the vertex being deleted.
+ start_index = self.vertex_to_index[vertex]
+ self.adj_matrix.pop(start_index)
+
+ # next, slide the columns to the left by deleting the values in
+ # the column corresponding to the vertex being deleted
+ for lst in self.adj_matrix:
+ lst.pop(start_index)
+
+ # final clean up
+ self.vertex_to_index.pop(vertex)
+
+ # decrement indices for vertices shifted by the deleted vertex in the adj matrix
+ for vertex in self.vertex_to_index:
+ if self.vertex_to_index[vertex] >= start_index:
+ self.vertex_to_index[vertex] = self.vertex_to_index[vertex] - 1
+
+ def contains_vertex(self, vertex: T) -> bool:
+ """
+ Returns True if the graph contains the vertex, False otherwise.
+ """
+ return vertex in self.vertex_to_index
+
+ def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
+ """
+ Returns True if the graph contains the edge from the source_vertex to the
+ destination_vertex, False otherwise. If any given vertex doesn't exist, a
+ ValueError will be thrown.
+ """
+ if not (
+ self.contains_vertex(source_vertex)
+ and self.contains_vertex(destination_vertex)
+ ):
+ msg = (
+ f"Incorrect input: Either {source_vertex} "
+ f"or {destination_vertex} does not exist."
+ )
+ raise ValueError(msg)
+
+ u = self.vertex_to_index[source_vertex]
+ v = self.vertex_to_index[destination_vertex]
+ return self.adj_matrix[u][v] == 1
+
+ def clear_graph(self) -> None:
+ """
+ Clears all vertices and edges.
+ """
+ self.vertex_to_index = {}
+ self.adj_matrix = []
+
+ def __repr__(self) -> str:
+ first = "Adj Matrix:\n" + pformat(self.adj_matrix)
+ second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index)
+ return first + second
+
+
+class TestGraphMatrix(unittest.TestCase):
+ def __assert_graph_edge_exists_check(
+ self,
+ undirected_graph: GraphAdjacencyMatrix,
+ directed_graph: GraphAdjacencyMatrix,
+ edge: list[int],
+ ) -> None:
+ self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1]))
+ self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0]))
+ self.assertTrue(directed_graph.contains_edge(edge[0], edge[1]))
+
+ def __assert_graph_edge_does_not_exist_check(
+ self,
+ undirected_graph: GraphAdjacencyMatrix,
+ directed_graph: GraphAdjacencyMatrix,
+ edge: list[int],
+ ) -> None:
+ self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1]))
+ self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0]))
+ self.assertFalse(directed_graph.contains_edge(edge[0], edge[1]))
+
+ def __assert_graph_vertex_exists_check(
+ self,
+ undirected_graph: GraphAdjacencyMatrix,
+ directed_graph: GraphAdjacencyMatrix,
+ vertex: int,
+ ) -> None:
+ self.assertTrue(undirected_graph.contains_vertex(vertex))
+ self.assertTrue(directed_graph.contains_vertex(vertex))
+
+ def __assert_graph_vertex_does_not_exist_check(
+ self,
+ undirected_graph: GraphAdjacencyMatrix,
+ directed_graph: GraphAdjacencyMatrix,
+ vertex: int,
+ ) -> None:
+ self.assertFalse(undirected_graph.contains_vertex(vertex))
+ self.assertFalse(directed_graph.contains_vertex(vertex))
+
+ def __generate_random_edges(
+ self, vertices: list[int], edge_pick_count: int
+ ) -> list[list[int]]:
+ self.assertTrue(edge_pick_count <= len(vertices))
+
+ random_source_vertices: list[int] = random.sample(
+ vertices[0 : int(len(vertices) / 2)], edge_pick_count
+ )
+ random_destination_vertices: list[int] = random.sample(
+ vertices[int(len(vertices) / 2) :], edge_pick_count
+ )
+ random_edges: list[list[int]] = []
+
+ for source in random_source_vertices:
+ for dest in random_destination_vertices:
+ random_edges.append([source, dest])
+
+ return random_edges
+
+ def __generate_graphs(
+ self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
+ ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]:
+ if max_val - min_val + 1 < vertex_count:
+ raise ValueError(
+ "Will result in duplicate vertices. Either increase "
+ "range between min_val and max_val or decrease vertex count"
+ )
+
+ # generate graph input
+ random_vertices: list[int] = random.sample(
+ range(min_val, max_val + 1), vertex_count
+ )
+ random_edges: list[list[int]] = self.__generate_random_edges(
+ random_vertices, edge_pick_count
+ )
+
+ # build graphs
+ undirected_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=random_edges, directed=False
+ )
+ directed_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=random_edges, directed=True
+ )
+
+ return undirected_graph, directed_graph, random_vertices, random_edges
+
+ def test_init_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ # test graph initialization with vertices and edges
+ for num in random_vertices:
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, num
+ )
+
+ for edge in random_edges:
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ self.assertFalse(undirected_graph.directed)
+ self.assertTrue(directed_graph.directed)
+
+ def test_contains_vertex(self) -> None:
+ random_vertices: list[int] = random.sample(range(101), 20)
+
+ # Build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=[], directed=True
+ )
+
+ # Test contains_vertex
+ for num in range(101):
+ self.assertEqual(
+ num in random_vertices, undirected_graph.contains_vertex(num)
+ )
+ self.assertEqual(
+ num in random_vertices, directed_graph.contains_vertex(num)
+ )
+
+ def test_add_vertices(self) -> None:
+ random_vertices: list[int] = random.sample(range(101), 20)
+
+ # build empty graphs
+ undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
+ vertices=[], edges=[], directed=False
+ )
+ directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
+ vertices=[], edges=[], directed=True
+ )
+
+ # run add_vertex
+ for num in random_vertices:
+ undirected_graph.add_vertex(num)
+
+ for num in random_vertices:
+ directed_graph.add_vertex(num)
+
+ # test add_vertex worked
+ for num in random_vertices:
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, num
+ )
+
+ def test_remove_vertices(self) -> None:
+ random_vertices: list[int] = random.sample(range(101), 20)
+
+ # build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=[], directed=True
+ )
+
+ # test remove_vertex worked
+ for num in random_vertices:
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, num
+ )
+
+ undirected_graph.remove_vertex(num)
+ directed_graph.remove_vertex(num)
+
+ self.__assert_graph_vertex_does_not_exist_check(
+ undirected_graph, directed_graph, num
+ )
+
+ def test_add_and_remove_vertices_repeatedly(self) -> None:
+ random_vertices1: list[int] = random.sample(range(51), 20)
+ random_vertices2: list[int] = random.sample(range(51, 101), 20)
+
+ # build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices1, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices1, edges=[], directed=True
+ )
+
+ # test adding and removing vertices
+ for i, _ in enumerate(random_vertices1):
+ undirected_graph.add_vertex(random_vertices2[i])
+ directed_graph.add_vertex(random_vertices2[i])
+
+ self.__assert_graph_vertex_exists_check(
+ undirected_graph, directed_graph, random_vertices2[i]
+ )
+
+ undirected_graph.remove_vertex(random_vertices1[i])
+ directed_graph.remove_vertex(random_vertices1[i])
+
+ self.__assert_graph_vertex_does_not_exist_check(
+ undirected_graph, directed_graph, random_vertices1[i]
+ )
+
+ # remove all vertices
+ for i, _ in enumerate(random_vertices1):
+ undirected_graph.remove_vertex(random_vertices2[i])
+ directed_graph.remove_vertex(random_vertices2[i])
+
+ self.__assert_graph_vertex_does_not_exist_check(
+ undirected_graph, directed_graph, random_vertices2[i]
+ )
+
+ def test_contains_edge(self) -> None:
+ # generate graphs and graph input
+ vertex_count = 20
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(vertex_count, 0, 100, 4)
+
+ # generate all possible edges for testing
+ all_possible_edges: list[list[int]] = []
+ for i in range(vertex_count - 1):
+ for j in range(i + 1, vertex_count):
+ all_possible_edges.append([random_vertices[i], random_vertices[j]])
+ all_possible_edges.append([random_vertices[j], random_vertices[i]])
+
+ # test contains_edge function
+ for edge in all_possible_edges:
+ if edge in random_edges:
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+ elif [edge[1], edge[0]] in random_edges:
+ # since this edge exists for undirected but the reverse may
+ # not exist for directed
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, [edge[1], edge[0]]
+ )
+ else:
+ self.__assert_graph_edge_does_not_exist_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ def test_add_edge(self) -> None:
+ # generate graph input
+ random_vertices: list[int] = random.sample(range(101), 15)
+ random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
+
+ # build graphs WITHOUT edges
+ undirected_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=[], directed=False
+ )
+ directed_graph = GraphAdjacencyMatrix(
+ vertices=random_vertices, edges=[], directed=True
+ )
+
+ # run and test add_edge
+ for edge in random_edges:
+ undirected_graph.add_edge(edge[0], edge[1])
+ directed_graph.add_edge(edge[0], edge[1])
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ def test_remove_edge(self) -> None:
+ # generate graph input and graphs
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ # run and test remove_edge
+ for edge in random_edges:
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, edge
+ )
+ undirected_graph.remove_edge(edge[0], edge[1])
+ directed_graph.remove_edge(edge[0], edge[1])
+ self.__assert_graph_edge_does_not_exist_check(
+ undirected_graph, directed_graph, edge
+ )
+
+ def test_add_and_remove_edges_repeatedly(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ # make some more edge options!
+ more_random_edges: list[list[int]] = []
+
+ while len(more_random_edges) != len(random_edges):
+ edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
+ for edge in edges:
+ if len(more_random_edges) == len(random_edges):
+ break
+ elif edge not in more_random_edges and edge not in random_edges:
+ more_random_edges.append(edge)
+
+ for i, _ in enumerate(random_edges):
+ undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
+ directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
+
+ self.__assert_graph_edge_exists_check(
+ undirected_graph, directed_graph, more_random_edges[i]
+ )
+
+ undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
+ directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])
+
+ self.__assert_graph_edge_does_not_exist_check(
+ undirected_graph, directed_graph, random_edges[i]
+ )
+
+ def test_add_vertex_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for vertex in random_vertices:
+ with self.assertRaises(ValueError):
+ undirected_graph.add_vertex(vertex)
+ with self.assertRaises(ValueError):
+ directed_graph.add_vertex(vertex)
+
+ def test_remove_vertex_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for i in range(101):
+ if i not in random_vertices:
+ with self.assertRaises(ValueError):
+ undirected_graph.remove_vertex(i)
+ with self.assertRaises(ValueError):
+ directed_graph.remove_vertex(i)
+
+ def test_add_edge_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for edge in random_edges:
+ with self.assertRaises(ValueError):
+ undirected_graph.add_edge(edge[0], edge[1])
+ with self.assertRaises(ValueError):
+ directed_graph.add_edge(edge[0], edge[1])
+
+ def test_remove_edge_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ more_random_edges: list[list[int]] = []
+
+ while len(more_random_edges) != len(random_edges):
+ edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
+ for edge in edges:
+ if len(more_random_edges) == len(random_edges):
+ break
+ elif edge not in more_random_edges and edge not in random_edges:
+ more_random_edges.append(edge)
+
+ for edge in more_random_edges:
+ with self.assertRaises(ValueError):
+ undirected_graph.remove_edge(edge[0], edge[1])
+ with self.assertRaises(ValueError):
+ directed_graph.remove_edge(edge[0], edge[1])
+
+ def test_contains_edge_exception_check(self) -> None:
+ (
+ undirected_graph,
+ directed_graph,
+ random_vertices,
+ random_edges,
+ ) = self.__generate_graphs(20, 0, 100, 4)
+
+ for vertex in random_vertices:
+ with self.assertRaises(ValueError):
+ undirected_graph.contains_edge(vertex, 102)
+ with self.assertRaises(ValueError):
+ directed_graph.contains_edge(vertex, 102)
+
+ with self.assertRaises(ValueError):
+ undirected_graph.contains_edge(103, 102)
+ with self.assertRaises(ValueError):
+ directed_graph.contains_edge(103, 102)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/graphs/graph_list.py b/graphs/graph_list.py
index f04b7a923..e871f3b8a 100644
--- a/graphs/graph_list.py
+++ b/graphs/graph_list.py
@@ -18,7 +18,7 @@ class GraphAdjacencyList(Generic[T]):
Directed graph example:
>>> d_graph = GraphAdjacencyList()
- >>> d_graph
+ >>> print(d_graph)
{}
>>> d_graph.add_edge(0, 1)
{0: [1], 1: []}
@@ -26,7 +26,7 @@ class GraphAdjacencyList(Generic[T]):
{0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []}
>>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
{0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
- >>> print(d_graph)
+ >>> d_graph
{0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
>>> print(repr(d_graph))
{0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
@@ -68,7 +68,7 @@ class GraphAdjacencyList(Generic[T]):
{'a': ['b'], 'b': ['a']}
>>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f')
{'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
- >>> print(char_graph)
+ >>> char_graph
{'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
"""
diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py
deleted file mode 100644
index 987168426..000000000
--- a/graphs/graph_matrix.py
+++ /dev/null
@@ -1,25 +0,0 @@
-class Graph:
- def __init__(self, vertex):
- self.vertex = vertex
- self.graph = [[0] * vertex for i in range(vertex)]
-
- def add_edge(self, u, v):
- self.graph[u - 1][v - 1] = 1
- self.graph[v - 1][u - 1] = 1
-
- def show(self):
-
- for i in self.graph:
- for j in i:
- print(j, end=" ")
- print(" ")
-
-
-g = Graph(100)
-
-g.add_edge(1, 4)
-g.add_edge(4, 2)
-g.add_edge(4, 5)
-g.add_edge(2, 5)
-g.add_edge(5, 3)
-g.show()
diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py
index d5e80247a..35f7ca9fe 100644
--- a/graphs/greedy_best_first.py
+++ b/graphs/greedy_best_first.py
@@ -4,8 +4,6 @@ https://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS
from __future__ import annotations
-from typing import Optional
-
Path = list[tuple[int, int]]
grid = [
@@ -44,7 +42,7 @@ class Node:
goal_x: int,
goal_y: int,
g_cost: float,
- parent: Optional[Node],
+ parent: Node | None,
):
self.pos_x = pos_x
self.pos_y = pos_y
@@ -60,8 +58,8 @@ class Node:
The heuristic here is the Manhattan Distance
Could elaborate to offer more than one choice
"""
- dy = abs(self.pos_x - self.goal_x)
- dx = abs(self.pos_y - self.goal_y)
+ dx = abs(self.pos_x - self.goal_x)
+ dy = abs(self.pos_y - self.goal_y)
return dx + dy
def __lt__(self, other) -> bool:
@@ -93,7 +91,7 @@ class GreedyBestFirst:
self.reached = False
- def search(self) -> Optional[Path]:
+ def search(self) -> Path | None:
"""
Search for the path,
if a path is not found, only the starting position is returned
@@ -156,7 +154,7 @@ class GreedyBestFirst:
)
return successors
- def retrace_path(self, node: Optional[Node]) -> Path:
+ def retrace_path(self, node: Node | None) -> Path:
"""
Retrace the path from parents to parents until start node
"""
diff --git a/graphs/greedy_min_vertex_cover.py b/graphs/greedy_min_vertex_cover.py
new file mode 100644
index 000000000..cdef69141
--- /dev/null
+++ b/graphs/greedy_min_vertex_cover.py
@@ -0,0 +1,64 @@
+"""
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Approximation algorithm for minimum vertex cover problem.
+ Greedy Approach. Uses graphs represented with an adjacency list
+URL: https://mathworld.wolfram.com/MinimumVertexCover.html
+URL: https://cs.stackexchange.com/questions/129017/greedy-algorithm-for-vertex-cover
+"""
+
+import heapq
+
+
+def greedy_min_vertex_cover(graph: dict) -> set[int]:
+ """
+ Greedy APX Algorithm for min Vertex Cover
+ @input: graph (graph stored in an adjacency list where each vertex
+ is represented with an integer)
+ @example:
+ >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
+ >>> greedy_min_vertex_cover(graph)
+ {0, 1, 2, 4}
+ """
+ # queue used to store nodes and their rank
+ queue: list[list] = []
+
+    # for each node and its adjacency list, add them and the rank of the node to queue
+    # using heapq module the queue will be filled like a Priority Queue
+    # heapq works with a min priority queue, so I used -1*len(value) to build it
+ for key, value in graph.items():
+ # O(log(n))
+ heapq.heappush(queue, [-1 * len(value), (key, value)])
+
+ # chosen_vertices = set of chosen vertices
+ chosen_vertices = set()
+
+ # while queue isn't empty and there are still edges
+ # (queue[0][0] is the rank of the node with max rank)
+ while queue and queue[0][0] != 0:
+ # extract vertex with max rank from queue and add it to chosen_vertices
+ argmax = heapq.heappop(queue)[1][0]
+ chosen_vertices.add(argmax)
+
+ # Remove all arcs adjacent to argmax
+ for elem in queue:
+            # if the node has no adjacent nodes, skip it
+ if elem[0] == 0:
+ continue
+ # if argmax is reachable from elem
+            # remove argmax from elem's adjacency list and update its rank
+ if argmax in elem[1][1]:
+ index = elem[1][1].index(argmax)
+ del elem[1][1][index]
+ elem[0] += 1
+ # re-order the queue
+ heapq.heapify(queue)
+ return chosen_vertices
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
+ print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py
index fed7517a2..63cbeb909 100644
--- a/graphs/kahns_algorithm_long.py
+++ b/graphs/kahns_algorithm_long.py
@@ -1,10 +1,10 @@
# Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
-def longestDistance(graph):
+def longest_distance(graph):
indegree = [0] * len(graph)
queue = []
- longDist = [1] * len(graph)
+ long_dist = [1] * len(graph)
- for key, values in graph.items():
+ for values in graph.values():
for i in values:
indegree[i] += 1
@@ -17,15 +17,15 @@ def longestDistance(graph):
for x in graph[vertex]:
indegree[x] -= 1
- if longDist[vertex] + 1 > longDist[x]:
- longDist[x] = longDist[vertex] + 1
+ if long_dist[vertex] + 1 > long_dist[x]:
+ long_dist[x] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(x)
- print(max(longDist))
+ print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
-longestDistance(graph)
+longest_distance(graph)
diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py
index bf9f90299..b1260bd5b 100644
--- a/graphs/kahns_algorithm_topo.py
+++ b/graphs/kahns_algorithm_topo.py
@@ -1,4 +1,4 @@
-def topologicalSort(graph):
+def topological_sort(graph):
"""
Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph
using BFS
@@ -8,7 +8,7 @@ def topologicalSort(graph):
topo = []
cnt = 0
- for key, values in graph.items():
+ for values in graph.values():
for i in values:
indegree[i] += 1
@@ -33,4 +33,4 @@ def topologicalSort(graph):
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
-topologicalSort(graph)
+topological_sort(graph)
diff --git a/graphs/karger.py b/graphs/karger.py
index f72128c81..3ef65c0d6 100644
--- a/graphs/karger.py
+++ b/graphs/karger.py
@@ -47,7 +47,6 @@ def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
graph_copy = {node: graph[node][:] for node in graph}
while len(graph_copy) > 2:
-
# Choose a random edge.
u = random.choice(list(graph_copy.keys()))
v = random.choice(graph_copy[u])
diff --git a/graphs/markov_chain.py b/graphs/markov_chain.py
index b93c408cd..0b6659822 100644
--- a/graphs/markov_chain.py
+++ b/graphs/markov_chain.py
@@ -35,6 +35,7 @@ class MarkovChainGraphUndirectedUnweighted:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
+ return ""
def get_transitions(
diff --git a/graphs/matching_min_vertex_cover.py b/graphs/matching_min_vertex_cover.py
new file mode 100644
index 000000000..5ac944ec1
--- /dev/null
+++ b/graphs/matching_min_vertex_cover.py
@@ -0,0 +1,62 @@
+"""
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Approximation algorithm for minimum vertex cover problem.
+ Matching Approach. Uses graphs represented with an adjacency list
+
+URL: https://mathworld.wolfram.com/MinimumVertexCover.html
+URL: https://www.princeton.edu/~aaa/Public/Teaching/ORF523/ORF523_Lec6.pdf
+"""
+
+
+def matching_min_vertex_cover(graph: dict) -> set:
+ """
+ APX Algorithm for min Vertex Cover using Matching Approach
+ @input: graph (graph stored in an adjacency list where each vertex
+ is represented as an integer)
+ @example:
+ >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
+ >>> matching_min_vertex_cover(graph)
+ {0, 1, 2, 4}
+ """
+ # chosen_vertices = set of chosen vertices
+ chosen_vertices = set()
+ # edges = list of graph's edges
+ edges = get_edges(graph)
+
+ # While there are still elements in edges list, take an arbitrary edge
+    # (from_node, to_node), add its endpoints to chosen_vertices, and then
+ # remove all arcs adjacent to the from_node and to_node
+ while edges:
+ from_node, to_node = edges.pop()
+ chosen_vertices.add(from_node)
+ chosen_vertices.add(to_node)
+ for edge in edges.copy():
+ if from_node in edge or to_node in edge:
+ edges.discard(edge)
+ return chosen_vertices
+
+
+def get_edges(graph: dict) -> set:
+ """
+    Return a set of pairs that represent all of the edges.
+ @input: graph (graph stored in an adjacency list where each vertex is
+ represented as an integer)
+ @example:
+ >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3], 3: [0, 1, 2]}
+ >>> get_edges(graph)
+ {(0, 1), (3, 1), (0, 3), (2, 0), (3, 0), (2, 3), (1, 0), (3, 2), (1, 3)}
+ """
+ edges = set()
+ for from_node, to_nodes in graph.items():
+ for to_node in to_nodes:
+ edges.add((from_node, to_node))
+ return edges
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
+ # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
diff --git a/graphs/minimum_path_sum.py b/graphs/minimum_path_sum.py
new file mode 100644
index 000000000..df1e545df
--- /dev/null
+++ b/graphs/minimum_path_sum.py
@@ -0,0 +1,63 @@
+def min_path_sum(grid: list) -> int:
+ """
+ Find the path from top left to bottom right of array of numbers
+ with the lowest possible sum and return the sum along this path.
+ >>> min_path_sum([
+ ... [1, 3, 1],
+ ... [1, 5, 1],
+ ... [4, 2, 1],
+ ... ])
+ 7
+
+ >>> min_path_sum([
+ ... [1, 0, 5, 6, 7],
+ ... [8, 9, 0, 4, 2],
+ ... [4, 4, 4, 5, 1],
+ ... [9, 6, 3, 1, 0],
+ ... [8, 4, 3, 2, 7],
+ ... ])
+ 20
+
+ >>> min_path_sum(None)
+ Traceback (most recent call last):
+ ...
+ TypeError: The grid does not contain the appropriate information
+
+ >>> min_path_sum([[]])
+ Traceback (most recent call last):
+ ...
+ TypeError: The grid does not contain the appropriate information
+ """
+
+ if not grid or not grid[0]:
+ raise TypeError("The grid does not contain the appropriate information")
+
+ for cell_n in range(1, len(grid[0])):
+ grid[0][cell_n] += grid[0][cell_n - 1]
+ row_above = grid[0]
+
+ for row_n in range(1, len(grid)):
+ current_row = grid[row_n]
+ grid[row_n] = fill_row(current_row, row_above)
+ row_above = grid[row_n]
+
+ return grid[-1][-1]
+
+
+def fill_row(current_row: list, row_above: list) -> list:
+ """
+ >>> fill_row([2, 2, 2], [1, 2, 3])
+ [3, 4, 5]
+ """
+
+ current_row[0] += row_above[0]
+ for cell_n in range(1, len(current_row)):
+ current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
+
+ return current_row
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py
index 32548b2ec..3c6888037 100644
--- a/graphs/minimum_spanning_tree_boruvka.py
+++ b/graphs/minimum_spanning_tree_boruvka.py
@@ -4,7 +4,6 @@ class Graph:
"""
def __init__(self):
-
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
@@ -63,7 +62,7 @@ class Graph:
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
- string += "%d -> %d == %d\n" % (head, tail, weight)
+ string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def get_edges(self):
@@ -145,6 +144,7 @@ class Graph:
self.rank[root1] += 1
self.parent[root2] = root1
return root1
+ return None
@staticmethod
def boruvka_mst(graph):
diff --git a/graphs/minimum_spanning_tree_kruskal.py b/graphs/minimum_spanning_tree_kruskal.py
index a51f97034..85d937010 100644
--- a/graphs/minimum_spanning_tree_kruskal.py
+++ b/graphs/minimum_spanning_tree_kruskal.py
@@ -1,15 +1,14 @@
-from typing import List, Tuple
-
-
-def kruskal(num_nodes: int, num_edges: int, edges: List[Tuple[int, int, int]]) -> int:
+def kruskal(
+ num_nodes: int, edges: list[tuple[int, int, int]]
+) -> list[tuple[int, int, int]]:
"""
- >>> kruskal(4, 3, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
+ >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
[(2, 3, 1), (0, 1, 3), (1, 2, 5)]
- >>> kruskal(4, 5, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
+ >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
[(2, 3, 1), (0, 2, 1), (0, 1, 3)]
- >>> kruskal(4, 6, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
+ >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
... (2, 1, 1)])
[(2, 3, 1), (0, 2, 1), (2, 1, 1)]
"""
@@ -41,7 +40,7 @@ if __name__ == "__main__": # pragma: no cover
edges = []
for _ in range(num_edges):
- node1, node2, cost = [int(x) for x in input().strip().split()]
+ node1, node2, cost = (int(x) for x in input().strip().split())
edges.append((node1, node2, cost))
- kruskal(num_nodes, num_edges, edges)
+ kruskal(num_nodes, edges)
diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py
index 16b428614..5a08ec57f 100644
--- a/graphs/minimum_spanning_tree_prims.py
+++ b/graphs/minimum_spanning_tree_prims.py
@@ -2,115 +2,135 @@ import sys
from collections import defaultdict
-def PrimsAlgorithm(l): # noqa: E741
+class Heap:
+ def __init__(self):
+ self.node_position = []
- nodePosition = []
+ def get_position(self, vertex):
+ return self.node_position[vertex]
- def get_position(vertex):
- return nodePosition[vertex]
+ def set_position(self, vertex, pos):
+ self.node_position[vertex] = pos
- def set_position(vertex, pos):
- nodePosition[vertex] = pos
-
- def top_to_bottom(heap, start, size, positions):
+ def top_to_bottom(self, heap, start, size, positions):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
- m = 2 * start + 1
+ smallest_child = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
- m = 2 * start + 1
+ smallest_child = 2 * start + 1
else:
- m = 2 * start + 2
- if heap[m] < heap[start]:
- temp, temp1 = heap[m], positions[m]
- heap[m], positions[m] = heap[start], positions[start]
+ smallest_child = 2 * start + 2
+ if heap[smallest_child] < heap[start]:
+ temp, temp1 = heap[smallest_child], positions[smallest_child]
+ heap[smallest_child], positions[smallest_child] = (
+ heap[start],
+ positions[start],
+ )
heap[start], positions[start] = temp, temp1
- temp = get_position(positions[m])
- set_position(positions[m], get_position(positions[start]))
- set_position(positions[start], temp)
+ temp = self.get_position(positions[smallest_child])
+ self.set_position(
+ positions[smallest_child], self.get_position(positions[start])
+ )
+ self.set_position(positions[start], temp)
- top_to_bottom(heap, m, size, positions)
+ self.top_to_bottom(heap, smallest_child, size, positions)
# Update function if value of any node in min-heap decreases
- def bottom_to_top(val, index, heap, position):
+ def bottom_to_top(self, val, index, heap, position):
temp = position[index]
while index != 0:
- if index % 2 == 0:
- parent = int((index - 2) / 2)
- else:
- parent = int((index - 1) / 2)
+ parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
heap[index] = heap[parent]
position[index] = position[parent]
- set_position(position[parent], index)
+ self.set_position(position[parent], index)
else:
heap[index] = val
position[index] = temp
- set_position(temp, index)
+ self.set_position(temp, index)
break
index = parent
else:
heap[0] = val
position[0] = temp
- set_position(temp, 0)
+ self.set_position(temp, 0)
- def heapify(heap, positions):
+ def heapify(self, heap, positions):
start = len(heap) // 2 - 1
for i in range(start, -1, -1):
- top_to_bottom(heap, i, len(heap), positions)
+ self.top_to_bottom(heap, i, len(heap), positions)
- def deleteMinimum(heap, positions):
+ def delete_minimum(self, heap, positions):
temp = positions[0]
heap[0] = sys.maxsize
- top_to_bottom(heap, 0, len(heap), positions)
+ self.top_to_bottom(heap, 0, len(heap), positions)
return temp
- visited = [0 for i in range(len(l))]
- Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex
+
+def prisms_algorithm(adjacency_list):
+ """
+ >>> adjacency_list = {0: [[1, 1], [3, 3]],
+ ... 1: [[0, 1], [2, 6], [3, 5], [4, 1]],
+ ... 2: [[1, 6], [4, 5], [5, 2]],
+ ... 3: [[0, 3], [1, 5], [4, 1]],
+ ... 4: [[1, 1], [2, 5], [3, 1], [5, 4]],
+ ... 5: [[2, 2], [4, 4]]}
+ >>> prisms_algorithm(adjacency_list)
+ [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
+ """
+
+ heap = Heap()
+
+ visited = [0] * len(adjacency_list)
+ nbr_tv = [-1] * len(adjacency_list) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
- Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex
- Positions = []
+ distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
+ positions = []
- for x in range(len(l)):
- p = sys.maxsize
- Distance_TV.append(p)
- Positions.append(x)
- nodePosition.append(x)
+ for vertex in range(len(adjacency_list)):
+ distance_tv.append(sys.maxsize)
+ positions.append(vertex)
+ heap.node_position.append(vertex)
- TreeEdges = []
+ tree_edges = []
visited[0] = 1
- Distance_TV[0] = sys.maxsize
- for x in l[0]:
- Nbr_TV[x[0]] = 0
- Distance_TV[x[0]] = x[1]
- heapify(Distance_TV, Positions)
+ distance_tv[0] = sys.maxsize
+ for neighbor, distance in adjacency_list[0]:
+ nbr_tv[neighbor] = 0
+ distance_tv[neighbor] = distance
+ heap.heapify(distance_tv, positions)
- for i in range(1, len(l)):
- vertex = deleteMinimum(Distance_TV, Positions)
+ for _ in range(1, len(adjacency_list)):
+ vertex = heap.delete_minimum(distance_tv, positions)
if visited[vertex] == 0:
- TreeEdges.append((Nbr_TV[vertex], vertex))
+ tree_edges.append((nbr_tv[vertex], vertex))
visited[vertex] = 1
- for v in l[vertex]:
- if visited[v[0]] == 0 and v[1] < Distance_TV[get_position(v[0])]:
- Distance_TV[get_position(v[0])] = v[1]
- bottom_to_top(v[1], get_position(v[0]), Distance_TV, Positions)
- Nbr_TV[v[0]] = vertex
- return TreeEdges
+ for neighbor, distance in adjacency_list[vertex]:
+ if (
+ visited[neighbor] == 0
+ and distance < distance_tv[heap.get_position(neighbor)]
+ ):
+ distance_tv[heap.get_position(neighbor)] = distance
+ heap.bottom_to_top(
+ distance, heap.get_position(neighbor), distance_tv, positions
+ )
+ nbr_tv[neighbor] = vertex
+ return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
- n = int(input("Enter number of vertices: ").strip())
- e = int(input("Enter number of edges: ").strip())
- adjlist = defaultdict(list)
- for x in range(e):
- l = [int(x) for x in input().strip().split()] # noqa: E741
- adjlist[l[0]].append([l[1], l[2]])
- adjlist[l[1]].append([l[0], l[2]])
- print(PrimsAlgorithm(adjlist))
+ edges_number = int(input("Enter number of edges: ").strip())
+ adjacency_list = defaultdict(list)
+ for _ in range(edges_number):
+ edge = [int(x) for x in input().strip().split()]
+ adjacency_list[edge[0]].append([edge[1], edge[2]])
+ adjacency_list[edge[1]].append([edge[0], edge[2]])
+ print(prisms_algorithm(adjacency_list))
diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py
index c3444c36f..81f30ef61 100644
--- a/graphs/minimum_spanning_tree_prims2.py
+++ b/graphs/minimum_spanning_tree_prims2.py
@@ -6,9 +6,10 @@ edges in the tree is minimized. The algorithm operates by building this tree one
at a time, from an arbitrary starting vertex, at each step adding the cheapest possible
connection from the tree to another vertex.
"""
+from __future__ import annotations
from sys import maxsize
-from typing import Generic, Optional, TypeVar
+from typing import Generic, TypeVar
T = TypeVar("T")
@@ -68,16 +69,16 @@ class MinPriorityQueue(Generic[T]):
>>> queue.push(3, 4000)
>>> queue.push(4, 3000)
- >>> print(queue.extract_min())
+ >>> queue.extract_min()
2
>>> queue.update_key(4, 50)
- >>> print(queue.extract_min())
+ >>> queue.extract_min()
4
- >>> print(queue.extract_min())
+ >>> queue.extract_min()
1
- >>> print(queue.extract_min())
+ >>> queue.extract_min()
3
"""
@@ -134,14 +135,14 @@ class MinPriorityQueue(Generic[T]):
# only]
curr_pos = self.position_map[elem]
if curr_pos == 0:
- return
+ return None
parent_position = get_parent_position(curr_pos)
_, weight = self.heap[curr_pos]
_, parent_weight = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(parent_position, curr_pos)
return self._bubble_up(elem)
- return
+ return None
def _bubble_down(self, elem: T) -> None:
# Place a node at the proper position (downward movement) [to be used
@@ -153,24 +154,22 @@ class MinPriorityQueue(Generic[T]):
if child_left_position < self.elements and child_right_position < self.elements:
_, child_left_weight = self.heap[child_left_position]
_, child_right_weight = self.heap[child_right_position]
- if child_right_weight < child_left_weight:
- if child_right_weight < weight:
- self._swap_nodes(child_right_position, curr_pos)
- return self._bubble_down(elem)
+ if child_right_weight < child_left_weight and child_right_weight < weight:
+ self._swap_nodes(child_right_position, curr_pos)
+ return self._bubble_down(elem)
if child_left_position < self.elements:
_, child_left_weight = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(child_left_position, curr_pos)
return self._bubble_down(elem)
else:
- return
+ return None
if child_right_position < self.elements:
_, child_right_weight = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(child_right_position, curr_pos)
return self._bubble_down(elem)
- else:
- return
+ return None
def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
# Swap the nodes at the given positions
@@ -219,7 +218,7 @@ class GraphUndirectedWeighted(Generic[T]):
def prims_algo(
graph: GraphUndirectedWeighted[T],
-) -> tuple[dict[T, int], dict[T, Optional[T]]]:
+) -> tuple[dict[T, int], dict[T, T | None]]:
"""
>>> graph = GraphUndirectedWeighted()
@@ -240,7 +239,7 @@ def prims_algo(
"""
# prim's algorithm for minimum spanning tree
dist: dict[T, int] = {node: maxsize for node in graph.connections}
- parent: dict[T, Optional[T]] = {node: None for node in graph.connections}
+ parent: dict[T, T | None] = {node: None for node in graph.connections}
priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py
index 77ca5760d..0a18ede6e 100644
--- a/graphs/multi_heuristic_astar.py
+++ b/graphs/multi_heuristic_astar.py
@@ -1,7 +1,10 @@
import heapq
+import sys
import numpy as np
+TPos = tuple[int, int]
+
class PriorityQueue:
def __init__(self):
@@ -30,7 +33,7 @@ class PriorityQueue:
temp.append((pri, x))
(pri, x) = heapq.heappop(self.elements)
temp.append((priority, item))
- for (pro, xxx) in temp:
+ for pro, xxx in temp:
heapq.heappush(self.elements, (pro, xxx))
def remove_element(self, item):
@@ -41,7 +44,7 @@ class PriorityQueue:
while x != item:
temp.append((pro, x))
(pro, x) = heapq.heappop(self.elements)
- for (prito, yyy) in temp:
+ for prito, yyy in temp:
heapq.heappush(self.elements, (prito, yyy))
def top_show(self):
@@ -53,24 +56,24 @@ class PriorityQueue:
return (priority, item)
-def consistent_heuristic(P, goal):
+def consistent_heuristic(p: TPos, goal: TPos):
# euclidean distance
- a = np.array(P)
+ a = np.array(p)
b = np.array(goal)
return np.linalg.norm(a - b)
-def heuristic_2(P, goal):
+def heuristic_2(p: TPos, goal: TPos):
# integer division by time variable
- return consistent_heuristic(P, goal) // t
+ return consistent_heuristic(p, goal) // t
-def heuristic_1(P, goal):
+def heuristic_1(p: TPos, goal: TPos):
# manhattan distance
- return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
+ return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
-def key(start, i, goal, g_function):
+def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
ans = g_function[start] + W1 * heuristics[i](start, goal)
return ans
@@ -114,10 +117,10 @@ def do_something(back_pointer, goal, start):
print(x, end=" ")
x = back_pointer[x]
print(x)
- quit()
+ sys.exit()
-def valid(p):
+def valid(p: TPos):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
@@ -215,7 +218,6 @@ blocks_blk = [
(18, 1),
(19, 1),
]
-blocks_no = []
blocks_all = make_common_ground()
@@ -233,7 +235,7 @@ goal = (n - 1, n - 1)
t = 1
-def multi_a_star(start, goal, n_heuristic):
+def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
g_function = {start: 0, goal: float("inf")}
back_pointer = {start: -1, goal: -1}
open_list = []
@@ -243,8 +245,8 @@ def multi_a_star(start, goal, n_heuristic):
open_list.append(PriorityQueue())
open_list[i].put(start, key(start, i, goal, g_function))
- close_list_anchor = []
- close_list_inad = []
+ close_list_anchor: list[int] = []
+ close_list_inad: list[int] = []
while open_list[0].minkey() < float("inf"):
for i in range(1, n_heuristic):
# print(open_list[0].minkey(), open_list[i].minkey())
diff --git a/graphs/page_rank.py b/graphs/page_rank.py
index 0f5129146..b9e4c4a72 100644
--- a/graphs/page_rank.py
+++ b/graphs/page_rank.py
@@ -27,7 +27,7 @@ class Node:
self.outbound.append(node)
def __repr__(self):
- return f"Node {self.name}: Inbound: {self.inbound} ; Outbound: {self.outbound}"
+ return f"<node={self.name} inbound={self.inbound} outbound={self.outbound}>"
def page_rank(nodes, limit=3, d=0.85):
@@ -41,9 +41,9 @@ def page_rank(nodes, limit=3, d=0.85):
for i in range(limit):
print(f"======= Iteration {i + 1} =======")
- for j, node in enumerate(nodes):
+ for _, node in enumerate(nodes):
ranks[node.name] = (1 - d) + d * sum(
- [ranks[ib] / outbounds[ib] for ib in node.inbound]
+ ranks[ib] / outbounds[ib] for ib in node.inbound
)
print(ranks)
diff --git a/graphs/prim.py b/graphs/prim.py
index 70329da7e..6cb1a6def 100644
--- a/graphs/prim.py
+++ b/graphs/prim.py
@@ -7,13 +7,13 @@
import heapq as hq
import math
-from typing import Iterator
+from collections.abc import Iterator
class Vertex:
"""Class Vertex."""
- def __init__(self, id):
+ def __init__(self, id_):
"""
Arguments:
id - input an id to identify the vertex
@@ -21,7 +21,7 @@ class Vertex:
neighbors - a list of the vertices it is linked to
edges - a dict to store the edges's weight
"""
- self.id = str(id)
+ self.id = str(id_)
self.key = None
self.pi = None
self.neighbors = []
diff --git a/graphs/random_graph_generator.py b/graphs/random_graph_generator.py
new file mode 100644
index 000000000..0e7e18bc8
--- /dev/null
+++ b/graphs/random_graph_generator.py
@@ -0,0 +1,67 @@
+"""
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Random graphs generator.
+ Uses graphs represented with an adjacency list.
+
+URL: https://en.wikipedia.org/wiki/Random_graph
+"""
+
+import random
+
+
+def random_graph(
+ vertices_number: int, probability: float, directed: bool = False
+) -> dict:
+ """
+ Generate a random graph
+ @input: vertices_number (number of vertices),
+ probability (probability that a generic edge (u,v) exists),
+ directed (if True: graph will be a directed graph,
+ otherwise it will be an undirected graph)
+ @examples:
+ >>> random.seed(1)
+ >>> random_graph(4, 0.5)
+ {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
+ >>> random.seed(1)
+ >>> random_graph(4, 0.5, True)
+ {0: [1], 1: [2, 3], 2: [3], 3: []}
+ """
+ graph: dict = {i: [] for i in range(vertices_number)}
+
+ # if probability is greater than or equal to 1, then generate a complete graph
+ if probability >= 1:
+ return complete_graph(vertices_number)
+ # if probability is less than or equal to 0, then return a graph without edges
+ if probability <= 0:
+ return graph
+
+ # for each couple of nodes, add an edge from u to v
+ # if the number randomly generated is lower than probability
+ for i in range(vertices_number):
+ for j in range(i + 1, vertices_number):
+ if random.random() < probability:
+ graph[i].append(j)
+ if not directed:
+ # if the graph is undirected, also add an edge from j to i
+ graph[j].append(i)
+ return graph
+
+
+def complete_graph(vertices_number: int) -> dict:
+ """
+ Generate a complete graph with vertices_number vertices.
+ @input: vertices_number (number of vertices),
+ directed (False if the graph is undirected, True otherwise)
+ @example:
+ >>> complete_graph(3)
+ {0: [1, 2], 1: [0, 2], 2: [0, 1]}
+ """
+ return {
+ i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
+ }
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py
index 2b3417014..39211c64b 100644
--- a/graphs/scc_kosaraju.py
+++ b/graphs/scc_kosaraju.py
@@ -1,8 +1,8 @@
-from typing import List
+from __future__ import annotations
def dfs(u):
- global graph, reversedGraph, scc, component, visit, stack
+ global graph, reversed_graph, scc, component, visit, stack
if visit[u]:
return
visit[u] = True
@@ -12,17 +12,17 @@ def dfs(u):
def dfs2(u):
- global graph, reversedGraph, scc, component, visit, stack
+ global graph, reversed_graph, scc, component, visit, stack
if visit[u]:
return
visit[u] = True
component.append(u)
- for v in reversedGraph[u]:
+ for v in reversed_graph[u]:
dfs2(v)
def kosaraju():
- global graph, reversedGraph, scc, component, visit, stack
+ global graph, reversed_graph, scc, component, visit, stack
for i in range(n):
dfs(i)
visit = [False] * n
@@ -39,16 +39,16 @@ if __name__ == "__main__":
# n - no of nodes, m - no of edges
n, m = list(map(int, input().strip().split()))
- graph: List[List[int]] = [[] for i in range(n)] # graph
- reversedGraph: List[List[int]] = [[] for i in range(n)] # reversed graph
+ graph: list[list[int]] = [[] for _ in range(n)] # graph
+ reversed_graph: list[list[int]] = [[] for i in range(n)] # reversed graph
# input graph data (edges)
- for i in range(m):
+ for _ in range(m):
u, v = list(map(int, input().strip().split()))
graph[u].append(v)
- reversedGraph[v].append(u)
+ reversed_graph[v].append(u)
- stack: List[int] = []
- visit: List[bool] = [False] * n
- scc: List[int] = []
- component: List[int] = []
+ stack: list[int] = []
+ visit: list[bool] = [False] * n
+ scc: list[int] = []
+ component: list[int] = []
print(kosaraju())
diff --git a/graphs/tests/__init__.py b/graphs/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/graphs/tests/test_min_spanning_tree_kruskal.py b/graphs/tests/test_min_spanning_tree_kruskal.py
index 3a527aef3..d6df242ec 100644
--- a/graphs/tests/test_min_spanning_tree_kruskal.py
+++ b/graphs/tests/test_min_spanning_tree_kruskal.py
@@ -2,7 +2,7 @@ from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
- num_nodes, num_edges = 9, 14
+ num_nodes = 9
edges = [
[0, 1, 4],
[0, 7, 8],
@@ -20,7 +20,7 @@ def test_kruskal_successful_result():
[1, 7, 11],
]
- result = kruskal(num_nodes, num_edges, edges)
+ result = kruskal(num_nodes, edges)
expected = [
[7, 6, 1],
diff --git a/graphs/tests/test_min_spanning_tree_prim.py b/graphs/tests/test_min_spanning_tree_prim.py
index 048fbf595..91feab28f 100644
--- a/graphs/tests/test_min_spanning_tree_prim.py
+++ b/graphs/tests/test_min_spanning_tree_prim.py
@@ -1,6 +1,6 @@
from collections import defaultdict
-from graphs.minimum_spanning_tree_prims import PrimsAlgorithm as mst
+from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
diff --git a/dynamic_programming/fractional_knapsack.py b/greedy_methods/fractional_knapsack.py
similarity index 56%
rename from dynamic_programming/fractional_knapsack.py
rename to greedy_methods/fractional_knapsack.py
index c74af7ef8..58976d40c 100644
--- a/dynamic_programming/fractional_knapsack.py
+++ b/greedy_methods/fractional_knapsack.py
@@ -2,20 +2,20 @@ from bisect import bisect
from itertools import accumulate
-def fracKnapsack(vl, wt, W, n):
+def frac_knapsack(vl, wt, w, n):
"""
- >>> fracKnapsack([60, 100, 120], [10, 20, 30], 50, 3)
+ >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
240.0
"""
- r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True))
+ r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
vl, wt = [i[0] for i in r], [i[1] for i in r]
acc = list(accumulate(wt))
- k = bisect(acc, W)
+ k = bisect(acc, w)
return (
0
if k == 0
- else sum(vl[:k]) + (W - acc[k - 1]) * (vl[k]) / (wt[k])
+ else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k])
)
diff --git a/dynamic_programming/fractional_knapsack_2.py b/greedy_methods/fractional_knapsack_2.py
similarity index 68%
rename from dynamic_programming/fractional_knapsack_2.py
rename to greedy_methods/fractional_knapsack_2.py
index cae577383..6d9ed2ec3 100644
--- a/dynamic_programming/fractional_knapsack_2.py
+++ b/greedy_methods/fractional_knapsack_2.py
@@ -1,60 +1,53 @@
-# https://en.wikipedia.org/wiki/Continuous_knapsack_problem
-# https://www.guru99.com/fractional-knapsack-problem-greedy.html
-# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93
-
-from __future__ import annotations
-
-
-def fractional_knapsack(
- value: list[int], weight: list[int], capacity: int
-) -> tuple[int, list[int]]:
- """
- >>> value = [1, 3, 5, 7, 9]
- >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
- >>> fractional_knapsack(value, weight, 5)
- (25, [1, 1, 1, 1, 1])
- >>> fractional_knapsack(value, weight, 15)
- (25, [1, 1, 1, 1, 1])
- >>> fractional_knapsack(value, weight, 25)
- (25, [1, 1, 1, 1, 1])
- >>> fractional_knapsack(value, weight, 26)
- (25, [1, 1, 1, 1, 1])
- >>> fractional_knapsack(value, weight, -1)
- (-90.0, [0, 0, 0, 0, -10.0])
- >>> fractional_knapsack([1, 3, 5, 7], weight, 30)
- (16, [1, 1, 1, 1])
- >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30)
- (25, [1, 1, 1, 1, 1])
- >>> fractional_knapsack([], [], 30)
- (0, [])
- """
- index = list(range(len(value)))
- ratio = [v / w for v, w in zip(value, weight)]
- index.sort(key=lambda i: ratio[i], reverse=True)
-
- max_value = 0
- fractions = [0] * len(value)
- for i in index:
- if weight[i] <= capacity:
- fractions[i] = 1
- max_value += value[i]
- capacity -= weight[i]
- else:
- fractions[i] = capacity / weight[i]
- max_value += value[i] * capacity / weight[i]
- break
-
- return max_value, fractions
-
-
-if __name__ == "__main__":
- n = int(input("Enter number of items: "))
- value = input(f"Enter the values of the {n} item(s) in order: ").split()
- value = [int(v) for v in value]
- weight = input(f"Enter the positive weights of the {n} item(s) in order: ".split())
- weight = [int(w) for w in weight]
- capacity = int(input("Enter maximum weight: "))
-
- max_value, fractions = fractional_knapsack(value, weight, capacity)
- print("The maximum value of items that can be carried:", max_value)
- print("The fractions in which the items should be taken:", fractions)
+# https://en.wikipedia.org/wiki/Continuous_knapsack_problem
+# https://www.guru99.com/fractional-knapsack-problem-greedy.html
+# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93
+
+from __future__ import annotations
+
+
+def fractional_knapsack(
+ value: list[int], weight: list[int], capacity: int
+) -> tuple[float, list[float]]:
+ """
+ >>> value = [1, 3, 5, 7, 9]
+ >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
+ >>> fractional_knapsack(value, weight, 5)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, 15)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, 25)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, 26)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack(value, weight, -1)
+ (-90.0, [0, 0, 0, 0, -10.0])
+ >>> fractional_knapsack([1, 3, 5, 7], weight, 30)
+ (16, [1, 1, 1, 1])
+ >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30)
+ (25, [1, 1, 1, 1, 1])
+ >>> fractional_knapsack([], [], 30)
+ (0, [])
+ """
+ index = list(range(len(value)))
+ ratio = [v / w for v, w in zip(value, weight)]
+ index.sort(key=lambda i: ratio[i], reverse=True)
+
+ max_value: float = 0
+ fractions: list[float] = [0] * len(value)
+ for i in index:
+ if weight[i] <= capacity:
+ fractions[i] = 1
+ max_value += value[i]
+ capacity -= weight[i]
+ else:
+ fractions[i] = capacity / weight[i]
+ max_value += value[i] * capacity / weight[i]
+ break
+
+ return max_value, fractions
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/greedy_methods/minimum_waiting_time.py b/greedy_methods/minimum_waiting_time.py
new file mode 100644
index 000000000..aaae8cf8f
--- /dev/null
+++ b/greedy_methods/minimum_waiting_time.py
@@ -0,0 +1,48 @@
+"""
+Calculate the minimum waiting time using a greedy algorithm.
+reference: https://www.youtube.com/watch?v=Sf3eiO12eJs
+
+For doctests run following command:
+python -m doctest -v minimum_waiting_time.py
+
+The minimum_waiting_time function uses a greedy algorithm to calculate the minimum
+time for queries to complete. It sorts the list in non-decreasing order, calculates
+the waiting time for each query by multiplying its position in the list with the
+sum of all remaining query times, and returns the total waiting time. A doctest
+ensures that the function produces the correct output.
+"""
+
+
+def minimum_waiting_time(queries: list[int]) -> int:
+ """
+ This function takes a list of query times and returns the minimum waiting time
+ for all queries to be completed.
+
+ Args:
+ queries: A list of queries measured in picoseconds
+
+ Returns:
+ total_waiting_time: Minimum waiting time measured in picoseconds
+
+ Examples:
+ >>> minimum_waiting_time([3, 2, 1, 2, 6])
+ 17
+ >>> minimum_waiting_time([3, 2, 1])
+ 4
+ >>> minimum_waiting_time([1, 2, 3, 4])
+ 10
+ >>> minimum_waiting_time([5, 5, 5, 5])
+ 30
+ >>> minimum_waiting_time([])
+ 0
+ """
+ n = len(queries)
+ if n in (0, 1):
+ return 0
+ return sum(query * (n - i - 1) for i, query in enumerate(sorted(queries)))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/greedy_methods/optimal_merge_pattern.py b/greedy_methods/optimal_merge_pattern.py
new file mode 100644
index 000000000..a1c934f84
--- /dev/null
+++ b/greedy_methods/optimal_merge_pattern.py
@@ -0,0 +1,56 @@
+"""
+This is a pure Python implementation of the greedy-merge-sort algorithm
+reference: https://www.geeksforgeeks.org/optimal-file-merge-patterns/
+
+For doctests run following command:
+python3 -m doctest -v optimal_merge_pattern.py
+
+Objective
+Merge a set of sorted files of different length into a single sorted file.
+We need to find an optimal solution, where the resultant file
+will be generated in minimum time.
+
+Approach
+If the number of sorted files are given, there are many ways
+to merge them into a single sorted file.
+This merge can be performed pair wise.
+To merge an m-record file and an n-record file requires possibly m+n record moves
+the optimal choice being,
+merge the two smallest files together at each step (greedy approach).
+"""
+
+
+def optimal_merge_pattern(files: list) -> float:
+ """Function to merge all the files with optimum cost
+
+ Args:
+ files [list]: A list of sizes of different files to be merged
+
+ Returns:
+ optimal_merge_cost [int]: Optimal cost to merge all those files
+
+ Examples:
+ >>> optimal_merge_pattern([2, 3, 4])
+ 14
+ >>> optimal_merge_pattern([5, 10, 20, 30, 30])
+ 205
+ >>> optimal_merge_pattern([8, 8, 8, 8, 8])
+ 96
+ """
+ optimal_merge_cost = 0
+ while len(files) > 1:
+ temp = 0
+ # Consider two files with minimum cost to be merged
+ for _ in range(2):
+ min_index = files.index(min(files))
+ temp += files[min_index]
+ files.pop(min_index)
+ files.append(temp)
+ optimal_merge_cost += temp
+ return optimal_merge_cost
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/hashes/README.md b/hashes/README.md
new file mode 100644
index 000000000..6df9a2fb6
--- /dev/null
+++ b/hashes/README.md
@@ -0,0 +1,17 @@
+# Hashes
+Hashing is the process of mapping any amount of data to a specified size using an algorithm. This is known as a hash value (or, if you're feeling fancy, a hash code, hash sums, or even a hash digest). Hashing is a one-way function, whereas encryption is a two-way function. While it is functionally conceivable to reverse-hash stuff, the required computing power makes it impractical. Hashing is a one-way street.
+Unlike encryption, which is intended to protect data in transit, hashing is intended to authenticate that a file or piece of data has not been altered—that it is authentic. In other words, it functions as a checksum.
+
+## Common hashing algorithms
+### MD5
+This is one of the first algorithms that has gained widespread acceptance. MD5 is a hashing algorithm made by Ron Rivest that is known to suffer from vulnerabilities. It was created in 1992 as the successor to MD4. Currently MD6 is in the works, but as of 2009 Rivest had removed it from NIST consideration for SHA-3.
+
+### SHA
+SHA stands for Secure Hash Algorithm and it’s probably best known as the hashing algorithm used in most SSL/TLS cipher suites. A cipher suite is a collection of ciphers and algorithms that are used for SSL/TLS connections. SHA handles the hashing aspects. SHA-1, as we mentioned earlier, is now deprecated. SHA-2 is now mandatory. SHA-2 is sometimes known as SHA-256, though variants with longer bit lengths are also available.
+
+### SHA256
+SHA 256 is a member of the SHA 2 algorithm family, under which SHA stands for Secure Hash Algorithm. It was a collaborative effort between the NSA and NIST to implement a successor to the SHA 1 family, which was beginning to lose potency against brute force attacks. It was published in 2001.
+The importance of the 256 in the name refers to the final hash digest value, i.e. the hash value will remain 256 bits regardless of the size of the plaintext/cleartext. Other algorithms in the SHA family are similar to SHA 256 in some ways.
+
+### Luhn
+The Luhn algorithm, also renowned as the modulus 10 or mod 10 algorithm, is a straightforward checksum formula used to validate a wide range of identification numbers, including credit card numbers, IMEI numbers, and Canadian Social Insurance Numbers. A community of mathematicians developed the LUHN formula in the late 1960s. Companies offering credit cards quickly followed suit. Since the algorithm is in the public interest, anyone can use it. The algorithm is used by most credit cards and many government identification numbers as a simple method of differentiating valid figures from mistyped or otherwise incorrect numbers. It was created to guard against unintentional errors, not malicious attacks.
\ No newline at end of file
diff --git a/hashes/adler32.py b/hashes/adler32.py
index 4a61b97e3..611ebc88b 100644
--- a/hashes/adler32.py
+++ b/hashes/adler32.py
@@ -8,6 +8,8 @@
source: https://en.wikipedia.org/wiki/Adler-32
"""
+MOD_ADLER = 65521
+
def adler32(plain_text: str) -> int:
"""
@@ -20,7 +22,6 @@ def adler32(plain_text: str) -> int:
>>> adler32('go adler em all')
708642122
"""
- MOD_ADLER = 65521
a = 1
b = 0
for plain_chr in plain_text:
diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py
index 7ef4fdb3c..238fdb1c0 100644
--- a/hashes/chaos_machine.py
+++ b/hashes/chaos_machine.py
@@ -43,17 +43,17 @@ def pull():
global buffer_space, params_space, machine_time, K, m, t
# PRNG (Xorshift by George Marsaglia)
- def xorshift(X, Y):
- X ^= Y >> 13
- Y ^= X << 17
- X ^= Y >> 5
- return X
+ def xorshift(x, y):
+ x ^= y >> 13
+ y ^= x << 17
+ x ^= y >> 5
+ return x
# Choosing Dynamical Systems (Increment)
key = machine_time % m
# Evolution (Time Length)
- for i in range(0, t):
+ for _ in range(0, t):
# Variables (Position + Parameters)
r = params_space[key]
value = buffer_space[key]
@@ -63,13 +63,13 @@ def pull():
params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3
# Choosing Chaotic Data
- X = int(buffer_space[(key + 2) % m] * (10 ** 10))
- Y = int(buffer_space[(key - 2) % m] * (10 ** 10))
+ x = int(buffer_space[(key + 2) % m] * (10**10))
+ y = int(buffer_space[(key - 2) % m] * (10**10))
# Machine Time
machine_time += 1
- return xorshift(X, Y) % 0xFFFFFFFF
+ return xorshift(x, y) % 0xFFFFFFFF
def reset():
@@ -96,7 +96,7 @@ if __name__ == "__main__":
# Pulling Data (Output)
while inp in ("e", "E"):
- print("%s" % format(pull(), "#04x"))
+ print(f"{format(pull(), '#04x')}")
print(buffer_space)
print(params_space)
inp = input("(e)exit? ").strip()
diff --git a/hashes/djb2.py b/hashes/djb2.py
index 2d1c9aabb..4c8463509 100644
--- a/hashes/djb2.py
+++ b/hashes/djb2.py
@@ -29,7 +29,7 @@ def djb2(s: str) -> int:
>>> djb2('scramble bits')
1609059040
"""
- hash = 5381
+ hash_value = 5381
for x in s:
- hash = ((hash << 5) + hash) + ord(x)
- return hash & 0xFFFFFFFF
+ hash_value = ((hash_value << 5) + hash_value) + ord(x)
+ return hash_value & 0xFFFFFFFF
diff --git a/hashes/elf.py b/hashes/elf.py
new file mode 100644
index 000000000..e4bfcec22
--- /dev/null
+++ b/hashes/elf.py
@@ -0,0 +1,21 @@
+def elf_hash(data: str) -> int:
+ """
+ Implementation of ElfHash Algorithm, a variant of PJW hash function.
+
+ >>> elf_hash('lorem ipsum')
+ 253956621
+ """
+ hash_ = x = 0
+ for letter in data:
+ hash_ = (hash_ << 4) + ord(letter)
+ x = hash_ & 0xF0000000
+ if x != 0:
+ hash_ ^= x >> 24
+ hash_ &= ~x
+ return hash_
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py
index d1cb6efc2..d95437d12 100644
--- a/hashes/enigma_machine.py
+++ b/hashes/enigma_machine.py
@@ -1,8 +1,8 @@
alphabets = [chr(i) for i in range(32, 126)]
-gear_one = [i for i in range(len(alphabets))]
-gear_two = [i for i in range(len(alphabets))]
-gear_three = [i for i in range(len(alphabets))]
-reflector = [i for i in reversed(range(len(alphabets)))]
+gear_one = list(range(len(alphabets)))
+gear_two = list(range(len(alphabets)))
+gear_three = list(range(len(alphabets)))
+reflector = list(reversed(range(len(alphabets))))
code = []
gear_one_pos = gear_two_pos = gear_three_pos = 0
@@ -48,12 +48,12 @@ if __name__ == "__main__":
break
except Exception as error:
print(error)
- for i in range(token):
+ for _ in range(token):
rotator()
for j in decode:
engine(j)
print("\n" + "".join(code))
print(
f"\nYour Token is {token} please write it down.\nIf you want to decode "
- f"this message again you should input same digits as token!"
+ "this message again you should input same digits as token!"
)
diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py
index 4a32bae1a..dc9303218 100644
--- a/hashes/hamming_code.py
+++ b/hashes/hamming_code.py
@@ -68,177 +68,175 @@ def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"):
# Functions of hamming code-------------------------------------------
-def emitterConverter(sizePar, data):
+def emitter_converter(size_par, data):
"""
- :param sizePar: how many parity bits the message must have
+ :param size_par: how many parity bits the message must have
:param data: information bits
:return: message to be transmitted by unreliable medium
- bits of information merged with parity bits
- >>> emitterConverter(4, "101010111111")
+ >>> emitter_converter(4, "101010111111")
['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1']
"""
- if sizePar + len(data) <= 2 ** sizePar - (len(data) - 1):
- print("ERROR - size of parity don't match with size of data")
- exit(0)
+ if size_par + len(data) <= 2**size_par - (len(data) - 1):
+ raise ValueError("size of parity don't match with size of data")
- dataOut = []
+ data_out = []
parity = []
- binPos = [bin(x)[2:] for x in range(1, sizePar + len(data) + 1)]
+ bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data) + 1)]
# sorted information data for the size of the output data
- dataOrd = []
+ data_ord = []
# data position template + parity
- dataOutGab = []
+ data_out_gab = []
# parity bit counter
- qtdBP = 0
+ qtd_bp = 0
# counter position of data bits
- contData = 0
+ cont_data = 0
- for x in range(1, sizePar + len(data) + 1):
+ for x in range(1, size_par + len(data) + 1):
# Performs a template of bit positions - who should be given,
# and who should be parity
- if qtdBP < sizePar:
+ if qtd_bp < size_par:
if (np.log(x) / np.log(2)).is_integer():
- dataOutGab.append("P")
- qtdBP = qtdBP + 1
+ data_out_gab.append("P")
+ qtd_bp = qtd_bp + 1
else:
- dataOutGab.append("D")
+ data_out_gab.append("D")
else:
- dataOutGab.append("D")
+ data_out_gab.append("D")
# Sorts the data to the new output size
- if dataOutGab[-1] == "D":
- dataOrd.append(data[contData])
- contData += 1
+ if data_out_gab[-1] == "D":
+ data_ord.append(data[cont_data])
+ cont_data += 1
else:
- dataOrd.append(None)
+ data_ord.append(None)
# Calculates parity
- qtdBP = 0 # parity bit counter
- for bp in range(1, sizePar + 1):
+ qtd_bp = 0 # parity bit counter
+ for bp in range(1, size_par + 1):
# Bit counter one for a given parity
- contBO = 0
+ cont_bo = 0
# counter to control the loop reading
- contLoop = 0
- for x in dataOrd:
+ cont_loop = 0
+ for x in data_ord:
if x is not None:
try:
- aux = (binPos[contLoop])[-1 * (bp)]
+ aux = (bin_pos[cont_loop])[-1 * (bp)]
except IndexError:
aux = "0"
- if aux == "1":
- if x == "1":
- contBO += 1
- contLoop += 1
- parity.append(contBO % 2)
+ if aux == "1" and x == "1":
+ cont_bo += 1
+ cont_loop += 1
+ parity.append(cont_bo % 2)
- qtdBP += 1
+ qtd_bp += 1
# Mount the message
- ContBP = 0 # parity bit counter
- for x in range(0, sizePar + len(data)):
- if dataOrd[x] is None:
- dataOut.append(str(parity[ContBP]))
- ContBP += 1
+ cont_bp = 0 # parity bit counter
+ for x in range(0, size_par + len(data)):
+ if data_ord[x] is None:
+ data_out.append(str(parity[cont_bp]))
+ cont_bp += 1
else:
- dataOut.append(dataOrd[x])
+ data_out.append(data_ord[x])
- return dataOut
+ return data_out
-def receptorConverter(sizePar, data):
+def receptor_converter(size_par, data):
"""
- >>> receptorConverter(4, "1111010010111111")
+ >>> receptor_converter(4, "1111010010111111")
(['1', '0', '1', '0', '1', '0', '1', '1', '1', '1', '1', '1'], True)
"""
# data position template + parity
- dataOutGab = []
+ data_out_gab = []
# Parity bit counter
- qtdBP = 0
+ qtd_bp = 0
# Counter p data bit reading
- contData = 0
+ cont_data = 0
# list of parity received
- parityReceived = []
- dataOutput = []
+ parity_received = []
+ data_output = []
for x in range(1, len(data) + 1):
# Performs a template of bit positions - who should be given,
# and who should be parity
- if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer():
- dataOutGab.append("P")
- qtdBP = qtdBP + 1
+ if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer():
+ data_out_gab.append("P")
+ qtd_bp = qtd_bp + 1
else:
- dataOutGab.append("D")
+ data_out_gab.append("D")
# Sorts the data to the new output size
- if dataOutGab[-1] == "D":
- dataOutput.append(data[contData])
+ if data_out_gab[-1] == "D":
+ data_output.append(data[cont_data])
else:
- parityReceived.append(data[contData])
- contData += 1
+ parity_received.append(data[cont_data])
+ cont_data += 1
# -----------calculates the parity with the data
- dataOut = []
+ data_out = []
parity = []
- binPos = [bin(x)[2:] for x in range(1, sizePar + len(dataOutput) + 1)]
+ bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data_output) + 1)]
# sorted information data for the size of the output data
- dataOrd = []
+ data_ord = []
# Data position feedback + parity
- dataOutGab = []
+ data_out_gab = []
# Parity bit counter
- qtdBP = 0
+ qtd_bp = 0
# Counter p data bit reading
- contData = 0
+ cont_data = 0
- for x in range(1, sizePar + len(dataOutput) + 1):
+ for x in range(1, size_par + len(data_output) + 1):
# Performs a template position of bits - who should be given,
# and who should be parity
- if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer():
- dataOutGab.append("P")
- qtdBP = qtdBP + 1
+ if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer():
+ data_out_gab.append("P")
+ qtd_bp = qtd_bp + 1
else:
- dataOutGab.append("D")
+ data_out_gab.append("D")
# Sorts the data to the new output size
- if dataOutGab[-1] == "D":
- dataOrd.append(dataOutput[contData])
- contData += 1
+ if data_out_gab[-1] == "D":
+ data_ord.append(data_output[cont_data])
+ cont_data += 1
else:
- dataOrd.append(None)
+ data_ord.append(None)
# Calculates parity
- qtdBP = 0 # parity bit counter
- for bp in range(1, sizePar + 1):
+ qtd_bp = 0 # parity bit counter
+ for bp in range(1, size_par + 1):
# Bit counter one for a certain parity
- contBO = 0
+ cont_bo = 0
# Counter to control loop reading
- contLoop = 0
- for x in dataOrd:
+ cont_loop = 0
+ for x in data_ord:
if x is not None:
try:
- aux = (binPos[contLoop])[-1 * (bp)]
+ aux = (bin_pos[cont_loop])[-1 * (bp)]
except IndexError:
aux = "0"
if aux == "1" and x == "1":
- contBO += 1
- contLoop += 1
- parity.append(str(contBO % 2))
+ cont_bo += 1
+ cont_loop += 1
+ parity.append(str(cont_bo % 2))
- qtdBP += 1
+ qtd_bp += 1
# Mount the message
- ContBP = 0 # Parity bit counter
- for x in range(0, sizePar + len(dataOutput)):
- if dataOrd[x] is None:
- dataOut.append(str(parity[ContBP]))
- ContBP += 1
+ cont_bp = 0 # Parity bit counter
+ for x in range(0, size_par + len(data_output)):
+ if data_ord[x] is None:
+ data_out.append(str(parity[cont_bp]))
+ cont_bp += 1
else:
- dataOut.append(dataOrd[x])
+ data_out.append(data_ord[x])
- ack = parityReceived == parity
- return dataOutput, ack
+ ack = parity_received == parity
+ return data_output, ack
# ---------------------------------------------------------------------
diff --git a/hashes/luhn.py b/hashes/luhn.py
index 69e7b4ccf..bb77fd05c 100644
--- a/hashes/luhn.py
+++ b/hashes/luhn.py
@@ -1,42 +1,37 @@
""" Luhn Algorithm """
-from typing import List
+from __future__ import annotations
def is_luhn(string: str) -> bool:
"""
- Perform Luhn validation on input string
+ Perform Luhn validation on an input string
Algorithm:
* Double every other digit starting from 2nd last digit.
* Subtract 9 if number is greater than 9.
* Sum the numbers
*
- >>> test_cases = [79927398710, 79927398711, 79927398712, 79927398713,
+ >>> test_cases = (79927398710, 79927398711, 79927398712, 79927398713,
... 79927398714, 79927398715, 79927398716, 79927398717, 79927398718,
- ... 79927398719]
- >>> test_cases = list(map(str, test_cases))
- >>> list(map(is_luhn, test_cases))
+ ... 79927398719)
+ >>> [is_luhn(str(test_case)) for test_case in test_cases]
[False, False, False, True, False, False, False, False, False, False]
"""
check_digit: int
- _vector: List[str] = list(string)
+ _vector: list[str] = list(string)
__vector, check_digit = _vector[:-1], int(_vector[-1])
- vector: List[int] = [*map(int, __vector)]
+ vector: list[int] = [int(digit) for digit in __vector]
vector.reverse()
- for idx, i in enumerate(vector):
-
- if idx & 1 == 0:
- doubled: int = vector[idx] * 2
+ for i, digit in enumerate(vector):
+ if i & 1 == 0:
+ doubled: int = digit * 2
if doubled > 9:
doubled -= 9
-
check_digit += doubled
else:
- check_digit += i
+ check_digit += digit
- if (check_digit) % 10 == 0:
- return True
- return False
+ return check_digit % 10 == 0
if __name__ == "__main__":
@@ -44,3 +39,4 @@ if __name__ == "__main__":
doctest.testmod()
assert is_luhn("79927398713")
+ assert not is_luhn("79927398714")
diff --git a/hashes/md5.py b/hashes/md5.py
index b08ab9573..2187006ec 100644
--- a/hashes/md5.py
+++ b/hashes/md5.py
@@ -1,91 +1,223 @@
-import math
+"""
+The MD5 algorithm is a hash function that's commonly used as a checksum to
+detect data corruption. The algorithm works by processing a given message in
+blocks of 512 bits, padding the message as needed. It uses the blocks to operate
+a 128-bit state and performs a total of 64 such operations. Note that all values
+are little-endian, so inputs are converted as needed.
+
+Although MD5 was used as a cryptographic hash function in the past, it's since
+been cracked, so it shouldn't be used for security purposes.
+
+For more info, see https://en.wikipedia.org/wiki/MD5
+"""
+
+from collections.abc import Generator
+from math import sin
-def rearrange(bitString32):
- """[summary]
- Regroups the given binary string.
+def to_little_endian(string_32: bytes) -> bytes:
+ """
+ Converts the given string to little-endian in groups of 8 chars.
Arguments:
- bitString32 {[string]} -- [32 bit binary]
+ string_32 {[string]} -- [32-char string]
Raises:
- ValueError -- [if the given string not are 32 bit binary string]
+ ValueError -- [input is not 32 char]
Returns:
- [string] -- [32 bit binary string]
- >>> rearrange('1234567890abcdfghijklmnopqrstuvw')
- 'pqrstuvwhijklmno90abcdfg12345678'
+ 32-char little-endian string
+ >>> to_little_endian(b'1234567890abcdfghijklmnopqrstuvw')
+ b'pqrstuvwhijklmno90abcdfg12345678'
+ >>> to_little_endian(b'1234567890')
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be of length 32
"""
+ if len(string_32) != 32:
+ raise ValueError("Input must be of length 32")
- if len(bitString32) != 32:
- raise ValueError("Need length 32")
- newString = ""
+ little_endian = b""
for i in [3, 2, 1, 0]:
- newString += bitString32[8 * i : 8 * i + 8]
- return newString
+ little_endian += string_32[8 * i : 8 * i + 8]
+ return little_endian
-def reformatHex(i):
- """[summary]
- Converts the given integer into 8-digit hex number.
+def reformat_hex(i: int) -> bytes:
+ """
+ Converts the given non-negative integer to hex string.
+
+ Example: Suppose the input is the following:
+ i = 1234
+
+ The input is 0x000004d2 in hex, so the little-endian hex string is
+ "d2040000".
Arguments:
- i {[int]} -- [integer]
- >>> reformatHex(666)
- '9a020000'
- """
+ i {[int]} -- [integer]
- hexrep = format(i, "08x")
- thing = ""
- for i in [3, 2, 1, 0]:
- thing += hexrep[2 * i : 2 * i + 2]
- return thing
-
-
-def pad(bitString):
- """[summary]
- Fills up the binary string to a 512 bit binary string
-
- Arguments:
- bitString {[string]} -- [binary string]
+ Raises:
+ ValueError -- [input is negative]
Returns:
- [string] -- [binary string]
+ 8-char little-endian hex string
+
+ >>> reformat_hex(1234)
+ b'd2040000'
+ >>> reformat_hex(666)
+ b'9a020000'
+ >>> reformat_hex(0)
+ b'00000000'
+ >>> reformat_hex(1234567890)
+ b'd2029649'
+ >>> reformat_hex(1234567890987654321)
+ b'b11c6cb1'
+ >>> reformat_hex(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be non-negative
"""
- startLength = len(bitString)
- bitString += "1"
- while len(bitString) % 512 != 448:
- bitString += "0"
- lastPart = format(startLength, "064b")
- bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32])
- return bitString
+ if i < 0:
+ raise ValueError("Input must be non-negative")
+
+ hex_rep = format(i, "08x")[-8:]
+ little_endian_hex = b""
+ for i in [3, 2, 1, 0]:
+ little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
+ return little_endian_hex
-def getBlock(bitString):
- """[summary]
- Iterator:
- Returns by each call a list of length 16 with the 32 bit
- integer blocks.
+def preprocess(message: bytes) -> bytes:
+ """
+ Preprocesses the message string:
+ - Convert message to bit string
+ - Pad bit string to a multiple of 512 chars:
+ - Append a 1
+ - Append 0's until length = 448 (mod 512)
+ - Append length of original message (64 chars)
+
+ Example: Suppose the input is the following:
+ message = "a"
+
+ The message bit string is "01100001", which is 8 bits long. Thus, the
+ bit string needs 439 bits of padding so that
+ (bit_string + "1" + padding) = 448 (mod 512).
+ The message length is "000010000...0" in 64-bit little-endian binary.
+ The combined bit string is then 512 bits long.
Arguments:
- bitString {[string]} -- [binary string >= 512]
+ message {[string]} -- [message string]
+
+ Returns:
+ processed bit string padded to a multiple of 512 chars
+
+ >>> preprocess(b"a") == (b"01100001" + b"1" +
+ ... (b"0" * 439) + b"00001000" + (b"0" * 56))
+ True
+ >>> preprocess(b"") == b"1" + (b"0" * 447) + (b"0" * 64)
+ True
"""
+ bit_string = b""
+ for char in message:
+ bit_string += format(char, "08b").encode("utf-8")
+ start_len = format(len(bit_string), "064b").encode("utf-8")
- currPos = 0
- while currPos < len(bitString):
- currPart = bitString[currPos : currPos + 512]
- mySplits = []
- for i in range(16):
- mySplits.append(int(rearrange(currPart[32 * i : 32 * i + 32]), 2))
- yield mySplits
- currPos += 512
+ # Pad bit_string to a multiple of 512 chars
+ bit_string += b"1"
+ while len(bit_string) % 512 != 448:
+ bit_string += b"0"
+ bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
+
+ return bit_string
-def not32(i):
+def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
"""
- >>> not32(34)
+ Splits bit string into blocks of 512 chars and yields each block as a list
+ of 32-bit words
+
+ Example: Suppose the input is the following:
+ bit_string =
+ "000000000...0" + # 0x00 (32 bits, padded to the right)
+ "000000010...0" + # 0x01 (32 bits, padded to the right)
+ "000000100...0" + # 0x02 (32 bits, padded to the right)
+ "000000110...0" + # 0x03 (32 bits, padded to the right)
+ ...
+ "000011110...0" # 0x0f (32 bits, padded to the right)
+
+ Then len(bit_string) == 512, so there'll be 1 block. The block is split
+ into 32-bit words, and each word is converted to little endian. The
+ first word is interpreted as 0 in decimal, the second word is
+ interpreted as 1 in decimal, etc.
+
+ Thus, block_words == [[0, 1, 2, 3, ..., 15]].
+
+ Arguments:
+ bit_string {[string]} -- [bit string with multiple of 512 as length]
+
+ Raises:
+ ValueError -- [length of bit string isn't multiple of 512]
+
+ Yields:
+ a list of 16 32-bit words
+
+ >>> test_string = ("".join(format(n << 24, "032b") for n in range(16))
+ ... .encode("utf-8"))
+ >>> list(get_block_words(test_string))
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
+ >>> list(get_block_words(test_string * 4)) == [list(range(16))] * 4
+ True
+ >>> list(get_block_words(b"1" * 512)) == [[4294967295] * 16]
+ True
+ >>> list(get_block_words(b""))
+ []
+ >>> list(get_block_words(b"1111"))
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must have length that's a multiple of 512
+ """
+ if len(bit_string) % 512 != 0:
+ raise ValueError("Input must have length that's a multiple of 512")
+
+ for pos in range(0, len(bit_string), 512):
+ block = bit_string[pos : pos + 512]
+ block_words = []
+ for i in range(0, 512, 32):
+ block_words.append(int(to_little_endian(block[i : i + 32]), 2))
+ yield block_words
+
+
+def not_32(i: int) -> int:
+ """
+ Perform bitwise NOT on given int.
+
+ Arguments:
+ i {[int]} -- [given int]
+
+ Raises:
+ ValueError -- [input is negative]
+
+ Returns:
+ Result of bitwise NOT on i
+
+ >>> not_32(34)
4294967261
+ >>> not_32(1234)
+ 4294966061
+ >>> not_32(4294966061)
+ 1234
+ >>> not_32(0)
+ 4294967295
+ >>> not_32(1)
+ 4294967294
+ >>> not_32(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be non-negative
"""
+ if i < 0:
+ raise ValueError("Input must be non-negative")
+
i_str = format(i, "032b")
new_str = ""
for c in i_str:
@@ -93,35 +225,114 @@ def not32(i):
return int(new_str, 2)
-def sum32(a, b):
- return (a + b) % 2 ** 32
-
-
-def leftrot32(i, s):
- return (i << s) ^ (i >> (32 - s))
-
-
-def md5me(testString):
- """[summary]
- Returns a 32-bit hash code of the string 'testString'
+def sum_32(a: int, b: int) -> int:
+ """
+ Add two numbers as 32-bit ints.
Arguments:
- testString {[string]} -- [message]
+ a {[int]} -- [first given int]
+ b {[int]} -- [second given int]
+
+ Returns:
+ (a + b) as an unsigned 32-bit int
+
+ >>> sum_32(1, 1)
+ 2
+ >>> sum_32(2, 3)
+ 5
+ >>> sum_32(0, 0)
+ 0
+ >>> sum_32(-1, -1)
+ 4294967294
+ >>> sum_32(4294967295, 1)
+ 0
+ """
+ return (a + b) % 2**32
+
+
+def left_rotate_32(i: int, shift: int) -> int:
+ """
+ Rotate the bits of a given int left by a given amount.
+
+ Arguments:
+ i {[int]} -- [given int]
+ shift {[int]} -- [shift amount]
+
+ Raises:
+ ValueError -- [either given int or shift is negative]
+
+ Returns:
+ `i` rotated to the left by `shift` bits
+
+ >>> left_rotate_32(1234, 1)
+ 2468
+ >>> left_rotate_32(1111, 4)
+ 17776
+ >>> left_rotate_32(2147483648, 1)
+ 1
+ >>> left_rotate_32(2147483648, 3)
+ 4
+ >>> left_rotate_32(4294967295, 4)
+ 4294967295
+ >>> left_rotate_32(1234, 0)
+ 1234
+ >>> left_rotate_32(0, 0)
+ 0
+ >>> left_rotate_32(-1, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be non-negative
+ >>> left_rotate_32(0, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Shift must be non-negative
+ """
+ if i < 0:
+ raise ValueError("Input must be non-negative")
+ if shift < 0:
+ raise ValueError("Shift must be non-negative")
+ return ((i << shift) ^ (i >> (32 - shift))) % 2**32
+
+
+def md5_me(message: bytes) -> bytes:
+ """
+ Returns the 32-char MD5 hash of a given message.
+
+ Reference: https://en.wikipedia.org/wiki/MD5#Algorithm
+
+ Arguments:
+ message {[string]} -- [message]
+
+ Returns:
+ 32-char MD5 hash string
+
+ >>> md5_me(b"")
+ b'd41d8cd98f00b204e9800998ecf8427e'
+ >>> md5_me(b"The quick brown fox jumps over the lazy dog")
+ b'9e107d9d372bb6826bd81d3542a419d6'
+ >>> md5_me(b"The quick brown fox jumps over the lazy dog.")
+ b'e4d909c290d0fb1ca068ffaddf22cbd0'
+
+ >>> import hashlib
+ >>> from string import ascii_letters
+ >>> msgs = [b"", ascii_letters.encode("utf-8"), "Üñîçø∂é".encode("utf-8"),
+ ... b"The quick brown fox jumps over the lazy dog."]
+ >>> all(md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8") for msg in msgs)
+ True
"""
- bs = ""
- for i in testString:
- bs += format(ord(i), "08b")
- bs = pad(bs)
+ # Convert to bit string, add padding and append message length
+ bit_string = preprocess(message)
- tvals = [int(2 ** 32 * abs(math.sin(i + 1))) for i in range(64)]
+ added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
+ # Starting states
a0 = 0x67452301
b0 = 0xEFCDAB89
c0 = 0x98BADCFE
d0 = 0x10325476
- s = [
+ shift_amounts = [
7,
12,
17,
@@ -188,51 +399,46 @@ def md5me(testString):
21,
]
- for m in getBlock(bs):
- A = a0
- B = b0
- C = c0
- D = d0
+ # Process bit string in chunks, each with 16 32-char words
+ for block_words in get_block_words(bit_string):
+ a = a0
+ b = b0
+ c = c0
+ d = d0
+
+ # Hash current chunk
for i in range(64):
if i <= 15:
- # f = (B & C) | (not32(B) & D)
- f = D ^ (B & (C ^ D))
+ # f = (b & c) | (not_32(b) & d) # Alternate definition for f
+ f = d ^ (b & (c ^ d))
g = i
elif i <= 31:
- # f = (D & B) | (not32(D) & C)
- f = C ^ (D & (B ^ C))
+ # f = (d & b) | (not_32(d) & c) # Alternate definition for f
+ f = c ^ (d & (b ^ c))
g = (5 * i + 1) % 16
elif i <= 47:
- f = B ^ C ^ D
+ f = b ^ c ^ d
g = (3 * i + 5) % 16
else:
- f = C ^ (B | not32(D))
+ f = c ^ (b | not_32(d))
g = (7 * i) % 16
- dtemp = D
- D = C
- C = B
- B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2 ** 32, s[i]))
- A = dtemp
- a0 = sum32(a0, A)
- b0 = sum32(b0, B)
- c0 = sum32(c0, C)
- d0 = sum32(d0, D)
+ f = (f + a + added_consts[i] + block_words[g]) % 2**32
+ a = d
+ d = c
+ c = b
+ b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
- digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0)
+ # Add hashed chunk to running total
+ a0 = sum_32(a0, a)
+ b0 = sum_32(b0, b)
+ c0 = sum_32(c0, c)
+ d0 = sum_32(d0, d)
+
+ digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
-def test():
- assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
- assert (
- md5me("The quick brown fox jumps over the lazy dog")
- == "9e107d9d372bb6826bd81d3542a419d6"
- )
- print("Success.")
-
-
if __name__ == "__main__":
- test()
import doctest
doctest.testmod()
diff --git a/hashes/sdbm.py b/hashes/sdbm.py
index daf292717..a5432874b 100644
--- a/hashes/sdbm.py
+++ b/hashes/sdbm.py
@@ -31,7 +31,9 @@ def sdbm(plain_text: str) -> int:
>>> sdbm('scramble bits')
730247649148944819640658295400555317318720608290373040936089
"""
- hash = 0
+ hash_value = 0
for plain_chr in plain_text:
- hash = ord(plain_chr) + (hash << 6) + (hash << 16) - hash
- return hash
+ hash_value = (
+ ord(plain_chr) + (hash_value << 6) + (hash_value << 16) - hash_value
+ )
+ return hash_value
diff --git a/hashes/sha1.py b/hashes/sha1.py
index dde1efc55..b325ce3e4 100644
--- a/hashes/sha1.py
+++ b/hashes/sha1.py
@@ -26,7 +26,6 @@ Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
-import unittest
class SHA1Hash:
@@ -125,17 +124,12 @@ class SHA1Hash:
self.h[3] + d & 0xFFFFFFFF,
self.h[4] + e & 0xFFFFFFFF,
)
- return "%08x%08x%08x%08x%08x" % tuple(self.h)
+ return ("{:08x}" * 5).format(*self.h)
-class SHA1HashTest(unittest.TestCase):
- """
- Test class for the SHA1Hash class. Inherits the TestCase class from unittest
- """
-
- def testMatchHashes(self):
- msg = bytes("Test String", "utf-8")
- self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest())
+def test_sha1_hash():
+ msg = b"Test String"
+ assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest() # noqa: S324
def main():
diff --git a/hashes/sha256.py b/hashes/sha256.py
new file mode 100644
index 000000000..98f7c096e
--- /dev/null
+++ b/hashes/sha256.py
@@ -0,0 +1,248 @@
+# Author: M. Yathurshan
+# Black Formatter: True
+
+"""
+Implementation of SHA256 Hash function in a Python class and provides utilities
+to find hash of string or hash of text from a file.
+
+Usage: python sha256.py --string "Hello World!!"
+ python sha256.py --file "hello_world.txt"
+ When run without any arguments,
+ it prints the hash of the string "Hello World!! Welcome to Cryptography"
+
+References:
+https://qvault.io/cryptography/how-sha-2-works-step-by-step-sha-256/
+https://en.wikipedia.org/wiki/SHA-2
+"""
+
+import argparse
+import struct
+import unittest
+
+
+class SHA256:
+ """
+ Class to contain the entire pipeline for the SHA256 Hashing Algorithm
+
+ >>> SHA256(b'Python').hash
+ '18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db'
+
+ >>> SHA256(b'hello world').hash
+ 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
+ """
+
+ def __init__(self, data: bytes) -> None:
+ self.data = data
+
+ # Initialize hash values
+ self.hashes = [
+ 0x6A09E667,
+ 0xBB67AE85,
+ 0x3C6EF372,
+ 0xA54FF53A,
+ 0x510E527F,
+ 0x9B05688C,
+ 0x1F83D9AB,
+ 0x5BE0CD19,
+ ]
+
+ # Initialize round constants
+ self.round_constants = [
+ 0x428A2F98,
+ 0x71374491,
+ 0xB5C0FBCF,
+ 0xE9B5DBA5,
+ 0x3956C25B,
+ 0x59F111F1,
+ 0x923F82A4,
+ 0xAB1C5ED5,
+ 0xD807AA98,
+ 0x12835B01,
+ 0x243185BE,
+ 0x550C7DC3,
+ 0x72BE5D74,
+ 0x80DEB1FE,
+ 0x9BDC06A7,
+ 0xC19BF174,
+ 0xE49B69C1,
+ 0xEFBE4786,
+ 0x0FC19DC6,
+ 0x240CA1CC,
+ 0x2DE92C6F,
+ 0x4A7484AA,
+ 0x5CB0A9DC,
+ 0x76F988DA,
+ 0x983E5152,
+ 0xA831C66D,
+ 0xB00327C8,
+ 0xBF597FC7,
+ 0xC6E00BF3,
+ 0xD5A79147,
+ 0x06CA6351,
+ 0x14292967,
+ 0x27B70A85,
+ 0x2E1B2138,
+ 0x4D2C6DFC,
+ 0x53380D13,
+ 0x650A7354,
+ 0x766A0ABB,
+ 0x81C2C92E,
+ 0x92722C85,
+ 0xA2BFE8A1,
+ 0xA81A664B,
+ 0xC24B8B70,
+ 0xC76C51A3,
+ 0xD192E819,
+ 0xD6990624,
+ 0xF40E3585,
+ 0x106AA070,
+ 0x19A4C116,
+ 0x1E376C08,
+ 0x2748774C,
+ 0x34B0BCB5,
+ 0x391C0CB3,
+ 0x4ED8AA4A,
+ 0x5B9CCA4F,
+ 0x682E6FF3,
+ 0x748F82EE,
+ 0x78A5636F,
+ 0x84C87814,
+ 0x8CC70208,
+ 0x90BEFFFA,
+ 0xA4506CEB,
+ 0xBEF9A3F7,
+ 0xC67178F2,
+ ]
+
+ self.preprocessed_data = self.preprocessing(self.data)
+ self.final_hash()
+
+ @staticmethod
+ def preprocessing(data: bytes) -> bytes:
+ padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
+ big_endian_integer = struct.pack(">Q", (len(data) * 8))
+ return data + padding + big_endian_integer
+
+ def final_hash(self) -> None:
+ # Convert into blocks of 64 bytes
+ self.blocks = [
+ self.preprocessed_data[x : x + 64]
+ for x in range(0, len(self.preprocessed_data), 64)
+ ]
+
+ for block in self.blocks:
+ # Convert the given block into a list of 4 byte integers
+ words = list(struct.unpack(">16L", block))
+ # add 48 0-ed integers
+ words += [0] * 48
+
+ a, b, c, d, e, f, g, h = self.hashes
+
+ for index in range(0, 64):
+ if index > 15:
+ # modify the zero-ed indexes at the end of the array
+ s0 = (
+ self.ror(words[index - 15], 7)
+ ^ self.ror(words[index - 15], 18)
+ ^ (words[index - 15] >> 3)
+ )
+ s1 = (
+ self.ror(words[index - 2], 17)
+ ^ self.ror(words[index - 2], 19)
+ ^ (words[index - 2] >> 10)
+ )
+
+ words[index] = (
+ words[index - 16] + s0 + words[index - 7] + s1
+ ) % 0x100000000
+
+ # Compression
+ s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
+ ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
+ temp1 = (
+ h + s1 + ch + self.round_constants[index] + words[index]
+ ) % 0x100000000
+ s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
+ maj = (a & b) ^ (a & c) ^ (b & c)
+ temp2 = (s0 + maj) % 0x100000000
+
+ h, g, f, e, d, c, b, a = (
+ g,
+ f,
+ e,
+ ((d + temp1) % 0x100000000),
+ c,
+ b,
+ a,
+ ((temp1 + temp2) % 0x100000000),
+ )
+
+ mutated_hash_values = [a, b, c, d, e, f, g, h]
+
+ # Modify final values
+ self.hashes = [
+ ((element + mutated_hash_values[index]) % 0x100000000)
+ for index, element in enumerate(self.hashes)
+ ]
+
+ self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
+
+ def ror(self, value: int, rotations: int) -> int:
+ """
+ Right rotate a given unsigned number by a certain amount of rotations
+ """
+ return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
+
+
+class SHA256HashTest(unittest.TestCase):
+ """
+ Test class for the SHA256 class. Inherits the TestCase class from unittest
+ """
+
+ def test_match_hashes(self) -> None:
+ import hashlib
+
+ msg = bytes("Test String", "utf-8")
+ self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
+
+
+def main() -> None:
+ """
+ Provides option 'string' or 'file' to take input
+ and prints the calculated SHA-256 hash
+ """
+
+ # unittest.main()
+
+ import doctest
+
+ doctest.testmod()
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-s",
+ "--string",
+ dest="input_string",
+ default="Hello World!! Welcome to Cryptography",
+ help="Hash the string",
+ )
+ parser.add_argument(
+ "-f", "--file", dest="input_file", help="Hash contents of a file"
+ )
+
+ args = parser.parse_args()
+
+ input_string = args.input_string
+
+ # hash input should be a bytestring
+ if args.input_file:
+ with open(args.input_file, "rb") as f:
+ hash_input = f.read()
+ else:
+ hash_input = bytes(input_string, "utf-8")
+
+ print(SHA256(hash_input).hash)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/knapsack/README.md b/knapsack/README.md
index 6041c1e48..f31e5f591 100644
--- a/knapsack/README.md
+++ b/knapsack/README.md
@@ -17,7 +17,7 @@ The knapsack problem has been studied for more than a century, with early works
## Documentation
This module uses docstrings to enable the use of Python's in-built `help(...)` function.
-For instance, try `help(Vector)`, `help(unitBasisVector)`, and `help(CLASSNAME.METHODNAME)`.
+For instance, try `help(Vector)`, `help(unit_basis_vector)`, and `help(CLASSNAME.METHODNAME)`.
---
diff --git a/knapsack/knapsack.py b/knapsack/knapsack.py
index 756443ea6..18a36c3bc 100644
--- a/knapsack/knapsack.py
+++ b/knapsack/knapsack.py
@@ -1,11 +1,10 @@
-from typing import List
-
""" A naive recursive implementation of 0-1 Knapsack Problem
https://en.wikipedia.org/wiki/Knapsack_problem
"""
+from __future__ import annotations
-def knapsack(capacity: int, weights: List[int], values: List[int], counter: int) -> int:
+def knapsack(capacity: int, weights: list[int], values: list[int], counter: int) -> int:
"""
Returns the maximum value that can be put in a knapsack of a capacity cap,
whereby each weight w has a specific value val.
diff --git a/knapsack/recursive_approach_knapsack.py b/knapsack/recursive_approach_knapsack.py
new file mode 100644
index 000000000..9a8ed1886
--- /dev/null
+++ b/knapsack/recursive_approach_knapsack.py
@@ -0,0 +1,51 @@
+# To get an insight into naive recursive way to solve the Knapsack problem
+
+
+"""
+A shopkeeper has bags of wheat that each have different weights and different profits.
+eg.
+no_of_items 4
+profit 5 4 8 6
+weight 1 2 4 5
+max_weight 5
+Constraints:
+max_weight > 0
+profit[i] >= 0
+weight[i] >= 0
+Calculate the maximum profit that the shopkeeper can make given maximum weight that can
+be carried.
+"""
+
+
+def knapsack(
+ weights: list, values: list, number_of_items: int, max_weight: int, index: int
+) -> int:
+ """
+ Function description is as follows-
+ :param weights: Take a list of weights
+ :param values: Take a list of profits corresponding to the weights
+ :param number_of_items: number of items available to pick from
+ :param max_weight: Maximum weight that could be carried
+ :param index: the element we are looking at
+ :return: Maximum expected gain
+ >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
+ 13
+ >>> knapsack([3 ,4 , 5], [10, 9 , 8], 3, 25, 0)
+ 27
+ """
+ if index == number_of_items:
+ return 0
+ ans1 = 0
+ ans2 = 0
+ ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
+ if weights[index] <= max_weight:
+ ans2 = values[index] + knapsack(
+ weights, values, number_of_items, max_weight - weights[index], index + 1
+ )
+ return max(ans1, ans2)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/linear_algebra/README.md b/linear_algebra/README.md
index dc6085090..35b50b5e0 100644
--- a/linear_algebra/README.md
+++ b/linear_algebra/README.md
@@ -10,56 +10,56 @@ This module contains classes and functions for doing linear algebra.
-
- This class represents a vector of arbitrary size and related operations.
- **Overview about the methods:**
+ **Overview of the methods:**
- - constructor(components : list) : init the vector
- - set(components : list) : changes the vector components.
+ - constructor(components) : init the vector
+ - set(components) : changes the vector components.
- \_\_str\_\_() : toString method
- - component(i : int): gets the i-th component (start by 0)
+ - component(i): gets the i-th component (0-indexed)
- \_\_len\_\_() : gets the size / length of the vector (number of components)
- - euclidLength() : returns the eulidean length of the vector.
+ - euclidean_length() : returns the euclidean length of the vector
- operator + : vector addition
- operator - : vector subtraction
- operator * : scalar multiplication and dot product
- - copy() : copies this vector and returns it.
- - changeComponent(pos,value) : changes the specified component.
+ - copy() : copies this vector and returns it
+ - change_component(pos,value) : changes the specified component
-- function zeroVector(dimension)
+- function zero_vector(dimension)
- returns a zero vector of 'dimension'
-- function unitBasisVector(dimension,pos)
- - returns a unit basis vector with a One at index 'pos' (indexing at 0)
-- function axpy(scalar,vector1,vector2)
+- function unit_basis_vector(dimension, pos)
+ - returns a unit basis vector with a one at index 'pos' (0-indexed)
+- function axpy(scalar, vector1, vector2)
- computes the axpy operation
-- function randomVector(N,a,b)
- - returns a random vector of size N, with random integer components between 'a' and 'b'.
+- function random_vector(N, a, b)
+ - returns a random vector of size N, with random integer components between 'a' and 'b' inclusive
### class Matrix
-
- This class represents a matrix of arbitrary size and operations on it.
- **Overview about the methods:**
+ **Overview of the methods:**
- \_\_str\_\_() : returns a string representation
- operator * : implements the matrix vector multiplication
implements the matrix-scalar multiplication.
- - changeComponent(x,y,value) : changes the specified component.
- - component(x,y) : returns the specified component.
+ - change_component(x, y, value) : changes the specified component.
+ - component(x, y) : returns the specified component.
- width() : returns the width of the matrix
- height() : returns the height of the matrix
- - determinate() : returns the determinate of the matrix if it is square
+ - determinant() : returns the determinant of the matrix if it is square
- operator + : implements the matrix-addition.
- - operator - _ implements the matrix-subtraction
+ - operator - : implements the matrix-subtraction
-- function squareZeroMatrix(N)
+- function square_zero_matrix(N)
- returns a square zero-matrix of dimension NxN
-- function randomMatrix(W,H,a,b)
- - returns a random matrix WxH with integer components between 'a' and 'b'
+- function random_matrix(W, H, a, b)
+ - returns a random matrix WxH with integer components between 'a' and 'b' inclusive
---
## Documentation
This module uses docstrings to enable the use of Python's in-built `help(...)` function.
-For instance, try `help(Vector)`, `help(unitBasisVector)`, and `help(CLASSNAME.METHODNAME)`.
+For instance, try `help(Vector)`, `help(unit_basis_vector)`, and `help(CLASSNAME.METHODNAME)`.
---
diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py
index 418ae88a5..4cf566ec9 100644
--- a/linear_algebra/src/conjugate_gradient.py
+++ b/linear_algebra/src/conjugate_gradient.py
@@ -115,7 +115,6 @@ def conjugate_gradient(
iterations = 0
while error > tol:
-
# Save this value so we only calculate the matrix-vector product once.
w = np.dot(spd_matrix, p0)
diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py
index 5e2f82018..e3556e74c 100644
--- a/linear_algebra/src/lib.py
+++ b/linear_algebra/src/lib.py
@@ -10,19 +10,20 @@ with linear algebra in python.
Overview:
- class Vector
-- function zeroVector(dimension)
-- function unitBasisVector(dimension,pos)
-- function axpy(scalar,vector1,vector2)
-- function randomVector(N,a,b)
+- function zero_vector(dimension)
+- function unit_basis_vector(dimension, pos)
+- function axpy(scalar, vector1, vector2)
+- function random_vector(N, a, b)
- class Matrix
-- function squareZeroMatrix(N)
-- function randomMatrix(W,H,a,b)
+- function square_zero_matrix(N)
+- function random_matrix(W, H, a, b)
"""
-
+from __future__ import annotations
import math
import random
-from typing import Collection, Optional, Union, overload
+from collections.abc import Collection
+from typing import overload
class Vector:
@@ -30,23 +31,24 @@ class Vector:
This class represents a vector of arbitrary size.
You need to give the vector components.
- Overview about the methods:
+ Overview of the methods:
- constructor(components : list) : init the vector
- set(components : list) : changes the vector components.
- __str__() : toString method
- component(i : int): gets the i-th component (start by 0)
- __len__() : gets the size of the vector (number of components)
- euclidLength() : returns the euclidean length of the vector.
- operator + : vector addition
- operator - : vector subtraction
- operator * : scalar multiplication and dot product
- copy() : copies this vector and returns it.
- changeComponent(pos,value) : changes the specified component.
- TODO: compare-operator
+ __init__(components: Collection[float] | None): init the vector
+ __len__(): gets the size of the vector (number of components)
+ __str__(): returns a string representation
+ __add__(other: Vector): vector addition
+ __sub__(other: Vector): vector subtraction
+ __mul__(other: float): scalar multiplication
+ __mul__(other: Vector): dot product
+ copy(): copies this vector and returns it
+ component(i): gets the i-th component (0-indexed)
+ change_component(pos: int, value: float): changes specified component
+ euclidean_length(): returns the euclidean length of the vector
+ angle(other: Vector, deg: bool): returns the angle between two vectors
+ TODO: compare-operator
"""
- def __init__(self, components: Optional[Collection[float]] = None) -> None:
+ def __init__(self, components: Collection[float] | None = None) -> None:
"""
input: components or nothing
simple constructor for init the vector
@@ -55,16 +57,11 @@ class Vector:
components = []
self.__components = list(components)
- def set(self, components: Collection[float]) -> None:
+ def __len__(self) -> int:
"""
- input: new components
- changes the components of the vector.
- replace the components with newer one.
+ returns the size of the vector
"""
- if len(components) > 0:
- self.__components = list(components)
- else:
- raise Exception("please give any vector")
+ return len(self.__components)
def __str__(self) -> str:
"""
@@ -72,32 +69,7 @@ class Vector:
"""
return "(" + ",".join(map(str, self.__components)) + ")"
- def component(self, i: int) -> float:
- """
- input: index (start at 0)
- output: the i-th component of the vector.
- """
- if type(i) is int and -len(self.__components) <= i < len(self.__components):
- return self.__components[i]
- else:
- raise Exception("index out of range")
-
- def __len__(self) -> int:
- """
- returns the size of the vector
- """
- return len(self.__components)
-
- def euclidLength(self) -> float:
- """
- returns the euclidean length of the vector
- """
- summe: float = 0
- for c in self.__components:
- summe += c ** 2
- return math.sqrt(summe)
-
- def __add__(self, other: "Vector") -> "Vector":
+ def __add__(self, other: Vector) -> Vector:
"""
input: other vector
assumes: other vector has the same size
@@ -110,7 +82,7 @@ class Vector:
else:
raise Exception("must have the same size")
- def __sub__(self, other: "Vector") -> "Vector":
+ def __sub__(self, other: Vector) -> Vector:
"""
input: other vector
assumes: other vector has the same size
@@ -124,37 +96,45 @@ class Vector:
raise Exception("must have the same size")
@overload
- def __mul__(self, other: float) -> "Vector":
+ def __mul__(self, other: float) -> Vector:
...
@overload
- def __mul__(self, other: "Vector") -> float:
+ def __mul__(self, other: Vector) -> float:
...
- def __mul__(self, other: Union[float, "Vector"]) -> Union[float, "Vector"]:
+ def __mul__(self, other: float | Vector) -> float | Vector:
"""
mul implements the scalar multiplication
and the dot-product
"""
- if isinstance(other, float) or isinstance(other, int):
+ if isinstance(other, (float, int)):
ans = [c * other for c in self.__components]
return Vector(ans)
- elif isinstance(other, Vector) and (len(self) == len(other)):
+ elif isinstance(other, Vector) and len(self) == len(other):
size = len(self)
- summe: float = 0
- for i in range(size):
- summe += self.__components[i] * other.component(i)
- return summe
+ prods = [self.__components[i] * other.component(i) for i in range(size)]
+ return sum(prods)
else: # error case
raise Exception("invalid operand!")
- def copy(self) -> "Vector":
+ def copy(self) -> Vector:
"""
copies this vector and returns it.
"""
return Vector(self.__components)
- def changeComponent(self, pos: int, value: float) -> None:
+ def component(self, i: int) -> float:
+ """
+ input: index (0-indexed)
+ output: the i-th component of the vector.
+ """
+ if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
+ return self.__components[i]
+ else:
+ raise Exception("index out of range")
+
+ def change_component(self, pos: int, value: float) -> None:
"""
input: an index (pos) and a value
changes the specified component (pos) with the
@@ -164,8 +144,48 @@ class Vector:
assert -len(self.__components) <= pos < len(self.__components)
self.__components[pos] = value
+ def euclidean_length(self) -> float:
+ """
+ returns the euclidean length of the vector
-def zeroVector(dimension: int) -> Vector:
+ >>> Vector([2, 3, 4]).euclidean_length()
+ 5.385164807134504
+ >>> Vector([1]).euclidean_length()
+ 1.0
+ >>> Vector([0, -1, -2, -3, 4, 5, 6]).euclidean_length()
+ 9.539392014169456
+ >>> Vector([]).euclidean_length()
+ Traceback (most recent call last):
+ ...
+ Exception: Vector is empty
+ """
+ if len(self.__components) == 0:
+ raise Exception("Vector is empty")
+ squares = [c**2 for c in self.__components]
+ return math.sqrt(sum(squares))
+
+ def angle(self, other: Vector, deg: bool = False) -> float:
+ """
+        finds the angle between two vectors (self and other)
+
+ >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]))
+ 1.4906464636572374
+ >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]), deg = True)
+ 85.40775111366095
+ >>> Vector([3, 4, -1]).angle(Vector([2, -1]))
+ Traceback (most recent call last):
+ ...
+ Exception: invalid operand!
+ """
+ num = self * other
+ den = self.euclidean_length() * other.euclidean_length()
+ if deg:
+ return math.degrees(math.acos(num / den))
+ else:
+ return math.acos(num / den)
+
+
+def zero_vector(dimension: int) -> Vector:
"""
returns a zero-vector of size 'dimension'
"""
@@ -174,7 +194,7 @@ def zeroVector(dimension: int) -> Vector:
return Vector([0] * dimension)
-def unitBasisVector(dimension: int, pos: int) -> Vector:
+def unit_basis_vector(dimension: int, pos: int) -> Vector:
"""
returns a unit basis vector with a One
at index 'pos' (indexing at 0)
@@ -195,13 +215,13 @@ def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
# precondition
assert (
isinstance(x, Vector)
- and (isinstance(y, Vector))
- and (isinstance(scalar, int) or isinstance(scalar, float))
+ and isinstance(y, Vector)
+ and (isinstance(scalar, (int, float)))
)
return x * scalar + y
-def randomVector(N: int, a: int, b: int) -> Vector:
+def random_vector(n: int, a: int, b: int) -> Vector:
"""
input: size (N) of the vector.
random range (a,b)
@@ -209,32 +229,35 @@ def randomVector(N: int, a: int, b: int) -> Vector:
random integer components between 'a' and 'b'.
"""
random.seed(None)
- ans = [random.randint(a, b) for _ in range(N)]
+ ans = [random.randint(a, b) for _ in range(n)]
return Vector(ans)
class Matrix:
"""
class: Matrix
- This class represents a arbitrary matrix.
+ This class represents an arbitrary matrix.
- Overview about the methods:
+ Overview of the methods:
- __str__() : returns a string representation
- operator * : implements the matrix vector multiplication
- implements the matrix-scalar multiplication.
- changeComponent(x,y,value) : changes the specified component.
- component(x,y) : returns the specified component.
- width() : returns the width of the matrix
- height() : returns the height of the matrix
- operator + : implements the matrix-addition.
- operator - _ implements the matrix-subtraction
+ __init__():
+ __str__(): returns a string representation
+ __add__(other: Matrix): matrix addition
+ __sub__(other: Matrix): matrix subtraction
+ __mul__(other: float): scalar multiplication
+ __mul__(other: Vector): vector multiplication
+ height() : returns height
+ width() : returns width
+ component(x: int, y: int): returns specified component
+ change_component(x: int, y: int, value: float): changes specified component
+ minor(x: int, y: int): returns minor along (x, y)
+ cofactor(x: int, y: int): returns cofactor along (x, y)
+ determinant() : returns determinant
"""
def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
"""
- simple constructor for initializing
- the matrix with components.
+ simple constructor for initializing the matrix with components.
"""
self.__matrix = matrix
self.__width = w
@@ -242,8 +265,7 @@ class Matrix:
def __str__(self) -> str:
"""
- returns a string representation of this
- matrix.
+ returns a string representation of this matrix.
"""
ans = ""
for i in range(self.__height):
@@ -255,14 +277,85 @@ class Matrix:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
- def changeComponent(self, x: int, y: int, value: float) -> None:
+ def __add__(self, other: Matrix) -> Matrix:
"""
- changes the x-y component of this matrix
+ implements matrix addition.
"""
- if 0 <= x < self.__height and 0 <= y < self.__width:
- self.__matrix[x][y] = value
+ if self.__width == other.width() and self.__height == other.height():
+ matrix = []
+ for i in range(self.__height):
+ row = [
+ self.__matrix[i][j] + other.component(i, j)
+ for j in range(self.__width)
+ ]
+ matrix.append(row)
+ return Matrix(matrix, self.__width, self.__height)
else:
- raise Exception("changeComponent: indices out of bounds")
+ raise Exception("matrix must have the same dimension!")
+
+ def __sub__(self, other: Matrix) -> Matrix:
+ """
+ implements matrix subtraction.
+ """
+ if self.__width == other.width() and self.__height == other.height():
+ matrix = []
+ for i in range(self.__height):
+ row = [
+ self.__matrix[i][j] - other.component(i, j)
+ for j in range(self.__width)
+ ]
+ matrix.append(row)
+ return Matrix(matrix, self.__width, self.__height)
+ else:
+ raise Exception("matrices must have the same dimension!")
+
+ @overload
+ def __mul__(self, other: float) -> Matrix:
+ ...
+
+ @overload
+ def __mul__(self, other: Vector) -> Vector:
+ ...
+
+ def __mul__(self, other: float | Vector) -> Vector | Matrix:
+ """
+ implements the matrix-vector multiplication.
+ implements the matrix-scalar multiplication
+ """
+ if isinstance(other, Vector): # matrix-vector
+ if len(other) == self.__width:
+ ans = zero_vector(self.__height)
+ for i in range(self.__height):
+ prods = [
+ self.__matrix[i][j] * other.component(j)
+ for j in range(self.__width)
+ ]
+ ans.change_component(i, sum(prods))
+ return ans
+ else:
+ raise Exception(
+ "vector must have the same size as the "
+ "number of columns of the matrix!"
+ )
+ elif isinstance(other, (int, float)): # matrix-scalar
+ matrix = [
+ [self.__matrix[i][j] * other for j in range(self.__width)]
+ for i in range(self.__height)
+ ]
+ return Matrix(matrix, self.__width, self.__height)
+ return None
+
+ def height(self) -> int:
+ """
+ getter for the height
+ """
+ return self.__height
+
+ def width(self) -> int:
+ """
+ getter for the width
+ """
+ return self.__width
def component(self, x: int, y: int) -> float:
"""
@@ -271,128 +364,76 @@ class Matrix:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
- raise Exception("changeComponent: indices out of bounds")
+ raise Exception("change_component: indices out of bounds")
- def width(self) -> int:
+ def change_component(self, x: int, y: int, value: float) -> None:
"""
- getter for the width
+ changes the x-y component of this matrix
"""
- return self.__width
-
- def height(self) -> int:
- """
- getter for the height
- """
- return self.__height
-
- def determinate(self) -> float:
- """
- returns the determinate of an nxn matrix using Laplace expansion
- """
- if self.__height == self.__width and self.__width >= 2:
- total = 0
- if self.__width > 2:
- for x in range(0, self.__width):
- for y in range(0, self.__height):
- total += (
- self.__matrix[x][y]
- * (-1) ** (x + y)
- * Matrix(
- self.__matrix[0:x] + self.__matrix[x + 1 :],
- self.__width - 1,
- self.__height - 1,
- ).determinate()
- )
- else:
- return (
- self.__matrix[0][0] * self.__matrix[1][1]
- - self.__matrix[0][1] * self.__matrix[1][0]
- )
- return total
+ if 0 <= x < self.__height and 0 <= y < self.__width:
+ self.__matrix[x][y] = value
else:
- raise Exception("matrix is not square")
+ raise Exception("change_component: indices out of bounds")
- @overload
- def __mul__(self, other: float) -> "Matrix":
- ...
-
- @overload
- def __mul__(self, other: Vector) -> Vector:
- ...
-
- def __mul__(self, other: Union[float, Vector]) -> Union[Vector, "Matrix"]:
+ def minor(self, x: int, y: int) -> float:
"""
- implements the matrix-vector multiplication.
- implements the matrix-scalar multiplication
+ returns the minor along (x, y)
"""
- if isinstance(other, Vector): # vector-matrix
- if len(other) == self.__width:
- ans = zeroVector(self.__height)
- for i in range(self.__height):
- summe: float = 0
- for j in range(self.__width):
- summe += other.component(j) * self.__matrix[i][j]
- ans.changeComponent(i, summe)
- summe = 0
- return ans
- else:
- raise Exception(
- "vector must have the same size as the "
- + "number of columns of the matrix!"
- )
- elif isinstance(other, int) or isinstance(other, float): # matrix-scalar
- matrix = [
- [self.__matrix[i][j] * other for j in range(self.__width)]
- for i in range(self.__height)
+ if self.__height != self.__width:
+ raise Exception("Matrix is not square")
+ minor = self.__matrix[:x] + self.__matrix[x + 1 :]
+ for i in range(len(minor)):
+ minor[i] = minor[i][:y] + minor[i][y + 1 :]
+ return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
+
+ def cofactor(self, x: int, y: int) -> float:
+ """
+ returns the cofactor (signed minor) along (x, y)
+ """
+ if self.__height != self.__width:
+ raise Exception("Matrix is not square")
+ if 0 <= x < self.__height and 0 <= y < self.__width:
+ return (-1) ** (x + y) * self.minor(x, y)
+ else:
+ raise Exception("Indices out of bounds")
+
+ def determinant(self) -> float:
+ """
+ returns the determinant of an nxn matrix using Laplace expansion
+ """
+ if self.__height != self.__width:
+ raise Exception("Matrix is not square")
+ if self.__height < 1:
+ raise Exception("Matrix has no element")
+ elif self.__height == 1:
+ return self.__matrix[0][0]
+ elif self.__height == 2:
+ return (
+ self.__matrix[0][0] * self.__matrix[1][1]
+ - self.__matrix[0][1] * self.__matrix[1][0]
+ )
+ else:
+ cofactor_prods = [
+ self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
]
- return Matrix(matrix, self.__width, self.__height)
-
- def __add__(self, other: "Matrix") -> "Matrix":
- """
- implements the matrix-addition.
- """
- if self.__width == other.width() and self.__height == other.height():
- matrix = []
- for i in range(self.__height):
- row = []
- for j in range(self.__width):
- row.append(self.__matrix[i][j] + other.component(i, j))
- matrix.append(row)
- return Matrix(matrix, self.__width, self.__height)
- else:
- raise Exception("matrix must have the same dimension!")
-
- def __sub__(self, other: "Matrix") -> "Matrix":
- """
- implements the matrix-subtraction.
- """
- if self.__width == other.width() and self.__height == other.height():
- matrix = []
- for i in range(self.__height):
- row = []
- for j in range(self.__width):
- row.append(self.__matrix[i][j] - other.component(i, j))
- matrix.append(row)
- return Matrix(matrix, self.__width, self.__height)
- else:
- raise Exception("matrix must have the same dimension!")
+ return sum(cofactor_prods)
-def squareZeroMatrix(N: int) -> Matrix:
+def square_zero_matrix(n: int) -> Matrix:
"""
returns a square zero-matrix of dimension NxN
"""
- ans: list[list[float]] = [[0] * N for _ in range(N)]
- return Matrix(ans, N, N)
+ ans: list[list[float]] = [[0] * n for _ in range(n)]
+ return Matrix(ans, n, n)
-def randomMatrix(W: int, H: int, a: int, b: int) -> Matrix:
+def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
"""
returns a random matrix WxH with integer components
between 'a' and 'b'
"""
random.seed(None)
matrix: list[list[float]] = [
- [random.randint(a, b) for _ in range(W)] for _ in range(H)
+ [random.randint(a, b) for _ in range(width)] for _ in range(height)
]
- return Matrix(matrix, W, H)
+ return Matrix(matrix, width, height)
diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py
index 091849542..f5e3db0cb 100644
--- a/linear_algebra/src/polynom_for_points.py
+++ b/linear_algebra/src/polynom_for_points.py
@@ -4,9 +4,13 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str:
number of points you want to use
>>> print(points_to_polynomial([]))
- The program cannot work out a fitting polynomial.
+ Traceback (most recent call last):
+ ...
+ ValueError: The program cannot work out a fitting polynomial.
>>> print(points_to_polynomial([[]]))
- The program cannot work out a fitting polynomial.
+ Traceback (most recent call last):
+ ...
+ ValueError: The program cannot work out a fitting polynomial.
>>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]]))
f(x)=x^2*0.0+x^1*-0.0+x^0*0.0
>>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]]))
@@ -24,96 +28,79 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str:
>>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]]))
f(x)=x^2*5.0+x^1*-18.0+x^0*18.0
"""
- try:
- check = 1
- more_check = 0
- d = coordinates[0][0]
- for j in range(len(coordinates)):
- if j == 0:
- continue
- if d == coordinates[j][0]:
- more_check += 1
- solved = "x=" + str(coordinates[j][0])
- if more_check == len(coordinates) - 1:
- check = 2
- break
- elif more_check > 0 and more_check != len(coordinates) - 1:
- check = 3
- else:
- check = 1
+ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates):
+ raise ValueError("The program cannot work out a fitting polynomial.")
- if len(coordinates) == 1 and coordinates[0][0] == 0:
- check = 2
- solved = "x=0"
- except Exception:
- check = 3
+ if len({tuple(pair) for pair in coordinates}) != len(coordinates):
+ raise ValueError("The program cannot work out a fitting polynomial.")
+
+ set_x = {x for x, _ in coordinates}
+ if len(set_x) == 1:
+ return f"x={coordinates[0][0]}"
+
+ if len(set_x) != len(coordinates):
+ raise ValueError("The program cannot work out a fitting polynomial.")
x = len(coordinates)
- if check == 1:
- count_of_line = 0
- matrix: list[list[float]] = []
- # put the x and x to the power values in a matrix
- while count_of_line < x:
- count_in_line = 0
- a = coordinates[count_of_line][0]
- count_line: list[float] = []
- while count_in_line < x:
- count_line.append(a ** (x - (count_in_line + 1)))
- count_in_line += 1
- matrix.append(count_line)
- count_of_line += 1
+ count_of_line = 0
+ matrix: list[list[float]] = []
+ # put the x and x to the power values in a matrix
+ while count_of_line < x:
+ count_in_line = 0
+ a = coordinates[count_of_line][0]
+ count_line: list[float] = []
+ while count_in_line < x:
+ count_line.append(a ** (x - (count_in_line + 1)))
+ count_in_line += 1
+ matrix.append(count_line)
+ count_of_line += 1
- count_of_line = 0
- # put the y values into a vector
- vector: list[float] = []
- while count_of_line < x:
- vector.append(coordinates[count_of_line][1])
- count_of_line += 1
+ count_of_line = 0
+ # put the y values into a vector
+ vector: list[float] = []
+ while count_of_line < x:
+ vector.append(coordinates[count_of_line][1])
+ count_of_line += 1
- count = 0
+ count = 0
- while count < x:
- zahlen = 0
- while zahlen < x:
- if count == zahlen:
- zahlen += 1
- if zahlen == x:
- break
- bruch = matrix[zahlen][count] / matrix[count][count]
- for counting_columns, item in enumerate(matrix[count]):
- # manipulating all the values in the matrix
- matrix[zahlen][counting_columns] -= item * bruch
- # manipulating the values in the vector
- vector[zahlen] -= vector[count] * bruch
+ while count < x:
+ zahlen = 0
+ while zahlen < x:
+ if count == zahlen:
zahlen += 1
- count += 1
+ if zahlen == x:
+ break
+ bruch = matrix[zahlen][count] / matrix[count][count]
+ for counting_columns, item in enumerate(matrix[count]):
+ # manipulating all the values in the matrix
+ matrix[zahlen][counting_columns] -= item * bruch
+ # manipulating the values in the vector
+ vector[zahlen] -= vector[count] * bruch
+ zahlen += 1
+ count += 1
- count = 0
- # make solutions
- solution: list[str] = []
- while count < x:
- solution.append(str(vector[count] / matrix[count][count]))
- count += 1
+ count = 0
+ # make solutions
+ solution: list[str] = []
+ while count < x:
+ solution.append(str(vector[count] / matrix[count][count]))
+ count += 1
- count = 0
- solved = "f(x)="
+ count = 0
+ solved = "f(x)="
- while count < x:
- remove_e: list[str] = solution[count].split("E")
- if len(remove_e) > 1:
- solution[count] = remove_e[0] + "*10^" + remove_e[1]
- solved += "x^" + str(x - (count + 1)) + "*" + str(solution[count])
- if count + 1 != x:
- solved += "+"
- count += 1
+ while count < x:
+ remove_e: list[str] = solution[count].split("E")
+ if len(remove_e) > 1:
+ solution[count] = f"{remove_e[0]}*10^{remove_e[1]}"
+ solved += f"x^{x - (count + 1)}*{solution[count]}"
+ if count + 1 != x:
+ solved += "+"
+ count += 1
- return solved
-
- elif check == 2:
- return solved
- else:
- return "The program cannot work out a fitting polynomial."
+ return solved
if __name__ == "__main__":
diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py
index 2cf22838e..24fbd9a5e 100644
--- a/linear_algebra/src/power_iteration.py
+++ b/linear_algebra/src/power_iteration.py
@@ -9,10 +9,10 @@ def power_iteration(
) -> tuple[float, np.ndarray]:
"""
Power Iteration.
- Find the largest eignevalue and corresponding eigenvector
+ Find the largest eigenvalue and corresponding eigenvector
of matrix input_matrix given a random vector in the same space.
Will work so long as vector has component of largest eigenvector.
- input_matrix must be symmetric.
+    input_matrix must be either real symmetric or complex Hermitian.
Input
input_matrix: input matrix whose largest eigenvalue we will find.
@@ -41,12 +41,18 @@ def power_iteration(
assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
# Ensure proper dimensionality.
assert np.shape(input_matrix)[0] == np.shape(vector)[0]
+ # Ensure inputs are either both complex or both real
+ assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
+ is_complex = np.iscomplexobj(input_matrix)
+ if is_complex:
+ # Ensure complex input_matrix is Hermitian
+ assert np.array_equal(input_matrix, input_matrix.conj().T)
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
convergence = False
- lamda_previous = 0
+ lambda_previous = 0
iterations = 0
error = 1e12
@@ -57,44 +63,62 @@ def power_iteration(
vector = w / np.linalg.norm(w)
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
- lamda = np.dot(vector.T, np.dot(input_matrix, vector))
+ vector_h = vector.conj().T if is_complex else vector.T
+ lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
# Check convergence.
- error = np.abs(lamda - lamda_previous) / lamda
+ error = np.abs(lambda_ - lambda_previous) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
convergence = True
- lamda_previous = lamda
+ lambda_previous = lambda_
- return lamda, vector
+ if is_complex:
+ lambda_ = np.real(lambda_)
+
+ return lambda_, vector
def test_power_iteration() -> None:
"""
>>> test_power_iteration() # self running tests
"""
- # Our implementation.
- input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
- vector = np.array([41, 4, 20])
- eigen_value, eigen_vector = power_iteration(input_matrix, vector)
+ real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
+ real_vector = np.array([41, 4, 20])
+ complex_input_matrix = real_input_matrix.astype(np.complex128)
+ imag_matrix = np.triu(1j * complex_input_matrix, 1)
+ complex_input_matrix += imag_matrix
+ complex_input_matrix += -1 * imag_matrix.T
+ complex_vector = np.array([41, 4, 20]).astype(np.complex128)
- # Numpy implementation.
+ for problem_type in ["real", "complex"]:
+ if problem_type == "real":
+ input_matrix = real_input_matrix
+ vector = real_vector
+ elif problem_type == "complex":
+ input_matrix = complex_input_matrix
+ vector = complex_vector
- # Get eigen values and eigen vectors using built in numpy
- # eigh (eigh used for symmetric or hermetian matrices).
- eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
- # Last eigen value is the maximum one.
- eigen_value_max = eigen_values[-1]
- # Last column in this matrix is eigen vector corresponding to largest eigen value.
- eigen_vector_max = eigen_vectors[:, -1]
+ # Our implementation.
+ eigen_value, eigen_vector = power_iteration(input_matrix, vector)
- # Check our implementation and numpy gives close answers.
- assert np.abs(eigen_value - eigen_value_max) <= 1e-6
- # Take absolute values element wise of each eigenvector.
- # as they are only unique to a minus sign.
- assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
+ # Numpy implementation.
+
+ # Get eigenvalues and eigenvectors using built-in numpy
+        # eigh (eigh used for symmetric or hermitian matrices).
+ eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
+ # Last eigenvalue is the maximum one.
+ eigen_value_max = eigen_values[-1]
+ # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
+ eigen_vector_max = eigen_vectors[:, -1]
+
+ # Check our implementation and numpy gives close answers.
+ assert np.abs(eigen_value - eigen_value_max) <= 1e-6
+ # Take absolute values element wise of each eigenvector.
+ # as they are only unique to a minus sign.
+ assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
diff --git a/linear_algebra/src/rank_of_matrix.py b/linear_algebra/src/rank_of_matrix.py
new file mode 100644
index 000000000..7ff3c1699
--- /dev/null
+++ b/linear_algebra/src/rank_of_matrix.py
@@ -0,0 +1,89 @@
+"""
+Calculate the rank of a matrix.
+
+See: https://en.wikipedia.org/wiki/Rank_(linear_algebra)
+"""
+
+
+def rank_of_matrix(matrix: list[list[int | float]]) -> int:
+ """
+ Finds the rank of a matrix.
+ Args:
+ matrix: The matrix as a list of lists.
+ Returns:
+ The rank of the matrix.
+ Example:
+ >>> matrix1 = [[1, 2, 3],
+ ... [4, 5, 6],
+ ... [7, 8, 9]]
+ >>> rank_of_matrix(matrix1)
+ 2
+ >>> matrix2 = [[1, 0, 0],
+ ... [0, 1, 0],
+ ... [0, 0, 0]]
+ >>> rank_of_matrix(matrix2)
+ 2
+ >>> matrix3 = [[1, 2, 3, 4],
+ ... [5, 6, 7, 8],
+ ... [9, 10, 11, 12]]
+ >>> rank_of_matrix(matrix3)
+ 2
+ >>> rank_of_matrix([[2,3,-1,-1],
+ ... [1,-1,-2,4],
+ ... [3,1,3,-2],
+ ... [6,3,0,-7]])
+ 4
+ >>> rank_of_matrix([[2,1,-3,-6],
+ ... [3,-3,1,2],
+ ... [1,1,1,2]])
+ 3
+ >>> rank_of_matrix([[2,-1,0],
+ ... [1,3,4],
+ ... [4,1,-3]])
+ 3
+ >>> rank_of_matrix([[3,2,1],
+ ... [-6,-4,-2]])
+ 1
+ >>> rank_of_matrix([[],[]])
+ 0
+ >>> rank_of_matrix([[1]])
+ 1
+ >>> rank_of_matrix([[]])
+ 0
+ """
+
+ rows = len(matrix)
+ columns = len(matrix[0])
+ rank = min(rows, columns)
+
+ for row in range(rank):
+ # Check if diagonal element is not zero
+ if matrix[row][row] != 0:
+ # Eliminate all the elements below the diagonal
+ for col in range(row + 1, rows):
+ multiplier = matrix[col][row] / matrix[row][row]
+ for i in range(row, columns):
+ matrix[col][i] -= multiplier * matrix[row][i]
+ else:
+ # Find a non-zero diagonal element to swap rows
+ reduce = True
+ for i in range(row + 1, rows):
+ if matrix[i][row] != 0:
+ matrix[row], matrix[i] = matrix[i], matrix[row]
+ reduce = False
+ break
+ if reduce:
+ rank -= 1
+ for i in range(rows):
+ matrix[i][row] = matrix[i][rank]
+
+            # NOTE(review): reassigning the for-range loop variable does not repeat the iteration — confirm intended behavior
+ row -= 1
+
+ return rank
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py
index 78083aa75..4773429cb 100644
--- a/linear_algebra/src/rayleigh_quotient.py
+++ b/linear_algebra/src/rayleigh_quotient.py
@@ -26,7 +26,7 @@ def is_hermitian(matrix: np.ndarray) -> bool:
return np.array_equal(matrix, matrix.conjugate().T)
-def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any:
+def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
"""
Returns the Rayleigh quotient of a Hermitian matrix A and
vector v.
@@ -45,20 +45,20 @@ def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any:
array([[3.]])
"""
v_star = v.conjugate().T
- v_star_dot = v_star.dot(A)
+ v_star_dot = v_star.dot(a)
assert isinstance(v_star_dot, np.ndarray)
return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
- A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
+ a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
- assert is_hermitian(A), f"{A} is not hermitian."
- print(rayleigh_quotient(A, v))
+ assert is_hermitian(a), f"{a} is not hermitian."
+ print(rayleigh_quotient(a, v))
- A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
- assert is_hermitian(A), f"{A} is not hermitian."
- assert rayleigh_quotient(A, v) == float(3)
+ a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
+ assert is_hermitian(a), f"{a} is not hermitian."
+ assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py
new file mode 100644
index 000000000..750f4de5e
--- /dev/null
+++ b/linear_algebra/src/schur_complement.py
@@ -0,0 +1,96 @@
+import unittest
+
+import numpy as np
+
+
+def schur_complement(
+ mat_a: np.ndarray,
+ mat_b: np.ndarray,
+ mat_c: np.ndarray,
+ pseudo_inv: np.ndarray | None = None,
+) -> np.ndarray:
+ """
+ Schur complement of a symmetric matrix X given as a 2x2 block matrix
+ consisting of matrices A, B and C.
+ Matrix A must be quadratic and non-singular.
+ In case A is singular, a pseudo-inverse may be provided using
+ the pseudo_inv argument.
+
+ Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement
+ See also Convex Optimization – Boyd and Vandenberghe, A.5.5
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [2, 1]])
+ >>> b = np.array([[0, 3], [3, 0]])
+ >>> c = np.array([[2, 1], [6, 3]])
+ >>> schur_complement(a, b, c)
+ array([[ 5., -5.],
+ [ 0., 6.]])
+ """
+ shape_a = np.shape(mat_a)
+ shape_b = np.shape(mat_b)
+ shape_c = np.shape(mat_c)
+
+ if shape_a[0] != shape_b[0]:
+ msg = (
+ "Expected the same number of rows for A and B. "
+ f"Instead found A of size {shape_a} and B of size {shape_b}"
+ )
+ raise ValueError(msg)
+
+ if shape_b[1] != shape_c[1]:
+ msg = (
+ "Expected the same number of columns for B and C. "
+ f"Instead found B of size {shape_b} and C of size {shape_c}"
+ )
+ raise ValueError(msg)
+
+ a_inv = pseudo_inv
+ if a_inv is None:
+ try:
+ a_inv = np.linalg.inv(mat_a)
+ except np.linalg.LinAlgError:
+ raise ValueError(
+ "Input matrix A is not invertible. Cannot compute Schur complement."
+ )
+
+ return mat_c - mat_b.T @ a_inv @ mat_b
+
+
+class TestSchurComplement(unittest.TestCase):
+ def test_schur_complement(self) -> None:
+ a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
+ b = np.array([[0, 3], [3, 0], [2, 3]])
+ c = np.array([[2, 1], [6, 3]])
+
+ s = schur_complement(a, b, c)
+
+ input_matrix = np.block([[a, b], [b.T, c]])
+
+ det_x = np.linalg.det(input_matrix)
+ det_a = np.linalg.det(a)
+ det_s = np.linalg.det(s)
+
+ self.assertAlmostEqual(det_x, det_a * det_s)
+
+ def test_improper_a_b_dimensions(self) -> None:
+ a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
+ b = np.array([[0, 3], [3, 0], [2, 3]])
+ c = np.array([[2, 1], [6, 3]])
+
+ with self.assertRaises(ValueError):
+ schur_complement(a, b, c)
+
+ def test_improper_b_c_dimensions(self) -> None:
+ a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
+ b = np.array([[0, 3], [3, 0], [2, 3]])
+ c = np.array([[2, 1, 3], [6, 3, 5]])
+
+ with self.assertRaises(ValueError):
+ schur_complement(a, b, c)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ unittest.main()
diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py
index 0954a2d93..50d079572 100644
--- a/linear_algebra/src/test_linear_algebra.py
+++ b/linear_algebra/src/test_linear_algebra.py
@@ -8,13 +8,20 @@ This file contains the test-suite for the linear algebra library.
"""
import unittest
-from .lib import Matrix, Vector, axpy, squareZeroMatrix, unitBasisVector, zeroVector
+from .lib import (
+ Matrix,
+ Vector,
+ axpy,
+ square_zero_matrix,
+ unit_basis_vector,
+ zero_vector,
+)
class Test(unittest.TestCase):
def test_component(self) -> None:
"""
- test for method component
+ test for method component()
"""
x = Vector([1, 2, 3])
self.assertEqual(x.component(0), 1)
@@ -23,24 +30,30 @@ class Test(unittest.TestCase):
def test_str(self) -> None:
"""
- test for toString() method
+        test for method __str__()
"""
x = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(x), "(0,0,0,0,0,1)")
def test_size(self) -> None:
"""
- test for size()-method
+ test for method size()
"""
x = Vector([1, 2, 3, 4])
self.assertEqual(len(x), 4)
- def test_euclidLength(self) -> None:
+ def test_euclidean_length(self) -> None:
"""
- test for the eulidean length
+ test for method euclidean_length()
"""
x = Vector([1, 2])
- self.assertAlmostEqual(x.euclidLength(), 2.236, 3)
+ y = Vector([1, 2, 3, 4, 5])
+ z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
+ self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
+ self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
+ self.assertEqual(z.euclidean_length(), 0)
+ self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
def test_add(self) -> None:
"""
@@ -67,26 +80,26 @@ class Test(unittest.TestCase):
test for * operator
"""
x = Vector([1, 2, 3])
- a = Vector([2, -1, 4]) # for test of dot-product
+ a = Vector([2, -1, 4]) # for test of dot product
b = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
self.assertEqual((a * b), 0)
- def test_zeroVector(self) -> None:
+ def test_zero_vector(self) -> None:
"""
- test for the global function zeroVector(...)
+ test for global function zero_vector()
"""
- self.assertTrue(str(zeroVector(10)).count("0") == 10)
+ self.assertEqual(str(zero_vector(10)).count("0"), 10)
- def test_unitBasisVector(self) -> None:
+ def test_unit_basis_vector(self) -> None:
"""
- test for the global function unitBasisVector(...)
+ test for global function unit_basis_vector()
"""
- self.assertEqual(str(unitBasisVector(3, 1)), "(0,1,0)")
+ self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
def test_axpy(self) -> None:
"""
- test for the global function axpy(...) (operation)
+ test for global function axpy() (operation)
"""
x = Vector([1, 2, 3])
y = Vector([1, 0, 1])
@@ -94,61 +107,102 @@ class Test(unittest.TestCase):
def test_copy(self) -> None:
"""
- test for the copy()-method
+ test for method copy()
"""
x = Vector([1, 0, 0, 0, 0, 0])
y = x.copy()
self.assertEqual(str(x), str(y))
- def test_changeComponent(self) -> None:
+ def test_change_component(self) -> None:
"""
- test for the changeComponent(...)-method
+ test for method change_component()
"""
x = Vector([1, 0, 0])
- x.changeComponent(0, 0)
- x.changeComponent(1, 1)
+ x.change_component(0, 0)
+ x.change_component(1, 1)
self.assertEqual(str(x), "(0,1,0)")
def test_str_matrix(self) -> None:
- A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
- self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A))
+ """
+ test for Matrix method str()
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
- def test_determinate(self) -> None:
+ def test_minor(self) -> None:
"""
- test for determinate()
+ test for Matrix method minor()
"""
- A = Matrix([[1, 1, 4, 5], [3, 3, 3, 2], [5, 1, 9, 0], [9, 7, 7, 9]], 4, 4)
- self.assertEqual(-376, A.determinate())
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
+ for x in range(a.height()):
+ for y in range(a.width()):
+ self.assertEqual(minors[x][y], a.minor(x, y))
+
+ def test_cofactor(self) -> None:
+ """
+ test for Matrix method cofactor()
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
+ for x in range(a.height()):
+ for y in range(a.width()):
+ self.assertEqual(cofactors[x][y], a.cofactor(x, y))
+
+ def test_determinant(self) -> None:
+ """
+ test for Matrix method determinant()
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ self.assertEqual(-5, a.determinant())
def test__mul__matrix(self) -> None:
- A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
+ """
+ test for Matrix * operator
+ """
+ a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
x = Vector([1, 2, 3])
- self.assertEqual("(14,32,50)", str(A * x))
- self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2))
+ self.assertEqual("(14,32,50)", str(a * x))
+ self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
- def test_changeComponent_matrix(self) -> None:
- A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
- A.changeComponent(0, 2, 5)
- self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A))
+ def test_change_component_matrix(self) -> None:
+ """
+ test for Matrix method change_component()
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ a.change_component(0, 2, 5)
+ self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
def test_component_matrix(self) -> None:
- A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
- self.assertEqual(7, A.component(2, 1), 0.01)
+ """
+ test for Matrix method component()
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ self.assertEqual(7, a.component(2, 1), 0.01)
def test__add__matrix(self) -> None:
- A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
- B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
- self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B))
+ """
+ test for Matrix + operator
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
+ self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
def test__sub__matrix(self) -> None:
- A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
- B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
- self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B))
+ """
+ test for Matrix - operator
+ """
+ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
+ b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
+ self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
- def test_squareZeroMatrix(self) -> None:
+ def test_square_zero_matrix(self) -> None:
+ """
+ test for global function square_zero_matrix()
+ """
self.assertEqual(
- "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|" + "\n|0,0,0,0,0|\n",
- str(squareZeroMatrix(5)),
+ "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
+ str(square_zero_matrix(5)),
)
diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py
new file mode 100644
index 000000000..ba64add40
--- /dev/null
+++ b/linear_programming/simplex.py
@@ -0,0 +1,311 @@
+"""
+Python implementation of the simplex algorithm for solving linear programs in
+tabular form with
+- `>=`, `<=`, and `=` constraints and
+- each variable `x1, x2, ...>= 0`.
+
+See https://gist.github.com/imengus/f9619a568f7da5bc74eaf20169a24d98 for how to
+convert linear programs to simplex tableaus, and the steps taken in the simplex
+algorithm.
+
+Resources:
+https://en.wikipedia.org/wiki/Simplex_algorithm
+https://tinyurl.com/simplex4beginners
+"""
+from typing import Any
+
+import numpy as np
+
+
+class Tableau:
+ """Operate on simplex tableaus
+
+ >>> t = Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2)
+ Traceback (most recent call last):
+ ...
+ ValueError: RHS must be > 0
+ """
+
+ def __init__(self, tableau: np.ndarray, n_vars: int) -> None:
+ # Check if RHS is negative
+ if np.any(tableau[:, -1], where=tableau[:, -1] < 0):
+ raise ValueError("RHS must be > 0")
+
+ self.tableau = tableau
+ self.n_rows, _ = tableau.shape
+
+ # Number of decision variables x1, x2, x3...
+ self.n_vars = n_vars
+
+ # Number of artificial variables to be minimised
+ self.n_art_vars = len(np.where(tableau[self.n_vars : -1] == -1)[0])
+
+ # 2 if there are >= or == constraints (nonstandard), 1 otherwise (std)
+ self.n_stages = (self.n_art_vars > 0) + 1
+
+ # Number of slack variables added to make inequalities into equalities
+ self.n_slack = self.n_rows - self.n_stages
+
+ # Objectives for each stage
+ self.objectives = ["max"]
+
+ # In two stage simplex, first minimise then maximise
+ if self.n_art_vars:
+ self.objectives.append("min")
+
+ self.col_titles = [""]
+
+ # Index of current pivot row and column
+ self.row_idx = None
+ self.col_idx = None
+
+ # Does objective row only contain (non)-negative values?
+ self.stop_iter = False
+
+ @staticmethod
+ def generate_col_titles(*args: int) -> list[str]:
+ """Generate column titles for tableau of specific dimensions
+
+ >>> Tableau.generate_col_titles(2, 3, 1)
+ ['x1', 'x2', 's1', 's2', 's3', 'a1', 'RHS']
+
+ >>> Tableau.generate_col_titles()
+ Traceback (most recent call last):
+ ...
+ ValueError: Must provide n_vars, n_slack, and n_art_vars
+ >>> Tableau.generate_col_titles(-2, 3, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: All arguments must be non-negative integers
+ """
+ if len(args) != 3:
+ raise ValueError("Must provide n_vars, n_slack, and n_art_vars")
+
+ if not all(x >= 0 and isinstance(x, int) for x in args):
+ raise ValueError("All arguments must be non-negative integers")
+
+ # decision | slack | artificial
+ string_starts = ["x", "s", "a"]
+ titles = []
+ for i in range(3):
+ for j in range(args[i]):
+ titles.append(string_starts[i] + str(j + 1))
+ titles.append("RHS")
+ return titles
+
+ def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]:
+ """Finds the pivot row and column.
+ >>> t = Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), 2)
+ >>> t.find_pivot(t.tableau)
+ (1, 0)
+ """
+ objective = self.objectives[-1]
+
+ # Find entries of highest magnitude in objective rows
+ sign = (objective == "min") - (objective == "max")
+ col_idx = np.argmax(sign * tableau[0, : self.n_vars])
+
+ # Choice is only valid if below 0 for maximise, and above for minimise
+ if sign * self.tableau[0, col_idx] <= 0:
+ self.stop_iter = True
+ return 0, 0
+
+ # Pivot row is chosen as having the lowest quotient when elements of
+ # the pivot column divide the right-hand side
+
+ # Slice excluding the objective rows
+ s = slice(self.n_stages, self.n_rows)
+
+ # RHS
+ dividend = tableau[s, -1]
+
+ # Elements of pivot column within slice
+ divisor = tableau[s, col_idx]
+
+ # Array filled with nans
+ nans = np.full(self.n_rows - self.n_stages, np.nan)
+
+        # If element in pivot column is greater than zero, return
+ # quotient or nan otherwise
+ quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0)
+
+ # Arg of minimum quotient excluding the nan values. n_stages is added
+ # to compensate for earlier exclusion of objective columns
+ row_idx = np.nanargmin(quotients) + self.n_stages
+ return row_idx, col_idx
+
+ def pivot(self, tableau: np.ndarray, row_idx: int, col_idx: int) -> np.ndarray:
+ """Pivots on value on the intersection of pivot row and column.
+
+ >>> t = Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), 2)
+ >>> t.pivot(t.tableau, 1, 0).tolist()
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [[0.0, 3.0, 2.0, 0.0, 8.0],
+ [1.0, 3.0, 1.0, 0.0, 4.0],
+ [0.0, -8.0, -3.0, 1.0, -8.0]]
+ """
+ # Avoid changes to original tableau
+ piv_row = tableau[row_idx].copy()
+
+ piv_val = piv_row[col_idx]
+
+ # Entry becomes 1
+ piv_row *= 1 / piv_val
+
+ # Variable in pivot column becomes basic, ie the only non-zero entry
+ for idx, coeff in enumerate(tableau[:, col_idx]):
+ tableau[idx] += -coeff * piv_row
+ tableau[row_idx] = piv_row
+ return tableau
+
+ def change_stage(self, tableau: np.ndarray) -> np.ndarray:
+ """Exits first phase of the two-stage method by deleting artificial
+ rows and columns, or completes the algorithm if exiting the standard
+ case.
+
+ >>> t = Tableau(np.array([
+ ... [3, 3, -1, -1, 0, 0, 4],
+ ... [2, 1, 0, 0, 0, 0, 0.],
+ ... [1, 2, -1, 0, 1, 0, 2],
+ ... [2, 1, 0, -1, 0, 1, 2]
+ ... ]), 2)
+ >>> t.change_stage(t.tableau).tolist()
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [[2.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ [1.0, 2.0, -1.0, 0.0, 1.0, 2.0],
+ [2.0, 1.0, 0.0, -1.0, 0.0, 2.0]]
+ """
+ # Objective of original objective row remains
+ self.objectives.pop()
+
+ if not self.objectives:
+ return tableau
+
+ # Slice containing ids for artificial columns
+ s = slice(-self.n_art_vars - 1, -1)
+
+ # Delete the artificial variable columns
+ tableau = np.delete(tableau, s, axis=1)
+
+ # Delete the objective row of the first stage
+ tableau = np.delete(tableau, 0, axis=0)
+
+ self.n_stages = 1
+ self.n_rows -= 1
+ self.n_art_vars = 0
+ self.stop_iter = False
+ return tableau
+
+ def run_simplex(self) -> dict[Any, Any]:
+ """Operate on tableau until objective function cannot be
+ improved further.
+
+ # Standard linear program:
+ Max: x1 + x2
+ ST: x1 + 3x2 <= 4
+ 3x1 + x2 <= 4
+ >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]),
+ ... 2).run_simplex()
+ {'P': 2.0, 'x1': 1.0, 'x2': 1.0}
+
+ # Optimal tableau input:
+ >>> Tableau(np.array([
+ ... [0, 0, 0.25, 0.25, 2],
+ ... [0, 1, 0.375, -0.125, 1],
+ ... [1, 0, -0.125, 0.375, 1]
+ ... ]), 2).run_simplex()
+ {'P': 2.0, 'x1': 1.0, 'x2': 1.0}
+
+ # Non-standard: >= constraints
+ Max: 2x1 + 3x2 + x3
+ ST: x1 + x2 + x3 <= 40
+ 2x1 + x2 - x3 >= 10
+ - x2 + x3 >= 10
+ >>> Tableau(np.array([
+ ... [2, 0, 0, 0, -1, -1, 0, 0, 20],
+ ... [-2, -3, -1, 0, 0, 0, 0, 0, 0],
+ ... [1, 1, 1, 1, 0, 0, 0, 0, 40],
+ ... [2, 1, -1, 0, -1, 0, 1, 0, 10],
+ ... [0, -1, 1, 0, 0, -1, 0, 1, 10.]
+ ... ]), 3).run_simplex()
+ {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0}
+
+ # Non standard: minimisation and equalities
+ Min: x1 + x2
+ ST: 2x1 + x2 = 12
+ 6x1 + 5x2 = 40
+ >>> Tableau(np.array([
+ ... [8, 6, 0, -1, 0, -1, 0, 0, 52],
+ ... [1, 1, 0, 0, 0, 0, 0, 0, 0],
+ ... [2, 1, 1, 0, 0, 0, 0, 0, 12],
+ ... [2, 1, 0, -1, 0, 0, 1, 0, 12],
+ ... [6, 5, 0, 0, 1, 0, 0, 0, 40],
+ ... [6, 5, 0, 0, 0, -1, 0, 1, 40.]
+ ... ]), 2).run_simplex()
+ {'P': 7.0, 'x1': 5.0, 'x2': 2.0}
+ """
+ # Stop simplex algorithm from cycling.
+ for _ in range(100):
+ # Completion of each stage removes an objective. If both stages
+ # are complete, then no objectives are left
+ if not self.objectives:
+ self.col_titles = self.generate_col_titles(
+ self.n_vars, self.n_slack, self.n_art_vars
+ )
+
+ # Find the values of each variable at optimal solution
+ return self.interpret_tableau(self.tableau, self.col_titles)
+
+ row_idx, col_idx = self.find_pivot(self.tableau)
+
+ # If there are no more negative values in objective row
+ if self.stop_iter:
+ # Delete artificial variable columns and rows. Update attributes
+ self.tableau = self.change_stage(self.tableau)
+ else:
+ self.tableau = self.pivot(self.tableau, row_idx, col_idx)
+ return {}
+
+ def interpret_tableau(
+ self, tableau: np.ndarray, col_titles: list[str]
+ ) -> dict[str, float]:
+ """Given the final tableau, add the corresponding values of the basic
+ decision variables to the `output_dict`
+ >>> tableau = np.array([
+ ... [0,0,0.875,0.375,5],
+ ... [0,1,0.375,-0.125,1],
+ ... [1,0,-0.125,0.375,1]
+ ... ])
+ >>> t = Tableau(tableau, 2)
+ >>> t.interpret_tableau(tableau, ["x1", "x2", "s1", "s2", "RHS"])
+ {'P': 5.0, 'x1': 1.0, 'x2': 1.0}
+ """
+ # P = RHS of final tableau
+ output_dict = {"P": abs(tableau[0, -1])}
+
+ for i in range(self.n_vars):
+ # Gives ids of nonzero entries in the ith column
+ nonzero = np.nonzero(tableau[:, i])
+ n_nonzero = len(nonzero[0])
+
+ # First entry in the nonzero ids
+ nonzero_rowidx = nonzero[0][0]
+ nonzero_val = tableau[nonzero_rowidx, i]
+
+ # If there is only one nonzero value in column, which is one
+ if n_nonzero == nonzero_val == 1:
+ rhs_val = tableau[nonzero_rowidx, -1]
+ output_dict[col_titles[i]] = rhs_val
+
+ # Check for basic variables
+ for title in col_titles:
+ # Don't add RHS or slack variables to output dict
+ if title[0] not in "R-s-a":
+ output_dict.setdefault(title, 0)
+ return output_dict
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/machine_learning/astar.py b/machine_learning/astar.py
index ee3fcff0b..7a60ed225 100644
--- a/machine_learning/astar.py
+++ b/machine_learning/astar.py
@@ -1,41 +1,38 @@
"""
-The A* algorithm combines features of uniform-cost search and pure
-heuristic search to efficiently compute optimal solutions.
-A* algorithm is a best-first search algorithm in which the cost
-associated with a node is f(n) = g(n) + h(n),
-where g(n) is the cost of the path from the initial state to node n and
-h(n) is the heuristic estimate or the cost or a path
-from node n to a goal.A* algorithm introduces a heuristic into a
-regular graph-searching algorithm,
-essentially planning ahead at each step so a more optimal decision
-is made.A* also known as the algorithm with brains
+The A* algorithm combines features of uniform-cost search and pure heuristic search to
+efficiently compute optimal solutions.
+
+The A* algorithm is a best-first search algorithm in which the cost associated with a
+node is f(n) = g(n) + h(n), where g(n) is the cost of the path from the initial state to
+node n and h(n) is the heuristic estimate or the cost or a path from node n to a goal.
+
+The A* algorithm introduces a heuristic into a regular graph-searching algorithm,
+essentially planning ahead at each step so a more optimal decision is made. For this
+reason, A* is known as an algorithm with brains.
+
+https://en.wikipedia.org/wiki/A*_search_algorithm
"""
import numpy as np
class Cell:
"""
- Class cell represents a cell in the world which have the property
- position : The position of the represented by tupleof x and y
- coordinates initially set to (0,0)
- parent : This contains the parent cell object which we visited
- before arrinving this cell
- g,h,f : The parameters for constructing the heuristic function
- which can be any function. for simplicity used line
- distance
+    Class cell represents a cell in the world which has the properties:
+ position: represented by tuple of x and y coordinates initially set to (0,0).
+ parent: Contains the parent cell object visited before we arrived at this cell.
+ g, h, f: Parameters used when calling our heuristic function.
"""
def __init__(self):
self.position = (0, 0)
self.parent = None
-
self.g = 0
self.h = 0
self.f = 0
"""
- overrides equals method because otherwise cell assign will give
- wrong results
+ Overrides equals method because otherwise cell assign will give
+ wrong results.
"""
def __eq__(self, cell):
@@ -48,8 +45,8 @@ class Cell:
class Gridworld:
"""
Gridworld class represents the external world here a grid M*M
- matrix
- world_size: create a numpy array with the given world_size default is 5
+ matrix.
+ world_size: create a numpy array with the given world_size default is 5.
"""
def __init__(self, world_size=(5, 5)):
@@ -90,10 +87,10 @@ class Gridworld:
def astar(world, start, goal):
"""
- Implementation of a start algorithm
- world : Object of the world object
- start : Object of the cell as start position
- stop : Object of the cell as goal position
+    Implementation of the A* algorithm.
+ world : Object of the world object.
+ start : Object of the cell as start position.
+ stop : Object of the cell as goal position.
>>> p = Gridworld()
>>> start = Cell()
@@ -137,14 +134,14 @@ def astar(world, start, goal):
if __name__ == "__main__":
world = Gridworld()
- # stat position and Goal
+ # Start position and goal
start = Cell()
start.position = (0, 0)
goal = Cell()
goal.position = (4, 4)
print(f"path from {start.position} to {goal.position}")
s = astar(world, start, goal)
- # Just for visual reasons
+ # Just for visual reasons.
for i in s:
world.w[i] = 1
print(world.w)
diff --git a/machine_learning/data_transformations.py b/machine_learning/data_transformations.py
index 9e0d747e9..ecfd3b9e2 100644
--- a/machine_learning/data_transformations.py
+++ b/machine_learning/data_transformations.py
@@ -1,5 +1,7 @@
"""
-Normalization Wikipedia: https://en.wikipedia.org/wiki/Normalization
+Normalization.
+
+Wikipedia: https://en.wikipedia.org/wiki/Normalization
Normalization is the process of converting numerical data to a standard range of values.
This range is typically between [0, 1] or [-1, 1]. The equation for normalization is
x_norm = (x - x_min)/(x_max - x_min) where x_norm is the normalized value, x is the
@@ -28,7 +30,8 @@ from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
"""
- Returns a normalized list of values
+ Return a normalized list of values.
+
@params: data, a list of values to normalize
@returns: a list of normalized values (rounded to ndigits decimal places)
@examples:
@@ -46,7 +49,8 @@ def normalization(data: list, ndigits: int = 3) -> list:
def standardization(data: list, ndigits: int = 3) -> list:
"""
- Returns a standardized list of values
+ Return a standardized list of values.
+
@params: data, a list of values to standardize
@returns: a list of standardized values (rounded to ndigits decimal places)
@examples:
diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py
index ace6fb0fa..7cd1b02c4 100644
--- a/machine_learning/decision_tree.py
+++ b/machine_learning/decision_tree.py
@@ -6,7 +6,7 @@ Output: The decision tree maps a real number input to a real number output.
import numpy as np
-class Decision_Tree:
+class DecisionTree:
def __init__(self, depth=5, min_leaf_size=5):
self.depth = depth
self.decision_boundary = 0
@@ -22,17 +22,17 @@ class Decision_Tree:
@param prediction: a floating point value
return value: mean_squared_error calculates the error if prediction is used to
estimate the labels
- >>> tester = Decision_Tree()
+ >>> tester = DecisionTree()
>>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
- >>> test_prediction = np.float(6)
+ >>> test_prediction = float(6)
>>> tester.mean_squared_error(test_labels, test_prediction) == (
- ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
+ ... TestDecisionTree.helper_mean_squared_error_test(test_labels,
... test_prediction))
True
>>> test_labels = np.array([1,2,3])
- >>> test_prediction = np.float(2)
+ >>> test_prediction = float(2)
>>> tester.mean_squared_error(test_labels, test_prediction) == (
- ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
+ ... TestDecisionTree.helper_mean_squared_error_test(test_labels,
... test_prediction))
True
"""
@@ -41,10 +41,10 @@ class Decision_Tree:
return np.mean((labels - prediction) ** 2)
- def train(self, X, y):
+ def train(self, x, y):
"""
train:
- @param X: a one dimensional numpy array
+ @param x: a one dimensional numpy array
@param y: a one dimensional numpy array.
The contents of y are the labels for the corresponding X values
@@ -55,17 +55,17 @@ class Decision_Tree:
this section is to check that the inputs conform to our dimensionality
constraints
"""
- if X.ndim != 1:
+ if x.ndim != 1:
print("Error: Input data set must be one dimensional")
return
- if len(X) != len(y):
+ if len(x) != len(y):
print("Error: X and y have different lengths")
return
if y.ndim != 1:
print("Error: Data set labels must be one dimensional")
return
- if len(X) < 2 * self.min_leaf_size:
+ if len(x) < 2 * self.min_leaf_size:
self.prediction = np.mean(y)
return
@@ -74,7 +74,7 @@ class Decision_Tree:
return
best_split = 0
- min_error = self.mean_squared_error(X, np.mean(y)) * 2
+ min_error = self.mean_squared_error(x, np.mean(y)) * 2
"""
loop over all possible splits for the decision tree. find the best split.
@@ -82,34 +82,34 @@ class Decision_Tree:
then the data set is not split and the average for the entire array is used as
the predictor
"""
- for i in range(len(X)):
- if len(X[:i]) < self.min_leaf_size:
+ for i in range(len(x)):
+ if len(x[:i]) < self.min_leaf_size:
continue
- elif len(X[i:]) < self.min_leaf_size:
+ elif len(x[i:]) < self.min_leaf_size:
continue
else:
- error_left = self.mean_squared_error(X[:i], np.mean(y[:i]))
- error_right = self.mean_squared_error(X[i:], np.mean(y[i:]))
+ error_left = self.mean_squared_error(x[:i], np.mean(y[:i]))
+ error_right = self.mean_squared_error(x[i:], np.mean(y[i:]))
error = error_left + error_right
if error < min_error:
best_split = i
min_error = error
if best_split != 0:
- left_X = X[:best_split]
+ left_x = x[:best_split]
left_y = y[:best_split]
- right_X = X[best_split:]
+ right_x = x[best_split:]
right_y = y[best_split:]
- self.decision_boundary = X[best_split]
- self.left = Decision_Tree(
+ self.decision_boundary = x[best_split]
+ self.left = DecisionTree(
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
)
- self.right = Decision_Tree(
+ self.right = DecisionTree(
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
)
- self.left.train(left_X, left_y)
- self.right.train(right_X, right_y)
+ self.left.train(left_x, left_y)
+ self.right.train(right_x, right_y)
else:
self.prediction = np.mean(y)
@@ -134,7 +134,7 @@ class Decision_Tree:
return None
-class Test_Decision_Tree:
+class TestDecisionTree:
"""Decision Tres test class"""
@staticmethod
@@ -145,11 +145,11 @@ class Test_Decision_Tree:
@param prediction: a floating point value
return value: helper_mean_squared_error_test calculates the mean squared error
"""
- squared_error_sum = np.float(0)
+ squared_error_sum = float(0)
for label in labels:
squared_error_sum += (label - prediction) ** 2
- return np.float(squared_error_sum / labels.size)
+ return float(squared_error_sum / labels.size)
def main():
@@ -159,11 +159,11 @@ def main():
predict the label of 10 different test values. Then the mean squared error over
this test is displayed.
"""
- X = np.arange(-1.0, 1.0, 0.005)
- y = np.sin(X)
+ x = np.arange(-1.0, 1.0, 0.005)
+ y = np.sin(x)
- tree = Decision_Tree(depth=10, min_leaf_size=10)
- tree.train(X, y)
+ tree = DecisionTree(depth=10, min_leaf_size=10)
+ tree.train(x, y)
test_cases = (np.random.rand(10) * 2) - 1
predictions = np.array([tree.predict(x) for x in test_cases])
diff --git a/machine_learning/dimensionality_reduction.py b/machine_learning/dimensionality_reduction.py
new file mode 100644
index 000000000..d2046f81a
--- /dev/null
+++ b/machine_learning/dimensionality_reduction.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub
+
+"""
+Requirements:
+ - numpy version 1.21
+ - scipy version 1.3.3
+Notes:
+ - Each column of the features matrix corresponds to a class item
+"""
+
+import logging
+
+import numpy as np
+import pytest
+from scipy.linalg import eigh
+
+logging.basicConfig(level=logging.INFO, format="%(message)s")
+
+
+def column_reshape(input_array: np.ndarray) -> np.ndarray:
+ """Function to reshape a row Numpy array into a column Numpy array
+ >>> input_array = np.array([1, 2, 3])
+ >>> column_reshape(input_array)
+ array([[1],
+ [2],
+ [3]])
+ """
+
+ return input_array.reshape((input_array.size, 1))
+
+
+def covariance_within_classes(
+ features: np.ndarray, labels: np.ndarray, classes: int
+) -> np.ndarray:
+ """Function to compute the covariance matrix inside each class.
+ >>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> labels = np.array([0, 1, 0])
+ >>> covariance_within_classes(features, labels, 2)
+ array([[0.66666667, 0.66666667, 0.66666667],
+ [0.66666667, 0.66666667, 0.66666667],
+ [0.66666667, 0.66666667, 0.66666667]])
+ """
+
+ covariance_sum = np.nan
+ for i in range(classes):
+ data = features[:, labels == i]
+ data_mean = data.mean(1)
+ # Centralize the data of class i
+ centered_data = data - column_reshape(data_mean)
+ if i > 0:
+ # If covariance_sum is not None
+ covariance_sum += np.dot(centered_data, centered_data.T)
+ else:
+ # If covariance_sum is np.nan (i.e. first loop)
+ covariance_sum = np.dot(centered_data, centered_data.T)
+
+ return covariance_sum / features.shape[1]
+
+
+def covariance_between_classes(
+ features: np.ndarray, labels: np.ndarray, classes: int
+) -> np.ndarray:
+ """Function to compute the covariance matrix between multiple classes
+ >>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]])
+ >>> labels = np.array([0, 1, 0])
+ >>> covariance_between_classes(features, labels, 2)
+ array([[ 3.55555556, 1.77777778, -2.66666667],
+ [ 1.77777778, 0.88888889, -1.33333333],
+ [-2.66666667, -1.33333333, 2. ]])
+ """
+
+ general_data_mean = features.mean(1)
+ covariance_sum = np.nan
+ for i in range(classes):
+ data = features[:, labels == i]
+ device_data = data.shape[1]
+ data_mean = data.mean(1)
+ if i > 0:
+ # If covariance_sum is not None
+ covariance_sum += device_data * np.dot(
+ column_reshape(data_mean) - column_reshape(general_data_mean),
+ (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
+ )
+ else:
+ # If covariance_sum is np.nan (i.e. first loop)
+ covariance_sum = device_data * np.dot(
+ column_reshape(data_mean) - column_reshape(general_data_mean),
+ (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
+ )
+
+ return covariance_sum / features.shape[1]
+
+
+def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
+ """
+ Principal Component Analysis.
+
+ For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis.
+ Parameters:
+ * features: the features extracted from the dataset
+ * dimensions: to filter the projected data for the desired dimension
+
+ >>> test_principal_component_analysis()
+ """
+
+ # Check if the features have been loaded
+ if features.any():
+ data_mean = features.mean(1)
+ # Center the dataset
+ centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
+ covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
+ _, eigenvectors = np.linalg.eigh(covariance_matrix)
+ # Take all the columns in the reverse order (-1), and then takes only the first
+ filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
+ # Project the database on the new space
+ projected_data = np.dot(filtered_eigenvectors.T, features)
+ logging.info("Principal Component Analysis computed")
+
+ return projected_data
+ else:
+ logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
+ logging.error("Dataset empty")
+ raise AssertionError
+
+
+def linear_discriminant_analysis(
+ features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
+) -> np.ndarray:
+ """
+ Linear Discriminant Analysis.
+
+ For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis.
+ Parameters:
+ * features: the features extracted from the dataset
+ * labels: the class labels of the features
+ * classes: the number of classes present in the dataset
+ * dimensions: to filter the projected data for the desired dimension
+
+ >>> test_linear_discriminant_analysis()
+ """
+
+ # Check if the dimension desired is less than the number of classes
+ assert classes > dimensions
+
+ # Check if features have been already loaded
+    if features.any():
+ _, eigenvectors = eigh(
+ covariance_between_classes(features, labels, classes),
+ covariance_within_classes(features, labels, classes),
+ )
+ filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
+ svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
+ filtered_svd_matrix = svd_matrix[:, 0:dimensions]
+ projected_data = np.dot(filtered_svd_matrix.T, features)
+ logging.info("Linear Discriminant Analysis computed")
+
+ return projected_data
+ else:
+ logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
+ logging.error("Dataset empty")
+ raise AssertionError
+
+
+def test_linear_discriminant_analysis() -> None:
+ # Create dummy dataset with 2 classes and 3 features
+ features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
+ labels = np.array([0, 0, 0, 1, 1])
+ classes = 2
+ dimensions = 2
+
+ # Assert that the function raises an AssertionError if dimensions > classes
+ with pytest.raises(AssertionError) as error_info:
+ projected_data = linear_discriminant_analysis(
+ features, labels, classes, dimensions
+ )
+ if isinstance(projected_data, np.ndarray):
+ raise AssertionError(
+ "Did not raise AssertionError for dimensions > classes"
+ )
+ assert error_info.type is AssertionError
+
+
+def test_principal_component_analysis() -> None:
+ features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ dimensions = 2
+ expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
+
+ with pytest.raises(AssertionError) as error_info:
+ output = principal_component_analysis(features, dimensions)
+ if not np.allclose(expected_output, output):
+ raise AssertionError
+ assert error_info.type is AssertionError
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py
index b11a23012..0909b76d8 100644
--- a/machine_learning/forecasting/run.py
+++ b/machine_learning/forecasting/run.py
@@ -1,7 +1,7 @@
"""
this is code for forecasting
but i modified it and used it for safety checker of data
-for ex: you have a online shop and for some reason some data are
+for ex: you have an online shop and for some reason some data are
missing (the amount of data that u expected are not supposed to be)
then we can use it
*ps : 1. ofc we can use normal statistic method but in this case
@@ -91,14 +91,14 @@ def interquartile_range_checker(train_user: list) -> float:
return low_lim
-def data_safety_checker(list_vote: list, actual_result: float) -> None:
+def data_safety_checker(list_vote: list, actual_result: float) -> bool:
"""
Used to review all the votes (list result prediction)
and compare it to the actual result.
input : list of predictions
output : print whether it's safe or not
- >>> data_safety_checker([2,3,4],5.0)
- Today's data is not safe.
+ >>> data_safety_checker([2, 3, 4], 5.0)
+ False
"""
safe = 0
not_safe = 0
@@ -107,50 +107,54 @@ def data_safety_checker(list_vote: list, actual_result: float) -> None:
safe = not_safe + 1
else:
if abs(abs(i) - abs(actual_result)) <= 0.1:
- safe = safe + 1
+ safe += 1
else:
- not_safe = not_safe + 1
- print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.")
+ not_safe += 1
+ return safe > not_safe
-# data_input_df = pd.read_csv("ex_data.csv", header=None)
-data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
-data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
+if __name__ == "__main__":
+ # data_input_df = pd.read_csv("ex_data.csv", header=None)
+ data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
+ data_input_df = pd.DataFrame(
+ data_input, columns=["total_user", "total_even", "days"]
+ )
-"""
-data column = total user in a day, how much online event held in one day,
-what day is that(sunday-saturday)
-"""
+ """
+ data column = total user in a day, how much online event held in one day,
+ what day is that(sunday-saturday)
+ """
-# start normalization
-normalize_df = Normalizer().fit_transform(data_input_df.values)
-# split data
-total_date = normalize_df[:, 2].tolist()
-total_user = normalize_df[:, 0].tolist()
-total_match = normalize_df[:, 1].tolist()
+ # start normalization
+ normalize_df = Normalizer().fit_transform(data_input_df.values)
+ # split data
+ total_date = normalize_df[:, 2].tolist()
+ total_user = normalize_df[:, 0].tolist()
+ total_match = normalize_df[:, 1].tolist()
-# for svr (input variable = total date and total match)
-x = normalize_df[:, [1, 2]].tolist()
-x_train = x[: len(x) - 1]
-x_test = x[len(x) - 1 :]
+ # for svr (input variable = total date and total match)
+ x = normalize_df[:, [1, 2]].tolist()
+ x_train = x[: len(x) - 1]
+ x_test = x[len(x) - 1 :]
-# for linear reression & sarimax
-trn_date = total_date[: len(total_date) - 1]
-trn_user = total_user[: len(total_user) - 1]
-trn_match = total_match[: len(total_match) - 1]
+ # for linear regression & sarimax
+ trn_date = total_date[: len(total_date) - 1]
+ trn_user = total_user[: len(total_user) - 1]
+ trn_match = total_match[: len(total_match) - 1]
-tst_date = total_date[len(total_date) - 1 :]
-tst_user = total_user[len(total_user) - 1 :]
-tst_match = total_match[len(total_match) - 1 :]
+ tst_date = total_date[len(total_date) - 1 :]
+ tst_user = total_user[len(total_user) - 1 :]
+ tst_match = total_match[len(total_match) - 1 :]
+ # voting system with forecasting
+ res_vote = [
+ linear_regression_prediction(
+ trn_date, trn_user, trn_match, tst_date, tst_match
+ ),
+ sarimax_predictor(trn_user, trn_match, tst_match),
+ support_vector_regressor(x_train, x_test, trn_user),
+ ]
-# voting system with forecasting
-res_vote = []
-res_vote.append(
- linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match)
-)
-res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match))
-res_vote.append(support_vector_regressor(x_train, x_test, trn_user))
-
-# check the safety of todays'data^^
-data_safety_checker(res_vote, tst_user)
+ # check the safety of today's data
+ not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
+    print(f"Today's data is {not_str}safe.")
diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py.broken.txt
similarity index 57%
rename from machine_learning/gaussian_naive_bayes.py
rename to machine_learning/gaussian_naive_bayes.py.broken.txt
index c200aa5a4..7e9a8d7f6 100644
--- a/machine_learning/gaussian_naive_bayes.py
+++ b/machine_learning/gaussian_naive_bayes.py.broken.txt
@@ -1,7 +1,9 @@
# Gaussian Naive Bayes Example
+import time
+
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
-from sklearn.metrics import plot_confusion_matrix
+from sklearn.metrics import accuracy_score, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
@@ -17,28 +19,34 @@ def main():
iris = load_iris()
# Split dataset into train and test data
- X = iris["data"] # features
- Y = iris["target"]
+ x = iris["data"] # features
+ y = iris["target"]
x_train, x_test, y_train, y_test = train_test_split(
- X, Y, test_size=0.3, random_state=1
+ x, y, test_size=0.3, random_state=1
)
# Gaussian Naive Bayes
- NB_model = GaussianNB()
- NB_model.fit(x_train, y_train)
+ nb_model = GaussianNB()
+ time.sleep(2.9)
+ model_fit = nb_model.fit(x_train, y_train)
+ y_pred = model_fit.predict(x_test) # Predictions on the test set
# Display Confusion Matrix
plot_confusion_matrix(
- NB_model,
+ nb_model,
x_test,
y_test,
display_labels=iris["target_names"],
- cmap="Blues",
+ cmap="Blues", # although, Greys_r has a better contrast...
normalize="true",
)
plt.title("Normalized Confusion Matrix - IRIS Dataset")
plt.show()
+ time.sleep(1.8)
+ final_accuracy = 100 * accuracy_score(y_true=y_test, y_pred=y_pred)
+ print(f"The overall accuracy of the model is: {round(final_accuracy, 2)}%")
+
if __name__ == "__main__":
main()
diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py.broken.txt
similarity index 80%
rename from machine_learning/gradient_boosting_regressor.py
rename to machine_learning/gradient_boosting_regressor.py.broken.txt
index 0aa0e7a10..c082f3caf 100644
--- a/machine_learning/gradient_boosting_regressor.py
+++ b/machine_learning/gradient_boosting_regressor.py.broken.txt
@@ -26,30 +26,30 @@ def main():
print(df_boston.describe().T)
# Feature selection
- X = df_boston.iloc[:, :-1]
+ x = df_boston.iloc[:, :-1]
y = df_boston.iloc[:, -1] # target variable
# split the data with 75% train and 25% test sets.
- X_train, X_test, y_train, y_test = train_test_split(
- X, y, random_state=0, test_size=0.25
+ x_train, x_test, y_train, y_test = train_test_split(
+ x, y, random_state=0, test_size=0.25
)
model = GradientBoostingRegressor(
n_estimators=500, max_depth=5, min_samples_split=4, learning_rate=0.01
)
# training the model
- model.fit(X_train, y_train)
+ model.fit(x_train, y_train)
# to see how good the model fit the data
- training_score = model.score(X_train, y_train).round(3)
- test_score = model.score(X_test, y_test).round(3)
+ training_score = model.score(x_train, y_train).round(3)
+ test_score = model.score(x_test, y_test).round(3)
print("Training score of GradientBoosting is :", training_score)
print("The test score of GradientBoosting is :", test_score)
# Let us evaluation the model by finding the errors
- y_pred = model.predict(X_test)
+ y_pred = model.predict(x_test)
# The mean squared error
- print("Mean squared error: %.2f" % mean_squared_error(y_test, y_pred))
+ print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}")
# Explained variance score: 1 is perfect prediction
- print("Test Variance score: %.2f" % r2_score(y_test, y_pred))
+ print(f"Test Variance score: {r2_score(y_test, y_pred):.2f}")
# So let's run the model against the test data
fig, ax = plt.subplots()
diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py
index 9fa460a07..5b74dad08 100644
--- a/machine_learning/gradient_descent.py
+++ b/machine_learning/gradient_descent.py
@@ -55,6 +55,7 @@ def output(example_no, data_set):
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
+ return None
def calculate_hypothesis_value(example_no, data_set):
@@ -68,6 +69,7 @@ def calculate_hypothesis_value(example_no, data_set):
return _hypothesis_value(train_data[example_no][0])
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0])
+ return None
def summation_of_cost_derivative(index, end=m):
diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py
index c45be8a4c..7c8142aab 100644
--- a/machine_learning/k_means_clust.py
+++ b/machine_learning/k_means_clust.py
@@ -69,12 +69,11 @@ def get_initial_centroids(data, k, seed=None):
return centroids
-def centroid_pairwise_dist(X, centroids):
- return pairwise_distances(X, centroids, metric="euclidean")
+def centroid_pairwise_dist(x, centroids):
+ return pairwise_distances(x, centroids, metric="euclidean")
def assign_clusters(data, centroids):
-
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = centroid_pairwise_dist(data, centroids)
@@ -100,10 +99,8 @@ def revise_centroids(data, k, cluster_assignment):
def compute_heterogeneity(data, k, centroids, cluster_assignment):
-
heterogeneity = 0.0
for i in range(k):
-
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i, :]
@@ -112,7 +109,7 @@ def compute_heterogeneity(data, k, centroids, cluster_assignment):
distances = pairwise_distances(
member_data_points, [centroids[i]], metric="euclidean"
)
- squared_distances = distances ** 2
+ squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
return heterogeneity
@@ -164,9 +161,7 @@ def kmeans(
num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
if verbose:
print(
- " {:5d} elements changed their cluster assignment.".format(
- num_changed
- )
+ f" {num_changed:5d} elements changed their cluster assignment."
)
# Record heterogeneity convergence metric
@@ -199,8 +194,8 @@ if False: # change to true to run this test case.
plot_heterogeneity(heterogeneity, k)
-def ReportGenerator(
- df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None
+def report_generator(
+ df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None
) -> pd.DataFrame:
"""
Function generates easy-erading clustering report. It takes 2 arguments as an input:
@@ -216,7 +211,7 @@ def ReportGenerator(
>>> data['col2'] = [100, 200, 300]
>>> data['col3'] = [10, 20, 30]
>>> data['Cluster'] = [1, 1, 2]
- >>> ReportGenerator(data, ['col1', 'col2'], 0)
+ >>> report_generator(data, ['col1', 'col2'], 0)
Features Type Mark 1 2
0 # of Customers ClusterSize False 2.000000 1.000000
1 % of Customers ClusterProportion False 0.666667 0.333333
@@ -233,8 +228,8 @@ def ReportGenerator(
[104 rows x 5 columns]
"""
# Fill missing values with given rules
- if FillMissingReport:
- df.fillna(value=FillMissingReport, inplace=True)
+ if fill_missing_report:
+ df = df.fillna(value=fill_missing_report)
df["dummy"] = 1
numeric_cols = df.select_dtypes(np.number).columns
report = (
@@ -315,7 +310,7 @@ def ReportGenerator(
report = pd.concat(
[report, a, clustersize, clusterproportion], axis=0
) # concat report with clustert size and nan values
- report["Mark"] = report["Features"].isin(ClusteringVariables)
+ report["Mark"] = report["Features"].isin(clustering_variables)
cols = report.columns.tolist()
cols = cols[0:2] + cols[-1:] + cols[2:-1]
report = report[cols]
@@ -343,7 +338,7 @@ def ReportGenerator(
)
report.columns.name = ""
report = report.reset_index()
- report.drop(columns=["index"], inplace=True)
+ report = report.drop(columns=["index"])
return report
diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py
index 18553a77a..88c047157 100644
--- a/machine_learning/linear_discriminant_analysis.py
+++ b/machine_learning/linear_discriminant_analysis.py
@@ -42,10 +42,11 @@
Author: @EverLookNeverSee
"""
+from collections.abc import Callable
from math import log
from os import name, system
from random import gauss, seed
-from typing import Callable, TypeVar
+from typing import TypeVar
# Make a training dataset drawn from a gaussian distribution
@@ -255,7 +256,7 @@ def valid_input(
input_msg: str,
err_msg: str,
condition: Callable[[num], bool] = lambda x: True,
- default: str = None,
+ default: str | None = None,
) -> num:
"""
Ask for user value and validate that it fulfill a condition.
@@ -398,7 +399,7 @@ def main():
if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
print("\n" + "GoodBye!".center(100, "-") + "\n")
break
- system("cls" if name == "nt" else "clear")
+ system("cls" if name == "nt" else "clear") # noqa: S605
if __name__ == "__main__":
diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py
index b0bbc7b90..75943ac9f 100644
--- a/machine_learning/linear_regression.py
+++ b/machine_learning/linear_regression.py
@@ -17,9 +17,8 @@ def collect_dataset():
:return : dataset obtained from the link, as matrix
"""
response = requests.get(
- "https://raw.githubusercontent.com/yashLadha/"
- + "The_Math_of_Intelligence/master/Week1/ADRvs"
- + "Rating.csv"
+ "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
+ "master/Week1/ADRvsRating.csv"
)
lines = response.text.splitlines()
data = []
@@ -82,11 +81,21 @@ def run_linear_regression(data_x, data_y):
for i in range(0, iterations):
theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
error = sum_of_square_error(data_x, data_y, len_data, theta)
- print("At Iteration %d - Error is %.5f " % (i + 1, error))
+ print(f"At Iteration {i + 1} - Error is {error:.5f}")
return theta
+def mean_absolute_error(predicted_y, original_y):
+ """Return the mean absolute error for error calculation
+ :param predicted_y : contains the output of prediction (result vector)
+ :param original_y : contains values of expected outcome
+ :return : mean absolute error computed from the given vectors
+ """
+ total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
+ return total / len(original_y)
+
+
def main():
"""Driver function"""
data = collect_dataset()
@@ -99,7 +108,7 @@ def main():
len_result = theta.shape[1]
print("Resultant Feature vector : ")
for i in range(0, len_result):
- print("%.5f" % (theta[0, i]))
+ print(f"{theta[0, i]:.5f}")
if __name__ == "__main__":
diff --git a/machine_learning/local_weighted_learning/__init__.py b/machine_learning/local_weighted_learning/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/local_weighted_learning.md
new file mode 100644
index 000000000..ef4dbc958
--- /dev/null
+++ b/machine_learning/local_weighted_learning/local_weighted_learning.md
@@ -0,0 +1,66 @@
+# Locally Weighted Linear Regression
+It is a non-parametric ML algorithm that does not learn on a fixed set of parameters such as **linear regression**. \
+So, here comes a question of what is *linear regression*? \
+**Linear regression** is a supervised learning algorithm used for computing linear relationships between input (X) and output (Y). \
+
+### Terminology Involved
+
+number_of_features(i) = Number of features involved. \
+number_of_training_examples(m) = Number of training examples. \
+output_sequence(y) = Output Sequence. \
+$\theta$ $^T$ x = predicted point. \
+J($\theta$) = Cost function of point.
+
+The steps involved in ordinary linear regression are:
+
+Training phase: Compute $\theta$ to minimize the cost. \
+J($\theta$) = $\sum_{i=1}^m$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$
+
+Predict output: for given query point x, \
+ return: ($\theta$)$^T$ x
+
+
+
+This training phase is possible when data points are linear, but there again comes a question: can we predict a non-linear relationship between x and y? As shown below
+
+
+
+
+So, here comes the role of non-parametric algorithm which doesn't compute predictions based on fixed set of params. Rather parameters $\theta$ are computed individually for each query point/data point x.
+
+
+While Computing $\theta$ , a higher preference is given to points in the vicinity of x than points farther from x.
+
+Cost Function J($\theta$) = $\sum_{i=1}^m$ $w^i$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$
+
+$w^i$ is non-negative weight associated to training point $x^i$. \
+$w^i$ is large for $x^i$'s lying closer to query point $x$. \
+$w^i$ is small for $x^i$'s lying farther from query point $x$.
+
+A Typical weight can be computed using \
+
+$w^i$ = $\exp$(-$\frac{(x^i-x)(x^i-x)^T}{2\tau^2}$)
+
+Where $\tau$ is the bandwidth parameter that controls $w^i$ distance from x.
+
+Let's look at an example:
+
+Suppose we had a query point x=5.0 and training points $x^1$=4.9 and $x^2$=3.0, then we can calculate the weights as:
+
+$w^i$ = $\exp$(-$\frac{(x^i-x)(x^i-x)^T}{2\tau^2}$) with $\tau$=0.5
+
+$w^1$ = $\exp$(-$\frac{(4.9-5)^2}{2(0.5)^2}$) = 0.9802
+
+$w^2$ = $\exp$(-$\frac{(3-5)^2}{2(0.5)^2}$) = 0.000335
+
+So, J($\theta$) = 0.9802*($\theta$ $^T$ $x^1$ - $y^1$) + 0.000335*($\theta$ $^T$ $x^2$ - $y^2$)
+
+So, hereby we can conclude that the weights fall exponentially as the distance between x & $x^i$ increases, and so does the contribution of error in prediction for $x^i$ to the cost.
+
+Steps involved in LWL are : \
+Compute $\theta$ to minimize the cost.
+J($\theta$) = $\sum_{i=1}^m$ $w^i$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$ \
+Predict Output: for given query point x, \
+return : $\theta$ $^T$ x
+
+
diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py
new file mode 100644
index 000000000..8dd0e55d4
--- /dev/null
+++ b/machine_learning/local_weighted_learning/local_weighted_learning.py
@@ -0,0 +1,185 @@
+"""
+Locally weighted linear regression, also called local regression, is a type of
+non-parametric linear regression that prioritizes data closest to a given
+prediction point. The algorithm estimates the vector of model coefficients β
+using weighted least squares regression:
+
+β = (XᵀWX)⁻¹(XᵀWy),
+
+where X is the design matrix, y is the response vector, and W is the diagonal
+weight matrix.
+
+This implementation calculates wᵢ, the weight of the ith training sample, using
+the Gaussian weight:
+
+wᵢ = exp(-‖xᵢ - x‖²/(2τ²)),
+
+where xᵢ is the ith training sample, x is the prediction point, τ is the
+"bandwidth", and ‖x‖ is the Euclidean norm (also called the 2-norm or the L²
+norm). The bandwidth τ controls how quickly the weight of a training sample
+decreases as its distance from the prediction point increases. One can think of
+the Gaussian weight as a bell curve centered around the prediction point: a
+training sample is weighted lower if it's farther from the center, and τ
+controls the spread of the bell curve.
+
+Other types of locally weighted regression such as locally estimated scatterplot
+smoothing (LOESS) typically use different weight functions.
+
+References:
+ - https://en.wikipedia.org/wiki/Local_regression
+ - https://en.wikipedia.org/wiki/Weighted_least_squares
+ - https://cs229.stanford.edu/notes2022fall/main_notes.pdf
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+def weight_matrix(point: np.ndarray, x_train: np.ndarray, tau: float) -> np.ndarray:
+ """
+ Calculate the weight of every point in the training data around a given
+ prediction point
+
+ Args:
+ point: x-value at which the prediction is being made
+ x_train: ndarray of x-values for training
+ tau: bandwidth value, controls how quickly the weight of training values
+ decreases as the distance from the prediction point increases
+
+ Returns:
+ m x m weight matrix around the prediction point, where m is the size of
+ the training set
+ >>> weight_matrix(
+ ... np.array([1., 1.]),
+ ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]),
+ ... 0.6
+ ... )
+ array([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000],
+ [0.00000000e+000, 0.00000000e+000, 0.00000000e+000],
+ [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]])
+ """
+ m = len(x_train) # Number of training samples
+ weights = np.eye(m) # Initialize weights as identity matrix
+ for j in range(m):
+ diff = point - x_train[j]
+ weights[j, j] = np.exp(diff @ diff.T / (-2.0 * tau**2))
+
+ return weights
+
+
+def local_weight(
+ point: np.ndarray, x_train: np.ndarray, y_train: np.ndarray, tau: float
+) -> np.ndarray:
+ """
+ Calculate the local weights at a given prediction point using the weight
+ matrix for that point
+
+ Args:
+ point: x-value at which the prediction is being made
+ x_train: ndarray of x-values for training
+ y_train: ndarray of y-values for training
+ tau: bandwidth value, controls how quickly the weight of training values
+ decreases as the distance from the prediction point increases
+ Returns:
+ ndarray of local weights
+ >>> local_weight(
+ ... np.array([1., 1.]),
+ ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]),
+ ... np.array([[1.01, 1.66, 3.5]]),
+ ... 0.6
+ ... )
+ array([[0.00873174],
+ [0.08272556]])
+ """
+ weight_mat = weight_matrix(point, x_train, tau)
+ weight = np.linalg.inv(x_train.T @ weight_mat @ x_train) @ (
+ x_train.T @ weight_mat @ y_train.T
+ )
+
+ return weight
+
+
+def local_weight_regression(
+ x_train: np.ndarray, y_train: np.ndarray, tau: float
+) -> np.ndarray:
+ """
+ Calculate predictions for each point in the training data
+
+ Args:
+ x_train: ndarray of x-values for training
+ y_train: ndarray of y-values for training
+ tau: bandwidth value, controls how quickly the weight of training values
+ decreases as the distance from the prediction point increases
+
+ Returns:
+ ndarray of predictions
+ >>> local_weight_regression(
+ ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]),
+ ... np.array([[1.01, 1.66, 3.5]]),
+ ... 0.6
+ ... )
+ array([1.07173261, 1.65970737, 3.50160179])
+ """
+ y_pred = np.zeros(len(x_train)) # Initialize array of predictions
+ for i, item in enumerate(x_train):
+ y_pred[i] = item @ local_weight(item, x_train, y_train, tau)
+
+ return y_pred
+
+
+def load_data(
+ dataset_name: str, x_name: str, y_name: str
+) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """
+ Load data from seaborn and split it into x and y points
+ >>> pass # No doctests, function is for demo purposes only
+ """
+ import seaborn as sns
+
+ data = sns.load_dataset(dataset_name)
+ x_data = np.array(data[x_name])
+ y_data = np.array(data[y_name])
+
+ one = np.ones(len(y_data))
+
+ # pairing elements of one and x_data
+ x_train = np.column_stack((one, x_data))
+
+ return x_train, x_data, y_data
+
+
+def plot_preds(
+ x_train: np.ndarray,
+ preds: np.ndarray,
+ x_data: np.ndarray,
+ y_data: np.ndarray,
+ x_name: str,
+ y_name: str,
+) -> None:
+ """
+ Plot predictions and display the graph
+ >>> pass # No doctests, function is for demo purposes only
+ """
+ x_train_sorted = np.sort(x_train, axis=0)
+ plt.scatter(x_data, y_data, color="blue")
+ plt.plot(
+ x_train_sorted[:, 1],
+ preds[x_train[:, 1].argsort(0)],
+ color="yellow",
+ linewidth=5,
+ )
+ plt.title("Local Weighted Regression")
+ plt.xlabel(x_name)
+ plt.ylabel(y_name)
+ plt.show()
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ # Demo with a dataset from the seaborn module
+ training_data_x, total_bill, tip = load_data("tips", "total_bill", "tip")
+ predictions = local_weight_regression(training_data_x, tip, 5)
+ plot_preds(training_data_x, predictions, total_bill, tip, "total_bill", "tip")
diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py
index 48d88ef61..87bc8f668 100644
--- a/machine_learning/logistic_regression.py
+++ b/machine_learning/logistic_regression.py
@@ -35,25 +35,25 @@ def cost_function(h, y):
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
-def log_likelihood(X, Y, weights):
- scores = np.dot(X, weights)
- return np.sum(Y * scores - np.log(1 + np.exp(scores)))
+def log_likelihood(x, y, weights):
+ scores = np.dot(x, weights)
+ return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, X is the feature matrix,y is the target matrix
-def logistic_reg(alpha, X, y, max_iterations=70000):
- theta = np.zeros(X.shape[1])
+def logistic_reg(alpha, x, y, max_iterations=70000):
+ theta = np.zeros(x.shape[1])
for iterations in range(max_iterations):
- z = np.dot(X, theta)
+ z = np.dot(x, theta)
h = sigmoid_function(z)
- gradient = np.dot(X.T, h - y) / y.size
+ gradient = np.dot(x.T, h - y) / y.size
theta = theta - alpha * gradient # updating the weights
- z = np.dot(X, theta)
+ z = np.dot(x, theta)
h = sigmoid_function(z)
- J = cost_function(h, y)
+ j = cost_function(h, y)
if iterations % 100 == 0:
- print(f"loss: {J} \t") # printing the loss after every 100 iterations
+ print(f"loss: {j} \t") # printing the loss after every 100 iterations
return theta
@@ -61,23 +61,23 @@ def logistic_reg(alpha, X, y, max_iterations=70000):
if __name__ == "__main__":
iris = datasets.load_iris()
- X = iris.data[:, :2]
+ x = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
- theta = logistic_reg(alpha, X, y, max_iterations=70000)
+ theta = logistic_reg(alpha, x, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
- def predict_prob(X):
+ def predict_prob(x):
return sigmoid_function(
- np.dot(X, theta)
+ np.dot(x, theta)
) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
- plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0")
- plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1")
- (x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max())
- (x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max())
+ plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
+ plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
+ (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
+ (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py
index 6fd3cf291..74197c46a 100644
--- a/machine_learning/lstm/lstm_prediction.py
+++ b/machine_learning/lstm/lstm_prediction.py
@@ -1,7 +1,7 @@
"""
Create a Long Short Term Memory (LSTM) network model
An LSTM is a type of Recurrent Neural Network (RNN) as discussed at:
- * http://colah.github.io/posts/2015-08-Understanding-LSTMs
+ * https://colah.github.io/posts/2015-08-Understanding-LSTMs
* https://en.wikipedia.org/wiki/Long_short-term_memory
"""
import numpy as np
diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py
index 604185cef..e99a4131e 100644
--- a/machine_learning/multilayer_perceptron_classifier.py
+++ b/machine_learning/multilayer_perceptron_classifier.py
@@ -15,12 +15,12 @@ test = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
Y = clf.predict(test)
-def wrapper(Y):
+def wrapper(y):
"""
>>> wrapper(Y)
[0, 0, 1]
"""
- return list(Y)
+ return list(y)
if __name__ == "__main__":
diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py
deleted file mode 100644
index 374c35f7f..000000000
--- a/machine_learning/polymonial_regression.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import pandas as pd
-from matplotlib import pyplot as plt
-from sklearn.linear_model import LinearRegression
-
-# Splitting the dataset into the Training set and Test set
-from sklearn.model_selection import train_test_split
-
-# Fitting Polynomial Regression to the dataset
-from sklearn.preprocessing import PolynomialFeatures
-
-# Importing the dataset
-dataset = pd.read_csv(
- "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
- "position_salaries.csv"
-)
-X = dataset.iloc[:, 1:2].values
-y = dataset.iloc[:, 2].values
-
-
-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
-
-
-poly_reg = PolynomialFeatures(degree=4)
-X_poly = poly_reg.fit_transform(X)
-pol_reg = LinearRegression()
-pol_reg.fit(X_poly, y)
-
-
-# Visualizing the Polymonial Regression results
-def viz_polymonial():
- plt.scatter(X, y, color="red")
- plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
- plt.title("Truth or Bluff (Linear Regression)")
- plt.xlabel("Position level")
- plt.ylabel("Salary")
- plt.show()
- return
-
-
-if __name__ == "__main__":
- viz_polymonial()
-
- # Predicting a new result with Polymonial Regression
- pol_reg.predict(poly_reg.fit_transform([[5.5]]))
- # output should be 132148.43750003
diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py
new file mode 100644
index 000000000..5bafea96f
--- /dev/null
+++ b/machine_learning/polynomial_regression.py
@@ -0,0 +1,213 @@
+"""
+Polynomial regression is a type of regression analysis that models the relationship
+between a predictor x and the response y as an mth-degree polynomial:
+
+y = β₀ + β₁x + β₂x² + ... + βₘxᵐ + ε
+
+By treating x, x², ..., xᵐ as distinct variables, we see that polynomial regression is a
+special case of multiple linear regression. Therefore, we can use ordinary least squares
+(OLS) estimation to estimate the vector of model parameters β = (β₀, β₁, β₂, ..., βₘ)
+for polynomial regression:
+
+β = (XᵀX)⁻¹Xᵀy = X⁺y
+
+where X is the design matrix, y is the response vector, and X⁺ denotes the Moore–Penrose
+pseudoinverse of X. In the case of polynomial regression, the design matrix is
+
+ |1 x₁ x₁² ⋯ x₁ᵐ|
+X = |1 x₂ x₂² ⋯ x₂ᵐ|
+ |⋮ ⋮ ⋮ ⋱ ⋮ |
+ |1 xₙ xₙ² ⋯ xₙᵐ|
+
+In OLS estimation, inverting XᵀX to compute X⁺ can be very numerically unstable. This
+implementation sidesteps this need to invert XᵀX by computing X⁺ using singular value
+decomposition (SVD):
+
+β = VΣ⁺Uᵀy
+
+where UΣVᵀ is an SVD of X.
+
+References:
+ - https://en.wikipedia.org/wiki/Polynomial_regression
+ - https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
+ - https://en.wikipedia.org/wiki/Numerical_methods_for_linear_least_squares
+ - https://en.wikipedia.org/wiki/Singular_value_decomposition
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+class PolynomialRegression:
+ __slots__ = "degree", "params"
+
+ def __init__(self, degree: int) -> None:
+ """
+ @raises ValueError: if the polynomial degree is negative
+ """
+ if degree < 0:
+ raise ValueError("Polynomial degree must be non-negative")
+
+ self.degree = degree
+ self.params = None
+
+ @staticmethod
+ def _design_matrix(data: np.ndarray, degree: int) -> np.ndarray:
+ """
+ Constructs a polynomial regression design matrix for the given input data. For
+ input data x = (x₁, x₂, ..., xₙ) and polynomial degree m, the design matrix is
+ the Vandermonde matrix
+
+ |1 x₁ x₁² ⋯ x₁ᵐ|
+ X = |1 x₂ x₂² ⋯ x₂ᵐ|
+ |⋮ ⋮ ⋮ ⋱ ⋮ |
+ |1 xₙ xₙ² ⋯ xₙᵐ|
+
+ Reference: https://en.wikipedia.org/wiki/Vandermonde_matrix
+
+ @param data: the input predictor values x, either for model fitting or for
+ prediction
+ @param degree: the polynomial degree m
+ @returns: the Vandermonde matrix X (see above)
+ @raises ValueError: if input data is not N x 1
+
+ >>> x = np.array([0, 1, 2])
+ >>> PolynomialRegression._design_matrix(x, degree=0)
+ array([[1],
+ [1],
+ [1]])
+ >>> PolynomialRegression._design_matrix(x, degree=1)
+ array([[1, 0],
+ [1, 1],
+ [1, 2]])
+ >>> PolynomialRegression._design_matrix(x, degree=2)
+ array([[1, 0, 0],
+ [1, 1, 1],
+ [1, 2, 4]])
+ >>> PolynomialRegression._design_matrix(x, degree=3)
+ array([[1, 0, 0, 0],
+ [1, 1, 1, 1],
+ [1, 2, 4, 8]])
+ >>> PolynomialRegression._design_matrix(np.array([[0, 0], [0 , 0]]), degree=3)
+ Traceback (most recent call last):
+ ...
+ ValueError: Data must have dimensions N x 1
+ """
+ rows, *remaining = data.shape
+ if remaining:
+ raise ValueError("Data must have dimensions N x 1")
+
+ return np.vander(data, N=degree + 1, increasing=True)
+
+ def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
+ """
+ Computes the polynomial regression model parameters using ordinary least squares
+ (OLS) estimation:
+
+ β = (XᵀX)⁻¹Xᵀy = X⁺y
+
+ where X⁺ denotes the Moore–Penrose pseudoinverse of the design matrix X. This
+ function computes X⁺ using singular value decomposition (SVD).
+
+ References:
+ - https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
+ - https://en.wikipedia.org/wiki/Singular_value_decomposition
+ - https://en.wikipedia.org/wiki/Multicollinearity
+
+ @param x_train: the predictor values x for model fitting
+ @param y_train: the response values y for model fitting
+ @raises ArithmeticError: if X isn't full rank, then XᵀX is singular and β
+ doesn't exist
+
+ >>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+ >>> y = x**3 - 2 * x**2 + 3 * x - 5
+ >>> poly_reg = PolynomialRegression(degree=3)
+ >>> poly_reg.fit(x, y)
+ >>> poly_reg.params
+ array([-5., 3., -2., 1.])
+ >>> poly_reg = PolynomialRegression(degree=20)
+ >>> poly_reg.fit(x, y)
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: Design matrix is not full rank, can't compute coefficients
+
+ Make sure errors don't grow too large:
+ >>> coefs = np.array([-250, 50, -2, 36, 20, -12, 10, 2, -1, -15, 1])
+ >>> y = PolynomialRegression._design_matrix(x, len(coefs) - 1) @ coefs
+ >>> poly_reg = PolynomialRegression(degree=len(coefs) - 1)
+ >>> poly_reg.fit(x, y)
+ >>> np.allclose(poly_reg.params, coefs, atol=10e-3)
+ True
+ """
+ X = PolynomialRegression._design_matrix(x_train, self.degree) # noqa: N806
+ _, cols = X.shape
+ if np.linalg.matrix_rank(X) < cols:
+ raise ArithmeticError(
+ "Design matrix is not full rank, can't compute coefficients"
+ )
+
+ # np.linalg.pinv() computes the Moore–Penrose pseudoinverse using SVD
+ self.params = np.linalg.pinv(X) @ y_train
+
+ def predict(self, data: np.ndarray) -> np.ndarray:
+ """
+ Computes the predicted response values y for the given input data by
+ constructing the design matrix X and evaluating y = Xβ.
+
+ @param data: the predictor values x for prediction
+ @returns: the predicted response values y = Xβ
+ @raises ArithmeticError: if this function is called before the model
+ parameters are fit
+
+ >>> x = np.array([0, 1, 2, 3, 4])
+ >>> y = x**3 - 2 * x**2 + 3 * x - 5
+ >>> poly_reg = PolynomialRegression(degree=3)
+ >>> poly_reg.fit(x, y)
+ >>> poly_reg.predict(np.array([-1]))
+ array([-11.])
+ >>> poly_reg.predict(np.array([-2]))
+ array([-27.])
+ >>> poly_reg.predict(np.array([6]))
+ array([157.])
+ >>> PolynomialRegression(degree=3).predict(x)
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: Predictor hasn't been fit yet
+ """
+ if self.params is None:
+ raise ArithmeticError("Predictor hasn't been fit yet")
+
+ return PolynomialRegression._design_matrix(data, self.degree) @ self.params
+
+
+def main() -> None:
+ """
+ Fit a polynomial regression model to predict fuel efficiency using seaborn's mpg
+ dataset
+
+ >>> pass # Placeholder, function is only for demo purposes
+ """
+ import seaborn as sns
+
+ mpg_data = sns.load_dataset("mpg")
+
+ poly_reg = PolynomialRegression(degree=2)
+ poly_reg.fit(mpg_data.weight, mpg_data.mpg)
+
+ weight_sorted = np.sort(mpg_data.weight)
+ predictions = poly_reg.predict(weight_sorted)
+
+ plt.scatter(mpg_data.weight, mpg_data.mpg, color="gray", alpha=0.5)
+ plt.plot(weight_sorted, predictions, color="red", linewidth=3)
+ plt.title("Predicting Fuel Efficiency Using Polynomial Regression")
+ plt.xlabel("Weight (lbs)")
+ plt.ylabel("Fuel Efficiency (mpg)")
+ plt.show()
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ main()
diff --git a/machine_learning/random_forest_classifier.py b/machine_learning/random_forest_classifier.py.broken.txt
similarity index 91%
rename from machine_learning/random_forest_classifier.py
rename to machine_learning/random_forest_classifier.py.broken.txt
index 637025409..3267fa209 100644
--- a/machine_learning/random_forest_classifier.py
+++ b/machine_learning/random_forest_classifier.py.broken.txt
@@ -17,10 +17,10 @@ def main():
iris = load_iris()
# Split dataset into train and test data
- X = iris["data"] # features
- Y = iris["target"]
+ x = iris["data"] # features
+ y = iris["target"]
x_train, x_test, y_train, y_test = train_test_split(
- X, Y, test_size=0.3, random_state=1
+ x, y, test_size=0.3, random_state=1
)
# Random Forest Classifier
diff --git a/machine_learning/random_forest_regressor.py b/machine_learning/random_forest_regressor.py.broken.txt
similarity index 91%
rename from machine_learning/random_forest_regressor.py
rename to machine_learning/random_forest_regressor.py.broken.txt
index 0aade626b..1001931a1 100644
--- a/machine_learning/random_forest_regressor.py
+++ b/machine_learning/random_forest_regressor.py.broken.txt
@@ -17,10 +17,10 @@ def main():
print(boston.keys())
# Split dataset into train and test data
- X = boston["data"] # features
- Y = boston["target"]
+ x = boston["data"] # features
+ y = boston["target"]
x_train, x_test, y_train, y_test = train_test_split(
- X, Y, test_size=0.3, random_state=1
+ x, y, test_size=0.3, random_state=1
)
# Random Forest Regressor
diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py
new file mode 100644
index 000000000..32fdf1d2b
--- /dev/null
+++ b/machine_learning/self_organizing_map.py
@@ -0,0 +1,72 @@
+"""
+https://en.wikipedia.org/wiki/Self-organizing_map
+"""
+import math
+
+
+class SelfOrganizingMap:
+ def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
+ """
+ Compute the winning vector by Euclidean distance
+
+ >>> SelfOrganizingMap().get_winner([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
+ 1
+ """
+ d0 = 0.0
+ d1 = 0.0
+ for i in range(len(sample)):
+ d0 += math.pow((sample[i] - weights[0][i]), 2)
+ d1 += math.pow((sample[i] - weights[1][i]), 2)
+ return 0 if d0 > d1 else 1
+ return 0
+
+ def update(
+ self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
+ ) -> list[list[int | float]]:
+ """
+ Update the winning vector.
+
+ >>> SelfOrganizingMap().update([[1, 2, 3], [4, 5, 6]], [1, 2, 3], 1, 0.1)
+ [[1, 2, 3], [3.7, 4.7, 6]]
+ """
+ for i in range(len(weights)):
+ weights[j][i] += alpha * (sample[i] - weights[j][i])
+ return weights
+
+
+# Driver code
+def main() -> None:
+ # Training Examples ( m, n )
+ training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
+
+ # weight initialization ( n, C )
+ weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
+
+ # training
+ self_organizing_map = SelfOrganizingMap()
+ epochs = 3
+ alpha = 0.5
+
+ for _ in range(epochs):
+ for j in range(len(training_samples)):
+ # training sample
+ sample = training_samples[j]
+
+ # Compute the winning vector
+ winner = self_organizing_map.get_winner(weights, sample)
+
+ # Update the winning vector
+ weights = self_organizing_map.update(weights, sample, winner, alpha)
+
+ # classify test sample
+ sample = [0, 0, 0, 1]
+ winner = self_organizing_map.get_winner(weights, sample)
+
+ # results
+ print(f"Clusters that the test sample belongs to : {winner}")
+ print(f"Weights that have been trained : {weights}")
+
+
+# running the main() function
+if __name__ == "__main__":
+ main()
diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py
index 98ce05c46..b24f5669e 100644
--- a/machine_learning/sequential_minimum_optimization.py
+++ b/machine_learning/sequential_minimum_optimization.py
@@ -1,632 +1,621 @@
-"""
- Implementation of sequential minimal optimization (SMO) for support vector machines
- (SVM).
-
- Sequential minimal optimization (SMO) is an algorithm for solving the quadratic
- programming (QP) problem that arises during the training of support vector
- machines.
- It was invented by John Platt in 1998.
-
-Input:
- 0: type: numpy.ndarray.
- 1: first column of ndarray must be tags of samples, must be 1 or -1.
- 2: rows of ndarray represent samples.
-
-Usage:
- Command:
- python3 sequential_minimum_optimization.py
- Code:
- from sequential_minimum_optimization import SmoSVM, Kernel
-
- kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5)
- init_alphas = np.zeros(train.shape[0])
- SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4,
- b=0.0, tolerance=0.001)
- SVM.fit()
- predict = SVM.predict(test_samples)
-
-Reference:
- https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf
- https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf
- http://web.cs.iastate.edu/~honavar/smo-svm.pdf
-"""
-
-
-import os
-import sys
-import urllib.request
-
-import numpy as np
-import pandas as pd
-from matplotlib import pyplot as plt
-from sklearn.datasets import make_blobs, make_circles
-from sklearn.preprocessing import StandardScaler
-
-CANCER_DATASET_URL = (
- "http://archive.ics.uci.edu/ml/machine-learning-databases/"
- "breast-cancer-wisconsin/wdbc.data"
-)
-
-
-class SmoSVM:
- def __init__(
- self,
- train,
- kernel_func,
- alpha_list=None,
- cost=0.4,
- b=0.0,
- tolerance=0.001,
- auto_norm=True,
- ):
- self._init = True
- self._auto_norm = auto_norm
- self._c = np.float64(cost)
- self._b = np.float64(b)
- self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001)
-
- self.tags = train[:, 0]
- self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:]
- self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0])
- self.Kernel = kernel_func
-
- self._eps = 0.001
- self._all_samples = list(range(self.length))
- self._K_matrix = self._calculate_k_matrix()
- self._error = np.zeros(self.length)
- self._unbound = []
-
- self.choose_alpha = self._choose_alphas()
-
- # Calculate alphas using SMO algorithm
- def fit(self):
- K = self._k
- state = None
- while True:
-
- # 1: Find alpha1, alpha2
- try:
- i1, i2 = self.choose_alpha.send(state)
- state = None
- except StopIteration:
- print("Optimization done!\nEvery sample satisfy the KKT condition!")
- break
-
- # 2: calculate new alpha2 and new alpha1
- y1, y2 = self.tags[i1], self.tags[i2]
- a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy()
- e1, e2 = self._e(i1), self._e(i2)
- args = (i1, i2, a1, a2, e1, e2, y1, y2)
- a1_new, a2_new = self._get_new_alpha(*args)
- if not a1_new and not a2_new:
- state = False
- continue
- self.alphas[i1], self.alphas[i2] = a1_new, a2_new
-
- # 3: update threshold(b)
- b1_new = np.float64(
- -e1
- - y1 * K(i1, i1) * (a1_new - a1)
- - y2 * K(i2, i1) * (a2_new - a2)
- + self._b
- )
- b2_new = np.float64(
- -e2
- - y2 * K(i2, i2) * (a2_new - a2)
- - y1 * K(i1, i2) * (a1_new - a1)
- + self._b
- )
- if 0.0 < a1_new < self._c:
- b = b1_new
- if 0.0 < a2_new < self._c:
- b = b2_new
- if not (np.float64(0) < a2_new < self._c) and not (
- np.float64(0) < a1_new < self._c
- ):
- b = (b1_new + b2_new) / 2.0
- b_old = self._b
- self._b = b
-
- # 4: update error value,here we only calculate those non-bound samples'
- # error
- self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
- for s in self.unbound:
- if s == i1 or s == i2:
- continue
- self._error[s] += (
- y1 * (a1_new - a1) * K(i1, s)
- + y2 * (a2_new - a2) * K(i2, s)
- + (self._b - b_old)
- )
-
- # if i1 or i2 is non-bound,update there error value to zero
- if self._is_unbound(i1):
- self._error[i1] = 0
- if self._is_unbound(i2):
- self._error[i2] = 0
-
- # Predict test samles
- def predict(self, test_samples, classify=True):
-
- if test_samples.shape[1] > self.samples.shape[1]:
- raise ValueError(
- "Test samples' feature length does not equal to that of train samples"
- )
-
- if self._auto_norm:
- test_samples = self._norm(test_samples)
-
- results = []
- for test_sample in test_samples:
- result = self._predict(test_sample)
- if classify:
- results.append(1 if result > 0 else -1)
- else:
- results.append(result)
- return np.array(results)
-
- # Check if alpha violate KKT condition
- def _check_obey_kkt(self, index):
- alphas = self.alphas
- tol = self._tol
- r = self._e(index) * self.tags[index]
- c = self._c
-
- return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0)
-
- # Get value calculated from kernel function
- def _k(self, i1, i2):
- # for test samples,use Kernel function
- if isinstance(i2, np.ndarray):
- return self.Kernel(self.samples[i1], i2)
- # for train samples,Kernel values have been saved in matrix
- else:
- return self._K_matrix[i1, i2]
-
- # Get sample's error
- def _e(self, index):
- """
- Two cases:
- 1:Sample[index] is non-bound,Fetch error from list: _error
- 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi
-
- """
- # get from error data
- if self._is_unbound(index):
- return self._error[index]
- # get by g(xi) - yi
- else:
- gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
- yi = self.tags[index]
- return gx - yi
-
- # Calculate Kernel matrix of all possible i1,i2 ,saving time
- def _calculate_k_matrix(self):
- k_matrix = np.zeros([self.length, self.length])
- for i in self._all_samples:
- for j in self._all_samples:
- k_matrix[i, j] = np.float64(
- self.Kernel(self.samples[i, :], self.samples[j, :])
- )
- return k_matrix
-
- # Predict test sample's tag
- def _predict(self, sample):
- k = self._k
- predicted_value = (
- np.sum(
- [
- self.alphas[i1] * self.tags[i1] * k(i1, sample)
- for i1 in self._all_samples
- ]
- )
- + self._b
- )
- return predicted_value
-
- # Choose alpha1 and alpha2
- def _choose_alphas(self):
- locis = yield from self._choose_a1()
- if not locis:
- return
- return locis
-
- def _choose_a1(self):
- """
- Choose first alpha ;steps:
- 1:First loop over all sample
- 2:Second loop over all non-bound samples till all non-bound samples does not
- voilate kkt condition.
- 3:Repeat this two process endlessly,till all samples does not voilate kkt
- condition samples after first loop.
- """
- while True:
- all_not_obey = True
- # all sample
- print("scanning all sample!")
- for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
- all_not_obey = False
- yield from self._choose_a2(i1)
-
- # non-bound sample
- print("scanning non-bound sample!")
- while True:
- not_obey = True
- for i1 in [
- i
- for i in self._all_samples
- if self._check_obey_kkt(i) and self._is_unbound(i)
- ]:
- not_obey = False
- yield from self._choose_a2(i1)
- if not_obey:
- print("all non-bound samples fit the KKT condition!")
- break
- if all_not_obey:
- print("all samples fit the KKT condition! Optimization done!")
- break
- return False
-
- def _choose_a2(self, i1):
- """
- Choose the second alpha by using heuristic algorithm ;steps:
- 1: Choose alpha2 which gets the maximum step size (|E1 - E2|).
- 2: Start in a random point,loop over all non-bound samples till alpha1 and
- alpha2 are optimized.
- 3: Start in a random point,loop over all samples till alpha1 and alpha2 are
- optimized.
- """
- self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
-
- if len(self.unbound) > 0:
- tmp_error = self._error.copy().tolist()
- tmp_error_dict = {
- index: value
- for index, value in enumerate(tmp_error)
- if self._is_unbound(index)
- }
- if self._e(i1) >= 0:
- i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index])
- else:
- i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index])
- cmd = yield i1, i2
- if cmd is None:
- return
-
- for i2 in np.roll(self.unbound, np.random.choice(self.length)):
- cmd = yield i1, i2
- if cmd is None:
- return
-
- for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
- cmd = yield i1, i2
- if cmd is None:
- return
-
- # Get the new alpha2 and new alpha1
- def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
- K = self._k
- if i1 == i2:
- return None, None
-
- # calculate L and H which bound the new alpha2
- s = y1 * y2
- if s == -1:
- L, H = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
- else:
- L, H = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
- if L == H:
- return None, None
-
- # calculate eta
- k11 = K(i1, i1)
- k22 = K(i2, i2)
- k12 = K(i1, i2)
- eta = k11 + k22 - 2.0 * k12
-
- # select the new alpha2 which could get the minimal objectives
- if eta > 0.0:
- a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
- # a2_new has a boundary
- if a2_new_unc >= H:
- a2_new = H
- elif a2_new_unc <= L:
- a2_new = L
- else:
- a2_new = a2_new_unc
- else:
- b = self._b
- l1 = a1 + s * (a2 - L)
- h1 = a1 + s * (a2 - H)
-
- # way 1
- f1 = y1 * (e1 + b) - a1 * K(i1, i1) - s * a2 * K(i1, i2)
- f2 = y2 * (e2 + b) - a2 * K(i2, i2) - s * a1 * K(i1, i2)
- ol = (
- l1 * f1
- + L * f2
- + 1 / 2 * l1 ** 2 * K(i1, i1)
- + 1 / 2 * L ** 2 * K(i2, i2)
- + s * L * l1 * K(i1, i2)
- )
- oh = (
- h1 * f1
- + H * f2
- + 1 / 2 * h1 ** 2 * K(i1, i1)
- + 1 / 2 * H ** 2 * K(i2, i2)
- + s * H * h1 * K(i1, i2)
- )
- """
- # way 2
- Use objective function check which alpha2 new could get the minimal
- objectives
- """
- if ol < (oh - self._eps):
- a2_new = L
- elif ol > oh + self._eps:
- a2_new = H
- else:
- a2_new = a2
-
- # a1_new has a boundary too
- a1_new = a1 + s * (a2 - a2_new)
- if a1_new < 0:
- a2_new += s * a1_new
- a1_new = 0
- if a1_new > self._c:
- a2_new += s * (a1_new - self._c)
- a1_new = self._c
-
- return a1_new, a2_new
-
- # Normalise data using min_max way
- def _norm(self, data):
- if self._init:
- self._min = np.min(data, axis=0)
- self._max = np.max(data, axis=0)
- self._init = False
- return (data - self._min) / (self._max - self._min)
- else:
- return (data - self._min) / (self._max - self._min)
-
- def _is_unbound(self, index):
- if 0.0 < self.alphas[index] < self._c:
- return True
- else:
- return False
-
- def _is_support(self, index):
- if self.alphas[index] > 0:
- return True
- else:
- return False
-
- @property
- def unbound(self):
- return self._unbound
-
- @property
- def support(self):
- return [i for i in range(self.length) if self._is_support(i)]
-
- @property
- def length(self):
- return self.samples.shape[0]
-
-
-class Kernel:
- def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0):
- self.degree = np.float64(degree)
- self.coef0 = np.float64(coef0)
- self.gamma = np.float64(gamma)
- self._kernel_name = kernel
- self._kernel = self._get_kernel(kernel_name=kernel)
- self._check()
-
- def _polynomial(self, v1, v2):
- return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree
-
- def _linear(self, v1, v2):
- return np.inner(v1, v2) + self.coef0
-
- def _rbf(self, v1, v2):
- return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2))
-
- def _check(self):
- if self._kernel == self._rbf:
- if self.gamma < 0:
- raise ValueError("gamma value must greater than 0")
-
- def _get_kernel(self, kernel_name):
- maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf}
- return maps[kernel_name]
-
- def __call__(self, v1, v2):
- return self._kernel(v1, v2)
-
- def __repr__(self):
- return self._kernel_name
-
-
-def count_time(func):
- def call_func(*args, **kwargs):
- import time
-
- start_time = time.time()
- func(*args, **kwargs)
- end_time = time.time()
- print(f"smo algorithm cost {end_time - start_time} seconds")
-
- return call_func
-
-
-@count_time
-def test_cancel_data():
- print("Hello!\nStart test svm by smo algorithm!")
- # 0: download dataset and load into pandas' dataframe
- if not os.path.exists(r"cancel_data.csv"):
- request = urllib.request.Request(
- CANCER_DATASET_URL,
- headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"},
- )
- response = urllib.request.urlopen(request)
- content = response.read().decode("utf-8")
- with open(r"cancel_data.csv", "w") as f:
- f.write(content)
-
- data = pd.read_csv(r"cancel_data.csv", header=None)
-
- # 1: pre-processing data
- del data[data.columns.tolist()[0]]
- data = data.dropna(axis=0)
- data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
- samples = np.array(data)[:, :]
-
- # 2: dividing data into train_data data and test_data data
- train_data, test_data = samples[:328, :], samples[328:, :]
- test_tags, test_samples = test_data[:, 0], test_data[:, 1:]
-
- # 3: choose kernel function,and set initial alphas to zero(optional)
- mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
- al = np.zeros(train_data.shape[0])
-
- # 4: calculating best alphas using SMO algorithm and predict test_data samples
- mysvm = SmoSVM(
- train=train_data,
- kernel_func=mykernel,
- alpha_list=al,
- cost=0.4,
- b=0.0,
- tolerance=0.001,
- )
- mysvm.fit()
- predict = mysvm.predict(test_samples)
-
- # 5: check accuracy
- score = 0
- test_num = test_tags.shape[0]
- for i in range(test_tags.shape[0]):
- if test_tags[i] == predict[i]:
- score += 1
- print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}")
- print(f"Rough Accuracy: {score / test_tags.shape[0]}")
-
-
-def test_demonstration():
- # change stdout
- print("\nStart plot,please wait!!!")
- sys.stdout = open(os.devnull, "w")
-
- ax1 = plt.subplot2grid((2, 2), (0, 0))
- ax2 = plt.subplot2grid((2, 2), (0, 1))
- ax3 = plt.subplot2grid((2, 2), (1, 0))
- ax4 = plt.subplot2grid((2, 2), (1, 1))
- ax1.set_title("linear svm,cost:0.1")
- test_linear_kernel(ax1, cost=0.1)
- ax2.set_title("linear svm,cost:500")
- test_linear_kernel(ax2, cost=500)
- ax3.set_title("rbf kernel svm,cost:0.1")
- test_rbf_kernel(ax3, cost=0.1)
- ax4.set_title("rbf kernel svm,cost:500")
- test_rbf_kernel(ax4, cost=500)
-
- sys.stdout = sys.__stdout__
- print("Plot done!!!")
-
-
-def test_linear_kernel(ax, cost):
- train_x, train_y = make_blobs(
- n_samples=500, centers=2, n_features=2, random_state=1
- )
- train_y[train_y == 0] = -1
- scaler = StandardScaler()
- train_x_scaled = scaler.fit_transform(train_x, train_y)
- train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
- mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
- mysvm = SmoSVM(
- train=train_data,
- kernel_func=mykernel,
- cost=cost,
- tolerance=0.001,
- auto_norm=False,
- )
- mysvm.fit()
- plot_partition_boundary(mysvm, train_data, ax=ax)
-
-
-def test_rbf_kernel(ax, cost):
- train_x, train_y = make_circles(
- n_samples=500, noise=0.1, factor=0.1, random_state=1
- )
- train_y[train_y == 0] = -1
- scaler = StandardScaler()
- train_x_scaled = scaler.fit_transform(train_x, train_y)
- train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
- mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
- mysvm = SmoSVM(
- train=train_data,
- kernel_func=mykernel,
- cost=cost,
- tolerance=0.001,
- auto_norm=False,
- )
- mysvm.fit()
- plot_partition_boundary(mysvm, train_data, ax=ax)
-
-
-def plot_partition_boundary(
- model, train_data, ax, resolution=100, colors=("b", "k", "r")
-):
- """
- We can not get the optimum w of our kernel svm model which is different from linear
- svm. For this reason, we generate randomly distributed points with high desity and
- prediced values of these points are calculated by using our tained model. Then we
- could use this prediced values to draw contour map.
- And this contour map can represent svm's partition boundary.
- """
- train_data_x = train_data[:, 1]
- train_data_y = train_data[:, 2]
- train_data_tags = train_data[:, 0]
- xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
- yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
- test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
- resolution * resolution, 2
- )
-
- test_tags = model.predict(test_samples, classify=False)
- grid = test_tags.reshape((len(xrange), len(yrange)))
-
- # Plot contour map which represents the partition boundary
- ax.contour(
- xrange,
- yrange,
- np.mat(grid).T,
- levels=(-1, 0, 1),
- linestyles=("--", "-", "--"),
- linewidths=(1, 1, 1),
- colors=colors,
- )
- # Plot all train samples
- ax.scatter(
- train_data_x,
- train_data_y,
- c=train_data_tags,
- cmap=plt.cm.Dark2,
- lw=0,
- alpha=0.5,
- )
-
- # Plot support vectors
- support = model.support
- ax.scatter(
- train_data_x[support],
- train_data_y[support],
- c=train_data_tags[support],
- cmap=plt.cm.Dark2,
- )
-
-
-if __name__ == "__main__":
- test_cancel_data()
- test_demonstration()
- plt.show()
+"""
+ Implementation of sequential minimal optimization (SMO) for support vector machines
+ (SVM).
+
+ Sequential minimal optimization (SMO) is an algorithm for solving the quadratic
+ programming (QP) problem that arises during the training of support vector
+ machines.
+ It was invented by John Platt in 1998.
+
+Input:
+ 0: type: numpy.ndarray.
+ 1: first column of ndarray must be tags of samples, must be 1 or -1.
+ 2: rows of ndarray represent samples.
+
+Usage:
+ Command:
+ python3 sequential_minimum_optimization.py
+ Code:
+ from sequential_minimum_optimization import SmoSVM, Kernel
+
+ kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5)
+ init_alphas = np.zeros(train.shape[0])
+ SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4,
+ b=0.0, tolerance=0.001)
+ SVM.fit()
+ predict = SVM.predict(test_samples)
+
+Reference:
+ https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf
+ https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf
+"""
+
+
+import os
+import sys
+import urllib.request
+
+import numpy as np
+import pandas as pd
+from matplotlib import pyplot as plt
+from sklearn.datasets import make_blobs, make_circles
+from sklearn.preprocessing import StandardScaler
+
+CANCER_DATASET_URL = (
+ "https://archive.ics.uci.edu/ml/machine-learning-databases/"
+ "breast-cancer-wisconsin/wdbc.data"
+)
+
+
+class SmoSVM:
+ def __init__(
+ self,
+ train,
+ kernel_func,
+ alpha_list=None,
+ cost=0.4,
+ b=0.0,
+ tolerance=0.001,
+ auto_norm=True,
+ ):
+ self._init = True
+ self._auto_norm = auto_norm
+ self._c = np.float64(cost)
+ self._b = np.float64(b)
+ self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001)
+
+ self.tags = train[:, 0]
+ self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:]
+ self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0])
+ self.Kernel = kernel_func
+
+ self._eps = 0.001
+ self._all_samples = list(range(self.length))
+ self._K_matrix = self._calculate_k_matrix()
+ self._error = np.zeros(self.length)
+ self._unbound = []
+
+ self.choose_alpha = self._choose_alphas()
+
+ # Calculate alphas using SMO algorithm
+ def fit(self):
+ k = self._k
+ state = None
+ while True:
+ # 1: Find alpha1, alpha2
+ try:
+ i1, i2 = self.choose_alpha.send(state)
+ state = None
+ except StopIteration:
+ print("Optimization done!\nEvery sample satisfy the KKT condition!")
+ break
+
+ # 2: calculate new alpha2 and new alpha1
+ y1, y2 = self.tags[i1], self.tags[i2]
+ a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy()
+ e1, e2 = self._e(i1), self._e(i2)
+ args = (i1, i2, a1, a2, e1, e2, y1, y2)
+ a1_new, a2_new = self._get_new_alpha(*args)
+ if not a1_new and not a2_new:
+ state = False
+ continue
+ self.alphas[i1], self.alphas[i2] = a1_new, a2_new
+
+ # 3: update threshold(b)
+ b1_new = np.float64(
+ -e1
+ - y1 * k(i1, i1) * (a1_new - a1)
+ - y2 * k(i2, i1) * (a2_new - a2)
+ + self._b
+ )
+ b2_new = np.float64(
+ -e2
+ - y2 * k(i2, i2) * (a2_new - a2)
+ - y1 * k(i1, i2) * (a1_new - a1)
+ + self._b
+ )
+ if 0.0 < a1_new < self._c:
+ b = b1_new
+ if 0.0 < a2_new < self._c:
+ b = b2_new
+ if not (np.float64(0) < a2_new < self._c) and not (
+ np.float64(0) < a1_new < self._c
+ ):
+ b = (b1_new + b2_new) / 2.0
+ b_old = self._b
+ self._b = b
+
+ # 4: update error value,here we only calculate those non-bound samples'
+ # error
+ self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
+ for s in self.unbound:
+ if s in (i1, i2):
+ continue
+ self._error[s] += (
+ y1 * (a1_new - a1) * k(i1, s)
+ + y2 * (a2_new - a2) * k(i2, s)
+ + (self._b - b_old)
+ )
+
+ # if i1 or i2 is non-bound, update their error value to zero
+ if self._is_unbound(i1):
+ self._error[i1] = 0
+ if self._is_unbound(i2):
+ self._error[i2] = 0
+
+ # Predict test samples
+ def predict(self, test_samples, classify=True):
+ if test_samples.shape[1] > self.samples.shape[1]:
+ raise ValueError(
+ "Test samples' feature length does not equal to that of train samples"
+ )
+
+ if self._auto_norm:
+ test_samples = self._norm(test_samples)
+
+ results = []
+ for test_sample in test_samples:
+ result = self._predict(test_sample)
+ if classify:
+ results.append(1 if result > 0 else -1)
+ else:
+ results.append(result)
+ return np.array(results)
+
+ # Check if alpha violate KKT condition
+ def _check_obey_kkt(self, index):
+ alphas = self.alphas
+ tol = self._tol
+ r = self._e(index) * self.tags[index]
+ c = self._c
+
+ return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0)
+
+ # Get value calculated from kernel function
+ def _k(self, i1, i2):
+ # for test samples,use Kernel function
+ if isinstance(i2, np.ndarray):
+ return self.Kernel(self.samples[i1], i2)
+ # for train samples,Kernel values have been saved in matrix
+ else:
+ return self._K_matrix[i1, i2]
+
+ # Get sample's error
+ def _e(self, index):
+ """
+ Two cases:
+ 1:Sample[index] is non-bound,Fetch error from list: _error
+ 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi
+
+ """
+ # get from error data
+ if self._is_unbound(index):
+ return self._error[index]
+ # get by g(xi) - yi
+ else:
+ gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
+ yi = self.tags[index]
+ return gx - yi
+
+ # Calculate Kernel matrix of all possible i1,i2 ,saving time
+ def _calculate_k_matrix(self):
+ k_matrix = np.zeros([self.length, self.length])
+ for i in self._all_samples:
+ for j in self._all_samples:
+ k_matrix[i, j] = np.float64(
+ self.Kernel(self.samples[i, :], self.samples[j, :])
+ )
+ return k_matrix
+
+ # Predict test sample's tag
+ def _predict(self, sample):
+ k = self._k
+ predicted_value = (
+ np.sum(
+ [
+ self.alphas[i1] * self.tags[i1] * k(i1, sample)
+ for i1 in self._all_samples
+ ]
+ )
+ + self._b
+ )
+ return predicted_value
+
+ # Choose alpha1 and alpha2
+ def _choose_alphas(self):
+ locis = yield from self._choose_a1()
+ if not locis:
+ return None
+ return locis
+
+ def _choose_a1(self):
+ """
+ Choose first alpha ;steps:
+ 1:First loop over all sample
+ 2:Second loop over all non-bound samples till all non-bound samples do not
+ violate the KKT condition.
+ 3:Repeat these two processes endlessly, till all samples satisfy the KKT
+ condition after the first loop.
+ """
+ while True:
+ all_not_obey = True
+ # all sample
+ print("scanning all sample!")
+ for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
+ all_not_obey = False
+ yield from self._choose_a2(i1)
+
+ # non-bound sample
+ print("scanning non-bound sample!")
+ while True:
+ not_obey = True
+ for i1 in [
+ i
+ for i in self._all_samples
+ if self._check_obey_kkt(i) and self._is_unbound(i)
+ ]:
+ not_obey = False
+ yield from self._choose_a2(i1)
+ if not_obey:
+ print("all non-bound samples fit the KKT condition!")
+ break
+ if all_not_obey:
+ print("all samples fit the KKT condition! Optimization done!")
+ break
+ return False
+
+ def _choose_a2(self, i1):
+ """
+ Choose the second alpha by using heuristic algorithm ;steps:
+ 1: Choose alpha2 which gets the maximum step size (|E1 - E2|).
+ 2: Start in a random point,loop over all non-bound samples till alpha1 and
+ alpha2 are optimized.
+ 3: Start in a random point,loop over all samples till alpha1 and alpha2 are
+ optimized.
+ """
+ self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
+
+ if len(self.unbound) > 0:
+ tmp_error = self._error.copy().tolist()
+ tmp_error_dict = {
+ index: value
+ for index, value in enumerate(tmp_error)
+ if self._is_unbound(index)
+ }
+ if self._e(i1) >= 0:
+ i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index])
+ else:
+ i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index])
+ cmd = yield i1, i2
+ if cmd is None:
+ return
+
+ for i2 in np.roll(self.unbound, np.random.choice(self.length)):
+ cmd = yield i1, i2
+ if cmd is None:
+ return
+
+ for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
+ cmd = yield i1, i2
+ if cmd is None:
+ return
+
+ # Get the new alpha2 and new alpha1
+ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
+ k = self._k
+ if i1 == i2:
+ return None, None
+
+ # calculate L and H which bound the new alpha2
+ s = y1 * y2
+ if s == -1:
+ l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
+ else:
+ l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
+ if l == h:
+ return None, None
+
+ # calculate eta
+ k11 = k(i1, i1)
+ k22 = k(i2, i2)
+ k12 = k(i1, i2)
+
+ # select the new alpha2 which could get the minimal objectives
+ if (eta := k11 + k22 - 2.0 * k12) > 0.0:
+ a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
+ # a2_new has a boundary
+ if a2_new_unc >= h:
+ a2_new = h
+ elif a2_new_unc <= l:
+ a2_new = l
+ else:
+ a2_new = a2_new_unc
+ else:
+ b = self._b
+ l1 = a1 + s * (a2 - l)
+ h1 = a1 + s * (a2 - h)
+
+ # way 1
+ f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2)
+ f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2)
+ ol = (
+ l1 * f1
+ + l * f2
+ + 1 / 2 * l1**2 * k(i1, i1)
+ + 1 / 2 * l**2 * k(i2, i2)
+ + s * l * l1 * k(i1, i2)
+ )
+ oh = (
+ h1 * f1
+ + h * f2
+ + 1 / 2 * h1**2 * k(i1, i1)
+ + 1 / 2 * h**2 * k(i2, i2)
+ + s * h * h1 * k(i1, i2)
+ )
+ """
+ # way 2
+ Use the objective function to check which new alpha2 could get the
+ minimal objective value
+ """
+ if ol < (oh - self._eps):
+ a2_new = l
+ elif ol > oh + self._eps:
+ a2_new = h
+ else:
+ a2_new = a2
+
+ # a1_new has a boundary too
+ a1_new = a1 + s * (a2 - a2_new)
+ if a1_new < 0:
+ a2_new += s * a1_new
+ a1_new = 0
+ if a1_new > self._c:
+ a2_new += s * (a1_new - self._c)
+ a1_new = self._c
+
+ return a1_new, a2_new
+
+ # Normalise data using min_max way
+ def _norm(self, data):
+ if self._init:
+ self._min = np.min(data, axis=0)
+ self._max = np.max(data, axis=0)
+ self._init = False
+ return (data - self._min) / (self._max - self._min)
+ else:
+ return (data - self._min) / (self._max - self._min)
+
+ def _is_unbound(self, index):
+ return bool(0.0 < self.alphas[index] < self._c)
+
+ def _is_support(self, index):
+ return bool(self.alphas[index] > 0)
+
+ @property
+ def unbound(self):
+ return self._unbound
+
+ @property
+ def support(self):
+ return [i for i in range(self.length) if self._is_support(i)]
+
+ @property
+ def length(self):
+ return self.samples.shape[0]
+
+
+class Kernel:
+ def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0):
+ self.degree = np.float64(degree)
+ self.coef0 = np.float64(coef0)
+ self.gamma = np.float64(gamma)
+ self._kernel_name = kernel
+ self._kernel = self._get_kernel(kernel_name=kernel)
+ self._check()
+
+ def _polynomial(self, v1, v2):
+ return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree
+
+ def _linear(self, v1, v2):
+ return np.inner(v1, v2) + self.coef0
+
+ def _rbf(self, v1, v2):
+ return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2))
+
+ def _check(self):
+ if self._kernel == self._rbf and self.gamma < 0:
+ raise ValueError("gamma value must greater than 0")
+
+ def _get_kernel(self, kernel_name):
+ maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf}
+ return maps[kernel_name]
+
+ def __call__(self, v1, v2):
+ return self._kernel(v1, v2)
+
+ def __repr__(self):
+ return self._kernel_name
+
+
+def count_time(func):
+ def call_func(*args, **kwargs):
+ import time
+
+ start_time = time.time()
+ func(*args, **kwargs)
+ end_time = time.time()
+ print(f"smo algorithm cost {end_time - start_time} seconds")
+
+ return call_func
+
+
@count_time
def test_cancel_data():
    """Download the cancer dataset (the 'cancel' spelling is a historical typo
    kept in file names and this function's name for compatibility), train an
    SMO SVM on the first 328 rows and report accuracy on the remainder."""
    print("Hello!\nStart test svm by smo algorithm!")
    # 0: download dataset and load into pandas' dataframe
    if not os.path.exists(r"cancel_data.csv"):
        # NOTE(review): browser User-Agent is spoofed — presumably the host
        # rejects urllib's default agent; confirm before changing.
        request = urllib.request.Request(
            CANCER_DATASET_URL,
            headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"},
        )
        response = urllib.request.urlopen(request)  # noqa: S310
        content = response.read().decode("utf-8")
        with open(r"cancel_data.csv", "w") as f:
            f.write(content)

    data = pd.read_csv(r"cancel_data.csv", header=None)

    # 1: pre-processing data
    # Drop the first column (presumably a sample id — verify against the CSV),
    # then map the class labels "M"/"B" onto the +1/-1 tags SmoSVM expects.
    del data[data.columns.tolist()[0]]
    data = data.dropna(axis=0)
    data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
    samples = np.array(data)[:, :]

    # 2: dividing data into train_data data and test_data data
    train_data, test_data = samples[:328, :], samples[328:, :]
    test_tags, test_samples = test_data[:, 0], test_data[:, 1:]

    # 3: choose kernel function,and set initial alphas to zero(optional)
    mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
    al = np.zeros(train_data.shape[0])

    # 4: calculating best alphas using SMO algorithm and predict test_data samples
    mysvm = SmoSVM(
        train=train_data,
        kernel_func=mykernel,
        alpha_list=al,
        cost=0.4,
        b=0.0,
        tolerance=0.001,
    )
    mysvm.fit()
    predict = mysvm.predict(test_samples)

    # 5: check accuracy
    score = 0
    test_num = test_tags.shape[0]
    for i in range(test_tags.shape[0]):
        if test_tags[i] == predict[i]:
            score += 1
    print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}")
    print(f"Rough Accuracy: {score / test_tags.shape[0]}")
+
+
def test_demonstration():
    """Plot four demo SVMs (linear/rbf kernel at low/high cost) on a 2x2 grid.

    Stdout is redirected to os.devnull while the models fit (the SMO code
    prints progress).  Fix: the original opened devnull without ever closing
    it and did not restore ``sys.stdout`` if a fit raised; the ``try/finally``
    guarantees both.
    """
    # change stdout
    print("\nStart plot,please wait!!!")
    devnull = open(os.devnull, "w")
    sys.stdout = devnull
    try:
        ax1 = plt.subplot2grid((2, 2), (0, 0))
        ax2 = plt.subplot2grid((2, 2), (0, 1))
        ax3 = plt.subplot2grid((2, 2), (1, 0))
        ax4 = plt.subplot2grid((2, 2), (1, 1))
        ax1.set_title("linear svm,cost:0.1")
        test_linear_kernel(ax1, cost=0.1)
        ax2.set_title("linear svm,cost:500")
        test_linear_kernel(ax2, cost=500)
        ax3.set_title("rbf kernel svm,cost:0.1")
        test_rbf_kernel(ax3, cost=0.1)
        ax4.set_title("rbf kernel svm,cost:500")
        test_rbf_kernel(ax4, cost=500)
    finally:
        # Always restore stdout and release the devnull handle.
        sys.stdout = sys.__stdout__
        devnull.close()
    print("Plot done!!!")
+
+
def test_linear_kernel(ax, cost):
    """Fit a linear-kernel SmoSVM on two separable blobs and draw it on ``ax``."""
    # Two blobs; labels converted from {0, 1} to the {-1, 1} tags SmoSVM uses.
    train_x, train_y = make_blobs(
        n_samples=500, centers=2, n_features=2, random_state=1
    )
    train_y[train_y == 0] = -1
    scaler = StandardScaler()
    train_x_scaled = scaler.fit_transform(train_x, train_y)
    # SmoSVM expects the tag in column 0 and the features in the remaining columns.
    train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
    # Only coef0 matters to the linear kernel (see Kernel._linear);
    # degree and gamma are ignored.
    mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
    mysvm = SmoSVM(
        train=train_data,
        kernel_func=mykernel,
        cost=cost,
        tolerance=0.001,
        auto_norm=False,
    )
    mysvm.fit()
    plot_partition_boundary(mysvm, train_data, ax=ax)
+
+
def test_rbf_kernel(ax, cost):
    """Fit an rbf-kernel SmoSVM on concentric circles and draw it on ``ax``."""
    # Concentric circles are not linearly separable, which is what makes
    # them a good rbf showcase; labels converted from {0, 1} to {-1, 1}.
    train_x, train_y = make_circles(
        n_samples=500, noise=0.1, factor=0.1, random_state=1
    )
    train_y[train_y == 0] = -1
    scaler = StandardScaler()
    train_x_scaled = scaler.fit_transform(train_x, train_y)
    # SmoSVM expects the tag in column 0 and the features in the remaining columns.
    train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
    # Only gamma matters to the rbf kernel (see Kernel._rbf);
    # degree and coef0 are ignored.
    mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
    mysvm = SmoSVM(
        train=train_data,
        kernel_func=mykernel,
        cost=cost,
        tolerance=0.001,
        auto_norm=False,
    )
    mysvm.fit()
    plot_partition_boundary(mysvm, train_data, ax=ax)
+
+
def plot_partition_boundary(
    model, train_data, ax, resolution=100, colors=("b", "k", "r")
):
    """
    We can not get the optimum w of our kernel svm model which is different from
    linear svm. For this reason, we generate randomly distributed points with high
    density and predicted values of these points are calculated by using our
    trained model. Then we could use these predicted values to draw contour map.
    And this contour map can represent svm's partition boundary.
    """
    train_data_x = train_data[:, 1]
    train_data_y = train_data[:, 2]
    train_data_tags = train_data[:, 0]
    xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
    yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
    test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
        resolution * resolution, 2
    )

    test_tags = model.predict(test_samples, classify=False)
    grid = test_tags.reshape((len(xrange), len(yrange)))

    # Plot contour map which represents the partition boundary.
    # np.asarray replaces the deprecated np.mat: numpy.matrix is removed in
    # NumPy 2.x, and contour only needs a 2-D array, so the output is identical.
    ax.contour(
        xrange,
        yrange,
        np.asarray(grid).T,
        levels=(-1, 0, 1),
        linestyles=("--", "-", "--"),
        linewidths=(1, 1, 1),
        colors=colors,
    )
    # Plot all train samples
    ax.scatter(
        train_data_x,
        train_data_y,
        c=train_data_tags,
        cmap=plt.cm.Dark2,
        lw=0,
        alpha=0.5,
    )

    # Plot support vectors
    support = model.support
    ax.scatter(
        train_data_x[support],
        train_data_y[support],
        c=train_data_tags[support],
        cmap=plt.cm.Dark2,
    )
+
+
+if __name__ == "__main__":
+ test_cancel_data()
+ test_demonstration()
+ plt.show()
diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py
index af845c910..7a23ec463 100644
--- a/machine_learning/similarity_search.py
+++ b/machine_learning/similarity_search.py
@@ -7,10 +7,12 @@ returns a list containing two data for each vector:
1. the nearest vector
2. distance between the vector and the nearest vector (float)
"""
+from __future__ import annotations
+
import math
-from typing import List, Union
import numpy as np
+from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
@@ -33,7 +35,7 @@ def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
def similarity_search(
dataset: np.ndarray, value_array: np.ndarray
-) -> List[List[Union[List[float], float]]]:
+) -> list[list[list[float] | float]]:
"""
:param dataset: Set containing the vectors. Should be ndarray.
:param value_array: vector/vectors we want to know the nearest vector from dataset.
@@ -69,7 +71,7 @@ def similarity_search(
>>> value_array = np.array([1])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
- ...
+ ...
ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1
2. If data's shapes are different.
@@ -79,7 +81,7 @@ def similarity_search(
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
- ...
+ ...
ValueError: Wrong input data's shape... dataset : 2, value_array : 3
3. If data types are different.
@@ -89,32 +91,35 @@ def similarity_search(
>>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32)
>>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
- ...
+ ...
TypeError: Input data have different datatype...
dataset : float32, value_array : int32
"""
if dataset.ndim != value_array.ndim:
- raise ValueError(
- f"Wrong input data's dimensions... dataset : {dataset.ndim}, "
- f"value_array : {value_array.ndim}"
+ msg = (
+ "Wrong input data's dimensions... "
+ f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
+ raise ValueError(msg)
try:
if dataset.shape[1] != value_array.shape[1]:
- raise ValueError(
- f"Wrong input data's shape... dataset : {dataset.shape[1]}, "
- f"value_array : {value_array.shape[1]}"
+ msg = (
+ "Wrong input data's shape... "
+ f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
+ raise ValueError(msg)
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape")
if dataset.dtype != value_array.dtype:
- raise TypeError(
- f"Input data have different datatype... dataset : {dataset.dtype}, "
- f"value_array : {value_array.dtype}"
+ msg = (
+ "Input data have different datatype... "
+ f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
+ raise TypeError(msg)
answer = []
@@ -134,6 +139,22 @@ def similarity_search(
return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates cosine similarity between two data.
    :param input_a: ndarray of first vector.
    :param input_b: ndarray of second vector.
    :return: Cosine similarity of input_a and input_b: the dot product of the
        two vectors divided by the product of their Euclidean norms
        (the stale "math.sqrt()" note copied from euclidean() was wrong).

    >>> cosine_similarity(np.array([1]), np.array([1]))
    1.0
    >>> cosine_similarity(np.array([1, 2]), np.array([6, 32]))
    0.9615239476408232
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
+
+
if __name__ == "__main__":
import doctest
diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py
index c5e5085d8..24046115e 100644
--- a/machine_learning/support_vector_machines.py
+++ b/machine_learning/support_vector_machines.py
@@ -1,55 +1,203 @@
-from sklearn import svm
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
+import numpy as np
+from numpy import ndarray
+from scipy.optimize import Bounds, LinearConstraint, minimize
-# different functions implementing different types of SVM's
-def NuSVC(train_x, train_y):
- svc_NuSVC = svm.NuSVC()
- svc_NuSVC.fit(train_x, train_y)
- return svc_NuSVC
-
-
-def Linearsvc(train_x, train_y):
- svc_linear = svm.LinearSVC(tol=10e-2)
- svc_linear.fit(train_x, train_y)
- return svc_linear
-
-
-def SVC(train_x, train_y):
- # svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True,
- # probability=False,tol=0.001, cache_size=200, class_weight=None, verbose=False,
- # max_iter=1000, random_state=None)
- # various parameters like "kernel","gamma","C" can effectively tuned for a given
- # machine learning model.
- SVC = svm.SVC(gamma="auto")
- SVC.fit(train_x, train_y)
- return SVC
-
-
-def test(X_new):
+def norm_squared(vector: ndarray) -> float:
"""
- 3 test cases to be passed
- an array containing the sepal length (cm), sepal width (cm), petal length (cm),
- petal width (cm) based on which the target name will be predicted
- >>> test([1,2,1,4])
- 'virginica'
- >>> test([5, 2, 4, 1])
- 'versicolor'
- >>> test([6,3,4,1])
- 'versicolor'
+ Return the squared second norm of vector
+ norm_squared(v) = sum(x * x for x in v)
+
+ Args:
+ vector (ndarray): input vector
+
+ Returns:
+ float: squared second norm of vector
+
+ >>> norm_squared([1, 2])
+ 5
+ >>> norm_squared(np.asarray([1, 2]))
+ 5
+ >>> norm_squared([0, 0])
+ 0
"""
- iris = load_iris()
- # splitting the dataset to test and train
- train_x, test_x, train_y, test_y = train_test_split(
- iris["data"], iris["target"], random_state=4
- )
- # any of the 3 types of SVM can be used
- # current_model=SVC(train_x, train_y)
- # current_model=NuSVC(train_x, train_y)
- current_model = Linearsvc(train_x, train_y)
- prediction = current_model.predict([X_new])
- return iris["target_names"][prediction][0]
+ return np.dot(vector, vector)
+
+
class SVC:
    """
    Support Vector Classifier

    Args:
        kernel (str): kernel to use. Default: linear
            Possible choices:
                - linear
        regularization: constraint for soft margin (data not linearly separable)
            Default: unbound

    >>> SVC(kernel="asdf")
    Traceback (most recent call last):
        ...
    ValueError: Unknown kernel: asdf

    >>> SVC(kernel="rbf")
    Traceback (most recent call last):
        ...
    ValueError: rbf kernel requires gamma

    >>> SVC(kernel="rbf", gamma=-1)
    Traceback (most recent call last):
        ...
    ValueError: gamma must be > 0
    """

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    # kernels
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel used at all)"""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """
        RBF: Radial Basis Function Kernel

        Note: for more information see:
            https://en.wikipedia.org/wiki/Radial_basis_function_kernel

        Args:
            vector1 (ndarray): first vector
            vector2 (ndarray): second vector)

        Returns:
            float: exp(-(gamma * norm_squared(vector1 - vector2)))
        """
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        """
        Fits the SVC with a set of observations.

        Args:
            observations (list[ndarray]): list of observations
            classes (ndarray): classification of each observation (in {1, -1})
        """

        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """
            Opposite of the function to maximize

            Args:
                candidate (ndarray): candidate array to test

            Return:
                float: Wolfe's Dual result to minimize
            """
            s = 0
            (n,) = np.shape(candidate)
            # O(n^2) double sum over the kernel Gram "matrix", evaluated
            # entry-by-entry on every call of the scipy optimizer.
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        # Equality constraint sum_n(ln*yn) = 0, box constraint 0 <= ln <= C.
        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        # NOTE(review): the inner term uses optimum[i]/classes[i] only, while
        # the textbook offset is y_i - sum_j(l_j * y_j * K(x_j, x_i)); the
        # doctests in predict() pass, but this formula deserves verification.
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """
        Get the expected class of an observation

        Args:
            observation (Vector): observation

        Returns:
            int {1, -1}: expected class

        >>> xs = [
        ...     np.asarray([0, 1]), np.asarray([0, 2]),
        ...     np.asarray([1, 1]), np.asarray([1, 2])
        ... ]
        >>> y = np.asarray([1, 1, -1, -1])
        >>> s = SVC()
        >>> s.fit(xs, y)
        >>> s.predict(np.asarray([0, 1]))
        1
        >>> s.predict(np.asarray([1, 1]))
        -1
        >>> s.predict(np.asarray([2, 2]))
        -1
        """
        # Kernelised decision function: sign(sum_n l_n y_n K(x_n, x) + b).
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
diff --git a/machine_learning/word_frequency_functions.py b/machine_learning/word_frequency_functions.py
index 3e8faf39c..8fd2741f6 100644
--- a/machine_learning/word_frequency_functions.py
+++ b/machine_learning/word_frequency_functions.py
@@ -83,7 +83,7 @@ the third document in the corpus.")
return (len([doc for doc in docs if term in doc]), len(docs))
-def inverse_document_frequency(df: int, N: int, smoothing=False) -> float:
+def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
"""
Return an integer denoting the importance
of a word. This measure of importance is
@@ -109,15 +109,15 @@ def inverse_document_frequency(df: int, N: int, smoothing=False) -> float:
1.477
"""
if smoothing:
- if N == 0:
+ if n == 0:
raise ValueError("log10(0) is undefined.")
- return round(1 + log10(N / (1 + df)), 3)
+ return round(1 + log10(n / (1 + df)), 3)
if df == 0:
raise ZeroDivisionError("df must be > 0")
- elif N == 0:
+ elif n == 0:
raise ValueError("log10(0) is undefined.")
- return round(log10(N / df), 3)
+ return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py
new file mode 100644
index 000000000..1da933cf6
--- /dev/null
+++ b/machine_learning/xgboost_classifier.py
@@ -0,0 +1,81 @@
+# XGBoost Classifier Example
+import numpy as np
+from matplotlib import pyplot as plt
+from sklearn.datasets import load_iris
+from sklearn.metrics import ConfusionMatrixDisplay
+from sklearn.model_selection import train_test_split
+from xgboost import XGBClassifier
+
+
def data_handling(data: dict) -> tuple:
    """
    Split a dataset dictionary into its features ('data') and target entries.

    >>> data_handling(({'data':'[5.1, 3.5, 1.4, 0.2]','target':([0])}))
    ('[5.1, 3.5, 1.4, 0.2]', [0])
    >>> data_handling(
    ...     {'data': '[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', 'target': ([0, 0])}
    ... )
    ('[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', [0, 0])
    """
    features = data["data"]
    target = data["target"]
    return (features, target)
+
+
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """
    Fit an XGBoost classifier (default hyper-parameters) on the given data.

    # THIS TEST IS BROKEN!! >>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0]))
    XGBClassifier(base_score=0.5, booster='gbtree', callbacks=None,
                  colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1,
                  early_stopping_rounds=None, enable_categorical=False,
                  eval_metric=None, gamma=0, gpu_id=-1, grow_policy='depthwise',
                  importance_type=None, interaction_constraints='',
                  learning_rate=0.300000012, max_bin=256, max_cat_to_onehot=4,
                  max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1,
                  missing=nan, monotone_constraints='()', n_estimators=100,
                  n_jobs=0, num_parallel_tree=1, predictor='auto', random_state=0,
                  reg_alpha=0, reg_lambda=1, ...)
    """
    # The repr doctest above is disabled because it varies across xgboost
    # versions; the classifier itself is returned fully fitted.
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
+
+
def main() -> None:
    """
    >>> main()

    Url for the algorithm:
    https://xgboost.readthedocs.io/en/stable/
    Iris type dataset is used to demonstrate algorithm.
    """
    # NOTE(review): the `>>> main()` doctest above trains a model and opens a
    # matplotlib window when run under doctest — confirm this is intended.

    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    # No fixed random_state: the split (and thus the confusion matrix) varies
    # between runs.
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod(verbose=True)
+ main()
diff --git a/machine_learning/xgboost_regressor.py b/machine_learning/xgboost_regressor.py
new file mode 100644
index 000000000..023984fc1
--- /dev/null
+++ b/machine_learning/xgboost_regressor.py
@@ -0,0 +1,64 @@
+# XGBoost Regressor Example
+import numpy as np
+from sklearn.datasets import fetch_california_housing
+from sklearn.metrics import mean_absolute_error, mean_squared_error
+from sklearn.model_selection import train_test_split
+from xgboost import XGBRegressor
+
+
def data_handling(data: dict) -> tuple:
    """
    Split a dataset dictionary into its features ('data') and target entries.

    >>> data_handling((
    ...     {'data':'[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]'
    ...     ,'target':([4.526])}))
    ('[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]', [4.526])
    """
    features = data["data"]
    target = data["target"]
    return (features, target)
+
+
def xgboost(
    features: np.ndarray, target: np.ndarray, test_features: np.ndarray
) -> np.ndarray:
    """
    Fit an XGBoost regressor on (features, target) and predict test_features.

    >>> xgboost(np.array([[ 2.3571 , 52. , 6.00813008, 1.06775068,
    ...     907. , 2.45799458, 40.58 , -124.26]]),np.array([1.114]),
    ...     np.array([[1.97840000e+00, 3.70000000e+01, 4.98858447e+00, 1.03881279e+00,
    ...     1.14300000e+03, 2.60958904e+00, 3.67800000e+01, -1.19780000e+02]]))
    array([[1.1139996]], dtype=float32)
    """
    # random_state pinned so the doctest output is reproducible.
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data; reshape to a (n, 1) column vector.
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
+
+
def main() -> None:
    """
    >>> main()
    Mean Absolute Error : 0.30957163379906033
    Mean Square Error : 0.22611560196662744

    The URL for this algorithm
    https://xgboost.readthedocs.io/en/stable/
    California house price dataset is used to demonstrate the algorithm.
    """
    # NOTE(review): the doctest pins exact error values — this presumably only
    # holds for a specific xgboost/sklearn version; confirm in CI.
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    # random_state=1 keeps the split (and the doctest output) deterministic.
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod(verbose=True)
+ main()
diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py
deleted file mode 100644
index e455a158e..000000000
--- a/maths/3n_plus_1.py
+++ /dev/null
@@ -1,149 +0,0 @@
-from __future__ import annotations
-
-
-def n31(a: int) -> tuple[list[int], int]:
- """
- Returns the Collatz sequence and its length of any positive integer.
- >>> n31(4)
- ([4, 2, 1], 3)
- """
-
- if not isinstance(a, int):
- raise TypeError(f"Must be int, not {type(a).__name__}")
- if a < 1:
- raise ValueError(f"Given integer must be greater than 1, not {a}")
-
- path = [a]
- while a != 1:
- if a % 2 == 0:
- a = a // 2
- else:
- a = 3 * a + 1
- path += [a]
- return path, len(path)
-
-
-def test_n31():
- """
- >>> test_n31()
- """
- assert n31(4) == ([4, 2, 1], 3)
- assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15)
- assert n31(31) == (
- [
- 31,
- 94,
- 47,
- 142,
- 71,
- 214,
- 107,
- 322,
- 161,
- 484,
- 242,
- 121,
- 364,
- 182,
- 91,
- 274,
- 137,
- 412,
- 206,
- 103,
- 310,
- 155,
- 466,
- 233,
- 700,
- 350,
- 175,
- 526,
- 263,
- 790,
- 395,
- 1186,
- 593,
- 1780,
- 890,
- 445,
- 1336,
- 668,
- 334,
- 167,
- 502,
- 251,
- 754,
- 377,
- 1132,
- 566,
- 283,
- 850,
- 425,
- 1276,
- 638,
- 319,
- 958,
- 479,
- 1438,
- 719,
- 2158,
- 1079,
- 3238,
- 1619,
- 4858,
- 2429,
- 7288,
- 3644,
- 1822,
- 911,
- 2734,
- 1367,
- 4102,
- 2051,
- 6154,
- 3077,
- 9232,
- 4616,
- 2308,
- 1154,
- 577,
- 1732,
- 866,
- 433,
- 1300,
- 650,
- 325,
- 976,
- 488,
- 244,
- 122,
- 61,
- 184,
- 92,
- 46,
- 23,
- 70,
- 35,
- 106,
- 53,
- 160,
- 80,
- 40,
- 20,
- 10,
- 5,
- 16,
- 8,
- 4,
- 2,
- 1,
- ],
- 107,
- )
-
-
-if __name__ == "__main__":
- num = 4
- path, length = n31(num)
- print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}")
diff --git a/maths/abs.py b/maths/abs.py
index 68c99a1d5..b357e98d8 100644
--- a/maths/abs.py
+++ b/maths/abs.py
@@ -1,7 +1,7 @@
"""Absolute Value."""
-def abs_val(num):
+def abs_val(num: float) -> float:
"""
Find the absolute value of a number.
@@ -15,14 +15,80 @@ def abs_val(num):
return -num if num < 0 else num
def abs_min(x: list[int]) -> int:
    """
    Return the element of ``x`` with the smallest absolute value
    (the first such element when there are ties).

    >>> abs_min([0,5,1,11])
    0
    >>> abs_min([3,-10,-2])
    -2
    >>> abs_min([])
    Traceback (most recent call last):
        ...
    ValueError: abs_min() arg is an empty sequence
    """
    if len(x) == 0:
        raise ValueError("abs_min() arg is an empty sequence")
    # min() with key=abs replaces the hand-rolled scan (and its dependency on
    # the abs_val helper); ties resolve to the first element, as before.
    return min(x, key=abs)
+
+
def abs_max(x: list[int]) -> int:
    """
    Return the element of ``x`` with the largest absolute value
    (the first such element when there are ties).

    >>> abs_max([0,5,1,11])
    11
    >>> abs_max([3,-10,-2])
    -10
    >>> abs_max([])
    Traceback (most recent call last):
        ...
    ValueError: abs_max() arg is an empty sequence
    """
    if len(x) == 0:
        raise ValueError("abs_max() arg is an empty sequence")
    # max() with key=abs replaces the hand-rolled scan; ties resolve to the
    # first element, matching the original strict-> comparison.
    return max(x, key=abs)
+
+
def abs_max_sort(x: list[int]) -> int:
    """
    Return the element with the largest absolute value by sorting on ``abs``.
    Because the sort is stable, ties resolve to the LAST such element.

    >>> abs_max_sort([0,5,1,11])
    11
    >>> abs_max_sort([3,-10,-2])
    -10
    >>> abs_max_sort([])
    Traceback (most recent call last):
        ...
    ValueError: abs_max_sort() arg is an empty sequence
    """
    if len(x) == 0:
        raise ValueError("abs_max_sort() arg is an empty sequence")
    ordered = sorted(x, key=abs)
    return ordered[-1]
+
+
def test_abs_val():
"""
>>> test_abs_val()
"""
- assert 0 == abs_val(0)
- assert 34 == abs_val(34)
- assert 100000000000 == abs_val(-100000000000)
+ assert abs_val(0) == 0
+ assert abs_val(34) == 34
+ assert abs_val(-100000000000) == 100000000000
+
+ a = [-3, -1, 2, -11]
+ assert abs_max(a) == -11
+ assert abs_max_sort(a) == -11
+ assert abs_min(a) == -1
if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ test_abs_val()
print(abs_val(-34)) # --> 34
diff --git a/maths/abs_max.py b/maths/abs_max.py
deleted file mode 100644
index e5a821965..000000000
--- a/maths/abs_max.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from __future__ import annotations
-
-
-def abs_max(x: list[int]) -> int:
- """
- >>> abs_max([0,5,1,11])
- 11
- >>> abs_max([3,-10,-2])
- -10
- """
- j = x[0]
- for i in x:
- if abs(i) > abs(j):
- j = i
- return j
-
-
-def abs_max_sort(x):
- """
- >>> abs_max_sort([0,5,1,11])
- 11
- >>> abs_max_sort([3,-10,-2])
- -10
- """
- return sorted(x, key=abs)[-1]
-
-
-def main():
- a = [1, 2, -11]
- assert abs_max(a) == -11
- assert abs_max_sort(a) == -11
-
-
-if __name__ == "__main__":
- main()
diff --git a/maths/abs_min.py b/maths/abs_min.py
deleted file mode 100644
index eb84de37c..000000000
--- a/maths/abs_min.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from .abs import abs_val
-
-
-def absMin(x):
- """
- >>> absMin([0,5,1,11])
- 0
- >>> absMin([3,-10,-2])
- -2
- """
- j = x[0]
- for i in x:
- if abs_val(i) < abs_val(j):
- j = i
- return j
-
-
-def main():
- a = [-3, -1, 2, -11]
- print(absMin(a)) # = -1
-
-
-if __name__ == "__main__":
- main()
diff --git a/maths/add.py b/maths/add.py
index 0bc7da969..c89252c64 100644
--- a/maths/add.py
+++ b/maths/add.py
@@ -3,7 +3,7 @@ Just to check
"""
-def add(a, b):
+def add(a: float, b: float) -> float:
"""
>>> add(2, 2)
4
diff --git a/maths/addition_without_arithmetic.py b/maths/addition_without_arithmetic.py
new file mode 100644
index 000000000..409604e4c
--- /dev/null
+++ b/maths/addition_without_arithmetic.py
@@ -0,0 +1,39 @@
+"""
+Illustrate how to add the integer without arithmetic operation
+Author: suraj Kumar
+Time Complexity: 1
+https://en.wikipedia.org/wiki/Bitwise_operation
+"""
+
+
def add(first: int, second: int) -> int:
    """
    Implementation of addition of integers using bitwise operations only.

    Values are computed in 32-bit two's-complement arithmetic, so inputs and
    the result must fit in a signed 32-bit integer.  The masking is required
    because Python integers are unbounded: without it, mixed-sign inputs such
    as ``add(1, -1)`` produced an ever-growing carry and never terminated.

    Examples:
    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    >>> add(-7, 2)
    -5
    >>> add(0, -7)
    -7
    >>> add(-321, 0)
    -321
    >>> add(1, -1)
    0
    """
    mask = 0xFFFFFFFF  # keep every intermediate in 32 bits
    first &= mask
    second &= mask
    while second != 0:
        c = first & second  # common set bits: these positions generate a carry
        first = (first ^ second) & mask  # sum without carries
        second = (c << 1) & mask  # carries move one column left
    # Re-interpret the 32-bit pattern as a signed Python int.
    return first if first <= 0x7FFFFFFF else ~(first ^ mask)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ first = int(input("Enter the first number: ").strip())
+ second = int(input("Enter the second number: ").strip())
+ print(f"{add(first, second) = }")
diff --git a/maths/arc_length.py b/maths/arc_length.py
new file mode 100644
index 000000000..9e87ca38c
--- /dev/null
+++ b/maths/arc_length.py
@@ -0,0 +1,15 @@
+from math import pi
+
+
def arc_length(angle: float, radius: float) -> float:
    """
    Return the length of a circular arc.

    ``angle`` is the central angle in degrees, ``radius`` the circle's radius.
    Formula: 2 * pi * r * (angle / 360).

    >>> arc_length(45, 5)
    3.9269908169872414
    >>> arc_length(120, 15)
    31.415926535897928
    """
    return 2 * pi * radius * (angle / 360)
+
+
+if __name__ == "__main__":
+ print(arc_length(90, 10))
diff --git a/maths/area.py b/maths/area.py
index 8689f323c..ea7216c8f 100644
--- a/maths/area.py
+++ b/maths/area.py
@@ -1,7 +1,8 @@
"""
Find the area of various geometric shapes
+Wikipedia reference: https://en.wikipedia.org/wiki/Area
"""
-from math import pi, sqrt
+from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
@@ -10,6 +11,10 @@ def surface_area_cube(side_length: float) -> float:
>>> surface_area_cube(1)
6
+ >>> surface_area_cube(1.6)
+ 15.360000000000003
+ >>> surface_area_cube(0)
+ 0
>>> surface_area_cube(3)
54
>>> surface_area_cube(-1)
@@ -19,7 +24,35 @@ def surface_area_cube(side_length: float) -> float:
"""
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values")
- return 6 * side_length ** 2
+ return 6 * side_length**2
+
+
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """
    Calculate the Surface Area of a Cuboid.
    Formula: 2 * (lb + bh + lh)

    >>> surface_area_cuboid(1, 2, 3)
    22
    >>> surface_area_cuboid(0, 0, 0)
    0
    >>> surface_area_cuboid(1.6, 2.6, 3.6)
    38.56
    >>> surface_area_cuboid(-1, 2, 3)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cuboid() only accepts non-negative values
    >>> surface_area_cuboid(1, -2, 3)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cuboid() only accepts non-negative values
    >>> surface_area_cuboid(1, 2, -3)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cuboid() only accepts non-negative values
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    # Sum of the three distinct face areas, each appearing twice.
    face_sum = (length * breadth) + (breadth * height) + (length * height)
    return 2 * face_sum
def surface_area_sphere(radius: float) -> float:
@@ -32,6 +65,10 @@ def surface_area_sphere(radius: float) -> float:
314.1592653589793
>>> surface_area_sphere(1)
12.566370614359172
+ >>> surface_area_sphere(1.6)
+ 32.169908772759484
+ >>> surface_area_sphere(0)
+ 0.0
>>> surface_area_sphere(-1)
Traceback (most recent call last):
...
@@ -39,7 +76,163 @@ def surface_area_sphere(radius: float) -> float:
"""
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values")
- return 4 * pi * radius ** 2
+ return 4 * pi * radius**2
+
+
def surface_area_hemisphere(radius: float) -> float:
    """
    Calculate the Surface Area of a Hemisphere (curved surface plus the
    flat circular base).
    Formula: 3 * pi * r^2

    >>> surface_area_hemisphere(5)
    235.61944901923448
    >>> surface_area_hemisphere(1)
    9.42477796076938
    >>> surface_area_hemisphere(0)
    0.0
    >>> surface_area_hemisphere(1.1)
    11.40398133253095
    >>> surface_area_hemisphere(-1)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_hemisphere() only accepts non-negative values
    """
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    area = 3 * pi * radius**2
    return area
+
+
def surface_area_cone(radius: float, height: float) -> float:
    """
    Calculate the Surface Area of a Cone.
    Wikipedia reference: https://en.wikipedia.org/wiki/Cone
    Formula: pi * r * (r + (h ** 2 + r ** 2) ** 0.5)

    >>> surface_area_cone(10, 24)
    1130.9733552923256
    >>> surface_area_cone(6, 8)
    301.59289474462014
    >>> surface_area_cone(1.6, 2.6)
    23.387862992395807
    >>> surface_area_cone(0, 0)
    0.0
    >>> surface_area_cone(-1, -2)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cone() only accepts non-negative values
    >>> surface_area_cone(1, -2)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cone() only accepts non-negative values
    >>> surface_area_cone(-1, 2)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cone() only accepts non-negative values
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    # slant height via Pythagoras; total area = base circle + lateral surface
    slant_height = (height**2 + radius**2) ** 0.5
    return pi * radius * (radius + slant_height)
+
+
def surface_area_conical_frustum(
    radius_1: float, radius_2: float, height: float
) -> float:
    """
    Calculate the Surface Area of a Conical Frustum
    (the two circular ends plus the lateral surface).

    >>> surface_area_conical_frustum(1, 2, 3)
    45.511728065337266
    >>> surface_area_conical_frustum(4, 5, 6)
    300.7913575056268
    >>> surface_area_conical_frustum(0, 0, 0)
    0.0
    >>> surface_area_conical_frustum(1.6, 2.6, 3.6)
    78.57907060751548
    >>> surface_area_conical_frustum(-1, 2, 3)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_conical_frustum() only accepts non-negative values
    >>> surface_area_conical_frustum(1, -2, 3)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_conical_frustum() only accepts non-negative values
    >>> surface_area_conical_frustum(1, 2, -3)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_conical_frustum() only accepts non-negative values
    """
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    lateral = slant_height * (radius_1 + radius_2)
    return pi * (lateral + radius_1**2 + radius_2**2)
+
+
def surface_area_cylinder(radius: float, height: float) -> float:
    """
    Calculate the Surface Area of a Cylinder.
    Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder
    Formula: 2 * pi * r * (h + r)

    >>> surface_area_cylinder(7, 10)
    747.6990515543707
    >>> surface_area_cylinder(1.6, 2.6)
    42.22300526424682
    >>> surface_area_cylinder(0, 0)
    0.0
    >>> surface_area_cylinder(6, 8)
    527.7875658030853
    >>> surface_area_cylinder(-1, -2)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cylinder() only accepts non-negative values
    >>> surface_area_cylinder(1, -2)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cylinder() only accepts non-negative values
    >>> surface_area_cylinder(-1, 2)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_cylinder() only accepts non-negative values
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    # Factored form of 2*pi*r*h (lateral) + 2*pi*r^2 (the two caps).
    extent = height + radius
    return 2 * pi * radius * extent
+
+
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the Area of a (ring) Torus.
    Wikipedia reference: https://en.wikipedia.org/wiki/Torus
    :return 4pi^2 * torus_radius * tube_radius
    >>> surface_area_torus(1, 1)
    39.47841760435743
    >>> surface_area_torus(4, 3)
    473.7410112522892
    >>> surface_area_torus(3, 4)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_torus() does not support spindle or self intersecting tori
    >>> surface_area_torus(1.6, 1.6)
    101.06474906715503
    >>> surface_area_torus(0, 0)
    0.0
    >>> surface_area_torus(-1, 1)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_torus() only accepts non-negative values
    >>> surface_area_torus(1, -1)
    Traceback (most recent call last):
        ...
    ValueError: surface_area_torus() only accepts non-negative values
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    # Ring torus only: the tube must not reach the axis of revolution.
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    four_pi_squared = 4 * pow(pi, 2)
    return four_pi_squared * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
@@ -48,6 +241,10 @@ def area_rectangle(length: float, width: float) -> float:
>>> area_rectangle(10, 20)
200
+ >>> area_rectangle(1.6, 2.6)
+ 4.16
+ >>> area_rectangle(0, 0)
+ 0
>>> area_rectangle(-1, -2)
Traceback (most recent call last):
...
@@ -72,6 +269,10 @@ def area_square(side_length: float) -> float:
>>> area_square(10)
100
+ >>> area_square(0)
+ 0
+ >>> area_square(1.6)
+ 2.5600000000000005
>>> area_square(-1)
Traceback (most recent call last):
...
@@ -79,7 +280,7 @@ def area_square(side_length: float) -> float:
"""
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values")
- return side_length ** 2
+ return side_length**2
def area_triangle(base: float, height: float) -> float:
@@ -88,6 +289,10 @@ def area_triangle(base: float, height: float) -> float:
>>> area_triangle(10, 10)
50.0
+ >>> area_triangle(1.6, 2.6)
+ 2.08
+ >>> area_triangle(0, 0)
+ 0.0
>>> area_triangle(-1, -2)
Traceback (most recent call last):
...
@@ -109,13 +314,16 @@ def area_triangle(base: float, height: float) -> float:
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
"""
Calculate area of triangle when the length of 3 sides are known.
-
This function uses Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula
>>> area_triangle_three_sides(5, 12, 13)
30.0
>>> area_triangle_three_sides(10, 11, 12)
51.521233486786784
+ >>> area_triangle_three_sides(0, 0, 0)
+ 0.0
+ >>> area_triangle_three_sides(1.6, 2.6, 3.6)
+ 1.8703742940919619
>>> area_triangle_three_sides(-1, -2, -1)
Traceback (most recent call last):
...
@@ -124,6 +332,18 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float
Traceback (most recent call last):
...
ValueError: area_triangle_three_sides() only accepts non-negative values
+ >>> area_triangle_three_sides(2, 4, 7)
+ Traceback (most recent call last):
+ ...
+ ValueError: Given three sides do not form a triangle
+ >>> area_triangle_three_sides(2, 7, 4)
+ Traceback (most recent call last):
+ ...
+ ValueError: Given three sides do not form a triangle
+ >>> area_triangle_three_sides(7, 2, 4)
+ Traceback (most recent call last):
+ ...
+ ValueError: Given three sides do not form a triangle
"""
if side1 < 0 or side2 < 0 or side3 < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values")
@@ -145,6 +365,10 @@ def area_parallelogram(base: float, height: float) -> float:
>>> area_parallelogram(10, 20)
200
+ >>> area_parallelogram(1.6, 2.6)
+ 4.16
+ >>> area_parallelogram(0, 0)
+ 0
>>> area_parallelogram(-1, -2)
Traceback (most recent call last):
...
@@ -169,6 +393,10 @@ def area_trapezium(base1: float, base2: float, height: float) -> float:
>>> area_trapezium(10, 20, 30)
450.0
+ >>> area_trapezium(1.6, 2.6, 3.6)
+ 7.5600000000000005
+ >>> area_trapezium(0, 0, 0)
+ 0.0
>>> area_trapezium(-1, -2, -3)
Traceback (most recent call last):
...
@@ -209,6 +437,10 @@ def area_circle(radius: float) -> float:
>>> area_circle(20)
1256.6370614359173
+ >>> area_circle(1.6)
+ 8.042477193189871
+ >>> area_circle(0)
+ 0.0
>>> area_circle(-1)
Traceback (most recent call last):
...
@@ -216,7 +448,7 @@ def area_circle(radius: float) -> float:
"""
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values")
- return pi * radius ** 2
+ return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
@@ -227,6 +459,10 @@ def area_ellipse(radius_x: float, radius_y: float) -> float:
314.1592653589793
>>> area_ellipse(10, 20)
628.3185307179587
+ >>> area_ellipse(0, 0)
+ 0.0
+ >>> area_ellipse(1.6, 2.6)
+ 13.06902543893354
>>> area_ellipse(-10, 20)
Traceback (most recent call last):
...
@@ -251,6 +487,10 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
>>> area_rhombus(10, 20)
100.0
+ >>> area_rhombus(1.6, 2.6)
+ 2.08
+ >>> area_rhombus(0, 0)
+ 0.0
>>> area_rhombus(-1, -2)
Traceback (most recent call last):
...
@@ -269,6 +509,51 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """
    Calculate the area of a regular polygon.
    Wikipedia reference: https://en.wikipedia.org/wiki/Polygon#Regular_polygons
    Formula: (n*s^2*cot(pi/n))/4

    :param sides: number of sides of the polygon (integer >= 3)
    :param length: length of one side (non-negative)
    :return: the area of the regular polygon
    :raises ValueError: if sides < 3 / not an int, or if length is negative

    >>> area_reg_polygon(3, 10)
    43.301270189221945
    >>> area_reg_polygon(4, 10)
    100.00000000000001
    >>> area_reg_polygon(0, 0)
    Traceback (most recent call last):
    ...
    ValueError: area_reg_polygon() only accepts integers greater than or equal to \
three as number of sides
    >>> area_reg_polygon(-1, -2)
    Traceback (most recent call last):
    ...
    ValueError: area_reg_polygon() only accepts integers greater than or equal to \
three as number of sides
    >>> area_reg_polygon(5, -2)
    Traceback (most recent call last):
    ...
    ValueError: area_reg_polygon() only accepts non-negative values as \
length of a side
    >>> area_reg_polygon(-1, 2)
    Traceback (most recent call last):
    ...
    ValueError: area_reg_polygon() only accepts integers greater than or equal to \
three as number of sides
    """
    # Reject non-integers and degenerate polygons (fewer than 3 sides).
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    if length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    # Original code had this return statement duplicated; the second copy
    # was unreachable dead code and has been removed.
    return (sides * length**2) / (4 * tan(pi / sides))
+
+
if __name__ == "__main__":
import doctest
@@ -280,9 +565,19 @@ if __name__ == "__main__":
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
+ print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
+ print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
+ print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
- print(f"Rhombus: {area_rhombus(10, 20) = }")
+ print(f"Hemisphere: {surface_area_hemisphere(20) = }")
+ print(f"Cone: {surface_area_cone(10, 20) = }")
+ print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
+ print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
+ print(f"Torus: {surface_area_torus(20, 10) = }")
+ print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
+ print(f"Square: {area_reg_polygon(4, 10) = }")
+ print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py
index 2d01e414b..b557b2029 100644
--- a/maths/area_under_curve.py
+++ b/maths/area_under_curve.py
@@ -1,14 +1,15 @@
"""
Approximates the area under the curve using the trapezoidal rule
"""
+from __future__ import annotations
-from typing import Callable, Union
+from collections.abc import Callable
def trapezoidal_area(
- fnc: Callable[[Union[int, float]], Union[int, float]],
- x_start: Union[int, float],
- x_end: Union[int, float],
+ fnc: Callable[[int | float], int | float],
+ x_start: int | float,
+ x_end: int | float,
steps: int = 100,
) -> float:
"""
@@ -34,7 +35,7 @@ def trapezoidal_area(
x1 = x_start
fx1 = fnc(x_start)
area = 0.0
- for i in range(steps):
+ for _ in range(steps):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
x2 = (x_end - x_start) / steps + x1
@@ -49,7 +50,7 @@ def trapezoidal_area(
if __name__ == "__main__":
def f(x):
- return x ** 3 + x ** 2
+ return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py
index ce8c62182..26709b428 100644
--- a/maths/armstrong_numbers.py
+++ b/maths/armstrong_numbers.py
@@ -9,7 +9,7 @@ Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers.
On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188
"""
PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401)
-FAILING = (-153, -1, 0, 1.2, 200, "A", [], {}, None)
+FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None)
def armstrong_number(n: int) -> bool:
@@ -25,7 +25,7 @@ def armstrong_number(n: int) -> bool:
return False
# Initialization of sum and number of digits.
- sum = 0
+ total = 0
number_of_digits = 0
temp = n
# Calculation of digits of the number
@@ -36,9 +36,9 @@ def armstrong_number(n: int) -> bool:
temp = n
while temp > 0:
rem = temp % 10
- sum += rem ** number_of_digits
+ total += rem**number_of_digits
temp //= 10
- return n == sum
+ return n == total
def pluperfect_number(n: int) -> bool:
@@ -55,17 +55,17 @@ def pluperfect_number(n: int) -> bool:
# Init a "histogram" of the digits
digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
digit_total = 0
- sum = 0
+ total = 0
temp = n
while temp > 0:
temp, rem = divmod(temp, 10)
digit_histogram[rem] += 1
digit_total += 1
- for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))):
- sum += cnt * i ** digit_total
+ for cnt, i in zip(digit_histogram, range(len(digit_histogram))):
+ total += cnt * i**digit_total
- return n == sum
+ return n == total
def narcissistic_number(n: int) -> bool:
diff --git a/maths/automorphic_number.py b/maths/automorphic_number.py
new file mode 100644
index 000000000..8ed937563
--- /dev/null
+++ b/maths/automorphic_number.py
@@ -0,0 +1,59 @@
+"""
+== Automorphic Numbers ==
+A number n is said to be a Automorphic number if
+the square of n "ends" in the same digits as n itself.
+
+Examples of Automorphic Numbers: 0, 1, 5, 6, 25, 76, 376, 625, 9376, 90625, ...
+https://en.wikipedia.org/wiki/Automorphic_number
+"""
+
+# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
+# Time Complexity : O(log10n)
+
+
def is_automorphic_number(number: int) -> bool:
    """
    Return True if ``number`` is automorphic, i.e. if its square ends in the
    same decimal digits as the number itself.

    >>> is_automorphic_number(-1)
    False
    >>> is_automorphic_number(0)
    True
    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(6)
    True
    >>> is_automorphic_number(7)
    False
    >>> is_automorphic_number(25)
    True
    >>> is_automorphic_number(259918212890625)
    True
    >>> is_automorphic_number(259918212890636)
    False
    >>> is_automorphic_number(740081787109376)
    True
    >>> is_automorphic_number(5.0)
    Traceback (most recent call last):
    ...
    TypeError: Input value of [number=5.0] must be an integer
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        # Automorphic numbers are defined for non-negative integers only.
        return False
    # The definition is exactly "the square's decimal representation ends
    # with the number's decimal representation".
    return str(number * number).endswith(str(number))
+
+
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
diff --git a/maths/average_absolute_deviation.py b/maths/average_absolute_deviation.py
new file mode 100644
index 000000000..193d94a2f
--- /dev/null
+++ b/maths/average_absolute_deviation.py
@@ -0,0 +1,29 @@
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Average_absolute_deviation

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    >>> average_absolute_deviation([2, 70, 6, 50, 20, 8, 4, 0])
    20.0
    >>> average_absolute_deviation([-20, 0, 30, 15])
    16.25
    >>> average_absolute_deviation([])
    Traceback (most recent call last):
    ...
    ValueError: List is empty
    """
    # An empty list has no mean, so the deviation is undefined.
    if not nums:
        raise ValueError("List is empty")

    count = len(nums)
    mean = sum(nums) / count
    # Mean of the absolute distances from the mean.
    deviations = (abs(value - mean) for value in nums)
    return sum(deviations) / count
+
+
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
diff --git a/maths/average_mean.py b/maths/average_mean.py
index e02e307f2..274c434ab 100644
--- a/maths/average_mean.py
+++ b/maths/average_mean.py
@@ -1,7 +1,7 @@
-from typing import List
+from __future__ import annotations
-def mean(nums: List) -> float:
+def mean(nums: list) -> float:
"""
Find mean of a list of numbers.
Wiki: https://en.wikipedia.org/wiki/Mean
diff --git a/maths/average_median.py b/maths/average_median.py
index 57e01368b..cd1ec1574 100644
--- a/maths/average_median.py
+++ b/maths/average_median.py
@@ -1,14 +1,14 @@
-from typing import Union
+from __future__ import annotations
-def median(nums: Union[int, float]) -> Union[int, float]:
+def median(nums: list) -> int | float:
"""
Find median of a list of numbers.
Wiki: https://en.wikipedia.org/wiki/Median
>>> median([0])
0
- >>> median([4,1,3,2])
+ >>> median([4, 1, 3, 2])
2.5
>>> median([2, 70, 6, 50, 20, 8, 4])
8
diff --git a/maths/average_mode.py b/maths/average_mode.py
index 83db82007..40f88f41f 100644
--- a/maths/average_mode.py
+++ b/maths/average_mode.py
@@ -1,34 +1,29 @@
-def mode(input_list: list) -> list: # Defining function "mode."
+from typing import Any
+
+
+def mode(input_list: list) -> list[Any]:
"""This function returns the mode(Mode as in the measures of
central tendency) of the input data.
The input list may contain any Datastructure or any Datatype.
- >>> input_list = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]
- >>> mode(input_list)
+ >>> mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2])
[2]
- >>> input_list = [3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 2, 2, 2]
- >>> mode(input_list)
+ >>> mode([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 2, 2, 2])
[2]
- >>> input_list = [3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2]
- >>> mode(input_list)
+ >>> mode([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2])
[2, 4]
- >>> input_list = ["x", "y", "y", "z"]
- >>> mode(input_list)
+ >>> mode(["x", "y", "y", "z"])
['y']
- >>> input_list = ["x", "x" , "y", "y", "z"]
- >>> mode(input_list)
+ >>> mode(["x", "x" , "y", "y", "z"])
['x', 'y']
"""
- result = list() # Empty list to store the counts of elements in input_list
- for x in input_list:
- result.append(input_list.count(x))
- if not result:
+ if not input_list:
return []
- y = max(result) # Gets the maximum value in the result list.
+ result = [input_list.count(value) for value in input_list]
+ y = max(result) # Gets the maximum count in the input list.
# Gets values of modes
- result = {input_list[i] for i, value in enumerate(result) if value == y}
- return sorted(result)
+ return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
diff --git a/maths/bailey_borwein_plouffe.py b/maths/bailey_borwein_plouffe.py
index b647ae56d..389b1566e 100644
--- a/maths/bailey_borwein_plouffe.py
+++ b/maths/bailey_borwein_plouffe.py
@@ -67,7 +67,7 @@ def _subsum(
@param precision: same as precision in main function
@return: floating-point number whose integer part is not important
"""
- sum = 0.0
+ total = 0.0
for sum_index in range(digit_pos_to_extract + precision):
denominator = 8 * sum_index + denominator_addend
if sum_index < digit_pos_to_extract:
@@ -79,8 +79,8 @@ def _subsum(
)
else:
exponential_term = pow(16, digit_pos_to_extract - 1 - sum_index)
- sum += exponential_term / denominator
- return sum
+ total += exponential_term / denominator
+ return total
if __name__ == "__main__":
diff --git a/maths/basic_maths.py b/maths/basic_maths.py
index 47d3d91b3..26c52c549 100644
--- a/maths/basic_maths.py
+++ b/maths/basic_maths.py
@@ -57,6 +57,8 @@ def number_of_divisors(n: int) -> int:
temp += 1
n = int(n / i)
div *= temp
+ if n > 1:
+ div *= 2
return div
@@ -81,14 +83,14 @@ def sum_of_divisors(n: int) -> int:
temp += 1
n = int(n / 2)
if temp > 1:
- s *= (2 ** temp - 1) / (2 - 1)
+ s *= (2**temp - 1) / (2 - 1)
for i in range(3, int(math.sqrt(n)) + 1, 2):
temp = 1
while n % i == 0:
temp += 1
n = int(n / i)
if temp > 1:
- s *= (i ** temp - 1) / (i - 1)
+ s *= (i**temp - 1) / (i - 1)
return int(s)
diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py
index 67dd1e728..df688892d 100644
--- a/maths/binary_exp_mod.py
+++ b/maths/binary_exp_mod.py
@@ -6,7 +6,7 @@ def bin_exp_mod(a, n, b):
7
"""
# mod b
- assert not (b == 0), "This cannot accept modulo that is == 0"
+ assert b != 0, "This cannot accept modulo that is == 0"
if n == 0:
return 1
diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py
index 8dda5245c..147b4285f 100644
--- a/maths/binary_exponentiation.py
+++ b/maths/binary_exponentiation.py
@@ -5,7 +5,6 @@
def binary_exponentiation(a, n):
-
if n == 0:
return 1
diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py
index 4def04149..0d4b3d1a8 100644
--- a/maths/binomial_coefficient.py
+++ b/maths/binomial_coefficient.py
@@ -5,16 +5,16 @@ def binomial_coefficient(n, r):
>>> binomial_coefficient(10, 5)
252
"""
- C = [0 for i in range(r + 1)]
+ c = [0 for i in range(r + 1)]
# nc0 = 1
- C[0] = 1
+ c[0] = 1
for i in range(1, n + 1):
# to compute current row from previous row.
j = min(i, r)
while j > 0:
- C[j] += C[j - 1]
+ c[j] += c[j - 1]
j -= 1
- return C[r]
+ return c[r]
print(binomial_coefficient(n=10, r=5))
diff --git a/maths/binomial_distribution.py b/maths/binomial_distribution.py
index a74a5a7ed..5b56f2d59 100644
--- a/maths/binomial_distribution.py
+++ b/maths/binomial_distribution.py
@@ -24,7 +24,7 @@ def binomial_distribution(successes: int, trials: int, prob: float) -> float:
raise ValueError("the function is defined for non-negative integers")
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0")
- probability = (prob ** successes) * ((1 - prob) ** (trials - successes))
+ probability = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
coefficient = float(factorial(trials))
coefficient /= factorial(successes) * factorial(trials - successes)
diff --git a/maths/bisection.py b/maths/bisection.py
index 93cc2247b..45f26d8d8 100644
--- a/maths/bisection.py
+++ b/maths/bisection.py
@@ -32,7 +32,7 @@ def bisection(a: float, b: float) -> float:
3.158203125
>>> bisection(2, 3)
Traceback (most recent call last):
- ...
+ ...
ValueError: Wrong space!
"""
# Bolzano theory in order to find if there is a root between a and b
diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py
new file mode 100644
index 000000000..c9c144759
--- /dev/null
+++ b/maths/carmichael_number.py
@@ -0,0 +1,47 @@
+"""
+== Carmichael Numbers ==
+A number n is said to be a Carmichael number if it
+satisfies the following modular arithmetic condition:
+
+ power(b, n-1) MOD n = 1,
+ for all b ranging from 1 to n such that b and
+ n are relatively prime, i.e, gcd(b, n) = 1
+
+Examples of Carmichael Numbers: 561, 1105, ...
+https://en.wikipedia.org/wiki/Carmichael_number
+"""
+
+
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    if a < b:
        # Keep the larger operand first so the modulo step below reduces it.
        return gcd(b, a)
    return b if a % b == 0 else gcd(b, a % b)
+
+
def power(x: int, y: int, mod: int) -> int:
    """Return (x ** y) % mod.

    Delegates to Python's built-in three-argument ``pow``, which performs
    fast modular exponentiation in C — the same divide-and-conquer scheme
    the original hand-rolled recursion implemented.

    The ``y == 0`` guard preserves the original behavior of returning 1
    (rather than ``1 % mod``) for a zero exponent.
    """
    if y == 0:
        return 1
    return pow(x, y, mod)
+
+
def is_carmichael_number(n: int) -> bool:
    """Return True if b**(n-1) % n == 1 for every base b in [2, n) that is
    coprime to n (the modular condition described in the module docstring).
    """
    return all(
        power(base, n - 1, n) == 1
        for base in range(2, n)
        if gcd(base, n) == 1
    )
+
+
if __name__ == "__main__":
    # Interactive driver: read an integer from stdin and report whether it
    # satisfies the Carmichael condition implemented above.
    number = int(input("Enter number: ").strip())
    if is_carmichael_number(number):
        print(f"{number} is a Carmichael Number.")
    else:
        print(f"{number} is not a Carmichael Number.")
diff --git a/maths/catalan_number.py b/maths/catalan_number.py
new file mode 100644
index 000000000..20c2cfb17
--- /dev/null
+++ b/maths/catalan_number.py
@@ -0,0 +1,53 @@
+"""
+
+Calculate the nth Catalan number
+
+Source:
+ https://en.wikipedia.org/wiki/Catalan_number
+
+"""
+
+
def catalan(number: int) -> int:
    """
    :param number: nth catalan number to calculate
    :return: the nth catalan number
    Note: A catalan number is only defined for positive integers

    >>> catalan(5)
    14
    >>> catalan(0)
    Traceback (most recent call last):
    ...
    ValueError: Input value of [number=0] must be > 0
    >>> catalan(-1)
    Traceback (most recent call last):
    ...
    ValueError: Input value of [number=-1] must be > 0
    >>> catalan(5.0)
    Traceback (most recent call last):
    ...
    TypeError: Input value of [number=5.0] must be an integer
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")

    # Build up iteratively using C(i+1) = C(i) * (4i - 2) // (i + 1);
    # the division is always exact for Catalan numbers.
    result = 1
    for step in range(1, number):
        result *= 4 * step - 2
        result //= step + 1
    return result
+
+
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
diff --git a/maths/ceil.py b/maths/ceil.py
index 97578265c..909e02b3f 100644
--- a/maths/ceil.py
+++ b/maths/ceil.py
@@ -3,7 +3,7 @@ https://en.wikipedia.org/wiki/Floor_and_ceiling_functions
"""
-def ceil(x) -> int:
+def ceil(x: float) -> int:
"""
Return the ceiling of x as an Integral.
diff --git a/maths/check_polygon.py b/maths/check_polygon.py
new file mode 100644
index 000000000..1e8dce718
--- /dev/null
+++ b/maths/check_polygon.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+
def check_polygon(nums: list[float]) -> bool:
    """
    Takes list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    Returns a boolean value for the < comparison
    of the largest side length with sum of the rest.
    Wiki: https://en.wikipedia.org/wiki/Triangle_inequality

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    >>> check_polygon([1, 4.3, 5.2, 12.2])
    False
    >>> nums = [3, 7, 13, 2]
    >>> _ = check_polygon(nums)  # Run function, do not show answer in output
    >>> nums  # Check numbers are not reordered
    [3, 7, 13, 2]
    >>> check_polygon([])
    Traceback (most recent call last):
    ...
    ValueError: Monogons and Digons are not polygons in the Euclidean space
    >>> check_polygon([2, 5])
    Traceback (most recent call last):
    ...
    ValueError: Monogons and Digons are not polygons in the Euclidean space
    >>> check_polygon([-2, 5, 6])
    Traceback (most recent call last):
    ...
    ValueError: All values must be greater than 0
    """
    # A Euclidean polygon needs at least three sides. The original check
    # (len(nums) < 2) let digons (two sides) through even though the error
    # message claims they are rejected; require three sides, as documented.
    if len(nums) < 3:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    # sorted() returns a new list, so the caller's ordering is preserved.
    ordered = sorted(nums)
    return ordered[-1] < sum(ordered[:-1])
+
+
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py
index 7b3636de6..b47017146 100644
--- a/maths/collatz_sequence.py
+++ b/maths/collatz_sequence.py
@@ -1,43 +1,66 @@
+"""
+The Collatz conjecture is a famous unsolved problem in mathematics. Given a starting
+positive integer, define the following sequence:
+- If the current term n is even, then the next term is n/2.
+- If the current term n is odd, then the next term is 3n + 1.
+The conjecture claims that this sequence will always reach 1 for any starting number.
+
+Other names for this problem include the 3n + 1 problem, the Ulam conjecture, Kakutani's
+problem, the Thwaites conjecture, Hasse's algorithm, the Syracuse problem, and the
+hailstone sequence.
+
+Reference: https://en.wikipedia.org/wiki/Collatz_conjecture
+"""
+
from __future__ import annotations
+from collections.abc import Generator
-def collatz_sequence(n: int) -> list[int]:
+
+def collatz_sequence(n: int) -> Generator[int, None, None]:
"""
- Collatz conjecture: start with any positive integer n. The next term is
- obtained as follows:
- If n term is even, the next term is: n / 2 .
- If n is odd, the next term is: 3 * n + 1.
-
- The conjecture states the sequence will always reach 1 for any starting value n.
- Example:
- >>> collatz_sequence(2.1)
+ Generate the Collatz sequence starting at n.
+ >>> tuple(collatz_sequence(2.1))
Traceback (most recent call last):
...
- Exception: Sequence only defined for natural numbers
- >>> collatz_sequence(0)
+ Exception: Sequence only defined for positive integers
+ >>> tuple(collatz_sequence(0))
Traceback (most recent call last):
...
- Exception: Sequence only defined for natural numbers
- >>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE
- [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7,
- 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
+ Exception: Sequence only defined for positive integers
+ >>> tuple(collatz_sequence(4))
+ (4, 2, 1)
+ >>> tuple(collatz_sequence(11))
+ (11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1)
+ >>> tuple(collatz_sequence(31)) # doctest: +NORMALIZE_WHITESPACE
+ (31, 94, 47, 142, 71, 214, 107, 322, 161, 484, 242, 121, 364, 182, 91, 274, 137,
+ 412, 206, 103, 310, 155, 466, 233, 700, 350, 175, 526, 263, 790, 395, 1186, 593,
+ 1780, 890, 445, 1336, 668, 334, 167, 502, 251, 754, 377, 1132, 566, 283, 850, 425,
+ 1276, 638, 319, 958, 479, 1438, 719, 2158, 1079, 3238, 1619, 4858, 2429, 7288, 3644,
+ 1822, 911, 2734, 1367, 4102, 2051, 6154, 3077, 9232, 4616, 2308, 1154, 577, 1732,
+ 866, 433, 1300, 650, 325, 976, 488, 244, 122, 61, 184, 92, 46, 23, 70, 35, 106, 53,
+ 160, 80, 40, 20, 10, 5, 16, 8, 4, 2, 1)
+ >>> tuple(collatz_sequence(43)) # doctest: +NORMALIZE_WHITESPACE
+ (43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26,
+ 13, 40, 20, 10, 5, 16, 8, 4, 2, 1)
"""
-
if not isinstance(n, int) or n < 1:
- raise Exception("Sequence only defined for natural numbers")
+ raise Exception("Sequence only defined for positive integers")
- sequence = [n]
+ yield n
while n != 1:
- n = 3 * n + 1 if n & 1 else n // 2
- sequence.append(n)
- return sequence
+ if n % 2 == 0:
+ n //= 2
+ else:
+ n = 3 * n + 1
+ yield n
def main():
- n = 43
- sequence = collatz_sequence(n)
+ n = int(input("Your number: "))
+ sequence = tuple(collatz_sequence(n))
print(sequence)
- print(f"collatz sequence from {n} took {len(sequence)} steps.")
+ print(f"Collatz sequence from {n} took {len(sequence)} steps.")
if __name__ == "__main__":
diff --git a/maths/combinations.py b/maths/combinations.py
index 40f4f7a9f..a2324012c 100644
--- a/maths/combinations.py
+++ b/maths/combinations.py
@@ -35,18 +35,17 @@ def combinations(n: int, k: int) -> int:
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k")
- return int(factorial(n) / ((factorial(k)) * (factorial(n - k))))
+ return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
-
print(
- "\nThe number of five-card hands possible from a standard",
- f"fifty-two card deck is: {combinations(52, 5)}",
+ "The number of five-card hands possible from a standard",
+ f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
- "\nIf a class of 40 students must be arranged into groups of",
+ "If a class of 40 students must be arranged into groups of",
f"4 for group projects, there are {combinations(40, 4)} ways",
"to arrange them.\n",
)
@@ -54,5 +53,5 @@ if __name__ == "__main__":
print(
"If 10 teams are competing in a Formula One race, there",
f"are {combinations(10, 3)} ways that first, second and",
- "third place can be awarded.\n",
+ "third place can be awarded.",
)
diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py
index 0e3967a46..058ed1bb9 100644
--- a/maths/decimal_isolate.py
+++ b/maths/decimal_isolate.py
@@ -4,8 +4,7 @@ https://stackoverflow.com/questions/3886402/how-to-get-numbers-after-decimal-poi
"""
-def decimal_isolate(number, digitAmount):
-
+def decimal_isolate(number: float, digit_amount: int) -> float:
"""
Isolates the decimal part of a number.
If digitAmount > 0 round to that decimal place, else print the entire decimal.
@@ -28,8 +27,8 @@ def decimal_isolate(number, digitAmount):
>>> decimal_isolate(-14.123, 3)
-0.123
"""
- if digitAmount > 0:
- return round(number - int(number), digitAmount)
+ if digit_amount > 0:
+ return round(number - int(number), digit_amount)
return number - int(number)
diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py
new file mode 100644
index 000000000..9462bafe0
--- /dev/null
+++ b/maths/decimal_to_fraction.py
@@ -0,0 +1,48 @@
+def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
+ """
+ Return a decimal number in its simplest fraction form
+ >>> decimal_to_fraction(2)
+ (2, 1)
+ >>> decimal_to_fraction(89.)
+ (89, 1)
+ >>> decimal_to_fraction("67")
+ (67, 1)
+ >>> decimal_to_fraction("45.0")
+ (45, 1)
+ >>> decimal_to_fraction(1.5)
+ (3, 2)
+ >>> decimal_to_fraction("6.25")
+ (25, 4)
+ >>> decimal_to_fraction("78td")
+ Traceback (most recent call last):
+ ValueError: Please enter a valid number
+ """
+ try:
+ decimal = float(decimal)
+ except ValueError:
+ raise ValueError("Please enter a valid number")
+ fractional_part = decimal - int(decimal)
+ if fractional_part == 0:
+ return int(decimal), 1
+ else:
+ number_of_frac_digits = len(str(decimal).split(".")[1])
+ numerator = int(decimal * (10**number_of_frac_digits))
+ denominator = 10**number_of_frac_digits
+ divisor, dividend = denominator, numerator
+ while True:
+ remainder = dividend % divisor
+ if remainder == 0:
+ break
+ dividend, divisor = divisor, remainder
+ numerator, denominator = numerator / divisor, denominator / divisor
+ return int(numerator), int(denominator)
+
+
if __name__ == "__main__":
    # Demonstrate the function on the doctest examples.
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    # NOTE(review): this last call raises ValueError, so the demo always
    # exits with a traceback here — consider wrapping it in try/except.
    print(f"{decimal_to_fraction('78td') = }")
diff --git a/maths/dodecahedron.py b/maths/dodecahedron.py
new file mode 100644
index 000000000..856245f4a
--- /dev/null
+++ b/maths/dodecahedron.py
@@ -0,0 +1,73 @@
+# dodecahedron.py
+
+"""
+A regular dodecahedron is a three-dimensional figure made up of
+12 pentagon faces having the same equal size.
+"""
+
+
def dodecahedron_surface_area(edge: float) -> float:
    """
    Calculates the surface area of a regular dodecahedron
    a = 3 * ((25 + 10 * (5** (1 / 2))) ** (1 / 2 )) * (e**2)
    where:
    a --> is the area of the dodecahedron
    e --> is the length of the edge
    reference-->"Dodecahedron" Study.com


    :param edge: length of the edge of the dodecahedron (positive int or float)
    :type edge: float
    :return: the surface area of the dodecahedron as a float
    :raises ValueError: if edge is not a positive number


    Tests:
    >>> dodecahedron_surface_area(5)
    516.1432201766901
    >>> dodecahedron_surface_area(10)
    2064.5728807067603
    >>> dodecahedron_surface_area(-1)
    Traceback (most recent call last):
    ...
    ValueError: Length must be a positive.
    """

    # The annotation promises float support, but the original check
    # (isinstance(edge, int)) rejected float edges; accept any positive
    # real number. Checking the type first also avoids a confusing
    # TypeError from `edge <= 0` on non-numeric input.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
+
+
def dodecahedron_volume(edge: float) -> float:
    """
    Calculates the volume of a regular dodecahedron
    v = ((15 + (7 * (5** (1 / 2)))) / 4) * (e**3)
    where:
    v --> is the volume of the dodecahedron
    e --> is the length of the edge
    reference-->"Dodecahedron" Study.com


    :param edge: length of the edge of the dodecahedron (positive int or float)
    :type edge: float
    :return: the volume of the dodecahedron as a float
    :raises ValueError: if edge is not a positive number

    Tests:
    >>> dodecahedron_volume(5)
    957.8898700780791
    >>> dodecahedron_volume(10)
    7663.118960624633
    >>> dodecahedron_volume(-1)
    Traceback (most recent call last):
    ...
    ValueError: Length must be a positive.
    """

    # The annotation promises float support, but the original check
    # (isinstance(edge, int)) rejected float edges; accept any positive
    # real number. Checking the type first also avoids a confusing
    # TypeError from `edge <= 0` on non-numeric input.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
+
+
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
diff --git a/maths/double_factorial_iterative.py b/maths/double_factorial_iterative.py
new file mode 100644
index 000000000..b2b58aa04
--- /dev/null
+++ b/maths/double_factorial_iterative.py
@@ -0,0 +1,33 @@
def double_factorial(num: int) -> int:
    """
    Compute double factorial using iterative method.

    To learn about the theory behind this algorithm:
    https://en.wikipedia.org/wiki/Double_factorial

    >>> import math
    >>> all(double_factorial(i) == math.prod(range(i, 0, -2)) for i in range(20))
    True
    >>> double_factorial(0.1)
    Traceback (most recent call last):
    ...
    ValueError: double_factorial() only accepts integral values
    >>> double_factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: double_factorial() not defined for negative values
    """
    # Only non-negative integers have a double factorial here.
    if not isinstance(num, int):
        raise ValueError("double_factorial() only accepts integral values")
    if num < 0:
        raise ValueError("double_factorial() not defined for negative values")
    # Multiply num * (num - 2) * (num - 4) * ... down to 2 or 1.
    result = 1
    factor = num
    while factor > 0:
        result *= factor
        factor -= 2
    return result
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/double_factorial_recursive.py b/maths/double_factorial_recursive.py
new file mode 100644
index 000000000..05c9b2968
--- /dev/null
+++ b/maths/double_factorial_recursive.py
@@ -0,0 +1,31 @@
def double_factorial(n: int) -> int:
    """
    Compute double factorial using recursive method.
    Recursion can be costly for large numbers.

    To learn about the theory behind this algorithm:
    https://en.wikipedia.org/wiki/Double_factorial

    >>> import math
    >>> all(double_factorial(i) == math.prod(range(i, 0, -2)) for i in range(20))
    True
    >>> double_factorial(0.1)
    Traceback (most recent call last):
    ...
    ValueError: double_factorial() only accepts integral values
    >>> double_factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: double_factorial() not defined for negative values
    """
    if not isinstance(n, int):
        raise ValueError("double_factorial() only accepts integral values")
    if n < 0:
        raise ValueError("double_factorial() not defined for negative values")
    # Base case: 0!! == 1!! == 1; otherwise step down by two and recurse.
    if n <= 1:
        return 1
    return n * double_factorial(n - 2)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py
new file mode 100644
index 000000000..f98997c8b
--- /dev/null
+++ b/maths/dual_number_automatic_differentiation.py
@@ -0,0 +1,141 @@
+from math import factorial
+
+"""
+https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers
+https://blog.jliszka.org/2013/10/24/exact-numeric-nth-derivatives.html
+
+Note this only works for basic functions, f(x) where the power of x is positive.
+"""
+
+
class Dual:
    """A dual number with higher-order infinitesimal parts.

    ``real`` holds the real component; ``duals[k]`` holds the coefficient of
    the (k+1)-th order infinitesimal E**(k+1).  Used by ``differentiate`` to
    compute exact nth derivatives of polynomial-like functions.
    """

    def __init__(self, real, rank):
        # rank may be an int (number of dual components, each initialised
        # to 1) or an explicit list of dual coefficients.
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        # Render as e.g. "3+1E1+2E2" where En is the n-th order infinitesimal.
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # Drop trailing zero coefficients.
        # NOTE(review): raises IndexError if every coefficient is zero
        # (the while loop empties the list and then reads cur[-1]) — confirm
        # callers never hit this case.
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        # Adding a plain number only shifts the real part.
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list to a common length.
        # NOTE(review): padding uses 1 rather than 0; for strict dual-number
        # semantics missing coefficients would normally be 0 — preserved
        # as-is because the module doctests depend on current behaviour.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        # Defined via addition with the negation (works for Dual and scalars).
        return self + other * -1

    def __mul__(self, other):
        # Scalar multiplication scales every coefficient.
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Dual * Dual: convolution of coefficient lists (cross terms land at
        # order i + j + 2, hence index i + j + 1) plus each side scaled by
        # the other's real part.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        # Only division by a plain scalar is supported.
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        # Only floor division by a plain scalar is supported.
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        # Exponentiation by repeated multiplication; n must be a
        # non-negative integer.
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
+
+
def differentiate(func, position, order):
    """
    >>> differentiate(lambda x: x**2, 2, 2)
    2
    >>> differentiate(lambda x: x**2 * x**4, 9, 2)
    196830
    >>> differentiate(lambda y: 0.5 * (y + 3) ** 6, 3.5, 4)
    7605.0
    >>> differentiate(lambda y: y ** 2, 4, 3)
    0
    >>> differentiate(8, 8, 8)
    Traceback (most recent call last):
    ...
    ValueError: differentiate() requires a function as input for func
    >>> differentiate(lambda x: x **2, "", 1)
    Traceback (most recent call last):
    ...
    ValueError: differentiate() requires a float as input for position
    >>> differentiate(lambda x: x**2, 3, "")
    Traceback (most recent call last):
    ...
    ValueError: differentiate() requires an int as input for order
    """
    # Validate each argument in declaration order so error precedence is stable.
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    # Evaluate func on a rank-1 dual number seeded at the requested position;
    # the k-th derivative is the k-th dual coefficient scaled by k!.
    result = func(Dual(position, 1))
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ def f(y):
+ return y**2 * y**4
+
+ print(differentiate(f, 9, 2))
diff --git a/maths/entropy.py b/maths/entropy.py
index 43bb3860f..498c28f31 100644
--- a/maths/entropy.py
+++ b/maths/entropy.py
@@ -68,7 +68,7 @@ def calculate_prob(text: str) -> None:
my_fir_sum += prob * math.log2(prob) # entropy formula.
# print entropy
- print("{:.1f}".format(round(-1 * my_fir_sum)))
+ print(f"{round(-1 * my_fir_sum):.1f}")
# two len string
all_sum = sum(two_char_strings.values())
@@ -83,10 +83,10 @@ def calculate_prob(text: str) -> None:
my_sec_sum += prob * math.log2(prob)
# print second entropy
- print("{:.1f}".format(round(-1 * my_sec_sum)))
+ print(f"{round(-1 * my_sec_sum):.1f}")
# print the difference between them
- print("{:.1f}".format(round((-1 * my_sec_sum) - (-1 * my_fir_sum))))
+ print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py
index 6e0da6370..9b29b37b0 100644
--- a/maths/euclidean_distance.py
+++ b/maths/euclidean_distance.py
@@ -1,9 +1,12 @@
-from typing import Iterable, Union
+from __future__ import annotations
+
+import typing
+from collections.abc import Iterable
import numpy as np
-Vector = Union[Iterable[float], Iterable[int], np.ndarray]
-VectorOut = Union[np.float64, int, float]
+Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
+VectorOut = typing.Union[np.float64, int, float] # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
diff --git a/maths/euler_method.py b/maths/euler_method.py
index 7c7801986..30f193e6d 100644
--- a/maths/euler_method.py
+++ b/maths/euler_method.py
@@ -1,18 +1,25 @@
+from collections.abc import Callable
+
import numpy as np
-def explicit_euler(ode_func, y0, x0, step_size, x_end):
- """
- Calculate numeric solution at each step to an ODE using Euler's Method
+def explicit_euler(
+ ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
+) -> np.ndarray:
+ """Calculate numeric solution at each step to an ODE using Euler's Method
- https://en.wikipedia.org/wiki/Euler_method
+ For reference to Euler's method refer to https://en.wikipedia.org/wiki/Euler_method.
- Arguments:
- ode_func -- The ode as a function of x and y
- y0 -- the initial value for y
- x0 -- the initial value for x
- stepsize -- the increment value for x
- x_end -- the end value for x
+ Args:
+ ode_func (Callable): The ordinary differential equation
+ as a function of x and y.
+ y0 (float): The initial value for y.
+ x0 (float): The initial value for x.
+ step_size (float): The increment value for x.
+ x_end (float): The final value of x to be calculated.
+
+ Returns:
+ np.ndarray: Solution of y for every step in x.
>>> # the exact solution is math.exp(x)
>>> def f(x, y):
@@ -22,12 +29,12 @@ def explicit_euler(ode_func, y0, x0, step_size, x_end):
>>> y[-1]
144.77277243257308
"""
- N = int(np.ceil((x_end - x0) / step_size))
- y = np.zeros((N + 1,))
+ n = int(np.ceil((x_end - x0) / step_size))
+ y = np.zeros((n + 1,))
y[0] = y0
x = x0
- for k in range(N):
+ for k in range(n):
y[k + 1] = y[k] + step_size * ode_func(x, y[k])
x += step_size
diff --git a/maths/euler_modified.py b/maths/euler_modified.py
new file mode 100644
index 000000000..14bddadf4
--- /dev/null
+++ b/maths/euler_modified.py
@@ -0,0 +1,54 @@
+from collections.abc import Callable
+
+import numpy as np
+
+
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Calculate solution at each step to an ODE using Euler's Modified Method
    (a predictor-corrector scheme). The basic Euler Method is straightforward
    to implement, but can't give accurate solutions, so this variant averages
    the slope at the current point with the slope at the predicted next point.

    https://en.wikipedia.org/wiki/Euler_method

    Args:
        ode_func: The ordinary differential equation as a function of x and y.
        y0: The initial value for y.
        x0: The initial value for x.
        step_size: The increment value for x.
        x_end: The final value of x to be calculated.

    Returns:
        np.ndarray: Solution of y for every step in x.

    >>> # the exact solution is math.exp(x)
    >>> def f1(x, y):
    ...     return -2*x*(y**2)
    >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0)
    >>> y[-1]
    0.503338255442106
    >>> import math
    >>> def f2(x, y):
    ...     return -2*y + (x**3)*math.exp(-2*x)
    >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3)
    >>> y[-1]
    0.5525976431951775
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: plain explicit-Euler estimate of y at the next step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at the current and predicted points.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/eulers_totient.py b/maths/eulers_totient.py
index 6a35e69bd..a15664703 100644
--- a/maths/eulers_totient.py
+++ b/maths/eulers_totient.py
@@ -1,5 +1,20 @@
# Eulers Totient function finds the number of relative primes of a number n from 1 to n
def totient(n: int) -> list:
+ """
+ >>> n = 10
+ >>> totient_calculation = totient(n)
+ >>> for i in range(1, n):
+ ... print(f"{i} has {totient_calculation[i]} relative primes.")
+ 1 has 0 relative primes.
+ 2 has 1 relative primes.
+ 3 has 2 relative primes.
+ 4 has 2 relative primes.
+ 5 has 4 relative primes.
+ 6 has 2 relative primes.
+ 7 has 6 relative primes.
+ 8 has 4 relative primes.
+ 9 has 6 relative primes.
+ """
is_prime = [True for i in range(n + 1)]
totients = [i - 1 for i in range(n + 1)]
primes = []
@@ -20,25 +35,6 @@ def totient(n: int) -> list:
return totients
-def test_totient() -> None:
- """
- >>> n = 10
- >>> totient_calculation = totient(n)
- >>> for i in range(1, n):
- ... print(f"{i} has {totient_calculation[i]} relative primes.")
- 1 has 0 relative primes.
- 2 has 1 relative primes.
- 3 has 2 relative primes.
- 4 has 2 relative primes.
- 5 has 4 relative primes.
- 6 has 2 relative primes.
- 7 has 6 relative primes.
- 8 has 4 relative primes.
- 9 has 6 relative primes.
- """
- pass
-
-
if __name__ == "__main__":
import doctest
diff --git a/maths/extended_euclidean_algorithm.py b/maths/extended_euclidean_algorithm.py
index e7087636c..c54909e19 100644
--- a/maths/extended_euclidean_algorithm.py
+++ b/maths/extended_euclidean_algorithm.py
@@ -12,12 +12,12 @@ https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
# @Email: silentcat@protonmail.com
# @Last modified by: pikulet
# @Last modified time: 2020-10-02
+from __future__ import annotations
import sys
-from typing import Tuple
-def extended_euclidean_algorithm(a: int, b: int) -> Tuple[int, int]:
+def extended_euclidean_algorithm(a: int, b: int) -> tuple[int, int]:
"""
Extended Euclidean Algorithm.
@@ -75,11 +75,12 @@ def main():
"""Call Extended Euclidean Algorithm."""
if len(sys.argv) < 3:
print("2 integer arguments required")
- exit(1)
+ return 1
a = int(sys.argv[1])
b = int(sys.argv[2])
print(extended_euclidean_algorithm(a, b))
+ return 0
if __name__ == "__main__":
- main()
+ raise SystemExit(main())
diff --git a/maths/factorial.py b/maths/factorial.py
new file mode 100644
index 000000000..18cacdef9
--- /dev/null
+++ b/maths/factorial.py
@@ -0,0 +1,67 @@
+"""Factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial
+"""
+
+
def factorial(number: int) -> int:
    """
    Calculate the factorial of specified number (n!).

    >>> import math
    >>> all(factorial(i) == math.factorial(i) for i in range(20))
    True
    >>> factorial(0.1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() only accepts integral values
    >>> factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: factorial() not defined for negative values
    >>> factorial(1)
    1
    >>> factorial(6)
    720
    >>> factorial(0)
    1
    >>> factorial(5.0)
    120
    """
    if number != int(number):
        raise ValueError("factorial() only accepts integral values")
    if number < 0:
        raise ValueError("factorial() not defined for negative values")
    # The integrality check above deliberately admits integral floats such as
    # 5.0, but range() requires an int, so normalise before iterating
    # (previously factorial(5.0) crashed with TypeError inside range()).
    number = int(number)
    value = 1
    for i in range(1, number + 1):
        value *= i
    return value
+
+
def factorial_recursive(n: int) -> int:
    """
    Calculate the factorial of a positive integer
    https://en.wikipedia.org/wiki/Factorial

    >>> import math
    >>> all(factorial_recursive(i) == math.factorial(i) for i in range(20))
    True
    >>> factorial_recursive(0.1)
    Traceback (most recent call last):
    ...
    ValueError: factorial_recursive() only accepts integral values
    >>> factorial_recursive(-1)
    Traceback (most recent call last):
    ...
    ValueError: factorial_recursive() not defined for negative values
    """
    if not isinstance(n, int):
        raise ValueError("factorial_recursive() only accepts integral values")
    if n < 0:
        raise ValueError("factorial_recursive() not defined for negative values")
    # Recurse on this function itself; the original delegated the recursive
    # step (and its doctests/error messages) to the sibling iterative
    # ``factorial``, which defeated the purpose of the recursive variant.
    return 1 if n in {0, 1} else n * factorial_recursive(n - 1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ n = int(input("Enter a positive integer: ").strip() or 0)
+ print(f"factorial{n} is {factorial(n)}")
diff --git a/maths/factorial_iterative.py b/maths/factorial_iterative.py
deleted file mode 100644
index 64314790c..000000000
--- a/maths/factorial_iterative.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial
-
-
-def factorial(n: int) -> int:
- """
- >>> import math
- >>> all(factorial(i) == math.factorial(i) for i in range(20))
- True
- >>> factorial(0.1)
- Traceback (most recent call last):
- ...
- ValueError: factorial() only accepts integral values
- >>> factorial(-1)
- Traceback (most recent call last):
- ...
- ValueError: factorial() not defined for negative values
- """
- if n != int(n):
- raise ValueError("factorial() only accepts integral values")
- if n < 0:
- raise ValueError("factorial() not defined for negative values")
- value = 1
- for i in range(1, n + 1):
- value *= i
- return value
-
-
-if __name__ == "__main__":
- n = int(input("Enter a positive integer: ").strip() or 0)
- print(f"factorial{n} is {factorial(n)}")
diff --git a/maths/factorial_python.py b/maths/factorial_python.py
deleted file mode 100644
index 46688261a..000000000
--- a/maths/factorial_python.py
+++ /dev/null
@@ -1,34 +0,0 @@
-def factorial(input_number: int) -> int:
- """
- Calculate the factorial of specified number
-
- >>> factorial(1)
- 1
- >>> factorial(6)
- 720
- >>> factorial(0)
- 1
- >>> factorial(-1)
- Traceback (most recent call last):
- ...
- ValueError: factorial() not defined for negative values
- >>> factorial(0.1)
- Traceback (most recent call last):
- ...
- ValueError: factorial() only accepts integral values
- """
-
- if input_number < 0:
- raise ValueError("factorial() not defined for negative values")
- if not isinstance(input_number, int):
- raise ValueError("factorial() only accepts integral values")
- result = 1
- for i in range(1, input_number):
- result = result * (i + 1)
- return result
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
diff --git a/maths/factorial_recursive.py b/maths/factorial_recursive.py
deleted file mode 100644
index 137112738..000000000
--- a/maths/factorial_recursive.py
+++ /dev/null
@@ -1,28 +0,0 @@
-def factorial(n: int) -> int:
- """
- Calculate the factorial of a positive integer
- https://en.wikipedia.org/wiki/Factorial
-
- >>> import math
- >>> all(factorial(i) == math.factorial(i) for i in range(20))
- True
- >>> factorial(0.1)
- Traceback (most recent call last):
- ...
- ValueError: factorial() only accepts integral values
- >>> factorial(-1)
- Traceback (most recent call last):
- ...
- ValueError: factorial() not defined for negative values
- """
- if not isinstance(n, int):
- raise ValueError("factorial() only accepts integral values")
- if n < 0:
- raise ValueError("factorial() not defined for negative values")
- return 1 if n == 0 or n == 1 else n * factorial(n - 1)
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
diff --git a/maths/factors.py b/maths/factors.py
index e2fdc4063..ae2e5316c 100644
--- a/maths/factors.py
+++ b/maths/factors.py
@@ -1,3 +1,7 @@
+from doctest import testmod
+from math import sqrt
+
+
def factors_of_a_number(num: int) -> list:
"""
>>> factors_of_a_number(1)
@@ -9,10 +13,22 @@ def factors_of_a_number(num: int) -> list:
>>> factors_of_a_number(-24)
[]
"""
- return [i for i in range(1, num + 1) if num % i == 0]
+ facs: list[int] = []
+ if num < 1:
+ return facs
+ facs.append(1)
+ if num == 1:
+ return facs
+ facs.append(num)
+ for i in range(2, int(sqrt(num)) + 1):
+ if num % i == 0: # If i is a factor of num
+ facs.append(i)
+ d = num // i # num//i is the other factor of num
+ if d != i: # If d and i are distinct
+ facs.append(d) # we have found another factor
+ facs.sort()
+ return facs
if __name__ == "__main__":
- num = int(input("Enter a number to find its factors: "))
- factors = factors_of_a_number(num)
- print(f"{num} has {len(factors)} factors: {', '.join(str(f) for f in factors)}")
+ testmod(name="factors_of_a_number", verbose=True)
diff --git a/maths/fermat_little_theorem.py b/maths/fermat_little_theorem.py
index 73af3e28c..eea03be24 100644
--- a/maths/fermat_little_theorem.py
+++ b/maths/fermat_little_theorem.py
@@ -6,7 +6,6 @@
def binary_exponentiation(a, n, mod):
-
if n == 0:
return 1
diff --git a/maths/fibonacci.py b/maths/fibonacci.py
index e65190354..e810add69 100644
--- a/maths/fibonacci.py
+++ b/maths/fibonacci.py
@@ -1,130 +1,205 @@
-# fibonacci.py
-"""
-1. Calculates the iterative fibonacci sequence
-
-2. Calculates the fibonacci sequence with a formula
- an = [ Phin - (phi)n ]/Sqrt[5]
- reference-->Su, Francis E., et al. "Fibonacci Number Formula." Math Fun Facts.
-
-"""
-import functools
-import math
-import time
-from decimal import Decimal, getcontext
-
-getcontext().prec = 100
-
-
-def timer_decorator(func):
- @functools.wraps(func)
- def timer_wrapper(*args, **kwargs):
- start = time.time()
- func(*args, **kwargs)
- end = time.time()
- if int(end - start) > 0:
- print(f"Run time for {func.__name__}: {(end - start):0.2f}s")
- else:
- print(f"Run time for {func.__name__}: {(end - start)*1000:0.2f}ms")
- return func(*args, **kwargs)
-
- return timer_wrapper
-
-
-# define Python user-defined exceptions
-class Error(Exception):
- """Base class for other exceptions"""
-
-
-class ValueTooLargeError(Error):
- """Raised when the input value is too large"""
-
-
-class ValueTooSmallError(Error):
- """Raised when the input value is not greater than one"""
-
-
-class ValueLessThanZero(Error):
- """Raised when the input value is less than zero"""
-
-
-def _check_number_input(n, min_thresh, max_thresh=None):
- """
- :param n: single integer
- :type n: int
- :param min_thresh: min threshold, single integer
- :type min_thresh: int
- :param max_thresh: max threshold, single integer
- :type max_thresh: int
- :return: boolean
- """
- try:
- if n >= min_thresh and max_thresh is None:
- return True
- elif min_thresh <= n <= max_thresh:
- return True
- elif n < 0:
- raise ValueLessThanZero
- elif n < min_thresh:
- raise ValueTooSmallError
- elif n > max_thresh:
- raise ValueTooLargeError
- except ValueLessThanZero:
- print("Incorrect Input: number must not be less than 0")
- except ValueTooSmallError:
- print(
- f"Incorrect Input: input number must be > {min_thresh} for the recursive "
- "calculation"
- )
- except ValueTooLargeError:
- print(
- f"Incorrect Input: input number must be < {max_thresh} for the recursive "
- "calculation"
- )
- return False
-
-
-@timer_decorator
-def fib_iterative(n):
- """
- :param n: calculate Fibonacci to the nth integer
- :type n:int
- :return: Fibonacci sequence as a list
- """
- n = int(n)
- if _check_number_input(n, 2):
- seq_out = [0, 1]
- a, b = 0, 1
- for _ in range(n - len(seq_out)):
- a, b = b, a + b
- seq_out.append(b)
- return seq_out
-
-
-@timer_decorator
-def fib_formula(n):
- """
- :param n: calculate Fibonacci to the nth integer
- :type n:int
- :return: Fibonacci sequence as a list
- """
- seq_out = [0, 1]
- n = int(n)
- if _check_number_input(n, 2, 1000000):
- sqrt = Decimal(math.sqrt(5))
- phi_1 = Decimal(1 + sqrt) / Decimal(2)
- phi_2 = Decimal(1 - sqrt) / Decimal(2)
- for i in range(2, n):
- temp_out = ((phi_1 ** Decimal(i)) - (phi_2 ** Decimal(i))) * (
- Decimal(sqrt) ** Decimal(-1)
- )
- seq_out.append(int(temp_out))
- return seq_out
-
-
-if __name__ == "__main__":
- num = 20
- # print(f'{fib_recursive(num)}\n')
- # print(f'{fib_iterative(num)}\n')
- # print(f'{fib_formula(num)}\n')
- fib_iterative(num)
- fib_formula(num)
+# fibonacci.py
+"""
+Calculates the Fibonacci sequence using iteration, recursion, memoization,
+and a simplified form of Binet's formula
+
+NOTE 1: the iterative, recursive, memoization functions are more accurate than
+the Binet's formula function because the Binet formula function uses floats
+
+NOTE 2: the Binet's formula function is much more limited in the size of inputs
+that it can handle due to the size limitations of Python floats
+
+RESULTS: (n = 20)
+fib_iterative runtime: 0.0055 ms
+fib_recursive runtime: 6.5627 ms
+fib_memoization runtime: 0.0107 ms
+fib_binet runtime: 0.0174 ms
+"""
+
+import functools
+from math import sqrt
+from time import time
+
+
def time_func(func, *args, **kwargs):
    """
    Times the execution of a function with parameters
    """
    before = time()
    result = func(*args, **kwargs)
    elapsed = time() - before
    # Report whole-second runtimes in seconds, sub-second ones in ms.
    if int(elapsed) > 0:
        print(f"{func.__name__} runtime: {elapsed:0.4f} s")
    else:
        print(f"{func.__name__} runtime: {elapsed * 1000:0.4f} ms")
    return result
+
+
def fib_iterative(n: int) -> list[int]:
    """
    Calculates the first n (0-indexed) Fibonacci numbers using iteration
    >>> fib_iterative(0)
    [0]
    >>> fib_iterative(1)
    [0, 1]
    >>> fib_iterative(5)
    [0, 1, 1, 2, 3, 5]
    >>> fib_iterative(10)
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    >>> fib_iterative(-1)
    Traceback (most recent call last):
    ...
    Exception: n is negative
    """
    if n < 0:
        raise Exception("n is negative")
    if n == 0:
        return [0]
    # Roll the last two terms forward while collecting the sequence.
    previous, current = 0, 1
    sequence = [previous, current]
    for _ in range(n - 1):
        previous, current = current, previous + current
        sequence.append(current)
    return sequence
+
+
def fib_recursive(n: int) -> list[int]:
    """
    Calculates the first n (0-indexed) Fibonacci numbers using recursion
    >>> fib_recursive(0)
    [0]
    >>> fib_recursive(1)
    [0, 1]
    >>> fib_recursive(5)
    [0, 1, 1, 2, 3, 5]
    >>> fib_recursive(10)
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    >>> fib_recursive(-1)
    Traceback (most recent call last):
    ...
    Exception: n is negative
    """

    def fib_recursive_term(i: int) -> int:
        """
        Calculates the i-th (0-indexed) Fibonacci number using recursion
        """
        if i < 0:
            raise Exception("n is negative")
        if i < 2:
            return i
        return fib_recursive_term(i - 1) + fib_recursive_term(i - 2)

    if n < 0:
        raise Exception("n is negative")
    # Doctests previously exercised fib_iterative by mistake; they now
    # actually test this function.
    return [fib_recursive_term(i) for i in range(n + 1)]
+
+
def fib_recursive_cached(n: int) -> list[int]:
    """
    Calculates the first n (0-indexed) Fibonacci numbers using recursion
    with memoized terms (functools.cache)
    >>> fib_recursive_cached(0)
    [0]
    >>> fib_recursive_cached(1)
    [0, 1]
    >>> fib_recursive_cached(5)
    [0, 1, 1, 2, 3, 5]
    >>> fib_recursive_cached(10)
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    >>> fib_recursive_cached(-1)
    Traceback (most recent call last):
    ...
    Exception: n is negative
    """

    @functools.cache
    def fib_recursive_term(i: int) -> int:
        """
        Calculates the i-th (0-indexed) Fibonacci number using recursion
        """
        if i < 0:
            raise Exception("n is negative")
        if i < 2:
            return i
        return fib_recursive_term(i - 1) + fib_recursive_term(i - 2)

    if n < 0:
        raise Exception("n is negative")
    # Doctests previously exercised fib_iterative by mistake; they now
    # actually test this function.
    return [fib_recursive_term(i) for i in range(n + 1)]
+
+
def fib_memoization(n: int) -> list[int]:
    """
    Calculates the first n (0-indexed) Fibonacci numbers using memoization
    >>> fib_memoization(0)
    [0]
    >>> fib_memoization(1)
    [0, 1]
    >>> fib_memoization(5)
    [0, 1, 1, 2, 3, 5]
    >>> fib_memoization(10)
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    >>> fib_memoization(-1)
    Traceback (most recent call last):
    ...
    Exception: n is negative
    """
    if n < 0:
        raise Exception("n is negative")
    # The cache must live outside the recursive helper, otherwise it would
    # be reset on every recursive call.
    cache: dict[int, int] = {0: 0, 1: 1, 2: 1}  # Prefilled cache

    def rec_fn_memoized(num: int) -> int:
        if num in cache:
            return cache[num]

        value = rec_fn_memoized(num - 1) + rec_fn_memoized(num - 2)
        cache[num] = value
        return value

    return [rec_fn_memoized(i) for i in range(n + 1)]
+
+
def fib_binet(n: int) -> list[int]:
    """
    Calculates the first n (0-indexed) Fibonacci numbers using a simplified form
    of Binet's formula:
    https://en.m.wikipedia.org/wiki/Fibonacci_number#Computation_by_rounding

    NOTE 1: this function diverges from fib_iterative at around n = 71, likely
    due to compounding floating-point arithmetic errors

    NOTE 2: this function doesn't accept n >= 1475 because it overflows
    thereafter due to the size limitations of Python floats
    >>> fib_binet(0)
    [0]
    >>> fib_binet(1)
    [0, 1]
    >>> fib_binet(5)
    [0, 1, 1, 2, 3, 5]
    >>> fib_binet(10)
    [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    >>> fib_binet(-1)
    Traceback (most recent call last):
    ...
    Exception: n is negative
    >>> fib_binet(1475)
    Traceback (most recent call last):
    ...
    Exception: n is too large
    """
    if n < 0:
        raise Exception("n is negative")
    if n >= 1475:
        raise Exception("n is too large")
    # F(i) = round(phi**i / sqrt(5)), where phi is the golden ratio.
    root_five = sqrt(5)
    golden_ratio = (1 + root_five) / 2
    return [round(golden_ratio**index / root_five) for index in range(n + 1)]
+
+
+if __name__ == "__main__":
+ num = 30
+ time_func(fib_iterative, num)
+ time_func(fib_recursive, num) # Around 3s runtime
+ time_func(fib_recursive_cached, num) # Around 0ms runtime
+ time_func(fib_memoization, num)
+ time_func(fib_binet, num)
diff --git a/maths/fibonacci_sequence_recursion.py b/maths/fibonacci_sequence_recursion.py
deleted file mode 100644
index 794b9fc0b..000000000
--- a/maths/fibonacci_sequence_recursion.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Fibonacci Sequence Using Recursion
-
-
-def recur_fibo(n: int) -> int:
- """
- >>> [recur_fibo(i) for i in range(12)]
- [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
- """
- return n if n <= 1 else recur_fibo(n - 1) + recur_fibo(n - 2)
-
-
-def main() -> None:
- limit = int(input("How many terms to include in fibonacci series: "))
- if limit > 0:
- print(f"The first {limit} terms of the fibonacci series are as follows:")
- print([recur_fibo(n) for n in range(limit)])
- else:
- print("Please enter a positive integer: ")
-
-
-if __name__ == "__main__":
- main()
diff --git a/maths/find_max.py b/maths/find_max.py
index 4d92e37eb..684fbe816 100644
--- a/maths/find_max.py
+++ b/maths/find_max.py
@@ -1,7 +1,7 @@
-# NguyenU
+from __future__ import annotations
-def find_max(nums):
+def find_max(nums: list[int | float]) -> int | float:
"""
>>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
... find_max(nums) == max(nums)
@@ -9,7 +9,15 @@ def find_max(nums):
True
True
True
+ >>> find_max([2, 4, 9, 7, 19, 94, 5])
+ 94
+ >>> find_max([])
+ Traceback (most recent call last):
+ ...
+ ValueError: find_max() arg is an empty sequence
"""
+ if len(nums) == 0:
+ raise ValueError("find_max() arg is an empty sequence")
max_num = nums[0]
for x in nums:
if x > max_num:
@@ -17,9 +25,7 @@ def find_max(nums):
return max_num
-def main():
- print(find_max([2, 4, 9, 7, 19, 94, 5])) # 94
-
-
if __name__ == "__main__":
- main()
+ import doctest
+
+ doctest.testmod(verbose=True)
diff --git a/maths/find_max_recursion.py b/maths/find_max_recursion.py
index 03fb81950..629932e08 100644
--- a/maths/find_max_recursion.py
+++ b/maths/find_max_recursion.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
+
# Divide and Conquer algorithm
-def find_max(nums, left, right):
+def find_max(nums: list[int | float], left: int, right: int) -> int | float:
"""
find max value in list
:param nums: contains elements
@@ -7,10 +10,39 @@ def find_max(nums, left, right):
:param right: index of last element
:return: max in nums
+ >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
+ ... find_max(nums, 0, len(nums) - 1) == max(nums)
+ True
+ True
+ True
+ True
>>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
>>> find_max(nums, 0, len(nums) - 1) == max(nums)
True
+ >>> find_max([], 0, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: find_max() arg is an empty sequence
+ >>> find_max(nums, 0, len(nums)) == max(nums)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> find_max(nums, -len(nums), -1) == max(nums)
+ True
+ >>> find_max(nums, -len(nums) - 1, -1) == max(nums)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
"""
+ if len(nums) == 0:
+ raise ValueError("find_max() arg is an empty sequence")
+ if (
+ left >= len(nums)
+ or left < -len(nums)
+ or right >= len(nums)
+ or right < -len(nums)
+ ):
+ raise IndexError("list index out of range")
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
@@ -21,5 +53,6 @@ def find_max(nums, left, right):
if __name__ == "__main__":
- nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
- assert find_max(nums, 0, len(nums) - 1) == 10
+ import doctest
+
+ doctest.testmod(verbose=True)
diff --git a/maths/find_min.py b/maths/find_min.py
index 2af2e44ba..2eac087c6 100644
--- a/maths/find_min.py
+++ b/maths/find_min.py
@@ -1,4 +1,7 @@
-def find_min(nums):
+from __future__ import annotations
+
+
+def find_min(nums: list[int | float]) -> int | float:
"""
Find Minimum Number in a List
:param nums: contains elements
@@ -10,17 +13,22 @@ def find_min(nums):
True
True
True
+ >>> find_min([0, 1, 2, 3, 4, 5, -3, 24, -56])
+ -56
+ >>> find_min([])
+ Traceback (most recent call last):
+ ...
+ ValueError: find_min() arg is an empty sequence
"""
+ if len(nums) == 0:
+ raise ValueError("find_min() arg is an empty sequence")
min_num = nums[0]
for num in nums:
- if min_num > num:
- min_num = num
+ min_num = min(min_num, num)
return min_num
-def main():
- assert find_min([0, 1, 2, 3, 4, 5, -3, 24, -56]) == -56
-
-
if __name__ == "__main__":
- main()
+ import doctest
+
+ doctest.testmod(verbose=True)
diff --git a/maths/find_min_recursion.py b/maths/find_min_recursion.py
index 4488967cc..4d11015ef 100644
--- a/maths/find_min_recursion.py
+++ b/maths/find_min_recursion.py
@@ -1,5 +1,8 @@
+from __future__ import annotations
+
+
# Divide and Conquer algorithm
-def find_min(nums, left, right):
+def find_min(nums: list[int | float], left: int, right: int) -> int | float:
"""
find min value in list
:param nums: contains elements
@@ -7,10 +10,39 @@ def find_min(nums, left, right):
:param right: index of last element
:return: min in nums
+ >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
+ ... find_min(nums, 0, len(nums) - 1) == min(nums)
+ True
+ True
+ True
+ True
>>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
>>> find_min(nums, 0, len(nums) - 1) == min(nums)
True
+ >>> find_min([], 0, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: find_min() arg is an empty sequence
+ >>> find_min(nums, 0, len(nums)) == min(nums)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> find_min(nums, -len(nums), -1) == min(nums)
+ True
+ >>> find_min(nums, -len(nums) - 1, -1) == min(nums)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
"""
+ if len(nums) == 0:
+ raise ValueError("find_min() arg is an empty sequence")
+ if (
+ left >= len(nums)
+ or left < -len(nums)
+ or right >= len(nums)
+ or right < -len(nums)
+ ):
+ raise IndexError("list index out of range")
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
@@ -21,5 +53,6 @@ def find_min(nums, left, right):
if __name__ == "__main__":
- nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
- assert find_min(nums, 0, len(nums) - 1) == 1
+ import doctest
+
+ doctest.testmod(verbose=True)
diff --git a/maths/floor.py b/maths/floor.py
index 482250f5e..8bbcb21aa 100644
--- a/maths/floor.py
+++ b/maths/floor.py
@@ -3,7 +3,7 @@ https://en.wikipedia.org/wiki/Floor_and_ceiling_functions
"""
-def floor(x) -> int:
+def floor(x: float) -> int:
"""
Return the floor of x as an Integral.
:param x: the number
diff --git a/maths/gamma.py b/maths/gamma.py
index 69cd819ef..d5debc587 100644
--- a/maths/gamma.py
+++ b/maths/gamma.py
@@ -11,42 +11,27 @@ def gamma(num: float) -> float:
used extension of the factorial function to complex numbers.
The gamma function is defined for all complex numbers except the non-positive
integers
-
-
>>> gamma(-1)
Traceback (most recent call last):
...
ValueError: math domain error
-
-
-
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: math domain error
-
-
>>> gamma(9)
40320.0
-
>>> from math import gamma as math_gamma
>>> all(.99999999 < gamma(i) / math_gamma(i) <= 1.000000001
... for i in range(1, 50))
True
-
-
- >>> from math import gamma as math_gamma
>>> gamma(-1)/math_gamma(-1) <= 1.000000001
Traceback (most recent call last):
...
ValueError: math domain error
-
-
- >>> from math import gamma as math_gamma
>>> gamma(3.3) - math_gamma(3.3) <= 0.00000001
True
"""
-
if num <= 0:
raise ValueError("math domain error")
diff --git a/maths/gamma_recursive.py b/maths/gamma_recursive.py
new file mode 100644
index 000000000..3d6b8c5e8
--- /dev/null
+++ b/maths/gamma_recursive.py
@@ -0,0 +1,77 @@
+"""
+Gamma function is a very useful tool in math and physics.
+It helps calculating complex integral in a convenient way.
+for more info: https://en.wikipedia.org/wiki/Gamma_function
+Python's Standard Library math.gamma() function overflows around gamma(171.624).
+"""
+from math import pi, sqrt
+
+
+def gamma(num: float) -> float:
+ """
+ Calculates the value of Gamma function of num
+ where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...).
+ Implemented using recursion
+ Examples:
+ >>> from math import isclose, gamma as math_gamma
+ >>> gamma(0.5)
+ 1.7724538509055159
+ >>> gamma(2)
+ 1.0
+ >>> gamma(3.5)
+ 3.3233509704478426
+ >>> gamma(171.5)
+ 9.483367566824795e+307
+ >>> all(isclose(gamma(num), math_gamma(num)) for num in (0.5, 2, 3.5, 171.5))
+ True
+ >>> gamma(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: math domain error
+ >>> gamma(-1.1)
+ Traceback (most recent call last):
+ ...
+ ValueError: math domain error
+ >>> gamma(-4)
+ Traceback (most recent call last):
+ ...
+ ValueError: math domain error
+ >>> gamma(172)
+ Traceback (most recent call last):
+ ...
+ OverflowError: math range error
+ >>> gamma(1.1)
+ Traceback (most recent call last):
+ ...
+ NotImplementedError: num must be an integer or a half-integer
+ """
+ if num <= 0:
+ raise ValueError("math domain error")
+ if num > 171.5:
+ raise OverflowError("math range error")
+ elif num - int(num) not in (0, 0.5):
+ raise NotImplementedError("num must be an integer or a half-integer")
+ elif num == 0.5:
+ return sqrt(pi)
+ else:
+ return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
+
+
+def test_gamma() -> None:
+ """
+ >>> test_gamma()
+ """
+ assert gamma(0.5) == sqrt(pi)
+ assert gamma(1) == 1.0
+ assert gamma(2) == 1.0
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
+ num = 1.0
+ while num:
+ num = float(input("Gamma of: "))
+ print(f"gamma({num}) = {gamma(num)}")
+ print("\nEnter 0 to exit...")
diff --git a/maths/gaussian.py b/maths/gaussian.py
index a5dba50a9..51ebc2e25 100644
--- a/maths/gaussian.py
+++ b/maths/gaussian.py
@@ -52,7 +52,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:
>>> gaussian(2523, mu=234234, sigma=3425)
0.0
"""
- return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / (2 * sigma ** 2))
+ return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
diff --git a/maths/gaussian_error_linear_unit.py b/maths/gaussian_error_linear_unit.py
new file mode 100644
index 000000000..7b5f87514
--- /dev/null
+++ b/maths/gaussian_error_linear_unit.py
@@ -0,0 +1,53 @@
+"""
+This script demonstrates an implementation of the Gaussian Error Linear Unit function.
+* https://en.wikipedia.org/wiki/Activation_function#Comparison_of_activation_functions
+
+The function takes a vector of K real numbers as input and returns x * sigmoid(1.702*x).
+Gaussian Error Linear Unit (GELU) is a high-performing neural network activation
+function.
+
+This script is inspired by a corresponding research paper.
+* https://arxiv.org/abs/1606.08415
+"""
+
+import numpy as np
+
+
+def sigmoid(vector: np.array) -> np.array:
+ """
+ Mathematical function sigmoid takes a vector x of K real numbers as input and
+ returns 1/ (1 + e^-x).
+ https://en.wikipedia.org/wiki/Sigmoid_function
+
+ >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
+ array([0.26894142, 0.73105858, 0.88079708])
+ """
+ return 1 / (1 + np.exp(-vector))
+
+
+def gaussian_error_linear_unit(vector: np.array) -> np.array:
+ """
+ Implements the Gaussian Error Linear Unit (GELU) function
+
+ Parameters:
+ vector (np.array): A numpy array of shape (1,n)
+ consisting of real values
+
+ Returns:
+ gelu_vec (np.array): The input numpy array, after applying
+ gelu.
+
+ Examples:
+ >>> gaussian_error_linear_unit(np.array([-1.0, 1.0, 2.0]))
+ array([-0.15420423, 0.84579577, 1.93565862])
+
+ >>> gaussian_error_linear_unit(np.array([-3]))
+ array([-0.01807131])
+ """
+ return vector * sigmoid(1.702 * vector)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/gcd_of_n_numbers.py b/maths/gcd_of_n_numbers.py
new file mode 100644
index 000000000..63236c236
--- /dev/null
+++ b/maths/gcd_of_n_numbers.py
@@ -0,0 +1,109 @@
+"""
+Gcd of N Numbers
+Reference: https://en.wikipedia.org/wiki/Greatest_common_divisor
+"""
+
+from collections import Counter
+
+
+def get_factors(
+ number: int, factors: Counter | None = None, factor: int = 2
+) -> Counter:
+ """
+    this is a recursive function to get all prime factors of a number
+ >>> get_factors(45)
+ Counter({3: 2, 5: 1})
+ >>> get_factors(2520)
+ Counter({2: 3, 3: 2, 5: 1, 7: 1})
+ >>> get_factors(23)
+ Counter({23: 1})
+ >>> get_factors(0)
+ Traceback (most recent call last):
+ ...
+ TypeError: number must be integer and greater than zero
+ >>> get_factors(-1)
+ Traceback (most recent call last):
+ ...
+ TypeError: number must be integer and greater than zero
+ >>> get_factors(1.5)
+ Traceback (most recent call last):
+ ...
+ TypeError: number must be integer and greater than zero
+
+    factor takes values from 2 up to number; we check whether number % factor == 0:
+    if it equals zero, we recurse with number // factor,
+    else we increase factor by one
+ """
+
+ match number:
+ case int(number) if number == 1:
+ return Counter({1: 1})
+ case int(num) if number > 0:
+ number = num
+ case _:
+ raise TypeError("number must be integer and greater than zero")
+
+ factors = factors or Counter()
+
+ if number == factor: # break condition
+ # all numbers are factors of itself
+ factors[factor] += 1
+ return factors
+
+ if number % factor > 0:
+ # if it is greater than zero
+ # so it is not a factor of number and we check next number
+ return get_factors(number, factors, factor + 1)
+
+ factors[factor] += 1
+ # else we update factors (that is Counter(dict-like) type) and check again
+ return get_factors(number // factor, factors, factor)
+
+
+def get_greatest_common_divisor(*numbers: int) -> int:
+ """
+ get gcd of n numbers:
+ >>> get_greatest_common_divisor(18, 45)
+ 9
+ >>> get_greatest_common_divisor(23, 37)
+ 1
+ >>> get_greatest_common_divisor(2520, 8350)
+ 10
+ >>> get_greatest_common_divisor(-10, 20)
+ Traceback (most recent call last):
+ ...
+ Exception: numbers must be integer and greater than zero
+ >>> get_greatest_common_divisor(1.5, 2)
+ Traceback (most recent call last):
+ ...
+ Exception: numbers must be integer and greater than zero
+ >>> get_greatest_common_divisor(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ 1
+ >>> get_greatest_common_divisor("1", 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ Traceback (most recent call last):
+ ...
+ Exception: numbers must be integer and greater than zero
+ """
+
+ # we just need factors, not numbers itself
+ try:
+ same_factors, *factors = map(get_factors, numbers)
+ except TypeError as e:
+ raise Exception("numbers must be integer and greater than zero") from e
+
+ for factor in factors:
+ same_factors &= factor
+ # get common factor between all
+ # `&` return common elements with smaller value (for Counter type)
+
+ # now, same_factors is something like {2: 2, 3: 4} that means 2 * 2 * 3 * 3 * 3 * 3
+ mult = 1
+ # power each factor and multiply
+ # for {2: 2, 3: 4}, it is [4, 81] and then 324
+ for m in [factor**power for factor, power in same_factors.items()]:
+ mult *= m
+ return mult
+
+
+if __name__ == "__main__":
+ print(get_greatest_common_divisor(18, 45)) # 9
diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py
index 5a7d9e8d8..7cf669bcb 100644
--- a/maths/greedy_coin_change.py
+++ b/maths/greedy_coin_change.py
@@ -41,7 +41,7 @@ Following is minimal change for 456 :
"""
-def find_minimum_change(denominations: list[int], value: int) -> list[int]:
+def find_minimum_change(denominations: list[int], value: str) -> list[int]:
"""
Find the minimum change from the given denominations and value
>>> find_minimum_change([1, 5, 10, 20, 50, 100, 200, 500, 1000,2000], 18745)
@@ -62,7 +62,6 @@ def find_minimum_change(denominations: list[int], value: int) -> list[int]:
# Traverse through all denomination
for denomination in reversed(denominations):
-
# Find denominations
while int(total_value) >= int(denomination):
total_value -= int(denomination)
@@ -73,9 +72,8 @@ def find_minimum_change(denominations: list[int], value: int) -> list[int]:
# Driver Code
if __name__ == "__main__":
-
- denominations = list()
- value = 0
+ denominations = []
+ value = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
diff --git a/maths/hamming_numbers.py b/maths/hamming_numbers.py
new file mode 100644
index 000000000..4575119c8
--- /dev/null
+++ b/maths/hamming_numbers.py
@@ -0,0 +1,51 @@
+"""
+A Hamming number is a positive integer of the form 2^i*3^j*5^k, for some
+non-negative integers i, j, and k. They are often referred to as regular numbers.
+More info at: https://en.wikipedia.org/wiki/Regular_number.
+"""
+
+
+def hamming(n_element: int) -> list:
+ """
+    This function creates an ordered list of the first n Hamming numbers and
+    returns the complete list. It must be given a positive integer.
+
+ :param n_element: The number of elements on the list
+    :return: The ordered list of the first n_element Hamming numbers
+
+ >>> hamming(5)
+ [1, 2, 3, 4, 5]
+ >>> hamming(10)
+ [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
+ >>> hamming(15)
+ [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
+ """
+ n_element = int(n_element)
+ if n_element < 1:
+ my_error = ValueError("a should be a positive number")
+ raise my_error
+
+ hamming_list = [1]
+ i, j, k = (0, 0, 0)
+ index = 1
+ while index < n_element:
+ while hamming_list[i] * 2 <= hamming_list[-1]:
+ i += 1
+ while hamming_list[j] * 3 <= hamming_list[-1]:
+ j += 1
+ while hamming_list[k] * 5 <= hamming_list[-1]:
+ k += 1
+ hamming_list.append(
+ min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
+ )
+ index += 1
+ return hamming_list
+
+
+if __name__ == "__main__":
+ n = input("Enter the last number (nth term) of the Hamming Number Series: ")
+ print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
+ hamming_numbers = hamming(int(n))
+ print("-----------------------------------------------------")
+ print(f"The list with nth numbers is: {hamming_numbers}")
+ print("-----------------------------------------------------")
diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py
index 90e4913c7..6929533fc 100644
--- a/maths/hardy_ramanujanalgo.py
+++ b/maths/hardy_ramanujanalgo.py
@@ -4,9 +4,9 @@
import math
-def exactPrimeFactorCount(n):
+def exact_prime_factor_count(n):
"""
- >>> exactPrimeFactorCount(51242183)
+ >>> exact_prime_factor_count(51242183)
3
"""
count = 0
@@ -36,8 +36,8 @@ def exactPrimeFactorCount(n):
if __name__ == "__main__":
n = 51242183
- print(f"The number of distinct prime factors is/are {exactPrimeFactorCount(n)}")
- print("The value of log(log(n)) is {:.4f}".format(math.log(math.log(n))))
+ print(f"The number of distinct prime factors is/are {exact_prime_factor_count(n)}")
+ print(f"The value of log(log(n)) is {math.log(math.log(n)):.4f}")
"""
The number of distinct prime factors is/are 3
diff --git a/maths/hexagonal_number.py b/maths/hexagonal_number.py
new file mode 100644
index 000000000..3677ab95e
--- /dev/null
+++ b/maths/hexagonal_number.py
@@ -0,0 +1,49 @@
+"""
+== Hexagonal Number ==
+The nth hexagonal number hn is the number of distinct dots
+in a pattern of dots consisting of the outlines of regular
+hexagons with sides up to n dots, when the hexagons are
+overlaid so that they share one vertex.
+
+https://en.wikipedia.org/wiki/Hexagonal_number
+"""
+
+# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
+
+
+def hexagonal(number: int) -> int:
+ """
+ :param number: nth hexagonal number to calculate
+ :return: the nth hexagonal number
+ Note: A hexagonal number is only defined for positive integers
+ >>> hexagonal(4)
+ 28
+ >>> hexagonal(11)
+ 231
+ >>> hexagonal(22)
+ 946
+ >>> hexagonal(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a positive integer
+ >>> hexagonal(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a positive integer
+ >>> hexagonal(11.0)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value of [number=11.0] must be an integer
+ """
+ if not isinstance(number, int):
+ msg = f"Input value of [number={number}] must be an integer"
+ raise TypeError(msg)
+ if number < 1:
+ raise ValueError("Input must be a positive integer")
+ return number * (2 * number - 1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/integration_by_simpson_approx.py b/maths/integration_by_simpson_approx.py
index da0e1cffd..f77ae7613 100644
--- a/maths/integration_by_simpson_approx.py
+++ b/maths/integration_by_simpson_approx.py
@@ -35,12 +35,11 @@ xn = b
def simpson_integration(function, a: float, b: float, precision: int = 4) -> float:
-
"""
Args:
function : the function which's integration is desired
a : the lower limit of integration
- b : upper limit of integraion
+ b : upper limit of integration
precision : precision of the result,error required default is 4
Returns:
@@ -92,21 +91,17 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo
assert callable(
function
), f"the function(object) passed should be callable your input : {function}"
- assert isinstance(a, float) or isinstance(
- a, int
- ), f"a should be float or integer your input : {a}"
- assert isinstance(function(a), float) or isinstance(function(a), int), (
+ assert isinstance(a, (float, int)), f"a should be float or integer your input : {a}"
+ assert isinstance(function(a), (float, int)), (
"the function should return integer or float return type of your function, "
f"{type(a)}"
)
- assert isinstance(b, float) or isinstance(
- b, int
- ), f"b should be float or integer your input : {b}"
+ assert isinstance(b, (float, int)), f"b should be float or integer your input : {b}"
assert (
isinstance(precision, int) and precision > 0
), f"precision should be positive integer your input : {precision}"
- # just applying the formula of simpson for approximate integraion written in
+ # just applying the formula of simpson for approximate integration written in
# mentioned article in first comment of this file and above this function
h = (b - a) / N_STEPS
diff --git a/maths/is_int_palindrome.py b/maths/is_int_palindrome.py
new file mode 100644
index 000000000..63dc9e213
--- /dev/null
+++ b/maths/is_int_palindrome.py
@@ -0,0 +1,34 @@
+def is_int_palindrome(num: int) -> bool:
+ """
+ Returns whether `num` is a palindrome or not
+ (see for reference https://en.wikipedia.org/wiki/Palindromic_number).
+
+ >>> is_int_palindrome(-121)
+ False
+ >>> is_int_palindrome(0)
+ True
+ >>> is_int_palindrome(10)
+ False
+ >>> is_int_palindrome(11)
+ True
+ >>> is_int_palindrome(101)
+ True
+ >>> is_int_palindrome(120)
+ False
+ """
+ if num < 0:
+ return False
+
+ num_copy: int = num
+ rev_num: int = 0
+ while num > 0:
+ rev_num = rev_num * 10 + (num % 10)
+ num //= 10
+
+ return num_copy == rev_num
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/is_ip_v4_address_valid.py b/maths/is_ip_v4_address_valid.py
new file mode 100644
index 000000000..0ae8e021e
--- /dev/null
+++ b/maths/is_ip_v4_address_valid.py
@@ -0,0 +1,56 @@
+"""
+Is IP v4 address valid?
+A valid IP address must be four octets in the form of A.B.C.D,
+where A,B,C and D are numbers from 0-254
+for example: 192.168.23.1, 172.254.254.254 are valid IP address
+ 192.168.255.0, 255.192.3.121 are invalid IP address
+"""
+
+
+def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
+ """
+    Return True if the given string is a valid IP v4 address
+    (octets restricted to 0-254 in this implementation),
+    or False otherwise.
+
+ >>> is_ip_v4_address_valid("192.168.0.23")
+ True
+
+ >>> is_ip_v4_address_valid("192.255.15.8")
+ False
+
+ >>> is_ip_v4_address_valid("172.100.0.8")
+ True
+
+ >>> is_ip_v4_address_valid("254.255.0.255")
+ False
+
+ >>> is_ip_v4_address_valid("1.2.33333333.4")
+ False
+
+ >>> is_ip_v4_address_valid("1.2.-3.4")
+ False
+
+ >>> is_ip_v4_address_valid("1.2.3")
+ False
+
+ >>> is_ip_v4_address_valid("1.2.3.4.5")
+ False
+
+ >>> is_ip_v4_address_valid("1.2.A.4")
+ False
+
+ >>> is_ip_v4_address_valid("0.0.0.0")
+ True
+
+ >>> is_ip_v4_address_valid("1.2.3.")
+ False
+ """
+ octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
+ return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
+
+
+if __name__ == "__main__":
+ ip = input().strip()
+ valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
+ print(f"{ip} is a {valid_or_invalid} IP v4 address.")
diff --git a/maths/is_square_free.py b/maths/is_square_free.py
index 8d83d95ff..08c70dc32 100644
--- a/maths/is_square_free.py
+++ b/maths/is_square_free.py
@@ -1,7 +1,7 @@
"""
References: wikipedia:square free number
-python/black : True
-flake8 : True
+psf/black : True
+ruff : True
"""
from __future__ import annotations
@@ -15,7 +15,7 @@ def is_square_free(factors: list[int]) -> bool:
False
These are wrong but should return some value
- it simply checks for repition in the numbers.
+ it simply checks for repetition in the numbers.
>>> is_square_free([1, 3, 4, 'sd', 0.0])
True
diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py
index 4f24d308f..32054414c 100644
--- a/maths/jaccard_similarity.py
+++ b/maths/jaccard_similarity.py
@@ -14,7 +14,7 @@ Jaccard similarity is widely used with MinHashing.
"""
-def jaccard_similariy(setA, setB, alternativeUnion=False):
+def jaccard_similarity(set_a, set_b, alternative_union=False):
"""
Finds the jaccard similarity between two sets.
Essentially, its intersection over union.
@@ -24,8 +24,8 @@ def jaccard_similariy(setA, setB, alternativeUnion=False):
of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77]
Parameters:
- :setA (set,list,tuple): A non-empty set/list
- :setB (set,list,tuple): A non-empty set/list
+ :set_a (set,list,tuple): A non-empty set/list
+ :set_b (set,list,tuple): A non-empty set/list
:alternativeUnion (boolean): If True, use sum of number of
items as union
@@ -33,48 +33,48 @@ def jaccard_similariy(setA, setB, alternativeUnion=False):
(float) The jaccard similarity between the two sets.
Examples:
- >>> setA = {'a', 'b', 'c', 'd', 'e'}
- >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'}
- >>> jaccard_similariy(setA,setB)
+ >>> set_a = {'a', 'b', 'c', 'd', 'e'}
+ >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
+ >>> jaccard_similarity(set_a, set_b)
0.375
- >>> jaccard_similariy(setA,setA)
+ >>> jaccard_similarity(set_a, set_a)
1.0
- >>> jaccard_similariy(setA,setA,True)
+ >>> jaccard_similarity(set_a, set_a, True)
0.5
- >>> setA = ['a', 'b', 'c', 'd', 'e']
- >>> setB = ('c', 'd', 'e', 'f', 'h', 'i')
- >>> jaccard_similariy(setA,setB)
+ >>> set_a = ['a', 'b', 'c', 'd', 'e']
+ >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i')
+ >>> jaccard_similarity(set_a, set_b)
0.375
"""
- if isinstance(setA, set) and isinstance(setB, set):
+ if isinstance(set_a, set) and isinstance(set_b, set):
+ intersection = len(set_a.intersection(set_b))
- intersection = len(setA.intersection(setB))
-
- if alternativeUnion:
- union = len(setA) + len(setB)
+ if alternative_union:
+ union = len(set_a) + len(set_b)
else:
- union = len(setA.union(setB))
+ union = len(set_a.union(set_b))
return intersection / union
- if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)):
+ if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
+ intersection = [element for element in set_a if element in set_b]
- intersection = [element for element in setA if element in setB]
-
- if alternativeUnion:
- union = len(setA) + len(setB)
+ if alternative_union:
+ union = len(set_a) + len(set_b)
+ return len(intersection) / union
else:
- union = setA + [element for element in setB if element not in setA]
+ union = set_a + [element for element in set_b if element not in set_a]
+ return len(intersection) / len(union)
return len(intersection) / len(union)
+ return None
if __name__ == "__main__":
-
- setA = {"a", "b", "c", "d", "e"}
- setB = {"c", "d", "e", "f", "h", "i"}
- print(jaccard_similariy(setA, setB))
+ set_a = {"a", "b", "c", "d", "e"}
+ set_b = {"c", "d", "e", "f", "h", "i"}
+ print(jaccard_similarity(set_a, set_b))
diff --git a/maths/juggler_sequence.py b/maths/juggler_sequence.py
new file mode 100644
index 000000000..7f65d1dff
--- /dev/null
+++ b/maths/juggler_sequence.py
@@ -0,0 +1,63 @@
+"""
+== Juggler Sequence ==
+A juggler sequence starts with any positive integer n. The next term is
+obtained as follows:
+    If the term n is even, the next term is the floor of the square root of n.
+    If n is odd, the next term is the floor of n raised to the power 3/2.
+
+https://en.wikipedia.org/wiki/Juggler_sequence
+"""
+
+# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
+import math
+
+
+def juggler_sequence(number: int) -> list[int]:
+ """
+ >>> juggler_sequence(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value of [number=0] must be a positive integer
+ >>> juggler_sequence(1)
+ [1]
+ >>> juggler_sequence(2)
+ [2, 1]
+ >>> juggler_sequence(3)
+ [3, 5, 11, 36, 6, 2, 1]
+ >>> juggler_sequence(5)
+ [5, 11, 36, 6, 2, 1]
+ >>> juggler_sequence(10)
+ [10, 3, 5, 11, 36, 6, 2, 1]
+ >>> juggler_sequence(25)
+ [25, 125, 1397, 52214, 228, 15, 58, 7, 18, 4, 2, 1]
+ >>> juggler_sequence(6.0)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value of [number=6.0] must be an integer
+ >>> juggler_sequence(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value of [number=-1] must be a positive integer
+ """
+ if not isinstance(number, int):
+ msg = f"Input value of [number={number}] must be an integer"
+ raise TypeError(msg)
+ if number < 1:
+ msg = f"Input value of [number={number}] must be a positive integer"
+ raise ValueError(msg)
+ sequence = [number]
+ while number != 1:
+ if number % 2 == 0:
+ number = math.floor(math.sqrt(number))
+ else:
+ number = math.floor(
+ math.sqrt(number) * math.sqrt(number) * math.sqrt(number)
+ )
+ sequence.append(number)
+ return sequence
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/kadanes.py b/maths/kadanes.py
deleted file mode 100644
index d239d4a25..000000000
--- a/maths/kadanes.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Kadane's algorithm to get maximum subarray sum
-https://medium.com/@rsinghal757/kadanes-algorithm-dynamic-programming-how-and-why-does-it-work-3fd8849ed73d
-https://en.wikipedia.org/wiki/Maximum_subarray_problem
-"""
-test_data: tuple = ([-2, -8, -9], [2, 8, 9], [-1, 0, 1], [0, 0], [])
-
-
-def negative_exist(arr: list) -> int:
- """
- >>> negative_exist([-2,-8,-9])
- -2
- >>> [negative_exist(arr) for arr in test_data]
- [-2, 0, 0, 0, 0]
- """
- arr = arr or [0]
- max = arr[0]
- for i in arr:
- if i >= 0:
- return 0
- elif max <= i:
- max = i
- return max
-
-
-def kadanes(arr: list) -> int:
- """
- If negative_exist() returns 0 than this function will execute
- else it will return the value return by negative_exist function
-
- For example: arr = [2, 3, -9, 8, -2]
- Initially we set value of max_sum to 0 and max_till_element to 0 than when
- max_sum is less than max_till particular element it will assign that value to
- max_sum and when value of max_till_sum is less than 0 it will assign 0 to i
- and after that whole process, return the max_sum
- So the output for above arr is 8
-
- >>> kadanes([2, 3, -9, 8, -2])
- 8
- >>> [kadanes(arr) for arr in test_data]
- [-2, 19, 1, 0, 0]
- """
- max_sum = negative_exist(arr)
- if max_sum < 0:
- return max_sum
-
- max_sum = 0
- max_till_element = 0
-
- for i in arr:
- max_till_element += i
- if max_sum <= max_till_element:
- max_sum = max_till_element
- if max_till_element < 0:
- max_till_element = 0
- return max_sum
-
-
-if __name__ == "__main__":
- try:
- print("Enter integer values sepatated by spaces")
- arr = [int(x) for x in input().split()]
- print(f"Maximum subarray sum of {arr} is {kadanes(arr)}")
- except ValueError:
- print("Please enter integer values.")
diff --git a/maths/karatsuba.py b/maths/karatsuba.py
index df29c77a5..4bf4aecdc 100644
--- a/maths/karatsuba.py
+++ b/maths/karatsuba.py
@@ -10,18 +10,18 @@ def karatsuba(a, b):
"""
if len(str(a)) == 1 or len(str(b)) == 1:
return a * b
- else:
- m1 = max(len(str(a)), len(str(b)))
- m2 = m1 // 2
- a1, a2 = divmod(a, 10 ** m2)
- b1, b2 = divmod(b, 10 ** m2)
+ m1 = max(len(str(a)), len(str(b)))
+ m2 = m1 // 2
- x = karatsuba(a2, b2)
- y = karatsuba((a1 + a2), (b1 + b2))
- z = karatsuba(a1, b1)
+ a1, a2 = divmod(a, 10**m2)
+ b1, b2 = divmod(b, 10**m2)
- return (z * 10 ** (2 * m2)) + ((y - z - x) * 10 ** (m2)) + (x)
+ x = karatsuba(a2, b2)
+ y = karatsuba((a1 + a2), (b1 + b2))
+ z = karatsuba(a1, b1)
+
+ return (z * 10 ** (2 * m2)) + ((y - z - x) * 10 ** (m2)) + (x)
def main():
diff --git a/maths/krishnamurthy_number.py b/maths/krishnamurthy_number.py
index c88f68a07..c1d8a8fc5 100644
--- a/maths/krishnamurthy_number.py
+++ b/maths/krishnamurthy_number.py
@@ -33,12 +33,12 @@ def krishnamurthy(number: int) -> bool:
True
"""
- factSum = 0
+ fact_sum = 0
duplicate = number
while duplicate > 0:
duplicate, digit = divmod(duplicate, 10)
- factSum += factorial(digit)
- return factSum == number
+ fact_sum += factorial(digit)
+ return fact_sum == number
if __name__ == "__main__":
diff --git a/maths/kth_lexicographic_permutation.py b/maths/kth_lexicographic_permutation.py
index 23eab626f..b85558aca 100644
--- a/maths/kth_lexicographic_permutation.py
+++ b/maths/kth_lexicographic_permutation.py
@@ -1,17 +1,17 @@
-def kthPermutation(k, n):
+def kth_permutation(k, n):
"""
Finds k'th lexicographic permutation (in increasing order) of
0,1,2,...n-1 in O(n^2) time.
Examples:
First permutation is always 0,1,2,...n
- >>> kthPermutation(0,5)
+ >>> kth_permutation(0,5)
[0, 1, 2, 3, 4]
The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3],
[0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3],
[1,2,3,0], [1,3,0,2]
- >>> kthPermutation(10,4)
+ >>> kth_permutation(10,4)
[1, 3, 0, 2]
"""
# Factorails from 1! to (n-1)!
diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py
index d2dc0af18..7e7fea004 100644
--- a/maths/largest_of_very_large_numbers.py
+++ b/maths/largest_of_very_large_numbers.py
@@ -12,6 +12,7 @@ def res(x, y):
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
+ raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
diff --git a/maths/largest_subarray_sum.py b/maths/largest_subarray_sum.py
deleted file mode 100644
index 0449e72e6..000000000
--- a/maths/largest_subarray_sum.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from sys import maxsize
-
-
-def max_sub_array_sum(a: list, size: int = 0):
- """
- >>> max_sub_array_sum([-13, -3, -25, -20, -3, -16, -23, -12, -5, -22, -15, -4, -7])
- -3
- """
- size = size or len(a)
- max_so_far = -maxsize - 1
- max_ending_here = 0
- for i in range(0, size):
- max_ending_here = max_ending_here + a[i]
- if max_so_far < max_ending_here:
- max_so_far = max_ending_here
- if max_ending_here < 0:
- max_ending_here = 0
- return max_so_far
-
-
-if __name__ == "__main__":
- a = [-13, -3, -25, -20, 1, -16, -23, -12, -5, -22, -15, -4, -7]
- print(("Maximum contiguous sum is", max_sub_array_sum(a, len(a))))
diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py
index 0d087643e..10cc63ac7 100644
--- a/maths/least_common_multiple.py
+++ b/maths/least_common_multiple.py
@@ -67,8 +67,7 @@ def benchmark():
class TestLeastCommonMultiple(unittest.TestCase):
-
- test_inputs = [
+ test_inputs = (
(10, 20),
(13, 15),
(4, 31),
@@ -78,8 +77,8 @@ class TestLeastCommonMultiple(unittest.TestCase):
(12, 25),
(10, 25),
(6, 9),
- ]
- expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18]
+ )
+ expected_results = (20, 195, 124, 210, 1462, 60, 300, 50, 18)
def test_lcm_function(self):
for i, (first_num, second_num) in enumerate(self.test_inputs):
diff --git a/maths/line_length.py b/maths/line_length.py
index 1d386b44b..b810f2d9a 100644
--- a/maths/line_length.py
+++ b/maths/line_length.py
@@ -1,14 +1,15 @@
+from __future__ import annotations
+
import math
-from typing import Callable, Union
+from collections.abc import Callable
def line_length(
- fnc: Callable[[Union[int, float]], Union[int, float]],
- x_start: Union[int, float],
- x_end: Union[int, float],
+ fnc: Callable[[int | float], int | float],
+ x_start: int | float,
+ x_end: int | float,
steps: int = 100,
) -> float:
-
"""
Approximates the arc length of a line segment by treating the curve as a
sequence of linear lines and summing their lengths
@@ -38,8 +39,7 @@ def line_length(
fx1 = fnc(x_start)
length = 0.0
- for i in range(steps):
-
+ for _ in range(steps):
# Approximates curve as a sequence of linear lines and sums their length
x2 = (x_end - x_start) / steps + x1
fx2 = fnc(x2)
diff --git a/maths/liouville_lambda.py b/maths/liouville_lambda.py
new file mode 100644
index 000000000..1ed228dd5
--- /dev/null
+++ b/maths/liouville_lambda.py
@@ -0,0 +1,46 @@
+"""
+== Liouville Lambda Function ==
+The Liouville Lambda function, denoted by λ(n),
+is 1 if n is the product of an even number of prime numbers,
+and -1 if it is the product of an odd number of primes.
+
+https://en.wikipedia.org/wiki/Liouville_function
+"""
+
+# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
+from maths.prime_factors import prime_factors
+
+
+def liouville_lambda(number: int) -> int:
+ """
+    This function takes an integer number as input.
+    returns 1 if n has an even number of prime factors and -1 otherwise.
+ >>> liouville_lambda(10)
+ 1
+ >>> liouville_lambda(11)
+ -1
+ >>> liouville_lambda(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a positive integer
+ >>> liouville_lambda(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a positive integer
+ >>> liouville_lambda(11.0)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value of [number=11.0] must be an integer
+ """
+ if not isinstance(number, int):
+ msg = f"Input value of [number={number}] must be an integer"
+ raise TypeError(msg)
+ if number < 1:
+ raise ValueError("Input must be a positive integer")
+ return -1 if len(prime_factors(number)) % 2 else 1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py
index 15e25cbfe..0a5621aac 100644
--- a/maths/lucas_lehmer_primality_test.py
+++ b/maths/lucas_lehmer_primality_test.py
@@ -30,9 +30,9 @@ def lucas_lehmer_test(p: int) -> bool:
return True
s = 4
- M = (1 << p) - 1
- for i in range(p - 2):
- s = ((s * s) - 2) % M
+ m = (1 << p) - 1
+ for _ in range(p - 2):
+ s = ((s * s) - 2) % m
return s == 0
diff --git a/maths/lucas_series.py b/maths/lucas_series.py
index 6b32c2022..cae6c2815 100644
--- a/maths/lucas_series.py
+++ b/maths/lucas_series.py
@@ -50,7 +50,7 @@ def dynamic_lucas_number(n_th_number: int) -> int:
if not isinstance(n_th_number, int):
raise TypeError("dynamic_lucas_number accepts only integer arguments.")
a, b = 2, 1
- for i in range(n_th_number):
+ for _ in range(n_th_number):
a, b = b, a + b
return a
diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py
new file mode 100644
index 000000000..e55839bc1
--- /dev/null
+++ b/maths/maclaurin_series.py
@@ -0,0 +1,122 @@
+"""
+https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions
+"""
+from math import factorial, pi
+
+
+def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
+ """
+ Finds the maclaurin approximation of sin
+
+ :param theta: the angle to which sin is found
+ :param accuracy: the degree of accuracy wanted minimum
+ :return: the value of sine in radians
+
+
+ >>> from math import isclose, sin
+ >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25))
+ True
+ >>> maclaurin_sin(10)
+ -0.544021110889369
+ >>> maclaurin_sin(-10)
+ 0.5440211108893703
+ >>> maclaurin_sin(10, 15)
+ -0.5440211108893689
+ >>> maclaurin_sin(-10, 15)
+ 0.5440211108893703
+ >>> maclaurin_sin("10")
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_sin() requires either an int or float for theta
+ >>> maclaurin_sin(10, -30)
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_sin() requires a positive int for accuracy
+ >>> maclaurin_sin(10, 30.5)
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_sin() requires a positive int for accuracy
+ >>> maclaurin_sin(10, "30")
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_sin() requires a positive int for accuracy
+ """
+
+ if not isinstance(theta, (int, float)):
+ raise ValueError("maclaurin_sin() requires either an int or float for theta")
+
+ if not isinstance(accuracy, int) or accuracy <= 0:
+ raise ValueError("maclaurin_sin() requires a positive int for accuracy")
+
+ theta = float(theta)
+ div = theta // (2 * pi)
+ theta -= 2 * div * pi
+ return sum(
+ (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
+ )
+
+
+def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
+ """
+ Finds the maclaurin approximation of cos
+
+ :param theta: the angle to which cos is found
+ :param accuracy: the degree of accuracy wanted
+ :return: the value of cosine in radians
+
+
+ >>> from math import isclose, cos
+ >>> all(isclose(maclaurin_cos(x, 50), cos(x)) for x in range(-25, 25))
+ True
+ >>> maclaurin_cos(5)
+ 0.28366218546322675
+ >>> maclaurin_cos(-5)
+ 0.2836621854632266
+ >>> maclaurin_cos(10, 15)
+ -0.8390715290764525
+ >>> maclaurin_cos(-10, 15)
+ -0.8390715290764521
+ >>> maclaurin_cos("10")
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_cos() requires either an int or float for theta
+ >>> maclaurin_cos(10, -30)
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_cos() requires a positive int for accuracy
+ >>> maclaurin_cos(10, 30.5)
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_cos() requires a positive int for accuracy
+ >>> maclaurin_cos(10, "30")
+ Traceback (most recent call last):
+ ...
+ ValueError: maclaurin_cos() requires a positive int for accuracy
+ """
+
+ if not isinstance(theta, (int, float)):
+ raise ValueError("maclaurin_cos() requires either an int or float for theta")
+
+ if not isinstance(accuracy, int) or accuracy <= 0:
+ raise ValueError("maclaurin_cos() requires a positive int for accuracy")
+
+ theta = float(theta)
+ div = theta // (2 * pi)
+ theta -= 2 * div * pi
+ return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ print(maclaurin_sin(10))
+ print(maclaurin_sin(-10))
+ print(maclaurin_sin(10, 15))
+ print(maclaurin_sin(-10, 15))
+
+ print(maclaurin_cos(5))
+ print(maclaurin_cos(-5))
+ print(maclaurin_cos(10, 15))
+ print(maclaurin_cos(-10, 15))
diff --git a/maths/manhattan_distance.py b/maths/manhattan_distance.py
new file mode 100644
index 000000000..413991468
--- /dev/null
+++ b/maths/manhattan_distance.py
@@ -0,0 +1,126 @@
+def manhattan_distance(point_a: list, point_b: list) -> float:
+ """
+    Expects two lists of numbers representing two points in the same
+    n-dimensional space
+
+ https://en.wikipedia.org/wiki/Taxicab_geometry
+
+ >>> manhattan_distance([1,1], [2,2])
+ 2.0
+ >>> manhattan_distance([1.5,1.5], [2,2])
+ 1.0
+ >>> manhattan_distance([1.5,1.5], [2.5,2])
+ 1.5
+ >>> manhattan_distance([-3, -3, -3], [0, 0, 0])
+ 9.0
+ >>> manhattan_distance([1,1], None)
+ Traceback (most recent call last):
+ ...
+ ValueError: Missing an input
+ >>> manhattan_distance([1,1], [2, 2, 2])
+ Traceback (most recent call last):
+ ...
+ ValueError: Both points must be in the same n-dimensional space
+ >>> manhattan_distance([1,"one"], [2, 2, 2])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found str
+ >>> manhattan_distance(1, [2, 2, 2])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found int
+ >>> manhattan_distance([1,1], "not_a_list")
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found str
+ """
+
+ _validate_point(point_a)
+ _validate_point(point_b)
+ if len(point_a) != len(point_b):
+ raise ValueError("Both points must be in the same n-dimensional space")
+
+ return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
+
+
+def _validate_point(point: list[float]) -> None:
+ """
+ >>> _validate_point(None)
+ Traceback (most recent call last):
+ ...
+ ValueError: Missing an input
+ >>> _validate_point([1,"one"])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found str
+ >>> _validate_point(1)
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found int
+ >>> _validate_point("not_a_list")
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found str
+ """
+ if point:
+ if isinstance(point, list):
+ for item in point:
+ if not isinstance(item, (int, float)):
+ msg = (
+ "Expected a list of numbers as input, found "
+ f"{type(item).__name__}"
+ )
+ raise TypeError(msg)
+ else:
+ msg = f"Expected a list of numbers as input, found {type(point).__name__}"
+ raise TypeError(msg)
+ else:
+ raise ValueError("Missing an input")
+
+
+def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
+ """
+ Version with one liner
+
+ >>> manhattan_distance_one_liner([1,1], [2,2])
+ 2.0
+ >>> manhattan_distance_one_liner([1.5,1.5], [2,2])
+ 1.0
+ >>> manhattan_distance_one_liner([1.5,1.5], [2.5,2])
+ 1.5
+ >>> manhattan_distance_one_liner([-3, -3, -3], [0, 0, 0])
+ 9.0
+ >>> manhattan_distance_one_liner([1,1], None)
+ Traceback (most recent call last):
+ ...
+ ValueError: Missing an input
+ >>> manhattan_distance_one_liner([1,1], [2, 2, 2])
+ Traceback (most recent call last):
+ ...
+ ValueError: Both points must be in the same n-dimensional space
+ >>> manhattan_distance_one_liner([1,"one"], [2, 2, 2])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found str
+ >>> manhattan_distance_one_liner(1, [2, 2, 2])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found int
+ >>> manhattan_distance_one_liner([1,1], "not_a_list")
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a list of numbers as input, found str
+ """
+
+ _validate_point(point_a)
+ _validate_point(point_b)
+ if len(point_a) != len(point_b):
+ raise ValueError("Both points must be in the same n-dimensional space")
+
+ return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py
index 033ceb3f2..7c37151c8 100644
--- a/maths/matrix_exponentiation.py
+++ b/maths/matrix_exponentiation.py
@@ -5,7 +5,7 @@ import timeit
"""
Matrix Exponentiation is a technique to solve linear recurrences in logarithmic time.
You read more about it here:
-http://zobayer.blogspot.com/2010/11/matrix-exponentiation.html
+https://zobayer.blogspot.com/2010/11/matrix-exponentiation.html
https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/
"""
diff --git a/maths/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py
index 593cb5c8b..c6f9b4ed0 100644
--- a/maths/max_sum_sliding_window.py
+++ b/maths/max_sum_sliding_window.py
@@ -6,10 +6,10 @@ Instead of using a nested for loop, in a Brute force approach we will use a tech
called 'Window sliding technique' where the nested loops can be converted to a single
loop to reduce time complexity.
"""
-from typing import List
+from __future__ import annotations
-def max_sum_in_array(array: List[int], k: int) -> int:
+def max_sum_in_array(array: list[int], k: int) -> int:
"""
Returns the maximum sum of k consecutive elements
>>> arr = [1, 4, 2, 10, 2, 3, 1, 0, 20]
diff --git a/maths/median_of_two_arrays.py b/maths/median_of_two_arrays.py
index cde12f5d7..55aa587a9 100644
--- a/maths/median_of_two_arrays.py
+++ b/maths/median_of_two_arrays.py
@@ -1,7 +1,7 @@
-from typing import List
+from __future__ import annotations
-def median_of_two_arrays(nums1: List[float], nums2: List[float]) -> float:
+def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
"""
>>> median_of_two_arrays([1, 2], [3])
2
diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py
index fe9920271..9f2668dba 100644
--- a/maths/miller_rabin.py
+++ b/maths/miller_rabin.py
@@ -6,10 +6,11 @@ from .binary_exp_mod import bin_exp_mod
# This is a probabilistic check to test primality, useful for big numbers!
# if it's a prime, it will return true
# if it's not a prime, the chance of it returning true is at most 1/4**prec
-def is_prime(n, prec=1000):
+def is_prime_big(n, prec=1000):
"""
- >>> from .prime_check import prime_check
- >>> all(is_prime(i) == prime_check(i) for i in range(1000))
+ >>> from maths.prime_check import is_prime
+ >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
+ >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
True
"""
if n < 2:
@@ -32,7 +33,7 @@ def is_prime(n, prec=1000):
b = bin_exp_mod(a, d, n)
if b != 1:
flag = True
- for i in range(exp):
+ for _ in range(exp):
if b == n - 1:
flag = False
break
@@ -47,4 +48,4 @@ def is_prime(n, prec=1000):
if __name__ == "__main__":
n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
- print(", ".join(str(i) for i in range(n + 1) if is_prime(i)))
+ print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
diff --git a/maths/mobius_function.py b/maths/mobius_function.py
index 4fcf35f21..8abdc4caf 100644
--- a/maths/mobius_function.py
+++ b/maths/mobius_function.py
@@ -1,8 +1,8 @@
"""
References: https://en.wikipedia.org/wiki/M%C3%B6bius_function
References: wikipedia:square free number
-python/black : True
-flake8 : True
+psf/black : True
+ruff : True
"""
from maths.is_square_free import is_square_free
diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py
index 28027cbe4..474f1f65d 100644
--- a/maths/monte_carlo.py
+++ b/maths/monte_carlo.py
@@ -1,10 +1,10 @@
"""
@author: MatteoRaso
"""
+from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
-from typing import Callable
def pi_estimator(iterations: int):
@@ -18,9 +18,10 @@ def pi_estimator(iterations: int):
5. Multiply this value by 4 to get your estimate of pi.
6. Print the estimated and numpy value of pi
"""
+
# A local function to see if a dot lands in the circle.
def is_in_circle(x: float, y: float) -> bool:
- distance_from_centre = sqrt((x ** 2) + (y ** 2))
+ distance_from_centre = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py
index 17cedbdbc..362f70b49 100644
--- a/maths/monte_carlo_dice.py
+++ b/maths/monte_carlo_dice.py
@@ -13,9 +13,6 @@ class Dice:
def roll(self):
return random.choice(self.sides)
- def _str_(self):
- return "Fair Dice"
-
def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]:
"""
@@ -35,7 +32,7 @@ def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]:
"""
dices = [Dice() for i in range(num_dice)]
count_of_sum = [0] * (len(dices) * Dice.NUM_SIDES + 1)
- for i in range(num_throws):
+ for _ in range(num_throws):
count_of_sum[sum(dice.roll() for dice in dices)] += 1
probability = [round((count * 100) / num_throws, 2) for count in count_of_sum]
return probability[num_dice:] # remove probability of sums that never appear
diff --git a/maths/nevilles_method.py b/maths/nevilles_method.py
new file mode 100644
index 000000000..1f48b43fb
--- /dev/null
+++ b/maths/nevilles_method.py
@@ -0,0 +1,55 @@
+"""
+ Python program to show how to interpolate and evaluate a polynomial
+ using Neville's method.
+ Neville’s method evaluates a polynomial that passes through a
+ given set of x and y points for a particular x value (x0) using the
+ Newton polynomial form.
+ Reference:
+ https://rpubs.com/aaronsc32/nevilles-method-polynomial-interpolation
+"""
+
+
+def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
+ """
+ Interpolate and evaluate a polynomial using Neville's method.
+ Arguments:
+ x_points, y_points: Iterables of x and corresponding y points through
+ which the polynomial passes.
+ x0: The value of x to evaluate the polynomial for.
+ Return Value: A list of the approximated value and the Neville iterations
+ table respectively.
+ >>> import pprint
+ >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), 5)[0]
+ 10.0
+ >>> pprint.pprint(neville_interpolate((1,2,3,4,6), (6,7,8,9,11), 99)[1])
+ [[0, 6, 0, 0, 0],
+ [0, 7, 0, 0, 0],
+ [0, 8, 104.0, 0, 0],
+ [0, 9, 104.0, 104.0, 0],
+ [0, 11, 104.0, 104.0, 104.0]]
+ >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), 99)[0]
+ 104.0
+ >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), '')
+ Traceback (most recent call last):
+ ...
+ TypeError: unsupported operand type(s) for -: 'str' and 'int'
+ """
+ n = len(x_points)
+ q = [[0] * n for i in range(n)]
+ for i in range(n):
+ q[i][1] = y_points[i]
+
+ for i in range(2, n):
+ for j in range(i, n):
+ q[j][i] = (
+ (x0 - x_points[j - i + 1]) * q[j][i - 1]
+ - (x0 - x_points[j]) * q[j - 1][i - 1]
+ ) / (x_points[j] - x_points[j - i + 1])
+
+ return [q[n - 1][n - 1], q]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/newton_raphson.py b/maths/newton_raphson.py
index f2b7cb976..2c9cd1de9 100644
--- a/maths/newton_raphson.py
+++ b/maths/newton_raphson.py
@@ -19,7 +19,6 @@ def calc_derivative(f, a, h=0.001):
def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=False):
-
a = x0 # set the initial guess
steps = [a]
error = abs(f(a))
diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py
index 3c0eb7b38..86bc67f72 100644
--- a/maths/number_of_digits.py
+++ b/maths/number_of_digits.py
@@ -67,93 +67,23 @@ def num_digits_faster(n: int) -> int:
def benchmark() -> None:
"""
- Benchmark code for comparing 3 functions,
- with 3 different length int values.
+ Benchmark multiple functions, with three different length int values.
"""
- print("\nFor small_num = ", small_num, ":")
- print(
- "> num_digits()",
- "\t\tans =",
- num_digits(small_num),
- "\ttime =",
- timeit("z.num_digits(z.small_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> num_digits_fast()",
- "\tans =",
- num_digits_fast(small_num),
- "\ttime =",
- timeit("z.num_digits_fast(z.small_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> num_digits_faster()",
- "\tans =",
- num_digits_faster(small_num),
- "\ttime =",
- timeit("z.num_digits_faster(z.small_num)", setup="import __main__ as z"),
- "seconds",
- )
+ from collections.abc import Callable
- print("\nFor medium_num = ", medium_num, ":")
- print(
- "> num_digits()",
- "\t\tans =",
- num_digits(medium_num),
- "\ttime =",
- timeit("z.num_digits(z.medium_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> num_digits_fast()",
- "\tans =",
- num_digits_fast(medium_num),
- "\ttime =",
- timeit("z.num_digits_fast(z.medium_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> num_digits_faster()",
- "\tans =",
- num_digits_faster(medium_num),
- "\ttime =",
- timeit("z.num_digits_faster(z.medium_num)", setup="import __main__ as z"),
- "seconds",
- )
+ def benchmark_a_function(func: Callable, value: int) -> None:
+ call = f"{func.__name__}({value})"
+ timing = timeit(f"__main__.{call}", setup="import __main__")
+ print(f"{call}: {func(value)} -- {timing} seconds")
- print("\nFor large_num = ", large_num, ":")
- print(
- "> num_digits()",
- "\t\tans =",
- num_digits(large_num),
- "\ttime =",
- timeit("z.num_digits(z.large_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> num_digits_fast()",
- "\tans =",
- num_digits_fast(large_num),
- "\ttime =",
- timeit("z.num_digits_fast(z.large_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> num_digits_faster()",
- "\tans =",
- num_digits_faster(large_num),
- "\ttime =",
- timeit("z.num_digits_faster(z.large_num)", setup="import __main__ as z"),
- "seconds",
- )
+ for value in (262144, 1125899906842624, 1267650600228229401496703205376):
+ for func in (num_digits, num_digits_fast, num_digits_faster):
+ benchmark_a_function(func, value)
+ print()
if __name__ == "__main__":
- small_num = 262144
- medium_num = 1125899906842624
- large_num = 1267650600228229401496703205376
- benchmark()
import doctest
doctest.testmod()
+ benchmark()
diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py
index 87184a76b..f2d65f89e 100644
--- a/maths/numerical_integration.py
+++ b/maths/numerical_integration.py
@@ -1,17 +1,17 @@
"""
Approximates the area under the curve using the trapezoidal rule
"""
+from __future__ import annotations
-from typing import Callable, Union
+from collections.abc import Callable
def trapezoidal_area(
- fnc: Callable[[Union[int, float]], Union[int, float]],
- x_start: Union[int, float],
- x_end: Union[int, float],
+ fnc: Callable[[int | float], int | float],
+ x_start: int | float,
+ x_end: int | float,
steps: int = 100,
) -> float:
-
"""
Treats curve as a collection of linear lines and sums the area of the
trapezium shape they form
@@ -38,8 +38,7 @@ def trapezoidal_area(
fx1 = fnc(x_start)
area = 0.0
- for i in range(steps):
-
+ for _ in range(steps):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
x2 = (x_end - x_start) / steps + x1
@@ -55,7 +54,7 @@ def trapezoidal_area(
if __name__ == "__main__":
def f(x):
- return x ** 3
+ return x**3
print("f(x) = x^3")
print("The area between the curve, x = -10, x = 10 and the x axis is:")
diff --git a/maths/odd_sieve.py b/maths/odd_sieve.py
new file mode 100644
index 000000000..60e92921a
--- /dev/null
+++ b/maths/odd_sieve.py
@@ -0,0 +1,42 @@
+from itertools import compress, repeat
+from math import ceil, sqrt
+
+
+def odd_sieve(num: int) -> list[int]:
+ """
+ Returns the prime numbers < `num`. The prime numbers are calculated using an
+ odd sieve implementation of the Sieve of Eratosthenes algorithm
+ (see for reference https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes).
+
+ >>> odd_sieve(2)
+ []
+ >>> odd_sieve(3)
+ [2]
+ >>> odd_sieve(10)
+ [2, 3, 5, 7]
+ >>> odd_sieve(20)
+ [2, 3, 5, 7, 11, 13, 17, 19]
+ """
+
+ if num <= 2:
+ return []
+ if num == 3:
+ return [2]
+
+ # Odd sieve for numbers in range [3, num - 1]
+ sieve = bytearray(b"\x01") * ((num >> 1) - 1)
+
+ for i in range(3, int(sqrt(num)) + 1, 2):
+ if sieve[(i >> 1) - 1]:
+ i_squared = i**2
+ sieve[(i_squared >> 1) - 1 :: i] = repeat(
+ 0, ceil((num - i_squared) / (i << 1))
+ )
+
+ return [2] + list(compress(range(3, num, 2), sieve))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/perfect_square.py b/maths/perfect_square.py
index 4393dcfbc..107e68528 100644
--- a/maths/perfect_square.py
+++ b/maths/perfect_square.py
@@ -58,9 +58,9 @@ def perfect_square_binary_search(n: int) -> bool:
right = n
while left <= right:
mid = (left + right) // 2
- if mid ** 2 == n:
+ if mid**2 == n:
return True
- elif mid ** 2 > n:
+ elif mid**2 > n:
right = mid - 1
else:
left = mid + 1
diff --git a/maths/persistence.py b/maths/persistence.py
new file mode 100644
index 000000000..607641e67
--- /dev/null
+++ b/maths/persistence.py
@@ -0,0 +1,82 @@
+def multiplicative_persistence(num: int) -> int:
+ """
+ Return the persistence of a given number.
+
+ https://en.wikipedia.org/wiki/Persistence_of_a_number
+
+ >>> multiplicative_persistence(217)
+ 2
+ >>> multiplicative_persistence(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: multiplicative_persistence() does not accept negative values
+ >>> multiplicative_persistence("long number")
+ Traceback (most recent call last):
+ ...
+ ValueError: multiplicative_persistence() only accepts integral values
+ """
+
+ if not isinstance(num, int):
+ raise ValueError("multiplicative_persistence() only accepts integral values")
+ if num < 0:
+ raise ValueError("multiplicative_persistence() does not accept negative values")
+
+ steps = 0
+ num_string = str(num)
+
+ while len(num_string) != 1:
+ numbers = [int(i) for i in num_string]
+
+ total = 1
+ for i in range(0, len(numbers)):
+ total *= numbers[i]
+
+ num_string = str(total)
+
+ steps += 1
+ return steps
+
+
+def additive_persistence(num: int) -> int:
+ """
+ Return the persistence of a given number.
+
+ https://en.wikipedia.org/wiki/Persistence_of_a_number
+
+ >>> additive_persistence(199)
+ 3
+ >>> additive_persistence(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: additive_persistence() does not accept negative values
+ >>> additive_persistence("long number")
+ Traceback (most recent call last):
+ ...
+ ValueError: additive_persistence() only accepts integral values
+ """
+
+ if not isinstance(num, int):
+ raise ValueError("additive_persistence() only accepts integral values")
+ if num < 0:
+ raise ValueError("additive_persistence() does not accept negative values")
+
+ steps = 0
+ num_string = str(num)
+
+ while len(num_string) != 1:
+ numbers = [int(i) for i in num_string]
+
+ total = 0
+ for i in range(0, len(numbers)):
+ total += numbers[i]
+
+ num_string = str(total)
+
+ steps += 1
+ return steps
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/pi_generator.py b/maths/pi_generator.py
new file mode 100644
index 000000000..dcd218aae
--- /dev/null
+++ b/maths/pi_generator.py
@@ -0,0 +1,94 @@
+def calculate_pi(limit: int) -> str:
+ """
+ https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80
+ Leibniz Formula for Pi
+
+ The Leibniz formula is the special case arctan 1 = 1/4 Pi .
+ Leibniz's formula converges extremely slowly: it exhibits sublinear convergence.
+
+ Convergence (https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Convergence)
+
+ We cannot try to prove against an interrupted, uncompleted generation.
+ https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Unusual_behaviour
+ The errors can in fact be predicted;
+ but those calculations also approach infinity for accuracy.
+
+    Our output will always be a string since we can definitely store all digits in there.
+    For simplicity's sake, let's just compare against known values and since our output
+    is a string, we need to convert to float.
+
+ >>> import math
+ >>> float(calculate_pi(15)) == math.pi
+ True
+
+ Since we cannot predict errors or interrupt any infinite alternating
+ series generation since they approach infinity,
+ or interrupt any alternating series, we are going to need math.isclose()
+
+ >>> math.isclose(float(calculate_pi(50)), math.pi)
+ True
+
+ >>> math.isclose(float(calculate_pi(100)), math.pi)
+ True
+
+ Since math.pi-constant contains only 16 digits, here some test with preknown values:
+
+ >>> calculate_pi(50)
+ '3.14159265358979323846264338327950288419716939937510'
+ >>> calculate_pi(80)
+ '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899'
+
+ To apply the Leibniz formula for calculating pi,
+ the variables q, r, t, k, n, and l are used for the iteration process.
+ """
+ q = 1
+ r = 0
+ t = 1
+ k = 1
+ n = 3
+ l = 3
+ decimal = limit
+ counter = 0
+
+ result = ""
+
+ """
+ We will avoid using yield since we otherwise get a Generator-Object,
+ which we can't just compare against anything. We would have to make a list out of it
+ after the generation, so we will just stick to plain return logic:
+ """
+ while counter != decimal + 1:
+ if 4 * q + r - t < n * t:
+ result += str(n)
+ if counter == 0:
+ result += "."
+
+ if decimal == counter:
+ break
+
+ counter += 1
+ nr = 10 * (r - n * t)
+ n = ((10 * (3 * q + r)) // t) - 10 * n
+ q *= 10
+ r = nr
+ else:
+ nr = (2 * q + r) * l
+ nn = (q * (7 * k) + 2 + (r * l)) // (t * l)
+ q *= k
+ t *= l
+ l += 2
+ k += 1
+ n = nn
+ r = nr
+ return result
+
+
+def main() -> None:
+ print(f"{calculate_pi(50) = }")
+ import doctest
+
+ doctest.testmod()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/maths/pi_monte_carlo_estimation.py b/maths/pi_monte_carlo_estimation.py
index 20b46dddc..29b679907 100644
--- a/maths/pi_monte_carlo_estimation.py
+++ b/maths/pi_monte_carlo_estimation.py
@@ -11,7 +11,7 @@ class Point:
True, if the point lies in the unit circle
False, otherwise
"""
- return (self.x ** 2 + self.y ** 2) <= 1
+ return (self.x**2 + self.y**2) <= 1
@classmethod
def random_unit_square(cls):
@@ -47,7 +47,7 @@ def estimate_pi(number_of_simulations: int) -> float:
raise ValueError("At least one simulation is necessary to estimate PI.")
number_in_unit_circle = 0
- for simulation_index in range(number_of_simulations):
+ for _ in range(number_of_simulations):
random_point = Point.random_unit_square()
if random_point.is_in_unit_circle():
diff --git a/maths/points_are_collinear_3d.py b/maths/points_are_collinear_3d.py
new file mode 100644
index 000000000..3bc0b3b9e
--- /dev/null
+++ b/maths/points_are_collinear_3d.py
@@ -0,0 +1,126 @@
+"""
+Check if three points are collinear in 3D.
+
+In short, the idea is that we are able to create a triangle using three points,
+and the area of that triangle can determine if the three points are collinear or not.
+
+
+First, we create two vectors with the same initial point from the three points,
+then we will calculate the cross-product of them.
+
+The length of the cross vector is numerically equal to the area of a parallelogram.
+
+Finally, the area of the triangle is equal to half of the area of the parallelogram.
+
+Since we are only differentiating between zero and anything else,
+we can get rid of the square root when calculating the length of the vector,
+and also the division by two at the end.
+
+From a second perspective, if the two vectors are parallel and overlapping,
+we can't get a nonzero perpendicular vector,
+since there will be an infinite number of orthogonal vectors.
+
+To simplify the solution we will not calculate the length,
+but we will decide directly from the vector whether it is equal to (0, 0, 0) or not.
+
+
+Read More:
+ https://math.stackexchange.com/a/1951650
+"""
+
+Vector3d = tuple[float, float, float]
+Point3d = tuple[float, float, float]
+
+
+def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
+ """
+ Pass two points to get the vector from them in the form (x, y, z).
+
+ >>> create_vector((0, 0, 0), (1, 1, 1))
+ (1, 1, 1)
+ >>> create_vector((45, 70, 24), (47, 32, 1))
+ (2, -38, -23)
+ >>> create_vector((-14, -1, -8), (-7, 6, 4))
+ (7, 7, 12)
+ """
+ x = end_point2[0] - end_point1[0]
+ y = end_point2[1] - end_point1[1]
+ z = end_point2[2] - end_point1[2]
+ return (x, y, z)
+
+
+def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
+ """
+ Get the cross of the two vectors AB and AC.
+
+ I used determinant of 2x2 to get the determinant of the 3x3 matrix in the process.
+
+ Read More:
+ https://en.wikipedia.org/wiki/Cross_product
+ https://en.wikipedia.org/wiki/Determinant
+
+ >>> get_3d_vectors_cross((3, 4, 7), (4, 9, 2))
+ (-55, 22, 11)
+ >>> get_3d_vectors_cross((1, 1, 1), (1, 1, 1))
+ (0, 0, 0)
+ >>> get_3d_vectors_cross((-4, 3, 0), (3, -9, -12))
+ (-36, -48, 27)
+ >>> get_3d_vectors_cross((17.67, 4.7, 6.78), (-9.5, 4.78, -19.33))
+ (-123.2594, 277.15110000000004, 129.11260000000001)
+ """
+ x = ab[1] * ac[2] - ab[2] * ac[1] # *i
+ y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
+ z = ab[0] * ac[1] - ab[1] * ac[0] # *k
+ return (x, y, z)
+
+
+def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
+ """
+    Check if vector is equal to (0, 0, 0) or not.
+
+    Since the algorithm is very accurate, we will never get a zero vector,
+ so we need to round the vector axis,
+ because we want a result that is either True or False.
+ In other applications, we can return a float that represents the collinearity ratio.
+
+ >>> is_zero_vector((0, 0, 0), accuracy=10)
+ True
+ >>> is_zero_vector((15, 74, 32), accuracy=10)
+ False
+ >>> is_zero_vector((-15, -74, -32), accuracy=10)
+ False
+ """
+ return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
+
+
+def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
+ """
+ Check if three points are collinear or not.
+
+    1- Create two vectors AB and AC.
+    2- Get the cross vector of the two vectors.
+    3- Calculate the length of the cross vector.
+    4- If the length is zero then the points are collinear, else they are not.
+
+ The use of the accuracy parameter is explained in is_zero_vector docstring.
+
+ >>> are_collinear((4.802293498137402, 3.536233125455244, 0),
+ ... (-2.186788107953106, -9.24561398001649, 7.141509524846482),
+ ... (1.530169574640268, -2.447927606600034, 3.343487096469054))
+ True
+ >>> are_collinear((-6, -2, 6),
+ ... (6.200213806439997, -4.930157614926678, -4.482371908289856),
+ ... (-4.085171149525941, -2.459889509029438, 4.354787180795383))
+ True
+ >>> are_collinear((2.399001826862445, -2.452009976680793, 4.464656666157666),
+ ... (-3.682816335934376, 5.753788986533145, 9.490993909044244),
+ ... (1.962903518985307, 3.741415730125627, 7))
+ False
+ >>> are_collinear((1.875375340689544, -7.268426006071538, 7.358196269835993),
+ ... (-3.546599383667157, -4.630005261513976, 3.208784032924246),
+ ... (-2.564606140206386, 3.937845170672183, 7))
+ False
+ """
+ ab = create_vector(a, b)
+ ac = create_vector(a, c)
+ return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py
new file mode 100644
index 000000000..5082f54f7
--- /dev/null
+++ b/maths/pollard_rho.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from math import gcd
+
+
+def pollard_rho(
+ num: int,
+ seed: int = 2,
+ step: int = 1,
+ attempts: int = 3,
+) -> int | None:
+ """
+ Use Pollard's Rho algorithm to return a nontrivial factor of ``num``.
+ The returned factor may be composite and require further factorization.
+ The algorithm will return None if it fails to find a factor within
+ the specified number of attempts or within the specified number of steps.
+ If ``num`` is prime, this algorithm is guaranteed to return None.
+ https://en.wikipedia.org/wiki/Pollard%27s_rho_algorithm
+
+ >>> pollard_rho(18446744073709551617)
+ 274177
+ >>> pollard_rho(97546105601219326301)
+ 9876543191
+ >>> pollard_rho(100)
+ 2
+ >>> pollard_rho(17)
+ >>> pollard_rho(17**3)
+ 17
+ >>> pollard_rho(17**3, attempts=1)
+ >>> pollard_rho(3*5*7)
+ 21
+ >>> pollard_rho(1)
+ Traceback (most recent call last):
+ ...
+ ValueError: The input value cannot be less than 2
+ """
+ # A value less than 2 can cause an infinite loop in the algorithm.
+ if num < 2:
+ raise ValueError("The input value cannot be less than 2")
+
+ # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
+ # algorithm struggles to find factors that are divisible by two.
+ # As a workaround, we specifically check for two and even inputs.
+ # See: https://math.stackexchange.com/a/2856214/165820
+ if num > 2 and num % 2 == 0:
+ return 2
+
+ # Pollard's Rho algorithm requires a function that returns pseudorandom
+ # values between 0 <= X < ``num``. It doesn't need to be random in the
+ # sense that the output value is cryptographically secure or difficult
+ # to calculate, it only needs to be random in the sense that all output
+ # values should be equally likely to appear.
+ # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
+ # However, the success of Pollard's algorithm isn't guaranteed and is
+ # determined in part by the initial seed and the chosen random function.
+ # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
+ # where ``C`` is a value that we can modify between each attempt.
+ def rand_fn(value: int, step: int, modulus: int) -> int:
+ """
+ Returns a pseudorandom value modulo ``modulus`` based on the
+ input ``value`` and attempt-specific ``step`` size.
+
+ >>> rand_fn(0, 0, 0)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError: integer division or modulo by zero
+ >>> rand_fn(1, 2, 3)
+ 0
+ >>> rand_fn(0, 10, 7)
+ 3
+ >>> rand_fn(1234, 1, 17)
+ 16
+ """
+ return (pow(value, 2) + step) % modulus
+
+ for _ in range(attempts):
+ # These track the position within the cycle detection logic.
+ tortoise = seed
+ hare = seed
+
+ while True:
+ # At each iteration, the tortoise moves one step and the hare moves two.
+ tortoise = rand_fn(tortoise, step, num)
+ hare = rand_fn(hare, step, num)
+ hare = rand_fn(hare, step, num)
+
+ # At some point both the tortoise and the hare will enter a cycle whose
+ # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
+ # the tortoise and hare will end up on the same value modulo ``p``.
+ # We can detect when this happens because the position difference between
+ # the tortoise and the hare will share a common divisor with ``num``.
+ divisor = gcd(hare - tortoise, num)
+
+ if divisor == 1:
+ # No common divisor yet, just keep searching.
+ continue
+ else:
+ # We found a common divisor!
+ if divisor == num:
+ # Unfortunately, the divisor is ``num`` itself and is useless.
+ break
+ else:
+ # The divisor is a nontrivial factor of ``num``!
+ return divisor
+
+ # If we made it here, then this attempt failed.
+ # We need to pick a new starting seed for the tortoise and hare
+ # in addition to a new step value for the random function.
+ # To keep this example implementation deterministic, the
+ # new values will be generated based on currently available
+ # values instead of using something like ``random.randint``.
+
+ # We can use the hare's position as the new seed.
+ # This is actually what Richard Brent's "optimized" variant does.
+ seed = hare
+
+ # The new step value for the random function can just be incremented.
+ # At first the results will be similar to what the old function would
+ # have produced, but the value will quickly diverge after a bit.
+ step += 1
+
+ # We haven't found a divisor within the requested number of attempts.
+ # We were unlucky or ``num`` itself is actually prime.
+ return None
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "num",
+ type=int,
+ help="The value to find a divisor of",
+ )
+ parser.add_argument(
+ "--attempts",
+ type=int,
+ default=3,
+ help="The number of attempts before giving up",
+ )
+ args = parser.parse_args()
+
+ divisor = pollard_rho(args.num, attempts=args.attempts)
+ if divisor is None:
+ print(f"{args.num} is probably prime")
+ else:
+ quotient = args.num // divisor
+ print(f"{args.num} = {divisor} * {quotient}")
diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py
index 68ff97ddd..90a51f521 100644
--- a/maths/polynomial_evaluation.py
+++ b/maths/polynomial_evaluation.py
@@ -1,4 +1,4 @@
-from typing import Sequence
+from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
@@ -12,7 +12,7 @@ def evaluate_poly(poly: Sequence[float], x: float) -> float:
>>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
79800.0
"""
- return sum(c * (x ** i) for i, c in enumerate(poly))
+ return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
@@ -45,7 +45,7 @@ if __name__ == "__main__":
>>> poly = (0.0, 0.0, 5.0, 9.3, 7.0) # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
>>> x = -13.0
>>> # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9
- >>> print(evaluate_poly(poly, x))
+ >>> evaluate_poly(poly, x)
180339.9
"""
poly = (0.0, 0.0, 5.0, 9.3, 7.0)
diff --git a/maths/polynomials/__init__.py b/maths/polynomials/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/maths/polynomials/single_indeterminate_operations.py b/maths/polynomials/single_indeterminate_operations.py
new file mode 100644
index 000000000..8bafdb591
--- /dev/null
+++ b/maths/polynomials/single_indeterminate_operations.py
@@ -0,0 +1,188 @@
+"""
+
+This module implements a single indeterminate polynomials class
+with some basic operations
+
+Reference: https://en.wikipedia.org/wiki/Polynomial
+
+"""
+
+from __future__ import annotations
+
+from collections.abc import MutableSequence
+
+
+class Polynomial:
+ def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
+ """
+ The coefficients should be in order of degree, from smallest to largest.
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> p = Polynomial(2, [1, 2, 3, 4])
+ Traceback (most recent call last):
+ ...
+ ValueError: The number of coefficients should be equal to the degree + 1.
+
+ """
+ if len(coefficients) != degree + 1:
+ raise ValueError(
+ "The number of coefficients should be equal to the degree + 1."
+ )
+
+ self.coefficients: list[float] = list(coefficients)
+ self.degree = degree
+
+ def __add__(self, polynomial_2: Polynomial) -> Polynomial:
+ """
+ Polynomial addition
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> q = Polynomial(2, [1, 2, 3])
+ >>> p + q
+ 6x^2 + 4x + 2
+ """
+
+ if self.degree > polynomial_2.degree:
+ coefficients = self.coefficients[:]
+ for i in range(polynomial_2.degree + 1):
+ coefficients[i] += polynomial_2.coefficients[i]
+ return Polynomial(self.degree, coefficients)
+ else:
+ coefficients = polynomial_2.coefficients[:]
+ for i in range(self.degree + 1):
+ coefficients[i] += self.coefficients[i]
+ return Polynomial(polynomial_2.degree, coefficients)
+
+ def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
+ """
+ Polynomial subtraction
+ >>> p = Polynomial(2, [1, 2, 4])
+ >>> q = Polynomial(2, [1, 2, 3])
+ >>> p - q
+ 1x^2
+ """
+ return self + polynomial_2 * Polynomial(0, [-1])
+
+ def __neg__(self) -> Polynomial:
+ """
+ Polynomial negation
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> -p
+ - 3x^2 - 2x - 1
+ """
+ return Polynomial(self.degree, [-c for c in self.coefficients])
+
+ def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
+ """
+ Polynomial multiplication
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> q = Polynomial(2, [1, 2, 3])
+ >>> p * q
+ 9x^4 + 12x^3 + 10x^2 + 4x + 1
+ """
+ coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
+ for i in range(self.degree + 1):
+ for j in range(polynomial_2.degree + 1):
+ coefficients[i + j] += (
+ self.coefficients[i] * polynomial_2.coefficients[j]
+ )
+
+ return Polynomial(self.degree + polynomial_2.degree, coefficients)
+
+ def evaluate(self, substitution: int | float) -> int | float:
+ """
+ Evaluates the polynomial at x.
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> p.evaluate(2)
+ 17
+ """
+ result: int | float = 0
+ for i in range(self.degree + 1):
+ result += self.coefficients[i] * (substitution**i)
+ return result
+
+ def __str__(self) -> str:
+ """
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> print(p)
+ 3x^2 + 2x + 1
+ """
+ polynomial = ""
+ for i in range(self.degree, -1, -1):
+ if self.coefficients[i] == 0:
+ continue
+ elif self.coefficients[i] > 0:
+ if polynomial:
+ polynomial += " + "
+ else:
+ polynomial += " - "
+
+ if i == 0:
+ polynomial += str(abs(self.coefficients[i]))
+ elif i == 1:
+ polynomial += str(abs(self.coefficients[i])) + "x"
+ else:
+ polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
+
+ return polynomial
+
+ def __repr__(self) -> str:
+ """
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> p
+ 3x^2 + 2x + 1
+ """
+ return self.__str__()
+
+ def derivative(self) -> Polynomial:
+ """
+ Returns the derivative of the polynomial.
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> p.derivative()
+ 6x + 2
+ """
+ coefficients: list[float] = [0] * self.degree
+ for i in range(self.degree):
+ coefficients[i] = self.coefficients[i + 1] * (i + 1)
+ return Polynomial(self.degree - 1, coefficients)
+
+ def integral(self, constant: int | float = 0) -> Polynomial:
+ """
+ Returns the integral of the polynomial.
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> p.integral()
+ 1.0x^3 + 1.0x^2 + 1.0x
+ """
+ coefficients: list[float] = [0] * (self.degree + 2)
+ coefficients[0] = constant
+ for i in range(self.degree + 1):
+ coefficients[i + 1] = self.coefficients[i] / (i + 1)
+ return Polynomial(self.degree + 1, coefficients)
+
+ def __eq__(self, polynomial_2: object) -> bool:
+ """
+ Checks if two polynomials are equal.
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> q = Polynomial(2, [1, 2, 3])
+ >>> p == q
+ True
+ """
+ if not isinstance(polynomial_2, Polynomial):
+ return False
+
+ if self.degree != polynomial_2.degree:
+ return False
+
+ for i in range(self.degree + 1):
+ if self.coefficients[i] != polynomial_2.coefficients[i]:
+ return False
+
+ return True
+
+ def __ne__(self, polynomial_2: object) -> bool:
+ """
+ Checks if two polynomials are not equal.
+ >>> p = Polynomial(2, [1, 2, 3])
+ >>> q = Polynomial(2, [1, 2, 3])
+ >>> p != q
+ False
+ """
+ return not self.__eq__(polynomial_2)
diff --git a/maths/prime_check.py b/maths/prime_check.py
index e2bcb7b8f..80ab8bc5d 100644
--- a/maths/prime_check.py
+++ b/maths/prime_check.py
@@ -4,54 +4,79 @@ import math
import unittest
-def prime_check(number: int) -> bool:
- """Checks to see if a number is a prime.
+def is_prime(number: int) -> bool:
+ """Checks to see if a number is a prime in O(sqrt(n)).
A number is prime if it has exactly two factors: 1 and itself.
+
+ >>> is_prime(0)
+ False
+ >>> is_prime(1)
+ False
+ >>> is_prime(2)
+ True
+ >>> is_prime(3)
+ True
+ >>> is_prime(27)
+ False
+ >>> is_prime(87)
+ False
+ >>> is_prime(563)
+ True
+ >>> is_prime(2999)
+ True
+ >>> is_prime(67483)
+ False
"""
+ # precondition
+ assert isinstance(number, int) and (
+ number >= 0
+ ), "'number' must been an int and positive"
+
if 1 < number < 4:
# 2 and 3 are primes
return True
- elif number < 2 or not number % 2:
- # Negatives, 0, 1 and all even numbers are not primes
+ elif number < 2 or number % 2 == 0 or number % 3 == 0:
+ # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
- odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
- return not any(not number % i for i in odd_numbers)
+ # All prime numbers greater than 3 are of the form 6k +/- 1
+ for i in range(5, int(math.sqrt(number) + 1), 6):
+ if number % i == 0 or number % (i + 2) == 0:
+ return False
+ return True
class Test(unittest.TestCase):
def test_primes(self):
- self.assertTrue(prime_check(2))
- self.assertTrue(prime_check(3))
- self.assertTrue(prime_check(5))
- self.assertTrue(prime_check(7))
- self.assertTrue(prime_check(11))
- self.assertTrue(prime_check(13))
- self.assertTrue(prime_check(17))
- self.assertTrue(prime_check(19))
- self.assertTrue(prime_check(23))
- self.assertTrue(prime_check(29))
+ self.assertTrue(is_prime(2))
+ self.assertTrue(is_prime(3))
+ self.assertTrue(is_prime(5))
+ self.assertTrue(is_prime(7))
+ self.assertTrue(is_prime(11))
+ self.assertTrue(is_prime(13))
+ self.assertTrue(is_prime(17))
+ self.assertTrue(is_prime(19))
+ self.assertTrue(is_prime(23))
+ self.assertTrue(is_prime(29))
def test_not_primes(self):
+ with self.assertRaises(AssertionError):
+ is_prime(-19)
self.assertFalse(
- prime_check(-19),
- "Negative numbers are excluded by definition of prime numbers.",
- )
- self.assertFalse(
- prime_check(0),
+ is_prime(0),
"Zero doesn't have any positive factors, primes must have exactly two.",
)
self.assertFalse(
- prime_check(1),
+ is_prime(1),
"One only has 1 positive factor, primes must have exactly two.",
)
- self.assertFalse(prime_check(2 * 2))
- self.assertFalse(prime_check(2 * 3))
- self.assertFalse(prime_check(3 * 3))
- self.assertFalse(prime_check(3 * 5))
- self.assertFalse(prime_check(3 * 5 * 7))
+ self.assertFalse(is_prime(2 * 2))
+ self.assertFalse(is_prime(2 * 3))
+ self.assertFalse(is_prime(3 * 3))
+ self.assertFalse(is_prime(3 * 5))
+ self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py
index 38bebddee..c5297ed92 100644
--- a/maths/prime_numbers.py
+++ b/maths/prime_numbers.py
@@ -1,8 +1,8 @@
import math
-from typing import Generator
+from collections.abc import Generator
-def slow_primes(max: int) -> Generator[int, None, None]:
+def slow_primes(max_n: int) -> Generator[int, None, None]:
"""
Return a list of all primes numbers up to max.
>>> list(slow_primes(0))
@@ -20,7 +20,7 @@ def slow_primes(max: int) -> Generator[int, None, None]:
>>> list(slow_primes(10000))[-1]
9973
"""
- numbers: Generator = (i for i in range(1, (max + 1)))
+ numbers: Generator = (i for i in range(1, (max_n + 1)))
for i in (n for n in numbers if n > 1):
for j in range(2, i):
if (i % j) == 0:
@@ -29,7 +29,7 @@ def slow_primes(max: int) -> Generator[int, None, None]:
yield i
-def primes(max: int) -> Generator[int, None, None]:
+def primes(max_n: int) -> Generator[int, None, None]:
"""
Return a list of all primes numbers up to max.
>>> list(primes(0))
@@ -47,7 +47,7 @@ def primes(max: int) -> Generator[int, None, None]:
>>> list(primes(10000))[-1]
9973
"""
- numbers: Generator = (i for i in range(1, (max + 1)))
+ numbers: Generator = (i for i in range(1, (max_n + 1)))
for i in (n for n in numbers if n > 1):
# only need to check for factors up to sqrt(i)
bound = int(math.sqrt(i)) + 1
@@ -58,13 +58,52 @@ def primes(max: int) -> Generator[int, None, None]:
yield i
+def fast_primes(max_n: int) -> Generator[int, None, None]:
+ """
+ Return a list of all prime numbers up to max.
+ >>> list(fast_primes(0))
+ []
+ >>> list(fast_primes(-1))
+ []
+ >>> list(fast_primes(-10))
+ []
+ >>> list(fast_primes(25))
+ [2, 3, 5, 7, 11, 13, 17, 19, 23]
+ >>> list(fast_primes(11))
+ [2, 3, 5, 7, 11]
+ >>> list(fast_primes(33))
+ [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
+ >>> list(fast_primes(10000))[-1]
+ 9973
+ """
+ numbers: Generator = (i for i in range(1, (max_n + 1), 2))
+ # It's useless to test even numbers as they will not be prime
+ if max_n > 2:
+ yield 2 # Because 2 will not be tested, it's necessary to yield it now
+ for i in (n for n in numbers if n > 1):
+ bound = int(math.sqrt(i)) + 1
+ for j in range(3, bound, 2):
+ # As we removed the even numbers, we don't need them now
+ if (i % j) == 0:
+ break
+ else:
+ yield i
+
+
+def benchmark():
+ """
+ Let's benchmark our functions side-by-side...
+ """
+ from timeit import timeit
+
+ setup = "from __main__ import slow_primes, primes, fast_primes"
+ print(timeit("slow_primes(1_000_000_000_000)", setup=setup, number=1_000_000))
+ print(timeit("primes(1_000_000_000_000)", setup=setup, number=1_000_000))
+ print(timeit("fast_primes(1_000_000_000_000)", setup=setup, number=1_000_000))
+
+
if __name__ == "__main__":
number = int(input("Calculate primes up to:\n>> ").strip())
for ret in primes(number):
print(ret)
-
- # Let's benchmark them side-by-side...
- from timeit import timeit
-
- print(timeit("slow_primes(1_000_000)", setup="from __main__ import slow_primes"))
- print(timeit("primes(1_000_000)", setup="from __main__ import primes"))
+ benchmark()
diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py
index 8d60e48c2..32eef9165 100644
--- a/maths/prime_sieve_eratosthenes.py
+++ b/maths/prime_sieve_eratosthenes.py
@@ -1,12 +1,10 @@
-# flake8: noqa
-
"""
Sieve of Eratosthenes
-Input : n =10
+Input: n = 10
Output: 2 3 5 7
-Input : n = 20
+Input: n = 20
Output: 2 3 5 7 11 13 17 19
you can read in detail about this at
@@ -14,34 +12,43 @@ https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
"""
-def prime_sieve_eratosthenes(num):
+def prime_sieve_eratosthenes(num: int) -> list[int]:
"""
- print the prime numbers up to n
+ Return a list of the prime numbers up to n
>>> prime_sieve_eratosthenes(10)
- 2,3,5,7,
+ [2, 3, 5, 7]
>>> prime_sieve_eratosthenes(20)
- 2,3,5,7,11,13,17,19,
+ [2, 3, 5, 7, 11, 13, 17, 19]
+ >>> prime_sieve_eratosthenes(2)
+ [2]
+ >>> prime_sieve_eratosthenes(1)
+ []
+ >>> prime_sieve_eratosthenes(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be a positive integer
"""
- primes = [True for i in range(num + 1)]
- p = 2
+ if num <= 0:
+ raise ValueError("Input must be a positive integer")
+ primes = [True] * (num + 1)
+
+ p = 2
while p * p <= num:
if primes[p]:
for i in range(p * p, num + 1, p):
primes[i] = False
p += 1
- for prime in range(2, num + 1):
- if primes[prime]:
- print(prime, end=",")
+ return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
- num = int(input())
- prime_sieve_eratosthenes(num)
+ user_num = int(input("Enter a positive integer: ").strip())
+ print(prime_sieve_eratosthenes(user_num))
diff --git a/maths/primelib.py b/maths/primelib.py
index 37883d9cf..28b5aee9d 100644
--- a/maths/primelib.py
+++ b/maths/primelib.py
@@ -8,27 +8,27 @@ prime numbers and whole numbers.
Overview:
-isPrime(number)
-sieveEr(N)
-getPrimeNumbers(N)
-primeFactorization(number)
-greatestPrimeFactor(number)
-smallestPrimeFactor(number)
-getPrime(n)
-getPrimesBetween(pNumber1, pNumber2)
+is_prime(number)
+sieve_er(N)
+get_prime_numbers(N)
+prime_factorization(number)
+greatest_prime_factor(number)
+smallest_prime_factor(number)
+get_prime(n)
+get_primes_between(pNumber1, pNumber2)
----
-isEven(number)
-isOdd(number)
+is_even(number)
+is_odd(number)
gcd(number1, number2) // greatest common divisor
-kgV(number1, number2) // least common multiple
-getDivisors(number) // all divisors of 'number' inclusive 1, number
-isPerfectNumber(number)
+kg_v(number1, number2) // least common multiple
+get_divisors(number) // all divisors of 'number' inclusive 1, number
+is_perfect_number(number)
NEW-FUNCTIONS
-simplifyFraction(numerator, denominator)
+simplify_fraction(numerator, denominator)
factorial (n) // n!
fib (n) // calculate the n-th fibonacci term.
@@ -41,7 +41,7 @@ goldbach(number) // Goldbach's assumption
from math import sqrt
-def isPrime(number):
+def is_prime(number: int) -> bool:
"""
input: positive integer 'number'
returns true if 'number' is prime otherwise false.
@@ -59,7 +59,6 @@ def isPrime(number):
status = False
for divisor in range(2, int(round(sqrt(number))) + 1):
-
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
@@ -75,7 +74,7 @@ def isPrime(number):
# ------------------------------------------
-def sieveEr(N):
+def sieve_er(n):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N.
@@ -86,23 +85,21 @@ def sieveEr(N):
"""
# precondition
- assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
+ assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
- beginList = [x for x in range(2, N + 1)]
+ begin_list = list(range(2, n + 1))
ans = [] # this list will be returns.
# actual sieve of erathostenes
- for i in range(len(beginList)):
-
- for j in range(i + 1, len(beginList)):
-
- if (beginList[i] != 0) and (beginList[j] % beginList[i] == 0):
- beginList[j] = 0
+ for i in range(len(begin_list)):
+ for j in range(i + 1, len(begin_list)):
+ if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
+ begin_list[j] = 0
# filters actual prime numbers.
- ans = [x for x in beginList if x != 0]
+ ans = [x for x in begin_list if x != 0]
# precondition
assert isinstance(ans, list), "'ans' must been from type list"
@@ -113,7 +110,7 @@ def sieveEr(N):
# --------------------------------
-def getPrimeNumbers(N):
+def get_prime_numbers(n):
"""
input: positive integer 'N' > 2
returns a list of prime numbers from 2 up to N (inclusive)
@@ -121,16 +118,14 @@ def getPrimeNumbers(N):
"""
# precondition
- assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
+ assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
ans = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
- for number in range(2, N + 1):
-
- if isPrime(number):
-
+ for number in range(2, n + 1):
+ if is_prime(number):
ans.append(number)
# precondition
@@ -142,7 +137,7 @@ def getPrimeNumbers(N):
# -----------------------------------------
-def primeFactorization(number):
+def prime_factorization(number):
"""
input: positive integer 'number'
returns a list of the prime number factors of 'number'
@@ -159,16 +154,13 @@ def primeFactorization(number):
quotient = number
- if number == 0 or number == 1:
-
+ if number in {0, 1}:
ans.append(number)
# if 'number' not prime then builds the prime factorization of 'number'
- elif not isPrime(number):
-
+ elif not is_prime(number):
while quotient != 1:
-
- if isPrime(factor) and (quotient % factor == 0):
+ if is_prime(factor) and (quotient % factor == 0):
ans.append(factor)
quotient /= factor
else:
@@ -186,7 +178,7 @@ def primeFactorization(number):
# -----------------------------------------
-def greatestPrimeFactor(number):
+def greatest_prime_factor(number):
"""
input: positive integer 'number' >= 0
returns the greatest prime number factor of 'number'
@@ -200,9 +192,9 @@ def greatestPrimeFactor(number):
ans = 0
# prime factorization of 'number'
- primeFactors = primeFactorization(number)
+ prime_factors = prime_factorization(number)
- ans = max(primeFactors)
+ ans = max(prime_factors)
# precondition
assert isinstance(ans, int), "'ans' must been from type int"
@@ -213,7 +205,7 @@ def greatestPrimeFactor(number):
# ----------------------------------------------
-def smallestPrimeFactor(number):
+def smallest_prime_factor(number):
"""
input: integer 'number' >= 0
returns the smallest prime number factor of 'number'
@@ -227,9 +219,9 @@ def smallestPrimeFactor(number):
ans = 0
# prime factorization of 'number'
- primeFactors = primeFactorization(number)
+ prime_factors = prime_factorization(number)
- ans = min(primeFactors)
+ ans = min(prime_factors)
# precondition
assert isinstance(ans, int), "'ans' must been from type int"
@@ -240,7 +232,7 @@ def smallestPrimeFactor(number):
# ----------------------
-def isEven(number):
+def is_even(number):
"""
input: integer 'number'
returns true if 'number' is even, otherwise false.
@@ -256,7 +248,7 @@ def isEven(number):
# ------------------------
-def isOdd(number):
+def is_odd(number):
"""
input: integer 'number'
returns true if 'number' is odd, otherwise false.
@@ -281,14 +273,14 @@ def goldbach(number):
# precondition
assert (
- isinstance(number, int) and (number > 2) and isEven(number)
+ isinstance(number, int) and (number > 2) and is_even(number)
), "'number' must been an int, even and > 2"
ans = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
- primeNumbers = getPrimeNumbers(number)
- lenPN = len(primeNumbers)
+ prime_numbers = get_prime_numbers(number)
+ len_pn = len(prime_numbers)
# run variable for while-loops.
i = 0
@@ -297,16 +289,14 @@ def goldbach(number):
# exit variable. for break up the loops
loop = True
- while i < lenPN and loop:
-
+ while i < len_pn and loop:
j = i + 1
- while j < lenPN and loop:
-
- if primeNumbers[i] + primeNumbers[j] == number:
+ while j < len_pn and loop:
+ if prime_numbers[i] + prime_numbers[j] == number:
loop = False
- ans.append(primeNumbers[i])
- ans.append(primeNumbers[j])
+ ans.append(prime_numbers[i])
+ ans.append(prime_numbers[j])
j += 1
@@ -317,8 +307,8 @@ def goldbach(number):
isinstance(ans, list)
and (len(ans) == 2)
and (ans[0] + ans[1] == number)
- and isPrime(ans[0])
- and isPrime(ans[1])
+ and is_prime(ans[0])
+ and is_prime(ans[1])
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
@@ -345,7 +335,6 @@ def gcd(number1, number2):
rest = 0
while number2 != 0:
-
rest = number1 % number2
number1 = number2
number2 = rest
@@ -361,7 +350,7 @@ def gcd(number1, number2):
# ----------------------------------------------------
-def kgV(number1, number2):
+def kg_v(number1, number2):
"""
Least common multiple
input: two positive integer 'number1' and 'number2'
@@ -380,15 +369,13 @@ def kgV(number1, number2):
# for kgV (x,1)
if number1 > 1 and number2 > 1:
-
# builds the prime factorization of 'number1' and 'number2'
- primeFac1 = primeFactorization(number1)
- primeFac2 = primeFactorization(number2)
+ prime_fac_1 = prime_factorization(number1)
+ prime_fac_2 = prime_factorization(number2)
elif number1 == 1 or number2 == 1:
-
- primeFac1 = []
- primeFac2 = []
+ prime_fac_1 = []
+ prime_fac_2 = []
ans = max(number1, number2)
count1 = 0
@@ -397,35 +384,29 @@ def kgV(number1, number2):
done = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
- for n in primeFac1:
-
+ for n in prime_fac_1:
if n not in done:
+ if n in prime_fac_2:
+ count1 = prime_fac_1.count(n)
+ count2 = prime_fac_2.count(n)
- if n in primeFac2:
-
- count1 = primeFac1.count(n)
- count2 = primeFac2.count(n)
-
- for i in range(max(count1, count2)):
+ for _ in range(max(count1, count2)):
ans *= n
else:
+ count1 = prime_fac_1.count(n)
- count1 = primeFac1.count(n)
-
- for i in range(count1):
+ for _ in range(count1):
ans *= n
done.append(n)
# iterates through primeFac2
- for n in primeFac2:
-
+ for n in prime_fac_2:
if n not in done:
+ count2 = prime_fac_2.count(n)
- count2 = primeFac2.count(n)
-
- for i in range(count2):
+ for _ in range(count2):
ans *= n
done.append(n)
@@ -441,7 +422,7 @@ def kgV(number1, number2):
# ----------------------------------
-def getPrime(n):
+def get_prime(n):
"""
Gets the n-th prime number.
input: positive integer 'n' >= 0
@@ -455,18 +436,17 @@ def getPrime(n):
ans = 2 # this variable holds the answer
while index < n:
-
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
- while not isPrime(ans):
+ while not is_prime(ans):
ans += 1
# precondition
- assert isinstance(ans, int) and isPrime(
+ assert isinstance(ans, int) and is_prime(
ans
), "'ans' must been a prime number and from type int"
@@ -476,7 +456,7 @@ def getPrime(n):
# ---------------------------------------------------
-def getPrimesBetween(pNumber1, pNumber2):
+def get_primes_between(p_number_1, p_number_2):
"""
input: prime numbers 'pNumber1' and 'pNumber2'
pNumber1 < pNumber2
@@ -486,31 +466,32 @@ def getPrimesBetween(pNumber1, pNumber2):
# precondition
assert (
- isPrime(pNumber1) and isPrime(pNumber2) and (pNumber1 < pNumber2)
+ is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
- number = pNumber1 + 1 # jump to the next number
+ number = p_number_1 + 1 # jump to the next number
ans = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
- while not isPrime(number):
+ while not is_prime(number):
number += 1
- while number < pNumber2:
-
+ while number < p_number_2:
ans.append(number)
number += 1
# fetch the next prime number.
- while not isPrime(number):
+ while not is_prime(number):
number += 1
# precondition
assert (
- isinstance(ans, list) and ans[0] != pNumber1 and ans[len(ans) - 1] != pNumber2
+ isinstance(ans, list)
+ and ans[0] != p_number_1
+ and ans[len(ans) - 1] != p_number_2
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
@@ -520,7 +501,7 @@ def getPrimesBetween(pNumber1, pNumber2):
# ----------------------------------------------------
-def getDivisors(n):
+def get_divisors(n):
"""
input: positive integer 'n' >= 1
returns all divisors of n (inclusive 1 and 'n')
@@ -532,7 +513,6 @@ def getDivisors(n):
ans = [] # will be returned.
for divisor in range(1, n + 1):
-
if n % divisor == 0:
ans.append(divisor)
@@ -545,7 +525,7 @@ def getDivisors(n):
# ----------------------------------------------------
-def isPerfectNumber(number):
+def is_perfect_number(number):
"""
input: positive integer 'number' > 1
returns true if 'number' is a perfect number otherwise false.
@@ -556,7 +536,7 @@ def isPerfectNumber(number):
number > 1
), "'number' must been an int and >= 1"
- divisors = getDivisors(number)
+ divisors = get_divisors(number)
# precondition
assert (
@@ -572,7 +552,7 @@ def isPerfectNumber(number):
# ------------------------------------------------------------
-def simplifyFraction(numerator, denominator):
+def simplify_fraction(numerator, denominator):
"""
input: two integer 'numerator' and 'denominator'
assumes: 'denominator' != 0
@@ -587,16 +567,16 @@ def simplifyFraction(numerator, denominator):
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
- gcdOfFraction = gcd(abs(numerator), abs(denominator))
+ gcd_of_fraction = gcd(abs(numerator), abs(denominator))
# precondition
assert (
- isinstance(gcdOfFraction, int)
- and (numerator % gcdOfFraction == 0)
- and (denominator % gcdOfFraction == 0)
+ isinstance(gcd_of_fraction, int)
+ and (numerator % gcd_of_fraction == 0)
+ and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
- return (numerator // gcdOfFraction, denominator // gcdOfFraction)
+ return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
# -----------------------------------------------------------------
@@ -635,8 +615,7 @@ def fib(n):
fib1 = 1
ans = 1 # this will be return
- for i in range(n - 1):
-
+ for _ in range(n - 1):
tmp = ans
ans += fib1
fib1 = tmp
diff --git a/maths/print_multiplication_table.py b/maths/print_multiplication_table.py
new file mode 100644
index 000000000..dbe4a4be0
--- /dev/null
+++ b/maths/print_multiplication_table.py
@@ -0,0 +1,26 @@
+def multiplication_table(number: int, number_of_terms: int) -> str:
+ """
+ Returns the multiplication table of a given number up to the given number of terms
+
+ >>> print(multiplication_table(3, 5))
+ 3 * 1 = 3
+ 3 * 2 = 6
+ 3 * 3 = 9
+ 3 * 4 = 12
+ 3 * 5 = 15
+
+ >>> print(multiplication_table(-4, 6))
+ -4 * 1 = -4
+ -4 * 2 = -8
+ -4 * 3 = -12
+ -4 * 4 = -16
+ -4 * 5 = -20
+ -4 * 6 = -24
+ """
+ return "\n".join(
+ f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
+ )
+
+
+if __name__ == "__main__":
+ print(multiplication_table(number=5, number_of_terms=10))
diff --git a/maths/pronic_number.py b/maths/pronic_number.py
new file mode 100644
index 000000000..cf4d3d2eb
--- /dev/null
+++ b/maths/pronic_number.py
@@ -0,0 +1,55 @@
+"""
+== Pronic Number ==
+A number n is said to be a Pronic number if
+there exists an integer m such that n = m * (m + 1)
+
+Examples of Pronic Numbers: 0, 2, 6, 12, 20, 30, 42, 56, 72, 90, 110 ...
+https://en.wikipedia.org/wiki/Pronic_number
+"""
+
+# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
+
+
+def is_pronic(number: int) -> bool:
+ """
+ # doctest: +NORMALIZE_WHITESPACE
+    This function takes an integer number as input.
+ returns True if the number is pronic.
+ >>> is_pronic(-1)
+ False
+ >>> is_pronic(0)
+ True
+ >>> is_pronic(2)
+ True
+ >>> is_pronic(5)
+ False
+ >>> is_pronic(6)
+ True
+ >>> is_pronic(8)
+ False
+ >>> is_pronic(30)
+ True
+ >>> is_pronic(32)
+ False
+ >>> is_pronic(2147441940)
+ True
+ >>> is_pronic(9223372033963249500)
+ True
+ >>> is_pronic(6.0)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value of [number=6.0] must be an integer
+ """
+ if not isinstance(number, int):
+ msg = f"Input value of [number={number}] must be an integer"
+ raise TypeError(msg)
+ if number < 0 or number % 2 == 1:
+ return False
+ number_sqrt = int(number**0.5)
+ return number == number_sqrt * (number_sqrt + 1)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/proth_number.py b/maths/proth_number.py
new file mode 100644
index 000000000..47747ed26
--- /dev/null
+++ b/maths/proth_number.py
@@ -0,0 +1,75 @@
+"""
+Calculate the nth Proth number
+Source:
+ https://handwiki.org/wiki/Proth_number
+"""
+
+import math
+
+
+def proth(number: int) -> int:
+ """
+ :param number: nth number to calculate in the sequence
+ :return: the nth number in Proth number
+ Note: indexing starts at 1 i.e. proth(1) gives the first Proth number of 3
+ >>> proth(6)
+ 25
+ >>> proth(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value of [number=0] must be > 0
+ >>> proth(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input value of [number=-1] must be > 0
+ >>> proth(6.0)
+ Traceback (most recent call last):
+ ...
+ TypeError: Input value of [number=6.0] must be an integer
+ """
+
+ if not isinstance(number, int):
+ msg = f"Input value of [number={number}] must be an integer"
+ raise TypeError(msg)
+
+ if number < 1:
+ msg = f"Input value of [number={number}] must be > 0"
+ raise ValueError(msg)
+ elif number == 1:
+ return 3
+ elif number == 2:
+ return 5
+ else:
+ """
+ +1 for binary starting at 0 i.e. 2^0, 2^1, etc.
+ +1 to start the sequence at the 3rd Proth number
+ Hence, we have a +2 in the below statement
+ """
+ block_index = int(math.log(number // 3, 2)) + 2
+
+ proth_list = [3, 5]
+ proth_index = 2
+ increment = 3
+ for block in range(1, block_index):
+ for _ in range(increment):
+ proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
+ proth_index += 1
+ increment *= 2
+
+ return proth_list[number - 1]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ for number in range(11):
+ value = 0
+ try:
+ value = proth(number)
+ except ValueError:
+ print(f"ValueError: there is no {number}th Proth number")
+ continue
+
+ print(f"The {number}th Proth number: {value}")
diff --git a/maths/pythagoras.py b/maths/pythagoras.py
index 69a17731a..7770e981d 100644
--- a/maths/pythagoras.py
+++ b/maths/pythagoras.py
@@ -14,17 +14,13 @@ class Point:
def distance(a: Point, b: Point) -> float:
- return math.sqrt(abs((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2))
-
-
-def test_distance() -> None:
"""
>>> point1 = Point(2, -1, 7)
>>> point2 = Point(1, -3, 5)
>>> print(f"Distance from {point1} to {point2} is {distance(point1, point2)}")
Distance from Point(2, -1, 7) to Point(1, -3, 5) is 3.0
"""
- pass
+ return math.sqrt(abs((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2))
if __name__ == "__main__":
diff --git a/maths/qr_decomposition.py b/maths/qr_decomposition.py
index 5e15fede4..a8414fbec 100644
--- a/maths/qr_decomposition.py
+++ b/maths/qr_decomposition.py
@@ -1,7 +1,7 @@
import numpy as np
-def qr_householder(A):
+def qr_householder(a):
"""Return a QR-decomposition of the matrix A using Householder reflection.
The QR-decomposition decomposes the matrix A of shape (m, n) into an
@@ -37,14 +37,14 @@ def qr_householder(A):
>>> np.allclose(np.triu(R), R)
True
"""
- m, n = A.shape
+ m, n = a.shape
t = min(m, n)
- Q = np.eye(m)
- R = A.copy()
+ q = np.eye(m)
+ r = a.copy()
for k in range(t - 1):
# select a column of modified matrix A':
- x = R[k:, [k]]
+ x = r[k:, [k]]
# construct first basis vector
e1 = np.zeros_like(x)
e1[0] = 1.0
@@ -55,14 +55,14 @@ def qr_householder(A):
v /= np.linalg.norm(v)
# construct the Householder matrix
- Q_k = np.eye(m - k) - 2.0 * v @ v.T
+ q_k = np.eye(m - k) - 2.0 * v @ v.T
# pad with ones and zeros as necessary
- Q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), Q_k]])
+ q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), q_k]])
- Q = Q @ Q_k.T
- R = Q_k @ R
+ q = q @ q_k.T
+ r = q_k @ r
- return Q, R
+ return q, r
if __name__ == "__main__":
diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py
index de87071e5..2c5cdc004 100644
--- a/maths/radix2_fft.py
+++ b/maths/radix2_fft.py
@@ -39,7 +39,7 @@ class FFT:
>>> x = FFT(A, B)
Print product
- >>> print(x.product) # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
+ >>> x.product # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
[(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)]
__str__ test
@@ -49,10 +49,10 @@ class FFT:
A*B = 0*x^(-0+0j) + 1*x^(2+0j) + 2*x^(3+0j) + 3*x^(8+0j) + 4*x^(6+0j) + 5*x^(8+0j)
"""
- def __init__(self, polyA=[0], polyB=[0]):
+ def __init__(self, poly_a=None, poly_b=None):
# Input as list
- self.polyA = list(polyA)[:]
- self.polyB = list(polyB)[:]
+ self.polyA = list(poly_a or [0])[:]
+ self.polyB = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
@@ -64,44 +64,41 @@ class FFT:
self.len_B = len(self.polyB)
# Add 0 to make lengths equal a power of 2
- self.C_max_length = int(
+ self.c_max_length = int(
2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
)
- while len(self.polyA) < self.C_max_length:
+ while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
- while len(self.polyB) < self.C_max_length:
+ while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
- self.root = complex(mpmath.root(x=1, n=self.C_max_length, k=1))
+ self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
# The product
self.product = self.__multiply()
# Discrete fourier transform of A and B
- def __DFT(self, which):
- if which == "A":
- dft = [[x] for x in self.polyA]
- else:
- dft = [[x] for x in self.polyB]
+ def __dft(self, which):
+ dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(dft) <= 1:
return dft[0]
#
- next_ncol = self.C_max_length // 2
+ next_ncol = self.c_max_length // 2
while next_ncol > 0:
new_dft = [[] for i in range(next_ncol)]
- root = self.root ** next_ncol
+ root = self.root**next_ncol
# First half of next step
current_root = 1
- for j in range(self.C_max_length // (next_ncol * 2)):
+ for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(next_ncol):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
current_root = 1
- for j in range(self.C_max_length // (next_ncol * 2)):
+ for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(next_ncol):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
@@ -112,65 +109,65 @@ class FFT:
# multiply the DFTs of A and B and find A*B
def __multiply(self):
- dftA = self.__DFT("A")
- dftB = self.__DFT("B")
- inverseC = [[dftA[i] * dftB[i] for i in range(self.C_max_length)]]
- del dftA
- del dftB
+ dft_a = self.__dft("A")
+ dft_b = self.__dft("B")
+ inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
+ del dft_a
+ del dft_b
# Corner Case
- if len(inverseC[0]) <= 1:
- return inverseC[0]
+ if len(inverce_c[0]) <= 1:
+ return inverce_c[0]
# Inverse DFT
next_ncol = 2
- while next_ncol <= self.C_max_length:
- new_inverseC = [[] for i in range(next_ncol)]
+ while next_ncol <= self.c_max_length:
+ new_inverse_c = [[] for i in range(next_ncol)]
root = self.root ** (next_ncol // 2)
current_root = 1
# First half of next step
- for j in range(self.C_max_length // next_ncol):
+ for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
- new_inverseC[i].append(
+ new_inverse_c[i].append(
(
- inverseC[i][j]
- + inverseC[i][j + self.C_max_length // next_ncol]
+ inverce_c[i][j]
+ + inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2
)
# Odd positions
- new_inverseC[i + next_ncol // 2].append(
+ new_inverse_c[i + next_ncol // 2].append(
(
- inverseC[i][j]
- - inverseC[i][j + self.C_max_length // next_ncol]
+ inverce_c[i][j]
+ - inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root)
)
current_root *= root
# Update
- inverseC = new_inverseC
+ inverce_c = new_inverse_c
next_ncol *= 2
# Unpack
- inverseC = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverseC]
+ inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
# Remove leading 0's
- while inverseC[-1] == 0:
- inverseC.pop()
- return inverseC
+ while inverce_c[-1] == 0:
+ inverce_c.pop()
+ return inverce_c
# Overwrite __str__ for print(); Shows A, B and A*B
def __str__(self):
- A = "A = " + " + ".join(
+ a = "A = " + " + ".join(
f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A])
)
- B = "B = " + " + ".join(
+ b = "B = " + " + ".join(
f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B])
)
- C = "A*B = " + " + ".join(
+ c = "A*B = " + " + ".join(
f"{coef}*x^{i}" for coef, i in enumerate(self.product)
)
- return "\n".join((A, B, C))
+ return f"{a}\n{b}\n{c}"
# Unit tests
diff --git a/maths/remove_digit.py b/maths/remove_digit.py
new file mode 100644
index 000000000..db14ac902
--- /dev/null
+++ b/maths/remove_digit.py
@@ -0,0 +1,37 @@
+def remove_digit(num: int) -> int:
+ """
+
+ returns the biggest possible result
+ that can be achieved by removing
+ one digit from the given number
+
+ >>> remove_digit(152)
+ 52
+ >>> remove_digit(6385)
+ 685
+ >>> remove_digit(-11)
+ 1
+ >>> remove_digit(2222222)
+ 222222
+ >>> remove_digit("2222222")
+ Traceback (most recent call last):
+ TypeError: only integers accepted as input
+ >>> remove_digit("string input")
+ Traceback (most recent call last):
+ TypeError: only integers accepted as input
+ """
+
+ if not isinstance(num, int):
+ raise TypeError("only integers accepted as input")
+ else:
+ num_str = str(abs(num))
+ num_transpositions = [list(num_str) for char in range(len(num_str))]
+ for index in range(len(num_str)):
+ num_transpositions[index].pop(index)
+ return max(
+ int("".join(list(transposition))) for transposition in num_transpositions
+ )
+
+
+if __name__ == "__main__":
+ __import__("doctest").testmod()
diff --git a/maths/runge_kutta.py b/maths/runge_kutta.py
index 383797daa..4cac017ee 100644
--- a/maths/runge_kutta.py
+++ b/maths/runge_kutta.py
@@ -22,12 +22,12 @@ def runge_kutta(f, y0, x0, h, x_end):
>>> y[-1]
148.41315904125113
"""
- N = int(np.ceil((x_end - x0) / h))
- y = np.zeros((N + 1,))
+ n = int(np.ceil((x_end - x0) / h))
+ y = np.zeros((n + 1,))
y[0] = y0
x = x0
- for k in range(N):
+ for k in range(n):
k1 = f(x, y[k])
k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py
index c1cc497ad..e950a83b7 100644
--- a/maths/segmented_sieve.py
+++ b/maths/segmented_sieve.py
@@ -3,7 +3,7 @@
import math
-def sieve(n):
+def sieve(n: int) -> list[int]:
"""Segmented Sieve."""
in_prime = []
start = 2
@@ -15,20 +15,16 @@ def sieve(n):
if temp[start] is True:
in_prime.append(start)
for i in range(start * start, end + 1, start):
- if temp[i] is True:
- temp[i] = False
+ temp[i] = False
start += 1
prime += in_prime
low = end + 1
- high = low + end - 1
- if high > n:
- high = n
+ high = min(2 * end, n)
while low <= n:
temp = [True] * (high - low + 1)
for each in in_prime:
-
t = math.floor(low / each) * each
if t < low:
t += each
@@ -41,11 +37,9 @@ def sieve(n):
prime.append(j + low)
low = high + 1
- high = low + end - 1
- if high > n:
- high = n
+ high = min(high + end, n)
return prime
-print(sieve(10 ** 6))
+print(sieve(10**6))
diff --git a/maths/series/arithmetic_mean.py b/maths/series/arithmetic.py
similarity index 68%
rename from maths/series/arithmetic_mean.py
rename to maths/series/arithmetic.py
index b5d64b63a..dc28c5c7b 100644
--- a/maths/series/arithmetic_mean.py
+++ b/maths/series/arithmetic.py
@@ -1,20 +1,35 @@
"""
-ARITHMETIC MEAN : https://en.wikipedia.org/wiki/Arithmetic_mean
+Arithmetic mean
+Reference: https://en.wikipedia.org/wiki/Arithmetic_mean
+Arithmetic series
+Reference: https://en.wikipedia.org/wiki/Arithmetic_series
+(The URL above will redirect you to arithmetic progression)
"""
def is_arithmetic_series(series: list) -> bool:
"""
checking whether the input series is arithmetic series or not
-
>>> is_arithmetic_series([2, 4, 6])
True
>>> is_arithmetic_series([3, 6, 12, 24])
False
>>> is_arithmetic_series([1, 2, 3])
True
+ >>> is_arithmetic_series(4)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input series is not valid, valid series - [2, 4, 6]
+ >>> is_arithmetic_series([])
+ Traceback (most recent call last):
+ ...
+ ValueError: Input list must be a non empty list
"""
+ if not isinstance(series, list):
+ raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
+ if len(series) == 0:
+ raise ValueError("Input list must be a non empty list")
if len(series) == 1:
return True
common_diff = series[1] - series[0]
@@ -37,9 +52,7 @@ def arithmetic_mean(series: list) -> float:
...
ValueError: Input series is not valid, valid series - [2, 4, 6]
>>> arithmetic_mean([4, 8, 1])
- Traceback (most recent call last):
- ...
- ValueError: Input list is not an arithmetic series
+ 4.333333333333333
>>> arithmetic_mean([1, 2, 3])
2.0
>>> arithmetic_mean([])
@@ -52,8 +65,6 @@ def arithmetic_mean(series: list) -> float:
raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
if len(series) == 0:
raise ValueError("Input list must be a non empty list")
- if not is_arithmetic_series(series):
- raise ValueError("Input list is not an arithmetic series")
answer = 0
for val in series:
answer += val
diff --git a/maths/series/geometric_mean.py b/maths/series/geometric.py
similarity index 75%
rename from maths/series/geometric_mean.py
rename to maths/series/geometric.py
index 50ae54ad6..7b6239b15 100644
--- a/maths/series/geometric_mean.py
+++ b/maths/series/geometric.py
@@ -1,12 +1,15 @@
"""
-GEOMETRIC MEAN : https://en.wikipedia.org/wiki/Geometric_mean
+Geometric Mean
+Reference : https://en.wikipedia.org/wiki/Geometric_mean
+
+Geometric series
+Reference: https://en.wikipedia.org/wiki/Geometric_series
"""
def is_geometric_series(series: list) -> bool:
"""
checking whether the input series is geometric series or not
-
>>> is_geometric_series([2, 4, 8])
True
>>> is_geometric_series([3, 6, 12, 24])
@@ -15,8 +18,19 @@ def is_geometric_series(series: list) -> bool:
False
>>> is_geometric_series([0, 0, 3])
False
-
+ >>> is_geometric_series([])
+ Traceback (most recent call last):
+ ...
+ ValueError: Input list must be a non empty list
+ >>> is_geometric_series(4)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input series is not valid, valid series - [2, 4, 8]
"""
+ if not isinstance(series, list):
+ raise ValueError("Input series is not valid, valid series - [2, 4, 8]")
+ if len(series) == 0:
+ raise ValueError("Input list must be a non empty list")
if len(series) == 1:
return True
try:
@@ -44,13 +58,9 @@ def geometric_mean(series: list) -> float:
...
ValueError: Input series is not valid, valid series - [2, 4, 8]
>>> geometric_mean([1, 2, 3])
- Traceback (most recent call last):
- ...
- ValueError: Input list is not a geometric series
+ 1.8171205928321397
>>> geometric_mean([0, 2, 3])
- Traceback (most recent call last):
- ...
- ValueError: Input list is not a geometric series
+ 0.0
>>> geometric_mean([])
Traceback (most recent call last):
...
@@ -61,8 +71,6 @@ def geometric_mean(series: list) -> float:
raise ValueError("Input series is not valid, valid series - [2, 4, 8]")
if len(series) == 0:
raise ValueError("Input list must be a non empty list")
- if not is_geometric_series(series):
- raise ValueError("Input list is not a geometric series")
answer = 1
for value in series:
answer *= value
diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py
index d12382e6d..90c9fe77b 100644
--- a/maths/series/geometric_series.py
+++ b/maths/series/geometric_series.py
@@ -1,7 +1,6 @@
"""
This is a pure Python implementation of the Geometric Series algorithm
https://en.wikipedia.org/wiki/Geometric_series
-
Run the doctests with the following command:
python3 -m doctest -v geometric_series.py
or
@@ -11,8 +10,17 @@ python3 geometric_series.py
"""
-def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> list:
- """Pure Python implementation of Geometric Series algorithm
+from __future__ import annotations
+
+
+def geometric_series(
+ nth_term: float | int,
+ start_term_a: float | int,
+ common_ratio_r: float | int,
+) -> list[float | int]:
+ """
+ Pure Python implementation of Geometric Series algorithm
+
:param nth_term: The last term (nth term of Geometric Series)
:param start_term_a : The first term of Geometric Series
:param common_ratio_r : The common ratio between all the terms
@@ -20,15 +28,15 @@ def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> l
ration with first term with increase in power till last term (nth term)
Examples:
>>> geometric_series(4, 2, 2)
- [2, '4.0', '8.0', '16.0']
+ [2, 4.0, 8.0, 16.0]
>>> geometric_series(4.0, 2.0, 2.0)
- [2.0, '4.0', '8.0', '16.0']
+ [2.0, 4.0, 8.0, 16.0]
>>> geometric_series(4.1, 2.1, 2.1)
- [2.1, '4.41', '9.261000000000001', '19.448100000000004']
+ [2.1, 4.41, 9.261000000000001, 19.448100000000004]
>>> geometric_series(4, 2, -2)
- [2, '-4.0', '8.0', '-16.0']
+ [2, -4.0, 8.0, -16.0]
>>> geometric_series(4, -2, 2)
- [-2, '-4.0', '-8.0', '-16.0']
+ [-2, -4.0, -8.0, -16.0]
>>> geometric_series(-4, 2, 2)
[]
>>> geometric_series(0, 100, 500)
@@ -38,26 +46,30 @@ def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> l
>>> geometric_series(0, 0, 0)
[]
"""
- if "" in (nth_term, start_term_a, common_ratio_r):
- return ""
- series = []
+ if not all((nth_term, start_term_a, common_ratio_r)):
+ return []
+ series: list[float | int] = []
power = 1
multiple = common_ratio_r
for _ in range(int(nth_term)):
- if series == []:
+ if not series:
series.append(start_term_a)
else:
power += 1
- series.append(str(float(start_term_a) * float(multiple)))
+ series.append(float(start_term_a * multiple))
multiple = pow(float(common_ratio_r), power)
return series
if __name__ == "__main__":
- nth_term = input("Enter the last number (n term) of the Geometric Series")
- start_term_a = input("Enter the starting term (a) of the Geometric Series")
- common_ratio_r = input(
- "Enter the common ratio between two terms (r) of the Geometric Series"
+ import doctest
+
+ doctest.testmod()
+
+ nth_term = float(input("Enter the last number (n term) of the Geometric Series"))
+ start_term_a = float(input("Enter the starting term (a) of the Geometric Series"))
+ common_ratio_r = float(
+ input("Enter the common ratio between two terms (r) of the Geometric Series")
)
print("Formula of Geometric Series => a + ar + ar^2 ... +ar^n")
print(geometric_series(nth_term, start_term_a, common_ratio_r))
diff --git a/maths/series/harmonic.py b/maths/series/harmonic.py
new file mode 100644
index 000000000..50f29c93d
--- /dev/null
+++ b/maths/series/harmonic.py
@@ -0,0 +1,92 @@
+"""
+Harmonic mean
+Reference: https://en.wikipedia.org/wiki/Harmonic_mean
+
+Harmonic series
+Reference: https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)
+"""
+
+
+def is_harmonic_series(series: list) -> bool:
+ """
+    checking whether the input series is a harmonic series or not
+ >>> is_harmonic_series([ 1, 2/3, 1/2, 2/5, 1/3])
+ True
+ >>> is_harmonic_series([ 1, 2/3, 2/5, 1/3])
+ False
+ >>> is_harmonic_series([1, 2, 3])
+ False
+ >>> is_harmonic_series([1/2, 1/3, 1/4])
+ True
+ >>> is_harmonic_series([2/5, 2/10, 2/15, 2/20, 2/25])
+ True
+ >>> is_harmonic_series(4)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input series is not valid, valid series - [1, 2/3, 2]
+ >>> is_harmonic_series([])
+ Traceback (most recent call last):
+ ...
+ ValueError: Input list must be a non empty list
+ >>> is_harmonic_series([0])
+ Traceback (most recent call last):
+ ...
+ ValueError: Input series cannot have 0 as an element
+ >>> is_harmonic_series([1,2,0,6])
+ Traceback (most recent call last):
+ ...
+ ValueError: Input series cannot have 0 as an element
+ """
+ if not isinstance(series, list):
+ raise ValueError("Input series is not valid, valid series - [1, 2/3, 2]")
+ if len(series) == 0:
+ raise ValueError("Input list must be a non empty list")
+ if len(series) == 1 and series[0] != 0:
+ return True
+ rec_series = []
+ series_len = len(series)
+ for i in range(0, series_len):
+ if series[i] == 0:
+ raise ValueError("Input series cannot have 0 as an element")
+ rec_series.append(1 / series[i])
+ common_diff = rec_series[1] - rec_series[0]
+ for index in range(2, series_len):
+ if rec_series[index] - rec_series[index - 1] != common_diff:
+ return False
+ return True
+
+
+def harmonic_mean(series: list) -> float:
+ """
+ return the harmonic mean of series
+
+ >>> harmonic_mean([1, 4, 4])
+ 2.0
+ >>> harmonic_mean([3, 6, 9, 12])
+ 5.759999999999999
+ >>> harmonic_mean(4)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input series is not valid, valid series - [2, 4, 6]
+ >>> harmonic_mean([1, 2, 3])
+ 1.6363636363636365
+ >>> harmonic_mean([])
+ Traceback (most recent call last):
+ ...
+ ValueError: Input list must be a non empty list
+
+ """
+ if not isinstance(series, list):
+ raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
+ if len(series) == 0:
+ raise ValueError("Input list must be a non empty list")
+ answer = 0
+ for val in series:
+ answer += 1 / val
+ return len(series) / answer
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/series/harmonic_series.py b/maths/series/harmonic_series.py
index 91b594458..d42d13d91 100644
--- a/maths/series/harmonic_series.py
+++ b/maths/series/harmonic_series.py
@@ -33,8 +33,8 @@ def harmonic_series(n_term: str) -> list:
['1']
"""
if n_term == "":
- return n_term
- series = []
+ return []
+ series: list = []
for temp in range(int(n_term)):
series.append(f"1/{temp + 1}" if series else "1")
return series
diff --git a/maths/series/hexagonal_numbers.py b/maths/series/hexagonal_numbers.py
new file mode 100644
index 000000000..582b1989b
--- /dev/null
+++ b/maths/series/hexagonal_numbers.py
@@ -0,0 +1,42 @@
+"""
+A hexagonal number sequence is a sequence of figurate numbers
+where the nth hexagonal number hₙ is the number of distinct dots
+in a pattern of dots consisting of the outlines of regular
+hexagons with sides up to n dots, when the hexagons are overlaid
+so that they share one vertex.
+
+ Calculates the hexagonal numbers sequence with a formula
+ hₙ = n(2n-1)
+ where:
+ hₙ --> is nth element of the sequence
+ n --> is the number of element in the sequence
+ reference-->"Hexagonal number" Wikipedia
+
+"""
+
+
+def hexagonal_numbers(length: int) -> list[int]:
+ """
+    :param length: max number of elements
+    :type length: int
+ :return: Hexagonal numbers as a list
+
+ Tests:
+ >>> hexagonal_numbers(10)
+ [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
+ >>> hexagonal_numbers(5)
+ [0, 1, 6, 15, 28]
+ >>> hexagonal_numbers(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Length must be a positive integer.
+ """
+
+ if length <= 0 or not isinstance(length, int):
+ raise ValueError("Length must be a positive integer.")
+ return [n * (2 * n - 1) for n in range(length)]
+
+
+if __name__ == "__main__":
+ print(hexagonal_numbers(length=5))
+ print(hexagonal_numbers(length=10))
diff --git a/maths/series/p_series.py b/maths/series/p_series.py
index 04019aed5..34fa3f239 100644
--- a/maths/series/p_series.py
+++ b/maths/series/p_series.py
@@ -1,48 +1,52 @@
"""
This is a pure Python implementation of the P-Series algorithm
https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#P-series
-
For doctests run following command:
python -m doctest -v p_series.py
or
python3 -m doctest -v p_series.py
-
For manual testing run:
python3 p_series.py
"""
-def p_series(nth_term: int, power: int) -> list:
- """Pure Python implementation of P-Series algorithm
+from __future__ import annotations
+
+def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
+ """
+ Pure Python implementation of P-Series algorithm
:return: The P-Series starting from 1 to last (nth) term
-
Examples:
>>> p_series(5, 2)
- [1, '1/4', '1/9', '1/16', '1/25']
+ ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
>>> p_series(-5, 2)
[]
>>> p_series(5, -2)
- [1, '1/0.25', '1/0.1111111111111111', '1/0.0625', '1/0.04']
+ ['1', '1 / 0.25', '1 / 0.1111111111111111', '1 / 0.0625', '1 / 0.04']
>>> p_series("", 1000)
- ''
+ ['']
>>> p_series(0, 0)
[]
>>> p_series(1, 1)
- [1]
+ ['1']
"""
if nth_term == "":
- return nth_term
+ return [""]
nth_term = int(nth_term)
power = int(power)
- series = []
+ series: list[str] = []
for temp in range(int(nth_term)):
- series.append(f"1/{pow(temp + 1, int(power))}" if series else 1)
+ series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
return series
if __name__ == "__main__":
- nth_term = input("Enter the last number (nth term) of the P-Series")
- power = input("Enter the power for P-Series")
+ import doctest
+
+ doctest.testmod()
+
+ nth_term = int(input("Enter the last number (nth term) of the P-Series"))
+ power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py
index 47a086546..a0520aa5c 100644
--- a/maths/sieve_of_eratosthenes.py
+++ b/maths/sieve_of_eratosthenes.py
@@ -10,13 +10,12 @@ Reference: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich)
Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem
"""
-
+from __future__ import annotations
import math
-from typing import List
-def prime_sieve(num: int) -> List[int]:
+def prime_sieve(num: int) -> list[int]:
"""
Returns a list with all prime numbers up to n.
@@ -35,7 +34,8 @@ def prime_sieve(num: int) -> List[int]:
"""
if num <= 0:
- raise ValueError(f"{num}: Invalid input, please enter a positive integer.")
+ msg = f"{num}: Invalid input, please enter a positive integer."
+ raise ValueError(msg)
sieve = [True] * (num + 1)
prime = []
diff --git a/maths/sigmoid_linear_unit.py b/maths/sigmoid_linear_unit.py
new file mode 100644
index 000000000..0ee09bf82
--- /dev/null
+++ b/maths/sigmoid_linear_unit.py
@@ -0,0 +1,55 @@
+"""
+This script demonstrates the implementation of the Sigmoid Linear Unit (SiLU)
+or swish function.
+* https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
+* https://en.wikipedia.org/wiki/Swish_function
+
+The function takes a vector x of K real numbers as input and returns x * sigmoid(x).
+Swish is a smooth, non-monotonic function defined as f(x) = x * sigmoid(x).
+Extensive experiments shows that Swish consistently matches or outperforms ReLU
+on deep networks applied to a variety of challenging domains such as
+image classification and machine translation.
+
+This script is inspired by a corresponding research paper.
+* https://arxiv.org/abs/1710.05941
+"""
+
+import numpy as np
+
+
+def sigmoid(vector: np.ndarray) -> np.ndarray:
+ """
+ Mathematical function sigmoid takes a vector x of K real numbers as input and
+ returns 1/ (1 + e^-x).
+ https://en.wikipedia.org/wiki/Sigmoid_function
+
+ >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
+ array([0.26894142, 0.73105858, 0.88079708])
+ """
+ return 1 / (1 + np.exp(-vector))
+
+
+def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
+ """
+ Implements the Sigmoid Linear Unit (SiLU) or swish function
+
+ Parameters:
+ vector (np.ndarray): A numpy array consisting of real values
+
+ Returns:
+ swish_vec (np.ndarray): The input numpy array, after applying swish
+
+ Examples:
+ >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
+ array([-0.26894142, 0.73105858, 1.76159416])
+
+ >>> sigmoid_linear_unit(np.array([-2]))
+ array([-0.23840584])
+ """
+ return vector * sigmoid(vector)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/signum.py b/maths/signum.py
new file mode 100644
index 000000000..148f93176
--- /dev/null
+++ b/maths/signum.py
@@ -0,0 +1,34 @@
+"""
+Signum function -- https://en.wikipedia.org/wiki/Sign_function
+"""
+
+
+def signum(num: float) -> int:
+ """
+ Applies signum function on the number
+
+ >>> signum(-10)
+ -1
+ >>> signum(10)
+ 1
+ >>> signum(0)
+ 0
+ """
+ if num < 0:
+ return -1
+ return 1 if num else 0
+
+
+def test_signum() -> None:
+ """
+ Tests the signum function
+ """
+ assert signum(5) == 1
+ assert signum(-5) == -1
+ assert signum(0) == 0
+
+
+if __name__ == "__main__":
+ print(signum(12))
+ print(signum(-12))
+ print(signum(0))
diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py
new file mode 100644
index 000000000..1287b2002
--- /dev/null
+++ b/maths/simultaneous_linear_equation_solver.py
@@ -0,0 +1,142 @@
+"""
+https://en.wikipedia.org/wiki/Augmented_matrix
+
+This algorithm solves simultaneous linear equations of the form
+λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ]
+Where λ & γ are individual coefficients, the no. of equations = no. of coefficients - 1
+
+Note in order to work there must exist 1 equation where all instances of λ and γ != 0
+"""
+
+
def simplify(current_set: list[list]) -> list[list]:
    """
    Reduce an augmented matrix towards upper-triangular ("simplified") form.

    Each row is normalised by its leading coefficient, the first row is used
    to cancel the leading term of every later row, and the procedure recurses
    on the remaining sub-matrix until rows have only 3 entries left.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    >>> simplify([[5, 2, 5], [5, 1, 10]])
    [[1.0, 0.4, 1.0], [0.0, 0.2, -1.0]]
    """
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            # A zero leading coefficient would divide by zero; keep row as-is.
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        # Strip the (already cancelled) first column, recurse on the rest,
        # then re-attach the stripped column and row afterwards.
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
+
+
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve n simultaneous linear equations, each given as a list of the n
    coefficients followed by the constant term (n lists of length n + 1).

    >>> solve_simultaneous([[1, 2, 3],[4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[0, -3, 1, 7],[3, 2, -1, 11],[5, 1, -2, 12]])
    [6.4, 1.2, 10.6]
    >>> solve_simultaneous([])
    Traceback (most recent call last):
        ...
    IndexError: solve_simultaneous() requires n lists of length n+1
    >>> solve_simultaneous([[1, 2, 3],[1, 2]])
    Traceback (most recent call last):
        ...
    IndexError: solve_simultaneous() requires n lists of length n+1
    >>> solve_simultaneous([[1, 2, 3],["a", 7, 8]])
    Traceback (most recent call last):
        ...
    ValueError: solve_simultaneous() requires lists of integers
    >>> solve_simultaneous([[0, 2, 3],[4, 0, 6]])
    Traceback (most recent call last):
        ...
    ValueError: solve_simultaneous() requires at least 1 full equation
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        # Single equation a*x = b  =>  x = b / a
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        # Move one equation with no zero coefficients to the front so that
        # simplify() never normalises by a zero leading term.
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the most reduced row upwards.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            # First (shortest) row: one unknown left, solve directly.
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        # Subtract the already-known solutions scaled by their coefficients.
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ eq = [
+ [2, 1, 1, 1, 1, 4],
+ [1, 2, 1, 1, 1, 5],
+ [1, 1, 2, 1, 1, 6],
+ [1, 1, 1, 2, 1, 7],
+ [1, 1, 1, 1, 2, 8],
+ ]
+ print(solve_simultaneous(eq))
+ print(solve_simultaneous([[4, 2]]))
diff --git a/maths/sin.py b/maths/sin.py
new file mode 100644
index 000000000..b06e6c9f1
--- /dev/null
+++ b/maths/sin.py
@@ -0,0 +1,64 @@
+"""
+Calculate sin function.
+
+The Taylor-series approximation is not exact, so the result is rounded to
+10 decimal places by default.
+
+Formula: sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
+Where: x = angle in radians.
+
+Source:
+ https://www.homeschoolmath.net/teaching/sine_calculator.php
+
+"""
+
+from math import factorial, radians
+
+
def sin(
    angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10
) -> float:
    """
    Approximate sin of an angle given in degrees via its Maclaurin series
    sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ... (x in radians).

    >>> sin(0.0)
    0.0
    >>> sin(90.0)
    1.0
    >>> sin(180.0)
    0.0
    >>> sin(270.0)
    -1.0
    >>> sin(0.68)
    0.0118679603
    >>> sin(1.97)
    0.0343762121
    >>> sin(64.0)
    0.8987940463
    >>> sin(9999.0)
    -0.9876883406
    >>> sin(-689.0)
    0.5150380749
    >>> sin(89.7)
    0.9999862922
    """
    # Reduce the angle to the interval (-360, 360) degrees for convergence.
    angle_in_degrees -= (angle_in_degrees // 360.0) * 360.0

    # The series is evaluated in radians.
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    sign = -1
    for term_index in range(accuracy):
        exponent = 2 * term_index + 3  # odd powers 3, 5, 7, ...
        result += (sign * (angle_in_radians**exponent)) / factorial(exponent)
        sign = -sign  # successive terms alternate in sign

    return round(result, rounded_values_count)
+
+
+if __name__ == "__main__":
+ __import__("doctest").testmod()
diff --git a/maths/sock_merchant.py b/maths/sock_merchant.py
new file mode 100644
index 000000000..304efec9b
--- /dev/null
+++ b/maths/sock_merchant.py
@@ -0,0 +1,20 @@
+from collections import Counter
+
+
def sock_merchant(colors: list[int]) -> int:
    """
    Count how many matching pairs of socks can be formed from a pile,
    where each element of ``colors`` is one sock's color.

    >>> sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20])
    3
    >>> sock_merchant([1, 1, 3, 3])
    2
    """
    pairs = 0
    # Each color contributes count // 2 complete pairs.
    for count in Counter(colors).values():
        pairs += count // 2
    return pairs
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
+ colors = [int(x) for x in input("Enter socks by color :").rstrip().split()]
+ print(f"sock_merchant({colors}) = {sock_merchant(colors)}")
diff --git a/maths/softmax.py b/maths/softmax.py
index e021a7f8a..04cf77525 100644
--- a/maths/softmax.py
+++ b/maths/softmax.py
@@ -41,13 +41,13 @@ def softmax(vector):
# Calculate e^x for each x in your vector where e is Euler's
# number (approximately 2.718)
- exponentVector = np.exp(vector)
+ exponent_vector = np.exp(vector)
# Add up the all the exponentials
- sumOfExponents = np.sum(exponentVector)
+ sum_of_exponents = np.sum(exponent_vector)
# Divide every exponent by the sum of all exponents
- softmax_vector = exponentVector / sumOfExponents
+ softmax_vector = exponent_vector / sum_of_exponents
return softmax_vector
diff --git a/maths/square_root.py b/maths/square_root.py
index b324c7230..2cbf14bea 100644
--- a/maths/square_root.py
+++ b/maths/square_root.py
@@ -49,7 +49,7 @@ def square_root_iterative(
value = get_initial_point(a)
- for i in range(max_iter):
+ for _ in range(max_iter):
prev_value = value
value = value - fx(value, a) / fx_derivative(value)
if abs(prev_value - value) < tolerance:
diff --git a/maths/sum_of_arithmetic_series.py b/maths/sum_of_arithmetic_series.py
index 74eef0f18..3e381b8c2 100644
--- a/maths/sum_of_arithmetic_series.py
+++ b/maths/sum_of_arithmetic_series.py
@@ -1,5 +1,5 @@
# DarkCoder
-def sum_of_series(first_term, common_diff, num_of_terms):
+def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
"""
Find the sum of n terms in an arithmetic progression.
@@ -8,9 +8,9 @@ def sum_of_series(first_term, common_diff, num_of_terms):
>>> sum_of_series(1, 10, 100)
49600.0
"""
- sum = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
+ total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
- return sum
+ return total
def main():
diff --git a/maths/sum_of_digits.py b/maths/sum_of_digits.py
index 64da00d46..d5488bb9e 100644
--- a/maths/sum_of_digits.py
+++ b/maths/sum_of_digits.py
@@ -1,10 +1,6 @@
-from timeit import timeit
-
-
def sum_of_digits(n: int) -> int:
"""
Find the sum of digits of a number.
-
>>> sum_of_digits(12345)
15
>>> sum_of_digits(123)
@@ -14,18 +10,17 @@ def sum_of_digits(n: int) -> int:
>>> sum_of_digits(0)
0
"""
- n = -n if n < 0 else n
+ n = abs(n)
res = 0
while n > 0:
res += n % 10
- n = n // 10
+ n //= 10
return res
def sum_of_digits_recursion(n: int) -> int:
"""
Find the sum of digits of a number using recursion
-
>>> sum_of_digits_recursion(12345)
15
>>> sum_of_digits_recursion(123)
@@ -35,14 +30,13 @@ def sum_of_digits_recursion(n: int) -> int:
>>> sum_of_digits_recursion(0)
0
"""
- n = -n if n < 0 else n
+ n = abs(n)
return n if n < 10 else n % 10 + sum_of_digits(n // 10)
def sum_of_digits_compact(n: int) -> int:
"""
Find the sum of digits of a number
-
>>> sum_of_digits_compact(12345)
15
>>> sum_of_digits_compact(123)
@@ -57,93 +51,24 @@ def sum_of_digits_compact(n: int) -> int:
def benchmark() -> None:
"""
- Benchmark code for comparing 3 functions,
- with 3 different length int values.
+ Benchmark multiple functions, with three different length int values.
"""
- print("\nFor small_num = ", small_num, ":")
- print(
- "> sum_of_digits()",
- "\t\tans =",
- sum_of_digits(small_num),
- "\ttime =",
- timeit("z.sum_of_digits(z.small_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> sum_of_digits_recursion()",
- "\tans =",
- sum_of_digits_recursion(small_num),
- "\ttime =",
- timeit("z.sum_of_digits_recursion(z.small_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> sum_of_digits_compact()",
- "\tans =",
- sum_of_digits_compact(small_num),
- "\ttime =",
- timeit("z.sum_of_digits_compact(z.small_num)", setup="import __main__ as z"),
- "seconds",
- )
+ from collections.abc import Callable
+ from timeit import timeit
- print("\nFor medium_num = ", medium_num, ":")
- print(
- "> sum_of_digits()",
- "\t\tans =",
- sum_of_digits(medium_num),
- "\ttime =",
- timeit("z.sum_of_digits(z.medium_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> sum_of_digits_recursion()",
- "\tans =",
- sum_of_digits_recursion(medium_num),
- "\ttime =",
- timeit("z.sum_of_digits_recursion(z.medium_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> sum_of_digits_compact()",
- "\tans =",
- sum_of_digits_compact(medium_num),
- "\ttime =",
- timeit("z.sum_of_digits_compact(z.medium_num)", setup="import __main__ as z"),
- "seconds",
- )
+ def benchmark_a_function(func: Callable, value: int) -> None:
+ call = f"{func.__name__}({value})"
+ timing = timeit(f"__main__.{call}", setup="import __main__")
+ print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
- print("\nFor large_num = ", large_num, ":")
- print(
- "> sum_of_digits()",
- "\t\tans =",
- sum_of_digits(large_num),
- "\ttime =",
- timeit("z.sum_of_digits(z.large_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> sum_of_digits_recursion()",
- "\tans =",
- sum_of_digits_recursion(large_num),
- "\ttime =",
- timeit("z.sum_of_digits_recursion(z.large_num)", setup="import __main__ as z"),
- "seconds",
- )
- print(
- "> sum_of_digits_compact()",
- "\tans =",
- sum_of_digits_compact(large_num),
- "\ttime =",
- timeit("z.sum_of_digits_compact(z.large_num)", setup="import __main__ as z"),
- "seconds",
- )
+ for value in (262144, 1125899906842624, 1267650600228229401496703205376):
+ for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
+ benchmark_a_function(func, value)
+ print()
if __name__ == "__main__":
- small_num = 262144
- medium_num = 1125899906842624
- large_num = 1267650600228229401496703205376
- benchmark()
import doctest
doctest.testmod()
+ benchmark()
diff --git a/maths/sum_of_geometric_progression.py b/maths/sum_of_geometric_progression.py
index f29dd8005..9079f35af 100644
--- a/maths/sum_of_geometric_progression.py
+++ b/maths/sum_of_geometric_progression.py
@@ -25,4 +25,4 @@ def sum_of_geometric_progression(
return num_of_terms * first_term
# Formula for finding sum of n terms of a GeometricProgression
- return (first_term / (1 - common_ratio)) * (1 - common_ratio ** num_of_terms)
+ return (first_term / (1 - common_ratio)) * (1 - common_ratio**num_of_terms)
diff --git a/maths/sum_of_harmonic_series.py b/maths/sum_of_harmonic_series.py
new file mode 100644
index 000000000..9e0d6b19b
--- /dev/null
+++ b/maths/sum_of_harmonic_series.py
@@ -0,0 +1,29 @@
def sum_of_harmonic_progression(
    first_term: float, common_difference: float, number_of_terms: int
) -> float:
    """
    https://en.wikipedia.org/wiki/Harmonic_progression_(mathematics)

    Find the sum of n terms of a harmonic progression. The reciprocals of
    the HP's terms form an arithmetic progression that starts at
    1 / first_term and grows by common_difference; each HP term is the
    reciprocal of the corresponding AP term.

    >>> sum_of_harmonic_progression(1 / 2, 2, 2)
    0.75
    >>> sum_of_harmonic_progression(1 / 5, 5, 5)
    0.45666666666666667
    """
    reciprocal = 1 / first_term
    arithmetic_terms = [reciprocal]
    for _ in range(number_of_terms - 1):
        reciprocal += common_difference
        arithmetic_terms.append(reciprocal)
    return sum(1 / term for term in arithmetic_terms)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ print(sum_of_harmonic_progression(1 / 2, 2, 2))
diff --git a/maths/sumset.py b/maths/sumset.py
new file mode 100644
index 000000000..fa18f9e24
--- /dev/null
+++ b/maths/sumset.py
@@ -0,0 +1,37 @@
+"""
+
+Calculates the SumSet of two sets of numbers (A and B)
+
+Source:
+ https://en.wikipedia.org/wiki/Sumset
+
+"""
+
+
def sumset(set_a: set, set_b: set) -> set:
    """
    Calculate the sumset of two sets of numbers.

    :param set_a: a set of numbers
    :param set_b: a set of numbers
    :return: the sumset {a + b for every a in set_a and b in set_b}

    >>> sumset({1, 2, 3}, {4, 5, 6})
    {5, 6, 7, 8, 9}

    >>> sumset({1, 2, 3}, {4, 5, 6, 7})
    {5, 6, 7, 8, 9, 10}

    >>> sumset({1, 2, 3, 4}, 3)
    Traceback (most recent call last):
    ...
    AssertionError: The input value of [set_b=3] is not a set
    """
    # Asserts (not TypeError) are kept because the doctests above pin
    # AssertionError; note asserts are stripped under ``python -O``.
    assert isinstance(set_a, set), f"The input value of [set_a={set_a}] is not a set"
    assert isinstance(set_b, set), f"The input value of [set_b={set_b}] is not a set"

    return {a + b for a in set_a for b in set_b}
+
+
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py
new file mode 100644
index 000000000..607424c6a
--- /dev/null
+++ b/maths/sylvester_sequence.py
@@ -0,0 +1,44 @@
+"""
+
+Calculates the nth number in Sylvester's sequence
+
+Source:
+ https://en.wikipedia.org/wiki/Sylvester%27s_sequence
+
+"""
+
+
def sylvester(number: int) -> int:
    """
    Return the nth number in Sylvester's sequence:
    a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1.

    Computed iteratively, so large ``number`` values do not hit Python's
    recursion limit (the recursive formulation raises RecursionError
    for number around 1000).

    :param number: nth number to calculate in the sequence
    :return: the nth number in Sylvester's sequence

    >>> sylvester(8)
    113423713055421844361000443

    >>> sylvester(-1)
    Traceback (most recent call last):
    ...
    ValueError: The input value of [n=-1] has to be > 0

    >>> sylvester(8.0)
    Traceback (most recent call last):
    ...
    AssertionError: The input value of [n=8.0] is not an integer
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)

    # a(n) = a(n-1) * (a(n-1) - 1) + 1, starting from a(1) = 2.
    num = 2
    for _ in range(number - 1):
        num = num * (num - 1) + 1
    return num
+
+
+if __name__ == "__main__":
+ print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
diff --git a/maths/tanh.py b/maths/tanh.py
new file mode 100644
index 000000000..ddab3e1ab
--- /dev/null
+++ b/maths/tanh.py
@@ -0,0 +1,42 @@
+"""
+This script demonstrates the implementation of the tangent hyperbolic
+or tanh function.
+
+The function takes a vector of K real numbers as input and
+applies (e^x - e^(-x))/(e^x + e^(-x)) element-wise. After passing
+through tanh, each element of the vector lies between -1 and 1.
+
+Script inspired from its corresponding Wikipedia article
+https://en.wikipedia.org/wiki/Activation_function
+"""
+import numpy as np
+
+
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements the hyperbolic tangent (tanh) activation function element-wise.

    tanh(x) = (e^x - e^(-x)) / (e^x + e^(-x)) = 2 / (1 + e^(-2x)) - 1,
    which maps each element into the open interval (-1, 1).

    Parameters:
        vector (np.ndarray): input array of real numbers

    Returns:
        np.ndarray: the input numpy array after applying tanh.

    Examples:
    >>> tangent_hyperbolic(np.array([1,5,6,-0.67]))
    array([ 0.76159416,  0.9999092 ,  0.99998771, -0.58497988])

    >>> tangent_hyperbolic(np.array([8,10,2,-0.98,13]))
    array([ 0.99999977,  1.        ,  0.96402758, -0.7530659 ,  1.        ])

    """
    # np.ndarray (not np.array, which is a function) is the correct annotation.
    return (2 / (1 + np.exp(-2 * vector))) - 1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/test_prime_check.py b/maths/test_prime_check.py
index b6389684a..3ea3b2f1f 100644
--- a/maths/test_prime_check.py
+++ b/maths/test_prime_check.py
@@ -1,6 +1,6 @@
"""
Minimalist file that allows pytest to find and run the Test unittest. For details, see:
-http://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery
+https://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery
"""
from .prime_check import Test
diff --git a/maths/triplet_sum.py b/maths/triplet_sum.py
index 22fab17d3..af77ed145 100644
--- a/maths/triplet_sum.py
+++ b/maths/triplet_sum.py
@@ -19,7 +19,7 @@ def make_dataset() -> tuple[list[int], int]:
dataset = make_dataset()
-def triplet_sum1(arr: list[int], target: int) -> tuple[int, int, int]:
+def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
"""
Returns a triplet in the array with sum equal to target,
else (0, 0, 0).
diff --git a/maths/twin_prime.py b/maths/twin_prime.py
new file mode 100644
index 000000000..912b10b36
--- /dev/null
+++ b/maths/twin_prime.py
@@ -0,0 +1,46 @@
+"""
+== Twin Prime ==
+A number n+2 is said to be a Twin prime of number n if
+both n and n+2 are prime.
+
+Examples of Twin pairs: (3, 5), (5, 7), (11, 13), (17, 19), (29, 31), (41, 43), ...
+https://en.wikipedia.org/wiki/Twin_prime
+"""
+
+# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
+from maths.prime_check import is_prime
+
+
def twin_prime(number: int) -> int:
    """
    Return the twin prime of ``number``.

    Takes an integer ``number`` and returns number + 2 if both number and
    number + 2 are prime, and -1 otherwise.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    >>> twin_prime(5)
    7
    >>> twin_prime(17)
    19
    >>> twin_prime(0)
    -1
    >>> twin_prime(6.0)
    Traceback (most recent call last):
        ...
    TypeError: Input value of [number=6.0] must be an integer
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    # (n, n + 2) is a twin-prime pair only when both members are prime.
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/maths/two_pointer.py b/maths/two_pointer.py
index ff234cddc..d0fb0fc9c 100644
--- a/maths/two_pointer.py
+++ b/maths/two_pointer.py
@@ -43,7 +43,6 @@ def two_pointer(nums: list[int], target: int) -> list[int]:
j = len(nums) - 1
while i < j:
-
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
diff --git a/maths/two_sum.py b/maths/two_sum.py
index 5209acbc7..12ad332d6 100644
--- a/maths/two_sum.py
+++ b/maths/two_sum.py
@@ -31,7 +31,7 @@ def two_sum(nums: list[int], target: int) -> list[int]:
>>> two_sum([3 * i for i in range(10)], 19)
[]
"""
- chk_map = {}
+ chk_map: dict[int, int] = {}
for index, val in enumerate(nums):
compl = target - val
if compl in chk_map:
diff --git a/maths/ugly_numbers.py b/maths/ugly_numbers.py
index 4451a68cd..81bd928c6 100644
--- a/maths/ugly_numbers.py
+++ b/maths/ugly_numbers.py
@@ -32,7 +32,7 @@ def ugly_numbers(n: int) -> int:
next_3 = ugly_nums[i3] * 3
next_5 = ugly_nums[i5] * 5
- for i in range(1, n):
+ for _ in range(1, n):
next_num = min(next_2, next_3, next_5)
ugly_nums.append(next_num)
if next_num == next_2:
diff --git a/maths/volume.py b/maths/volume.py
index 41d2331db..1da4584c8 100644
--- a/maths/volume.py
+++ b/maths/volume.py
@@ -1,62 +1,237 @@
"""
-Find Volumes of Various Shapes.
+Find the volume of various shapes.
+* https://en.wikipedia.org/wiki/Volume
+* https://en.wikipedia.org/wiki/Spherical_cap
+"""
+from __future__ import annotations
-Wikipedia reference: https://en.wikipedia.org/wiki/Volume
-"""
from math import pi, pow
-from typing import Union
-def vol_cube(side_length: Union[int, float]) -> float:
+def vol_cube(side_length: int | float) -> float:
"""
Calculate the Volume of a Cube.
-
>>> vol_cube(1)
1.0
>>> vol_cube(3)
27.0
+ >>> vol_cube(0)
+ 0.0
+ >>> vol_cube(1.6)
+ 4.096000000000001
+ >>> vol_cube(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_cube() only accepts non-negative values
"""
+ if side_length < 0:
+ raise ValueError("vol_cube() only accepts non-negative values")
return pow(side_length, 3)
def vol_spherical_cap(height: float, radius: float) -> float:
    """
    Calculate the volume of a spherical cap of the given height cut from a
    sphere of the given radius: V = (pi * h^2 / 3) * (3r - h).

    >>> vol_spherical_cap(1, 2)
    5.235987755982988
    >>> vol_spherical_cap(1.6, 2.6)
    16.621119532592402
    >>> vol_spherical_cap(0, 0)
    0.0
    >>> vol_spherical_cap(-1, 2)
    Traceback (most recent call last):
    ...
    ValueError: vol_spherical_cap() only accepts non-negative values
    >>> vol_spherical_cap(1, -2)
    Traceback (most recent call last):
    ...
    ValueError: vol_spherical_cap() only accepts non-negative values
    """
    if height < 0 or radius < 0:
        raise ValueError("vol_spherical_cap() only accepts non-negative values")
    cap_width_factor = 3 * radius - height
    return 1 / 3 * pi * pow(height, 2) * cap_width_factor
+
+
def vol_spheres_intersect(
    radius_1: float, radius_2: float, centers_distance: float
) -> float:
    """
    Calculate the volume of the intersection of two spheres.

    The intersection is a lens made of two spherical caps, so its volume is
    the sum of the two cap volumes. The height of the cap cut from sphere B
    (radius r_b) by sphere A (radius r_a) at distance d is
    (r_a - r_b + d) * (r_a + r_b - d) / (2 * d).
    If centers_distance is 0 the volume of the smaller sphere is returned.

    >>> vol_spheres_intersect(2, 2, 1)
    21.205750411731103
    >>> vol_spheres_intersect(2.6, 2.6, 1.6)
    40.71504079052372
    >>> vol_spheres_intersect(0, 0, 0)
    0.0
    >>> vol_spheres_intersect(-2, 2, 1)
    Traceback (most recent call last):
    ...
    ValueError: vol_spheres_intersect() only accepts non-negative values
    >>> vol_spheres_intersect(2, -2, 1)
    Traceback (most recent call last):
    ...
    ValueError: vol_spheres_intersect() only accepts non-negative values
    >>> vol_spheres_intersect(2, 2, -1)
    Traceback (most recent call last):
    ...
    ValueError: vol_spheres_intersect() only accepts non-negative values
    """
    if radius_1 < 0 or radius_2 < 0 or centers_distance < 0:
        raise ValueError("vol_spheres_intersect() only accepts non-negative values")
    if centers_distance == 0:
        return vol_sphere(min(radius_1, radius_2))

    def cap_height(r_a: float, r_b: float) -> float:
        # Height of the cap cut from the sphere of radius r_b.
        return (
            (r_a - r_b + centers_distance)
            * (r_a + r_b - centers_distance)
            / (2 * centers_distance)
        )

    return vol_spherical_cap(
        cap_height(radius_1, radius_2), radius_2
    ) + vol_spherical_cap(cap_height(radius_2, radius_1), radius_1)
+
+
def vol_spheres_union(
    radius_1: float, radius_2: float, centers_distance: float
) -> float:
    """
    Calculate the volume of the union of two spheres that possibly intersect.

    The union is the sum of the two sphere volumes minus the volume of their
    intersection (which would otherwise be counted twice).
    If centers_distance is 0, the volume of the larger sphere is returned.

    >>> vol_spheres_union(2, 2, 1)
    45.814892864851146
    >>> vol_spheres_union(1.56, 2.2, 1.4)
    48.77802773671288
    >>> vol_spheres_union(0, 2, 1)
    Traceback (most recent call last):
    ...
    ValueError: vol_spheres_union() only accepts non-negative values, non-zero radius
    >>> vol_spheres_union('1.56', '2.2', '1.4')
    Traceback (most recent call last):
    ...
    TypeError: '<=' not supported between instances of 'str' and 'int'
    >>> vol_spheres_union(1, None, 1)
    Traceback (most recent call last):
    ...
    TypeError: '<=' not supported between instances of 'NoneType' and 'int'
    """

    if radius_1 <= 0 or radius_2 <= 0 or centers_distance < 0:
        raise ValueError(
            "vol_spheres_union() only accepts non-negative values, non-zero radius"
        )

    if centers_distance == 0:
        return vol_sphere(max(radius_1, radius_2))

    combined = vol_sphere(radius_1) + vol_sphere(radius_2)
    # Remove the doubly-counted lens-shaped overlap region.
    return combined - vol_spheres_intersect(radius_1, radius_2, centers_distance)
+
+
def vol_cuboid(width: float, height: float, length: float) -> float:
"""
Calculate the Volume of a Cuboid.
:return multiple of width, length and height
-
>>> vol_cuboid(1, 1, 1)
1.0
>>> vol_cuboid(1, 2, 3)
6.0
+ >>> vol_cuboid(1.6, 2.6, 3.6)
+ 14.976
+ >>> vol_cuboid(0, 0, 0)
+ 0.0
+ >>> vol_cuboid(-1, 2, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_cuboid() only accepts non-negative values
+ >>> vol_cuboid(1, -2, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_cuboid() only accepts non-negative values
+ >>> vol_cuboid(1, 2, -3)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_cuboid() only accepts non-negative values
"""
+ if width < 0 or height < 0 or length < 0:
+ raise ValueError("vol_cuboid() only accepts non-negative values")
return float(width * height * length)
def vol_cone(area_of_base: float, height: float) -> float:
"""
Calculate the Volume of a Cone.
-
Wikipedia reference: https://en.wikipedia.org/wiki/Cone
:return (1/3) * area_of_base * height
-
>>> vol_cone(10, 3)
10.0
>>> vol_cone(1, 1)
0.3333333333333333
+ >>> vol_cone(1.6, 1.6)
+ 0.8533333333333335
+ >>> vol_cone(0, 0)
+ 0.0
+ >>> vol_cone(-1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_cone() only accepts non-negative values
+ >>> vol_cone(1, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_cone() only accepts non-negative values
"""
+ if height < 0 or area_of_base < 0:
+ raise ValueError("vol_cone() only accepts non-negative values")
return area_of_base * height / 3.0
def vol_right_circ_cone(radius: float, height: float) -> float:
"""
Calculate the Volume of a Right Circular Cone.
-
Wikipedia reference: https://en.wikipedia.org/wiki/Cone
:return (1/3) * pi * radius^2 * height
-
>>> vol_right_circ_cone(2, 3)
12.566370614359172
+ >>> vol_right_circ_cone(0, 0)
+ 0.0
+ >>> vol_right_circ_cone(1.6, 1.6)
+ 4.289321169701265
+ >>> vol_right_circ_cone(-1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_right_circ_cone() only accepts non-negative values
+ >>> vol_right_circ_cone(1, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_right_circ_cone() only accepts non-negative values
"""
+ if height < 0 or radius < 0:
+ raise ValueError("vol_right_circ_cone() only accepts non-negative values")
return pi * pow(radius, 2) * height / 3.0
@@ -65,12 +240,25 @@ def vol_prism(area_of_base: float, height: float) -> float:
Calculate the Volume of a Prism.
Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry)
:return V = Bh
-
>>> vol_prism(10, 2)
20.0
>>> vol_prism(11, 1)
11.0
+ >>> vol_prism(1.6, 1.6)
+ 2.5600000000000005
+ >>> vol_prism(0, 0)
+ 0.0
+ >>> vol_prism(-1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_prism() only accepts non-negative values
+ >>> vol_prism(1, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_prism() only accepts non-negative values
"""
+ if height < 0 or area_of_base < 0:
+ raise ValueError("vol_prism() only accepts non-negative values")
return float(area_of_base * height)
@@ -79,12 +267,25 @@ def vol_pyramid(area_of_base: float, height: float) -> float:
Calculate the Volume of a Pyramid.
Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry)
:return (1/3) * Bh
-
>>> vol_pyramid(10, 3)
10.0
>>> vol_pyramid(1.5, 3)
1.5
+ >>> vol_pyramid(1.6, 1.6)
+ 0.8533333333333335
+ >>> vol_pyramid(0, 0)
+ 0.0
+ >>> vol_pyramid(-1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_pyramid() only accepts non-negative values
+ >>> vol_pyramid(1, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_pyramid() only accepts non-negative values
"""
+ if height < 0 or area_of_base < 0:
+ raise ValueError("vol_pyramid() only accepts non-negative values")
return area_of_base * height / 3.0
@@ -93,39 +294,201 @@ def vol_sphere(radius: float) -> float:
Calculate the Volume of a Sphere.
Wikipedia reference: https://en.wikipedia.org/wiki/Sphere
:return (4/3) * pi * r^3
-
>>> vol_sphere(5)
523.5987755982989
>>> vol_sphere(1)
4.1887902047863905
+ >>> vol_sphere(1.6)
+ 17.15728467880506
+ >>> vol_sphere(0)
+ 0.0
+ >>> vol_sphere(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_sphere() only accepts non-negative values
"""
+ if radius < 0:
+ raise ValueError("vol_sphere() only accepts non-negative values")
+ # Volume is 4/3 * pi * radius cubed
return 4 / 3 * pi * pow(radius, 3)
def vol_hemisphere(radius: float) -> float:
    """Calculate the volume of a hemisphere: (2/3) * pi * radius^3.

    Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere
    Other references: https://www.cuemath.com/geometry/hemisphere

    >>> vol_hemisphere(1)
    2.0943951023931953
    >>> vol_hemisphere(7)
    718.377520120866
    >>> vol_hemisphere(1.6)
    8.57864233940253
    >>> vol_hemisphere(0)
    0.0
    >>> vol_hemisphere(-1)
    Traceback (most recent call last):
    ...
    ValueError: vol_hemisphere() only accepts non-negative values
    """
    if radius < 0:
        raise ValueError("vol_hemisphere() only accepts non-negative values")
    # Half a sphere: (4/3 * pi * r^3) / 2 == r^3 * pi * 2 / 3.
    return pow(radius, 3) * pi * 2 / 3
+
+
def vol_circular_cylinder(radius: float, height: float) -> float:
"""Calculate the Volume of a Circular Cylinder.
Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder
:return pi * radius^2 * height
-
>>> vol_circular_cylinder(1, 1)
3.141592653589793
>>> vol_circular_cylinder(4, 3)
150.79644737231007
+ >>> vol_circular_cylinder(1.6, 1.6)
+ 12.867963509103795
+ >>> vol_circular_cylinder(0, 0)
+ 0.0
+ >>> vol_circular_cylinder(-1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_circular_cylinder() only accepts non-negative values
+ >>> vol_circular_cylinder(1, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_circular_cylinder() only accepts non-negative values
"""
- return pi * pow(radius, 2) * height
+ if height < 0 or radius < 0:
+ raise ValueError("vol_circular_cylinder() only accepts non-negative values")
+ # Volume is radius squared * height * pi
+ return pow(radius, 2) * height * pi
+
+
+def vol_hollow_circular_cylinder(
+ inner_radius: float, outer_radius: float, height: float
+) -> float:
+ """Calculate the Volume of a Hollow Circular Cylinder.
+ >>> vol_hollow_circular_cylinder(1, 2, 3)
+ 28.274333882308138
+ >>> vol_hollow_circular_cylinder(1.6, 2.6, 3.6)
+ 47.50088092227767
+ >>> vol_hollow_circular_cylinder(-1, 2, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_hollow_circular_cylinder() only accepts non-negative values
+ >>> vol_hollow_circular_cylinder(1, -2, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_hollow_circular_cylinder() only accepts non-negative values
+ >>> vol_hollow_circular_cylinder(1, 2, -3)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_hollow_circular_cylinder() only accepts non-negative values
+ >>> vol_hollow_circular_cylinder(2, 1, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: outer_radius must be greater than inner_radius
+ >>> vol_hollow_circular_cylinder(0, 0, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: outer_radius must be greater than inner_radius
+ """
+    # Volume is (outer_radius squared - inner_radius squared) * pi * height
+ if inner_radius < 0 or outer_radius < 0 or height < 0:
+ raise ValueError(
+ "vol_hollow_circular_cylinder() only accepts non-negative values"
+ )
+ if outer_radius <= inner_radius:
+ raise ValueError("outer_radius must be greater than inner_radius")
+ return pi * (pow(outer_radius, 2) - pow(inner_radius, 2)) * height
+
+
+def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float:
+ """Calculate the Volume of a Conical Frustum.
+ Wikipedia reference: https://en.wikipedia.org/wiki/Frustum
+
+ >>> vol_conical_frustum(45, 7, 28)
+ 48490.482608158454
+ >>> vol_conical_frustum(1, 1, 2)
+ 7.330382858376184
+ >>> vol_conical_frustum(1.6, 2.6, 3.6)
+ 48.7240076620753
+ >>> vol_conical_frustum(0, 0, 0)
+ 0.0
+ >>> vol_conical_frustum(-2, 2, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_conical_frustum() only accepts non-negative values
+ >>> vol_conical_frustum(2, -2, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_conical_frustum() only accepts non-negative values
+ >>> vol_conical_frustum(2, 2, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_conical_frustum() only accepts non-negative values
+ """
+ # Volume is 1/3 * pi * height *
+ # (radius_1 squared + radius_2 squared + radius_1 * radius_2)
+ if radius_1 < 0 or radius_2 < 0 or height < 0:
+ raise ValueError("vol_conical_frustum() only accepts non-negative values")
+ return (
+ 1
+ / 3
+ * pi
+ * height
+ * (pow(radius_1, 2) + pow(radius_2, 2) + radius_1 * radius_2)
+ )
+
+
+def vol_torus(torus_radius: float, tube_radius: float) -> float:
+ """Calculate the Volume of a Torus.
+ Wikipedia reference: https://en.wikipedia.org/wiki/Torus
+ :return 2pi^2 * torus_radius * tube_radius^2
+ >>> vol_torus(1, 1)
+ 19.739208802178716
+ >>> vol_torus(4, 3)
+ 710.6115168784338
+ >>> vol_torus(3, 4)
+ 947.4820225045784
+ >>> vol_torus(1.6, 1.6)
+ 80.85179925372404
+ >>> vol_torus(0, 0)
+ 0.0
+ >>> vol_torus(-1, 1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_torus() only accepts non-negative values
+ >>> vol_torus(1, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: vol_torus() only accepts non-negative values
+ """
+ if torus_radius < 0 or tube_radius < 0:
+ raise ValueError("vol_torus() only accepts non-negative values")
+ return 2 * pow(pi, 2) * torus_radius * pow(tube_radius, 2)
def main():
"""Print the Results of Various Volume Calculations."""
print("Volumes:")
- print("Cube: " + str(vol_cube(2))) # = 8
- print("Cuboid: " + str(vol_cuboid(2, 2, 2))) # = 8
- print("Cone: " + str(vol_cone(2, 2))) # ~= 1.33
- print("Right Circular Cone: " + str(vol_right_circ_cone(2, 2))) # ~= 8.38
- print("Prism: " + str(vol_prism(2, 2))) # = 4
- print("Pyramid: " + str(vol_pyramid(2, 2))) # ~= 1.33
- print("Sphere: " + str(vol_sphere(2))) # ~= 33.5
- print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2))) # ~= 25.1
+ print(f"Cube: {vol_cube(2) = }") # = 8
+ print(f"Cuboid: {vol_cuboid(2, 2, 2) = }") # = 8
+ print(f"Cone: {vol_cone(2, 2) = }") # ~= 1.33
+ print(f"Right Circular Cone: {vol_right_circ_cone(2, 2) = }") # ~= 8.38
+ print(f"Prism: {vol_prism(2, 2) = }") # = 4
+ print(f"Pyramid: {vol_pyramid(2, 2) = }") # ~= 1.33
+ print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5
+ print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75
+ print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1
+ print(f"Torus: {vol_torus(2, 2) = }") # ~= 157.9
+ print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6
+ print(f"Spherical cap: {vol_spherical_cap(1, 2) = }") # ~= 5.24
+ print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21
+ print(f"Spheres union: {vol_spheres_union(2, 2, 1) = }") # ~= 45.81
+ print(
+ f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }"
+ ) # ~= 28.3
if __name__ == "__main__":
diff --git a/maths/weird_number.py b/maths/weird_number.py
new file mode 100644
index 000000000..2834a9fee
--- /dev/null
+++ b/maths/weird_number.py
@@ -0,0 +1,100 @@
+"""
+https://en.wikipedia.org/wiki/Weird_number
+
+Fun fact: The set of weird numbers has positive asymptotic density.
+"""
+from math import sqrt
+
+
+def factors(number: int) -> list[int]:
+ """
+ >>> factors(12)
+ [1, 2, 3, 4, 6]
+ >>> factors(1)
+ [1]
+ >>> factors(100)
+ [1, 2, 4, 5, 10, 20, 25, 50]
+
+ # >>> factors(-12)
+ # [1, 2, 3, 4, 6]
+ """
+
+ values = [1]
+ for i in range(2, int(sqrt(number)) + 1, 1):
+ if number % i == 0:
+ values.append(i)
+ if int(number // i) != i:
+ values.append(int(number // i))
+ return sorted(values)
+
+
+def abundant(n: int) -> bool:
+ """
+ >>> abundant(0)
+ True
+ >>> abundant(1)
+ False
+ >>> abundant(12)
+ True
+ >>> abundant(13)
+ False
+ >>> abundant(20)
+ True
+
+ # >>> abundant(-12)
+ # True
+ """
+ return sum(factors(n)) > n
+
+
+def semi_perfect(number: int) -> bool:
+ """
+ >>> semi_perfect(0)
+ True
+ >>> semi_perfect(1)
+ True
+ >>> semi_perfect(12)
+ True
+ >>> semi_perfect(13)
+ False
+
+ # >>> semi_perfect(-12)
+ # True
+ """
+ values = factors(number)
+ r = len(values)
+ subset = [[0 for i in range(number + 1)] for j in range(r + 1)]
+ for i in range(r + 1):
+ subset[i][0] = True
+
+ for i in range(1, number + 1):
+ subset[0][i] = False
+
+ for i in range(1, r + 1):
+ for j in range(1, number + 1):
+ if j < values[i - 1]:
+ subset[i][j] = subset[i - 1][j]
+ else:
+ subset[i][j] = subset[i - 1][j] or subset[i - 1][j - values[i - 1]]
+
+ return subset[r][number] != 0
+
+
+def weird(number: int) -> bool:
+ """
+ >>> weird(0)
+ False
+ >>> weird(70)
+ True
+ >>> weird(77)
+ False
+ """
+ return abundant(number) and not semi_perfect(number)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod(verbose=True)
+ for number in (69, 70, 71):
+ print(f"{number} is {'' if weird(number) else 'not '}weird.")
diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py
index 2d4a22a0a..483fb000f 100644
--- a/maths/zellers_congruence.py
+++ b/maths/zellers_congruence.py
@@ -3,7 +3,6 @@ import datetime
def zeller(date_input: str) -> str:
-
"""
Zellers Congruence Algorithm
Find the day of the week for nearly any Gregorian or Julian calendar date
@@ -14,11 +13,11 @@ def zeller(date_input: str) -> str:
Validate out of range month
>>> zeller('13-31-2010')
Traceback (most recent call last):
- ...
+ ...
ValueError: Month must be between 1 - 12
>>> zeller('.2-31-2010')
Traceback (most recent call last):
- ...
+ ...
ValueError: invalid literal for int() with base 10: '.2'
Validate out of range date:
diff --git a/matrix/binary_search_matrix.py b/matrix/binary_search_matrix.py
new file mode 100644
index 000000000..6f203b7a3
--- /dev/null
+++ b/matrix/binary_search_matrix.py
@@ -0,0 +1,57 @@
+def binary_search(array: list, lower_bound: int, upper_bound: int, value: int) -> int:
+ """
+    This function carries out binary search on a 1d array and
+    returns -1 if the value does not exist
+ array: A 1d sorted array
+ value : the value meant to be searched
+ >>> matrix = [1, 4, 7, 11, 15]
+ >>> binary_search(matrix, 0, len(matrix) - 1, 1)
+ 0
+ >>> binary_search(matrix, 0, len(matrix) - 1, 23)
+ -1
+ """
+
+ r = int((lower_bound + upper_bound) // 2)
+ if array[r] == value:
+ return r
+ if lower_bound >= upper_bound:
+ return -1
+ if array[r] < value:
+ return binary_search(array, r + 1, upper_bound, value)
+ else:
+ return binary_search(array, lower_bound, r - 1, value)
+
+
+def mat_bin_search(value: int, matrix: list) -> list:
+ """
+ This function loops over a 2d matrix and calls binarySearch on
+ the selected 1d array and returns [-1, -1] is it do not exist
+ value : value meant to be searched
+ matrix = a sorted 2d matrix
+ >>> matrix = [[1, 4, 7, 11, 15],
+ ... [2, 5, 8, 12, 19],
+ ... [3, 6, 9, 16, 22],
+ ... [10, 13, 14, 17, 24],
+ ... [18, 21, 23, 26, 30]]
+ >>> target = 1
+ >>> mat_bin_search(target, matrix)
+ [0, 0]
+ >>> target = 34
+ >>> mat_bin_search(target, matrix)
+ [-1, -1]
+ """
+ index = 0
+ if matrix[index][0] == value:
+ return [index, 0]
+ while index < len(matrix) and matrix[index][0] < value:
+ r = binary_search(matrix[index], 0, len(matrix[index]) - 1, value)
+ if r != -1:
+ return [index, r]
+ index += 1
+ return [-1, -1]
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/matrix/count_islands_in_matrix.py b/matrix/count_islands_in_matrix.py
index ad9c67fb8..64c595e84 100644
--- a/matrix/count_islands_in_matrix.py
+++ b/matrix/count_islands_in_matrix.py
@@ -3,13 +3,13 @@
# connections.
-class matrix: # Public class to implement a graph
- def __init__(self, row: int, col: int, graph: list):
+class Matrix: # Public class to implement a graph
+ def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
self.ROW = row
self.COL = col
self.graph = graph
- def is_safe(self, i, j, visited) -> bool:
+ def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
@@ -17,13 +17,14 @@ class matrix: # Public class to implement a graph
and self.graph[i][j]
)
- def diffs(self, i, j, visited): # Checking all 8 elements surrounding nth element
- rowNbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
- colNbr = [-1, 0, 1, -1, 1, -1, 0, 1]
+ def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
+ # Checking all 8 elements surrounding nth element
+ row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
+ col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
visited[i][j] = True # Make those cells visited
for k in range(8):
- if self.is_safe(i + rowNbr[k], j + colNbr[k], visited):
- self.diffs(i + rowNbr[k], j + colNbr[k], visited)
+ if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
+ self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
def count_islands(self) -> int: # And finally, count all islands.
visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
diff --git a/matrix/count_negative_numbers_in_sorted_matrix.py b/matrix/count_negative_numbers_in_sorted_matrix.py
new file mode 100644
index 000000000..2799ff3b4
--- /dev/null
+++ b/matrix/count_negative_numbers_in_sorted_matrix.py
@@ -0,0 +1,151 @@
+"""
+Given a matrix of numbers in which all rows and all columns are sorted in decreasing
+order, return the number of negative numbers in the grid.
+
+Reference: https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix
+"""
+
+
+def generate_large_matrix() -> list[list[int]]:
+ """
+ >>> generate_large_matrix() # doctest: +ELLIPSIS
+ [[1000, ..., -999], [999, ..., -1001], ..., [2, ..., -1998]]
+ """
+ return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
+
+
+grid = generate_large_matrix()
+test_grids = (
+ [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
+ [[3, 2], [1, 0]],
+ [[7, 7, 6]],
+ [[7, 7, 6], [-1, -2, -3]],
+ grid,
+)
+
+
+def validate_grid(grid: list[list[int]]) -> None:
+ """
+    Validate that the rows and columns of the grid are sorted in decreasing order.
+ >>> for grid in test_grids:
+ ... validate_grid(grid)
+ """
+ assert all(row == sorted(row, reverse=True) for row in grid)
+ assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
+
+
+def find_negative_index(array: list[int]) -> int:
+ """
+ Find the smallest negative index
+
+ >>> find_negative_index([0,0,0,0])
+ 4
+ >>> find_negative_index([4,3,2,-1])
+ 3
+ >>> find_negative_index([1,0,-1,-10])
+ 2
+ >>> find_negative_index([0,0,0,-1])
+ 3
+ >>> find_negative_index([11,8,7,-3,-5,-9])
+ 3
+ >>> find_negative_index([-1,-1,-2,-3])
+ 0
+ >>> find_negative_index([5,1,0])
+ 3
+ >>> find_negative_index([-5,-5,-5])
+ 0
+ >>> find_negative_index([0])
+ 1
+ >>> find_negative_index([])
+ 0
+ """
+ left = 0
+ right = len(array) - 1
+
+ # Edge cases such as no values or all numbers are negative.
+ if not array or array[0] < 0:
+ return 0
+
+ while right + 1 > left:
+ mid = (left + right) // 2
+ num = array[mid]
+
+ # Num must be negative and the index must be greater than or equal to 0.
+ if num < 0 and array[mid - 1] >= 0:
+ return mid
+
+ if num >= 0:
+ left = mid + 1
+ else:
+ right = mid - 1
+ # No negative numbers so return the last index of the array + 1 which is the length.
+ return len(array)
+
+
+def count_negatives_binary_search(grid: list[list[int]]) -> int:
+ """
+    An O(m log n) solution that uses binary search in order to find the boundary between
+ positive and negative numbers
+
+ >>> [count_negatives_binary_search(grid) for grid in test_grids]
+ [8, 0, 0, 3, 1498500]
+ """
+ total = 0
+ bound = len(grid[0])
+
+ for i in range(len(grid)):
+ bound = find_negative_index(grid[i][:bound])
+ total += bound
+ return (len(grid) * len(grid[0])) - total
+
+
+def count_negatives_brute_force(grid: list[list[int]]) -> int:
+ """
+ This solution is O(n^2) because it iterates through every column and row.
+
+ >>> [count_negatives_brute_force(grid) for grid in test_grids]
+ [8, 0, 0, 3, 1498500]
+ """
+ return len([number for row in grid for number in row if number < 0])
+
+
+def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
+ """
+ Similar to the brute force solution above but uses break in order to reduce the
+ number of iterations.
+
+ >>> [count_negatives_brute_force_with_break(grid) for grid in test_grids]
+ [8, 0, 0, 3, 1498500]
+ """
+ total = 0
+ for row in grid:
+ for i, number in enumerate(row):
+ if number < 0:
+ total += len(row) - i
+ break
+ return total
+
+
+def benchmark() -> None:
+ """Benchmark our functions next to each other"""
+ from timeit import timeit
+
+ print("Running benchmarks")
+ setup = (
+ "from __main__ import count_negatives_binary_search, "
+ "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
+ )
+ for func in (
+ "count_negatives_binary_search", # took 0.7727 seconds
+ "count_negatives_brute_force_with_break", # took 4.6505 seconds
+ "count_negatives_brute_force", # took 12.8160 seconds
+ ):
+ time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
+ print(f"{func}() took {time:0.4f} seconds")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ benchmark()
diff --git a/matrix/count_paths.py b/matrix/count_paths.py
new file mode 100644
index 000000000..4861ad5fd
--- /dev/null
+++ b/matrix/count_paths.py
@@ -0,0 +1,75 @@
+"""
+Given a grid, where you start from the top left position [0, 0],
+you want to find how many paths you can take to get to the bottom right position.
+
+start here -> 0 0 0 0
+ 1 1 0 0
+ 0 0 0 1
+ 0 1 0 0 <- finish here
+how many 'distinct' paths can you take to get to the finish?
+Using a recursive depth-first search algorithm below, you are able to
+find the number of distinct unique paths (count).
+
+'*' will demonstrate a path
+In the example above, there are two distinct paths:
+1. 2.
+ * * * 0 * * * *
+ 1 1 * 0 1 1 * *
+ 0 0 * 1 0 0 * 1
+ 0 1 * * 0 1 * *
+"""
+
+
+def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
+ """
+ Recursive Backtracking Depth First Search Algorithm
+
+ Starting from top left of a matrix, count the number of
+ paths that can reach the bottom right of a matrix.
+ 1 represents a block (inaccessible)
+ 0 represents a valid space (accessible)
+
+ 0 0 0 0
+ 1 1 0 0
+ 0 0 0 1
+ 0 1 0 0
+ >>> grid = [[0, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]]
+ >>> depth_first_search(grid, 0, 0, set())
+ 2
+
+ 0 0 0 0 0
+ 0 1 1 1 0
+ 0 1 1 1 0
+ 0 0 0 0 0
+ >>> grid = [[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]
+ >>> depth_first_search(grid, 0, 0, set())
+ 2
+ """
+ row_length, col_length = len(grid), len(grid[0])
+ if (
+ min(row, col) < 0
+ or row == row_length
+ or col == col_length
+ or (row, col) in visit
+ or grid[row][col] == 1
+ ):
+ return 0
+ if row == row_length - 1 and col == col_length - 1:
+ return 1
+
+ visit.add((row, col))
+
+ count = 0
+ count += depth_first_search(grid, row + 1, col, visit)
+ count += depth_first_search(grid, row - 1, col, visit)
+ count += depth_first_search(grid, row, col + 1, visit)
+ count += depth_first_search(grid, row, col - 1, visit)
+
+ visit.remove((row, col))
+ return count
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py
new file mode 100644
index 000000000..4f52dbe64
--- /dev/null
+++ b/matrix/cramers_rule_2x2.py
@@ -0,0 +1,84 @@
+# https://www.chilimath.com/lessons/advanced-algebra/cramers-rule-with-two-variables
+# https://en.wikipedia.org/wiki/Cramer%27s_rule
+
+
+def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
+ """
+    Solves a system of two linear equations in two variables.
+ :param: equation1: list of 3 numbers
+ :param: equation2: list of 3 numbers
+    :return: Tuple (x, y) with the solution
+ input format : [a1, b1, d1], [a2, b2, d2]
+ determinant = [[a1, b1], [a2, b2]]
+ determinant_x = [[d1, b1], [d2, b2]]
+ determinant_y = [[a1, d1], [a2, d2]]
+
+ >>> cramers_rule_2x2([2, 3, 0], [5, 1, 0])
+ (0.0, 0.0)
+ >>> cramers_rule_2x2([0, 4, 50], [2, 0, 26])
+ (13.0, 12.5)
+ >>> cramers_rule_2x2([11, 2, 30], [1, 0, 4])
+ (4.0, -7.0)
+ >>> cramers_rule_2x2([4, 7, 1], [1, 2, 0])
+ (2.0, -1.0)
+
+ >>> cramers_rule_2x2([1, 2, 3], [2, 4, 6])
+ Traceback (most recent call last):
+ ...
+ ValueError: Infinite solutions. (Consistent system)
+ >>> cramers_rule_2x2([1, 2, 3], [2, 4, 7])
+ Traceback (most recent call last):
+ ...
+ ValueError: No solution. (Inconsistent system)
+ >>> cramers_rule_2x2([1, 2, 3], [11, 22])
+ Traceback (most recent call last):
+ ...
+ ValueError: Please enter a valid equation.
+ >>> cramers_rule_2x2([0, 1, 6], [0, 0, 3])
+ Traceback (most recent call last):
+ ...
+ ValueError: No solution. (Inconsistent system)
+ >>> cramers_rule_2x2([0, 0, 6], [0, 0, 3])
+ Traceback (most recent call last):
+ ...
+ ValueError: Both a & b of two equations can't be zero.
+ >>> cramers_rule_2x2([1, 2, 3], [1, 2, 3])
+ Traceback (most recent call last):
+ ...
+ ValueError: Infinite solutions. (Consistent system)
+ >>> cramers_rule_2x2([0, 4, 50], [0, 3, 99])
+ Traceback (most recent call last):
+ ...
+ ValueError: No solution. (Inconsistent system)
+ """
+
+ # Check if the input is valid
+ if not len(equation1) == len(equation2) == 3:
+ raise ValueError("Please enter a valid equation.")
+ if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
+ raise ValueError("Both a & b of two equations can't be zero.")
+
+ # Extract the coefficients
+ a1, b1, c1 = equation1
+ a2, b2, c2 = equation2
+
+ # Calculate the determinants of the matrices
+ determinant = a1 * b2 - a2 * b1
+ determinant_x = c1 * b2 - c2 * b1
+ determinant_y = a1 * c2 - a2 * c1
+
+ # Check if the system of linear equations has a solution (using Cramer's rule)
+ if determinant == 0:
+ if determinant_x == determinant_y == 0:
+ raise ValueError("Infinite solutions. (Consistent system)")
+ else:
+ raise ValueError("No solution. (Inconsistent system)")
+ else:
+ if determinant_x == determinant_y == 0:
+            # Trivial solution (Consistent system)
+ return (0.0, 0.0)
+ else:
+ x = determinant_x / determinant
+ y = determinant_y / determinant
+ # Non-Trivial Solution (Consistent system)
+ return (x, y)
diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py
index 9deca6c3c..e53d90df8 100644
--- a/matrix/inverse_of_matrix.py
+++ b/matrix/inverse_of_matrix.py
@@ -2,22 +2,25 @@ from __future__ import annotations
from decimal import Decimal
+from numpy import array
+
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
"""
A matrix multiplied with its inverse gives the identity matrix.
- This function finds the inverse of a 2x2 matrix.
+ This function finds the inverse of a 2x2 and 3x3 matrix.
If the determinant of a matrix is 0, its inverse does not exist.
Sources for fixing inaccurate float arithmetic:
https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python
https://docs.python.org/3/library/decimal.html
+ Doctests for 2x2
>>> inverse_of_matrix([[2, 5], [2, 0]])
[[0.0, 0.5], [0.2, -0.2]]
>>> inverse_of_matrix([[2.5, 5], [1, 2]])
Traceback (most recent call last):
- ...
+ ...
ValueError: This matrix has no inverse.
>>> inverse_of_matrix([[12, -16], [-9, 0]])
[[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]]
@@ -25,16 +28,128 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
[[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]]
>>> inverse_of_matrix([[10, 5], [3, 2.5]])
[[0.25, -0.5], [-0.3, 1.0]]
+
+ Doctests for 3x3
+ >>> inverse_of_matrix([[2, 5, 7], [2, 0, 1], [1, 2, 3]])
+ [[2.0, 5.0, -4.0], [1.0, 1.0, -1.0], [-5.0, -12.0, 10.0]]
+ >>> inverse_of_matrix([[1, 2, 2], [1, 2, 2], [3, 2, -1]])
+ Traceback (most recent call last):
+ ...
+ ValueError: This matrix has no inverse.
+
+ >>> inverse_of_matrix([[],[]])
+ Traceback (most recent call last):
+ ...
+ ValueError: Please provide a matrix of size 2x2 or 3x3.
+
+ >>> inverse_of_matrix([[1, 2], [3, 4], [5, 6]])
+ Traceback (most recent call last):
+ ...
+ ValueError: Please provide a matrix of size 2x2 or 3x3.
+
+ >>> inverse_of_matrix([[1, 2, 1], [0,3, 4]])
+ Traceback (most recent call last):
+ ...
+ ValueError: Please provide a matrix of size 2x2 or 3x3.
+
+ >>> inverse_of_matrix([[1, 2, 3], [7, 8, 9], [7, 8, 9]])
+ Traceback (most recent call last):
+ ...
+ ValueError: This matrix has no inverse.
+
+ >>> inverse_of_matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
"""
- D = Decimal # An abbreviation to be conciseness
- # Calculate the determinant of the matrix
- determinant = D(matrix[0][0]) * D(matrix[1][1]) - D(matrix[1][0]) * D(matrix[0][1])
- if determinant == 0:
- raise ValueError("This matrix has no inverse.")
- # Creates a copy of the matrix with swapped positions of the elements
- swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
- swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
- swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
- # Calculate the inverse of the matrix
- return [[float(D(n) / determinant) or 0.0 for n in row] for row in swapped_matrix]
+ d = Decimal
+
+ # Check if the provided matrix has 2 rows and 2 columns
+ # since this implementation only works for 2x2 matrices
+ if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
+ # Calculate the determinant of the matrix
+ determinant = float(
+ d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
+ )
+ if determinant == 0:
+ raise ValueError("This matrix has no inverse.")
+
+ # Creates a copy of the matrix with swapped positions of the elements
+ swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
+ swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
+ swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
+
+ # Calculate the inverse of the matrix
+ return [
+ [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
+ ]
+ elif (
+ len(matrix) == 3
+ and len(matrix[0]) == 3
+ and len(matrix[1]) == 3
+ and len(matrix[2]) == 3
+ ):
+ # Calculate the determinant of the matrix using Sarrus rule
+ determinant = float(
+ (
+ (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
+ )
+ - (
+ (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
+ )
+ )
+ if determinant == 0:
+ raise ValueError("This matrix has no inverse.")
+
+ # Creating cofactor matrix
+ cofactor_matrix = [
+ [d(0.0), d(0.0), d(0.0)],
+ [d(0.0), d(0.0), d(0.0)],
+ [d(0.0), d(0.0), d(0.0)],
+ ]
+ cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
+ d(matrix[1][2]) * d(matrix[2][1])
+ )
+ cofactor_matrix[0][1] = -(
+ (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
+ )
+ cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
+ d(matrix[1][1]) * d(matrix[2][0])
+ )
+ cofactor_matrix[1][0] = -(
+ (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
+ )
+ cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
+ d(matrix[0][2]) * d(matrix[2][0])
+ )
+ cofactor_matrix[1][2] = -(
+ (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
+ )
+ cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
+ d(matrix[0][2]) * d(matrix[1][1])
+ )
+ cofactor_matrix[2][1] = -(
+ (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
+ )
+ cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
+ d(matrix[0][1]) * d(matrix[1][0])
+ )
+
+ # Transpose the cofactor matrix (Adjoint matrix)
+ adjoint_matrix = array(cofactor_matrix)
+ for i in range(3):
+ for j in range(3):
+ adjoint_matrix[i][j] = cofactor_matrix[j][i]
+
+ # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
+ inverse_matrix = array(cofactor_matrix)
+ for i in range(3):
+ for j in range(3):
+ inverse_matrix[i][j] /= d(determinant)
+
+ # Calculate the inverse of the matrix
+ return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
+ raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py
new file mode 100644
index 000000000..a93369c56
--- /dev/null
+++ b/matrix/largest_square_area_in_matrix.py
@@ -0,0 +1,188 @@
+"""
+Question:
+Given a binary matrix mat of size n * m, find out the maximum size square
+sub-matrix with all 1s.
+
+---
+Example 1:
+
+Input:
+n = 2, m = 2
+mat = [[1, 1],
+ [1, 1]]
+
+Output:
+2
+
+Explanation: The maximum size of the square
+sub-matrix is 2. The matrix itself is the
+maximum sized sub-matrix in this case.
+---
+Example 2
+
+Input:
+n = 2, m = 2
+mat = [[0, 0],
+ [0, 0]]
+Output: 0
+
+Explanation: There is no 1 in the matrix.
+
+
+Approach:
+We initialize another matrix (dp) with the same dimensions
+as the original one, initialized with all 0's.
+
+dp_array(i,j) represents the side length of the maximum square whose
+bottom right corner is the cell with index (i,j) in the original matrix.
+
+Starting from index (0,0), for every 1 found in the original matrix,
+we update the value of the current element as
+
+dp_array(i,j)=dp_array(dp(i−1,j),dp_array(i−1,j−1),dp_array(i,j−1)) + 1.
+"""
+
+
+def largest_square_area_in_matrix_top_down_approch(
+ rows: int, cols: int, mat: list[list[int]]
+) -> int:
+ """
+ Function updates the largest_square_area[0], if recursive call found
+ square with maximum area.
+
+ We aren't using dp_array here, so the time complexity would be exponential.
+
+ >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1,1], [1,1]])
+ 2
+ >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[0,0], [0,0]])
+ 0
+ """
+
+ def update_area_of_max_square(row: int, col: int) -> int:
+ # BASE CASE
+ if row >= rows or col >= cols:
+ return 0
+
+ right = update_area_of_max_square(row, col + 1)
+ diagonal = update_area_of_max_square(row + 1, col + 1)
+ down = update_area_of_max_square(row + 1, col)
+
+ if mat[row][col]:
+ sub_problem_sol = 1 + min([right, diagonal, down])
+ largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
+ return sub_problem_sol
+ else:
+ return 0
+
+ largest_square_area = [0]
+ update_area_of_max_square(0, 0)
+ return largest_square_area[0]
+
+
+def largest_square_area_in_matrix_top_down_approch_with_dp(
+ rows: int, cols: int, mat: list[list[int]]
+) -> int:
+ """
+ Function updates the largest_square_area[0], if recursive call found
+ square with maximum area.
+
+ We are using dp_array here, so the time complexity would be O(N^2).
+
+ >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1,1], [1,1]])
+ 2
+ >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[0,0], [0,0]])
+ 0
+ """
+
+ def update_area_of_max_square_using_dp_array(
+ row: int, col: int, dp_array: list[list[int]]
+ ) -> int:
+ if row >= rows or col >= cols:
+ return 0
+ if dp_array[row][col] != -1:
+ return dp_array[row][col]
+
+ right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
+ diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
+ down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
+
+ if mat[row][col]:
+ sub_problem_sol = 1 + min([right, diagonal, down])
+ largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
+ dp_array[row][col] = sub_problem_sol
+ return sub_problem_sol
+ else:
+ return 0
+
+ largest_square_area = [0]
+ dp_array = [[-1] * cols for _ in range(rows)]
+ update_area_of_max_square_using_dp_array(0, 0, dp_array)
+
+ return largest_square_area[0]
+
+
+def largest_square_area_in_matrix_bottom_up(
+ rows: int, cols: int, mat: list[list[int]]
+) -> int:
+ """
+ Function updates the largest_square_area, using bottom up approach.
+
+ >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1,1], [1,1]])
+ 2
+ >>> largest_square_area_in_matrix_bottom_up(2, 2, [[0,0], [0,0]])
+ 0
+
+ """
+ dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
+ largest_square_area = 0
+ for row in range(rows - 1, -1, -1):
+ for col in range(cols - 1, -1, -1):
+ right = dp_array[row][col + 1]
+ diagonal = dp_array[row + 1][col + 1]
+ bottom = dp_array[row + 1][col]
+
+ if mat[row][col] == 1:
+ dp_array[row][col] = 1 + min(right, diagonal, bottom)
+ largest_square_area = max(dp_array[row][col], largest_square_area)
+ else:
+ dp_array[row][col] = 0
+
+ return largest_square_area
+
+
+def largest_square_area_in_matrix_bottom_up_space_optimization(
+ rows: int, cols: int, mat: list[list[int]]
+) -> int:
+ """
+ Function updates the largest_square_area, using bottom up
+ approach. with space optimization.
+
+ >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1,1], [1,1]])
+ 2
+ >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[0,0], [0,0]])
+ 0
+ """
+ current_row = [0] * (cols + 1)
+ next_row = [0] * (cols + 1)
+ largest_square_area = 0
+ for row in range(rows - 1, -1, -1):
+ for col in range(cols - 1, -1, -1):
+ right = current_row[col + 1]
+ diagonal = next_row[col + 1]
+ bottom = next_row[col]
+
+ if mat[row][col] == 1:
+ current_row[col] = 1 + min(right, diagonal, bottom)
+ largest_square_area = max(current_row[col], largest_square_area)
+ else:
+ current_row[col] = 0
+ next_row = current_row
+
+ return largest_square_area
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+ print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py
index 57a2fc45f..a73e8b92a 100644
--- a/matrix/matrix_class.py
+++ b/matrix/matrix_class.py
@@ -1,358 +1,366 @@
-# An OOP approach to representing and manipulating matrices
-
-
-class Matrix:
- """
- Matrix object generated from a 2D array where each element is an array representing
- a row.
- Rows can contain type int or float.
- Common operations and information available.
- >>> rows = [
- ... [1, 2, 3],
- ... [4, 5, 6],
- ... [7, 8, 9]
- ... ]
- >>> matrix = Matrix(rows)
- >>> print(matrix)
- [[1. 2. 3.]
- [4. 5. 6.]
- [7. 8. 9.]]
-
- Matrix rows and columns are available as 2D arrays
- >>> print(matrix.rows)
- [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
- >>> print(matrix.columns())
- [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
-
- Order is returned as a tuple
- >>> matrix.order
- (3, 3)
-
- Squareness and invertability are represented as bool
- >>> matrix.is_square
- True
- >>> matrix.is_invertable()
- False
-
- Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be
- a Matrix or Nonetype
- >>> print(matrix.identity())
- [[1. 0. 0.]
- [0. 1. 0.]
- [0. 0. 1.]]
- >>> print(matrix.minors())
- [[-3. -6. -3.]
- [-6. -12. -6.]
- [-3. -6. -3.]]
- >>> print(matrix.cofactors())
- [[-3. 6. -3.]
- [6. -12. 6.]
- [-3. 6. -3.]]
- >>> # won't be apparent due to the nature of the cofactor matrix
- >>> print(matrix.adjugate())
- [[-3. 6. -3.]
- [6. -12. 6.]
- [-3. 6. -3.]]
- >>> print(matrix.inverse())
- None
-
- Determinant is an int, float, or Nonetype
- >>> matrix.determinant()
- 0
-
- Negation, scalar multiplication, addition, subtraction, multiplication and
- exponentiation are available and all return a Matrix
- >>> print(-matrix)
- [[-1. -2. -3.]
- [-4. -5. -6.]
- [-7. -8. -9.]]
- >>> matrix2 = matrix * 3
- >>> print(matrix2)
- [[3. 6. 9.]
- [12. 15. 18.]
- [21. 24. 27.]]
- >>> print(matrix + matrix2)
- [[4. 8. 12.]
- [16. 20. 24.]
- [28. 32. 36.]]
- >>> print(matrix - matrix2)
- [[-2. -4. -6.]
- [-8. -10. -12.]
- [-14. -16. -18.]]
- >>> print(matrix ** 3)
- [[468. 576. 684.]
- [1062. 1305. 1548.]
- [1656. 2034. 2412.]]
-
- Matrices can also be modified
- >>> matrix.add_row([10, 11, 12])
- >>> print(matrix)
- [[1. 2. 3.]
- [4. 5. 6.]
- [7. 8. 9.]
- [10. 11. 12.]]
- >>> matrix2.add_column([8, 16, 32])
- >>> print(matrix2)
- [[3. 6. 9. 8.]
- [12. 15. 18. 16.]
- [21. 24. 27. 32.]]
- >>> print(matrix * matrix2)
- [[90. 108. 126. 136.]
- [198. 243. 288. 304.]
- [306. 378. 450. 472.]
- [414. 513. 612. 640.]]
-
- """
-
- def __init__(self, rows):
- error = TypeError(
- "Matrices must be formed from a list of zero or more lists containing at "
- "least one and the same number of values, each of which must be of type "
- "int or float."
- )
- if len(rows) != 0:
- cols = len(rows[0])
- if cols == 0:
- raise error
- for row in rows:
- if len(row) != cols:
- raise error
- for value in row:
- if not isinstance(value, (int, float)):
- raise error
- self.rows = rows
- else:
- self.rows = []
-
- # MATRIX INFORMATION
- def columns(self):
- return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
-
- @property
- def num_rows(self):
- return len(self.rows)
-
- @property
- def num_columns(self):
- return len(self.rows[0])
-
- @property
- def order(self):
- return (self.num_rows, self.num_columns)
-
- @property
- def is_square(self):
- return self.order[0] == self.order[1]
-
- def identity(self):
- values = [
- [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
- for row_num in range(self.num_rows)
- ]
- return Matrix(values)
-
- def determinant(self):
- if not self.is_square:
- return None
- if self.order == (0, 0):
- return 1
- if self.order == (1, 1):
- return self.rows[0][0]
- if self.order == (2, 2):
- return (self.rows[0][0] * self.rows[1][1]) - (
- self.rows[0][1] * self.rows[1][0]
- )
- else:
- return sum(
- self.rows[0][column] * self.cofactors().rows[0][column]
- for column in range(self.num_columns)
- )
-
- def is_invertable(self):
- return bool(self.determinant())
-
- def get_minor(self, row, column):
- values = [
- [
- self.rows[other_row][other_column]
- for other_column in range(self.num_columns)
- if other_column != column
- ]
- for other_row in range(self.num_rows)
- if other_row != row
- ]
- return Matrix(values).determinant()
-
- def get_cofactor(self, row, column):
- if (row + column) % 2 == 0:
- return self.get_minor(row, column)
- return -1 * self.get_minor(row, column)
-
- def minors(self):
- return Matrix(
- [
- [self.get_minor(row, column) for column in range(self.num_columns)]
- for row in range(self.num_rows)
- ]
- )
-
- def cofactors(self):
- return Matrix(
- [
- [
- self.minors().rows[row][column]
- if (row + column) % 2 == 0
- else self.minors().rows[row][column] * -1
- for column in range(self.minors().num_columns)
- ]
- for row in range(self.minors().num_rows)
- ]
- )
-
- def adjugate(self):
- values = [
- [self.cofactors().rows[column][row] for column in range(self.num_columns)]
- for row in range(self.num_rows)
- ]
- return Matrix(values)
-
- def inverse(self):
- determinant = self.determinant()
- return None if not determinant else self.adjugate() * (1 / determinant)
-
- def __repr__(self):
- return str(self.rows)
-
- def __str__(self):
- if self.num_rows == 0:
- return "[]"
- if self.num_rows == 1:
- return "[[" + ". ".join(self.rows[0]) + "]]"
- return (
- "["
- + "\n ".join(
- [
- "[" + ". ".join([str(value) for value in row]) + ".]"
- for row in self.rows
- ]
- )
- + "]"
- )
-
- # MATRIX MANIPULATION
- def add_row(self, row, position=None):
- type_error = TypeError("Row must be a list containing all ints and/or floats")
- if not isinstance(row, list):
- raise type_error
- for value in row:
- if not isinstance(value, (int, float)):
- raise type_error
- if len(row) != self.num_columns:
- raise ValueError(
- "Row must be equal in length to the other rows in the matrix"
- )
- if position is None:
- self.rows.append(row)
- else:
- self.rows = self.rows[0:position] + [row] + self.rows[position:]
-
- def add_column(self, column, position=None):
- type_error = TypeError(
- "Column must be a list containing all ints and/or floats"
- )
- if not isinstance(column, list):
- raise type_error
- for value in column:
- if not isinstance(value, (int, float)):
- raise type_error
- if len(column) != self.num_rows:
- raise ValueError(
- "Column must be equal in length to the other columns in the matrix"
- )
- if position is None:
- self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
- else:
- self.rows = [
- self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
- for i in range(self.num_rows)
- ]
-
- # MATRIX OPERATIONS
- def __eq__(self, other):
- if not isinstance(other, Matrix):
- raise TypeError("A Matrix can only be compared with another Matrix")
- return self.rows == other.rows
-
- def __ne__(self, other):
- return not self == other
-
- def __neg__(self):
- return self * -1
-
- def __add__(self, other):
- if self.order != other.order:
- raise ValueError("Addition requires matrices of the same order")
- return Matrix(
- [
- [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
- for i in range(self.num_rows)
- ]
- )
-
- def __sub__(self, other):
- if self.order != other.order:
- raise ValueError("Subtraction requires matrices of the same order")
- return Matrix(
- [
- [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
- for i in range(self.num_rows)
- ]
- )
-
- def __mul__(self, other):
- if isinstance(other, (int, float)):
- return Matrix([[element * other for element in row] for row in self.rows])
- elif isinstance(other, Matrix):
- if self.num_columns != other.num_rows:
- raise ValueError(
- "The number of columns in the first matrix must "
- "be equal to the number of rows in the second"
- )
- return Matrix(
- [
- [Matrix.dot_product(row, column) for column in other.columns()]
- for row in self.rows
- ]
- )
- else:
- raise TypeError(
- "A Matrix can only be multiplied by an int, float, or another matrix"
- )
-
- def __pow__(self, other):
- if not isinstance(other, int):
- raise TypeError("A Matrix can only be raised to the power of an int")
- if not self.is_square:
- raise ValueError("Only square matrices can be raised to a power")
- if other == 0:
- return self.identity()
- if other < 0:
- if self.is_invertable:
- return self.inverse() ** (-other)
- raise ValueError(
- "Only invertable matrices can be raised to a negative power"
- )
- result = self
- for i in range(other - 1):
- result *= self
- return result
-
- @classmethod
- def dot_product(cls, row, column):
- return sum(row[i] * column[i] for i in range(len(row)))
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
+# An OOP approach to representing and manipulating matrices
+
+from __future__ import annotations
+
+
class Matrix:
    """
    Matrix object generated from a 2D array where each element is an array representing
    a row.
    Rows can contain type int or float.
    Common operations and information available.
    >>> rows = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9]
    ... ]
    >>> matrix = Matrix(rows)
    >>> print(matrix)
    [[1. 2. 3.]
     [4. 5. 6.]
     [7. 8. 9.]]

    Matrix rows and columns are available as 2D arrays
    >>> matrix.rows
    [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    >>> matrix.columns()
    [[1, 4, 7], [2, 5, 8], [3, 6, 9]]

    Order is returned as a tuple
    >>> matrix.order
    (3, 3)

    Squareness and invertability are represented as bool
    >>> matrix.is_square
    True
    >>> matrix.is_invertable()
    False

    Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be
    a Matrix or Nonetype
    >>> print(matrix.identity())
    [[1. 0. 0.]
     [0. 1. 0.]
     [0. 0. 1.]]
    >>> print(matrix.minors())
    [[-3. -6. -3.]
     [-6. -12. -6.]
     [-3. -6. -3.]]
    >>> print(matrix.cofactors())
    [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
    >>> # won't be apparent due to the nature of the cofactor matrix
    >>> print(matrix.adjugate())
    [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
    >>> matrix.inverse()
    Traceback (most recent call last):
        ...
    TypeError: Only matrices with a non-zero determinant have an inverse

    Determinant is an int, float, or Nonetype
    >>> matrix.determinant()
    0

    Negation, scalar multiplication, addition, subtraction, multiplication and
    exponentiation are available and all return a Matrix
    >>> print(-matrix)
    [[-1. -2. -3.]
     [-4. -5. -6.]
     [-7. -8. -9.]]
    >>> matrix2 = matrix * 3
    >>> print(matrix2)
    [[3. 6. 9.]
     [12. 15. 18.]
     [21. 24. 27.]]
    >>> print(matrix + matrix2)
    [[4. 8. 12.]
     [16. 20. 24.]
     [28. 32. 36.]]
    >>> print(matrix - matrix2)
    [[-2. -4. -6.]
     [-8. -10. -12.]
     [-14. -16. -18.]]
    >>> print(matrix ** 3)
    [[468. 576. 684.]
     [1062. 1305. 1548.]
     [1656. 2034. 2412.]]

    Matrices can also be modified
    >>> matrix.add_row([10, 11, 12])
    >>> print(matrix)
    [[1. 2. 3.]
     [4. 5. 6.]
     [7. 8. 9.]
     [10. 11. 12.]]
    >>> matrix2.add_column([8, 16, 32])
    >>> print(matrix2)
    [[3. 6. 9. 8.]
     [12. 15. 18. 16.]
     [21. 24. 27. 32.]]
    >>> print(matrix * matrix2)
    [[90. 108. 126. 136.]
     [198. 243. 288. 304.]
     [306. 378. 450. 472.]
     [414. 513. 612. 640.]]
    """

    def __init__(self, rows: list[list[int]]):
        """Validate that ``rows`` is a rectangular 2D list of ints/floats."""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        """Return the matrix transposed as a list of columns (assumes >= 1 row)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Return the determinant (0 for non-square matrices)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Return the determinant of the submatrix with ``row``/``column`` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Return the minor with the checkerboard sign applied."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        """Return the matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        """Return the matrix of cofactors (minors with alternating signs)."""
        # PERF FIX: compute the minors once instead of once per element
        # (self.minors() rebuilds the whole minor matrix on every call).
        minors = self.minors()
        return Matrix(
            [
                [
                    minors.rows[row][column]
                    if (row + column) % 2 == 0
                    else minors.rows[row][column] * -1
                    for column in range(minors.num_columns)
                ]
                for row in range(minors.num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Return the transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        """Return the inverse, or raise TypeError when the determinant is zero."""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            # BUG FIX: join the stringified *values*, not the characters of
            # the list's repr, so a one-row matrix prints in the same
            # "[[1. 2. 3.]]" format as the multi-row branch below.
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append ``row`` (or insert it at ``position``) after validation."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append ``column`` (or insert it at ``position``) after validation."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # BUG FIX: do not truncate with int() -- inverse() multiplies the
            # adjugate by 1/determinant and needs the fractional part kept.
            return Matrix(
                [[element * other for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        # Repeated multiplication; exponents here are small in practice.
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        """Return the dot product of two equal-length sequences."""
        return sum(row[i] * column[i] for i in range(len(row)))
+
+
if __name__ == "__main__":
    # Exercise the doctests embedded in this module.
    from doctest import testmod

    testmod()
diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py
index dca01f9c3..f189f1898 100644
--- a/matrix/matrix_operation.py
+++ b/matrix/matrix_operation.py
@@ -4,8 +4,10 @@ Functions for 2D matrix operations
from __future__ import annotations
+from typing import Any
-def add(*matrix_s: list[list]) -> list[list]:
+
+def add(*matrix_s: list[list[int]]) -> list[list[int]]:
"""
>>> add([[1,2],[3,4]],[[2,3],[4,5]])
[[3, 5], [7, 9]]
@@ -13,19 +15,28 @@ def add(*matrix_s: list[list]) -> list[list]:
[[3.2, 5.4], [7, 9]]
>>> add([[1, 2], [4, 5]], [[3, 7], [3, 4]], [[3, 5], [5, 7]])
[[7, 14], [12, 16]]
+ >>> add([3], [4, 5])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a matrix, got int/list instead
"""
if all(_check_not_integer(m) for m in matrix_s):
for i in matrix_s[1:]:
_verify_matrix_sizes(matrix_s[0], i)
return [[sum(t) for t in zip(*m)] for m in zip(*matrix_s)]
+ raise TypeError("Expected a matrix, got int/list instead")
-def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]:
+def subtract(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]:
"""
>>> subtract([[1,2],[3,4]],[[2,3],[4,5]])
[[-1, -1], [-1, -1]]
>>> subtract([[1,2.5],[3,4]],[[2,3],[4,5.5]])
[[-1, -0.5], [-1, -1.5]]
+ >>> subtract([3], [4, 5])
+ Traceback (most recent call last):
+ ...
+ TypeError: Expected a matrix, got int/list instead
"""
if (
_check_not_integer(matrix_a)
@@ -33,9 +44,10 @@ def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]:
and _verify_matrix_sizes(matrix_a, matrix_b)
):
return [[i - j for i, j in zip(*m)] for m in zip(matrix_a, matrix_b)]
+ raise TypeError("Expected a matrix, got int/list instead")
-def scalar_multiply(matrix: list[list], n: int) -> list[list]:
+def scalar_multiply(matrix: list[list[int]], n: int | float) -> list[list[float]]:
"""
>>> scalar_multiply([[1,2],[3,4]],5)
[[5, 10], [15, 20]]
@@ -45,7 +57,7 @@ def scalar_multiply(matrix: list[list], n: int) -> list[list]:
return [[x * n for x in row] for row in matrix]
-def multiply(matrix_a: list[list], matrix_b: list[list]) -> list[list]:
+def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]:
"""
>>> multiply([[1,2],[3,4]],[[5,5],[7,5]])
[[19, 15], [43, 35]]
@@ -58,16 +70,17 @@ def multiply(matrix_a: list[list], matrix_b: list[list]) -> list[list]:
rows, cols = _verify_matrix_sizes(matrix_a, matrix_b)
if cols[0] != rows[1]:
- raise ValueError(
- f"Cannot multiply matrix of dimensions ({rows[0]},{cols[0]}) "
- f"and ({rows[1]},{cols[1]})"
+ msg = (
+ "Cannot multiply matrix of dimensions "
+ f"({rows[0]},{cols[0]}) and ({rows[1]},{cols[1]})"
)
+ raise ValueError(msg)
return [
[sum(m * n for m, n in zip(i, j)) for j in zip(*matrix_b)] for i in matrix_a
]
-def identity(n: int) -> list[list]:
+def identity(n: int) -> list[list[int]]:
"""
:param n: dimension for nxn matrix
:type n: int
@@ -79,21 +92,28 @@ def identity(n: int) -> list[list]:
return [[int(row == column) for column in range(n)] for row in range(n)]
-def transpose(matrix: list[list], return_map: bool = True) -> list[list]:
+def transpose(
+ matrix: list[list[int]], return_map: bool = True
+) -> list[list[int]] | map[list[int]]:
"""
>>> transpose([[1,2],[3,4]]) # doctest: +ELLIPSIS