Mirror of https://github.com/TheAlgorithms/Python.git, synced 2024-11-27 15:01:08 +00:00
Add pep8-naming to pre-commit hooks and fixes incorrect naming conventions (#7062)
* ci(pre-commit): Add pep8-naming to `pre-commit` hooks (#7038)
* refactor: Fix naming conventions (#7038)
* Update arithmetic_analysis/lu_decomposition.py
  Co-authored-by: Christian Clauss <cclauss@me.com>
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* refactor(lu_decomposition): Replace `NDArray` with `ArrayLike` (#7038)
* chore: Fix naming conventions in doctests (#7038)
* fix: Temporarily disable project euler problem 104 (#7069)
* chore: Fix naming conventions in doctests (#7038)

Co-authored-by: Christian Clauss <cclauss@me.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in: parent e2cd982b11, commit 07e991d553
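The diff below enables the `pep8-naming` flake8 plugin in pre-commit and renames identifiers to satisfy its N8xx checks. As a rough illustration of the kind of rename it enforces — a sketch with hypothetical names, not code from this commit — a camelCase function would be flagged (N802) and rewritten in snake_case:

```python
# Illustrative sketch only; names are hypothetical and not part of the commit.
# pep8-naming would flag "def getUserName(userId):" with N802/N803.
def get_user_name(user_id: int) -> str:
    """Return a display name for the given user id."""
    return f"user-{user_id}"
```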
@@ -40,6 +40,7 @@ repos:
           - --ignore=E203,W503
           - --max-complexity=25
           - --max-line-length=88
+        additional_dependencies: [pep8-naming]

   - repo: https://github.com/pre-commit/mirrors-mypy
     rev: v0.982
@@ -6,13 +6,13 @@ Reference:
 from __future__ import annotations

 import numpy as np
-import numpy.typing as NDArray
 from numpy import float64
+from numpy.typing import ArrayLike


 def lower_upper_decomposition(
-    table: NDArray[float64],
-) -> tuple[NDArray[float64], NDArray[float64]]:
+    table: ArrayLike[float64],
+) -> tuple[ArrayLike[float64], ArrayLike[float64]]:
     """Lower-Upper (LU) Decomposition

     Example:
@@ -12,7 +12,7 @@ from __future__ import annotations
 solution = []


-def isSafe(board: list[list[int]], row: int, column: int) -> bool:
+def is_safe(board: list[list[int]], row: int, column: int) -> bool:
     """
     This function returns a boolean value True if it is safe to place a queen there
     considering the current state of the board.
@@ -63,7 +63,7 @@ def solve(board: list[list[int]], row: int) -> bool:
     If all the combinations for that particular branch are successful the board is
     reinitialized for the next possible combination.
     """
-        if isSafe(board, row, i):
+        if is_safe(board, row, i):
             board[row][i] = 1
             solve(board, row + 1)
             board[row][i] = 0
@@ -9,26 +9,26 @@ SYMBOLS = (
 )


-def check_keys(keyA: int, keyB: int, mode: str) -> None:
+def check_keys(key_a: int, key_b: int, mode: str) -> None:
     if mode == "encrypt":
-        if keyA == 1:
+        if key_a == 1:
             sys.exit(
                 "The affine cipher becomes weak when key "
                 "A is set to 1. Choose different key"
             )
-        if keyB == 0:
+        if key_b == 0:
             sys.exit(
                 "The affine cipher becomes weak when key "
                 "B is set to 0. Choose different key"
             )
-    if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:
+    if key_a < 0 or key_b < 0 or key_b > len(SYMBOLS) - 1:
         sys.exit(
             "Key A must be greater than 0 and key B must "
             f"be between 0 and {len(SYMBOLS) - 1}."
         )
-    if cryptomath.gcd(keyA, len(SYMBOLS)) != 1:
+    if cryptomath.gcd(key_a, len(SYMBOLS)) != 1:
         sys.exit(
-            f"Key A {keyA} and the symbol set size {len(SYMBOLS)} "
+            f"Key A {key_a} and the symbol set size {len(SYMBOLS)} "
             "are not relatively prime. Choose a different key."
         )

@@ -39,16 +39,16 @@ def encrypt_message(key: int, message: str) -> str:
     ... 'substitution cipher.')
     'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi'
     """
-    keyA, keyB = divmod(key, len(SYMBOLS))
-    check_keys(keyA, keyB, "encrypt")
-    cipherText = ""
+    key_a, key_b = divmod(key, len(SYMBOLS))
+    check_keys(key_a, key_b, "encrypt")
+    cipher_text = ""
     for symbol in message:
         if symbol in SYMBOLS:
-            symIndex = SYMBOLS.find(symbol)
-            cipherText += SYMBOLS[(symIndex * keyA + keyB) % len(SYMBOLS)]
+            sym_index = SYMBOLS.find(symbol)
+            cipher_text += SYMBOLS[(sym_index * key_a + key_b) % len(SYMBOLS)]
         else:
-            cipherText += symbol
-    return cipherText
+            cipher_text += symbol
+    return cipher_text


 def decrypt_message(key: int, message: str) -> str:
@@ -57,25 +57,27 @@ def decrypt_message(key: int, message: str) -> str:
     ... '{xIp~{HL}Gi')
     'The affine cipher is a type of monoalphabetic substitution cipher.'
     """
-    keyA, keyB = divmod(key, len(SYMBOLS))
-    check_keys(keyA, keyB, "decrypt")
-    plainText = ""
-    modInverseOfkeyA = cryptomath.find_mod_inverse(keyA, len(SYMBOLS))
+    key_a, key_b = divmod(key, len(SYMBOLS))
+    check_keys(key_a, key_b, "decrypt")
+    plain_text = ""
+    mod_inverse_of_key_a = cryptomath.find_mod_inverse(key_a, len(SYMBOLS))
     for symbol in message:
         if symbol in SYMBOLS:
-            symIndex = SYMBOLS.find(symbol)
-            plainText += SYMBOLS[(symIndex - keyB) * modInverseOfkeyA % len(SYMBOLS)]
+            sym_index = SYMBOLS.find(symbol)
+            plain_text += SYMBOLS[
+                (sym_index - key_b) * mod_inverse_of_key_a % len(SYMBOLS)
+            ]
         else:
-            plainText += symbol
-    return plainText
+            plain_text += symbol
+    return plain_text


 def get_random_key() -> int:
     while True:
-        keyA = random.randint(2, len(SYMBOLS))
-        keyB = random.randint(2, len(SYMBOLS))
-        if cryptomath.gcd(keyA, len(SYMBOLS)) == 1 and keyB % len(SYMBOLS) != 0:
-            return keyA * len(SYMBOLS) + keyB
+        key_b = random.randint(2, len(SYMBOLS))
+        key_b = random.randint(2, len(SYMBOLS))
+        if cryptomath.gcd(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0:
+            return key_b * len(SYMBOLS) + key_b


 def main() -> None:
@@ -12,7 +12,7 @@ import numpy as np

 class BifidCipher:
     def __init__(self) -> None:
-        SQUARE = [
+        SQUARE = [  # noqa: N806
             ["a", "b", "c", "d", "e"],
             ["f", "g", "h", "i", "k"],
             ["l", "m", "n", "o", "p"],
@@ -28,7 +28,7 @@ def decrypt(message: str) -> None:
     Decryption using Key #24: VOFGVWZ ROFXW
     Decryption using Key #25: UNEFUVY QNEWV
     """
-    LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # noqa: N806
     for key in range(len(LETTERS)):
         translated = ""
         for symbol in message:
@@ -26,7 +26,7 @@ def primitive_root(p_val: int) -> int:

 def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
     print("Generating prime p...")
-    p = rabin_miller.generateLargePrime(key_size)  # select large prime number.
+    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
     e_1 = primitive_root(p)  # one primitive root on modulo p.
     d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
     e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
@@ -37,7 +37,7 @@ def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, i
     return public_key, private_key


-def make_key_files(name: str, keySize: int) -> None:
+def make_key_files(name: str, key_size: int) -> None:
     if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
         print("\nWARNING:")
         print(
@@ -47,16 +47,16 @@ def make_key_files(name: str, keySize: int) -> None:
         )
         sys.exit()

-    publicKey, privateKey = generate_key(keySize)
+    public_key, private_key = generate_key(key_size)
     print(f"\nWriting public key to file {name}_pubkey.txt...")
     with open(f"{name}_pubkey.txt", "w") as fo:
         fo.write(
-            "%d,%d,%d,%d" % (publicKey[0], publicKey[1], publicKey[2], publicKey[3])
+            "%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3])
         )

     print(f"Writing private key to file {name}_privkey.txt...")
     with open(f"{name}_privkey.txt", "w") as fo:
-        fo.write("%d,%d" % (privateKey[0], privateKey[1]))
+        fo.write("%d,%d" % (private_key[0], private_key[1]))


 def main() -> None:
@@ -201,11 +201,11 @@ class HillCipher:


 def main() -> None:
-    N = int(input("Enter the order of the encryption key: "))
+    n = int(input("Enter the order of the encryption key: "))
     hill_matrix = []

     print("Enter each row of the encryption key with space separated integers")
-    for _ in range(N):
+    for _ in range(n):
         row = [int(x) for x in input().split()]
         hill_matrix.append(row)

@@ -11,7 +11,7 @@ import numpy as np

 class PolybiusCipher:
     def __init__(self) -> None:
-        SQUARE = [
+        SQUARE = [  # noqa: N806
             ["a", "b", "c", "d", "e"],
             ["f", "g", "h", "i", "k"],
             ["l", "m", "n", "o", "p"],
@@ -3,7 +3,7 @@
 import random


-def rabinMiller(num: int) -> bool:
+def rabin_miller(num: int) -> bool:
     s = num - 1
     t = 0

@@ -29,7 +29,7 @@ def is_prime_low_num(num: int) -> bool:
     if num < 2:
         return False

-    lowPrimes = [
+    low_primes = [
         2,
         3,
         5,
@@ -200,17 +200,17 @@ def is_prime_low_num(num: int) -> bool:
         997,
     ]

-    if num in lowPrimes:
+    if num in low_primes:
         return True

-    for prime in lowPrimes:
+    for prime in low_primes:
         if (num % prime) == 0:
             return False

-    return rabinMiller(num)
+    return rabin_miller(num)


-def generateLargePrime(keysize: int = 1024) -> int:
+def generate_large_prime(keysize: int = 1024) -> int:
     while True:
         num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
         if is_prime_low_num(num):
@@ -218,6 +218,6 @@ def generateLargePrime(keysize: int = 1024) -> int:


 if __name__ == "__main__":
-    num = generateLargePrime()
+    num = generate_large_prime()
     print(("Prime number:", num))
     print(("is_prime_low_num:", is_prime_low_num(num)))
@@ -37,12 +37,12 @@ def get_text_from_blocks(


 def encrypt_message(
-    message: str, key: tuple[int, int], blockSize: int = DEFAULT_BLOCK_SIZE
+    message: str, key: tuple[int, int], block_size: int = DEFAULT_BLOCK_SIZE
 ) -> list[int]:
     encrypted_blocks = []
     n, e = key

-    for block in get_blocks_from_text(message, blockSize):
+    for block in get_blocks_from_text(message, block_size):
         encrypted_blocks.append(pow(block, e, n))
     return encrypted_blocks

@@ -63,8 +63,8 @@ def decrypt_message(
 def read_key_file(key_filename: str) -> tuple[int, int, int]:
     with open(key_filename) as fo:
         content = fo.read()
-    key_size, n, EorD = content.split(",")
-    return (int(key_size), int(n), int(EorD))
+    key_size, n, eor_d = content.split(",")
+    return (int(key_size), int(n), int(eor_d))


 def encrypt_and_write_to_file(
@@ -125,15 +125,15 @@ def main() -> None:

     if mode == "encrypt":
         if not os.path.exists("rsa_pubkey.txt"):
-            rkg.makeKeyFiles("rsa", 1024)
+            rkg.make_key_files("rsa", 1024)

         message = input("\nEnter message: ")
         pubkey_filename = "rsa_pubkey.txt"
         print(f"Encrypting and writing to {filename}...")
-        encryptedText = encrypt_and_write_to_file(filename, pubkey_filename, message)
+        encrypted_text = encrypt_and_write_to_file(filename, pubkey_filename, message)

         print("\nEncrypted text:")
-        print(encryptedText)
+        print(encrypted_text)

     elif mode == "decrypt":
         privkey_filename = "rsa_privkey.txt"
@@ -13,7 +13,7 @@ import math
 import random


-def rsafactor(d: int, e: int, N: int) -> list[int]:
+def rsafactor(d: int, e: int, n: int) -> list[int]:
     """
     This function returns the factors of N, where p*q=N
     Return: [p, q]
@@ -35,16 +35,16 @@ def rsafactor(d: int, e: int, N: int) -> list[int]:
     p = 0
     q = 0
     while p == 0:
-        g = random.randint(2, N - 1)
+        g = random.randint(2, n - 1)
         t = k
         while True:
             if t % 2 == 0:
                 t = t // 2
-                x = (g**t) % N
-                y = math.gcd(x - 1, N)
+                x = (g**t) % n
+                y = math.gcd(x - 1, n)
                 if x > 1 and y > 1:
                     p = y
-                    q = N // y
+                    q = n // y
                     break  # find the correct factors
             else:
                 break  # t is not divisible by 2, break and choose another g
@@ -2,38 +2,38 @@ import os
 import random
 import sys

-from . import cryptomath_module as cryptoMath
-from . import rabin_miller as rabinMiller
+from . import cryptomath_module as cryptoMath  # noqa: N812
+from . import rabin_miller as rabinMiller  # noqa: N812


 def main() -> None:
     print("Making key files...")
-    makeKeyFiles("rsa", 1024)
+    make_key_files("rsa", 1024)
     print("Key files generation successful.")


-def generateKey(keySize: int) -> tuple[tuple[int, int], tuple[int, int]]:
+def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
     print("Generating prime p...")
-    p = rabinMiller.generateLargePrime(keySize)
+    p = rabinMiller.generate_large_prime(key_size)
     print("Generating prime q...")
-    q = rabinMiller.generateLargePrime(keySize)
+    q = rabinMiller.generate_large_prime(key_size)
     n = p * q

     print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
     while True:
-        e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
+        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
         if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
             break

     print("Calculating d that is mod inverse of e...")
     d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

-    publicKey = (n, e)
-    privateKey = (n, d)
-    return (publicKey, privateKey)
+    public_key = (n, e)
+    private_key = (n, d)
+    return (public_key, private_key)


-def makeKeyFiles(name: str, keySize: int) -> None:
+def make_key_files(name: str, key_size: int) -> None:
     if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
         print("\nWARNING:")
         print(
@@ -43,14 +43,14 @@ def makeKeyFiles(name: str, keySize: int) -> None:
         )
         sys.exit()

-    publicKey, privateKey = generateKey(keySize)
+    public_key, private_key = generate_key(key_size)
     print(f"\nWriting public key to file {name}_pubkey.txt...")
     with open(f"{name}_pubkey.txt", "w") as out_file:
-        out_file.write(f"{keySize},{publicKey[0]},{publicKey[1]}")
+        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

     print(f"Writing private key to file {name}_privkey.txt...")
     with open(f"{name}_privkey.txt", "w") as out_file:
-        out_file.write(f"{keySize},{privateKey[0]},{privateKey[1]}")
+        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


 if __name__ == "__main__":
@@ -9,66 +9,66 @@ def main() -> None:
     key = "LFWOAYUISVKMNXPBDCRJTQEGHZ"
     resp = input("Encrypt/Decrypt [e/d]: ")

-    checkValidKey(key)
+    check_valid_key(key)

     if resp.lower().startswith("e"):
         mode = "encrypt"
-        translated = encryptMessage(key, message)
+        translated = encrypt_message(key, message)
     elif resp.lower().startswith("d"):
         mode = "decrypt"
-        translated = decryptMessage(key, message)
+        translated = decrypt_message(key, message)

     print(f"\n{mode.title()}ion: \n{translated}")


-def checkValidKey(key: str) -> None:
-    keyList = list(key)
-    lettersList = list(LETTERS)
-    keyList.sort()
-    lettersList.sort()
+def check_valid_key(key: str) -> None:
+    key_list = list(key)
+    letters_list = list(LETTERS)
+    key_list.sort()
+    letters_list.sort()

-    if keyList != lettersList:
+    if key_list != letters_list:
         sys.exit("Error in the key or symbol set.")


-def encryptMessage(key: str, message: str) -> str:
+def encrypt_message(key: str, message: str) -> str:
     """
-    >>> encryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji')
+    >>> encrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji')
     'Ilcrism Olcvs'
     """
-    return translateMessage(key, message, "encrypt")
+    return translate_message(key, message, "encrypt")


-def decryptMessage(key: str, message: str) -> str:
+def decrypt_message(key: str, message: str) -> str:
     """
-    >>> decryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs')
+    >>> decrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs')
     'Harshil Darji'
     """
-    return translateMessage(key, message, "decrypt")
+    return translate_message(key, message, "decrypt")


-def translateMessage(key: str, message: str, mode: str) -> str:
+def translate_message(key: str, message: str, mode: str) -> str:
     translated = ""
-    charsA = LETTERS
-    charsB = key
+    chars_a = LETTERS
+    chars_b = key

     if mode == "decrypt":
-        charsA, charsB = charsB, charsA
+        chars_a, chars_b = chars_b, chars_a

     for symbol in message:
-        if symbol.upper() in charsA:
-            symIndex = charsA.find(symbol.upper())
+        if symbol.upper() in chars_a:
+            sym_index = chars_a.find(symbol.upper())
             if symbol.isupper():
-                translated += charsB[symIndex].upper()
+                translated += chars_b[sym_index].upper()
             else:
-                translated += charsB[symIndex].lower()
+                translated += chars_b[sym_index].lower()
         else:
             translated += symbol

     return translated


-def getRandomKey() -> str:
+def get_random_key() -> str:
     key = list(LETTERS)
     random.shuffle(key)
     return "".join(key)
@@ -2,12 +2,12 @@
 from __future__ import annotations


-def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str:
+def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str:
     one, two, three = "", "", ""
     tmp = []

-    for character in messagePart:
-        tmp.append(character2Number[character])
+    for character in message_part:
+        tmp.append(character_to_number[character])

     for each in tmp:
         one += each[0]
@@ -17,18 +17,18 @@ def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str:
     return one + two + three


-def __decryptPart(
-    messagePart: str, character2Number: dict[str, str]
+def __decrypt_part(
+    message_part: str, character_to_number: dict[str, str]
 ) -> tuple[str, str, str]:
-    tmp, thisPart = "", ""
+    tmp, this_part = "", ""
     result = []

-    for character in messagePart:
-        thisPart += character2Number[character]
+    for character in message_part:
+        this_part += character_to_number[character]

-    for digit in thisPart:
+    for digit in this_part:
         tmp += digit
-        if len(tmp) == len(messagePart):
+        if len(tmp) == len(message_part):
             result.append(tmp)
             tmp = ""

@@ -79,51 +79,57 @@ def __prepare(
         "332",
         "333",
     )
-    character2Number = {}
-    number2Character = {}
+    character_to_number = {}
+    number_to_character = {}
     for letter, number in zip(alphabet, numbers):
-        character2Number[letter] = number
-        number2Character[number] = letter
+        character_to_number[letter] = number
+        number_to_character[number] = letter

-    return message, alphabet, character2Number, number2Character
+    return message, alphabet, character_to_number, number_to_character


-def encryptMessage(
+def encrypt_message(
     message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5
 ) -> str:
-    message, alphabet, character2Number, number2Character = __prepare(message, alphabet)
+    message, alphabet, character_to_number, number_to_character = __prepare(
+        message, alphabet
+    )
     encrypted, encrypted_numeric = "", ""

     for i in range(0, len(message) + 1, period):
-        encrypted_numeric += __encryptPart(message[i : i + period], character2Number)
+        encrypted_numeric += __encrypt_part(
+            message[i : i + period], character_to_number
+        )

     for i in range(0, len(encrypted_numeric), 3):
-        encrypted += number2Character[encrypted_numeric[i : i + 3]]
+        encrypted += number_to_character[encrypted_numeric[i : i + 3]]

     return encrypted


-def decryptMessage(
+def decrypt_message(
     message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5
 ) -> str:
-    message, alphabet, character2Number, number2Character = __prepare(message, alphabet)
+    message, alphabet, character_to_number, number_to_character = __prepare(
+        message, alphabet
+    )
     decrypted_numeric = []
     decrypted = ""

     for i in range(0, len(message) + 1, period):
-        a, b, c = __decryptPart(message[i : i + period], character2Number)
+        a, b, c = __decrypt_part(message[i : i + period], character_to_number)

         for j in range(0, len(a)):
             decrypted_numeric.append(a[j] + b[j] + c[j])

     for each in decrypted_numeric:
-        decrypted += number2Character[each]
+        decrypted += number_to_character[each]

     return decrypted


 if __name__ == "__main__":
     msg = "DEFEND THE EAST WALL OF THE CASTLE."
-    encrypted = encryptMessage(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
-    decrypted = decryptMessage(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
+    encrypted = encrypt_message(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
+    decrypted = decrypt_message(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ")
     print(f"Encrypted: {encrypted}\nDecrypted: {decrypted}")
@@ -14,53 +14,53 @@ def main() -> None:
     mode = input("Encryption/Decryption [e/d]: ")

     if mode.lower().startswith("e"):
-        text = encryptMessage(key, message)
+        text = encrypt_message(key, message)
     elif mode.lower().startswith("d"):
-        text = decryptMessage(key, message)
+        text = decrypt_message(key, message)

     # Append pipe symbol (vertical bar) to identify spaces at the end.
     print(f"Output:\n{text + '|'}")


-def encryptMessage(key: int, message: str) -> str:
+def encrypt_message(key: int, message: str) -> str:
     """
-    >>> encryptMessage(6, 'Harshil Darji')
+    >>> encrypt_message(6, 'Harshil Darji')
     'Hlia rDsahrij'
     """
-    cipherText = [""] * key
+    cipher_text = [""] * key
     for col in range(key):
         pointer = col
         while pointer < len(message):
-            cipherText[col] += message[pointer]
+            cipher_text[col] += message[pointer]
             pointer += key
-    return "".join(cipherText)
+    return "".join(cipher_text)


-def decryptMessage(key: int, message: str) -> str:
+def decrypt_message(key: int, message: str) -> str:
     """
-    >>> decryptMessage(6, 'Hlia rDsahrij')
+    >>> decrypt_message(6, 'Hlia rDsahrij')
     'Harshil Darji'
     """
-    numCols = math.ceil(len(message) / key)
-    numRows = key
-    numShadedBoxes = (numCols * numRows) - len(message)
-    plainText = [""] * numCols
+    num_cols = math.ceil(len(message) / key)
+    num_rows = key
+    num_shaded_boxes = (num_cols * num_rows) - len(message)
+    plain_text = [""] * num_cols
     col = 0
     row = 0

     for symbol in message:
-        plainText[col] += symbol
+        plain_text[col] += symbol
         col += 1

         if (
-            (col == numCols)
-            or (col == numCols - 1)
-            and (row >= numRows - numShadedBoxes)
+            (col == num_cols)
+            or (col == num_cols - 1)
+            and (row >= num_rows - num_shaded_boxes)
         ):
             col = 0
             row += 1

-    return "".join(plainText)
+    return "".join(plain_text)


 if __name__ == "__main__":
@@ -2,39 +2,39 @@ import os
 import sys
 import time

-from . import transposition_cipher as transCipher
+from . import transposition_cipher as trans_cipher


 def main() -> None:
-    inputFile = "Prehistoric Men.txt"
-    outputFile = "Output.txt"
+    input_file = "Prehistoric Men.txt"
+    output_file = "Output.txt"
     key = int(input("Enter key: "))
     mode = input("Encrypt/Decrypt [e/d]: ")

-    if not os.path.exists(inputFile):
-        print(f"File {inputFile} does not exist. Quitting...")
+    if not os.path.exists(input_file):
+        print(f"File {input_file} does not exist. Quitting...")
         sys.exit()
-    if os.path.exists(outputFile):
-        print(f"Overwrite {outputFile}? [y/n]")
+    if os.path.exists(output_file):
+        print(f"Overwrite {output_file}? [y/n]")
         response = input("> ")
         if not response.lower().startswith("y"):
            sys.exit()

-    startTime = time.time()
+    start_time = time.time()
     if mode.lower().startswith("e"):
-        with open(inputFile) as f:
+        with open(input_file) as f:
             content = f.read()
-        translated = transCipher.encryptMessage(key, content)
+        translated = trans_cipher.encrypt_message(key, content)
     elif mode.lower().startswith("d"):
-        with open(outputFile) as f:
+        with open(output_file) as f:
             content = f.read()
-        translated = transCipher.decryptMessage(key, content)
+        translated = trans_cipher.decrypt_message(key, content)

-    with open(outputFile, "w") as outputObj:
-        outputObj.write(translated)
+    with open(output_file, "w") as output_obj:
+        output_obj.write(translated)

-    totalTime = round(time.time() - startTime, 2)
-    print(("Done (", totalTime, "seconds )"))
+    total_time = round(time.time() - start_time, 2)
+    print(("Done (", total_time, "seconds )"))


 if __name__ == "__main__":
@@ -8,43 +8,43 @@ def main() -> None:

     if mode.lower().startswith("e"):
         mode = "encrypt"
-        translated = encryptMessage(key, message)
+        translated = encrypt_message(key, message)
     elif mode.lower().startswith("d"):
         mode = "decrypt"
-        translated = decryptMessage(key, message)
+        translated = decrypt_message(key, message)

     print(f"\n{mode.title()}ed message:")
     print(translated)


-def encryptMessage(key: str, message: str) -> str:
+def encrypt_message(key: str, message: str) -> str:
     """
-    >>> encryptMessage('HDarji', 'This is Harshil Darji from Dharmaj.')
+    >>> encrypt_message('HDarji', 'This is Harshil Darji from Dharmaj.')
     'Akij ra Odrjqqs Gaisq muod Mphumrs.'
     """
-    return translateMessage(key, message, "encrypt")
+    return translate_message(key, message, "encrypt")


-def decryptMessage(key: str, message: str) -> str:
+def decrypt_message(key: str, message: str) -> str:
     """
-    >>> decryptMessage('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.')
+    >>> decrypt_message('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.')
     'This is Harshil Darji from Dharmaj.'
     """
-    return translateMessage(key, message, "decrypt")
+    return translate_message(key, message, "decrypt")


-def translateMessage(key: str, message: str, mode: str) -> str:
+def translate_message(key: str, message: str, mode: str) -> str:
     translated = []
-    keyIndex = 0
+    key_index = 0
     key = key.upper()

     for symbol in message:
         num = LETTERS.find(symbol.upper())
         if num != -1:
             if mode == "encrypt":
-                num += LETTERS.find(key[keyIndex])
+                num += LETTERS.find(key[key_index])
             elif mode == "decrypt":
-                num -= LETTERS.find(key[keyIndex])
+                num -= LETTERS.find(key[key_index])

             num %= len(LETTERS)

@@ -53,9 +53,9 @@ def translateMessage(key: str, message: str, mode: str) -> str:
             elif symbol.islower():
                 translated.append(LETTERS[num].lower())

-            keyIndex += 1
-            if keyIndex == len(key):
-                keyIndex = 0
+            key_index += 1
+            if key_index == len(key):
+                key_index = 0
         else:
             translated.append(symbol)
     return "".join(translated)
@@ -43,10 +43,10 @@ def decompress_data(data_bits: str) -> str:
         lexicon[curr_string] = last_match_id + "0"

         if math.log2(index).is_integer():
-            newLex = {}
+            new_lex = {}
             for curr_key in list(lexicon):
-                newLex["0" + curr_key] = lexicon.pop(curr_key)
-            lexicon = newLex
+                new_lex["0" + curr_key] = lexicon.pop(curr_key)
+            lexicon = new_lex

         lexicon[bin(index)[2:]] = last_match_id + "1"
         index += 1
@@ -16,8 +16,8 @@ def psnr(original: float, contrast: float) -> float:
     mse = np.mean((original - contrast) ** 2)
     if mse == 0:
         return 100
-    PIXEL_MAX = 255.0
-    PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
+    PIXEL_MAX = 255.0  # noqa: N806
+    PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse))  # noqa: N806
     return PSNR

@@ -7,7 +7,7 @@ https://en.wikipedia.org/wiki/Harris_Corner_Detector
 """


-class Harris_Corner:
+class HarrisCorner:
     def __init__(self, k: float, window_size: int):

         """
@@ -70,6 +70,6 @@ class Harris_Corner:

 if __name__ == "__main__":

-    edge_detect = Harris_Corner(0.04, 3)
+    edge_detect = HarrisCorner(0.04, 3)
     color_img, _ = edge_detect.detect("path_to_image")
     cv2.imwrite("detect.png", color_img)
@@ -17,7 +17,7 @@ def bin_to_hexadecimal(binary_str: str) -> str:
     ...
     ValueError: Empty string was passed to the function
     """
-    BITS_TO_HEX = {
+    BITS_TO_HEX = {  # noqa: N806
         "0000": "0",
         "0001": "1",
         "0010": "2",
@@ -66,7 +66,7 @@ def decimal_to_any(num: int, base: int) -> str:
     if base > 36:
         raise ValueError("base must be <= 36")
     # fmt: off
-    ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F',
+    ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F',  # noqa: N806, E501
                        '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L',
                        '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R',
                        '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X',
@@ -6,7 +6,7 @@ from __future__ import annotations
 from enum import Enum


-class SI_Unit(Enum):
+class SIUnit(Enum):
     yotta = 24
     zetta = 21
     exa = 18
@@ -29,7 +29,7 @@ class SI_Unit(Enum):
     yocto = -24


-class Binary_Unit(Enum):
+class BinaryUnit(Enum):
     yotta = 8
     zetta = 7
     exa = 6
@@ -42,17 +42,17 @@ class Binary_Unit(Enum):

 def convert_si_prefix(
     known_amount: float,
-    known_prefix: str | SI_Unit,
-    unknown_prefix: str | SI_Unit,
+    known_prefix: str | SIUnit,
+    unknown_prefix: str | SIUnit,
 ) -> float:
     """
     Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix
     Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units
-    >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega)
+    >>> convert_si_prefix(1, SIUnit.giga, SIUnit.mega)
     1000
-    >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga)
+    >>> convert_si_prefix(1, SIUnit.mega, SIUnit.giga)
     0.001
-    >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo)
+    >>> convert_si_prefix(1, SIUnit.kilo, SIUnit.kilo)
     1
     >>> convert_si_prefix(1, 'giga', 'mega')
     1000
@@ -60,9 +60,9 @@ def convert_si_prefix(
     1000
     """
     if isinstance(known_prefix, str):
-        known_prefix = SI_Unit[known_prefix.lower()]
+        known_prefix = SIUnit[known_prefix.lower()]
     if isinstance(unknown_prefix, str):
-        unknown_prefix = SI_Unit[unknown_prefix.lower()]
+        unknown_prefix = SIUnit[unknown_prefix.lower()]
     unknown_amount: float = known_amount * (
         10 ** (known_prefix.value - unknown_prefix.value)
     )
@@ -71,16 +71,16 @@ def convert_si_prefix(

 def convert_binary_prefix(
     known_amount: float,
-    known_prefix: str | Binary_Unit,
-    unknown_prefix: str | Binary_Unit,
+    known_prefix: str | BinaryUnit,
+    unknown_prefix: str | BinaryUnit,
 ) -> float:
     """
     Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix
-    >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega)
+    >>> convert_binary_prefix(1, BinaryUnit.giga, BinaryUnit.mega)
     1024
-    >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga)
+    >>> convert_binary_prefix(1, BinaryUnit.mega, BinaryUnit.giga)
     0.0009765625
-    >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo)
+    >>> convert_binary_prefix(1, BinaryUnit.kilo, BinaryUnit.kilo)
     1
     >>> convert_binary_prefix(1, 'giga', 'mega')
     1024
@@ -88,9 +88,9 @@ def convert_binary_prefix(
     1024
     """
     if isinstance(known_prefix, str):
-        known_prefix = Binary_Unit[known_prefix.lower()]
+        known_prefix = BinaryUnit[known_prefix.lower()]
     if isinstance(unknown_prefix, str):
-        unknown_prefix = Binary_Unit[unknown_prefix.lower()]
+        unknown_prefix = BinaryUnit[unknown_prefix.lower()]
     unknown_amount: float = known_amount * (
         2 ** ((known_prefix.value - unknown_prefix.value) * 10)
     )
@@ -29,7 +29,7 @@ def int_to_roman(number: int) -> str:
     >>> all(int_to_roman(value) == key for key, value in tests.items())
     True
     """
-    ROMAN = [
+    ROMAN = [  # noqa: N806
        (1000, "M"),
        (900, "CM"),
        (500, "D"),
@@ -12,7 +12,7 @@ import random
 from typing import Any


-class my_queue:
+class MyQueue:
     def __init__(self) -> None:
         self.data: list[Any] = []
         self.head: int = 0
@@ -39,20 +39,20 @@ class my_queue:
         print(self.data[self.head : self.tail])


-class my_node:
+class MyNode:
     def __init__(self, data: Any) -> None:
         self.data = data
-        self.left: my_node | None = None
-        self.right: my_node | None = None
+        self.left: MyNode | None = None
+        self.right: MyNode | None = None
         self.height: int = 1

     def get_data(self) -> Any:
         return self.data

-    def get_left(self) -> my_node | None:
+    def get_left(self) -> MyNode | None:
         return self.left

-    def get_right(self) -> my_node | None:
+    def get_right(self) -> MyNode | None:
         return self.right

     def get_height(self) -> int:
@@ -62,11 +62,11 @@ class my_node:
         self.data = data
         return

-    def set_left(self, node: my_node | None) -> None:
+    def set_left(self, node: MyNode | None) -> None:
         self.left = node
         return

-    def set_right(self, node: my_node | None) -> None:
+    def set_right(self, node: MyNode | None) -> None:
         self.right = node
         return

@@ -75,7 +75,7 @@ class my_node:
         return


-def get_height(node: my_node | None) -> int:
+def get_height(node: MyNode | None) -> int:
     if node is None:
         return 0
     return node.get_height()
@@ -87,7 +87,7 @@ def my_max(a: int, b: int) -> int:
     return b


-def right_rotation(node: my_node) -> my_node:
+def right_rotation(node: MyNode) -> MyNode:
     r"""
     A B
     / \ / \
@@ -110,7 +110,7 @@ def right_rotation(node: my_node) -> my_node:
     return ret


-def left_rotation(node: my_node) -> my_node:
+def left_rotation(node: MyNode) -> MyNode:
     """
     a mirror symmetry rotation of the left_rotation
     """
@@ -126,7 +126,7 @@ def left_rotation(node: my_node) -> my_node:
     return ret


-def lr_rotation(node: my_node) -> my_node:
+def lr_rotation(node: MyNode) -> MyNode:
     r"""
     A A Br
     / \ / \ / \
@@ -143,16 +143,16 @@ def lr_rotation(node: my_node) -> my_node:
     return right_rotation(node)


-def rl_rotation(node: my_node) -> my_node:
+def rl_rotation(node: MyNode) -> MyNode:
     right_child = node.get_right()
     assert right_child is not None
     node.set_right(right_rotation(right_child))
     return left_rotation(node)


-def insert_node(node: my_node | None, data: Any) -> my_node | None:
+def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
     if node is None:
-        return my_node(data)
+        return MyNode(data)
     if data < node.get_data():
         node.set_left(insert_node(node.get_left(), data))
         if (
@@ -180,7 +180,7 @@ def insert_node(node: my_node | None, data: Any) -> my_node | None:
     return node


-def get_rightMost(root: my_node) -> Any:
+def get_right_most(root: MyNode) -> Any:
     while True:
         right_child = root.get_right()
         if right_child is None:
@@ -189,7 +189,7 @@ def get_rightMost(root: my_node) -> Any:
     return root.get_data()


-def get_leftMost(root: my_node) -> Any:
+def get_left_most(root: MyNode) -> Any:
     while True:
         left_child = root.get_left()
         if left_child is None:
@@ -198,12 +198,12 @@ def get_leftMost(root: my_node) -> Any:
     return root.get_data()


-def del_node(root: my_node, data: Any) -> my_node | None:
+def del_node(root: MyNode, data: Any) -> MyNode | None:
     left_child = root.get_left()
     right_child = root.get_right()
     if root.get_data() == data:
         if left_child is not None and right_child is not None:
-            temp_data = get_leftMost(right_child)
+            temp_data = get_left_most(right_child)
             root.set_data(temp_data)
             root.set_right(del_node(right_child, temp_data))
         elif left_child is not None:
@@ -276,7 +276,7 @@ class AVLtree:
     """

     def __init__(self) -> None:
-        self.root: my_node | None = None
+        self.root: MyNode | None = None

     def get_height(self) -> int:
         return get_height(self.root)
@@ -296,7 +296,7 @@ class AVLtree:
         self,
     ) -> str:  # a level traversale, gives a more intuitive look on the tree
         output = ""
-        q = my_queue()
+        q = MyQueue()
         q.push(self.root)
         layer = self.get_height()
         if layer == 0:
@@ -37,14 +37,14 @@ class SegmentTree:
         return idx * 2 + 1

     def build(
-        self, idx: int, left_element: int, right_element: int, A: list[int]
+        self, idx: int, left_element: int, right_element: int, a: list[int]
     ) -> None:
         if left_element == right_element:
-            self.segment_tree[idx] = A[left_element - 1]
+            self.segment_tree[idx] = a[left_element - 1]
         else:
             mid = (left_element + right_element) // 2
-            self.build(self.left(idx), left_element, mid, A)
-            self.build(self.right(idx), mid + 1, right_element, A)
+            self.build(self.left(idx), left_element, mid, a)
+            self.build(self.right(idx), mid + 1, right_element, a)
             self.segment_tree[idx] = max(
                 self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
             )
@@ -2,8 +2,8 @@ import math


 class SegmentTree:
-    def __init__(self, A):
-        self.N = len(A)
+    def __init__(self, a):
+        self.N = len(a)
         self.st = [0] * (
             4 * self.N
         )  # approximate the overall size of segment tree with array N
@@ -58,11 +58,11 @@ class SegmentTree:
         q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)
         return max(q1, q2)

-    def showData(self):
-        showList = []
+    def show_data(self):
+        show_list = []
         for i in range(1, N + 1):
-            showList += [self.query(i, i)]
-        print(showList)
+            show_list += [self.query(i, i)]
+        print(show_list)


 if __name__ == "__main__":
@@ -75,4 +75,4 @@ if __name__ == "__main__":
     segt.update(1, 3, 111)
     print(segt.query(1, 15))
     segt.update(7, 8, 235)
-    segt.showData()
+    segt.show_data()
@@ -121,28 +121,28 @@ def inorder(root: Node | None) -> None:
     inorder(root.right)


-def interactTreap(root: Node | None, args: str) -> Node | None:
+def interact_treap(root: Node | None, args: str) -> Node | None:
     """
     Commands:
     + value to add value into treap
     - value to erase all nodes with value

-    >>> root = interactTreap(None, "+1")
+    >>> root = interact_treap(None, "+1")
     >>> inorder(root)
     1,
-    >>> root = interactTreap(root, "+3 +5 +17 +19 +2 +16 +4 +0")
+    >>> root = interact_treap(root, "+3 +5 +17 +19 +2 +16 +4 +0")
     >>> inorder(root)
     0,1,2,3,4,5,16,17,19,
-    >>> root = interactTreap(root, "+4 +4 +4")
+    >>> root = interact_treap(root, "+4 +4 +4")
     >>> inorder(root)
     0,1,2,3,4,4,4,4,5,16,17,19,
-    >>> root = interactTreap(root, "-0")
+    >>> root = interact_treap(root, "-0")
     >>> inorder(root)
     1,2,3,4,4,4,4,5,16,17,19,
-    >>> root = interactTreap(root, "-4")
+    >>> root = interact_treap(root, "-4")
     >>> inorder(root)
     1,2,3,5,16,17,19,
-    >>> root = interactTreap(root, "=0")
+    >>> root = interact_treap(root, "=0")
     Unknown command
     """
     for arg in args.split():
@@ -168,7 +168,7 @@ def main() -> None:

     args = input()
     while args != "q":
-        root = interactTreap(root, args)
+        root = interact_treap(root, args)
         print(root)
         args = input()

@@ -52,14 +52,14 @@ class MinHeap:
         return self.heap_dict[key]

     def build_heap(self, array):
-        lastIdx = len(array) - 1
-        startFrom = self.get_parent_idx(lastIdx)
+        last_idx = len(array) - 1
+        start_from = self.get_parent_idx(last_idx)

         for idx, i in enumerate(array):
             self.idx_of_element[i] = idx
             self.heap_dict[i.name] = i.val

-        for i in range(startFrom, -1, -1):
+        for i in range(start_from, -1, -1):
             self.sift_down(i, array)
         return array

@@ -123,12 +123,12 @@ class MinHeap:
     def is_empty(self):
         return True if len(self.heap) == 0 else False

-    def decrease_key(self, node, newValue):
+    def decrease_key(self, node, new_value):
         assert (
-            self.heap[self.idx_of_element[node]].val > newValue
+            self.heap[self.idx_of_element[node]].val > new_value
         ), "newValue must be less that current value"
-        node.val = newValue
-        self.heap_dict[node.name] = newValue
+        node.val = new_value
+        self.heap_dict[node.name] = new_value
         self.sift_up(self.idx_of_element[node])


@@ -143,7 +143,7 @@ e = Node("E", 4)
 # Use one of these two ways to generate Min-Heap

 # Generating Min-Heap from array
-myMinHeap = MinHeap([r, b, a, x, e])
+my_min_heap = MinHeap([r, b, a, x, e])

 # Generating Min-Heap by Insert method
 # myMinHeap.insert(a)
@@ -154,14 +154,14 @@ myMinHeap = MinHeap([r, b, a, x, e])

 # Before
 print("Min Heap - before decrease key")
-for i in myMinHeap.heap:
+for i in my_min_heap.heap:
     print(i)

 print("Min Heap - After decrease key of node [B -> -17]")
-myMinHeap.decrease_key(b, -17)
+my_min_heap.decrease_key(b, -17)

 # After
-for i in myMinHeap.heap:
+for i in my_min_heap.heap:
     print(i)

 if __name__ == "__main__":
@@ -15,9 +15,9 @@ Enter an Infix Equation = a + b ^c
 """


-def infix_2_postfix(Infix):
-    Stack = []
-    Postfix = []
+def infix_2_postfix(infix):
+    stack = []
+    post_fix = []
     priority = {
         "^": 3,
         "*": 2,
@@ -26,7 +26,7 @@ def infix_2_postfix(Infix):
         "+": 1,
         "-": 1,
     }  # Priority of each operator
-    print_width = len(Infix) if (len(Infix) > 7) else 7
+    print_width = len(infix) if (len(infix) > 7) else 7

     # Print table header for output
     print(
@@ -37,52 +37,52 @@ def infix_2_postfix(Infix):
     )
     print("-" * (print_width * 3 + 7))

-    for x in Infix:
+    for x in infix:
         if x.isalpha() or x.isdigit():
-            Postfix.append(x)  # if x is Alphabet / Digit, add it to Postfix
+            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
         elif x == "(":
-            Stack.append(x)  # if x is "(" push to Stack
+            stack.append(x)  # if x is "(" push to Stack
         elif x == ")":  # if x is ")" pop stack until "(" is encountered
-            while Stack[-1] != "(":
-                Postfix.append(Stack.pop())  # Pop stack & add the content to Postfix
-            Stack.pop()
+            while stack[-1] != "(":
+                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
+            stack.pop()
         else:
-            if len(Stack) == 0:
-                Stack.append(x)  # If stack is empty, push x to stack
+            if len(stack) == 0:
+                stack.append(x)  # If stack is empty, push x to stack
             else:  # while priority of x is not > priority of element in the stack
-                while len(Stack) > 0 and priority[x] <= priority[Stack[-1]]:
-                    Postfix.append(Stack.pop())  # pop stack & add to Postfix
-                Stack.append(x)  # push x to stack
+                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
+                    post_fix.append(stack.pop())  # pop stack & add to Postfix
+                stack.append(x)  # push x to stack

         print(
             x.center(8),
-            ("".join(Stack)).ljust(print_width),
-            ("".join(Postfix)).ljust(print_width),
+            ("".join(stack)).ljust(print_width),
+            ("".join(post_fix)).ljust(print_width),
             sep=" | ",
         )  # Output in tabular format

-    while len(Stack) > 0:  # while stack is not empty
-        Postfix.append(Stack.pop())  # pop stack & add to Postfix
+    while len(stack) > 0:  # while stack is not empty
+        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
-            ("".join(Stack)).ljust(print_width),
-            ("".join(Postfix)).ljust(print_width),
+            ("".join(stack)).ljust(print_width),
+            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

-    return "".join(Postfix)  # return Postfix as str
+    return "".join(post_fix)  # return Postfix as str


-def infix_2_prefix(Infix):
-    Infix = list(Infix[::-1])  # reverse the infix equation
+def infix_2_prefix(infix):
+    infix = list(infix[::-1])  # reverse the infix equation

-    for i in range(len(Infix)):
-        if Infix[i] == "(":
-            Infix[i] = ")"  # change "(" to ")"
-        elif Infix[i] == ")":
-            Infix[i] = "("  # change ")" to "("
+    for i in range(len(infix)):
+        if infix[i] == "(":
+            infix[i] = ")"  # change "(" to ")"
+        elif infix[i] == ")":
+            infix[i] = "("  # change ")" to "("

-    return (infix_2_postfix("".join(Infix)))[
+    return (infix_2_postfix("".join(infix)))[
         ::-1
     ]  # call infix_2_postfix on Infix, return reverse of Postfix
@@ -20,49 +20,49 @@ Enter a Postfix Equation (space separated) = 5 6 9 * +
 import operator as op


-def Solve(Postfix):
-    Stack = []
-    Div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
-    Opr = {
+def solve(post_fix):
+    stack = []
+    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
+    opr = {
         "^": op.pow,
         "*": op.mul,
-        "/": Div,
+        "/": div,
         "+": op.add,
         "-": op.sub,
     }  # operators & their respective operation

     # print table header
     print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
-    print("-" * (30 + len(Postfix)))
+    print("-" * (30 + len(post_fix)))

-    for x in Postfix:
+    for x in post_fix:
         if x.isdigit():  # if x in digit
-            Stack.append(x)  # append x to stack
+            stack.append(x)  # append x to stack
             # output in tabular format
-            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ")
+            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
         else:
-            B = Stack.pop()  # pop stack
+            b = stack.pop()  # pop stack
             # output in tabular format
-            print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ")
+            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

-            A = Stack.pop()  # pop stack
+            a = stack.pop()  # pop stack
             # output in tabular format
-            print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ")
+            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

-            Stack.append(
-                str(Opr[x](int(A), int(B)))
+            stack.append(
+                str(opr[x](int(a), int(b)))
             )  # evaluate the 2 values popped from stack & push result to stack
             # output in tabular format
             print(
                 x.rjust(8),
-                ("push(" + A + x + B + ")").ljust(12),
-                ",".join(Stack),
+                ("push(" + a + x + b + ")").ljust(12),
+                ",".join(stack),
                 sep=" | ",
             )

-    return int(Stack[0])
+    return int(stack[0])


 if __name__ == "__main__":
     Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
-    print("\n\tResult = ", Solve(Postfix))
+    print("\n\tResult = ", solve(Postfix))
@@ -8,7 +8,7 @@ on the current day is less than or equal to its price on the given day.
 """


-def calculateSpan(price, S):
+def calculation_span(price, s):

     n = len(price)
     # Create a stack and push index of fist element to it
@@ -16,7 +16,7 @@ def calculateSpan(price, S):
     st.append(0)

     # Span value of first element is always 1
-    S[0] = 1
+    s[0] = 1

     # Calculate span values for rest of the elements
     for i in range(1, n):
@@ -30,14 +30,14 @@ def calculateSpan(price, S):
         # than all elements on left of it, i.e. price[0],
         # price[1], ..price[i-1]. Else the price[i] is
         # greater than elements after top of stack
-        S[i] = i + 1 if len(st) <= 0 else (i - st[0])
+        s[i] = i + 1 if len(st) <= 0 else (i - st[0])

         # Push this element to stack
         st.append(i)


 # A utility function to print elements of array
-def printArray(arr, n):
+def print_array(arr, n):
     for i in range(0, n):
         print(arr[i], end=" ")

@@ -47,7 +47,7 @@ price = [10, 4, 5, 90, 120, 80]
 S = [0 for i in range(len(price) + 1)]

 # Fill the span values in array S[]
-calculateSpan(price, S)
+calculation_span(price, S)

 # Print the calculated span values
-printArray(S, len(price))
+print_array(S, len(price))
@@ -43,33 +43,33 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
                 or 15 * PI / 8 <= direction <= 2 * PI
                 or 7 * PI / 8 <= direction <= 9 * PI / 8
             ):
-                W = sobel_grad[row, col - 1]
-                E = sobel_grad[row, col + 1]
-                if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E:
+                w = sobel_grad[row, col - 1]
+                e = sobel_grad[row, col + 1]
+                if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e:
                     dst[row, col] = sobel_grad[row, col]

             elif (PI / 8 <= direction < 3 * PI / 8) or (
                 9 * PI / 8 <= direction < 11 * PI / 8
             ):
-                SW = sobel_grad[row + 1, col - 1]
-                NE = sobel_grad[row - 1, col + 1]
-                if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE:
+                sw = sobel_grad[row + 1, col - 1]
+                ne = sobel_grad[row - 1, col + 1]
+                if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne:
                     dst[row, col] = sobel_grad[row, col]

             elif (3 * PI / 8 <= direction < 5 * PI / 8) or (
                 11 * PI / 8 <= direction < 13 * PI / 8
             ):
-                N = sobel_grad[row - 1, col]
-                S = sobel_grad[row + 1, col]
-                if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S:
+                n = sobel_grad[row - 1, col]
+                s = sobel_grad[row + 1, col]
+                if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s:
                     dst[row, col] = sobel_grad[row, col]

             elif (5 * PI / 8 <= direction < 7 * PI / 8) or (
                 13 * PI / 8 <= direction < 15 * PI / 8
             ):
-                NW = sobel_grad[row - 1, col - 1]
-                SE = sobel_grad[row + 1, col + 1]
-                if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE:
+                nw = sobel_grad[row - 1, col - 1]
+                se = sobel_grad[row + 1, col + 1]
+                if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se:
                     dst[row, col] = sobel_grad[row, col]

     """
@@ -46,16 +46,16 @@ def bilateral_filter(
     kernel_size: int,
 ) -> np.ndarray:
     img2 = np.zeros(img.shape)
-    gaussKer = get_gauss_kernel(kernel_size, spatial_variance)
-    sizeX, sizeY = img.shape
-    for i in range(kernel_size // 2, sizeX - kernel_size // 2):
-        for j in range(kernel_size // 2, sizeY - kernel_size // 2):
+    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
+    size_x, size_y = img.shape
+    for i in range(kernel_size // 2, size_x - kernel_size // 2):
+        for j in range(kernel_size // 2, size_y - kernel_size // 2):

-            imgS = get_slice(img, i, j, kernel_size)
-            imgI = imgS - imgS[kernel_size // 2, kernel_size // 2]
-            imgIG = vec_gaussian(imgI, intensity_variance)
-            weights = np.multiply(gaussKer, imgIG)
-            vals = np.multiply(imgS, weights)
+            img_s = get_slice(img, i, j, kernel_size)
+            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
+            img_ig = vec_gaussian(img_i, intensity_variance)
+            weights = np.multiply(gauss_ker, img_ig)
+            vals = np.multiply(img_s, weights)
             val = np.sum(vals) / np.sum(weights)
             img2[i, j] = val
     return img2
|
|
|
@ -11,7 +11,7 @@ import numpy as np
|
|||
from matplotlib import pyplot as plt
|
||||
|
||||
|
||||
class contrastStretch:
|
||||
class ConstantStretch:
|
||||
def __init__(self):
|
||||
self.img = ""
|
||||
self.original_image = ""
|
||||
|
@ -45,10 +45,10 @@ class contrastStretch:
|
|||
self.img[j][i] = self.last_list[num]
|
||||
cv2.imwrite("output_data/output.jpg", self.img)
|
||||
|
||||
def plotHistogram(self):
|
||||
def plot_histogram(self):
|
||||
plt.hist(self.img.ravel(), 256, [0, 256])
|
||||
|
||||
def showImage(self):
|
||||
def show_image(self):
|
||||
cv2.imshow("Output-Image", self.img)
|
||||
cv2.imshow("Input-Image", self.original_image)
|
||||
cv2.waitKey(5000)
|
||||
|
@ -57,7 +57,7 @@ class contrastStretch:
|
|||
|
||||
if __name__ == "__main__":
|
||||
file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
|
||||
stretcher = contrastStretch()
|
||||
stretcher = ContrastStretch()
|
||||
stretcher.stretch(file_path)
|
||||
stretcher.plotHistogram()
|
||||
stretcher.showImage()
|
||||
stretcher.plot_histogram()
|
||||
stretcher.show_image()
|
||||
|
|
|
@ -104,72 +104,72 @@ class IndexCalculation:
|
|||
#RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"]
|
||||
"""
|
||||
|
||||
def __init__(self, red=None, green=None, blue=None, redEdge=None, nir=None):
|
||||
def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
|
||||
# print("Numpy version: " + np.__version__)
|
||||
self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
|
||||
self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
|
||||
|
||||
def setMatrices(self, red=None, green=None, blue=None, redEdge=None, nir=None):
|
||||
def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
|
||||
if red is not None:
|
||||
self.red = red
|
||||
if green is not None:
|
||||
self.green = green
|
||||
if blue is not None:
|
||||
self.blue = blue
|
||||
if redEdge is not None:
|
||||
self.redEdge = redEdge
|
||||
if red_edge is not None:
|
||||
self.redEdge = red_edge
|
||||
if nir is not None:
|
||||
self.nir = nir
|
||||
return True
|
||||
|
||||
def calculation(
|
||||
self, index="", red=None, green=None, blue=None, redEdge=None, nir=None
|
||||
self, index="", red=None, green=None, blue=None, red_edge=None, nir=None
|
||||
):
|
||||
"""
|
||||
performs the calculation of the index with the values instantiated in the class
|
||||
:str index: abbreviation of index name to perform
|
||||
"""
|
||||
self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
|
||||
self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
|
||||
funcs = {
|
||||
"ARVI2": self.ARVI2,
|
||||
"CCCI": self.CCCI,
|
||||
"CVI": self.CVI,
|
||||
"GLI": self.GLI,
|
||||
"NDVI": self.NDVI,
|
||||
"BNDVI": self.BNDVI,
|
||||
"redEdgeNDVI": self.redEdgeNDVI,
|
||||
"GNDVI": self.GNDVI,
|
||||
"GBNDVI": self.GBNDVI,
|
||||
"GRNDVI": self.GRNDVI,
|
||||
"RBNDVI": self.RBNDVI,
|
||||
"PNDVI": self.PNDVI,
|
||||
"ATSAVI": self.ATSAVI,
|
||||
"BWDRVI": self.BWDRVI,
|
||||
"CIgreen": self.CIgreen,
|
||||
"CIrededge": self.CIrededge,
|
||||
"CI": self.CI,
|
||||
"CTVI": self.CTVI,
|
||||
"GDVI": self.GDVI,
|
||||
"EVI": self.EVI,
|
||||
"GEMI": self.GEMI,
|
||||
"GOSAVI": self.GOSAVI,
|
||||
"GSAVI": self.GSAVI,
|
||||
"Hue": self.Hue,
|
||||
"IVI": self.IVI,
|
||||
"IPVI": self.IPVI,
|
||||
"I": self.I,
|
||||
"RVI": self.RVI,
|
||||
"MRVI": self.MRVI,
|
||||
"MSAVI": self.MSAVI,
|
||||
"NormG": self.NormG,
|
||||
"NormNIR": self.NormNIR,
|
||||
"NormR": self.NormR,
|
||||
"NGRDI": self.NGRDI,
|
||||
"RI": self.RI,
|
||||
"S": self.S,
|
||||
"IF": self.IF,
|
||||
"DVI": self.DVI,
|
||||
"TVI": self.TVI,
|
||||
"NDRE": self.NDRE,
|
||||
"ARVI2": self.arvi2,
|
||||
"CCCI": self.ccci,
|
||||
"CVI": self.cvi,
|
||||
"GLI": self.gli,
|
||||
"NDVI": self.ndvi,
|
||||
"BNDVI": self.bndvi,
|
||||
"redEdgeNDVI": self.red_edge_ndvi,
|
||||
"GNDVI": self.gndvi,
|
||||
"GBNDVI": self.gbndvi,
|
||||
"GRNDVI": self.grndvi,
|
||||
"RBNDVI": self.rbndvi,
|
||||
"PNDVI": self.pndvi,
|
||||
"ATSAVI": self.atsavi,
|
||||
"BWDRVI": self.bwdrvi,
|
||||
"CIgreen": self.ci_green,
|
||||
"CIrededge": self.ci_rededge,
|
||||
"CI": self.ci,
|
||||
"CTVI": self.ctvi,
|
||||
"GDVI": self.gdvi,
|
||||
"EVI": self.evi,
|
||||
"GEMI": self.gemi,
|
||||
"GOSAVI": self.gosavi,
|
||||
"GSAVI": self.gsavi,
|
||||
"Hue": self.hue,
|
||||
"IVI": self.ivi,
|
||||
"IPVI": self.ipvi,
|
||||
"I": self.i,
|
||||
"RVI": self.rvi,
|
||||
"MRVI": self.mrvi,
|
||||
"MSAVI": self.m_savi,
|
||||
"NormG": self.norm_g,
|
||||
"NormNIR": self.norm_nir,
|
||||
"NormR": self.norm_r,
|
||||
"NGRDI": self.ngrdi,
|
||||
"RI": self.ri,
|
||||
"S": self.s,
|
||||
"IF": self._if,
|
||||
"DVI": self.dvi,
|
||||
"TVI": self.tvi,
|
||||
"NDRE": self.ndre,
|
||||
}
|
||||
|
||||
try:
|
||||
|
@ -178,7 +178,7 @@ class IndexCalculation:
|
|||
print("Index not in the list!")
|
||||
return False
|
||||
|
||||
def ARVI2(self):
|
||||
def arvi2(self):
|
||||
"""
|
||||
Atmospherically Resistant Vegetation Index 2
|
||||
https://www.indexdatabase.de/db/i-single.php?id=396
|
||||
|
@ -187,7 +187,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
|
||||
|
||||
def CCCI(self):
|
||||
def ccci(self):
|
||||
"""
|
||||
Canopy Chlorophyll Content Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=224
|
||||
|
@ -197,7 +197,7 @@ class IndexCalculation:
|
|||
(self.nir - self.red) / (self.nir + self.red)
|
||||
)
|
||||
|
||||
def CVI(self):
|
||||
def cvi(self):
|
||||
"""
|
||||
Chlorophyll vegetation index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=391
|
||||
|
@ -205,7 +205,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.nir * (self.red / (self.green**2))
|
||||
|
||||
def GLI(self):
|
||||
def gli(self):
|
||||
"""
|
||||
self.green leaf index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=375
|
||||
|
@ -215,7 +215,7 @@ class IndexCalculation:
|
|||
2 * self.green + self.red + self.blue
|
||||
)
|
||||
|
||||
def NDVI(self):
|
||||
def ndvi(self):
|
||||
"""
|
||||
Normalized Difference self.nir/self.red Normalized Difference Vegetation
|
||||
Index, Calibrated NDVI - CDVI
|
||||
|
@ -224,7 +224,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir - self.red) / (self.nir + self.red)
|
||||
|
||||
def BNDVI(self):
|
||||
def bndvi(self):
|
||||
"""
|
||||
Normalized Difference self.nir/self.blue self.blue-normalized difference
|
||||
vegetation index
|
||||
|
@ -233,7 +233,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir - self.blue) / (self.nir + self.blue)
|
||||
|
||||
def redEdgeNDVI(self):
|
||||
def red_edge_ndvi(self):
|
||||
"""
|
||||
Normalized Difference self.rededge/self.red
|
||||
https://www.indexdatabase.de/db/i-single.php?id=235
|
||||
|
@ -241,7 +241,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.redEdge - self.red) / (self.redEdge + self.red)
|
||||
|
||||
def GNDVI(self):
|
||||
def gndvi(self):
|
||||
"""
|
||||
Normalized Difference self.nir/self.green self.green NDVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=401
|
||||
|
@ -249,7 +249,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir - self.green) / (self.nir + self.green)
|
||||
|
||||
def GBNDVI(self):
|
||||
def gbndvi(self):
|
||||
"""
|
||||
self.green-self.blue NDVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=186
|
||||
|
@ -259,7 +259,7 @@ class IndexCalculation:
|
|||
self.nir + (self.green + self.blue)
|
||||
)
|
||||
|
||||
def GRNDVI(self):
|
||||
def grndvi(self):
|
||||
"""
|
||||
self.green-self.red NDVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=185
|
||||
|
@ -269,7 +269,7 @@ class IndexCalculation:
|
|||
self.nir + (self.green + self.red)
|
||||
)
|
||||
|
||||
def RBNDVI(self):
|
||||
def rbndvi(self):
|
||||
"""
|
||||
self.red-self.blue NDVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=187
|
||||
|
@ -277,7 +277,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
|
||||
|
||||
def PNDVI(self):
|
||||
def pndvi(self):
|
||||
"""
|
||||
Pan NDVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=188
|
||||
|
@ -287,7 +287,7 @@ class IndexCalculation:
|
|||
self.nir + (self.green + self.red + self.blue)
|
||||
)
|
||||
|
||||
def ATSAVI(self, X=0.08, a=1.22, b=0.03):
|
||||
def atsavi(self, x=0.08, a=1.22, b=0.03):
|
||||
"""
|
||||
Adjusted transformed soil-adjusted VI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=209
|
||||
|
@ -295,10 +295,10 @@ class IndexCalculation:
|
|||
"""
|
||||
return a * (
|
||||
(self.nir - a * self.red - b)
|
||||
/ (a * self.nir + self.red - a * b + X * (1 + a**2))
|
||||
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
|
||||
)
|
||||
|
||||
def BWDRVI(self):
|
||||
def bwdrvi(self):
|
||||
"""
|
||||
self.blue-wide dynamic range vegetation index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=136
|
||||
|
@ -306,7 +306,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
|
||||
|
||||
def CIgreen(self):
|
||||
def ci_green(self):
|
||||
"""
|
||||
Chlorophyll Index self.green
|
||||
https://www.indexdatabase.de/db/i-single.php?id=128
|
||||
|
@ -314,7 +314,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir / self.green) - 1
|
||||
|
||||
def CIrededge(self):
|
||||
def ci_rededge(self):
|
||||
"""
|
||||
Chlorophyll Index self.redEdge
|
||||
https://www.indexdatabase.de/db/i-single.php?id=131
|
||||
|
@ -322,7 +322,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir / self.redEdge) - 1
|
||||
|
||||
def CI(self):
|
||||
def ci(self):
|
||||
"""
|
||||
Coloration Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=11
|
||||
|
@ -330,16 +330,16 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.red - self.blue) / self.red
|
||||
|
||||
def CTVI(self):
|
||||
def ctvi(self):
|
||||
"""
|
||||
Corrected Transformed Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=244
|
||||
:return: index
|
||||
"""
|
||||
ndvi = self.NDVI()
|
||||
ndvi = self.ndvi()
|
||||
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
|
||||
|
||||
def GDVI(self):
|
||||
def gdvi(self):
|
||||
"""
|
||||
Difference self.nir/self.green self.green Difference Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=27
|
||||
|
@ -347,7 +347,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.nir - self.green
|
||||
|
||||
def EVI(self):
|
||||
def evi(self):
|
||||
"""
|
||||
Enhanced Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=16
|
||||
|
@ -357,7 +357,7 @@ class IndexCalculation:
|
|||
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
|
||||
)
|
||||
|
||||
def GEMI(self):
|
||||
def gemi(self):
|
||||
"""
|
||||
Global Environment Monitoring Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=25
|
||||
|
@ -368,25 +368,25 @@ class IndexCalculation:
|
|||
)
|
||||
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
|
||||
|
||||
def GOSAVI(self, Y=0.16):
|
||||
def gosavi(self, y=0.16):
|
||||
"""
|
||||
self.green Optimized Soil Adjusted Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=29
|
||||
with y = 0.16
|
||||
:return: index
|
||||
"""
|
||||
return (self.nir - self.green) / (self.nir + self.green + Y)
|
||||
return (self.nir - self.green) / (self.nir + self.green + y)
|
||||
|
||||
def GSAVI(self, L=0.5):
|
||||
def gsavi(self, n=0.5):
|
||||
"""
|
||||
self.green Soil Adjusted Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=31
|
||||
mit L = 0,5
|
||||
with n = 0.5
|
||||
:return: index
|
||||
"""
|
||||
return ((self.nir - self.green) / (self.nir + self.green + L)) * (1 + L)
|
||||
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
|
||||
|
||||
def Hue(self):
|
||||
def hue(self):
|
||||
"""
|
||||
Hue
|
||||
https://www.indexdatabase.de/db/i-single.php?id=34
|
||||
|
@ -396,7 +396,7 @@ class IndexCalculation:
|
|||
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
|
||||
)
|
||||
|
||||
def IVI(self, a=None, b=None):
|
||||
def ivi(self, a=None, b=None):
|
||||
"""
|
||||
Ideal vegetation index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=276
|
||||
|
@ -406,15 +406,15 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.nir - b) / (a * self.red)
|
||||
|
||||
def IPVI(self):
|
||||
def ipvi(self):
|
||||
"""
|
||||
Infrared percentage vegetation index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=35
|
||||
:return: index
|
||||
"""
|
||||
return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1)
|
||||
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
|
||||
|
||||
def I(self): # noqa: E741,E743
|
||||
def i(self): # noqa: E741,E743
|
||||
"""
|
||||
Intensity
|
||||
https://www.indexdatabase.de/db/i-single.php?id=36
|
||||
|
@ -422,7 +422,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.red + self.green + self.blue) / 30.5
|
||||
|
||||
def RVI(self):
|
||||
def rvi(self):
|
||||
"""
|
||||
Ratio-Vegetation-Index
|
||||
http://www.seos-project.eu/modules/remotesensing/remotesensing-c03-s01-p01.html
|
||||
|
@ -430,15 +430,15 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.nir / self.red
|
||||
|
||||
def MRVI(self):
|
||||
def mrvi(self):
|
||||
"""
|
||||
Modified Normalized Difference Vegetation Index RVI
|
||||
https://www.indexdatabase.de/db/i-single.php?id=275
|
||||
:return: index
|
||||
"""
|
||||
return (self.RVI() - 1) / (self.RVI() + 1)
|
||||
return (self.rvi() - 1) / (self.rvi() + 1)
|
||||
|
||||
def MSAVI(self):
|
||||
def m_savi(self):
|
||||
"""
|
||||
Modified Soil Adjusted Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=44
|
||||
|
@ -449,7 +449,7 @@ class IndexCalculation:
|
|||
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
|
||||
) / 2
|
||||
|
||||
def NormG(self):
|
||||
def norm_g(self):
|
||||
"""
|
||||
Norm G
|
||||
https://www.indexdatabase.de/db/i-single.php?id=50
|
||||
|
@ -457,7 +457,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.green / (self.nir + self.red + self.green)
|
||||
|
||||
def NormNIR(self):
|
||||
def norm_nir(self):
|
||||
"""
|
||||
Norm self.nir
|
||||
https://www.indexdatabase.de/db/i-single.php?id=51
|
||||
|
@ -465,7 +465,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.nir / (self.nir + self.red + self.green)
|
||||
|
||||
def NormR(self):
|
||||
def norm_r(self):
|
||||
"""
|
||||
Norm R
|
||||
https://www.indexdatabase.de/db/i-single.php?id=52
|
||||
|
@ -473,7 +473,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.red / (self.nir + self.red + self.green)
|
||||
|
||||
def NGRDI(self):
|
||||
def ngrdi(self):
|
||||
"""
|
||||
Normalized Difference self.green/self.red Normalized self.green self.red
|
||||
difference index, Visible Atmospherically Resistant Indices self.green
|
||||
|
@ -483,7 +483,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.green - self.red) / (self.green + self.red)
|
||||
|
||||
def RI(self):
|
||||
def ri(self):
|
||||
"""
|
||||
Normalized Difference self.red/self.green self.redness Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=74
|
||||
|
@ -491,7 +491,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (self.red - self.green) / (self.red + self.green)
|
||||
|
||||
def S(self):
|
||||
def s(self):
|
||||
"""
|
||||
Saturation
|
||||
https://www.indexdatabase.de/db/i-single.php?id=77
|
||||
|
@ -501,7 +501,7 @@ class IndexCalculation:
|
|||
min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
|
||||
return (max - min) / max
|
||||
|
||||
def IF(self):
|
||||
def _if(self):
|
||||
"""
|
||||
Shape Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=79
|
||||
|
@ -509,7 +509,7 @@ class IndexCalculation:
|
|||
"""
|
||||
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
|
||||
|
||||
def DVI(self):
|
||||
def dvi(self):
|
||||
"""
|
||||
Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index
|
||||
Number (VIN)
|
||||
|
@ -518,15 +518,15 @@ class IndexCalculation:
|
|||
"""
|
||||
return self.nir / self.red
|
||||
|
||||
def TVI(self):
|
||||
def tvi(self):
|
||||
"""
|
||||
Transformed Vegetation Index
|
||||
https://www.indexdatabase.de/db/i-single.php?id=98
|
||||
:return: index
|
||||
"""
|
||||
return (self.NDVI() + 0.5) ** (1 / 2)
|
||||
return (self.ndvi() + 0.5) ** (1 / 2)
|
||||
|
||||
def NDRE(self):
|
||||
def ndre(self):
|
||||
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
|
||||
|
||||
|
||||
|
|
|
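Most of the renamed methods above are one-line band arithmetic; NDVI, for example, is simply (nir - red) / (nir + red) evaluated element-wise. A small illustrative example with made-up band values, independent of the class above:

import numpy as np

nir = np.array([0.60, 0.55, 0.20])  # near-infrared reflectance (example values)
red = np.array([0.10, 0.12, 0.18])  # red reflectance (example values)

ndvi = (nir - red) / (nir + red)    # Normalized Difference Vegetation Index
print(ndvi.round(3))                # dense healthy vegetation tends toward values near 1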
@ -62,8 +62,8 @@ def test_gen_gaussian_kernel_filter():
|
|||
|
||||
def test_convolve_filter():
|
||||
# laplace diagonals
|
||||
Laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
|
||||
res = conv.img_convolve(gray, Laplace).astype(uint8)
|
||||
laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
|
||||
res = conv.img_convolve(gray, laplace).astype(uint8)
|
||||
assert res.any()
|
||||
|
||||
|
||||
|
|
|
@ -63,18 +63,18 @@ def count_inversions_recursive(arr):
|
|||
if len(arr) <= 1:
|
||||
return arr, 0
|
||||
mid = len(arr) // 2
|
||||
P = arr[0:mid]
|
||||
Q = arr[mid:]
|
||||
p = arr[0:mid]
|
||||
q = arr[mid:]
|
||||
|
||||
A, inversion_p = count_inversions_recursive(P)
|
||||
B, inversions_q = count_inversions_recursive(Q)
|
||||
C, cross_inversions = _count_cross_inversions(A, B)
|
||||
a, inversion_p = count_inversions_recursive(p)
|
||||
b, inversions_q = count_inversions_recursive(q)
|
||||
c, cross_inversions = _count_cross_inversions(a, b)
|
||||
|
||||
num_inversions = inversion_p + inversions_q + cross_inversions
|
||||
return C, num_inversions
|
||||
return c, num_inversions
|
||||
|
||||
|
||||
def _count_cross_inversions(P, Q):
|
||||
def _count_cross_inversions(p, q):
|
||||
"""
|
||||
Counts the inversions across two sorted arrays.
|
||||
And combine the two arrays into one sorted array
|
||||
|
@ -96,26 +96,26 @@ def _count_cross_inversions(P, Q):
|
|||
([1, 2, 3, 3, 4, 5], 0)
|
||||
"""
|
||||
|
||||
R = []
|
||||
r = []
|
||||
i = j = num_inversion = 0
|
||||
while i < len(P) and j < len(Q):
|
||||
if P[i] > Q[j]:
|
||||
while i < len(p) and j < len(q):
|
||||
if p[i] > q[j]:
|
||||
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
|
||||
# These are all inversions. The claim emerges from the
|
||||
# property that P is sorted.
|
||||
num_inversion += len(P) - i
|
||||
R.append(Q[j])
|
||||
num_inversion += len(p) - i
|
||||
r.append(q[j])
|
||||
j += 1
|
||||
else:
|
||||
R.append(P[i])
|
||||
r.append(p[i])
|
||||
i += 1
|
||||
|
||||
if i < len(P):
|
||||
R.extend(P[i:])
|
||||
if i < len(p):
|
||||
r.extend(p[i:])
|
||||
else:
|
||||
R.extend(Q[j:])
|
||||
r.extend(q[j:])
|
||||
|
||||
return R, num_inversion
|
||||
return r, num_inversion
|
||||
|
||||
|
||||
def main():
|
||||
|
|
|
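The renamed merge step above counts cross inversions while combining two sorted halves. A compact, self-contained sketch of the same divide-and-conquer idea (a standalone function, not the module's exact API):

def count_inversions(arr):
    """Return (sorted copy of arr, number of inversions) in O(n log n)."""
    if len(arr) <= 1:
        return list(arr), 0
    mid = len(arr) // 2
    left, inv_left = count_inversions(arr[:mid])
    right, inv_right = count_inversions(arr[mid:])
    merged, i, j, cross = [], 0, 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
            cross += len(left) - i  # every remaining left element is inverted with right[j]
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inv_left + inv_right + cross

print(count_inversions([10, 2, 1, 5, 5, 2, 11]))  # ([1, 2, 2, 5, 5, 10, 11], 8)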
@ -28,7 +28,7 @@ class AssignmentUsingBitmask:
|
|||
# to 1
|
||||
self.final_mask = (1 << len(task_performed)) - 1
|
||||
|
||||
def CountWaysUtil(self, mask, task_no):
|
||||
def count_ways_until(self, mask, task_no):
|
||||
|
||||
# if mask == self.finalmask all persons are distributed tasks, return 1
|
||||
if mask == self.final_mask:
|
||||
|
@ -43,7 +43,7 @@ class AssignmentUsingBitmask:
|
|||
return self.dp[mask][task_no]
|
||||
|
||||
# Number of ways when we don't include this task in the arrangement
|
||||
total_ways_util = self.CountWaysUtil(mask, task_no + 1)
|
||||
total_ways_util = self.count_ways_until(mask, task_no + 1)
|
||||
|
||||
# now assign the tasks one by one to all possible persons and recursively
|
||||
# assign for the remaining tasks.
|
||||
|
@ -56,14 +56,14 @@ class AssignmentUsingBitmask:
|
|||
|
||||
# assign this task to p and change the mask value. And recursively
|
||||
# assign tasks with the new mask value.
|
||||
total_ways_util += self.CountWaysUtil(mask | (1 << p), task_no + 1)
|
||||
total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
|
||||
|
||||
# save the value.
|
||||
self.dp[mask][task_no] = total_ways_util
|
||||
|
||||
return self.dp[mask][task_no]
|
||||
|
||||
def countNoOfWays(self, task_performed):
|
||||
def count_no_of_ways(self, task_performed):
|
||||
|
||||
# Store the list of persons for each task
|
||||
for i in range(len(task_performed)):
|
||||
|
@ -71,7 +71,7 @@ class AssignmentUsingBitmask:
|
|||
self.task[j].append(i)
|
||||
|
||||
# call the function to fill the DP table, final answer is stored in dp[0][1]
|
||||
return self.CountWaysUtil(0, 1)
|
||||
return self.count_ways_until(0, 1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -81,7 +81,7 @@ if __name__ == "__main__":
|
|||
# the list of tasks that can be done by M persons.
|
||||
task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
|
||||
print(
|
||||
AssignmentUsingBitmask(task_performed, total_tasks).countNoOfWays(
|
||||
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
|
||||
task_performed
|
||||
)
|
||||
)
|
||||
|
|
|
@ -21,10 +21,10 @@ class EditDistance:
|
|||
def __init__(self):
|
||||
self.__prepare__()
|
||||
|
||||
def __prepare__(self, N=0, M=0):
|
||||
self.dp = [[-1 for y in range(0, M)] for x in range(0, N)]
|
||||
def __prepare__(self, n=0, m=0):
|
||||
self.dp = [[-1 for y in range(0, m)] for x in range(0, n)]
|
||||
|
||||
def __solveDP(self, x, y):
|
||||
def __solve_dp(self, x, y):
|
||||
if x == -1:
|
||||
return y + 1
|
||||
elif y == -1:
|
||||
|
@ -32,30 +32,30 @@ class EditDistance:
|
|||
elif self.dp[x][y] > -1:
|
||||
return self.dp[x][y]
|
||||
else:
|
||||
if self.A[x] == self.B[y]:
|
||||
self.dp[x][y] = self.__solveDP(x - 1, y - 1)
|
||||
if self.a[x] == self.b[y]:
|
||||
self.dp[x][y] = self.__solve_dp(x - 1, y - 1)
|
||||
else:
|
||||
self.dp[x][y] = 1 + min(
|
||||
self.__solveDP(x, y - 1),
|
||||
self.__solveDP(x - 1, y),
|
||||
self.__solveDP(x - 1, y - 1),
|
||||
self.__solve_dp(x, y - 1),
|
||||
self.__solve_dp(x - 1, y),
|
||||
self.__solve_dp(x - 1, y - 1),
|
||||
)
|
||||
|
||||
return self.dp[x][y]
|
||||
|
||||
def solve(self, A, B):
|
||||
if isinstance(A, bytes):
|
||||
A = A.decode("ascii")
|
||||
def solve(self, a, b):
|
||||
if isinstance(a, bytes):
|
||||
a = a.decode("ascii")
|
||||
|
||||
if isinstance(B, bytes):
|
||||
B = B.decode("ascii")
|
||||
if isinstance(b, bytes):
|
||||
b = b.decode("ascii")
|
||||
|
||||
self.A = str(A)
|
||||
self.B = str(B)
|
||||
self.a = str(a)
|
||||
self.b = str(b)
|
||||
|
||||
self.__prepare__(len(A), len(B))
|
||||
self.__prepare__(len(a), len(b))
|
||||
|
||||
return self.__solveDP(len(A) - 1, len(B) - 1)
|
||||
return self.__solve_dp(len(a) - 1, len(b) - 1)
|
||||
|
||||
|
||||
def min_distance_bottom_up(word1: str, word2: str) -> int:
|
||||
|
|
|
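The memoised __solve_dp above fills the same table a bottom-up Levenshtein computation would; the module's min_distance_bottom_up presumably does so iteratively. For reference, a minimal iterative sketch of that recurrence (plain strings, unit costs, not the module's code):

def edit_distance(word1: str, word2: str) -> int:
    """Minimum number of insertions, deletions and substitutions to turn word1 into word2."""
    m, n = len(word1), len(word2)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # delete all of word1[:i]
    for j in range(n + 1):
        dp[0][j] = j  # insert all of word2[:j]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if word1[i - 1] == word2[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,         # deletion
                dp[i][j - 1] + 1,         # insertion
                dp[i - 1][j - 1] + cost,  # match or substitution
            )
    return dp[m][n]

print(edit_distance("intention", "execution"))  # 5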
@ -2,41 +2,41 @@ import math
|
|||
|
||||
|
||||
class Graph:
|
||||
def __init__(self, N=0): # a graph with Node 0,1,...,N-1
|
||||
self.N = N
|
||||
self.W = [
|
||||
[math.inf for j in range(0, N)] for i in range(0, N)
|
||||
def __init__(self, n=0): # a graph with Node 0,1,...,N-1
|
||||
self.n = n
|
||||
self.w = [
|
||||
[math.inf for j in range(0, n)] for i in range(0, n)
|
||||
] # adjacency matrix for weight
|
||||
self.dp = [
|
||||
[math.inf for j in range(0, N)] for i in range(0, N)
|
||||
[math.inf for j in range(0, n)] for i in range(0, n)
|
||||
] # dp[i][j] stores minimum distance from i to j
|
||||
|
||||
def addEdge(self, u, v, w):
|
||||
def add_edge(self, u, v, w):
|
||||
self.dp[u][v] = w
|
||||
|
||||
def floyd_warshall(self):
|
||||
for k in range(0, self.N):
|
||||
for i in range(0, self.N):
|
||||
for j in range(0, self.N):
|
||||
for k in range(0, self.n):
|
||||
for i in range(0, self.n):
|
||||
for j in range(0, self.n):
|
||||
self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
|
||||
|
||||
def showMin(self, u, v):
|
||||
def show_min(self, u, v):
|
||||
return self.dp[u][v]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
graph = Graph(5)
|
||||
graph.addEdge(0, 2, 9)
|
||||
graph.addEdge(0, 4, 10)
|
||||
graph.addEdge(1, 3, 5)
|
||||
graph.addEdge(2, 3, 7)
|
||||
graph.addEdge(3, 0, 10)
|
||||
graph.addEdge(3, 1, 2)
|
||||
graph.addEdge(3, 2, 1)
|
||||
graph.addEdge(3, 4, 6)
|
||||
graph.addEdge(4, 1, 3)
|
||||
graph.addEdge(4, 2, 4)
|
||||
graph.addEdge(4, 3, 9)
|
||||
graph.add_edge(0, 2, 9)
|
||||
graph.add_edge(0, 4, 10)
|
||||
graph.add_edge(1, 3, 5)
|
||||
graph.add_edge(2, 3, 7)
|
||||
graph.add_edge(3, 0, 10)
|
||||
graph.add_edge(3, 1, 2)
|
||||
graph.add_edge(3, 2, 1)
|
||||
graph.add_edge(3, 4, 6)
|
||||
graph.add_edge(4, 1, 3)
|
||||
graph.add_edge(4, 2, 4)
|
||||
graph.add_edge(4, 3, 9)
|
||||
graph.floyd_warshall()
|
||||
graph.showMin(1, 4)
|
||||
graph.showMin(0, 3)
|
||||
graph.show_min(1, 4)
|
||||
graph.show_min(0, 3)
|
||||
|
|
|
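The Graph class above stores edge weights in dp and relaxes every vertex pair through every intermediate vertex. The same Floyd-Warshall relaxation in a few standalone lines over an adjacency matrix (example data only, not the module's API):

import math

def floyd_warshall(dist):
    """All-pairs shortest paths; dist is an n x n matrix with math.inf for missing edges."""
    n = len(dist)
    dist = [row[:] for row in dist]  # work on a copy, do not mutate the caller's matrix
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

INF = math.inf
graph = [
    [0, 3, INF, 7],
    [8, 0, 2, INF],
    [5, INF, 0, 1],
    [2, INF, INF, 0],
]
print(floyd_warshall(graph)[0][2])  # 5, via 0 -> 1 -> 2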
@ -2,20 +2,20 @@ from bisect import bisect
|
|||
from itertools import accumulate
|
||||
|
||||
|
||||
def fracKnapsack(vl, wt, W, n):
|
||||
def frac_knapsack(vl, wt, w, n):
|
||||
"""
|
||||
>>> fracKnapsack([60, 100, 120], [10, 20, 30], 50, 3)
|
||||
>>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
|
||||
240.0
|
||||
"""
|
||||
|
||||
r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True))
|
||||
vl, wt = [i[0] for i in r], [i[1] for i in r]
|
||||
acc = list(accumulate(wt))
|
||||
k = bisect(acc, W)
|
||||
k = bisect(acc, w)
|
||||
return (
|
||||
0
|
||||
if k == 0
|
||||
else sum(vl[:k]) + (W - acc[k - 1]) * (vl[k]) / (wt[k])
|
||||
else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
|
||||
if k != n
|
||||
else sum(vl[:k])
|
||||
)
|
||||
|
|
|
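frac_knapsack above sorts items by value density, takes whole items while capacity remains, and then a fraction of the next one (using bisect over accumulated weights). The same greedy idea as an explicit loop, with illustrative names only:

def fractional_knapsack(values, weights, capacity):
    """Maximum value when items may be taken fractionally (greedy by value/weight ratio)."""
    items = sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)
    total = 0.0
    for value, weight in items:
        if capacity >= weight:   # take the whole item
            total += value
            capacity -= weight
        else:                    # take the fraction that still fits, then stop
            total += value * capacity / weight
            break
    return total

print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # 240.0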
@ -7,39 +7,39 @@ Note that only the integer weights 0-1 knapsack problem is solvable
|
|||
"""
|
||||
|
||||
|
||||
def MF_knapsack(i, wt, val, j):
|
||||
def mf_knapsack(i, wt, val, j):
|
||||
"""
|
||||
This code involves the concept of memory functions. Here we solve the subproblems
|
||||
which are needed unlike the below example
|
||||
F is a 2D array with -1s filled up
|
||||
"""
|
||||
global F # a global dp table for knapsack
|
||||
if F[i][j] < 0:
|
||||
global f # a global dp table for knapsack
|
||||
if f[i][j] < 0:
|
||||
if j < wt[i - 1]:
|
||||
val = MF_knapsack(i - 1, wt, val, j)
|
||||
val = mf_knapsack(i - 1, wt, val, j)
|
||||
else:
|
||||
val = max(
|
||||
MF_knapsack(i - 1, wt, val, j),
|
||||
MF_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
|
||||
mf_knapsack(i - 1, wt, val, j),
|
||||
mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
|
||||
)
|
||||
F[i][j] = val
|
||||
return F[i][j]
|
||||
f[i][j] = val
|
||||
return f[i][j]
|
||||
|
||||
|
||||
def knapsack(W, wt, val, n):
|
||||
dp = [[0 for i in range(W + 1)] for j in range(n + 1)]
|
||||
def knapsack(w, wt, val, n):
|
||||
dp = [[0 for i in range(w + 1)] for j in range(n + 1)]
|
||||
|
||||
for i in range(1, n + 1):
|
||||
for w in range(1, W + 1):
|
||||
for w in range(1, w + 1):
|
||||
if wt[i - 1] <= w:
|
||||
dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w])
|
||||
else:
|
||||
dp[i][w] = dp[i - 1][w]
|
||||
|
||||
return dp[n][W], dp
|
||||
return dp[n][w], dp
|
||||
|
||||
|
||||
def knapsack_with_example_solution(W: int, wt: list, val: list):
|
||||
def knapsack_with_example_solution(w: int, wt: list, val: list):
|
||||
"""
|
||||
Solves the integer weights knapsack problem returns one of
|
||||
the several possible optimal subsets.
|
||||
|
@ -90,9 +90,9 @@ def knapsack_with_example_solution(W: int, wt: list, val: list):
|
|||
f"got weight of type {type(wt[i])} at index {i}"
|
||||
)
|
||||
|
||||
optimal_val, dp_table = knapsack(W, wt, val, num_items)
|
||||
optimal_val, dp_table = knapsack(w, wt, val, num_items)
|
||||
example_optional_set: set = set()
|
||||
_construct_solution(dp_table, wt, num_items, W, example_optional_set)
|
||||
_construct_solution(dp_table, wt, num_items, w, example_optional_set)
|
||||
|
||||
return optimal_val, example_optional_set
|
||||
|
||||
|
@ -136,10 +136,10 @@ if __name__ == "__main__":
|
|||
wt = [4, 3, 2, 3]
|
||||
n = 4
|
||||
w = 6
|
||||
F = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)]
|
||||
f = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)]
|
||||
optimal_solution, _ = knapsack(w, wt, val, n)
|
||||
print(optimal_solution)
|
||||
print(MF_knapsack(n, wt, val, w)) # switched the n and w
|
||||
print(mf_knapsack(n, wt, val, w)) # switched the n and w
|
||||
|
||||
# testing the dynamic programming problem with example
|
||||
# the optimal subset for the above example are items 3 and 4
|
||||
|
|
|
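In the integer-weight 0/1 knapsack above, dp[i][w] holds the best value achievable with the first i items within capacity w. A condensed standalone sketch of that bottom-up table with small example numbers of its own (it returns only the optimal value, unlike the module's knapsack, which also returns the table):

def knapsack_01(capacity, weights, values):
    """Best achievable value for the 0/1 knapsack with integer weights."""
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            dp[i][w] = dp[i - 1][w]  # skip item i
            if weights[i - 1] <= w:  # or take it if it fits
                dp[i][w] = max(dp[i][w], values[i - 1] + dp[i - 1][w - weights[i - 1]])
    return dp[n][capacity]

print(knapsack_01(6, [4, 3, 2, 3], [3, 2, 4, 4]))  # 8: take the items of weight 2 and 3 worth 4 each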
@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str):
|
|||
n = len(y)
|
||||
|
||||
# declaring the array for storing the dp values
|
||||
L = [[0] * (n + 1) for _ in range(m + 1)]
|
||||
l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741
|
||||
|
||||
for i in range(1, m + 1):
|
||||
for j in range(1, n + 1):
|
||||
|
@ -47,7 +47,7 @@ def longest_common_subsequence(x: str, y: str):
|
|||
else:
|
||||
match = 0
|
||||
|
||||
L[i][j] = max(L[i - 1][j], L[i][j - 1], L[i - 1][j - 1] + match)
|
||||
l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
|
||||
|
||||
seq = ""
|
||||
i, j = m, n
|
||||
|
@ -57,17 +57,17 @@ def longest_common_subsequence(x: str, y: str):
|
|||
else:
|
||||
match = 0
|
||||
|
||||
if L[i][j] == L[i - 1][j - 1] + match:
|
||||
if l[i][j] == l[i - 1][j - 1] + match:
|
||||
if match == 1:
|
||||
seq = x[i - 1] + seq
|
||||
i -= 1
|
||||
j -= 1
|
||||
elif L[i][j] == L[i - 1][j]:
|
||||
elif l[i][j] == l[i - 1][j]:
|
||||
i -= 1
|
||||
else:
|
||||
j -= 1
|
||||
|
||||
return L[m][n], seq
|
||||
return l[m][n], seq
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
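The traceback above rebuilds one longest common subsequence from the filled table. For reference, a short sketch that computes just the LCS length (a standalone function, not the module's longest_common_subsequence):

def lcs_length(x: str, y: str) -> int:
    """Length of the longest common subsequence of x and y."""
    m, n = len(x), len(y)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1  # extend a common subsequence by one character
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[m][n]

print(lcs_length("AGGTAB", "GXTXAYB"))  # 4 ("GTAB")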
@ -34,12 +34,12 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu
|
|||
return array
|
||||
# Else
|
||||
pivot = array[0]
|
||||
isFound = False
|
||||
is_found = False
|
||||
i = 1
|
||||
longest_subseq: list[int] = []
|
||||
while not isFound and i < array_length:
|
||||
while not is_found and i < array_length:
|
||||
if array[i] < pivot:
|
||||
isFound = True
|
||||
is_found = True
|
||||
temp_array = [element for element in array[i:] if element >= array[i]]
|
||||
temp_array = longest_subsequence(temp_array)
|
||||
if len(temp_array) > len(longest_subseq):
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
from __future__ import annotations
|
||||
|
||||
|
||||
def CeilIndex(v, l, r, key): # noqa: E741
|
||||
def ceil_index(v, l, r, key): # noqa: E741
|
||||
while r - l > 1:
|
||||
m = (l + r) // 2
|
||||
if v[m] >= key:
|
||||
|
@ -17,16 +17,16 @@ def CeilIndex(v, l, r, key): # noqa: E741
|
|||
return r
|
||||
|
||||
|
||||
def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
|
||||
def longest_increasing_subsequence_length(v: list[int]) -> int:
|
||||
"""
|
||||
>>> LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6])
|
||||
>>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
|
||||
6
|
||||
>>> LongestIncreasingSubsequenceLength([])
|
||||
>>> longest_increasing_subsequence_length([])
|
||||
0
|
||||
>>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3,
|
||||
... 11, 7, 15])
|
||||
>>> longest_increasing_subsequence_length([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13,
|
||||
... 3, 11, 7, 15])
|
||||
6
|
||||
>>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1])
|
||||
>>> longest_increasing_subsequence_length([5, 4, 3, 2, 1])
|
||||
1
|
||||
"""
|
||||
if len(v) == 0:
|
||||
|
@ -44,7 +44,7 @@ def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
|
|||
tail[length] = v[i]
|
||||
length += 1
|
||||
else:
|
||||
tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]
|
||||
tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
|
||||
|
||||
return length
|
||||
|
||||
|
|
|
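ceil_index above is a hand-rolled binary search over the tail array; the standard library's bisect_left does the same job. A compact sketch of the O(n log n) approach for strictly increasing subsequences, assumed to match the behaviour of the function above:

from bisect import bisect_left

def lis_length(sequence: list[int]) -> int:
    """Length of the longest strictly increasing subsequence."""
    tails: list[int] = []  # tails[k] = smallest tail of an increasing run of length k + 1
    for value in sequence:
        pos = bisect_left(tails, value)  # first tail >= value
        if pos == len(tails):
            tails.append(value)   # value extends the longest run seen so far
        else:
            tails[pos] = value    # value gives a smaller tail for runs of length pos + 1
    return len(tails)

print(lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6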
@ -8,34 +8,34 @@ Space Complexity: O(n^2)
|
|||
"""
|
||||
|
||||
|
||||
def MatrixChainOrder(array):
|
||||
N = len(array)
|
||||
Matrix = [[0 for x in range(N)] for x in range(N)]
|
||||
Sol = [[0 for x in range(N)] for x in range(N)]
|
||||
def matrix_chain_order(array):
|
||||
n = len(array)
|
||||
matrix = [[0 for x in range(n)] for x in range(n)]
|
||||
sol = [[0 for x in range(n)] for x in range(n)]
|
||||
|
||||
for ChainLength in range(2, N):
|
||||
for a in range(1, N - ChainLength + 1):
|
||||
b = a + ChainLength - 1
|
||||
for chain_length in range(2, n):
|
||||
for a in range(1, n - chain_length + 1):
|
||||
b = a + chain_length - 1
|
||||
|
||||
Matrix[a][b] = sys.maxsize
|
||||
matrix[a][b] = sys.maxsize
|
||||
for c in range(a, b):
|
||||
cost = (
|
||||
Matrix[a][c] + Matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
|
||||
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
|
||||
)
|
||||
if cost < Matrix[a][b]:
|
||||
Matrix[a][b] = cost
|
||||
Sol[a][b] = c
|
||||
return Matrix, Sol
|
||||
if cost < matrix[a][b]:
|
||||
matrix[a][b] = cost
|
||||
sol[a][b] = c
|
||||
return matrix, sol
|
||||
|
||||
|
||||
# Print order of matrix with Ai as Matrix
|
||||
def PrintOptimalSolution(OptimalSolution, i, j):
|
||||
def print_optimal_solution(optimal_solution, i, j):
|
||||
if i == j:
|
||||
print("A" + str(i), end=" ")
|
||||
else:
|
||||
print("(", end=" ")
|
||||
PrintOptimalSolution(OptimalSolution, i, OptimalSolution[i][j])
|
||||
PrintOptimalSolution(OptimalSolution, OptimalSolution[i][j] + 1, j)
|
||||
print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
|
||||
print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
|
||||
print(")", end=" ")
|
||||
|
||||
|
||||
|
@ -44,10 +44,10 @@ def main():
|
|||
n = len(array)
|
||||
# Size of matrix created from above array will be
|
||||
# 30*35 35*15 15*5 5*10 10*20 20*25
|
||||
Matrix, OptimalSolution = MatrixChainOrder(array)
|
||||
matrix, optimal_solution = matrix_chain_order(array)
|
||||
|
||||
print("No. of Operation required: " + str(Matrix[1][n - 1]))
|
||||
PrintOptimalSolution(OptimalSolution, 1, n - 1)
|
||||
print("No. of Operation required: " + str(matrix[1][n - 1]))
|
||||
print_optimal_solution(optimal_solution, 1, n - 1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -4,14 +4,14 @@ author : Mayank Kumar Jha (mk9440)
|
|||
from __future__ import annotations
|
||||
|
||||
|
||||
def find_max_sub_array(A, low, high):
|
||||
def find_max_sub_array(a, low, high):
|
||||
if low == high:
|
||||
return low, high, A[low]
|
||||
return low, high, a[low]
|
||||
else:
|
||||
mid = (low + high) // 2
|
||||
left_low, left_high, left_sum = find_max_sub_array(A, low, mid)
|
||||
right_low, right_high, right_sum = find_max_sub_array(A, mid + 1, high)
|
||||
cross_left, cross_right, cross_sum = find_max_cross_sum(A, low, mid, high)
|
||||
left_low, left_high, left_sum = find_max_sub_array(a, low, mid)
|
||||
right_low, right_high, right_sum = find_max_sub_array(a, mid + 1, high)
|
||||
cross_left, cross_right, cross_sum = find_max_cross_sum(a, low, mid, high)
|
||||
if left_sum >= right_sum and left_sum >= cross_sum:
|
||||
return left_low, left_high, left_sum
|
||||
elif right_sum >= left_sum and right_sum >= cross_sum:
|
||||
|
@ -20,18 +20,18 @@ def find_max_sub_array(A, low, high):
|
|||
return cross_left, cross_right, cross_sum
|
||||
|
||||
|
||||
def find_max_cross_sum(A, low, mid, high):
|
||||
def find_max_cross_sum(a, low, mid, high):
|
||||
left_sum, max_left = -999999999, -1
|
||||
right_sum, max_right = -999999999, -1
|
||||
summ = 0
|
||||
for i in range(mid, low - 1, -1):
|
||||
summ += A[i]
|
||||
summ += a[i]
|
||||
if summ > left_sum:
|
||||
left_sum = summ
|
||||
max_left = i
|
||||
summ = 0
|
||||
for i in range(mid + 1, high + 1):
|
||||
summ += A[i]
|
||||
summ += a[i]
|
||||
if summ > right_sum:
|
||||
right_sum = summ
|
||||
max_right = i
|
||||
|
|
|
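The divide-and-conquer routine above finds the maximum subarray in O(n log n); the same value can also be obtained in one linear pass. A sketch of Kadane's algorithm, named explicitly because it is a different technique from the code above:

def max_subarray_sum(numbers: list[int]) -> int:
    """Largest sum over all contiguous, non-empty subarrays (Kadane's algorithm)."""
    best = current = numbers[0]
    for value in numbers[1:]:
        current = max(value, current + value)  # either extend the current run or restart at value
        best = max(best, current)
    return best

print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from the subarray [4, -1, 2, 1]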
@ -7,7 +7,7 @@ https://www.hackerrank.com/challenges/coin-change/problem
|
|||
"""
|
||||
|
||||
|
||||
def dp_count(S, n):
|
||||
def dp_count(s, n):
|
||||
"""
|
||||
>>> dp_count([1, 2, 3], 4)
|
||||
4
|
||||
|
@ -33,7 +33,7 @@ def dp_count(S, n):
|
|||
# Pick all coins one by one and update table[] values
|
||||
# after the index greater than or equal to the value of the
|
||||
# picked coin
|
||||
for coin_val in S:
|
||||
for coin_val in s:
|
||||
for j in range(coin_val, n + 1):
|
||||
table[j] += table[j - coin_val]
|
||||
|
||||
|
|
|
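dp_count above counts combinations rather than permutations: iterating over coins in the outer loop ensures each multiset of coins is counted once regardless of order. A self-contained sketch of that table update:

def count_coin_change_ways(coins: list[int], target: int) -> int:
    """Number of ways to form target from unlimited copies of coins (order does not matter)."""
    table = [0] * (target + 1)
    table[0] = 1  # one way to make 0: use no coins
    for coin in coins:  # coin-major order avoids counting the same combination twice
        for amount in range(coin, target + 1):
            table[amount] += table[amount - coin]
    return table[target]

print(count_coin_change_ways([1, 2, 3], 4))  # 4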
@ -3,7 +3,7 @@ Partition a set into two subsets such that the difference of subset sums is mini
|
|||
"""
|
||||
|
||||
|
||||
def findMin(arr):
|
||||
def find_min(arr):
|
||||
n = len(arr)
|
||||
s = sum(arr)
|
||||
|
||||
|
|
|
@ -1,25 +1,25 @@
|
|||
def isSumSubset(arr, arrLen, requiredSum):
|
||||
def is_sum_subset(arr, arr_len, required_sum):
|
||||
"""
|
||||
>>> isSumSubset([2, 4, 6, 8], 4, 5)
|
||||
>>> is_sum_subset([2, 4, 6, 8], 4, 5)
|
||||
False
|
||||
>>> isSumSubset([2, 4, 6, 8], 4, 14)
|
||||
>>> is_sum_subset([2, 4, 6, 8], 4, 14)
|
||||
True
|
||||
"""
|
||||
# a subset value says 1 if that subset sum can be formed else 0
|
||||
# initially no subsets can be formed hence False/0
|
||||
subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)]
|
||||
subset = [[False for i in range(required_sum + 1)] for i in range(arr_len + 1)]
|
||||
|
||||
# for each arr value, a sum of zero(0) can be formed by not taking any element
|
||||
# hence True/1
|
||||
for i in range(arrLen + 1):
|
||||
for i in range(arr_len + 1):
|
||||
subset[i][0] = True
|
||||
|
||||
# sum is not zero and set is empty then false
|
||||
for i in range(1, requiredSum + 1):
|
||||
for i in range(1, required_sum + 1):
|
||||
subset[0][i] = False
|
||||
|
||||
for i in range(1, arrLen + 1):
|
||||
for j in range(1, requiredSum + 1):
|
||||
for i in range(1, arr_len + 1):
|
||||
for j in range(1, required_sum + 1):
|
||||
if arr[i - 1] > j:
|
||||
subset[i][j] = subset[i - 1][j]
|
||||
if arr[i - 1] <= j:
|
||||
|
@ -28,7 +28,7 @@ def isSumSubset(arr, arrLen, requiredSum):
|
|||
# uncomment to print the subset
|
||||
# for i in range(arrLen+1):
|
||||
# print(subset[i])
|
||||
print(subset[arrLen][requiredSum])
|
||||
print(subset[arr_len][required_sum])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
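is_sum_subset above builds a 2-D boolean table; the same check also fits in a single boolean row updated right to left. A minimal sketch using the same example values as the doctest in the hunk:

def has_subset_sum(numbers: list[int], target: int) -> bool:
    """True if some subset of numbers sums exactly to target."""
    reachable = [False] * (target + 1)
    reachable[0] = True  # the empty subset sums to 0
    for value in numbers:
        for total in range(target, value - 1, -1):  # backwards so each value is used at most once
            reachable[total] = reachable[total] or reachable[total - value]
    return reachable[target]

print(has_subset_sum([2, 4, 6, 8], 5))   # False
print(has_subset_sum([2, 4, 6, 8], 14))  # True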
@ -35,30 +35,30 @@ PROGNAME = "Sierpinski Triangle"
|
|||
points = [[-175, -125], [0, 175], [175, -125]] # size of triangle
|
||||
|
||||
|
||||
def getMid(p1, p2):
|
||||
def get_mid(p1, p2):
|
||||
return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) # find midpoint
|
||||
|
||||
|
||||
def triangle(points, depth):
|
||||
|
||||
myPen.up()
|
||||
myPen.goto(points[0][0], points[0][1])
|
||||
myPen.down()
|
||||
myPen.goto(points[1][0], points[1][1])
|
||||
myPen.goto(points[2][0], points[2][1])
|
||||
myPen.goto(points[0][0], points[0][1])
|
||||
my_pen.up()
|
||||
my_pen.goto(points[0][0], points[0][1])
|
||||
my_pen.down()
|
||||
my_pen.goto(points[1][0], points[1][1])
|
||||
my_pen.goto(points[2][0], points[2][1])
|
||||
my_pen.goto(points[0][0], points[0][1])
|
||||
|
||||
if depth > 0:
|
||||
triangle(
|
||||
[points[0], getMid(points[0], points[1]), getMid(points[0], points[2])],
|
||||
[points[0], get_mid(points[0], points[1]), get_mid(points[0], points[2])],
|
||||
depth - 1,
|
||||
)
|
||||
triangle(
|
||||
[points[1], getMid(points[0], points[1]), getMid(points[1], points[2])],
|
||||
[points[1], get_mid(points[0], points[1]), get_mid(points[1], points[2])],
|
||||
depth - 1,
|
||||
)
|
||||
triangle(
|
||||
[points[2], getMid(points[2], points[1]), getMid(points[0], points[2])],
|
||||
[points[2], get_mid(points[2], points[1]), get_mid(points[0], points[2])],
|
||||
depth - 1,
|
||||
)
|
||||
|
||||
|
@ -69,8 +69,8 @@ if __name__ == "__main__":
|
|||
"right format for using this script: "
|
||||
"$python fractals.py <int:depth_for_fractal>"
|
||||
)
|
||||
myPen = turtle.Turtle()
|
||||
myPen.ht()
|
||||
myPen.speed(5)
|
||||
myPen.pencolor("red")
|
||||
my_pen = turtle.Turtle()
|
||||
my_pen.ht()
|
||||
my_pen.speed(5)
|
||||
my_pen.pencolor("red")
|
||||
triangle(points, int(sys.argv[1]))
|
||||
|
|
|
@ -30,9 +30,9 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl
|
|||
"""
|
||||
# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
|
||||
# Distance in metres(m)
|
||||
AXIS_A = 6378137.0
|
||||
AXIS_B = 6356752.314245
|
||||
RADIUS = 6378137
|
||||
AXIS_A = 6378137.0 # noqa: N806
|
||||
AXIS_B = 6356752.314245 # noqa: N806
|
||||
RADIUS = 6378137 # noqa: N806
|
||||
# Equation parameters
|
||||
# Equation https://en.wikipedia.org/wiki/Haversine_formula#Formulation
|
||||
flattening = (AXIS_A - AXIS_B) / AXIS_A
|
||||
|
|
|
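The constants renamed above feed the haversine great-circle formula: convert the coordinate differences to radians, form sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2), and scale by the radius. A hedged spherical-only sketch that ignores the flattening correction applied in the module:

from math import asin, cos, radians, sin, sqrt

RADIUS = 6378137  # equatorial radius in metres (WGS84)

def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points on a sphere."""
    phi1, phi2 = radians(lat1), radians(lat2)
    d_phi = radians(lat2 - lat1)
    d_lambda = radians(lon2 - lon1)
    h = sin(d_phi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(d_lambda / 2) ** 2
    return 2 * RADIUS * asin(sqrt(h))

# two example points in California, roughly 254 km apart on this spherical model
print(round(haversine(37.774856, -122.424227, 37.864742, -119.537521)))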
@ -45,9 +45,9 @@ def lamberts_ellipsoidal_distance(
|
|||
|
||||
# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
|
||||
# Distance in metres(m)
|
||||
AXIS_A = 6378137.0
|
||||
AXIS_B = 6356752.314245
|
||||
EQUATORIAL_RADIUS = 6378137
|
||||
AXIS_A = 6378137.0 # noqa: N806
|
||||
AXIS_B = 6356752.314245 # noqa: N806
|
||||
EQUATORIAL_RADIUS = 6378137 # noqa: N806
|
||||
|
||||
# Equation Parameters
|
||||
# https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
|
||||
|
@ -62,22 +62,22 @@ def lamberts_ellipsoidal_distance(
|
|||
sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
|
||||
|
||||
# Intermediate P and Q values
|
||||
P_value = (b_lat1 + b_lat2) / 2
|
||||
Q_value = (b_lat2 - b_lat1) / 2
|
||||
p_value = (b_lat1 + b_lat2) / 2
|
||||
q_value = (b_lat2 - b_lat1) / 2
|
||||
|
||||
# Intermediate X value
|
||||
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
|
||||
X_numerator = (sin(P_value) ** 2) * (cos(Q_value) ** 2)
|
||||
X_demonimator = cos(sigma / 2) ** 2
|
||||
X_value = (sigma - sin(sigma)) * (X_numerator / X_demonimator)
|
||||
x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
|
||||
x_denominator = cos(sigma / 2) ** 2
|
||||
x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
|
||||
|
||||
# Intermediate Y value
|
||||
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
|
||||
Y_numerator = (cos(P_value) ** 2) * (sin(Q_value) ** 2)
|
||||
Y_denominator = sin(sigma / 2) ** 2
|
||||
Y_value = (sigma + sin(sigma)) * (Y_numerator / Y_denominator)
|
||||
y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
|
||||
y_denominator = sin(sigma / 2) ** 2
|
||||
y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
|
||||
|
||||
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (X_value + Y_value)))
|
||||
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
# Finding Articulation Points in Undirected Graph
|
||||
def computeAP(l): # noqa: E741
|
||||
def compute_ap(l): # noqa: E741
|
||||
n = len(l)
|
||||
outEdgeCount = 0
|
||||
out_edge_count = 0
|
||||
low = [0] * n
|
||||
visited = [False] * n
|
||||
isArt = [False] * n
|
||||
is_art = [False] * n
|
||||
|
||||
def dfs(root, at, parent, outEdgeCount):
|
||||
def dfs(root, at, parent, out_edge_count):
|
||||
if parent == root:
|
||||
outEdgeCount += 1
|
||||
out_edge_count += 1
|
||||
visited[at] = True
|
||||
low[at] = at
|
||||
|
||||
|
@ -16,27 +16,27 @@ def computeAP(l): # noqa: E741
|
|||
if to == parent:
|
||||
pass
|
||||
elif not visited[to]:
|
||||
outEdgeCount = dfs(root, to, at, outEdgeCount)
|
||||
out_edge_count = dfs(root, to, at, out_edge_count)
|
||||
low[at] = min(low[at], low[to])
|
||||
|
||||
# AP found via bridge
|
||||
if at < low[to]:
|
||||
isArt[at] = True
|
||||
is_art[at] = True
|
||||
# AP found via cycle
|
||||
if at == low[to]:
|
||||
isArt[at] = True
|
||||
is_art[at] = True
|
||||
else:
|
||||
low[at] = min(low[at], to)
|
||||
return outEdgeCount
|
||||
return out_edge_count
|
||||
|
||||
for i in range(n):
|
||||
if not visited[i]:
|
||||
outEdgeCount = 0
|
||||
outEdgeCount = dfs(i, i, -1, outEdgeCount)
|
||||
isArt[i] = outEdgeCount > 1
|
||||
out_edge_count = 0
|
||||
out_edge_count = dfs(i, i, -1, out_edge_count)
|
||||
is_art[i] = out_edge_count > 1
|
||||
|
||||
for x in range(len(isArt)):
|
||||
if isArt[x] is True:
|
||||
for x in range(len(is_art)):
|
||||
if is_art[x] is True:
|
||||
print(x)
|
||||
|
||||
|
||||
|
@ -52,4 +52,4 @@ data = {
|
|||
7: [6, 8],
|
||||
8: [5, 7],
|
||||
}
|
||||
computeAP(data)
|
||||
compute_ap(data)
|
||||
|
|
|
@ -76,20 +76,20 @@ if __name__ == "__main__":
|
|||
"""
|
||||
|
||||
|
||||
def dfs(G, s):
|
||||
vis, S = {s}, [s]
|
||||
def dfs(g, s):
|
||||
vis, _s = {s}, [s]
|
||||
print(s)
|
||||
while S:
|
||||
while _s:
|
||||
flag = 0
|
||||
for i in G[S[-1]]:
|
||||
for i in g[_s[-1]]:
|
||||
if i not in vis:
|
||||
S.append(i)
|
||||
_s.append(i)
|
||||
vis.add(i)
|
||||
flag = 1
|
||||
print(i)
|
||||
break
|
||||
if not flag:
|
||||
S.pop()
|
||||
_s.pop()
|
||||
|
||||
|
||||
"""
|
||||
|
@ -103,15 +103,15 @@ def dfs(G, s):
|
|||
"""
|
||||
|
||||
|
||||
def bfs(G, s):
|
||||
vis, Q = {s}, deque([s])
|
||||
def bfs(g, s):
|
||||
vis, q = {s}, deque([s])
|
||||
print(s)
|
||||
while Q:
|
||||
u = Q.popleft()
|
||||
for v in G[u]:
|
||||
while q:
|
||||
u = q.popleft()
|
||||
for v in g[u]:
|
||||
if v not in vis:
|
||||
vis.add(v)
|
||||
Q.append(v)
|
||||
q.append(v)
|
||||
print(v)
|
||||
|
||||
|
||||
|
@ -127,10 +127,10 @@ def bfs(G, s):
|
|||
"""
|
||||
|
||||
|
||||
def dijk(G, s):
|
||||
def dijk(g, s):
|
||||
dist, known, path = {s: 0}, set(), {s: 0}
|
||||
while True:
|
||||
if len(known) == len(G) - 1:
|
||||
if len(known) == len(g) - 1:
|
||||
break
|
||||
mini = 100000
|
||||
for i in dist:
|
||||
|
@ -138,7 +138,7 @@ def dijk(G, s):
|
|||
mini = dist[i]
|
||||
u = i
|
||||
known.add(u)
|
||||
for v in G[u]:
|
||||
for v in g[u]:
|
||||
if v[0] not in known:
|
||||
if dist[u] + v[1] < dist.get(v[0], 100000):
|
||||
dist[v[0]] = dist[u] + v[1]
|
||||
|
@ -155,27 +155,27 @@ def dijk(G, s):
|
|||
"""
|
||||
|
||||
|
||||
def topo(G, ind=None, Q=None):
|
||||
if Q is None:
|
||||
Q = [1]
|
||||
def topo(g, ind=None, q=None):
|
||||
if q is None:
|
||||
q = [1]
|
||||
if ind is None:
|
||||
ind = [0] * (len(G) + 1) # SInce oth Index is ignored
|
||||
for u in G:
|
||||
for v in G[u]:
|
||||
ind = [0] * (len(g) + 1)  # Since the 0th index is ignored
|
||||
for u in g:
|
||||
for v in g[u]:
|
||||
ind[v] += 1
|
||||
Q = deque()
|
||||
for i in G:
|
||||
q = deque()
|
||||
for i in g:
|
||||
if ind[i] == 0:
|
||||
Q.append(i)
|
||||
if len(Q) == 0:
|
||||
q.append(i)
|
||||
if len(q) == 0:
|
||||
return
|
||||
v = Q.popleft()
|
||||
v = q.popleft()
|
||||
print(v)
|
||||
for w in G[v]:
|
||||
for w in g[v]:
|
||||
ind[w] -= 1
|
||||
if ind[w] == 0:
|
||||
Q.append(w)
|
||||
topo(G, ind, Q)
|
||||
q.append(w)
|
||||
topo(g, ind, q)
|
||||
|
||||
|
||||
"""
|
||||
|
@ -206,9 +206,9 @@ def adjm():
|
|||
"""
|
||||
|
||||
|
||||
def floy(A_and_n):
|
||||
(A, n) = A_and_n
|
||||
dist = list(A)
|
||||
def floy(a_and_n):
|
||||
(a, n) = a_and_n
|
||||
dist = list(a)
|
||||
path = [[0] * n for i in range(n)]
|
||||
for k in range(n):
|
||||
for i in range(n):
|
||||
|
@ -231,10 +231,10 @@ def floy(A_and_n):
|
|||
"""
|
||||
|
||||
|
||||
def prim(G, s):
|
||||
def prim(g, s):
|
||||
dist, known, path = {s: 0}, set(), {s: 0}
|
||||
while True:
|
||||
if len(known) == len(G) - 1:
|
||||
if len(known) == len(g) - 1:
|
||||
break
|
||||
mini = 100000
|
||||
for i in dist:
|
||||
|
@ -242,7 +242,7 @@ def prim(G, s):
|
|||
mini = dist[i]
|
||||
u = i
|
||||
known.add(u)
|
||||
for v in G[u]:
|
||||
for v in g[u]:
|
||||
if v[0] not in known:
|
||||
if v[1] < dist.get(v[0], 100000):
|
||||
dist[v[0]] = v[1]
|
||||
|
@ -279,16 +279,16 @@ def edglist():
|
|||
"""
|
||||
|
||||
|
||||
def krusk(E_and_n):
|
||||
def krusk(e_and_n):
|
||||
# Sort edges on the basis of distance
|
||||
(E, n) = E_and_n
|
||||
E.sort(reverse=True, key=lambda x: x[2])
|
||||
(e, n) = e_and_n
|
||||
e.sort(reverse=True, key=lambda x: x[2])
|
||||
s = [{i} for i in range(1, n + 1)]
|
||||
while True:
|
||||
if len(s) == 1:
|
||||
break
|
||||
print(s)
|
||||
x = E.pop()
|
||||
x = e.pop()
|
||||
for i in range(len(s)):
|
||||
if x[0] in s[i]:
|
||||
break
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
from queue import Queue
|
||||
|
||||
|
||||
def checkBipartite(graph):
|
||||
def check_bipartite(graph):
|
||||
queue = Queue()
|
||||
visited = [False] * len(graph)
|
||||
color = [-1] * len(graph)
|
||||
|
@ -45,4 +45,4 @@ def checkBipartite(graph):
|
|||
|
||||
if __name__ == "__main__":
|
||||
# Adjacency List of graph
|
||||
print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
|
||||
print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
|
||||
|
|
|
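check_bipartite above two-colours the graph with a BFS and reports failure when both ends of an edge receive the same colour. A short standalone sketch of that check over an adjacency-list dict:

from collections import deque

def is_bipartite(graph: dict[int, list[int]]) -> bool:
    """Two-colour the graph with BFS; bipartite iff no edge joins same-coloured vertices."""
    color: dict[int, int] = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            u = queue.popleft()
            for v in graph[u]:
                if v not in color:
                    color[v] = 1 - color[u]  # opposite colour to its parent
                    queue.append(v)
                elif color[v] == color[u]:
                    return False             # an odd cycle makes the graph non-bipartite
    return True

print(is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))  # True, a 4-cycle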
@ -103,14 +103,14 @@ G3 = {
|
|||
"G": [["F", 1]],
|
||||
}
|
||||
|
||||
shortDistance = dijkstra(G, "E", "C")
|
||||
print(shortDistance) # E -- 3 --> F -- 3 --> C == 6
|
||||
short_distance = dijkstra(G, "E", "C")
|
||||
print(short_distance) # E -- 3 --> F -- 3 --> C == 6
|
||||
|
||||
shortDistance = dijkstra(G2, "E", "F")
|
||||
print(shortDistance) # E -- 3 --> F == 3
|
||||
short_distance = dijkstra(G2, "E", "F")
|
||||
print(short_distance) # E -- 3 --> F == 3
|
||||
|
||||
shortDistance = dijkstra(G3, "E", "F")
|
||||
print(shortDistance) # E -- 2 --> G -- 1 --> F == 3
|
||||
short_distance = dijkstra(G3, "E", "F")
|
||||
print(short_distance) # E -- 2 --> G -- 1 --> F == 3
|
||||
|
||||
if __name__ == "__main__":
|
||||
import doctest
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
def printDist(dist, V):
|
||||
def print_dist(dist, v):
|
||||
print("\nVertex Distance")
|
||||
for i in range(V):
|
||||
for i in range(v):
|
||||
if dist[i] != float("inf"):
|
||||
print(i, "\t", int(dist[i]), end="\t")
|
||||
else:
|
||||
|
@ -8,26 +8,26 @@ def printDist(dist, V):
|
|||
print()
|
||||
|
||||
|
||||
def minDist(mdist, vset, V):
|
||||
minVal = float("inf")
|
||||
minInd = -1
|
||||
for i in range(V):
|
||||
if (not vset[i]) and mdist[i] < minVal:
|
||||
minInd = i
|
||||
minVal = mdist[i]
|
||||
return minInd
|
||||
def min_dist(mdist, vset, v):
|
||||
min_val = float("inf")
|
||||
min_ind = -1
|
||||
for i in range(v):
|
||||
if (not vset[i]) and mdist[i] < min_val:
|
||||
min_ind = i
|
||||
min_val = mdist[i]
|
||||
return min_ind
|
||||
|
||||
|
||||
def Dijkstra(graph, V, src):
|
||||
mdist = [float("inf") for i in range(V)]
|
||||
vset = [False for i in range(V)]
|
||||
def dijkstra(graph, v, src):
|
||||
mdist = [float("inf") for i in range(v)]
|
||||
vset = [False for i in range(v)]
|
||||
mdist[src] = 0.0
|
||||
|
||||
for i in range(V - 1):
|
||||
u = minDist(mdist, vset, V)
|
||||
for i in range(v - 1):
|
||||
u = min_dist(mdist, vset, v)
|
||||
vset[u] = True
|
||||
|
||||
for v in range(V):
|
||||
for v in range(v):
|
||||
if (
|
||||
(not vset[v])
|
||||
and graph[u][v] != float("inf")
|
||||
|
@ -35,7 +35,7 @@ def Dijkstra(graph, V, src):
|
|||
):
|
||||
mdist[v] = mdist[u] + graph[u][v]
|
||||
|
||||
printDist(mdist, V)
|
||||
print_dist(mdist, v)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -55,4 +55,4 @@ if __name__ == "__main__":
|
|||
graph[src][dst] = weight
|
||||
|
||||
gsrc = int(input("\nEnter shortest path source:").strip())
|
||||
Dijkstra(graph, V, gsrc)
|
||||
dijkstra(graph, V, gsrc)
|
||||
|
|
|
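The matrix-based dijkstra above scans all vertices to find the next closest one; with a binary heap that extraction becomes logarithmic. A hedged sketch over an adjacency-list graph, a different data layout from the function above:

import heapq

def dijkstra_heap(graph: dict[str, list[tuple[str, int]]], source: str) -> dict[str, float]:
    """Shortest distances from source; graph maps a vertex to (neighbour, weight) pairs."""
    dist = {vertex: float("inf") for vertex in graph}
    dist[source] = 0
    heap = [(0, source)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale entry, a shorter path to u was already found
        for v, weight in graph[u]:
            if d + weight < dist[v]:
                dist[v] = d + weight
                heapq.heappush(heap, (dist[v], v))
    return dist

example = {"A": [("B", 2), ("C", 5)], "B": [("C", 1), ("D", 4)], "C": [("D", 1)], "D": []}
print(dijkstra_heap(example, "A"))  # {'A': 0, 'B': 2, 'C': 3, 'D': 4}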
@ -15,7 +15,7 @@ class PriorityQueue:
|
|||
self.array = []
|
||||
self.pos = {} # To store the pos of node in array
|
||||
|
||||
def isEmpty(self):
|
||||
def is_empty(self):
|
||||
return self.cur_size == 0
|
||||
|
||||
def min_heapify(self, idx):
|
||||
|
@ -110,24 +110,24 @@ class Graph:
|
|||
self.par = [-1] * self.num_nodes
|
||||
# src is the source node
|
||||
self.dist[src] = 0
|
||||
Q = PriorityQueue()
|
||||
Q.insert((0, src)) # (dist from src, node)
|
||||
q = PriorityQueue()
|
||||
q.insert((0, src)) # (dist from src, node)
|
||||
for u in self.adjList.keys():
|
||||
if u != src:
|
||||
self.dist[u] = sys.maxsize # Infinity
|
||||
self.par[u] = -1
|
||||
|
||||
while not Q.isEmpty():
|
||||
u = Q.extract_min() # Returns node with the min dist from source
|
||||
while not q.is_empty():
|
||||
u = q.extract_min() # Returns node with the min dist from source
|
||||
# Update the distance of all the neighbours of u and
|
||||
# if their prev dist was INFINITY then push them in Q
|
||||
for v, w in self.adjList[u]:
|
||||
new_dist = self.dist[u] + w
|
||||
if self.dist[v] > new_dist:
|
||||
if self.dist[v] == sys.maxsize:
|
||||
Q.insert((new_dist, v))
|
||||
q.insert((new_dist, v))
|
||||
else:
|
||||
Q.decrease_key((self.dist[v], v), new_dist)
|
||||
q.decrease_key((self.dist[v], v), new_dist)
|
||||
self.dist[v] = new_dist
|
||||
self.par[v] = u
|
||||
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
class FlowNetwork:
|
||||
def __init__(self, graph, sources, sinks):
|
||||
self.sourceIndex = None
|
||||
self.sinkIndex = None
|
||||
self.source_index = None
|
||||
self.sink_index = None
|
||||
self.graph = graph
|
||||
|
||||
self._normalizeGraph(sources, sinks)
|
||||
self.verticesCount = len(graph)
|
||||
self.maximumFlowAlgorithm = None
|
||||
self._normalize_graph(sources, sinks)
|
||||
self.vertices_count = len(graph)
|
||||
self.maximum_flow_algorithm = None
|
||||
|
||||
# make only one source and one sink
|
||||
def _normalizeGraph(self, sources, sinks):
|
||||
def _normalize_graph(self, sources, sinks):
|
||||
if sources is int:
|
||||
sources = [sources]
|
||||
if sinks is int:
|
||||
|
@ -18,54 +18,54 @@ class FlowNetwork:
|
|||
if len(sources) == 0 or len(sinks) == 0:
|
||||
return
|
||||
|
||||
self.sourceIndex = sources[0]
|
||||
self.sinkIndex = sinks[0]
|
||||
self.source_index = sources[0]
|
||||
self.sink_index = sinks[0]
|
||||
|
||||
# make fake vertex if there are more
|
||||
# than one source or sink
|
||||
if len(sources) > 1 or len(sinks) > 1:
|
||||
maxInputFlow = 0
|
||||
max_input_flow = 0
|
||||
for i in sources:
|
||||
maxInputFlow += sum(self.graph[i])
|
||||
max_input_flow += sum(self.graph[i])
|
||||
|
||||
size = len(self.graph) + 1
|
||||
for room in self.graph:
|
||||
room.insert(0, 0)
|
||||
self.graph.insert(0, [0] * size)
|
||||
for i in sources:
|
||||
self.graph[0][i + 1] = maxInputFlow
|
||||
self.sourceIndex = 0
|
||||
self.graph[0][i + 1] = max_input_flow
|
||||
self.source_index = 0
|
||||
|
||||
size = len(self.graph) + 1
|
||||
for room in self.graph:
|
||||
room.append(0)
|
||||
self.graph.append([0] * size)
|
||||
for i in sinks:
|
||||
self.graph[i + 1][size - 1] = maxInputFlow
|
||||
self.sinkIndex = size - 1
|
||||
self.graph[i + 1][size - 1] = max_input_flow
|
||||
self.sink_index = size - 1
|
||||
|
||||
def findMaximumFlow(self):
|
||||
if self.maximumFlowAlgorithm is None:
|
||||
def find_maximum_flow(self):
|
||||
if self.maximum_flow_algorithm is None:
|
||||
raise Exception("You need to set maximum flow algorithm before.")
|
||||
if self.sourceIndex is None or self.sinkIndex is None:
|
||||
if self.source_index is None or self.sink_index is None:
|
||||
return 0
|
||||
|
||||
self.maximumFlowAlgorithm.execute()
|
||||
return self.maximumFlowAlgorithm.getMaximumFlow()
|
||||
self.maximum_flow_algorithm.execute()
|
||||
return self.maximum_flow_algorithm.get_maximum_flow()
|
||||
|
||||
def setMaximumFlowAlgorithm(self, Algorithm):
|
||||
self.maximumFlowAlgorithm = Algorithm(self)
|
||||
def set_maximum_flow_algorithm(self, algorithm):
|
||||
self.maximum_flow_algorithm = algorithm(self)
|
||||
|
||||
|
||||
class FlowNetworkAlgorithmExecutor:
|
||||
def __init__(self, flowNetwork):
|
||||
self.flowNetwork = flowNetwork
|
||||
self.verticesCount = flowNetwork.verticesCount
|
||||
self.sourceIndex = flowNetwork.sourceIndex
|
||||
self.sinkIndex = flowNetwork.sinkIndex
|
||||
def __init__(self, flow_network):
|
||||
self.flow_network = flow_network
|
||||
self.verticies_count = flow_network.vertices_count
|
||||
self.source_index = flow_network.source_index
|
||||
self.sink_index = flow_network.sink_index
|
||||
# it's just a reference, so you shouldn't change
|
||||
# it in your algorithms, use deep copy before doing that
|
||||
self.graph = flowNetwork.graph
|
||||
self.graph = flow_network.graph
|
||||
self.executed = False
|
||||
|
||||
def execute(self):
|
||||
|
@ -79,95 +79,96 @@ class FlowNetworkAlgorithmExecutor:
|
|||
|
||||
|
||||
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
|
||||
def __init__(self, flowNetwork):
|
||||
super().__init__(flowNetwork)
|
||||
def __init__(self, flow_network):
|
||||
super().__init__(flow_network)
|
||||
# use this to save your result
|
||||
self.maximumFlow = -1
|
||||
self.maximum_flow = -1
|
||||
|
||||
def getMaximumFlow(self):
|
||||
def get_maximum_flow(self):
|
||||
if not self.executed:
|
||||
raise Exception("You should execute algorithm before using its result!")
|
||||
|
||||
return self.maximumFlow
|
||||
return self.maximum_flow
|
||||
|
||||
|
||||
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
|
||||
def __init__(self, flowNetwork):
|
||||
super().__init__(flowNetwork)
|
||||
def __init__(self, flow_network):
|
||||
super().__init__(flow_network)
|
||||
|
||||
self.preflow = [[0] * self.verticesCount for i in range(self.verticesCount)]
|
||||
self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
|
||||
|
||||
self.heights = [0] * self.verticesCount
|
||||
self.excesses = [0] * self.verticesCount
|
||||
self.heights = [0] * self.verticies_count
|
||||
self.excesses = [0] * self.verticies_count
|
||||
|
||||
def _algorithm(self):
|
||||
self.heights[self.sourceIndex] = self.verticesCount
|
||||
self.heights[self.source_index] = self.verticies_count
|
||||
|
||||
# push some substance to graph
|
||||
for nextVertexIndex, bandwidth in enumerate(self.graph[self.sourceIndex]):
|
||||
self.preflow[self.sourceIndex][nextVertexIndex] += bandwidth
|
||||
self.preflow[nextVertexIndex][self.sourceIndex] -= bandwidth
|
||||
self.excesses[nextVertexIndex] += bandwidth
|
||||
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
|
||||
self.preflow[self.source_index][nextvertex_index] += bandwidth
|
||||
self.preflow[nextvertex_index][self.source_index] -= bandwidth
|
||||
self.excesses[nextvertex_index] += bandwidth
|
||||
|
||||
# Relabel-to-front selection rule
|
||||
verticesList = [
|
||||
vertices_list = [
|
||||
i
|
||||
for i in range(self.verticesCount)
|
||||
if i != self.sourceIndex and i != self.sinkIndex
|
||||
for i in range(self.verticies_count)
|
||||
if i != self.source_index and i != self.sink_index
|
||||
]
|
||||
|
||||
# move through list
|
||||
i = 0
|
||||
while i < len(verticesList):
|
||||
vertexIndex = verticesList[i]
|
||||
previousHeight = self.heights[vertexIndex]
|
||||
self.processVertex(vertexIndex)
|
||||
if self.heights[vertexIndex] > previousHeight:
|
||||
while i < len(vertices_list):
|
||||
vertex_index = vertices_list[i]
|
||||
previous_height = self.heights[vertex_index]
|
||||
self.process_vertex(vertex_index)
|
||||
if self.heights[vertex_index] > previous_height:
|
||||
# if it was relabeled, swap elements
|
||||
# and start from 0 index
|
||||
verticesList.insert(0, verticesList.pop(i))
|
||||
vertices_list.insert(0, vertices_list.pop(i))
|
||||
i = 0
|
||||
else:
|
||||
i += 1
|
||||
|
||||
self.maximumFlow = sum(self.preflow[self.sourceIndex])
|
||||
self.maximum_flow = sum(self.preflow[self.source_index])
|
||||
|
||||
def processVertex(self, vertexIndex):
|
||||
while self.excesses[vertexIndex] > 0:
|
||||
for neighbourIndex in range(self.verticesCount):
|
||||
def process_vertex(self, vertex_index):
|
||||
while self.excesses[vertex_index] > 0:
|
||||
for neighbour_index in range(self.verticies_count):
|
||||
# if it's neighbour and current vertex is higher
|
||||
if (
|
||||
self.graph[vertexIndex][neighbourIndex]
|
||||
- self.preflow[vertexIndex][neighbourIndex]
|
||||
self.graph[vertex_index][neighbour_index]
|
||||
- self.preflow[vertex_index][neighbour_index]
|
||||
> 0
|
||||
and self.heights[vertexIndex] > self.heights[neighbourIndex]
|
||||
and self.heights[vertex_index] > self.heights[neighbour_index]
|
||||
):
|
||||
self.push(vertexIndex, neighbourIndex)
|
||||
self.push(vertex_index, neighbour_index)
|
||||
|
||||
self.relabel(vertexIndex)
|
||||
self.relabel(vertex_index)
|
||||
|
||||
def push(self, fromIndex, toIndex):
|
||||
preflowDelta = min(
|
||||
self.excesses[fromIndex],
|
||||
self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex],
|
||||
def push(self, from_index, to_index):
|
||||
preflow_delta = min(
|
||||
self.excesses[from_index],
|
||||
self.graph[from_index][to_index] - self.preflow[from_index][to_index],
|
||||
)
|
||||
self.preflow[fromIndex][toIndex] += preflowDelta
|
||||
self.preflow[toIndex][fromIndex] -= preflowDelta
|
||||
self.excesses[fromIndex] -= preflowDelta
|
||||
self.excesses[toIndex] += preflowDelta
|
||||
self.preflow[from_index][to_index] += preflow_delta
|
||||
self.preflow[to_index][from_index] -= preflow_delta
|
||||
self.excesses[from_index] -= preflow_delta
|
||||
self.excesses[to_index] += preflow_delta
|
||||
|
||||
def relabel(self, vertexIndex):
|
||||
minHeight = None
|
||||
for toIndex in range(self.verticesCount):
|
||||
def relabel(self, vertex_index):
|
||||
min_height = None
|
||||
for to_index in range(self.verticies_count):
|
||||
if (
|
||||
self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex]
|
||||
self.graph[vertex_index][to_index]
|
||||
- self.preflow[vertex_index][to_index]
|
||||
> 0
|
||||
):
|
||||
if minHeight is None or self.heights[toIndex] < minHeight:
|
||||
minHeight = self.heights[toIndex]
|
||||
if min_height is None or self.heights[to_index] < min_height:
|
||||
min_height = self.heights[to_index]
|
||||
|
||||
if minHeight is not None:
|
||||
self.heights[vertexIndex] = minHeight + 1
|
||||
if min_height is not None:
|
||||
self.heights[vertex_index] = min_height + 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -184,10 +185,10 @@ if __name__ == "__main__":
|
|||
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
|
||||
|
||||
# prepare our network
|
||||
flowNetwork = FlowNetwork(graph, entrances, exits)
|
||||
flow_network = FlowNetwork(graph, entrances, exits)
|
||||
# set algorithm
|
||||
flowNetwork.setMaximumFlowAlgorithm(PushRelabelExecutor)
|
||||
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
|
||||
# and calculate
|
||||
maximumFlow = flowNetwork.findMaximumFlow()
|
||||
maximum_flow = flow_network.find_maximum_flow()
|
||||
|
||||
print(f"maximum flow is {maximumFlow}")
|
||||
print(f"maximum flow is {maximum_flow}")
|
||||
|
|
|
@ -50,21 +50,21 @@ def check_euler(graph, max_node):
|
|||
|
||||
|
||||
def main():
|
||||
G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
|
||||
G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
|
||||
G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
|
||||
G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
|
||||
G5 = {
|
||||
g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
|
||||
g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
|
||||
g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
|
||||
g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
|
||||
g5 = {
|
||||
1: [],
|
||||
2: []
|
||||
# all degree is zero
|
||||
}
|
||||
max_node = 10
|
||||
check_euler(G1, max_node)
|
||||
check_euler(G2, max_node)
|
||||
check_euler(G3, max_node)
|
||||
check_euler(G4, max_node)
|
||||
check_euler(G5, max_node)
|
||||
check_euler(g1, max_node)
|
||||
check_euler(g2, max_node)
|
||||
check_euler(g3, max_node)
|
||||
check_euler(g4, max_node)
|
||||
check_euler(g5, max_node)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -151,16 +151,16 @@ def create_edge(nodes, graph, cluster, c1):
|
|||
|
||||
|
||||
def construct_graph(cluster, nodes):
|
||||
X = cluster[max(cluster.keys())]
|
||||
x = cluster[max(cluster.keys())]
|
||||
cluster[max(cluster.keys()) + 1] = "Header"
|
||||
graph = {}
|
||||
for i in X:
|
||||
for i in x:
|
||||
if tuple(["Header"]) in graph:
|
||||
graph[tuple(["Header"])].append(X[i])
|
||||
graph[tuple(["Header"])].append(x[i])
|
||||
else:
|
||||
graph[tuple(["Header"])] = [X[i]]
|
||||
for i in X:
|
||||
graph[tuple(X[i])] = [["Header"]]
|
||||
graph[tuple(["Header"])] = [x[i]]
|
||||
for i in x:
|
||||
graph[tuple(x[i])] = [["Header"]]
|
||||
i = 1
|
||||
while i < max(cluster) - 1:
|
||||
create_edge(nodes, graph, cluster, i)
|
||||
|
@ -168,7 +168,7 @@ def construct_graph(cluster, nodes):
|
|||
return graph
|
||||
|
||||
|
||||
def myDFS(graph, start, end, path=None):
|
||||
def my_dfs(graph, start, end, path=None):
|
||||
"""
|
||||
find different DFS walk from given node to Header node
|
||||
"""
|
||||
|
@ -177,7 +177,7 @@ def myDFS(graph, start, end, path=None):
|
|||
paths.append(path)
|
||||
for node in graph[start]:
|
||||
if tuple(node) not in path:
|
||||
myDFS(graph, tuple(node), end, path)
|
||||
my_dfs(graph, tuple(node), end, path)
|
||||
|
||||
|
||||
def find_freq_subgraph_given_support(s, cluster, graph):
|
||||
|
@ -186,23 +186,23 @@ def find_freq_subgraph_given_support(s, cluster, graph):
|
|||
"""
|
||||
k = int(s / 100 * (len(cluster) - 1))
|
||||
for i in cluster[k].keys():
|
||||
myDFS(graph, tuple(cluster[k][i]), tuple(["Header"]))
|
||||
my_dfs(graph, tuple(cluster[k][i]), tuple(["Header"]))
|
||||
|
||||
|
||||
def freq_subgraphs_edge_list(paths):
|
||||
"""
|
||||
returns Edge list for frequent subgraphs
|
||||
"""
|
||||
freq_sub_EL = []
|
||||
freq_sub_el = []
|
||||
for edges in paths:
|
||||
EL = []
|
||||
el = []
|
||||
for j in range(len(edges) - 1):
|
||||
temp = list(edges[j])
|
||||
for e in temp:
|
||||
edge = (e[0], e[1])
|
||||
EL.append(edge)
|
||||
freq_sub_EL.append(EL)
|
||||
return freq_sub_EL
|
||||
el.append(edge)
|
||||
freq_sub_el.append(el)
|
||||
return freq_sub_el
|
||||
|
||||
|
||||
def preprocess(edge_array):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
|
||||
def longestDistance(graph):
|
||||
def longest_distance(graph):
|
||||
indegree = [0] * len(graph)
|
||||
queue = []
|
||||
longDist = [1] * len(graph)
|
||||
long_dist = [1] * len(graph)
|
||||
|
||||
for key, values in graph.items():
|
||||
for i in values:
|
||||
|
@ -17,15 +17,15 @@ def longestDistance(graph):
|
|||
for x in graph[vertex]:
|
||||
indegree[x] -= 1
|
||||
|
||||
if longDist[vertex] + 1 > longDist[x]:
|
||||
longDist[x] = longDist[vertex] + 1
|
||||
if long_dist[vertex] + 1 > long_dist[x]:
|
||||
long_dist[x] = long_dist[vertex] + 1
|
||||
|
||||
if indegree[x] == 0:
|
||||
queue.append(x)
|
||||
|
||||
print(max(longDist))
|
||||
print(max(long_dist))
|
||||
|
||||
|
||||
# Adjacency list of Graph
|
||||
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
|
||||
longestDistance(graph)
|
||||
longest_distance(graph)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
def topologicalSort(graph):
|
||||
def topological_sort(graph):
|
||||
"""
|
||||
Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph
|
||||
using BFS
|
||||
|
@ -33,4 +33,4 @@ def topologicalSort(graph):
|
|||
|
||||
# Adjacency List of Graph
|
||||
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
|
||||
topologicalSort(graph)
|
||||
topological_sort(graph)
|
||||
|
|
|
@ -2,15 +2,15 @@ import sys
|
|||
from collections import defaultdict
|
||||
|
||||
|
||||
def PrimsAlgorithm(l): # noqa: E741
|
||||
def prisms_algorithm(l): # noqa: E741
|
||||
|
||||
nodePosition = []
|
||||
node_position = []
|
||||
|
||||
def get_position(vertex):
|
||||
return nodePosition[vertex]
|
||||
return node_position[vertex]
|
||||
|
||||
def set_position(vertex, pos):
|
||||
nodePosition[vertex] = pos
|
||||
node_position[vertex] = pos
|
||||
|
||||
def top_to_bottom(heap, start, size, positions):
|
||||
if start > size // 2 - 1:
|
||||
|
@ -64,44 +64,44 @@ def PrimsAlgorithm(l): # noqa: E741
|
|||
for i in range(start, -1, -1):
|
||||
top_to_bottom(heap, i, len(heap), positions)
|
||||
|
||||
def deleteMinimum(heap, positions):
|
||||
def delete_minimum(heap, positions):
|
||||
temp = positions[0]
|
||||
heap[0] = sys.maxsize
|
||||
top_to_bottom(heap, 0, len(heap), positions)
|
||||
return temp
|
||||
|
||||
visited = [0 for i in range(len(l))]
|
||||
Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex
|
||||
nbr_tv = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex
|
||||
# Minimum Distance of explored vertex with neighboring vertex of partial tree
|
||||
# formed in graph
|
||||
Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex
|
||||
Positions = []
|
||||
distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
|
||||
positions = []
|
||||
|
||||
for x in range(len(l)):
|
||||
p = sys.maxsize
|
||||
Distance_TV.append(p)
|
||||
Positions.append(x)
|
||||
nodePosition.append(x)
|
||||
distance_tv.append(p)
|
||||
positions.append(x)
|
||||
node_position.append(x)
|
||||
|
||||
TreeEdges = []
|
||||
tree_edges = []
|
||||
visited[0] = 1
|
||||
Distance_TV[0] = sys.maxsize
|
||||
distance_tv[0] = sys.maxsize
|
||||
for x in l[0]:
|
||||
Nbr_TV[x[0]] = 0
|
||||
Distance_TV[x[0]] = x[1]
|
||||
heapify(Distance_TV, Positions)
|
||||
nbr_tv[x[0]] = 0
|
||||
distance_tv[x[0]] = x[1]
|
||||
heapify(distance_tv, positions)
|
||||
|
||||
for i in range(1, len(l)):
|
||||
vertex = deleteMinimum(Distance_TV, Positions)
|
||||
vertex = delete_minimum(distance_tv, positions)
|
||||
if visited[vertex] == 0:
|
||||
TreeEdges.append((Nbr_TV[vertex], vertex))
|
||||
tree_edges.append((nbr_tv[vertex], vertex))
|
||||
visited[vertex] = 1
|
||||
for v in l[vertex]:
|
||||
if visited[v[0]] == 0 and v[1] < Distance_TV[get_position(v[0])]:
|
||||
Distance_TV[get_position(v[0])] = v[1]
|
||||
bottom_to_top(v[1], get_position(v[0]), Distance_TV, Positions)
|
||||
Nbr_TV[v[0]] = vertex
|
||||
return TreeEdges
|
||||
if visited[v[0]] == 0 and v[1] < distance_tv[get_position(v[0])]:
|
||||
distance_tv[get_position(v[0])] = v[1]
|
||||
bottom_to_top(v[1], get_position(v[0]), distance_tv, positions)
|
||||
nbr_tv[v[0]] = vertex
|
||||
return tree_edges
|
||||
|
||||
|
||||
if __name__ == "__main__": # pragma: no cover
|
||||
|
@ -113,4 +113,4 @@ if __name__ == "__main__": # pragma: no cover
|
|||
l = [int(x) for x in input().strip().split()] # noqa: E741
|
||||
adjlist[l[0]].append([l[1], l[2]])
|
||||
adjlist[l[1]].append([l[0], l[2]])
|
||||
print(PrimsAlgorithm(adjlist))
|
||||
print(prisms_algorithm(adjlist))
|
||||
|
|
|
@ -55,21 +55,21 @@ class PriorityQueue:
|
|||
return (priority, item)
|
||||
|
||||
|
||||
def consistent_heuristic(P: TPos, goal: TPos):
|
||||
def consistent_heuristic(p: TPos, goal: TPos):
|
||||
# euclidean distance
|
||||
a = np.array(P)
|
||||
a = np.array(p)
|
||||
b = np.array(goal)
|
||||
return np.linalg.norm(a - b)
|
||||
|
||||
|
||||
def heuristic_2(P: TPos, goal: TPos):
|
||||
def heuristic_2(p: TPos, goal: TPos):
|
||||
# integer division by time variable
|
||||
return consistent_heuristic(P, goal) // t
|
||||
return consistent_heuristic(p, goal) // t
|
||||
|
||||
|
||||
def heuristic_1(P: TPos, goal: TPos):
|
||||
def heuristic_1(p: TPos, goal: TPos):
|
||||
# manhattan distance
|
||||
return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
|
||||
return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
|
||||
|
||||
|
||||
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
|
||||
|
|
|
@ -2,7 +2,7 @@ from __future__ import annotations
|
|||
|
||||
|
||||
def dfs(u):
|
||||
global graph, reversedGraph, scc, component, visit, stack
|
||||
global graph, reversed_graph, scc, component, visit, stack
|
||||
if visit[u]:
|
||||
return
|
||||
visit[u] = True
|
||||
|
@ -12,17 +12,17 @@ def dfs(u):
|
|||
|
||||
|
||||
def dfs2(u):
|
||||
global graph, reversedGraph, scc, component, visit, stack
|
||||
global graph, reversed_graph, scc, component, visit, stack
|
||||
if visit[u]:
|
||||
return
|
||||
visit[u] = True
|
||||
component.append(u)
|
||||
for v in reversedGraph[u]:
|
||||
for v in reversed_graph[u]:
|
||||
dfs2(v)
|
||||
|
||||
|
||||
def kosaraju():
|
||||
global graph, reversedGraph, scc, component, visit, stack
|
||||
global graph, reversed_graph, scc, component, visit, stack
|
||||
for i in range(n):
|
||||
dfs(i)
|
||||
visit = [False] * n
|
||||
|
@ -40,12 +40,12 @@ if __name__ == "__main__":
|
|||
n, m = list(map(int, input().strip().split()))
|
||||
|
||||
graph: list[list[int]] = [[] for i in range(n)] # graph
|
||||
reversedGraph: list[list[int]] = [[] for i in range(n)] # reversed graph
|
||||
reversed_graph: list[list[int]] = [[] for i in range(n)] # reversed graph
|
||||
# input graph data (edges)
|
||||
for i in range(m):
|
||||
u, v = list(map(int, input().strip().split()))
|
||||
graph[u].append(v)
|
||||
reversedGraph[v].append(u)
|
||||
reversed_graph[v].append(u)
|
||||
|
||||
stack: list[int] = []
|
||||
visit: list[bool] = [False] * n
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from collections import defaultdict
|
||||
|
||||
from graphs.minimum_spanning_tree_prims import PrimsAlgorithm as mst
|
||||
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
|
||||
|
||||
|
||||
def test_prim_successful_result():
|
||||
|
|
|
@ -20,7 +20,7 @@ def adler32(plain_text: str) -> int:
|
|||
>>> adler32('go adler em all')
|
||||
708642122
|
||||
"""
|
||||
MOD_ADLER = 65521
|
||||
MOD_ADLER = 65521 # noqa: N806
|
||||
a = 1
|
||||
b = 0
|
||||
for plain_chr in plain_text:
|
||||
|
|
|
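For illustration only, not part of the diff above: the hunk keeps the uppercase local MOD_ADLER and silences pep8-naming with # noqa: N806. A lowercase name would avoid the suppression entirely; the sketch below shows that variant. The loop body and the final packing step are restated from the standard Adler-32 definition, since they fall outside this hunk, and the expected value comes from the doctest shown above.

def adler32(plain_text: str) -> int:
    # Adler-32: two running sums modulo 65521, packed into one 32-bit value
    mod_adler = 65521
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % mod_adler
        b = (b + a) % mod_adler
    return (b << 16) | a

print(adler32("go adler em all"))  # 708642122, matching the doctest above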
@ -43,11 +43,11 @@ def pull():
|
|||
global buffer_space, params_space, machine_time, K, m, t
|
||||
|
||||
# PRNG (Xorshift by George Marsaglia)
|
||||
def xorshift(X, Y):
|
||||
X ^= Y >> 13
|
||||
Y ^= X << 17
|
||||
X ^= Y >> 5
|
||||
return X
|
||||
def xorshift(x, y):
|
||||
x ^= y >> 13
|
||||
y ^= x << 17
|
||||
x ^= y >> 5
|
||||
return x
|
||||
|
||||
# Choosing Dynamical Systems (Increment)
|
||||
key = machine_time % m
|
||||
|
@ -63,13 +63,13 @@ def pull():
|
|||
params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3
|
||||
|
||||
# Choosing Chaotic Data
|
||||
X = int(buffer_space[(key + 2) % m] * (10**10))
|
||||
Y = int(buffer_space[(key - 2) % m] * (10**10))
|
||||
x = int(buffer_space[(key + 2) % m] * (10**10))
|
||||
y = int(buffer_space[(key - 2) % m] * (10**10))
|
||||
|
||||
# Machine Time
|
||||
machine_time += 1
|
||||
|
||||
return xorshift(X, Y) % 0xFFFFFFFF
|
||||
return xorshift(x, y) % 0xFFFFFFFF
|
||||
|
||||
|
||||
def reset():
|
||||
|
|
|
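For illustration only, not part of the diff above: the Marsaglia xorshift helper is pure bit arithmetic, so renaming X/Y to x/y cannot change its output. Below is a minimal standalone check of the renamed helper, masked to 32 bits the same way pull() masks it; the seed values are arbitrary sample inputs, not taken from the module.

def xorshift(x, y):
    # Same bit mixing as the renamed helper in the hunk above
    x ^= y >> 13
    y ^= x << 17
    x ^= y >> 5
    return x

seed_x, seed_y = 1_000_000_007, 998_244_353  # arbitrary sample inputs
print(xorshift(seed_x, seed_y) % 0xFFFFFFFF)  # deterministic, bounded to 32 bits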
@ -68,177 +68,177 @@ def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"):
|
|||
|
||||
|
||||
# Functions of hamming code-------------------------------------------
|
||||
def emitterConverter(sizePar, data):
|
||||
def emitter_converter(size_par, data):
|
||||
"""
|
||||
:param sizePar: how many parity bits the message must have
|
||||
:param size_par: how many parity bits the message must have
|
||||
:param data: information bits
|
||||
:return: message to be transmitted by unreliable medium
|
||||
- bits of information merged with parity bits
|
||||
|
||||
>>> emitterConverter(4, "101010111111")
|
||||
>>> emitter_converter(4, "101010111111")
|
||||
['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1']
|
||||
"""
|
||||
if sizePar + len(data) <= 2**sizePar - (len(data) - 1):
|
||||
if size_par + len(data) <= 2**size_par - (len(data) - 1):
|
||||
print("ERROR - size of parity don't match with size of data")
|
||||
exit(0)
|
||||
|
||||
dataOut = []
|
||||
data_out = []
|
||||
parity = []
|
||||
binPos = [bin(x)[2:] for x in range(1, sizePar + len(data) + 1)]
|
||||
bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data) + 1)]
|
||||
|
||||
# sorted information data for the size of the output data
|
||||
dataOrd = []
|
||||
data_ord = []
|
||||
# data position template + parity
|
||||
dataOutGab = []
|
||||
data_out_gab = []
|
||||
# parity bit counter
|
||||
qtdBP = 0
|
||||
qtd_bp = 0
|
||||
# counter position of data bits
|
||||
contData = 0
|
||||
cont_data = 0
|
||||
|
||||
for x in range(1, sizePar + len(data) + 1):
|
||||
for x in range(1, size_par + len(data) + 1):
|
||||
# Performs a template of bit positions - who should be given,
|
||||
# and who should be parity
|
||||
if qtdBP < sizePar:
|
||||
if qtd_bp < size_par:
|
||||
if (np.log(x) / np.log(2)).is_integer():
|
||||
dataOutGab.append("P")
|
||||
qtdBP = qtdBP + 1
|
||||
data_out_gab.append("P")
|
||||
qtd_bp = qtd_bp + 1
|
||||
else:
|
||||
dataOutGab.append("D")
|
||||
data_out_gab.append("D")
|
||||
else:
|
||||
dataOutGab.append("D")
|
||||
data_out_gab.append("D")
|
||||
|
||||
# Sorts the data to the new output size
|
||||
if dataOutGab[-1] == "D":
|
||||
dataOrd.append(data[contData])
|
||||
contData += 1
|
||||
if data_out_gab[-1] == "D":
|
||||
data_ord.append(data[cont_data])
|
||||
cont_data += 1
|
||||
else:
|
||||
dataOrd.append(None)
|
||||
data_ord.append(None)
|
||||
|
||||
# Calculates parity
|
||||
qtdBP = 0 # parity bit counter
|
||||
for bp in range(1, sizePar + 1):
|
||||
qtd_bp = 0 # parity bit counter
|
||||
for bp in range(1, size_par + 1):
|
||||
# Bit counter one for a given parity
|
||||
contBO = 0
|
||||
cont_bo = 0
|
||||
# counter to control the loop reading
|
||||
contLoop = 0
|
||||
for x in dataOrd:
|
||||
cont_loop = 0
|
||||
for x in data_ord:
|
||||
if x is not None:
|
||||
try:
|
||||
aux = (binPos[contLoop])[-1 * (bp)]
|
||||
aux = (bin_pos[cont_loop])[-1 * (bp)]
|
||||
except IndexError:
|
||||
aux = "0"
|
||||
if aux == "1":
|
||||
if x == "1":
|
||||
contBO += 1
|
||||
contLoop += 1
|
||||
parity.append(contBO % 2)
|
||||
cont_bo += 1
|
||||
cont_loop += 1
|
||||
parity.append(cont_bo % 2)
|
||||
|
||||
qtdBP += 1
|
||||
qtd_bp += 1
|
||||
|
||||
# Mount the message
|
||||
ContBP = 0 # parity bit counter
|
||||
for x in range(0, sizePar + len(data)):
|
||||
if dataOrd[x] is None:
|
||||
dataOut.append(str(parity[ContBP]))
|
||||
ContBP += 1
|
||||
cont_bp = 0 # parity bit counter
|
||||
for x in range(0, size_par + len(data)):
|
||||
if data_ord[x] is None:
|
||||
data_out.append(str(parity[cont_bp]))
|
||||
cont_bp += 1
|
||||
else:
|
||||
dataOut.append(dataOrd[x])
|
||||
data_out.append(data_ord[x])
|
||||
|
||||
return dataOut
|
||||
return data_out
|
||||
|
||||
|
||||
def receptorConverter(sizePar, data):
|
||||
def receptor_converter(size_par, data):
|
||||
"""
|
||||
>>> receptorConverter(4, "1111010010111111")
|
||||
>>> receptor_converter(4, "1111010010111111")
|
||||
(['1', '0', '1', '0', '1', '0', '1', '1', '1', '1', '1', '1'], True)
|
||||
"""
|
||||
# data position template + parity
|
||||
dataOutGab = []
|
||||
data_out_gab = []
|
||||
# Parity bit counter
|
||||
qtdBP = 0
|
||||
qtd_bp = 0
|
||||
# Counter p data bit reading
|
||||
contData = 0
|
||||
cont_data = 0
|
||||
# list of parity received
|
||||
parityReceived = []
|
||||
dataOutput = []
|
||||
parity_received = []
|
||||
data_output = []
|
||||
|
||||
for x in range(1, len(data) + 1):
|
||||
# Performs a template of bit positions - who should be given,
|
||||
# and who should be parity
|
||||
if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer():
|
||||
dataOutGab.append("P")
|
||||
qtdBP = qtdBP + 1
|
||||
if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer():
|
||||
data_out_gab.append("P")
|
||||
qtd_bp = qtd_bp + 1
|
||||
else:
|
||||
dataOutGab.append("D")
|
||||
data_out_gab.append("D")
|
||||
|
||||
# Sorts the data to the new output size
|
||||
if dataOutGab[-1] == "D":
|
||||
dataOutput.append(data[contData])
|
||||
if data_out_gab[-1] == "D":
|
||||
data_output.append(data[cont_data])
|
||||
else:
|
||||
parityReceived.append(data[contData])
|
||||
contData += 1
|
||||
parity_received.append(data[cont_data])
|
||||
cont_data += 1
|
||||
|
||||
# -----------calculates the parity with the data
|
||||
dataOut = []
|
||||
data_out = []
|
||||
parity = []
|
||||
binPos = [bin(x)[2:] for x in range(1, sizePar + len(dataOutput) + 1)]
|
||||
bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data_output) + 1)]
|
||||
|
||||
# sorted information data for the size of the output data
|
||||
dataOrd = []
|
||||
data_ord = []
|
||||
# Data position feedback + parity
|
||||
dataOutGab = []
|
||||
data_out_gab = []
|
||||
# Parity bit counter
|
||||
qtdBP = 0
|
||||
qtd_bp = 0
|
||||
# Counter p data bit reading
|
||||
contData = 0
|
||||
cont_data = 0
|
||||
|
||||
for x in range(1, sizePar + len(dataOutput) + 1):
|
||||
for x in range(1, size_par + len(data_output) + 1):
|
||||
# Performs a template position of bits - who should be given,
|
||||
# and who should be parity
|
||||
if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer():
|
||||
dataOutGab.append("P")
|
||||
qtdBP = qtdBP + 1
|
||||
if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer():
|
||||
data_out_gab.append("P")
|
||||
qtd_bp = qtd_bp + 1
|
||||
else:
|
||||
dataOutGab.append("D")
|
||||
data_out_gab.append("D")
|
||||
|
||||
# Sorts the data to the new output size
|
||||
if dataOutGab[-1] == "D":
|
||||
dataOrd.append(dataOutput[contData])
|
||||
contData += 1
|
||||
if data_out_gab[-1] == "D":
|
||||
data_ord.append(data_output[cont_data])
|
||||
cont_data += 1
|
||||
else:
|
||||
dataOrd.append(None)
|
||||
data_ord.append(None)
|
||||
|
||||
# Calculates parity
|
||||
qtdBP = 0 # parity bit counter
|
||||
for bp in range(1, sizePar + 1):
|
||||
qtd_bp = 0 # parity bit counter
|
||||
for bp in range(1, size_par + 1):
|
||||
# Bit counter one for a certain parity
|
||||
contBO = 0
|
||||
cont_bo = 0
|
||||
# Counter to control loop reading
|
||||
contLoop = 0
|
||||
for x in dataOrd:
|
||||
cont_loop = 0
|
||||
for x in data_ord:
|
||||
if x is not None:
|
||||
try:
|
||||
aux = (binPos[contLoop])[-1 * (bp)]
|
||||
aux = (bin_pos[cont_loop])[-1 * (bp)]
|
||||
except IndexError:
|
||||
aux = "0"
|
||||
if aux == "1" and x == "1":
|
||||
contBO += 1
|
||||
contLoop += 1
|
||||
parity.append(str(contBO % 2))
|
||||
cont_bo += 1
|
||||
cont_loop += 1
|
||||
parity.append(str(cont_bo % 2))
|
||||
|
||||
qtdBP += 1
|
||||
qtd_bp += 1
|
||||
|
||||
# Mount the message
|
||||
ContBP = 0 # Parity bit counter
|
||||
for x in range(0, sizePar + len(dataOutput)):
|
||||
if dataOrd[x] is None:
|
||||
dataOut.append(str(parity[ContBP]))
|
||||
ContBP += 1
|
||||
cont_bp = 0 # Parity bit counter
|
||||
for x in range(0, size_par + len(data_output)):
|
||||
if data_ord[x] is None:
|
||||
data_out.append(str(parity[cont_bp]))
|
||||
cont_bp += 1
|
||||
else:
|
||||
dataOut.append(dataOrd[x])
|
||||
data_out.append(data_ord[x])
|
||||
|
||||
ack = parityReceived == parity
|
||||
return dataOutput, ack
|
||||
ack = parity_received == parity
|
||||
return data_output, ack
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import math
|
||||
|
||||
|
||||
def rearrange(bitString32):
|
||||
def rearrange(bit_string_32):
|
||||
"""[summary]
|
||||
Regroups the given binary string.
|
||||
|
||||
|
@ -17,21 +17,21 @@ def rearrange(bitString32):
|
|||
'pqrstuvwhijklmno90abcdfg12345678'
|
||||
"""
|
||||
|
||||
if len(bitString32) != 32:
|
||||
if len(bit_string_32) != 32:
|
||||
raise ValueError("Need length 32")
|
||||
newString = ""
|
||||
new_string = ""
|
||||
for i in [3, 2, 1, 0]:
|
||||
newString += bitString32[8 * i : 8 * i + 8]
|
||||
return newString
|
||||
new_string += bit_string_32[8 * i : 8 * i + 8]
|
||||
return new_string
|
||||
|
||||
|
||||
def reformatHex(i):
|
||||
def reformat_hex(i):
|
||||
"""[summary]
|
||||
Converts the given integer into 8-digit hex number.
|
||||
|
||||
Arguments:
|
||||
i {[int]} -- [integer]
|
||||
>>> reformatHex(666)
|
||||
>>> reformat_hex(666)
|
||||
'9a020000'
|
||||
"""
|
||||
|
||||
|
@ -42,7 +42,7 @@ def reformatHex(i):
|
|||
return thing
|
||||
|
||||
|
||||
def pad(bitString):
|
||||
def pad(bit_string):
|
||||
"""[summary]
|
||||
Fills up the binary string to a 512 bit binary string
|
||||
|
||||
|
@ -52,33 +52,33 @@ def pad(bitString):
|
|||
Returns:
|
||||
[string] -- [binary string]
|
||||
"""
|
||||
startLength = len(bitString)
|
||||
bitString += "1"
|
||||
while len(bitString) % 512 != 448:
|
||||
bitString += "0"
|
||||
lastPart = format(startLength, "064b")
|
||||
bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32])
|
||||
return bitString
|
||||
start_length = len(bit_string)
|
||||
bit_string += "1"
|
||||
while len(bit_string) % 512 != 448:
|
||||
bit_string += "0"
|
||||
last_part = format(start_length, "064b")
|
||||
bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32])
|
||||
return bit_string
|
||||
|
||||
|
||||
def getBlock(bitString):
|
||||
def get_block(bit_string):
|
||||
"""[summary]
|
||||
Iterator:
|
||||
Returns by each call a list of length 16 with the 32 bit
|
||||
integer blocks.
|
||||
|
||||
Arguments:
|
||||
bitString {[string]} -- [binary string >= 512]
|
||||
bit_string {[string]} -- [binary string >= 512]
|
||||
"""
|
||||
|
||||
currPos = 0
|
||||
while currPos < len(bitString):
|
||||
currPart = bitString[currPos : currPos + 512]
|
||||
mySplits = []
|
||||
curr_pos = 0
|
||||
while curr_pos < len(bit_string):
|
||||
curr_part = bit_string[curr_pos : curr_pos + 512]
|
||||
my_splits = []
|
||||
for i in range(16):
|
||||
mySplits.append(int(rearrange(currPart[32 * i : 32 * i + 32]), 2))
|
||||
yield mySplits
|
||||
currPos += 512
|
||||
my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2))
|
||||
yield my_splits
|
||||
curr_pos += 512
|
||||
|
||||
|
||||
def not32(i):
|
||||
|
@ -101,7 +101,7 @@ def leftrot32(i, s):
|
|||
return (i << s) ^ (i >> (32 - s))
|
||||
|
||||
|
||||
def md5me(testString):
|
||||
def md5me(test_string):
|
||||
"""[summary]
|
||||
Returns a 32-bit hash code of the string 'testString'
|
||||
|
||||
|
@ -110,7 +110,7 @@ def md5me(testString):
|
|||
"""
|
||||
|
||||
bs = ""
|
||||
for i in testString:
|
||||
for i in test_string:
|
||||
bs += format(ord(i), "08b")
|
||||
bs = pad(bs)
|
||||
|
||||
|
@ -188,37 +188,37 @@ def md5me(testString):
|
|||
21,
|
||||
]
|
||||
|
||||
for m in getBlock(bs):
|
||||
A = a0
|
||||
B = b0
|
||||
C = c0
|
||||
D = d0
|
||||
for m in get_block(bs):
|
||||
a = a0
|
||||
b = b0
|
||||
c = c0
|
||||
d = d0
|
||||
for i in range(64):
|
||||
if i <= 15:
|
||||
# f = (B & C) | (not32(B) & D)
|
||||
f = D ^ (B & (C ^ D))
|
||||
f = d ^ (b & (c ^ d))
|
||||
g = i
|
||||
elif i <= 31:
|
||||
# f = (D & B) | (not32(D) & C)
|
||||
f = C ^ (D & (B ^ C))
|
||||
f = c ^ (d & (b ^ c))
|
||||
g = (5 * i + 1) % 16
|
||||
elif i <= 47:
|
||||
f = B ^ C ^ D
|
||||
f = b ^ c ^ d
|
||||
g = (3 * i + 5) % 16
|
||||
else:
|
||||
f = C ^ (B | not32(D))
|
||||
f = c ^ (b | not32(d))
|
||||
g = (7 * i) % 16
|
||||
dtemp = D
|
||||
D = C
|
||||
C = B
|
||||
B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i]))
|
||||
A = dtemp
|
||||
a0 = sum32(a0, A)
|
||||
b0 = sum32(b0, B)
|
||||
c0 = sum32(c0, C)
|
||||
d0 = sum32(d0, D)
|
||||
dtemp = d
|
||||
d = c
|
||||
c = b
|
||||
b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i]))
|
||||
a = dtemp
|
||||
a0 = sum32(a0, a)
|
||||
b0 = sum32(b0, b)
|
||||
c0 = sum32(c0, c)
|
||||
d0 = sum32(d0, d)
|
||||
|
||||
digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0)
|
||||
digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
|
||||
return digest
|
||||
|
||||
|
||||
|
|
|
@ -133,7 +133,7 @@ class SHA1HashTest(unittest.TestCase):
|
|||
Test class for the SHA1Hash class. Inherits the TestCase class from unittest
|
||||
"""
|
||||
|
||||
def testMatchHashes(self):
|
||||
def testMatchHashes(self): # noqa: N802
|
||||
msg = bytes("Test String", "utf-8")
|
||||
self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest())
|
||||
|
||||
|
|
|
@ -157,14 +157,14 @@ class SHA256:
|
|||
) % 0x100000000
|
||||
|
||||
# Compression
|
||||
S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
|
||||
s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
|
||||
ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
|
||||
temp1 = (
|
||||
h + S1 + ch + self.round_constants[index] + words[index]
|
||||
h + s1 + ch + self.round_constants[index] + words[index]
|
||||
) % 0x100000000
|
||||
S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
|
||||
s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
|
||||
maj = (a & b) ^ (a & c) ^ (b & c)
|
||||
temp2 = (S0 + maj) % 0x100000000
|
||||
temp2 = (s0 + maj) % 0x100000000
|
||||
|
||||
h, g, f, e, d, c, b, a = (
|
||||
g,
|
||||
|
|
|
@ -63,8 +63,8 @@ def power_iteration(
|
|||
vector = w / np.linalg.norm(w)
|
||||
# Find rayleigh quotient
|
||||
# (faster than usual b/c we know vector is normalized already)
|
||||
vectorH = vector.conj().T if is_complex else vector.T
|
||||
lambda_ = np.dot(vectorH, np.dot(input_matrix, vector))
|
||||
vector_h = vector.conj().T if is_complex else vector.T
|
||||
lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
|
||||
|
||||
# Check convergence.
|
||||
error = np.abs(lambda_ - lambda_previous) / lambda_
|
||||
|
|
|
@ -26,7 +26,7 @@ def is_hermitian(matrix: np.ndarray) -> bool:
|
|||
return np.array_equal(matrix, matrix.conjugate().T)
|
||||
|
||||
|
||||
def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any:
|
||||
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
|
||||
"""
|
||||
Returns the Rayleigh quotient of a Hermitian matrix A and
|
||||
vector v.
|
||||
|
@ -45,20 +45,20 @@ def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any:
|
|||
array([[3.]])
|
||||
"""
|
||||
v_star = v.conjugate().T
|
||||
v_star_dot = v_star.dot(A)
|
||||
v_star_dot = v_star.dot(a)
|
||||
assert isinstance(v_star_dot, np.ndarray)
|
||||
return (v_star_dot.dot(v)) / (v_star.dot(v))
|
||||
|
||||
|
||||
def tests() -> None:
|
||||
A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
|
||||
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
|
||||
v = np.array([[1], [2], [3]])
|
||||
assert is_hermitian(A), f"{A} is not hermitian."
|
||||
print(rayleigh_quotient(A, v))
|
||||
assert is_hermitian(a), f"{a} is not hermitian."
|
||||
print(rayleigh_quotient(a, v))
|
||||
|
||||
A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
|
||||
assert is_hermitian(A), f"{A} is not hermitian."
|
||||
assert rayleigh_quotient(A, v) == float(3)
|
||||
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
|
||||
assert is_hermitian(a), f"{a} is not hermitian."
|
||||
assert rayleigh_quotient(a, v) == float(3)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -85,13 +85,13 @@ class Test(unittest.TestCase):
|
|||
self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
|
||||
self.assertEqual((a * b), 0)
|
||||
|
||||
def test_zeroVector(self) -> None:
|
||||
def test_zero_vector(self) -> None:
|
||||
"""
|
||||
test for global function zero_vector()
|
||||
"""
|
||||
self.assertTrue(str(zero_vector(10)).count("0") == 10)
|
||||
|
||||
def test_unitBasisVector(self) -> None:
|
||||
def test_unit_basis_vector(self) -> None:
|
||||
"""
|
||||
test for global function unit_basis_vector()
|
||||
"""
|
||||
|
@ -113,7 +113,7 @@ class Test(unittest.TestCase):
|
|||
y = x.copy()
|
||||
self.assertEqual(str(x), str(y))
|
||||
|
||||
def test_changeComponent(self) -> None:
|
||||
def test_change_component(self) -> None:
|
||||
"""
|
||||
test for method change_component()
|
||||
"""
|
||||
|
@ -126,77 +126,77 @@ class Test(unittest.TestCase):
|
|||
"""
|
||||
test for Matrix method str()
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A))
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
|
||||
|
||||
def test_minor(self) -> None:
|
||||
"""
|
||||
test for Matrix method minor()
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
|
||||
for x in range(A.height()):
|
||||
for y in range(A.width()):
|
||||
self.assertEqual(minors[x][y], A.minor(x, y))
|
||||
for x in range(a.height()):
|
||||
for y in range(a.width()):
|
||||
self.assertEqual(minors[x][y], a.minor(x, y))
|
||||
|
||||
def test_cofactor(self) -> None:
|
||||
"""
|
||||
test for Matrix method cofactor()
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
|
||||
for x in range(A.height()):
|
||||
for y in range(A.width()):
|
||||
self.assertEqual(cofactors[x][y], A.cofactor(x, y))
|
||||
for x in range(a.height()):
|
||||
for y in range(a.width()):
|
||||
self.assertEqual(cofactors[x][y], a.cofactor(x, y))
|
||||
|
||||
def test_determinant(self) -> None:
|
||||
"""
|
||||
test for Matrix method determinant()
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
self.assertEqual(-5, A.determinant())
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
self.assertEqual(-5, a.determinant())
|
||||
|
||||
def test__mul__matrix(self) -> None:
|
||||
"""
|
||||
test for Matrix * operator
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
|
||||
a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
|
||||
x = Vector([1, 2, 3])
|
||||
self.assertEqual("(14,32,50)", str(A * x))
|
||||
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2))
|
||||
self.assertEqual("(14,32,50)", str(a * x))
|
||||
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
|
||||
|
||||
def test_change_component_matrix(self) -> None:
|
||||
"""
|
||||
test for Matrix method change_component()
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
A.change_component(0, 2, 5)
|
||||
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A))
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
a.change_component(0, 2, 5)
|
||||
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
|
||||
|
||||
def test_component_matrix(self) -> None:
|
||||
"""
|
||||
test for Matrix method component()
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
self.assertEqual(7, A.component(2, 1), 0.01)
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
self.assertEqual(7, a.component(2, 1), 0.01)
|
||||
|
||||
def test__add__matrix(self) -> None:
|
||||
"""
|
||||
test for Matrix + operator
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
|
||||
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B))
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
|
||||
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
|
||||
|
||||
def test__sub__matrix(self) -> None:
|
||||
"""
|
||||
test for Matrix - operator
|
||||
"""
|
||||
A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
|
||||
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B))
|
||||
a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
|
||||
b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
|
||||
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
|
||||
|
||||
def test_squareZeroMatrix(self) -> None:
|
||||
def test_square_zero_matrix(self) -> None:
|
||||
"""
|
||||
test for global function square_zero_matrix()
|
||||
"""
|
||||
|
|
|
@ -6,7 +6,7 @@ Output: The decision tree maps a real number input to a real number output.
|
|||
import numpy as np
|
||||
|
||||
|
||||
class Decision_Tree:
|
||||
class DecisionTree:
|
||||
def __init__(self, depth=5, min_leaf_size=5):
|
||||
self.depth = depth
|
||||
self.decision_boundary = 0
|
||||
|
@ -22,17 +22,17 @@ class Decision_Tree:
|
|||
@param prediction: a floating point value
|
||||
return value: mean_squared_error calculates the error if prediction is used to
|
||||
estimate the labels
|
||||
>>> tester = Decision_Tree()
|
||||
>>> tester = DecisionTree()
|
||||
>>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
|
||||
>>> test_prediction = np.float(6)
|
||||
>>> tester.mean_squared_error(test_labels, test_prediction) == (
|
||||
... Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
|
||||
... TestDecisionTree.helper_mean_squared_error_test(test_labels,
|
||||
... test_prediction))
|
||||
True
|
||||
>>> test_labels = np.array([1,2,3])
|
||||
>>> test_prediction = np.float(2)
|
||||
>>> tester.mean_squared_error(test_labels, test_prediction) == (
|
||||
... Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
|
||||
... TestDecisionTree.helper_mean_squared_error_test(test_labels,
|
||||
... test_prediction))
|
||||
True
|
||||
"""
|
||||
|
@ -41,10 +41,10 @@ class Decision_Tree:
|
|||
|
||||
return np.mean((labels - prediction) ** 2)
|
||||
|
||||
def train(self, X, y):
|
||||
def train(self, x, y):
|
||||
"""
|
||||
train:
|
||||
@param X: a one dimensional numpy array
|
||||
@param x: a one dimensional numpy array
|
||||
@param y: a one dimensional numpy array.
|
||||
The contents of y are the labels for the corresponding X values
|
||||
|
||||
|
@ -55,17 +55,17 @@ class Decision_Tree:
|
|||
this section is to check that the inputs conform to our dimensionality
|
||||
constraints
|
||||
"""
|
||||
if X.ndim != 1:
|
||||
if x.ndim != 1:
|
||||
print("Error: Input data set must be one dimensional")
|
||||
return
|
||||
if len(X) != len(y):
|
||||
if len(x) != len(y):
|
||||
print("Error: X and y have different lengths")
|
||||
return
|
||||
if y.ndim != 1:
|
||||
print("Error: Data set labels must be one dimensional")
|
||||
return
|
||||
|
||||
if len(X) < 2 * self.min_leaf_size:
|
||||
if len(x) < 2 * self.min_leaf_size:
|
||||
self.prediction = np.mean(y)
|
||||
return
|
||||
|
||||
|
@ -74,7 +74,7 @@ class Decision_Tree:
|
|||
return
|
||||
|
||||
best_split = 0
|
||||
min_error = self.mean_squared_error(X, np.mean(y)) * 2
|
||||
min_error = self.mean_squared_error(x, np.mean(y)) * 2
|
||||
|
||||
"""
|
||||
loop over all possible splits for the decision tree. find the best split.
|
||||
|
@ -82,34 +82,34 @@ class Decision_Tree:
|
|||
then the data set is not split and the average for the entire array is used as
|
||||
the predictor
|
||||
"""
|
||||
for i in range(len(X)):
|
||||
if len(X[:i]) < self.min_leaf_size:
|
||||
for i in range(len(x)):
|
||||
if len(x[:i]) < self.min_leaf_size:
|
||||
continue
|
||||
elif len(X[i:]) < self.min_leaf_size:
|
||||
elif len(x[i:]) < self.min_leaf_size:
|
||||
continue
|
||||
else:
|
||||
error_left = self.mean_squared_error(X[:i], np.mean(y[:i]))
|
||||
error_right = self.mean_squared_error(X[i:], np.mean(y[i:]))
|
||||
error_left = self.mean_squared_error(x[:i], np.mean(y[:i]))
|
||||
error_right = self.mean_squared_error(x[i:], np.mean(y[i:]))
|
||||
error = error_left + error_right
|
||||
if error < min_error:
|
||||
best_split = i
|
||||
min_error = error
|
||||
|
||||
if best_split != 0:
|
||||
left_X = X[:best_split]
|
||||
left_x = x[:best_split]
|
||||
left_y = y[:best_split]
|
||||
right_X = X[best_split:]
|
||||
right_x = x[best_split:]
|
||||
right_y = y[best_split:]
|
||||
|
||||
self.decision_boundary = X[best_split]
|
||||
self.left = Decision_Tree(
|
||||
self.decision_boundary = x[best_split]
|
||||
self.left = DecisionTree(
|
||||
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
|
||||
)
|
||||
self.right = Decision_Tree(
|
||||
self.right = DecisionTree(
|
||||
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
|
||||
)
|
||||
self.left.train(left_X, left_y)
|
||||
self.right.train(right_X, right_y)
|
||||
self.left.train(left_x, left_y)
|
||||
self.right.train(right_x, right_y)
|
||||
else:
|
||||
self.prediction = np.mean(y)
|
||||
|
||||
|
@ -134,7 +134,7 @@ class Decision_Tree:
|
|||
return None
|
||||
|
||||
|
||||
class Test_Decision_Tree:
|
||||
class TestDecisionTree:
|
||||
"""Decision Tres test class"""
|
||||
|
||||
@staticmethod
|
||||
|
@ -159,11 +159,11 @@ def main():
|
|||
predict the label of 10 different test values. Then the mean squared error over
|
||||
this test is displayed.
|
||||
"""
|
||||
X = np.arange(-1.0, 1.0, 0.005)
|
||||
y = np.sin(X)
|
||||
x = np.arange(-1.0, 1.0, 0.005)
|
||||
y = np.sin(x)
|
||||
|
||||
tree = Decision_Tree(depth=10, min_leaf_size=10)
|
||||
tree.train(X, y)
|
||||
tree = DecisionTree(depth=10, min_leaf_size=10)
|
||||
tree.train(x, y)
|
||||
|
||||
test_cases = (np.random.rand(10) * 2) - 1
|
||||
predictions = np.array([tree.predict(x) for x in test_cases])
|
||||
|
|
|
@ -17,19 +17,19 @@ def main():
|
|||
iris = load_iris()
|
||||
|
||||
# Split dataset into train and test data
|
||||
X = iris["data"] # features
|
||||
Y = iris["target"]
|
||||
x = iris["data"] # features
|
||||
y = iris["target"]
|
||||
x_train, x_test, y_train, y_test = train_test_split(
|
||||
X, Y, test_size=0.3, random_state=1
|
||||
x, y, test_size=0.3, random_state=1
|
||||
)
|
||||
|
||||
# Gaussian Naive Bayes
|
||||
NB_model = GaussianNB()
|
||||
NB_model.fit(x_train, y_train)
|
||||
nb_model = GaussianNB()
|
||||
nb_model.fit(x_train, y_train)
|
||||
|
||||
# Display Confusion Matrix
|
||||
plot_confusion_matrix(
|
||||
NB_model,
|
||||
nb_model,
|
||||
x_test,
|
||||
y_test,
|
||||
display_labels=iris["target_names"],
|
||||
|
|
|
@ -26,25 +26,25 @@ def main():
|
|||
print(df_boston.describe().T)
|
||||
# Feature selection
|
||||
|
||||
X = df_boston.iloc[:, :-1]
|
||||
x = df_boston.iloc[:, :-1]
|
||||
y = df_boston.iloc[:, -1] # target variable
|
||||
# split the data with 75% train and 25% test sets.
|
||||
X_train, X_test, y_train, y_test = train_test_split(
|
||||
X, y, random_state=0, test_size=0.25
|
||||
x_train, x_test, y_train, y_test = train_test_split(
|
||||
x, y, random_state=0, test_size=0.25
|
||||
)
|
||||
|
||||
model = GradientBoostingRegressor(
|
||||
n_estimators=500, max_depth=5, min_samples_split=4, learning_rate=0.01
|
||||
)
|
||||
# training the model
|
||||
model.fit(X_train, y_train)
|
||||
model.fit(x_train, y_train)
|
||||
# to see how good the model fit the data
|
||||
training_score = model.score(X_train, y_train).round(3)
|
||||
test_score = model.score(X_test, y_test).round(3)
|
||||
training_score = model.score(x_train, y_train).round(3)
|
||||
test_score = model.score(x_test, y_test).round(3)
|
||||
print("Training score of GradientBoosting is :", training_score)
|
||||
print("The test score of GradientBoosting is :", test_score)
|
||||
# Let us evaluate the model by finding the errors
|
||||
y_pred = model.predict(X_test)
|
||||
y_pred = model.predict(x_test)
|
||||
|
||||
# The mean squared error
|
||||
print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}")
|
||||
|
|
|
@ -69,8 +69,8 @@ def get_initial_centroids(data, k, seed=None):
|
|||
return centroids
|
||||
|
||||
|
||||
def centroid_pairwise_dist(X, centroids):
|
||||
return pairwise_distances(X, centroids, metric="euclidean")
|
||||
def centroid_pairwise_dist(x, centroids):
|
||||
return pairwise_distances(x, centroids, metric="euclidean")
|
||||
|
||||
|
||||
def assign_clusters(data, centroids):
|
||||
|
@ -197,8 +197,8 @@ if False: # change to true to run this test case.
|
|||
plot_heterogeneity(heterogeneity, k)
|
||||
|
||||
|
||||
def ReportGenerator(
|
||||
df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None
|
||||
def report_generator(
|
||||
df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None
|
||||
) -> pd.DataFrame:
|
||||
"""
|
||||
Function generates an easy-reading clustering report. It takes 2 arguments as input:
|
||||
|
@ -214,7 +214,7 @@ def ReportGenerator(
|
|||
>>> data['col2'] = [100, 200, 300]
|
||||
>>> data['col3'] = [10, 20, 30]
|
||||
>>> data['Cluster'] = [1, 1, 2]
|
||||
>>> ReportGenerator(data, ['col1', 'col2'], 0)
|
||||
>>> report_generator(data, ['col1', 'col2'], 0)
|
||||
Features Type Mark 1 2
|
||||
0 # of Customers ClusterSize False 2.000000 1.000000
|
||||
1 % of Customers ClusterProportion False 0.666667 0.333333
|
||||
|
@ -231,8 +231,8 @@ def ReportGenerator(
|
|||
[104 rows x 5 columns]
|
||||
"""
|
||||
# Fill missing values with given rules
|
||||
if FillMissingReport:
|
||||
df.fillna(value=FillMissingReport, inplace=True)
|
||||
if fill_missing_report:
|
||||
df.fillna(value=fill_missing_report, inplace=True)
|
||||
df["dummy"] = 1
|
||||
numeric_cols = df.select_dtypes(np.number).columns
|
||||
report = (
|
||||
|
@ -313,7 +313,7 @@ def ReportGenerator(
|
|||
report = pd.concat(
|
||||
[report, a, clustersize, clusterproportion], axis=0
|
||||
) # concat report with cluster size and nan values
|
||||
report["Mark"] = report["Features"].isin(ClusteringVariables)
|
||||
report["Mark"] = report["Features"].isin(clustering_variables)
|
||||
cols = report.columns.tolist()
|
||||
cols = cols[0:2] + cols[-1:] + cols[2:-1]
|
||||
report = report[cols]
|
||||
|
|
|
@ -41,11 +41,11 @@ def local_weight(
|
|||
[0.08272556]])
|
||||
"""
|
||||
weight = weighted_matrix(point, training_data_x, bandwidth)
|
||||
W = (training_data_x.T * (weight * training_data_x)).I * (
|
||||
w = (training_data_x.T * (weight * training_data_x)).I * (
|
||||
training_data_x.T * weight * training_data_y.T
|
||||
)
|
||||
|
||||
return W
|
||||
return w
|
||||
|
||||
|
||||
def local_weight_regression(
|
||||
|
|
|
@ -35,25 +35,25 @@ def cost_function(h, y):
|
|||
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
|
||||
|
||||
|
||||
def log_likelihood(X, Y, weights):
|
||||
scores = np.dot(X, weights)
|
||||
return np.sum(Y * scores - np.log(1 + np.exp(scores)))
|
||||
def log_likelihood(x, y, weights):
|
||||
scores = np.dot(x, weights)
|
||||
return np.sum(y * scores - np.log(1 + np.exp(scores)))
|
||||
|
||||
|
||||
# here alpha is the learning rate, X is the feature matrix,y is the target matrix
|
||||
def logistic_reg(alpha, X, y, max_iterations=70000):
|
||||
theta = np.zeros(X.shape[1])
|
||||
def logistic_reg(alpha, x, y, max_iterations=70000):
|
||||
theta = np.zeros(x.shape[1])
|
||||
|
||||
for iterations in range(max_iterations):
|
||||
z = np.dot(X, theta)
|
||||
z = np.dot(x, theta)
|
||||
h = sigmoid_function(z)
|
||||
gradient = np.dot(X.T, h - y) / y.size
|
||||
gradient = np.dot(x.T, h - y) / y.size
|
||||
theta = theta - alpha * gradient # updating the weights
|
||||
z = np.dot(X, theta)
|
||||
z = np.dot(x, theta)
|
||||
h = sigmoid_function(z)
|
||||
J = cost_function(h, y)
|
||||
j = cost_function(h, y)
|
||||
if iterations % 100 == 0:
|
||||
print(f"loss: {J} \t") # printing the loss after every 100 iterations
|
||||
print(f"loss: {j} \t") # printing the loss after every 100 iterations
|
||||
return theta
|
||||
|
||||
|
||||
|
@ -61,23 +61,23 @@ def logistic_reg(alpha, X, y, max_iterations=70000):
|
|||
|
||||
if __name__ == "__main__":
|
||||
iris = datasets.load_iris()
|
||||
X = iris.data[:, :2]
|
||||
x = iris.data[:, :2]
|
||||
y = (iris.target != 0) * 1
|
||||
|
||||
alpha = 0.1
|
||||
theta = logistic_reg(alpha, X, y, max_iterations=70000)
|
||||
theta = logistic_reg(alpha, x, y, max_iterations=70000)
|
||||
print("theta: ", theta) # printing the theta i.e our weights vector
|
||||
|
||||
def predict_prob(X):
|
||||
def predict_prob(x):
|
||||
return sigmoid_function(
|
||||
np.dot(X, theta)
|
||||
np.dot(x, theta)
|
||||
) # predicting the value of probability from the logistic regression algorithm
|
||||
|
||||
plt.figure(figsize=(10, 6))
|
||||
plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0")
|
||||
plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1")
|
||||
(x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max())
|
||||
(x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max())
|
||||
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
|
||||
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
|
||||
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
|
||||
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
|
||||
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
|
||||
grid = np.c_[xx1.ravel(), xx2.ravel()]
|
||||
probs = predict_prob(grid).reshape(xx1.shape)
|
||||
|
|
|
@ -15,12 +15,12 @@ test = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
|
|||
Y = clf.predict(test)
|
||||
|
||||
|
||||
def wrapper(Y):
|
||||
def wrapper(y):
|
||||
"""
|
||||
>>> wrapper(Y)
|
||||
[0, 0, 1]
|
||||
"""
|
||||
return list(Y)
|
||||
return list(y)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -17,10 +17,10 @@ def main():
|
|||
iris = load_iris()
|
||||
|
||||
# Split dataset into train and test data
|
||||
X = iris["data"] # features
|
||||
Y = iris["target"]
|
||||
x = iris["data"] # features
|
||||
y = iris["target"]
|
||||
x_train, x_test, y_train, y_test = train_test_split(
|
||||
X, Y, test_size=0.3, random_state=1
|
||||
x, y, test_size=0.3, random_state=1
|
||||
)
|
||||
|
||||
# Random Forest Classifier
|
||||
|
|
|
@ -17,10 +17,10 @@ def main():
|
|||
print(boston.keys())
|
||||
|
||||
# Split dataset into train and test data
|
||||
X = boston["data"] # features
|
||||
Y = boston["target"]
|
||||
x = boston["data"] # features
|
||||
y = boston["target"]
|
||||
x_train, x_test, y_train, y_test = train_test_split(
|
||||
X, Y, test_size=0.3, random_state=1
|
||||
x, y, test_size=0.3, random_state=1
|
||||
)
|
||||
|
||||
# Random Forest Regressor
|
||||
|
|
|
@ -80,7 +80,7 @@ class SmoSVM:
|
|||
|
||||
# Calculate alphas using SMO algorithm
|
||||
def fit(self):
|
||||
K = self._k
|
||||
k = self._k
|
||||
state = None
|
||||
while True:
|
||||
|
||||
|
@ -106,14 +106,14 @@ class SmoSVM:
|
|||
# 3: update threshold(b)
|
||||
b1_new = np.float64(
|
||||
-e1
|
||||
- y1 * K(i1, i1) * (a1_new - a1)
|
||||
- y2 * K(i2, i1) * (a2_new - a2)
|
||||
- y1 * k(i1, i1) * (a1_new - a1)
|
||||
- y2 * k(i2, i1) * (a2_new - a2)
|
||||
+ self._b
|
||||
)
|
||||
b2_new = np.float64(
|
||||
-e2
|
||||
- y2 * K(i2, i2) * (a2_new - a2)
|
||||
- y1 * K(i1, i2) * (a1_new - a1)
|
||||
- y2 * k(i2, i2) * (a2_new - a2)
|
||||
- y1 * k(i1, i2) * (a1_new - a1)
|
||||
+ self._b
|
||||
)
|
||||
if 0.0 < a1_new < self._c:
|
||||
|
@ -134,8 +134,8 @@ class SmoSVM:
|
|||
if s == i1 or s == i2:
|
||||
continue
|
||||
self._error[s] += (
|
||||
y1 * (a1_new - a1) * K(i1, s)
|
||||
+ y2 * (a2_new - a2) * K(i2, s)
|
||||
y1 * (a1_new - a1) * k(i1, s)
|
||||
+ y2 * (a2_new - a2) * k(i2, s)
|
||||
+ (self._b - b_old)
|
||||
)
|
||||
|
||||
|
@ -305,56 +305,56 @@ class SmoSVM:
|
|||
|
||||
# Get the new alpha2 and new alpha1
|
||||
def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
|
||||
K = self._k
|
||||
k = self._k
|
||||
if i1 == i2:
|
||||
return None, None
|
||||
|
||||
# calculate L and H which bound the new alpha2
|
||||
s = y1 * y2
|
||||
if s == -1:
|
||||
L, H = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
|
||||
l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
|
||||
else:
|
||||
L, H = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
|
||||
if L == H:
|
||||
l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
|
||||
if l == h: # noqa: E741
|
||||
return None, None
|
||||
|
||||
# calculate eta
|
||||
k11 = K(i1, i1)
|
||||
k22 = K(i2, i2)
|
||||
k12 = K(i1, i2)
|
||||
k11 = k(i1, i1)
|
||||
k22 = k(i2, i2)
|
||||
k12 = k(i1, i2)
|
||||
eta = k11 + k22 - 2.0 * k12
|
||||
|
||||
# select the new alpha2 which could get the minimal objectives
|
||||
if eta > 0.0:
|
||||
a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
|
||||
# a2_new has a boundary
|
||||
if a2_new_unc >= H:
|
||||
a2_new = H
|
||||
elif a2_new_unc <= L:
|
||||
a2_new = L
|
||||
if a2_new_unc >= h:
|
||||
a2_new = h
|
||||
elif a2_new_unc <= l:
|
||||
a2_new = l
|
||||
else:
|
||||
a2_new = a2_new_unc
|
||||
else:
|
||||
b = self._b
|
||||
l1 = a1 + s * (a2 - L)
|
||||
h1 = a1 + s * (a2 - H)
|
||||
l1 = a1 + s * (a2 - l)
|
||||
h1 = a1 + s * (a2 - h)
|
||||
|
||||
# way 1
|
||||
f1 = y1 * (e1 + b) - a1 * K(i1, i1) - s * a2 * K(i1, i2)
|
||||
f2 = y2 * (e2 + b) - a2 * K(i2, i2) - s * a1 * K(i1, i2)
|
||||
f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2)
|
||||
f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2)
|
||||
ol = (
|
||||
l1 * f1
|
||||
+ L * f2
|
||||
+ 1 / 2 * l1**2 * K(i1, i1)
|
||||
+ 1 / 2 * L**2 * K(i2, i2)
|
||||
+ s * L * l1 * K(i1, i2)
|
||||
+ l * f2
|
||||
+ 1 / 2 * l1**2 * k(i1, i1)
|
||||
+ 1 / 2 * l**2 * k(i2, i2)
|
||||
+ s * l * l1 * k(i1, i2)
|
||||
)
|
||||
oh = (
|
||||
h1 * f1
|
||||
+ H * f2
|
||||
+ 1 / 2 * h1**2 * K(i1, i1)
|
||||
+ 1 / 2 * H**2 * K(i2, i2)
|
||||
+ s * H * h1 * K(i1, i2)
|
||||
+ h * f2
|
||||
+ 1 / 2 * h1**2 * k(i1, i1)
|
||||
+ 1 / 2 * h**2 * k(i2, i2)
|
||||
+ s * h * h1 * k(i1, i2)
|
||||
)
|
||||
"""
|
||||
# way 2
|
||||
|
@ -362,9 +362,9 @@ class SmoSVM:
|
|||
objectives
|
||||
"""
|
||||
if ol < (oh - self._eps):
|
||||
a2_new = L
|
||||
a2_new = l
|
||||
elif ol > oh + self._eps:
|
||||
a2_new = H
|
||||
a2_new = h
|
||||
else:
|
||||
a2_new = a2
|
||||
|
||||
|
|
|
@@ -83,7 +83,7 @@ the third document in the corpus.")
     return (len([doc for doc in docs if term in doc]), len(docs))


-def inverse_document_frequency(df: int, N: int, smoothing=False) -> float:
+def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
     """
     Return an integer denoting the importance
     of a word. This measure of importance is

@@ -109,15 +109,15 @@ def inverse_document_frequency(df: int, N: int, smoothing=False) -> float:
     1.477
     """
     if smoothing:
-        if N == 0:
+        if n == 0:
             raise ValueError("log10(0) is undefined.")
-        return round(1 + log10(N / (1 + df)), 3)
+        return round(1 + log10(n / (1 + df)), 3)

     if df == 0:
         raise ZeroDivisionError("df must be > 0")
-    elif N == 0:
+    elif n == 0:
         raise ValueError("log10(0) is undefined.")
-    return round(log10(N / df), 3)
+    return round(log10(n / df), 3)


 def tf_idf(tf: int, idf: int) -> float:

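Only the corpus-size parameter is renamed (N to n); the idf formula log10(n / df) is unchanged. A quick sanity check of that formula, with document counts chosen purely for illustration:

from math import log10

# a term found in 1 of 10 documents is far more informative than one found in 9 of 10
print(round(log10(10 / 1), 3))  # 1.0
print(round(log10(10 / 9), 3))  # 0.046
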
@@ -5,16 +5,16 @@ def binomial_coefficient(n, r):
     >>> binomial_coefficient(10, 5)
     252
     """
-    C = [0 for i in range(r + 1)]
+    c = [0 for i in range(r + 1)]
     # nc0 = 1
-    C[0] = 1
+    c[0] = 1
     for i in range(1, n + 1):
         # to compute current row from previous row.
         j = min(i, r)
         while j > 0:
-            C[j] += C[j - 1]
+            c[j] += c[j - 1]
             j -= 1
-    return C[r]
+    return c[r]


 print(binomial_coefficient(n=10, r=5))

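The renamed c array is a single rolling row of Pascal's triangle: each pass applies C(i, j) = C(i-1, j) + C(i-1, j-1) in place, right to left. A self-contained sketch of the same idea (the inner for loop here replaces the original while loop, and the math.comb cross-check is ours):

from math import comb


def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with one rolling row of Pascal's triangle."""
    c = [0] * (r + 1)
    c[0] = 1  # C(i, 0) == 1 for every row
    for i in range(1, n + 1):
        # walk right to left so c[j - 1] still holds the previous row's value
        for j in range(min(i, r), 0, -1):
            c[j] += c[j - 1]
    return c[r]


assert binomial_coefficient(10, 5) == 252 == comb(10, 5)
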
@@ -30,7 +30,7 @@ def power(x: int, y: int, mod: int) -> int:
     return temp


-def isCarmichaelNumber(n: int) -> bool:
+def is_carmichael_number(n: int) -> bool:
     b = 2
     while b < n:
         if gcd(b, n) == 1 and power(b, n - 1, n) != 1:

@@ -41,7 +41,7 @@ def isCarmichaelNumber(n: int) -> bool:

 if __name__ == "__main__":
     number = int(input("Enter number: ").strip())
-    if isCarmichaelNumber(number):
+    if is_carmichael_number(number):
         print(f"{number} is a Carmichael Number.")
     else:
         print(f"{number} is not a Carmichael Number.")

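The loop shown in the hunk is a Fermat-witness search: it looks for a base b coprime to n with b**(n-1) % n != 1. A hedged, standalone sketch of just that loop (the name fails_fermat is ours; on its own it cannot distinguish a Carmichael number from a genuine prime, since both pass every coprime base):

from math import gcd


def fails_fermat(n: int) -> bool:
    """True if some base coprime to n violates Fermat's little theorem."""
    return any(gcd(b, n) == 1 and pow(b, n - 1, n) != 1 for b in range(2, n))


# 561 = 3 * 11 * 17 is the smallest Carmichael number: composite, yet no coprime base betrays it.
print(fails_fermat(561))  # False
print(fails_fermat(560))  # True (an ordinary composite)
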
@@ -4,7 +4,7 @@ https://stackoverflow.com/questions/3886402/how-to-get-numbers-after-decimal-poi
 """


-def decimal_isolate(number, digitAmount):
+def decimal_isolate(number, digit_amount):

     """
     Isolates the decimal part of a number.

@@ -28,8 +28,8 @@ def decimal_isolate(number, digitAmount):
     >>> decimal_isolate(-14.123, 3)
     -0.123
     """
-    if digitAmount > 0:
-        return round(number - int(number), digitAmount)
+    if digit_amount > 0:
+        return round(number - int(number), digit_amount)
     return number - int(number)

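Behaviour is unchanged by the rename: the function still returns the fractional part, rounded to digit_amount decimal places when that argument is positive. Reproducing the doctest value quoted in the hunk:

print(round(-14.123 - int(-14.123), 3))  # -0.123, the decimal part of -14.123
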
@@ -29,12 +29,12 @@ def explicit_euler(
     >>> y[-1]
     144.77277243257308
     """
-    N = int(np.ceil((x_end - x0) / step_size))
-    y = np.zeros((N + 1,))
+    n = int(np.ceil((x_end - x0) / step_size))
+    y = np.zeros((n + 1,))
     y[0] = y0
     x = x0

-    for k in range(N):
+    for k in range(n):
         y[k + 1] = y[k] + step_size * ode_func(x, y[k])
         x += step_size

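The renamed n is simply the number of forward-Euler steps; each step advances the solution by y[k+1] = y[k] + h * f(x, y[k]). A self-contained sketch of the same loop (the function name explicit_euler_sketch is ours), applied to y' = y with y(0) = 1:

import numpy as np


def explicit_euler_sketch(ode_func, y0, x0, step_size, x_end):
    """Forward (explicit) Euler integration, mirroring the loop in the hunk above."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


y = explicit_euler_sketch(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
print(y[-1])  # about 2.7169, just under e, as expected for forward Euler on y' = y
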
@@ -33,12 +33,12 @@ def euler_modified(
     >>> y[-1]
     0.5525976431951775
     """
-    N = int(np.ceil((x_end - x0) / step_size))
-    y = np.zeros((N + 1,))
+    n = int(np.ceil((x_end - x0) / step_size))
+    y = np.zeros((n + 1,))
     y[0] = y0
     x = x0

-    for k in range(N):
+    for k in range(n):
         y_get = y[k] + step_size * ode_func(x, y[k])
         y[k + 1] = y[k] + (
             (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))

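The modified-Euler (Heun) variant differs from the hunk above it only in the step: a forward-Euler predictor followed by a trapezoidal corrector. A minimal sketch of that single step, with names of our choosing:

def heun_step(ode_func, x, y, h):
    """One Heun step: predict with forward Euler, then average the two slopes."""
    y_predict = y + h * ode_func(x, y)                                  # predictor
    return y + (h / 2) * (ode_func(x, y) + ode_func(x + h, y_predict))  # corrector

Heun's method is second-order accurate, versus first order for plain explicit Euler.
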
@@ -4,9 +4,9 @@
 import math


-def exactPrimeFactorCount(n):
+def exact_prime_factor_count(n):
     """
-    >>> exactPrimeFactorCount(51242183)
+    >>> exact_prime_factor_count(51242183)
     3
     """
     count = 0

@@ -36,7 +36,7 @@ def exactPrimeFactorCount(n):

 if __name__ == "__main__":
     n = 51242183
-    print(f"The number of distinct prime factors is/are {exactPrimeFactorCount(n)}")
+    print(f"The number of distinct prime factors is/are {exact_prime_factor_count(n)}")
     print(f"The value of log(log(n)) is {math.log(math.log(n)):.4f}")

     """

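The function counts distinct prime factors, the omega function from the Hardy-Ramanujan result that omega(n) is normally about log log n. A standalone trial-division sketch of the same count; the name distinct_prime_factor_count is ours and this is not the repository's exact loop:

def distinct_prime_factor_count(n: int) -> int:
    """Count the distinct prime factors of n by trial division."""
    count = 0
    d = 2
    while d * d <= n:
        if n % d == 0:
            count += 1
            while n % d == 0:
                n //= d
        d += 1
    if n > 1:  # whatever remains is itself a prime factor
        count += 1
    return count


print(distinct_prime_factor_count(51242183))  # 3, matching the doctest in the hunk above
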
@@ -14,7 +14,7 @@ Jaccard similarity is widely used with MinHashing.
 """


-def jaccard_similariy(setA, setB, alternativeUnion=False):
+def jaccard_similariy(set_a, set_b, alternative_union=False):
     """
     Finds the jaccard similarity between two sets.
     Essentially, its intersection over union.

@@ -24,8 +24,8 @@ def jaccard_similariy(setA, setB, alternativeUnion=False):
     of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77]

     Parameters:
-    :setA (set,list,tuple): A non-empty set/list
-    :setB (set,list,tuple): A non-empty set/list
+    :set_a (set,list,tuple): A non-empty set/list
+    :set_b (set,list,tuple): A non-empty set/list
     :alternativeUnion (boolean): If True, use sum of number of
     items as union

@@ -33,48 +33,48 @@ def jaccard_similariy(setA, setB, alternativeUnion=False):
     (float) The jaccard similarity between the two sets.

     Examples:
-    >>> setA = {'a', 'b', 'c', 'd', 'e'}
-    >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'}
-    >>> jaccard_similariy(setA,setB)
+    >>> set_a = {'a', 'b', 'c', 'd', 'e'}
+    >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
+    >>> jaccard_similariy(set_a, set_b)
     0.375

-    >>> jaccard_similariy(setA,setA)
+    >>> jaccard_similariy(set_a, set_a)
     1.0

-    >>> jaccard_similariy(setA,setA,True)
+    >>> jaccard_similariy(set_a, set_a, True)
     0.5

-    >>> setA = ['a', 'b', 'c', 'd', 'e']
-    >>> setB = ('c', 'd', 'e', 'f', 'h', 'i')
-    >>> jaccard_similariy(setA,setB)
+    >>> set_a = ['a', 'b', 'c', 'd', 'e']
+    >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i')
+    >>> jaccard_similariy(set_a, set_b)
     0.375
     """

-    if isinstance(setA, set) and isinstance(setB, set):
+    if isinstance(set_a, set) and isinstance(set_b, set):

-        intersection = len(setA.intersection(setB))
+        intersection = len(set_a.intersection(set_b))

-        if alternativeUnion:
-            union = len(setA) + len(setB)
+        if alternative_union:
+            union = len(set_a) + len(set_b)
         else:
-            union = len(setA.union(setB))
+            union = len(set_a.union(set_b))

         return intersection / union

-    if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)):
+    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):

-        intersection = [element for element in setA if element in setB]
+        intersection = [element for element in set_a if element in set_b]

-        if alternativeUnion:
-            union = len(setA) + len(setB)
+        if alternative_union:
+            union = len(set_a) + len(set_b)
         else:
-            union = setA + [element for element in setB if element not in setA]
+            union = set_a + [element for element in set_b if element not in set_a]

         return len(intersection) / len(union)


 if __name__ == "__main__":

-    setA = {"a", "b", "c", "d", "e"}
-    setB = {"c", "d", "e", "f", "h", "i"}
-    print(jaccard_similariy(setA, setB))
+    set_a = {"a", "b", "c", "d", "e"}
+    set_b = {"c", "d", "e", "f", "h", "i"}
+    print(jaccard_similariy(set_a, set_b))

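The hunks rename only the arguments; Jaccard similarity itself stays intersection over union, with an optional "sum of sizes" union that halves the self-similarity. A compact sketch using set operators (the name jaccard is ours):

def jaccard(set_a: set, set_b: set, alternative_union: bool = False) -> float:
    """Intersection over union; the alternative union is len(a) + len(b)."""
    intersection = len(set_a & set_b)
    union = len(set_a) + len(set_b) if alternative_union else len(set_a | set_b)
    return intersection / union


set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard(set_a, set_b))        # 3 shared / 8 distinct = 0.375
print(jaccard(set_a, set_a, True))  # 5 / 10 = 0.5 under the alternative union
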
@@ -33,12 +33,12 @@ def krishnamurthy(number: int) -> bool:
     True
     """

-    factSum = 0
+    fact_sum = 0
     duplicate = number
     while duplicate > 0:
         duplicate, digit = divmod(duplicate, 10)
-        factSum += factorial(digit)
-    return factSum == number
+        fact_sum += factorial(digit)
+    return fact_sum == number


 if __name__ == "__main__":

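A Krishnamurthy (Peterson) number equals the sum of the factorials of its digits, which is exactly what the renamed fact_sum accumulates. A standalone check (the name is_krishnamurthy is ours):

from math import factorial


def is_krishnamurthy(number: int) -> bool:
    """True if the factorials of the digits sum back to the number."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


print(is_krishnamurthy(145))  # True: 1! + 4! + 5! = 1 + 24 + 120 = 145
print(is_krishnamurthy(146))  # False
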
@@ -1,17 +1,17 @@
-def kthPermutation(k, n):
+def kth_permutation(k, n):
     """
     Finds k'th lexicographic permutation (in increasing order) of
     0,1,2,...n-1 in O(n^2) time.

     Examples:
     First permutation is always 0,1,2,...n
-    >>> kthPermutation(0,5)
+    >>> kth_permutation(0,5)
     [0, 1, 2, 3, 4]

     The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3],
     [0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3],
     [1,2,3,0], [1,3,0,2]
-    >>> kthPermutation(10,4)
+    >>> kth_permutation(10,4)
     [1, 3, 0, 2]
     """
     # Factorails from 1! to (n-1)!

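kth_permutation decodes k in the factorial number system: the digit for (n-1)! picks which of the remaining elements comes first, and so on down to 1!. A self-contained sketch of that decoding (names are ours), reproducing the doctests quoted above:

def kth_permutation_sketch(k: int, n: int) -> list[int]:
    """Return the k-th lexicographic permutation of 0..n-1 via factoradic digits."""
    factorials = []  # 1!, 2!, ..., (n-1)!
    f = 1
    for i in range(1, n):
        f *= i
        factorials.append(f)
    elements = list(range(n))
    permutation = []
    for f in reversed(factorials):
        index, k = divmod(k, f)
        permutation.append(elements.pop(index))
    permutation.append(elements.pop())  # the last remaining element
    return permutation


print(kth_permutation_sketch(0, 5))   # [0, 1, 2, 3, 4]
print(kth_permutation_sketch(10, 4))  # [1, 3, 0, 2]
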
@@ -30,9 +30,9 @@ def lucas_lehmer_test(p: int) -> bool:
         return True

     s = 4
-    M = (1 << p) - 1
+    m = (1 << p) - 1
     for i in range(p - 2):
-        s = ((s * s) - 2) % M
+        s = ((s * s) - 2) % m
     return s == 0

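The Lucas-Lehmer test checks a Mersenne number m = 2**p - 1 (p an odd prime) by iterating s -> s*s - 2 modulo m, starting from 4: m is prime exactly when the (p-2)-th term is 0. A hedged standalone sketch (function name ours):

def lucas_lehmer_sketch(p: int) -> bool:
    """Lucas-Lehmer primality test for 2**p - 1, assuming p itself is prime."""
    if p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0


print(lucas_lehmer_sketch(5))   # True:  2**5 - 1 = 31 is a Mersenne prime
print(lucas_lehmer_sketch(11))  # False: 2**11 - 1 = 2047 = 23 * 89
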
Some files were not shown because too many files have changed in this diff.