Set the Python file maximum line length to 88 characters (#2122)

* flake8 --max-line-length=88

* fixup! Format Python code with psf/black push

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Christian Clauss 2020-06-16 10:09:19 +02:00 committed by GitHub
parent 9438c6bf0b
commit 9316e7c014
90 changed files with 473 additions and 320 deletions

View File

@ -10,7 +10,7 @@ notifications:
on_success: never
before_script:
- black --check . || true
- flake8 --ignore=E203,W503 --max-complexity=25 --max-line-length=120 --statistics --count .
- flake8 --ignore=E203,W503 --max-complexity=25 --max-line-length=88 --statistics --count .
- scripts/validate_filenames.py # no uppercase, no spaces, in a directory
- pip install -r requirements.txt # fast fail on black, flake8, validate_filenames
script:

View File

@ -1,9 +1,11 @@
import math
def intersection(
function, x0, x1
): # function is the f we want to find its root and x0 and x1 are two random starting points
def intersection(function, x0, x1):
"""
function is the f whose root we want to find
x0 and x1 are two random starting points
"""
x_n = x0
x_n1 = x1
while True:
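        # hedged sketch of the loop body the hunk cuts off (a secant-style
        # update; the tolerance below is an assumption, not this file's code):
        f_n, f_n1 = function(x_n), function(x_n1)
        x_n2 = x_n1 - f_n1 * (x_n1 - x_n) / (f_n1 - f_n)
        if abs(x_n2 - x_n1) < 10 ** -5:
            return x_n2
        x_n, x_n1 = x_n1, x_n2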

View File

@ -16,8 +16,8 @@ def valid_connection(
Checks whether it is possible to add next into path by validating 2 statements
1. There should be path between current and next vertex
2. Next vertex should not be in path
If both validations succeeds we return true saying that it is possible to connect this vertices
either we return false
If both validations succeed we return True, saying that it is possible to connect
these vertices; otherwise we return False
Case 1:Use exact graph as in main function, with initialized values
>>> graph = [[0, 1, 0, 1, 0],
@ -52,7 +52,8 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
Pseudo-Code
Base Case:
1. Check if we visited all of the vertices
1.1 If last visited vertex has path to starting vertex return True either return False
1.1 If the last visited vertex has a path to the starting vertex return True,
otherwise return False
Recursive Step:
2. Iterate over each vertex
Check if next vertex is valid for transiting from current vertex
@ -74,7 +75,8 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int)
>>> print(path)
[0, 1, 2, 4, 3, 0]
Case 2: Use exact graph as in previous case, but in the properties taken from middle of calculation
Case 2: Use exact graph as in previous case, but in the properties taken from
middle of calculation
>>> graph = [[0, 1, 0, 1, 0],
... [1, 0, 1, 1, 1],
... [0, 1, 0, 0, 1],
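The doctest continues past the hunk; for context, a hedged sketch of the check the
two statements above describe (not necessarily this file's exact code):

def valid_connection_sketch(graph, next_ver, curr_ind, path):
    # 1. there must be an edge from the current vertex to the next one
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. the next vertex must not already be on the path
    return next_ver not in path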

View File

@ -12,8 +12,8 @@ solution = []
def isSafe(board, row, column):
"""
This function returns a boolean value True if it is safe to place a queen there considering
the current state of the board.
This function returns a boolean value True if it is safe to place a queen there
considering the current state of the board.
Parameters :
board(2D matrix) : board
@ -56,8 +56,8 @@ def solve(board, row):
return
for i in range(len(board)):
"""
For every row it iterates through each column to check if it is feasible to place a
queen there.
For every row it iterates through each column to check if it is feasible to
place a queen there.
If all the combinations for that particular branch are successful the board is
reinitialized for the next possible combination.
"""

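A minimal sketch of the safety check the docstring describes (column plus both
upper diagonals; a standard formulation, not necessarily this file's exact code):

def is_safe_sketch(board, row, column):
    n = len(board)
    for i in range(row):  # same column, above the current row
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row - 1, -1, -1), range(column - 1, -1, -1)):
        if board[i][j] == 1:  # upper-left diagonal
            return False
    for i, j in zip(range(row - 1, -1, -1), range(column + 1, n)):
        if board[i][j] == 1:  # upper-right diagonal
            return False
    return True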
View File

@ -1,9 +1,10 @@
"""
The sum-of-subsetsproblem states that a set of non-negative integers, and a value M,
determine all possible subsets of the given set whose summation sum equal to given M.
The sum-of-subsets problem states that, given a set of non-negative integers and a
value M, determine all possible subsets of the given set whose sum equals the
given M.
Summation of the chosen numbers must be equal to given number M and one number can
be used only once.
Summation of the chosen numbers must be equal to given number M and one number
can be used only once.
"""
@ -21,7 +22,8 @@ def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nu
Creates a state space tree to iterate through each branch using DFS.
It terminates the branching of a node when any of the two conditions
given below satisfy.
This algorithm follows depth-fist-search and backtracks when the node is not branchable.
This algorithm follows depth-first search and backtracks when the node is not
branchable.
"""
if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
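The hunk ends at the pruning test; a hedged standalone sketch of the DFS it belongs
to (signature mirrors the hunk's parameters, but this is not the file's exact code):

def create_state_space_tree_sketch(nums, max_sum, num_index, path, result):
    if sum(path) > max_sum or sum(nums[num_index:]) + sum(path) < max_sum:
        return  # prune: overshot, or the remaining numbers cannot reach max_sum
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree_sketch(
            nums, max_sum, index + 1, path + [nums[index]], result
        )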

View File

@ -1,8 +1,9 @@
# Chinese Remainder Theorem:
# GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
# If GCD(a,b) = 1, then for any remainder ra modulo a and any remainder rb modulo b there exists integer n,
# such that n = ra (mod a) and n = ra(mod b). If n1 and n2 are two such integers, then n1=n2(mod ab)
# If GCD(a,b) = 1, then for any remainder ra modulo a and any remainder rb modulo b
# there exists integer n, such that n = ra (mod a) and n = rb (mod b). If n1 and n2 are
# two such integers, then n1=n2(mod ab)
# Algorithm :
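The hunk cuts off at the algorithm description; a hedged sketch of the standard
construction (extended_gcd_sketch is an assumed helper, not this file's exact code):

def extended_gcd_sketch(a, b):
    # returns (x, y) with a*x + b*y == gcd(a, b)
    if b == 0:
        return 1, 0
    x, y = extended_gcd_sketch(b, a % b)
    return y, x - (a // b) * y

def chinese_remainder_theorem_sketch(n1, r1, n2, r2):
    x, y = extended_gcd_sketch(n1, n2)  # n1*x + n2*y == 1 since gcd(n1, n2) == 1
    n = r2 * x * n1 + r1 * y * n2       # n == r1 (mod n1) and n == r2 (mod n2)
    return n % (n1 * n2)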

View File

@ -1,5 +1,6 @@
# Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the diophantine equation
# a*x + b*y = c has a solution (where x and y are integers) iff gcd(a,b) divides c.
# Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the
# diophantine equation a*x + b*y = c has a solution (where x and y are integers)
# iff gcd(a,b) divides c.
# GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
@ -29,8 +30,9 @@ def diophantine(a, b, c):
# Finding All solutions of Diophantine Equations:
# Theorem : Let gcd(a,b) = d, a = d*p, b = d*q. If (x0,y0) is a solution of Diophantine Equation a*x + b*y = c.
# a*x0 + b*y0 = c, then all the solutions have the form a(x0 + t*q) + b(y0 - t*p) = c, where t is an arbitrary integer.
# Theorem : Let gcd(a,b) = d, a = d*p, b = d*q. If (x0,y0) is a solution of the
# Diophantine equation a*x + b*y = c, i.e. a*x0 + b*y0 = c, then all the solutions
# have the form a(x0 + t*q) + b(y0 - t*p) = c, where t is an arbitrary integer.
# n is the number of solution you want, n = 2 by default
@ -75,8 +77,9 @@ def greatest_common_divisor(a, b):
>>> greatest_common_divisor(7,5)
1
Note : In number theory, two integers a and b are said to be relatively prime, mutually prime, or co-prime
if the only positive integer (factor) that divides both of them is 1 i.e., gcd(a,b) = 1.
Note : In number theory, two integers a and b are said to be relatively prime,
mutually prime, or co-prime if the only positive integer (factor) that
divides both of them is 1 i.e., gcd(a,b) = 1.
>>> greatest_common_divisor(121, 11)
11
@ -91,7 +94,8 @@ def greatest_common_divisor(a, b):
return b
# Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x and y, then d = gcd(a,b)
# Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers
# x and y, then d = gcd(a,b)
def extended_gcd(a, b):
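    # hedged sketch of the body the hunk cuts off (a standard recursive version
    # returning (d, x, y) with a*x + b*y == d == gcd(a, b)):
    if b == 0:
        return a, 1, 0
    d, p, q = extended_gcd(b, a % b)
    return d, q, p - q * (a // b)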

View File

@ -3,8 +3,8 @@
# GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
# Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should return an integer x such that
# 0≤x≤n-1, and b/a=x(modn) (that is, b=ax(modn)).
# Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should
# return an integer x such that 0≤x≤n-1, and b/a=x(modn) (that is, b=ax(modn)).
# Theorem:
# a has a multiplicative inverse modulo n iff gcd(a,n) = 1
@ -68,7 +68,8 @@ def modular_division2(a, b, n):
return x
# Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x and y, then d = gcd(a,b)
# Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x
# and y, then d = gcd(a,b)
def extended_gcd(a, b):
@ -123,8 +124,9 @@ def greatest_common_divisor(a, b):
>>> greatest_common_divisor(7,5)
1
Note : In number theory, two integers a and b are said to be relatively prime, mutually prime, or co-prime
if the only positive integer (factor) that divides both of them is 1 i.e., gcd(a,b) = 1.
Note : In number theory, two integers a and b are said to be relatively prime,
mutually prime, or co-prime if the only positive integer (factor) that divides
both of them is 1 i.e., gcd(a,b) = 1.
>>> greatest_common_divisor(121, 11)
11
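The core of modular division fits in a few lines; a hedged sketch using the modular
inverse via Python 3.8+'s three-argument pow (an assumption, not this file's exact
approach):

def modular_division_sketch(a, b, n):
    # b / a (mod n), valid when gcd(a, n) == 1 and n > 1
    return (b * pow(a, -1, n)) % n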

View File

@ -55,7 +55,8 @@ def check_keys(keyA, keyB, mode):
def encrypt_message(key: int, message: str) -> str:
"""
>>> encrypt_message(4545, 'The affine cipher is a type of monoalphabetic substitution cipher.')
>>> encrypt_message(4545, 'The affine cipher is a type of monoalphabetic '
... 'substitution cipher.')
'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi'
"""
keyA, keyB = divmod(key, len(SYMBOLS))
@ -72,7 +73,8 @@ def encrypt_message(key: int, message: str) -> str:
def decrypt_message(key: int, message: str) -> str:
"""
>>> decrypt_message(4545, 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi')
>>> decrypt_message(4545, 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF'
... '{xIp~{HL}Gi')
'The affine cipher is a type of monoalphabetic substitution cipher.'
"""
keyA, keyB = divmod(key, len(SYMBOLS))
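    # hedged sketch of the remaining body: invert (i * keyA + keyB) % len(SYMBOLS);
    # pow(keyA, -1, m) (Python 3.8+) assumes gcd(keyA, len(SYMBOLS)) == 1, and this
    # is not necessarily the module's exact code
    key_a_inverse = pow(keyA, -1, len(SYMBOLS))
    plain_text = ""
    for symbol in message:
        if symbol in SYMBOLS:
            index = SYMBOLS.find(symbol)
            plain_text += SYMBOLS[(index - keyB) * key_a_inverse % len(SYMBOLS)]
        else:
            plain_text += symbol
    return plain_text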

View File

@ -39,7 +39,8 @@ def decode_base64(text):
'WELCOME to base64 encoding 😁'
>>> decode_base64('QcOF4ZCD8JCAj/CfpJM=')
'AÅᐃ𐀏🤓'
>>> decode_base64("QUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB\r\nQUFB")
>>> decode_base64("QUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUF"
... "BQUFBQUFBQUFB\r\nQUFB")
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
"""
base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

View File

@ -16,7 +16,8 @@ def main():
# I have written my code naively same as definition of primitive root
# however every time I run this program, memory exceeded...
# so I used 4.80 Algorithm in Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996)
# so I used 4.80 Algorithm in
# Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996)
# and it seems to run nicely!
def primitiveRoot(p_val):
print("Generating primitive root of p")

View File

@ -106,7 +106,8 @@ class HillCipher:
req_l = len(self.key_string)
if greatest_common_divisor(det, len(self.key_string)) != 1:
raise ValueError(
f"determinant modular {req_l} of encryption key({det}) is not co prime w.r.t {req_l}.\nTry another key."
f"determinant modular {req_l} of encryption key({det}) is not co prime "
f"w.r.t {req_l}.\nTry another key."
)
def process_text(self, text: str) -> str:

View File

@ -83,19 +83,21 @@ class ShuffledShiftCipher:
Shuffling only 26 letters of the english alphabet can generate 26!
combinations for the shuffled list. In the program we consider, a set of
97 characters (including letters, digits, punctuation and whitespaces),
thereby creating a possibility of 97! combinations (which is a 152 digit number in itself),
thus diminishing the possibility of a brute force approach. Moreover,
shift keys even introduce a multiple of 26 for a brute force approach
thereby creating a possibility of 97! combinations (which is a 152 digit number
in itself), thus diminishing the possibility of a brute force approach.
Moreover, shift keys even introduce a multiple of 26 for a brute force approach
for each of the already 97! combinations.
"""
# key_list_options contain nearly all printable except few elements from string.whitespace
# key_list_options contain nearly all printable except few elements from
# string.whitespace
key_list_options = (
string.ascii_letters + string.digits + string.punctuation + " \t\n"
)
keys_l = []
# creates points known as breakpoints to break the key_list_options at those points and pivot each substring
# creates points known as breakpoints to break the key_list_options at those
# points and pivot each substring
breakpoints = sorted(set(self.__passcode))
temp_list = []
@ -103,7 +105,8 @@ class ShuffledShiftCipher:
for i in key_list_options:
temp_list.extend(i)
# checking breakpoints at which to pivot temporary sublist and add it into keys_l
# checking breakpoints at which to pivot temporary sublist and add it into
# keys_l
if i in breakpoints or i == key_list_options[-1]:
keys_l.extend(temp_list[::-1])
temp_list = []
@ -131,7 +134,8 @@ class ShuffledShiftCipher:
"""
decoded_message = ""
# decoding shift like Caesar cipher algorithm implementing negative shift or reverse shift or left shift
# decoding shift like Caesar cipher algorithm implementing negative shift or
# reverse shift or left shift
for i in encoded_message:
position = self.__key_list.index(i)
decoded_message += self.__key_list[
@ -152,7 +156,8 @@ class ShuffledShiftCipher:
"""
encoded_message = ""
# encoding shift like Caesar cipher algorithm implementing positive shift or forward shift or right shift
# encoding shift like Caesar cipher algorithm implementing positive shift or
# forward shift or right shift
for i in plaintext:
position = self.__key_list.index(i)
encoded_message += self.__key_list[

View File

@ -1,6 +1,8 @@
"""
Peak signal-to-noise ratio - PSNR - https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Source: https://tutorials.techonical.com/how-to-calculate-psnr-value-of-two-images-using-python/
Peak signal-to-noise ratio - PSNR
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Source:
https://tutorials.techonical.com/how-to-calculate-psnr-value-of-two-images-using-python
"""
import math
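A hedged sketch of the formula behind this module, PSNR = 20*log10(MAX/sqrt(MSE));
float arrays are assumed to avoid uint8 wraparound, and this is not the file's exact
code:

import numpy as np

def psnr_sketch(original, contrast, max_pixel=255.0):
    mse = np.mean((original - contrast) ** 2)  # mean squared error
    if mse == 0:
        return 100  # identical images; PSNR is infinite, capped by convention
    return 20 * math.log10(max_pixel / math.sqrt(mse))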

View File

@ -66,9 +66,10 @@ def decimal_to_any(num: int, base: int) -> str:
if base > 36:
raise ValueError("base must be <= 36")
# fmt: off
ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', '16': 'G', '17': 'H',
'18': 'I', '19': 'J', '20': 'K', '21': 'L', '22': 'M', '23': 'N', '24': 'O', '25': 'P',
'26': 'Q', '27': 'R', '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X',
ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F',
'16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L',
'22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R',
'28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X',
'34': 'Y', '35': 'Z'}
# fmt: on
new_value = ""

View File

@ -23,7 +23,8 @@ values = {
def decimal_to_hexadecimal(decimal):
"""
take integer decimal value, return hexadecimal representation as str beginning with 0x
take integer decimal value, return hexadecimal representation as str beginning
with 0x
>>> decimal_to_hexadecimal(5)
'0x5'
>>> decimal_to_hexadecimal(15)

View File

@ -9,7 +9,8 @@ import math
def decimal_to_octal(num: int) -> str:
"""Convert a Decimal Number to an Octal Number.
>>> all(decimal_to_octal(i) == oct(i) for i in (0, 2, 8, 64, 65, 216, 255, 256, 512))
>>> all(decimal_to_octal(i) == oct(i) for i
... in (0, 2, 8, 64, 65, 216, 255, 256, 512))
True
"""
octal = 0

View File

@ -1,5 +1,7 @@
class Heap:
"""A generic Heap class, can be used as min or max by passing the key function accordingly.
"""
A generic Heap class, which can be used as a min or max heap by passing the key
function accordingly.
"""
def __init__(self, key=None):
@ -9,7 +11,8 @@ class Heap:
self.pos_map = {}
# Stores current size of heap.
self.size = 0
# Stores function used to evaluate the score of an item on which basis ordering will be done.
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
self.key = key or (lambda x: x)
def _parent(self, i):
@ -41,7 +44,10 @@ class Heap:
return self.arr[i][1] < self.arr[j][1]
def _get_valid_parent(self, i):
"""Returns index of valid parent as per desired ordering among given index and both it's children"""
"""
Returns index of valid parent as per desired ordering among given index and
both its children
"""
left = self._left(i)
right = self._right(i)
valid_parent = i
@ -87,8 +93,8 @@ class Heap:
self.arr[index] = self.arr[self.size - 1]
self.pos_map[self.arr[self.size - 1][0]] = index
self.size -= 1
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change- so no performance loss in calling both.
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(index)
self._heapify_down(index)
@ -109,7 +115,10 @@ class Heap:
return self.arr[0] if self.size else None
def extract_top(self):
"""Returns top item tuple (Calculated value, item) from heap and removes it as well if present"""
"""
Returns top item tuple (Calculated value, item) from heap and removes it as well
if present
"""
top_item_tuple = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])

View File

@ -126,7 +126,8 @@ class SkipList(Generic[KT, VT]):
"""
:param key: Searched key,
:return: Tuple with searched node (or None if given key is not present)
and list of nodes that refer (if key is present) of should refer to given node.
and list of nodes that refer (if key is present) or should refer to
given node.
"""
# Nodes with refer or should refer to output node
@ -141,7 +142,8 @@ class SkipList(Generic[KT, VT]):
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to be updated.
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(node)
update_vector.reverse() # Note that we were inserting values in reverse order.

View File

@ -49,10 +49,8 @@ def infix_2_postfix(Infix):
else:
if len(Stack) == 0:
Stack.append(x) # If stack is empty, push x to stack
else:
while (
len(Stack) > 0 and priority[x] <= priority[Stack[-1]]
): # while priority of x is not greater than priority of element in the stack
else: # while priority of x is not > priority of element in the stack
while len(Stack) > 0 and priority[x] <= priority[Stack[-1]]:
Postfix.append(Stack.pop()) # pop stack & add to Postfix
Stack.append(x) # push x to stack

View File

@ -1,8 +1,8 @@
"""
A Trie/Prefix Tree is a kind of search tree used to provide quick lookup
of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity
making it impractical in practice. It however provides O(max(search_string, length of longest word))
lookup time making it an optimal approach when space is not an issue.
making it impractical in practice. It however provides O(max(search_string, length of
longest word)) lookup time making it an optimal approach when space is not an issue.
"""

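A minimal self-contained sketch of the structure described above (a standard
formulation, not necessarily this file's exact code):

class TrieNodeSketch:
    def __init__(self):
        self.nodes = {}       # mapping character -> child node
        self.is_leaf = False  # marks the end of a stored word

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            curr = curr.nodes.setdefault(char, TrieNodeSketch())
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf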
View File

@ -29,8 +29,9 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
dst = np.zeros((image_row, image_col))
"""
Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels
in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed.
Non-maximum suppression. If the edge strength of the current pixel is the largest
compared to the other pixels in the mask with the same direction, the value will be
preserved. Otherwise, the value will be suppressed.
"""
for row in range(1, image_row - 1):
for col in range(1, image_col - 1):
@ -71,10 +72,12 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
dst[row, col] = sobel_grad[row, col]
"""
High-Low threshold detection. If an edge pixels gradient value is higher than the high threshold
value, it is marked as a strong edge pixel. If an edge pixels gradient value is smaller than the high
threshold value and larger than the low threshold value, it is marked as a weak edge pixel. If an edge
pixel's value is smaller than the low threshold value, it will be suppressed.
High-Low threshold detection. If an edge pixel's gradient value is higher
than the high threshold value, it is marked as a strong edge pixel. If an
edge pixel's gradient value is smaller than the high threshold value and
larger than the low threshold value, it is marked as a weak edge pixel. If
an edge pixel's value is smaller than the low threshold value, it will be
suppressed.
"""
if dst[row, col] >= threshold_high:
dst[row, col] = strong
@ -84,9 +87,10 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255):
dst[row, col] = weak
"""
Edge tracking. Usually a weak edge pixel caused from true edges will be connected to a strong edge pixel while
noise responses are unconnected. As long as there is one strong edge pixel that is involved in its 8-connected
neighborhood, that weak edge point can be identified as one that should be preserved.
Edge tracking. Usually a weak edge pixel caused from true edges will be connected
to a strong edge pixel while noise responses are unconnected. As long as there is
one strong edge pixel that is involved in its 8-connected neighborhood, that weak
edge point can be identified as one that should be preserved.
"""
for row in range(1, image_row):
for col in range(1, image_col):
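        # hedged sketch of the loop body: keep a weak pixel only if a strong
        # pixel appears in its 8-connected neighborhood, otherwise suppress it
        # (not necessarily this file's exact code)
        if dst[row, col] == weak:
            neighbors = dst[row - 1 : row + 2, col - 1 : col + 2]
            dst[row, col] = strong if (neighbors == strong).any() else 0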

View File

@ -36,7 +36,8 @@ class NearestNeighbour:
Get parent X coordinate for destination X
:param x: Destination X coordinate
:return: Parent X coordinate based on `x ratio`
>>> nn = NearestNeighbour(imread("digital_image_processing/image_data/lena.jpg", 1), 100, 100)
>>> nn = NearestNeighbour(imread("digital_image_processing/image_data/lena.jpg",
... 1), 100, 100)
>>> nn.ratio_x = 0.5
>>> nn.get_x(4)
2
@ -48,7 +49,8 @@ class NearestNeighbour:
Get parent Y coordinate for destination Y
:param y: Destination X coordinate
:return: Parent X coordinate based on `y ratio`
>>> nn = NearestNeighbour(imread("digital_image_processing/image_data/lena.jpg", 1), 100, 100)
>>> nn = NearestNeighbour(imread("digital_image_processing/image_data/lena.jpg",
... 1), 100, 100)
>>> nn.ratio_y = 0.5
>>> nn.get_y(4)
2

View File

@ -6,7 +6,10 @@ from cv2 import imread, imshow, waitKey, destroyAllWindows
def make_sepia(img, factor: int):
""" Function create sepia tone. Source: https://en.wikipedia.org/wiki/Sepia_(color) """
"""
Function create sepia tone.
Source: https://en.wikipedia.org/wiki/Sepia_(color)
"""
pixel_h, pixel_v = img.shape[0], img.shape[1]
def to_grayscale(blue, green, red):

View File

@ -140,7 +140,8 @@ def _validate_input(points):
Exception
---------
ValueError: if points is empty or None, or if a wrong data structure like a scalar is passed
ValueError: if points is empty or None, or if a wrong data structure like a scalar
is passed
TypeError: if an iterable but non-indexable object (eg. dictionary) is passed.
The exception to this is a set which we'll convert to a list before using
@ -229,10 +230,10 @@ def convex_hull_bf(points):
"""
Constructs the convex hull of a set of 2D points using a brute force algorithm.
The algorithm basically considers all combinations of points (i, j) and uses the
definition of convexity to determine whether (i, j) is part of the convex hull or not.
(i, j) is part of the convex hull if and only iff there are no points on both sides
of the line segment connecting the ij, and there is no point k such that k is on either end
of the ij.
definition of convexity to determine whether (i, j) is part of the convex hull or
not. (i, j) is part of the convex hull if and only if there are no points on both
sides of the line segment connecting i and j, and there is no point k such that k
is on either end of ij.
Runtime: O(n^3) - definitely horrible
@ -255,9 +256,11 @@ def convex_hull_bf(points):
[(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
>>> convex_hull_bf([[0, 0], [1, 0], [10, 0]])
[(0.0, 0.0), (10.0, 0.0)]
>>> convex_hull_bf([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1], [-0.75, 1]])
>>> convex_hull_bf([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
... [-0.75, 1]])
[(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
>>> convex_hull_bf([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3), (2, -1), (2, -4), (1, -3)])
>>> convex_hull_bf([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
... (2, -1), (2, -4), (1, -3)])
[(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
"""
@ -299,9 +302,10 @@ def convex_hull_bf(points):
def convex_hull_recursive(points):
"""
Constructs the convex hull of a set of 2D points using a divide-and-conquer strategy
The algorithm exploits the geometric properties of the problem by repeatedly partitioning
the set of points into smaller hulls, and finding the convex hull of these smaller hulls.
The union of the convex hull from smaller hulls is the solution to the convex hull of the larger problem.
The algorithm exploits the geometric properties of the problem by repeatedly
partitioning the set of points into smaller hulls, and finding the convex hull of
these smaller hulls. The union of the convex hull from smaller hulls is the
solution to the convex hull of the larger problem.
Parameter
---------
@ -320,9 +324,11 @@ def convex_hull_recursive(points):
[(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
>>> convex_hull_recursive([[0, 0], [1, 0], [10, 0]])
[(0.0, 0.0), (10.0, 0.0)]
>>> convex_hull_recursive([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1], [-0.75, 1]])
>>> convex_hull_recursive([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
... [-0.75, 1]])
[(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
>>> convex_hull_recursive([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3), (2, -1), (2, -4), (1, -3)])
>>> convex_hull_recursive([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
... (2, -1), (2, -4), (1, -3)])
[(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
"""
@ -335,10 +341,12 @@ def convex_hull_recursive(points):
# use these two anchors to divide all the points into two hulls,
# an upper hull and a lower hull.
# all points to the left (above) the line joining the extreme points belong to the upper hull
# all points to the right (below) the line joining the extreme points below to the lower hull
# ignore all points on the line joining the extreme points since they cannot be part of the
# convex hull
# all points to the left (above) the line joining the extreme points belong to the
# upper hull
# all points to the right (below) the line joining the extreme points below to the
# lower hull
# ignore all points on the line joining the extreme points since they cannot be
# part of the convex hull
left_most_point = points[0]
right_most_point = points[n - 1]
@ -366,10 +374,12 @@ def _construct_hull(points, left, right, convex_set):
Parameters
---------
points: list or None, the hull of points from which to choose the next convex-hull point
points: list or None, the hull of points from which to choose the next convex-hull
point
left: Point, the point to the left of line segment joining left and right
right: The point to the right of the line segment joining left and right
convex_set: set, the current convex-hull. The state of convex-set gets updated by this function
convex_set: set, the current convex-hull. The state of convex-set gets updated by
this function
Note
----

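All three construction strategies in this file rest on one primitive, the signed
cross product; a hedged sketch:

def cross_product_sign_sketch(a, b, c):
    # > 0 if c lies to the left of the directed line a->b,
    # < 0 if to the right, == 0 if the three points are collinear
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])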
View File

@ -1,5 +1,6 @@
"""
This is a pure Python implementation of Dynamic Programming solution to the fibonacci sequence problem.
This is a pure Python implementation of Dynamic Programming solution to the fibonacci
sequence problem.
"""

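A hedged sketch of the bottom-up idea (each Fibonacci value computed once and
reused; not necessarily this file's exact code):

def fibonacci_sketch(n: int) -> int:
    dp = [0, 1]
    for i in range(2, n + 1):
        dp.append(dp[i - 1] + dp[i - 2])
    return dp[n]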
View File

@ -1,7 +1,8 @@
"""
The number of partitions of a number n into at least k parts equals the number of partitions into exactly k parts
plus the number of partitions into at least k-1 parts. Subtracting 1 from each part of a partition of n into k parts
gives a partition of n-k into k parts. These two facts together are used for this algorithm.
The number of partitions of a number n into at least k parts equals the number of
partitions into exactly k parts plus the number of partitions into at least k-1 parts.
Subtracting 1 from each part of a partition of n into k parts gives a partition of n-k
into k parts. These two facts together are used for this algorithm.
"""

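A hedged sketch of the recurrence those two facts give, with q(n, k) counting the
partitions of n into exactly k parts (not necessarily this file's exact code):

def partition_sketch(n: int, k: int) -> int:
    # q(n, k) = q(n - 1, k - 1)   (some part equals 1: remove it)
    #         + q(n - k, k)       (all parts >= 2: subtract 1 from each part)
    if k == 0:
        return 1 if n == 0 else 0
    if n < k:
        return 0
    return partition_sketch(n - 1, k - 1) + partition_sketch(n - k, k)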
View File

@ -12,7 +12,8 @@ def list_of_submasks(mask: int) -> List[int]:
"""
Args:
mask : number which shows mask ( always integer > 0, zero does not have any submasks )
mask : number which shows mask ( always integer > 0, zero does not have any
submasks )
Returns:
all_submasks : the list of submasks of mask (mask s is called submask of mask
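A hedged sketch of the standard enumeration trick, where (s - 1) & mask steps to the
next smaller submask:

def list_of_submasks_sketch(mask: int) -> list:
    all_submasks = []
    submask = mask
    while submask:
        all_submasks.append(submask)
        submask = (submask - 1) & mask  # next smaller submask of mask
    return all_submasks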

View File

@ -9,8 +9,8 @@ Note that only the integer weights 0-1 knapsack problem is solvable
def MF_knapsack(i, wt, val, j):
"""
This code involves the concept of memory functions. Here we solve the subproblems which are needed
unlike the below example
This code involves the concept of memory functions. Here we solve only the
subproblems which are needed, unlike the example below
F is a 2D array with -1s filled up
"""
global F # a global dp table for knapsack

View File

@ -1,6 +1,7 @@
"""
LCS Problem Statement: Given two sequences, find the length of longest subsequence present in both of them.
A subsequence is a sequence that appears in the same relative order, but not necessarily continuous.
LCS Problem Statement: Given two sequences, find the length of longest subsequence
present in both of them. A subsequence is a sequence that appears in the same relative
order, but not necessarily continuous.
Example:"abc", "abg" are subsequences of "abcdefgh".
"""

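A hedged sketch of the usual O(m*n) dynamic-programming table for this problem (not
necessarily this file's exact code):

def longest_common_subsequence_sketch(x: str, y: str) -> int:
    m, n = len(x), len(y)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1  # extend the common subsequence
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[m][n]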
View File

@ -1,10 +1,12 @@
"""
Author : Yvonne
This is a pure Python implementation of Dynamic Programming solution to the longest_sub_array problem.
This is a pure Python implementation of Dynamic Programming solution to the
longest_sub_array problem.
The problem is :
Given an array, to find the longest and continuous sub array and get the max sum of the sub array in the given array.
Given an array, find the longest and continuous subarray and the max sum of the
subarray in the given array.
"""

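A hedged sketch of the standard linear-time (Kadane-style) solution to this problem
(not necessarily this file's exact code):

def max_sub_array_sum_sketch(arr: list) -> int:
    best = current = arr[0]
    for value in arr[1:]:
        current = max(value, current + value)  # extend or restart the subarray
        best = max(best, current)
    return best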
View File

@ -13,8 +13,9 @@ pieces separately or not cutting it at all if the price of it is the maximum obt
def naive_cut_rod_recursive(n: int, prices: list):
"""
Solves the rod-cutting problem via naively without using the benefit of dynamic programming.
The results is the same sub-problems are solved several times leading to an exponential runtime
Solves the rod-cutting problem naively, without using the benefit of dynamic
programming. The result is that the same sub-problems are solved several times,
leading to an exponential runtime
Runtime: O(2^n)
@ -26,7 +27,8 @@ def naive_cut_rod_recursive(n: int, prices: list):
Returns
-------
The maximum revenue obtainable for a rod of length n given the list of prices for each piece.
The maximum revenue obtainable for a rod of length n given the list of prices
for each piece.
Examples
--------
@ -50,8 +52,9 @@ def naive_cut_rod_recursive(n: int, prices: list):
def top_down_cut_rod(n: int, prices: list):
"""
Constructs a top-down dynamic programming solution for the rod-cutting problem
via memoization. This function serves as a wrapper for _top_down_cut_rod_recursive
Constructs a top-down dynamic programming solution for the rod-cutting
problem via memoization. This function serves as a wrapper for
_top_down_cut_rod_recursive
Runtime: O(n^2)
@ -63,12 +66,13 @@ def top_down_cut_rod(n: int, prices: list):
Note
----
For convenience and because Python's lists using 0-indexing, length(max_rev) = n + 1,
to accommodate for the revenue obtainable from a rod of length 0.
For convenience and because Python's lists use 0-indexing, length(max_rev) =
n + 1, to accommodate for the revenue obtainable from a rod of length 0.
Returns
-------
The maximum revenue obtainable for a rod of length n given the list of prices for each piece.
The maximum revenue obtainable for a rod of length n given the list of prices
for each piece.
Examples
-------
@ -99,7 +103,8 @@ def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
Returns
-------
The maximum revenue obtainable for a rod of length n given the list of prices for each piece.
The maximum revenue obtainable for a rod of length n given the list of prices
for each piece.
"""
if max_rev[n] >= 0:
return max_rev[n]
@ -144,7 +149,8 @@ def bottom_up_cut_rod(n: int, prices: list):
"""
_enforce_args(n, prices)
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of length 0.
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
max_rev = [float("-inf") for _ in range(n + 1)]
max_rev[0] = 0
@ -167,7 +173,8 @@ def _enforce_args(n: int, prices: list):
Throws ValueError:
if n is negative or there are fewer items in the price list than the length of the rod
if n is negative or there are fewer items in the price list than the length of
the rod
"""
if n < 0:
raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
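For reference, a hedged sketch of the bottom-up recurrence this file builds toward
(prices[i - 1] is the price of a piece of length i; not the file's exact code):

def bottom_up_cut_rod_sketch(n: int, prices: list):
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0  # a rod of length 0 earns nothing
    for length in range(1, n + 1):
        for i in range(1, length + 1):
            max_rev[length] = max(
                max_rev[length], prices[i - 1] + max_rev[length - i]
            )
    return max_rev[n]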

View File

@ -1,4 +1,4 @@
# Python program to print all subset combinations of n element in given set of r element.
# Print all subset combinations of n element in given set of r element.
def combination_util(arr, n, r, index, data, i):

View File

@ -9,7 +9,8 @@ def isSumSubset(arr, arrLen, requiredSum):
# initially no subsets can be formed hence False/0
subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)]
# for each arr value, a sum of zero(0) can be formed by not taking any element hence True/1
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arrLen + 1):
subset[i][0] = True
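A hedged sketch of how the table fill typically completes from here (a standard
formulation, not necessarily this file's exact code):

def is_sum_subset_sketch(arr: list, required_sum: int) -> bool:
    n = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i][j] or subset[i - 1][j - arr[i - 1]]
    return subset[n][required_sum]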

View File

@ -14,7 +14,8 @@ if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function (trapmf(), gbellmf(),gaussmf(), etc).
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)

View File

@ -11,15 +11,16 @@ def lamberts_ellipsoidal_distance(
two points on the surface of earth given longitudes and latitudes
https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
NOTE: This algorithm uses geodesy/haversine_distance.py to compute central angle, sigma
NOTE: This algorithm uses geodesy/haversine_distance.py to compute central angle,
sigma
Representing the earth as an ellipsoid allows us to approximate distances between points
on the surface much better than a sphere. Ellipsoidal formulas treat the Earth as an
oblate ellipsoid which means accounting for the flattening that happens at the North
and South poles. Lambert's formulae provide accuracy on the order of 10 meteres over
thousands of kilometeres. Other methods can provide millimeter-level accuracy but this
is a simpler method to calculate long range distances without increasing computational
intensity.
Representing the earth as an ellipsoid allows us to approximate distances between
points on the surface much better than a sphere. Ellipsoidal formulas treat the
Earth as an oblate ellipsoid which means accounting for the flattening that happens
at the North and South poles. Lambert's formulae provide accuracy on the order of
10 meters over thousands of kilometers. Other methods can provide
millimeter-level accuracy but this is a simpler method to calculate long range
distances without increasing computational intensity.
Args:
lat1, lon1: latitude and longitude of coordinate 1
@ -50,7 +51,8 @@ def lamberts_ellipsoidal_distance(
# Equation Parameters
# https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
flattening = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

View File

@ -53,8 +53,8 @@ def search(grid, init, goal, cost, heuristic):
while not found and not resign:
if len(cell) == 0:
return "FAIL"
else:
cell.sort() # to choose the least costliest action so as to move closer to the goal
else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
next = cell.pop()
x = next[2]

View File

@ -1,7 +1,7 @@
# floyd_warshall.py
"""
The problem is to find the shortest distance between all pairs of vertices in a
weighted directed graph that can have negative edge weights.
The problem is to find the shortest distance between all pairs of vertices in a
weighted directed graph that can have negative edge weights.
"""
@ -26,10 +26,11 @@ def floyd_warshall(graph, v):
distance[u][v] will contain the shortest distance from vertex u to v.
1. For all edges from v to n, distance[i][j] = weight(edge(i, j)).
3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] + distance[k][j]) for each
possible pair i, j of vertices.
3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] +
distance[k][j]) for each possible pair i, j of vertices.
4. The above is repeated for each vertex k in the graph.
5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is updated to the next vertex[i][k].
5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is
updated to the next vertex[i][k].
"""
dist = [[float("inf") for _ in range(v)] for _ in range(v)]
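# hedged sketch of the relaxation loops described in steps 3-5 above
# (not necessarily this file's exact code)
for k in range(v):
    for i in range(v):
        for j in range(v):
            dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])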

View File

@ -72,7 +72,8 @@ def PrimsAlgorithm(l): # noqa: E741
visited = [0 for i in range(len(l))]
Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree formed in graph
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex
Positions = []

View File

@ -5,19 +5,20 @@ def tarjan(g):
"""
Tarjan's algo for finding strongly connected components in a directed graph
Uses two main attributes of each node to track reachability, the index of that node within a component(index),
and the lowest index reachable from that node(lowlink).
Uses two main attributes of each node to track reachability, the index of that node
within a component(index), and the lowest index reachable from that node(lowlink).
We then perform a dfs of the each component making sure to update these parameters for each node and saving the
nodes we visit on the way.
We then perform a dfs of each component, making sure to update these parameters
for each node and saving the nodes we visit on the way.
If ever we find that the lowest reachable node from a current node is equal to the index of the current node then it
must be the root of a strongly connected component and so we save it and it's equireachable vertices as a strongly
If ever we find that the lowest reachable node from a current node is equal to the
index of the current node then it must be the root of a strongly connected
component and so we save it and its equireachable vertices as a strongly
connected component.
Complexity: strong_connect() is called at most once for each node and has a complexity of O(|E|) as it is DFS.
Complexity: strong_connect() is called at most once for each node and has a
complexity of O(|E|) as it is DFS.
Therefore this has complexity O(|V| + |E|) for a graph G = (V, E)
"""
n = len(g)
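A hedged standalone sketch of the scheme described above (the standard recursive
formulation, not necessarily this file's exact code):

def tarjan_sketch(g):
    n = len(g)
    index_of = [None] * n   # discovery index of each node
    lowlink = [None] * n    # lowest index reachable from each node
    on_stack = [False] * n
    stack, components = [], []
    counter = [0]

    def strong_connect(v):
        index_of[v] = lowlink[v] = counter[0]
        counter[0] += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] is None:
                strong_connect(w)
                lowlink[v] = min(lowlink[v], lowlink[w])
            elif on_stack[w]:
                lowlink[v] = min(lowlink[v], index_of[w])
        if lowlink[v] == index_of[v]:  # v roots a strongly connected component
            component = []
            while True:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
                if w == v:
                    break
            components.append(component)

    for v in range(n):
        if index_of[v] is None:
            strong_connect(v)
    return components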

View File

@ -1,7 +1,9 @@
"""
Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995.
Compared to a cyclic redundancy check of the same length, it trades reliability for speed (preferring the latter).
Adler-32 is more reliable than Fletcher-16, and slightly less reliable than Fletcher-32.[2]
Compared to a cyclic redundancy check of the same length, it trades reliability for
speed (preferring the latter).
Adler-32 is more reliable than Fletcher-16, and slightly less reliable than
Fletcher-32.[2]
source: https://en.wikipedia.org/wiki/Adler-32
"""

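A hedged sketch of the checksum itself (65521 is the largest prime below 2**16; not
necessarily this file's exact code):

def adler32_sketch(plain_text: str) -> int:
    MOD_ADLER = 65521
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER  # running sum of bytes, plus one
        b = (b + a) % MOD_ADLER        # running sum of the running sums
    return (b << 16) | a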
View File

@ -1,13 +1,17 @@
"""
This algorithm was created for sdbm (a public-domain reimplementation of ndbm) database library.
It was found to do well in scrambling bits, causing better distribution of the keys and fewer splits.
This algorithm was created for sdbm (a public-domain reimplementation of ndbm)
database library.
It was found to do well in scrambling bits, causing better distribution of the keys
and fewer splits.
It also happens to be a good general hashing function with good distribution.
The actual function (pseudo code) is:
for i in 0..len(str):
hash(i) = hash(i - 1) * 65599 + str[i];
What is included below is the faster version used in gawk. [there is even a faster, duff-device version]
The magic constant 65599 was picked out of thin air while experimenting with different constants.
What is included below is the faster version used in gawk. [there is even a faster,
duff-device version]
The magic constant 65599 was picked out of thin air while experimenting with
different constants.
It turns out to be a prime.
This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere.
@ -18,7 +22,8 @@
def sdbm(plain_text: str) -> str:
"""
Function implements sdbm hash, easy to use, great for bits scrambling.
iterates over each character in the given string and applies function to each of them.
Iterates over each character in the given string and applies the function to each
of them.
>>> sdbm('Algorithms')
1462174910723540325254304520539387479031000036
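A hedged sketch of the gawk-style version referenced above; the shifts compute
hash * 65599 = hash * (65536 + 64 - 1), and this is not necessarily the file's
exact code:

def sdbm_sketch(plain_text: str) -> int:
    hash_value = 0
    for ch in plain_text:
        hash_value = ord(ch) + (hash_value << 6) + (hash_value << 16) - hash_value
    return hash_value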

View File

@ -3,7 +3,8 @@ Demonstrates implementation of SHA1 Hash function in a Python class and gives ut
to find hash of string or hash of text from a file.
Usage: python sha1.py --string "Hello World!!"
python sha1.py --file "hello_world.txt"
When run without any arguments, it prints the hash of the string "Hello World!! Welcome to Cryptography"
When run without any arguments, it prints the hash of the string "Hello World!!
Welcome to Cryptography"
Also contains a Test class to verify that the generated Hash is same as that
returned by the hashlib library
@ -39,7 +40,8 @@ class SHA1Hash:
def __init__(self, data):
"""
Initiates the variables data and h. h is a list of 5 8-digit Hexadecimal
numbers corresponding to (1732584193, 4023233417, 2562383102, 271733878, 3285377520)
numbers corresponding to
(1732584193, 4023233417, 2562383102, 271733878, 3285377520)
respectively. We will start with this as a message digest. 0x is how you write
Hexadecimal numbers in Python
"""
@ -74,8 +76,8 @@ class SHA1Hash:
# @staticmethod
def expand_block(self, block):
"""
Takes a bytestring-block of length 64, unpacks it to a list of integers and returns a
list of 80 integers after some bit operations
Takes a bytestring-block of length 64, unpacks it to a list of integers and
returns a list of 80 integers after some bit operations
"""
w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
@ -84,12 +86,13 @@ class SHA1Hash:
def final_hash(self):
"""
Calls all the other methods to process the input. Pads the data, then splits into
blocks and then does a series of operations for each block (including expansion).
Calls all the other methods to process the input. Pads the data, then splits
into blocks and then does a series of operations for each block (including
expansion).
For each block, the variable h that was initialized is copied to a,b,c,d,e
and these 5 variables a,b,c,d,e undergo several changes. After all the blocks are
processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1] and so on.
This h becomes our final hash which is returned.
and these 5 variables a,b,c,d,e undergo several changes. After all the blocks
are processed, these 5 variables are pairwise added to h, i.e. a to h[0], b to
h[1] and so on. This h becomes our final hash which is returned.
"""
self.padded_data = self.padding()
self.blocks = self.split_blocks()
@ -138,8 +141,8 @@ class SHA1HashTest(unittest.TestCase):
def main():
"""
Provides option 'string' or 'file' to take input and prints the calculated SHA1 hash.
unittest.main() has been commented because we probably don't want to run
Provides option 'string' or 'file' to take input and prints the calculated SHA1
hash. unittest.main() has been commented because we probably don't want to run
the test each time.
"""
# unittest.main()

View File

@ -1,5 +1,6 @@
"""
Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis function.
Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis
function.
"""
import numpy
@ -75,7 +76,8 @@ def summation_of_cost_derivative(index, end=m):
:param index: index wrt derivative is being calculated
:param end: value where summation ends, default is m, number of examples
:return: Returns the summation of cost derivative
Note: If index is -1, this means we are calculating summation wrt to biased parameter.
Note: If index is -1, this means we are calculating the summation wrt the biased
parameter.
"""
summation_value = 0
for i in range(end):
@ -90,7 +92,8 @@ def get_cost_derivative(index):
"""
:param index: index of the parameter vector wrt to derivative is to be calculated
:return: derivative wrt to that index
Note: If index is -1, this means we are calculating summation wrt to biased parameter.
Note: If index is -1, this means we are calculating the summation wrt the biased
parameter.
"""
cost_derivative_value = summation_of_cost_derivative(index, m) / m
return cost_derivative_value
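Putting the derivative helper to work, a hedged sketch of one update pass; the
learning-rate value and the index -1 convention for the biased parameter are
assumptions, not necessarily this file's exact code:

def gradient_descent_step_sketch(parameter_vector, learning_rate=0.0005):
    # move every parameter against its cost derivative
    return [
        theta - learning_rate * get_cost_derivative(i - 1)
        for i, theta in enumerate(parameter_vector)
    ]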

View File

@ -10,7 +10,8 @@ from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/position_salaries.csv"
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

View File

@ -42,7 +42,10 @@ import pandas as pd
from sklearn.datasets import make_blobs, make_circles
from sklearn.preprocessing import StandardScaler
CANCER_DATASET_URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
CANCER_DATASET_URL = (
"http://archive.ics.uci.edu/ml/machine-learning-databases/"
"breast-cancer-wisconsin/wdbc.data"
)
class SmoSVM:
@ -124,7 +127,8 @@ class SmoSVM:
b_old = self._b
self._b = b
# 4: update error value,here we only calculate those non-bound samples' error
# 4: update error value; here we only calculate those non-bound samples'
# error
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
for s in self.unbound:
if s == i1 or s == i2:
@ -231,8 +235,10 @@ class SmoSVM:
"""
Choose first alpha ;steps:
1:First loop over all sample
2:Second loop over all non-bound samples till all non-bound samples does not voilate kkt condition.
3:Repeat this two process endlessly,till all samples does not voilate kkt condition samples after first loop.
2:Second loop over all non-bound samples till no non-bound sample
violates the kkt condition.
3:Repeat these two processes endlessly, till all samples satisfy the kkt
condition after the first loop.
"""
while True:
all_not_obey = True
@ -352,8 +358,8 @@ class SmoSVM:
)
"""
# way 2
Use objective function check which alpha2 new could get the minimal objectives
Use the objective function to check which new alpha2 could get the
minimal objective
"""
if ol < (oh - self._eps):
a2_new = L
@ -572,11 +578,11 @@ def plot_partition_boundary(
model, train_data, ax, resolution=100, colors=("b", "k", "r")
):
"""
We can not get the optimum w of our kernel svm model which is different from linear svm.
For this reason, we generate randomly distributed points with high desity and prediced values of these points are
calculated by using our tained model. Then we could use this prediced values to draw contour map.
We can not get the optimum w of our kernel svm model, which is different from linear
svm. For this reason, we generate randomly distributed points with high density and
predicted values of these points are calculated by using our trained model. Then we
could use these predicted values to draw a contour map.
And this contour map can represent svm's partition boundary.
"""
train_data_x = train_data[:, 1]
train_data_y = train_data[:, 2]

View File

@ -4,7 +4,8 @@ def bailey_borwein_plouffe(digit_position: int, precision: int = 1000) -> str:
Bailey-Borwein-Plouffe (BBP) formula to calculate the nth hex digit of pi.
Wikipedia page:
https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula
@param digit_position: a positive integer representing the position of the digit to extract.
@param digit_position: a positive integer representing the position of the digit to
extract.
The digit immediately after the decimal point is located at position 1.
@param precision: number of terms in the second summation to calculate.
A higher number reduces the chance of an error but increases the runtime.
@ -41,7 +42,8 @@ def bailey_borwein_plouffe(digit_position: int, precision: int = 1000) -> str:
elif (not isinstance(precision, int)) or (precision < 0):
raise ValueError("Precision must be a nonnegative integer")
# compute an approximation of (16 ** (n - 1)) * pi whose fractional part is mostly accurate
# compute an approximation of (16 ** (n - 1)) * pi whose fractional part is mostly
# accurate
sum_result = (
4 * _subsum(digit_position, 1, precision)
- 2 * _subsum(digit_position, 4, precision)
@ -70,8 +72,9 @@ def _subsum(
denominator = 8 * sum_index + denominator_addend
exponential_term = 0.0
if sum_index < digit_pos_to_extract:
# if the exponential term is an integer and we mod it by the denominator before
# dividing, only the integer part of the sum will change; the fractional part will not
# if the exponential term is an integer and we mod it by the denominator
# before dividing, only the integer part of the sum will change;
# the fractional part will not
exponential_term = pow(
16, digit_pos_to_extract - 1 - sum_index, denominator
)

View File

@ -6,7 +6,8 @@ def ceil(x) -> int:
:return: the smallest integer >= x.
>>> import math
>>> all(ceil(n) == math.ceil(n) for n in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
>>> all(ceil(n) == math.ceil(n) for n
... in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
True
"""
return (

View File

@ -1,5 +1,6 @@
# Python program to show the usage of Fermat's little theorem in a division
# According to Fermat's little theorem, (a / b) mod p always equals a * (b ^ (p - 2)) mod p
# According to Fermat's little theorem, (a / b) mod p always equals
# a * (b ^ (p - 2)) mod p
# Here we assume that p is a prime number, b divides a, and p doesn't divide b
# Wikipedia reference: https://en.wikipedia.org/wiki/Fermat%27s_little_theorem
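A hedged one-function sketch of that identity (Python's three-argument pow does the
modular exponentiation):

def fermat_division_sketch(a: int, b: int, p: int) -> int:
    # (a / b) mod p == a * b**(p - 2) mod p, for prime p not dividing b
    return (a * pow(b, p - 2, p)) % p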

View File

@ -4,7 +4,8 @@
2. Calculates the fibonacci sequence with a formula
a_n = [ Phi^n - (phi)^n ] / Sqrt[5]
reference-->Su, Francis E., et al. "Fibonacci Number Formula." Math Fun Facts. <http://www.math.hmc.edu/funfacts>
reference-->Su, Francis E., et al. "Fibonacci Number Formula." Math Fun Facts.
<http://www.math.hmc.edu/funfacts>
"""
import math
import functools
@ -71,11 +72,13 @@ def _check_number_input(n, min_thresh, max_thresh=None):
print("Incorrect Input: number must not be less than 0")
except ValueTooSmallError:
print(
f"Incorrect Input: input number must be > {min_thresh} for the recursive calculation"
f"Incorrect Input: input number must be > {min_thresh} for the recursive "
"calculation"
)
except ValueTooLargeError:
print(
f"Incorrect Input: input number must be < {max_thresh} for the recursive calculation"
f"Incorrect Input: input number must be < {max_thresh} for the recursive "
"calculation"
)
return False
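A hedged sketch of the closed-form formula quoted at the top of this file (exact
only while floating-point precision holds, roughly n < 71; not necessarily the
file's exact code):

import math

def fibonacci_formula_sketch(n: int) -> int:
    sqrt_5 = math.sqrt(5)
    phi = (1 + sqrt_5) / 2  # the golden ratio, Phi
    return round((phi ** n - (-1 / phi) ** n) / sqrt_5)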

View File

@ -6,7 +6,8 @@ def floor(x) -> int:
:return: the largest integer <= x.
>>> import math
>>> all(floor(n) == math.floor(n) for n in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
>>> all(floor(n) == math.floor(n) for n
... in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
True
"""
return (

View File

@ -8,7 +8,8 @@ def gamma(num: float) -> float:
https://en.wikipedia.org/wiki/Gamma_function
In mathematics, the gamma function is one commonly
used extension of the factorial function to complex numbers.
The gamma function is defined for all complex numbers except the non-positive integers
The gamma function is defined for all complex numbers except the non-positive
integers
>>> gamma(-1)

View File

@ -25,9 +25,9 @@ def greatest_common_divisor(a, b):
"""
Below method is more memory efficient because it does not use the stack (chunk of memory).
While above method is good, uses more memory for huge numbers because of the recursive calls
required to calculate the greatest common divisor.
The below method is more memory efficient because it does not use the stack (chunk of
memory). While the above method is good, it uses more memory for huge numbers because
of the recursive calls required to calculate the greatest common divisor.
"""
@ -50,7 +50,8 @@ def main():
num_1 = int(nums[0])
num_2 = int(nums[1])
print(
f"greatest_common_divisor({num_1}, {num_2}) = {greatest_common_divisor(num_1, num_2)}"
f"greatest_common_divisor({num_1}, {num_2}) = "
f"{greatest_common_divisor(num_1, num_2)}"
)
print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
except (IndexError, UnboundLocalError, ValueError):

View File

@ -1,6 +1,6 @@
"""
In mathematics, the Lucas-Lehmer test (LLT) is a primality test for Mersenne numbers.
https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test
In mathematics, the Lucas-Lehmer test (LLT) is a primality test for Mersenne
numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test
A Mersenne number is a number that is one less than a power of two.
That is M_p = 2^p - 1
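A hedged sketch of the test: for an odd prime p, M_p is prime iff iterating
s -> s^2 - 2 (mod M_p) from s = 4, p - 2 times, ends at 0 (not necessarily this
file's exact code):

def lucas_lehmer_sketch(p: int) -> bool:
    m_p = 2 ** p - 1
    s = 4
    for _ in range(p - 2):
        s = (s * s - 2) % m_p
    return s == 0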

View File

@ -15,7 +15,7 @@ class Matrix:
if isinstance(arg, list): # Initializes a matrix identical to the one provided.
self.t = arg
self.n = len(arg)
else: # Initializes a square matrix of the given size and set the values to zero.
else: # Initializes a square matrix of the given size and set values to zero.
self.n = arg
self.t = [[0 for _ in range(self.n)] for _ in range(self.n)]

View File

@ -1,7 +1,8 @@
"""
Modular Exponential.
Modular exponentiation is a type of exponentiation performed over a modulus.
For more explanation, please check https://en.wikipedia.org/wiki/Modular_exponentiation
For more explanation, please check
https://en.wikipedia.org/wiki/Modular_exponentiation
"""
"""Calculate Modular Exponential."""

View File

@ -44,7 +44,8 @@ if __name__ == "__main__":
Example:
>>> poly = (0.0, 0.0, 5.0, 9.3, 7.0) # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
>>> x = -13.0
>>> print(evaluate_poly(poly, x)) # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9
>>> # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9
>>> print(evaluate_poly(poly, x))
180339.9
"""
poly = (0.0, 0.0, 5.0, 9.3, 7.0)

View File

@ -1,7 +1,8 @@
"""
This script demonstrates the implementation of the ReLU function.
It's a kind of activation function defined as the positive part of its argument in the context of neural network.
It's a kind of activation function defined as the positive part of its argument in the
context of neural networks.
The function takes a vector of K real numbers as input and applies max(x, 0)
elementwise, so after passing through ReLU each element of the vector is 0 or a
positive real number.
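A hedged sketch of the function this script implements (elementwise maximum against
zero):

import numpy as np

def relu_sketch(vector: list) -> np.ndarray:
    return np.maximum(np.array(vector), 0)  # negatives become 0, rest unchanged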

View File

@ -1,8 +1,10 @@
"""
Sieve of Eratosthenes
The sieve of Eratosthenes is an algorithm used to find prime numbers, less than or equal to a given value.
Illustration: https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif
The sieve of Eratosthenes is an algorithm used to find prime numbers, less than or
equal to a given value.
Illustration:
https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif
Reference: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich)
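A hedged sketch of the sieve, marking multiples of each surviving prime starting at
its square (assumes num >= 2; not necessarily this file's exact code):

def prime_sieve_sketch(num: int) -> list:
    is_prime = [True] * (num + 1)
    is_prime[0] = is_prime[1] = False
    for n in range(2, int(num ** 0.5) + 1):
        if is_prime[n]:
            for multiple in range(n * n, num + 1, n):
                is_prime[multiple] = False
    return [n for n in range(num + 1) if is_prime[n]]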

View File

@ -25,7 +25,8 @@ def square_root_iterative(
Square root is approximated using Newton's method.
https://en.wikipedia.org/wiki/Newton%27s_method
>>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001 for i in range(0, 500))
>>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001
... for i in range(500))
True
>>> square_root_iterative(-1)

View File

@ -1,6 +1,7 @@
"""
Implementation of finding nth fibonacci number using matrix exponentiation.
Time Complexity is about O(log(n)*8), where 8 is the complexity of matrix multiplication of size 2 by 2.
Time Complexity is about O(log(n)*8), where 8 is the complexity of matrix
multiplication of size 2 by 2.
And on the other hand complexity of bruteforce solution is O(n).
As we know
f[n] = f[n-1] + f[n-2]
@ -70,7 +71,10 @@ def nth_fibonacci_bruteforce(n):
def main():
fmt = "{} fibonacci number using matrix exponentiation is {} and using bruteforce is {}\n"
fmt = (
"{} fibonacci number using matrix exponentiation is {} and using bruteforce "
"is {}\n"
)
for ordinal in "0th 1st 2nd 3rd 10th 100th 1000th".split():
n = int("".join(c for c in ordinal if c in "0123456789")) # 1000th --> 1000
print(fmt.format(ordinal, nth_fibonacci_matrix(n), nth_fibonacci_bruteforce(n)))
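
A hedged sketch of the O(log n) idea (not necessarily this file's code): raise
[[1, 1], [1, 0]] to the n-th power by repeated squaring; the result holds F(n).

def fib_matrix(n: int) -> int:
    def mat_mult(a, b):
        # 2x2 matrix product; the constant 8 above counts these multiplications.
        return [
            [sum(a[i][k] * b[k][j] for k in range(2)) for j in range(2)]
            for i in range(2)
        ]

    result = [[1, 0], [0, 1]]  # identity
    base = [[1, 1], [1, 0]]
    while n > 0:
        if n & 1:
            result = mat_mult(result, base)
        base = mat_mult(base, base)
        n >>= 1
    return result[0][1]  # [[1,1],[1,0]]^n = [[F(n+1), F(n)], [F(n), F(n-1)]]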

View File

@ -1,5 +1,6 @@
"""
In this problem, we want to rotate the matrix elements by 90, 180, 270 (counterclockwise)
In this problem, we want to rotate the matrix elements by 90, 180, 270
(counterclockwise)
Discussion in stackoverflow:
https://stackoverflow.com/questions/42519/how-do-you-rotate-a-two-dimensional-array
"""

View File

@ -207,8 +207,10 @@ class Matrix:
"""
<method Matrix.ShermanMorrison>
Apply Sherman-Morrison formula in O(n^2).
To learn this formula, please look this: https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
This method returns (A + uv^T)^(-1) where A^(-1) is self. Returns None if it's impossible to calculate.
To learn this formula, please look at this:
https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
This method returns (A + uv^T)^(-1) where A^(-1) is self. Returns None if it's
impossible to calculate.
Warning: This method doesn't check if self is invertible.
Make sure self is invertible before executing this method.
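
For reference, the formula itself is
(A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
valid whenever the denominator is nonzero. A hedged numpy sketch (u and v are
column vectors; names illustrative):

import numpy as np

def sherman_morrison(a_inv: np.ndarray, u: np.ndarray, v: np.ndarray):
    denominator = (1.0 + v.T @ a_inv @ u).item()
    if abs(denominator) < 1e-12:
        return None  # the rank-1 update is not invertible
    return a_inv - (a_inv @ u) @ (v.T @ a_inv) / denominator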

View File

@ -1,7 +1,8 @@
"""
Testing here assumes that numpy and linalg are ALWAYS correct!!!!
If running from PyCharm you can place the following line in "Additional Arguments" for the pytest run configuration
If running from PyCharm you can place the following line in "Additional Arguments" for
the pytest run configuration
-vv -m mat_ops -p no:cacheprovider
"""

View File

@ -88,9 +88,11 @@ class BankersAlgorithm:
This function builds an index control dictionary to track original ids/indices
of processes when altered during execution of method "main"
Return: {0: [a: int, b: int], 1: [c: int, d: int]}
>>> BankersAlgorithm(test_claim_vector, test_allocated_res_table,
>>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table,
... test_maximum_claim_table)._BankersAlgorithm__need_index_manager()
{0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]}
... ) # doctest: +NORMALIZE_WHITESPACE
{0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0],
4: [2, 0, 0, 3]}
"""
return {self.__need().index(i): i for i in self.__need()}

View File

@ -1,6 +1,7 @@
#!/usr/bin/python
"""
The Fisher–Yates shuffle is an algorithm for generating a random permutation of a finite sequence.
The Fisher–Yates shuffle is an algorithm for generating a random permutation of a
finite sequence.
For more details visit
wikipedia/Fischer-Yates-Shuffle.
"""

View File

@ -52,7 +52,8 @@ def seed(canvas):
def run(canvas):
""" This function runs the rules of game through all points, and changes their status accordingly.(in the same canvas)
""" This function runs the rules of game through all points, and changes their
status accordingly.(in the same canvas)
@Args:
--
canvas : canvas of population to run the rules on.

View File

@ -4,7 +4,8 @@ Github : faizan2700
Purpose : You have one function f(x) which takes a float and returns a float;
you have to integrate the function over limits a to b.
The approximation proposed by Thomas Simpsons in 1743 is one way to calculate integration.
The approximation proposed by Thomas Simpson in 1743 is one way to calculate
integration.
( read article : https://cp-algorithms.com/num_methods/simpson-integration.html )
@ -25,7 +26,8 @@ def f(x: float) -> float:
Summary of Simpson Approximation :
By simpsons integration :
1.integration of fxdx with limit a to b is = f(x0) + 4 * f(x1) + 2 * f(x2) + 4 * f(x3) + 2 * f(x4)..... + f(xn)
1. integration of f(x)dx with limits a to b is approximately:
   (h / 3) * [f(x0) + 4 * f(x1) + 2 * f(x2) + 4 * f(x3) + 2 * f(x4) ..... + f(xn)]
where x0 = a
xi = a + i * h
xn = b
@ -71,7 +73,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo
>>> simpson_integration('wrong_input',2,3,4)
Traceback (most recent call last):
...
AssertionError: the function(object) passed should be callable your input : wrong_input
AssertionError: the function(object) passed should be callable your input : ...
>>> simpson_integration(lambda x : x*x,3.45,3.2,1)
-2.8
@ -93,9 +95,10 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo
assert isinstance(a, float) or isinstance(
a, int
), f"a should be float or integer your input : {a}"
assert isinstance(function(a), float) or isinstance(
function(a), int
), f"the function should return integer or float return type of your function, {type(a)}"
assert isinstance(function(a), float) or isinstance(function(a), int), (
"the function should return integer or float return type of your function, "
f"{type(a)}"
)
assert isinstance(b, float) or isinstance(
b, int
), f"b should be float or integer your input : {b}"

View File

@ -23,7 +23,8 @@ class LinearCongruentialGenerator:
def next_number(self):
"""
The smallest number that can be generated is zero.
The largest number that can be generated is modulo-1. modulo is set in the constructor.
The largest number that can be generated is modulo-1. modulo is set in the
constructor.
"""
self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
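
For context, the same recurrence run by hand with glibc-style constants (the
specific values here are illustrative, not this file's defaults):

multiplier, increment, modulo = 1103515245, 12345, 2 ** 31
seed = 42
for _ in range(3):
    seed = (multiplier * seed + increment) % modulo  # the recurrence above
    print(seed)  # each output lies in [0, modulo - 1]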

View File

@ -9,9 +9,8 @@ if any of the following conditions are true:
For example, the string "()()[()]" is properly nested but "[(()]" is not.
The function called is_balanced takes as input a string S which is a sequence of brackets and
returns true if S is nested and false otherwise.
The function called is_balanced takes as input a string S which is a sequence of
brackets and returns True if S is nested and False otherwise.
"""
@ -37,14 +36,11 @@ def is_balanced(S):
def main():
S = input("Enter sequence of brackets: ")
if is_balanced(S):
print((S, "is balanced"))
s = input("Enter sequence of brackets: ")
if is_balanced(s):
print(s, "is balanced")
else:
print((S, "is not balanced"))
print(s, "is not balanced")
if __name__ == "__main__":
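
For reference, a hedged sketch of the usual stack-based check (illustrative, not
necessarily this file's implementation):

def is_balanced(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for char in s:
        if char in "([{":
            stack.append(char)  # remember every opener
        elif char in pairs:
            if not stack or stack.pop() != pairs[char]:
                return False  # closer with no matching opener on top
    return not stack  # balanced iff nothing is left open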

View File

@ -1,7 +1,9 @@
"""
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
Given an array of integers, return indices of the two numbers such that they add up to
a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
You may assume that each input would have exactly one solution, and you may not use the
same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
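
A hedged sketch of the standard one-pass hash-map solution (O(n) time, O(n) space;
not necessarily this file's code):

def two_sum(nums: list, target: int) -> list:
    seen = {}  # value -> index where we saw it
    for index, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], index]
        seen[value] = index
    return []

# two_sum([2, 7, 11, 15], 9) -> [0, 1]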

View File

@ -1,6 +1,7 @@
""" Problem Statement (Digit Fifth Power ): https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:
Surprisingly there are only three numbers that can be written as the sum of fourth
powers of their digits:
1634 = 1^4 + 6^4 + 3^4 + 4^4
8208 = 8^4 + 2^4 + 0^4 + 8^4
@ -9,7 +10,8 @@ As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
Find the sum of all the numbers that can be written as the sum of fifth powers of their
digits.
9^5 = 59,049
59,049 * 7 = 413,343 (which is only a 6 digit number)
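
Given that bound, a minimal brute-force sketch (the cap follows from
6 * 9^5 = 354,294; single-digit numbers are excluded because they are not sums):

total = sum(
    n
    for n in range(10, 354295)
    if n == sum(int(digit) ** 5 for digit in str(n))
)
print(total)  # 443839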

View File

@ -15,7 +15,8 @@ def maximum_digital_sum(a: int, b: int) -> int:
1872
"""
# RETURN the MAXIMUM from the list of SUMs of the list of INT converted from STR of BASE raised to the POWER
# RETURN the MAXIMUM from the list of SUMs of the list of INT converted from STR of
# BASE raised to the POWER
return max(
[
sum([int(x) for x in str(base ** power)])

View File

@ -101,7 +101,8 @@ if __name__ == "__main__":
print("Process ID\tDuration Time\tWaiting Time\tTurnaround Time")
for i, process in enumerate(processes):
print(
f"{process}\t\t{duration_times[i]}\t\t{waiting_times[i]}\t\t{turnaround_times[i]}"
f"{process}\t\t{duration_times[i]}\t\t{waiting_times[i]}\t\t"
f"{turnaround_times[i]}"
)
print(f"Average waiting time = {average_waiting_time}")
print(f"Average turn around time = {average_turnaround_time}")

View File

@ -14,7 +14,8 @@ python linear_search.py
def linear_search(sequence, target):
"""Pure implementation of linear search algorithm in Python
:param sequence: a collection with comparable items (as sorted items not required in Linear Search)
:param sequence: a collection with comparable items (sorted order is not required
for linear search)
:param target: item value to search
:return: index of found item or None if item is not found

View File

@ -18,11 +18,13 @@ def simulated_annealing(
threshold_temp: float = 1,
) -> SearchProblem:
"""
implementation of the simulated annealing algorithm. We start with a given state, find
all its neighbors. Pick a random neighbor, if that neighbor improves the solution, we move
in that direction, if that neighbor does not improve the solution, we generate a random
real number between 0 and 1, if the number is within a certain range (calculated using
temperature) we move in that direction, else we pick another neighbor randomly and repeat the process.
Implementation of the simulated annealing algorithm. We start with a given state,
find all its neighbors. Pick a random neighbor, if that neighbor improves the
solution, we move in that direction, if that neighbor does not improve the solution,
we generate a random real number between 0 and 1, if the number is within a certain
range (calculated using temperature) we move in that direction, else we pick
another neighbor randomly and repeat the process.
Args:
search_prob: The search state at the start.
find_max: If True, the algorithm should find the maximum else the minimum.
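
The "certain range (calculated using temperature)" is the usual Metropolis rule;
a hedged sketch where delta is the neighbor's cost increase (names illustrative):

import math
import random

def accept(delta: float, temperature: float) -> bool:
    # Improvements are always taken; worse moves pass with p = exp(-delta / T).
    if delta < 0:
        return True
    return random.random() < math.exp(-delta / temperature)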

View File

@ -1,8 +1,9 @@
"""
This is pure Python implementation of Tabu search algorithm for a Travelling Salesman Problem, that the distances
between the cities are symmetric (the distance between city 'a' and city 'b' is the same between city 'b' and city 'a').
The TSP can be represented into a graph. The cities are represented by nodes and the distance between them is
represented by the weight of the ark between the nodes.
This is a pure Python implementation of the Tabu search algorithm for a Travelling
Salesman Problem, where the distances between the cities are symmetric (the distance
between city 'a' and city 'b' is the same as between city 'b' and city 'a').
The TSP can be represented as a graph. The cities are represented by nodes and the
distance between them is represented by the weight of the arc between the nodes.
The .txt file with the graph has the form:
@ -10,8 +11,8 @@ node1 node2 distance_between_node1_and_node2
node1 node3 distance_between_node1_and_node3
...
Be careful node1, node2 and the distance between them, must exist only once. This means in the .txt file
should not exist:
Be careful: node1, node2 and the distance between them must appear only once. This
means the .txt file should not contain:
node1 node2 distance_between_node1_and_node2
node2 node1 distance_between_node2_and_node1
@ -19,7 +20,8 @@ For pytests run following command:
pytest
For manual testing run:
python tabu_search.py -f your_file_name.txt -number_of_iterations_of_tabu_search -s size_of_tabu_search
python tabu_search.py -f your_file_name.txt -number_of_iterations_of_tabu_search \
-s size_of_tabu_search
e.g. python tabu_search.py -f tabudata2.txt -i 4 -s 3
"""
@ -33,16 +35,16 @@ def generate_neighbours(path):
neighbor, given a path file that includes a graph.
:param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt)
:return dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node
and the cost (distance) for each neighbor.
:return dict_of_neighbours: Dictionary with key each node and value a list of lists
with the neighbors of the node and the cost (distance) for each neighbor.
Example of dict_of_neighbours:
>>) dict_of_neighbours[a]
[[b,20],[c,18],[d,22],[e,26]]
This indicates the neighbors of node (city) 'a', which has neighbor the node 'b' with distance 20,
the node 'c' with distance 18, the node 'd' with distance 22 and the node 'e' with distance 26.
This indicates the neighbors of node (city) 'a', which has neighbor the node 'b'
with distance 20, the node 'c' with distance 18, the node 'd' with distance 22 and
the node 'e' with distance 26.
"""
dict_of_neighbours = {}
@ -71,19 +73,19 @@ def generate_neighbours(path):
def generate_first_solution(path, dict_of_neighbours):
"""
Pure implementation of generating the first solution for the Tabu search to start, with the redundant resolution
strategy. That means that we start from the starting node (e.g. node 'a'), then we go to the city nearest (lowest
distance) to this node (let's assume is node 'c'), then we go to the nearest city of the node 'c', etc
Pure implementation of generating the first solution for the Tabu search to start,
with the redundant resolution strategy. That means that we start from the starting
node (e.g. node 'a'), then we go to the city nearest (lowest distance) to this node
(let's assume it is node 'c'), then we go to the nearest city of the node 'c', etc.
till we have visited all cities and return to the starting node.
:param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt)
:param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node
and the cost (distance) for each neighbor.
:return first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy
in a list.
:return distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path
in first_solution.
:param dict_of_neighbours: Dictionary with key each node and value a list of lists
with the neighbors of the node and the cost (distance) for each neighbor.
:return first_solution: The solution for the first iteration of Tabu search using
the redundant resolution strategy in a list.
:return distance_of_first_solution: The total distance that Travelling Salesman
will travel, if he follows the path in first_solution.
"""
with open(path) as f:
@ -124,22 +126,23 @@ def generate_first_solution(path, dict_of_neighbours):
def find_neighborhood(solution, dict_of_neighbours):
"""
Pure implementation of generating the neighborhood (sorted by total distance of each solution from
lowest to highest) of a solution with 1-1 exchange method, that means we exchange each node in a solution with each
other node and generating a number of solution named neighborhood.
Pure implementation of generating the neighborhood (sorted by total distance of
each solution from lowest to highest) of a solution with the 1-1 exchange method:
we exchange each node in a solution with each other node, generating a number of
solutions called the neighborhood.
:param solution: The solution in which we want to find the neighborhood.
:param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node
and the cost (distance) for each neighbor.
:return neighborhood_of_solution: A list that includes the solutions and the total distance of each solution
(in form of list) that are produced with 1-1 exchange from the solution that the method took as an input
:param dict_of_neighbours: Dictionary with key each node and value a list of lists
with the neighbors of the node and the cost (distance) for each neighbor.
:return neighborhood_of_solution: A list that includes the solutions and the total
distance of each solution (in form of list) that are produced with 1-1 exchange
from the solution that the method took as an input
Example:
>>) find_neighborhood(['a','c','b','d','e','a'])
[['a','e','b','d','c','a',90], [['a','c','d','b','e','a',90],['a','d','b','c','e','a',93],
['a','c','b','e','d','a',102], ['a','c','e','d','b','a',113], ['a','b','c','d','e','a',93]]
>>> find_neighborhood(['a','c','b','d','e','a']) # doctest: +NORMALIZE_WHITESPACE
[['a','e','b','d','c','a',90], [['a','c','d','b','e','a',90],
['a','d','b','c','e','a',93], ['a','c','b','e','d','a',102],
['a','c','e','d','b','a',113], ['a','b','c','d','e','a',93]]
"""
neighborhood_of_solution = []
@ -177,20 +180,21 @@ def tabu_search(
first_solution, distance_of_first_solution, dict_of_neighbours, iters, size
):
"""
Pure implementation of Tabu search algorithm for a Travelling Salesman Problem in Python.
Pure implementation of Tabu search algorithm for a Travelling Salesman Problem in
Python.
:param first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy
in a list.
:param distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path
in first_solution.
:param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node
and the cost (distance) for each neighbor.
:param first_solution: The solution for the first iteration of Tabu search using
the redundant resolution strategy in a list.
:param distance_of_first_solution: The total distance that Travelling Salesman will
travel, if he follows the path in first_solution.
:param dict_of_neighbours: Dictionary with key each node and value a list of lists
with the neighbors of the node and the cost (distance) for each neighbor.
:param iters: The number of iterations that Tabu search will execute.
:param size: The size of Tabu List.
:return best_solution_ever: The solution with the lowest distance that occurred during the execution of Tabu search.
:return best_cost: The total distance that Travelling Salesman will travel, if he follows the path in best_solution
ever.
:return best_solution_ever: The solution with the lowest distance that occurred
during the execution of Tabu search.
:return best_cost: The total distance that Travelling Salesman will travel, if he
follows the path in best_solution_ever.
"""
count = 1
solution = first_solution
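
A hedged sketch of the 1-1 exchange that builds the neighborhood (illustrative;
the first and last nodes stay fixed because the tour must return to its start):

def one_one_exchange(solution: list) -> list:
    neighbors = []
    for i in range(1, len(solution) - 1):
        for j in range(i + 1, len(solution) - 1):
            neighbor = solution[:]  # copy, then swap one pair of cities
            neighbor[i], neighbor[j] = neighbor[j], neighbor[i]
            neighbors.append(neighbor)
    return neighbors

# one_one_exchange(['a', 'c', 'b', 'd', 'a']) yields the 3 possible swaps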

View File

@ -17,8 +17,9 @@
# number of buckets.
# Time Complexity of Solution:
# Worst case scenario occurs when all the elements are placed in a single bucket. The overall performance
# would then be dominated by the algorithm used to sort each bucket. In this case, O(n log n), because of TimSort
# Worst case scenario occurs when all the elements are placed in a single bucket. The
# overall performance would then be dominated by the algorithm used to sort each bucket.
# In this case, O(n log n), because of TimSort
#
# Average Case O(n + (n^2)/k + k), where k is the number of buckets
#

View File

@ -1,9 +1,11 @@
"""
This is pure Python implementation of comb sort algorithm.
Comb sort is a relatively simple sorting algorithm originally designed by Wlodzimierz Dobosiewicz in 1980.
It was rediscovered by Stephen Lacey and Richard Box in 1991. Comb sort improves on bubble sort algorithm.
Comb sort is a relatively simple sorting algorithm originally designed by Wlodzimierz
Dobosiewicz in 1980. It was rediscovered by Stephen Lacey and Richard Box in 1991.
Comb sort improves on bubble sort algorithm.
In bubble sort, distance (or gap) between two compared elements is always one.
Comb sort improvement is that gap can be much more than 1, in order to prevent slowing down by small values
Comb sort improvement is that gap can be much more than 1, in order to prevent
slowing down caused by small values at the end of the list.
More info on: https://en.wikipedia.org/wiki/Comb_sort
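
For orientation, a compact sketch with the common shrink factor 1.3 (illustrative,
not necessarily this file's code):

def comb_sort(data: list) -> list:
    gap = len(data)
    swapped = True
    while gap > 1 or swapped:
        gap = max(1, int(gap / 1.3))  # shrink; the final passes are gap 1
        swapped = False
        for i in range(len(data) - gap):
            if data[i] > data[i + gap]:
                data[i], data[i + gap] = data[i + gap], data[i]
                swapped = True
    return data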

View File

@ -57,6 +57,7 @@ r = len(M) - 1
z = _inPlaceQuickSort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution is :"
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)

View File

@ -1,7 +1,8 @@
"""
Python implementation of a sort algorithm.
Best Case Scenario : O(n)
Worst Case Scenario : O(n^2) because native Python functions:min, max and remove are already O(n)
Worst Case Scenario : O(n^2) because native Python functions: min, max and remove
are already O(n)
"""

View File

@ -41,7 +41,9 @@ class BoyerMooreSearch:
return -1
def mismatch_in_text(self, currentPos):
""" finds the index of mis-matched character in text when compared with pattern from last
"""
find the index of the mismatched character in text when compared with pattern,
scanning from the end
Parameters :
currentPos (int): current index position of text

View File

@ -14,8 +14,8 @@ def is_palindrome(s: str) -> bool:
>>> is_palindrome("Mr. Owl ate my metal worm?")
True
"""
# Since Punctuation, capitalization, and spaces are usually ignored while checking Palindrome,
# we first remove them from our string.
# Since punctuation, capitalization, and spaces are usually ignored while checking
# palindromes, we first remove them from our string.
s = "".join([character for character in s.lower() if character.isalnum()])
return s == s[::-1]

View File

@ -3,7 +3,8 @@
def jaro_winkler(str1: str, str2: str) -> float:
"""
Jaro–Winkler distance is a string metric measuring an edit distance between two sequences.
Jaro–Winkler distance is a string metric measuring an edit distance between two
sequences.
Output value is between 0.0 and 1.0.
>>> jaro_winkler("martha", "marhta")
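
For reference, the Winkler modification boosts the plain Jaro score sim_j by a
shared-prefix bonus: sim_w = sim_j + l * p * (1 - sim_j), where l is the common
prefix length (capped at 4) and p is typically 0.1. A small illustrative helper:

def winkler_boost(jaro: float, prefix_len: int, scaling: float = 0.1) -> float:
    # Reward up to four characters of common prefix.
    return jaro + min(prefix_len, 4) * scaling * (1 - jaro)

# winkler_boost(0.9444, 3) -> about 0.9611, the "martha"/"marhta" case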

View File

@ -5,11 +5,11 @@ def kmp(pattern, text):
1) Preprocess pattern to identify any suffixes that are identical to prefixes
This tells us where to continue from if we get a mismatch between a character in our pattern
and the text.
This tells us where to continue from if we get a mismatch between a character
in our pattern and the text.
2) Step through the text one character at a time and compare it to a character in the pattern
updating our location within the pattern if necessary
2) Step through the text one character at a time and compare it to a character in
the pattern updating our location within the pattern if necessary
"""

View File

@ -16,8 +16,9 @@ def lower(word: str) -> str:
'what'
"""
# converting to ascii value int value and checking to see if char is a capital letter
# if it is a capital letter it is getting shift by 32 which makes it a lower case letter
# converting the character to its ASCII integer value and checking whether it is a
# capital letter; if it is, it is shifted by 32, which makes it a lowercase letter
return "".join(
chr(ord(char) + 32) if 65 <= ord(char) <= 90 else char for char in word
)

View File

@ -1,5 +1,6 @@
"""
Algorithm for calculating the most cost-efficient sequence for converting one string into another.
Algorithm for calculating the most cost-efficient sequence for converting one string
into another.
The only allowed operations are
---Copy character with cost cC
---Replace character with cost cR

View File

@ -8,15 +8,18 @@ def rabin_karp(pattern, text):
"""
The Rabin-Karp Algorithm for finding a pattern within a piece of text
with complexity O(nm), most efficient when it is used with multiple patterns
as it is able to check if any of a set of patterns match a section of text in o(1) given the precomputed hashes.
as it is able to check if any of a set of patterns match a section of text in O(1)
given the precomputed hashes.
This will be the simple version which only assumes one pattern is being searched for but it's not hard to modify
This will be the simple version which only assumes one pattern is being searched
for, but it's not hard to modify.
1) Calculate pattern hash
2) Step through the text one character at a time passing a window with the same length as the pattern
calculating the hash of the text within the window compare it with the hash of the pattern. Only testing
equality if the hashes match
2) Step through the text one character at a time, passing a window with the same
length as the pattern, calculating the hash of the text within the window and
comparing it with the hash of the pattern. Only test for equality if the hashes
match.
"""
p_len = len(pattern)
t_len = len(text)
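
The O(1) window move relies on a rolling hash update; a minimal sketch (base and
modulus values are illustrative, and highest = pow(base, p_len - 1, modulus)):

def roll(old_hash, left_char, right_char, highest, base=256, modulus=1_000_003):
    # Drop the outgoing character, shift one digit left, add the incoming one.
    return (
        (old_hash - ord(left_char) * highest) * base + ord(right_char)
    ) % modulus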

View File

@ -1,6 +1,7 @@
def split(string: str, separator: str = " ") -> list:
"""
Will split the string up into all the values separated by the separator (defaults to spaces)
Will split the string up into all the values separated by the separator
(defaults to spaces)
>>> split("apple#banana#cherry#orange",separator='#')
['apple', 'banana', 'cherry', 'orange']

View File

@ -14,7 +14,8 @@ def upper(word: str) -> str:
"""
# converting to ASCII int value and checking to see if char is a lowercase letter
# if it is a capital letter it is getting shift by 32 which makes it a capital case letter
# if it is a lowercase letter it is shifted by 32, which makes it a capital
# letter
return "".join(
chr(ord(char) - 32) if 97 <= ord(char) <= 122 else char for char in word
)