mirror of
https://github.com/TheAlgorithms/Python.git
synced 2024-11-23 21:11:08 +00:00
Create codespell.yml (#1698)
* fixup! Format Python code with psf/black push
* Create codespell.yml
* fixup! Format Python code with psf/black push
This commit is contained in:
parent c01d178798
commit bfcb95b297
.github/workflows/codespell.yml (new file, 14 lines, vendored)
@@ -0,0 +1,14 @@
+# GitHub Action to automate the identification of common misspellings in text files
+# https://github.com/codespell-project/codespell
+name: codespell
+on: [push, pull_request]
+jobs:
+  codespell:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
+      - run: pip install codespell flake8
+      - run: |
+          SKIP="./.*,./other/dictionary.txt,./other/words,./project_euler/problem_22/p022_names.txt,*.bak,*.gif,*.jpeg,*.jpg,*.json,*.png,*.pyc"
+          codespell -L ans,fo,hist,iff,secant,tim --skip=$SKIP
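For contributors who want to reproduce the check locally before pushing, a minimal sketch of the equivalent command line is shown below (this assumes codespell is installed from PyPI and the command is run from the repository root; it simply mirrors the skip list and ignore-words list used by the workflow above):

    pip install codespell
    SKIP="./.*,./other/dictionary.txt,./other/words,./project_euler/problem_22/p022_names.txt,*.bak,*.gif,*.jpeg,*.jpg,*.json,*.png,*.pyc"
    codespell -L ans,fo,hist,iff,secant,tim --skip=$SKIP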
DIRECTORY.md (12 lines changed)
@@ -177,14 +177,14 @@
 * [Longest Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_sub_array.py)
 * [Matrix Chain Order](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/matrix_chain_order.py)
 * [Max Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sub_array.py)
-* [Max Sum Contigous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contigous_subsequence.py)
+* [Max Sum Contiguous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contiguous_subsequence.py)
 * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py)
 * [Rod Cutting](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/rod_cutting.py)
 * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py)
 * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py)

 ## File Transfer
-* [Recieve File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/recieve_file.py)
+* [Receive File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/receive_file.py)
 * [Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/send_file.py)

 ## Fuzzy Logic
@@ -219,7 +219,7 @@
 * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py)
 * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py)
 * [Minimum Spanning Tree Prims](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims.py)
-* [Multi Hueristic Astar](https://github.com/TheAlgorithms/Python/blob/master/graphs/multi_hueristic_astar.py)
+* [Multi Heuristic Astar](https://github.com/TheAlgorithms/Python/blob/master/graphs/multi_heuristic_astar.py)
 * [Page Rank](https://github.com/TheAlgorithms/Python/blob/master/graphs/page_rank.py)
 * [Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/prim.py)
 * [Scc Kosaraju](https://github.com/TheAlgorithms/Python/blob/master/graphs/scc_kosaraju.py)
@@ -319,6 +319,7 @@
 * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py)
 * [Simpson Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/simpson_rule.py)
+* [Softmax](https://github.com/TheAlgorithms/Python/blob/master/maths/softmax.py)
 * [Square Root](https://github.com/TheAlgorithms/Python/blob/master/maths/square_root.py)
 * [Sum Of Arithmetic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_arithmetic_series.py)
 * [Test Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/test_prime_check.py)
 * [Trapezoidal Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/trapezoidal_rule.py)
@@ -469,6 +470,8 @@
 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_28/sol1.py)
+* Problem 29
+* [Solution](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_29/solution.py)
 * Problem 30
 * [Soln](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_30/soln.py)
 * Problem 31
 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_31/sol1.py)
 * Problem 32
@@ -508,6 +511,7 @@
 * [Quick Select](https://github.com/TheAlgorithms/Python/blob/master/searches/quick_select.py)
 * [Sentinel Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/sentinel_linear_search.py)
 * [Simple-Binary-Search](https://github.com/TheAlgorithms/Python/blob/master/searches/simple-binary-search.py)
+* [Simulated Annealing](https://github.com/TheAlgorithms/Python/blob/master/searches/simulated_annealing.py)
 * [Tabu Search](https://github.com/TheAlgorithms/Python/blob/master/searches/tabu_search.py)
 * [Ternary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/ternary_search.py)

@@ -564,7 +568,7 @@
 * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py)
 * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py)
 * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py)
-* [Word Occurence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurence.py)
+* [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py)

 ## Traversals
 * [Binary Tree Traversals](https://github.com/TheAlgorithms/Python/blob/master/traversals/binary_tree_traversals.py)
(Two image files also appear in the diff, 57 KiB and 40 KiB; their dimensions and sizes are unchanged.)
@@ -40,8 +40,8 @@ def isSafe(board, row, column):

 def solve(board, row):
     """
-    It creates a state space tree and calls the safe function untill it receives a
-    False Boolean and terminates that brach and backtracks to the next
+    It creates a state space tree and calls the safe function until it receives a
+    False Boolean and terminates that branch and backtracks to the next
     poosible solution branch.
     """
     if row >= len(board):
@@ -58,7 +58,7 @@ def solve(board, row):
         """
         For every row it iterates through each column to check if it is feesible to place a
         queen there.
-        If all the combinations for that particaular branch are successfull the board is
+        If all the combinations for that particular branch are successful the board is
         reinitialized for the next possible combination.
         """
         if isSafe(board, row, i):
@@ -70,7 +70,7 @@ def solve(board, row):

 def printboard(board):
     """
-    Prints the boards that have a successfull combination.
+    Prints the boards that have a successful combination.
     """
     for i in range(len(board)):
         for j in range(len(board)):
@@ -3,15 +3,15 @@
 Hill Cipher:
 The below defined class 'HillCipher' implements the Hill Cipher algorithm.
 The Hill Cipher is an algorithm that implements modern linear algebra techniques
-In this algortihm, you have an encryption key matrix. This is what will be used
+In this algorithm, you have an encryption key matrix. This is what will be used
 in encoding and decoding your text.

-Algortihm:
+Algorithm:
 Let the order of the encryption key be N (as it is a square matrix).
 Your text is divided into batches of length N and converted to numerical vectors
 by a simple mapping starting with A=0 and so on.

-The key is then mulitplied with the newly created batch vector to obtain the
+The key is then multiplied with the newly created batch vector to obtain the
 encoded vector. After each multiplication modular 36 calculations are performed
 on the vectors so as to bring the numbers between 0 and 36 and then mapped with
 their corresponding alphanumerics.
@@ -6,7 +6,7 @@ BYTE_SIZE = 256

 def main():
     filename = "encrypted_file.txt"
-    response = input(r"Encrypte\Decrypt [e\d]: ")
+    response = input(r"Encrypt\Decrypt [e\d]: ")

     if response.lower().startswith("e"):
         mode = "encrypt"
@@ -42,12 +42,12 @@ def makeKeyFiles(name, keySize):

    publicKey, privateKey = generateKey(keySize)
    print("\nWriting public key to file %s_pubkey.txt..." % name)
-   with open("%s_pubkey.txt" % name, "w") as fo:
-       fo.write("{},{},{}".format(keySize, publicKey[0], publicKey[1]))
+   with open("%s_pubkey.txt" % name, "w") as out_file:
+       out_file.write("{},{},{}".format(keySize, publicKey[0], publicKey[1]))

    print("Writing private key to file %s_privkey.txt..." % name)
-   with open("%s_privkey.txt" % name, "w") as fo:
-       fo.write("{},{},{}".format(keySize, privateKey[0], privateKey[1]))
+   with open("%s_privkey.txt" % name, "w") as out_file:
+       out_file.write("{},{},{}".format(keySize, privateKey[0], privateKey[1]))


 if __name__ == "__main__":
@@ -157,11 +157,11 @@ if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
-   bwt_output_msg = "Burrows Wheeler tranform for string '{}' results in '{}'"
+   bwt_output_msg = "Burrows Wheeler transform for string '{}' results in '{}'"
    print(bwt_output_msg.format(s, result["bwt_string"]))
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    fmt = (
-       "Reversing Burrows Wheeler tranform for entry '{}' we get original"
+       "Reversing Burrows Wheeler transform for entry '{}' we get original"
        " string '{}'"
    )
    print(fmt.format(result["bwt_string"], original_string))
@@ -21,7 +21,7 @@ class TreeNode:
 def parse_file(file_path):
     """
     Read the file and build a dict of all letters and their
-    frequences, then convert the dict into a list of Letters.
+    frequencies, then convert the dict into a list of Letters.
     """
     chars = {}
     with open(file_path) as f:
@@ -1,6 +1,6 @@
 """
 Peak signal-to-noise ratio - PSNR - https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
-Soruce: https://tutorials.techonical.com/how-to-calculate-psnr-value-of-two-images-using-python/
+Source: https://tutorials.techonical.com/how-to-calculate-psnr-value-of-two-images-using-python/
 """

 import math
@@ -24,7 +24,7 @@ def decimal_to_octal(num: int) -> str:


 def main():
-    """Print octal equivelents of decimal numbers."""
+    """Print octal equivalents of decimal numbers."""
     print("\n2 in octal is:")
     print(decimal_to_octal(2))  # = 2
     print("\n8 in octal is:")
@@ -15,7 +15,7 @@ class Node:

         if self.left is None and self.right is None:
             return str(self.value)
-        return pformat({"%s" % (self.value): (self.left, self.right)}, indent=1,)
+        return pformat({"%s" % (self.value): (self.left, self.right)}, indent=1)


 class BinarySearchTree:
@@ -11,7 +11,7 @@ def swap(a, b):
     return a, b


-# creating sparse table which saves each nodes 2^ith parent
+# creating sparse table which saves each nodes 2^i-th parent
 def creatSparse(max_node, parent):
     j = 1
     while (1 << j) < max_node:
|
|||
|
||||
def catalan_number(node_count: int) -> int:
|
||||
"""
|
||||
We can find Catalan number many ways but here we use Binomial Coefficent because it
|
||||
We can find Catalan number many ways but here we use Binomial Coefficient because it
|
||||
does the job in O(n)
|
||||
|
||||
return the Catalan number of n using 2nCn/(n+1).
|
||||
|
|
|
@ -12,7 +12,7 @@ class RedBlackTree:
|
|||
less strict, so it will perform faster for writing/deleting nodes
|
||||
and slower for reading in the average case, though, because they're
|
||||
both balanced binary search trees, both will get the same asymptotic
|
||||
perfomance.
|
||||
performance.
|
||||
To read more about them, https://en.wikipedia.org/wiki/Red–black_tree
|
||||
Unless otherwise specified, all asymptotic runtimes are specified in
|
||||
terms of the size of the tree.
|
||||
|
@ -37,7 +37,7 @@ class RedBlackTree:
|
|||
def rotate_left(self):
|
||||
"""Rotate the subtree rooted at this node to the left and
|
||||
returns the new root to this subtree.
|
||||
Perfoming one rotation can be done in O(1).
|
||||
Performing one rotation can be done in O(1).
|
||||
"""
|
||||
parent = self.parent
|
||||
right = self.right
|
||||
|
@ -656,7 +656,7 @@ def test_tree_traversal():
|
|||
|
||||
|
||||
def test_tree_chaining():
|
||||
"""Tests the three different tree chaning functions."""
|
||||
"""Tests the three different tree chaining functions."""
|
||||
tree = RedBlackTree(0)
|
||||
tree = tree.insert(-16).insert(16).insert(8).insert(24).insert(20).insert(22)
|
||||
if list(tree.inorder_traverse()) != [-16, 0, 8, 16, 20, 22, 24]:
|
||||
|
|
|
@@ -21,7 +21,7 @@ class Node:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
-               {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1,
+               {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self):
@@ -161,7 +161,7 @@ def main():
    """After each command, program prints treap"""
    root = None
    print(
-       "enter numbers to creat a tree, + value to add value into treap, - value to erase all nodes with value. 'q' to quit. "
+       "enter numbers to create a tree, + value to add value into treap, - value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
@@ -49,7 +49,7 @@ class BinomialHeap:
    r"""
    Min-oriented priority queue implemented with the Binomial Heap data
    structure implemented with the BinomialHeap class. It supports:
-   - Insert element in a heap with n elemnts: Guaranteed logn, amoratized 1
+   - Insert element in a heap with n elements: Guaranteed logn, amoratized 1
    - Merge (meld) heaps of size m and n: O(logn + logm)
    - Delete Min: O(logn)
    - Peek (return min without deleting it): O(1)
@@ -23,6 +23,7 @@ class Heap(object):
    [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
+   >>>
    """

    def __init__(self):
        self.h = []
        self.curr_size = 0
@@ -107,28 +108,28 @@ def main():
        [2, 5, 3, 0, 2, 3, 0, 3],
        [6, 1, 2, 7, 9, 3, 4, 5, 10, 8],
        [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5],
-       [-45, -2, -5]
+       [-45, -2, -5],
    ]:
-       print('source unsorted list: %s' % unsorted)
+       print("source unsorted list: %s" % unsorted)

        h = Heap()
        h.build_heap(unsorted)
-       print('after build heap: ', end=' ')
+       print("after build heap: ", end=" ")
        h.display()

-       print('max value: %s' % h.get_max())
-       print('delete max value: ', end=' ')
+       print("max value: %s" % h.get_max())
+       print("delete max value: ", end=" ")
        h.display()

        h.insert(100)
-       print('after insert new value 100: ', end=' ')
+       print("after insert new value 100: ", end=" ")
        h.display()

        h.heap_sort()
-       print('heap sort: ', end=' ')
+       print("heap sort: ", end=" ")
        h.display()
        print()


-if __name__ == '__main__':
+if __name__ == "__main__":
    main()
@@ -3,7 +3,7 @@ Implementing Deque using DoublyLinkedList ...
 Operations:
 1. insertion in the front -> O(1)
 2. insertion in the end -> O(1)
-3. remove fron the front -> O(1)
+3. remove from the front -> O(1)
 4. remove from the end -> O(1)
 """

@@ -3,7 +3,7 @@
 - This is an example of a double ended, doubly linked list.
 - Each link references the next link and the previous one.
 - A Doubly Linked List (DLL) contains an extra pointer, typically called previous pointer, together with next pointer and data which are there in singly linked list.
-- Advantages over SLL - IT can be traversed in both forward and backward direction.,Delete operation is more efficent"""
+- Advantages over SLL - IT can be traversed in both forward and backward direction.,Delete operation is more efficient"""


 class LinkedList:  # making main class named linked list
@@ -79,7 +79,7 @@ class LinkedList:
        # END represents end of the LinkedList
        return string_repr + "END"

-   # Indexing Support. Used to get a node at particaular position
+   # Indexing Support. Used to get a node at particular position
    def __getitem__(self, index):
        current = self.head

@@ -14,7 +14,7 @@ class LinkedList:
    def print_list(self):
        temp = self.head
        while temp is not None:
-           print(temp.data, end=' ')
+           print(temp.data, end=" ")
            temp = temp.next
        print()

@@ -54,7 +54,7 @@ def Solve(Postfix):

            Stack.append(
                str(Opr[x](int(A), int(B)))
-           )  # evaluate the 2 values poped from stack & push result to stack
+           )  # evaluate the 2 values popped from stack & push result to stack
            print(
                x.rjust(8),
                ("push(" + A + x + B + ")").ljust(12),
@@ -21,7 +21,7 @@ def calculateSpan(price, S):
    # Calculate span values for rest of the elements
    for i in range(1, n):

-       # Pop elements from stack whlie stack is not
+       # Pop elements from stack while stack is not
        # empty and top of stack is smaller than price[i]
        while len(st) > 0 and price[st[0]] <= price[i]:
            st.pop()
|
|||
# instantiating the class with the values
|
||||
#cl = indexCalculation(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
|
||||
|
||||
# how set the values after instantiate the class cl, (for update the data or when dont
|
||||
# how set the values after instantiate the class cl, (for update the data or when don't
|
||||
# instantiating the class with the values)
|
||||
cl.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir)
|
||||
|
||||
|
@ -551,8 +551,8 @@ indexValue_form1 = cl.calculation("CCCI", red=red, green=green, blue=blue,
|
|||
redEdge=redEdge, nir=nir).astype(np.float64)
|
||||
indexValue_form2 = cl.CCCI()
|
||||
|
||||
# calculating the index with the values directly -- you can set just the values preferred --
|
||||
# note: the *calculation* fuction performs the function *setMatrices*
|
||||
# calculating the index with the values directly -- you can set just the values
|
||||
# preferred note: the *calculation* function performs the function *setMatrices*
|
||||
indexValue_form3 = cl.calculation("CCCI", red=red, green=green, blue=blue,
|
||||
redEdge=redEdge, nir=nir).astype(np.float64)
|
||||
|
||||
|
|
|
@@ -42,7 +42,7 @@ def test_gen_gaussian_kernel():
 # canny.py
 def test_canny():
     canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
-    # assert ambiguos array for all == True
+    # assert ambiguous array for all == True
     assert canny_img.all()
     canny_array = canny.canny(canny_img)
     # assert canny array for at least one True
@@ -42,7 +42,7 @@ class AssignmentUsingBitmask:
        if self.dp[mask][taskno] != -1:
            return self.dp[mask][taskno]

-       # Number of ways when we dont this task in the arrangement
+       # Number of ways when we don't this task in the arrangement
        total_ways_util = self.CountWaysUtil(mask, taskno + 1)

        # now assign the tasks one by one to all possible persons and recursively assign for the remaining tasks.
@@ -49,9 +49,9 @@ def knapsack_with_example_solution(W: int, wt: list, val: list):

    W: int, the total maximum weight for the given knapsack problem.
    wt: list, the vector of weights for all items where wt[i] is the weight
-   of the ith item.
+   of the i-th item.
    val: list, the vector of values for all items where val[i] is the value
-   of te ith item
+   of the i-th item

    Returns
    -------
@@ -1,5 +1,5 @@
 """
-Auther : Yvonne
+Author : Yvonne

 This is a pure Python implementation of Dynamic Programming solution to the longest_sub_array problem.

|
|
|
@ -1,10 +1,10 @@
|
|||
# python program to print all subset combination of n element in given set of r element .
|
||||
# Python program to print all subset combinations of n element in given set of r element.
|
||||
# arr[] ---> Input Array
|
||||
# data[] ---> Temporary array to store current combination
|
||||
# start & end ---> Staring and Ending indexes in arr[]
|
||||
# index ---> Current index in data[]
|
||||
# r ---> Size of a combination to be printed
|
||||
def combinationUtil(arr, n, r, index, data, i):
|
||||
def combination_util(arr, n, r, index, data, i):
|
||||
# Current combination is ready to be printed,
|
||||
# print it
|
||||
if index == r:
|
||||
|
@ -15,29 +15,26 @@ def combinationUtil(arr, n, r, index, data, i):
|
|||
# When no more elements are there to put in data[]
|
||||
if i >= n:
|
||||
return
|
||||
# current is included, put next at next
|
||||
# location
|
||||
# current is included, put next at next location
|
||||
data[index] = arr[i]
|
||||
combinationUtil(arr, n, r, index + 1, data, i + 1)
|
||||
combination_util(arr, n, r, index + 1, data, i + 1)
|
||||
# current is excluded, replace it with
|
||||
# next (Note that i+1 is passed, but
|
||||
# index is not changed)
|
||||
combinationUtil(arr, n, r, index, data, i + 1)
|
||||
combination_util(arr, n, r, index, data, i + 1)
|
||||
# The main function that prints all combinations
|
||||
# of size r in arr[] of size n. This function
|
||||
# mainly uses combinationUtil()
|
||||
|
||||
|
||||
def printcombination(arr, n, r):
|
||||
# A temporary array to store all combination
|
||||
# one by one
|
||||
def print_combination(arr, n, r):
|
||||
# A temporary array to store all combination one by one
|
||||
data = [0] * r
|
||||
# Print all combination using temprary
|
||||
# array 'data[]'
|
||||
combinationUtil(arr, n, r, 0, data, 0)
|
||||
# Print all combination using temporary array 'data[]'
|
||||
combination_util(arr, n, r, 0, data, 0)
|
||||
|
||||
|
||||
# Driver function to check for above function
|
||||
arr = [10, 20, 30, 40, 50]
|
||||
printcombination(arr, len(arr), 3)
|
||||
print_combination(arr, len(arr), 3)
|
||||
# This code is contributed by Ambuj sahu
|
||||
|
|
|
@ -35,7 +35,7 @@ def search(grid, init, goal, cost, heuristic):
|
|||
|
||||
closed = [
|
||||
[0 for col in range(len(grid[0]))] for row in range(len(grid))
|
||||
] # the referrence grid
|
||||
] # the reference grid
|
||||
closed[init[0]][init[1]] = 1
|
||||
action = [
|
||||
[0 for col in range(len(grid[0]))] for row in range(len(grid))
|
||||
|
|
|
@ -69,7 +69,7 @@ def dfs(G, s):
|
|||
Args : G - Dictionary of edges
|
||||
s - Starting Node
|
||||
Vars : vis - Set of visited nodes
|
||||
Q - Traveral Stack
|
||||
Q - Traversal Stack
|
||||
--------------------------------------------------------------------------------
|
||||
"""
|
||||
from collections import deque
|
||||
|
|
|
@ -7,12 +7,12 @@ class Graph:
|
|||
def __init__(self):
|
||||
self.vertex = {}
|
||||
|
||||
# for printing the Graph vertexes
|
||||
# for printing the Graph vertices
|
||||
def printGraph(self):
|
||||
for i in self.vertex.keys():
|
||||
print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
|
||||
|
||||
# for adding the edge beween two vertexes
|
||||
# for adding the edge between two vertices
|
||||
def addEdge(self, fromVertex, toVertex):
|
||||
# check if vertex is already present,
|
||||
if fromVertex in self.vertex.keys():
|
||||
|
@ -22,10 +22,10 @@ class Graph:
|
|||
self.vertex[fromVertex] = [toVertex]
|
||||
|
||||
def BFS(self, startVertex):
|
||||
# Take a list for stoting already visited vertexes
|
||||
# Take a list for stoting already visited vertices
|
||||
visited = [False] * len(self.vertex)
|
||||
|
||||
# create a list to store all the vertexes for BFS
|
||||
# create a list to store all the vertices for BFS
|
||||
queue = []
|
||||
|
||||
# mark the source node as visited and enqueue it
|
||||
|
|
|
@ -7,13 +7,13 @@ class Graph:
|
|||
def __init__(self):
|
||||
self.vertex = {}
|
||||
|
||||
# for printing the Graph vertexes
|
||||
# for printing the Graph vertices
|
||||
def printGraph(self):
|
||||
print(self.vertex)
|
||||
for i in self.vertex.keys():
|
||||
print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
|
||||
|
||||
# for adding the edge beween two vertexes
|
||||
# for adding the edge between two vertices
|
||||
def addEdge(self, fromVertex, toVertex):
|
||||
# check if vertex is already present,
|
||||
if fromVertex in self.vertex.keys():
|
||||
|
@ -37,7 +37,7 @@ class Graph:
|
|||
|
||||
print(startVertex, end=" ")
|
||||
|
||||
# Recur for all the vertexes that are adjacent to this node
|
||||
# Recur for all the vertices that are adjacent to this node
|
||||
for i in self.vertex.keys():
|
||||
if visited[i] == False:
|
||||
self.DFSRec(i, visited)
|
||||
|
|
|
@ -22,7 +22,7 @@ DIJKSTRA(graph G, start vertex s, destination vertex d):
|
|||
13 - add (total_cost,V) to H
|
||||
|
||||
You can think at cost as a distance where Dijkstra finds the shortest distance
|
||||
between vertexes s and v in a graph G. The use of a min heap as H guarantees
|
||||
between vertices s and v in a graph G. The use of a min heap as H guarantees
|
||||
that if a vertex has already been explored there will be no other path with
|
||||
shortest distance, that happens because heapq.heappop will always return the
|
||||
next vertex with the shortest distance, considering that the heap stores not
|
||||
|
@ -35,7 +35,7 @@ import heapq
|
|||
|
||||
|
||||
def dijkstra(graph, start, end):
|
||||
"""Return the cost of the shortest path between vertexes start and end.
|
||||
"""Return the cost of the shortest path between vertices start and end.
|
||||
|
||||
>>> dijkstra(G, "E", "C")
|
||||
6
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
import math
|
||||
import sys
|
||||
|
||||
# For storing the vertex set to retreive node with the lowest distance
|
||||
# For storing the vertex set to retrieve node with the lowest distance
|
||||
|
||||
|
||||
class PriorityQueue:
|
||||
|
@ -103,9 +103,7 @@ class Graph:
|
|||
def show_graph(self):
|
||||
# u -> v(w)
|
||||
for u in self.adjList:
|
||||
print(
|
||||
u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]),
|
||||
)
|
||||
print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]))
|
||||
|
||||
def dijkstra(self, src):
|
||||
# Flush old junk values in par[]
|
||||
|
|
|
@ -3,7 +3,7 @@ import random as rand
|
|||
import math as math
|
||||
import time
|
||||
|
||||
# the dfault weight is 1 if not assigend but all the implementation is weighted
|
||||
# the dfault weight is 1 if not assigned but all the implementation is weighted
|
||||
|
||||
|
||||
class DirectedGraph:
|
||||
|
@ -12,7 +12,7 @@ class DirectedGraph:
|
|||
|
||||
# adding vertices and edges
|
||||
# adding the weight is optional
|
||||
# handels repetition
|
||||
# handles repetition
|
||||
def add_pair(self, u, v, w=1):
|
||||
if self.graph.get(u):
|
||||
if self.graph[u].count([w, v]) == 0:
|
||||
|
@ -25,14 +25,14 @@ class DirectedGraph:
|
|||
def all_nodes(self):
|
||||
return list(self.graph)
|
||||
|
||||
# handels if the input does not exist
|
||||
# handles if the input does not exist
|
||||
def remove_pair(self, u, v):
|
||||
if self.graph.get(u):
|
||||
for _ in self.graph[u]:
|
||||
if _[1] == v:
|
||||
self.graph[u].remove(_)
|
||||
|
||||
# if no destination is meant the defaut value is -1
|
||||
# if no destination is meant the default value is -1
|
||||
def dfs(self, s=-2, d=-1):
|
||||
if s == d:
|
||||
return []
|
||||
|
@ -71,7 +71,7 @@ class DirectedGraph:
|
|||
if len(stack) == 0:
|
||||
return visited
|
||||
|
||||
# c is the count of nodes you want and if you leave it or pass -1 to the funtion the count
|
||||
# c is the count of nodes you want and if you leave it or pass -1 to the function the count
|
||||
# will be random from 10 to 10000
|
||||
def fill_graph_randomly(self, c=-1):
|
||||
if c == -1:
|
||||
|
@ -271,7 +271,7 @@ class Graph:
|
|||
|
||||
# adding vertices and edges
|
||||
# adding the weight is optional
|
||||
# handels repetition
|
||||
# handles repetition
|
||||
def add_pair(self, u, v, w=1):
|
||||
# check if the u exists
|
||||
if self.graph.get(u):
|
||||
|
@ -290,7 +290,7 @@ class Graph:
|
|||
# if u does not exist
|
||||
self.graph[v] = [[w, u]]
|
||||
|
||||
# handels if the input does not exist
|
||||
# handles if the input does not exist
|
||||
def remove_pair(self, u, v):
|
||||
if self.graph.get(u):
|
||||
for _ in self.graph[u]:
|
||||
|
@ -302,7 +302,7 @@ class Graph:
|
|||
if _[1] == u:
|
||||
self.graph[v].remove(_)
|
||||
|
||||
# if no destination is meant the defaut value is -1
|
||||
# if no destination is meant the default value is -1
|
||||
def dfs(self, s=-2, d=-1):
|
||||
if s == d:
|
||||
return []
|
||||
|
@ -341,7 +341,7 @@ class Graph:
|
|||
if len(stack) == 0:
|
||||
return visited
|
||||
|
||||
# c is the count of nodes you want and if you leave it or pass -1 to the funtion the count
|
||||
# c is the count of nodes you want and if you leave it or pass -1 to the function the count
|
||||
# will be random from 10 to 10000
|
||||
def fill_graph_randomly(self, c=-1):
|
||||
if c == -1:
|
||||
|
|
|
@@ -8,7 +8,7 @@ Also contains a Test class to verify that the generated Hash is same as that
 returned by the hashlib library

 SHA1 hash or SHA1 sum of a string is a crytpographic function which means it is easy
-to calculate forwards but extemely difficult to calculate backwards. What this means
+to calculate forwards but extremely difficult to calculate backwards. What this means
 is, you can easily calculate the hash of a string, but it is extremely difficult to
 know the original string if you have its hash. This property is useful to communicate
 securely, send encrypted messages and is very useful in payment systems, blockchain
@@ -139,7 +139,7 @@ class SHA1HashTest(unittest.TestCase):
 def main():
     """
     Provides option 'string' or 'file' to take input and prints the calculated SHA1 hash.
-    unittest.main() has been commented because we probably dont want to run
+    unittest.main() has been commented because we probably don't want to run
     the test each time.
     """
     # unittest.main()
|
|
|
@ -8,7 +8,7 @@ This module contains classes and functions for doing linear algebra.
|
|||
|
||||
### class Vector
|
||||
-
|
||||
- This class represents a vector of arbitray size and related operations.
|
||||
- This class represents a vector of arbitrary size and related operations.
|
||||
|
||||
**Overview about the methods:**
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ def classifier(train_data, train_target, classes, point, k=5):
|
|||
distances.append((distance, data_point[1]))
|
||||
# Choosing 'k' points with the least distances.
|
||||
votes = [i[1] for i in sorted(distances)[:k]]
|
||||
# Most commonly occuring class among them
|
||||
# Most commonly occurring class among them
|
||||
# is the class into which the point is classified
|
||||
result = Counter(votes).most_common(1)[0][0]
|
||||
return classes[result]
|
||||
|
|
|
@ -10,7 +10,7 @@ import numpy as np
|
|||
even log is used.
|
||||
|
||||
Using log and roots can be perceived as tools for penalizing big
|
||||
erors. However, using appropriate metrics depends on the situations,
|
||||
errors. However, using appropriate metrics depends on the situations,
|
||||
and types of data
|
||||
"""
|
||||
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
"""
|
||||
Implementation of sequential minimal optimization(SMO) for support vector machines(SVM).
|
||||
Implementation of sequential minimal optimization (SMO) for support vector machines
|
||||
(SVM).
|
||||
|
||||
Sequential minimal optimization (SMO) is an algorithm for solving the quadratic programming (QP) problem
|
||||
that arises during the training of support vector machines.
|
||||
Sequential minimal optimization (SMO) is an algorithm for solving the quadratic
|
||||
programming (QP) problem that arises during the training of support vector
|
||||
machines.
|
||||
It was invented by John Platt in 1998.
|
||||
|
||||
Input:
|
||||
|
@ -18,7 +20,8 @@ Usage:
|
|||
|
||||
kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5)
|
||||
init_alphas = np.zeros(train.shape[0])
|
||||
SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4, b=0.0, tolerance=0.001)
|
||||
SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4,
|
||||
b=0.0, tolerance=0.001)
|
||||
SVM.fit()
|
||||
predict = SVM.predict(test_samples)
|
||||
|
||||
|
@ -72,7 +75,7 @@ class SmoSVM:
|
|||
|
||||
self.choose_alpha = self._choose_alphas()
|
||||
|
||||
# Calculate alphas using SMO algorithsm
|
||||
# Calculate alphas using SMO algorithm
|
||||
def fit(self):
|
||||
K = self._k
|
||||
state = None
|
||||
|
@ -227,7 +230,7 @@ class SmoSVM:
|
|||
def _choose_a1(self):
|
||||
"""
|
||||
Choose first alpha ;steps:
|
||||
1:Fisrt loop over all sample
|
||||
1:First loop over all sample
|
||||
2:Second loop over all non-bound samples till all non-bound samples does not voilate kkt condition.
|
||||
3:Repeat this two process endlessly,till all samples does not voilate kkt condition samples after first loop.
|
||||
"""
|
||||
|
@ -261,9 +264,11 @@ class SmoSVM:
|
|||
def _choose_a2(self, i1):
|
||||
"""
|
||||
Choose the second alpha by using heuristic algorithm ;steps:
|
||||
1:Choosed alpha2 which get the maximum step size (|E1 - E2|).
|
||||
2:Start in a random point,loop over all non-bound samples till alpha1 and alpha2 are optimized.
|
||||
3:Start in a random point,loop over all samples till alpha1 and alpha2 are optimized.
|
||||
1: Choose alpha2 which gets the maximum step size (|E1 - E2|).
|
||||
2: Start in a random point,loop over all non-bound samples till alpha1 and
|
||||
alpha2 are optimized.
|
||||
3: Start in a random point,loop over all samples till alpha1 and alpha2 are
|
||||
optimized.
|
||||
"""
|
||||
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
|
||||
|
||||
|
@ -316,7 +321,7 @@ class SmoSVM:
|
|||
# select the new alpha2 which could get the minimal objectives
|
||||
if eta > 0.0:
|
||||
a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
|
||||
# a2_new has a boundry
|
||||
# a2_new has a boundary
|
||||
if a2_new_unc >= H:
|
||||
a2_new = H
|
||||
elif a2_new_unc <= L:
|
||||
|
@ -357,7 +362,7 @@ class SmoSVM:
|
|||
else:
|
||||
a2_new = a2
|
||||
|
||||
# a1_new has a boundry too
|
||||
# a1_new has a boundary too
|
||||
a1_new = a1 + s * (a2 - a2_new)
|
||||
if a1_new < 0:
|
||||
a2_new += s * a1_new
|
||||
|
@ -471,7 +476,7 @@ def test_cancel_data():
|
|||
data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
|
||||
samples = np.array(data)[:, :]
|
||||
|
||||
# 2: deviding data into train_data data and test_data data
|
||||
# 2: dividing data into train_data data and test_data data
|
||||
train_data, test_data = samples[:328, :], samples[328:, :]
|
||||
test_tags, test_samples = test_data[:, 0], test_data[:, 1:]
|
||||
|
||||
|
@ -568,7 +573,7 @@ def plot_partition_boundary(
|
|||
):
|
||||
"""
|
||||
We can not get the optimum w of our kernel svm model which is different from linear svm.
|
||||
For this reason, we generate randomly destributed points with high desity and prediced values of these points are
|
||||
For this reason, we generate randomly distributed points with high desity and prediced values of these points are
|
||||
calculated by using our tained model. Then we could use this prediced values to draw contour map.
|
||||
And this contour map can represent svm's partition boundary.
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ def Linearsvc(train_x, train_y):
|
|||
|
||||
def SVC(train_x, train_y):
|
||||
# svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True, probability=False,tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, random_state=None)
|
||||
# various parameters like "kernal","gamma","C" can effectively tuned for a given machine learning model.
|
||||
# various parameters like "kernel","gamma","C" can effectively tuned for a given machine learning model.
|
||||
SVC = svm.SVC(gamma="auto")
|
||||
SVC.fit(train_x, train_y)
|
||||
return SVC
|
||||
|
|
|
@ -12,7 +12,7 @@ https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/
|
|||
|
||||
class Matrix:
|
||||
def __init__(self, arg):
|
||||
if isinstance(arg, list): # Initialzes a matrix identical to the one provided.
|
||||
if isinstance(arg, list): # Initializes a matrix identical to the one provided.
|
||||
self.t = arg
|
||||
self.n = len(arg)
|
||||
else: # Initializes a square matrix of the given size and set the values to zero.
|
||||
|
@ -50,7 +50,7 @@ def fibonacci_with_matrix_exponentiation(n, f1, f2):
|
|||
|
||||
|
||||
def simple_fibonacci(n, f1, f2):
|
||||
# Trival Cases
|
||||
# Trivial Cases
|
||||
if n == 1:
|
||||
return f1
|
||||
elif n == 2:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
"""
|
||||
Refrences: https://en.wikipedia.org/wiki/M%C3%B6bius_function
|
||||
References: https://en.wikipedia.org/wiki/M%C3%B6bius_function
|
||||
References: wikipedia:square free number
|
||||
python/black : True
|
||||
flake8 : True
|
||||
|
|
|
@ -14,7 +14,7 @@ https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
|
|||
|
||||
def prime_sieve_eratosthenes(num):
|
||||
"""
|
||||
print the prime numbers upto n
|
||||
print the prime numbers up to n
|
||||
|
||||
>>> prime_sieve_eratosthenes(10)
|
||||
2 3 5 7
|
||||
|
@ -26,7 +26,7 @@ def prime_sieve_eratosthenes(num):
|
|||
p = 2
|
||||
|
||||
while p * p <= num:
|
||||
if primes[p] == True:
|
||||
if primes[p]:
|
||||
for i in range(p * p, num + 1, p):
|
||||
primes[i] = False
|
||||
p += 1
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
"""
|
||||
Numerical integration or quadrature for a smooth function f with known values at x_i
|
||||
|
||||
This method is the classical approch of suming 'Equally Spaced Abscissas'
|
||||
This method is the classical approach of suming 'Equally Spaced Abscissas'
|
||||
|
||||
method 2:
|
||||
"Simpson Rule"
|
||||
|
|
|
@ -22,7 +22,7 @@ def square_root_iterative(
|
|||
a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
|
||||
) -> float:
|
||||
"""
|
||||
Sqaure root is aproximated using Newtons method.
|
||||
Square root is aproximated using Newtons method.
|
||||
https://en.wikipedia.org/wiki/Newton%27s_method
|
||||
|
||||
>>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001 for i in range(0, 500))
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
"""
|
||||
Numerical integration or quadrature for a smooth function f with known values at x_i
|
||||
|
||||
This method is the classical approch of suming 'Equally Spaced Abscissas'
|
||||
This method is the classical approach of suming 'Equally Spaced Abscissas'
|
||||
|
||||
method 1:
|
||||
"extended trapezoidal rule"
|
||||
|
|
|
@ -31,17 +31,17 @@ def zeller(date_input: str) -> str:
|
|||
...
|
||||
ValueError: invalid literal for int() with base 10: '.4'
|
||||
|
||||
Validate second seperator:
|
||||
Validate second separator:
|
||||
>>> zeller('01-31*2010')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
ValueError: Date seperator must be '-' or '/'
|
||||
ValueError: Date separator must be '-' or '/'
|
||||
|
||||
Validate first seperator:
|
||||
Validate first separator:
|
||||
>>> zeller('01^31-2010')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
ValueError: Date seperator must be '-' or '/'
|
||||
ValueError: Date separator must be '-' or '/'
|
||||
|
||||
Validate out of range year:
|
||||
>>> zeller('01-31-8999')
|
||||
|
@ -55,7 +55,7 @@ def zeller(date_input: str) -> str:
|
|||
...
|
||||
TypeError: zeller() missing 1 required positional argument: 'date_input'
|
||||
|
||||
Test length fo date_input:
|
||||
Test length of date_input:
|
||||
>>> zeller('')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
|
@ -92,7 +92,7 @@ def zeller(date_input: str) -> str:
|
|||
sep_1: str = date_input[2]
|
||||
# Validate
|
||||
if sep_1 not in ["-", "/"]:
|
||||
raise ValueError("Date seperator must be '-' or '/'")
|
||||
raise ValueError("Date separator must be '-' or '/'")
|
||||
|
||||
# Get day
|
||||
d: int = int(date_input[3] + date_input[4])
|
||||
|
@ -100,11 +100,11 @@ def zeller(date_input: str) -> str:
|
|||
if not 0 < d < 32:
|
||||
raise ValueError("Date must be between 1 - 31")
|
||||
|
||||
# Get second seperator
|
||||
# Get second separator
|
||||
sep_2: str = date_input[5]
|
||||
# Validate
|
||||
if sep_2 not in ["-", "/"]:
|
||||
raise ValueError("Date seperator must be '-' or '/'")
|
||||
raise ValueError("Date separator must be '-' or '/'")
|
||||
|
||||
# Get year
|
||||
y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# An OOP aproach to representing and manipulating matrices
|
||||
# An OOP approach to representing and manipulating matrices
|
||||
|
||||
|
||||
class Matrix:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
"""
|
||||
This program print the matix in spiral form.
|
||||
This program print the matrix in spiral form.
|
||||
This problem has been solved through recursive way.
|
||||
|
||||
Matrix must satisfy below conditions
|
||||
|
|
|
@ -35,7 +35,7 @@ def mincut(graph, source, sink):
|
|||
parent = [-1] * (len(graph))
|
||||
max_flow = 0
|
||||
res = []
|
||||
temp = [i[:] for i in graph] # Record orignial cut, copy.
|
||||
temp = [i[:] for i in graph] # Record original cut, copy.
|
||||
while BFS(graph, source, sink, parent):
|
||||
path_flow = float("Inf")
|
||||
s = sink
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
"""
|
||||
- - - - - -- - - - - - - - - - - - - - - - - - - - - - -
|
||||
Name - - CNN - Convolution Neural Network For Photo Recognizing
|
||||
Goal - - Recognize Handing Writting Word Photo
|
||||
Goal - - Recognize Handing Writing Word Photo
|
||||
Detail:Total 5 layers neural network
|
||||
* Convolution layer
|
||||
* Pooling layer
|
||||
* Input layer layer of BP
|
||||
* Hiden layer of BP
|
||||
* Hidden layer of BP
|
||||
* Output layer of BP
|
||||
Author: Stephen Lee
|
||||
Github: 245885195@qq.com
|
||||
|
@ -116,7 +116,7 @@ class CNN:
|
|||
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
|
||||
]
|
||||
data_focus.append(focus)
|
||||
# caculate the feature map of every single kernel, and saved as list of matrix
|
||||
# calculate the feature map of every single kernel, and saved as list of matrix
|
||||
data_featuremap = []
|
||||
Size_FeatureMap = int((size_data - size_conv) / conv_step + 1)
|
||||
for i_map in range(num_conv):
|
||||
|
@ -163,12 +163,12 @@ class CNN:
|
|||
featuremap_pooled.append(map_pooled)
|
||||
return featuremap_pooled
|
||||
|
||||
def _expand(self, datas):
|
||||
def _expand(self, data):
|
||||
# expanding three dimension data to one dimension list
|
||||
data_expanded = []
|
||||
for i in range(len(datas)):
|
||||
shapes = np.shape(datas[i])
|
||||
data_listed = datas[i].reshape(1, shapes[0] * shapes[1])
|
||||
for i in range(len(data)):
|
||||
shapes = np.shape(data[i])
|
||||
data_listed = data[i].reshape(1, shapes[0] * shapes[1])
|
||||
data_listed = data_listed.getA().tolist()[0]
|
||||
data_expanded.extend(data_listed)
|
||||
data_expanded = np.asarray(data_expanded)
|
||||
|
@ -185,7 +185,7 @@ class CNN:
|
|||
self, out_map, pd_pool, num_map, size_map, size_pooling
|
||||
):
|
||||
"""
|
||||
calcluate the gradient from the data slice of pool layer
|
||||
calculate the gradient from the data slice of pool layer
|
||||
pd_pool: list of matrix
|
||||
out_map: the shape of data slice(size_map*size_map)
|
||||
return: pd_all: list of matrix, [num, size_map, size_map]
|
||||
|
@ -217,7 +217,7 @@ class CNN:
|
|||
all_mse = []
|
||||
mse = 10000
|
||||
while rp < n_repeat and mse >= error_accuracy:
|
||||
alle = 0
|
||||
error_count = 0
|
||||
print("-------------Learning Time %d--------------" % rp)
|
||||
for p in range(len(datas_train)):
|
||||
# print('------------Learning Image: %d--------------'%p)
|
||||
|
@ -246,7 +246,7 @@ class CNN:
|
|||
bp_out3 = self.sig(bp_net_k)
|
||||
|
||||
# --------------Model Leaning ------------------------
|
||||
# calcluate error and gradient---------------
|
||||
# calculate error and gradient---------------
|
||||
pd_k_all = np.multiply(
|
||||
(data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
|
||||
)
|
||||
|
@ -285,11 +285,11 @@ class CNN:
|
|||
self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
|
||||
# calculate the sum error of all single image
|
||||
errors = np.sum(abs(data_teach - bp_out3))
|
||||
alle = alle + errors
|
||||
error_count += errors
|
||||
# print(' ----Teach ',data_teach)
|
||||
# print(' ----BP_output ',bp_out3)
|
||||
rp = rp + 1
|
||||
mse = alle / patterns
|
||||
mse = error_count / patterns
|
||||
all_mse.append(mse)
|
||||
|
||||
def draw_error():
|
||||
|
|
|
@ -76,11 +76,11 @@ class Perceptron:
|
|||
has_misclassified = True
|
||||
# print('Epoch: \n',epoch_count)
|
||||
epoch_count = epoch_count + 1
|
||||
# if you want controle the epoch or just by erro
|
||||
# if you want control the epoch or just by error
|
||||
if not has_misclassified:
|
||||
print(("\nEpoch:\n", epoch_count))
|
||||
print("------------------------\n")
|
||||
# if epoch_count > self.epoch_number or not erro:
|
||||
# if epoch_count > self.epoch_number or not error:
|
||||
break
|
||||
|
||||
def sort(self, sample) -> None:
|
||||
|
|
|
@ -13,7 +13,7 @@ class LinearCongruentialGenerator:
|
|||
These parameters are saved and used when nextNumber() is called.
|
||||
|
||||
modulo is the largest number that can be generated (exclusive). The most
|
||||
efficent values are powers of 2. 2^32 is a common value.
|
||||
efficient values are powers of 2. 2^32 is a common value.
|
||||
"""
|
||||
self.multiplier = multiplier
|
||||
self.increment = increment
|
||||
|
|
|
@ -88,7 +88,7 @@ def sieveEr(N):
|
|||
# precondition
|
||||
assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2"
|
||||
|
||||
# beginList: conatins all natural numbers from 2 upt to N
|
||||
# beginList: contains all natural numbers from 2 up to N
|
||||
beginList = [x for x in range(2, N + 1)]
|
||||
|
||||
ans = [] # this list will be returns.
|
||||
|
@ -480,8 +480,8 @@ def getPrimesBetween(pNumber1, pNumber2):
|
|||
"""
|
||||
input: prime numbers 'pNumber1' and 'pNumber2'
|
||||
pNumber1 < pNumber2
|
||||
returns a list of all prime numbers between 'pNumber1' (exclusiv)
|
||||
and 'pNumber2' (exclusiv)
|
||||
returns a list of all prime numbers between 'pNumber1' (exclusive)
|
||||
and 'pNumber2' (exclusive)
|
||||
"""
|
||||
|
||||
# precondition
|
||||
|
|
|
@ -19,7 +19,7 @@ def solution(n):
|
|||
>>> solution(40000)
|
||||
39893
|
||||
"""
|
||||
# fetchs the next number
|
||||
# fetches the next number
|
||||
for number in range(n - 1, 10000, -1):
|
||||
|
||||
# converts number into string.
|
||||
|
|
|
@ -30,5 +30,5 @@ def digitsum(s: str) -> int:
|
|||
|
||||
|
||||
if __name__ == "__main__":
|
||||
count = sum(digitsum(str(i)) for i in range(1000,1000000))
|
||||
count = sum(digitsum(str(i)) for i in range(1000, 1000000))
|
||||
print(count) # --> 443839
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
Sum of digits sequence
|
||||
Problem 551
|
||||
|
||||
Let a(0), a(1),... be an interger sequence defined by:
|
||||
Let a(0), a(1),... be an integer sequence defined by:
|
||||
a(0) = 1
|
||||
for n >= 1, a(n) is the sum of the digits of all preceding terms
|
||||
|
||||
|
@ -33,7 +33,7 @@ def next_term(a_i, k, i, n):
|
|||
k -- k when terms are written in the from a(i) = b*10^k + c.
|
||||
Term are calulcated until c > 10^k or the n-th term is reached.
|
||||
i -- position along the sequence
|
||||
n -- term to caluclate up to if k is large enough
|
||||
n -- term to calculate up to if k is large enough
|
||||
|
||||
Return: a tuple of difference between ending term and starting term, and
|
||||
the number of terms calculated. ex. if starting term is a_0=1, and
|
||||
|
|
|
@ -87,7 +87,7 @@ def hill_climbing(
|
|||
"""
|
||||
implementation of the hill climbling algorithm. We start with a given state, find
|
||||
all its neighbors, move towards the neighbor which provides the maximum (or
|
||||
minimum) change. We keep doing this untill we are at a state where we do not
|
||||
minimum) change. We keep doing this until we are at a state where we do not
|
||||
have any neighbors which can improve the solution.
|
||||
Args:
|
||||
search_prob: The search state at the start.
|
||||
|
|
|
@ -15,7 +15,7 @@ def interpolation_search(sorted_collection, item):
|
|||
right = len(sorted_collection) - 1
|
||||
|
||||
while left <= right:
|
||||
# avoid devided by 0 during interpolation
|
||||
# avoid divided by 0 during interpolation
|
||||
if sorted_collection[left] == sorted_collection[right]:
|
||||
if sorted_collection[left] == item:
|
||||
return left
|
||||
|
@ -59,7 +59,7 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right):
|
|||
:return: index of found item or None if item is not found
|
||||
"""
|
||||
|
||||
# avoid devided by 0 during interpolation
|
||||
# avoid divided by 0 during interpolation
|
||||
if sorted_collection[left] == sorted_collection[right]:
|
||||
if sorted_collection[left] == item:
|
||||
return left
|
||||
|
|
|
@ -20,7 +20,7 @@ def jump_search(arr, x):
|
|||
return -1
|
||||
|
||||
|
||||
arr = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
|
||||
x = 55
|
||||
index = jump_search(arr, x)
|
||||
print("\nNumber " + str(x) + " is at index " + str(index))
|
||||
if __name__ == "__main__":
|
||||
arr = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
|
||||
x = 55
|
||||
print(f"Number {x} is at index {jump_search(arr, x)}")
|
||||
|
|
|
@ -62,7 +62,7 @@ def simulated_annealing(
|
|||
continue # neighbor outside our bounds
|
||||
|
||||
if not find_max:
|
||||
change = change * -1 # incase we are finding minimum
|
||||
change = change * -1 # in case we are finding minimum
|
||||
if change > 0: # improves the solution
|
||||
next_state = picked_neighbor
|
||||
else:
|
||||
|
@ -73,10 +73,8 @@ def simulated_annealing(
|
|||
next_state = picked_neighbor
|
||||
current_temp = current_temp - (current_temp * rate_of_decrease)
|
||||
|
||||
if (
|
||||
current_temp < threshold_temp or next_state is None
|
||||
): # temperature below threshold, or
|
||||
# couldnt find a suitaable neighbor
|
||||
if current_temp < threshold_temp or next_state is None:
|
||||
# temperature below threshold, or could not find a suitaable neighbor
|
||||
search_end = True
|
||||
else:
|
||||
current_state = next_state
|
||||
|
|
|
@ -188,7 +188,7 @@ def tabu_search(
|
|||
and the cost (distance) for each neighbor.
|
||||
:param iters: The number of iterations that Tabu search will execute.
|
||||
:param size: The size of Tabu List.
|
||||
:return best_solution_ever: The solution with the lowest distance that occured during the execution of Tabu search.
|
||||
:return best_solution_ever: The solution with the lowest distance that occurred during the execution of Tabu search.
|
||||
:return best_cost: The total distance that Travelling Salesman will travel, if he follows the path in best_solution
|
||||
ever.
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ def bitonicMerge(a, low, cnt, dire):
|
|||
bitonicMerge(a, low, k, dire)
|
||||
bitonicMerge(a, low + k, k, dire)
|
||||
|
||||
# This funcion first produces a bitonic sequence by recursively
|
||||
# This function first produces a bitonic sequence by recursively
|
||||
|
||||
|
||||
# sorting its two halves in opposite sorting orders, and then
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
def double_sort(lst):
|
||||
"""this sorting algorithm sorts an array using the principle of bubble sort ,
|
||||
but does it both from left to right and right to left ,
|
||||
"""this sorting algorithm sorts an array using the principle of bubble sort,
|
||||
but does it both from left to right and right to left,
|
||||
hence i decided to call it "double sort"
|
||||
:param collection: mutable ordered sequence of elements
|
||||
:return: the same collection in ascending order
|
||||
|
@ -17,7 +17,7 @@ def double_sort(lst):
|
|||
no_of_elements = len(lst)
|
||||
for i in range(
|
||||
0, int(((no_of_elements - 1) / 2) + 1)
|
||||
): # we dont need to traverse to end of list as
|
||||
): # we don't need to traverse to end of list as
|
||||
for j in range(0, no_of_elements - 1):
|
||||
if (
|
||||
lst[j + 1] < lst[j]
|
||||
|
|
|
@ -7,7 +7,7 @@ def pigeonhole_sort(a):
|
|||
"""
|
||||
>>> a = [8, 3, 2, 7, 4, 6, 8]
|
||||
>>> b = sorted(a) # a nondestructive sort
|
||||
>>> pigeonhole_sort(a) # a distructive sort
|
||||
>>> pigeonhole_sort(a) # a destructive sort
|
||||
>>> a == b
|
||||
True
|
||||
"""
|
||||
|
|
|
@ -69,7 +69,7 @@ def insert_next(collection: List, index: int):
|
|||
|
||||
|
||||
if __name__ == "__main__":
|
||||
numbers = input("Enter integers seperated by spaces: ")
|
||||
numbers = input("Enter integers separated by spaces: ")
|
||||
numbers = [int(num) for num in numbers.split()]
|
||||
rec_insertion_sort(numbers, len(numbers))
|
||||
print(numbers)
|
||||
|
|
|
@ -67,7 +67,7 @@ class Automaton:
|
|||
>>> A.search_in("whatever, err ... , wherever")
|
||||
{'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
|
||||
"""
|
||||
result = dict() # returns a dict with keywords and list of its occurences
|
||||
result = dict() # returns a dict with keywords and list of its occurrences
|
||||
current_state = 0
|
||||
for i in range(len(string)):
|
||||
while (
|
||||
|
|
|
@ -27,7 +27,7 @@ class BoyerMooreSearch:
|
|||
def match_in_pattern(self, char):
|
||||
""" finds the index of char in pattern in reverse order
|
||||
|
||||
Paremeters :
|
||||
Parameters :
|
||||
char (chr): character to be searched
|
||||
|
||||
Returns :
|
||||
|
@ -43,12 +43,12 @@ class BoyerMooreSearch:
|
|||
def mismatch_in_text(self, currentPos):
|
||||
""" finds the index of mis-matched character in text when compared with pattern from last
|
||||
|
||||
Paremeters :
|
||||
Parameters :
|
||||
currentPos (int): current index position of text
|
||||
|
||||
Returns :
|
||||
i (int): index of mismatched char from last in text
|
||||
-1 (int): if there is no mis-match between pattern and text block
|
||||
-1 (int): if there is no mismatch between pattern and text block
|
||||
"""
|
||||
|
||||
for i in range(self.patLen - 1, -1, -1):
|
||||
|
|
|
@ -13,12 +13,13 @@ def palindromic_string(input_string):
|
|||
"""
|
||||
Manacher’s algorithm which finds Longest Palindromic Substring in linear time.
|
||||
|
||||
1. first this conver input_string("xyx") into new_string("x|y|x") where odd positions are actual input
|
||||
characters.
|
||||
1. first this convert input_string("xyx") into new_string("x|y|x") where odd
|
||||
positions are actual input characters.
|
||||
2. for each character in new_string it find corresponding length and store,
|
||||
a. max_length
|
||||
b. max_length's center
|
||||
3. return output_string from center - max_length to center + max_length and remove all "|"
|
||||
3. return output_string from center - max_length to center + max_length and remove
|
||||
all "|"
|
||||
"""
|
||||
max_length = 0
|
||||
|
||||
|
@ -35,7 +36,7 @@ def palindromic_string(input_string):
|
|||
# for each character in new_string find corresponding palindromic string
|
||||
for i in range(len(new_input_string)):
|
||||
|
||||
# get palindromic length from ith position
|
||||
# get palindromic length from i-th position
|
||||
length = palindromic_length(i, 1, new_input_string)
|
||||
|
||||
# update max_length and start position
|
||||
|
|
|
@@ -1,17 +1,17 @@
-def split(string: str, seperator: str = " ") -> list:
+def split(string: str, separator: str = " ") -> list:
    """
-   Will split the string up into all the values seperated by the seperator (defaults to spaces)
+   Will split the string up into all the values separated by the separator (defaults to spaces)

-   >>> split("apple#banana#cherry#orange",seperator='#')
+   >>> split("apple#banana#cherry#orange",separator='#')
    ['apple', 'banana', 'cherry', 'orange']

    >>> split("Hello there")
    ['Hello', 'there']

-   >>> split("11/22/63",seperator = '/')
+   >>> split("11/22/63",separator = '/')
    ['11', '22', '63']

-   >>> split("12:43:39",seperator = ":")
+   >>> split("12:43:39",separator = ":")
    ['12', '43', '39']
    """

@@ -19,7 +19,7 @@ def split(string: str, seperator: str = " ") -> list:

    last_index = 0
    for index, char in enumerate(string):
-       if char == seperator:
+       if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
@@ -11,11 +11,11 @@ def word_occurence(sentence: str) -> dict:
    ... in Counter(SENTENCE.split()).items())
    True
    """
-   occurence = defaultdict(int)
+   occurrence = defaultdict(int)
    # Creating a dictionary containing count of each word
    for word in sentence.split(" "):
-       occurence[word] += 1
-   return occurence
+       occurrence[word] += 1
+   return occurrence


 if __name__ == "__main__":