requirements.txt: Unpin numpy (#2287)

* requirements.txt: Unpin numpy

* fixup! Format Python code with psf/black push

* Less clutter

* fixup! Format Python code with psf/black push

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Author: Christian Clauss, 2020-08-06 17:50:23 +02:00 (committed via GitHub)
Parent: f0d7879a11
Commit: 1fb1fdd130
4 changed files with 29 additions and 25 deletions

[File 1 of 4: Karger's algorithm (graph partitioning)]

@@ -5,20 +5,19 @@ An implementation of Karger's Algorithm for partitioning a graph.
 import random
 from typing import Dict, List, Set, Tuple
 
 # Adjacency list representation of this graph:
 # https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
 TEST_GRAPH = {
-    '1': ['2', '3', '4', '5'],
-    '2': ['1', '3', '4', '5'],
-    '3': ['1', '2', '4', '5', '10'],
-    '4': ['1', '2', '3', '5', '6'],
-    '5': ['1', '2', '3', '4', '7'],
-    '6': ['7', '8', '9', '10', '4'],
-    '7': ['6', '8', '9', '10', '5'],
-    '8': ['6', '7', '9', '10'],
-    '9': ['6', '7', '8', '10'],
-    '10': ['6', '7', '8', '9', '3']
+    "1": ["2", "3", "4", "5"],
+    "2": ["1", "3", "4", "5"],
+    "3": ["1", "2", "4", "5", "10"],
+    "4": ["1", "2", "3", "5", "6"],
+    "5": ["1", "2", "3", "4", "7"],
+    "6": ["7", "8", "9", "10", "4"],
+    "7": ["6", "8", "9", "10", "5"],
+    "8": ["6", "7", "9", "10"],
+    "9": ["6", "7", "8", "10"],
+    "10": ["6", "7", "8", "9", "3"],
 }
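
Note: TEST_GRAPH is an undirected graph stored as an adjacency list, so every
edge must be listed by both endpoints. A quick symmetry check (a sketch that
assumes TEST_GRAPH from the hunk above is in scope):

    # Every edge u-v must appear in both u's and v's neighbor lists.
    for node, neighbors in TEST_GRAPH.items():
        for neighbor in neighbors:
            assert node in TEST_GRAPH[neighbor], f"edge {node}-{neighbor} is one-sided"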
@@ -61,8 +60,9 @@ def partition_graph(graph: Dict[str, List[str]]) -> Set[Tuple[str, str]]:
         for neighbor in uv_neighbors:
             graph_copy[neighbor].append(uv)
 
-        contracted_nodes[uv] = {contracted_node for contracted_node in
-                                contracted_nodes[u].union(contracted_nodes[v])}
+        contracted_nodes[uv] = {
+            node for node in contracted_nodes[u].union(contracted_nodes[v])
+        }
 
         # Remove nodes u and v.
         del graph_copy[u]
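
Note on this hunk: the comprehension merges the label sets of the two endpoints
being contracted into one set for the merged node. A toy illustration of just
that step (the values and the "1-2" label are made up for the example):

    contracted_nodes = {"1": {"1"}, "2": {"2", "7"}}  # pretend "2" already absorbed "7"
    u, v, uv = "1", "2", "1-2"
    contracted_nodes[uv] = {
        node for node in contracted_nodes[u].union(contracted_nodes[v])
    }
    print(contracted_nodes[uv])  # {'1', '2', '7'} (set order is arbitrary)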
@@ -75,8 +75,12 @@ def partition_graph(graph: Dict[str, List[str]]) -> Set[Tuple[str, str]]:
     # Find cutset.
     groups = [contracted_nodes[node] for node in graph_copy]
-    return {(node, neighbor) for node in groups[0]
-            for neighbor in graph[node] if neighbor in groups[1]}
+    return {
+        (node, neighbor)
+        for node in groups[0]
+        for neighbor in graph[node]
+        if neighbor in groups[1]
+    }
 
 
 if __name__ == "__main__":
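
The rewrapped return value is the cutset: every edge of the original graph whose
endpoints ended up in different contracted groups. A sketch using TEST_GRAPH
from the first hunk, with the two clusters of the linked Wikipedia figure as
hand-picked groups:

    groups = [{"1", "2", "3", "4", "5"}, {"6", "7", "8", "9", "10"}]
    cutset = {
        (node, neighbor)
        for node in groups[0]
        for neighbor in TEST_GRAPH[node]
        if neighbor in groups[1]
    }
    print(cutset)  # {('3', '10'), ('4', '6'), ('5', '7')} -- the 3-edge minimum cut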

[File 2 of 4: scoring algorithm]

@@ -1,4 +1,4 @@
-'''
+"""
 developed by: markmelnic
 original repo: https://github.com/markmelnic/Scoring-Algorithm
@@ -23,17 +23,17 @@ Thus the weights for each column are as follows:
 >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
 [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
-'''
+"""
 
-def procentual_proximity(source_data : list, weights : list) -> list:
-    '''
+def procentual_proximity(source_data: list, weights: list) -> list:
+    """
     weights - int list
     possible values - 0 / 1
     0 if lower values have higher weight in the data set
     1 if higher values have higher weight in the data set
-    '''
+    """
 
     # getting data
     data_lists = []
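
The doctest above pins down the scoring scheme: each column is min-max
normalized, the fraction is flipped for weight-0 ("lower is better") columns,
and the per-row sums are appended. A rough reconstruction that reproduces the
doctest output (a sketch, not necessarily the repo's exact code):

    def procentual_proximity_sketch(source_data: list, weights: list) -> list:
        scores = [0.0] * len(source_data)
        for weight, column in zip(weights, zip(*source_data)):  # column-wise
            lo, hi = min(column), max(column)  # assumes hi > lo in every column
            for i, value in enumerate(column):
                fraction = (value - lo) / (hi - lo)  # 0.0 at the min, 1.0 at the max
                scores[i] += fraction if weight == 1 else 1 - fraction
        return [list(row) + [score] for row, score in zip(source_data, scores)]

    print(procentual_proximity_sketch(
        [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1]
    ))
    # [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]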

[File 3 of 4: requirements.txt]

@@ -5,7 +5,7 @@ flake8
 keras
 matplotlib
 mypy
-numpy>=1.17.4
+numpy
 opencv-python
 pandas
 pillow
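
For context on the headline change: "numpy>=1.17.4" is a lower-bound version
specifier rather than an exact pin, and removing it lets pip resolve whichever
numpy release best fits the rest of the environment. The specifier forms side
by side:

    numpy>=1.17.4   # lower bound: any release at or above 1.17.4
    numpy==1.17.4   # exact pin (shown for comparison; not used here)
    numpy           # unconstrained: pip picks the newest compatible release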

[File 4 of 4: worldwide COVID-19 statistics scraper]

@@ -1,9 +1,9 @@
 #!/usr/bin/env python3
-'''
+"""
 Provide the current worldwide COVID-19 statistics.
 This data is being scrapped from 'https://www.worldometers.info/coronavirus/'.
-'''
+"""
 
 import requests
 from bs4 import BeautifulSoup
@@ -13,8 +13,8 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus")
     """
     Return a dict of current worldwide COVID-19 statistics
     """
-    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
-    keys = soup.findAll('h1')
+    soup = BeautifulSoup(requests.get(url).text, "html.parser")
+    keys = soup.findAll("h1")
     values = soup.findAll("div", {"class": "maincounter-number"})
     keys += soup.findAll("span", {"class": "panel-title"})
     values += soup.findAll("div", {"class": "number-table-main"})
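
The reformatted lines gather parallel lists of heading tags (keys) and counter
divs (values); presumably the rest of the function zips them into the returned
dict. A self-contained sketch of that pairing on toy HTML (the markup and the
figure are illustrative, not real data):

    from bs4 import BeautifulSoup

    html = "<h1>Coronavirus Cases:</h1><div class='maincounter-number'>18,626,093</div>"
    soup = BeautifulSoup(html, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    stats = {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
    print(stats)  # {'Coronavirus Cases:': '18,626,093'}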