mirror of https://github.com/TheAlgorithms/Python.git, synced 2024-11-23 21:11:08 +00:00

Reduce the complexity of other/scoring_algorithm.py (#8045)

* Increase the --max-complexity threshold in the file .flake8

This commit is contained in:
parent 069a14b1c5
commit ee778128bd
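
For orientation before the diff: the commit splits the old single procentual_proximity function into three helpers (get_data, calculate_each_score, generate_final_scores) while keeping the public entry point and its doctest unchanged. Below is a minimal usage sketch based on that doctest; the import path is an assumption, and the columns are read as the vehicles example (price, mileage, registration year) that the hunk context line "Thus the weights for each column are as follows:" refers to.

# Minimal usage sketch; assumes other/scoring_algorithm.py is importable as a module.
from scoring_algorithm import procentual_proximity

vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
weights = [0, 0, 1]  # 0: lower is better (price, mileage); 1: higher is better (year)

scored = procentual_proximity(vehicles, weights)
# Each row gains its total score as a trailing element, per the doctest:
# [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
best = max(scored, key=lambda row: row[-1])  # the first vehicle, with score 2.0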
@@ -23,29 +23,29 @@ Thus the weights for each column are as follows:
 """
 
 
-def procentual_proximity(
-    source_data: list[list[float]], weights: list[int]
-) -> list[list[float]]:
+def get_data(source_data: list[list[float]]) -> list[list[float]]:
     """
-    weights - int list
-    possible values - 0 / 1
-    0 if lower values have higher weight in the data set
-    1 if higher values have higher weight in the data set
-
-    >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
-    [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
+    >>> get_data([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]])
+    [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0], [2012.0, 2015.0, 2011.0]]
     """
-
-    # getting data
     data_lists: list[list[float]] = []
     for data in source_data:
         for i, el in enumerate(data):
             if len(data_lists) < i + 1:
                 data_lists.append([])
             data_lists[i].append(float(el))
+    return data_lists
+
+
+def calculate_each_score(
+    data_lists: list[list[float]], weights: list[int]
+) -> list[list[float]]:
+    """
+    >>> calculate_each_score([[20, 23, 22], [60, 90, 50], [2012, 2015, 2011]],
+    ... [0, 0, 1])
+    [[1.0, 0.0, 0.33333333333333337], [0.75, 0.0, 1.0], [0.25, 1.0, 0.0]]
+    """
     score_lists: list[list[float]] = []
     # calculating each score
     for dlist, weight in zip(data_lists, weights):
         mind = min(dlist)
         maxd = max(dlist)
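
To hand-check the scoring that calculate_each_score performs (the per-weight branches sit in the unchanged lines collapsed between the two hunks): each column is min-max normalised, and a weight of 0 flips the scale so that lower raw values score higher. The first column of the doctest above reproduces as follows.

# Hand-check of the weight-0 case on the price column from the doctest above.
# normalised = (value - min) / (max - min); weight 0 flips it to 1 - normalised.
prices = [20.0, 23.0, 22.0]
mind, maxd = min(prices), max(prices)
scores = [1 - (p - mind) / (maxd - mind) for p in prices]
assert scores == [1.0, 0.0, 0.33333333333333337]  # matches the doctest output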
@@ -72,14 +72,43 @@ def procentual_proximity(
 
         score_lists.append(score)
 
+    return score_lists
+
+
+def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
+    """
+    >>> generate_final_scores([[1.0, 0.0, 0.33333333333333337],
+    ... [0.75, 0.0, 1.0],
+    ... [0.25, 1.0, 0.0]])
+    [2.0, 1.0, 1.3333333333333335]
+    """
     # initialize final scores
     final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
 
     # generate final scores
     for slist in score_lists:
         for j, ele in enumerate(slist):
             final_scores[j] = final_scores[j] + ele
 
+    return final_scores
+
+
+def procentual_proximity(
+    source_data: list[list[float]], weights: list[int]
+) -> list[list[float]]:
+    """
+    weights - int list
+    possible values - 0 / 1
+    0 if lower values have higher weight in the data set
+    1 if higher values have higher weight in the data set
+
+    >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
+    [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
+    """
+    data_lists = get_data(source_data)
+    score_lists = calculate_each_score(data_lists, weights)
+    final_scores = generate_final_scores(score_lists)
+
     # append scores to source data
     for i, ele in enumerate(final_scores):
         source_data[i].append(ele)
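
The final summation is equally easy to sanity-check: generate_final_scores adds the per-column scores position-wise, which a transpose-and-sum sketch reproduces from the doctest values above.

# Position-wise sum over score lists, reproducing the generate_final_scores doctest.
score_lists = [[1.0, 0.0, 0.33333333333333337], [0.75, 0.0, 1.0], [0.25, 1.0, 0.0]]
totals = [sum(col) for col in zip(*score_lists)]
assert totals == [2.0, 1.0, 1.3333333333333335]

Since every function in the refactor carries a doctest, the whole change can be verified with: python3 -m doctest other/scoring_algorithm.py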