Compare commits

3 Commits

Author:  Anamaria Miranda
SHA1:    0dfd6dacd7
Date:    2024-11-19 18:08:28 +01:00
Message: Merge acb955cb87 into f3f32ae3ca

Author:  pre-commit-ci[bot]
SHA1:    f3f32ae3ca
Date:    2024-11-18 22:07:12 +01:00
Message: [pre-commit.ci] pre-commit autoupdate (#12385)

         updates:
         - [github.com/astral-sh/ruff-pre-commit: v0.7.3 → v0.7.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.3...v0.7.4)

         Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

Author:  Anamaria Miranda
SHA1:    acb955cb87
Date:    2024-11-06 14:57:19 +01:00
Message: Added test to linear regression #9943
2 changed files with 23 additions and 1 deletion

View File

@@ -16,7 +16,7 @@ repos:
       - id: auto-walrus

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.7.3
+    rev: v0.7.4
     hooks:
       - id: ruff
       - id: ruff-format

View File

@@ -41,6 +41,14 @@ def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):
     :param theta : Feature vector (weight's for our model)
     ;param return : Updated Feature's, using
                     curr_features - alpha_ * gradient(w.r.t. feature)
+    >>> import numpy as np
+    >>> data_x = np.array([[1, 2], [3, 4]])
+    >>> data_y = np.array([5, 6])
+    >>> len_data = len(data_x)
+    >>> alpha = 0.01
+    >>> theta = np.array([0.1, 0.2])
+    >>> run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
+    array([0.196, 0.343])
     """
     n = len_data
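The expected output array([0.196, 0.343]) in the new doctest can be checked by hand. The snippet below is an independent verification, not part of the diff, and assumes one step of the standard batch gradient-descent update theta <- theta - (alpha / n) * ((theta @ X.T - y) @ X), consistent with the docstring's "curr_features - alpha_ * gradient(w.r.t. feature)":

import numpy as np

# Inputs taken from the new doctest
data_x = np.array([[1, 2], [3, 4]])
data_y = np.array([5, 6])
alpha, n = 0.01, 2
theta = np.array([0.1, 0.2])

# Assumed update rule: one step of batch gradient descent
residual = theta @ data_x.T - data_y   # [-4.5, -4.9]
gradient = residual @ data_x           # [-19.2, -28.6]
print(theta - (alpha / n) * gradient)  # [0.196 0.343]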
@@ -58,6 +66,12 @@ def sum_of_square_error(data_x, data_y, len_data, theta):
     :param len_data : len of the dataset
     :param theta : contains the feature vector
     :return : sum of square error computed from given feature's
+    Example:
+    >>> vc_x = np.array([[1.1], [2.1], [3.1]])
+    >>> vc_y = np.array([1.2, 2.2, 3.2])
+    >>> round(sum_of_square_error(vc_x, vc_y, 3, np.array([1])),3)
+    np.float64(0.005)
     """
     prod = np.dot(theta, data_x.transpose())
     prod -= data_y.transpose()
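Likewise, the expected np.float64(0.005) can be reproduced outside the diff. This sketch assumes the error is sum((theta @ X.T - y) ** 2) / (2 * len_data), which matches the prod computation visible in the context lines above and the value the doctest expects:

import numpy as np

vc_x = np.array([[1.1], [2.1], [3.1]])
vc_y = np.array([1.2, 2.2, 3.2])
theta = np.array([1])

# Assumed formula: squared residuals summed, divided by 2 * len_data
prod = theta @ vc_x.T - vc_y               # [-0.1, -0.1, -0.1]
error = np.sum(np.square(prod)) / (2 * 3)
print(round(error, 3))                     # 0.005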
@@ -93,6 +107,11 @@ def mean_absolute_error(predicted_y, original_y):
     :param predicted_y : contains the output of prediction (result vector)
     :param original_y : contains values of expected outcome
     :return : mean absolute error computed from given feature's
+    >>> predicted_y = [3, -0.5, 2, 7]
+    >>> original_y = [2.5, 0.0, 2, 8]
+    >>> mean_absolute_error(predicted_y, original_y)
+    0.5
     """
     total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
     return total / len(original_y)
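The mean_absolute_error example also checks out by hand: the absolute errors are |2.5 - 3| = 0.5, |0.0 - (-0.5)| = 0.5, |2 - 2| = 0 and |8 - 7| = 1, so the mean is (0.5 + 0.5 + 0 + 1) / 4 = 0.5, matching the expected doctest output.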
@@ -114,4 +133,7 @@ def main():
 if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
     main()
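With the last hunk applied, executing the file directly now runs all of the new >>> examples through doctest.testmod() before calling main(). The same examples can also be run without executing the module, for instance via Python's doctest CLI: python -m doctest -v <path-to-this-file> (the path is a placeholder, not part of the diff).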