Compare commits

...

7 Commits

Author SHA1 Message Date
Maneesh
21de9f9fec
Merge e23173e6a3 into f3f32ae3ca 2024-11-22 13:57:19 +01:00
pre-commit-ci[bot]
f3f32ae3ca
[pre-commit.ci] pre-commit autoupdate (#12385)
updates:
- [github.com/astral-sh/ruff-pre-commit: v0.7.3 → v0.7.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.3...v0.7.4)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
2024-11-18 22:07:12 +01:00
Christian Clauss
e3bd7721c8
validate_filenames.py Shebang python for Windows (#12371) 2024-11-15 14:59:14 +01:00
pre-commit-ci[bot]
e3f3d668be
[pre-commit.ci] pre-commit autoupdate (#12370)
* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.7.2 → v0.7.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.2...v0.7.3)
- [github.com/abravalheri/validate-pyproject: v0.22 → v0.23](https://github.com/abravalheri/validate-pyproject/compare/v0.22...v0.23)

* Update sudoku_solver.py

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Christian Clauss <cclauss@me.com>
2024-11-11 21:05:50 +01:00
pre-commit-ci[bot]
e23173e6a3 [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
2024-10-16 03:49:30 +00:00
Maneeshbhaskarpulidindi
b91bc91ee5 ridge_regression 2024-10-16 08:55:57 +05:30
Maneeshbhaskarpulidindi
07db6e01b5 Ridge Regression 2024-10-16 08:10:31 +05:30
4 changed files with 129 additions and 4 deletions

.pre-commit-config.yaml

@@ -16,7 +16,7 @@ repos:
       - id: auto-walrus
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.7.2
+    rev: v0.7.4
     hooks:
       - id: ruff
       - id: ruff-format
@@ -42,7 +42,7 @@ repos:
         pass_filenames: false
   - repo: https://github.com/abravalheri/validate-pyproject
-    rev: v0.22
+    rev: v0.23
     hooks:
       - id: validate-pyproject

sudoku_solver.py

@@ -172,7 +172,7 @@ def solved(values):

 def from_file(filename, sep="\n"):
     "Parse a file into a list of strings, separated by sep."
-    return open(filename).read().strip().split(sep)  # noqa: SIM115
+    return open(filename).read().strip().split(sep)


 def random_puzzle(assignments=17):
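In that hunk, the `# noqa: SIM115` suppression is dropped. SIM115 is ruff's open-file-with-context-handler rule, which flags `open()` calls not managed by a `with` block; the updated ruff apparently no longer fires on this immediately-consumed call. For comparison, a minimal sketch of the form SIM115 normally asks for (illustrative only, not what the commit adopted):

def from_file(filename, sep="\n"):
    "Parse a file into a list of strings, separated by sep."
    # The with-block closes the file deterministically, satisfying SIM115.
    with open(filename) as file:
        return file.read().strip().split(sep)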

ridge_regression.py

@@ -0,0 +1,125 @@
import numpy as np
import requests


def collect_dataset():
    """Collect dataset of CSGO
    The dataset contains ADR vs Rating of a Player
    :return : dataset obtained from the link, as matrix
    """
    response = requests.get(
        "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
        "master/Week1/ADRvsRating.csv",
        timeout=10,
    )
    lines = response.text.splitlines()
    data = []
    for item in lines:
        item = item.split(",")
        data.append(item)
    data.pop(0)  # This is for removing the labels from the list
    dataset = np.matrix(data)
    return dataset


def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta, lambda_reg):
    """Run steep gradient descent and updates the Feature vector accordingly
    :param data_x : contains the dataset
    :param data_y : contains the output associated with each data-entry
    :param len_data : length of the data
    :param alpha : Learning rate of the model
    :param theta : Feature vector (weights for our model)
    :param lambda_reg : Regularization parameter
    :return : Updated Features using
        curr_features - alpha_ * gradient(w.r.t. feature)
    """
    n = len_data
    prod = np.dot(theta, data_x.transpose())
    prod -= data_y.transpose()
    sum_grad = np.dot(prod, data_x)
    # Add regularization to the gradient
    theta_regularized = np.copy(theta)
    theta_regularized[0, 0] = 0  # Don't regularize the bias term
    sum_grad += lambda_reg * theta_regularized
    theta = theta - (alpha / n) * sum_grad
    return theta


def sum_of_square_error(data_x, data_y, len_data, theta, lambda_reg):
    """Return sum of square error for error calculation
    :param data_x : contains our dataset
    :param data_y : contains the output (result vector)
    :param len_data : len of the dataset
    :param theta : contains the feature vector
    :param lambda_reg : Regularization parameter
    :return : sum of square error computed from given features
    """
    prod = np.dot(theta, data_x.transpose())
    prod -= data_y.transpose()
    sum_elem = np.sum(np.square(prod))
    # Add regularization to the cost function; don't regularize the bias term
    regularization_term = lambda_reg * np.sum(np.square(theta[:, 1:]))
    error = (sum_elem / (2 * len_data)) + (regularization_term / (2 * len_data))
    return error


def run_ridge_regression(data_x, data_y, lambda_reg=1.0):
    """Implement Ridge Regression over the dataset
    :param data_x : contains our dataset
    :param data_y : contains the output (result vector)
    :param lambda_reg : Regularization parameter
    :return : feature for line of best fit (Feature vector)
    """
    iterations = 100000
    alpha = 0.0001550
    no_features = data_x.shape[1]
    len_data = data_x.shape[0]
    theta = np.zeros((1, no_features))
    for i in range(iterations):
        theta = run_steep_gradient_descent(
            data_x, data_y, len_data, alpha, theta, lambda_reg
        )
        error = sum_of_square_error(data_x, data_y, len_data, theta, lambda_reg)
        print(f"At Iteration {i + 1} - Error is {error:.5f}")
    return theta


def mean_absolute_error(predicted_y, original_y):
    """Return mean absolute error for error calculation
    :param predicted_y : contains the output of prediction (result vector)
    :param original_y : contains values of expected outcome
    :return : mean absolute error computed from given features
    """
    total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
    return total / len(original_y)


def main():
    """Driver function"""
    data = collect_dataset()
    len_data = data.shape[0]
    data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)
    data_y = data[:, -1].astype(float)
    lambda_reg = 1.0  # Set your desired regularization parameter
    theta = run_ridge_regression(data_x, data_y, lambda_reg)
    len_result = theta.shape[1]
    print("Resultant Feature vector : ")
    for i in range(len_result):
        print(f"{theta[0, i]:.5f}")


if __name__ == "__main__":
    main()
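
As a sanity check, the gradient-descent loop above should converge to the closed-form ridge estimator theta = (X^T X + lambda * I)^(-1) X^T y, where the identity's bias entry is zeroed so the intercept stays unpenalized, mirroring `theta_regularized[0, 0] = 0` in the code. A minimal sketch on synthetic data (hypothetical, not part of the commit; it sidesteps the CSGO download):

import numpy as np

rng = np.random.default_rng(0)
n, lambda_reg = 200, 1.0
x = np.c_[np.ones(n), rng.normal(size=n)]  # bias column plus one feature
y = (3.0 + 2.0 * x[:, 1] + rng.normal(scale=0.1, size=n)).reshape(-1, 1)

# Closed-form ridge solution; zero the bias entry of the identity so the
# intercept is not regularized, matching theta_regularized[0, 0] = 0 above.
identity = np.eye(x.shape[1])
identity[0, 0] = 0
closed_form = np.linalg.solve(x.T @ x + lambda_reg * identity, x.T @ y)

# Same update rule as run_steep_gradient_descent, written with plain arrays.
theta = np.zeros((1, x.shape[1]))
for _ in range(100_000):
    grad = (theta @ x.T - y.T) @ x  # gradient of the squared-error term
    reg = lambda_reg * theta
    reg[0, 0] = 0  # bias term is not penalized
    theta -= (0.0001550 / n) * (grad + reg)

print(np.allclose(theta.ravel(), closed_form.ravel(), atol=1e-3))  # expect: True

Both paths minimize the same regularized least-squares objective, so the two weight vectors should agree to within the tolerance.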

validate_filenames.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!python
 import os

 try: