Compare commits


13 Commits

Author              SHA1        Message                                                       Date
Harmanaya           6e8b5bf66e  Merge 4204bf6d28 into e3bd7721c8                              2024-11-19 00:04:57 +05:30
Christian Clauss    e3bd7721c8  validate_filenames.py Shebang python for Windows (#12371)    2024-11-15 14:59:14 +01:00
pre-commit-ci[bot]  4204bf6d28  [pre-commit.ci] auto fixes from pre-commit.com hooks         2024-10-22 18:59:34 +00:00
                                (for more information, see https://pre-commit.ci)
Harmanaya Sharma    52345d9013  Resolved ruff checks                                          2024-10-23 00:28:46 +05:30
pre-commit-ci[bot]  85020a76c2  [pre-commit.ci] auto fixes from pre-commit.com hooks         2024-10-22 18:51:21 +00:00
                                (for more information, see https://pre-commit.ci)
Harmanaya Sharma    5bf9b854b4  Resolved mypy checks                                          2024-10-23 00:20:29 +05:30
Harmanaya Sharma    8f1f091aa4  Resolved ruff checks                                          2024-10-23 00:14:37 +05:30
pre-commit-ci[bot]  a2d07af8c1  [pre-commit.ci] auto fixes from pre-commit.com hooks         2024-10-22 18:39:21 +00:00
                                (for more information, see https://pre-commit.ci)
Harmanaya Sharma    61945d03c6  Added doctests                                                2024-10-23 00:08:52 +05:30
Harmanaya Sharma    2dc60f475b  Resolved ruff checks                                          2024-10-22 23:58:50 +05:30
pre-commit-ci[bot]  861618ef11  [pre-commit.ci] auto fixes from pre-commit.com hooks         2024-10-22 18:19:51 +00:00
                                (for more information, see https://pre-commit.ci)
Harmanaya Sharma    e9ef03eadb  Added type hints and minor case improvements                  2024-10-22 23:49:07 +05:30
Harmanaya Sharma    2293d37599  Fix issue #12108: Added Ridge Regression to Machine Learning 2024-10-22 23:19:28 +05:30
2 changed files with 178 additions and 1 deletion
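In brief, the new module below implements ridge regression fitted by batch gradient descent: it minimizes the regularized cost J(theta) = (1 / (2m)) * sum((X.theta - y)^2) + (lambda / (2m)) * sum(theta^2) and applies the update theta -= alpha * gradient on every iteration. (This summary paraphrases the code that follows; alpha, lambda_, and iterations are the constructor parameters.)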

@@ -0,0 +1,177 @@
import numpy as np
import pandas as pd


class RidgeRegression:
def __init__(
self, alpha: float = 0.001, lambda_: float = 0.1, iterations: int = 1000
) -> None:
"""
Ridge Regression Constructor
:param alpha: Learning rate for gradient descent
:param lambda_: Regularization parameter (L2 regularization)
:param iterations: Number of iterations for gradient descent
"""
        self.alpha = alpha
        self.lambda_ = lambda_
        self.iterations = iterations
        self.theta: np.ndarray | None = None  # Learned weights, set by fit()
        # Training-set statistics, stored by fit() so that predict() and
        # compute_cost() can scale new data with the same transformation
        self.mean: np.ndarray | None = None
        self.std: np.ndarray | None = None

    def feature_scaling(
self, features: np.ndarray
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Normalize features to have mean 0 and standard deviation 1.
:param features: Input features, shape (m, n)
:return: Tuple containing:
- Scaled features
- Mean of each feature
- Standard deviation of each feature
Example:
>>> rr = RidgeRegression()
>>> features = np.array([[1, 2], [2, 3], [4, 6]])
>>> scaled_features, mean, std = rr.feature_scaling(features)
>>> np.allclose(scaled_features.mean(axis=0), 0)
True
>>> np.allclose(scaled_features.std(axis=0), 1)
True
"""
mean = np.mean(features, axis=0)
std = np.std(features, axis=0)
# Avoid division by zero for constant features (std = 0)
std[std == 0] = 1 # Set std=1 for constant features to avoid NaN
scaled_features = (features - mean) / std
        return scaled_features, mean, std

    def fit(self, features: np.ndarray, target: np.ndarray) -> None:
"""
Fit the Ridge Regression model to the training data.
:param features: Input features, shape (m, n)
:param target: Target values, shape (m,)
Example:
>>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
>>> features = np.array([[1, 2], [2, 3], [4, 6]])
>>> target = np.array([1, 2, 3])
>>> rr.fit(features, target)
>>> rr.theta is not None
True
"""
        features_scaled, mean, std = self.feature_scaling(features)
        self.mean, self.std = mean, std  # Remember the training statistics
        m, n = features_scaled.shape
        self.theta = np.zeros(n)  # Initialize weights to zeros

        for _ in range(self.iterations):
            predictions = features_scaled.dot(self.theta)
            error = predictions - target
            # Gradient of the ridge cost: (X^T (X.theta - y) + lambda * theta) / m
            gradient = (features_scaled.T.dot(error) + self.lambda_ * self.theta) / m
            self.theta -= self.alpha * gradient  # Gradient descent update

    def predict(self, features: np.ndarray) -> np.ndarray:
"""
Predict values using the trained model.
:param features: Input features, shape (m, n)
:return: Predicted values, shape (m,)
Example:
>>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
>>> features = np.array([[1, 2], [2, 3], [4, 6]])
>>> target = np.array([1, 2, 3])
>>> rr.fit(features, target)
>>> predictions = rr.predict(features)
>>> predictions.shape == target.shape
True
"""
        if self.theta is None:
            raise ValueError("Model is not trained yet. Call the `fit` method first.")
        # Scale with the statistics learned during training; re-deriving them
        # from the new data would apply a different transformation
        features_scaled = (features - self.mean) / self.std
        return features_scaled.dot(self.theta)

    def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
"""
Compute the cost function with regularization.
:param features: Input features, shape (m, n)
:param target: Target values, shape (m,)
:return: Computed cost
Example:
>>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
>>> features = np.array([[1, 2], [2, 3], [4, 6]])
>>> target = np.array([1, 2, 3])
>>> rr.fit(features, target)
>>> cost = rr.compute_cost(features, target)
>>> isinstance(cost, float)
True
"""
        if self.theta is None:
            raise ValueError("Model is not trained yet. Call the `fit` method first.")
        features_scaled = (features - self.mean) / self.std  # Use training statistics
        m = len(target)
        predictions = features_scaled.dot(self.theta)
        # Mean squared error plus the L2 penalty on the weights
        cost = (1 / (2 * m)) * np.sum((predictions - target) ** 2) + (
            self.lambda_ / (2 * m)
        ) * np.sum(self.theta**2)
        return float(cost)

    def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Compute Mean Absolute Error (MAE) between true and predicted values.
:param y_true: Actual target values, shape (m,)
:param y_pred: Predicted target values, shape (m,)
:return: MAE
Example:
>>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
>>> y_true = np.array([1, 2, 3])
>>> y_pred = np.array([1.1, 2.1, 2.9])
>>> mae = rr.mean_absolute_error(y_true, y_pred)
>>> isinstance(mae, float)
True
"""
        return float(np.mean(np.abs(y_true - y_pred)))


# Example usage
if __name__ == "__main__":
    # Load dataset
    data = pd.read_csv(
        "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/master/Week1/ADRvsRating.csv"
    )
    data_x = data[["Rating"]].to_numpy()  # Feature: Rating
    data_y = data["ADR"].to_numpy()  # Target: ADR
    data_y = (data_y - np.mean(data_y)) / np.std(data_y)  # Standardize the target

    # Add a bias (intercept) column of ones to the feature matrix
    data_x = np.c_[np.ones(data_x.shape[0]), data_x]

    # Initialize and train the Ridge Regression model
    model = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=1000)
    model.fit(data_x, data_y)

    # Predictions
    predictions = model.predict(data_x)

    # Results
    print("Optimized Weights:", model.theta)
    print("Cost:", model.compute_cost(data_x, data_y))
    print("Mean Absolute Error:", model.mean_absolute_error(data_y, predictions))

validate_filenames.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!python
 import os

 try:
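Note on the shebang change (an inference from the commit message, not part of the diff): a bare python shebang is one of the virtual commands the Windows py launcher recognizes, while the replaced #!/usr/bin/env python3 form is the POSIX convention; invoking the script explicitly, e.g. python validate_filenames.py, works regardless of which shebang is present.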