mirror of https://github.com/TheAlgorithms/Python.git
synced 2024-11-24 05:21:09 +00:00

descriptive names

parent dcf47d4821
commit 0ea341a18b
@@ -15,68 +15,68 @@ class RidgeRegression:
         self.theta: np.ndarray = None
 
     def feature_scaling(
-        self, x: np.ndarray
+        self, features: np.ndarray
     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-        mean = np.mean(x, axis=0)
-        std = np.std(x, axis=0)
+        mean = np.mean(features, axis=0)
+        std = np.std(features, axis=0)
 
         # avoid division by zero for constant features (std = 0)
         std[std == 0] = 1  # set std=1 for constant features to avoid NaN
 
-        x_scaled = (x - mean) / std
-        return x_scaled, mean, std
+        features_scaled = (features - mean) / std
+        return features_scaled, mean, std
 
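Review note: the scaling above is a plain z-score using the population standard deviation, so it can be cross-checked against scikit-learn's StandardScaler. A minimal sketch, assuming scikit-learn is installed; it is not part of this commit:

import numpy as np
from sklearn.preprocessing import StandardScaler

features = np.array([[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]])

# Same transform as feature_scaling: subtract the column mean, divide by the column std.
manual = (features - features.mean(axis=0)) / features.std(axis=0)
sklearn_scaled = StandardScaler().fit_transform(features)

print(np.allclose(manual, sklearn_scaled))  # True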
-    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
-        x_scaled, mean, std = self.feature_scaling(x)
-        m, n = x_scaled.shape
+    def fit(self, features: np.ndarray, target: np.ndarray) -> None:
+        features_scaled, mean, std = self.feature_scaling(features)
+        m, n = features_scaled.shape
         self.theta = np.zeros(n)  # initializing weights to zeros
 
         for _ in range(self.num_iterations):
-            predictions = x_scaled.dot(self.theta)
-            error = predictions - y
+            predictions = features_scaled.dot(self.theta)
+            error = predictions - target
 
             # computing gradient with L2 regularization
             gradient = (
-                x_scaled.T.dot(error) + self.regularization_param * self.theta
+                features_scaled.T.dot(error) + self.regularization_param * self.theta
             ) / m
             self.theta -= self.alpha * gradient  # updating weights
 
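Review note: with the gradient above, fit() is descending J(theta) = (1/(2m)) * sum((X theta - y)**2) + (lam/(2m)) * sum(theta**2), whose minimizer solves (X^T X + lam * I) theta = X^T y. A small cross-check sketch; the helper name is mine, and the regularization covers every weight, matching the gradient in fit():

import numpy as np


def ridge_closed_form(x_scaled: np.ndarray, y: np.ndarray, lam: float) -> np.ndarray:
    """Closed-form ridge weights: solve (X^T X + lam * I) theta = X^T y."""
    n = x_scaled.shape[1]
    return np.linalg.solve(x_scaled.T @ x_scaled + lam * np.eye(n), x_scaled.T @ y)

With a small enough alpha and enough iterations, model.theta computed on the scaled features should approach this value.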
-    def predict(self, x: np.ndarray) -> np.ndarray:
-        x_scaled, _, _ = self.feature_scaling(x)
-        return x_scaled.dot(self.theta)
+    def predict(self, features: np.ndarray) -> np.ndarray:
+        features_scaled, _, _ = self.feature_scaling(features)
+        return features_scaled.dot(self.theta)
 
-    def compute_cost(self, x: np.ndarray, y: np.ndarray) -> float:
-        x_scaled, _, _ = self.feature_scaling(x)
-        m = len(y)
+    def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
+        features_scaled, _, _ = self.feature_scaling(features)
+        m = len(target)
 
-        predictions = x_scaled.dot(self.theta)
-        cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (
+        predictions = features_scaled.dot(self.theta)
+        cost = (1 / (2 * m)) * np.sum((predictions - target) ** 2) + (
             self.regularization_param / (2 * m)
         ) * np.sum(self.theta**2)
         return cost
 
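Review note: compute_cost evaluates J(theta) = (1/(2m)) * sum((predictions - target)**2) + (lam/(2m)) * sum(theta**2). A standalone numeric check with toy values chosen here for illustration (not taken from the repository); the residuals are zero, so only the regularization term remains:

import numpy as np

x_scaled = np.array([[1.0, -1.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([1.0, 2.0, 3.0])
theta = np.array([2.0, 1.0])  # fits the toy data exactly, so residuals are zero
lam, m = 0.1, len(y)

residual = x_scaled.dot(theta) - y
cost = (1 / (2 * m)) * np.sum(residual**2) + (lam / (2 * m)) * np.sum(theta**2)
print(round(cost, 4))  # 0.0833, i.e. (0.1 / 6) * (2**2 + 1**2)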
-    def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
-        return np.mean(np.abs(y_true - y_pred))
+    def mean_absolute_error(self, target: np.ndarray, predictions: np.ndarray) -> float:
+        return np.mean(np.abs(target - predictions))
 
 
 # Example usage
 if __name__ == "__main__":
     data = pd.read_csv("ADRvsRating.csv")
-    x = data[["Rating"]].to_numpy()
-    y = data["ADR"].to_numpy()
-    y = (y - np.mean(y)) / np.std(y)
+    features_matrix = data[["Rating"]].to_numpy()
+    target = data["ADR"].to_numpy()
+    target = (target - np.mean(target)) / np.std(target)
 
     # added bias term to the feature matrix
-    x = np.c_[np.ones(x.shape[0]), x]
+    features_matrix = np.c_[np.ones(features_matrix.shape[0]), features_matrix]
 
     # initialize and train the ridge regression model
     model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
-    model.fit(x, y)
+    model.fit(features_matrix, target)
 
     # predictions
-    predictions = model.predict(x)
+    predictions = model.predict(features_matrix)
 
     # results
     print("Optimized Weights:", model.theta)
-    print("Cost:", model.compute_cost(x, y))
-    print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))
+    print("Cost:", model.compute_cost(features_matrix, target))
+    print("Mean Absolute Error:", model.mean_absolute_error(target, predictions))
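Review note: the __main__ block only runs when ADRvsRating.csv with "Rating" and "ADR" columns is on disk. For a self-contained smoke test of the renamed API, synthetic data works just as well; everything below (seed, coefficients, sample size) is invented for illustration and is not part of the commit, and it assumes RidgeRegression is importable from the module changed above:

import numpy as np

rng = np.random.default_rng(42)
rating = rng.normal(1.0, 0.2, size=(200, 1))             # stand-in for the "Rating" column
adr = 80.0 * rating.ravel() + rng.normal(0.0, 5.0, 200)  # stand-in for the "ADR" column

target = (adr - adr.mean()) / adr.std()
features_matrix = np.c_[np.ones(rating.shape[0]), rating]  # bias column + feature

model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
model.fit(features_matrix, target)
print("Optimized Weights:", model.theta)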
@@ -23,9 +23,9 @@ def test_feature_scaling():
     Tests the feature_scaling function of RidgeRegression.
     --------
     >>> model = RidgeRegression()
-    >>> X = np.array([[1, 2], [2, 3], [3, 4]])
-    >>> X_scaled, mean, std = model.feature_scaling(X)
-    >>> np.round(X_scaled, 2)
+    >>> features = np.array([[1, 2], [2, 3], [3, 4]])
+    >>> features_scaled, mean, std = model.feature_scaling(features)
+    >>> np.round(features_scaled, 2)
     array([[-1.22, -1.22],
            [ 0.  ,  0.  ],
            [ 1.22,  1.22]])
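Review note: the expected -1.22 / 1.22 values follow from the population standard deviation used by np.std: for the column [1, 2, 3], std = sqrt(2/3) ≈ 0.816 and (1 - 2) / 0.816 ≈ -1.22. A one-line standalone check, not part of the test file:

import numpy as np

col = np.array([1.0, 2.0, 3.0])
print(np.round((col - col.mean()) / col.std(), 2))  # [-1.22  0.    1.22]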
@@ -43,14 +43,14 @@ def test_fit():
     >>> model = RidgeRegression(alpha=0.01,
     ...                         regularization_param=0.1,
     ...                         num_iterations=1000)
-    >>> X = np.array([[1], [2], [3]])
-    >>> y = np.array([2, 3, 4])
+    >>> features = np.array([[1], [2], [3]])
+    >>> target = np.array([2, 3, 4])
 
     # Adding a bias term
-    >>> X = np.c_[np.ones(X.shape[0]), X]
+    >>> features = np.c_[np.ones(features.shape[0]), features]
 
     # Fit the model
-    >>> model.fit(X, y)
+    >>> model.fit(features, target)
 
     # Check if the weights have been updated
     >>> np.round(model.theta, decimals=2)
@@ -65,17 +65,17 @@ def test_predict():
     >>> model = RidgeRegression(alpha=0.01,
     ...                         regularization_param=0.1,
     ...                         num_iterations=1000)
-    >>> X = np.array([[1], [2], [3]])
-    >>> y = np.array([2, 3, 4])
+    >>> features = np.array([[1], [2], [3]])
+    >>> target = np.array([2, 3, 4])
 
     # Adding a bias term
-    >>> X = np.c_[np.ones(X.shape[0]), X]
+    >>> features = np.c_[np.ones(features.shape[0]), features]
 
     # Fit the model
-    >>> model.fit(X, y)
+    >>> model.fit(features, target)
 
     # Predict with the model
-    >>> predictions = model.predict(X)
+    >>> predictions = model.predict(features)
     >>> np.round(predictions, decimals=2)
     array([-0.97,  0.  ,  0.97])
     """
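Review note: the expected output is centered on zero rather than on the target mean of 3 because feature_scaling maps the constant bias column to all zeros (its mean is subtracted and its std is forced to 1), so the fitted model has no effective intercept. A standalone arithmetic check of the expected values, using the single-feature ridge solution that fit() converges toward (my own derivation, not part of the test file):

import numpy as np

s = np.array([-1.0, 0.0, 1.0]) * np.sqrt(1.5)  # scaled feature column for [1, 2, 3]
y = np.array([2.0, 3.0, 4.0])
lam = 0.1

theta1 = s.dot(y) / (s.dot(s) + lam)  # ridge slope with no intercept
print(np.round(theta1 * s, 2))        # [-0.97  0.    0.97]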
@@ -86,9 +86,9 @@ def test_mean_absolute_error():
     Tests the mean_absolute_error function of RidgeRegression
     --------
     >>> model = RidgeRegression()
-    >>> y_true = np.array([2, 3, 4])
-    >>> y_pred = np.array([2.1, 3.0, 3.9])
-    >>> mae = model.mean_absolute_error(y_true, y_pred)
+    >>> target = np.array([2, 3, 4])
+    >>> predictions = np.array([2.1, 3.0, 3.9])
+    >>> mae = model.mean_absolute_error(target, predictions)
     >>> float(np.round(mae, 2))
     0.07
     """
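Review note: the expected 0.07 is simply mean(|[0.1, 0.0, 0.1]|) ≈ 0.0667 rounded to two decimals. To run all of these renamed doctests locally, the standard doctest entry point is enough; the guard below is the usual pattern and may already exist in the test file (it is not visible in this diff):

if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # executes every >>> example in the module's docstrings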