diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py
index 947f8692f..5d743a250 100644
--- a/machine_learning/frequent_pattern_growth.py
+++ b/machine_learning/frequent_pattern_growth.py
@@ -240,7 +240,7 @@ def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None:
         ascend_tree(leaf_node.parent, prefix_path)
 
 
-def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict:  # noqa: ARG001
+def find_prefix_path(_: frozenset, tree_node: TreeNode | None) -> dict:
     """
     Find the conditional pattern base for a given base pattern.
diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py
index 0bd9aa8b5..8308d3684 100644
--- a/machine_learning/loss_functions.py
+++ b/machine_learning/loss_functions.py
@@ -629,13 +629,15 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:
     return np.mean(loss)
 
 
-def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
+def kullback_leibler_divergence(
+    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-10
+) -> float:
     """
     Calculate the Kullback-Leibler divergence (KL divergence) loss between true
     labels and predicted probabilities.
 
-    KL divergence loss quantifies dissimilarity between true labels and predicted
-    probabilities. It's often used in training generative models.
+    KL divergence loss quantifies the dissimilarity between true labels and predicted
+    probabilities. It is often used in training generative models.
 
     KL = Σ(y_true * ln(y_true / y_pred))
@@ -649,6 +651,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
     >>> predicted_probs = np.array([0.3, 0.3, 0.4])
     >>> float(kullback_leibler_divergence(true_labels, predicted_probs))
     0.030478754035472025
+
     >>> true_labels = np.array([0.2, 0.3, 0.5])
     >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])
     >>> kullback_leibler_divergence(true_labels, predicted_probs)
@@ -659,7 +662,11 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
     if len(y_true) != len(y_pred):
         raise ValueError("Input arrays must have the same length.")
 
-    kl_loss = y_true * np.log(y_true / y_pred)
+    # clip predictions with the negligible epsilon to avoid log(0) or division by zero
+    y_pred = np.clip(y_pred, epsilon, None)
+
+    # calculate KL divergence only where y_true is not zero
+    kl_loss = np.where(y_true != 0, y_true * np.log(y_true / y_pred), 0.0)
+
     return np.sum(kl_loss)
diff --git a/machine_learning/ridge_regression/ridge_regression.py b/machine_learning/ridge_regression/ridge_regression.py
index b0908f9ef..1c2c13fa9 100644
--- a/machine_learning/ridge_regression/ridge_regression.py
+++ b/machine_learning/ridge_regression/ridge_regression.py
@@ -15,68 +15,68 @@ class RidgeRegression:
         self.theta: np.ndarray = None
 
     def feature_scaling(
-        self, x: np.ndarray
+        self, features: np.ndarray
     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-        mean = np.mean(x, axis=0)
-        std = np.std(x, axis=0)
+        mean = np.mean(features, axis=0)
+        std = np.std(features, axis=0)
 
         # avoid division by zero for constant features (std = 0)
         std[std == 0] = 1  # set std=1 for constant features to avoid NaN
 
-        x_scaled = (x - mean) / std
-        return x_scaled, mean, std
+        features_scaled = (features - mean) / std
+        return features_scaled, mean, std
 
-    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
-        x_scaled, mean, std = self.feature_scaling(x)
-        m, n = x_scaled.shape
+    def fit(self, features: np.ndarray, target: np.ndarray) -> None:
+        features_scaled, mean, std = self.feature_scaling(features)
+        m, n = features_scaled.shape
 
         self.theta = np.zeros(n)  # initializing weights to zeros
 
         for _ in range(self.num_iterations):
-            predictions = x_scaled.dot(self.theta)
-            error = predictions - y
+            predictions = features_scaled.dot(self.theta)
+            error = predictions - target
 
             # computing gradient with L2 regularization
             gradient = (
-                x_scaled.T.dot(error) + self.regularization_param * self.theta
+                features_scaled.T.dot(error) + self.regularization_param * self.theta
             ) / m
             self.theta -= self.alpha * gradient  # updating weights
 
-    def predict(self, x: np.ndarray) -> np.ndarray:
-        x_scaled, _, _ = self.feature_scaling(x)
-        return x_scaled.dot(self.theta)
+    def predict(self, features: np.ndarray) -> np.ndarray:
+        features_scaled, _, _ = self.feature_scaling(features)
+        return features_scaled.dot(self.theta)
 
-    def compute_cost(self, x: np.ndarray, y: np.ndarray) -> float:
-        x_scaled, _, _ = self.feature_scaling(x)
-        m = len(y)
+    def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
+        features_scaled, _, _ = self.feature_scaling(features)
+        m = len(target)
 
-        predictions = x_scaled.dot(self.theta)
-        cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (
+        predictions = features_scaled.dot(self.theta)
+        cost = (1 / (2 * m)) * np.sum((predictions - target) ** 2) + (
             self.regularization_param / (2 * m)
         ) * np.sum(self.theta**2)
         return cost
 
-    def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
-        return np.mean(np.abs(y_true - y_pred))
+    def mean_absolute_error(self, target: np.ndarray, predictions: np.ndarray) -> float:
+        return np.mean(np.abs(target - predictions))
 
 
 # Example usage
 if __name__ == "__main__":
     data = pd.read_csv("ADRvsRating.csv")
-    x = data[["Rating"]].to_numpy()
-    y = data["ADR"].to_numpy()
-    y = (y - np.mean(y)) / np.std(y)
+    features_matrix = data[["Rating"]].to_numpy()
+    target = data["ADR"].to_numpy()
+    target = (target - np.mean(target)) / np.std(target)
 
     # added bias term to the feature matrix
-    x = np.c_[np.ones(x.shape[0]), x]
+    features_matrix = np.c_[np.ones(features_matrix.shape[0]), features_matrix]
 
     # initialize and train the ridge regression model
     model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
-    model.fit(x, y)
+    model.fit(features_matrix, target)
 
     # predictions
-    predictions = model.predict(x)
+    predictions = model.predict(features_matrix)
 
     # results
     print("Optimized Weights:", model.theta)
-    print("Cost:", model.compute_cost(x, y))
-    print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))
+    print("Cost:", model.compute_cost(features_matrix, target))
+    print("Mean Absolute Error:", model.mean_absolute_error(target, predictions))
diff --git a/machine_learning/ridge_regression/test_ridge_regression.py b/machine_learning/ridge_regression/test_ridge_regression.py
index 71d485bfa..6bf6d6024 100644
--- a/machine_learning/ridge_regression/test_ridge_regression.py
+++ b/machine_learning/ridge_regression/test_ridge_regression.py
@@ -12,7 +12,10 @@ To run these tests, use the following command:
 """
 
 import numpy as np  # noqa: F401
-from ridge_regression import RidgeRegression  # noqa: F401
+
+from machine_learning.ridge_regression.ridge_regression import (
+    RidgeRegression,  # noqa: F401
+)
 
 
 def test_feature_scaling():
@@ -20,9 +23,9 @@ def test_feature_scaling():
     Tests the feature_scaling function of RidgeRegression.
     --------
     >>> model = RidgeRegression()
-    >>> X = np.array([[1, 2], [2, 3], [3, 4]])
-    >>> X_scaled, mean, std = model.feature_scaling(X)
-    >>> np.round(X_scaled, 2)
+    >>> features = np.array([[1, 2], [2, 3], [3, 4]])
+    >>> features_scaled, mean, std = model.feature_scaling(features)
+    >>> np.round(features_scaled, 2)
     array([[-1.22, -1.22],
            [ 0.  ,  0.  ],
            [ 1.22,  1.22]])
@@ -40,14 +43,14 @@ def test_fit():
     >>> model = RidgeRegression(alpha=0.01,
     ...                         regularization_param=0.1,
     ...                         num_iterations=1000)
-    >>> X = np.array([[1], [2], [3]])
-    >>> y = np.array([2, 3, 4])
+    >>> features = np.array([[1], [2], [3]])
+    >>> target = np.array([2, 3, 4])
 
     # Adding a bias term
-    >>> X = np.c_[np.ones(X.shape[0]), X]
+    >>> features = np.c_[np.ones(features.shape[0]), features]
 
     # Fit the model
-    >>> model.fit(X, y)
+    >>> model.fit(features, target)
 
     # Check if the weights have been updated
     >>> np.round(model.theta, decimals=2)
@@ -62,17 +65,17 @@ def test_predict():
     >>> model = RidgeRegression(alpha=0.01,
     ...                         regularization_param=0.1,
     ...                         num_iterations=1000)
-    >>> X = np.array([[1], [2], [3]])
-    >>> y = np.array([2, 3, 4])
+    >>> features = np.array([[1], [2], [3]])
+    >>> target = np.array([2, 3, 4])
 
     # Adding a bias term
-    >>> X = np.c_[np.ones(X.shape[0]), X]
+    >>> features = np.c_[np.ones(features.shape[0]), features]
 
     # Fit the model
-    >>> model.fit(X, y)
+    >>> model.fit(features, target)
 
     # Predict with the model
-    >>> predictions = model.predict(X)
+    >>> predictions = model.predict(features)
     >>> np.round(predictions, decimals=2)
     array([-0.97,  0.  ,  0.97])
     """
@@ -83,9 +86,9 @@ def test_mean_absolute_error():
     Tests the mean_absolute_error function of RidgeRegression
     --------
     >>> model = RidgeRegression()
-    >>> y_true = np.array([2, 3, 4])
-    >>> y_pred = np.array([2.1, 3.0, 3.9])
-    >>> mae = model.mean_absolute_error(y_true, y_pred)
+    >>> target = np.array([2, 3, 4])
+    >>> predictions = np.array([2.1, 3.0, 3.9])
+    >>> mae = model.mean_absolute_error(target, predictions)
     >>> float(np.round(mae, 2))
     0.07
     """
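
A quick way to sanity-check the loss_functions.py change: the sketch below mirrors the patched kullback_leibler_divergence in isolation. It is not the library code; the sample distributions are made up, and an np.errstate guard is added here because np.where evaluates both branches, so the masked 0 * log(0) entries would otherwise raise RuntimeWarnings even though the final sum is correct.

```python
import numpy as np


def kl_divergence(
    y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-10
) -> float:
    """Standalone sketch of the patched kullback_leibler_divergence."""
    if len(y_true) != len(y_pred):
        raise ValueError("Input arrays must have the same length.")
    # clip predictions so the log never sees a zero denominator
    y_pred = np.clip(y_pred, epsilon, None)
    # positions with y_true == 0 contribute 0.0, per the convention 0 * ln(0) = 0
    with np.errstate(divide="ignore", invalid="ignore"):
        kl_loss = np.where(y_true != 0, y_true * np.log(y_true / y_pred), 0.0)
    return float(np.sum(kl_loss))


# a distribution containing a zero no longer yields nan
print(kl_divergence(np.array([0.0, 0.5, 0.5]), np.array([0.2, 0.4, 0.4])))
```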
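For reviewers checking fit and compute_cost against the math: with m samples, feature matrix X, targets y, and regularization strength λ (the regularization_param argument), the renamed code still implements

```latex
J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( x_i^{\top}\theta - y_i \right)^2
          + \frac{\lambda}{2m} \sum_{j} \theta_j^{2},
\qquad
\nabla_{\theta} J = \frac{1}{m} \left( X^{\top}(X\theta - y) + \lambda\,\theta \right)
```

Note that the penalty sums over all weights, so the bias column added in the example is regularized too; textbook ridge usually exempts the intercept, but that is pre-existing behavior, not something this diff changes.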
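Since ADRvsRating.csv is not included in the diff context, a self-contained smoke test of the renamed API might look like the sketch below. The import path follows the one introduced in test_ridge_regression.py and assumes the repository root is on sys.path; the data is synthetic, standing in for the CSV.

```python
import numpy as np

from machine_learning.ridge_regression.ridge_regression import RidgeRegression

# synthetic stand-in for ADRvsRating.csv: one feature, linear target plus noise
rng = np.random.default_rng(42)
features_matrix = rng.normal(size=(100, 1))
target = 2.0 * features_matrix[:, 0] + rng.normal(scale=0.1, size=100)
target = (target - np.mean(target)) / np.std(target)

# add the bias column before fitting, as in the fixed example usage
features_matrix = np.c_[np.ones(features_matrix.shape[0]), features_matrix]

model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
model.fit(features_matrix, target)

predictions = model.predict(features_matrix)
print("Optimized Weights:", model.theta)
print("Cost:", model.compute_cost(features_matrix, target))
print("Mean Absolute Error:", model.mean_absolute_error(target, predictions))
```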