# %%
import numpy as np
from numpy.testing import assert_allclose
from scipy.optimize import fmin_l_bfgs_b, check_grad
from sklearn.linear_model import Ridge
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

np.random.seed(0)
n_samples = 100
X, y = make_regression(n_samples=n_samples, n_features=300)
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Check that setting the sample weight to 0 for the first 10 samples
# is equivalent to removing them:
sw = 10 * np.ones(X_train.shape[0])
sw[:10] = 0.
alpha = 1e-3


def ridge_bfgs(X, y, sample_weight, alpha, X_test):
    """Fit weighted ridge regression with L-BFGS and predict on X_test."""

    def f(w):
        # Weighted ridge objective; the intercept (last entry of w)
        # is not penalized.
        coef, intercept = w[:-1], w[-1]
        return (
            0.5 * np.sum(sample_weight * (y - X @ coef - intercept) ** 2)
            + 0.5 * alpha * np.sum(coef ** 2)
        )

    def fprime(w):
        # Analytic gradient of f with respect to (coef, intercept).
        coef, intercept = w[:-1], w[-1]
        residual = y - X @ coef - intercept
        grad = np.empty(X.shape[1] + 1)
        grad[:-1] = -X.T @ (sample_weight * residual) + alpha * coef
        grad[-1] = -np.sum(sample_weight * residual)
        return grad

    dim = X.shape[1] + 1
    # Sanity check: the analytic gradient should match finite differences.
    print(check_grad(f, fprime, np.random.RandomState(0).randn(dim)))
    w0 = np.zeros(dim)
    w0[-1] = np.average(y, weights=sample_weight)  # warm-start the intercept
    w = fmin_l_bfgs_b(f, w0, fprime, iprint=10)[0]
    coef, intercept = w[:-1], w[-1]
    y_pred = X_test @ coef + intercept
    return y_pred
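
# For reference (annotation derived from f and fprime above): the
# objective being minimized is
#
#   L(w, b) = 0.5 * sum_i s_i * (y_i - x_i @ w - b) ** 2
#             + 0.5 * alpha * np.sum(w ** 2)
#
# with the intercept b left unpenalized, and its gradient is
#
#   dL/dw = -X.T @ (s * (y - X @ w - b)) + alpha * w
#   dL/db = -np.sum(s * (y - X @ w - b))
#
# Setting s_i = 0 removes sample i from both terms, and doubling s is
# the same as duplicating every row, which is what the checks below
# assert.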

# Zero weights should be equivalent to dropping the corresponding rows:
y_pred = ridge_bfgs(X_train, y_train, sw, alpha, X_test)
y_pred_trimmed = ridge_bfgs(X_train[10:], y_train[10:], sw[10:], alpha, X_test)
assert_allclose(
    y_pred,
    y_pred_trimmed,
    rtol=1e-5
)

# The L-BFGS solution should match scikit-learn's Ridge estimator:
y_pred_sklearn = Ridge(
    alpha=alpha, normalize=False,  # solver="svd",
).fit(
    X_train, y_train, sample_weight=sw
).predict(X_test)
assert_allclose(
    y_pred,
    y_pred_sklearn,
    rtol=1e-5
)

# Doubling all the weights should be equivalent to duplicating every sample:
y_pred_2x = ridge_bfgs(X_train, y_train, sw * 2, alpha, X_test)
y_pred_duplicated = ridge_bfgs(
    np.concatenate([X_train, X_train]),
    np.concatenate([y_train, y_train]),
    np.concatenate([sw, sw]),
    alpha, X_test)
assert_allclose(
    y_pred_2x,
    y_pred_duplicated,
    rtol=1e-5
)

# ... and should also match scikit-learn's Ridge with doubled weights:
y_pred_sklearn_2x = Ridge(
    alpha=alpha, normalize=False,  # solver="svd",
).fit(
    X_train, y_train, sample_weight=2 * sw
).predict(X_test)
assert_allclose(
    y_pred_2x,
    y_pred_sklearn_2x,
    rtol=1e-5
)
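
As an extra cross-check: since the intercept is unpenalized, its optimum is the weighted mean of the residual target, so the same estimate can be computed in closed form from the normal equations on weight-centered data. A minimal sketch, assuming the variables from the script above are still in scope and that L-BFGS converged tightly enough for the shared rtol=1e-5 tolerance (ridge_normal_eq is an illustrative name, not a library function):

# %%
def ridge_normal_eq(X, y, sample_weight, alpha, X_test):
    # Illustrative closed-form solver, not from scikit-learn.
    # Weighted centering; rows with zero weight drop out automatically.
    X_mean = np.average(X, axis=0, weights=sample_weight)
    y_mean = np.average(y, weights=sample_weight)
    Xc, yc = X - X_mean, y - y_mean
    XtW = Xc.T * sample_weight  # Xc^T @ diag(sample_weight), via broadcasting
    # Solve (Xc^T W Xc + alpha * I) @ coef = Xc^T W @ yc
    coef = np.linalg.solve(XtW @ Xc + alpha * np.eye(X.shape[1]), XtW @ yc)
    intercept = y_mean - X_mean @ coef
    return X_test @ coef + intercept

assert_allclose(
    y_pred,
    ridge_normal_eq(X_train, y_train, sw, alpha, X_test),
    rtol=1e-5
)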
On the 19426 branch, I get the same results (the `normalize=False` case is apparently not impacted by the PR).