Skip to content

Commit

Permalink
Adds weights
Browse files Browse the repository at this point in the history
  • Loading branch information
PTNobel committed Feb 6, 2025
1 parent b630377 commit 2f18ac8
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 9 deletions.
8 changes: 4 additions & 4 deletions randalo/adelie_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ def set_index(self, idx):
self.index = idx
self.ra_lmda.value = self.state.lmda_path[idx]

def adelie_state_to_jacobian(y, state, adelie_state):
def adelie_state_to_jacobian(y, weights, state, adelie_state):
n, p = state.X.shape
G, = state.groups.shape
L, = state.lmda_path.shape
Expand All @@ -112,7 +112,7 @@ def adelie_state_to_jacobian(y, state, adelie_state):
ell_2_2_term = (1 - state.alpha) / 2 * ra.SquareRegularizer(slice(None, -1))
reg = adelie_state.ra_lmda * (ell_1_term + ell_2_2_term)

loss = ra.MSELoss()
loss = ra.MSELoss(weights)
J = ra.Jacobian(
y,
AdelieOperator(state.X, state.intercept),
Expand Down Expand Up @@ -161,10 +161,10 @@ def get_alo_for_sweep_v2(y, state, risk_fun, step=1):

return state.lmda_path[:L:step], output, times, r2

def get_alo_for_sweep(y, state, risk_fun, step=1):
def get_alo_for_sweep(y, state, risk_fun, weights, step=1):
L, _ = state.betas.shape
adelie_state = AdelieState(state)
loss, J = adelie_state_to_jacobian(y, state, adelie_state)
loss, J = adelie_state_to_jacobian(y, weights, state, adelie_state)
y_hat = ad.diagnostic.predict(state.X, state.betas, state.intercepts)

lmda = state.lmda_path[:L:step]
Expand Down
9 changes: 7 additions & 2 deletions randalo/modeling_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,8 +293,13 @@ def to_cvxpy(self, y, z):


class MSELoss(Loss):
    """Mean-squared-error loss with optional per-sample weights.

    With ``weights=None`` this reduces to the plain (unweighted) squared
    error, preserving the pre-weights behavior of this class.
    """

    def __init__(self, weights=None):
        # weights: optional array-like of per-sample weights, broadcastable
        # against ``y``; None means unweighted. Callers are presumably
        # expected to pass weights summing to 1 (the call site normalizes
        # them that way) — TODO confirm.
        self.weights = weights

    def func(self, y, z):
        """Return the elementwise (optionally weighted) squared error."""
        sq_err = (y - z) ** 2
        # Bug fix: the original unconditionally computed
        # ``self.weights * sq_err``, raising TypeError (None * array)
        # whenever the loss was built with the default weights=None.
        if self.weights is None:
            return sq_err
        return self.weights * sq_err

    def to_cvxpy(self, y, z):
        """Return the equivalent CVXPY expression of this loss."""
        if self.weights is not None:
            # Bug fix: the original referenced a bare ``weights`` here,
            # which is a NameError at runtime — it must read the attribute.
            return cp.sum(cp.multiply(self.weights, cp.square(y - z)))
        else:
            # Unweighted case keeps the historical 1/N normalization.
            return cp.sum_squares(y - z) / np.prod(y.shape)
10 changes: 7 additions & 3 deletions utils/sherlock_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,8 +49,12 @@
rng = np.random.default_rng(task_id)
P = np.random.permutation(y.shape[-1])
n_train = P.size * 9 // 10

train_mask = P[:n_train]
test_mask = P[n_train:]
weights = np.ones(P.size)
weights[train_mask] = 0.0
weights /= np.sum(weights)
X_train = X[train_mask]
y_train = y[train_mask]
X_test = X[test_mask]
Expand Down Expand Up @@ -78,8 +82,8 @@ def __init__(self):
else:
ti_solve = time.monotonic()
state = ad.grpnet(
X=X_train,
glm=ad.glm.gaussian(y_train, dtype=np.float64),
X=X,
glm=ad.glm.gaussian(y, dtype=np.float64, weights=weights),
early_exit=False,
min_ratio=1e-9,
n_threads=32,
Expand All @@ -101,7 +105,7 @@ def __init__(self):
ins[i] = loss(torch.from_numpy(y_hat_train[i]), torch.from_numpy(y_train))

ti_alo = time.monotonic()
ld, alo, ts, r2 = ai.get_alo_for_sweep(y_train, state, loss, 20)
ld, alo, ts, r2 = ai.get_alo_for_sweep(y_train, state, loss, weights, 20)
tf_alo = time.monotonic()

np.savez(sys.argv[1], alo_lamda=ld, full_lamda=state.lmda_path, alo=alo, oos=oos, in_sample=ins, ts=ts, r2=r2, solve_time=tf_solve - ti_solve, alo_time=tf_alo - ti_alo)

0 comments on commit 2f18ac8

Please sign in to comment.