diff --git a/src/glum/_glm.py b/src/glum/_glm.py
index 312c243d..1dd9cb01 100644
--- a/src/glum/_glm.py
+++ b/src/glum/_glm.py
@@ -940,7 +940,7 @@ def _set_up_for_fit(self, y: np.ndarray) -> None:
         elif (self.lower_bounds is None) and (self.upper_bounds is None):
             if np.all(np.asarray(self.l1_ratio) == 0):
                 self._solver = "irls-ls"
-            elif getattr(self, "alpha", 1) == 0 and not self.alpha_search:
+            elif getattr(self, "alpha", 0) == 0 and not self.alpha_search:
                 self._solver = "irls-ls"
             else:
                 self._solver = "irls-cd"
@@ -2304,8 +2304,7 @@ def covariance_matrix(
         _expected_information = expected_information

         if (
-            (hasattr(self, "alpha") and self.alpha is None)
-            or (
+            (
                 hasattr(self, "alpha")
                 and isinstance(self.alpha, (int, float))
                 and self.alpha > 0
@@ -2914,11 +2913,11 @@ class GeneralizedLinearRegressor(GeneralizedLinearRegressorBase):
     alpha : {float, array-like}, optional (default=None)
         Constant that multiplies the penalty terms and thus determines the
         regularization strength. If ``alpha_search`` is ``False`` (the default),
-        then ``alpha`` must be a scalar or None (equivalent to ``alpha=1.0``).
+        then ``alpha`` must be a scalar or None (equivalent to ``alpha=0``).
         If ``alpha_search`` is ``True``, then ``alpha`` must be an iterable or
         ``None``. See ``alpha_search`` to find how the regularization path is
         set if ``alpha`` is ``None``. See the notes for the exact mathematical
-        meaning of this parameter. ``alpha = 0`` is equivalent to unpenalized
+        meaning of this parameter. ``alpha=0`` is equivalent to unpenalized
        GLMs. In this case, the design matrix ``X`` must have full column rank
        (no collinearities).

@@ -3146,10 +3145,11 @@ class GeneralizedLinearRegressor(GeneralizedLinearRegressorBase):

     drop_first : bool, optional (default = False)
         If ``True``, drop the first column when encoding categorical variables.
-        Set this to True when alpha=0 and solver='auto' to prevent an error due to a
-        singular feature matrix. In the case of using a formula with interactions,
-        setting this argument to ``True`` ensures structural full-rankness (it is
-        equivalent to ``ensure_full_rank`` in formulaic and tabmat).
+        Set this to True when ``alpha=0`` and ``solver='auto'`` to prevent an error
+        due to a singular feature matrix. In the case of using a formula with
+        interactions, setting this argument to ``True`` ensures structural
+        full-rankness (it is equivalent to ``ensure_full_rank`` in formulaic and
+        tabmat).

     robust : bool, optional (default = False)
         If true, then robust standard errors are computed by default.
@@ -3573,7 +3573,7 @@ def fit(
                 self.coef_ = self.coef_path_[-1]
         else:
             if self.alpha is None:
-                _alpha = 1.0
+                _alpha = 0.0
             else:
                 _alpha = self.alpha
             if _alpha > 0 and self.l1_ratio > 0 and self._solver != "irls-cd":
diff --git a/tests/glm/test_distribution.py b/tests/glm/test_distribution.py
index d241ff07..be2a694a 100644
--- a/tests/glm/test_distribution.py
+++ b/tests/glm/test_distribution.py
@@ -296,7 +296,6 @@ def test_poisson_deviance_dispersion_loglihood(weighted):
     # logLik(glm_model)
     # -7.390977 (df=1)
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family="poisson",
         fit_intercept=False,
         gradient_tol=1e-8,
@@ -345,7 +344,6 @@ def test_gamma_deviance_dispersion_loglihood(weighted):
     # logLik(glm_model)
     # -7.057068 (df=2)
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family="gamma",
         fit_intercept=False,
         gradient_tol=1e-8,
@@ -393,7 +391,6 @@ def test_gaussian_deviance_dispersion_loglihood(family, weighted):
     # logLik(glm_model)
     # -7.863404 (df=2)
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family=family,
         fit_intercept=False,
         gradient_tol=1e-8,
@@ -441,7 +438,6 @@ def test_tweedie_deviance_dispersion_loglihood(weighted):
     # logLiktweedie(glm_model)
     # -8.35485
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family=TweedieDistribution(1.5),
         fit_intercept=False,
         gradient_tol=1e-8,
@@ -490,7 +486,6 @@ def test_binomial_deviance_dispersion_loglihood(weighted):
     # logLik(glm_model)
     # -3.365058 (df=1)
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family="binomial",
         fit_intercept=False,
         gradient_tol=1e-8,
@@ -535,7 +530,6 @@ def test_negative_binomial_deviance_dispersion_loglihood(weighted):
     # logLik(glm_model)
     # -4.187887 (df=1)
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family="negative.binomial",
         fit_intercept=False,
         gradient_tol=1e-8,
diff --git a/tests/glm/test_glm.py b/tests/glm/test_glm.py
index 469f464e..c742c5a7 100644
--- a/tests/glm/test_glm.py
+++ b/tests/glm/test_glm.py
@@ -203,7 +203,7 @@ def test_gradient_tol_setting(estimator, kwargs, solver, gradient_tol):
 )
 def test_glm_family_argument(f, fam, y, X):
     """Test GLM family argument set as string."""
-    glm = GeneralizedLinearRegressor(family=f, alpha=0).fit(X, y)
+    glm = GeneralizedLinearRegressor(family=f).fit(X, y)
     assert isinstance(glm._family_instance, fam.__class__)


@@ -373,6 +373,7 @@ def test_P1_P2_expansion_with_categoricals_missings():
     y = rng.normal(size=60)

     mdl1 = GeneralizedLinearRegressor(
+        alpha=1.0,
         l1_ratio=0.01,
         P1=[1, 2, 2, 2, 2, 2],
         P2=[2, 1, 1, 1, 1, 1],
@@ -381,6 +382,7 @@
     mdl1.fit(X, y)

     mdl2 = GeneralizedLinearRegressor(
+        alpha=1.0,
         l1_ratio=0.01,
         P1=[1, 2],
         P2=[2, 1],
@@ -390,6 +392,7 @@
     np.testing.assert_allclose(mdl1.coef_, mdl2.coef_)

     mdl3 = GeneralizedLinearRegressor(
+        alpha=1.0,
         l1_ratio=0.01,
         P1=[1, 2],
         P2=sparse.diags([2, 1, 1, 1, 1, 1]),
@@ -575,7 +578,6 @@ def test_glm_identity_regression(solver, fit_intercept, offset, convert_x_fn):
     X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
     y = np.dot(X, coef) + (0 if offset is None else offset)
     glm = GeneralizedLinearRegressor(
-        alpha=0,
         family="normal",
         link="identity",
         fit_intercept=fit_intercept,
@@ -695,7 +697,6 @@ def test_x_not_modified_inplace(solver, fit_intercept, offset, convert_x_fn):
     X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
     y = np.dot(X, coef) + (0 if offset is None else offset)
     glm = GeneralizedLinearRegressor(
-        alpha=0,
         family="normal",
         link="identity",
         fit_intercept=fit_intercept,
@@ -737,7 +738,6 @@ def test_glm_identity_regression_categorical_data(solver, offset, convert_x_fn):
     y = np.dot(x_mat, coef) + (0 if offset is None else offset)

     glm = GeneralizedLinearRegressor(
-        alpha=0,
         family="normal",
         link="identity",
         fit_intercept=False,
@@ -776,7 +776,6 @@ def test_glm_log_regression(family, solver, tol, fit_intercept, offset):
     X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
     y = np.exp(np.dot(X, coef) + (0 if offset is None else offset))
     glm = GeneralizedLinearRegressor(
-        alpha=0,
         family=family,
         link="log",
         fit_intercept=fit_intercept,
@@ -1250,7 +1249,6 @@ def test_binomial_cloglog_unregularized(solver):
     sm_fit = sm_glm.fit()

     glum_glm = GeneralizedLinearRegressor(
-        alpha=0,
         family="binomial",
         link="cloglog",
         solver=solver,
@@ -1986,7 +1984,7 @@ def test_verbose(regression_data, capsys):

 def test_ols_std_errors(regression_data):
     X, y = regression_data
-    mdl = GeneralizedLinearRegressor(alpha=0, family="normal")
+    mdl = GeneralizedLinearRegressor(family="normal")
     mdl.fit(X=X, y=y)

     mdl_sm = sm.OLS(endog=y, exog=sm.add_constant(X))
@@ -2029,9 +2027,9 @@ def test_array_std_errors(regression_data, family, fit_intercept):
         sm_family = sm.families.Gaussian()
         dispersion = None

-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family=family, fit_intercept=fit_intercept
-    ).fit(X=X, y=y)
+    mdl = GeneralizedLinearRegressor(family=family, fit_intercept=fit_intercept).fit(
+        X=X, y=y
+    )

     if fit_intercept:
         mdl_sm = sm.GLM(endog=y, exog=sm.add_constant(X), family=sm_family)
@@ -2063,7 +2061,7 @@ def test_sparse_std_errors(regression_data):
     X, y = regression_data
     sp_X = sparse.csc_matrix(X)

-    mdl = GeneralizedLinearRegressor(alpha=0, family="normal")
+    mdl = GeneralizedLinearRegressor(family="normal")
     mdl.fit(X=X, y=y)

     actual1 = mdl.std_errors(X=sp_X, y=y, robust=False)
@@ -2105,9 +2103,7 @@ def test_inputtype_std_errors(regression_data, categorical, split, fit_intercept
                 tm.CategoricalMatrix(pd.Categorical(group, categories=categories)),
             ]
         )
-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family="normal", fit_intercept=fit_intercept
-    )
+    mdl = GeneralizedLinearRegressor(family="normal", fit_intercept=fit_intercept)
     mdl.fit(X=X, y=y)
     if isinstance(X, tm.MatrixBase):
         X_sm = X.toarray()
@@ -2146,7 +2142,7 @@ def test_coef_table(regression_data, fit_intercept, confidence_level):
     X_df = pd.DataFrame(X, columns=colnames)

     mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=fit_intercept
+        family="gaussian", fit_intercept=fit_intercept
     ).fit(X=X_df, y=y)

     if fit_intercept:
@@ -2209,9 +2205,9 @@ def test_wald_test_matrix(regression_data, family, fit_intercept, R, r):
         sm_family = sm.families.Gaussian()
         dispersion = None

-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family=family, fit_intercept=fit_intercept
-    ).fit(X=X, y=y)
+    mdl = GeneralizedLinearRegressor(family=family, fit_intercept=fit_intercept).fit(
+        X=X, y=y
+    )

     if fit_intercept:
         mdl_sm = sm.GLM(endog=y, exog=sm.add_constant(X), family=sm_family)
@@ -2283,9 +2279,9 @@
 def test_wald_test_matrix_public(regression_data, R, r):
     X, y = regression_data

-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True
-    ).fit(X=X, y=y, store_covariance_matrix=True)
+    mdl = GeneralizedLinearRegressor(family="gaussian", fit_intercept=True).fit(
+        X=X, y=y, store_covariance_matrix=True
+    )

     assert mdl._wald_test_matrix(R, r) == mdl.wald_test(R=R, r=r)
@@ -2306,9 +2302,9 @@ def test_wald_test_matrix_public(regression_data, R, r):
 def test_wald_test_matrix_fixed_cov(regression_data, R, r):
     X, y = regression_data

-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=False
-    ).fit(X=X, y=y, store_covariance_matrix=True)
+    mdl = GeneralizedLinearRegressor(family="gaussian", fit_intercept=False).fit(
+        X=X, y=y, store_covariance_matrix=True
+    )
     mdl_sm = sm.GLM(endog=y, exog=X, family=sm.families.Gaussian())

     # Use the same covariance matrix for both so that we can use tighter tolerances
@@ -2351,9 +2347,9 @@ def test_wald_test_feature_names(regression_data, names, R, r):
     X, y = regression_data
     X_df = pd.DataFrame(X, columns=[f"col_{i}" for i in range(X.shape[1])])

-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True
-    ).fit(X=X_df, y=y, store_covariance_matrix=True)
+    mdl = GeneralizedLinearRegressor(family="gaussian", fit_intercept=True).fit(
+        X=X_df, y=y, store_covariance_matrix=True
+    )

     feature_names_results = mdl._wald_test_feature_names(names, r)
     if r is not None:
@@ -2392,9 +2388,9 @@ def test_wald_test_feature_names_public(regression_data, names, r):
     X, y = regression_data
     X_df = pd.DataFrame(X, columns=[f"col_{i}" for i in range(X.shape[1])])

-    mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True
-    ).fit(X=X_df, y=y, store_covariance_matrix=True)
+    mdl = GeneralizedLinearRegressor(family="gaussian", fit_intercept=True).fit(
+        X=X_df, y=y, store_covariance_matrix=True
+    )

     assert mdl._wald_test_feature_names(names, r) == mdl.wald_test(features=names, r=r)
@@ -2449,7 +2445,7 @@ def test_wald_test_term_names(regression_data, names, R, r, r_feat):
     X_df = X_df[["col_1", "col_2"]].assign(term_3=pd.cut(X_df["col_3"], bins=5))

     mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True, drop_first=True
+        family="gaussian", fit_intercept=True, drop_first=True
     ).fit(X=X_df, y=y, store_covariance_matrix=True)

     term_names_results = mdl._wald_test_term_names(names, r)
@@ -2515,7 +2511,7 @@ def test_wald_test_term_names_public(regression_data, names, R, r, r_feat):
     X_df = X_df[["col_1", "col_2"]].assign(term_3=pd.cut(X_df["col_3"], bins=5))

     mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True, drop_first=True
+        family="gaussian", fit_intercept=True, drop_first=True
     ).fit(X=X_df, y=y, store_covariance_matrix=True)

     term_names_results = mdl.wald_test(terms=names, r=r)
@@ -2554,7 +2550,7 @@ def test_wald_test_formula(regression_data, formula, R, r_feat):
     X_df = pd.DataFrame(X, columns=[f"col_{i}" for i in range(X.shape[1])])

     mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True, drop_first=True
+        family="gaussian", fit_intercept=True, drop_first=True
     ).fit(X=X_df, y=y, store_covariance_matrix=True)

     term_names_results = mdl._wald_test_formula(formula)
@@ -2599,7 +2595,7 @@ def test_wald_test_formula_public(regression_data, formula, R, r_feat):
     X_df = pd.DataFrame(X, columns=[f"col_{i}" for i in range(X.shape[1])])

     mdl = GeneralizedLinearRegressor(
-        alpha=0, family="gaussian", fit_intercept=True, drop_first=True
+        family="gaussian", fit_intercept=True, drop_first=True
     ).fit(X=X_df, y=y, store_covariance_matrix=True)

     term_names_results = mdl.wald_test(formula=formula)
@@ -2617,7 +2613,7 @@ def test_wald_test_formula_public(regression_data, formula, R, r_feat):

 def test_wald_test_raise_on_wrong_input(regression_data):
     X, y = regression_data
-    mdl = GeneralizedLinearRegressor(alpha=0, family="gaussian", fit_intercept=True)
+    mdl = GeneralizedLinearRegressor(family="gaussian", fit_intercept=True)
     mdl.fit(X=X, y=y)

     with pytest.raises(ValueError):
@@ -2632,7 +2628,6 @@ def test_wald_test_raise_on_wrong_input(regression_data):
 @pytest.mark.parametrize("weighted", [False, True])
 def test_score_method(as_data_frame, offset, weighted):
     regressor = GeneralizedLinearRegressor(
-        alpha=0,
         family="normal",
         fit_intercept=False,
         gradient_tol=1e-8,
@@ -2666,7 +2661,7 @@ def test_score_method(as_data_frame, offset, weighted):

 def test_information_criteria(regression_data):
     X, y = regression_data
-    regressor = GeneralizedLinearRegressor(family="gaussian", alpha=0)
+    regressor = GeneralizedLinearRegressor(family="gaussian")
     regressor.fit(X, y)

     llf = regressor.family_instance.log_likelihood(y, regressor.predict(X))
@@ -2721,10 +2716,10 @@ def test_drop_first_allows_alpha_equals_0():
     rng = np.random.default_rng(42)
     y = np.random.normal(size=10)
     X = pd.DataFrame(data={"cat": pd.Categorical(rng.integers(2, size=10))})
-    regressor = GeneralizedLinearRegressor(alpha=0, drop_first=True)
+    regressor = GeneralizedLinearRegressor(drop_first=True)
     regressor.fit(X, y)

-    regressor = GeneralizedLinearRegressor(alpha=0)  # default is False
+    regressor = GeneralizedLinearRegressor()  # default is False
     with pytest.raises(np.linalg.LinAlgError):
         regressor.fit(X, y)

@@ -2732,7 +2727,7 @@ def test_dropping_distinct_categorical_column():
     y = np.random.normal(size=10)
     X = pd.DataFrame(data={"cat": pd.Categorical(np.ones(10)), "num": np.ones(10)})
-    regressor = GeneralizedLinearRegressor(alpha=0, drop_first=True)
+    regressor = GeneralizedLinearRegressor(drop_first=True)
     regressor.fit(X, y)
     assert regressor.coef_.shape == (1,)
     assert regressor.feature_names_ == ["num"]

@@ -2769,7 +2764,6 @@ def test_store_covariance_matrix(

     regressor = GeneralizedLinearRegressor(
         family="gaussian",
-        alpha=0,
         robust=robust,
         expected_information=expected_information,
     )
@@ -2804,7 +2798,6 @@ def test_store_covariance_matrix_formula(regression_data, formula):
     regressor = GeneralizedLinearRegressor(
         formula=formula,
         family="gaussian",
-        alpha=0,
     )
     regressor.fit(df, y, store_covariance_matrix=True)

@@ -2827,7 +2820,6 @@ def test_store_covariance_matrix_formula_errors(regression_data):
     regressor = GeneralizedLinearRegressor(
         formula=formula,
         family="gaussian",
-        alpha=0,
     )
     regressor.fit(df, y)
     with pytest.raises(ValueError, match="Either X and y must be provided"):
@@ -2837,7 +2829,7 @@
 def test_store_covariance_matrix_errors(regression_data):
     X, y = regression_data

-    regressor = GeneralizedLinearRegressor(family="gaussian", alpha=0)
+    regressor = GeneralizedLinearRegressor(family="gaussian")
     regressor.fit(X, y, store_covariance_matrix=False)

     with pytest.raises(ValueError, match="Either X and y must be provided"):
@@ -3151,7 +3143,6 @@ def test_formula_against_smf(get_mixed_data, formula, fit_intercept):
         family="normal",
         drop_first=True,
         formula=formula,
-        alpha=0.0,
         fit_intercept=fit_intercept,
     ).fit(data)

@@ -3174,7 +3165,6 @@ def test_formula_context(get_mixed_data):
         family="normal",
         drop_first=True,
         formula=formula,
-        alpha=0.0,
         fit_intercept=True,
     ).fit(data)

@@ -3206,7 +3196,6 @@ def test_formula_predict(get_mixed_data, formula, fit_intercept):
         family="normal",
         drop_first=True,
         formula=formula,
-        alpha=0.0,
         fit_intercept=fit_intercept,
     ).fit(data)
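
Behavioral note: with this change, constructing a `GeneralizedLinearRegressor` without an explicit `alpha` fits an unpenalized GLM (`alpha=0`) rather than silently applying `alpha=1.0`, which is why the `alpha=0` arguments removed above become redundant. A minimal sketch of the new default behavior, assuming only the public glum API exercised in the tests above (the data and variable names are illustrative):

```python
import numpy as np

from glum import GeneralizedLinearRegressor

rng = np.random.default_rng(42)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=50)

# Omitting alpha previously meant alpha=1.0 (penalized by default);
# with this patch it means alpha=0, i.e. an ordinary unpenalized GLM fit.
default_fit = GeneralizedLinearRegressor(family="normal").fit(X, y)
explicit_fit = GeneralizedLinearRegressor(family="normal", alpha=0).fit(X, y)

# With the patch applied, both estimators solve the same problem.
np.testing.assert_allclose(default_fit.coef_, explicit_fit.coef_)
```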
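The flip side of an unpenalized default is that the design matrix must now be full rank by default, as the updated `drop_first` docstring notes: a complete set of dummy columns plus an intercept is collinear. The sketch below mirrors `test_drop_first_allows_alpha_equals_0` above and shows both outcomes; the data are illustrative:

```python
import numpy as np
import pandas as pd

from glum import GeneralizedLinearRegressor

rng = np.random.default_rng(42)
y = rng.normal(size=10)
X = pd.DataFrame({"cat": pd.Categorical(rng.integers(2, size=10))})

# drop_first=True removes the redundant dummy column, yielding a
# full-rank design that the unpenalized solve can handle.
GeneralizedLinearRegressor(drop_first=True).fit(X, y)

# With the default drop_first=False, the now-unpenalized default fit
# hits a singular feature matrix and raises LinAlgError.
try:
    GeneralizedLinearRegressor().fit(X, y)
except np.linalg.LinAlgError:
    pass  # expected under the new alpha=0 default
```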