
Commit 9c04a08

Merge branch 'main' into glum-v3

Merge conflicts were due to the new black version.

2 parents: 5dfd446 + 6202d6b

9 files changed: +45 -41 lines

.github/workflows/build_wheels.yml

Lines changed: 8 additions & 6 deletions
@@ -20,9 +20,10 @@ jobs:
         uses: pypa/[email protected]
         env:
           CIBW_ARCHS_MACOS: x86_64 arm64
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
           path: ./wheelhouse/*.whl
+          name: wheels-${{ matrix.os }}

   build_sdist:
     name: Build source distribution
@@ -37,9 +38,10 @@ jobs:
         run: python -m pip install setuptools setuptools-scm wheel Cython numpy scikit-learn
       - name: Build sdist
         run: python setup.py sdist
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
           path: dist/*.tar.gz
+          name: sdist

   upload_testpypi:
     if: github.event_name == 'release' && github.event.action == 'published'
@@ -51,9 +53,9 @@ jobs:
     permissions:
       id-token: write
     steps:
-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
         with:
-          name: artifact
+          merge-multiple: true
           path: dist
       - uses: pypa/[email protected]
         with:
@@ -69,8 +71,8 @@ jobs:
     permissions:
       id-token: write
     steps:
-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
         with:
-          name: artifact
           path: dist
+          merge-multiple: true
       - uses: pypa/[email protected]

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
@@ -1,13 +1,13 @@
 repos:
   - repo: https://github.com/Quantco/pre-commit-mirrors-black
-    rev: 23.12.1
+    rev: 24.1.1
     hooks:
       - id: black-conda
         args:
           - --safe
           - --target-version=py39
   - repo: https://github.com/Quantco/pre-commit-mirrors-flake8
-    rev: 6.1.0
+    rev: 7.0.0
     hooks:
       - id: flake8-conda
         additional_dependencies: [
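
The black bump from 23.12.1 to 24.1.1 is what drives almost every Python hunk below: the 2024 stable style wraps a long conditional (ternary) expression in its own parentheses instead of letting it hang off a keyword argument. A minimal before/after sketch with made-up names (not code from this repo):

# Hypothetical keyword argument holding a long conditional expression.
def make_model(C):
    return {"C": C}

n_samples, alpha, multiplier = 1000, 0.01, None

# black 23.x left the conditional hanging off the argument:
#     model = make_model(
#         C=1 / (n_samples * alpha)
#         if multiplier is None
#         else 1 / (n_samples * alpha * multiplier),
#     )

# black 24.x parenthesizes the whole conditional, which is the shape of
# most hunks in this commit:
model = make_model(
    C=(
        1 / (n_samples * alpha)
        if multiplier is None
        else 1 / (n_samples * alpha * multiplier)
    ),
)
print(model)  # {'C': 0.1}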

setup.py

Lines changed: 9 additions & 7 deletions
@@ -74,9 +74,9 @@
     package_dir={"": "src"},
     packages=find_packages(
         where="src",
-        include=["glum"]
-        if os.environ.get("CONDA_BUILD")
-        else ["glum", "glum_benchmarks"],
+        include=(
+            ["glum"] if os.environ.get("CONDA_BUILD") else ["glum", "glum_benchmarks"]
+        ),
     ),
     python_requires=">=3.9",
     install_requires=[
@@ -89,13 +89,15 @@
         "formulaic>=0.6",
         "tabmat>=4.0.0a3",
     ],
-    entry_points=None
-    if os.environ.get("CONDA_BUILD")
-    else """
+    entry_points=(
+        None
+        if os.environ.get("CONDA_BUILD")
+        else """
         [console_scripts]
         glm_benchmarks_run = glum_benchmarks.cli_run:cli_run
         glm_benchmarks_analyze = glum_benchmarks.cli_analyze:cli_analyze
-    """,
+    """
+    ),
     ext_modules=cythonize(
         ext_modules,
         annotate=False,
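
Beyond the reformatting, the behaviour of these two hunks is unchanged: when conda-build sets CONDA_BUILD, only the glum package is shipped and no console scripts are registered, while a plain pip install also gets glum_benchmarks and its CLI. A standalone sketch of that switch (simplified stand-in, not the actual setup.py):

import os

# Simplified stand-in for the two conditionals in setup.py above.
conda_build = bool(os.environ.get("CONDA_BUILD"))

packages = ["glum"] if conda_build else ["glum", "glum_benchmarks"]
entry_points = (
    None
    if conda_build
    else """
[console_scripts]
glm_benchmarks_run = glum_benchmarks.cli_run:cli_run
glm_benchmarks_analyze = glum_benchmarks.cli_analyze:cli_analyze
"""
)

print(packages)              # ['glum', 'glum_benchmarks'] outside conda-build
print(entry_points is None)  # False outside conda-build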

src/glum/_distribution.py

Lines changed: 1 addition & 3 deletions
@@ -942,9 +942,7 @@ def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
         -------
         array-like
         """
-        return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log(
-            (1 + mu**2) / (1 + y**2)
-        )
+        return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log((1 + mu**2) / (1 + y**2))


 class BinomialDistribution(ExponentialDispersionModel):
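
The only change here is collapsing the return onto one line that now fits within black's 88-character limit; the formula itself is untouched. As a quick standalone sanity check (a sketch, not part of the library), the expression still behaves like a unit deviance: zero when mu equals y and positive otherwise.

import numpy as np

def unit_deviance(y, mu):
    # Same expression as in the diff above, written as a free function.
    return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log((1 + mu**2) / (1 + y**2))

y = np.array([-1.5, 0.0, 0.7, 3.0])
print(np.allclose(unit_deviance(y, y), 0.0))   # True
print(np.all(unit_deviance(y, y + 0.5) > 0))   # True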

src/glum/_glm.py

Lines changed: 12 additions & 10 deletions
@@ -2759,16 +2759,18 @@ def _expand_categorical_penalties(
             return np.array(
                 list(
                     chain.from_iterable(
-                        [
-                            elmt
-                            for _ in range(
-                                len(dtype.categories)
-                                + has_missing_category[col]
-                                - drop_first
-                            )
-                        ]
-                        if pd.api.types.is_categorical_dtype(dtype)
-                        else [elmt]
+                        (
+                            [
+                                elmt
+                                for _ in range(
+                                    len(dtype.categories)
+                                    + has_missing_category[col]
+                                    - drop_first
+                                )
+                            ]
+                            if pd.api.types.is_categorical_dtype(dtype)
+                            else [elmt]
+                        )
                         for elmt, (col, dtype) in zip(
                             penalty, X.dtypes.items()
                         )
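
Here black only wraps the conditional inside the generator in parentheses; the logic is the same: each entry of a per-column penalty vector is repeated once for every dummy column that the corresponding categorical feature expands into, while non-categorical columns keep a single entry. A simplified standalone sketch of that idea (hypothetical helper that ignores the missing-category bookkeeping):

from itertools import chain

import numpy as np
import pandas as pd

def expand_penalty(penalty, X, drop_first=False):
    # Repeat each column's penalty once per category level (minus the
    # dropped reference level); keep a single entry for numeric columns.
    return np.array(
        list(
            chain.from_iterable(
                (
                    [p] * (len(dtype.categories) - drop_first)
                    if isinstance(dtype, pd.CategoricalDtype)
                    else [p]
                )
                for p, dtype in zip(penalty, X.dtypes)
            )
        )
    )

X = pd.DataFrame({"num": [1.0, 2.0, 3.0], "cat": pd.Categorical(["a", "b", "a"])})
print(expand_penalty([0.5, 2.0], X))  # [0.5 2.  2. ]  ("cat" has two levels)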

src/glum_benchmarks/bench_liblinear.py

Lines changed: 5 additions & 3 deletions
@@ -77,9 +77,11 @@ def liblinear_bench(
     model_args = dict(
         penalty=pen,
         tol=benchmark_convergence_tolerance,
-        C=1 / (X.shape[0] * alpha)
-        if reg_multiplier is None
-        else 1 / (X.shape[0] * alpha * reg_multiplier),
+        C=(
+            1 / (X.shape[0] * alpha)
+            if reg_multiplier is None
+            else 1 / (X.shape[0] * alpha * reg_multiplier)
+        ),
         # Note that when an intercept is fitted, it is subject to regularization, unlike
         # other solvers. intercept_scaling helps combat this by inflating the intercept
         # column, though too low of a value leaves too much regularization and too high
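
Again a pure reformat, but the wrapped expression is worth spelling out: liblinear's C is an inverse penalty strength applied to the summed loss, while glum's alpha scales the penalty relative to the averaged loss, so (roughly) the benchmark converts between them with C = 1 / (n_samples * alpha), optionally folding in an extra multiplier on alpha. A small sketch of that conversion (hypothetical helper, not part of the benchmark module):

def liblinear_C(n_samples, alpha, reg_multiplier=None):
    # Mirror of the expression above: invert the per-observation penalty
    # strength to get liblinear's C, folding in the optional multiplier.
    return (
        1 / (n_samples * alpha)
        if reg_multiplier is None
        else 1 / (n_samples * alpha * reg_multiplier)
    )

print(liblinear_C(10_000, 0.001))       # 0.1
print(liblinear_C(10_000, 0.001, 2.0))  # 0.05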

src/glum_benchmarks/cli_run.py

Lines changed: 6 additions & 4 deletions
@@ -81,12 +81,14 @@ def cli_run(
         for Ln in libraries.keys():
             click.echo(f"running problem={Pn} library={Ln}")
             new_params = params.update_params(problem_name=Pn, library_name=Ln)
-            result, regularization_strength_ = execute_problem_library(
+            result, _ = execute_problem_library(
                 new_params,
                 iterations,
-                defaults["diagnostics_level"]
-                if params.diagnostics_level is None
-                else params.diagnostics_level,  # type: ignore
+                (
+                    defaults["diagnostics_level"]  # type: ignore
+                    if params.diagnostics_level is None  # type: ignore
+                    else params.diagnostics_level  # type: ignore
+                ),
             )
             _save_benchmark_results(
                 output_dir,

src/glum_benchmarks/orig_sklearn_fork/_glm.py

Lines changed: 1 addition & 3 deletions
@@ -889,9 +889,7 @@ def unit_variance_derivative(self, mu):
         return 2 * mu

     def unit_deviance(self, y, mu):
-        return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log(
-            (1 + mu**2) / (1 + y**2)
-        )
+        return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log((1 + mu**2) / (1 + y**2))


 class BinomialDistribution(ExponentialDispersionModel):

src/glum_benchmarks/util.py

Lines changed: 1 addition & 3 deletions
@@ -409,9 +409,7 @@ def clear_cache(force=False):
     if cache_location is None:
         return

-    cache_size_limit = float(
-        os.environ.get("GLM_BENCHMARKS_CACHE_SIZE_LIMIT", 1024**3)
-    )
+    cache_size_limit = float(os.environ.get("GLM_BENCHMARKS_CACHE_SIZE_LIMIT", 1024**3))

     if force or _get_size_of_cache_directory() > cache_size_limit:
         shutil.rmtree(cache_location)
