Skip to content

Commit e9a8daf

Browse files
authored
Merge pull request #54 from neuroinformatics-unit/add-snakefmt-to-precommit
Add snakefmt to precommit
2 parents 80ed935 + 1789b99 commit e9a8daf

File tree

4 files changed

+72
-19
lines changed

4 files changed

+72
-19
lines changed

.pre-commit-config.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,10 @@ repos:
1717
args: [--fix=lf]
1818
- id: requirements-txt-fixer
1919
- id: trailing-whitespace
20+
- repo: https://github.com/snakemake/snakefmt
21+
rev: v0.11.2
22+
hooks:
23+
- id: snakefmt
2024
- repo: https://github.com/charliermarsh/ruff-pre-commit
2125
rev: v0.6.3
2226
hooks:

photon_mosaic/workflow/Snakefile

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,20 @@ from photon_mosaic import log_cuda_availability
1818
import logging
1919

2020
# Configure logging based on config settings
21-
log_level = logging.DEBUG if config.get("logging", {}).get("snakemake_verbose", False) else logging.INFO
21+
log_level = (
22+
logging.DEBUG
23+
if config.get("logging", {}).get("snakemake_verbose", False)
24+
else logging.INFO
25+
)
2226
logging.basicConfig(level=log_level)
2327
logger = logging.getLogger("snakemake.workflow")
2428

29+
2530
# CUDA availability check on workflow start
2631
onstart:
2732
log_cuda_availability()
2833

34+
2935
raw_data_base = Path(config["raw_data_base"]).resolve()
3036
processed_data_base = Path(config["processed_data_base"]).resolve()
3137
slurm_config = config.get("slurm", {})
@@ -49,7 +55,9 @@ discoverer = DatasetDiscoverer(
4955
exclude_datasets=config["dataset_discovery"].get("exclude_datasets"),
5056
exclude_sessions=config["dataset_discovery"].get("exclude_sessions"),
5157
tiff_patterns=config["dataset_discovery"].get("tiff_patterns"),
52-
neuroblueprint_format=config["dataset_discovery"].get("neuroblueprint_format", False),
58+
neuroblueprint_format=config["dataset_discovery"].get(
59+
"neuroblueprint_format", False
60+
),
5361
)
5462

5563
discoverer.discover()
@@ -66,7 +74,9 @@ preproc_targets = [
6674
/ f"{output_pattern}{tiff_name}"
6775
)
6876
for i, dataset_name in enumerate(discoverer.transformed_datasets)
69-
for session_idx, tiff_list in discoverer.tiff_files[discoverer.original_datasets[i]].items()
77+
for session_idx, tiff_list in discoverer.tiff_files[
78+
discoverer.original_datasets[i]
79+
].items()
7080
for tiff_name in tiff_list
7181
]
7282

@@ -83,17 +93,21 @@ suite2p_targets = [
8393
/ fname
8494
)
8595
for i, dataset_name in enumerate(discoverer.transformed_datasets)
86-
for session_idx, tiff_list in discoverer.tiff_files[discoverer.original_datasets[i]].items()
96+
for session_idx, tiff_list in discoverer.tiff_files[
97+
discoverer.original_datasets[i]
98+
].items()
8799
for fname in ["F.npy", "data.bin"]
88100
if tiff_list # Only create targets for sessions that have files
89101
]
90102

91103
logger.info(f"Suite2p targets: {suite2p_targets}")
92104

105+
93106
include: "preprocessing.smk"
94107
include: "suite2p.smk"
95108

109+
96110
rule all:
97111
input:
98112
preproc_targets,
99-
suite2p_targets
113+
suite2p_targets,

photon_mosaic/workflow/preprocessing.smk

Lines changed: 33 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -25,36 +25,61 @@ import os
2525
# Configure SLURM resources if enabled
2626
slurm_config = config.get("slurm", {}) if config.get("use_slurm") else {}
2727

28+
2829
# Preprocessing rule
2930
rule preprocessing:
3031
input:
31-
img=lambda wildcards: cross_platform_path(raw_data_base / discoverer.original_datasets[discoverer.transformed_datasets.index(wildcards.subject_name)])
32+
img=lambda wildcards: cross_platform_path(
33+
raw_data_base
34+
/ discoverer.original_datasets[
35+
discoverer.transformed_datasets.index(wildcards.subject_name)
36+
]
37+
),
3238
output:
3339
processed=cross_platform_path(
3440
Path(processed_data_base).resolve()
3541
/ "{subject_name}"
3642
/ "{session_name}"
3743
/ "funcimg"
38-
/ (f"{output_pattern}"+ "{tiff}")
39-
)
44+
/ (f"{output_pattern}" + "{tiff}")
45+
),
4046
params:
41-
dataset_folder=lambda wildcards: cross_platform_path(raw_data_base / discoverer.original_datasets[discoverer.transformed_datasets.index(wildcards.subject_name)]),
47+
dataset_folder=lambda wildcards: cross_platform_path(
48+
raw_data_base
49+
/ discoverer.original_datasets[
50+
discoverer.transformed_datasets.index(wildcards.subject_name)
51+
]
52+
),
4253
output_folder=lambda wildcards: cross_platform_path(
4354
Path(processed_data_base).resolve()
4455
/ wildcards.subject_name
4556
/ wildcards.session_name
4657
/ "funcimg"
4758
),
48-
ses_idx=lambda wildcards: int(wildcards.session_name.split("_")[0].replace("ses-", "")),
59+
ses_idx=lambda wildcards: int(
60+
wildcards.session_name.split("_")[0].replace("ses-", "")
61+
),
4962
wildcard_constraints:
50-
tiff="|".join(sorted(discoverer.tiff_files_flat)) if discoverer.tiff_files_flat else "dummy",
63+
tiff=(
64+
"|".join(sorted(discoverer.tiff_files_flat))
65+
if discoverer.tiff_files_flat
66+
else "dummy"
67+
),
5168
subject_name="|".join(discoverer.transformed_datasets),
52-
session_name="|".join([discoverer.get_session_name(i, session_idx) for i in range(len(discoverer.transformed_datasets))
53-
for session_idx in discoverer.tiff_files[discoverer.original_datasets[i]].keys()]),
69+
session_name="|".join(
70+
[
71+
discoverer.get_session_name(i, session_idx)
72+
for i in range(len(discoverer.transformed_datasets))
73+
for session_idx in discoverer.tiff_files[
74+
discoverer.original_datasets[i]
75+
].keys()
76+
]
77+
),
5478
resources:
5579
**(slurm_config if config.get("use_slurm") else {}),
5680
run:
5781
from photon_mosaic.rules.preprocessing import run_preprocessing
82+
5883
run_preprocessing(
5984
Path(params.output_folder),
6085
config["preprocessing"],

photon_mosaic/workflow/suite2p.smk

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ Output: Suite2p analysis results (F.npy, data.bin) in suite2p/plane0/ directory
1717
import re
1818
from photon_mosaic.snakemake_utils import cross_platform_path
1919

20+
2021
rule suite2p:
2122
input:
2223
tiffs=lambda wildcards: [
@@ -27,9 +28,11 @@ rule suite2p:
2728
/ "funcimg"
2829
/ f"{output_pattern}{tiff_name}"
2930
)
30-
for tiff_name in discoverer.tiff_files[discoverer.original_datasets[discoverer.transformed_datasets.index(wildcards.subject_name)]][
31-
int(wildcards.session_name.split("_")[0].replace("ses-", ""))
32-
]
31+
for tiff_name in discoverer.tiff_files[
32+
discoverer.original_datasets[
33+
discoverer.transformed_datasets.index(wildcards.subject_name)
34+
]
35+
][int(wildcards.session_name.split("_")[0].replace("ses-", ""))]
3336
],
3437
output:
3538
F=cross_platform_path(
@@ -49,7 +52,7 @@ rule suite2p:
4952
/ "suite2p"
5053
/ "plane0"
5154
/ "data.bin"
52-
)
55+
),
5356
params:
5457
dataset_folder=lambda wildcards: cross_platform_path(
5558
Path(processed_data_base).resolve()
@@ -59,8 +62,15 @@ rule suite2p:
5962
),
6063
wildcard_constraints:
6164
subject_name="|".join(discoverer.transformed_datasets),
62-
session_name="|".join([discoverer.get_session_name(i, session_idx) for i in range(len(discoverer.transformed_datasets))
63-
for session_idx in discoverer.tiff_files[discoverer.original_datasets[i]].keys()]),
65+
session_name="|".join(
66+
[
67+
discoverer.get_session_name(i, session_idx)
68+
for i in range(len(discoverer.transformed_datasets))
69+
for session_idx in discoverer.tiff_files[
70+
discoverer.original_datasets[i]
71+
].keys()
72+
]
73+
),
6474
resources:
6575
**(slurm_config if config.get("use_slurm") else {}),
6676
run:

0 commit comments

Comments (0)