-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathpixi.toml
More file actions
314 lines (260 loc) · 10.1 KB
/
pixi.toml
File metadata and controls
314 lines (260 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
# Underworld3 In-Repository Build Configuration
#
# MPI is chosen automatically per platform:
# macOS → OpenMPI (MPICH 4.x has a collective-op scaling bug — issue #68)
# Linux → MPICH (traditional default)
#
# Two PETSc tracks:
# conda-forge PETSc (~5 min install, no AMR tools)
# Custom PETSc (~1 hour build, includes pragmatic/mmg/parmmg/slepc)
#
# Three feature tiers:
# minimal — build and run underworld3
# runtime — add visualization + Jupyter
# dev — add Claude, linting, documentation tools
#
# Interactive setup (recommended):
# ./uw setup # Guides you through all choices
#
# Manual install:
# pixi install # Minimal (platform-default MPI)
# pixi install -e dev # Development
# pixi install -e amr-dev # AMR + development
# pixi install -e amr-openmpi-dev # AMR + explicit OpenMPI + development
[workspace]
# Project identity and solve targets for pixi.
name = "underworld3"
version = "0.99.0b"
authors = ["Louis Moresi <louis.moresi@anu.edu.au>"]
# Single-channel solve keeps dependency resolution reproducible.
channels = ["conda-forge"]
# Supported platforms: Apple Silicon macOS and x86-64 Linux.
platforms = ["osx-arm64", "linux-64"]
# ============================================
# CORE TASKS
# ============================================
[tasks]
# Compile the Cython extensions and install underworld3 into the
# active environment (no build isolation: uses the env's toolchain).
build = "pip install . --no-build-isolation"
# Remove build artifacts.
clean = "./scripts/clean.sh"

# Test suites — level 1 is the quick smoke set; omit the level to run all.
test = "./scripts/test_levels.sh 1" # ~2 min
test-all = "./scripts/test_levels.sh"

# Formatting and static typing.
format = "black src/underworld3 tests --line-length=100"
type-check = "mypy src/underworld3 --ignore-missing-imports"

# Geospatial stack is opt-in: heavy and not needed by most users.
install-geo = "pip install gdal pyproj cartopy geopandas owslib"
# ============================================
# BASE DEPENDENCIES (shared by all environments)
# ============================================
# NOTE: petsc/petsc4py and MPI are in features below, not here
# Prevent OpenMP thread oversubscription with MPI. The conda-forge
# OpenBLAS, SuiteSparse, and SuperLU_DIST are all built with OpenMP and
# default to spawning one thread per core. With N MPI ranks that means
# N × cores threads competing for cores — catastrophic at scale.
# The pthreads variant of OpenBLAS would be ideal but mumps-mpi in
# conda-forge hard-requires the openmp build, so we cap threads instead.
# Workspace-level activation — these variables are exported on activation
# regardless of which environment below is selected.
[activation.env]
OMP_NUM_THREADS = "1"
OPENBLAS_NUM_THREADS = "1"
[dependencies]
python = "3.12.*"
# Build requirements
cython = ">=3.1,<4"
numpy = "<2"
pip = ">=25,<26"
setuptools = ">=75,<76"
compilers = "*"
# Underworld3 runtime dependencies
sympy = ">=1.13,<2"
scipy = ">=1.15,<2"
pint = ">=0.24,<0.25"
pydantic = ">=2.0,<3"
pyyaml = ">=6.0,<7"
typeguard = ">=4.4,<5"
# xxhash is the C library (0.8.x series); python-xxhash provides the
# Python bindings (3.x series). NOTE(review): confirm the C library is
# used directly — otherwise the explicit xxhash pin can be dropped.
xxhash = ">=0.8.3,<0.9"
# Upper bound added for consistency: every other pinned dependency in
# this file bounds its next major version.
python-xxhash = ">=3.0,<4"
# Testing (included in base for CI compatibility)
pytest = ">=8.3,<9"
pytest-mpi = ">=0.6,<0.7"
pytest-timeout = ">=2.3,<3"
pytest-forked = ">=1.6,<2"
pytest-xdist = ">=3.8,<4"
# Mesh generation
pykdtree = ">=1.4,<2"
meshio = ">=5.3,<6"
# Utilities
requests = "*"
matplotlib = ">=3.10,<4"
# Jupyter kernel support (needed for pixi-kernel in all environments)
ipykernel = ">=6.29,<7"
pixi-kernel = ">=0.7.1,<0.8"
# NOTE(review): sphinx-design is a documentation tool — consider moving
# it to the dev feature unless base-environment imports depend on it.
sphinx-design = ">=0.6.1,<0.7"
# Installed from PyPI rather than conda-forge. NOTE(review): the reason
# is not recorded here — presumably wheel availability/freshness; confirm.
[pypi-dependencies]
gmsh = ">=4.13,<5"
pygmsh = ">=7.1,<8"
rich = "*"
# ============================================
# CONDA-FORGE PETSC — platform-default MPI
# ============================================
# macOS gets OpenMPI, Linux gets MPICH (via [target] sections).
# Quick install (~5 min), no AMR tools.
[feature.conda-petsc.dependencies]
petsc = ">=3.21,<4"
petsc4py = ">=3.21,<4"
mpi4py = ">=4,<5"
# macOS: pin OpenMPI explicitly (see header note on the MPICH 4.x bug).
[feature.conda-petsc.target.osx-arm64.dependencies]
openmpi = ">=5.0,<6"
h5py = { version = ">=3.12,<4", build = "*openmpi*" }
# Linux: no explicit mpich pin — the "*mpich*" h5py build string selects
# the MPICH variant, which should pull MPICH in transitively.
# NOTE(review): unlike the macOS target there is no MPI version bound here.
[feature.conda-petsc.target.linux-64.dependencies]
h5py = { version = ">=3.12,<4", build = "*mpich*" }
# ============================================
# CONDA-FORGE PETSC — explicit MPI overrides
# ============================================
# Use these when you need the non-default MPI for your platform.
[feature.conda-petsc-mpich.dependencies]
# Pin MPICH explicitly, mirroring the openmpi override below and the
# amr-mpich feature (same ">=4.3,<5" range). Previously only the h5py
# build string selected the MPI flavour, leaving the MPICH version free.
mpich = ">=4.3,<5"
petsc = ">=3.21,<4"
petsc4py = ">=3.21,<4"
mpi4py = ">=4,<5"
h5py = { version = ">=3.12,<4", build = "*mpich*" }
[feature.conda-petsc-openmpi.dependencies]
openmpi = ">=5.0,<6"
petsc = ">=3.21,<4"
petsc4py = ">=3.21,<4"
mpi4py = ">=4,<5"
h5py = { version = ">=3.12,<4", build = "*openmpi*" }
# ============================================
# CUSTOM PETSC (AMR) — platform-default MPI
# ============================================
# Build PETSc from source with adaptive mesh tools:
# - pragmatic: anisotropic mesh adaptation
# - mmg/parmmg: surface/volume mesh adaptation
# - slepc: eigenvalue solvers
#
# The build script (petsc-custom/build-petsc.sh) auto-detects MPI
# from the active pixi environment and sets PETSC_ARCH accordingly.
# MPICH and OpenMPI builds co-exist under the same PETSc source tree.
#
# Build time: ~1 hour on Apple Silicon
# Toolchain required to configure and compile PETSc from source.
[feature.amr.dependencies]
gfortran = ">=14.2,<15"
cxx-compiler = ">=1.9,<2"
cmake = ">=3.31,<4"
make = ">=4.4,<5"
mpi4py = ">=4,<5"
# PETSC_DIR points at the in-repo source checkout; PETSC_ARCH (set per
# platform below) selects the matching MPI-specific build directory.
[feature.amr.activation.env]
PETSC_DIR = "$PIXI_PROJECT_ROOT/petsc-custom/petsc"
# macOS default: OpenMPI (see header note on the MPICH 4.x bug).
[feature.amr.target.osx-arm64.dependencies]
openmpi = ">=5.0,<6"
hdf5 = { version = ">=1.14,<2", build = "*openmpi*" }
h5py = { version = ">=3.12,<4", build = "*openmpi*" }
[feature.amr.target.osx-arm64.activation.env]
PETSC_ARCH = "petsc-4-uw-openmpi"
# Linux default: MPICH, selected via the "*mpich*" build strings.
[feature.amr.target.linux-64.dependencies]
hdf5 = { version = ">=1.14,<2", build = "*mpich*" }
h5py = { version = ">=3.12,<4", build = "*mpich*" }
[feature.amr.target.linux-64.activation.env]
PETSC_ARCH = "petsc-4-uw-mpich"
[feature.amr.tasks]
petsc-local-build = { cmd = "./build-petsc.sh", cwd = "petsc-custom" }
petsc-local-clean = { cmd = "./build-petsc.sh clean", cwd = "petsc-custom" }
# ============================================
# CUSTOM PETSC (AMR) — explicit MPI overrides
# ============================================
# --- Force MPICH (e.g. macOS user needing MPICH for compatibility) ---
# NOTE: TOML has no includes, so this duplicates the [feature.amr]
# toolchain; keep the compiler/cmake/make pins in sync when bumping.
[feature.amr-mpich.dependencies]
gfortran = ">=14.2,<15"
cxx-compiler = ">=1.9,<2"
cmake = ">=3.31,<4"
make = ">=4.4,<5"
mpich = ">=4.3,<5"
mpi4py = ">=4,<5"
hdf5 = { version = ">=1.14,<2", build = "*mpich*" }
h5py = { version = ">=3.12,<4", build = "*mpich*" }
# Same PETSC_DIR as [feature.amr]; PETSC_ARCH selects the MPICH build.
[feature.amr-mpich.activation.env]
PETSC_DIR = "$PIXI_PROJECT_ROOT/petsc-custom/petsc"
PETSC_ARCH = "petsc-4-uw-mpich"
[feature.amr-mpich.tasks]
petsc-local-build = { cmd = "./build-petsc.sh", cwd = "petsc-custom" }
petsc-local-clean = { cmd = "./build-petsc.sh clean", cwd = "petsc-custom" }
# --- Force OpenMPI (e.g. Linux cluster with OpenMPI) ---
# NOTE: duplicates the [feature.amr] toolchain; keep pins in sync.
[feature.amr-openmpi.dependencies]
gfortran = ">=14.2,<15"
cxx-compiler = ">=1.9,<2"
cmake = ">=3.31,<4"
make = ">=4.4,<5"
openmpi = ">=5.0,<6"
mpi4py = ">=4,<5"
hdf5 = { version = ">=1.14,<2", build = "*openmpi*" }
h5py = { version = ">=3.12,<4", build = "*openmpi*" }
# Same PETSC_DIR as [feature.amr]; PETSC_ARCH selects the OpenMPI build.
[feature.amr-openmpi.activation.env]
PETSC_DIR = "$PIXI_PROJECT_ROOT/petsc-custom/petsc"
PETSC_ARCH = "petsc-4-uw-openmpi"
[feature.amr-openmpi.tasks]
petsc-local-build = { cmd = "./build-petsc.sh", cwd = "petsc-custom" }
petsc-local-clean = { cmd = "./build-petsc.sh clean", cwd = "petsc-custom" }
# ============================================
# RUNTIME FEATURE (for tutorials/examples)
# ============================================
# Visualization and interactive computing
[feature.runtime.dependencies]
# VTK-based 3D visualization stack; trame serves interactive scenes
# to the browser.
vtk = ">=9.5.2,<10"
pyvista = ">=0.46,<0.47"
trame = ">=3.8,<4"
trame-vuetify = ">=2.8,<3"
trame-vtk = ">=2.8,<3"

# Notebook front-end. The kernel side (ipykernel, pixi-kernel) lives in
# the base dependencies so every environment can host a kernel.
ipywidgets = ">=8.1,<9"
jupyterlab = ">=4.3,<5"
jupyter-server-proxy = ">=4,<5" # Required for trame backend in remote Jupyter
# ============================================
# DEV FEATURE (for development)
# ============================================
# Code quality, documentation, AI assistance
[feature.dev.dependencies]
# Code quality
black = ">=24,<25"
mypy = ">=1.8,<2"
ipdb = ">=0.13,<0.14"
# Notebook tooling / AI assistance (jupyter-ai is the Jupyter AI chat
# extension; jupytext pairs notebooks with plain-text sources)
jupyter-ai = ">=2.31,<3"
jupytext = ">=1.16,<2"
# Sphinx API documentation
sphinx = "*"
furo = "*"
myst-nb = "*"
# Claude Code runs on Node (installed via the install-claude task below)
nodejs = ">=18"
[feature.dev.pypi-dependencies]
anthropic = "*"
sphinx-math-dollar = "*"
sphinxcontrib-mermaid = "*"
[feature.dev.tasks]
# One-time global npm install of the Claude Code CLI.
install-claude = "npm install -g @anthropic-ai/claude-code"
claude = "claude"
docs-build = { cmd = "python -m sphinx -b html docs docs/_build/html", description = "Build documentation with Sphinx" }
api-docs-build = { cmd = "python -m sphinx -b html docs/api docs/api/_build/html" }
api-docs-clean = { cmd = "rm -rf docs/api/_build" }
docs-audit = { cmd = "python scripts/docs_audit.py", description = "Audit API documentation coverage" }
# ============================================
# ENVIRONMENT DEFINITIONS
# ============================================
# Primary environments use platform-default MPI (OpenMPI on macOS,
# MPICH on Linux). Override environments force a specific MPI.
# Environments sharing a solve-group are solved together, so packages
# common to them resolve to identical versions within the group.
[environments]
# --- Primary (platform-default MPI) ---
default = { features = ["conda-petsc"], solve-group = "default" }
runtime = { features = ["conda-petsc", "runtime"], solve-group = "default" }
dev = { features = ["conda-petsc", "runtime", "dev"], solve-group = "default" }
amr = { features = ["amr"], solve-group = "amr" }
amr-runtime = { features = ["amr", "runtime"], solve-group = "amr" }
amr-dev = { features = ["amr", "runtime", "dev"], solve-group = "amr" }
# --- Explicit MPICH (override for macOS, or when MPICH is required) ---
mpich = { features = ["conda-petsc-mpich"], solve-group = "mpich" }
mpich-dev = { features = ["conda-petsc-mpich", "runtime", "dev"], solve-group = "mpich" }
amr-mpich = { features = ["amr-mpich"], solve-group = "amr-mpich" }
amr-mpich-dev = { features = ["amr-mpich", "runtime", "dev"], solve-group = "amr-mpich" }
# --- Explicit OpenMPI (override for Linux clusters, or when OpenMPI is required) ---
openmpi = { features = ["conda-petsc-openmpi"], solve-group = "openmpi" }
openmpi-dev = { features = ["conda-petsc-openmpi", "runtime", "dev"], solve-group = "openmpi" }
amr-openmpi = { features = ["amr-openmpi"], solve-group = "amr-openmpi" }
amr-openmpi-dev = { features = ["amr-openmpi", "runtime", "dev"], solve-group = "amr-openmpi" }