From 25bf2620321fe95e0e71a0d0354f7abc33bd1f47 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 14 Jan 2026 15:46:13 -0700 Subject: [PATCH 01/45] docs: add draft YAML configuration and workflow specification for PLEIADES --- docs/Notes/pleiades_config_workflow.md | 195 +++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 docs/Notes/pleiades_config_workflow.md diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md new file mode 100644 index 00000000..508288f0 --- /dev/null +++ b/docs/Notes/pleiades_config_workflow.md @@ -0,0 +1,195 @@ +PLEIADES YAML config + workflow (draft) +====================================== + +Purpose +------- +This note defines the draft YAML structure that instructs PLEIADES how to process data, +configure SAMMY, and execute fitting routines. It reflects the desired directory layout +for multi-fit workflows and serves as a working specification for end-to-end operation. + +Backbone + reproducibility intent +--------------------------------- +The YAML file is intended to be the backbone structure for running PLEIADES. It should: +- Define the complete workspace layout for consistent file placement. +- Declare datasets and fit routines in a single, structured source of truth. +- Record each run as an append-only entry for analysis provenance and reproducibility. +- Capture configuration inputs (fit options, nuclear parameters, data sources) alongside + execution details (backend, paths, outputs) to enable re-running or auditing results. +This makes the config both an operational entry point and a durable record of analysis. + +Directory layout +---------------- +working_dir/ + endf_dir/ + isotope_dir_1/ + results_dir/ + dummy.inp + dummy.par + isotope_dir_2/ + ... + fitting_dir/ + / + results_dir/ + input.inp + params.par + results_dir/ + run_results_*.json + results_map.json + data_dir/ + .dat + image_dir/ + ... 
+ config.yaml + +Notes: +- The SAMMY fit directory is named after the routine_id. +- The data file for a run is keyed by routine_id: data_dir/.dat +- endf_dir should map to PleiadesConfig.nuclear_data_cache_dir so NuclearDataManager + uses it for ENDF caching. + +Draft YAML schema (example) +--------------------------- +pleiades_version: 1 + +workspace: + root: /path/to/working_dir + endf_dir: ${workspace.root}/endf_dir + fitting_dir: ${workspace.root}/fitting_dir + results_dir: ${workspace.root}/results_dir + data_dir: ${workspace.root}/data_dir + image_dir: ${workspace.root}/image_dir + +nuclear: + data_cache_dir: ${workspace.endf_dir} + sources: + DIRECT: https://www-nds.iaea.org/public/download-endf + API: https://www-nds.iaea.org/exfor/servlet + default_library: ENDF-B-VIII.0 + +sammy: + backend: local # local | docker | nova + local: + sammy_executable: /path/to/sammy + shell_path: /bin/bash + env_vars: {} + docker: + image_name: kedokudo/sammy-docker + container_working_dir: /sammy/work + container_data_dir: /sammy/data + nova: + url: ${NOVA_URL} + api_key: ${NOVA_API_KEY} + tool_id: neutrons_imaging_sammy + timeout: 3600 + +datasets: + example_dataset: + description: "Natural Si transmission" + data_kind: raw_imaging # raw_imaging | sammy_dat | sammy_twenty + raw: + facility: ornl + sample_folders: + - /path/to/sample/run_1 + ob_folders: + - /path/to/ob/run_1 + nexus_dir: /path/to/nexus + roi: + x1: 0 + y1: 0 + width: 512 + height: 512 + image_dir: ${workspace.image_dir} + processed: + transmission_files: [] + energy_units: eV + cross_section_units: barn + sammy_data_file: ${workspace.data_dir}/example_fit.dat + metadata: {} + +fit_routines: + example_fit: + dataset_id: example_dataset + mode: fitting # fitting | endf_extraction | multi_isotope + update_from_results: false + fit_config: + fit_title: "SAMMY Fit" + tolerance: null + max_iterations: 1 + i_correlation: 50 + max_cpu_time: null + max_wall_time: null + max_memory: null + max_disk: null + 
nuclear_params: {} # pleiades.nuclear.models.nuclearParameters + physics_params: {} # pleiades.experimental.models.PhysicsParameters + data_params: {} # pleiades.sammy.data.options.SammyData + options_and_routines: {} # pleiades.sammy.fitting.options.FitOptions + io: + input_title: null + input_file: null + parameter_file: null + +runs: + - run_id: run_001 + routine_id: example_fit + dataset_id: example_dataset + created_at: "2026-01-14T12:00:00Z" + fit_dir: ${workspace.fitting_dir}/${routine_id} + results_dir: ${workspace.fitting_dir}/${routine_id}/results_dir + input_files: + inp: ${workspace.fitting_dir}/${routine_id}/input.inp + par: ${workspace.fitting_dir}/${routine_id}/params.par + data: ${workspace.data_dir}/${routine_id}.dat + output_files: + lpt: ${workspace.fitting_dir}/${routine_id}/results_dir/SAMMY.LPT + lst: ${workspace.fitting_dir}/${routine_id}/results_dir/SAMMY.LST + sammy_par: ${workspace.fitting_dir}/${routine_id}/results_dir/SAMMY.PAR + sammy_execution: + backend: local + success: false + console_output: ${workspace.fitting_dir}/${routine_id}/results_dir/sammy_console.txt + results: + run_results_path: ${workspace.results_dir}/run_results_001.json + summary: + chi_squared: null + dof: null + reduced_chi_squared: null + +results_index: + per_fit: [] + aggregate: ${workspace.results_dir}/results_map.json + +How this config is used +----------------------- +1) Load config.yaml into PleiadesConfig (workspace + nuclear + sammy + datasets + routines). +2) Prepare workspace directories from workspace.* (create if missing). +3) Resolve dataset inputs: + - raw_imaging: run normalization to produce transmission data, then export + to data_dir/.dat (or .twenty). + - sammy_dat/sammy_twenty: use sammy_data_file or input_files.data directly. +4) Cache isotope data with NuclearDataManager: + - Use nuclear.data_cache_dir (workspace.endf_dir) and default_library. + - Download/cache ENDF data for isotopes referenced by fit_config.nuclear_params. 
+5) Create a run record: + - Append a new entry to runs with run_id, routine_id, dataset_id, and paths. + - Capture runtime metadata (timestamps, user, host, software versions). +6) Build SAMMY inputs: + - Construct FitConfig from fit_routines..fit_config. + - Write input.inp and params.par via InpManager and ParManager into the fit_dir. +7) Execute SAMMY: + - Instantiate SammyRunner via SammyFactory using sammy.backend. + - Run SAMMY with SammyFiles; collect output files in results_dir. +8) Parse outputs: + - LptManager and LstManager create RunResults. + - Serialize RunResults to JSON and store the path in runs[].results. +9) Record provenance and reproducibility: + - Persist config snapshot, SAMMY outputs, and run metadata together. + - Store git commit, environment, and dependency versions for re-running. +10) Optional iteration: + - If update_from_results is true, update FitConfig for the next run. + +ENDF integration +---------------- +- NuclearDataManager uses PleiadesConfig.nuclear_data_cache_dir as its cache root. +- The YAML field nuclear.data_cache_dir should be mapped to that attribute so ENDF + downloads and cached files live under workspace.endf_dir. From a17c718f83181b2504f12c943fae530ca99cdf0e Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 14 Jan 2026 15:46:34 -0700 Subject: [PATCH 02/45] docs: update PLEIADES YAML config version and refine workflow steps --- docs/Notes/pleiades_config_workflow.md | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md index 508288f0..45b5e8ec 100644 --- a/docs/Notes/pleiades_config_workflow.md +++ b/docs/Notes/pleiades_config_workflow.md @@ -49,7 +49,7 @@ Notes: Draft YAML schema (example) --------------------------- -pleiades_version: 1 +pleiades_version: 2 workspace: root: /path/to/working_dir @@ -162,30 +162,28 @@ results_index: How this config is used ----------------------- 1) Load config.yaml into PleiadesConfig (workspace + nuclear + sammy + datasets + routines). -2) Prepare workspace directories from workspace.* (create if missing). -3) Resolve dataset inputs: +2) Resolve dataset inputs: - raw_imaging: run normalization to produce transmission data, then export to data_dir/.dat (or .twenty). - sammy_dat/sammy_twenty: use sammy_data_file or input_files.data directly. -4) Cache isotope data with NuclearDataManager: - - Use nuclear.data_cache_dir (workspace.endf_dir) and default_library. - - Download/cache ENDF data for isotopes referenced by fit_config.nuclear_params. -5) Create a run record: +3) Cache isotope data with NuclearDataManager + - if isotopic data is not already cached then download isotopic data using parameters referenced by fit_config.nuclear_params. +4) Create a run record: - Append a new entry to runs with run_id, routine_id, dataset_id, and paths. - Capture runtime metadata (timestamps, user, host, software versions). -6) Build SAMMY inputs: +5) Build SAMMY inputs: - Construct FitConfig from fit_routines..fit_config. - Write input.inp and params.par via InpManager and ParManager into the fit_dir. -7) Execute SAMMY: +6) Execute SAMMY: - Instantiate SammyRunner via SammyFactory using sammy.backend. 
- Run SAMMY with SammyFiles; collect output files in results_dir. -8) Parse outputs: +7) Parse outputs: - LptManager and LstManager create RunResults. - Serialize RunResults to JSON and store the path in runs[].results. -9) Record provenance and reproducibility: +8) Record provenance and reproducibility: - Persist config snapshot, SAMMY outputs, and run metadata together. - Store git commit, environment, and dependency versions for re-running. -10) Optional iteration: +9) Optional iteration: - If update_from_results is true, update FitConfig for the next run. ENDF integration From 8a885e763edb0bd7c6b8c957a14975de6ffb2e25 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Tue, 20 Jan 2026 12:48:30 -0700 Subject: [PATCH 03/45] refactor: enhance configuration management with Pydantic models and path normalization --- src/pleiades/utils/config.py | 166 +++++++++++++++++++++++++++++------ 1 file changed, 141 insertions(+), 25 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 93d4b6b4..feec5ab2 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -2,47 +2,162 @@ """Global configuration management for PLEIADES.""" import os -from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, Optional import yaml +from pydantic import BaseModel, ConfigDict, Field, model_validator +DEFAULT_NUCLEAR_SOURCES = { + "DIRECT": "https://www-nds.iaea.org/public/download-endf", + "API": "https://www-nds.iaea.org/exfor/servlet", +} -@dataclass -class PleiadesConfig: + +def _expand_path(value: Optional[Any], workspace: Optional["WorkspaceConfig"] = None) -> Optional[Path]: + if value is None: + return None + + raw = str(value) + if workspace is not None: + if workspace.root is not None: + raw = raw.replace("${workspace.root}", str(workspace.root)) + if workspace.endf_dir is not None: + raw = raw.replace("${workspace.endf_dir}", str(workspace.endf_dir)) + if workspace.fitting_dir is 
not None: + raw = raw.replace("${workspace.fitting_dir}", str(workspace.fitting_dir)) + if workspace.results_dir is not None: + raw = raw.replace("${workspace.results_dir}", str(workspace.results_dir)) + if workspace.data_dir is not None: + raw = raw.replace("${workspace.data_dir}", str(workspace.data_dir)) + if workspace.image_dir is not None: + raw = raw.replace("${workspace.image_dir}", str(workspace.image_dir)) + + raw = os.path.expandvars(os.path.expanduser(raw)) + return Path(raw) + + +class WorkspaceConfig(BaseModel): + """Workspace directory configuration for PLEIADES.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + root: Optional[Path] = None + endf_dir: Optional[Path] = None + fitting_dir: Optional[Path] = None + results_dir: Optional[Path] = None + data_dir: Optional[Path] = None + image_dir: Optional[Path] = None + + @model_validator(mode="after") + def _expand_paths(self) -> "WorkspaceConfig": + self.root = _expand_path(self.root) + self.endf_dir = _expand_path(self.endf_dir, self) + self.fitting_dir = _expand_path(self.fitting_dir, self) + self.results_dir = _expand_path(self.results_dir, self) + self.data_dir = _expand_path(self.data_dir, self) + self.image_dir = _expand_path(self.image_dir, self) + return self + + +class NuclearConfig(BaseModel): + """Nuclear data configuration for PLEIADES.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + data_cache_dir: Optional[Path] = None + sources: Dict[str, str] = Field(default_factory=lambda: dict(DEFAULT_NUCLEAR_SOURCES)) + default_library: Optional[str] = None + + @model_validator(mode="after") + def _expand_paths(self) -> "NuclearConfig": + self.data_cache_dir = _expand_path(self.data_cache_dir) + return self + + +class SammyConfig(BaseModel): + """SAMMY backend configuration for PLEIADES.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + backend: Optional[str] = None + local: Dict[str, Any] = Field(default_factory=dict) + docker: Dict[str, Any] = 
Field(default_factory=dict) + nova: Dict[str, Any] = Field(default_factory=dict) + + +class PleiadesConfig(BaseModel): """Global configuration for PLEIADES.""" + model_config = ConfigDict(arbitrary_types_allowed=True) + + pleiades_version: Optional[int] = None + + workspace: Optional[WorkspaceConfig] = None + nuclear: Optional[NuclearConfig] = None + sammy: Optional[SammyConfig] = None + + datasets: Dict[str, Dict[str, Any]] = Field(default_factory=dict) + fit_routines: Dict[str, Dict[str, Any]] = Field(default_factory=dict) + runs: list[Dict[str, Any]] = Field(default_factory=list) + results_index: Dict[str, Any] = Field(default_factory=dict) + # Nuclear data configuration - nuclear_data_cache_dir: Path = field(default_factory=lambda: Path(os.path.expanduser("~/.pleiades/nuclear_data"))) + nuclear_data_cache_dir: Path = Field(default_factory=lambda: Path(os.path.expanduser("~/.pleiades/nuclear_data"))) # Nuclear data retrieval methods and URLs - nuclear_data_sources: Dict[str, str] = field( - default_factory=lambda: { - "DIRECT": "https://www-nds.iaea.org/public/download-endf", # IAEA direct file download - "API": "https://www-nds.iaea.org/exfor/servlet", # IAEA EXFOR API for section retrieval - } - ) + nuclear_data_sources: Dict[str, str] = Field(default_factory=lambda: dict(DEFAULT_NUCLEAR_SOURCES)) # Other configuration sections can be added here as needed - def __post_init__(self): - """Ensure Path objects for all directory configurations.""" - self.nuclear_data_cache_dir = Path(self.nuclear_data_cache_dir) + @model_validator(mode="after") + def _normalize_config(self) -> "PleiadesConfig": + """Normalize paths and keep nuclear fields in sync.""" + if self.workspace: + self.nuclear_data_cache_dir = _expand_path(self.nuclear_data_cache_dir, self.workspace) + else: + self.nuclear_data_cache_dir = _expand_path(self.nuclear_data_cache_dir) + + if self.nuclear is None: + self.nuclear = NuclearConfig( + data_cache_dir=self.nuclear_data_cache_dir, + 
sources=dict(self.nuclear_data_sources), + ) + else: + if self.workspace: + self.nuclear.data_cache_dir = _expand_path(self.nuclear.data_cache_dir, self.workspace) + else: + self.nuclear.data_cache_dir = _expand_path(self.nuclear.data_cache_dir) + + if self.nuclear.data_cache_dir is None: + self.nuclear.data_cache_dir = self.nuclear_data_cache_dir + else: + self.nuclear_data_cache_dir = self.nuclear.data_cache_dir + + if not self.nuclear.sources: + self.nuclear.sources = dict(self.nuclear_data_sources) + else: + self.nuclear_data_sources = dict(self.nuclear.sources) + return self def ensure_directories(self): """Ensure all configured directories exist.""" self.nuclear_data_cache_dir.mkdir(parents=True, exist_ok=True) + if self.workspace: + for path in ( + self.workspace.root, + self.workspace.endf_dir, + self.workspace.fitting_dir, + self.workspace.results_dir, + self.workspace.data_dir, + self.workspace.image_dir, + ): + if path is not None: + path.mkdir(parents=True, exist_ok=True) def to_dict(self) -> Dict[str, Any]: """Convert configuration to a dictionary.""" - result = {} - for key, value in self.__dict__.items(): - if isinstance(value, Path): - result[key] = str(value) - else: - result[key] = value - return result + return self.model_dump(mode="json") def save(self, path: Optional[Path] = None) -> Path: """ @@ -62,7 +177,7 @@ def save(self, path: Optional[Path] = None) -> Path: # Save config as YAML with open(path, "w") as f: - yaml.dump(self.to_dict(), f) + yaml.safe_dump(self.to_dict(), f, sort_keys=False) return path @@ -89,11 +204,12 @@ def load(cls, path: Optional[Path] = None) -> "PleiadesConfig": if not config_dict: return cls() - # Convert string paths back to Path objects - if "nuclear_data_cache_dir" in config_dict: - config_dict["nuclear_data_cache_dir"] = Path(config_dict["nuclear_data_cache_dir"]) + return cls.from_dict(config_dict) - return cls(**config_dict) + @classmethod + def from_dict(cls, config_dict: Dict[str, Any]) -> "PleiadesConfig": 
+ """Build a configuration from a dictionary.""" + return cls.model_validate(config_dict or {}) # Global configuration instance From 42c4bcdfea5056e3b14879eb7be8d87d5c4a0724 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Tue, 20 Jan 2026 12:48:41 -0700 Subject: [PATCH 04/45] test: enhance PleiadesConfig tests for nuclear data attributes and initialization --- tests/unit/pleiades/utils/test_utils_config.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index c160c243..5027c2a3 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -27,6 +27,9 @@ def test_default_initialization(self): assert "API" in config.nuclear_data_sources assert config.nuclear_data_sources["DIRECT"] == "https://www-nds.iaea.org/public/download-endf" assert config.nuclear_data_sources["API"] == "https://www-nds.iaea.org/exfor/servlet" + assert config.nuclear is not None + assert config.nuclear.data_cache_dir == expected_path + assert config.nuclear.sources == config.nuclear_data_sources def test_custom_initialization(self): """Test custom initialization of PleiadesConfig.""" @@ -37,13 +40,18 @@ def test_custom_initialization(self): assert config.nuclear_data_cache_dir == custom_path assert config.nuclear_data_sources == custom_sources + assert config.nuclear is not None + assert config.nuclear.data_cache_dir == custom_path + assert config.nuclear.sources == custom_sources def test_post_init_conversion(self): - """Test __post_init__ conversion of string paths to Path objects.""" + """Test conversion of string paths to Path objects.""" config = PleiadesConfig(nuclear_data_cache_dir="/test/string/path") assert isinstance(config.nuclear_data_cache_dir, Path) assert config.nuclear_data_cache_dir == Path("/test/string/path") + assert config.nuclear is not None + assert config.nuclear.data_cache_dir == 
Path("/test/string/path") def test_ensure_directories(self, monkeypatch): """Test directory creation functionality.""" @@ -105,6 +113,9 @@ def test_save_and_load(self): # Verify loaded config matches original assert loaded_config.nuclear_data_cache_dir == temp_path assert loaded_config.nuclear_data_sources == custom_sources + assert loaded_config.nuclear is not None + assert loaded_config.nuclear.data_cache_dir == temp_path + assert loaded_config.nuclear.sources == custom_sources def test_load_nonexistent_file(self): """Test loading from nonexistent file returns default config.""" From 4b47d4820f4fd5355d8dee4266b2614549ca1013 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Tue, 20 Jan 2026 15:17:21 -0700 Subject: [PATCH 05/45] refactor: streamline path expansion in configuration and enhance workspace handling --- src/pleiades/utils/config.py | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index feec5ab2..b5a22a44 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -20,20 +20,25 @@ def _expand_path(value: Optional[Any], workspace: Optional["WorkspaceConfig"] = raw = str(value) if workspace is not None: - if workspace.root is not None: - raw = raw.replace("${workspace.root}", str(workspace.root)) - if workspace.endf_dir is not None: - raw = raw.replace("${workspace.endf_dir}", str(workspace.endf_dir)) - if workspace.fitting_dir is not None: - raw = raw.replace("${workspace.fitting_dir}", str(workspace.fitting_dir)) - if workspace.results_dir is not None: - raw = raw.replace("${workspace.results_dir}", str(workspace.results_dir)) - if workspace.data_dir is not None: - raw = raw.replace("${workspace.data_dir}", str(workspace.data_dir)) - if workspace.image_dir is not None: - raw = raw.replace("${workspace.image_dir}", str(workspace.image_dir)) + mapping = { + "${workspace.root}": workspace.root, + "${workspace.endf_dir}": 
workspace.endf_dir, + "${workspace.fitting_dir}": workspace.fitting_dir, + "${workspace.results_dir}": workspace.results_dir, + "${workspace.data_dir}": workspace.data_dir, + "${workspace.image_dir}": workspace.image_dir, + } + if raw in mapping: + replacement = mapping[raw] + if replacement is None or str(replacement) == raw: + return None + for token, path in mapping.items(): + if path is not None: + raw = raw.replace(token, str(path)) raw = os.path.expandvars(os.path.expanduser(raw)) + if "${workspace." in raw: + return None return Path(raw) @@ -117,6 +122,7 @@ def _normalize_config(self) -> "PleiadesConfig": self.nuclear_data_cache_dir = _expand_path(self.nuclear_data_cache_dir, self.workspace) else: self.nuclear_data_cache_dir = _expand_path(self.nuclear_data_cache_dir) + self.workspace = WorkspaceConfig(endf_dir=self.nuclear_data_cache_dir) if self.nuclear is None: self.nuclear = NuclearConfig( @@ -138,6 +144,8 @@ def _normalize_config(self) -> "PleiadesConfig": self.nuclear.sources = dict(self.nuclear_data_sources) else: self.nuclear_data_sources = dict(self.nuclear.sources) + if self.workspace and self.workspace.endf_dir is None: + self.workspace.endf_dir = self.nuclear_data_cache_dir return self def ensure_directories(self): From 798735fe890276f82710c12b72c48e69af801d87 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Tue, 20 Jan 2026 16:14:48 -0700 Subject: [PATCH 06/45] feat: enhance nuclear configuration with isotope management and default library handling --- src/pleiades/utils/config.py | 132 ++++++++++++++++++++++++++++++++++- 1 file changed, 130 insertions(+), 2 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index b5a22a44..bc13413d 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -3,11 +3,14 @@ import os from pathlib import Path -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional import yaml from pydantic import BaseModel, ConfigDict, Field, model_validator +from pleiades.nuclear.models import DataRetrievalMethod, EndfLibrary, IsotopeParameters, nuclearParameters +from pleiades.utils.helper import VaryFlag + DEFAULT_NUCLEAR_SOURCES = { "DIRECT": "https://www-nds.iaea.org/public/download-endf", "API": "https://www-nds.iaea.org/exfor/servlet", @@ -72,7 +75,8 @@ class NuclearConfig(BaseModel): data_cache_dir: Optional[Path] = None sources: Dict[str, str] = Field(default_factory=lambda: dict(DEFAULT_NUCLEAR_SOURCES)) - default_library: Optional[str] = None + default_library: Optional[EndfLibrary] = None + isotopes: List["IsotopeConfig"] = Field(default_factory=list) @model_validator(mode="after") def _expand_paths(self) -> "NuclearConfig": @@ -144,10 +148,119 @@ def _normalize_config(self) -> "PleiadesConfig": self.nuclear.sources = dict(self.nuclear_data_sources) else: self.nuclear_data_sources = dict(self.nuclear.sources) + if self.nuclear.default_library is None: + self.nuclear.default_library = EndfLibrary.ENDF_B_VIII_0 if self.workspace and self.workspace.endf_dir is None: self.workspace.endf_dir = self.nuclear_data_cache_dir + + default_library = self.nuclear.default_library or EndfLibrary.ENDF_B_VIII_0 + self.nuclear.isotopes = [ + IsotopeConfig(**entry) if isinstance(entry, dict) else entry for entry in self.nuclear.isotopes + ] + for entry in 
self.nuclear.isotopes: + if entry.endf_library is None: + entry.endf_library = default_library + + for routine in self.fit_routines.values(): + routine_nuclear = routine.get("nuclear") or {} + routine_isotopes = routine_nuclear.get("isotopes") + if routine_isotopes is None: + continue + updated: List[IsotopeConfig] = [] + for entry in routine_isotopes: + if isinstance(entry, dict): + entry = IsotopeConfig(**entry) + if entry.endf_library is None: + entry.endf_library = default_library + updated.append(entry) + routine_nuclear["isotopes"] = updated + routine["nuclear"] = routine_nuclear + return self + def build_nuclear_params(self, routine_id: str) -> nuclearParameters: + """Build nuclearParameters from configured isotope entries.""" + routine = self.fit_routines.get(routine_id, {}) + routine_isotopes = (routine.get("nuclear") or {}).get("isotopes") + isotope_entries = routine_isotopes if routine_isotopes is not None else self.nuclear.isotopes + if not isotope_entries: + raise ValueError("No isotopes configured. 
Set fit_routines..nuclear.isotopes or nuclear.isotopes.") + from pleiades.nuclear.isotopes.manager import IsotopeManager + + manager = IsotopeManager() + isotopes: List[IsotopeParameters] = [] + + default_library = self.nuclear.default_library or EndfLibrary.ENDF_B_VIII_0 + + for entry in isotope_entries: + if isinstance(entry, dict): + entry = IsotopeConfig(**entry) + + isotope_params = manager.get_isotope_parameters_from_isotope_string(entry.isotope) + if isotope_params is None: + raise ValueError(f"Isotope not found: {entry.isotope}") + + isotope_params.abundance = entry.abundance + isotope_params.uncertainty = entry.uncertainty + isotope_params.vary_abundance = entry.vary_abundance + isotope_params.endf_library = entry.endf_library or default_library + + isotopes.append(isotope_params) + + return nuclearParameters(isotopes=isotopes) + + def ensure_endf_cache( + self, + routine_id: Optional[str] = None, + method: DataRetrievalMethod = DataRetrievalMethod.DIRECT, + output_dir: Optional[Path] = None, + use_cache: bool = True, + ) -> List[Path]: + """Ensure ENDF cache files exist for configured isotopes.""" + if routine_id: + routine = self.fit_routines.get(routine_id, {}) + routine_isotopes = (routine.get("nuclear") or {}).get("isotopes") + isotope_entries = routine_isotopes if routine_isotopes is not None else self.nuclear.isotopes + else: + isotope_entries = self.nuclear.isotopes + + if not isotope_entries: + raise ValueError("No isotopes configured. 
Set fit_routines..nuclear.isotopes or nuclear.isotopes.") + + from pleiades.nuclear.manager import NuclearDataManager + + output_dir = ( + Path(output_dir) + if output_dir is not None + else ( + self.workspace.endf_dir if self.workspace and self.workspace.endf_dir else self.nuclear_data_cache_dir + ) + ) + output_dir.mkdir(parents=True, exist_ok=True) + + set_config(self) + manager = NuclearDataManager() + default_library = self.nuclear.default_library or EndfLibrary.ENDF_B_VIII_0 + + outputs: List[Path] = [] + for entry in isotope_entries: + if isinstance(entry, dict): + entry = IsotopeConfig(**entry) + isotope_info = manager.isotope_manager.get_isotope_info(entry.isotope) + if isotope_info is None: + raise ValueError(f"Isotope not found: {entry.isotope}") + library = entry.endf_library or default_library + output_path = manager.download_endf_resonance_file( + isotope=isotope_info, + library=library, + output_dir=str(output_dir), + method=method, + use_cache=use_cache, + ) + outputs.append(output_path) + + return outputs + def ensure_directories(self): """Ensure all configured directories exist.""" self.nuclear_data_cache_dir.mkdir(parents=True, exist_ok=True) @@ -220,6 +333,21 @@ def from_dict(cls, config_dict: Dict[str, Any]) -> "PleiadesConfig": return cls.model_validate(config_dict or {}) +class IsotopeConfig(BaseModel): + """Configuration for a single isotope entry.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + isotope: str + abundance: Optional[float] = None + uncertainty: Optional[float] = None + vary_abundance: Optional[VaryFlag] = None + endf_library: Optional[EndfLibrary] = None + + +NuclearConfig.model_rebuild() + + # Global configuration instance _config: Optional[PleiadesConfig] = None From a17b03d9e1a9190a54185cc6af31409efd1e3476 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Tue, 20 Jan 2026 17:02:34 -0700 Subject: [PATCH 07/45] feat: enhance PleiadesConfig with nuclear parameters handling and routine directory creation --- src/pleiades/utils/config.py | 63 +++++++++++++++++++++++++++++++++--- 1 file changed, 59 insertions(+), 4 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index bc13413d..4bef9ea6 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -178,11 +178,16 @@ def _normalize_config(self) -> "PleiadesConfig": return self - def build_nuclear_params(self, routine_id: str) -> nuclearParameters: + def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParameters: """Build nuclearParameters from configured isotope entries.""" - routine = self.fit_routines.get(routine_id, {}) - routine_isotopes = (routine.get("nuclear") or {}).get("isotopes") - isotope_entries = routine_isotopes if routine_isotopes is not None else self.nuclear.isotopes + isotope_entries = None + if routine_id: + routine = self.fit_routines.get(routine_id, {}) + routine_isotopes = (routine.get("nuclear") or {}).get("isotopes") + if routine_isotopes: + isotope_entries = routine_isotopes + if isotope_entries is None: + isotope_entries = self.nuclear.isotopes if not isotope_entries: raise ValueError("No isotopes configured. 
Set fit_routines..nuclear.isotopes or nuclear.isotopes.") from pleiades.nuclear.isotopes.manager import IsotopeManager @@ -209,6 +214,14 @@ def build_nuclear_params(self, routine_id: str) -> nuclearParameters: return nuclearParameters(isotopes=isotopes) + def populate_fit_config_isotopes(self, fit_config: Any, routine_id: Optional[str] = None) -> Any: + """Populate fit_config.nuclear_params.isotopes from config if missing.""" + if not hasattr(fit_config, "nuclear_params"): + raise ValueError("fit_config must have a nuclear_params attribute") + if not fit_config.nuclear_params.isotopes: + fit_config.nuclear_params = self.build_nuclear_params(routine_id) + return fit_config + def ensure_endf_cache( self, routine_id: Optional[str] = None, @@ -276,6 +289,48 @@ def ensure_directories(self): if path is not None: path.mkdir(parents=True, exist_ok=True) + def create_routine_dirs( + self, + base_routine_ids: Optional[List[str]] = None, + timestamp: Optional[str] = None, + ) -> List[Dict[str, Path]]: + """Create timestamped routine directories under workspace.fitting_dir.""" + if not self.workspace or not self.workspace.fitting_dir: + raise ValueError("workspace.fitting_dir is required to create routine directories") + + routine_ids = base_routine_ids or list(self.fit_routines.keys()) + if not routine_ids: + raise ValueError("No fit_routines defined to create routine directories") + + if timestamp is None: + from datetime import datetime, timezone + + timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ") + + created: List[Dict[str, Path]] = [] + fitting_dir = self.workspace.fitting_dir + + for base_routine_id in routine_ids: + routine_id = f"{base_routine_id}_{timestamp}" + fit_dir = fitting_dir / routine_id + results_dir = fit_dir / "results_dir" + + fit_dir.mkdir(parents=True, exist_ok=True) + results_dir.mkdir(parents=True, exist_ok=True) + + created.append( + { + "routine_id": routine_id, + "fit_dir": fit_dir, + "results_dir": results_dir, + } + ) + + if 
self.workspace.results_dir: + self.workspace.results_dir.mkdir(parents=True, exist_ok=True) + + return created + def to_dict(self) -> Dict[str, Any]: """Convert configuration to a dictionary.""" return self.model_dump(mode="json") From f8e7827dfbd243474260bad03605a103a5135c99 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Tue, 20 Jan 2026 17:03:46 -0700 Subject: [PATCH 08/45] feat: integrate isotopes management into nuclear configuration and update data caching logic --- docs/Notes/pleiades_config_workflow.md | 34 ++++++++++++++++++-------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md index 45b5e8ec..7ef740f5 100644 --- a/docs/Notes/pleiades_config_workflow.md +++ b/docs/Notes/pleiades_config_workflow.md @@ -60,11 +60,18 @@ workspace: image_dir: ${workspace.root}/image_dir nuclear: - data_cache_dir: ${workspace.endf_dir} sources: DIRECT: https://www-nds.iaea.org/public/download-endf API: https://www-nds.iaea.org/exfor/servlet default_library: ENDF-B-VIII.0 + isotopes: + - isotope: "U-235" + abundance: 0.0072 + vary_abundance: 0 + endf_library: ENDF-B-VIII.0 + - isotope: "U-238" + abundance: 0.9928 + vary_abundance: 0 sammy: backend: local # local | docker | nova @@ -111,6 +118,15 @@ fit_routines: dataset_id: example_dataset mode: fitting # fitting | endf_extraction | multi_isotope update_from_results: false + nuclear: + isotopes: + - isotope: "U-235" + abundance: 0.0072 + vary_abundance: 0 + endf_library: ENDF-B-VIII.0 + - isotope: "U-238" + abundance: 0.9928 + vary_abundance: 0 fit_config: fit_title: "SAMMY Fit" tolerance: null @@ -124,11 +140,6 @@ fit_routines: physics_params: {} # pleiades.experimental.models.PhysicsParameters data_params: {} # pleiades.sammy.data.options.SammyData options_and_routines: {} # pleiades.sammy.fitting.options.FitOptions - io: - input_title: null - input_file: null - parameter_file: null - runs: - run_id: run_001 routine_id: 
example_fit @@ -166,8 +177,10 @@ How this config is used - raw_imaging: run normalization to produce transmission data, then export to data_dir/.dat (or .twenty). - sammy_dat/sammy_twenty: use sammy_data_file or input_files.data directly. -3) Cache isotope data with NuclearDataManager - - if isotopic data is not already cached then download isotopic data using parameters referenced by fit_config.nuclear_params. +3) Cache isotope data with NuclearDataManager: + - Use fit_routines..nuclear.isotopes, or fall back to nuclear.isotopes. + - If isotopic data is not already cached, download using nuclear.data_cache_dir + (default: ~/.pleiades/nuclear_data) and default_library. 4) Create a run record: - Append a new entry to runs with run_id, routine_id, dataset_id, and paths. - Capture runtime metadata (timestamps, user, host, software versions). @@ -189,5 +202,6 @@ How this config is used ENDF integration ---------------- - NuclearDataManager uses PleiadesConfig.nuclear_data_cache_dir as its cache root. -- The YAML field nuclear.data_cache_dir should be mapped to that attribute so ENDF - downloads and cached files live under workspace.endf_dir. +- If nuclear.data_cache_dir is omitted, it defaults to ~/.pleiades/nuclear_data. +- When provided, nuclear.data_cache_dir overrides the default and can be placed + under workspace.endf_dir or any other location. From 54a1a54de890cbdb1732e7287614e3b3a3f44d65 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 08:53:37 -0700 Subject: [PATCH 09/45] Remove unused input file for U238 dummy case in ex027 example --- examples/samexm/ex012/results/SAMMY.PAR | 207 --- examples/samexm/ex012/results/SAMQUA.PAR | 299 ---- examples/samexm/ex027/endf/endf_dummy.dat | 2 - examples/samexm/ex027/endf/ex027a | 9 - examples/samexm/ex027/endf/ex027a.endf | 1790 --------------------- examples/samexm/ex027/endf/ex027a.inp | 9 - 6 files changed, 2316 deletions(-) delete mode 100644 examples/samexm/ex012/results/SAMMY.PAR delete mode 100644 examples/samexm/ex012/results/SAMQUA.PAR delete mode 100644 examples/samexm/ex027/endf/endf_dummy.dat delete mode 100755 examples/samexm/ex027/endf/ex027a delete mode 100755 examples/samexm/ex027/endf/ex027a.endf delete mode 100755 examples/samexm/ex027/endf/ex027a.inp diff --git a/examples/samexm/ex012/results/SAMMY.PAR b/examples/samexm/ex012/results/SAMMY.PAR deleted file mode 100644 index b2773ccb..00000000 --- a/examples/samexm/ex012/results/SAMMY.PAR +++ /dev/null @@ -1,207 +0,0 @@ --3661600.00 158770.000 4075253.+3 0.0000 0 0 1 0 1 --873730.000 1025.30000 101.510303 0.0000 0 0 1 0 1 --365290.000 1000.00000 30.4060000 0.0000 0 0 0 0 1 --63159.0000 1000.00000 46.8940000 0.0000 0 0 0 0 1 --48801.0000 1000.00000 9.24960000 0.0000 0 0 0 0 1 -31739.99805 1000.00000 15.6670000 0.0000 0.0000 0 0 0 0 0 5 -55676.96094 1580.30000 653310.000 0.0000 0 0 0 0 1 -67732.84375 2500.00000 2658.90000 0.0000 0 0 0 0 3 -70800.00781 1000.00000 29.6170000 0.0000 0.0000 0 0 0 0 0 5 -86797.35938 2500.00000 726.180000 0.0000 0 0 0 0 3 -181617.5000 5600.00000 34894000.0 0.0000 0 0 0 0 1 -298700.0000 1000.00000 9886.00000 0.0000 0.0000 0 0 0 0 0 5 -301310.8125 3600.00000 2354.80000 0.0000 0 0 0 0 1 -354588.6875 1000.00000 14460.0000 0.0000 0.0000 0 0 0 0 0 5 -399675.9375 660.000000 813.610000 0.0000 0 0 0 0 3 -532659.8750 2500.00000 532810.000 0.0000 0 0 0 0 3 -565576.8750 2900.00000 10953000.0 0.0000 0 0 0 0 3 -587165.7500 8800.00000 199160.000 
0.0000 0 0 0 0 2 -590290.1250 3600.00000 523660.000 0.0000 0 0 0 0 1 -602467.3125 3400.00000 50491.0000 0.0000 0.0000 0 0 0 0 0 4 -714043.4375 2500.00000 1216.50000 0.0000 0 0 0 0 3 -771711.9375 1000.00000 53139.0000 0.0000 0.0000 0 0 0 0 0 5 -812491.6250 9700.00000 30100000.0 0.0000 0 0 0 0 3 -845233.8750 2000.00000 397910.000 0.0000 0.0000 0 0 0 0 0 4 -872305.8125 1300.00000 32140.0000 0.0000 0.0000 0 0 0 0 0 5 -910043.5625 1130.00000 3673300.00 0.0000 0 0 0 0 3 -962233.0000 16000.0000 76614000.0 0.0000 0 0 0 0 2 -1017777.188 1000.00000 76192.0000 0.0000 0.0000 0 0 0 0 0 5 -1042856.812 1000.00000 933700.000 0.0000 0.0000 0 0 0 0 0 5 -1085169.250 3600.00000 72794.0000 0.0000 0 0 0 0 1 -1148103.625 1000.00000 3146.90000 0.0000 0.0000 0 0 0 0 0 5 -1162663.625 3800.00000 3013600.00 0.0000 0 0 0 0 1 -1199501.375 7600.00000 14914000.0 0.0000 0 0 0 0 2 -1201238.750 3600.00000 4601200.00 0.0000 0 0 0 0 1 -1256447.250 3600.00000 17383000.0 0.0000 0 0 0 0 1 -1264441.750 1000.00000 843640.000 0.0000 0.0000 0 0 0 0 0 5 -1379920.250 2400.00000 65299.0000 0.0000 0.0000 0 0 0 0 0 4 -1408269.750 2700.00000 5198300.00 0.0000 0 0 0 0 3 -1479927.250 1650.00000 3502500.00 0.0000 0.0000 0 0 0 0 0 4 -1482395.375 8800.00000 886.940000 0.0000 0 0 0 0 2 -1512343.875 1000.00000 91493.0000 0.0000 0.0000 0 0 0 0 0 5 -1528742.375 2400.00000 2922500.00 0.0000 0.0000 0 0 0 0 0 4 -1580564.875 2400.00000 1495500.00 0.0000 0.0000 0 0 0 0 0 4 -1592844.250 8800.00000 11199000.0 0.0000 0 0 0 0 2 -1597168.625 2400.00000 4017200.00 0.0000 0.0000 0 0 0 0 0 4 -1639561.500 1000.00000 15293000.0 0.0000 0.0000 0 0 0 0 0 5 -1651146.000 1000.00000 21555000.0 0.0000 0.0000 0 0 0 0 0 5 -1658595.000 8800.00000 1555300.00 0.0000 0 0 0 0 2 -1664961.125 2400.00000 215900.000 0.0000 0.0000 0 0 0 0 0 4 -1784952.750 2400.00000 192940.000 0.0000 0.0000 0 0 0 0 0 4 -1805652.625 2500.00000 1299100.00 0.0000 0 0 0 0 3 -1850667.250 1000.00000 35515000.0 0.0000 0.0000 0 0 0 0 0 5 -1852435.250 2500.00000 70707000.0 0.0000 0 
0 0 0 3 -1923655.250 1000.00000 1017100.00 0.0000 0.0000 0 0 0 0 0 5 -2248678.250 3600.00000 444760000. 0.0000 0 0 0 0 1 -1968869.750 1000.00000 5734100.00 0.0000 0.0000 0 0 0 0 0 5 -3007280.500 3600.00000 289960.000 0.0000 0 0 0 0 1 -3067775.250 3600.00000 422290.000 0.0000 0 0 0 0 1 --2179600.00 409080.000 1722200.+3 0 0 0 8 --860240.000 999.970000 34170000.0 0 0 0 8 --431280.000 1005.90000 228510000. 0 0 0 8 -15282.00000 1646.00000 10000.0000 0 0 0 10 -38819.00000 2400.00000 75926.0000 0 0 0 13 -159682.9688 1900.00000 1200300.00 0 0 0 10 -184456.4844 1500.00000 136740.000 0 0 0 10 -336790.2812 800.000000 2512800.00 0 0 0 10 -385764.1875 4670.00000 24133000.0 0 0 0 9 -552241.8125 5700.00000 1298900.00 0 0 0 13 -566558.4375 3000.00000 70820000.0 0 0 0 10 -619664.6250 3000.00000 725960.000 0 0 0 13 -649726.0000 3000.00000 1095900.00 0 0 0 13 -653064.6250 6300.00000 19386000.0 0 0 0 12 -715064.6250 300.000000 978570.000 0 0 0 10 -716771.3125 3000.00000 219300000. 0 0 0 8 -802258.9375 3000.00000 9934900.00 0 0 0 15 -862003.6875 3000.00000 432930000. 0 0 0 8 -872483.5000 300.000000 17335000.0 0 0 0 10 -955891.2500 300.000000 982890.000 0 0 0 13 -1098425.500 3000.00000 57787.0000 0 0 0 15 -1113807.625 300.000000 76533000.0 0 0 0 10 -1122279.500 300.000000 4881600.00 0 0 0 13 -1178601.750 3000.00000 8295900.00 0 0 0 9 -1192267.500 300.000000 375060.000 0 0 0 12 -1207629.500 300.000000 19795000.0 0 0 0 13 -1388859.125 3000.00000 4271400.00 0 0 0 15 -1769072.750 3000.00000 32136.0000 0 0 0 8 -2248487.000 3000.00000 169320.000 0 0 0 8 --1185100.00 118480.000 260570000. 
0 0 0 18 --161550.000 650.000000 426400.000 0 0 0 18 -2235.000000 370.000000 932.660000 0 0 0 20 -4977.000000 600.000000 1122.00000 0 0 0 19 -183488.8281 6000.00000 9997600.00 0 0 0 18 -235225.3906 800.000000 115410.000 0 0 0 21 -302839.2188 370.000000 274430.000 0 0 0 20 -413136.1875 600.000000 1580100.00 0 0 0 19 -645239.8125 800.000000 401430.000 0 0 0 21 -704912.0000 800.000000 423190.000 0 0 0 21 -745454.0000 370.000000 14735000.0 0 0 0 20 -796946.1250 600.000000 469320.000 0 0 0 19 -807379.8125 600.000000 274330.000 0 0 0 19 -810796.9375 600.000000 419310.000 0 0 0 19 -844674.6250 370.000000 3315300.00 0 0 0 20 -878822.8125 800.000000 110630.000 0 0 0 21 -979821.3125 600.000000 591820.000 0 0 0 19 -1182175.750 6000.00000 5912400.00 0 0 0 18 -1217821.125 800.000000 1888900.00 0 0 0 21 -1274871.500 600.000000 2225500.00 0 0 0 19 -1302032.875 800.000000 304790.000 0 0 0 21 -1310774.875 370.000000 339710.000 0 0 0 20 -1337984.375 600.000000 4624000.00 0 0 0 19 -1356024.750 370.000000 12271000.0 0 0 0 20 -1383597.625 600.000000 25336000.0 0 0 0 19 -1400981.375 800.000000 1897600.00 0 0 0 21 -1412107.750 370.000000 668620.000 0 0 0 20 -1586007.000 6000.00000 23644000.0 0 0 0 18 -2583249.500 6000.00000 92076000.0 0 0 0 18 --11592000.0 1200.00000 7517400.+3 0 0 0 23 --9192600.00 1200.00000 1435600.+4 0 0 0 23 -433901.3750 1200.00000 43760000.0 0 0 0 25 -999736.9375 1200.00000 96583000.0 0 0 0 26 -1307683.875 1200.00000 42027000.0 0 0 0 25 -1630813.125 1200.00000 71367.0000 0 0 0 25 -1650581.000 1200.00000 3901300.00 0 0 0 29 -1833142.125 1200.00000 7835200.00 0 0 0 26 -1898468.875 1200.00000 28678000.0 0 0 0 24 -2372699.000 1200.00000 155330000. 0 0 0 23 -2888249.750 1200.00000 1885900.00 0 0 0 24 -3004838.500 1200.00000 2333.80000 0 0 0 24 -3181814.500 1200.00000 517690000. 0 0 0 24 -3203037.750 1200.00000 179230000. 0 0 0 23 -3204505.000 1200.00000 310910.000 0 0 0 28 -3239197.250 1200.00000 251410000. 
0 0 0 26 -3431828.250 1200.00000 615700.000 0 0 0 27 -3438834.250 1200.00000 1803900.00 0 0 0 28 -3636796.000 1200.00000 701840000. 0 0 0 25 -3763624.250 1200.00000 16031000.0 0 0 0 29 -3853751.500 1200.00000 562450000. 0 0 0 23 -4175216.750 1200.00000 70220000.0 0 0 0 26 -4288830.500 1200.00000 56268000.0 0 0 0 25 -4303685.000 1200.00000 16078000.0 0 0 0 24 -4461693.000 1200.00000 77996000.0 0 0 0 23 -4525253.000 1200.00000 4173300.00 0 0 0 27 -4591673.500 1200.00000 91070.0000 0 0 0 29 -4592562.000 1200.00000 175270.000 0 0 0 28 -4816678.000 1200.00000 47145000.0 0 0 0 25 -5055537.000 1200.00000 52134000.0 0 0 0 26 -5118596.000 1200.00000 17864000.0 0 0 0 29 -5365329.500 1200.00000 980620.000 0 0 0 27 -5617318.000 1200.00000 33197000.0 0 0 0 28 -5666636.000 1200.00000 12687000.0 0 0 0 27 -5912201.500 1200.00000 12942000.0 0 0 0 29 -5986110.000 1200.00000 7106100.00 0 0 0 26 -6076846.000 1200.00000 5166700.00 0 0 0 28 -6116665.000 1200.00000 5549900.00 0 0 0 24 -6389893.000 1200.00000 4935600.00 0 0 0 29 -6813680.500 1200.00000 3262900.00 0 0 0 28 -6833136.500 1200.00000 5140500.00 0 0 0 27 -7373000.000 1200.00000 2400000.00 0 0 0 24 -8820920.000 1200.00000 1123400.+4 0 0 0 25 -10934820.00 1200.00000 276110.000 0 0 0 23 -15050371.00 1200.00000 634990.000 0 0 0 23 -25004488.00 1200.00000 6918.60000 0 0 0 23 - -.100000000 -Channel radii in key-word format -Radii= 4.136420, 4.136420 Flags=0,-1 - Group= 1 Chan= 1, 2, - Group= 4 Chan= 1, 2, 3, - Group= 5 Chan= 1, 2, 3, -Radii= 4.943720, 4.943720 Flags=0,-1 - Group= 2 Chan= 1, 2, - Group= 3 Chan= 1, 2, - Group= 6 Chan= 1, 2, 3, - Group= 7 Chan= 1, 2, 3, -Radii= 4.400000, 4.400000 Flags=0,-1 - Group= 8 Chan= 1, - Group= 9 Chan= 1, - Group= 10 Chan= 1, - Group= 11 Chan= 1, - Group= 12 Chan= 1, - Group= 13 Chan= 1, - Group= 14 Chan= 1, - Group= 15 Chan= 1, - Group= 16 Chan= 1, - Group= 17 Chan= 1, -Radii= 4.200000, 4.200000 Flags=0,-1 - Group= 18 Chan= 1, - Group= 19 Chan= 1, - Group= 20 Chan= 1, - Group= 21 Chan= 1, - 
Group= 22 Chan= 1, -Radii= 4.200000, 4.200000 Flags=0,-1 - Group= 23 Chan= 1, - Group= 24 Chan= 1, - Group= 25 Chan= 1, - Group= 26 Chan= 1, - Group= 27 Chan= 1, - Group= 28 Chan= 1, - Group= 29 Chan= 1, - -NUCLIDE MASSES AND ABUNDANCES FOLLOW -27.9769290 .93272250 .92000000 1 1 2 3 4 5 6 7 -28.9764960 .03877251 .05000000 1 8 91011121314151617 -29.9737720 .02177828 .02000000 11819202122 -16.0000000 1.0000000 .01000000 023242526272829 - -COVARIANCE MATRIX IS IN BINARY FORM diff --git a/examples/samexm/ex012/results/SAMQUA.PAR b/examples/samexm/ex012/results/SAMQUA.PAR deleted file mode 100644 index 12907ac3..00000000 --- a/examples/samexm/ex012/results/SAMQUA.PAR +++ /dev/null @@ -1,299 +0,0 @@ -PARTICLE PAIR DEFINITIONS -Name=Inc Chan Particle a=neutron Particle b=Other - Za= 0 Zb= 0 Pent=1 Shift=0 - Sa= 0.5 Sb= 0.0 Ma= 1.008664915780000 Mb= 27.976928999999 -Name=PPair #2 Particle a=neutron Particle b=Other - Za= 0 Zb= 0 Pent=1 Shift=0 - Sa=999.0 Sb= 999.0 Ma= 1.008664915780000 Mb= 27.976928999999 - Q= -1779000.8742580006 -Name=PPair #4 Particle a=neutron Particle b=Other - Za= 0 Zb= 0 Pent=1 Shift=0 - Sa= 0.5 Sb= 0.0 Ma= 1.008664915780000 Mb= 27.976928999999 - Q= -1779000.8742580006 -Name=Inc Ch#1 Particle a=neutron Particle b=Other - Za= 0 Zb= 0 Pent=1 Shift=0 - Sa= 0.5 Sb= 0.5 Ma= 1.008664915780000 Mb= 28.976496000000 -Name=Inc Ch#2 Particle a=neutron Particle b=Other - Za= 0 Zb= 0 Pent=1 Shift=0 - Sa= 0.5 Sb= 0.0 Ma= 1.008664915780000 Mb= 29.973772000000 -Name=Inc Ch#3 Particle a=neutron Particle b=Other - Za= 0 Zb= 0 Pent=1 Shift=0 - Sa= 0.5 Sb= 0.0 Ma= 1.008664915780000 Mb= 16.000000000000 - -SPIN GROUPS - 1 1 1 0.5 1.0000000 - 1 Inc Chan 0 0.5 - 2 PPair #2 2 1.5 - 2 1 1 -0.5 1.0000000 - 1 Inc Chan 1 0.5 - 2 PPair #4 1 0.5 - 3 1 1 -1.5 1.0000000 - 1 Inc Chan 1 0.5 - 2 PPair #4 1 0.5 - 4 1 2 1.5 1.0000000 - 1 Inc Chan 2 0.5 - 2 PPair #4 2 0.5 - 3 PPair #2 0 1.5 - 5 1 2 2.5 1.0000000 - 1 Inc Chan 2 0.5 - 2 PPair #4 2 0.5 - 3 PPair #2 0 2.5 - 6 1 2 -2.5 
1.0000000 - 1 Inc Chan 3 0.5 - 2 PPair #4 3 0.5 - 3 PPair #2 1 2.5 - 7 1 2 -3.5 1.0000000 - 1 Inc Chan 3 0.5 - 2 PPair #4 3 0.5 - 3 PPair #2 1 2.5 - 8 1 0 0.0 0.0467000 - 1 Inc Ch#1 0 0.0 - 9 1 0 1.0 0.0467000 - 1 Inc Ch#1 0 1.0 - 10 1 0 -1.0 0.0467000 - 1 Inc Ch#1 1 0.0 - 11 1 0 -0.0 0.0467000 - 1 Inc Ch#1 1 1.0 - 12 1 0 -1.0 0.0467000 - 1 Inc Ch#1 1 1.0 - 13 1 0 -2.0 0.0467000 - 1 Inc Ch#1 1 1.0 - 14 1 0 2.0 0.0467000 - 1 Inc Ch#1 2 0.0 - 15 1 0 1.0 0.0467000 - 1 Inc Ch#1 2 1.0 - 16 1 0 2.0 0.0467000 - 1 Inc Ch#1 2 1.0 - 17 1 0 3.0 0.0467000 - 1 Inc Ch#1 2 1.0 - 18 1 0 0.5 0.0310000 - 1 Inc Ch#2 0 0.5 - 19 1 0 -0.5 0.0310000 - 1 Inc Ch#2 1 0.5 - 20 1 0 -1.5 0.0310000 - 1 Inc Ch#2 1 0.5 - 21 1 0 1.5 0.0310000 - 1 Inc Ch#2 2 0.5 - 22 1 0 2.5 0.0310000 - 1 Inc Ch#2 2 0.5 - 23 X 1 0 0.5 1.0000000 - 1 Inc Ch#3 0 0.5 - 24 X 1 0 -0.5 1.0000000 - 1 Inc Ch#3 1 0.5 - 25 X 1 0 -1.5 1.0000000 - 1 Inc Ch#3 1 0.5 - 26 X 1 0 1.5 1.0000000 - 1 Inc Ch#3 2 0.5 - 27 X 1 0 2.5 1.0000000 - 1 Inc Ch#3 2 0.5 - 28 X 1 0 -2.5 1.0000000 - 1 Inc Ch#3 3 0.5 - 29 X 1 0 -3.5 1.0000000 - 1 Inc Ch#3 3 0.5 - -RESONANCES are listed next --3661600.00 158770.000 3698500.+3 0.0000 0 0 1 0 1 --873730.000 1025.30000 101.510000 0.0000 0 0 1 0 1 --365290.000 1000.00000 30.4060000 0.0000 0 0 0 0 1 --63159.0000 1000.00000 46.8940000 0.0000 0 0 0 0 1 --48801.0000 1000.00000 9.24960000 0.0000 0 0 0 0 1 -31739.99805 1000.00000 15.6670000 0.0000 0.0000 0 0 0 0 0 5 -55676.96094 1580.30000 653310.000 0.0000 0 0 0 0 1 -67732.84375 2500.00000 2658.90000 0.0000 0 0 0 0 3 -70800.00781 1000.00000 29.6170000 0.0000 0.0000 0 0 0 0 0 5 -86797.35938 2500.00000 726.180000 0.0000 0 0 0 0 3 -181617.5000 5600.00000 34894000.0 0.0000 0 0 0 0 1 -298700.0000 1000.00000 9886.00000 0.0000 0.0000 0 0 0 0 0 5 -301310.8125 3600.00000 2354.80000 0.0000 0 0 0 0 1 -354588.6875 1000.00000 14460.0000 0.0000 0.0000 0 0 0 0 0 5 -399675.9375 660.000000 813.610000 0.0000 0 0 0 0 3 -532659.8750 2500.00000 532810.000 0.0000 0 0 0 0 3 
-565576.8750 2900.00000 10953000.0 0.0000 0 0 0 0 3 -587165.7500 8800.00000 199160.000 0.0000 0 0 0 0 2 -590290.1250 3600.00000 523660.000 0.0000 0 0 0 0 1 -602467.3125 3400.00000 50491.0000 0.0000 0.0000 0 0 0 0 0 4 -714043.4375 2500.00000 1216.50000 0.0000 0 0 0 0 3 -771711.9375 1000.00000 53139.0000 0.0000 0.0000 0 0 0 0 0 5 -812491.6250 9700.00000 30100000.0 0.0000 0 0 0 0 3 -845233.8750 2000.00000 397910.000 0.0000 0.0000 0 0 0 0 0 4 -872305.8125 1300.00000 32140.0000 0.0000 0.0000 0 0 0 0 0 5 -910043.5625 1130.00000 3673300.00 0.0000 0 0 0 0 3 -962233.0000 16000.0000 76614000.0 0.0000 0 0 0 0 2 -1017777.188 1000.00000 76192.0000 0.0000 0.0000 0 0 0 0 0 5 -1042856.812 1000.00000 933700.000 0.0000 0.0000 0 0 0 0 0 5 -1085169.250 3600.00000 72794.0000 0.0000 0 0 0 0 1 -1148103.625 1000.00000 3146.90000 0.0000 0.0000 0 0 0 0 0 5 -1162663.625 3800.00000 3013600.00 0.0000 0 0 0 0 1 -1199501.375 7600.00000 14914000.0 0.0000 0 0 0 0 2 -1201238.750 3600.00000 4601200.00 0.0000 0 0 0 0 1 -1256447.250 3600.00000 17383000.0 0.0000 0 0 0 0 1 -1264441.750 1000.00000 843640.000 0.0000 0.0000 0 0 0 0 0 5 -1379920.250 2400.00000 65299.0000 0.0000 0.0000 0 0 0 0 0 4 -1408269.750 2700.00000 5198300.00 0.0000 0 0 0 0 3 -1479927.250 1650.00000 3502500.00 0.0000 0.0000 0 0 0 0 0 4 -1482395.375 8800.00000 886.940000 0.0000 0 0 0 0 2 -1512343.875 1000.00000 91493.0000 0.0000 0.0000 0 0 0 0 0 5 -1528742.375 2400.00000 2922500.00 0.0000 0.0000 0 0 0 0 0 4 -1580564.875 2400.00000 1495500.00 0.0000 0.0000 0 0 0 0 0 4 -1592844.250 8800.00000 11199000.0 0.0000 0 0 0 0 2 -1597168.625 2400.00000 4017200.00 0.0000 0.0000 0 0 0 0 0 4 -1639561.500 1000.00000 15293000.0 0.0000 0.0000 0 0 0 0 0 5 -1651146.000 1000.00000 21555000.0 0.0000 0.0000 0 0 0 0 0 5 -1658595.000 8800.00000 1555300.00 0.0000 0 0 0 0 2 -1664961.125 2400.00000 215900.000 0.0000 0.0000 0 0 0 0 0 4 -1784952.750 2400.00000 192940.000 0.0000 0.0000 0 0 0 0 0 4 -1805652.625 2500.00000 1299100.00 0.0000 0 0 0 0 3 -1850667.250 
1000.00000 35515000.0 0.0000 0.0000 0 0 0 0 0 5 -1852435.250 2500.00000 70707000.0 0.0000 0 0 0 0 3 -1923655.250 1000.00000 1017100.00 0.0000 0.0000 0 0 0 0 0 5 -2248678.250 3600.00000 444760000. 0.0000 0 0 0 0 1 -1968869.750 1000.00000 5734100.00 0.0000 0.0000 0 0 0 0 0 5 -3007280.500 3600.00000 289960.000 0.0000 0 0 0 0 1 -3067775.250 3600.00000 422290.000 0.0000 0 0 0 0 1 --2179600.00 409080.000 1722200.+3 0 0 0 8 --860240.000 999.970000 34170000.0 0 0 0 8 --431280.000 1005.90000 228510000. 0 0 0 8 -15282.00000 1646.00000 10000.0000 0 0 0 10 -38819.00000 2400.00000 75926.0000 0 0 0 13 -159682.9688 1900.00000 1200300.00 0 0 0 10 -184456.4844 1500.00000 136740.000 0 0 0 10 -336790.2812 800.000000 2512800.00 0 0 0 10 -385764.1875 4670.00000 24133000.0 0 0 0 9 -552241.8125 5700.00000 1298900.00 0 0 0 13 -566558.4375 3000.00000 70820000.0 0 0 0 10 -619664.6250 3000.00000 725960.000 0 0 0 13 -649726.0000 3000.00000 1095900.00 0 0 0 13 -653064.6250 6300.00000 19386000.0 0 0 0 12 -715064.6250 300.000000 978570.000 0 0 0 10 -716771.3125 3000.00000 219300000. 0 0 0 8 -802258.9375 3000.00000 9934900.00 0 0 0 15 -862003.6875 3000.00000 432930000. 0 0 0 8 -872483.5000 300.000000 17335000.0 0 0 0 10 -955891.2500 300.000000 982890.000 0 0 0 13 -1098425.500 3000.00000 57787.0000 0 0 0 15 -1113807.625 300.000000 76533000.0 0 0 0 10 -1122279.500 300.000000 4881600.00 0 0 0 13 -1178601.750 3000.00000 8295900.00 0 0 0 9 -1192267.500 300.000000 375060.000 0 0 0 12 -1207629.500 300.000000 19795000.0 0 0 0 13 -1388859.125 3000.00000 4271400.00 0 0 0 15 -1769072.750 3000.00000 32136.0000 0 0 0 8 -2248487.000 3000.00000 169320.000 0 0 0 8 --1185100.00 118480.000 260570000. 
0 0 0 18 --161550.000 650.000000 426400.000 0 0 0 18 -2235.000000 370.000000 932.660000 0 0 0 20 -4977.000000 600.000000 1122.00000 0 0 0 19 -183488.8281 6000.00000 9997600.00 0 0 0 18 -235225.3906 800.000000 115410.000 0 0 0 21 -302839.2188 370.000000 274430.000 0 0 0 20 -413136.1875 600.000000 1580100.00 0 0 0 19 -645239.8125 800.000000 401430.000 0 0 0 21 -704912.0000 800.000000 423190.000 0 0 0 21 -745454.0000 370.000000 14735000.0 0 0 0 20 -796946.1250 600.000000 469320.000 0 0 0 19 -807379.8125 600.000000 274330.000 0 0 0 19 -810796.9375 600.000000 419310.000 0 0 0 19 -844674.6250 370.000000 3315300.00 0 0 0 20 -878822.8125 800.000000 110630.000 0 0 0 21 -979821.3125 600.000000 591820.000 0 0 0 19 -1182175.750 6000.00000 5912400.00 0 0 0 18 -1217821.125 800.000000 1888900.00 0 0 0 21 -1274871.500 600.000000 2225500.00 0 0 0 19 -1302032.875 800.000000 304790.000 0 0 0 21 -1310774.875 370.000000 339710.000 0 0 0 20 -1337984.375 600.000000 4624000.00 0 0 0 19 -1356024.750 370.000000 12271000.0 0 0 0 20 -1383597.625 600.000000 25336000.0 0 0 0 19 -1400981.375 800.000000 1897600.00 0 0 0 21 -1412107.750 370.000000 668620.000 0 0 0 20 -1586007.000 6000.00000 23644000.0 0 0 0 18 -2583249.500 6000.00000 92076000.0 0 0 0 18 --11592000.0 1200.00000 7517400.+3 0 0 0 23 --9192600.00 1200.00000 1435600.+4 0 0 0 23 -433901.3750 1200.00000 43760000.0 0 0 0 25 -999736.9375 1200.00000 96583000.0 0 0 0 26 -1307683.875 1200.00000 42027000.0 0 0 0 25 -1630813.125 1200.00000 71367.0000 0 0 0 25 -1650581.000 1200.00000 3901300.00 0 0 0 29 -1833142.125 1200.00000 7835200.00 0 0 0 26 -1898468.875 1200.00000 28678000.0 0 0 0 24 -2372699.000 1200.00000 155330000. 0 0 0 23 -2888249.750 1200.00000 1885900.00 0 0 0 24 -3004838.500 1200.00000 2333.80000 0 0 0 24 -3181814.500 1200.00000 517690000. 0 0 0 24 -3203037.750 1200.00000 179230000. 0 0 0 23 -3204505.000 1200.00000 310910.000 0 0 0 28 -3239197.250 1200.00000 251410000. 
0 0 0 26 -3431828.250 1200.00000 615700.000 0 0 0 27 -3438834.250 1200.00000 1803900.00 0 0 0 28 -3636796.000 1200.00000 701840000. 0 0 0 25 -3763624.250 1200.00000 16031000.0 0 0 0 29 -3853751.500 1200.00000 562450000. 0 0 0 23 -4175216.750 1200.00000 70220000.0 0 0 0 26 -4288830.500 1200.00000 56268000.0 0 0 0 25 -4303685.000 1200.00000 16078000.0 0 0 0 24 -4461693.000 1200.00000 77996000.0 0 0 0 23 -4525253.000 1200.00000 4173300.00 0 0 0 27 -4591673.500 1200.00000 91070.0000 0 0 0 29 -4592562.000 1200.00000 175270.000 0 0 0 28 -4816678.000 1200.00000 47145000.0 0 0 0 25 -5055537.000 1200.00000 52134000.0 0 0 0 26 -5118596.000 1200.00000 17864000.0 0 0 0 29 -5365329.500 1200.00000 980620.000 0 0 0 27 -5617318.000 1200.00000 33197000.0 0 0 0 28 -5666636.000 1200.00000 12687000.0 0 0 0 27 -5912201.500 1200.00000 12942000.0 0 0 0 29 -5986110.000 1200.00000 7106100.00 0 0 0 26 -6076846.000 1200.00000 5166700.00 0 0 0 28 -6116665.000 1200.00000 5549900.00 0 0 0 24 -6389893.000 1200.00000 4935600.00 0 0 0 29 -6813680.500 1200.00000 3262900.00 0 0 0 28 -6833136.500 1200.00000 5140500.00 0 0 0 27 -7373000.000 1200.00000 2400000.00 0 0 0 24 -8820920.000 1200.00000 1123400.+4 0 0 0 25 -10934820.00 1200.00000 276110.000 0 0 0 23 -15050371.00 1200.00000 634990.000 0 0 0 23 -25004488.00 1200.00000 6918.60000 0 0 0 23 - -.100000000 -Channel radii in key-word format -Radii= 4.136420, 4.136420 Flags=0,-1 - Group= 1 Chan= 1, 2, - Group= 4 Chan= 1, 2, 3, - Group= 5 Chan= 1, 2, 3, -Radii= 4.943720, 4.943720 Flags=0,-1 - Group= 2 Chan= 1, 2, - Group= 3 Chan= 1, 2, - Group= 6 Chan= 1, 2, 3, - Group= 7 Chan= 1, 2, 3, -Radii= 4.400000, 4.400000 Flags=0,-1 - Group= 8 Chan= 1, - Group= 9 Chan= 1, - Group= 10 Chan= 1, - Group= 11 Chan= 1, - Group= 12 Chan= 1, - Group= 13 Chan= 1, - Group= 14 Chan= 1, - Group= 15 Chan= 1, - Group= 16 Chan= 1, - Group= 17 Chan= 1, -Radii= 4.200000, 4.200000 Flags=0,-1 - Group= 18 Chan= 1, - Group= 19 Chan= 1, - Group= 20 Chan= 1, - Group= 21 Chan= 1, - 
Group= 22 Chan= 1, -Radii= 4.200000, 4.200000 Flags=0,-1 - Group= 23 Chan= 1, - Group= 24 Chan= 1, - Group= 25 Chan= 1, - Group= 26 Chan= 1, - Group= 27 Chan= 1, - Group= 28 Chan= 1, - Group= 29 Chan= 1, - -NUCLIDE MASSES AND ABUNDANCES FOLLOW -27.9769290 1.0000000 .92000000 1 1 2 3 4 5 6 7 -28.9764960 .04670000 .05000000 1 8 91011121314151617 -29.9737720 .03100000 .02000000 11819202122 -16.0000000 1.0000000 .01000000 023242526272829 diff --git a/examples/samexm/ex027/endf/endf_dummy.dat b/examples/samexm/ex027/endf/endf_dummy.dat deleted file mode 100644 index cb63c2e6..00000000 --- a/examples/samexm/ex027/endf/endf_dummy.dat +++ /dev/null @@ -1,2 +0,0 @@ - 6.0770001411 24.7000007629 2.4700000286 - 90591.0000000000 79.0000000000 7.9000000954 diff --git a/examples/samexm/ex027/endf/ex027a b/examples/samexm/ex027/endf/ex027a deleted file mode 100755 index 2cfa0c9b..00000000 --- a/examples/samexm/ex027/endf/ex027a +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -sammy < Date: Wed, 21 Jan 2026 10:35:20 -0700 Subject: [PATCH 10/45] Refactor code structure for improved readability and maintainability --- examples/samexm/ex027/ex027b | 6 +- examples/samexm/ex027/ex027b.inp | 23 - examples/samexm/ex027/ex027b.par | 1662 ------------------------------ 3 files changed, 3 insertions(+), 1688 deletions(-) delete mode 100755 examples/samexm/ex027/ex027b.inp delete mode 100755 examples/samexm/ex027/ex027b.par diff --git a/examples/samexm/ex027/ex027b b/examples/samexm/ex027/ex027b index b7d1d054..86b231ba 100755 --- a/examples/samexm/ex027/ex027b +++ b/examples/samexm/ex027/ex027b @@ -1,8 +1,8 @@ -#!/bin/csh +#!/bin/bash sammy < Date: Wed, 21 Jan 2026 10:36:00 -0700 Subject: [PATCH 11/45] feat: add ex027 example script for nuclear configuration processing --- examples/samexm/ex027/{ex027b => ex027} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename examples/samexm/ex027/{ex027b => ex027} (86%) diff --git a/examples/samexm/ex027/ex027b b/examples/samexm/ex027/ex027 
similarity index 86% rename from examples/samexm/ex027/ex027b rename to examples/samexm/ex027/ex027 index 86b231ba..32513217 100755 --- a/examples/samexm/ex027/ex027b +++ b/examples/samexm/ex027/ex027 @@ -3,7 +3,7 @@ sammy < Date: Wed, 21 Jan 2026 13:45:10 -0700 Subject: [PATCH 12/45] rename: inp03_constants.py is now inp05_broadening.py --- ...inp03_constants.py => inp05_broadening.py} | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) rename src/pleiades/sammy/io/card_formats/{inp03_constants.py => inp05_broadening.py} (79%) diff --git a/src/pleiades/sammy/io/card_formats/inp03_constants.py b/src/pleiades/sammy/io/card_formats/inp05_broadening.py similarity index 79% rename from src/pleiades/sammy/io/card_formats/inp03_constants.py rename to src/pleiades/sammy/io/card_formats/inp05_broadening.py index 6fd5228c..9caff423 100644 --- a/src/pleiades/sammy/io/card_formats/inp03_constants.py +++ b/src/pleiades/sammy/io/card_formats/inp05_broadening.py @@ -1,12 +1,12 @@ #!/usr/bin/env python """ -Card Set 3 (Physical Constants) for SAMMY INP files. +Card Set 5 (Broadening/Resolution Parameters) for SAMMY INP files. -This module provides the Card03 class for parsing and generating the physical -constants line in SAMMY input files. This card appears after the element information -and defines temperature, flight path, and resolution parameters. +This module provides the Card05 class for parsing and generating the broadening +constants line in SAMMY input files. This card appears after the alphanumeric +command block and defines temperature, flight path, and resolution parameters. -Format specification (Card Set 3 - Physical Constants): +Format specification (Card Set 5 - Broadening/Resolution): The line contains five floating-point values with variable spacing: - TEMP: Temperature (K) - FPL: Flight path length (m) @@ -28,7 +28,7 @@ class PhysicalConstants(BaseModel): - """Pydantic model for physical constants in Card Set 3. 
+ """Pydantic model for broadening constants in Card Set 5. Attributes: temperature: Temperature in Kelvin @@ -45,28 +45,28 @@ class PhysicalConstants(BaseModel): delta_e: float = Field(default=0.0, description="e-folding width of exponential resolution (μs)", ge=0) -class Card03(BaseModel): +class Card05(BaseModel): """ - Class representing Card Set 3 (physical constants) in SAMMY INP files. + Class representing Card Set 5 (broadening constants) in SAMMY INP files. This card defines temperature, flight path, and resolution parameters for the analysis. """ @classmethod def from_lines(cls, lines: List[str]) -> PhysicalConstants: - """Parse physical constants from Card Set 3 line. + """Parse broadening constants from Card Set 5 line. Args: - lines: List of input lines (expects single line for Card 3) + lines: List of input lines (expects single line for Card 5) Returns: - PhysicalConstants: Parsed physical constants + PhysicalConstants: Parsed broadening constants Raises: ValueError: If format is invalid or required values missing """ if not lines or not lines[0].strip(): - message = "No valid Card 3 line provided" + message = "No valid Card 5 line provided" logger.error(message) raise ValueError(message) @@ -74,7 +74,7 @@ def from_lines(cls, lines: List[str]) -> PhysicalConstants: fields = line.split() if len(fields) < 2: - message = f"Card 3 line must have at least 2 fields (TEMP, FPL), got {len(fields)}" + message = f"Card 5 line must have at least 2 fields (TEMP, FPL), got {len(fields)}" logger.error(message) raise ValueError(message) @@ -85,7 +85,7 @@ def from_lines(cls, lines: List[str]) -> PhysicalConstants: delta_g = float(fields[3]) if len(fields) > 3 else 0.0 delta_e = float(fields[4]) if len(fields) > 4 else 0.0 except (ValueError, IndexError) as e: - message = f"Failed to parse Card 3 line: {e}" + message = f"Failed to parse Card 5 line: {e}" logger.error(message) raise ValueError(message) @@ -99,13 +99,13 @@ def from_lines(cls, lines: List[str]) -> 
PhysicalConstants: @classmethod def to_lines(cls, constants: PhysicalConstants) -> List[str]: - """Convert physical constants to Card Set 3 formatted line. + """Convert broadening constants to Card Set 5 formatted line. Args: constants: PhysicalConstants object containing parameter data Returns: - List containing single formatted line for Card Set 3 + List containing single formatted line for Card Set 5 """ if not isinstance(constants, PhysicalConstants): message = "constants must be an instance of PhysicalConstants" From ee95cc760f9097413ee57f50191f97193cbf005c Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 13:46:08 -0700 Subject: [PATCH 13/45] rename: inp03_density.py was renamed to inp07_density.py to better reflect the card implementation in the SAMMY manual. --- .../io/card_formats/{inp03_density.py => inp07_density.py} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename src/pleiades/sammy/io/card_formats/{inp03_density.py => inp07_density.py} (94%) diff --git a/src/pleiades/sammy/io/card_formats/inp03_density.py b/src/pleiades/sammy/io/card_formats/inp07_density.py similarity index 94% rename from src/pleiades/sammy/io/card_formats/inp03_density.py rename to src/pleiades/sammy/io/card_formats/inp07_density.py index 602f5b6b..78b61479 100644 --- a/src/pleiades/sammy/io/card_formats/inp03_density.py +++ b/src/pleiades/sammy/io/card_formats/inp07_density.py @@ -2,8 +2,8 @@ """ Sample Density for SAMMY INP files. -This module provides the Card03Density class for parsing and generating the sample -density line in SAMMY input files. This line appears after the physical constants +This module provides the Card07Density class for parsing and generating the sample +density line in SAMMY input files. This line appears after the broadening constants and defines the material density and number density. 
Format specification (Sample Density): @@ -36,7 +36,7 @@ class SampleDensity(BaseModel): number_density: float = Field(..., description="Number density (atoms/barn-cm)", gt=0) -class Card03Density(BaseModel): +class Card07Density(BaseModel): """ Class representing sample density line in SAMMY INP files. From c6019c7ac5e7021c0e283365c1df52c293989105 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 13:46:26 -0700 Subject: [PATCH 14/45] refactor: update card formats documentation by removing deprecated modules and adding new ones --- .../api/pleiades.sammy.io.card_formats.inp03_constants.rst | 7 ------- docs/api/pleiades.sammy.io.card_formats.inp03_density.rst | 7 ------- .../pleiades.sammy.io.card_formats.inp05_broadening.rst | 7 +++++++ docs/api/pleiades.sammy.io.card_formats.inp07_density.rst | 7 +++++++ docs/api/pleiades.sammy.io.card_formats.rst | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) delete mode 100644 docs/api/pleiades.sammy.io.card_formats.inp03_constants.rst delete mode 100644 docs/api/pleiades.sammy.io.card_formats.inp03_density.rst create mode 100644 docs/api/pleiades.sammy.io.card_formats.inp05_broadening.rst create mode 100644 docs/api/pleiades.sammy.io.card_formats.inp07_density.rst diff --git a/docs/api/pleiades.sammy.io.card_formats.inp03_constants.rst b/docs/api/pleiades.sammy.io.card_formats.inp03_constants.rst deleted file mode 100644 index 1b85a46a..00000000 --- a/docs/api/pleiades.sammy.io.card_formats.inp03_constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -pleiades.sammy.io.card\_formats.inp03\_constants module -======================================================= - -.. 
automodule:: pleiades.sammy.io.card_formats.inp03_constants - :members: - :show-inheritance: - :undoc-members: diff --git a/docs/api/pleiades.sammy.io.card_formats.inp03_density.rst b/docs/api/pleiades.sammy.io.card_formats.inp03_density.rst deleted file mode 100644 index a8762f37..00000000 --- a/docs/api/pleiades.sammy.io.card_formats.inp03_density.rst +++ /dev/null @@ -1,7 +0,0 @@ -pleiades.sammy.io.card\_formats.inp03\_density module -===================================================== - -.. automodule:: pleiades.sammy.io.card_formats.inp03_density - :members: - :show-inheritance: - :undoc-members: diff --git a/docs/api/pleiades.sammy.io.card_formats.inp05_broadening.rst b/docs/api/pleiades.sammy.io.card_formats.inp05_broadening.rst new file mode 100644 index 00000000..20d48788 --- /dev/null +++ b/docs/api/pleiades.sammy.io.card_formats.inp05_broadening.rst @@ -0,0 +1,7 @@ +pleiades.sammy.io.card\_formats.inp05\_broadening module +======================================================== + +.. automodule:: pleiades.sammy.io.card_formats.inp05_broadening + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/api/pleiades.sammy.io.card_formats.inp07_density.rst b/docs/api/pleiades.sammy.io.card_formats.inp07_density.rst new file mode 100644 index 00000000..cfc635ef --- /dev/null +++ b/docs/api/pleiades.sammy.io.card_formats.inp07_density.rst @@ -0,0 +1,7 @@ +pleiades.sammy.io.card\_formats.inp07\_density module +===================================================== + +.. 
automodule:: pleiades.sammy.io.card_formats.inp07_density + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/api/pleiades.sammy.io.card_formats.rst b/docs/api/pleiades.sammy.io.card_formats.rst index f2eb5c2d..3dc9e007 100644 --- a/docs/api/pleiades.sammy.io.card_formats.rst +++ b/docs/api/pleiades.sammy.io.card_formats.rst @@ -13,8 +13,8 @@ Submodules :maxdepth: 4 pleiades.sammy.io.card_formats.inp02_element - pleiades.sammy.io.card_formats.inp03_constants - pleiades.sammy.io.card_formats.inp03_density + pleiades.sammy.io.card_formats.inp05_broadening + pleiades.sammy.io.card_formats.inp07_density pleiades.sammy.io.card_formats.inp04_particlepairs pleiades.sammy.io.card_formats.inp10_spingroups pleiades.sammy.io.card_formats.par01_resonances From 1609043b3605118ea9bb59b77c02ee7a18584efe Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 13:47:31 -0700 Subject: [PATCH 15/45] updating unit tests for SAMMY INP files to reflect updated names of inp05_broadening and inp07_density --- ..._constants.py => test_inp05_broadening.py} | 32 +++++++++---------- ...inp03_density.py => test_inp07_density.py} | 32 +++++++++---------- 2 files changed, 32 insertions(+), 32 deletions(-) rename tests/unit/pleiades/sammy/io/card_formats/{test_inp03_constants.py => test_inp05_broadening.py} (87%) rename tests/unit/pleiades/sammy/io/card_formats/{test_inp03_density.py => test_inp07_density.py} (84%) diff --git a/tests/unit/pleiades/sammy/io/card_formats/test_inp03_constants.py b/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py similarity index 87% rename from tests/unit/pleiades/sammy/io/card_formats/test_inp03_constants.py rename to tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py index 73542849..acd66321 100644 --- a/tests/unit/pleiades/sammy/io/card_formats/test_inp03_constants.py +++ b/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py @@ -1,8 +1,8 @@ -"""Unit tests for SAMMY INP file - 
Card 3 (physical constants) class.""" +"""Unit tests for SAMMY INP file - Card 5 (broadening constants) class.""" import pytest -from pleiades.sammy.io.card_formats.inp03_constants import Card03, PhysicalConstants +from pleiades.sammy.io.card_formats.inp05_broadening import Card05, PhysicalConstants @pytest.fixture @@ -25,7 +25,7 @@ def minimal_line(): def test_parse_ex012_line(ex012_line): """Test parsing ex012 physical constants line.""" - constants = Card03.from_lines(ex012_line) + constants = Card05.from_lines(ex012_line) assert pytest.approx(constants.temperature, rel=1e-3) == 300.0 assert pytest.approx(constants.flight_path_length, rel=1e-4) == 200.0 @@ -36,7 +36,7 @@ def test_parse_ex012_line(ex012_line): def test_parse_venus_default_line(venus_default_line): """Test parsing VENUS default configuration.""" - constants = Card03.from_lines(venus_default_line) + constants = Card05.from_lines(venus_default_line) assert pytest.approx(constants.temperature, rel=1e-3) == 293.6 assert pytest.approx(constants.flight_path_length, rel=1e-3) == 25.0 @@ -47,7 +47,7 @@ def test_parse_venus_default_line(venus_default_line): def test_parse_minimal_line(minimal_line): """Test parsing minimal valid line (only TEMP and FPL).""" - constants = Card03.from_lines(minimal_line) + constants = Card05.from_lines(minimal_line) assert pytest.approx(constants.temperature, rel=1e-3) == 300.0 assert pytest.approx(constants.flight_path_length, rel=1e-3) == 25.0 @@ -59,25 +59,25 @@ def test_parse_minimal_line(minimal_line): def test_parse_empty_line(): """Test that empty line raises ValueError.""" with pytest.raises(ValueError, match="No valid Card 3 line"): - Card03.from_lines([""]) + Card05.from_lines([""]) def test_parse_no_lines(): """Test that empty list raises ValueError.""" with pytest.raises(ValueError, match="No valid Card 3 line"): - Card03.from_lines([]) + Card05.from_lines([]) def test_parse_insufficient_fields(): """Test that line with only one field raises ValueError.""" with 
pytest.raises(ValueError, match="Card 3 line must have at least 2 fields"): - Card03.from_lines(["300.0"]) + Card05.from_lines(["300.0"]) def test_parse_invalid_format(): """Test that invalid numeric format raises ValueError.""" with pytest.raises(ValueError, match="Failed to parse Card 3 line"): - Card03.from_lines(["InvalidData MoreInvalidData"]) + Card05.from_lines(["InvalidData MoreInvalidData"]) def test_to_lines_ex012(): @@ -86,7 +86,7 @@ def test_to_lines_ex012(): temperature=300.0, flight_path_length=200.0, delta_l=0.182233, delta_g=0.0, delta_e=0.002518 ) - lines = Card03.to_lines(constants) + lines = Card05.to_lines(constants) assert len(lines) == 1 assert "300.0" in lines[0] @@ -100,7 +100,7 @@ def test_to_lines_venus_default(): """Test generating VENUS default line.""" constants = PhysicalConstants(temperature=293.6, flight_path_length=25.0, delta_l=0.0, delta_g=0.0, delta_e=0.0) - lines = Card03.to_lines(constants) + lines = Card05.to_lines(constants) assert len(lines) == 1 assert "293.6" in lines[0] @@ -112,7 +112,7 @@ def test_to_lines_minimal(): """Test generating minimal line with defaults.""" constants = PhysicalConstants(temperature=300.0, flight_path_length=25.0) - lines = Card03.to_lines(constants) + lines = Card05.to_lines(constants) assert len(lines) == 1 assert "300.0" in lines[0] @@ -121,10 +121,10 @@ def test_to_lines_minimal(): def test_roundtrip_ex012(ex012_line): """Test parse and regenerate produces consistent result.""" - constants = Card03.from_lines(ex012_line) - regenerated_lines = Card03.to_lines(constants) + constants = Card05.from_lines(ex012_line) + regenerated_lines = Card05.to_lines(constants) - reparsed_constants = Card03.from_lines(regenerated_lines) + reparsed_constants = Card05.from_lines(regenerated_lines) assert pytest.approx(reparsed_constants.temperature, rel=1e-3) == constants.temperature assert pytest.approx(reparsed_constants.flight_path_length, rel=1e-3) == constants.flight_path_length @@ -172,7 +172,7 @@ def 
test_physical_constants_validation_negative_delta_e(): def test_to_lines_invalid_input(): """Test that to_lines rejects non-PhysicalConstants input.""" with pytest.raises(ValueError, match="constants must be an instance of PhysicalConstants"): - Card03.to_lines("not a PhysicalConstants object") + Card05.to_lines("not a PhysicalConstants object") def test_default_values(): diff --git a/tests/unit/pleiades/sammy/io/card_formats/test_inp03_density.py b/tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py similarity index 84% rename from tests/unit/pleiades/sammy/io/card_formats/test_inp03_density.py rename to tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py index 3406387f..599162c0 100644 --- a/tests/unit/pleiades/sammy/io/card_formats/test_inp03_density.py +++ b/tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py @@ -2,7 +2,7 @@ import pytest -from pleiades.sammy.io.card_formats.inp03_density import Card03Density, SampleDensity +from pleiades.sammy.io.card_formats.inp07_density import Card07Density, SampleDensity @pytest.fixture @@ -25,7 +25,7 @@ def scientific_notation_line(): def test_parse_ex012_line(ex012_line): """Test parsing ex012 density line.""" - density = Card03Density.from_lines(ex012_line) + density = Card07Density.from_lines(ex012_line) assert pytest.approx(density.density, rel=1e-5) == 4.20000 assert pytest.approx(density.number_density, rel=1e-6) == 0.347162 @@ -33,7 +33,7 @@ def test_parse_ex012_line(ex012_line): def test_parse_default_line(default_line): """Test parsing default density line.""" - density = Card03Density.from_lines(default_line) + density = Card07Density.from_lines(default_line) assert pytest.approx(density.density, rel=1e-6) == 9.0 assert pytest.approx(density.number_density, rel=1e-6) == 1.797e-03 @@ -41,7 +41,7 @@ def test_parse_default_line(default_line): def test_parse_scientific_notation_line(scientific_notation_line): """Test parsing line with scientific notation.""" - density = 
Card03Density.from_lines(scientific_notation_line) + density = Card07Density.from_lines(scientific_notation_line) assert pytest.approx(density.density, rel=1e-5) == 19.3 assert pytest.approx(density.number_density, rel=1e-6) == 3.456789e-02 @@ -50,32 +50,32 @@ def test_parse_scientific_notation_line(scientific_notation_line): def test_parse_empty_line(): """Test that empty line raises ValueError.""" with pytest.raises(ValueError, match="No valid density line"): - Card03Density.from_lines([""]) + Card07Density.from_lines([""]) def test_parse_no_lines(): """Test that empty list raises ValueError.""" with pytest.raises(ValueError, match="No valid density line"): - Card03Density.from_lines([]) + Card07Density.from_lines([]) def test_parse_insufficient_fields(): """Test that line with only one field raises ValueError.""" with pytest.raises(ValueError, match="Density line must have 2 fields"): - Card03Density.from_lines(["4.2"]) + Card07Density.from_lines(["4.2"]) def test_parse_invalid_format(): """Test that invalid numeric format raises ValueError.""" with pytest.raises(ValueError, match="Failed to parse density line"): - Card03Density.from_lines(["InvalidData MoreInvalidData"]) + Card07Density.from_lines(["InvalidData MoreInvalidData"]) def test_to_lines_ex012(): """Test generating ex012-style line.""" density = SampleDensity(density=4.20000, number_density=0.347162) - lines = Card03Density.to_lines(density) + lines = Card07Density.to_lines(density) assert len(lines) == 1 assert "4.200000" in lines[0] @@ -86,7 +86,7 @@ def test_to_lines_default(): """Test generating default line.""" density = SampleDensity(density=9.0, number_density=1.797e-03) - lines = Card03Density.to_lines(density) + lines = Card07Density.to_lines(density) assert len(lines) == 1 assert "9.000000" in lines[0] @@ -98,7 +98,7 @@ def test_to_lines_scientific(): """Test generating line with scientific notation.""" density = SampleDensity(density=19.3, number_density=3.456789e-02) - lines = 
Card03Density.to_lines(density) + lines = Card07Density.to_lines(density) assert len(lines) == 1 assert "19.300000" in lines[0] @@ -107,10 +107,10 @@ def test_to_lines_scientific(): def test_roundtrip_ex012(ex012_line): """Test parse and regenerate produces consistent result.""" - density = Card03Density.from_lines(ex012_line) - regenerated_lines = Card03Density.to_lines(density) + density = Card07Density.from_lines(ex012_line) + regenerated_lines = Card07Density.to_lines(density) - reparsed_density = Card03Density.from_lines(regenerated_lines) + reparsed_density = Card07Density.from_lines(regenerated_lines) assert pytest.approx(reparsed_density.density, rel=1e-5) == density.density assert pytest.approx(reparsed_density.number_density, rel=1e-5) == density.number_density @@ -143,7 +143,7 @@ def test_sample_density_validation_zero_number_density(): def test_to_lines_invalid_input(): """Test that to_lines rejects non-SampleDensity input.""" with pytest.raises(ValueError, match="sample_density must be an instance of SampleDensity"): - Card03Density.to_lines("not a SampleDensity object") + Card07Density.to_lines("not a SampleDensity object") def test_typical_values(): @@ -158,7 +158,7 @@ def test_small_number_density(): """Test very small number density values.""" density = SampleDensity(density=0.001, number_density=1e-10) - lines = Card03Density.to_lines(density) + lines = Card07Density.to_lines(density) assert len(lines) == 1 assert "0.001000" in lines[0] assert "e-10" in lines[0] From a5dae1b6cd376dc3427ef1bbed3b2c6bc4e7a79b Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 13:47:51 -0700 Subject: [PATCH 16/45] feat: extend FitConfig with additional parameters for covariance output and material number --- src/pleiades/sammy/fitting/config.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/pleiades/sammy/fitting/config.py b/src/pleiades/sammy/fitting/config.py index d3a1c348..8967930e 100644 --- a/src/pleiades/sammy/fitting/config.py +++ b/src/pleiades/sammy/fitting/config.py @@ -28,6 +28,16 @@ class FitConfig(BaseModel): max_wall_time: Optional[float] = Field(default=None, description="Maximum wall time allowed") max_memory: Optional[float] = Field(default=None, description="Maximum memory allowed") max_disk: Optional[float] = Field(default=None, description="Maximum disk space allowed") + iptdop: Optional[int] = Field( + default=None, description="Grid enhancement for Doppler broadening (Card Set 2 IPTDOP)" + ) + iptwid: Optional[int] = Field(default=None, description="Grid enhancement for resonance tails (Card Set 2 IPTWID)") + ixxchn: Optional[int] = Field(default=None, description="Special channel skip or ENDF ZA (Card Set 2 IXXCHN)") + ndigit: Optional[int] = Field(default=None, description="Digits for compact covariance output (Card Set 2 NDIGIT)") + idropp: Optional[int] = Field( + default=None, description="Percent threshold for zeroing covariances (Card Set 2 IDROPP)" + ) + matnum: Optional[int] = Field(default=None, description="ENDF material number (Card Set 2 MATNUM)") nuclear_params: nuclearParameters = Field( default_factory=nuclearParameters, description="Nuclear parameters used in SAMMY calculations" From 24a25900a756beb80c5199a1000f7e513171d337 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 13:48:25 -0700 Subject: [PATCH 17/45] feat: add extended fields to Card Set 2 and corresponding tests for full line parsing --- .../sammy/io/card_formats/inp02_element.py | 115 ++++++++++++++++-- .../io/card_formats/test_inp02_element.py | 54 ++++++++ 2 files changed, 162 insertions(+), 7 deletions(-) diff --git a/src/pleiades/sammy/io/card_formats/inp02_element.py b/src/pleiades/sammy/io/card_formats/inp02_element.py index c363af7d..23824609 100644 --- a/src/pleiades/sammy/io/card_formats/inp02_element.py +++ b/src/pleiades/sammy/io/card_formats/inp02_element.py @@ -12,6 +12,16 @@ 11-20 F AW Atomic weight (amu) 21-30 F EMIN Minimum energy for dataset (eV) 31-40 F EMAX Maximum energy (eV) + 41-45 I NEPNTS Points in artificial energy grid + 46-50 I ITMAX Maximum iterations for Bayes' solution + 51-52 I ICORR Correlation threshold x100 + 53-55 I NXTRA Extra points between experimental points + 56-57 I IPTDOP Grid enhancement for Doppler broadening + 59-60 I IPTWID Grid enhancement for resonance tails + 61-70 I IXXCHN Channel skip or ZA for ENDF output + 71-72 I NDIGIT Digits for compact covariance output + 73-74 I IDROPP Percent threshold for zeroing covariances + 75-80 I MATNUM ENDF material number Example: Si 27.976928 300000. 1800000. 
@@ -31,6 +41,16 @@ "atomic_weight": slice(10, 20), "min_energy": slice(20, 30), "max_energy": slice(30, 40), + "nepnts": slice(40, 45), + "itmax": slice(45, 50), + "icorr": slice(50, 52), + "nxtra": slice(52, 55), + "iptdop": slice(55, 57), + "iptwid": slice(58, 60), + "ixxchn": slice(60, 70), + "ndigit": slice(70, 72), + "idropp": slice(72, 74), + "matnum": slice(74, 80), } @@ -42,12 +62,32 @@ class ElementInfo(BaseModel): atomic_weight: Atomic weight in amu min_energy: Minimum energy for dataset in eV max_energy: Maximum energy in eV + nepnts: Points in artificial energy grid + itmax: Maximum iterations for Bayes' solution + icorr: Correlation threshold x100 + nxtra: Extra points between experimental points + iptdop: Grid enhancement for Doppler broadening + iptwid: Grid enhancement for resonance tails + ixxchn: Channel skip or ZA for ENDF output + ndigit: Digits for compact covariance output + idropp: Percent threshold for zeroing covariances + matnum: ENDF material number """ element: str = Field(..., description="Sample element's name", max_length=10) atomic_weight: float = Field(..., description="Atomic weight (amu)", gt=0) min_energy: float = Field(..., description="Minimum energy (eV)", ge=0) max_energy: float = Field(..., description="Maximum energy (eV)", gt=0) + nepnts: int | None = Field(default=None, description="Points in artificial energy grid") + itmax: int | None = Field(default=None, description="Maximum iterations for Bayes' solution") + icorr: int | None = Field(default=None, description="Correlation threshold x100") + nxtra: int | None = Field(default=None, description="Extra points between experimental points") + iptdop: int | None = Field(default=None, description="Grid enhancement for Doppler broadening") + iptwid: int | None = Field(default=None, description="Grid enhancement for resonance tails") + ixxchn: int | None = Field(default=None, description="Channel skip or ZA for ENDF output") + ndigit: int | None = Field(default=None, 
description="Digits for compact covariance output") + idropp: int | None = Field(default=None, description="Percent threshold for zeroing covariances") + matnum: int | None = Field(default=None, description="ENDF material number") def model_post_init(self, __context) -> None: """Validate that max_energy > min_energy.""" @@ -81,14 +121,29 @@ def from_lines(cls, lines: List[str]) -> ElementInfo: raise ValueError(message) line = lines[0] - if len(line) < 40: - line = f"{line:<40}" + if len(line) < 80: + line = f"{line:<80}" try: element = line[CARD02_FORMAT["element"]].strip() atomic_weight = float(line[CARD02_FORMAT["atomic_weight"]].strip()) min_energy = float(line[CARD02_FORMAT["min_energy"]].strip()) max_energy = float(line[CARD02_FORMAT["max_energy"]].strip()) + + def parse_int(value: str) -> int | None: + stripped = value.strip() + return int(stripped) if stripped else None + + nepnts = parse_int(line[CARD02_FORMAT["nepnts"]]) + itmax = parse_int(line[CARD02_FORMAT["itmax"]]) + icorr = parse_int(line[CARD02_FORMAT["icorr"]]) + nxtra = parse_int(line[CARD02_FORMAT["nxtra"]]) + iptdop = parse_int(line[CARD02_FORMAT["iptdop"]]) + iptwid = parse_int(line[CARD02_FORMAT["iptwid"]]) + ixxchn = parse_int(line[CARD02_FORMAT["ixxchn"]]) + ndigit = parse_int(line[CARD02_FORMAT["ndigit"]]) + idropp = parse_int(line[CARD02_FORMAT["idropp"]]) + matnum = parse_int(line[CARD02_FORMAT["matnum"]]) except (ValueError, IndexError) as e: message = f"Failed to parse Card 2 line: {e}" logger.error(message) @@ -104,6 +159,16 @@ def from_lines(cls, lines: List[str]) -> ElementInfo: atomic_weight=atomic_weight, min_energy=min_energy, max_energy=max_energy, + nepnts=nepnts, + itmax=itmax, + icorr=icorr, + nxtra=nxtra, + iptdop=iptdop, + iptwid=iptwid, + ixxchn=ixxchn, + ndigit=ndigit, + idropp=idropp, + matnum=matnum, ) @classmethod @@ -121,11 +186,47 @@ def to_lines(cls, element_info: ElementInfo) -> List[str]: logger.error(message) raise ValueError(message) - line = ( - 
f"{element_info.element:<10s}" - f"{element_info.atomic_weight:10.6f}" - f"{element_info.min_energy:10.3f}" - f"{element_info.max_energy:10.1f}" + line = f"{element_info.element:<10s}{element_info.atomic_weight:10.6f}" + + def format_energy(value: float, fixed_format: str) -> str: + if value != 0.0 and abs(value) < 1.0e-3: + return f"{value:10.3E}" + return format(value, fixed_format) + + line += format_energy(element_info.min_energy, "10.3f") + format_energy(element_info.max_energy, "10.1f") + + extra_fields = ( + element_info.nepnts, + element_info.itmax, + element_info.icorr, + element_info.nxtra, + element_info.iptdop, + element_info.iptwid, + element_info.ixxchn, + element_info.ndigit, + element_info.idropp, + element_info.matnum, ) + if any(value is not None for value in extra_fields): + + def fmt_int(value: int | None, width: int) -> str: + if value is None: + return " " * width + return f"{value:>{width}d}" + + line += ( + f"{fmt_int(element_info.nepnts, 5)}" + f"{fmt_int(element_info.itmax, 5)}" + f"{fmt_int(element_info.icorr, 2)}" + f"{fmt_int(element_info.nxtra, 3)}" + f"{fmt_int(element_info.iptdop, 2)}" + f" " + f"{fmt_int(element_info.iptwid, 2)}" + f"{fmt_int(element_info.ixxchn, 10)}" + f"{fmt_int(element_info.ndigit, 2)}" + f"{fmt_int(element_info.idropp, 2)}" + f"{fmt_int(element_info.matnum, 6)}" + ) + return [line] diff --git a/tests/unit/pleiades/sammy/io/card_formats/test_inp02_element.py b/tests/unit/pleiades/sammy/io/card_formats/test_inp02_element.py index 8cc47ca9..e4348975 100644 --- a/tests/unit/pleiades/sammy/io/card_formats/test_inp02_element.py +++ b/tests/unit/pleiades/sammy/io/card_formats/test_inp02_element.py @@ -23,6 +23,12 @@ def minimal_line(): return ["H 1.00794 0.001 10.0"] +@pytest.fixture +def full_line(): + """Example with extended Card Set 2 fields.""" + return ["U 238 238.050972 0.0000 100.0 0 0 0 0 0 0 0"] + + def test_parse_silicon_line(silicon_line): """Test parsing silicon element line.""" element_info = 
Card02.from_lines(silicon_line) @@ -53,6 +59,26 @@ def test_parse_minimal_line(minimal_line): assert pytest.approx(element_info.max_energy, rel=1e-3) == 10.0 +def test_parse_full_line(full_line): + """Test parsing extended Card Set 2 line.""" + element_info = Card02.from_lines(full_line) + + assert element_info.element == "U 238" + assert pytest.approx(element_info.atomic_weight, rel=1e-6) == 238.050972 + assert pytest.approx(element_info.min_energy, rel=1e-4) == 0.0 + assert pytest.approx(element_info.max_energy, rel=1e-3) == 100.0 + assert element_info.nepnts == 0 + assert element_info.itmax == 0 + assert element_info.icorr == 0 + assert element_info.nxtra == 0 + assert element_info.iptdop == 0 + assert element_info.iptwid == 0 + assert element_info.ixxchn == 0 + assert element_info.ndigit is None + assert element_info.idropp is None + assert element_info.matnum is None + + def test_parse_empty_line(): """Test that empty line raises ValueError.""" with pytest.raises(ValueError, match="No valid Card 2 line"): @@ -110,6 +136,34 @@ def test_roundtrip_silicon(silicon_line): assert pytest.approx(reparsed_info.max_energy, rel=1e-3) == element_info.max_energy +def test_to_lines_with_extras(): + """Test generating line with extended Card Set 2 fields.""" + element_info = ElementInfo( + element="U 238", + atomic_weight=238.050972, + min_energy=0.0, + max_energy=100.0, + nepnts=10001, + itmax=2, + icorr=50, + nxtra=0, + iptdop=9, + iptwid=5, + ixxchn=0, + ndigit=2, + idropp=2, + matnum=92238, + ) + + lines = Card02.to_lines(element_info) + + assert len(lines) == 1 + assert "10001" in lines[0] + assert " 2" in lines[0] + assert "50" in lines[0] + assert "92238" in lines[0] + + def test_element_info_validation_max_less_than_min(): """Test that max_energy must be greater than min_energy.""" with pytest.raises(ValueError, match="max_energy.*must be greater than min_energy"): From daed418913b17cb9f8dad8ce016a58ec14345047 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 14:20:00 -0700 Subject: [PATCH 18/45] feat: add flight-path length and corresponding uncertainty to BroadeningParameters --- src/pleiades/experimental/models.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/pleiades/experimental/models.py b/src/pleiades/experimental/models.py index cfc19a12..98526a2f 100644 --- a/src/pleiades/experimental/models.py +++ b/src/pleiades/experimental/models.py @@ -119,16 +119,18 @@ class BroadeningParameters(BaseModel): """ # Main parameters - crfn: float = Field(default=None, description="Matching radius (F)") temp: float = Field(default=None, description="Effective temperature (K)") + dist: float = Field(default=None, description="Flight-path length (m)") + crfn: float = Field(default=None, description="Matching radius (F)") thick: float = Field(default=None, description="Sample thickness (atoms/barn)") deltal: float = Field(default=None, description="Spread in flight-path length (m)") deltag: float = Field(default=None, description="Gaussian resolution width (μs)") deltae: float = Field(default=None, description="e-folding width of exponential resolution (μs)") # Optional uncertainties for main parameters - d_crfn: Optional[float] = Field(None, description="Uncertainty on CRFN") d_temp: Optional[float] = Field(None, description="Uncertainty on TEMP") + d_dist: Optional[float] = Field(None, description="Uncertainty on DIST") + d_crfn: Optional[float] = Field(None, description="Uncertainty on CRFN") d_thick: Optional[float] = Field(None, description="Uncertainty on THICK") d_deltal: Optional[float] = Field(None, description="Uncertainty on DELTAL") d_deltag: Optional[float] = Field(None, description="Uncertainty on DELTAG") From 4a2b4058287556d335b35ad3165499f2028bd765 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 14:20:27 -0700 Subject: [PATCH 19/45] reworked the inp07_density card --- .../sammy/io/card_formats/inp07_density.py | 69 ++++++++----------- 1 file changed, 30 insertions(+), 39 deletions(-) diff --git a/src/pleiades/sammy/io/card_formats/inp07_density.py b/src/pleiades/sammy/io/card_formats/inp07_density.py index 78b61479..ade877fe 100644 --- a/src/pleiades/sammy/io/card_formats/inp07_density.py +++ b/src/pleiades/sammy/io/card_formats/inp07_density.py @@ -1,15 +1,15 @@ #!/usr/bin/env python """ -Sample Density for SAMMY INP files. +Card Set 7 (Sample Thickness) for SAMMY INP files. -This module provides the Card07Density class for parsing and generating the sample -density line in SAMMY input files. This line appears after the broadening constants -and defines the material density and number density. +This module provides the Card07 class for parsing and generating the Card Set 7 +line in SAMMY input files. This line appears after the broadening constants and +defines the matching radius and sample thickness. -Format specification (Sample Density): +Format specification (Card Set 7): The line contains two floating-point values: - - Density: Material density (g/cm³) - - Number density: Number density (atoms/barn-cm) + - CRFN: Matching radius (F) + - THICK: Sample thickness (atoms/barn) Example: 4.20000 0.347162 @@ -24,40 +24,31 @@ logger = loguru_logger.bind(name=__name__) -class SampleDensity(BaseModel): - """Pydantic model for sample density parameters. 
+class Card07Parameters(BaseModel): + """Pydantic model for Card Set 7 parameters.""" - Attributes: - density: Material density in g/cm³ - number_density: Number density in atoms/barn-cm - """ + crfn: float = Field(..., description="Matching radius (F)", ge=0) + thick: float = Field(..., description="Sample thickness (atoms/barn)", ge=0) - density: float = Field(..., description="Material density (g/cm³)", gt=0) - number_density: float = Field(..., description="Number density (atoms/barn-cm)", gt=0) - -class Card07Density(BaseModel): - """ - Class representing sample density line in SAMMY INP files. - - This line defines the material density and number density for the sample. - """ +class Card07(BaseModel): + """Class representing Card Set 7 line in SAMMY INP files.""" @classmethod - def from_lines(cls, lines: List[str]) -> SampleDensity: - """Parse sample density from density line. + def from_lines(cls, lines: List[str]) -> Card07Parameters: + """Parse Card Set 7 parameters from line. Args: lines: List of input lines (expects single line) Returns: - SampleDensity: Parsed sample density parameters + Card07Parameters: Parsed Card Set 7 parameters Raises: ValueError: If format is invalid or required values missing """ if not lines or not lines[0].strip(): - message = "No valid density line provided" + message = "No valid Card 7 line provided" logger.error(message) raise ValueError(message) @@ -65,38 +56,38 @@ def from_lines(cls, lines: List[str]) -> SampleDensity: fields = line.split() if len(fields) < 2: - message = f"Density line must have 2 fields (density, number_density), got {len(fields)}" + message = f"Card 7 line must have at least 2 fields (CRFN, THICK), got {len(fields)}" logger.error(message) raise ValueError(message) try: - density = float(fields[0]) - number_density = float(fields[1]) + crfn = float(fields[0]) + thick = float(fields[1]) except (ValueError, IndexError) as e: - message = f"Failed to parse density line: {e}" + message = f"Failed to parse 
Card 7 line: {e}" logger.error(message) raise ValueError(message) - return SampleDensity( - density=density, - number_density=number_density, + return Card07Parameters( + crfn=crfn, + thick=thick, ) @classmethod - def to_lines(cls, sample_density: SampleDensity) -> List[str]: - """Convert sample density to formatted line. + def to_lines(cls, params: Card07Parameters) -> List[str]: + """Convert Card Set 7 parameters to formatted line. Args: - sample_density: SampleDensity object containing density data + params: Card07Parameters object containing CRFN/THICK values Returns: List containing single formatted line """ - if not isinstance(sample_density, SampleDensity): - message = "sample_density must be an instance of SampleDensity" + if not isinstance(params, Card07Parameters): + message = "params must be an instance of Card07Parameters" logger.error(message) raise ValueError(message) - line = f" {sample_density.density:8.6f} {sample_density.number_density:.6e}" + line = f" {params.crfn:8.6f} {params.thick:.6e}" return [line] From 270d1e0c9c3235f905b493149fc73d45d67ce54b Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 14:20:43 -0700 Subject: [PATCH 20/45] refactor: update tests for Card Set 7 to use Card07 and Card07Parameters --- .../io/card_formats/test_inp07_density.py | 104 ++++++++---------- 1 file changed, 46 insertions(+), 58 deletions(-) diff --git a/tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py b/tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py index 599162c0..39153da8 100644 --- a/tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py +++ b/tests/unit/pleiades/sammy/io/card_formats/test_inp07_density.py @@ -1,8 +1,8 @@ -"""Unit tests for SAMMY INP file - Sample Density class.""" +"""Unit tests for SAMMY INP file - Card Set 7 class.""" import pytest -from pleiades.sammy.io.card_formats.inp07_density import Card07Density, SampleDensity +from pleiades.sammy.io.card_formats.inp07_density import Card07, Card07Parameters @pytest.fixture @@ -24,58 +24,58 @@ def scientific_notation_line(): def test_parse_ex012_line(ex012_line): - """Test parsing ex012 density line.""" - density = Card07Density.from_lines(ex012_line) + """Test parsing ex012 Card 7 line.""" + params = Card07.from_lines(ex012_line) - assert pytest.approx(density.density, rel=1e-5) == 4.20000 - assert pytest.approx(density.number_density, rel=1e-6) == 0.347162 + assert pytest.approx(params.crfn, rel=1e-5) == 4.20000 + assert pytest.approx(params.thick, rel=1e-6) == 0.347162 def test_parse_default_line(default_line): - """Test parsing default density line.""" - density = Card07Density.from_lines(default_line) + """Test parsing default Card 7 line.""" + params = Card07.from_lines(default_line) - assert pytest.approx(density.density, rel=1e-6) == 9.0 - assert pytest.approx(density.number_density, rel=1e-6) == 1.797e-03 + assert pytest.approx(params.crfn, rel=1e-6) == 9.0 + assert pytest.approx(params.thick, rel=1e-6) == 1.797e-03 def test_parse_scientific_notation_line(scientific_notation_line): """Test parsing line with scientific notation.""" - 
density = Card07Density.from_lines(scientific_notation_line) + params = Card07.from_lines(scientific_notation_line) - assert pytest.approx(density.density, rel=1e-5) == 19.3 - assert pytest.approx(density.number_density, rel=1e-6) == 3.456789e-02 + assert pytest.approx(params.crfn, rel=1e-5) == 19.3 + assert pytest.approx(params.thick, rel=1e-6) == 3.456789e-02 def test_parse_empty_line(): """Test that empty line raises ValueError.""" - with pytest.raises(ValueError, match="No valid density line"): - Card07Density.from_lines([""]) + with pytest.raises(ValueError, match="No valid Card 7 line"): + Card07.from_lines([""]) def test_parse_no_lines(): """Test that empty list raises ValueError.""" - with pytest.raises(ValueError, match="No valid density line"): - Card07Density.from_lines([]) + with pytest.raises(ValueError, match="No valid Card 7 line"): + Card07.from_lines([]) def test_parse_insufficient_fields(): """Test that line with only one field raises ValueError.""" - with pytest.raises(ValueError, match="Density line must have 2 fields"): - Card07Density.from_lines(["4.2"]) + with pytest.raises(ValueError, match="Card 7 line must have at least 2 fields"): + Card07.from_lines(["4.2"]) def test_parse_invalid_format(): """Test that invalid numeric format raises ValueError.""" - with pytest.raises(ValueError, match="Failed to parse density line"): - Card07Density.from_lines(["InvalidData MoreInvalidData"]) + with pytest.raises(ValueError, match="Failed to parse Card 7 line"): + Card07.from_lines(["InvalidData MoreInvalidData"]) def test_to_lines_ex012(): """Test generating ex012-style line.""" - density = SampleDensity(density=4.20000, number_density=0.347162) + params = Card07Parameters(crfn=4.20000, thick=0.347162) - lines = Card07Density.to_lines(density) + lines = Card07.to_lines(params) assert len(lines) == 1 assert "4.200000" in lines[0] @@ -84,9 +84,9 @@ def test_to_lines_ex012(): def test_to_lines_default(): """Test generating default line.""" - density = 
SampleDensity(density=9.0, number_density=1.797e-03) + params = Card07Parameters(crfn=9.0, thick=1.797e-03) - lines = Card07Density.to_lines(density) + lines = Card07.to_lines(params) assert len(lines) == 1 assert "9.000000" in lines[0] @@ -96,9 +96,9 @@ def test_to_lines_default(): def test_to_lines_scientific(): """Test generating line with scientific notation.""" - density = SampleDensity(density=19.3, number_density=3.456789e-02) + params = Card07Parameters(crfn=19.3, thick=3.456789e-02) - lines = Card07Density.to_lines(density) + lines = Card07.to_lines(params) assert len(lines) == 1 assert "19.300000" in lines[0] @@ -107,58 +107,46 @@ def test_to_lines_scientific(): def test_roundtrip_ex012(ex012_line): """Test parse and regenerate produces consistent result.""" - density = Card07Density.from_lines(ex012_line) - regenerated_lines = Card07Density.to_lines(density) + params = Card07.from_lines(ex012_line) + regenerated_lines = Card07.to_lines(params) - reparsed_density = Card07Density.from_lines(regenerated_lines) + reparsed = Card07.from_lines(regenerated_lines) - assert pytest.approx(reparsed_density.density, rel=1e-5) == density.density - assert pytest.approx(reparsed_density.number_density, rel=1e-5) == density.number_density + assert pytest.approx(reparsed.crfn, rel=1e-5) == params.crfn + assert pytest.approx(reparsed.thick, rel=1e-5) == params.thick -def test_sample_density_validation_negative_density(): - """Test that density must be positive.""" +def test_card07_validation_negative_crfn(): + """Test that CRFN must be non-negative.""" with pytest.raises(ValueError): - SampleDensity(density=-9.0, number_density=0.001) + Card07Parameters(crfn=-9.0, thick=0.001) -def test_sample_density_validation_zero_density(): - """Test that density must be positive (not zero).""" +def test_card07_validation_negative_thick(): + """Test that THICK must be non-negative.""" with pytest.raises(ValueError): - SampleDensity(density=0.0, number_density=0.001) - - -def 
test_sample_density_validation_negative_number_density(): - """Test that number_density must be positive.""" - with pytest.raises(ValueError): - SampleDensity(density=9.0, number_density=-0.001) - - -def test_sample_density_validation_zero_number_density(): - """Test that number_density must be positive (not zero).""" - with pytest.raises(ValueError): - SampleDensity(density=9.0, number_density=0.0) + Card07Parameters(crfn=9.0, thick=-0.001) def test_to_lines_invalid_input(): - """Test that to_lines rejects non-SampleDensity input.""" - with pytest.raises(ValueError, match="sample_density must be an instance of SampleDensity"): - Card07Density.to_lines("not a SampleDensity object") + """Test that to_lines rejects non-Card07Parameters input.""" + with pytest.raises(ValueError, match="params must be an instance of Card07Parameters"): + Card07.to_lines("not a Card07Parameters object") def test_typical_values(): """Test typical material density values.""" - gold_density = SampleDensity(density=19.3, number_density=0.059) + params = Card07Parameters(crfn=19.3, thick=0.059) - assert gold_density.density == 19.3 - assert gold_density.number_density == 0.059 + assert params.crfn == 19.3 + assert params.thick == 0.059 def test_small_number_density(): """Test very small number density values.""" - density = SampleDensity(density=0.001, number_density=1e-10) + params = Card07Parameters(crfn=0.001, thick=1e-10) - lines = Card07Density.to_lines(density) + lines = Card07.to_lines(params) assert len(lines) == 1 assert "0.001000" in lines[0] assert "e-10" in lines[0] From 3ab2805741801e38a2dbef11f72b85297bdf0cda Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 14:20:52 -0700 Subject: [PATCH 21/45] fix: update error messages in Card 5 tests to reflect correct card number --- .../sammy/io/card_formats/test_inp05_broadening.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py b/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py index acd66321..016b64f6 100644 --- a/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py +++ b/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py @@ -58,25 +58,25 @@ def test_parse_minimal_line(minimal_line): def test_parse_empty_line(): """Test that empty line raises ValueError.""" - with pytest.raises(ValueError, match="No valid Card 3 line"): + with pytest.raises(ValueError, match="No valid Card 5 line"): Card05.from_lines([""]) def test_parse_no_lines(): """Test that empty list raises ValueError.""" - with pytest.raises(ValueError, match="No valid Card 3 line"): + with pytest.raises(ValueError, match="No valid Card 5 line"): Card05.from_lines([]) def test_parse_insufficient_fields(): """Test that line with only one field raises ValueError.""" - with pytest.raises(ValueError, match="Card 3 line must have at least 2 fields"): + with pytest.raises(ValueError, match="Card 5 line must have at least 2 fields"): Card05.from_lines(["300.0"]) def test_parse_invalid_format(): """Test that invalid numeric format raises ValueError.""" - with pytest.raises(ValueError, match="Failed to parse Card 3 line"): + with pytest.raises(ValueError, match="Failed to parse Card 5 line"): Card05.from_lines(["InvalidData MoreInvalidData"]) From c713d8c3eb3d82b57433dc7db3cf1a7ec71ae8d1 Mon Sep 17 00:00:00 2001 From: Alexander Long Date: Wed, 21 Jan 2026 14:27:09 -0700 Subject: [PATCH 22/45] Update src/pleiades/utils/config.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/pleiades/utils/config.py | 33 
+++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 4bef9ea6..56da4aa2 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -31,15 +31,40 @@ def _expand_path(value: Optional[Any], workspace: Optional["WorkspaceConfig"] = "${workspace.data_dir}": workspace.data_dir, "${workspace.image_dir}": workspace.image_dir, } + + # Handle the simple case where the value is exactly one workspace token. + # If the replacement is None or does not actually change the value, + # treat this as an unresolved or self-referential path and return None. if raw in mapping: replacement = mapping[raw] - if replacement is None or str(replacement) == raw: + if replacement is None: + return None + replacement_str = str(replacement) + if replacement_str == raw: return None - for token, path in mapping.items(): - if path is not None: - raw = raw.replace(token, str(path)) + raw = replacement_str + + # Perform iterative substitution of workspace tokens, tracking which + # tokens have already been expanded to detect circular references, + # including multi-level indirections (e.g., A -> B -> C -> A). + visited_tokens = set() + changed = True + while changed: + changed = False + for token, path in mapping.items(): + if path is None: + continue + if token in raw: + if token in visited_tokens: + # Circular reference detected (token reappeared after expansion). + return None + visited_tokens.add(token) + raw = raw.replace(token, str(path)) + changed = True raw = os.path.expandvars(os.path.expanduser(raw)) + # If any workspace tokens remain at this point, the path could not be + # resolved (possibly due to an indirect circular reference); return None. if "${workspace." 
in raw: return None return Path(raw) From b61974414f461d14605f1d9c9d4ac583d6fd25a2 Mon Sep 17 00:00:00 2001 From: Alexander Long Date: Wed, 21 Jan 2026 14:27:37 -0700 Subject: [PATCH 23/45] Update src/pleiades/utils/config.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/pleiades/utils/config.py | 38 +++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 56da4aa2..097a55cd 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -186,21 +186,31 @@ def _normalize_config(self) -> "PleiadesConfig": if entry.endf_library is None: entry.endf_library = default_library - for routine in self.fit_routines.values(): - routine_nuclear = routine.get("nuclear") or {} - routine_isotopes = routine_nuclear.get("isotopes") - if routine_isotopes is None: - continue - updated: List[IsotopeConfig] = [] - for entry in routine_isotopes: - if isinstance(entry, dict): - entry = IsotopeConfig(**entry) - if entry.endf_library is None: - entry.endf_library = default_library - updated.append(entry) - routine_nuclear["isotopes"] = updated - routine["nuclear"] = routine_nuclear + # Normalize isotope configuration inside fit_routines without mutating + # the original routine dictionaries in-place. + new_fit_routines: Dict[str, Dict[str, Any]] = {} + for routine_id, routine in self.fit_routines.items(): + # Work on shallow copies to avoid surprising side effects for callers + # that may hold references to the original routine dictionaries. 
+ new_routine: Dict[str, Any] = dict(routine) + routine_nuclear_src = routine.get("nuclear") or {} + routine_nuclear: Dict[str, Any] = dict(routine_nuclear_src) + routine_isotopes = routine_nuclear.get("isotopes") + if routine_isotopes is not None: + updated: List[IsotopeConfig] = [] + for entry in routine_isotopes: + if isinstance(entry, dict): + entry = IsotopeConfig(**entry) + if entry.endf_library is None: + entry.endf_library = default_library + updated.append(entry) + routine_nuclear["isotopes"] = updated + new_routine["nuclear"] = routine_nuclear + + new_fit_routines[routine_id] = new_routine + + self.fit_routines = new_fit_routines return self def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParameters: From 192def21e0a29da21c59f6377ac8160b1c08979e Mon Sep 17 00:00:00 2001 From: Alexander Long Date: Wed, 21 Jan 2026 14:29:33 -0700 Subject: [PATCH 24/45] Update docs/Notes/pleiades_config_workflow.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- docs/Notes/pleiades_config_workflow.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md index 7ef740f5..1b560835 100644 --- a/docs/Notes/pleiades_config_workflow.md +++ b/docs/Notes/pleiades_config_workflow.md @@ -145,20 +145,20 @@ runs: routine_id: example_fit dataset_id: example_dataset created_at: "2026-01-14T12:00:00Z" - fit_dir: ${workspace.fitting_dir}/${routine_id} - results_dir: ${workspace.fitting_dir}/${routine_id}/results_dir + fit_dir: ${workspace.fitting_dir}/example_fit + results_dir: ${workspace.fitting_dir}/example_fit/results_dir input_files: - inp: ${workspace.fitting_dir}/${routine_id}/input.inp - par: ${workspace.fitting_dir}/${routine_id}/params.par - data: ${workspace.data_dir}/${routine_id}.dat + inp: ${workspace.fitting_dir}/example_fit/input.inp + par: ${workspace.fitting_dir}/example_fit/params.par + data: 
${workspace.data_dir}/example_fit.dat output_files: - lpt: ${workspace.fitting_dir}/${routine_id}/results_dir/SAMMY.LPT - lst: ${workspace.fitting_dir}/${routine_id}/results_dir/SAMMY.LST - sammy_par: ${workspace.fitting_dir}/${routine_id}/results_dir/SAMMY.PAR + lpt: ${workspace.fitting_dir}/example_fit/results_dir/SAMMY.LPT + lst: ${workspace.fitting_dir}/example_fit/results_dir/SAMMY.LST + sammy_par: ${workspace.fitting_dir}/example_fit/results_dir/SAMMY.PAR sammy_execution: backend: local success: false - console_output: ${workspace.fitting_dir}/${routine_id}/results_dir/sammy_console.txt + console_output: ${workspace.fitting_dir}/example_fit/results_dir/sammy_console.txt results: run_results_path: ${workspace.results_dir}/run_results_001.json summary: From 8d37501897cdd4a3475cf2dc006dcafc61652926 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 14:34:36 -0700 Subject: [PATCH 25/45] feat: integrate FitConfig into InpManager for enhanced input file handling --- src/pleiades/sammy/io/inp_manager.py | 527 +++++++++++++++++++++++++-- 1 file changed, 487 insertions(+), 40 deletions(-) diff --git a/src/pleiades/sammy/io/inp_manager.py b/src/pleiades/sammy/io/inp_manager.py index bb1a04eb..8b13b6df 100644 --- a/src/pleiades/sammy/io/inp_manager.py +++ b/src/pleiades/sammy/io/inp_manager.py @@ -6,13 +6,20 @@ through the refactored FitOptions class and its factory methods. 
""" +from functools import lru_cache from pathlib import Path -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple, Union, get_args, get_origin +from pleiades.nuclear.isotopes.models import IsotopeInfo, IsotopeMassData +from pleiades.nuclear.models import IsotopeParameters +from pleiades.sammy.data.options import DataTypeOptions +from pleiades.sammy.fitting.config import FitConfig from pleiades.sammy.fitting.options import FitOptions from pleiades.sammy.io.card_formats.inp02_element import Card02, ElementInfo -from pleiades.sammy.io.card_formats.inp03_constants import Card03, PhysicalConstants -from pleiades.sammy.io.card_formats.inp03_density import Card03Density, SampleDensity +from pleiades.sammy.io.card_formats.inp04_particlepairs import Card04 +from pleiades.sammy.io.card_formats.inp05_broadening import Card05, PhysicalConstants +from pleiades.sammy.io.card_formats.inp07_density import Card07, Card07Parameters +from pleiades.sammy.io.card_formats.inp10_spingroups import Card10p2 from pleiades.utils.logger import loguru_logger logger = loguru_logger.bind(name=__name__) @@ -50,6 +57,7 @@ def __init__( isotope_info: Optional[Dict] = None, physical_constants: Optional[Dict] = None, reaction_type: str = None, + fit_config: FitConfig = None, ): """ Initialize with optional FitOptions and section information. @@ -61,10 +69,22 @@ def __init__( physical_constants: Physical constants (temperature, flight path, etc.) reaction_type: Reaction type (transmission, capture, etc.) 
""" - self.options = options or FitOptions() + self.fit_config = fit_config if fit_config else FitConfig() + self._fit_config_provided = fit_config is not None + + self.options = options or (self.fit_config.options_and_routines if self._fit_config_provided else FitOptions()) + if self._fit_config_provided and options is not None: + self.fit_config.options_and_routines = options + + if title is None and self._fit_config_provided: + title = self.fit_config.fit_title self.title = title + self.isotope_info = isotope_info self.physical_constants = physical_constants + + if reaction_type is None and self._fit_config_provided: + reaction_type = self.fit_config.data_params.data_type.value self.reaction_type = reaction_type def set_options(self, options: FitOptions) -> None: @@ -75,6 +95,185 @@ def set_options(self, options: FitOptions) -> None: options: FitOptions instance to use """ self.options = options + if self._fit_config_provided: + self.fit_config.options_and_routines = options + + @staticmethod + def _normalize_command(command: str) -> str: + return " ".join(command.upper().split()) + + @staticmethod + def _option_class_for_field(field_info) -> Optional[type]: + annotation = field_info.annotation + origin = get_origin(annotation) + if origin is Union: + args = [arg for arg in get_args(annotation) if arg is not type(None)] # noqa: E721 + return args[0] if args else None + return annotation + + @classmethod + @lru_cache + def _alphanumeric_command_map(cls) -> Dict[str, Tuple[str, str]]: + mapping: Dict[str, Tuple[str, str]] = {} + + for section_name, field_info in FitOptions.model_fields.items(): + option_class = cls._option_class_for_field(field_info) + if option_class is None or not hasattr(option_class, "get_alphanumeric_commands"): + continue + + bool_fields = {name: field for name, field in option_class.model_fields.items() if field.annotation is bool} + + for field_name in bool_fields: + data = {name: False for name in bool_fields} + data[field_name] = True + 
try: + instance = option_class(**data) + except Exception: + continue + for command in instance.get_alphanumeric_commands(): + normalized = cls._normalize_command(command) + mapping[normalized] = (section_name, field_name) + + return mapping + + def _apply_alphanumeric_commands(self, commands: List[str]) -> None: + if not commands: + return + + command_map = self._alphanumeric_command_map() + aliases = { + "CSISRS": "USE CSISRS FORMAT FOR DATA", + "TWENTY": "USE TWENTY SIGNIFICANT DIGITS", + "GENERATE ODF FILE AUTOMATICALLY": "GENERATE PLOT FILE AUTOMATICALLY", + "DO NOT SUPPRESS ANY INTERMEDIATE RESULTS": "DO NOT SUPPRESS ANY INTERMEDIATE PRINTOUT", + } + sections: Dict[str, Dict[str, bool]] = {} + unknown = [] + + for command in commands: + normalized = self._normalize_command(command) + if normalized in aliases: + normalized = self._normalize_command(aliases[normalized]) + if normalized in command_map: + section_name, field_name = command_map[normalized] + sections.setdefault(section_name, {})[field_name] = True + else: + unknown.append(command) + + if unknown: + logger.warning(f"Unmapped alphanumeric commands: {unknown}") + + options = FitOptions() + for section_name, fields in sections.items(): + field_info = FitOptions.model_fields[section_name] + option_class = self._option_class_for_field(field_info) + if option_class is None: + continue + try: + section_instance = option_class(**fields) + except Exception as exc: + logger.warning(f"Failed to parse commands for {section_name}: {exc}") + continue + setattr(options, section_name, section_instance) + + self.fit_config.options_and_routines = options + self.options = options + + def _element_name_from_fit_config(self) -> str: + isotopes = self.fit_config.nuclear_params.isotopes + if not isotopes: + return "Sample" + + isotope = isotopes[0] + info = isotope.isotope_information + if info and info.element and info.mass_number: + return f"{info.element}{info.mass_number}" + if info and info.name: + return 
info.name.replace("-", "") + return "Sample" + + def _atomic_mass_from_fit_config(self) -> float: + isotopes = self.fit_config.nuclear_params.isotopes + if not isotopes: + return 1.0 + + info = isotopes[0].isotope_information + if info and info.mass_data and info.mass_data.atomic_mass: + return info.mass_data.atomic_mass + if info and info.mass_number: + return float(info.mass_number) + return 1.0 + + @staticmethod + def _isotope_info_from_element(element_info: ElementInfo) -> IsotopeInfo: + raw = element_info.element.strip() + letters = "".join(ch for ch in raw if ch.isalpha()) + digits = "".join(ch for ch in raw if ch.isdigit()) + info = None + + if letters and digits: + try: + info = IsotopeInfo.from_string(f"{letters}-{digits}") + except ValueError: + info = None + + if info is None and letters: + info = IsotopeInfo(name=raw or letters, element=letters) + if digits: + info.mass_number = int(digits) + + if info is None: + info = IsotopeInfo(name=raw or "UNK", element=letters or None) + if digits: + info.mass_number = int(digits) + + if element_info.atomic_weight is not None: + info.mass_data = IsotopeMassData(atomic_mass=element_info.atomic_weight) + + return info + + def _element_info_from_fit_config(self) -> ElementInfo: + energy = self.fit_config.physics_params.energy_parameters + element_info = ElementInfo( + element=self._element_name_from_fit_config(), + atomic_weight=self._atomic_mass_from_fit_config(), + min_energy=energy.min_energy, + max_energy=energy.max_energy, + nepnts=energy.number_of_energy_points, + itmax=self.fit_config.max_iterations, + icorr=self.fit_config.i_correlation, + nxtra=energy.number_of_extra_points, + iptdop=self.fit_config.iptdop, + iptwid=self.fit_config.iptwid, + ixxchn=self.fit_config.ixxchn, + ndigit=self.fit_config.ndigit, + idropp=self.fit_config.idropp, + matnum=self.fit_config.matnum, + ) + return element_info + + def _element_info_from_dict(self, isotope_info: Dict) -> ElementInfo: + element = isotope_info.get("element", 
"Sample") + atomic_mass = isotope_info.get("atomic_mass_amu", 1.0) + min_energy = isotope_info.get("min_energy_eV", 0.001) + max_energy = isotope_info.get("max_energy_eV", 1000.0) + + return ElementInfo( + element=element, + atomic_weight=atomic_mass, + min_energy=min_energy, + max_energy=max_energy, + nepnts=isotope_info.get("nepnts", isotope_info.get("number_of_energy_points")), + itmax=isotope_info.get("itmax", isotope_info.get("max_iterations")), + icorr=isotope_info.get("icorr", isotope_info.get("i_correlation")), + nxtra=isotope_info.get("nxtra", isotope_info.get("number_of_extra_points")), + iptdop=isotope_info.get("iptdop"), + iptwid=isotope_info.get("iptwid"), + ixxchn=isotope_info.get("ixxchn"), + ndigit=isotope_info.get("ndigit"), + idropp=isotope_info.get("idropp"), + matnum=isotope_info.get("matnum"), + ) def generate_commands(self) -> List[str]: """ @@ -104,17 +303,9 @@ def generate_isotope_section(self) -> str: str: Properly formatted Card Set 2 element information line """ if self.isotope_info: - element = self.isotope_info.get("element", "Sample") - atomic_mass = self.isotope_info.get("atomic_mass_amu", 1.0) - min_energy = self.isotope_info.get("min_energy_eV", 0.001) - max_energy = self.isotope_info.get("max_energy_eV", 1000.0) - - element_info = ElementInfo( - element=element, - atomic_weight=atomic_mass, - min_energy=min_energy, - max_energy=max_energy, - ) + element_info = self._element_info_from_dict(self.isotope_info) + elif self._fit_config_provided: + element_info = self._element_info_from_fit_config() else: element_info = ElementInfo( element="Sample", @@ -136,9 +327,32 @@ def generate_physical_constants_section(self, material_properties: Dict = None) Returns: str: Physical constants line """ + if material_properties is None and self.physical_constants: + material_properties = self.physical_constants + + if material_properties is None and self._fit_config_provided: + broadening = self.fit_config.physics_params.broadening_parameters + 
material_properties = { + "temperature_K": broadening.temp, + "delta_l": broadening.deltal, + "delta_g": broadening.deltag, + "delta_e": broadening.deltae, + } + material_properties = {key: value for key, value in material_properties.items() if value is not None} + if material_properties: - temperature = material_properties.get("temperature_K", 293.6) - flight_path = material_properties.get("flight_path_m", 25.0) + temperature = material_properties.get("temperature_K") + if temperature is None: + temperature = material_properties.get("temperature") + if temperature is None: + temperature = 293.6 + + flight_path = material_properties.get("flight_path_m") + if flight_path is None: + flight_path = material_properties.get("flight_path") + if flight_path is None: + flight_path = 25.0 + delta_l = material_properties.get("delta_l", 0.0) delta_g = material_properties.get("delta_g", 0.0) delta_e = material_properties.get("delta_e", 0.0) @@ -159,39 +373,45 @@ def generate_physical_constants_section(self, material_properties: Dict = None) delta_e=0.0, ) - lines = Card03.to_lines(constants) + lines = Card05.to_lines(constants) return "\n" + lines[0] - def generate_sample_density_section(self, material_properties: Dict = None) -> str: + def generate_card_7_section(self, material_properties: Dict = None) -> str: """ - Generate the sample density section. + Generate the Card Set 7 section (CRFN, THICK). 
Args: material_properties: Dict with material properties Returns: - str: Sample density line with density (g/cm3) and number density (atoms/barn) + str: Card Set 7 line or empty string if unavailable """ + crfn = None + thick = None + if material_properties: - from pleiades.utils.units import calculate_number_density + crfn = material_properties.get("crfn") + thick = material_properties.get("thick") - density = material_properties.get("density_g_cm3", 9.0) - thickness_mm = material_properties.get("thickness_mm", 5.0) - atomic_mass = material_properties.get("atomic_mass_amu", 28.0) + if thick is None: + density = material_properties.get("density_g_cm3") + thickness_mm = material_properties.get("thickness_mm") + atomic_mass = material_properties.get("atomic_mass_amu") + if density is not None and thickness_mm is not None and atomic_mass is not None: + from pleiades.utils.units import calculate_number_density - number_density = calculate_number_density(density, thickness_mm, atomic_mass) + thick = calculate_number_density(density, thickness_mm, atomic_mass) - sample_density = SampleDensity( - density=density, - number_density=number_density, - ) - else: - sample_density = SampleDensity( - density=DEFAULT_DENSITY, - number_density=DEFAULT_NUMBER_DENSITY, - ) + if crfn is None or thick is None: + broadening = self.fit_config.physics_params.broadening_parameters + crfn = broadening.crfn if crfn is None else crfn + thick = broadening.thick if thick is None else thick - lines = Card03Density.to_lines(sample_density) + if crfn is None or thick is None: + return "" + + params = Card07Parameters(crfn=crfn, thick=thick) + lines = Card07.to_lines(params) return lines[0] def generate_reaction_type_section(self) -> str: @@ -220,19 +440,34 @@ def generate_card_set_2_element_info(self, material_properties: Dict = None) -> """ if material_properties: element = material_properties.get("element", "Au") - mass_number = material_properties.get("mass_number", 197) + mass_number = 
material_properties.get("mass_number") atomic_mass = material_properties.get("atomic_mass_amu", 196.966569) min_energy = material_properties.get("min_energy_eV", 0.001) max_energy = material_properties.get("max_energy_eV", 1000.0) - element_name = f"{element}{mass_number}" + if mass_number is not None: + element_name = f"{element}{mass_number}" + else: + element_name = element element_info = ElementInfo( element=element_name, atomic_weight=atomic_mass, min_energy=min_energy, max_energy=max_energy, + nepnts=material_properties.get("nepnts", material_properties.get("number_of_energy_points")), + itmax=material_properties.get("itmax", material_properties.get("max_iterations")), + icorr=material_properties.get("icorr", material_properties.get("i_correlation")), + nxtra=material_properties.get("nxtra", material_properties.get("number_of_extra_points")), + iptdop=material_properties.get("iptdop", self.fit_config.iptdop if self._fit_config_provided else None), + iptwid=material_properties.get("iptwid", self.fit_config.iptwid if self._fit_config_provided else None), + ixxchn=material_properties.get("ixxchn", self.fit_config.ixxchn if self._fit_config_provided else None), + ndigit=material_properties.get("ndigit", self.fit_config.ndigit if self._fit_config_provided else None), + idropp=material_properties.get("idropp", self.fit_config.idropp if self._fit_config_provided else None), + matnum=material_properties.get("matnum", self.fit_config.matnum if self._fit_config_provided else None), ) + elif self._fit_config_provided: + element_info = self._element_info_from_fit_config() else: element_info = ElementInfo( element="Au197", @@ -406,7 +641,7 @@ def generate_multi_isotope_inp_content( self.generate_card_set_2_element_info(material_properties), # Use Card Set 2 for element info "\n".join(self.generate_commands()), self.generate_physical_constants_section(material_properties), - self.generate_sample_density_section(material_properties), + 
self.generate_card_7_section(material_properties), self.generate_reaction_type_section(), self.generate_broadening_parameters_section(material_properties), self.generate_misc_parameters_section(), @@ -424,14 +659,17 @@ def generate_inp_content(self) -> str: Returns: str: Complete content for SAMMY input file """ + card_7 = self.generate_card_7_section() sections = [ self.generate_title_section(), self.generate_isotope_section(), "\n".join(self.generate_commands()), "", # Empty line for readability self.generate_physical_constants_section(), - self.generate_reaction_type_section(), ] + if card_7: + sections.append(card_7) + sections.append(self.generate_reaction_type_section()) return "\n".join(sections) def write_inp_file(self, file_path: Path) -> Path: @@ -462,6 +700,215 @@ def write_inp_file(self, file_path: Path) -> Path: logger.error(f"Failed to write SAMMY input file: {str(e)}") raise IOError(f"Failed to write SAMMY input file: {str(e)}") + def read_inp_file(self, file_path: Path, fit_config: FitConfig = None) -> FitConfig: + """ + Read a SAMMY input file and populate a FitConfig instance. 
+ + Args: + file_path: Path to the input file + fit_config: Optional FitConfig to populate + + Returns: + FitConfig: Populated FitConfig instance + """ + file_path = Path(file_path) + if not file_path.exists(): + raise FileNotFoundError(f"Input file not found: {file_path}") + + target_config = fit_config if fit_config else self.fit_config + if target_config is None or not isinstance(target_config, FitConfig): + raise ValueError("fit_config must be an instance of FitConfig") + + self.fit_config = target_config + self._fit_config_provided = True + + lines = [line.rstrip("\n") for line in file_path.read_text().splitlines()] + idx = 0 + + def next_nonempty(start: int) -> int: + while start < len(lines) and not lines[start].strip(): + start += 1 + return start + + def is_numeric_line(line: str) -> bool: + parts = line.split() + if len(parts) < 2: + return False + try: + float(parts[0]) + float(parts[1]) + return True + except ValueError: + return False + + def parse_reaction_type(line: str) -> Optional[DataTypeOptions]: + candidate = line.strip().upper() + for option in DataTypeOptions: + if candidate == option.value.upper(): + return option + return None + + # Title line + idx = next_nonempty(idx) + if idx >= len(lines): + raise ValueError("Input file is empty") + self.fit_config.fit_title = lines[idx].strip() + idx += 1 + + # Card Set 2 element line + idx = next_nonempty(idx) + if idx >= len(lines): + raise ValueError("Missing Card Set 2 element line") + element_info = Card02.from_lines([lines[idx]]) + idx += 1 + + if not self.fit_config.nuclear_params.isotopes: + isotope_info = self._isotope_info_from_element(element_info) + self.fit_config.nuclear_params.isotopes.append(IsotopeParameters(isotope_information=isotope_info)) + + energy = self.fit_config.physics_params.energy_parameters + energy.min_energy = element_info.min_energy + energy.max_energy = element_info.max_energy + if element_info.nepnts is not None: + energy.number_of_energy_points = element_info.nepnts + 
if element_info.nxtra is not None: + energy.number_of_extra_points = element_info.nxtra + if element_info.itmax is not None: + self.fit_config.max_iterations = element_info.itmax + if element_info.icorr is not None: + self.fit_config.i_correlation = element_info.icorr + + self.fit_config.iptdop = element_info.iptdop + self.fit_config.iptwid = element_info.iptwid + self.fit_config.ixxchn = element_info.ixxchn + self.fit_config.ndigit = element_info.ndigit + self.fit_config.idropp = element_info.idropp + self.fit_config.matnum = element_info.matnum + + # Alphanumeric command lines + commands = [] + while idx < len(lines): + line = lines[idx].strip() + if not line: + idx += 1 + continue + if line.startswith("#") or set(line) == {"-"}: + idx += 1 + continue + if is_numeric_line(line): + break + commands.append(lines[idx]) + idx += 1 + self._apply_alphanumeric_commands(commands) + + # Physical constants line + if idx < len(lines) and is_numeric_line(lines[idx]): + try: + constants = Card05.from_lines([lines[idx]]) + broadening = self.fit_config.physics_params.broadening_parameters + broadening.temp = constants.temperature + broadening.deltal = constants.delta_l + broadening.deltag = constants.delta_g + broadening.deltae = constants.delta_e + except ValueError: + logger.warning("Failed to parse Card Set 5 constants line") + idx += 1 + + # Card Set 7 line (CRFN, THICK) + idx = next_nonempty(idx) + if idx < len(lines) and is_numeric_line(lines[idx]): + try: + card_7 = Card07.from_lines([lines[idx]]) + broadening = self.fit_config.physics_params.broadening_parameters + broadening.crfn = card_7.crfn + broadening.thick = card_7.thick + except ValueError: + logger.warning("Failed to parse Card Set 7 line") + idx += 1 + + # Reaction type line + idx = next_nonempty(idx) + if idx < len(lines): + reaction_type = parse_reaction_type(lines[idx]) + if reaction_type: + self.fit_config.data_params.data_type = reaction_type + idx += 1 + + # Particle pair definitions (Card 4) + 
remaining_lines = lines[idx:] + skip_indices = set() + for line_idx, line in enumerate(remaining_lines): + if Card04.is_header_line(line): + block_indices = [line_idx] + cursor = line_idx + 1 + while cursor < len(remaining_lines) and remaining_lines[cursor].strip(): + block_indices.append(cursor) + cursor += 1 + skip_indices.update(block_indices) + block = [remaining_lines[i] for i in block_indices] + try: + Card04.from_lines(block, self.fit_config) + except ValueError: + logger.warning("Failed to parse particle pair definitions from input file") + + # Spin group lines + spin_group_lines = [line for i, line in enumerate(remaining_lines) if line.strip() and i not in skip_indices] + if spin_group_lines: + try: + Card10p2.from_lines(spin_group_lines, self.fit_config) + except ValueError: + logger.warning("Failed to parse spin group lines from input file") + + return self.fit_config + + @classmethod + def from_fit_config( + cls, + fit_config: FitConfig, + isotope_info: Optional[Dict] = None, + physical_constants: Optional[Dict] = None, + reaction_type: Optional[str] = None, + options: Optional[FitOptions] = None, + title: Optional[str] = None, + ) -> "InpManager": + """ + Create an InpManager instance backed by a FitConfig. 
+ + Args: + fit_config: FitConfig object to read defaults from + isotope_info: Optional dict to override Card Set 2 element info + physical_constants: Optional dict to override Card Set 5 constants + reaction_type: Optional reaction type override + options: Optional FitOptions override + title: Optional title override + """ + if fit_config is None or not isinstance(fit_config, FitConfig): + raise ValueError("fit_config must be an instance of FitConfig") + + return cls( + fit_config=fit_config, + isotope_info=isotope_info, + physical_constants=physical_constants, + reaction_type=reaction_type, + options=options, + title=title, + ) + + @classmethod + def parse_inp_file(cls, file_path: Path, fit_config: FitConfig = None) -> FitConfig: + """ + Parse an input file into a FitConfig instance. + + Args: + file_path: Path to the input file + fit_config: Optional FitConfig to populate + + Returns: + FitConfig: Populated FitConfig + """ + manager = cls(fit_config=fit_config if fit_config else FitConfig()) + return manager.read_inp_file(file_path, manager.fit_config) + @classmethod def create_endf_inp(cls, output_path: Path, title: str = None) -> Path: """ From 5662c715052c79a931d9e3d673d6f90bc54443dc Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 14:56:04 -0700 Subject: [PATCH 26/45] fix: improve numeric line validation in InpManager --- src/pleiades/sammy/io/inp_manager.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/pleiades/sammy/io/inp_manager.py b/src/pleiades/sammy/io/inp_manager.py index 8b13b6df..4ed13513 100644 --- a/src/pleiades/sammy/io/inp_manager.py +++ b/src/pleiades/sammy/io/inp_manager.py @@ -732,14 +732,14 @@ def next_nonempty(start: int) -> int: def is_numeric_line(line: str) -> bool: parts = line.split() - if len(parts) < 2: - return False - try: - float(parts[0]) - float(parts[1]) - return True - except ValueError: + if not parts: return False + for part in parts: + try: + float(part) + except ValueError: + return False + return True def parse_reaction_type(line: str) -> Optional[DataTypeOptions]: candidate = line.strip().upper() From 87831ac009e47f52f3179381ed6f560a2877ec37 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 14:56:19 -0700 Subject: [PATCH 27/45] fix: update Card 5 validation to require at least one field and set default flight path length --- src/pleiades/sammy/io/card_formats/inp05_broadening.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pleiades/sammy/io/card_formats/inp05_broadening.py b/src/pleiades/sammy/io/card_formats/inp05_broadening.py index 9caff423..c37051e0 100644 --- a/src/pleiades/sammy/io/card_formats/inp05_broadening.py +++ b/src/pleiades/sammy/io/card_formats/inp05_broadening.py @@ -73,14 +73,14 @@ def from_lines(cls, lines: List[str]) -> PhysicalConstants: line = lines[0].strip() fields = line.split() - if len(fields) < 2: - message = f"Card 5 line must have at least 2 fields (TEMP, FPL), got {len(fields)}" + if len(fields) < 1: + message = f"Card 5 line must have at least 1 field (TEMP), got {len(fields)}" logger.error(message) raise ValueError(message) try: temperature = float(fields[0]) - flight_path_length = float(fields[1]) + 
flight_path_length = float(fields[1]) if len(fields) > 1 else 25.0 delta_l = float(fields[2]) if len(fields) > 2 else 0.0 delta_g = float(fields[3]) if len(fields) > 3 else 0.0 delta_e = float(fields[4]) if len(fields) > 4 else 0.0 From e6e43c15c020ab59ae122883c57470a0960865a4 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 14:56:27 -0700 Subject: [PATCH 28/45] fix: update minimal_line fixture to only require temperature and adjust related test description --- .../sammy/io/card_formats/test_inp05_broadening.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py b/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py index 016b64f6..db46e9f7 100644 --- a/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py +++ b/tests/unit/pleiades/sammy/io/card_formats/test_inp05_broadening.py @@ -20,7 +20,7 @@ def venus_default_line(): @pytest.fixture def minimal_line(): """Minimal valid example with only required fields.""" - return ["300.0 25.0"] + return ["300.0"] def test_parse_ex012_line(ex012_line): @@ -46,7 +46,7 @@ def test_parse_venus_default_line(venus_default_line): def test_parse_minimal_line(minimal_line): - """Test parsing minimal valid line (only TEMP and FPL).""" + """Test parsing minimal valid line (TEMP only).""" constants = Card05.from_lines(minimal_line) assert pytest.approx(constants.temperature, rel=1e-3) == 300.0 @@ -68,12 +68,6 @@ def test_parse_no_lines(): Card05.from_lines([]) -def test_parse_insufficient_fields(): - """Test that line with only one field raises ValueError.""" - with pytest.raises(ValueError, match="Card 5 line must have at least 2 fields"): - Card05.from_lines(["300.0"]) - - def test_parse_invalid_format(): """Test that invalid numeric format raises ValueError.""" with pytest.raises(ValueError, match="Failed to parse Card 5 line"): From 656333a89cb0a12a78438885855f09bbbc4084fd Mon Sep 17 00:00:00 
2001 From: "Alexander M. Long" Date: Wed, 21 Jan 2026 16:00:16 -0700 Subject: [PATCH 29/45] fix: require fit_routines in PleiadesConfig when loading from user config --- src/pleiades/utils/config.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 097a55cd..36d4a4da 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -213,6 +213,14 @@ def _normalize_config(self) -> "PleiadesConfig": self.fit_routines = new_fit_routines return self + @model_validator(mode="after") + def _require_fit_routines(self, info) -> "PleiadesConfig": + """Require fit_routines when loading from a user config.""" + if info.context and info.context.get("require_fit_routines"): + if not self.fit_routines: + raise ValueError("fit_routines must be defined in the config file") + return self + def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParameters: """Build nuclearParameters from configured isotope entries.""" isotope_entries = None @@ -420,7 +428,7 @@ def load(cls, path: Optional[Path] = None) -> "PleiadesConfig": @classmethod def from_dict(cls, config_dict: Dict[str, Any]) -> "PleiadesConfig": """Build a configuration from a dictionary.""" - return cls.model_validate(config_dict or {}) + return cls.model_validate(config_dict or {}, context={"require_fit_routines": True}) class IsotopeConfig(BaseModel): From 5debcb5cf5ae5962872584d4122998f7a5884801 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 21 Jan 2026 16:00:22 -0700 Subject: [PATCH 30/45] fix: integrate fit_routines into PleiadesConfig for custom initialization --- tests/unit/pleiades/utils/test_utils_config.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index 5027c2a3..494a6f29 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -91,7 +91,11 @@ def test_save_and_load(self): temp_path = Path(tmpdir) / "nuclear_data" custom_sources = {"TEST": "https://test.com"} - config = PleiadesConfig(nuclear_data_cache_dir=temp_path, nuclear_data_sources=custom_sources) + config = PleiadesConfig( + nuclear_data_cache_dir=temp_path, + nuclear_data_sources=custom_sources, + fit_routines={"example_fit": {"dataset_id": "example_dataset"}}, + ) # Save to temp file save_path = Path(tmpdir) / "config.yaml" From e30dc9d03fa1f7626d2a344b962bb2abb226e98c Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Thu, 22 Jan 2026 16:23:38 -0700 Subject: [PATCH 31/45] Moved 5 notebooks to subfolder "functionality" --- .../Notebooks/{ => functionality}/pleiades_Si_transmission.ipynb | 0 .../{ => functionality}/pleiades_endf_retrieval_caching.ipynb | 0 .../Notebooks/{ => functionality}/pleiades_logging_tutorial.ipynb | 0 examples/Notebooks/{ => functionality}/pleiades_sammy_endf.ipynb | 0 .../Notebooks/{ => functionality}/pleiades_sammy_parFile.ipynb | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename examples/Notebooks/{ => functionality}/pleiades_Si_transmission.ipynb (100%) rename examples/Notebooks/{ => functionality}/pleiades_endf_retrieval_caching.ipynb (100%) rename examples/Notebooks/{ => functionality}/pleiades_logging_tutorial.ipynb (100%) rename examples/Notebooks/{ => functionality}/pleiades_sammy_endf.ipynb (100%) rename examples/Notebooks/{ => functionality}/pleiades_sammy_parFile.ipynb (100%) diff --git a/examples/Notebooks/pleiades_Si_transmission.ipynb b/examples/Notebooks/functionality/pleiades_Si_transmission.ipynb similarity index 100% rename from examples/Notebooks/pleiades_Si_transmission.ipynb rename to examples/Notebooks/functionality/pleiades_Si_transmission.ipynb diff --git a/examples/Notebooks/pleiades_endf_retrieval_caching.ipynb b/examples/Notebooks/functionality/pleiades_endf_retrieval_caching.ipynb similarity index 100% rename from examples/Notebooks/pleiades_endf_retrieval_caching.ipynb rename to examples/Notebooks/functionality/pleiades_endf_retrieval_caching.ipynb diff --git a/examples/Notebooks/pleiades_logging_tutorial.ipynb b/examples/Notebooks/functionality/pleiades_logging_tutorial.ipynb similarity index 100% rename from examples/Notebooks/pleiades_logging_tutorial.ipynb rename to examples/Notebooks/functionality/pleiades_logging_tutorial.ipynb diff --git a/examples/Notebooks/pleiades_sammy_endf.ipynb b/examples/Notebooks/functionality/pleiades_sammy_endf.ipynb similarity index 100% rename from 
examples/Notebooks/pleiades_sammy_endf.ipynb rename to examples/Notebooks/functionality/pleiades_sammy_endf.ipynb diff --git a/examples/Notebooks/pleiades_sammy_parFile.ipynb b/examples/Notebooks/functionality/pleiades_sammy_parFile.ipynb similarity index 100% rename from examples/Notebooks/pleiades_sammy_parFile.ipynb rename to examples/Notebooks/functionality/pleiades_sammy_parFile.ipynb From 75dbec152425fb58c655bf8c5a4bca4d3658a5b4 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 4 Feb 2026 11:49:41 -0700 Subject: [PATCH 32/45] fix: enhance PleiadesConfig with dataset and fit routine configurations --- src/pleiades/utils/config.py | 150 ++++++++++++++++++++++++----------- 1 file changed, 105 insertions(+), 45 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 36d4a4da..9e35f970 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -2,6 +2,7 @@ """Global configuration management for PLEIADES.""" import os +from datetime import datetime from pathlib import Path from typing import Any, Dict, List, Optional @@ -9,6 +10,7 @@ from pydantic import BaseModel, ConfigDict, Field, model_validator from pleiades.nuclear.models import DataRetrievalMethod, EndfLibrary, IsotopeParameters, nuclearParameters +from pleiades.sammy.fitting.config import FitConfig from pleiades.utils.helper import VaryFlag DEFAULT_NUCLEAR_SOURCES = { @@ -94,7 +96,11 @@ def _expand_paths(self) -> "WorkspaceConfig": class NuclearConfig(BaseModel): - """Nuclear data configuration for PLEIADES.""" + """Nuclear data configuration for PLEIADES. + + This is the global/default nuclear configuration. Per-fit overrides live in + FitRoutineConfig.nuclear and are used when present. + """ model_config = ConfigDict(arbitrary_types_allowed=True) @@ -110,7 +116,11 @@ def _expand_paths(self) -> "NuclearConfig": class SammyConfig(BaseModel): - """SAMMY backend configuration for PLEIADES.""" + """SAMMY backend configuration for PLEIADES. 
+ + This captures how to execute SAMMY (local, docker, nova) and the backend-specific + settings required to launch it. + """ model_config = ConfigDict(arbitrary_types_allowed=True) @@ -120,8 +130,75 @@ class SammyConfig(BaseModel): nova: Dict[str, Any] = Field(default_factory=dict) +class DatasetMetadata(BaseModel): + """Metadata for a dataset entry to be used in DatasetConfig. + + These fields are used to seed INP generation (energy bounds, element hints, etc.) + and can be extended without changing the core schema. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow", populate_by_name=True) + + # Facility where the data was collected (e.g., LANSCE, SNS). + facility: Optional[str] = None + # Instrument or beamline identifier. + instrument: Optional[str] = None + # General timestamp for when the data was recorded (UTC recommended). + recorded_date: Optional[datetime] = Field(default=None, alias="RecordedDate") + + # Energy bounds for the dataset (in eV). + min_energy_eV: Optional[float] = None + max_energy_eV: Optional[float] = None + + +class DatasetConfig(BaseModel): + """Configuration for a dataset entry. + + A dataset represents an input data file (e.g. transmission .dat/.twenty) + plus optional metadata for building a FitConfig/INP. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow") + + # brief description of the dataset + description: Optional[str] = None + + # Kind of data (e.g., transmission, capture) + data_kind: Optional[str] = None + + # Path to the data file/files + path_to_data_files: Optional[Path] = None + + # Metadata for the given dataset + metadata: Optional[DatasetMetadata] = None + + +class FitRoutineConfig(BaseModel): + """Configuration for a single fit routine. + + A routine defines how a specific fit should be run (dataset selection, + fit mode, and optional FitConfig overrides). 
+ """ + + model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow") + + dataset_id: Optional[str] = None + mode: Optional[str] = None + update_from_results: Optional[bool] = None + fit_config: Optional[FitConfig] = None + + class PleiadesConfig(BaseModel): - """Global configuration for PLEIADES.""" + """Global configuration for PLEIADES. + + High-level intent: + - workspace: where PLEIADES writes files + - nuclear: global isotope defaults and ENDF cache + - sammy: how to execute SAMMY + - datasets: input data definitions + - fit_routines: per-run configurations (including FitConfig) + - runs/results_index: execution records and outputs + """ model_config = ConfigDict(arbitrary_types_allowed=True) @@ -131,8 +208,8 @@ class PleiadesConfig(BaseModel): nuclear: Optional[NuclearConfig] = None sammy: Optional[SammyConfig] = None - datasets: Dict[str, Dict[str, Any]] = Field(default_factory=dict) - fit_routines: Dict[str, Dict[str, Any]] = Field(default_factory=dict) + datasets: Dict[str, DatasetConfig] = Field(default_factory=dict) + fit_routines: Dict[str, FitRoutineConfig] = Field(default_factory=dict) runs: list[Dict[str, Any]] = Field(default_factory=list) results_index: Dict[str, Any] = Field(default_factory=dict) @@ -146,7 +223,11 @@ class PleiadesConfig(BaseModel): @model_validator(mode="after") def _normalize_config(self) -> "PleiadesConfig": - """Normalize paths and keep nuclear fields in sync.""" + """Normalize paths and keep nuclear fields in sync. + + This also normalizes routine-level isotope entries (fills defaults for + endf_library) so downstream code can rely on consistent types. 
+ """ if self.workspace: self.nuclear_data_cache_dir = _expand_path(self.nuclear_data_cache_dir, self.workspace) else: @@ -186,29 +267,12 @@ def _normalize_config(self) -> "PleiadesConfig": if entry.endf_library is None: entry.endf_library = default_library - # Normalize isotope configuration inside fit_routines without mutating - # the original routine dictionaries in-place. - new_fit_routines: Dict[str, Dict[str, Any]] = {} + # Normalize fit_routines into typed models. + new_fit_routines: Dict[str, FitRoutineConfig] = {} for routine_id, routine in self.fit_routines.items(): - # Work on shallow copies to avoid surprising side effects for callers - # that may hold references to the original routine dictionaries. - new_routine: Dict[str, Any] = dict(routine) - routine_nuclear_src = routine.get("nuclear") or {} - routine_nuclear: Dict[str, Any] = dict(routine_nuclear_src) - - routine_isotopes = routine_nuclear.get("isotopes") - if routine_isotopes is not None: - updated: List[IsotopeConfig] = [] - for entry in routine_isotopes: - if isinstance(entry, dict): - entry = IsotopeConfig(**entry) - if entry.endf_library is None: - entry.endf_library = default_library - updated.append(entry) - routine_nuclear["isotopes"] = updated - new_routine["nuclear"] = routine_nuclear - - new_fit_routines[routine_id] = new_routine + if isinstance(routine, dict): + routine = FitRoutineConfig.model_validate(routine) + new_fit_routines[routine_id] = routine self.fit_routines = new_fit_routines return self @@ -222,17 +286,14 @@ def _require_fit_routines(self, info) -> "PleiadesConfig": return self def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParameters: - """Build nuclearParameters from configured isotope entries.""" + """Build nuclearParameters from configured isotope entries. + + Use the global NuclearConfig.isotopes list. 
+ """ isotope_entries = None - if routine_id: - routine = self.fit_routines.get(routine_id, {}) - routine_isotopes = (routine.get("nuclear") or {}).get("isotopes") - if routine_isotopes: - isotope_entries = routine_isotopes - if isotope_entries is None: - isotope_entries = self.nuclear.isotopes + isotope_entries = self.nuclear.isotopes if not isotope_entries: - raise ValueError("No isotopes configured. Set fit_routines..nuclear.isotopes or nuclear.isotopes.") + raise ValueError("No isotopes configured. Set nuclear.isotopes.") from pleiades.nuclear.isotopes.manager import IsotopeManager manager = IsotopeManager() @@ -258,7 +319,11 @@ def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParam return nuclearParameters(isotopes=isotopes) def populate_fit_config_isotopes(self, fit_config: Any, routine_id: Optional[str] = None) -> Any: - """Populate fit_config.nuclear_params.isotopes from config if missing.""" + """Populate fit_config.nuclear_params.isotopes from config if missing. + + This is the bridge that ensures a FitConfig has isotopes before INP/PAR + generation or SAMMY execution. + """ if not hasattr(fit_config, "nuclear_params"): raise ValueError("fit_config must have a nuclear_params attribute") if not fit_config.nuclear_params.isotopes: @@ -273,15 +338,10 @@ def ensure_endf_cache( use_cache: bool = True, ) -> List[Path]: """Ensure ENDF cache files exist for configured isotopes.""" - if routine_id: - routine = self.fit_routines.get(routine_id, {}) - routine_isotopes = (routine.get("nuclear") or {}).get("isotopes") - isotope_entries = routine_isotopes if routine_isotopes is not None else self.nuclear.isotopes - else: - isotope_entries = self.nuclear.isotopes + isotope_entries = self.nuclear.isotopes if not isotope_entries: - raise ValueError("No isotopes configured. Set fit_routines..nuclear.isotopes or nuclear.isotopes.") + raise ValueError("No isotopes configured. 
Set nuclear.isotopes.") from pleiades.nuclear.manager import NuclearDataManager From b6555106c3b25ea633a68a7e1b4ea58dd2c6d290 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 4 Feb 2026 11:50:10 -0700 Subject: [PATCH 33/45] feat: add initial PleiadesConfig and YAML configuration files --- .../getting_started/pleiades_config.ipynb | 172 ++++++++++++++++++ .../getting_started/pleiades_config.yaml | 22 +++ 2 files changed, 194 insertions(+) create mode 100644 examples/Notebooks/getting_started/pleiades_config.ipynb create mode 100644 examples/Notebooks/getting_started/pleiades_config.yaml diff --git a/examples/Notebooks/getting_started/pleiades_config.ipynb b/examples/Notebooks/getting_started/pleiades_config.ipynb new file mode 100644 index 00000000..06c36e64 --- /dev/null +++ b/examples/Notebooks/getting_started/pleiades_config.ipynb @@ -0,0 +1,172 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "87ea1625", + "metadata": {}, + "source": [ + "# PleiadesConfig\n", + "\n", + "`PleiadesConfig` is the backbone configuration object for PLEIADES. It\n", + "captures **what to run**, **where to run it**, and **what inputs/results\n", + "belong to each fit**. 
The goal is to keep a single, reproducible record\n", + "of the workflow so a run can be re-created later from the same config.\n", + "\n", + "In practice it: \n", + "- Defines the workspace directory layout used by SAMMY runs.\n", + "- Records nuclear inputs (isotopes + ENDF libraries).\n", + "- Declares datasets and fit routines to run.\n", + "- Serves as the source of record for a given execution of SAMMY with PLEIADES.\n", + "- Tracks run metadata and result locations.\n" + ] + }, + { + "cell_type": "markdown", + "id": "f4778af4", + "metadata": {}, + "source": [ + "## Create a PleiadesConfig from scratch in Python\n", + "\n", + "Below we build a small PleiadesConfig with a workspace, a single isotope (Ta-181), and\n", + "the information of the fit routine.\n", + "\n", + "This has the following substructures: \n", + "#### PleiadesConfig (overall)\n", + "- A working record of a given PLEIADES run: workspace layout, nuclear inputs, datasets, fit routines, results and metadata are all stored.\n", + "- Designed to make runs reproducible by keeping config + outputs + run metadata together.\n", + "\n", + "\n", + "#### WorkspaceConfig\n", + "- `root`: top-level workspace folder for all run artifacts.\n", + "- `endf_dir`: cache location for ENDF resonance files (defaults to the nuclear cache).\n", + "- `fitting_dir`: per‑routine working directories where `SAMMY` is executed.\n", + "- `results_dir`: top‑level results folder for aggregated outputs across run routines.\n", + "- `data_dir`: location for input data files (e.g., transmission `.dat`/`.twenty`).\n", + "- `image_dir`: optional location where the energy-resolved neutron imaging data is stored.\n", + "\n", + "#### NuclearConfig\n", + "- `data_cache_dir`: root cache for ENDF resonance files (defaults to ~/.pleiades/nuclear_data).\n", + "- `sources`: URLs for ENDF retrieval (DIRECT/API).\n", + "- `default_library`: default ENDF library if not specified per isotope.\n", + "- `isotopes`: list of isotope entries to seed
runs; used to populate fit_config.nuclear_params.isotopes (unless overridden per routine).\n", + "\n", + "#### SammyConfig\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63d58d26", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pleiades_version=1 workspace=WorkspaceConfig(root=PosixPath('/tmp/pleiades_workspace'), endf_dir=PosixPath('/Users/alexlong/.pleiades/nuclear_data'), fitting_dir=PosixPath('/tmp/pleiades_workspace/fitting_dir'), results_dir=PosixPath('/tmp/pleiades_workspace/results_dir'), data_dir=PosixPath('/tmp/pleiades_workspace/data_dir'), image_dir=PosixPath('/tmp/pleiades_workspace/image_dir')) nuclear=NuclearConfig(data_cache_dir=PosixPath('/Users/alexlong/.pleiades/nuclear_data'), sources={'DIRECT': 'https://www-nds.iaea.org/public/download-endf', 'API': 'https://www-nds.iaea.org/exfor/servlet'}, default_library=, isotopes=[IsotopeConfig(isotope='Ta-181', abundance=0.016, uncertainty=None, vary_abundance=, endf_library=)]) sammy=None datasets={} fit_routines={'example_fit': {'dataset_id': 'example_dataset'}} runs=[] results_index={} nuclear_data_cache_dir=PosixPath('/Users/alexlong/.pleiades/nuclear_data') nuclear_data_sources={'DIRECT': 'https://www-nds.iaea.org/public/download-endf', 'API': 'https://www-nds.iaea.org/exfor/servlet'}\n" + ] + } + ], + "source": [ + "from pathlib import Path\n", + "\n", + "from pleiades.utils.config import PleiadesConfig, WorkspaceConfig, NuclearConfig\n", + "\n", + "# Workspace paths for generated files and SAMMY runs\n", + "workspace = WorkspaceConfig(\n", + " root=Path(\"/tmp/pleiades_workspace\"),\n", + " fitting_dir=Path(\"/tmp/pleiades_workspace/fitting_dir\"),\n", + " results_dir=Path(\"/tmp/pleiades_workspace/results_dir\"),\n", + " data_dir=Path(\"/tmp/pleiades_workspace/data_dir\"),\n", + " image_dir=Path(\"/tmp/pleiades_workspace/image_dir\"),\n", + ")\n", + "\n", + "# Minimal nuclear configuration with one isotope\n", 
+ "nuclear = NuclearConfig(\n", + " isotopes=[\n", + " {\"isotope\": \"Ta-181\", \"abundance\": 0.016, \"vary_abundance\": True},\n", + " ]\n", + ")\n", + "\n", + "# Fit routines must be present when loading from YAML\n", + "config = PleiadesConfig(\n", + " pleiades_version=1,\n", + " workspace=workspace,\n", + " nuclear=nuclear,\n", + " fit_routines={\"example_fit\": {\"dataset_id\": \"example_dataset\"}},\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "id": "abba9881", + "metadata": {}, + "source": [ + "## Export to a YAML-friendly dictionary\n", + "\n", + "`to_dict()` converts the Pydantic model into plain Python values that\n", + "can be serialized to YAML.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "08cd43b8", + "metadata": {}, + "outputs": [], + "source": [ + "config.to_dict()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load from YAML\n", + "\n", + "The example YAML file lives next to this notebook at\n", + "`examples/Notebooks/getting_started/pleiades_config.yaml`.\n", + "Loading uses the same validation rules as the Python model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pleiades.utils.config import PleiadesConfig\n", + "\n", + "yaml_path = Path(\"examples/Notebooks/getting_started/pleiades_config.yaml\")\n", + "loaded_config = PleiadesConfig.load(yaml_path)\n", + "\n", + "loaded_config\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Notes\n", + "\n", + "- `fit_routines` is required when loading from YAML.\n", + "- `workspace.endf_dir` defaults to the nuclear data cache if omitted.\n", + "- Isotopes are normalized so a missing `endf_library` defaults to ENDF-B-VIII.0.\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.x" + } + }, + 
"nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/Notebooks/getting_started/pleiades_config.yaml b/examples/Notebooks/getting_started/pleiades_config.yaml new file mode 100644 index 00000000..8ddf85fe --- /dev/null +++ b/examples/Notebooks/getting_started/pleiades_config.yaml @@ -0,0 +1,22 @@ +pleiades_version: 1 + +workspace: + root: /tmp/pleiades_workspace + fitting_dir: ${workspace.root}/fitting_dir + results_dir: ${workspace.root}/results_dir + data_dir: ${workspace.root}/data_dir + image_dir: ${workspace.root}/image_dir + +nuclear: + isotopes: + - isotope: Ta-181 + abundance: 0.016 + vary_abundance: true + default_library: ENDF-B-VIII.0 + +fit_routines: + example_fit: + dataset_id: example_dataset + +runs: [] +results_index: {} From 18cc30a97befbb65a8070d9049783ad6513112c1 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 4 Feb 2026 11:57:07 -0700 Subject: [PATCH 34/45] fix: update Python version in notebook metadata to 3.11.13 --- examples/Notebooks/getting_started/pleiades_config.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/Notebooks/getting_started/pleiades_config.ipynb b/examples/Notebooks/getting_started/pleiades_config.ipynb index 06c36e64..67c4cb13 100644 --- a/examples/Notebooks/getting_started/pleiades_config.ipynb +++ b/examples/Notebooks/getting_started/pleiades_config.ipynb @@ -158,13 +158,13 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "default", "language": "python", "name": "python3" }, "language_info": { "name": "python", - "version": "3.x" + "version": "3.11.13" } }, "nbformat": 4, From 44a307ad3ed21fc687e7b4eb7ef1e50d2a4dc3b5 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 4 Feb 2026 11:59:29 -0700 Subject: [PATCH 35/45] fix: remove nuclear isotopes from fit_routines and update usage instructions --- docs/Notes/pleiades_config_workflow.md | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md index 1b560835..42ce8f6d 100644 --- a/docs/Notes/pleiades_config_workflow.md +++ b/docs/Notes/pleiades_config_workflow.md @@ -118,15 +118,6 @@ fit_routines: dataset_id: example_dataset mode: fitting # fitting | endf_extraction | multi_isotope update_from_results: false - nuclear: - isotopes: - - isotope: "U-235" - abundance: 0.0072 - vary_abundance: 0 - endf_library: ENDF-B-VIII.0 - - isotope: "U-238" - abundance: 0.9928 - vary_abundance: 0 fit_config: fit_title: "SAMMY Fit" tolerance: null @@ -178,7 +169,7 @@ How this config is used to data_dir/.dat (or .twenty). - sammy_dat/sammy_twenty: use sammy_data_file or input_files.data directly. 3) Cache isotope data with NuclearDataManager: - - Use fit_routines..nuclear.isotopes, or fall back to nuclear.isotopes. + - Use nuclear.isotopes for FitConfig population. - If isotopic data is not already cached, download using nuclear.data_cache_dir (default: ~/.pleiades/nuclear_data) and default_library. 4) Create a run record: From 6e76a1cccab9bd648c01446346637e6ea25c1f7b Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 4 Feb 2026 12:00:31 -0700 Subject: [PATCH 36/45] fix: enhance test for PleiadesConfig loading to verify fit routines --- tests/unit/pleiades/utils/test_utils_config.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index 494a6f29..8a53f766 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -114,12 +114,14 @@ def test_save_and_load(self): # Load config from saved file loaded_config = PleiadesConfig.load(save_path) - # Verify loaded config matches original - assert loaded_config.nuclear_data_cache_dir == temp_path - assert loaded_config.nuclear_data_sources == custom_sources - assert loaded_config.nuclear is not None - assert loaded_config.nuclear.data_cache_dir == temp_path - assert loaded_config.nuclear.sources == custom_sources + # Verify loaded config matches original + assert loaded_config.nuclear_data_cache_dir == temp_path + assert loaded_config.nuclear_data_sources == custom_sources + assert loaded_config.nuclear is not None + assert loaded_config.nuclear.data_cache_dir == temp_path + assert loaded_config.nuclear.sources == custom_sources + assert "example_fit" in loaded_config.fit_routines + assert loaded_config.fit_routines["example_fit"].dataset_id == "example_dataset" def test_load_nonexistent_file(self): """Test loading from nonexistent file returns default config.""" From 6a0c818864d86864d67113da063f36d07fbfa676 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 11 Feb 2026 12:31:55 -0700 Subject: [PATCH 37/45] fix: rename sammy_data_file to path_to_data_files for clarity in dataset configuration --- docs/Notes/pleiades_config_workflow.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md index 42ce8f6d..dc063d3f 100644 --- a/docs/Notes/pleiades_config_workflow.md +++ b/docs/Notes/pleiades_config_workflow.md @@ -110,7 +110,7 @@ datasets: transmission_files: [] energy_units: eV cross_section_units: barn - sammy_data_file: ${workspace.data_dir}/example_fit.dat + path_to_data_files: ${workspace.data_dir}/example_fit.dat metadata: {} fit_routines: @@ -167,7 +167,7 @@ How this config is used 2) Resolve dataset inputs: - raw_imaging: run normalization to produce transmission data, then export to data_dir/.dat (or .twenty). - - sammy_dat/sammy_twenty: use sammy_data_file or input_files.data directly. + - sammy_dat/sammy_twenty: use path_to_data_files or input_files.data directly. 3) Cache isotope data with NuclearDataManager: - Use nuclear.isotopes for FitConfig population. - If isotopic data is not already cached, download using nuclear.data_cache_dir From 2641018305010fc1fff29d54da9b95930ea68cb8 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 18 Feb 2026 09:26:08 -0700 Subject: [PATCH 38/45] fix: enhance WorkspaceConfig with optional subdirectories for ENDF cache, fitting, results, data, and images --- src/pleiades/utils/config.py | 53 ++++++++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 6 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 9e35f970..14fe852d 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -77,11 +77,23 @@ class WorkspaceConfig(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) + # Working root directory for PLEIADES. 
This is the base path that other workspace-relative paths can reference. root: Optional[Path] = None + + # Optional subdirectory for ENDF cache files. If not set, defaults to the same path as nuclear_data_cache_dir in NuclearConfig. endf_dir: Optional[Path] = None + + # Optional subdirectory for fit routine working directories. Each routine gets its own subdirectory here with a specific routine_id name. fitting_dir: Optional[Path] = None + + # Optional subdirectory for aggregate results across routines (e.g., combined CSVs, summary reports). This is separate from the per-routine fit_results_dir. results_dir: Optional[Path] = None + + # Optional subdirectory for input data files (e.g., transmission .dat/.twenty). This is separate from the fitting and results directories. + # Each fit_routine sub directory should have a symlink to the relevant data files from this directory to avoid duplication. data_dir: Optional[Path] = None + + # Optional subdirectory for generated images/plots. This is separate from the fitting and results directories. image_dir: Optional[Path] = None @model_validator(mode="after") @@ -396,39 +408,68 @@ def create_routine_dirs( self, base_routine_ids: Optional[List[str]] = None, timestamp: Optional[str] = None, - ) -> List[Dict[str, Path]]: - """Create timestamped routine directories under workspace.fitting_dir.""" + ) -> List[Dict[str, str | Path]]: + """Create per-routine fit directories and a per-fit results subdirectory. + + Directory layout produced by this method: + // + //fit_results_dir/ + + Where ``routine_id`` is built as ``_``. + + Args: + base_routine_ids: Optional list of base routine names. If omitted, all keys + from ``self.fit_routines`` are used. + timestamp: Optional UTC timestamp string to make routine directories unique. + If omitted, a timestamp in ``YYYYMMDDTHHMMSSZ`` format is generated. 
+ + Returns: + A list of dictionaries, one per created routine directory, each containing: + - ``routine_id`` (str): The final timestamped routine identifier. + - ``fit_dir`` (Path): The routine working directory under ``fitting_dir``. + - ``fit_results_dir`` (Path): Subdirectory for SAMMY outputs for that routine. + + Raises: + ValueError: If ``workspace.fitting_dir`` is not configured. + ValueError: If no routine ids are available to create. + """ + # The fitting root is required because each routine directory is created under it. if not self.workspace or not self.workspace.fitting_dir: raise ValueError("workspace.fitting_dir is required to create routine directories") + # If explicit routine ids are not provided, use configured fit routine keys. routine_ids = base_routine_ids or list(self.fit_routines.keys()) if not routine_ids: raise ValueError("No fit_routines defined to create routine directories") + # Generate a UTC timestamp once so all routines created in this call share it. if timestamp is None: from datetime import datetime, timezone timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ") - created: List[Dict[str, Path]] = [] + created: List[Dict[str, str | Path]] = [] fitting_dir = self.workspace.fitting_dir for base_routine_id in routine_ids: + # Compose a run-unique routine id and derive both routine directories. routine_id = f"{base_routine_id}_{timestamp}" fit_dir = fitting_dir / routine_id - results_dir = fit_dir / "results_dir" + fit_results_dir = fit_dir / "fit_results_dir" + # Ensure both the routine root and its SAMMY output subdirectory exist. fit_dir.mkdir(parents=True, exist_ok=True) - results_dir.mkdir(parents=True, exist_ok=True) + fit_results_dir.mkdir(parents=True, exist_ok=True) created.append( { "routine_id": routine_id, "fit_dir": fit_dir, - "results_dir": results_dir, + "fit_results_dir": fit_results_dir, } ) + # Ensure workspace-level aggregate results directory exists (if configured). 
if self.workspace.results_dir: self.workspace.results_dir.mkdir(parents=True, exist_ok=True) From 766c10044e50ef7028688af9eea06774f8738598 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 18 Feb 2026 11:12:59 -0700 Subject: [PATCH 39/45] added comprehensive unit coverage for the new Pydantic config features --- .../unit/pleiades/utils/test_utils_config.py | 257 +++++++++++++++++- 1 file changed, 256 insertions(+), 1 deletion(-) diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index 8a53f766..5d6d3f3a 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -7,8 +7,19 @@ import pytest import yaml +from pydantic import ValidationError -from pleiades.utils.config import PleiadesConfig, get_config, reset_config, set_config +from pleiades.nuclear.isotopes.models import IsotopeInfo, IsotopeMassData +from pleiades.nuclear.models import DataRetrievalMethod, EndfLibrary, IsotopeParameters +from pleiades.utils.config import ( + FitRoutineConfig, + IsotopeConfig, + PleiadesConfig, + get_config, + reset_config, + set_config, +) +from pleiades.utils.helper import VaryFlag class TestPleiadesConfig: @@ -155,6 +166,250 @@ def test_load_empty_file(self): assert "DIRECT" in config.nuclear_data_sources assert "API" in config.nuclear_data_sources + def test_workspace_path_expansion_from_workspace_tokens(self, tmp_path): + """Workspace fields should expand ${workspace.*} tokens consistently.""" + config = PleiadesConfig( + workspace={ + "root": tmp_path, + "endf_dir": "${workspace.root}/endf_dir", + "fitting_dir": "${workspace.root}/fitting_dir", + "results_dir": "${workspace.root}/results_dir", + "data_dir": "${workspace.root}/data_dir", + "image_dir": "${workspace.root}/image_dir", + }, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + assert config.workspace is not None + assert config.workspace.root == tmp_path + assert 
config.workspace.endf_dir == tmp_path / "endf_dir" + assert config.workspace.fitting_dir == tmp_path / "fitting_dir" + assert config.workspace.results_dir == tmp_path / "results_dir" + assert config.workspace.data_dir == tmp_path / "data_dir" + assert config.workspace.image_dir == tmp_path / "image_dir" + + def test_workspace_unresolved_token_results_in_none(self, tmp_path): + """Unresolved workspace tokens should produce None after expansion.""" + config = PleiadesConfig( + workspace={ + "root": tmp_path, + "fitting_dir": "${workspace.missing_dir}/fits", + }, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + assert config.workspace is not None + assert config.workspace.fitting_dir is None + + def test_fit_routines_are_normalized_to_typed_models(self): + """fit_routines entries provided as dicts should be normalized to FitRoutineConfig.""" + pre_typed = FitRoutineConfig(dataset_id="dataset_2") + config = PleiadesConfig( + fit_routines={ + "fit_1": {"dataset_id": "dataset_1", "mode": "sammy"}, + "fit_2": pre_typed, + } + ) + + assert isinstance(config.fit_routines["fit_1"], FitRoutineConfig) + assert isinstance(config.fit_routines["fit_2"], FitRoutineConfig) + assert config.fit_routines["fit_1"].dataset_id == "dataset_1" + assert config.fit_routines["fit_2"].dataset_id == "dataset_2" + + def test_from_dict_requires_fit_routines(self): + """Loading from user config should fail when fit_routines are missing.""" + with pytest.raises(ValidationError, match="fit_routines must be defined"): + PleiadesConfig.from_dict({"workspace": {"root": "/tmp/pleiades"}}) + + def test_isotope_config_normalization_and_defaults(self): + """Isotope dicts should normalize to IsotopeConfig with default library applied.""" + config = PleiadesConfig( + nuclear={ + "default_library": EndfLibrary.JEFF_3_3, + "isotopes": [ + { + "isotope": "Ta-181", + "abundance": 0.6, + "uncertainty": 0.01, + "vary_abundance": VaryFlag.YES, + } + ], + }, + fit_routines={"fit_1": {"dataset_id": 
"dataset_1"}}, + ) + + assert len(config.nuclear.isotopes) == 1 + isotope = config.nuclear.isotopes[0] + assert isinstance(isotope, IsotopeConfig) + assert isotope.endf_library == EndfLibrary.JEFF_3_3 + assert isotope.vary_abundance == VaryFlag.YES + + def test_isotope_config_validation_rejects_invalid_library(self): + """Invalid enum values in isotope config should raise validation errors.""" + with pytest.raises(ValidationError): + PleiadesConfig( + nuclear={"isotopes": [{"isotope": "Ta-181", "endf_library": "NOT_A_LIBRARY"}]}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + def test_build_nuclear_params_uses_configured_isotopes(self, monkeypatch): + """build_nuclear_params should map config isotopes into nuclearParameters.""" + + class FakeIsotopeManager: + def get_isotope_parameters_from_isotope_string(self, isotope: str): + if isotope != "Ta-181": + return None + return IsotopeParameters( + isotope_information=IsotopeInfo( + name="Ta-181", + element="Ta", + mass_number=181, + atomic_number=73, + mass_data=IsotopeMassData(atomic_mass=180.9479958), + spin=3.5, + ) + ) + + monkeypatch.setattr("pleiades.nuclear.isotopes.manager.IsotopeManager", FakeIsotopeManager) + + config = PleiadesConfig( + nuclear={ + "default_library": EndfLibrary.JEFF_3_3, + "isotopes": [ + { + "isotope": "Ta-181", + "abundance": 0.8, + "uncertainty": 0.05, + "vary_abundance": VaryFlag.NO, + } + ], + }, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + nuclear_params = config.build_nuclear_params() + assert len(nuclear_params.isotopes) == 1 + isotope = nuclear_params.isotopes[0] + assert isotope.isotope_information.name == "Ta-181" + assert isotope.abundance == pytest.approx(0.8) + assert isotope.uncertainty == pytest.approx(0.05) + assert isotope.vary_abundance == VaryFlag.NO + assert isotope.endf_library == EndfLibrary.JEFF_3_3 + + def test_build_nuclear_params_raises_when_isotope_not_found(self, monkeypatch): + """Unknown isotopes should raise a clear 
error.""" + + class FakeIsotopeManager: + def get_isotope_parameters_from_isotope_string(self, isotope: str): + return None + + monkeypatch.setattr("pleiades.nuclear.isotopes.manager.IsotopeManager", FakeIsotopeManager) + + config = PleiadesConfig( + nuclear={"isotopes": [{"isotope": "Unknown-999"}]}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + with pytest.raises(ValueError, match="Isotope not found"): + config.build_nuclear_params() + + def test_ensure_endf_cache_downloads_to_workspace_endf_dir(self, monkeypatch, tmp_path): + """ensure_endf_cache should call downloader for each isotope and return output paths.""" + calls = [] + + class FakeIsotopeManager: + def get_isotope_info(self, isotope: str): + return IsotopeInfo(name=isotope, element="Ta", mass_number=181, atomic_number=73) + + class FakeNuclearDataManager: + def __init__(self): + self.isotope_manager = FakeIsotopeManager() + + def download_endf_resonance_file(self, isotope, library, output_dir, method, use_cache): + calls.append( + { + "isotope": isotope.name, + "library": library, + "output_dir": output_dir, + "method": method, + "use_cache": use_cache, + } + ) + return Path(output_dir) / f"{isotope.name}.endf" + + monkeypatch.setattr("pleiades.utils.config.set_config", lambda cfg: None) + monkeypatch.setattr("pleiades.nuclear.manager.NuclearDataManager", FakeNuclearDataManager) + + config = PleiadesConfig( + workspace={"root": tmp_path, "endf_dir": "${workspace.root}/endf_dir"}, + nuclear={"isotopes": [{"isotope": "Ta-181"}]}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + outputs = config.ensure_endf_cache(method=DataRetrievalMethod.API, use_cache=False) + + expected_dir = tmp_path / "endf_dir" + assert expected_dir.exists() + assert outputs == [expected_dir / "Ta-181.endf"] + assert calls == [ + { + "isotope": "Ta-181", + "library": EndfLibrary.ENDF_B_VIII_0, + "output_dir": str(expected_dir), + "method": DataRetrievalMethod.API, + "use_cache": False, + } + ] + + def 
test_create_routine_dirs_creates_fit_and_fit_results_dirs(self, tmp_path): + """create_routine_dirs should create timestamped routine dirs and fit_results_dir.""" + config = PleiadesConfig( + workspace={ + "root": tmp_path, + "fitting_dir": "${workspace.root}/fitting_dir", + "results_dir": "${workspace.root}/results_dir", + }, + fit_routines={ + "fit_1": {"dataset_id": "dataset_1"}, + "fit_2": {"dataset_id": "dataset_2"}, + }, + ) + + timestamp = "20260101T000000Z" + created = config.create_routine_dirs(timestamp=timestamp) + + assert len(created) == 2 + routine_ids = {entry["routine_id"] for entry in created} + assert routine_ids == {f"fit_1_{timestamp}", f"fit_2_{timestamp}"} + for entry in created: + assert entry["fit_dir"].exists() + assert entry["fit_dir"].is_dir() + assert entry["fit_results_dir"].exists() + assert entry["fit_results_dir"].is_dir() + + assert config.workspace is not None + assert config.workspace.results_dir is not None + assert config.workspace.results_dir.exists() + + def test_create_routine_dirs_requires_fitting_dir(self, tmp_path): + """create_routine_dirs should fail when workspace.fitting_dir is not configured.""" + config = PleiadesConfig( + workspace={"root": tmp_path}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + with pytest.raises(ValueError, match="workspace.fitting_dir is required"): + config.create_routine_dirs() + + def test_create_routine_dirs_requires_fit_routines(self, tmp_path): + """create_routine_dirs should fail when no fit routines are available.""" + config = PleiadesConfig( + workspace={"root": tmp_path, "fitting_dir": "${workspace.root}/fitting_dir"}, + ) + + with pytest.raises(ValueError, match="No fit_routines defined"): + config.create_routine_dirs() + class TestGlobalConfigFunctions: """Test suite for global configuration functions.""" From d27a7c81ebd0128ad3463451e3f7ca88a3983a35 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 18 Feb 2026 11:30:46 -0700 Subject: [PATCH 40/45] added two stage path expansion for WorkSpaceConfig --- src/pleiades/utils/config.py | 48 ++++++++++++++++--- .../unit/pleiades/utils/test_utils_config.py | 15 ++++++ 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 14fe852d..57da1744 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -20,6 +20,23 @@ def _expand_path(value: Optional[Any], workspace: Optional["WorkspaceConfig"] = None) -> Optional[Path]: + """Expand a path-like value into an absolute/relative ``Path`` object. + + Expansion behavior: + - Accepts ``Path`` or string-like inputs. + - Expands ``~`` and environment variables (e.g. ``$HOME``). + - When ``workspace`` is provided, replaces supported + ``${workspace.}`` tokens. + - Returns ``None`` when tokens cannot be resolved or when a circular + token reference is detected. + + Args: + value: Raw value from config (string/Path/None). + workspace: Workspace model used for token substitution. + + Returns: + Expanded ``Path`` or ``None`` if unresolved. + """ if value is None: return None @@ -98,14 +115,33 @@ class WorkspaceConfig(BaseModel): @model_validator(mode="after") def _expand_paths(self) -> "WorkspaceConfig": - self.root = _expand_path(self.root) - self.endf_dir = _expand_path(self.endf_dir, self) - self.fitting_dir = _expand_path(self.fitting_dir, self) - self.results_dir = _expand_path(self.results_dir, self) - self.data_dir = _expand_path(self.data_dir, self) - self.image_dir = _expand_path(self.image_dir, self) + """Normalize workspace paths after model construction. + + This is intentionally ordered in two phases: + 1. Expand ``root`` first. + 2. Expand all other fields that may reference ``${workspace.root}`` + or other workspace tokens. + + The explicit ordering keeps token substitution deterministic and makes + field dependencies easy to reason about during maintenance. 
+ """ + # Pass 1: resolve root so dependent fields can reference it. + self._expand_root_path() + + # Pass 2: resolve fields that may contain workspace token references. + self._expand_dependent_paths() return self + def _expand_root_path(self) -> None: + """Pass 1: expand only the workspace root path.""" + self.root = _expand_path(self.root) + + def _expand_dependent_paths(self) -> None: + """Pass 2: expand workspace fields that may reference ``${workspace.*}`` tokens.""" + for field_name in ("endf_dir", "fitting_dir", "results_dir", "data_dir", "image_dir"): + raw_value = getattr(self, field_name) + setattr(self, field_name, _expand_path(raw_value, self)) + class NuclearConfig(BaseModel): """Nuclear data configuration for PLEIADES. diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index 5d6d3f3a..8f66730c 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -188,6 +188,21 @@ def test_workspace_path_expansion_from_workspace_tokens(self, tmp_path): assert config.workspace.data_dir == tmp_path / "data_dir" assert config.workspace.image_dir == tmp_path / "image_dir" + def test_workspace_expands_root_first_then_dependent_tokens(self, tmp_path, monkeypatch): + """Dependent workspace tokens should resolve against expanded root values.""" + monkeypatch.setenv("PLEIADES_WORK_ROOT", str(tmp_path)) + config = PleiadesConfig( + workspace={ + "root": "$PLEIADES_WORK_ROOT", + "fitting_dir": "${workspace.root}/fitting_dir", + }, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + assert config.workspace is not None + assert config.workspace.root == tmp_path + assert config.workspace.fitting_dir == tmp_path / "fitting_dir" + def test_workspace_unresolved_token_results_in_none(self, tmp_path): """Unresolved workspace tokens should produce None after expansion.""" config = PleiadesConfig( From 2d5fa21f548c0138acc88d96b614a8b2b1258737 Mon Sep 17 
00:00:00 2001 From: "Alexander M. Long" Date: Wed, 18 Feb 2026 12:43:58 -0700 Subject: [PATCH 41/45] fix: enhance PleiadesConfig to handle duplicate isotopes and ensure safe mutations --- src/pleiades/utils/config.py | 130 ++++++++++++++++-- .../unit/pleiades/utils/test_utils_config.py | 108 ++++++++++++++- 2 files changed, 224 insertions(+), 14 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 57da1744..512f1571 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -12,6 +12,9 @@ from pleiades.nuclear.models import DataRetrievalMethod, EndfLibrary, IsotopeParameters, nuclearParameters from pleiades.sammy.fitting.config import FitConfig from pleiades.utils.helper import VaryFlag +from pleiades.utils.logger import loguru_logger + +logger = loguru_logger.bind(name=__name__) DEFAULT_NUCLEAR_SOURCES = { "DIRECT": "https://www-nds.iaea.org/public/download-endf", @@ -133,12 +136,22 @@ def _expand_paths(self) -> "WorkspaceConfig": return self def _expand_root_path(self) -> None: - """Pass 1: expand only the workspace root path.""" + """Pass 1: normalize only ``workspace.root``. + + ``root`` is treated as the anchor for other workspace paths, so it must + be expanded before token-based expansion of dependent fields. + """ self.root = _expand_path(self.root) def _expand_dependent_paths(self) -> None: - """Pass 2: expand workspace fields that may reference ``${workspace.*}`` tokens.""" + """Pass 2: normalize fields that may reference ``${workspace.*}`` tokens. + + This uses the already-expanded ``self.root`` (and any other resolved + workspace fields) as the substitution source. + """ for field_name in ("endf_dir", "fitting_dir", "results_dir", "data_dir", "image_dir"): + # Resolve each field independently so unresolved/circular references + # in one field do not prevent expansion of the others. 
raw_value = getattr(self, field_name) setattr(self, field_name, _expand_path(raw_value, self)) @@ -336,7 +349,29 @@ def _require_fit_routines(self, info) -> "PleiadesConfig": def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParameters: """Build nuclearParameters from configured isotope entries. - Use the global NuclearConfig.isotopes list. + This method converts configuration-level isotope entries + (``self.nuclear.isotopes``) into concrete ``IsotopeParameters`` objects + used by SAMMY fit execution. + + The implementation intentionally applies two safeguards: + 1. Duplicate isotope identifiers are detected early and logged as + warnings so users can correct accidental duplicate entries in + configuration files. + 2. Objects returned by ``IsotopeManager`` are deep-copied before + mutation (abundance, uncertainty, vary flag, library) to avoid + mutating shared/cached manager state across runs. + + Args: + routine_id: Optional routine identifier. Currently unused but kept + for API compatibility with routine-aware workflows. + + Returns: + ``nuclearParameters`` populated with per-isotope values from config. + + Raises: + ValueError: If no isotopes are configured. + ValueError: If any isotope string cannot be resolved by + ``IsotopeManager``. """ isotope_entries = None isotope_entries = self.nuclear.isotopes @@ -346,22 +381,53 @@ def build_nuclear_params(self, routine_id: Optional[str] = None) -> nuclearParam manager = IsotopeManager() isotopes: List[IsotopeParameters] = [] + seen_isotopes: set[str] = set() + retrieved_instance_ids: Dict[str, int] = {} default_library = self.nuclear.default_library or EndfLibrary.ENDF_B_VIII_0 for entry in isotope_entries: + # Normalize plain dict entries into the typed isotope model. 
if isinstance(entry, dict): entry = IsotopeConfig(**entry) - isotope_params = manager.get_isotope_parameters_from_isotope_string(entry.isotope) - if isotope_params is None: + # Warn on duplicate config entries before final nuclearParameters + # validation. Validation still rejects duplicate isotope names, but + # this warning points users to the root config issue sooner. + if entry.isotope in seen_isotopes: + logger.warning( + f"Duplicate isotope entry detected in config for '{entry.isotope}'. " + "This may trigger duplicate isotope validation errors." + ) + else: + seen_isotopes.add(entry.isotope) + + # Fetch baseline isotope parameters from the isotope manager. + retrieved_isotope_params = manager.get_isotope_parameters_from_isotope_string(entry.isotope) + if retrieved_isotope_params is None: raise ValueError(f"Isotope not found: {entry.isotope}") + # If the manager returns the same object instance for repeated + # lookups, warn that we are about to isolate mutation via copying. + previous_instance_id = retrieved_instance_ids.get(entry.isotope) + current_instance_id = id(retrieved_isotope_params) + if previous_instance_id == current_instance_id: + logger.warning( + f"IsotopeManager returned a reused IsotopeParameters instance for '{entry.isotope}'; " + "applying changes to a deep copy to prevent shared-state mutation." + ) + retrieved_instance_ids[entry.isotope] = current_instance_id + + # Apply config-specific overrides on a deep copy to avoid mutating + # manager-owned/cached instances. + isotope_params = retrieved_isotope_params.model_copy(deep=True) isotope_params.abundance = entry.abundance isotope_params.uncertainty = entry.uncertainty isotope_params.vary_abundance = entry.vary_abundance isotope_params.endf_library = entry.endf_library or default_library + # Collect per-entry isotope params; nuclearParameters validates + # aggregate constraints (including duplicate isotope names). 
isotopes.append(isotope_params) return nuclearParameters(isotopes=isotopes) @@ -382,10 +448,41 @@ def ensure_endf_cache( self, routine_id: Optional[str] = None, method: DataRetrievalMethod = DataRetrievalMethod.DIRECT, - output_dir: Optional[Path] = None, + endf_cache_dir: Optional[Path] = None, use_cache: bool = True, + update_config: bool = True, ) -> List[Path]: - """Ensure ENDF cache files exist for configured isotopes.""" + """Ensure ENDF cache files exist for configured isotopes. + + This method validates configured isotopes, resolves a target output + directory, and delegates file retrieval to ``NuclearDataManager``. + For each isotope entry, it requests the resonance file and returns the + list of resulting file paths. + + Args: + routine_id: Optional routine identifier (reserved for future routine-specific + behavior). + method: Nuclear data retrieval method. + endf_cache_dir: Optional override for the ENDF cache/output directory. + use_cache: If True, reuse existing cached artifacts when available. + update_config: If True, update module-global config via ``set_config(self)`` + before constructing ``NuclearDataManager``. + + Returns: + List of output paths, one per configured isotope, in the same order + as ``self.nuclear.isotopes``. + + Raises: + ValueError: If no isotopes are configured. + ValueError: If an isotope identifier cannot be resolved by the + isotope manager. + + Side Effects: + - Creates ``endf_cache_dir`` (or resolved default cache directory) + if it does not exist. + - Optionally updates module-global config state when + ``update_config=True``. 
+ """ isotope_entries = self.nuclear.isotopes if not isotope_entries: @@ -393,16 +490,20 @@ def ensure_endf_cache( from pleiades.nuclear.manager import NuclearDataManager - output_dir = ( - Path(output_dir) - if output_dir is not None + endf_cache_dir = ( + Path(endf_cache_dir) + if endf_cache_dir is not None else ( self.workspace.endf_dir if self.workspace and self.workspace.endf_dir else self.nuclear_data_cache_dir ) ) - output_dir.mkdir(parents=True, exist_ok=True) + # Ensure download destination exists before any retrieval calls. + endf_cache_dir.mkdir(parents=True, exist_ok=True) - set_config(self) + # Keep this side effect opt-in/explicit for callers that need global + # configuration state synchronized for downstream manager behavior. + if update_config: + set_config(self) manager = NuclearDataManager() default_library = self.nuclear.default_library or EndfLibrary.ENDF_B_VIII_0 @@ -410,14 +511,17 @@ def ensure_endf_cache( for entry in isotope_entries: if isinstance(entry, dict): entry = IsotopeConfig(**entry) + # Resolve isotope metadata used by the download manager. isotope_info = manager.isotope_manager.get_isotope_info(entry.isotope) if isotope_info is None: raise ValueError(f"Isotope not found: {entry.isotope}") + # Apply per-isotope library override when present; otherwise use + # the config default library. 
library = entry.endf_library or default_library output_path = manager.download_endf_resonance_file( isotope=isotope_info, library=library, - output_dir=str(output_dir), + output_dir=str(endf_cache_dir), method=method, use_cache=use_cache, ) diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index 8f66730c..b30bc7c2 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -328,9 +328,78 @@ def get_isotope_parameters_from_isotope_string(self, isotope: str): with pytest.raises(ValueError, match="Isotope not found"): config.build_nuclear_params() + def test_build_nuclear_params_copies_retrieved_isotope_before_mutation(self, monkeypatch): + """Mutations should be applied to a copy, not to manager-owned isotope objects.""" + shared_isotope_params = IsotopeParameters( + isotope_information=IsotopeInfo( + name="Ta-181", + element="Ta", + mass_number=181, + atomic_number=73, + mass_data=IsotopeMassData(atomic_mass=180.9479958), + spin=3.5, + ) + ) + + class FakeIsotopeManager: + def get_isotope_parameters_from_isotope_string(self, isotope: str): + if isotope != "Ta-181": + return None + return shared_isotope_params + + monkeypatch.setattr("pleiades.nuclear.isotopes.manager.IsotopeManager", FakeIsotopeManager) + + config = PleiadesConfig( + nuclear={"isotopes": [{"isotope": "Ta-181", "abundance": 0.8, "uncertainty": 0.05}]}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + nuclear_params = config.build_nuclear_params() + + assert len(nuclear_params.isotopes) == 1 + built = nuclear_params.isotopes[0] + assert built is not shared_isotope_params + assert built.abundance == pytest.approx(0.8) + assert built.uncertainty == pytest.approx(0.05) + assert shared_isotope_params.abundance is None + assert shared_isotope_params.uncertainty is None + + def test_build_nuclear_params_warns_on_duplicate_isotope_entries(self, monkeypatch): + """Duplicate isotope 
entries should emit a warning before validation fails.""" + warnings_seen = [] + + class FakeIsotopeManager: + def get_isotope_parameters_from_isotope_string(self, isotope: str): + if isotope != "Ta-181": + return None + return IsotopeParameters( + isotope_information=IsotopeInfo( + name="Ta-181", + element="Ta", + mass_number=181, + atomic_number=73, + mass_data=IsotopeMassData(atomic_mass=180.9479958), + spin=3.5, + ) + ) + + monkeypatch.setattr("pleiades.nuclear.isotopes.manager.IsotopeManager", FakeIsotopeManager) + monkeypatch.setattr("pleiades.utils.config.logger.warning", lambda message: warnings_seen.append(message)) + + config = PleiadesConfig( + nuclear={"isotopes": [{"isotope": "Ta-181"}, {"isotope": "Ta-181"}]}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + with pytest.raises(ValueError, match="Duplicate isotope names found"): + config.build_nuclear_params() + + assert any("Duplicate isotope entry detected in config for 'Ta-181'" in message for message in warnings_seen) + def test_ensure_endf_cache_downloads_to_workspace_endf_dir(self, monkeypatch, tmp_path): """ensure_endf_cache should call downloader for each isotope and return output paths.""" calls = [] + set_config_calls = {"count": 0} class FakeIsotopeManager: def get_isotope_info(self, isotope: str): @@ -352,7 +421,10 @@ def download_endf_resonance_file(self, isotope, library, output_dir, method, use ) return Path(output_dir) / f"{isotope.name}.endf" - monkeypatch.setattr("pleiades.utils.config.set_config", lambda cfg: None) + monkeypatch.setattr( + "pleiades.utils.config.set_config", + lambda cfg: set_config_calls.__setitem__("count", set_config_calls["count"] + 1), + ) monkeypatch.setattr("pleiades.nuclear.manager.NuclearDataManager", FakeNuclearDataManager) config = PleiadesConfig( @@ -375,6 +447,40 @@ def download_endf_resonance_file(self, isotope, library, output_dir, method, use "use_cache": False, } ] + assert set_config_calls["count"] == 1 + + def 
test_ensure_endf_cache_can_skip_global_config_update(self, monkeypatch, tmp_path): + """ensure_endf_cache should not touch global config when update_config is False.""" + set_config_calls = {"count": 0} + + class FakeIsotopeManager: + def get_isotope_info(self, isotope: str): + return IsotopeInfo(name=isotope, element="Ta", mass_number=181, atomic_number=73) + + class FakeNuclearDataManager: + def __init__(self): + self.isotope_manager = FakeIsotopeManager() + + def download_endf_resonance_file(self, isotope, library, output_dir, method, use_cache): + return Path(output_dir) / f"{isotope.name}.endf" + + monkeypatch.setattr( + "pleiades.utils.config.set_config", + lambda cfg: set_config_calls.__setitem__("count", set_config_calls["count"] + 1), + ) + monkeypatch.setattr("pleiades.nuclear.manager.NuclearDataManager", FakeNuclearDataManager) + + config = PleiadesConfig( + workspace={"root": tmp_path, "endf_dir": "${workspace.root}/endf_dir"}, + nuclear={"isotopes": [{"isotope": "Ta-181"}]}, + fit_routines={"fit_1": {"dataset_id": "dataset_1"}}, + ) + + custom_endf_cache_dir = tmp_path / "custom_endf_cache_dir" + outputs = config.ensure_endf_cache(endf_cache_dir=custom_endf_cache_dir, update_config=False) + + assert outputs == [custom_endf_cache_dir / "Ta-181.endf"] + assert set_config_calls["count"] == 0 def test_create_routine_dirs_creates_fit_and_fit_results_dirs(self, tmp_path): """create_routine_dirs should create timestamped routine dirs and fit_results_dir.""" From 051cd78676b557934697e06483da15e0a9257d84 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 18 Feb 2026 13:56:51 -0700 Subject: [PATCH 42/45] hardened Docker image handling --- docs/Notes/pleiades_config_workflow.md | 6 ++- src/pleiades/sammy/backends/docker.py | 4 +- src/pleiades/sammy/config.py | 32 +++++++++++++ src/pleiades/sammy/factory.py | 12 +++-- tests/data/config/sammy_runner.yaml | 2 +- .../pleiades/sammy/backends/test_docker.py | 2 +- tests/unit/pleiades/sammy/test_config.py | 47 +++++++++++++++++-- 7 files changed, 91 insertions(+), 14 deletions(-) diff --git a/docs/Notes/pleiades_config_workflow.md b/docs/Notes/pleiades_config_workflow.md index dc063d3f..0e7d84a4 100644 --- a/docs/Notes/pleiades_config_workflow.md +++ b/docs/Notes/pleiades_config_workflow.md @@ -46,6 +46,9 @@ Notes: - The data file for a run is keyed by routine_id: data_dir/.dat - endf_dir should map to PleiadesConfig.nuclear_data_cache_dir so NuclearDataManager uses it for ENDF caching. +- For the docker backend, ``sammy.docker.image_name`` should be digest-pinned + (``repo/image@sha256:...``) or at least use an explicit non-mutable version + tag (for example ``repo/image:1.2.3``); unpinned or mutable tags are rejected. Draft YAML schema (example) --------------------------- @@ -80,7 +83,8 @@ sammy: shell_path: /bin/bash env_vars: {} docker: - image_name: kedokudo/sammy-docker + # Use a pinned digest when possible; vetted version tags are acceptable fallback. 
+ image_name: kedokudo/sammy-docker:1.0.0 container_working_dir: /sammy/work container_data_dir: /sammy/data nova: diff --git a/src/pleiades/sammy/backends/docker.py b/src/pleiades/sammy/backends/docker.py index 91756f25..8fd79804 100644 --- a/src/pleiades/sammy/backends/docker.py +++ b/src/pleiades/sammy/backends/docker.py @@ -169,7 +169,9 @@ def validate_config(self) -> bool: try: # Create and validate config - config = DockerSammyConfig(image_name="kedokudo/sammy-docker", working_dir=working_dir, output_dir=output_dir) + config = DockerSammyConfig( + image_name="kedokudo/sammy-docker:1.0.0", working_dir=working_dir, output_dir=output_dir + ) config.validate() # Create files container diff --git a/src/pleiades/sammy/config.py b/src/pleiades/sammy/config.py index d7ee6650..f12795da 100644 --- a/src/pleiades/sammy/config.py +++ b/src/pleiades/sammy/config.py @@ -6,6 +6,7 @@ inheriting from the base configuration defined in the interface module. """ +import re import shutil from dataclasses import dataclass, field from pathlib import Path @@ -48,6 +49,31 @@ class DockerSammyConfig(BaseSammyConfig): container_working_dir: Path = Path("/sammy/work") container_data_dir: Path = Path("/sammy/data") + _MUTABLE_TAGS = {"latest", "stable", "main", "master", "dev", "edge", "nightly"} + + @staticmethod + def _is_digest_pinned_image_reference(image_name: str) -> bool: + """Return True if the image reference uses an immutable sha256 digest.""" + return re.fullmatch(r".+@sha256:[0-9a-f]{64}", image_name) is not None + + @classmethod + def _has_explicit_non_mutable_tag(cls, image_name: str) -> bool: + """Return True if the image reference has an explicit, non-mutable tag.""" + # Docker tags are separated by ":" after the last "/" in the image reference. 
+ last_slash = image_name.rfind("/") + last_colon = image_name.rfind(":") + if last_colon <= last_slash: + return False + tag = image_name[last_colon + 1 :].strip().lower() + if not tag: + return False + return tag not in cls._MUTABLE_TAGS + + @classmethod + def _is_pinned_or_versioned_image_reference(cls, image_name: str) -> bool: + """Return True for digest-pinned references or explicit non-mutable tags.""" + return cls._is_digest_pinned_image_reference(image_name) or cls._has_explicit_non_mutable_tag(image_name) + def validate(self) -> bool: """ Validate Docker SAMMY configuration. @@ -64,6 +90,12 @@ def validate(self) -> bool: # Validate image name if not self.image_name: raise ConfigurationError("Docker image name cannot be empty") + if not self._is_pinned_or_versioned_image_reference(self.image_name): + raise ConfigurationError( + "Docker image name must be pinned to an immutable digest " + "(e.g. repo/image@sha256:) or use an explicit non-mutable version tag " + "(e.g. repo/image:1.2.3)." + ) # Validate container paths are absolute if not self.container_working_dir.is_absolute(): diff --git a/src/pleiades/sammy/factory.py b/src/pleiades/sammy/factory.py index c370937c..8fbfda03 100644 --- a/src/pleiades/sammy/factory.py +++ b/src/pleiades/sammy/factory.py @@ -121,7 +121,9 @@ def create_runner( sammy_executable: Path to SAMMY executable shell_path: Path to shell Docker backend: - image_name: Docker image name + image_name: Docker image name pinned by digest + (repo/image@sha256:...) 
or explicit non-mutable version tag + (repo/image:1.2.3) container_working_dir: Working directory in container container_data_dir: Data directory in container NOVA backend: @@ -180,7 +182,7 @@ def create_runner( config = DockerSammyConfig( working_dir=working_dir, output_dir=output_dir, - image_name=kwargs.get("image_name", "kedokudo/sammy-docker"), + image_name=kwargs.get("image_name", "kedokudo/sammy-docker:1.0.0"), container_working_dir=Path(kwargs.get("container_working_dir", "/sammy/work")), container_data_dir=Path(kwargs.get("container_data_dir", "/sammy/data")), ) @@ -245,7 +247,7 @@ def from_config(cls, config_path: Union[str, Path]) -> SammyRunner: shell_path: /bin/bash docker: - image_name: kedokudo/sammy-docker + image_name: kedokudo/sammy-docker:1.0.0 container_working_dir: /sammy/work container_data_dir: /sammy/data @@ -351,7 +353,7 @@ def auto_select( >>> runner = SammyFactory.auto_select( ... working_dir="/path/to/work", ... preferred_backend="docker", - ... image_name="custom/sammy:latest" + ... image_name="custom/sammy:1.2.3" ... 
) """ # Check available backends @@ -416,7 +418,7 @@ def auto_select( shell_path: /bin/bash docker: - image_name: kedokudo/sammy-docker + image_name: kedokudo/sammy-docker:1.0.0 container_working_dir: /sammy/work container_data_dir: /sammy/data diff --git a/tests/data/config/sammy_runner.yaml b/tests/data/config/sammy_runner.yaml index 55b73e20..749dcd99 100644 --- a/tests/data/config/sammy_runner.yaml +++ b/tests/data/config/sammy_runner.yaml @@ -7,7 +7,7 @@ local: shell_path: /bin/bash docker: - image_name: kedokudo/sammy-docker + image_name: kedokudo/sammy-docker:1.0.0 container_working_dir: /sammy/work container_data_dir: /sammy/data diff --git a/tests/unit/pleiades/sammy/backends/test_docker.py b/tests/unit/pleiades/sammy/backends/test_docker.py index 76f0591d..d0f510de 100644 --- a/tests/unit/pleiades/sammy/backends/test_docker.py +++ b/tests/unit/pleiades/sammy/backends/test_docker.py @@ -67,7 +67,7 @@ def docker_config(temp_working_dir): config = DockerSammyConfig( working_dir=temp_working_dir, output_dir=temp_working_dir / "output", - image_name="kedokudo/sammy-docker", + image_name="kedokudo/sammy-docker:1.0.0", container_working_dir=Path("/sammy/work"), container_data_dir=Path("/sammy/data"), ) diff --git a/tests/unit/pleiades/sammy/test_config.py b/tests/unit/pleiades/sammy/test_config.py index 516aa6fe..b45f6176 100644 --- a/tests/unit/pleiades/sammy/test_config.py +++ b/tests/unit/pleiades/sammy/test_config.py @@ -74,16 +74,27 @@ def test_create_with_valid_config(self, temp_working_dir): config = DockerSammyConfig( working_dir=temp_working_dir, output_dir=temp_working_dir / "output", - image_name="kedokudo/sammy-docker", + image_name="kedokudo/sammy-docker:1.0.0", container_working_dir=Path("/sammy/work"), container_data_dir=Path("/sammy/data"), ) - assert config.image_name == "kedokudo/sammy-docker" + assert config.image_name == "kedokudo/sammy-docker:1.0.0" assert config.container_working_dir == Path("/sammy/work") assert config.container_data_dir 
== Path("/sammy/data") # call validate assert config.validate() + def test_validate_digest_pinned_image_name(self, temp_working_dir): + """Should accept immutable digest-pinned image references.""" + config = DockerSammyConfig( + working_dir=temp_working_dir, + output_dir=temp_working_dir / "output", + image_name="kedokudo/sammy-docker@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + container_working_dir=Path("/sammy/work"), + container_data_dir=Path("/sammy/data"), + ) + assert config.validate() + def test_validate_empty_image_name(self, temp_working_dir): """Should raise error for empty image name.""" config = DockerSammyConfig( @@ -97,12 +108,38 @@ def test_validate_empty_image_name(self, temp_working_dir): config.validate() assert "image name cannot be empty" in str(exc.value) + def test_validate_rejects_unpinned_image_name(self, temp_working_dir): + """Should reject image references with no tag and no digest.""" + config = DockerSammyConfig( + working_dir=temp_working_dir, + output_dir=temp_working_dir / "output", + image_name="kedokudo/sammy-docker", + container_working_dir=Path("/sammy/work"), + container_data_dir=Path("/sammy/data"), + ) + with pytest.raises(ConfigurationError) as exc: + config.validate() + assert "must be pinned to an immutable digest" in str(exc.value) + + def test_validate_rejects_mutable_latest_tag(self, temp_working_dir): + """Should reject mutable tags such as :latest.""" + config = DockerSammyConfig( + working_dir=temp_working_dir, + output_dir=temp_working_dir / "output", + image_name="kedokudo/sammy-docker:latest", + container_working_dir=Path("/sammy/work"), + container_data_dir=Path("/sammy/data"), + ) + with pytest.raises(ConfigurationError) as exc: + config.validate() + assert "must be pinned to an immutable digest" in str(exc.value) + def test_validate_relative_container_paths(self, temp_working_dir): """Should raise error for relative container paths.""" config = DockerSammyConfig( 
working_dir=temp_working_dir, output_dir=temp_working_dir / "output", - image_name="kedokudo/sammy-docker", + image_name="kedokudo/sammy-docker:1.0.0", container_working_dir=Path("relative/path"), container_data_dir=Path("/sammy/data"), ) @@ -115,7 +152,7 @@ def test_validate_relative_data_dir(self, temp_working_dir): config = DockerSammyConfig( working_dir=temp_working_dir, output_dir=temp_working_dir / "output", - image_name="kedokudo/sammy-docker", + image_name="kedokudo/sammy-docker:1.0.0", container_working_dir=Path("/sammy/work"), container_data_dir=Path("relative/path"), ) @@ -129,7 +166,7 @@ def test_validate_same_container_dirs(self, temp_working_dir): config = DockerSammyConfig( working_dir=temp_working_dir, output_dir=temp_working_dir / "output", - image_name="kedokudo/sammy-docker", + image_name="kedokudo/sammy-docker:1.0.0", container_working_dir=same_path, container_data_dir=same_path, ) From 1213a08d40cd7e09f905ea91f0886c338fc7f7d5 Mon Sep 17 00:00:00 2001 From: "Alexander M. 
Long" Date: Wed, 18 Feb 2026 14:10:13 -0700 Subject: [PATCH 43/45] fixed both sides of the Card 5 flight-path mapping --- src/pleiades/sammy/io/inp_manager.py | 2 + .../pleiades/sammy/io/test_inp_manager.py | 49 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/src/pleiades/sammy/io/inp_manager.py b/src/pleiades/sammy/io/inp_manager.py index 4ed13513..75b61be9 100644 --- a/src/pleiades/sammy/io/inp_manager.py +++ b/src/pleiades/sammy/io/inp_manager.py @@ -334,6 +334,7 @@ def generate_physical_constants_section(self, material_properties: Dict = None) broadening = self.fit_config.physics_params.broadening_parameters material_properties = { "temperature_K": broadening.temp, + "flight_path_m": broadening.dist, "delta_l": broadening.deltal, "delta_g": broadening.deltag, "delta_e": broadening.deltae, @@ -807,6 +808,7 @@ def parse_reaction_type(line: str) -> Optional[DataTypeOptions]: constants = Card05.from_lines([lines[idx]]) broadening = self.fit_config.physics_params.broadening_parameters broadening.temp = constants.temperature + broadening.dist = constants.flight_path_length broadening.deltal = constants.delta_l broadening.deltag = constants.delta_g broadening.deltae = constants.delta_e diff --git a/tests/unit/pleiades/sammy/io/test_inp_manager.py b/tests/unit/pleiades/sammy/io/test_inp_manager.py index 819ded8b..f3f796ae 100644 --- a/tests/unit/pleiades/sammy/io/test_inp_manager.py +++ b/tests/unit/pleiades/sammy/io/test_inp_manager.py @@ -12,7 +12,10 @@ import pytest +from pleiades.sammy.fitting.config import FitConfig from pleiades.sammy.fitting.options import FitOptions +from pleiades.sammy.io.card_formats.inp02_element import Card02, ElementInfo +from pleiades.sammy.io.card_formats.inp05_broadening import Card05, PhysicalConstants from pleiades.sammy.io.inp_manager import InpManager @@ -348,3 +351,49 @@ def test_multi_isotope_missing_required_properties(temp_dir): with pytest.raises(ValueError, match="must contain 'density_g_cm3' and 
'atomic_mass_amu'"): InpManager.create_multi_isotope_inp(output_path, title="Should fail", material_properties=incomplete_props) + + +def test_generate_physical_constants_section_uses_fit_config_dist(): + """Card 5 generation should preserve fit_config broadening.dist as flight path.""" + fit_config = FitConfig() + broadening = fit_config.physics_params.broadening_parameters + broadening.temp = 300.0 + broadening.dist = 123.4 + broadening.deltal = 0.2 + broadening.deltag = 0.1 + broadening.deltae = 0.01 + + manager = InpManager(fit_config=fit_config) + section = manager.generate_physical_constants_section() + constants = Card05.from_lines([section.strip()]) + + assert constants.temperature == pytest.approx(300.0) + assert constants.flight_path_length == pytest.approx(123.4) + assert constants.delta_l == pytest.approx(0.2) + assert constants.delta_g == pytest.approx(0.1) + assert constants.delta_e == pytest.approx(0.01) + + +def test_read_inp_file_sets_broadening_dist_from_card5(temp_dir): + """Card 5 parsing should map flight path length back to broadening.dist.""" + fit_config = FitConfig() + manager = InpManager(fit_config=fit_config) + output_path = temp_dir / "roundtrip_card5.inp" + + card2_line = Card02.to_lines(ElementInfo(element="Au", atomic_weight=196.966569, min_energy=0.001, max_energy=1.0))[ + 0 + ] + card5_line = Card05.to_lines( + PhysicalConstants(temperature=296.0, flight_path_length=48.5, delta_l=0.3, delta_g=0.2, delta_e=0.1) + )[0] + + output_path.write_text(f"Card5 Parse Test\n{card2_line}\n{card5_line}\ntransmission\n") + + loaded = manager.read_inp_file(output_path, fit_config=fit_config) + broadening = loaded.physics_params.broadening_parameters + + assert broadening.temp == pytest.approx(296.0) + assert broadening.dist == pytest.approx(48.5) + assert broadening.deltal == pytest.approx(0.3) + assert broadening.deltag == pytest.approx(0.2) + assert broadening.deltae == pytest.approx(0.1) From 4843c700cfa7c18de3ea301120b250c5a086b13b Mon Sep 17 
00:00:00 2001 From: "Alexander M. Long" Date: Wed, 18 Feb 2026 14:25:08 -0700 Subject: [PATCH 44/45] centralizing IO validation in PleiadesConfig._normalize_config_for_io() and making it strict --- src/pleiades/utils/config.py | 20 +++++++++++++++++-- .../unit/pleiades/utils/test_utils_config.py | 11 ++++++++-- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/src/pleiades/utils/config.py b/src/pleiades/utils/config.py index 512f1571..f935711e 100644 --- a/src/pleiades/utils/config.py +++ b/src/pleiades/utils/config.py @@ -619,6 +619,19 @@ def to_dict(self) -> Dict[str, Any]: """Convert configuration to a dictionary.""" return self.model_dump(mode="json") + @classmethod + def _normalize_config_for_io(cls, config_dict: Dict[str, Any]) -> Dict[str, Any]: + """Normalize config payload before save/load validation. + + Enforces strict IO requirements so save/load behavior is consistent: + ``fit_routines`` must be present and non-empty. + """ + normalized = dict(config_dict or {}) + fit_routines = normalized.get("fit_routines") + if not fit_routines: + raise ValueError("fit_routines must be defined in the config file") + return normalized + def save(self, path: Optional[Path] = None) -> Path: """ Save configuration to a YAML file. 
@@ -635,9 +648,11 @@ def save(self, path: Optional[Path] = None) -> Path: # Ensure directory exists path.parent.mkdir(parents=True, exist_ok=True) + normalized = self._normalize_config_for_io(self.to_dict()) + # Save config as YAML with open(path, "w") as f: - yaml.safe_dump(self.to_dict(), f, sort_keys=False) + yaml.safe_dump(normalized, f, sort_keys=False) return path @@ -669,7 +684,8 @@ def load(cls, path: Optional[Path] = None) -> "PleiadesConfig": @classmethod def from_dict(cls, config_dict: Dict[str, Any]) -> "PleiadesConfig": """Build a configuration from a dictionary.""" - return cls.model_validate(config_dict or {}, context={"require_fit_routines": True}) + normalized = cls._normalize_config_for_io(config_dict) + return cls.model_validate(normalized, context={"require_fit_routines": True}) class IsotopeConfig(BaseModel): diff --git a/tests/unit/pleiades/utils/test_utils_config.py b/tests/unit/pleiades/utils/test_utils_config.py index b30bc7c2..e94a43ae 100644 --- a/tests/unit/pleiades/utils/test_utils_config.py +++ b/tests/unit/pleiades/utils/test_utils_config.py @@ -232,10 +232,17 @@ def test_fit_routines_are_normalized_to_typed_models(self): assert config.fit_routines["fit_2"].dataset_id == "dataset_2" def test_from_dict_requires_fit_routines(self): - """Loading from user config should fail when fit_routines are missing.""" - with pytest.raises(ValidationError, match="fit_routines must be defined"): + """Loading from user payload should fail when fit_routines are missing.""" + with pytest.raises(ValueError, match="fit_routines must be defined"): PleiadesConfig.from_dict({"workspace": {"root": "/tmp/pleiades"}}) + def test_save_requires_fit_routines(self, tmp_path): + """Saving config with empty fit_routines should fail.""" + config = PleiadesConfig() + save_path = tmp_path / "config.yaml" + with pytest.raises(ValueError, match="fit_routines must be defined"): + config.save(save_path) + def test_isotope_config_normalization_and_defaults(self): """Isotope 
dicts should normalize to IsotopeConfig with default library applied.""" config = PleiadesConfig( From 4dbf965a12c2afd4b7462426b4d42caced932197 Mon Sep 17 00:00:00 2001 From: "Alexander M. Long" Date: Wed, 18 Feb 2026 15:42:48 -0700 Subject: [PATCH 45/45] feat: integrated PleiadesConfig fit configuration into INP file generation --- docs/guides/input_preparation.rst | 97 ++--- src/pleiades/sammy/io/inp_manager.py | 340 +++++++++--------- .../pleiades/sammy/io/test_inp_manager.py | 73 +++- 3 files changed, 286 insertions(+), 224 deletions(-) diff --git a/docs/guides/input_preparation.rst b/docs/guides/input_preparation.rst index 09238ea2..1490fee7 100644 --- a/docs/guides/input_preparation.rst +++ b/docs/guides/input_preparation.rst @@ -69,24 +69,29 @@ INP File Generation The INP file controls SAMMY execution parameters. PLEIADES generates INP files through :class:`~pleiades.sammy.io.inp_manager.InpManager`. -Material Properties -^^^^^^^^^^^^^^^^^^^ +Dataset Metadata +^^^^^^^^^^^^^^^^ -Define material properties as a dictionary: +Define typed dataset metadata and fit configuration: .. 
code-block:: python - material_props = { - 'element': 'Au', # Element symbol - 'mass_number': 197, # Isotope mass number - 'density_g_cm3': 19.32, # Material density (g/cm³) - 'thickness_mm': 0.025, # Sample thickness (mm) - 'atomic_mass_amu': 196.966569, # Atomic mass (amu) - 'abundance': 1.0, # Isotopic abundance (0-1) - 'min_energy': 1.0, # Minimum energy (eV) - 'max_energy_eV': 200.0, # Maximum energy (eV) - 'temperature_K': 293.6, # Sample temperature (K) - } + from pleiades.sammy.fitting.config import FitConfig + from pleiades.sammy.io.inp_manager import InpDatasetMetadata + + fit_config = FitConfig() + fit_config.physics_params.broadening_parameters.crfn = 8.0 + + dataset_metadata = InpDatasetMetadata( + element="Au", + mass_number=197, + density_g_cm3=19.32, + thickness_mm=0.025, + atomic_mass_amu=196.966569, + min_energy_eV=1.0, + max_energy_eV=200.0, + temperature_K=293.6, + ) Creating the INP File ^^^^^^^^^^^^^^^^^^^^^ @@ -94,20 +99,21 @@ Creating the INP File .. code-block:: python from pathlib import Path - from pleiades.sammy.io.inp_manager import InpManager - - # Material properties - material_props = { - 'element': 'Au', - 'mass_number': 197, - 'density_g_cm3': 19.32, - 'thickness_mm': 0.025, - 'atomic_mass_amu': 196.966569, - 'abundance': 1.0, - 'min_energy': 1.0, - 'max_energy_eV': 200.0, - 'temperature_K': 293.6, - } + from pleiades.sammy.fitting.config import FitConfig + from pleiades.sammy.io.inp_manager import InpDatasetMetadata, InpManager + + fit_config = FitConfig() + fit_config.physics_params.broadening_parameters.crfn = 8.0 + dataset_metadata = InpDatasetMetadata( + element="Au", + mass_number=197, + density_g_cm3=19.32, + thickness_mm=0.025, + atomic_mass_amu=196.966569, + min_energy_eV=1.0, + max_energy_eV=200.0, + temperature_K=293.6, + ) # Resolution function file (facility-specific) resolution_file = Path("/path/to/resolution_function.dat") @@ -116,8 +122,9 @@ Creating the INP File inp_file = Path("./working/analysis.inp") 
InpManager.create_multi_isotope_inp( inp_file, + fit_config=fit_config, title="Au-197 neutron transmission analysis", - material_properties=material_props, + dataset_metadata=dataset_metadata, resolution_file_path=resolution_file, ) @@ -261,7 +268,8 @@ Putting it all together: .. code-block:: python from pathlib import Path - from pleiades.sammy.io.inp_manager import InpManager + from pleiades.sammy.fitting.config import FitConfig + from pleiades.sammy.io.inp_manager import InpDatasetMetadata, InpManager from pleiades.sammy.io.json_manager import JsonManager from pleiades.sammy.io.data_manager import convert_csv_to_sammy_twenty from pleiades.sammy.interface import SammyFilesMultiMode @@ -289,24 +297,27 @@ Putting it all together: working_dir=str(working_dir), ) - # 3. Create INP file - material_props = { - 'element': 'Au', - 'mass_number': 197, - 'density_g_cm3': 19.32, - 'thickness_mm': 0.025, - 'atomic_mass_amu': 196.966569, - 'abundance': 1.0, - 'min_energy': 1.0, - 'max_energy_eV': 200.0, - 'temperature_K': 293.6, - } + # 3. 
Create INP file from FitConfig + typed metadata + fit_config = FitConfig() + fit_config.physics_params.broadening_parameters.crfn = 8.0 + + dataset_metadata = InpDatasetMetadata( + element="Au", + mass_number=197, + density_g_cm3=19.32, + thickness_mm=0.025, + atomic_mass_amu=196.966569, + min_energy_eV=1.0, + max_energy_eV=200.0, + temperature_K=293.6, + ) inp_file = working_dir / "au_fitting.inp" InpManager.create_multi_isotope_inp( inp_file, + fit_config=fit_config, title="Au-197 analysis", - material_properties=material_props, + dataset_metadata=dataset_metadata, resolution_file_path=Path("/path/to/resolution.dat"), ) diff --git a/src/pleiades/sammy/io/inp_manager.py b/src/pleiades/sammy/io/inp_manager.py index 75b61be9..108cd836 100644 --- a/src/pleiades/sammy/io/inp_manager.py +++ b/src/pleiades/sammy/io/inp_manager.py @@ -10,6 +10,8 @@ from pathlib import Path from typing import Dict, List, Optional, Tuple, Union, get_args, get_origin +from pydantic import BaseModel, Field + from pleiades.nuclear.isotopes.models import IsotopeInfo, IsotopeMassData from pleiades.nuclear.models import IsotopeParameters from pleiades.sammy.data.options import DataTypeOptions @@ -35,6 +37,23 @@ DEFAULT_L0_UNCERTAINTY = 2.00000e-5 # Uncertainty on L₀ +class InpDatasetMetadata(BaseModel): + """Optional typed dataset metadata used to seed INP generation. + + These values are dataset-level hints and should only be used when the + corresponding value is not already provided in ``FitConfig``. + """ + + element: Optional[str] = Field(default=None, description="Element symbol (e.g. 
Au, Ta)") + mass_number: Optional[int] = Field(default=None, description="Mass number for isotope name composition") + atomic_mass_amu: Optional[float] = Field(default=None, description="Atomic mass (amu)") + min_energy_eV: Optional[float] = Field(default=None, description="Minimum fit energy (eV)") + max_energy_eV: Optional[float] = Field(default=None, description="Maximum fit energy (eV)") + temperature_K: Optional[float] = Field(default=None, description="Sample temperature (K)") + density_g_cm3: Optional[float] = Field(default=None, description="Material density (g/cm^3)") + thickness_mm: Optional[float] = Field(default=None, description="Sample thickness (mm)") + + class InpManager: """ Manages creation and writing of SAMMY input (.inp) files. @@ -234,11 +253,14 @@ def _isotope_info_from_element(element_info: ElementInfo) -> IsotopeInfo: def _element_info_from_fit_config(self) -> ElementInfo: energy = self.fit_config.physics_params.energy_parameters + # Preserve legacy-safe defaults when FitConfig energy bounds are unset. + min_energy = energy.min_energy if energy.min_energy is not None and energy.min_energy > 0 else 0.001 + max_energy = energy.max_energy if energy.max_energy is not None and energy.max_energy > 0 else 1000.0 element_info = ElementInfo( element=self._element_name_from_fit_config(), atomic_weight=self._atomic_mass_from_fit_config(), - min_energy=energy.min_energy, - max_energy=energy.max_energy, + min_energy=min_energy, + max_energy=max_energy, nepnts=energy.number_of_energy_points, itmax=self.fit_config.max_iterations, icorr=self.fit_config.i_correlation, @@ -317,96 +339,87 @@ def generate_isotope_section(self) -> str: lines = Card02.to_lines(element_info) return lines[0] - def generate_physical_constants_section(self, material_properties: Dict = None) -> str: + def _number_density_from_dataset_metadata(self, dataset_metadata: Optional[InpDatasetMetadata]) -> Optional[float]: + """Derive number density from typed dataset metadata. 
+ + Number density derivation is all-or-nothing: if any of the required + inputs are provided, all three must be present to avoid silently + generating inconsistent values. + """ + if dataset_metadata is None: + return None + + density = dataset_metadata.density_g_cm3 + thickness = dataset_metadata.thickness_mm + atomic_mass = dataset_metadata.atomic_mass_amu + + has_any_density_input = any(value is not None for value in (density, thickness, atomic_mass)) + if not has_any_density_input: + return None + if density is None or thickness is None or atomic_mass is None: + raise ValueError( + "dataset_metadata must include density_g_cm3, thickness_mm, and atomic_mass_amu to derive THICK" + ) + + from pleiades.utils.units import calculate_number_density + + return calculate_number_density(density, thickness, atomic_mass) + + def generate_physical_constants_section(self, dataset_metadata: Optional[InpDatasetMetadata] = None) -> str: """ - Generate the physical constants section for multi-isotope mode. + Generate Card Set 5 physical constants from FitConfig. Args: - material_properties: Dict with material properties + dataset_metadata: Optional typed metadata used only as a fallback + source for temperature when FitConfig does not define one. 
Returns: str: Physical constants line """ - if material_properties is None and self.physical_constants: - material_properties = self.physical_constants - - if material_properties is None and self._fit_config_provided: - broadening = self.fit_config.physics_params.broadening_parameters - material_properties = { - "temperature_K": broadening.temp, - "flight_path_m": broadening.dist, - "delta_l": broadening.deltal, - "delta_g": broadening.deltag, - "delta_e": broadening.deltae, - } - material_properties = {key: value for key, value in material_properties.items() if value is not None} - - if material_properties: - temperature = material_properties.get("temperature_K") - if temperature is None: - temperature = material_properties.get("temperature") - if temperature is None: - temperature = 293.6 - - flight_path = material_properties.get("flight_path_m") - if flight_path is None: - flight_path = material_properties.get("flight_path") - if flight_path is None: - flight_path = 25.0 - - delta_l = material_properties.get("delta_l", 0.0) - delta_g = material_properties.get("delta_g", 0.0) - delta_e = material_properties.get("delta_e", 0.0) - - constants = PhysicalConstants( - temperature=temperature, - flight_path_length=flight_path, - delta_l=delta_l, - delta_g=delta_g, - delta_e=delta_e, - ) - else: - constants = PhysicalConstants( - temperature=293.6, - flight_path_length=25.0, - delta_l=0.0, - delta_g=0.0, - delta_e=0.0, - ) + broadening = self.fit_config.physics_params.broadening_parameters + + temperature = broadening.temp + if temperature is None and dataset_metadata is not None: + temperature = dataset_metadata.temperature_K + temperature = 293.6 if temperature is None else temperature + + flight_path = broadening.dist + flight_path = 25.0 if flight_path is None else flight_path + + delta_l = broadening.deltal + delta_l = 0.0 if delta_l is None else delta_l + delta_g = broadening.deltag + delta_g = 0.0 if delta_g is None else delta_g + delta_e = broadening.deltae + 
delta_e = 0.0 if delta_e is None else delta_e + + constants = PhysicalConstants( + temperature=temperature, + flight_path_length=flight_path, + delta_l=delta_l, + delta_g=delta_g, + delta_e=delta_e, + ) lines = Card05.to_lines(constants) return "\n" + lines[0] - def generate_card_7_section(self, material_properties: Dict = None) -> str: + def generate_card_7_section(self, dataset_metadata: Optional[InpDatasetMetadata] = None) -> str: """ - Generate the Card Set 7 section (CRFN, THICK). + Generate Card Set 7 (CRFN, THICK) from FitConfig. Args: - material_properties: Dict with material properties + dataset_metadata: Optional typed metadata used to derive THICK when + broadening.thick is not already defined in FitConfig. Returns: str: Card Set 7 line or empty string if unavailable """ - crfn = None - thick = None - - if material_properties: - crfn = material_properties.get("crfn") - thick = material_properties.get("thick") - - if thick is None: - density = material_properties.get("density_g_cm3") - thickness_mm = material_properties.get("thickness_mm") - atomic_mass = material_properties.get("atomic_mass_amu") - if density is not None and thickness_mm is not None and atomic_mass is not None: - from pleiades.utils.units import calculate_number_density - - thick = calculate_number_density(density, thickness_mm, atomic_mass) - - if crfn is None or thick is None: - broadening = self.fit_config.physics_params.broadening_parameters - crfn = broadening.crfn if crfn is None else crfn - thick = broadening.thick if thick is None else thick + broadening = self.fit_config.physics_params.broadening_parameters + crfn = broadening.crfn + thick = broadening.thick + if thick is None: + thick = self._number_density_from_dataset_metadata(dataset_metadata) if crfn is None or thick is None: return "" @@ -426,7 +439,7 @@ def generate_reaction_type_section(self) -> str: return self.reaction_type return "transmission" - def generate_card_set_2_element_info(self, material_properties: Dict = 
None) -> str: + def generate_card_set_2_element_info(self, dataset_metadata: Optional[InpDatasetMetadata] = None) -> str: """ Generate Card Set 2 (element information) according to SAMMY documentation. @@ -434,103 +447,78 @@ def generate_card_set_2_element_info(self, material_properties: Dict = None) -> according to SAMMY Card Set 2 specification. Args: - material_properties: Dict with material properties including element info + dataset_metadata: Optional typed metadata used to override selected + Card 2 values after reading defaults from FitConfig. Returns: str: Properly formatted Card Set 2 element information line """ - if material_properties: - element = material_properties.get("element", "Au") - mass_number = material_properties.get("mass_number") - atomic_mass = material_properties.get("atomic_mass_amu", 196.966569) - min_energy = material_properties.get("min_energy_eV", 0.001) - max_energy = material_properties.get("max_energy_eV", 1000.0) - - if mass_number is not None: - element_name = f"{element}{mass_number}" - else: - element_name = element - - element_info = ElementInfo( - element=element_name, - atomic_weight=atomic_mass, - min_energy=min_energy, - max_energy=max_energy, - nepnts=material_properties.get("nepnts", material_properties.get("number_of_energy_points")), - itmax=material_properties.get("itmax", material_properties.get("max_iterations")), - icorr=material_properties.get("icorr", material_properties.get("i_correlation")), - nxtra=material_properties.get("nxtra", material_properties.get("number_of_extra_points")), - iptdop=material_properties.get("iptdop", self.fit_config.iptdop if self._fit_config_provided else None), - iptwid=material_properties.get("iptwid", self.fit_config.iptwid if self._fit_config_provided else None), - ixxchn=material_properties.get("ixxchn", self.fit_config.ixxchn if self._fit_config_provided else None), - ndigit=material_properties.get("ndigit", self.fit_config.ndigit if self._fit_config_provided else None), - 
idropp=material_properties.get("idropp", self.fit_config.idropp if self._fit_config_provided else None), - matnum=material_properties.get("matnum", self.fit_config.matnum if self._fit_config_provided else None), - ) - elif self._fit_config_provided: - element_info = self._element_info_from_fit_config() - else: - element_info = ElementInfo( - element="Au197", - atomic_weight=196.96657, - min_energy=0.001, - max_energy=1000.0, - ) + element_info = self._element_info_from_fit_config() + if dataset_metadata: + if dataset_metadata.element: + if dataset_metadata.mass_number is not None: + element_info.element = f"{dataset_metadata.element}{dataset_metadata.mass_number}" + else: + element_info.element = dataset_metadata.element + if dataset_metadata.atomic_mass_amu is not None: + element_info.atomic_weight = dataset_metadata.atomic_mass_amu + if dataset_metadata.min_energy_eV is not None: + element_info.min_energy = dataset_metadata.min_energy_eV + if dataset_metadata.max_energy_eV is not None: + element_info.max_energy = dataset_metadata.max_energy_eV lines = Card02.to_lines(element_info) return lines[0] - def generate_broadening_parameters_section(self, material_properties: Dict = None) -> str: + def generate_broadening_parameters_section(self, dataset_metadata: Optional[InpDatasetMetadata] = None) -> str: """ - Generate broadening parameters section for multi-isotope mode. + Generate broadening parameters section from FitConfig. Args: - material_properties: Dict with material properties for calculations + dataset_metadata: Optional typed metadata used to fill temperature + and derive THICK when these are not defined in FitConfig. 
Returns: str: Broadening parameters section with required blank line before it """ - if material_properties: - from pleiades.experimental.models import BroadeningParameters - from pleiades.sammy.fitting.config import FitConfig - from pleiades.sammy.io.card_formats.par04_broadening import Card04 - from pleiades.utils.helper import VaryFlag - from pleiades.utils.units import calculate_number_density - - # Extract and validate material properties - density = material_properties.get("density_g_cm3") - thickness = material_properties.get("thickness_mm", 5.0) - atomic_mass = material_properties.get("atomic_mass_amu") - temperature = material_properties.get("temperature_K", 293.6) - - if density is None or atomic_mass is None: - raise ValueError("material_properties must contain 'density_g_cm3' and 'atomic_mass_amu'") - - # Calculate number density - number_density = calculate_number_density(density, thickness, atomic_mass) - - # Create FitConfig with broadening parameters using proper Card04 - fit_config = FitConfig() - - # Create BroadeningParameters object - broadening_params = BroadeningParameters( - crfn=8.0, # Matching radius - temp=temperature, # Temperature - thick=number_density, # Calculated number density - deltal=0.0, # Flight path spread - deltag=0.0, # Gaussian resolution - deltae=0.0, # Exponential resolution - flag_thick=VaryFlag.YES, # Allow SAMMY to vary thickness - ) - - # Add to fit_config - fit_config.physics_params.broadening_parameters = broadening_params + from pleiades.sammy.fitting.config import FitConfig + from pleiades.sammy.io.card_formats.par04_broadening import Card04 + from pleiades.utils.helper import VaryFlag - # Generate proper Card04 output with required blank line before it - lines = [""] + Card04.to_lines(fit_config) # Add blank line before broadening section - return "\n".join(lines) + broadening_params = self.fit_config.physics_params.broadening_parameters.model_copy(deep=True) + + if ( + broadening_params.temp is None + and 
dataset_metadata is not None + and dataset_metadata.temperature_K is not None + ): + broadening_params.temp = dataset_metadata.temperature_K + + if broadening_params.thick is None: + derived_thick = self._number_density_from_dataset_metadata(dataset_metadata) + if derived_thick is not None: + broadening_params.thick = derived_thick + broadening_params.flag_thick = VaryFlag.YES + + # Skip Card 4 generation when no primary broadening values exist. + has_primary_broadening_values = any( + value is not None + for value in ( + broadening_params.crfn, + broadening_params.temp, + broadening_params.thick, + broadening_params.deltal, + broadening_params.deltag, + broadening_params.deltae, + ) + ) + if not has_primary_broadening_values: + return "" - return "" # Return empty string when no broadening parameters + fit_config = FitConfig() + fit_config.physics_params.broadening_parameters = broadening_params + lines = [""] + Card04.to_lines(fit_config) + return "\n".join(lines) def generate_misc_parameters_section(self, flight_path_m: float = 25.0) -> str: """ @@ -625,27 +613,32 @@ def generate_resolution_function_section(self, resolution_file: str = "venus_res return "\n" + "\n".join(lines) def generate_multi_isotope_inp_content( - self, material_properties: Dict = None, resolution_file_path: Path = None + self, + dataset_metadata: Optional[InpDatasetMetadata] = None, + resolution_file_path: Path = None, ) -> str: """ Generate complete multi-isotope INP content with parameter sections. 
Args: - material_properties: Dict with material properties for parameter calculations + dataset_metadata: Optional typed metadata with dataset-level hints resolution_file_path: Optional absolute path to resolution function file Returns: str: Complete multi-isotope INP file content """ + broadening = self.fit_config.physics_params.broadening_parameters + flight_path_m = broadening.dist if broadening.dist is not None else 25.0 + sections = [ self.generate_title_section(), - self.generate_card_set_2_element_info(material_properties), # Use Card Set 2 for element info + self.generate_card_set_2_element_info(dataset_metadata), "\n".join(self.generate_commands()), - self.generate_physical_constants_section(material_properties), - self.generate_card_7_section(material_properties), + self.generate_physical_constants_section(dataset_metadata), + self.generate_card_7_section(dataset_metadata), self.generate_reaction_type_section(), - self.generate_broadening_parameters_section(material_properties), - self.generate_misc_parameters_section(), + self.generate_broadening_parameters_section(dataset_metadata), + self.generate_misc_parameters_section(flight_path_m=flight_path_m), self.generate_normalization_parameters_section(), self.generate_resolution_function_section( str(resolution_file_path.resolve()) if resolution_file_path else None @@ -945,7 +938,12 @@ def create_fitting_inp(cls, output_path: Path, title: str = None) -> Path: @classmethod def create_multi_isotope_inp( - cls, output_path: Path, title: str = None, material_properties: Dict = None, resolution_file_path: Path = None + cls, + output_path: Path, + fit_config: FitConfig, + title: str = None, + dataset_metadata: Optional[InpDatasetMetadata] = None, + resolution_file_path: Path = None, ) -> Path: """ Create input file for multi-isotope JSON mode fitting. 
@@ -955,19 +953,29 @@ def create_multi_isotope_inp(      Args:         output_path: Path to write the input file +        fit_config: Typed fit configuration used as the source of INP values         title: Optional title for the inp file -        material_properties: Optional dict with material properties for parameter calculations +        dataset_metadata: Optional typed metadata providing fallback +            values (for example, Card 2 overrides or THICK derivation inputs)         resolution_file_path: Optional absolute path to resolution function file      Returns:         Path: Path to the created file     """ +        if fit_config is None or not isinstance(fit_config, FitConfig): +            raise ValueError("fit_config must be an instance of FitConfig") +         options = FitOptions.from_multi_isotope_config() -        manager = cls(options, title=title or "Multi-isotope JSON mode fitting", reaction_type="transmission") +        manager = cls( +            options=options, +            fit_config=fit_config, +            title=title or "Multi-isotope JSON mode fitting", +            reaction_type="transmission", +        ) # Use specialized multi-isotope content generation         try: -            content = manager.generate_multi_isotope_inp_content(material_properties, resolution_file_path) +            content = manager.generate_multi_isotope_inp_content(dataset_metadata, resolution_file_path)  output_path = Path(output_path)         output_path.parent.mkdir(parents=True, exist_ok=True) diff --git a/tests/unit/pleiades/sammy/io/test_inp_manager.py b/tests/unit/pleiades/sammy/io/test_inp_manager.py index f3f796ae..4311b417 100644 --- a/tests/unit/pleiades/sammy/io/test_inp_manager.py +++ b/tests/unit/pleiades/sammy/io/test_inp_manager.py @@ -16,7 +16,7 @@ from pleiades.sammy.fitting.options import FitOptions from pleiades.sammy.io.card_formats.inp02_element import Card02, ElementInfo from pleiades.sammy.io.card_formats.inp05_broadening import Card05, PhysicalConstants -from pleiades.sammy.io.inp_manager import InpManager +from pleiades.sammy.io.inp_manager import InpDatasetMetadata, InpManager @@ -60,7 +60,7 @@ def 
test_init_with_all_parameters(): options = FitOptions() title = "Test Title" isotope_info = {"name": "Fe56", "mass": 55.934} - physical_constants = {"temperature": 300, "flight_path": 200} + physical_constants = {"temperature_K": 300, "flight_path_m": 200} reaction_type = "TRANSMISSION" inp_manager = InpManager( @@ -251,6 +251,7 @@ def test_custom_inp_creation(temp_dir): def test_create_multi_isotope_inp(temp_dir): """Test creating input file for multi-isotope JSON mode using class method.""" output_path = temp_dir / "multi_isotope.inp" + fit_config = FitConfig() with patch.object(FitOptions, "from_multi_isotope_config") as mock_from_multi: mock_options = MagicMock(spec=FitOptions) @@ -264,7 +265,9 @@ def test_create_multi_isotope_inp(temp_dir): ] mock_from_multi.return_value = mock_options - result_path = InpManager.create_multi_isotope_inp(output_path, title="Multi-isotope test") + result_path = InpManager.create_multi_isotope_inp( + output_path, fit_config=fit_config, title="Multi-isotope test" + ) assert result_path == output_path assert output_path.exists() @@ -283,9 +286,12 @@ def test_create_multi_isotope_inp(temp_dir): def test_multi_isotope_config_integration(temp_dir): """Test multi-isotope configuration integration without mocking.""" output_path = temp_dir / "multi_isotope_real.inp" + fit_config = FitConfig() # Test real implementation without mocking - result_path = InpManager.create_multi_isotope_inp(output_path, title="Real multi-isotope integration test") + result_path = InpManager.create_multi_isotope_inp( + output_path, fit_config=fit_config, title="Real multi-isotope integration test" + ) assert result_path == output_path assert output_path.exists() @@ -315,15 +321,24 @@ def test_multi_isotope_config_integration(temp_dir): ) -def test_multi_isotope_with_material_properties(temp_dir): - """Test multi-isotope INP generation with material properties.""" +def test_multi_isotope_with_dataset_metadata(temp_dir): + """Test multi-isotope INP generation 
with typed dataset metadata.""" output_path = temp_dir / "multi_isotope_with_materials.inp" + fit_config = FitConfig() + fit_config.physics_params.broadening_parameters.crfn = 8.0 - # Test with Hafnium material properties - material_props = {"density_g_cm3": 13.31, "thickness_mm": 5.0, "atomic_mass_amu": 178.49, "temperature_K": 293.6} + dataset_metadata = InpDatasetMetadata( + density_g_cm3=13.31, + thickness_mm=5.0, + atomic_mass_amu=178.49, + temperature_K=293.6, + ) result_path = InpManager.create_multi_isotope_inp( - output_path, title="Hafnium multi-isotope test", material_properties=material_props + output_path, + fit_config=fit_config, + title="Hafnium multi-isotope test", + dataset_metadata=dataset_metadata, ) assert result_path == output_path @@ -342,15 +357,22 @@ def test_multi_isotope_with_material_properties(temp_dir): # No resolution function expected when no resolution_file_path provided -def test_multi_isotope_missing_required_properties(temp_dir): - """Test multi-isotope INP generation with missing required properties.""" +def test_multi_isotope_missing_required_dataset_metadata_fields(temp_dir): + """Typed metadata should require all THICK derivation inputs when any are provided.""" output_path = temp_dir / "multi_isotope_missing.inp" + fit_config = FitConfig() - # Missing required density - incomplete_props = {"thickness_mm": 5.0, "atomic_mass_amu": 178.49} + incomplete_metadata = InpDatasetMetadata(thickness_mm=5.0, atomic_mass_amu=178.49) - with pytest.raises(ValueError, match="must contain 'density_g_cm3' and 'atomic_mass_amu'"): - InpManager.create_multi_isotope_inp(output_path, title="Should fail", material_properties=incomplete_props) + with pytest.raises( + ValueError, match="dataset_metadata must include density_g_cm3, thickness_mm, and atomic_mass_amu" + ): + InpManager.create_multi_isotope_inp( + output_path, + fit_config=fit_config, + title="Should fail", + dataset_metadata=incomplete_metadata, + ) def 
test_generate_physical_constants_section_uses_fit_config_dist(): @@ -374,6 +396,27 @@ def test_generate_physical_constants_section_uses_fit_config_dist(): assert constants.delta_e == pytest.approx(0.01) +def test_generate_physical_constants_section_uses_dataset_metadata_temperature_fallback(): + """Card 5 generation should use dataset metadata temperature when FitConfig temp is unset.""" + fit_config = FitConfig() + broadening = fit_config.physics_params.broadening_parameters + broadening.temp = None + broadening.dist = 40.0 + broadening.deltal = 0.4 + broadening.deltag = 0.3 + broadening.deltae = 0.2 + + manager = InpManager(fit_config=fit_config) + section = manager.generate_physical_constants_section(dataset_metadata=InpDatasetMetadata(temperature_K=310.0)) + constants = Card05.from_lines([section.strip()]) + + assert constants.temperature == pytest.approx(310.0) + assert constants.flight_path_length == pytest.approx(40.0) + assert constants.delta_l == pytest.approx(0.4) + assert constants.delta_g == pytest.approx(0.3) + assert constants.delta_e == pytest.approx(0.2) + + def test_read_inp_file_sets_broadening_dist_from_card5(temp_dir): """Card 5 parsing should map flight path length back to broadening.dist.""" fit_config = FitConfig()