44import os
55from pathlib import Path
66from typing import Literal
7+ from unittest .mock import patch
78
9+ import numpy as np
810import pytest
911
1012from .test_buffer_handler import create_parquet_dir
@@ -132,11 +134,13 @@ def test_save_read_parquet_cfel(config: dict) -> None:
132134
133135
def test_get_elapsed_time_fid(config: dict) -> None:
    """Test get_elapsed_time and get_elapsed_time_per_file methods of CFELLoader.

    Covers: aggregated elapsed time from cached metadata, per-file elapsed
    times, the per-file HDF5 fallback when metadata is missing or broken
    (mocked so no real file access happens), and ``precise=True`` which always
    goes through the single-file path.

    Args:
        config (dict): loader configuration fixture.
    """
    fl = CFELLoader(config=config)

    # -------------------------
    # Mock valid metadata
    # -------------------------
    # NOTE(review): the "timed" entries were hidden between diff hunks in the
    # reviewed text; values are reconstructed from the assertions below
    # (fid 0 spans 10→20, fid 1 spans 20→30) — confirm against the repository.
    fl.metadata = {
        "file_statistics": {
            "timed": {
                "0": {"columns": {"timeStamp": {"min": 10, "max": 20}}},
                "1": {"columns": {"timeStamp": {"min": 20, "max": 30}}},
                "2": {"columns": {"timeStamp": {"min": 30, "max": 40}}},
            },
        },
    }
    fl.files = ["file0", "file1", "file2"]

    # -------------------------
    # Total elapsed time (aggregated)
    # -------------------------
    elapsed_total = fl.get_elapsed_time(fids=[0, 1])
    expected_total = (20 - 10) + (30 - 20)
    assert elapsed_total == expected_total

    # -------------------------
    # Per-file elapsed time
    # -------------------------
    elapsed_list = fl.get_elapsed_time_per_file(fids=[0, 1])
    expected_list = [10, 10]
    # list() so the assertion also works if an array-like is returned
    assert list(elapsed_list) == expected_list

    # -------------------------
    # Missing metadata → fallback (mocked, no file access)
    # -------------------------
    fl.metadata = {"something": "else"}

    with patch.object(fl, "_get_elapsed_time_single", return_value=5.0):
        elapsed = fl.get_elapsed_time(fids=[0, 1])
        assert elapsed == 10.0  # 2 files × 5.0

    # -------------------------
    # Missing timestamp in one file → fallback (mocked)
    # -------------------------
    fl.metadata = {
        "file_statistics": {
            "timed": {
                "0": {},  # broken entry
                "1": {"columns": {"timeStamp": {"min": 20, "max": 30}}},
            },
        },
    }

    with patch.object(fl, "_get_elapsed_time_single", return_value=7.0):
        elapsed = fl.get_elapsed_time(fids=[0, 1])
        assert elapsed == 14.0  # 2 files × 7.0

    # -------------------------
    # Precise mode → always uses HDF5 → must mock
    # -------------------------
    with patch.object(fl, "_get_elapsed_time_single", return_value=3.0):
        elapsed_precise = fl.get_elapsed_time(fids=[0, 1], precise=True)
        assert elapsed_precise == 6.0
187200
188201
189202def test_get_elapsed_time_run (config : dict ) -> None :
190203 """Test get_elapsed_time method for runs with multiple files"""
204+
191205 config_ = config .copy ()
192206 data_parquet_dir = create_parquet_dir (config_ , "get_elapsed_time_run" )
193207 config_ ["core" ]["paths" ]["processed" ] = data_parquet_dir
194208 config_ ["core" ]["paths" ]["raw" ] = "tests/data/loader/cfel/"
195209
196- # Create an instance of CFELLoader
210+ # Create loader
197211 fl = CFELLoader (config = config_ )
198212
199- # Read dataframe for run 123
213+ # -------------------------
214+ # Load real data
215+ # -------------------------
200216 fl .read_dataframe (runs = [123 ])
201217
202- # Extract expected elapsed times per file from metadata
203- file_stats = fl .metadata ["file_statistics" ]["electron" ]
204- expected_elapsed_list = [
205- file_stats [str (fid )]["columns" ]["timeStamp" ]["max" ]
206- - file_stats [str (fid )]["columns" ]["timeStamp" ]["min" ]
207- for fid in range (len (fl .files ))
208- ]
218+ # -------------------------
219+ # Extract expected elapsed times from *timed* metadata
220+ # -------------------------
221+ file_stats = fl .metadata ["file_statistics" ]["timed" ]
222+
223+ expected_elapsed_list = []
224+ for fid in range (len (fl .files )):
225+ ts = file_stats [str (fid )]["columns" ]["timeStamp" ]
226+
227+ tmin = ts ["min" ].total_seconds () if hasattr (ts ["min" ], "total_seconds" ) else ts ["min" ]
228+ tmax = ts ["max" ].total_seconds () if hasattr (ts ["max" ], "total_seconds" ) else ts ["max" ]
229+
230+ expected_elapsed_list .append (tmax - tmin )
209231
210232 # -------------------------
211- # Aggregate=False → list of per -file elapsed times
233+ # Per -file elapsed time (NEW API)
212234 # -------------------------
213- elapsed_list = fl .get_elapsed_time (runs = [123 ], aggregate = False )
214- assert elapsed_list == expected_elapsed_list
235+ elapsed_list = fl .get_elapsed_time_per_file (runs = [123 ])
236+ assert np . allclose ( elapsed_list , expected_elapsed_list )
215237
216238 # -------------------------
217- # Aggregate=True → sum of per-file elapsed times
239+ # Total elapsed time (NEW behavior)
218240 # -------------------------
219- elapsed_total = fl .get_elapsed_time (runs = [123 ], aggregate = True )
241+ elapsed_total = fl .get_elapsed_time (runs = [123 ])
220242 expected_total = sum (expected_elapsed_list )
221- assert elapsed_total == expected_total
243+ assert np . isclose ( elapsed_total , expected_total )
222244
223245 # -------------------------
224- # Remove the parquet files created during test
246+ # Cleanup buffer files
225247 # -------------------------
226248 buffer_dir = Path (fl .processed_dir , "buffer" )
227249 if buffer_dir .exists ():
0 commit comments