@@ -845,9 +845,6 @@ def normalize_group_time_vals(self, time_vals: np.ndarray) -> np.ndarray:
     def drop_attributes(self, xr_ds: xr.Dataset) -> xr.Dataset:
         """Drop attributes that cause conflicts with xarray dataset merge"""
         drop_atts = ['average_T2',
-                     'time_bnds',
-                     'lat_bnds',
-                     'lon_bnds',
                      'average_DT',
                      'average_T1',
                      'height',
@@ -1477,7 +1474,7 @@ def log_history_attr(self, var, ds):
         ds.attrs['history'] = hist
         return ds
 
-    def write_dataset(self, var, ds):
+    def write_dataset(self, var: varlist_util.VarlistEntry, ds: xr.Dataset):
        """Writes processed Dataset *ds* to location specified by the
        ``dest_path`` attribute of *var*, using xarray `to_netcdf()
        <https://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html>`__.
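
The body of `write_dataset` first pulls the translated variable out into its own single-variable dataset and renames it back to the POD-facing name (visible in the context lines of the next hunk). A minimal runnable sketch of that xarray pattern, with hypothetical names ('ta' as the translated name, 'temp' as the POD's name):

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"ta": (("time",), np.arange(3.0))})   # 'ta' = translated name
var_ds = ds["ta"].to_dataset()                          # single-variable dataset
var_ds = var_ds.rename_vars(name_dict={"ta": "temp"})   # rename to POD-facing name
print(list(var_ds.data_vars))                           # ['temp']
```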
@@ -1486,11 +1483,27 @@ def write_dataset(self, var, ds):
         os.makedirs(os.path.dirname(var.dest_path), exist_ok=True)
         var_ds = ds[var.translation.name].to_dataset()
         var_ds = var_ds.rename_vars(name_dict={var.translation.name: var.name})
-        var.log.info("Writing '%s'.", var.dest_path, tags=util.ObjectLogTag.OUT_FILE)
         if var.is_static:
             unlimited_dims = []
         else:
             unlimited_dims = [var.T.name]
+        # append other grid types here as needed
+        irregular_grids = {'tripolar'}
+        if ds.attrs.get('grid', None) is not None:
+            # search for irregular grid types
+            for g in irregular_grids:
+                grid_search = re.compile(g, re.IGNORECASE)
+                grid_regex_result = grid_search.search(ds.attrs.get('grid'))
+                if grid_regex_result is not None:
+                    # add variables not included in xarray dataset if dims correspond to vertices and bounds
+                    append_vars = \
+                        (list(set([v for v in ds.variables
+                                   if 'vertices' in ds[v].dims
+                                   or 'bnds' in ds[v].dims]).difference([v for v in var_ds.variables])))
+                    for v in append_vars:
+                        v_dataset = ds[v].to_dataset()
+                        var_ds = xr.merge([var_ds, v_dataset])
+
 
         # The following block is retained for time comparison with dask delayed write procedure
         # var_ds.to_netcdf(
@@ -1503,6 +1516,7 @@ def write_dataset(self, var, ds):
 
         # Uncomment the timing lines and log calls if desired
         # start_time = time.monotonic()
+        var.log.info("Writing '%s'.", var.dest_path, tags=util.ObjectLogTag.OUT_FILE)
         delayed_write = var_ds.to_netcdf(
             path=var.dest_path,
             mode='w',
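
The name `delayed_write` points at xarray's deferred-write mode, where `to_netcdf(..., compute=False)` returns a `dask.delayed.Delayed` and the actual I/O happens on `.compute()`. The hunk cuts off before the remaining arguments, so the following is only a sketch of the general pattern (assuming dask and a netCDF backend are installed), not the exact call in the patch:

```python
import numpy as np
import xarray as xr

# Chunk the data so it is dask-backed, then defer the write to a Delayed object.
ds = xr.Dataset({"tas": (("time", "lat"), np.zeros((4, 2)))}).chunk({"time": 1})
delayed_write = ds.to_netcdf("example_output.nc", mode="w", compute=False)
delayed_write.compute()  # the file is actually written here
```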
@@ -1543,6 +1557,7 @@ def write_ds(self, case_list: dict,
                     raise util.chain_exc(exc, f"writing data for {var.full_name}.",
                                          util.DataPreprocessEvent)
 
+
         # del ds # shouldn't be necessary
 
     def parse_ds(self,
@@ -1648,7 +1663,6 @@ def write_pp_catalog(self,
             elif not var.is_static:
                 d.update({'frequency': var.T.frequency.unit})
             cat_entries.append(d)
-
         # create a Pandas dataframe from the catalog entries
 
         cat_df = pd.DataFrame(cat_entries)