diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index e46ff3b5d3..29f3975d39 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -75,8 +75,7 @@ jobs: # - Latest supported Python version for those or other dependencies. # Minimum version given in pyproject.toml + earlier version of Python # For this job only, the oldest version of Python supported by message-ix-models - - { upstream: v3.6.0, python: "3.9" } # Released 2022-08-18 - - { upstream: v3.7.0, python: "3.11" } # 2023-05-17 + - { upstream: v3.7.0, python: "3.9" } # Released 2023-05-17 - { upstream: v3.8.0, python: "3.12" } # 2024-01-12 # Latest released version + latest released Python - { upstream: v3.9.0, python: "3.13" } # 2024-06-04 @@ -86,7 +85,6 @@ jobs: exclude: # Specific version combinations that are invalid / not to be used # These versions of ixmp are not able locate the arm64 GAMS API binaries - - { os: macos-latest, version: { upstream: v3.6.0 }} - { os: macos-latest, version: { upstream: v3.7.0 }} - { os: macos-latest, version: { upstream: v3.8.0 }} - { os: macos-latest, version: { upstream: v3.9.0 }} @@ -133,9 +131,6 @@ jobs: v, result = "${{ matrix.version.upstream }}".replace("main", "vmain"), [] for condition, dependency in ( - (v <= "v3.6.0", "dask < 2024.3.0"), # dask[dataframe] >= 2024.3.0 requires dask-expr and in turn pandas >= 2.0 (#156) - (v <= "v3.6.0", "numpy < 2.0"), - (v <= "v3.6.0", "pandas < 2.0"), (v >= "v3.7.0", "dask[dataframe] < 2024.11.0"), # dask >= 2024.11.0 changes handling of dict (will be addressed in #225) (v <= "v3.7.0", "genno < 1.25"), # Upstream versions < 3.8.0 import genno.computations, removed in 1.25.0 (#156) (v < "v3.9.0", "pytest == 8.0.0"), # Upstream versions < 3.9.0 use a hook argument removed in pytest 8.1.0 (#155) diff --git a/.github/workflows/transport.yaml b/.github/workflows/transport.yaml index 1b3a16a96b..5c278d7ed4 100644 --- a/.github/workflows/transport.yaml +++ 
b/.github/workflows/transport.yaml @@ -7,15 +7,46 @@ env: target-workflow: transport # Starting point of the workflow. + # # Use this value to build from a certain scenario: # base: --url="ixmp://ixmp-dev/MESSAGEix-GLOBIOM 1.1-R12/baseline_DEFAULT#21" + # # Use this value to allow the workflow to determine model & scenario names # and versions: base: --platform=ixmp-dev - # Set this to a particular step to truncate the workflow + # Set this to a particular step to truncate the workflow. from-step: "" + # Workflow steps/labels to run. These correspond to the 'TARGET' argument to + # 'mix-models transport run'. Each label triggers 1 job in the target-repo/ + # target-workflow. + # + # - Delete lines to disable some runs. + # - Ensure there is NO trailing comma on the last line. + labels: >- + [ + "SSP1", + "SSP1 policy", + "SSP2", + "SSP2 policy", + "SSP3", + "SSP3 policy", + "SSP4", + "SSP4 policy", + "SSP5", + "SSP5 policy", + "EDITS-CA", + "EDITS-HA", + "LED-SSP1", + "LED-SSP2" + ] + + # Currently disabled: + # [ + # + # ] + on: # Uncomment these lines for debugging, but leave them commented on 'main' # pull_request: @@ -36,22 +67,28 @@ jobs: runs-on: ubuntu-latest steps: - - name: Invoke "${{ env.target-workflow }}" workflow in ${{ env.target-repo }} - env: - GH_TOKEN: ${{ secrets.MESSAGE_DATA_DISPATCH_TOKEN }} - run: | - gh workflow run \ - ${{ env.target-workflow }}.yaml \ - --repo=${{ env.target-repo }} \ - --ref=${{ env.target-ref }} \ - --field ref=${{ github.ref }} \ - --field sha=${{ github.sha }} \ - --field base=${{ env.base }} \ - --field from-step=${{ env.from-step }} - - sleep 5 - - gh run list \ - --workflow=${{ env.target-workflow.yaml }} --repo=${{ env.target-repo }} \ - --json url,status \ - --jq 'map(select(.status != "completed"))[0].url' >> $GITHUB_STEP_SUMMARY + - name: Assemble JSON payload + run: | + echo '{ + "ref": "${{ github.ref }}", + "sha": "${{ github.sha }}", + "base": "${{ env.base }}", + "from-step": "${{ env.from-step }}", + 
"labels": ${{ env.labels }} + }' | jq -r -c '.labels = (.labels | tostring)' | tee payload.json + + - name: Invoke "${{ env.target-workflow }}" workflow in ${{ env.target-repo }} + env: + GH_TOKEN: ${{ secrets.MESSAGE_DATA_DISPATCH_TOKEN }} + run: | + cat payload.json | gh workflow run --json \ + ${{ env.target-workflow }}.yaml \ + --repo=${{ env.target-repo }} \ + --ref=${{ env.target-ref }} + + sleep 5 + + gh run list \ + --workflow=${{ env.target-workflow.yaml }} --repo=${{ env.target-repo }} \ + --json url,status \ + --jq 'map(select(.status != "completed"))[0].url' >>$GITHUB_STEP_SUMMARY diff --git a/doc/api/report/index.rst b/doc/api/report/index.rst index 05a299d251..8d7e42c7d2 100644 --- a/doc/api/report/index.rst +++ b/doc/api/report/index.rst @@ -172,7 +172,6 @@ Utilities .. autosummary:: add_replacements - as_quantity collapse collapse_gwp_info copy_ts diff --git a/doc/api/util.rst b/doc/api/util.rst index bafb678523..22c673eabd 100644 --- a/doc/api/util.rst +++ b/doc/api/util.rst @@ -54,7 +54,6 @@ Commonly used: private_data_path same_node same_time - series_of_pint_quantity show_versions .. automodule:: message_ix_models.util diff --git a/doc/conf.py b/doc/conf.py index f9933ff269..a0ec51b7fd 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -75,6 +75,7 @@ .. role:: underline .. |c| replace:: :math:`c` +.. |l| replace:: :math:`l` .. |n| replace:: :math:`n` .. |t| replace:: :math:`t` .. |y| replace:: :math:`y` diff --git a/doc/transport/input.rst b/doc/transport/input.rst index 08c1ade700..a628c2d615 100644 --- a/doc/transport/input.rst +++ b/doc/transport/input.rst @@ -204,6 +204,8 @@ node = R12_SAS [2]_ .. [2] A. Javaid, `message_data#180 (comment) `_. .. [3] A. Javaid, `message_data#538 (comment) `__. +.. _transport-pdt-cap-proj: + :file:`pdt-cap.csv` → ``P activity:scenario-n-t-y:exo`` ------------------------------------------------------- @@ -221,7 +223,6 @@ PDT per capita. .. todo:: Transcribe the method into this document. 
- :file:`pdt-cap-ref.csv` → ``pdt:n:capita+ref`` ---------------------------------------------- diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst index f24fd60d5e..81a18eea95 100644 --- a/doc/whatsnew.rst +++ b/doc/whatsnew.rst @@ -4,11 +4,12 @@ What's new Next release ============ -- Support for :mod:`ixmp` and :mod:`message_ix` versions 3.4.x and 3.5.x is dropped in accordance with the :ref:`policy-upstream-versions` (:pull:`288`). - The minimum version of both packages is 3.6.0. +- Support for :mod:`ixmp` and :mod:`message_ix` versions 3.4, 3.5, and 3.6 is dropped in accordance with the :ref:`policy-upstream-versions` (:pull:`288`, :pull:`289`). + The minimum version of both packages is 3.7.0. - Update :class:`.IEA_EWEB` to support :py:`transform="B"` / :func:`.transform_B` (:issue:`230`, :pull:`259`). - Add :func:`.prepare_method_B` to :mod:`.ssp.transport` (:pull:`259`). - New utility :class:`.sdmx.AnnotationsMixIn` (:pull:`259`). +- Drop obsolete :py:`series_of_pint_quantity()` (:pull:`289`). By topic: @@ -19,13 +20,22 @@ By topic: Transport --------- -Update :doc:`/transport/index` (:pull:`259`). +Update :doc:`/transport/index` (:pull:`259`, :pull:`289`). - Adjust constraints on :py:`t="conm_ar"`. - Recompute :attr:`.minimum_activity` for transport technologies. -- Adjust freight activity, freight and passenger mode shares for some regions. +- Improve freight representation: + + - Adjust freight activity, freight and passenger mode shares for some regions. + - Add dynamic constraints on activity of freight technologies. + - Fix alignment of freight technology outputs with demand |l|. + +- Implement LED override using exogenous passenger activity data from :ref:`transport-pdt-cap-proj`. - Drop :file:`base-scenario-url.json`; store base scenario URLs in :ref:`CL_TRANSPORT_SCENARIO`. +- Generate SDMX-ML structural metadata, including data flow definitions, and SDMX-{CSV,ML} data outputs for certain reported quantities. 
+- Expand use of fixed/shared keys from :mod:`.transport.key`. - Simplify and consolidate tests. +- Improve :func:`.simulated_solution` to load ‘simulated’ solution data from file to reduce test durations. Documentation ------------- @@ -49,6 +59,8 @@ Documentation :doc:`project/sparccle`, and :doc:`project/uptake` (:pull:`282`). +- New utility :class:`.sdmx.AnnotationsMixIn` (:pull:`259`). +- Bug fix: adjust or guard some Python usage that was not compatible with Python 3.9—the earliest version supported by :mod:`message_ix_models` (:pull:`295`, :issue:`294`). v2025.1.10 ========== @@ -443,12 +455,12 @@ Earlier releases 2021.7.27 --------- -- Improve caching using mod:`genno` v1.8.0 (:pull:`29`). +- Improve caching using :mod:`genno` v1.8.0 (:pull:`29`). 2021.7.22 --------- -- Migrate utilities :func:`.cached`, :func:`.check_support`, :func:`.convert_units`, :func:`.maybe_query`, :func:`.series_of_pint_quantity` (:pull:`27`) +- Migrate utilities :func:`.cached`, :func:`.check_support`, :func:`.convert_units`, :func:`.maybe_query`, :py:`series_of_pint_quantity()` (:pull:`27`) - Add :data:`.testing.NIE`. - Add the ``--jvmargs`` option to :command:`pytest` (see :func:`.pytest_addoption`). - Remove :py:`.Context.get_config_file()`, :py:`.get_path()`, :py:`.load_config()`, and :py:`.units`, all deprecated since 2021-02-28. 
diff --git a/message_ix_models/data/sdmx/IIASA_ECE_CL_TRANSPORT_SCENARIO(1.0.0).xml b/message_ix_models/data/sdmx/IIASA_ECE_CL_TRANSPORT_SCENARIO(1.0.0).xml index 2bfbd14587..ea12214e3e 100644 --- a/message_ix_models/data/sdmx/IIASA_ECE_CL_TRANSPORT_SCENARIO(1.0.0).xml +++ b/message_ix_models/data/sdmx/IIASA_ECE_CL_TRANSPORT_SCENARIO(1.0.0).xml @@ -3,13 +3,13 @@ none false - 2025-01-15T10:37:47.926181 + 2025-02-20T10:44:16.460424 - Generated by message_ix_models 2025.1.11.dev1+gabce19674.d20250113 + Generated by message_ix_models 2025.1.11.dev97+gdf7a6cee.d20250211 - + @@ -22,7 +22,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP1_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP1_v2.1/baseline_DEFAULT_step_13' @@ -38,7 +38,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP2_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP2_v2.1/baseline_DEFAULT_step_13' @@ -54,7 +54,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP3_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP3_v2.1/baseline_DEFAULT_step_13' @@ -70,7 +70,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP4_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP4_v2.1/baseline_DEFAULT_step_13' @@ -86,7 +86,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP5_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP5_v2.1/baseline_DEFAULT_step_13' @@ -102,7 +102,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP1_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP1_v2.1/baseline_DEFAULT_step_13' Low Energy Demand/High-with-Low scenario with SSP1 demographics @@ -119,7 +119,7 @@ None - 'ixmp://ixmp-dev/SSP_SSP2_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP2_v2.1/baseline_DEFAULT_step_13' Low Energy Demand/High-with-Low scenario with SSP2 demographics @@ -136,7 +136,7 @@ 'CA' - 'ixmp://ixmp-dev/SSP_SSP2_v1.1/baseline_DEFAULT_step_13' + 'ixmp://ixmp-dev/SSP_SSP2_v2.1/baseline_DEFAULT_step_13' EDITS scenario with ITF PASTA 'CA' activity @@ -153,7 +153,7 @@ 'HA' - 'ixmp://ixmp-dev/SSP_SSP2_v1.1/baseline_DEFAULT_step_13' + 
'ixmp://ixmp-dev/SSP_SSP2_v2.1/baseline_DEFAULT_step_13' EDITS scenario with ITF PASTA 'HA' activity diff --git a/message_ix_models/data/test/transport/MESSAGEix-Transport R12 YB 41eee_baseline.xlsx b/message_ix_models/data/test/transport/MESSAGEix-Transport R12 YB 41eee_baseline.xlsx new file mode 100644 index 0000000000..5f7a6242c9 --- /dev/null +++ b/message_ix_models/data/test/transport/MESSAGEix-Transport R12 YB 41eee_baseline.xlsx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1721a7f774603e85f2afdc84a5d2564d77b7907840aaea9919c965d39080df71 +size 23418171 diff --git a/message_ix_models/data/transport/R12/pdt-cap.csv b/message_ix_models/data/transport/R12/pdt-cap.csv index 84a1afba97..aa8d7706d9 100644 --- a/message_ix_models/data/transport/R12/pdt-cap.csv +++ b/message_ix_models/data/transport/R12/pdt-cap.csv @@ -1,6 +1,6 @@ # Projected passenger-distance travelled (PDT) per capita # -# Units: km / passenger +# Units: km / year # scenario, node, technology, year, value LED, R12_AFR, 2W, 2020, 360 diff --git a/message_ix_models/data/transport/set.yaml b/message_ix_models/data/transport/set.yaml index 5d21c933cd..bb7d2cbc83 100644 --- a/message_ix_models/data/transport/set.yaml +++ b/message_ix_models/data/transport/set.yaml @@ -168,7 +168,8 @@ commodity: "transport vehicle {technology.id}": _generate: {technology: LDV} name: Activity for {technology.id} LDVs - units: Gv km + # FIXME This is necessary to avoid imputed dimensionless in + units: (Gv km)**2 "transport pax {consumer_group.id}": _generate: {consumer_group: null} diff --git a/message_ix_models/data/transport/technology.yaml b/message_ix_models/data/transport/technology.yaml index e6ffaa933e..1721aeb27e 100644 --- a/message_ix_models/data/transport/technology.yaml +++ b/message_ix_models/data/transport/technology.yaml @@ -302,7 +302,7 @@ F RAIL: - f rail electr - f rail lightoil units: Gv km - output: {commodity: transport F RAIL vehicle} + output: {commodity: transport F RAIL 
vehicle, level: useful} iea-eweb-flow: [RAIL] crail_pub: @@ -342,14 +342,14 @@ transport F RAIL usage: description: >- Conversion from vehicle-distance traveled to freight transport activity input: {commodity: transport F RAIL vehicle, level: useful} - output: {commodity: transport F RAIL} + output: {commodity: transport F RAIL, level: useful} units: tonne / vehicle transport F ROAD usage: description: >- Conversion from vehicle-distance traveled to freight transport activity input: {commodity: transport F ROAD vehicle, level: useful} - output: {commodity: transport F ROAD} + output: {commodity: transport F ROAD, level: useful} units: tonne / vehicle F usage: @@ -455,27 +455,13 @@ F ROAD: - FR_ICG - FR_ICH units: Gv km - output: {commodity: transport F ROAD vehicle} + output: {commodity: transport F ROAD vehicle, level: useful} iea-eweb-flow: [ROAD] F: # Freight modes - # TODO Prepare this hierarchically from the following - # child: - # - F RAIL - # - F ROAD child: - - f rail electr - - f rail lightoil - - f road electr - - FR_FCg - - FR_FCH - - FR_FCm - - FR_ICAe - - FR_ICE_H - - FR_ICE_L - - FR_ICE_M - - FR_ICG - - FR_ICH + - F RAIL + - F ROAD AIR: name: Aviation @@ -527,6 +513,14 @@ RAIL: units: Gv km iea-eweb-flow: [RAIL] +P: # Passenger modes + child: + - 2W + - AIR + - BUS + - LDV + - RAIL + transport other coal: {input: {commodity: coal}} transport other electr: {input: {commodity: electr}} transport other fueloil: {input: {commodity: fueloil}} diff --git a/message_ix_models/model/structure.py b/message_ix_models/model/structure.py index 427e5056d5..f8922cd6fa 100644 --- a/message_ix_models/model/structure.py +++ b/message_ix_models/model/structure.py @@ -3,7 +3,7 @@ from collections import ChainMap from collections.abc import Mapping, MutableMapping from copy import copy -from functools import lru_cache +from functools import cache from itertools import product import click @@ -19,7 +19,7 @@ log = logging.getLogger(__name__) -@lru_cache() +@cache def 
codelists(kind: str) -> list[str]: """Return a valid IDs for code lists of `kind`. @@ -31,7 +31,7 @@ def codelists(kind: str) -> list[str]: return sorted(path.stem for path in package_data_path(kind).glob("*.yaml")) -@lru_cache() +@cache def get_codes(name: str) -> list[Code]: """Return codes for the dimension/set `name` in MESSAGE-GLOBIOM scenarios. @@ -92,7 +92,7 @@ def get_codes(name: str) -> list[Code]: return data -@lru_cache() +@cache def get_codelist(name: str) -> Codelist: """Return a :class:`.Codelist` for `name` in MESSAGEix-GLOBIOM scenarios.""" cl = Codelist(id=name.replace("/", "_").upper()) @@ -100,7 +100,7 @@ def get_codelist(name: str) -> Codelist: return cl -@lru_cache() +@cache def get_region_codes(codelist: str) -> list[Code]: """Return the codes that are children of "World" in the specified `codelist`.""" nodes = get_codes(f"node/{codelist}") diff --git a/message_ix_models/model/transport/base.py b/message_ix_models/model/transport/base.py index 23f1ac2b88..8691cb23db 100644 --- a/message_ix_models/model/transport/base.py +++ b/message_ix_models/model/transport/base.py @@ -8,12 +8,13 @@ import genno import numpy as np import pandas as pd -from genno import Computer, KeySeq +from genno import Computer, Key, KeySeq, quote from genno.core.key import single_key from message_ix_models.util import minimum_version from .key import gdp_exo +from .key import report as k_report if TYPE_CHECKING: import message_ix @@ -115,7 +116,7 @@ def clip_nan(qty: "AnyQuantity", coord: Any) -> "AnyQuantity": # - Return clipped values. return genno.Quantity( qty.sel({dim: coord}) - .expand_dims({dim: qty.coords[dim]}) + .expand_dims({dim: qty.coords[dim].data}) .to_series() .rename("threshold") .to_frame() @@ -166,11 +167,12 @@ def prepare_reporter(rep: "message_ix.Reporter") -> str: (c, t) totals in correspondence with IEA World Energy Balance (WEB) values. 5. :file:`scale-2.csv`: Second stage scaling factor used to bring overall totals. 
""" - from genno import Key, KeySeq, quote # Add an empty list; invoking this key will trigger calculation of all the keys # below added to the list rep.add(RESULT_KEY, []) + # Add this result key to the list of all reporting keys + rep.graph[k_report.all].append(RESULT_KEY) # Create output subdirectory for base model files rep.graph["config"]["output_dir"].joinpath("base").mkdir( diff --git a/message_ix_models/model/transport/build.py b/message_ix_models/model/transport/build.py index b28b54b512..c65f21bc27 100644 --- a/message_ix_models/model/transport/build.py +++ b/message_ix_models/model/transport/build.py @@ -154,7 +154,11 @@ def add_exogenous_data(c: Computer, info: ScenarioInfo) -> None: from message_ix_models.tools.exo_data import prepare_computer # Ensure that the MERtoPPP data provider is available - from . import data # noqa: F401 + from . import ( + data, # noqa: F401 + key, + ) + from .files import FILES, add # Added keys keys = {} @@ -179,7 +183,6 @@ def add_exogenous_data(c: Computer, info: ScenarioInfo) -> None: keys[kw["measure"]] = prepare_computer( context, c, source, source_kw=kw, strict=False ) - # Add data for MERtoPPP kw = dict(measure="MERtoPPP", nodes=context.model.regions) prepare_computer(context, c, "transport MERtoPPP", source_kw=kw, strict=False) @@ -208,34 +211,35 @@ def add_exogenous_data(c: Computer, info: ScenarioInfo) -> None: # Add the base data kw = dict(measure=m, name=f"advance {n}") kw.update(common) - key, *_ = prepare_computer(context, c, "ADVANCE", source_kw=kw, strict=False) + k, *_ = prepare_computer(context, c, "ADVANCE", source_kw=kw, strict=False) # Broadcast to R12 - c.add(f"{n}:n:advance", "broadcast_advance", key, "y0", "config") + c.add(f"{n}:n:advance", "broadcast_advance", k, "y0", "config") # Alias for other computations which expect the upper-case name c.add("MERtoPPP:n-y", "mertoppp:n-y") - try: - c.add("GDP:n-y", "gdp:n-y", strict=True) - except KeyExistsError as e: - log.info(repr(e)) # Solved scenario 
that already has this key + + # FIXME Ensure the latter case for a simulated solution + # if key.GDP in c: + if False: + pass # Solved scenario that already has this key + else: + c.add(key.GDP, keys["GDP"][0]) # Ensure correct units c.add("population:n-y", "mul", "pop:n-y", genno.Quantity(1.0, units="passenger")) - # Dummy prices - try: - c.add( - "PRICE_COMMODITY:n-c-y", - "dummy_prices", - keys["GDP"][0], - sums=True, - strict=True, - ) - except KeyExistsError as e: - log.info(repr(e)) # Solved scenario that already has this key + # FIXME Adjust to derive PRICE_COMMODITY c=transport from solved scenario with + # MESSAGEix-Transport detail, then uncomment the following line + # if key.price.base - "transport" in c: + if False: + # Alias PRICE_COMMODITY:… to PRICE_COMMODITY:*:transport, e.g. solved scenario + # that already has this key + c.add(key.price[0], key.price.base - "transport") + else: + # Not solved scenario → dummy prices + c.add(key.price[0], "dummy_prices", keys["GDP"][0], sums=True) # Data from files - from .files import FILES, add # Identify the mode-share file according to the config setting add( @@ -342,20 +346,11 @@ def add_structure(c: Computer) -> None: :class:`str`. See :func:`.get_technology_groups`. - ``t::transport RAIL`` etc.: :class:`dict` mapping "t" to the elements of ``t::RAIL``. - - ``broadcast:t-c-l:input``: Quantity for broadcasting (all values 1) from every - transport |t| (same as ``t::transport``) to the :math:`(c, l)` that that - technology receives as input. See :func:`.broadcast_t_c_l`. - - ``broadcast:t-c-l:input``: same as above, but for the :math:`(c, l)` that the - technology produces as output. - - ``broadcast:y-yv-ya:all``: Quantity for broadcasting (all values 1) from every |y| - to every possible combination of :math:`(y^V=y, y^A)`—including historical - periods. See :func:`.broadcast_y_yv_ya`. - - ``broadcast:y-yv-ya``: same as above, but only model periods (``y::model``). 
- - ``broadcast:y-yv-ya:no vintage``: same as above, but only the cases where - :math:`y^V = y^A`. + - All of the keys in :data:`.bcast_tcl` and :data:`.bcast_y`. """ from ixmp.report import configure + from . import key from .operator import broadcast_t_c_l, broadcast_y_yv_ya # Retrieve configuration and other information @@ -387,7 +382,7 @@ def add_structure(c: Computer) -> None: "cat_year", pd.DataFrame([["firstmodelyear", info.y0]], columns=["type_year", "year"]), ), - ("y::model", "model_periods", "y", "cat_year"), + (key.y, "model_periods", "y", "cat_year"), ("y0", itemgetter(0), "y::model"), ): try: @@ -410,7 +405,7 @@ def add_structure(c: Computer) -> None: ("t::transport", quote(spec.add.set["technology"])), ("t::transport agg", quote(dict(t=t_groups))), ("t::transport all", quote(dict(t=spec.add.set["technology"]))), - ("t::transport modes", quote(config.demand_modes)), + (key.t_modes, quote(config.demand_modes)), ("t::transport modes 0", quote(dict(t=list(t_groups.keys())))), ( "t::transport modes 1", @@ -419,30 +414,23 @@ def add_structure(c: Computer) -> None: ] # Quantities for broadcasting (t,) to (t, c, l) dimensions - tasks += [ + tasks.extend( ( - f"broadcast:t-c-l:transport+{kind}", + getattr(key.bcast_tcl, kind), partial(broadcast_t_c_l, kind=kind, default_level="final"), "t::transport", "c::transport+base", ) for kind in ("input", "output") - ] + ) # Quantities for broadcasting y to (yv, ya) - for base, tag, method in ( - ("y", ":all", "product"), # All periods - ("y::model", "", "product"), # Model periods only - ("y::model", ":no vintage", "zip"), # Model periods with no vintaging + for k, base, method in ( + (key.bcast_y.all, "y", "product"), # All periods + (key.bcast_y.model, "y::model", "product"), # Model periods only + (key.bcast_y.no_vintage, "y::model", "zip"), # Model periods with no vintaging ): - tasks.append( - ( - f"broadcast:y-yv-ya{tag}", - partial(broadcast_y_yv_ya, method=method), - base, - base, - ) - ) + tasks.append((k, 
partial(broadcast_y_yv_ya, method=method), base, base)) # Groups of technologies and indexers # FIXME Combine or disambiguate these keys @@ -490,7 +478,7 @@ def get_computer( If :any:`True` (the default), a file :file:`transport/build.svg` is written in the local data directory with a visualization of the ``add transport data`` key. """ - from . import operator + from . import key, operator # Configure config = Config.from_context(context, **kwargs) @@ -539,6 +527,7 @@ def get_computer( # Add a computation that is an empty list. # Individual modules's prepare_computer() functions can append keys. c.add("add transport data", []) + c.add(key.report.all, []) # Needed by .plot.prepare_computer() # Add structure-related keys add_structure(c) diff --git a/message_ix_models/model/transport/config.py b/message_ix_models/model/transport/config.py index 92ee2ce220..04a6d5dff6 100644 --- a/message_ix_models/model/transport/config.py +++ b/message_ix_models/model/transport/config.py @@ -9,9 +9,9 @@ from message_ix_models.project.navigate import T35_POLICY as NAVIGATE_SCENARIO from message_ix_models.project.ssp import SSP_2024, ssp_field from message_ix_models.project.transport_futures import SCENARIO as FUTURES_SCENARIO -from message_ix_models.report.util import as_quantity from message_ix_models.util import identify_nodes, package_data_path from message_ix_models.util.config import ConfigHelper +from message_ix_models.util.genno import as_quantity from message_ix_models.util.sdmx import AnnotationsMixIn if TYPE_CHECKING: @@ -349,7 +349,9 @@ def from_context( pass else: if scenario: - log.debug(f".transport.Config.from_context: {scenario.set('node') = }") + log.debug( + f"scenario.set('node') = {' '.join(sorted(scenario.set('node')))}" + ) if context.model.regions != regions: log.info( f"Override Context.model.regions={context.model.regions!r} with " @@ -474,7 +476,7 @@ def get_cl_scenario() -> "common.Codelist": ) -def refresh_cl_scenario(cl: "common.Codelist") -> 
"common.Codelist": +def refresh_cl_scenario(cl: Optional["common.Codelist"] = None) -> "common.Codelist": """Refresh ``Codelist=IIASA_ECE:CL_TRANSPORT_SCENARIO``. The code list is entirely regenerated. If it is different from `cl`, the new @@ -489,14 +491,21 @@ def refresh_cl_scenario(cl: "common.Codelist") -> "common.Codelist": cl_ssp_2024 = read("ICONICS:SSP(2024)") candidate: "common.Codelist" = common.Codelist( - id="CL_TRANSPORT_SCENARIO", maintainer=IIASA_ECE, version="1.0.0" + id="CL_TRANSPORT_SCENARIO", + maintainer=IIASA_ECE, + version="1.0.0", + is_external_reference=False, + is_final=False, ) - # - The model name is per a Microsoft Teams message on 2024-11-25. + # - Model name: + # - 2024-11-25: use _v1.1 per a Microsoft Teams message. + # - 2025-02-20: update to _v2.1 per discussion with OF. At this point _v2.3 is the + # latest appearing in the database. # - The scenario names appear to form a sequence from "baseline_DEFAULT" to # "baseline_DEFAULT_step_15" and finally "baseline". The one used below is the # latest in this sequence for which y₀=2020, rather than 2030. - base_url = "ixmp://ixmp-dev/SSP_SSP{}_v1.1/baseline_DEFAULT_step_13" + base_url = "ixmp://ixmp-dev/SSP_SSP{}_v2.1/baseline_DEFAULT_step_13" def _a(c, led, edits): """Shorthand to generate the annotations.""" @@ -529,7 +538,7 @@ def _a(c, led, edits): ) ) - if not candidate.compare(cl, strict=True): + if cl is None or not candidate.compare(cl, strict=True): write(candidate) return candidate else: diff --git a/message_ix_models/model/transport/demand.py b/message_ix_models/model/transport/demand.py index b029f417cb..6058f9675e 100644 --- a/message_ix_models/model/transport/demand.py +++ b/message_ix_models/model/transport/demand.py @@ -10,6 +10,7 @@ from genno import Computer, KeySeq from message_ix import make_df +from message_ix_models.report.key import GDP from message_ix_models.util import broadcast from . 
import files as exo @@ -18,7 +19,6 @@ cost, fv, fv_cny, - gdp, gdp_cap, gdp_index, gdp_ppp, @@ -33,9 +33,6 @@ pdt_nyt, pop, price, - price_full, - price_sel0, - price_sel1, sw, t_modes, y, @@ -102,7 +99,7 @@ def dummy( (ms + "base", "base_shares", "mode share:n-t:exo", n, t_modes, y), # GDP expressed in PPP. The in the SSP(2024) input files, this conversion is already # applied, so no need to multiply by a mer_to_ppp factor here → simple alias. - (gdp_ppp, gdp), + (gdp_ppp, GDP), # GDP PPP per capita (gdp_cap, "div", gdp_ppp, pop), # @@ -115,10 +112,10 @@ def dummy( ("votm:n-y", "votm", gdp_cap), # Select only the price of transport services # FIXME should be the full set of prices - ((price_sel0, "select", price_full), dict(indexers=dict(c="transport"), drop=True)), - (price_sel1, "price_units", price_sel0), + ((price[1], "select", price[0]), dict(indexers=dict(c="transport"), drop=True)), + (price[2], "price_units", price[1]), # Smooth prices to avoid zig-zag in share projections - (price, "smooth", price_sel1), + (price.base, "smooth", price[2]), # Interpolate speed data ( ("speed:scenario-n-t-y:0", "interpolate", exo.speed, "y::coords"), @@ -127,7 +124,7 @@ def dummy( # Select speed data ("speed:n-t-y", "select", "speed:scenario-n-t-y:0", "indexers:scenario"), # Cost of transport (n, t, y) - (cost, "cost", price, gdp_cap, "whour:", "speed:n-t-y", "votm:n-y", y), + (cost, "cost", price.base, gdp_cap, "whour:", "speed:n-t-y", "votm:n-y", y), # Share weights (n, t, y) ( sw, @@ -228,9 +225,7 @@ def pdt_per_capita(c: Computer) -> None: between projected, log GDP in each future period and the log GDP in the reference year. """ - from . import key - - gdp = KeySeq(key.gdp) + gdp = KeySeq(GDP) pdt = KeySeq("_pdt:n-y") # GDP expressed in PPP. 
In the SSP(2024) input files, this conversion is already @@ -321,14 +316,22 @@ def prepare_computer(c: Computer) -> None: config: "Config" = c.graph["context"].transport - if config.project.get("LED", False): - # Select from the file input - c.add(pdt_cap, "select", exo.pdt_cap_proj, indexers=dict(scenario="LED")) - else: - c.apply(pdt_per_capita) + # Compute total PDT per capita + c.apply(pdt_per_capita) # Insert a scaling factor that varies according to SSP setting c.apply(factor.insert, pdt_cap, name="pdt non-active", target=pdt_cap + "adj") + # Add other tasks for demand calculation c.add_queue(TASKS) + + if config.project.get("LED", False): + # Replace certain calculations for LED projected activity + + # Select data from input file: projected PDT per capita + c.add(pdt_cap * "t", "select", exo.pdt_cap_proj, indexers=dict(scenario="LED")) + + # Multiply by population for the total + c.add(pdt_nyt + "0", "mul", pdt_cap * "t", pop) + c.add("transport_data", __name__, key="transport demand::ixmp") diff --git a/message_ix_models/model/transport/files.py b/message_ix_models/model/transport/files.py index fffe789781..943a917792 100644 --- a/message_ix_models/model/transport/files.py +++ b/message_ix_models/model/transport/files.py @@ -329,7 +329,7 @@ def read_structures() -> "sdmx.message.StructureMessage": key="P activity:scenario-n-t-y:exo", path="pdt-cap", name="Projected passenger-distance travelled (PDT) per capita", - units="km / passenger / year", + units="km / year", required=False, ) diff --git a/message_ix_models/model/transport/freight.py b/message_ix_models/model/transport/freight.py index 26d08d8455..ca8ce04ef3 100644 --- a/message_ix_models/model/transport/freight.py +++ b/message_ix_models/model/transport/freight.py @@ -1,13 +1,29 @@ """Freight transport data.""" +from collections import defaultdict from functools import partial +from typing import TYPE_CHECKING import genno +import numpy as np +import pandas as pd from iam_units import registry +from 
message_ix import make_df + +from message_ix_models.util import ( + broadcast, + convert_units, + make_matched_dfs, + same_node, + same_time, +) + +from .util import has_input_commodity, wildcard -from message_ix_models.util import convert_units, make_matched_dfs, same_node, same_time +if TYPE_CHECKING: + from sdmx.model.common import Code -from .util import wildcard + from message_ix_models.model.transport import Config COMMON = dict( mode="all", @@ -33,6 +49,8 @@ def prepare_computer(c: genno.Computer): from genno.core.attrseries import AttrSeries + from .key import bcast_tcl, bcast_y, n, y + to_add = [] # Keys for ixmp-structured data to add to the target scenario k = genno.KeySeq("F") # Sequence of temporary keys for the present function @@ -43,9 +61,7 @@ def prepare_computer(c: genno.Computer): t_F_ROAD = "t::transport F ROAD" c.add(k[0], AttrSeries.expand_dims, "energy intensity of VDT:n-y", t_F_ROAD) # Broadcast over dimensions (c, l, y, yv, ya) - prev = c.add( - k[1], "mul", k[0], "broadcast:t-c-l:transport+input", "broadcast:y-yv-ya" - ) + prev = c.add(k[1], "mul", k[0], bcast_tcl.input, bcast_y.model) # Convert input to MESSAGE data structure c.add(k[2], "as_message_df", prev, name="input", dims=DIMS, common=COMMON) @@ -69,13 +85,7 @@ def prepare_computer(c: genno.Computer): for par_name, base, ks, i in (("output", k_output[3] * nty, k_output, 3),): # Produce the full quantity for input/output efficiency - prev = c.add( - ks[i + 1], - "mul", - ks[i], - f"broadcast:t-c-l:transport+{par_name}", - "broadcast:y-yv-ya:all", - ) + prev = c.add(ks[i + 1], "mul", ks[i], getattr(bcast_tcl, par_name), bcast_y.all) # Convert to ixmp/MESSAGEix-structured pd.DataFrame # NB quote() is necessary with dask 2024.11.0, not with earlier versions @@ -109,39 +119,109 @@ def prepare_computer(c: genno.Computer): # Base values for conversion technologies prev = c.add("F usage output:t:base", "freight_usage_output", "context") # Broadcast from (t,) to (t, c, l) dimensions - prev 
= c.add(k[6], "mul", prev, "broadcast:t-c-l:transport+output") + prev = c.add(k[6], "mul", prev, bcast_tcl.output) # Broadcast over the (n, yv, ya) dimensions - dim = dict(n=["*"], y=[None], ya=[None], yv=[None]) - prev = c.add(k[7], "expand_dims", prev, dim=dim) - prev = c.add(k[8], "broadcast_wildcard", prev, "n", dim="n") - prev = c.add(k[9], "broadcast", prev, "broadcast:y-yv-ya:no vintage") + d = tuple("tcl") + tuple("ny") + prev = c.add(k[7] * d, "expand_dims", prev, dim=dict(n=["*"], y=["*"])) + prev = c.add(k[8] * d, "broadcast_wildcard", prev, "n::ex world", dim="n") + prev = c.add(k[9] * d, "broadcast_wildcard", prev, "y::model", dim="y") + prev = c.add(k[10] * (d + ("ya", "yv")), "mul", prev, bcast_y.no_vintage) # Convert output to MESSAGE data structure - c.add(k[10], "as_message_df", prev, name="output", dims=DIMS, common=COMMON) + c.add(k[11], "as_message_df", prev, name="output", dims=DIMS, common=COMMON) to_add.append(f"usage output{Fi}") - c.add(to_add[-1], lambda v: same_time(same_node(v)), k[10]) + c.add(to_add[-1], lambda v: same_time(same_node(v)), k[11]) # Create corresponding input values in Gv km - prev = c.add(k[11], wildcard(1.0, "gigavehicle km", tuple("nty"))) - for i, coords in enumerate(["n::ex world", "t::F usage", "y::model"], start=11): + prev = c.add(k[12], wildcard(1.0, "gigavehicle km", tuple("nty"))) + for i, coords in enumerate(["n::ex world", "t::F usage", "y::model"], start=12): prev = c.add(k[i + 1], "broadcast_wildcard", k[i], coords, dim=coords[0]) - prev = c.add( - k[i + 2], - "mul", - prev, - "broadcast:t-c-l:transport+input", - "broadcast:y-yv-ya:no vintage", - ) + prev = c.add(k[i + 2], "mul", prev, bcast_tcl.input, bcast_y.no_vintage) prev = c.add( k[i + 3], "as_message_df", prev, name="input", dims=DIMS, common=COMMON ) to_add.append(f"usage input{Fi}") c.add(to_add[-1], prev) + # Constraint data + k_constraint = f"constraints{Fi}" + to_add.append(k_constraint) + c.add(k_constraint, constraint_data, "t::transport", 
n, y, "config") + # Merge data to one collection - k_all = "transport F::ixmp" + k_all = f"transport{Fi}" c.add(k_all, "merge_data", *to_add) # Append to the "add transport data" key c.add("transport_data", __name__, key=k_all) + + +def constraint_data( + t_all, nodes, years: list[int], genno_config: dict +) -> dict[str, pd.DataFrame]: + """Return constraints on growth of ACT and CAP_NEW for non-LDV technologies. + + Responds to the :attr:`.Config.constraint` keys :py:`"non-LDV *"`; see description + there. + """ + config: "Config" = genno_config["transport"] + + # Freight modes + modes = ["F ROAD", "F RAIL"] + + # Sets of technologies to constrain + # All technologies under the non-LDV modes + t_0: set["Code"] = set(filter(lambda t: t.parent and t.parent.id in modes, t_all)) + # Only the technologies that input c=electr + t_1: set["Code"] = set( + filter(partial(has_input_commodity, commodity="electr"), t_0) + ) + # Only the technologies that input c=gas + t_2: set["Code"] = set(filter(partial(has_input_commodity, commodity="gas"), t_0)) + + assert all(len(t) for t in (t_0, t_1, t_2)), "Technology groups are empty" + + common = dict(year_act=years, year_vtg=years, time="year", unit="-") + dfs = defaultdict(list) + + # Iterate over: + # 1. Parameter name + # 2. Set of technologies to be constrained. + # 3. A fixed value, if any, to be used. 
+ for name, techs, fixed_value in ( + # These 2 entries set: + # - 0 for the t_1 (c=electr) technologies + # - The value from config for all others + ("growth_activity_lo", list(t_0 - t_1), np.nan), + ("growth_activity_lo", list(t_1), 0.0), + # This 1 entry sets the value from config for all technologies + # ("growth_activity_lo", t_0, np.nan), + # This entry sets the value from config for certain technologies + ("growth_activity_up", list(t_1 | t_2), np.nan), + # For this parameter, no differentiation + ("growth_new_capacity_up", list(t_0), np.nan), + ): + # Use the fixed_value, if any, or a value from configuration + value = np.nan_to_num(fixed_value, nan=config.constraint[f"non-LDV {name}"]) + + # Assemble the data + dfs[name].append( + make_df(name, value=value, **common).pipe( + broadcast, node_loc=nodes, technology=techs + ) + ) + + # Add initial_* values corresponding to growth_{activity,new_capacity}_up, to + # set the starting point of dynamic constraints. + if name.endswith("_up"): + name_init = name.replace("growth", "initial") + value = config.constraint[f"non-LDV {name_init}"] + for n, df in make_matched_dfs(dfs[name][-1], **{name_init: value}).items(): + dfs[n].append(df) + + result = {k: pd.concat(v) for k, v in dfs.items()} + + assert not any(v.isna().any(axis=None) for v in result.values()), "Missing labels" + + return result diff --git a/message_ix_models/model/transport/ikarus.py b/message_ix_models/model/transport/ikarus.py index 10478c4418..43728ca96a 100644 --- a/message_ix_models/model/transport/ikarus.py +++ b/message_ix_models/model/transport/ikarus.py @@ -19,9 +19,9 @@ package_data_path, same_node, same_time, - series_of_pint_quantity, ) +from .key import bcast_tcl, bcast_y from .non_ldv import UNITS if TYPE_CHECKING: @@ -189,7 +189,7 @@ def read_ikarus_data(occupancy, k_output, k_inv_cost): output = registry.Quantity( occupancy[tec], "passenger / vehicle" ) * k_output.get(tec, 1.0) - df["output"] = series_of_pint_quantity([output] * 
len(df.index), index=df.index) + df["output"] = pd.Series([output] * len(df.index), index=df.index) df["inv_cost"] *= k_inv_cost.get(tec, 1.0) @@ -295,7 +295,7 @@ def prepare_computer(c: Computer): # Drop existing "c" dimension key = single_key(c.add(key / "c", "drop_vars", key, quote("c"))) # Fill (c, l) dimensions based on t - key = c.add(ks[5], "mul", key, "broadcast:t-c-l:transport+input") + key = c.add(ks[5], "mul", key, bcast_tcl.input) elif name == "technical_lifetime": # Round up technical_lifetime values due to incompatibility in handling # non-integer values in the GAMS code @@ -306,7 +306,7 @@ def prepare_computer(c: Computer): if name in ("fix_cost", "input", "var_cost"): # Broadcast across valid (yv, ya) pairs - key = c.add(ks[7], "mul", key, "broadcast:y-yv-ya") + key = c.add(ks[7], "mul", key, bcast_y.model) # Convert to target units try: diff --git a/message_ix_models/model/transport/key.py b/message_ix_models/model/transport/key.py index 6bafe47fa3..5ac39bbec4 100644 --- a/message_ix_models/model/transport/key.py +++ b/message_ix_models/model/transport/key.py @@ -1,9 +1,12 @@ """Keys to refer to various quantities.""" -from genno import Key +from types import SimpleNamespace + +from genno import Key, Keys, KeySeq + +from message_ix_models.report.key import GDP, PRICE_COMMODITY __all__ = [ - "PRICE_COMMODITY", "cg", "cost", "fv_cny", @@ -11,7 +14,6 @@ "gdp_cap", "gdp_index", "gdp_ppp", - "gdp", "ldv_cny", "ldv_ny", "ldv_nycg", @@ -24,9 +26,6 @@ "pdt_nyt", "pop_at", "pop", - "price_full", - "price_sel0", - "price_sel1", "price", "sw", "t_modes", @@ -34,14 +33,35 @@ ] # Existing keys, either from Reporter.from_scenario() or .build.add_structure() -gdp = Key("GDP", "ny") gdp_exo = Key("gdp", "ny") mer_to_ppp = Key("MERtoPPP", "ny") -PRICE_COMMODITY = Key("PRICE_COMMODITY", "nclyh") -price_full = PRICE_COMMODITY / ("h", "l") # Keys for new quantities +#: Quantities for broadcasting (t) to (t, c, l). See :func:`.broadcast_t_c_l`. 
+#: +#: - :py:`.input`: Quantity for broadcasting (all values 1) from every transport |t| +#: (same as ``t::transport``) to the :math:`(c, l)` that that technology receives as +#: input. +#: - :py:`.output`: same as above, but for the :math:`(c, l)` that the technology +#: produces as output. +bcast_tcl = Keys( + input="broadcast:t-c-l:transport+input", + output="broadcast:t-c-l:transport+output", +) + +#: Quantities for broadcasting (y) to (yv, ya). See :func:`.broadcast_y_yv_ya`. +#: +#: - :py:`.all`: Quantity for broadcasting (all values 1) from every |y| to every +#: possible combination of :math:`(y^V=y, y^A)`—including historical periods. +#: - :py:`.model`: same as above, but only model periods (``y::model``). +#: - :py:`.no_vintage`: same as above, but only the cases where :math:`y^V = y^A`. +bcast_y = Keys( + all="broadcast:y-yv-ya:all", + model="broadcast:y-yv-ya:model", + no_vintage="broadcast:y-yv-ya:no vintage", +) + #: Shares of population with consumer group (`cg`) dimension. cg = Key("cg share:n-y-cg") @@ -54,7 +74,7 @@ pop_at = pop * "area_type" #: GDP at purchasing power parity. -gdp_ppp = gdp + "PPP" +gdp_ppp = GDP + "PPP" #: :data:`.gdp_ppp` per capita. gdp_cap = gdp_ppp + "capita" @@ -83,9 +103,14 @@ #: technologies. pdt_nyt = _pdt * "t" -price_sel1 = price_full + "transport" -price = price_sel1 + "smooth" -price_sel0 = price_sel1 + "raw units" +#: Prices. +price = KeySeq(PRICE_COMMODITY / ("h", "l") + "transport") + +#: Keys for :mod:`.transport.report`. +report = SimpleNamespace( + all="transport all", + sdmx=Key("transport::sdmx"), +) sw = Key("share weight", "nty") diff --git a/message_ix_models/model/transport/ldv.py b/message_ix_models/model/transport/ldv.py index 06bb558e0e..271f038ec0 100644 --- a/message_ix_models/model/transport/ldv.py +++ b/message_ix_models/model/transport/ldv.py @@ -27,6 +27,7 @@ from . 
import files as exo from .data import MaybeAdaptR11Source from .emission import ef_for_input +from .key import bcast_tcl, bcast_y from .util import wildcard if TYPE_CHECKING: @@ -96,7 +97,6 @@ def prepare_computer(c: Computer): In both cases, :func:`constraint_data` is used to generate constraint data. """ from genno import Key - from genno.core.attrseries import AttrSeries from . import factor @@ -180,9 +180,7 @@ def prepare_computer(c: Computer): dim="nl", ) # Broadcast to all LDV technologies - # TODO Use a named operator like genno.operator.expand_dims, instead of the method - # of the AttrSeries class - c.add(f"{name}:nl-t-yv:LDV", AttrSeries.expand_dims, f"{name}:nl-yv:LDV", t_ldv) + c.add(f"{name}:nl-t-yv:LDV", "expand_dims", f"{name}:nl-yv:LDV", t_ldv) # Convert to MESSAGE data structure _add( c, @@ -213,13 +211,7 @@ def prepare_computer(c: Computer): _add(c, "constraints", constraint_data, "context") # Capacity factor _add( - c, - "capacity_factor", - capacity_factor, - exo.activity_ldv, - t_ldv, - "y", - "broadcast:y-yv-ya:all", + c, "capacity_factor", capacity_factor, exo.activity_ldv, t_ldv, "y", bcast_y.all ) # Calculate base-period CAP_NEW and historical_new_capacity (‘sales’) @@ -318,13 +310,7 @@ def prepare_tech_econ( c.add(ks[i], "extend_y", base, "y::LDV") # Produce the full quantity for input/output efficiency - prev = c.add( - ks[i + 1], - "mul", - ks[i], - f"broadcast:t-c-l:transport+{par_name}", - "broadcast:y-yv-ya:all", - ) + prev = c.add(ks[i + 1], "mul", ks[i], getattr(bcast_tcl, par_name), bcast_y.all) # Convert to ixmp/MESSAGEix-structured pd.DataFrame # NB quote() is necessary with dask 2024.11.0, not with earlier versions @@ -342,7 +328,7 @@ def prepare_tech_econ( "y::coords", kwargs=dict(fill_value="extrapolate"), ) - prev = c.add(f"{par_name}::LDV+1", "mul", prev, "broadcast:y-yv-ya:all") + prev = c.add(f"{par_name}::LDV+1", "mul", prev, bcast_y.all) _add( c, par_name, "as_message_df", prev, name=par_name, dims=DIMS, common=COMMON ) 
@@ -415,7 +401,7 @@ def capacity_factor( qty Input data, for instance from file :`ldv-activity.csv`, with dimension |n|. y_broadcast - The structure :py:`"broadcast:y-yv-va"`. + The structure :data:`bcast_y.model <.bcast_y>`. t_ldv The structure :py:`"t::transport LDV"`, mapping the key "t" to the list of LDV technologies. diff --git a/message_ix_models/model/transport/non_ldv.py b/message_ix_models/model/transport/non_ldv.py index 5c6f224a3e..354101de99 100644 --- a/message_ix_models/model/transport/non_ldv.py +++ b/message_ix_models/model/transport/non_ldv.py @@ -26,6 +26,7 @@ from . import files as exo from .emission import ef_for_input +from .util import has_input_commodity if TYPE_CHECKING: from message_ix_models import Context @@ -248,41 +249,28 @@ def _(nodes, technologies, y0, config: dict) -> Quantity: return [k["ixmp"]] -def _inputs(technology: Code, commodity: str) -> bool: - """Return :any:`True` if `technology` has an ‘input’ annotation with `commodity`. - - :func:`.filter` helper for sequences of technology codes. - """ - if input_info := technology.eval_annotation(id="input"): - return commodity in input_info["commodity"] - else: - return False - - def constraint_data( t_all, t_modes: list[str], nodes, years: list[int], genno_config: dict ) -> dict[str, pd.DataFrame]: - """Return constraints on growth of CAP_NEW for non-LDV technologies. + """Return constraints on growth of ACT and CAP_NEW for non-LDV technologies. Responds to the :attr:`.Config.constraint` keys :py:`"non-LDV *"`; see description there. 
""" config: Config = genno_config["transport"] - # Non-LDV modes passenger modes + # Non-LDV passenger modes modes = set(t for t in t_modes if t != "LDV") - # Freight modes - modes.add("F ROAD") # Lists of technologies to constrain # All technologies under the non-LDV modes t_0: set[Code] = set(filter(lambda t: t.parent and t.parent.id in modes, t_all)) # Only the technologies that input c=electr - t_1: set[Code] = set(filter(partial(_inputs, commodity="electr"), t_0)) + t_1: set[Code] = set(filter(partial(has_input_commodity, commodity="electr"), t_0)) # Aviation technologies only t_2: set[Code] = set(filter(lambda t: t.parent and t.parent.id == "AIR", t_all)) # Only the technologies that input c=gas - t_3: set[Code] = set(filter(partial(_inputs, commodity="electr"), t_0)) + t_3: set[Code] = set(filter(partial(has_input_commodity, commodity="gas"), t_0)) common = dict(year_act=years, year_vtg=years, time="year", unit="-") dfs = defaultdict(list) diff --git a/message_ix_models/model/transport/operator.py b/message_ix_models/model/transport/operator.py index 04c260a43c..d7fdb4e1c1 100644 --- a/message_ix_models/model/transport/operator.py +++ b/message_ix_models/model/transport/operator.py @@ -22,7 +22,6 @@ from message_ix_models.model.structure import get_codelist from message_ix_models.project.navigate import T35_POLICY from message_ix_models.report.operator import compound_growth -from message_ix_models.report.util import as_quantity from message_ix_models.util import ( MappingAdapter, datetime_now_with_tz, @@ -30,12 +29,14 @@ nodes_ex_world, show_versions, ) +from message_ix_models.util.genno import as_quantity from .config import Config if TYPE_CHECKING: from pathlib import Path + import sdmx.message from genno.types import AnyQuantity from message_ix import Scenario from xarray.core.types import Dims @@ -439,15 +440,6 @@ def duration_period(info: "ScenarioInfo") -> "AnyQuantity": ).pipe(unique_units_from_dim, "unit") -def expand_dims(qty: "AnyQuantity", dim, 
*args, **kwargs) -> "AnyQuantity": - """Like :meth:`.Quantity.expand_dims`. - - .. todo:: Move upstream, to :mod:`.genno`. - """ - kwargs.update(dim=dim) - return qty.expand_dims(*args, **kwargs) - - def extend_y(qty: "AnyQuantity", y: list[int], *, dim: str = "y") -> "AnyQuantity": """Extend `qty` along the dimension `dim` to cover all of `y`. @@ -501,7 +493,7 @@ def factor_fv(n: list[str], y: list[int], config: dict) -> "AnyQuantity": df.iloc[0, :] = 1.0 # NAVIGATE T3.5 "act" demand-side scenario - if T35_POLICY.ACT & config["transport"].project["navigate"]: + if T35_POLICY.ACT & config["transport"].project.get("navigate", T35_POLICY.REF): years = list(filter(lambda y: y <= 2050, y)) df.loc[years, "value"] = np.interp(years, [y[0], 2050], [1.0, 0.865]) @@ -548,7 +540,7 @@ def _not_disutility(tech): df.iloc[0, :] = 1.0 # NAVIGATE T3.5 "tec" demand-side scenario - if T35_POLICY.TEC & config["transport"].project["navigate"]: + if T35_POLICY.TEC & config["transport"].project.get("navigate", T35_POLICY.REF): years = list(filter(partial(gt, 2050), df.index)) # Prepare a dictionary mapping technologies to their respective EI improvement @@ -594,7 +586,7 @@ def factor_pdt(n: list[str], y: list[int], t: list[str], config: dict) -> "AnyQu df.iloc[0, :] = 1.0 # Handle particular scenarios - if T35_POLICY.ACT & config["transport"].project["navigate"]: + if T35_POLICY.ACT & config["transport"].project.get("navigate", T35_POLICY.REF): # NAVIGATE T3.5 "act" demand-side scenario years = list(filter(lambda y: y <= 2050, y)) df.loc[years, "LDV"] = np.interp(years, [y[0], 2050], [1.0, 0.8]) @@ -1235,3 +1227,91 @@ def write_report_debug(qty: "AnyQuantity", path: "Path", kwargs=None) -> None: ) operator.write_report(qty, path, kwargs) + + +def write_sdmx_data( + qty: "AnyQuantity", + structure_message: "sdmx.message.StructureMessage", + scenario: "ScenarioInfo", + path: "Path", + **kwargs, +) -> None: + """Write two files for `qty`. + + 1. 
:file:`{path}/{dataflow_id}.csv` —an SDMX-CSV :class:`.DataMessage` with the + values from `qty`. + 2. :file:`{path}/{dataflow_id}.xml` —an SDMX-ML :class:`.DataMessage` with the + values from `qty`. + + …where `dataflow_id` is the data flow ID constructed by :func:`.make_dataflow`. + + The `structure_message` is passed to :func:`.make_dataflow` and updated with the + given structures. + """ + import sdmx + from genno.compat.sdmx.operator import quantity_to_message + from sdmx.model import common + + from message_ix_models.util.sdmx import make_dataflow + + # Add a dataflow and related structures to `structure_message` + make_dataflow(**kwargs, message=structure_message) + dfd = tuple(structure_message.dataflow.values())[-1] + + # Convert `qty` to DataMessage + # FIXME Remove exclusion once upstream type hint is improved + dm = quantity_to_message(qty, structure=dfd.structure) # type: ignore [arg-type] + + # Identify the first/only data set in the message + ds = dm.data[0] + + # Add attribute values + for attr_id, value in ( + ("MODEL", scenario.model), + ("SCENARIO", scenario.scenario), + ("VERSION", scenario.version), + ("UNIT_MEASURE", f"{qty.units}"), + ): + ds.attrib[attr_id] = common.AttributeValue( + value=str(value), value_for=dfd.structure.attributes.get(attr_id) + ) + + # Write SDMX-ML + path.mkdir(parents=True, exist_ok=True) + path.joinpath(f"{dfd.id}.xml").write_bytes(sdmx.to_xml(dm, pretty_print=True)) + + # Convert to SDMX_CSV + # FIXME Remove this once sdmx1 supports it directly + # Fixed values in certain columns + fixed_cols = dict( + STRUCTURE="dataflow", STRUCTURE_ID=dfd.urn.split("=")[-1], ACTION="I" + ) + # SDMX-CSV column order + columns = list(fixed_cols) + columns.extend(dim.id for dim in dfd.structure.dimensions) + columns.extend(measure.id for measure in dfd.structure.measures) + + # Write SDMX-CSV + df = ( + qty.to_series() + .rename("value") + .reset_index() + .assign(**fixed_cols) + .reindex(columns=columns) + ) + 
df.to_csv(path.joinpath(f"{dfd.id}.csv"), index=False) + + +def write_sdmx_structures(structure_message, path: "Path", *args) -> "Path": + """Write `structure_message`. + + The message is written to :file:`{path}/structure.xml` in SDMX-ML format. + """ + import sdmx + + path.mkdir(parents=True, exist_ok=True) + path.joinpath("structure.xml").write_bytes( + sdmx.to_xml(structure_message, pretty_print=True) + ) + + return path diff --git a/message_ix_models/model/transport/plot.py b/message_ix_models/model/transport/plot.py index 1db698e49b..903d713718 100644 --- a/message_ix_models/model/transport/plot.py +++ b/message_ix_models/model/transport/plot.py @@ -12,6 +12,7 @@ from iam_units import registry from .key import gdp_cap, pdt_nyt +from .key import report as k_report if TYPE_CHECKING: from genno.core.key import KeyLike @@ -743,3 +744,5 @@ def prepare_computer(c: Computer): key = "transport plots" log.info(f"Add {repr(key)} collecting {len(keys)} plots") c.add(key, keys) + + c.graph[k_report.all].append(key) diff --git a/message_ix_models/model/transport/report.py b/message_ix_models/model/transport/report.py index ee604a82d5..8696287326 100644 --- a/message_ix_models/model/transport/report.py +++ b/message_ix_models/model/transport/report.py @@ -2,6 +2,7 @@ import logging from copy import deepcopy +from functools import partial from pathlib import Path from typing import TYPE_CHECKING, Any @@ -15,6 +16,7 @@ from message_ix_models.report.util import add_replacements from . import Config +from .key import pdt_nyt if TYPE_CHECKING: import ixmp @@ -25,154 +27,20 @@ log = logging.getLogger(__name__) -def check(scenario): - """Check that the transport model solution is complete. - - Parameters - ---------- - scenario : message_ix.Scenario - Scenario with solution. - - Returns - ------- - pd.Series - Index entries are str descriptions of checks. Values are :obj:`True` if the - respective check passes. 
- """ - # NB this is here to avoid circular imports - from message_ix_models.report import prepare_reporter, register - - register(callback) - rep, key = prepare_reporter(scenario, "global.yaml", "transport check") - return rep.get(key) - - -def aggregate(c: "Computer") -> None: - """Aggregate individual transport technologies to modes.""" - from genno.operator import aggregate as func - - config: Config = c.graph["config"]["transport"] - - for key in map(lambda s: KeySeq(c.infer_keys(s)), "emi in out".split()): - try: - # Reference the function to avoid the genno magic which would treat as sum() - # NB aggregation on the nl dimension *could* come first, but this can use a - # lot of memory when applied to e.g. out:*: for a full global model. - c.add(key[0], func, key.base, "t::transport agg", keep=False) - c.add(key[1], func, key[0], "nl::world agg", keep=False) - c.add(key["transport"], "select", key[1], "t::transport modes 1", sums=True) - except MissingKeyError: - if config.with_solution: - raise - - -def reapply_units(c: "Computer") -> None: - """Apply units to transport quantities. - - :func:`.ixmp.report.operator.data_for_quantity` drops units for most data extracted - from a MESSAGEix-GLOBIOM :class:`.Scenario`, because the data contain a mix of - inconsistent units. - - Here, add tasks to reapply units to selected subsets of data that are guaranteed to - have certain units. - """ - # TODO Infer these values from technology.yaml etc. 
- for base, (op, units) in { - # Vehicle stocks - # FIXME should not need the extra [vehicle] in the numerator - "CAP:nl-t-ya:non-ldv": ("apply", "v**2 Tm / a"), - "CAP:*:ldv": ("apply", "Mv"), - "CAP_NEW:*:ldv": ("apply", "Mv"), - # NB these units are correct for final energy only - "in:*:transport": ("apply", "GWa / a"), - "in:*:ldv": ("apply", "GWa / a"), - "out:*:transport": ("apply", "Tm / a"), - "out:*:ldv": ("apply", "Tm / a"), - # Units of ACT are not carried, so must correct here: - # - Add [time]: -1 - # - Remove [vehicle]: -1, [distance]: -1 - # - # When run together with global.yaml reporting, emi:* is assigned units of - # "Mt / year". Using apply_units() causes these to be *converted* to kt/a, i.e. - # increasing the magnitude; so use assign_units() instead. - "emi:*:transport": ("assign", "kt / a"), - }.items(): - key = c.infer_keys(base) - c.add(key + "units", f"{op}_units", key, units=units, sums=True) - - -SELECT = [ - "CAP_NEW", - "CAP", - "fix_cost", - "historical_new_capacity", - "in", - "input", - "inv_cost", - "out", - "var_cost", -] - - -def select_transport_techs(c: "Computer") -> None: - """Select subsets of transport technologies.""" - # Infer the full dimensionality of each key to be selected - for key in map(lambda name: c.infer_keys(f"{name}:*"), SELECT): - c.add(key + "transport all", "select", key, "t::transport all", sums=True) - c.add(key + "ldv", "select", key, "t::transport LDV", sums=True) - c.add(key + "non-ldv", "select", key, "t::transport non-ldv", sums=True) - - -# TODO Type c as (string) "Computer" once genno supports this -def add_iamc_store_write(c: Computer, base_key) -> "Key": - """Write `base_key` to CSV, XLSX, and/or both; and/or store on "scenario". - - If `base_key` is, for instance, "foo::iamc", this function adds the following keys: - - - "foo::iamc+all": both of: - - - "foo::iamc+file": both of: - - - "foo::iamc+csv": write the data in `base_key` to a file named :file:`foo.csv`. 
- - "foo::iamc+xlsx": write the data in `base_key` to a file named - :file:`foo.xlsx`. - - The files are created in a subdirectory using :func:`make_output_path`—that is, - including a path component given by the scenario URL. - - - "foo::iamc+store" store the data in `base_key` as time series data on the - scenario identified by the key "scenario". - - .. todo:: Move upstream, to :mod:`message_ix_models`. - """ - k = KeySeq(base_key) - - file_keys = [] - for suffix in ("csv", "xlsx"): - # Create the path - path = c.add( - k[f"{suffix} path"], - "make_output_path", - "config", - name=f"{k.base.name}.{suffix}", - ) - # Write `key` to the path - file_keys.append(c.add(k[suffix], "write_report", base_key, path)) - - # Write all files - c.add(k["file"], file_keys) - - # Store data on "scenario" - c.add(k["store"], "store_ts", "scenario", base_key) - - # Both write and store - return single_key(c.add(k["all"], [k["file"], k["store"]])) - - -# Units for final energy. This *exact* value (and not e.g. "EJ / year") is required for -# the legacy reporting to properly handle the result. -fe_unit = "EJ/yr" +#: Units for final energy. This *exact* value (and not e.g. "EJ / year") is required for +#: the legacy reporting to properly handle the result. +_FE_UNIT = "EJ/yr" +#: SDMX output data flows to generate. 
+DATAFLOW = ( + ("population_in", Key("pop:n-y")), + ("gdp_in", Key("gdp:n-y")), + ("activity_passenger", pdt_nyt), + # Same as "Energy Service|Transportation" IAMC variable + ("activity_vehicle", Key("out:nl-t-ya-c:transport+units")), + # Same as "Final Energy|Transportation" + ("fe_transport", Key("in:nl-t-ya-c:transport+units")), +) CONVERT_IAMC = ( # NB these are currently tailored to produce the variable names expected for the @@ -204,7 +72,7 @@ def add_iamc_store_write(c: Computer, base_key) -> "Key": base="in:nl-t-ya-c:transport+units", var=["Final Energy|Transportation", "t", "c"], sums=["c", "t", "c-t"], - unit=fe_unit, + unit=_FE_UNIT, ), dict( variable="transport fe ldv", @@ -243,76 +111,84 @@ def add_iamc_store_write(c: Computer, base_key) -> "Key": ) -def convert_iamc(c: "Computer") -> "Key": - """Add tasks from :data:`.CONVERT_IAMC`.""" - from message_ix_models.report import iamc as handle_iamc - from message_ix_models.report import util +#: Quantities in which to select transport technologies only. See +#: :func:`select_transport_techs`. +SELECT = [ + "CAP_NEW", + "CAP", + "fix_cost", + "historical_new_capacity", + "in", + "input", + "inv_cost", + "out", + "var_cost", +] - util.REPLACE_VARS.update({r"^CAP\|(Transport)": r"\1"}) - keys = [] - for info in CONVERT_IAMC: - handle_iamc(c, deepcopy(info)) - keys.append(f"{info['variable']}::iamc") +# TODO Type c as (string) "Computer" once genno supports this +def add_iamc_store_write(c: Computer, base_key) -> "Key": + """Write `base_key` to CSV, XLSX, and/or both; and/or store on "scenario". 
- # Concatenate IAMC-format tables - c.add("transport::iamc", "concat", *keys) + If `base_key` is, for instance, "foo::iamc", this function adds the following keys: - # Add tasks for writing IAMC-structured data to file and storing on the scenario - return single_key(c.apply(add_iamc_store_write, "transport::iamc")) + - "foo::iamc+all": both of: + - "foo::iamc+file": both of: -def misc(c: "Computer") -> None: - """Add miscellaneous tasks. + - "foo::iamc+csv": write the data in `base_key` to a file named :file:`foo.csv`. + - "foo::iamc+xlsx": write the data in `base_key` to a file named + :file:`foo.xlsx`. - Among others, these include: + The files are created in a subdirectory using :func:`make_output_path`—that is, + including a path component given by the scenario URL. - - ``calibrate fe`` → a file :file:`calibrate-fe.csv`. See the header comment. + - "foo::iamc+store": store the data in `base_key` as time series data on the + scenario identified by the key "scenario". + + .. todo:: Move upstream, to :mod:`message_ix_models`. """ - from . import files as exo + k = KeySeq(base_key) - config: "Config" = c.graph["config"]["transport"] + file_keys = [] + for suffix in ("csv", "xlsx"): + # Create the path + path = c.add( + k[f"{suffix} path"], + "make_output_path", + "config", + name=f"{k.base.name}.{suffix}", + ) + # Write `base_key` to the path + file_keys.append(c.add(k[suffix], "write_report", base_key, path)) - # Configuration for :func:`check`. Adds a single key, 'transport check', that - # depends on others and returns a :class:`pandas.Series` of :class:`bool`.
- c.add("transport check", "transport_check", "scenario", "ACT:nl-t-yv-va-m-h") + # Write all files + c.add(k["file"], file_keys) - # Exogenous data - c.add("distance:nl:non-ldv", "distance_nonldv", "config") + # Store data on "scenario" + c.add(k["store"], "store_ts", "scenario", base_key) - # Demand per capita - c.add("demand::capita", "divdemand:n-c-y", "population:n-y") + # Both write and store + return single_key(c.add(k["all"], [k["file"], k["store"]])) - # Adjustment factor for LDV calibration: fuel economy ratio - k_num = Key("in:nl-t-ya-c:transport+units") / "c" # As in CONVERT_IAMC - k_denom = Key("out:nl-t-ya-c:transport+units") / "c" # As in CONVERT_IAMC - k_check = single_key(c.add("fuel economy::check", "div", k_num, k_denom)) - c.add( - k_check + "sel", - "select", - k_check, - indexers=dict(t="LDV", ya=config.base_model_info.y0), - drop=True, - ) - k_ratio = single_key( - c.add("fuel economy::ratio", "div", exo.input_ref_ldv, k_check + "sel") - ) - c.add("calibrate fe path", "make_output_path", "config", name="calibrate-fe.csv") - hc = "\n\n".join( - [ - "Calibration factor for LDV fuel economy", - f"Ratio of ldv-fuel-economy-ref.csv\n to ({k_num} / {k_denom})", - "Units: dimensionless\n", - ] - ) - c.add( - "calibrate fe", - "write_report", - k_ratio, - "calibrate fe path", - kwargs=dict(header_comment=hc), - ) +def aggregate(c: "Computer") -> None: + """Aggregate individual transport technologies to modes.""" + from genno.operator import aggregate as func + + config: Config = c.graph["config"]["transport"] + + for key in map(lambda s: KeySeq(c.infer_keys(s)), "emi in out".split()): + try: + # Reference the function to avoid the genno magic which would treat as sum() + # NB aggregation on the nl dimension *could* come first, but this can use a + # lot of memory when applied to e.g. out:*: for a full global model. 
+ c.add(key[0], func, key.base, "t::transport agg", keep=False) + c.add(key[1], func, key[0], "nl::world agg", keep=False) + c.add(key["transport"], "select", key[1], "t::transport modes 1", sums=True) + except MissingKeyError: + if config.with_solution: + raise def callback(rep: Reporter, context: Context) -> None: @@ -325,17 +201,20 @@ def callback(rep: Reporter, context: Context) -> None: - ``transport plots``: the plots from :mod:`.transport.plot`. If the scenario to be reported is not solved, only a subset of plots are added. - - ``transport all``: all of the above. + - :data:`.key.report.all`: all of the above. """ - from . import base, build + from . import base, build, key N_keys = len(rep.graph) + # Collect all reporting tasks + rep.add(key.report.all, []) + # - Configure MESSAGEix-Transport. # - Add structure and other information. # - Call, inter alia: - # - demand.prepare_computer() for ex-post mode and demand calculations - # - plot.prepare_computer() for plots + # - demand.prepare_computer() for ex-post mode and demand calculations. + # - plot.prepare_computer() for plots; adds to key.report.all. 
check = build.get_computer( context, obj=rep, visualize=False, scenario=rep.graph.get("scenario") ) @@ -362,27 +241,36 @@ def callback(rep: Reporter, context: Context) -> None: select_transport_techs(rep) reapply_units(rep) misc(rep) - iamc_key = convert_iamc(rep) - - # Add tasks that prepare data to parametrize the MESSAGEix-GLOBIOM base model - base_key = base.prepare_reporter(rep) - - rep.add( - "transport all", - [ - # Use ths line to both store and write to file IAMC structured-data - iamc_key, - # Use this line for "transport::iamc+file" instead of "transport::iamc+all" - # iamc_key - "all" + "file", - "transport plots", - base_key, - ], - ) + convert_iamc(rep) # Adds to key.report.all + convert_sdmx(rep) # Adds to key.report.all + base.prepare_reporter(rep) # Tasks that prepare data to parametrize the base model log.info(f"Added {len(rep.graph) - N_keys} keys") # TODO Write an SVG visualization of reporting calculations +def check(scenario): + """Check that the transport model solution is complete. + + Parameters + ---------- + scenario : message_ix.Scenario + Scenario with solution. + + Returns + ------- + pd.Series + Index entries are str descriptions of checks. Values are :obj:`True` if the + respective check passes. + """ + # NB this is here to avoid circular imports + from message_ix_models.report import prepare_reporter, register + + register(callback) + rep, key = prepare_reporter(scenario, "global.yaml", "transport check") + return rep.get(key) + + def configure_legacy_reporting(config: dict) -> None: """Callback to configure the legacy reporting. 
@@ -434,6 +322,64 @@ config[group].append(t.id) +def convert_iamc(c: "Computer") -> None: + """Add tasks from :data:`.CONVERT_IAMC`.""" + from message_ix_models.report import iamc as handle_iamc + from message_ix_models.report import util + + from .key import report as k_report + + util.REPLACE_VARS.update({r"^CAP\|(Transport)": r"\1"}) + + keys = [] + for info in CONVERT_IAMC: + handle_iamc(c, deepcopy(info)) + keys.append(f"{info['variable']}::iamc") + + # Concatenate IAMC-format tables + k = Key("transport", tag="iamc") + c.add(k, "concat", *keys) + + # Add tasks for writing IAMC-structured data to file and storing on the scenario + c.apply(add_iamc_store_write, k) + + c.graph[k_report.all].append( + # Use this line to both store and write to file IAMC structured-data + k + "all" + # Use this line for "transport::iamc+file" instead of "transport::iamc+all" + # k + "file" + ) + + +def convert_sdmx(c: "Computer") -> None: + """Add tasks to convert data to SDMX.""" + from sdmx.message import StructureMessage + + from .key import report as k_report + from .operator import write_sdmx_data + + # Directory for SDMX output + dir = "dir::transport sdmx" + c.add(dir, "make_output_path", "config", name="sdmx") + + # Add a key that returns a reference to a shared StructureMessage + sm = "sdmx structure message" + c.add(sm, StructureMessage) + + # Write each quantity in DATAFLOW to .{csv,xml}; update the shared StructureMessage + keys = [] + args = [sm, "scenario", dir] # Common arguments + for id_, base in DATAFLOW: + keys.append(Key(id_, tag="sdmx")) + c.add(keys[-1], partial(write_sdmx_data, id=id_, dims=base.dims), base, *args) + + # Collect all the keys *then* write the collected structures to file + c.add(k_report.sdmx, "write_sdmx_structures", sm, dir, *keys) + + # Connect to the main report key + c.graph[k_report.all].append(k_report.sdmx) + + + def latest_reporting_from_file( info: ScenarioInfo, base_dir: Path ) ->
tuple[Any, int, pd.DataFrame]: @@ -522,6 +468,59 @@ def latest_reporting_from_platform( return None, -1, pd.DataFrame() +def misc(c: "Computer") -> None: + """Add miscellaneous tasks. + + Among others, these include: + + - ``calibrate fe`` → a file :file:`calibrate-fe.csv`. See the header comment. + """ + from . import files as exo + + config: "Config" = c.graph["config"]["transport"] + + # Configuration for :func:`check`. Adds a single key, 'transport check', that + # depends on others and returns a :class:`pandas.Series` of :class:`bool`. + c.add("transport check", "transport_check", "scenario", "ACT:nl-t-yv-va-m-h") + + # Exogenous data + c.add("distance:nl:non-ldv", "distance_nonldv", "config") + + # Demand per capita + c.add("demand::capita", "divdemand:n-c-y", "population:n-y") + + # Adjustment factor for LDV calibration: fuel economy ratio + k_num = Key("in:nl-t-ya-c:transport+units") / "c" # As in CONVERT_IAMC + k_denom = Key("out:nl-t-ya-c:transport+units") / "c" # As in CONVERT_IAMC + k_check = single_key(c.add("fuel economy::check", "div", k_num, k_denom)) + c.add( + k_check + "sel", + "select", + k_check, + indexers=dict(t="LDV", ya=config.base_model_info.y0), + drop=True, + ) + + k_ratio = single_key( + c.add("fuel economy::ratio", "div", exo.input_ref_ldv, k_check + "sel") + ) + c.add("calibrate fe path", "make_output_path", "config", name="calibrate-fe.csv") + hc = "\n\n".join( + [ + "Calibration factor for LDV fuel economy", + f"Ratio of ldv-fuel-economy-ref.csv\n to ({k_num} / {k_denom})", + "Units: dimensionless\n", + ] + ) + c.add( + "calibrate fe", + "write_report", + k_ratio, + "calibrate fe path", + kwargs=dict(header_comment=hc), + ) + + def multi(context: Context, targets): """Report outputs from multiple scenarios.""" import plotnine as p9 @@ -581,3 +580,50 @@ def multi(context: Context, targets): plot.save("debug.pdf") return data + + +def reapply_units(c: "Computer") -> None: + """Apply units to transport quantities. 
+ + :func:`.ixmp.report.operator.data_for_quantity` drops units for most data extracted + from a MESSAGEix-GLOBIOM :class:`.Scenario`, because the data contain a mix of + inconsistent units. + + Here, add tasks to reapply units to selected subsets of data that are guaranteed to + have certain units. + """ + # TODO Infer these values from technology.yaml etc. + for base, (op, units) in { + # Vehicle stocks + # FIXME should not need the extra [vehicle] in the numerator + "CAP:nl-t-ya:non-ldv": ("apply", "v**2 Tm / a"), + "CAP:*:ldv": ("apply", "Mv"), + "CAP_NEW:*:ldv": ("apply", "Mv"), + # NB these units are correct for final energy only + "in:*:transport": ("apply", "GWa / a"), + "in:*:ldv": ("apply", "GWa / a"), + "out:*:transport": ("apply", "Tm / a"), + "out:*:ldv": ("apply", "Tm / a"), + # Units of ACT are not carried, so must correct here: + # - Add [time]: -1 + # - Remove [vehicle]: -1, [distance]: -1 + # + # When run together with global.yaml reporting, emi:* is assigned units of + # "Mt / year". Using apply_units() causes these to be *converted* to kt/a, i.e. + # increasing the magnitude; so use assign_units() instead. + "emi:*:transport": ("assign", "kt / a"), + }.items(): + key = c.infer_keys(base) + c.add(key + "units", f"{op}_units", key, units=units, sums=True) + + +def select_transport_techs(c: "Computer") -> None: + """Select subsets of transport technologies. + + Applied to the quantities in :data:`SELECT`. 
+ """ + # Infer the full dimensionality of each key to be selected + for key in map(lambda name: c.infer_keys(f"{name}:*"), SELECT): + c.add(key + "transport all", "select", key, "t::transport all", sums=True) + c.add(key + "ldv", "select", key, "t::transport LDV", sums=True) + c.add(key + "non-ldv", "select", key, "t::transport non-ldv", sums=True) diff --git a/message_ix_models/model/transport/structure.py b/message_ix_models/model/transport/structure.py index f2b30e43fe..3bcff69ee4 100644 --- a/message_ix_models/model/transport/structure.py +++ b/message_ix_models/model/transport/structure.py @@ -1,5 +1,6 @@ from collections.abc import Sequence from copy import deepcopy +from itertools import chain from typing import Any, Union from sdmx.model.common import Annotation, Code @@ -50,9 +51,15 @@ def get_technology_groups( result: dict[str, list[str]] = {"non-ldv": []} + # Recursively collect leaf IDs + def _leaf_ids(node) -> list[str]: + return list( + chain(*[_leaf_ids(c) if len(c.child) else (c.id,) for c in node.child]) + ) + # Only include those technologies with children for tech in filter(lambda t: len(t.child), t_list): - result[tech.id] = list(c.id for c in tech.child) + result[tech.id] = _leaf_ids(tech) # Store non-LDV technologies if tech.id != "LDV": result["non-ldv"].extend(result[tech.id]) diff --git a/message_ix_models/model/transport/testing.py b/message_ix_models/model/transport/testing.py index af72ea5182..6766fdd7ea 100644 --- a/message_ix_models/model/transport/testing.py +++ b/message_ix_models/model/transport/testing.py @@ -8,10 +8,10 @@ from typing import TYPE_CHECKING, Optional, Union import pytest -from message_ix import ModelError, Reporter, Scenario +from message_ix import Reporter, Scenario import message_ix_models.report -from message_ix_models import Context, ScenarioInfo +from message_ix_models import ScenarioInfo from message_ix_models.report.sim import add_simulated_solution from message_ix_models.testing import GHA, bare_res from 
message_ix_models.util import identify_nodes, silence_log @@ -24,6 +24,8 @@ import pint from genno import Computer + from message_ix_models import Context + log = logging.getLogger(__name__) # Common marks for transport code. Do not reuse keys that are less than the highest key @@ -42,10 +44,6 @@ condition=GHA and platform.system() == "Darwin" and not HAS_GRAPHVIZ, reason="Graphviz missing on macos-13 GitHub Actions runners", ), - "gh-281": pytest.mark.xfail( - raises=ModelError, - reason="Temporary, for https://github.com/iiasa/message-ix-models/pull/281", - ), 9: pytest.mark.xfail(reason="Missing R14 input data/config"), "gh-288": pytest.mark.xfail( reason="Temporary, for https://github.com/iiasa/message-ix-models/pull/288", @@ -83,7 +81,7 @@ def assert_units( def configure_build( - test_context: Context, + test_context: "Context", *, regions: str, years: str, @@ -104,7 +102,7 @@ def configure_build( def built_transport( request, - context: Context, + context: "Context", options: Optional[dict] = None, solved: bool = False, quiet: bool = True, @@ -144,6 +142,13 @@ def built_transport( log.info(f"Clone to '{model_name}/{request.node.name}'") result = scenario.clone(scenario=request.node.name, keep_solution=solved) + # DEBUG Dump the scenario to a temporary path + # si = ScenarioInfo(scenario) + # tmp_path = request.getfixturevalue("tmp_path") + # dump_path = tmp_path.joinpath(f"{si.path}.xlsx") + # log.info(f"Dump to {dump_path}") + # result.to_excel(dump_path) + if ( GHA and platform.system() == "Darwin" @@ -158,25 +163,51 @@ def built_transport( return result -def simulated_solution(request, context) -> Reporter: +def simulated_solution(request, context: "Context", build: bool) -> Reporter: """Return a :class:`.Reporter` with a simulated model solution. - The contents allow for fast testing of reporting code, without solving an actual - :class:`.Scenario`. 
- """ + The contents allow for performant testing of reporting code, without solving an + actual :class:`.Scenario`. + + Parameters + ---------- + build + If :any:`False`, do not run :func:`.transport.build.main`; load data for the + built scenario from a file like + :file:`message_ix_models/data/test/transport/MESSAGEix-Transport R12 YB a1b2c3_baseline.xlsx`. + """ # noqa: E501 + from message_ix_models.model import bare + from message_ix_models.report.sim import reporter_from_excel + from message_ix_models.util import package_data_path + from .report import callback - # Build the base model - scenario = built_transport(request, context, solved=False) + if build: + # Build the base model + scenario = built_transport(request, context, solved=False) - # Info about the built model - info = ScenarioInfo(scenario) + # Config object generated by built_transport() + config: "Config" = context.transport - config: "Config" = context.transport - technologies = config.spec.add.set["technology"] + # Info about the built model + info = ScenarioInfo(scenario) - # Create a reporter - rep = Reporter.from_scenario(scenario) + # Create a reporter + rep = Reporter.from_scenario(scenario) + + else: + # Create a Reporter with the contents of a file + model_name = bare.name(context, unique=True).replace("-GLOBIOM", "-Transport") + path = package_data_path("test", "transport", f"{model_name}_baseline.xlsx") + rep = reporter_from_excel(path) + + # Ensure a Config object + config = Config.from_context(context) + + # Retrieve the ScenarioInfo generated in reporter_from_excel() + info = rep.graph["scenario info"] + + technologies = config.spec.add.set["technology"] # Add simulated solution data # TODO expand diff --git a/message_ix_models/model/transport/util.py b/message_ix_models/model/transport/util.py index 958dba379f..e868d2a76c 100644 --- a/message_ix_models/model/transport/util.py +++ b/message_ix_models/model/transport/util.py @@ -12,10 +12,22 @@ import numbers from genno.types 
import AnyQuantity + from sdmx.model.common import Code log = logging.getLogger(__name__) +def has_input_commodity(technology: "Code", commodity: str) -> bool: + """Return :any:`True` if `technology` has an ‘input’ annotation with `commodity`. + + :func:`.filter` helper for sequences of technology codes. + """ + if input_info := technology.eval_annotation(id="input"): + return commodity in input_info["commodity"] + else: + return False + + def path_fallback(context_or_regions: Union[Context, str], *parts) -> Path: """Return a :class:`.Path` constructed from `parts`. diff --git a/message_ix_models/project/edits/__init__.py b/message_ix_models/project/edits/__init__.py index e5b38d8dbb..955606d4a0 100644 --- a/message_ix_models/project/edits/__init__.py +++ b/message_ix_models/project/edits/__init__.py @@ -132,7 +132,7 @@ def coords_to_codelists( ) -> list["Codelist"]: """Convert the coordinates of `qty` to a collection of :class:`.Codelist`. - .. todo:: Move upstream, to :mod:`genno`. + .. todo:: Move upstream, to :mod:`genno.compat.sdmx`. """ result = [] diff --git a/message_ix_models/report/compat.py b/message_ix_models/report/compat.py index 3f9f31c450..2a3da5c8de 100644 --- a/message_ix_models/report/compat.py +++ b/message_ix_models/report/compat.py @@ -260,8 +260,17 @@ def assert_dims(c: "Computer", *keys: Key): task, in order to ensure the key matches the dimensionality of the quantity that will result from the task. - .. todo:: Remove once handled upstream in :mod:`genno`. + .. deprecated:: 2025-02-17 + Handled upstream in :func:`genno.operator.add_binop` with genno ≥1.20. 
""" + from warnings import warn + + warn( + "message-ix-models.report.compat.assert_dims()", + DeprecationWarning, + stacklevel=2, + ) + for key in keys: task = c.graph[key] expected = Key.product("foo", *task[1:]) diff --git a/message_ix_models/report/key.py b/message_ix_models/report/key.py new file mode 100644 index 0000000000..d1c34c7926 --- /dev/null +++ b/message_ix_models/report/key.py @@ -0,0 +1,8 @@ +"""Keys for setting up reporting tasks.""" + +from genno import Key + +GDP = Key("GDP", "ny") + +# NB genno ≤ 1.27.1 is sensitive to the order +PRICE_COMMODITY = Key("PRICE_COMMODITY", "nclyh") diff --git a/message_ix_models/report/operator.py b/message_ix_models/report/operator.py index a92d84eef3..0d60c429d6 100644 --- a/message_ix_models/report/operator.py +++ b/message_ix_models/report/operator.py @@ -239,15 +239,25 @@ def from_url(url: str, cls=ixmp.TimeSeries) -> ixmp.TimeSeries: def quantity_from_iamc(qty: "AnyQuantity", variable: str) -> "AnyQuantity": """Extract data for a single measure from `qty` with (at least) dimensions v, u. - .. todo:: Move upstream, to either :mod:`ixmp` or :mod:`genno`. + .. deprecated:: 2025-02-17 + Use :func:`.genno.compat.pyam.operator.quantity_from_iamc` instead. Parameters ---------- variable : str Regular expression to match the ``v`` dimension of `qty`. 
""" + from warnings import warn + from genno.operator import relabel, select + warn( + "message-ix-models.report.operator.quantity_from_iamc(); use " + "genno.compat.pyam.operator.quantity_from_iamc() instead", + DeprecationWarning, + stacklevel=2, + ) + expr = re.compile(variable) variables, replacements = [], {} for var in qty.coords["v"].data: diff --git a/message_ix_models/report/sim.py b/message_ix_models/report/sim.py index 439e83f836..2134cc07b1 100644 --- a/message_ix_models/report/sim.py +++ b/message_ix_models/report/sim.py @@ -4,13 +4,15 @@ from collections import ChainMap, defaultdict from collections.abc import Mapping, Sequence from copy import deepcopy -from functools import lru_cache +from dataclasses import dataclass +from functools import cache, lru_cache, partial from pathlib import Path from typing import TYPE_CHECKING, Any, Optional, Union +import genno import pandas as pd from dask.core import quote -from genno import Key, KeyExistsError, Quantity +from genno import Key, KeyExistsError from message_ix import Reporter from pandas.api.types import is_scalar @@ -20,7 +22,9 @@ from message_ix_models.util.ixmp import rename_dims if TYPE_CHECKING: + from genno.types import AnyQuantity from message_ix.models import Item + from pandas import ExcelFile __all__ = [ "add_simulated_solution", @@ -32,6 +36,67 @@ log = logging.getLogger(__name__) +@dataclass +class MockScenario: + """Object to mock a :class:`.Scenario` with data from a :file:`.xlsx` file. + + For use with :func:`.reporter_from_excel`. 
+ """ + + _info: "ScenarioInfo" + _file: "ExcelFile" + + @cache + def cat(self, name: str, cat: str): + return ( + pd.read_excel(self._file, sheet_name=f"cat_{name}") + .query(f"type_{name} == {cat!r}")[name] + .to_list() + ) + + @cache + def par(self, name): + return pd.read_excel(self._file, sheet_name=name) + + def _par_as_qty(self, name, dims): + return genno.Quantity( + self.par(name).rename(columns=dims).set_index(list(dims.values()))["value"] + ) + + @cache + def set(self, name): + df = pd.read_excel(self._file, sheet_name=name) + return df.iloc[:, 0].to_list() if 1 == len(df.columns) else df + + def has_solution(self): + return True + + def vintage_and_active_years(self): + return None + + @cache + def par_list(self): + return ( + pd.read_excel(self._file, sheet_name="ix_type_mapping") + .query("ix_type == 'par'")["item"] + .to_list() + ) + + @cache + def set_list(self): + return ( + pd.read_excel(self._file, sheet_name="ix_type_mapping") + .query("ix_type == 'set'")["item"] + .to_list() + ) + + def __getattr__(self, name): + return getattr(self._info, name) + + def __hash__(self): + return hash(self._file) + + def dims_of(info: "Item") -> dict[str, str]: """Return a mapping from the full index names to short dimension IDs of `info`.""" return {d: rename_dims().get(d, d) for d in (info.dims or info.coords or [])} @@ -52,9 +117,43 @@ def to_simulate(): return result +def reporter_from_excel(path: "Path") -> "Reporter": + """Return a :class:`.Reporter` that provides its data from an Excel file. + + The file must be of the format generated by :meth:`.Scenario.to_excel`. + + .. todo:: Move upstream to a new method :meth:`ixmp.Reporter.from_excel`. 
+ """ + import pandas as pd + + from message_ix_models.util.ixmp import rename_dims + + rep = Reporter() + info = rep.graph["scenario info"] = ScenarioInfo(model="m", scenario="s") + ef = rep.graph["_file"] = pd.ExcelFile(path) + mock = rep.graph["scenario"] = MockScenario(info, ef) + + # Add tasks to retrieve sets from file + for set_name in mock.set_list(): + key = rename_dims().get(set_name, set_name) + rep.add(key, partial(mock.set, set_name)) + + # Add tasks to retrieve parameter data from file + for par_name in mock.par_list(): + dims = dims_of(to_simulate()[par_name]) + key = Key(par_name, list(dims.values())) + rep.add(key, partial(mock._par_as_qty, par_name, dims)) + + # Pre-populate some sets of `info` + for name in "commodity", "node", "year": + info.set[name] = rep.get(rename_dims()[name]) + + return rep + + def simulate_qty( name: str, dims: list[str], item_data: Union[dict, pd.DataFrame] -) -> Quantity: +) -> "AnyQuantity": """Return simulated data for item `name`. Parameters @@ -96,10 +195,10 @@ def simulate_qty( assert not df.isna().any().any() or df.isna().all().all(), data assert not df.duplicated().any(), f"Duplicate data for simulated {repr(name)}" - return Quantity(df.set_index(dims)["value"] if len(dims) else df, name=name) + return genno.Quantity(df.set_index(dims)["value"] if len(dims) else df, name=name) -def data_from_file(path: Path, *, name: str, dims: Sequence[str]) -> Quantity: +def data_from_file(path: Path, *, name: str, dims: Sequence[str]) -> "AnyQuantity": """Read simulated solution data for item `name` from `path`. For variables and equations (`name` in upper case), the file **must** have columns @@ -116,7 +215,7 @@ def data_from_file(path: Path, *, name: str, dims: Sequence[str]) -> Quantity: # instead of the index names from message_ix. 
cols = list(dims) + ["Val", "Marginal", "Lower", "Upper", "Scale"] - return Quantity( + return genno.Quantity( pd.read_csv(path, engine="pyarrow") .set_axis(cols, axis=1) .set_index(cols[:-5])["Val"], @@ -134,7 +233,7 @@ def data_from_file(path: Path, *, name: str, dims: Sequence[str]) -> Quantity: .set_index(cols[:-2]) ) # TODO pass units if they are unique - return Quantity(tmp["value"], name=name) + return genno.Quantity(tmp["value"], name=name) @minimum_version("message_ix 3.6") @@ -190,7 +289,7 @@ def add_simulated_solution( rep.add(key, data_from_file, p, name=name, dims=key.dims, sums=True) continue - if item_info.type == ItemType.SET: + if item_info.type == ItemType.SET and name not in rep: # Add the set elements from `info` rep.add(rename_dims().get(name, name), quote(info.set[name])) elif item_info.type in (ItemType.PAR, ItemType.VAR): diff --git a/message_ix_models/report/util.py b/message_ix_models/report/util.py index 99494c5a48..0b529e042a 100644 --- a/message_ix_models/report/util.py +++ b/message_ix_models/report/util.py @@ -1,13 +1,12 @@ import logging from collections.abc import Iterable -from typing import Optional, Union +from typing import Optional import pandas as pd from dask.core import quote -from genno import Key, Quantity +from genno import Key from genno.compat.pyam.util import collapse as genno_collapse from genno.core.key import single_key -from iam_units import registry from message_ix import Reporter from sdmx.model.v21 import Code @@ -70,25 +69,6 @@ } -def as_quantity(info: Union[dict, float, str]) -> Quantity: - """Convert values from a :class:`dict` to Quantity. - - .. todo:: move upstream, to :mod:`genno`. 
- """ - if isinstance(info, str): - q = registry.Quantity(info) - return Quantity(q.magnitude, units=q.units) - elif isinstance(info, float): - return Quantity(info) - elif isinstance(info, dict): - data = info.copy() - dim = data.pop("_dim") - unit = data.pop("_unit") - return Quantity(pd.Series(data).rename_axis(dim), units=unit) - else: - raise TypeError(type(info)) - - def collapse(df: pd.DataFrame, var=[]) -> pd.DataFrame: """Callback for the `collapse` argument to :meth:`~.Reporter.convert_pyam`. diff --git a/message_ix_models/testing/__init__.py b/message_ix_models/testing/__init__.py index 50374da8ad..0a6fed6be9 100644 --- a/message_ix_models/testing/__init__.py +++ b/message_ix_models/testing/__init__.py @@ -218,7 +218,7 @@ def bare_res(request, context: Context, solved: bool = False) -> message_ix.Scen context.scenario_info.update(model=model_name, scenario="baseline") base = bare.create_res(context) - log.info(f"bare_res: {base.set('node') = }") + log.info(f"base.set('node') = {' '.join(sorted(base.set('node')))}") if solved and not base.has_solution(): log.info("Solve") diff --git a/message_ix_models/testing/check.py b/message_ix_models/testing/check.py index 48ebcb6e89..2814d6e11e 100644 --- a/message_ix_models/testing/check.py +++ b/message_ix_models/testing/check.py @@ -11,8 +11,6 @@ import genno import pandas as pd -from message_ix_models.util.genno import insert - if TYPE_CHECKING: import pathlib @@ -378,18 +376,13 @@ def insert_checks( # Iterate over keys mentioned in `check_map` for key, checks in check_map.items(): - # Insert a task with apply_checks() as the callable - insert( - computer, - key, - partial( - apply_checks, - key=key, - # A collection of Check instances, including those specific to `key` and - # those from `check_common` - checks=tuple(checks) + tuple(check_common), - result_cb=result, - ), + # A collection of Check instances, including those specific to `key` and those + # from `check_common` + c = tuple(checks) + 
tuple(check_common) + # Insert a task with apply_checks() as the callable; move the existing task to + # "{key}+pre" + computer.insert( + key, partial(apply_checks, key=key, checks=c, result_cb=result), ... ) # Add a task at `target` that collects the outputs of every inserted call diff --git a/message_ix_models/tests/model/transport/__init__.py b/message_ix_models/tests/model/transport/__init__.py index e69de29bb2..2d885fd503 100644 --- a/message_ix_models/tests/model/transport/__init__.py +++ b/message_ix_models/tests/model/transport/__init__.py @@ -0,0 +1,13 @@ +from importlib.metadata import version + +import pytest + +if version("genno") < "1.28.0": + pytest.skip( + reason="""message_ix/ixmp v3.7.0 are tested with genno < 1.25, but these tests +need ≥ 1.28.0: + +- .model.transport.key imports genno.Keys +- .tests.model.transport.test_base imports genno.operator.random_qty()""", + allow_module_level=True, + ) diff --git a/message_ix_models/tests/model/transport/test_base.py b/message_ix_models/tests/model/transport/test_base.py index dd874d0a55..4b999a8e4c 100644 --- a/message_ix_models/tests/model/transport/test_base.py +++ b/message_ix_models/tests/model/transport/test_base.py @@ -5,8 +5,7 @@ import pandas as pd import pytest from genno import Computer, KeySeq -from genno.operator import relabel -from genno.testing import random_qty +from genno.operator import random_qty, relabel from message_ix_models.model.structure import get_codes from message_ix_models.model.transport.base import format_share_constraints, smooth diff --git a/message_ix_models/tests/model/transport/test_build.py b/message_ix_models/tests/model/transport/test_build.py index 953dffa53a..c5e40b2023 100644 --- a/message_ix_models/tests/model/transport/test_build.py +++ b/message_ix_models/tests/model/transport/test_build.py @@ -9,7 +9,7 @@ from pytest import mark, param from message_ix_models.model.structure import get_codes -from message_ix_models.model.transport import build, demand, 
report, structure +from message_ix_models.model.transport import build, demand, key, report, structure from message_ix_models.model.transport.ldv import TARGET from message_ix_models.model.transport.testing import MARK, configure_build, make_mark from message_ix_models.testing import bare_res @@ -91,7 +91,7 @@ def test_make_spec(regions_arg, regions_exp, years): param("R11", "B", False, "IKARUS", False, marks=[mark.slow, MARK[1]]), param("R11", "B", False, "IKARUS", True, marks=[mark.slow, MARK[1]]), # R12, B - param("R12", "B", False, "IKARUS", True, marks=MARK["gh-281"]), + ("R12", "B", False, "IKARUS", True), # R14, A param( "R14", @@ -205,9 +205,19 @@ def test_build_existing(tmp_path, test_context, url, solve=False): ), ), "other::F+ixmp": (HasCoords({"technology": ["f rail electr"]}),), - "transport F::ixmp": ( + "transport::F+ixmp": ( ContainsDataForParameters( - {"capacity_factor", "input", "output", "technical_lifetime"} + { + "capacity_factor", + "growth_activity_lo", + "growth_activity_up", + "growth_new_capacity_up", + "initial_activity_up", + "initial_new_capacity_up", + "input", + "output", + "technical_lifetime", + } ), # HasCoords({"technology": ["f rail electr"]}), ), @@ -220,10 +230,9 @@ def test_build_existing(tmp_path, test_context, url, solve=False): "GDP:n-y:PPP+capita": (HasUnits("kUSD / passenger / year"),), "GDP:n-y:PPP+capita+index": (HasUnits(""),), "votm:n-y": (HasUnits(""),), - "PRICE_COMMODITY:n-c-y:transport+smooth": (HasUnits("USD / km"),), + key.price.base: (HasUnits("USD / km"),), "cost:n-y-c-t": (HasUnits("USD / km"),), - # These units are implied by the test of "transport pdt:*": - # "transport pdt:n-y:total" [=] Mm / year + demand.pdt_nyt + "0": (HasUnits("passenger km / year"),), demand.pdt_nyt + "1": (HasUnits("passenger km / year"),), demand.ldv_ny + "total": (HasUnits("Gp km / a"),), # FIXME Handle dimensionality instead of exact units @@ -282,6 +291,7 @@ def test_build_existing(tmp_path, test_context, url, solve=False): 
dict(regions="R11", years="B", options=dict(futures_scenario="debug")), dict(regions="R12", years="B"), dict(regions="R12", years="B", options=dict(navigate_scenario="act+ele+tec")), + dict(regions="R12", years="B", options=dict(project={"LED": True})), param(dict(regions="R14", years="B"), marks=MARK[9]), param(dict(regions="ISR", years="A"), marks=MARK[3]), ), @@ -310,8 +320,11 @@ def test_debug( k = "test_debug" result = insert_checks(c, k, CHECKS, common) + # Show and get a different key + # k = key.pdt_cny + # Show what will be computed - if verbosity == 2: + if verbosity: print(c.describe(k)) # Compute the test key diff --git a/message_ix_models/tests/model/transport/test_data.py b/message_ix_models/tests/model/transport/test_data.py index b5b6da28bd..42bfb1e6a1 100644 --- a/message_ix_models/tests/model/transport/test_data.py +++ b/message_ix_models/tests/model/transport/test_data.py @@ -2,7 +2,7 @@ import pytest from iam_units import registry -from message_ix_models.model.transport import build, non_ldv, testing +from message_ix_models.model.transport import build, freight, non_ldv, testing from message_ix_models.model.transport.CHN_IND import get_chn_ind_data, get_chn_ind_pop from message_ix_models.model.transport.roadmap import get_roadmap_data from message_ix_models.model.transport.testing import MARK, assert_units, make_mark @@ -52,11 +52,16 @@ def test_get_freight_data(test_context, regions="R12", years="B"): c, info = testing.configure_build(ctx, regions=regions, years=years) # Code runs - result = c.get("transport F::ixmp") + result = c.get(f"transport{freight.Fi}") # Data are provided for these parameters assert { "capacity_factor", + "growth_activity_lo", + "growth_activity_up", + "growth_new_capacity_up", + "initial_activity_up", + "initial_new_capacity_up", "input", "output", "technical_lifetime", diff --git a/message_ix_models/tests/model/transport/test_report.py b/message_ix_models/tests/model/transport/test_report.py index 
f2be57ab8a..ea3abd5f82 100644 --- a/message_ix_models/tests/model/transport/test_report.py +++ b/message_ix_models/tests/model/transport/test_report.py @@ -6,7 +6,7 @@ from pytest import mark, param from message_ix_models import ScenarioInfo -from message_ix_models.model.transport import build +from message_ix_models.model.transport import build, key from message_ix_models.model.transport.report import configure_legacy_reporting from message_ix_models.model.transport.testing import ( MARK, @@ -59,7 +59,7 @@ def test_configure_legacy(): "regions, years", ( param("R11", "A", marks=make_mark[2](ValueError)), - param("R12", "B", marks=MARK["gh-281"]), + ("R12", "B"), param("R14", "A", marks=MARK[9]), param("ISR", "A", marks=MARK[3]), ), @@ -106,10 +106,17 @@ def quiet_genno(caplog): @MARK[7] @build.get_computer.minimum_version @mark.usefixtures("quiet_genno") -def test_simulated_solution(request, test_context, regions="R12", years="B"): +@mark.parametrize( + "build", + ( + True, # Run .transport.build.main() + False, # Use data from an Excel export + ), +) +def test_simulated_solution(request, test_context, build, regions="R12", years="B"): """:func:`message_ix_models.report.prepare_reporter` works on the simulated data.""" test_context.update(regions=regions, years=years) - rep = simulated_solution(request, test_context) + rep = simulated_solution(request, test_context, build) # A quantity for a MESSAGEix variable was added and can be retrieved k = rep.full_key("ACT") @@ -119,11 +126,21 @@ def test_simulated_solution(request, test_context, regions="R12", years="B"): k = rep.full_key("out") rep.get(k) - # A quantity for message_data.model.transport can be computed + # A quantity for message_ix_models.model.transport can be computed k = "transport stock::iamc" result = rep.get(k) assert 0 < len(result) + # SDMX data for message_ix_models.project.edits can be computed + result = rep.get(key.report.sdmx) + + # The task returns the directory in which output is written + p 
= result + # Expected files are generated + assert p.joinpath("structure.xml").exists() + assert p.joinpath("DF_POPULATION_IN.csv").exists() + assert p.joinpath("DF_POPULATION_IN.xml").exists() + @build.get_computer.minimum_version @mark.usefixtures("quiet_genno") @@ -142,7 +159,7 @@ def test_plot_simulated(request, test_context, plot_name, regions="R12", years=" """Plots are generated correctly using simulated data.""" test_context.update(regions=regions, years=years) log.debug(f"test_plot_simulated: {test_context.regions = }") - rep = simulated_solution(request, test_context) + rep = simulated_solution(request, test_context, build=True) # print(rep.describe(f"plot {plot_name}")) # DEBUG @@ -158,7 +175,7 @@ def test_iamc_simulated( test_context.update(regions=regions, years=years) test_context.report.output_dir = test_context.get_local_path() - rep = simulated_solution(request, test_context) + rep = simulated_solution(request, test_context, build=True) # Key collecting both file output/scenario update # NB the trailing colons are necessary because of how genno handles report.yaml diff --git a/message_ix_models/tests/model/transport/test_util.py b/message_ix_models/tests/model/transport/test_util.py index 8e7579e8a4..7ceb3cab04 100644 --- a/message_ix_models/tests/model/transport/test_util.py +++ b/message_ix_models/tests/model/transport/test_util.py @@ -6,7 +6,7 @@ from iam_units import registry from message_ix_models.model.transport.config import Config, DataSourceConfig -from message_ix_models.report.util import as_quantity +from message_ix_models.util.genno import as_quantity @pytest.mark.xfail(reason="Refactoring") diff --git a/message_ix_models/tests/test_report.py b/message_ix_models/tests/test_report.py index ff61849b18..5046efaa2d 100644 --- a/message_ix_models/tests/test_report.py +++ b/message_ix_models/tests/test_report.py @@ -203,7 +203,8 @@ def test_apply_units(request, test_context, regions): reporter, key = prepare_reporter(test_context, bare_res) # 
Units are converted - df = reporter.get("Investment Cost::iamc").as_pandas() + key = "Investment Cost::iamc" + df = reporter.get(key).as_pandas() assert ["EUR_2005"] == df["unit"].unique() diff --git a/message_ix_models/tests/test_util.py b/message_ix_models/tests/test_util.py index 97da823f2a..f1ebe09692 100644 --- a/message_ix_models/tests/test_util.py +++ b/message_ix_models/tests/test_util.py @@ -37,7 +37,6 @@ replace_par_data, same_node, same_time, - series_of_pint_quantity, strip_par_data, ) @@ -148,11 +147,11 @@ def test_check_support(test_context): def test_convert_units(recwarn): - """:func:`.convert_units` and :func:`.series_of_pint_quantity` work.""" + """:func:`.convert_units` works.""" # Common arguments args = [pd.Series([1.1, 10.2, 100.3], name="bar"), dict(bar=(10.0, "lb", "kg"))] - exp = series_of_pint_quantity( + exp = pd.Series( [registry("4.9895 kg"), registry("46.2664 kg"), registry("454.9531 kg")], ) @@ -168,12 +167,14 @@ def test_convert_units(recwarn): exp = pd.Series([q.magnitude for q in exp.values], name="bar") assert_series_equal(exp, convert_units(*args, store="magnitude"), check_dtype=False) + N = len(recwarn) + # Other values for store= are errors with pytest.raises(ValueError, match="store = 'foo'"): convert_units(*args, store="foo") # series_of_pint_quantity() successfully caught warnings - assert 0 == len(recwarn) + assert N == len(recwarn) def test_copy_column(): diff --git a/message_ix_models/tests/util/test_sdmx.py b/message_ix_models/tests/util/test_sdmx.py index 3101bee3af..cf3e88d6c0 100644 --- a/message_ix_models/tests/util/test_sdmx.py +++ b/message_ix_models/tests/util/test_sdmx.py @@ -1,10 +1,17 @@ import logging import re +from typing import TYPE_CHECKING import pytest -from sdmx.model.v21 import Annotation, Code +import sdmx +from sdmx.model.common import Annotation, Code -from message_ix_models.util.sdmx import eval_anno, make_enum, read +from message_ix_models.util.sdmx import eval_anno, make_dataflow, make_enum, read 
+ +if TYPE_CHECKING: + from message_ix_models.types import MaintainableArtefactArgs + +log = logging.getLogger(__name__) def test_eval_anno(caplog, recwarn): @@ -31,6 +38,34 @@ def test_eval_anno(caplog, recwarn): assert 7 == eval_anno(c, id="qux") +@pytest.mark.parametrize( + "id_, dims, name", + ( + ("TEST", "t-c-e", None), + ("GDP", "n-y", None), + ("POPULATION", "n-y", None), + ("TRANSPORT_ACTIVITY", "n-y-t", None), + ("FE_TRANSPORT", "n-t-c", "Final energy use in transport"), + ), +) +def test_make_dataflow(tmp_path, test_context, id_, dims, name) -> None: + ma_kwargs: "MaintainableArtefactArgs" = dict() + + dims_tuple = tuple(dims.split("-")) + sm = make_dataflow(id_, dims_tuple, name, ma_kwargs, test_context) + + # Message contains the expected items + assert len(dims_tuple) == len(sm.codelist) # One codelist per item + assert {"CS_MESSAGE_IX_MODELS"} == set(sm.concept_scheme) + assert {f"DF_{id_}"} == set(sm.dataflow) + assert {f"DS_{id_}"} == set(sm.structure) + + path_out = tmp_path.joinpath("output.xml") + path_out.write_bytes(sdmx.to_xml(sm, pretty_print=True)) + + log.debug(path_out) + + def test_make_enum0(): """:func:`.make_enum` works with :class:`~enum.Flag` and subclasses.""" from enum import Flag, IntFlag diff --git a/message_ix_models/types.py b/message_ix_models/types.py index 7bd33fc302..1ab32baf8b 100644 --- a/message_ix_models/types.py +++ b/message_ix_models/types.py @@ -31,7 +31,7 @@ MutableParameterData = MutableMapping[str, pd.DataFrame] -class MaintainableArtefactArgs(TypedDict): +class MaintainableArtefactArgs(TypedDict, total=False): """Some keyword arguments to :class:`sdmx.model.common.MaintainableArtefact`.""" is_external_reference: Optional[bool] diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py index 3f0605e7f7..f734765102 100644 --- a/message_ix_models/util/__init__.py +++ b/message_ix_models/util/__init__.py @@ -20,7 +20,7 @@ import pint from platformdirs import user_cache_path -from 
._convert_units import convert_units, series_of_pint_quantity +from ._convert_units import convert_units from ._logging import mark_time, preserve_log_level, silence_log from .cache import cached from .common import ( @@ -82,7 +82,6 @@ "replace_par_data", "same_node", "same_time", - "series_of_pint_quantity", "show_versions", "silence_log", "strip_par_data", diff --git a/message_ix_models/util/_convert_units.py b/message_ix_models/util/_convert_units.py index 5bd89d7466..332418f809 100644 --- a/message_ix_models/util/_convert_units.py +++ b/message_ix_models/util/_convert_units.py @@ -2,7 +2,6 @@ from collections.abc import Mapping from functools import singledispatch from typing import TYPE_CHECKING, Any, Optional -from warnings import catch_warnings, filterwarnings import pandas as pd from iam_units import registry @@ -13,24 +12,6 @@ log = logging.getLogger(__name__) -def series_of_pint_quantity(*args, **kwargs) -> pd.Series: - """Suppress a spurious warning. - - Creating a :class:`pandas.Series` with a list of :class:`pint.Quantity` triggers a - warning “The unit of the quantity is stripped when downcasting to ndarray,” even - though the entire object is being stored and the unit is **not** stripped. This - function suppresses this warning. - """ - with catch_warnings(): - filterwarnings( - "ignore", - message="The unit of the quantity is stripped when downcasting to ndarray", - module="pandas.core.dtypes.cast", - ) - - return pd.Series(*args, **kwargs) - - @singledispatch def convert_units(data: Any, **kwargs): """Convert units of `data`. 
@@ -85,7 +66,7 @@ def _( # - Reassemble into a series with index matching `s` result = registry.Quantity(factor * data.values, unit_in).to(unit_out) - return series_of_pint_quantity( + return pd.Series( result.magnitude if store == "magnitude" else result.tolist(), index=data.index, dtype=(float if store == "magnitude" else object), diff --git a/message_ix_models/util/genno.py b/message_ix_models/util/genno.py index 467acd980f..f2c90a1a88 100644 --- a/message_ix_models/util/genno.py +++ b/message_ix_models/util/genno.py @@ -3,51 +3,36 @@ Most code appearing here **should** be migrated upstream, to genno itself. """ -import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Union if TYPE_CHECKING: - from genno import Computer - - from message_ix_models.types import KeyLike - - -log = logging.getLogger(__name__) - - -def insert(c: "Computer", key: "KeyLike", operation, tag: str = "pre") -> "KeyLike": - """Insert a task that performs `operation` on `key`. - - 1. The existing task at `key` is moved to a new key, ``{key}+{tag}``. - 2. A new task is inserted at `key` that performs `operation` on the output of the - original task. - - One way to use :func:`insert` is with a ‘pass-through’ `operation` that, for - instance, performs logging, assertions, or other steps, then returns its input - unchanged. In this way, all other tasks in the graph referring to `key` receive - exactly the same input as they would have previously, prior to the :func:`insert` - call. - - It is also possible to insert `operation` that mutates its input in certain ways. - - .. todo:: Migrate to :py:`genno.Computer.insert()` or similar. - - Returns - ------- - KeyLike - same as the `key` parameter. 
- """ - import genno - - # Determine a key for the task that to be shifted - k_pre = genno.Key(key) + tag - assert k_pre not in c - - # Move the existing task at `key` to `k_pre` - c.graph[k_pre] = c.graph.pop(key) - log.info(f"Move {key!r} to {k_pre!r}") - - # Add `operation` at `key`, operating on the output of the original task - c.graph[key] = (operation, k_pre) - - return key + from genno.types import AnyQuantity + +try: + from genno.operator import as_quantity +except ImportError: + # genno < 1.25, e.g. with message_ix/ixmp 3.7.0 + # TODO Remove when support for these upstream versions is dropped + + def as_quantity(info: Union[dict, float, str]) -> "AnyQuantity": + import genno + import pandas as pd + from iam_units import registry + + if isinstance(info, str): + q = registry.Quantity(info) + return genno.Quantity(q.magnitude, units=q.units) + elif isinstance(info, float): + return genno.Quantity(info) + elif isinstance(info, dict): + data = info.copy() + dim = data.pop("_dim") + unit = data.pop("_unit") + return genno.Quantity(pd.Series(data).rename_axis(dim), units=unit) + else: + raise TypeError(type(info)) + + +__all__ = [ + "as_quantity", +] diff --git a/message_ix_models/util/sdmx.py b/message_ix_models/util/sdmx.py index ffe82b5596..33c1eba37d 100644 --- a/message_ix_models/util/sdmx.py +++ b/message_ix_models/util/sdmx.py @@ -1,10 +1,12 @@ """Utilities for handling objects from :mod:`sdmx`.""" import logging -from collections.abc import Mapping +import re +from collections.abc import Mapping, Sequence from dataclasses import dataclass, fields from datetime import datetime from enum import Enum, Flag +from functools import cache from importlib.metadata import version from pathlib import Path from typing import TYPE_CHECKING, Optional, Union, cast @@ -12,8 +14,9 @@ import sdmx import sdmx.message +import sdmx.urn from iam_units import registry -from sdmx.model import common +from sdmx.model import common, v21 from .common import package_data_path @@ 
-23,6 +26,10 @@ from sdmx.message import StructureMessage + from message_ix_models.types import MaintainableArtefactArgs + + from .context import Context + # TODO Use "from typing import Self" once Python 3.11 is the minimum supported Self = TypeVar("Self", bound="AnnotationsMixIn") @@ -188,6 +195,209 @@ def by_urn(cls, urn: str): return cls[cls.__dict__["_urn_name"][urn]] +def get_cl(name: str, context: Optional["Context"] = None) -> "common.Codelist": + """Return a code list.""" + from message_ix_models.model.structure import get_codes + + id_ = None + if name == "NODE" and context: + name, id_ = f"node/{context.model.regions}", f"NODE_{context.model.regions}" + elif name == "YEAR" and context: + name, id_ = f"year/{context.model.years}", f"YEAR_{context.model.years}" + + name = name or name.lower() + id_ = id_ or name.upper() + + as_ = read("IIASA_ECE:AGENCIES") + cl: "common.Codelist" = common.Codelist( + id=f"CL_{id_}", + name=f"Codes for message-ix-models concept {name!r}", + maintainer=as_["IIASA_ECE"], + # FIXME remove str() once sdmx1 > 2.21.1 can handle Version + version=str(get_version()), + is_external_reference=False, + is_final=True, + ) + cl.urn = sdmx.urn.make(cl) + + try: + cl.extend(get_codes(name.lower())) + except FileNotFoundError: + pass + + return cl + + +@cache +def get_cs() -> "common.ConceptScheme": + """Return a scheme of common concepts for the MESSAGEix-GLOBIOM model family. + + The full artefact contains its own detailed description. + """ + from .ixmp import rename_dims + + cs = common.ConceptScheme( + id="CS_MESSAGE_IX_MODELS", + name="Concepts for message-ix-models", + description="""These include: + +1. Concepts used as dimensions in MESSAGE parameter data (see also :mod:`.structure`). +2. Concepts particular to variants of MESSAGEix-GLOBIOM, such as + :mod:`.model.transport`. + +Each concept in the concept scheme has: + +- An upper case :py:`.id`, for instance :py:`"TECHNOLOGY"`. 
+- An annotation with :py:`id="aliases"` which is the :func:`repr` of a :class:`set` + giving alternate labels understood to be equivalent. These include + :data:`ixmp.report.RENAME_DIMS`, for example :py:`"t"` for 'technology'.""", + maintainer=common.Agency(id="IIASA_ECE"), + version="1.0.0", + ) + + # Add concepts for MESSAGE sets/dimensions + for k, v in rename_dims().items(): + # Retrieve or create the Concept for the set (e.g. "year" for k="year_act") + set_name = k.split("_")[0] + concept = cs.setdefault( + id=set_name.upper(), + name=f"{set_name!r} MESSAGEix set", + annotations=[common.Annotation(id="aliases", text=repr(set()))], + ) + # Add `v` to the aliases annotation + anno = concept.get_annotation(id="aliases") + anno.text = repr(eval(str(anno.text)) | {v}) + + for c_id in "MODEL", "SCENARIO", "VERSION": + cs.setdefault( + id=c_id, + name=f"{c_id.lower()!r} ixmp scenario identifier", + description="""In the ixmp data model, scenario objects are identified by +unique keys including (model name, scenario name, version).""", + ) + + cs.setdefault( + id="UNIT_MEASURE", + name="Unit of measure", + description="Unit in which data values are expressed", + annotations=[ + common.Annotation( + id="same-as-urn", + text="urn:sdmx:org.sdmx.infomodel.conceptscheme.Concept=SDMX:CROSS_DOMAIN_CONCEPTS(2.0).UNIT_MEASURE", + ), + ], + ) + cs.setdefault( + id="URL", + name="ixmp scenario URL", + description="""URL combining the platform name (~database), model name, scenario +name, and version of an ixmp Scenario. 
See +https://docs.messageix.org/projects/ixmp/en/stable/api.html#ixmp.TimeSeries.url""", + ) + + return cs + + +@cache +def get_concept(string: str) -> "common.Concept": + """Retrieve a single Concept from :func:`get_cs`.""" + for concept in get_cs().items.values(): + labels = [concept.id] + list(concept.eval_annotation(id="aliases") or []) + if re.fullmatch("|".join(labels), string, flags=re.IGNORECASE): + return concept + raise ValueError(string) + + +def get_version() -> "common.Version": + """Return a :class:`sdmx.model.common.Version` for :mod:`message_ix_models`.""" + return common.Version(version(__package__.split(".")[0]).split("+")[0]) + + +def make_dataflow( + id: str, + dims: Sequence[str], + name: Optional[str] = None, + ma_kwargs: Optional["MaintainableArtefactArgs"] = None, + context: Optional["Context"] = None, + message: Optional["sdmx.message.StructureMessage"] = None, +) -> "sdmx.message.StructureMessage": + """Create and store an SDMX 2.1 DataflowDefinition (DFD) and related structures. + + Parameters + ---------- + id : + Partial ID of both the DFD and a related DataStructureDefinition (DSD). + dims : + IDs of the dimensions of the DSD. These may be short dimension IDs as used in + :mod:`message_ix.report`, for instance :py:`"t"` for the 'technology' dimension. + ma_kwargs : + Common keyword arguments for all SDMX MaintainableArtefacts created. + + Returns + ------- + sdmx.message.StructureMessage + …containing: + + - 1 :class:`.DataflowDefinition`. + - 1 :class:`.DataStructureDefinition`. + - 1 :class:`.ConceptScheme`, ``IIASA_ECE:CS_COMMON``. + - For each dimension indicated by `dims`, a :class:`Codelist`. 
+ """ + from sdmx import urn + + sm = message or sdmx.message.StructureMessage() + + if ma_kwargs is None: + ma_kwargs = {} + ma_kwargs.setdefault("maintainer", common.Agency(id="IIASA_ECE")) + ma_kwargs.setdefault("is_external_reference", False) + ma_kwargs.setdefault("is_final", True) + # FIXME remove str() once sdmx1 > 2.21.1 can handle Version + ma_kwargs.setdefault("version", str(get_version())) + + # Create the data structure definition + dsd = v21.DataStructureDefinition(id=f"DS_{id.upper()}", **ma_kwargs) + dsd.measures.getdefault(id="value") + sm.add(dsd) + + # Create the data flow definition + dfd = v21.DataflowDefinition(id=f"DF_{id.upper()}", **ma_kwargs, structure=dsd) + dfd.urn = urn.make(dfd) + if name: + dfd.description = name + sm.add(dfd) + + # Add the common concept scheme + sm.add(get_cs()) + + # Add dimensions to the DSD according to `dims` + for order, dim_id in enumerate(dims): + # Retrieve the dimension concept and its full ID + concept = get_concept(dim_id) + + # Create a code list for this dimension + cl = get_cl(concept.id, context=context) + sm.add(cl) + + # Create the dimension + dsd.dimensions.getdefault( + id=dim_id, + concept_identity=concept, + local_representation=common.Representation(enumerated=cl), + order=order, + ) + + # Add attributes + nsr = v21.NoSpecifiedRelationship() + for attr_id in "MODEL", "SCENARIO", "VERSION", "UNIT_MEASURE": + # Retrieve the attribute concept and its full ID + concept = get_concept(attr_id) + + dsd.attributes.getdefault(id=attr_id, concept_identity=concept, related_to=nsr) + + return sm + + def make_enum(urn, base=URNLookupEnum): """Create an :class:`.enum.Enum` (or `base`) with members from codelist `urn`.""" # Read the code list diff --git a/pyproject.toml b/pyproject.toml index c5e866e2bf..ecc45c1953 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ dependencies = [ # message_ix >= 3.4.0 → ixmp >= 3.4.0 → genno >= 1.6.0", "genno >= 1.24.0", "iam_units >= 2023.9.11", - 
"message_ix >= 3.6.0", + "message_ix >= 3.7.0", "pooch", "pyam-iamc >= 0.6", "pyarrow",