diff --git a/.github/workflows/pytest-snapshots.yaml b/.github/workflows/pytest-snapshots.yaml
index 1348cd4163..85240bad79 100644
--- a/.github/workflows/pytest-snapshots.yaml
+++ b/.github/workflows/pytest-snapshots.yaml
@@ -13,14 +13,16 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
+env:
+  # Versions of GAMS, Python, and upstream packages (ixmp, message_ix) to use
+  version-gams: "43.4.1"
+  version-python: "3.12"
+  version-upstream: main
+
jobs:
snapshots:
runs-on: ubuntu-latest
name: Test snapshots ubuntu-latest
- env:
- upstream-version: main
- python-version: "3.12"
- extra-deps: 'dask[dataframe]'
steps:
- name: Cache test data
@@ -33,17 +35,16 @@ jobs:
uses: actions/checkout@v4
with:
lfs: true
- fetch-depth: ${{ env.depth }}
- uses: actions/setup-python@v5
with:
- python-version: ${{ env.python-version }}
+        python-version: ${{ env.version-python }}
cache: pip
cache-dependency-path: "**/pyproject.toml"
- uses: iiasa/actions/setup-gams@main
with:
- version: 29.1.0
+        version: ${{ env.version-gams }}
license: ${{ secrets.GAMS_LICENSE }}
- uses: ts-graphviz/setup-graphviz@v2
@@ -52,7 +53,7 @@ jobs:
- name: Install packages and dependencies
# By default, install:
- # - ixmp, message_ix: from GitHub branches/tags per env.upstream-version (above)
+      # - ixmp, message_ix: from GitHub branches/tags per env.version-upstream (above)
# - other dependencies including genno: from PyPI.
#
# To test against unreleased code (on `main`, or other branches
@@ -60,11 +61,11 @@ jobs:
# as needed. DO NOT merge such changes to `main`.
run: |
# pip install --upgrade "genno @ git+https://github.com/khaeru/genno.git@main"
- pip install --upgrade "ixmp @ git+https://github.com/iiasa/ixmp.git@${{ env.upstream-version }}"
- # pip install --upgrade "message-ix @ git+https://github.com/iiasa/message_ix.git@${{ env.upstream-version }}"
+ pip install --upgrade "ixmp @ git+https://github.com/iiasa/ixmp.git@${{ env.version.upstream }}"
+ # pip install --upgrade "message-ix @ git+https://github.com/iiasa/message_ix.git@${{ env.version.upstream }}"
pip install --upgrade "message-ix @ git+https://github.com/iiasa/message_ix.git@issue/723"
- pip install .[docs,tests] ${{ env.extra-deps }}
+ pip install .[docs,tests] dask[dataframe]
- name: Configure local data path
run: |
@@ -86,4 +87,4 @@ jobs:
- name: Upload test coverage to Codecov.io
uses: codecov/codecov-action@v5
with:
- token: ${{ secrets.CODECOV_TOKEN }} # required
\ No newline at end of file
+ token: ${{ secrets.CODECOV_TOKEN }} # required
diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml
index 36ea581978..0d30b3a7dc 100644
--- a/.github/workflows/pytest.yaml
+++ b/.github/workflows/pytest.yaml
@@ -16,10 +16,10 @@ jobs:
warm-lfs-cache:
strategy:
matrix:
- os: [ macos-13, ubuntu-latest, windows-latest ]
+ os: [ macos-13, macos-latest, ubuntu-latest, windows-latest ]
runs-on: ${{ matrix.os }}
steps:
- - uses: nschloe/action-cached-lfs-checkout@v1
+ - uses: francisbilham11/action-cached-lfs-checkout@v3
pytest:
needs: warm-lfs-cache
@@ -28,52 +28,40 @@ jobs:
matrix:
os:
- macos-13
+ - macos-latest
- ubuntu-latest
- windows-latest
- upstream:
+ version:
# In each group:
# - Versions of ixmp and message_ix to test.
# - Latest supported Python version for those or other dependencies.
- # - Extra dependencies, in particular fixed/maximum versions to resolve conficts.
- # - dask[dataframe] >= 2024.3.0 requires dask-expr and in turn pandas >= 2.0.
- # https://github.com/iiasa/message-ix-models/pull/156#issuecomment-2020152360
- # - genno: upstream versions < 3.8.0 import genno.computations, removed in 1.25.0.
- # https://github.com/iiasa/message-ix-models/pull/156
- # - pytest: upstream versions < 3.9.0 use a hook argument removed in pytest 8.1.0.
- # https://github.com/iiasa/message-ix-models/pull/155
- #
- # Minimum version given in pyproject.toml
- - version: v3.4.0
- python-version: "3.11"
- extra-deps: '"dask < 2024.3.0" "genno < 1.25" "pandas < 2.0" "pytest == 8.0.0"' #
- dask-dataframe: false
- - version: v3.5.0
- python-version: "3.11"
- extra-deps: '"dask < 2024.3.0" "genno < 1.25" "pandas < 2.0" "pytest == 8.0.0"' #
- dask-dataframe: false
- - version: v3.6.0
- python-version: "3.11"
- extra-deps: '"dask < 2024.3.0" "genno < 1.25" "pandas < 2.0" "pytest == 8.0.0"' #
- dask-dataframe: false
- - version: v3.7.0
- python-version: "3.11"
- extra-deps: ' "genno < 1.25" "pytest == 8.0.0"' #
- dask-dataframe: true
- # Latest released version
- - version: v3.8.0
- python-version: "3.12"
- extra-deps: ' "pytest == 8.0.0"' #
- dask-dataframe: true
- # Development version
- - version: main
- python-version: "3.12"
- extra-deps: '' #
- dask-dataframe: true
+ # Minimum version given in pyproject.toml + earlier version of Python
+ - { upstream: v3.4.0, python: "3.11" } # 2022-01-27
+ - { upstream: v3.5.0, python: "3.11" } # 2022-05-06
+ - { upstream: v3.6.0, python: "3.11" } # 2022-08-18
+ - { upstream: v3.7.0, python: "3.11" } # 2023-05-17
+ - { upstream: v3.8.0, python: "3.12" } # 2024-01-12
+ # Latest released version + latest released Python
+ - { upstream: v3.9.0, python: "3.13" } # 2024-06-04
+ # Development version + latest released Python
+ - { upstream: main, python: "3.13" }
+
+ exclude:
+ # Specific version combinations that are invalid / not to be used
+          # These versions of ixmp are not able to locate the arm64 GAMS API binaries
+ - { os: macos-latest, version: {upstream: v3.4.0 }}
+ - { os: macos-latest, version: {upstream: v3.5.0 }}
+ - { os: macos-latest, version: {upstream: v3.6.0 }}
+ - { os: macos-latest, version: {upstream: v3.7.0 }}
+ - { os: macos-latest, version: {upstream: v3.8.0 }}
+ - { os: macos-latest, version: {upstream: v3.9.0 }}
+ # Redundant with macos-latest
+ - { os: macos-13, version: {upstream: main }}
fail-fast: false
runs-on: ${{ matrix.os }}
- name: ${{ matrix.os }}-py${{ matrix.upstream.python-version }}-upstream-${{ matrix.upstream.version }}
+ name: ${{ matrix.os }}-py${{ matrix.version.python }}-upstream-${{ matrix.version.upstream }}
steps:
- name: Cache test data
@@ -83,45 +71,59 @@ jobs:
key: ${{ matrix.os }}
- name: Check out message-ix-models
- uses: nschloe/action-cached-lfs-checkout@v1
+ uses: francisbilham11/action-cached-lfs-checkout@v3
- uses: actions/setup-python@v5
with:
- python-version: ${{ matrix.upstream.python-version }}
+ python-version: ${{ matrix.version.python }}
cache: pip
cache-dependency-path: "**/pyproject.toml"
- uses: iiasa/actions/setup-gams@main
with:
- version: 29.1.0
+ version: 43.4.1
license: ${{ secrets.GAMS_LICENSE }}
- uses: ts-graphviz/setup-graphviz@v2
- # TEMPORARY Work around ts-graphviz/setup-graphviz#630
- if: ${{ ! startswith(matrix.os, 'macos-') }}
+ # Work around ts-graphviz/setup-graphviz#630
+ if: ${{ matrix.os != 'macos-13' }}
+
+ - name: Determine extra dependencies
+ id: extra-deps
+      run: |
+ from os import environ
+ from pathlib import Path
+
+        # Map "main" to "vmain" so it sorts after all released "vX.Y.Z" tags in string comparison
+        v, result = "${{ matrix.version.upstream }}".replace("main", "vmain"), []
+ for condition, dependency in (
+ (v <= "v3.6.0", "dask < 2024.3.0"), # dask[dataframe] >= 2024.3.0 requires dask-expr and in turn pandas >= 2.0 (#156)
+ (v <= "v3.6.0", "pandas < 2.0"),
+ (v >= "v3.7.0", "dask[dataframe] < 2024.11.0"), # dask >= 2024.11.0 changes handling of dict (will be addressed in #225)
+ (v <= "v3.7.0", "genno < 1.25"), # Upstream versions < 3.8.0 import genno.computations, removed in 1.25.0 (#156)
+ (v < "v3.9.0", "pytest == 8.0.0"), # Upstream versions < 3.9.0 use a hook argument removed in pytest 8.1.0 (#155)
+ ):
+ result.extend([f'"{dependency}"'] if condition else [])
+
+ Path(environ["GITHUB_OUTPUT"]).write_text(f"value={' '.join(result)}\n")
+ shell: python
- name: Install packages and dependencies
# By default, install:
- # - ixmp, message_ix: from GitHub branches/tags per matrix.upstream-version (above)
+ # - ixmp, message_ix: from GitHub branches/tags per matrix.version.upstream (above)
# - other dependencies including genno: from PyPI.
#
# To test against unreleased code (on `main`, or other branches
# for open PRs), temporarily uncomment, add, or edit lines below
# as needed. DO NOT merge such changes to `main`.
run: |
- # pip install --upgrade "genno @ git+https://github.com/khaeru/genno.git@main"
- pip install --upgrade "ixmp @ git+https://github.com/iiasa/ixmp.git@${{ matrix.upstream.version }}"
- pip install --upgrade "message-ix @ git+https://github.com/iiasa/message_ix.git@${{ matrix.upstream.version }}"
+ pip install --upgrade "ixmp @ git+https://github.com/iiasa/ixmp.git@${{ matrix.version.upstream }}"
+ pip install --upgrade "message-ix @ git+https://github.com/iiasa/message_ix.git@${{ matrix.version.upstream }}"
- pip install .[docs,tests] ${{ matrix.upstream.extra-deps }}
+ pip install .[docs,tests] ${{ steps.extra-deps.outputs.value }}
-
- - name: Install specific dask versions as workaround
- if: ${{ matrix.upstream.dask-dataframe }}
- run: |
- # TEMPORARY Work around dask v2024.11.0;
- # see https://github.com/khaeru/genno/issues/149
- pip install "dask[dataframe] < 2024.11.0"
+ # TEMPORARY With Python 3.13 pyam-iamc resolves to 1.3.1, which in turn
+ # limits pint < 0.17. Override.
+ pip install --upgrade pint
- name: Configure local data path
run: |
@@ -153,7 +155,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- with: { python-version: "3.12" }
+ with: { python-version: "3.13" }
- name: Force recreation of pre-commit virtual environment for mypy
if: github.event_name == 'schedule' # Comment this line to run on a PR
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7ca4179146..283e5f3b93 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,7 @@ repos:
language: python
entry: bash -c ". ${PRE_COMMIT_MYPY_VENV:-/dev/null}/bin/activate 2>/dev/null; mypy $0 $@; python -m pip list"
additional_dependencies:
- - mypy >= 1.11.0
+ - mypy >= 1.13.0
- plotnine
- pytest
- sdmx1
@@ -20,7 +20,7 @@ repos:
- "message-ix @ git+https://github.com/iiasa/message_ix.git@main"
args: ["."]
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.5.5
+ rev: v0.7.4
hooks:
- id: ruff
- id: ruff-format
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index bacb8da0a0..1235f2a20e 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -4,8 +4,10 @@ What's new
Next release
============
+- :mod:`message_ix_models` is tested and compatible with Python 3.13 (:pull:`250`).
+- Support for Python 3.8 is dropped (:pull:`250`), as it has reached end-of-life.
- Connect the water module to the cost module for cooling technologies (:pull:`245`).
-- Make setup of constraints for cooling technologies flexible and update solar csp tech. name (:pull:`242`).
+- Make setup of constraints for cooling technologies flexible and update solar csp tech. name (:pull:`242`).
- Fix the nexus/cooling function and add test for checking some input data (:pull:`236`).
- Add :doc:`/project/circeular` project code and documentation (:pull:`232`).
- Update water availability data and major code editing to allow a new test suite for the water module (:pull:`106`).
diff --git a/message_ix_models/model/build.py b/message_ix_models/model/build.py
index 303e85ab95..3858b207b9 100644
--- a/message_ix_models/model/build.py
+++ b/message_ix_models/model/build.py
@@ -1,5 +1,6 @@
import logging
-from typing import Callable, Dict, List, Mapping, Optional, Union
+from collections.abc import Callable, Mapping
+from typing import Optional, Union
import ixmp
import pandas as pd
@@ -77,7 +78,7 @@ def apply_spec( # noqa: C901
pass
maybe_check_out(scenario)
- dump: Dict[str, pd.DataFrame] = {} # Removed data
+ dump: dict[str, pd.DataFrame] = {} # Removed data
# Sort the list of sets by the number of dimensions; this places basic (non-indexed)
# sets first. Elements for these sets must be added before elements for indexed
@@ -166,7 +167,7 @@ def apply_spec( # noqa: C901
)
-def ellipsize(elements: List) -> str:
+def ellipsize(elements: list) -> str:
"""Generate a short string representation of `elements`.
If the list has more than 5 elements, only the first two and last two are shown,
diff --git a/message_ix_models/model/disutility.py b/message_ix_models/model/disutility.py
index 20bbb19ae8..0de792ae0c 100644
--- a/message_ix_models/model/disutility.py
+++ b/message_ix_models/model/disutility.py
@@ -1,9 +1,9 @@
import logging
from collections import defaultdict
+from collections.abc import Mapping, MutableMapping, Sequence
from copy import copy
from functools import partial
from itertools import product
-from typing import List, Mapping, MutableMapping, Sequence
import message_ix
import pandas as pd
@@ -166,10 +166,10 @@ def data_conversion(info, spec: Spec) -> MutableMapping[str, pd.DataFrame]:
)
# Use the spec to retrieve information
- technology: List[Code] = spec.add.set["technology"]
+ technology: list[Code] = spec.add.set["technology"]
# Data to return
- data0: Mapping[str, List[pd.DataFrame]] = defaultdict(list)
+ data0: Mapping[str, list[pd.DataFrame]] = defaultdict(list)
# Loop over conversion technologies
for t in technology:
diff --git a/message_ix_models/model/emissions.py b/message_ix_models/model/emissions.py
index d668b77929..592ab6d442 100644
--- a/message_ix_models/model/emissions.py
+++ b/message_ix_models/model/emissions.py
@@ -1,6 +1,6 @@
import logging
import re
-from typing import Optional, Tuple
+from typing import Optional
import pandas as pd
from genno import Quantity
@@ -153,7 +153,7 @@ def add_tax_emission(
scen.add_par(name, data)
-def split_species(unit_expr: str) -> Tuple[str, Optional[str]]:
+def split_species(unit_expr: str) -> tuple[str, Optional[str]]:
"""Split `unit_expr` to an expression without a unit mention, and maybe species."""
if match := re.fullmatch("(.*)(CO2|C)(.*)", unit_expr):
return f"{match.group(1)}{match.group(3)}", match.group(2)
diff --git a/message_ix_models/model/macro.py b/message_ix_models/model/macro.py
index 036368e316..80cc99e8dc 100644
--- a/message_ix_models/model/macro.py
+++ b/message_ix_models/model/macro.py
@@ -5,10 +5,11 @@
"""
import logging
+from collections.abc import Mapping
from functools import lru_cache
from itertools import product
from pathlib import Path
-from typing import TYPE_CHECKING, List, Literal, Mapping, Optional, Union
+from typing import TYPE_CHECKING, Literal, Optional, Union
import pandas as pd
@@ -29,7 +30,7 @@
def generate(
parameter: Literal["aeei", "config", "depr", "drate", "lotol"],
context: "Context",
- commodities: Union[List[str], List["Code"]] = COMMODITY,
+ commodities: Union[list[str], list["Code"]] = COMMODITY,
value: Optional[float] = None,
) -> pd.DataFrame:
"""Generate uniform data for one :mod:`message_ix.macro` `parameter`.
diff --git a/message_ix_models/model/material/build.py b/message_ix_models/model/material/build.py
index 0ccac30770..8747a9e507 100644
--- a/message_ix_models/model/material/build.py
+++ b/message_ix_models/model/material/build.py
@@ -1,5 +1,6 @@
import logging
-from typing import Any, Dict, Mapping
+from collections.abc import Mapping
+from typing import Any
import message_ix
import pandas as pd
@@ -206,7 +207,7 @@ def get_spec() -> Mapping[str, ScenarioInfo]:
def make_spec(regions: str, materials: str or None = SPEC_LIST) -> Spec:
- sets: Dict[str, Any] = dict()
+ sets: dict[str, Any] = dict()
materials = ["common"] if not materials else materials
# Overrides specific to regional versions
tmp = dict()
diff --git a/message_ix_models/model/material/data_aluminum.py b/message_ix_models/model/material/data_aluminum.py
index 2cc9110538..16b0edfb47 100644
--- a/message_ix_models/model/material/data_aluminum.py
+++ b/message_ix_models/model/material/data_aluminum.py
@@ -1,5 +1,5 @@
from collections import defaultdict
-from typing import Dict, Iterable, List
+from collections.abc import Iterable
import message_ix
import pandas as pd
@@ -69,7 +69,7 @@ def read_data_aluminum(
return data_alu, data_alu_rel, data_aluminum_ts
-def gen_data_alu_ts(data: pd.DataFrame, nodes: list) -> Dict[str, pd.DataFrame]:
+def gen_data_alu_ts(data: pd.DataFrame, nodes: list) -> dict[str, pd.DataFrame]:
"""
Generates time variable parameter data for aluminum sector
Parameters
@@ -330,7 +330,7 @@ def gen_data_alu_const(
glb_reg: str,
years: Iterable,
yv_ya: pd.DataFrame,
- nodes: List[str],
+ nodes: list[str],
):
results = defaultdict(list)
for t in config["technology"]["add"]:
@@ -544,7 +544,7 @@ def gen_mock_demand_aluminum(scenario: message_ix.Scenario) -> pd.DataFrame:
return demand2020_al
-def gen_data_alu_trade(scenario: message_ix.Scenario) -> Dict[str, pd.DataFrame]:
+def gen_data_alu_trade(scenario: message_ix.Scenario) -> dict[str, pd.DataFrame]:
results = defaultdict(list)
data_trade = pd.read_csv(
diff --git a/message_ix_models/model/material/data_methanol.py b/message_ix_models/model/material/data_methanol.py
index 4f0f08b43b..1e573917b2 100644
--- a/message_ix_models/model/material/data_methanol.py
+++ b/message_ix_models/model/material/data_methanol.py
@@ -1,5 +1,5 @@
from ast import literal_eval
-from typing import TYPE_CHECKING, Dict, List
+from typing import TYPE_CHECKING
import pandas as pd
import yaml
@@ -30,7 +30,7 @@
}
-def gen_data_methanol(scenario: "Scenario") -> Dict[str, pd.DataFrame]:
+def gen_data_methanol(scenario: "Scenario") -> dict[str, pd.DataFrame]:
"""
Generates data for methanol industry model
@@ -94,8 +94,8 @@ def gen_data_methanol(scenario: "Scenario") -> Dict[str, pd.DataFrame]:
def broadcast_nodes(
df_bc_node: pd.DataFrame,
df_final: pd.DataFrame,
- node_cols: List[str],
- node_cols_codes: Dict[str, pd.Series],
+ node_cols: list[str],
+ node_cols_codes: dict[str, pd.Series],
i: int,
) -> pd.DataFrame:
"""
@@ -105,8 +105,8 @@ def broadcast_nodes(
----------
df_bc_node: pd.DataFrame
df_final: pd.DataFrame
- node_cols: List[str]
- node_cols_codes: Dict[str, pd.Series]
+ node_cols: list[str]
+ node_cols_codes: dict[str, pd.Series]
i: int
"""
if len(node_cols) == 1:
@@ -153,8 +153,8 @@ def broadcast_nodes(
def broadcast_years(
df_bc_node: pd.DataFrame,
- yr_col_out: List[str],
- yr_cols_codes: Dict[str, List[str]],
+ yr_col_out: list[str],
+ yr_cols_codes: dict[str, list[str]],
col: str,
) -> pd.DataFrame:
"""
@@ -162,8 +162,8 @@ def broadcast_years(
Parameters
----------
df_bc_node: pd.DataFrame
- yr_col_out: List[str]
- yr_cols_codes: ict[str, List[str]]
+ yr_col_out: list[str]
+    yr_cols_codes: dict[str, list[str]]
col: str
"""
if len(yr_col_out) == 1:
diff --git a/message_ix_models/model/material/data_petro.py b/message_ix_models/model/material/data_petro.py
index e7fbe66263..7f64836f85 100644
--- a/message_ix_models/model/material/data_petro.py
+++ b/message_ix_models/model/material/data_petro.py
@@ -1,5 +1,4 @@
from collections import defaultdict
-from typing import List, Set
import message_ix
import pandas as pd
@@ -166,7 +165,7 @@ def get_demand_t1_with_income_elasticity(
def gen_data_petro_ts(
- data_petro_ts: pd.DataFrame, results: dict[list], tec_ts: Set[str], nodes: List[str]
+ data_petro_ts: pd.DataFrame, results: dict[list], tec_ts: set[str], nodes: list[str]
) -> None:
for t in tec_ts:
common = dict(
@@ -239,7 +238,7 @@ def assign_input_outpt(
rg: str,
global_region: str,
common: dict,
- nodes: List[str],
+ nodes: list[str],
) -> pd.DataFrame:
com = split[1]
lev = split[2]
@@ -295,7 +294,7 @@ def assign_input_outpt(
return df
-def broadcast_to_regions(df: pd.DataFrame, global_region: str, nodes: List[str]):
+def broadcast_to_regions(df: pd.DataFrame, global_region: str, nodes: list[str]):
if "node_loc" in df.columns:
if (
len(set(df["node_loc"])) == 1
diff --git a/message_ix_models/model/material/data_steel.py b/message_ix_models/model/material/data_steel.py
index ac875b872c..24f6f640e6 100644
--- a/message_ix_models/model/material/data_steel.py
+++ b/message_ix_models/model/material/data_steel.py
@@ -1,5 +1,5 @@
from collections import defaultdict
-from typing import Dict, Iterable, List
+from collections.abc import Iterable
import message_ix
import pandas as pd
@@ -119,7 +119,7 @@ def gen_mock_demand_steel(scenario: message_ix.Scenario) -> pd.DataFrame:
def gen_data_steel_ts(
- data_steel_ts: pd.DataFrame, results: Dict[str, list], t: str, nodes: List[str]
+ data_steel_ts: pd.DataFrame, results: dict[str, list], t: str, nodes: list[str]
):
common = dict(
time="year",
@@ -213,11 +213,11 @@ def gen_data_steel_ts(
def get_data_steel_const(
data_steel: pd.DataFrame,
- results: Dict[str, list],
+ results: dict[str, list],
params: Iterable,
t: str,
yv_ya: pd.DataFrame,
- nodes: List[str],
+ nodes: list[str],
global_region: str,
):
for par in params:
diff --git a/message_ix_models/model/material/data_util.py b/message_ix_models/model/material/data_util.py
index 6bf5a8ad89..7b41a402c4 100644
--- a/message_ix_models/model/material/data_util.py
+++ b/message_ix_models/model/material/data_util.py
@@ -1,6 +1,7 @@
import os
+from collections.abc import Mapping
from functools import lru_cache
-from typing import TYPE_CHECKING, Literal, Mapping
+from typing import TYPE_CHECKING, Literal
import ixmp
import message_ix
diff --git a/message_ix_models/model/structure.py b/message_ix_models/model/structure.py
index d694748350..427e5056d5 100644
--- a/message_ix_models/model/structure.py
+++ b/message_ix_models/model/structure.py
@@ -1,10 +1,10 @@
import logging
import re
from collections import ChainMap
+from collections.abc import Mapping, MutableMapping
from copy import copy
from functools import lru_cache
from itertools import product
-from typing import Dict, List, Mapping, MutableMapping, Tuple
import click
import pandas as pd
@@ -20,7 +20,7 @@
@lru_cache()
-def codelists(kind: str) -> List[str]:
+def codelists(kind: str) -> list[str]:
"""Return a valid IDs for code lists of `kind`.
Parameters
@@ -32,7 +32,7 @@ def codelists(kind: str) -> List[str]:
@lru_cache()
-def get_codes(name: str) -> List[Code]:
+def get_codes(name: str) -> list[Code]:
"""Return codes for the dimension/set `name` in MESSAGE-GLOBIOM scenarios.
The information is read from :file:`data/{name}.yaml`, e.g.
@@ -101,7 +101,7 @@ def get_codelist(name: str) -> Codelist:
@lru_cache()
-def get_region_codes(codelist: str) -> List[Code]:
+def get_region_codes(codelist: str) -> list[Code]:
"""Return the codes that are children of "World" in the specified `codelist`."""
nodes = get_codes(f"node/{codelist}")
return nodes[nodes.index(Code(id="World"))].child
@@ -109,7 +109,7 @@ def get_region_codes(codelist: str) -> List[Code]:
def generate_product(
data: Mapping, name: str, template: Code
-) -> Tuple[List[Code], Dict[str, xr.DataArray]]:
+) -> tuple[list[Code], dict[str, xr.DataArray]]:
"""Generates codes using a `template` by Cartesian product along ≥1 dimensions.
:func:`generate_set_elements` is called for each of the `dims`, and these values
diff --git a/message_ix_models/model/transport/data.py b/message_ix_models/model/transport/data.py
index f736a58e4a..2e9a223a29 100644
--- a/message_ix_models/model/transport/data.py
+++ b/message_ix_models/model/transport/data.py
@@ -120,7 +120,7 @@ def dummy_supply(technologies: List["Code"], info, config) -> Dict[str, pd.DataF
merge_data(
result,
make_source_tech(
- info, dict(commodity=c, level=level, technology=t, **common), **values
+ info, dict(commodity=c, level=level, technology=t) | common, **values
),
)
diff --git a/message_ix_models/model/transport/operator.py b/message_ix_models/model/transport/operator.py
index 8b96a8c05c..a2d62c70a7 100644
--- a/message_ix_models/model/transport/operator.py
+++ b/message_ix_models/model/transport/operator.py
@@ -989,8 +989,7 @@ def share_weight(
yC: Dict[Any, Any] = dict(y=cfg.year_convergence)
# Weights in y0 for all modes and nodes
- # NB here and below, with Python 3.9 one could do: dict(t=modes, n=nodes) | y0
- idx = dict(t=t_modes, n=nodes, **y0)
+ idx = dict(t=t_modes, n=nodes) | y0
w0 = share.sel(idx) / (cost.sel(idx).sel(c="transport", drop=True) ** lamda)
# Normalize to 1 across modes
@@ -1005,8 +1004,8 @@ def share_weight(
ref_nodes = cfg.share_weight_convergence[node]
# Indexers
- _1 = dict(n=node, **yC) # Same node, convergence year
- _2 = dict(n=ref_nodes, **y0) # Reference node(s), base year
+ _1 = dict(n=node) | yC # Same node, convergence year
+ _2 = dict(n=ref_nodes) | y0 # Reference node(s), base year
if ref_nodes:
# Ratio between this node's GDP in yC and the mean of the reference nodes'
diff --git a/message_ix_models/model/water/build.py b/message_ix_models/model/water/build.py
index 40978fea1f..c097d4cc75 100644
--- a/message_ix_models/model/water/build.py
+++ b/message_ix_models/model/water/build.py
@@ -1,6 +1,6 @@
import logging
+from collections.abc import Mapping
from functools import lru_cache, partial
-from typing import Mapping
import pandas as pd
from sdmx.model.v21 import Code
diff --git a/message_ix_models/model/water/data/demands.py b/message_ix_models/model/water/data/demands.py
index 912398886b..249f3d314e 100644
--- a/message_ix_models/model/water/data/demands.py
+++ b/message_ix_models/model/water/data/demands.py
@@ -1,7 +1,8 @@
"""Prepare data for adding demands"""
import os
-from typing import TYPE_CHECKING, Literal, Sequence, Union
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Literal, Union
import numpy as np
import pandas as pd
diff --git a/message_ix_models/model/water/utils.py b/message_ix_models/model/water/utils.py
index c6de5bd31b..aec2d486aa 100644
--- a/message_ix_models/model/water/utils.py
+++ b/message_ix_models/model/water/utils.py
@@ -2,7 +2,7 @@
from collections import defaultdict
from functools import lru_cache
from itertools import product
-from typing import Optional, Tuple
+from typing import Optional
import numpy as np
import pandas as pd
@@ -125,13 +125,13 @@ def func(row: pd.Series):
def map_yv_ya_lt(
- periods: Tuple[int, ...], lt: int, ya: Optional[int] = None
+ periods: tuple[int, ...], lt: int, ya: Optional[int] = None
) -> pd.DataFrame:
"""All meaningful combinations of (vintage year, active year) given `periods`.
Parameters
----------
- periods : Tuple[int, ...]
+ periods : tuple[int, ...]
A sequence of years.
lt : int, lifetime
ya : int, active year
diff --git a/message_ix_models/model/workflow.py b/message_ix_models/model/workflow.py
index 3b9da4b2e4..d5170afe7b 100644
--- a/message_ix_models/model/workflow.py
+++ b/message_ix_models/model/workflow.py
@@ -1,7 +1,7 @@
"""Common steps for workflows."""
from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Optional
from message_ix import Scenario
@@ -21,7 +21,7 @@ class Config(ConfigHelper):
#: Information on an optional, other scenario from which to copy demand data in
#: :func:`solve` using :func:`transfer_demands`. Default: empty, do nothing.
- demand_scenario: Dict = field(default_factory=dict)
+ demand_scenario: dict = field(default_factory=dict)
#: :obj:`True` to call :func:`.reserve_margin.res_marg.main` in :func:`solve`.
reserve_margin: bool = True
@@ -31,7 +31,7 @@ class Config(ConfigHelper):
#: To replicate the behaviour of the `macro_params` argument to
#: :meth:`.engage.ScenarioRunner.run`, which in turn sets the `convergence_issues`
#: argument to :meth:`.engage.ScenarioRunner.solve`, set max_adjustment to 0.1.
- solve: Dict[str, Any] = field(
+ solve: dict[str, Any] = field(
default_factory=lambda: dict(model="MESSAGE-MACRO", max_adjustment=0.2)
)
diff --git a/message_ix_models/project/edits/__init__.py b/message_ix_models/project/edits/__init__.py
index da9dca5039..1d960ba53f 100644
--- a/message_ix_models/project/edits/__init__.py
+++ b/message_ix_models/project/edits/__init__.py
@@ -1,6 +1,7 @@
import logging
import sys
-from typing import TYPE_CHECKING, Any, Callable, List, Optional
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, Optional
import pandas as pd
import sdmx
@@ -126,7 +127,7 @@ def generate_pasta_structures(
def coords_to_codelists(
qty: "AnyQuantity", *, id_transform: Optional[Callable] = str.upper, **kwargs
-) -> List["Codelist"]:
+) -> list["Codelist"]:
"""Convert the coordinates of `qty` to a collection of :class:`.Codelist`.
.. todo:: Move upstream, to :mod:`genno`.
diff --git a/message_ix_models/project/engage/workflow.py b/message_ix_models/project/engage/workflow.py
index be44304c48..bb987f2664 100644
--- a/message_ix_models/project/engage/workflow.py
+++ b/message_ix_models/project/engage/workflow.py
@@ -10,7 +10,7 @@
import logging
from copy import copy, deepcopy
from dataclasses import dataclass, field
-from typing import Dict, List, Literal, Optional, Tuple, Union
+from typing import Literal, Optional, Union
from iam_units import convert_gwp
from message_ix import Scenario
@@ -40,7 +40,7 @@ class PolicyConfig(Config):
label: str = ""
#: Which steps of the ENGAGE workflow to run. Empty list = don't run any steps.
- steps: List[int] = field(default_factory=lambda: [1, 2, 3])
+ steps: list[int] = field(default_factory=lambda: [1, 2, 3])
#: In :func:`step_1`, actual quantity of the carbon budget to be imposed , or the
#: value "calc", in which case the value is calculated from :attr:`label` by
@@ -49,16 +49,16 @@ class PolicyConfig(Config):
#: In :func:`step_3`, optional information on a second scenario from which to copy
#: ``tax_emission`` data.
- tax_emission_scenario: Dict = field(default_factory=dict)
+ tax_emission_scenario: dict = field(default_factory=dict)
#: In :func:`step_3`, emission types or categories (``type_emission``) for which to
#: apply values for ``tax_emission``.
- step_3_type_emission: List[str] = field(default_factory=lambda: ["TCE_non-CO2"])
+ step_3_type_emission: list[str] = field(default_factory=lambda: ["TCE_non-CO2"])
def calc_hist_cum_CO2(
context: Context, scen: Scenario, info: ScenarioInfo
-) -> Tuple[float, float]:
+) -> tuple[float, float]:
"""Calculate historic CO2 emissions.
Adapted from :meth:`.engage.scenario_runner.ScenarioRunner.calc_hist_cum_CO2`, with
diff --git a/message_ix_models/project/gea/data.py b/message_ix_models/project/gea/data.py
index 9a4c671280..6ebba7ac52 100644
--- a/message_ix_models/project/gea/data.py
+++ b/message_ix_models/project/gea/data.py
@@ -2,7 +2,7 @@
import logging
from functools import lru_cache
-from typing import TYPE_CHECKING, Dict, List, Set, Tuple
+from typing import TYPE_CHECKING
import genno
@@ -96,7 +96,7 @@ def transform(self, c: "Computer", base_key: genno.Key) -> genno.Key:
k = super().transform(c, base_key)
# TODO Incorporate the following
- def adapt_nodes(nodes: List["Code"]) -> Dict[str, str]:
+ def adapt_nodes(nodes: list["Code"]) -> dict[str, str]:
"""Convert `nodes` with IDs e.g. “R11_AFR” to a mapping.
From e.g. “AFR” to “R11_AFR”.
@@ -107,7 +107,7 @@ def adapt_nodes(nodes: List["Code"]) -> Dict[str, str]:
@lru_cache
-def get_model_scenario() -> Set[Tuple[str, str]]:
+def get_model_scenario() -> set[tuple[str, str]]:
"""Return a set of valid GEA (model name, scenario name) combinations.
These are read from :file:`data/gea/model-scenario.json`.
diff --git a/message_ix_models/project/navigate/__init__.py b/message_ix_models/project/navigate/__init__.py
index 67ed2a5684..ef2a5d5378 100644
--- a/message_ix_models/project/navigate/__init__.py
+++ b/message_ix_models/project/navigate/__init__.py
@@ -2,12 +2,13 @@
import logging
import operator
+from collections.abc import Generator, Mapping
from copy import deepcopy
from dataclasses import asdict, dataclass, field, replace
from enum import Flag, auto
from functools import lru_cache, reduce
from pathlib import Path
-from typing import Dict, Generator, List, Literal, Mapping, Optional, Union, cast
+from typing import Literal, Optional, Union, cast
import ixmp
import yaml
@@ -76,7 +77,7 @@ def parse(cls, value):
#: In the NAVIGATE workflow, the :attr:`demand_scenario` values (scenario info style,
#: a :class:`dict` of ``model`` name, ``scenario`` name, and optional ``version``) are
#: set in .navigate.workflow.generate().
-CLIMATE_POLICY: Dict[Optional[str], WfConfig] = {
+CLIMATE_POLICY: dict[Optional[str], WfConfig] = {
# Default
None: WfConfig(
reserve_margin=False,
@@ -161,7 +162,7 @@ def parse(cls, value):
}
-def _anno(names: str, climate_policy) -> List[Annotation]:
+def _anno(names: str, climate_policy) -> list[Annotation]:
"""Return the annotations given by `names` from :data:`_A`.
Shorthand function used to prepare :data:`EXTRA_SCENARIOS`.
@@ -201,7 +202,7 @@ def _anno(names: str, climate_policy) -> List[Annotation]:
@lru_cache()
-def _read() -> List[Code]:
+def _read() -> list[Code]:
"""Read the codes from the NAVIGATE workflow directory."""
workflow_dir = Path(ixmp.config.get("navigate workflow dir")).expanduser().resolve()
@@ -212,7 +213,7 @@ def _read() -> List[Code]:
_content = yaml.safe_load(f)
# Transform into a form intelligible by as_codes()
- content: Dict[str, Union[str, Code]] = {}
+ content: dict[str, Union[str, Code]] = {}
for item in _content:
if isinstance(item, str):
content[item] = Code(id=item, name=item)
@@ -265,7 +266,7 @@ class Config:
carbon_tax: float = 1000.0
#: Other scenario from which to copy historical time series data for reporting.
- copy_ts: Dict = field(default_factory=dict)
+ copy_ts: dict = field(default_factory=dict)
#: Target data structure for submission prep
dsd: Literal["iiasa-ece", "navigate"] = "navigate"
diff --git a/message_ix_models/report/__init__.py b/message_ix_models/report/__init__.py
index fd01006481..db6361dd3a 100644
--- a/message_ix_models/report/__init__.py
+++ b/message_ix_models/report/__init__.py
@@ -1,11 +1,12 @@
import logging
+from collections.abc import Callable
from contextlib import nullcontext
from copy import deepcopy
from functools import partial
from importlib import import_module
from operator import itemgetter
from pathlib import Path
-from typing import Callable, List, Optional, Tuple, Union
+from typing import Optional, Union
from warnings import warn
import genno.config
@@ -43,7 +44,7 @@ def _(c: Reporter, info):
#: List of callbacks for preparing the Reporter.
-CALLBACKS: List[Callable] = []
+CALLBACKS: list[Callable] = []
@genno.config.handles("iamc")
@@ -298,7 +299,7 @@ def prepare_reporter(
context: Context,
scenario: Optional[Scenario] = None,
reporter: Optional[Reporter] = None,
-) -> Tuple[Reporter, Key]:
+) -> tuple[Reporter, Key]:
"""Return a :class:`.Reporter` and `key` prepared to report a :class:`.Scenario`.
Parameters
diff --git a/message_ix_models/report/compat.py b/message_ix_models/report/compat.py
index 9e0d198b1b..3f9f31c450 100644
--- a/message_ix_models/report/compat.py
+++ b/message_ix_models/report/compat.py
@@ -1,9 +1,10 @@
"""Compatibility code that emulates legacy reporting."""
import logging
+from collections.abc import Mapping
from functools import partial
from itertools import chain, count
-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional
+from typing import TYPE_CHECKING, Any, Optional
from genno import Key, Quantity, quote
from genno.core.key import iter_keys, single_key
@@ -65,7 +66,7 @@ def anon(name: Optional[str] = None, dims: Optional[Key] = None) -> Key:
return result.append(*getattr(dims, "dims", []))
-def get_techs(c: "Computer", prefix: str, kinds: Optional[str] = None) -> List[str]:
+def get_techs(c: "Computer", prefix: str, kinds: Optional[str] = None) -> list[str]:
"""Return a list of technologies.
The list is assembled from lists in `c` with keys like "t::{prefix} {kind}",
@@ -88,7 +89,7 @@ def make_shorthand_function(
def func(
c: "Computer",
- technologies: List[str],
+ technologies: list[str],
*,
name: Optional[str] = None,
filters: Optional[dict] = None,
@@ -143,7 +144,7 @@ def func(
def eff(
c: "Computer",
- technologies: List[str],
+ technologies: list[str],
filters_in: Optional[dict] = None,
filters_out: Optional[dict] = None,
) -> Key:
@@ -203,7 +204,7 @@ def pe_w_ccs_retro(
return k6
-def prepare_techs(c: "Computer", technologies: List["Code"]) -> None:
+def prepare_techs(c: "Computer", technologies: list["Code"]) -> None:
"""Prepare sets of technologies in `c`.
For each `key` → `expr` in :data:`TECH_FILTERS` and each technology :class:`Code`
@@ -214,14 +215,14 @@ def prepare_techs(c: "Computer", technologies: List["Code"]) -> None:
These lists of technologies can be used directly or retrieve with :func:`get_techs`.
"""
- result: Mapping[str, List[str]] = {k: list() for k in TECH_FILTERS}
+ result: Mapping[str, list[str]] = {k: list() for k in TECH_FILTERS}
warned = set() # Filters that raise some kind of Exception
# Iterate over technologies
for t in technologies:
# Assemble information about `t` from its annotations
- info: Dict[str, Any] = dict(id=t.id)
+ info: dict[str, Any] = dict(id=t.id)
# Sector
info["sector"] = str(t.get_annotation(id="sector").text)
try:
diff --git a/message_ix_models/report/config.py b/message_ix_models/report/config.py
index 46eec60601..60385dc2fc 100644
--- a/message_ix_models/report/config.py
+++ b/message_ix_models/report/config.py
@@ -1,7 +1,7 @@
import logging
from dataclasses import InitVar, dataclass, field
from pathlib import Path
-from typing import TYPE_CHECKING, Dict, Optional, Union
+from typing import TYPE_CHECKING, Optional, Union
from message_ix_models.util import local_data_path, package_data_path
from message_ix_models.util.config import ConfigHelper
@@ -30,7 +30,7 @@ class Config(ConfigHelper):
cli_output: Optional[Path] = None
#: Configuration to be handled by :mod:`genno.config`.
- genno_config: Dict = field(default_factory=dict)
+ genno_config: dict = field(default_factory=dict)
#: Key for the Quantity or computation to report.
key: Optional["KeyLike"] = None
@@ -46,7 +46,7 @@ class Config(ConfigHelper):
#: Keyword arguments for :func:`.report.legacy.iamc_report_hackathon.report`, plus
#: the key "use", which should be :any:`True` if legacy reporting is to be used.
- legacy: Dict = field(default_factory=lambda: dict(use=False, merge_hist=True))
+ legacy: dict = field(default_factory=lambda: dict(use=False, merge_hist=True))
def __post_init__(self, from_file, _legacy) -> None:
self.use_file(from_file)
diff --git a/message_ix_models/report/operator.py b/message_ix_models/report/operator.py
index bc2a0b416f..ae8c4cc40e 100644
--- a/message_ix_models/report/operator.py
+++ b/message_ix_models/report/operator.py
@@ -3,7 +3,8 @@
import itertools
import logging
import re
-from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Set, Tuple, Union
+from collections.abc import Mapping
+from typing import TYPE_CHECKING, Any, Optional, Union
import ixmp
import pandas as pd
@@ -42,8 +43,8 @@
def codelist_to_groups(
- codes: List["Code"], dim: str = "n"
-) -> Mapping[str, Mapping[str, List[str]]]:
+ codes: list["Code"], dim: str = "n"
+) -> Mapping[str, Mapping[str, list[str]]]:
"""Convert `codes` into a mapping from parent items to their children.
The returned value is suitable for use with :func:`genno.operator.aggregate`.
@@ -87,7 +88,7 @@ def exogenous_data():
@exogenous_data.helper
def add_exogenous_data(
func, c: "Computer", *, context=None, source=None, source_kw=None
-) -> Tuple["Key", ...]:
+) -> tuple["Key", ...]:
"""Prepare `c` to compute exogenous data from `source`."""
from message_ix_models.tools.exo_data import prepare_computer
@@ -165,7 +166,7 @@ def make_output_path(config: Mapping, name: Union[str, "Path"]) -> "Path":
return config["output_dir"].joinpath(name)
-def model_periods(y: List[int], cat_year: pd.DataFrame) -> List[int]:
+def model_periods(y: list[int], cat_year: pd.DataFrame) -> list[int]:
"""Return the elements of `y` beyond the firstmodelyear of `cat_year`.
.. todo:: Move upstream, to :mod:`message_ix`.
@@ -213,7 +214,7 @@ def remove_ts(
# Non-weak references to objects to keep them alive
-_FROM_URL_REF: Set[Any] = set()
+_FROM_URL_REF: set[Any] = set()
def from_url(url: str, cls=ixmp.TimeSeries) -> ixmp.TimeSeries:
diff --git a/message_ix_models/report/plot.py b/message_ix_models/report/plot.py
index 51d405fb17..5ae08cff68 100644
--- a/message_ix_models/report/plot.py
+++ b/message_ix_models/report/plot.py
@@ -6,8 +6,9 @@
import logging
import re
+from collections.abc import Sequence
from datetime import datetime
-from typing import TYPE_CHECKING, List, Optional, Sequence
+from typing import TYPE_CHECKING, Optional
import genno.compat.plotnine
import pandas as pd
@@ -52,7 +53,7 @@ class MyPlot(Plot):
"""
#: 'Static' geoms: list of plotnine objects that are not dynamic.
- static: List["plotnine.typing.PlotAddable"] = [
+ static: list["plotnine.typing.PlotAddable"] = [
p9.theme(figure_size=(23.4, 16.5)), # A3 paper in landscape [inches]
# p9.theme(figure_size=(11.7, 8.3)), # A4 paper in landscape
]
@@ -73,7 +74,7 @@ class MyPlot(Plot):
#: List of regular expressions corresponding to :attr:`inputs`. These are passed as
#: the `expr` argument to :func:`.filter_ts` to filter the entire set of time series
#: data.
- inputs_regex: List[re.Pattern] = []
+ inputs_regex: list[re.Pattern] = []
@classmethod
def add_tasks(
diff --git a/message_ix_models/report/sim.py b/message_ix_models/report/sim.py
index 087362cef6..439e83f836 100644
--- a/message_ix_models/report/sim.py
+++ b/message_ix_models/report/sim.py
@@ -2,11 +2,11 @@
import logging
from collections import ChainMap, defaultdict
-from collections.abc import Mapping
+from collections.abc import Mapping, Sequence
from copy import deepcopy
from functools import lru_cache
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
+from typing import TYPE_CHECKING, Any, Optional, Union
import pandas as pd
from dask.core import quote
@@ -32,7 +32,7 @@
log = logging.getLogger(__name__)
-def dims_of(info: "Item") -> Dict[str, str]:
+def dims_of(info: "Item") -> dict[str, str]:
"""Return a mapping from the full index names to short dimension IDs of `info`."""
return {d: rename_dims().get(d, d) for d in (info.dims or info.coords or [])}
@@ -53,7 +53,7 @@ def to_simulate():
def simulate_qty(
- name: str, dims: List[str], item_data: Union[dict, pd.DataFrame]
+ name: str, dims: list[str], item_data: Union[dict, pd.DataFrame]
) -> Quantity:
"""Return simulated data for item `name`.
@@ -71,7 +71,7 @@ def simulate_qty(
data: Mapping = ChainMap(item_data, defaultdict(lambda: None))
# Arguments for pd.DataFrame constructor
- args: Dict[str, Any] = dict(data={})
+ args: dict[str, Any] = dict(data={})
# Flag if all values in `data` are scalars
all_scalar = True
@@ -141,7 +141,7 @@ def data_from_file(path: Path, *, name: str, dims: Sequence[str]) -> Quantity:
def add_simulated_solution(
rep: Reporter,
info: ScenarioInfo,
- data: Optional[Dict] = None,
+ data: Optional[dict] = None,
path: Optional[Path] = None,
):
"""Add a simulated model solution to `rep`.
diff --git a/message_ix_models/report/util.py b/message_ix_models/report/util.py
index 0826822679..99494c5a48 100644
--- a/message_ix_models/report/util.py
+++ b/message_ix_models/report/util.py
@@ -1,5 +1,6 @@
import logging
-from typing import Dict, Iterable, Optional, Union
+from collections.abc import Iterable
+from typing import Optional, Union
import pandas as pd
from dask.core import quote
@@ -19,7 +20,7 @@
#:
#: - Applied to whole strings along each dimension.
#: - These columns have :meth:`str.title` applied before these replacements.
-REPLACE_DIMS: Dict[str, Dict[str, str]] = {
+REPLACE_DIMS: dict[str, dict[str, str]] = {
"c": {
# in land_out, for CH4 emissions from GLOBIOM
"Agri_Ch4": "GLOBIOM|Emissions|CH4 Emissions Total",
diff --git a/message_ix_models/testing/__init__.py b/message_ix_models/testing/__init__.py
index 216dee7fda..653c507ad5 100644
--- a/message_ix_models/testing/__init__.py
+++ b/message_ix_models/testing/__init__.py
@@ -2,11 +2,11 @@
import os
import shutil
from base64 import b32hexencode
+from collections.abc import Generator
from copy import deepcopy
from pathlib import Path
from random import randbytes
from tempfile import TemporaryDirectory
-from typing import Generator
import message_ix
import pandas as pd
diff --git a/message_ix_models/tests/model/transport/test_ldv.py b/message_ix_models/tests/model/transport/test_ldv.py
index 578f63406b..bf1918fb5f 100644
--- a/message_ix_models/tests/model/transport/test_ldv.py
+++ b/message_ix_models/tests/model/transport/test_ldv.py
@@ -1,6 +1,6 @@
import logging
+from collections.abc import Mapping
from itertools import product
-from typing import List, Mapping, Tuple
import pandas as pd
import pytest
@@ -115,7 +115,7 @@ def include(arg):
# Information about returned parameters
# TODO Include unit checks, above, in this collection
- par_info: Mapping[str, Tuple[bool, List[int], int]] = {
+ par_info: Mapping[str, tuple[bool, list[int], int]] = {
"bound_new_capacity_lo": (False, [info.y0], 1),
"bound_new_capacity_up": (False, info.Y, 1),
"emission_factor": (True, None, None),
diff --git a/message_ix_models/tests/util/test_config.py b/message_ix_models/tests/util/test_config.py
index 409d5c1ddc..0d77cf21fb 100644
--- a/message_ix_models/tests/util/test_config.py
+++ b/message_ix_models/tests/util/test_config.py
@@ -1,5 +1,4 @@
from dataclasses import dataclass, field
-from typing import Type
import pytest
@@ -8,7 +7,7 @@
class TestConfigHelper:
@pytest.fixture
- def cls(self) -> Type:
+ def cls(self) -> type:
"""A class which inherits from ConfigHelper."""
@dataclass
@@ -20,7 +19,7 @@ class Config(ConfigHelper):
return Config
@pytest.fixture
- def cls2(self, cls) -> Type:
+ def cls2(self, cls) -> type:
"""A class with an attribute."""
@dataclass
diff --git a/message_ix_models/tests/util/test_sdmx.py b/message_ix_models/tests/util/test_sdmx.py
index febea39234..56c950e621 100644
--- a/message_ix_models/tests/util/test_sdmx.py
+++ b/message_ix_models/tests/util/test_sdmx.py
@@ -15,8 +15,9 @@ def test_eval_anno(caplog, recwarn):
c.annotations.append(Annotation(id="foo", text="bar baz"))
- with caplog.at_level(logging.DEBUG, logger="message_ix_models"), pytest.warns(
- DeprecationWarning
+ with (
+ caplog.at_level(logging.DEBUG, logger="message_ix_models"),
+ pytest.warns(DeprecationWarning),
):
assert "bar baz" == eval_anno(c, "foo")
diff --git a/message_ix_models/tools/costs/config.py b/message_ix_models/tools/costs/config.py
index d4023469a3..49bf4675dc 100644
--- a/message_ix_models/tools/costs/config.py
+++ b/message_ix_models/tools/costs/config.py
@@ -1,5 +1,5 @@
from dataclasses import dataclass, field
-from typing import List, Literal, Optional
+from typing import Literal, Optional
from message_ix_models import ScenarioInfo
@@ -99,12 +99,12 @@ def y0(self) -> int:
return self._info.y0
@property
- def Y(self) -> List[int]:
+ def Y(self) -> list[int]:
"""List of model periods."""
return self._info.Y
@property
- def seq_years(self) -> List[int]:
+ def seq_years(self) -> list[int]:
"""Similar to :attr:`Y`.
This list of periods differs in that it:
diff --git a/message_ix_models/tools/costs/projections.py b/message_ix_models/tools/costs/projections.py
index b9172a1f84..2462868d15 100644
--- a/message_ix_models/tools/costs/projections.py
+++ b/message_ix_models/tools/costs/projections.py
@@ -1,6 +1,6 @@
import logging
+from collections.abc import Mapping
from itertools import product
-from typing import Mapping, Tuple
import numpy as np
import pandas as pd
@@ -335,7 +335,7 @@ def _predict(df: pd.DataFrame) -> pd.Series:
def create_message_outputs(
df_projections: pd.DataFrame, config: "Config"
-) -> Tuple[pd.DataFrame, pd.DataFrame]:
+) -> tuple[pd.DataFrame, pd.DataFrame]:
"""Create MESSAGEix outputs for investment and fixed costs.
The returned data have the model periods given by :attr:`.Config.Y`.
@@ -522,7 +522,7 @@ def _compute_value(df: pd.DataFrame) -> pd.Series:
def create_iamc_outputs(
msg_inv: pd.DataFrame, msg_fix: pd.DataFrame
-) -> Tuple[pd.DataFrame, pd.DataFrame]:
+) -> tuple[pd.DataFrame, pd.DataFrame]:
"""Create IAMC outputs for investment and fixed costs.
Parameters
diff --git a/message_ix_models/tools/costs/regional_differentiation.py b/message_ix_models/tools/costs/regional_differentiation.py
index da071ab32e..8957475d96 100644
--- a/message_ix_models/tools/costs/regional_differentiation.py
+++ b/message_ix_models/tools/costs/regional_differentiation.py
@@ -1,7 +1,8 @@
import logging
+from collections.abc import Mapping
from functools import lru_cache
from itertools import product
-from typing import Literal, Mapping
+from typing import Literal
import numpy as np
import pandas as pd
diff --git a/message_ix_models/tools/exo_data.py b/message_ix_models/tools/exo_data.py
index 718f3ae836..ece3fc62ee 100644
--- a/message_ix_models/tools/exo_data.py
+++ b/message_ix_models/tools/exo_data.py
@@ -2,10 +2,11 @@
import logging
from abc import ABC, abstractmethod
+from collections.abc import Mapping
from copy import deepcopy
from operator import itemgetter
from pathlib import Path
-from typing import Any, Dict, List, Literal, Mapping, Optional, Tuple, Type
+from typing import Any, Literal, Optional
from genno import Computer, Key, Quantity, quote
@@ -32,7 +33,7 @@
MEASURES = ("GDP", "POP")
#: Known sources for data. Use :func:`register_source` to add to this collection.
-SOURCES: Dict[str, Type["ExoDataSource"]] = {}
+SOURCES: dict[str, type["ExoDataSource"]] = {}
class ExoDataSource(ABC):
@@ -50,7 +51,7 @@ class ExoDataSource(ABC):
#: Optional additional dimensions for the returned :class:`.Key`/:class:`.Quantity`.
#: If not set by :meth:`.__init__`, the dimensions are :math:`(n, y)`.
- extra_dims: Tuple[str, ...] = ()
+ extra_dims: tuple[str, ...] = ()
#: :any:`True` if :meth:`.transform` should aggregate data on the |n| dimension.
aggregate: bool = True
@@ -96,7 +97,7 @@ def __call__(self) -> Quantity:
"""
raise NotImplementedError
- def get_keys(self) -> Tuple[Key, Key]:
+ def get_keys(self) -> tuple[Key, Key]:
"""Return the target keys for the (1) raw and (2) transformed data.
Subclasses **may** override this method to provide different targets keys. In
@@ -172,7 +173,7 @@ def prepare_computer(
source_kw: Optional[Mapping] = None,
*,
strict: bool = True,
-) -> Tuple[Key, ...]:
+) -> tuple[Key, ...]:
"""Prepare `c` to compute GDP, population, or other exogenous data.
Check each :class:`ExoDataSource` in :data:`SOURCES` to determine whether it
@@ -279,7 +280,7 @@ def prepare_computer(
return tuple(keys)
-def register_source(cls: Type[ExoDataSource]) -> Type[ExoDataSource]:
+def register_source(cls: type[ExoDataSource]) -> type[ExoDataSource]:
"""Register :class:`.ExoDataSource` `cls` as a source of exogenous data."""
if cls.id in SOURCES:
raise ValueError(f"{SOURCES[cls.id]} already registered for id {cls.id!r}")
@@ -342,7 +343,7 @@ def iamc_like_data_for_query(
query: str,
*,
archive_member: Optional[str] = None,
- drop: Optional[List[str]] = None,
+ drop: Optional[list[str]] = None,
non_iso_3166: Literal["keep", "discard"] = "discard",
replace: Optional[dict] = None,
unique: str = "MODEL SCENARIO VARIABLE UNIT",
diff --git a/message_ix_models/tools/iea/web.py b/message_ix_models/tools/iea/web.py
index a6092e84ae..64e77bdd76 100644
--- a/message_ix_models/tools/iea/web.py
+++ b/message_ix_models/tools/iea/web.py
@@ -2,9 +2,10 @@
import logging
import zipfile
+from collections.abc import Iterable
from copy import copy
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional
+from typing import TYPE_CHECKING, Any, Optional
import pandas as pd
from genno import Quantity
@@ -198,7 +199,7 @@ def iea_web_data_for_query(
if path.suffix == ".TXT": # pragma: no cover
names_to_read.append(fwf_to_csv(path, progress=True))
- args: Dict[str, Any] = dict(header=None, names=DIMS + ["Value"])
+ args: dict[str, Any] = dict(header=None, names=DIMS + ["Value"])
else:
names_to_read.append(path)
args = dict(header=0, usecols=DIMS + ["Value"])
diff --git a/message_ix_models/tools/wb.py b/message_ix_models/tools/wb.py
index 2873b26f5d..2fea967269 100644
--- a/message_ix_models/tools/wb.py
+++ b/message_ix_models/tools/wb.py
@@ -2,8 +2,9 @@
import logging
from collections import defaultdict
+from collections.abc import MutableMapping
from functools import lru_cache
-from typing import TYPE_CHECKING, Dict, MutableMapping, Optional
+from typing import TYPE_CHECKING, Optional
import pandas as pd
@@ -18,7 +19,7 @@ def assign_income_groups( # noqa: C901
cl_node: "sdmx.model.common.Codelist",
cl_income_group: "sdmx.model.common.Codelist",
method: str = "population",
- replace: Optional[Dict[str, str]] = None,
+ replace: Optional[dict[str, str]] = None,
) -> None:
"""Annotate `cl_node` with income groups.
@@ -283,8 +284,8 @@ def urn_for(name: str) -> str:
def make_map(
- source: Dict[str, str], expand_key_urn: bool = True, expand_value_urn: bool = False
-) -> Dict[str, str]:
+ source: dict[str, str], expand_key_urn: bool = True, expand_value_urn: bool = False
+) -> dict[str, str]:
"""Prepare the :py:`replace` parameter of :func:`assign_income_groups`.
The result has one (`key`, `value`) for each in `source`.
diff --git a/message_ix_models/util/__init__.py b/message_ix_models/util/__init__.py
index 90263124e9..f0382a929c 100644
--- a/message_ix_models/util/__init__.py
+++ b/message_ix_models/util/__init__.py
@@ -1,23 +1,12 @@
import logging
from collections import ChainMap, defaultdict
+from collections.abc import Callable, Collection, Mapping, MutableMapping, Sequence
from datetime import datetime
from functools import partial, update_wrapper
from importlib.metadata import version
from itertools import count
from pathlib import Path
-from typing import (
- TYPE_CHECKING,
- Callable,
- Collection,
- Dict,
- List,
- Mapping,
- MutableMapping,
- Optional,
- Protocol,
- Sequence,
- Union,
-)
+from typing import TYPE_CHECKING, Optional, Protocol, Union
import message_ix
import pandas as pd
@@ -425,7 +414,7 @@ def make_io(src, dest, efficiency, on="input", **kwargs):
def make_matched_dfs(
base: Union[MutableMapping, pd.DataFrame], **par_value: Union[float, pint.Quantity]
-) -> Dict[str, pd.DataFrame]:
+) -> dict[str, pd.DataFrame]:
"""Return data frames derived from `base` for multiple parameters.
Creates one data frame per keyword argument.
@@ -471,7 +460,7 @@ def make_matched_dfs(
def make_source_tech(
info: Union[message_ix.Scenario, ScenarioInfo], common, **values
-) -> Dict[str, pd.DataFrame]:
+) -> dict[str, pd.DataFrame]:
"""Return parameter data for a ‘source’ technology.
The technology has no inputs; its output commodity and/or level are determined by
@@ -612,7 +601,7 @@ def marker(test_func):
def path_fallback(
*parts: Union[str, Path],
- where: Union[str, List[Union[str, Path]]] = "",
+ where: Union[str, list[Union[str, Path]]] = "",
) -> Path:
"""Locate a path constructed from `parts` found in the first of several directories.
@@ -783,7 +772,7 @@ def strip_par_data( # noqa: C901
set_name: str,
element: str,
dry_run: bool = False,
- dump: Optional[Dict[str, pd.DataFrame]] = None,
+ dump: Optional[dict[str, pd.DataFrame]] = None,
) -> int:
"""Remove `element` from `set_name` in scenario, optionally dumping to `dump`.
diff --git a/message_ix_models/util/_convert_units.py b/message_ix_models/util/_convert_units.py
index 815e97b464..fba96366c8 100644
--- a/message_ix_models/util/_convert_units.py
+++ b/message_ix_models/util/_convert_units.py
@@ -1,4 +1,5 @@
-from typing import Mapping, Optional, Tuple
+from collections.abc import Mapping
+from typing import Optional
from warnings import catch_warnings, filterwarnings
import pandas as pd
@@ -25,7 +26,7 @@ def series_of_pint_quantity(*args, **kwargs) -> pd.Series:
def convert_units(
s: pd.Series,
- unit_info: Mapping[str, Tuple[float, str, Optional[str]]],
+ unit_info: Mapping[str, tuple[float, str, Optional[str]]],
store="magnitude",
) -> pd.Series:
"""Convert units of `s`, for use with :meth:`~pandas.DataFrame.apply`.
diff --git a/message_ix_models/util/_logging.py b/message_ix_models/util/_logging.py
index 49f5a2cf6e..20543b8725 100644
--- a/message_ix_models/util/_logging.py
+++ b/message_ix_models/util/_logging.py
@@ -11,7 +11,7 @@
from datetime import datetime, timedelta, timezone
from queue import SimpleQueue
from time import process_time
-from typing import Dict, Optional, Union, cast
+from typing import Optional, Union, cast
from warnings import warn
# NB mark_time, preserve_log_level, and silence_log are exposed by util/__init__.py
@@ -26,7 +26,7 @@
log = logging.getLogger(__name__)
# References to handlers
-_HANDLER: Dict[str, logging.Handler] = dict()
+_HANDLER: dict[str, logging.Handler] = dict()
# For mark_time()
_TIMES = []
diff --git a/message_ix_models/util/cache.py b/message_ix_models/util/cache.py
index 5509d2350c..0b35d74244 100644
--- a/message_ix_models/util/cache.py
+++ b/message_ix_models/util/cache.py
@@ -12,8 +12,9 @@
import json
import logging
+from collections.abc import Callable
from dataclasses import asdict, is_dataclass
-from typing import TYPE_CHECKING, Callable, Set
+from typing import TYPE_CHECKING
import genno.caching
import ixmp
@@ -34,7 +35,7 @@
SKIP_CACHE = False
# Paths already logged, to decrease verbosity
-PATHS_SEEN: Set["Path"] = set()
+PATHS_SEEN: set["Path"] = set()
# Show genno how to hash function arguments seen in message_ix_models
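Aside (not part of the patch): the quoted "Path" above is a forward reference, presumably because Path is imported only under TYPE_CHECKING in this module, so the annotation must stay a string at runtime. A generic sketch of that pattern, with made-up names:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from pathlib import Path  # imported for type checking only

    SEEN: set["Path"] = set()  # quoted so the name need not exist at runtime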
diff --git a/message_ix_models/util/click.py b/message_ix_models/util/click.py
index c6194517c7..849d9da3d9 100644
--- a/message_ix_models/util/click.py
+++ b/message_ix_models/util/click.py
@@ -5,11 +5,12 @@
import logging
import sys
+from collections.abc import Callable, Mapping
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
-from typing import Callable, List, Literal, Mapping, Optional, Union, cast
+from typing import Literal, Optional, Union, cast
import click
import click.testing
@@ -230,9 +231,9 @@ def temporary_command(group: "click.Group", command: "click.Command"):
def urls_from_file(
context: Union[click.Context, Context], param, value
-) -> List[ScenarioInfo]:
+) -> list[ScenarioInfo]:
"""Callback to parse scenario URLs from `value`."""
- si: List[ScenarioInfo] = []
+ si: list[ScenarioInfo] = []
if value is None:
return si
@@ -416,7 +417,7 @@ def invoke_subprocess(self, *args, **kwargs) -> click.testing.Result:
import subprocess
assert 1 == len(args)
- all_args: List[str] = [sys.executable, "-m", self.cli_module, *args[0]]
+ all_args: list[str] = [sys.executable, "-m", self.cli_module, *args[0]]
# Run; capture in a subprocess.CompletedProcess
cp = subprocess.run(all_args, capture_output=True, env=self.env, **kwargs)
diff --git a/message_ix_models/util/common.py b/message_ix_models/util/common.py
index 0d0275be21..fe4d501ec4 100644
--- a/message_ix_models/util/common.py
+++ b/message_ix_models/util/common.py
@@ -1,7 +1,8 @@
import logging
from abc import abstractmethod
+from collections.abc import Mapping, Sequence
from pathlib import Path
-from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, cast
+from typing import Any, Optional, cast
import pandas as pd
from genno import Quantity
@@ -23,10 +24,10 @@
MESSAGE_MODELS_PATH = Path(__file__).parents[1]
#: Package data already loaded with :func:`load_package_data`.
-PACKAGE_DATA: Dict[str, Any] = dict()
+PACKAGE_DATA: dict[str, Any] = dict()
#: Data already loaded with :func:`load_private_data`.
-PRIVATE_DATA: Dict[str, Any] = dict()
+PRIVATE_DATA: dict[str, Any] = dict()
__all__ = [
@@ -106,7 +107,7 @@ class MappingAdapter(Adapter):
maps: Mapping
- def __init__(self, maps: Mapping[str, Sequence[Tuple[str, str]]]):
+ def __init__(self, maps: Mapping[str, Sequence[tuple[str, str]]]):
self.maps = maps
def adapt(self, qty: Quantity) -> Quantity:
@@ -126,7 +127,7 @@ def adapt(self, qty: Quantity) -> Quantity:
def _load(
- var: Dict, base_path: Path, *parts: str, default_suffix: Optional[str] = None
+ var: dict, base_path: Path, *parts: str, default_suffix: Optional[str] = None
) -> Any:
"""Helper for :func:`.load_package_data` and :func:`.load_private_data`."""
key = " ".join(parts)
diff --git a/message_ix_models/util/compat/message_data/utilities.py b/message_ix_models/util/compat/message_data/utilities.py
index d2fd07144e..f5a6274597 100644
--- a/message_ix_models/util/compat/message_data/utilities.py
+++ b/message_ix_models/util/compat/message_data/utilities.py
@@ -17,7 +17,7 @@
def closest(List, K):
"""Finds the member of a list closest to a value (k)"""
- return List[min(range(len(List)), key=lambda i: abs(List[i] - K))]
+ return min(List, key=lambda member: abs(member - K))
def f_index(df1, df2):
@@ -46,7 +46,7 @@ def idx_memb(List, x, distance):
"""Retrurns the member of the list with distance from x"""
if List.index(x) + distance < len(List):
- return List[List.index(x) + distance]
+ return List[List.index(x) + distance]  # index the list argument, not the builtin type
else:
return False
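Aside (not part of the patch): these two hunks are the one spot where a blanket List -> list rename does not apply, because List here is a function parameter holding an actual list object. Subscripting the builtin list type produces a types.GenericAlias rather than an element, as the sketch below shows:

    values = [3, 7, 11]
    assert values[1] == 7   # indexing a list object returns an element
    alias = list[1]         # subscripting the builtin type builds a GenericAlias
    assert not isinstance(alias, int)
    assert str(alias) == "list[1]"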
diff --git a/message_ix_models/util/config.py b/message_ix_models/util/config.py
index c76919385c..4dabdb78b3 100644
--- a/message_ix_models/util/config.py
+++ b/message_ix_models/util/config.py
@@ -1,10 +1,11 @@
import logging
import os
import pickle
+from collections.abc import Mapping, MutableMapping, Sequence
from dataclasses import asdict, dataclass, field, fields, is_dataclass, replace
from hashlib import blake2s
from pathlib import Path
-from typing import Any, Hashable, List, Mapping, MutableMapping, Optional, Sequence, Set
+from typing import Any, Hashable, Optional
import ixmp
@@ -45,7 +46,7 @@ class ConfigHelper:
"""
@classmethod
- def _fields(cls) -> Set[str]:
+ def _fields(cls) -> set[str]:
"""Names of fields in `cls`."""
result = set(dir(cls))
if is_dataclass(cls):
@@ -175,7 +176,7 @@ class Config:
scenario_info: MutableMapping[str, str] = field(default_factory=dict)
#: Like `scenario_info`, but a list for operations affecting multiple scenarios.
- scenarios: List[ScenarioInfo] = field(default_factory=list)
+ scenarios: list[ScenarioInfo] = field(default_factory=list)
#: Like :attr:`platform_info`, used by e.g. :meth:`.clone_to_dest`.
dest_platform: MutableMapping[str, str] = field(default_factory=dict)
diff --git a/message_ix_models/util/context.py b/message_ix_models/util/context.py
index d384cba4f1..69e5e871dd 100644
--- a/message_ix_models/util/context.py
+++ b/message_ix_models/util/context.py
@@ -4,7 +4,6 @@
from copy import deepcopy
from dataclasses import fields
from pathlib import Path
-from typing import List
import ixmp
import message_ix
@@ -16,7 +15,7 @@
log = logging.getLogger(__name__)
#: List of Context instances, from first created to last.
-_CONTEXTS: List["Context"] = []
+_CONTEXTS: list["Context"] = []
# Configuration keys which can be accessed directly on Context.
diff --git a/message_ix_models/util/ixmp.py b/message_ix_models/util/ixmp.py
index ffb5d4628c..aaa9407b07 100644
--- a/message_ix_models/util/ixmp.py
+++ b/message_ix_models/util/ixmp.py
@@ -1,5 +1,3 @@
-from typing import Dict
-
try:
# ixmp 3.8.0 and later
from ixmp.report.util import get_reversed_rename_dims
@@ -38,7 +36,7 @@ def discard_on_error(*args):
]
-def rename_dims() -> Dict[str, str]:
+def rename_dims() -> dict[str, str]:
"""Access :data:`.ixmp.report.common.RENAME_DIMS`.
This provides backwards-compatibility with ixmp versions 3.7.0 and earlier. It can
diff --git a/message_ix_models/util/node.py b/message_ix_models/util/node.py
index 4845a32905..90dc363207 100644
--- a/message_ix_models/util/node.py
+++ b/message_ix_models/util/node.py
@@ -1,7 +1,8 @@
"""Utilities for nodes."""
import logging
-from typing import List, Sequence, Union
+from collections.abc import Sequence
+from typing import Union
from message_ix import Scenario
from sdmx.model.v21 import Code
@@ -124,7 +125,7 @@ def identify_nodes(scenario: Scenario) -> str:
return id
-def nodes_ex_world(nodes: Sequence[Union[str, Code]]) -> List[Union[str, Code]]:
+def nodes_ex_world(nodes: Sequence[Union[str, Code]]) -> list[Union[str, Code]]:
"""Exclude "World" and anything containing "GLB" from `nodes`.
May also be used as a genno (reporting) operator.
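Aside (not part of the patch): based only on the docstring above, the documented behaviour can be sketched as follows; the helper name and node IDs are made up, and this is not the package's implementation.

    def _ex_world_sketch(nodes: list[str]) -> list[str]:
        """Drop "World" and any ID containing "GLB", per the docstring."""
        return [n for n in nodes if n != "World" and "GLB" not in n]

    assert _ex_world_sketch(["World", "R12_AFR", "R12_GLB", "R12_WEU"]) == ["R12_AFR", "R12_WEU"]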
diff --git a/message_ix_models/util/pooch.py b/message_ix_models/util/pooch.py
index 38e3de0cdf..7215635cc5 100644
--- a/message_ix_models/util/pooch.py
+++ b/message_ix_models/util/pooch.py
@@ -1,8 +1,9 @@
"""Utilities for using :doc:`Pooch `."""
import logging
+from collections.abc import Mapping
from pathlib import Path
-from typing import Any, Mapping, Optional, Tuple
+from typing import Any, Optional
import click
import pooch
@@ -152,7 +153,7 @@ def __call__(self, fname, action, pooch):
def fetch(
pooch_args: dict, *, extra_cache_path: Optional[str] = None, **fetch_kwargs
-) -> Tuple[Path, ...]:
+) -> tuple[Path, ...]:
"""Create a :class:`~pooch.Pooch` instance and fetch a single file.
Files are stored under the directory identified by :meth:`.Context.get_cache_path`,
diff --git a/message_ix_models/util/scenarioinfo.py b/message_ix_models/util/scenarioinfo.py
index 81c21250dd..e737a1a678 100644
--- a/message_ix_models/util/scenarioinfo.py
+++ b/message_ix_models/util/scenarioinfo.py
@@ -5,7 +5,7 @@
from collections import defaultdict
from dataclasses import InitVar, dataclass, field
from itertools import product
-from typing import TYPE_CHECKING, Dict, List, Optional
+from typing import TYPE_CHECKING, Optional
import pandas as pd
import pint
@@ -79,10 +79,10 @@ class ScenarioInfo:
version: Optional[int] = None
#: Elements of :mod:`ixmp`/:mod:`message_ix` sets.
- set: Dict[str, List] = field(default_factory=lambda: defaultdict(list))
+ set: dict[str, list] = field(default_factory=lambda: defaultdict(list))
#: Elements of :mod:`ixmp`/:mod:`message_ix` parameters.
- par: Dict[str, pd.DataFrame] = field(default_factory=dict)
+ par: dict[str, pd.DataFrame] = field(default_factory=dict)
#: First model year, if set, else ``Y[0]``.
y0: int = -1
@@ -161,7 +161,7 @@ def N(self):
return list(map(str, self.set["node"]))
@property
- def Y(self) -> List[int]:
+ def Y(self) -> list[int]:
"""Elements of the set 'year' that are >= the first model year."""
return list(filter(lambda y: y >= self.y0, self.set["year"]))
@@ -280,7 +280,7 @@ def io_units(
)
return c / t
- def year_from_codes(self, codes: List[sdmx_model.Code]):
+ def year_from_codes(self, codes: list[sdmx_model.Code]):
"""Update using a list of `codes`.
The following are updated:
@@ -326,7 +326,7 @@ def year_from_codes(self, codes: List[sdmx_model.Code]):
log.debug("Discard existing 'duration_period' elements")
fmy_set = False
- duration_period: List[Dict] = []
+ duration_period: list[dict] = []
# TODO use sorted() here once supported by sdmx
for code in codes:
diff --git a/message_ix_models/util/sdmx.py b/message_ix_models/util/sdmx.py
index acfbb84538..53aeb18148 100644
--- a/message_ix_models/util/sdmx.py
+++ b/message_ix_models/util/sdmx.py
@@ -1,11 +1,12 @@
"""Utilities for handling objects from :mod:`sdmx`."""
import logging
+from collections.abc import Mapping
from datetime import datetime
from enum import Enum, Flag
from importlib.metadata import version
from pathlib import Path
-from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Union
+from typing import TYPE_CHECKING, Optional, Union
from warnings import warn
import sdmx
@@ -27,8 +28,8 @@
# FIXME Reduce complexity from 13 → ≤11
def as_codes( # noqa: C901
- data: Union[List[str], Dict[str, CodeLike]],
-) -> List[Code]:
+ data: Union[list[str], dict[str, CodeLike]],
+) -> list[Code]:
"""Convert `data` to a :class:`list` of :class:`.Code` objects.
Various inputs are accepted:
@@ -38,7 +39,7 @@ def as_codes( # noqa: C901
further :class:`dict` with keys matching other Code attributes.
"""
# Assemble results as a dictionary
- result: Dict[str, Code] = {}
+ result: dict[str, Code] = {}
if isinstance(data, list):
# FIXME typing ignored temporarily for PR#9
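Aside (not part of the patch): per the docstring, as_codes() accepts either a plain list of IDs or a dict mapping IDs to further Code attributes. A hedged usage sketch; the IDs and the "name" attribute shown are assumptions, not taken from the diff:

    from message_ix_models.util.sdmx import as_codes  # module path from the diff header

    codes = as_codes(["coal_ppl", "gas_ppl"])  # list[str] form: IDs only
    # dict form: values supply further Code attributes
    codes = as_codes({"coal_ppl": {"name": "Coal power plant"}})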
diff --git a/message_ix_models/workflow.py b/message_ix_models/workflow.py
index a884cec9a3..b7743e4a4b 100644
--- a/message_ix_models/workflow.py
+++ b/message_ix_models/workflow.py
@@ -2,16 +2,8 @@
import logging
import re
-from typing import (
- TYPE_CHECKING,
- Callable,
- List,
- Literal,
- Mapping,
- Optional,
- Tuple,
- Union,
-)
+from collections.abc import Callable, Mapping
+from typing import TYPE_CHECKING, Literal, Optional, Union
from genno import Computer
from message_ix import Scenario
@@ -207,7 +199,7 @@ def add_step(
# Add to the Computer; return the name of the added step
return str(self.add_single(name, step, "context", base, strict=True))
- def run(self, name_or_names: Union[str, List[str]]):
+ def run(self, name_or_names: Union[str, list[str]]):
"""Run all workflow steps necessary to produce `name_or_names`.
Parameters
@@ -247,7 +239,7 @@ def truncate(self, name: str):
def guess_target(
self, step_name: str, kind: Literal["platform", "scenario"] = "scenario"
- ) -> Tuple[Mapping, str]:
+ ) -> tuple[Mapping, str]:
"""Traverse the graph looking for non-empty platform_info/scenario_info.
Returns the info, and the step name containing it. Usually, this will identify
diff --git a/pyproject.toml b/pyproject.toml
index 9e477826dc..59f38e3361 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,16 +20,16 @@ classifiers = [
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Programming Language :: R",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Information Analysis",
]
-requires-python = ">=3.8"
+requires-python = ">=3.9"
dependencies = [
"click",
"colorama",
@@ -152,7 +152,12 @@ markers = [
]
[tool.ruff]
-exclude = ["message_ix_models/report/legacy/", "message_ix_models/util/compat/message_data/"]
+exclude = [
+ "doc/_static/png_source_files/Land-use_emulator_figures.ipynb",
+ "message_ix_models/model/material/report/Historical Power Sector Stock Reporting-.ipynb",
+ "message_ix_models/report/legacy/",
+ "message_ix_models/util/compat/message_data/",
+]
[tool.ruff.lint]
select = ["C9", "E", "F", "I", "W"]