ENH: torch dtype promotions #298

Draft: wants to merge 2 commits into main.

2 changes: 0 additions & 2 deletions .github/workflows/array-api-tests-torch.yml

With the large unsigned integer dtypes now handled by the promotion table, the test-suite workflow no longer needs to skip them:

@@ -8,6 +8,4 @@ jobs:
     with:
       package-name: torch
       extra-requires: '--index-url https://download.pytorch.org/whl/cpu'
-      extra-env-vars: |
-        ARRAY_API_TESTS_SKIP_DTYPES=uint16,uint32,uint64
       python-versions: '[''3.10'', ''3.13'']'

40 changes: 34 additions & 6 deletions array_api_compat/torch/_aliases.py

@@ -22,9 +22,9 @@
 try:
     # torch >=2.3
     _int_dtypes |= {torch.uint16, torch.uint32, torch.uint64}
+    _HAS_LARGE_UINT = True
 except AttributeError:
-    pass
+    _HAS_LARGE_UINT = False
 
 _array_api_dtypes = {
     torch.bool,
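
The `_HAS_LARGE_UINT` flag works because merely accessing `torch.uint16` on torch < 2.3 raises `AttributeError`. A minimal standalone sketch of the same detection pattern (the names below are illustrative, not part of the PR):

    import torch

    # Probe for the large unsigned dtypes: they only exist on torch >= 2.3,
    # and touching the attribute on older builds raises AttributeError.
    try:
        large_uints = {torch.uint16, torch.uint32, torch.uint64}
        has_large_uint = True
    except AttributeError:
        has_large_uint = False

    print(has_large_uint)  # True on torch >= 2.3, False otherwise
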
@@ -59,6 +59,28 @@
     (torch.float64, torch.complex128): torch.complex128,
 }
 
+if _HAS_LARGE_UINT:  # torch >=2.3
+    _promotion_table.update(
+        {
+            # uints
+            (torch.uint8, torch.uint16): torch.uint16,
+            (torch.uint8, torch.uint32): torch.uint32,
+            (torch.uint8, torch.uint64): torch.uint64,
+            (torch.uint16, torch.uint32): torch.uint32,
+            (torch.uint16, torch.uint64): torch.uint64,
+            (torch.uint32, torch.uint64): torch.uint64,
+            # ints and uints (mixed sign)
+            (torch.uint16, torch.int8): torch.int32,
+            (torch.uint16, torch.int16): torch.int32,
+            (torch.uint16, torch.int32): torch.int32,
+            (torch.uint16, torch.int64): torch.int64,
+            (torch.uint32, torch.int8): torch.int64,
+            (torch.uint32, torch.int16): torch.int64,
+            (torch.uint32, torch.int32): torch.int64,
+            (torch.uint32, torch.int64): torch.int64,
+        }
+    )
+
 _promotion_table.update({(b, a): c for (a, b), c in _promotion_table.items()})
 _promotion_table.update({(a, a): a for a in _array_api_dtypes})
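
The mixed-sign entries follow the array API promotion rules: the result must hold every value of both operands, so for example uint16 mixed with int8 promotes to int32, the smallest signed type covering both ranges. A minimal sketch of how a two-sided table like this is consulted (the `promote` helper and the reduced table are illustrative, not the PR's code; assumes torch >= 2.3):

    import torch

    # Reduced copy of two entries from the table above.
    table = {
        (torch.uint16, torch.int8): torch.int32,
        (torch.uint8, torch.uint16): torch.uint16,
    }
    # The same symmetric closure as in the diff: lookups work in either order.
    table.update({(b, a): c for (a, b), c in table.items()})

    def promote(a, b):
        # Illustrative lookup; the real table also maps (a, a) to a.
        return table[(a, b)]

    assert promote(torch.int8, torch.uint16) == torch.int32
    assert promote(torch.uint16, torch.uint8) == torch.uint16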

@@ -295,10 +317,16 @@ def _sum_prod_no_axis(x: Array, dtype: DType | None) -> Array:
     if dtype is not None:
         return x.clone() if dtype == x.dtype else x.to(dtype)
 
-    # We can't upcast uint8 according to the spec because there is no
-    # torch.uint64, so at least upcast to int64 which is what prod does
-    # when axis=None.
-    if x.dtype in (torch.uint8, torch.int8, torch.int16, torch.int32):
+    if x.dtype in (torch.int8, torch.int16, torch.int32):
         return x.to(torch.int64)
 
+    if _HAS_LARGE_UINT and x.dtype in (torch.uint8, torch.uint16, torch.uint32):
+        return x.to(torch.uint64)
+
+    if x.dtype == torch.uint8:
+        # We can't upcast uint8 according to the spec because there is no
+        # torch.uint64, so at least upcast to int64 which is what prod does
+        # when axis=None.
+        return x.to(torch.int64)
+
     return x.clone()
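
The branch order in `_sum_prod_no_axis` means small signed ints widen to int64, small unsigned ints widen to uint64 where it exists, and uint8 falls back to int64 on older torch. A hedged re-statement of that rule as a standalone function (hypothetical helper, not the PR's exact code; assumes these casts are supported on your build):

    import torch

    def widen_for_reduction(x: torch.Tensor) -> torch.Tensor:
        """Mirror of the diff's default-dtype logic for sum()/prod()."""
        if x.dtype in (torch.int8, torch.int16, torch.int32):
            return x.to(torch.int64)
        if hasattr(torch, "uint64") and x.dtype in (torch.uint8, torch.uint16, torch.uint32):
            # torch >= 2.3: stay unsigned, as the array API spec requires.
            return x.to(torch.uint64)
        if x.dtype == torch.uint8:
            # No torch.uint64 on this build; int64 matches what prod() does
            # with axis=None.
            return x.to(torch.int64)
        return x.clone()

    x = torch.tensor([1, 2, 3], dtype=torch.uint8)
    print(widen_for_reduction(x).dtype)  # torch.uint64 on torch >= 2.3
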
95 changes: 37 additions & 58 deletions array_api_compat/torch/_info.py

@@ -170,78 +170,58 @@ def default_dtypes(self, *, device=None):
             "indexing": default_integral,
         }
 
     def _dtypes(self, kind):
-        bool = torch.bool
-        int8 = torch.int8
-        int16 = torch.int16
-        int32 = torch.int32
-        int64 = torch.int64
-        uint8 = torch.uint8
-        # uint16, uint32, and uint64 are present in newer versions of pytorch,
-        # but they aren't generally supported by the array API functions, so
-        # we omit them from this function.
-        float32 = torch.float32
-        float64 = torch.float64
-        complex64 = torch.complex64
-        complex128 = torch.complex128
-
         if kind is None:
-            return {
-                "bool": bool,
-                "int8": int8,
-                "int16": int16,
-                "int32": int32,
-                "int64": int64,
-                "uint8": uint8,
-                "float32": float32,
-                "float64": float64,
-                "complex64": complex64,
-                "complex128": complex128,
-            }
+            return self._dtypes(
+                (
+                    "bool",
+                    "signed integer",
+                    "unsigned integer",
+                    "real floating",
+                    "complex floating",
+                )
+            )
         if kind == "bool":
-            return {"bool": bool}
+            return {"bool": torch.bool}
         if kind == "signed integer":
             return {
-                "int8": int8,
-                "int16": int16,
-                "int32": int32,
-                "int64": int64,
+                "int8": torch.int8,
+                "int16": torch.int16,
+                "int32": torch.int32,
+                "int64": torch.int64,
             }
         if kind == "unsigned integer":
-            return {
-                "uint8": uint8,
-            }
+            try:
+                # torch >=2.3
+                return {
+                    "uint8": torch.uint8,
+                    "uint16": torch.uint16,
+                    "uint32": torch.uint32,
+                    "uint64": torch.uint64,
+                }
+            except AttributeError:
+                return {"uint8": torch.uint8}
         if kind == "integral":
-            return {
-                "int8": int8,
-                "int16": int16,
-                "int32": int32,
-                "int64": int64,
-                "uint8": uint8,
-            }
+            return self._dtypes(("signed integer", "unsigned integer"))
         if kind == "real floating":
             return {
-                "float32": float32,
-                "float64": float64,
+                "float32": torch.float32,
+                "float64": torch.float64,
             }
         if kind == "complex floating":
             return {
-                "complex64": complex64,
-                "complex128": complex128,
+                "complex64": torch.complex64,
+                "complex128": torch.complex128,
             }
         if kind == "numeric":
-            return {
-                "int8": int8,
-                "int16": int16,
-                "int32": int32,
-                "int64": int64,
-                "uint8": uint8,
-                "float32": float32,
-                "float64": float64,
-                "complex64": complex64,
-                "complex128": complex128,
-            }
+            return self._dtypes(
+                (
+                    "signed integer",
+                    "unsigned integer",
+                    "real floating",
+                    "complex floating",
+                )
+            )
         if isinstance(kind, tuple):
             res = {}
             for k in kind:
@@ -261,7 +241,6 @@ def dtypes(self, *, device=None, kind=None):
         ----------
         device : Device, optional
             The device to get the data types for.
-            Unused for PyTorch, as all devices use the same dtypes.
         kind : str or tuple of str, optional
             The kind of data types to return. If ``None``, all data types are
             returned. If a string, only data types of that kind are returned.
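
The effect of the `_dtypes` refactor is visible through the namespace's inspection API. A quick sketch, assuming torch >= 2.3 and an array-api-compat version that exposes `__array_namespace_info__`:

    import array_api_compat.torch as xp

    info = xp.__array_namespace_info__()

    # Now includes the large unsigned dtypes on torch >= 2.3.
    print(info.dtypes(kind="unsigned integer"))
    # {'uint8': torch.uint8, 'uint16': torch.uint16,
    #  'uint32': torch.uint32, 'uint64': torch.uint64}

    # Composite kinds are resolved by recursing over their parts.
    print(sorted(info.dtypes(kind=("bool", "real floating"))))
    # ['bool', 'float32', 'float64']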