ToDtype CV-CUDA Backend #9278
Changes from all commits
```diff
@@ -21,6 +21,7 @@
 import torchvision.transforms.v2 as transforms
 
 from common_utils import (
     assert_close,
     assert_equal,
     cache,
     cpu_and_cuda,
@@ -41,7 +42,6 @@
 )
 
 from torch import nn
 from torch.testing import assert_close
 from torch.utils._pytree import tree_flatten, tree_map
 from torch.utils.data import DataLoader, default_collate
 from torchvision import tv_tensors
```
```diff
@@ -2627,7 +2627,17 @@ def test_kernel(self, kernel, make_input, input_dtype, output_dtype, device, sca
             scale=scale,
         )
 
-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image, make_video])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image_tensor,
+            make_image,
+            make_video,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
     @pytest.mark.parametrize("input_dtype", [torch.float32, torch.float64, torch.uint8])
     @pytest.mark.parametrize("output_dtype", [torch.float32, torch.float64, torch.uint8])
     @pytest.mark.parametrize("device", cpu_and_cuda())
```
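The parametrization pattern above (and in the hunks that follow) gates the CV-CUDA case behind a skipif mark, so the rest of the dtype matrix still runs on machines without CV-CUDA. A minimal standalone sketch of that gating, assuming only pytest and torch; the `cvcuda` import probe and the `make_image_cvcuda` stub here are illustrative stand-ins, not the PR's actual test helpers:

```python
import pytest
import torch

# Illustrative availability probe; the PR's tests import a CVCUDA_AVAILABLE flag
# and a make_image_cvcuda helper from their shared test utilities instead.
try:
    import cvcuda  # noqa: F401

    CVCUDA_AVAILABLE = True
except ImportError:
    CVCUDA_AVAILABLE = False


def make_image_tensor(dtype=torch.uint8, device="cpu"):
    # Plain tensor input, available everywhere.
    return torch.randint(0, 256, (3, 32, 32), dtype=dtype, device=device)


def make_image_cvcuda(dtype=torch.uint8, device="cuda"):
    # Hypothetical stand-in for the PR's CV-CUDA image helper.
    return torch.randint(0, 256, (1, 3, 32, 32), dtype=dtype, device=device)


@pytest.mark.parametrize(
    "make_input",
    [
        make_image_tensor,
        # Collected everywhere, but skipped unless CV-CUDA is importable.
        pytest.param(
            make_image_cvcuda,
            marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA"),
        ),
    ],
)
def test_make_input_is_callable(make_input):
    assert callable(make_input)
```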
```diff
@@ -2642,18 +2652,27 @@ def test_functional(self, make_input, input_dtype, output_dtype, device, scale):
 
     @pytest.mark.parametrize(
         "make_input",
-        [make_image_tensor, make_image, make_bounding_boxes, make_segmentation_mask, make_video],
+        [
+            make_image_tensor,
+            make_image,
+            make_bounding_boxes,
+            make_segmentation_mask,
+            make_video,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
     )
     @pytest.mark.parametrize("input_dtype", [torch.float32, torch.float64, torch.uint8])
     @pytest.mark.parametrize("output_dtype", [torch.float32, torch.float64, torch.uint8])
     @pytest.mark.parametrize("device", cpu_and_cuda())
     @pytest.mark.parametrize("scale", (True, False))
     @pytest.mark.parametrize("as_dict", (True, False))
     def test_transform(self, make_input, input_dtype, output_dtype, device, scale, as_dict):
-        input = make_input(dtype=input_dtype, device=device)
+        inpt = make_input(dtype=input_dtype, device=device)
         if as_dict:
-            output_dtype = {type(input): output_dtype}
-        check_transform(transforms.ToDtype(dtype=output_dtype, scale=scale), input, check_sample_input=not as_dict)
+            output_dtype = {type(inpt): output_dtype}
+        check_transform(transforms.ToDtype(dtype=output_dtype, scale=scale), inpt, check_sample_input=not as_dict)
 
     def reference_convert_dtype_image_tensor(self, image, dtype=torch.float, scale=False):
         input_dtype = image.dtype
```
Comment on lines +2672 to +2675 (Contributor): May I ask what the reason is for changing `input` to `inpt`?
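For context on the `as_dict` branch in `test_transform` above: `transforms.ToDtype` accepts either a single dtype or a mapping from tv_tensor types to target dtypes, which is what the test builds with `{type(inpt): output_dtype}`. A short sketch of the dict form using the public v2 API; the sample values are illustrative:

```python
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2 as transforms

image = tv_tensors.Image(torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8))
boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[0, 0, 10, 10]], dtype=torch.float32),
    format="XYXY",
    canvas_size=(32, 32),
)

# Per-type dtype mapping: convert (and rescale) the image, pass everything else through.
to_dtype = transforms.ToDtype(dtype={tv_tensors.Image: torch.float32, "others": None}, scale=True)
out_image, out_boxes = to_dtype(image, boxes)

print(out_image.dtype)  # torch.float32, values rescaled to [0, 1]
print(out_boxes.dtype)  # torch.float32, left untouched by the "others": None entry
```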
```diff
@@ -2688,25 +2707,59 @@ def fn(value):
         return torch.tensor(tree_map(fn, image.tolist())).to(dtype=output_dtype, device=image.device)
 
+    def _get_dtype_conversion_atol(self, input_dtype, output_dtype, scale):
+        is_uint16_to_uint8 = input_dtype == torch.uint16 and output_dtype == torch.uint8
+        is_uint8_to_uint16 = input_dtype == torch.uint8 and output_dtype == torch.uint16
+        changes_type_class = output_dtype.is_floating_point != input_dtype.is_floating_point
+
+        in_bits = torch.iinfo(input_dtype).bits if not input_dtype.is_floating_point else None
+        out_bits = torch.iinfo(output_dtype).bits if not output_dtype.is_floating_point else None
+        expands_bits = in_bits is not None and out_bits is not None and out_bits > in_bits
+
+        if is_uint16_to_uint8:
+            atol = 255
+        elif is_uint8_to_uint16 and not scale:
+            atol = 255
+        elif expands_bits and not scale:
+            atol = 1
+        elif changes_type_class:
+            atol = 1
+        else:
+            atol = 0
+
+        return atol
+
     @pytest.mark.parametrize("input_dtype", [torch.float32, torch.float64, torch.uint8, torch.uint16])
     @pytest.mark.parametrize("output_dtype", [torch.float32, torch.float64, torch.uint8, torch.uint16])
     @pytest.mark.parametrize("device", cpu_and_cuda())
     @pytest.mark.parametrize("scale", (True, False))
-    def test_image_correctness(self, input_dtype, output_dtype, device, scale):
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("fn", [F.to_dtype, transform_cls_to_functional(transforms.ToDtype)])
+    def test_image_correctness(self, input_dtype, output_dtype, device, scale, make_input, fn):
         if input_dtype.is_floating_point and output_dtype == torch.int64:
             pytest.xfail("float to int64 conversion is not supported")
         if input_dtype == torch.uint8 and output_dtype == torch.uint16 and device == "cuda":
             pytest.xfail("uint8 to uint16 conversion is not supported on cuda")
 
-        input = make_image(dtype=input_dtype, device=device)
+        inpt = make_input(dtype=input_dtype, device=device)
+        out = fn(inpt, dtype=output_dtype, scale=scale)
 
-        out = F.to_dtype(input, dtype=output_dtype, scale=scale)
-        expected = self.reference_convert_dtype_image_tensor(input, dtype=output_dtype, scale=scale)
+        if make_input == make_image_cvcuda:
+            inpt = F.cvcuda_to_tensor(inpt)
+            out = F.cvcuda_to_tensor(out)
 
-        if input_dtype.is_floating_point and not output_dtype.is_floating_point and scale:
-            torch.testing.assert_close(out, expected, atol=1, rtol=0)
-        else:
-            torch.testing.assert_close(out, expected)
+        expected = self.reference_convert_dtype_image_tensor(inpt, dtype=output_dtype, scale=scale)
+
+        atol = self._get_dtype_conversion_atol(input_dtype, output_dtype, scale)
+        torch.testing.assert_close(out, expected, rtol=0, atol=atol)
 
     def was_scaled(self, inpt):
         # this assumes the target dtype is float
```
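The tolerance policy added by `_get_dtype_conversion_atol` can be read off with a few concrete calls. The sketch below restates the helper as a free function (a restatement for illustration, not part of the PR) and spells out the atol it selects for some of the parametrized dtype pairs:

```python
import torch


def get_dtype_conversion_atol(input_dtype, output_dtype, scale):
    # Standalone restatement of the _get_dtype_conversion_atol helper from the diff,
    # kept outside the test class purely for illustration.
    is_uint16_to_uint8 = input_dtype == torch.uint16 and output_dtype == torch.uint8
    is_uint8_to_uint16 = input_dtype == torch.uint8 and output_dtype == torch.uint16
    changes_type_class = output_dtype.is_floating_point != input_dtype.is_floating_point

    in_bits = torch.iinfo(input_dtype).bits if not input_dtype.is_floating_point else None
    out_bits = torch.iinfo(output_dtype).bits if not output_dtype.is_floating_point else None
    expands_bits = in_bits is not None and out_bits is not None and out_bits > in_bits

    if is_uint16_to_uint8:
        return 255
    if is_uint8_to_uint16 and not scale:
        return 255
    if expands_bits and not scale:
        return 1
    if changes_type_class:
        return 1
    return 0


# uint16 -> uint8 gets the loosest tolerance regardless of scaling.
assert get_dtype_conversion_atol(torch.uint16, torch.uint8, scale=True) == 255
# uint8 -> uint16 is only loose when the values are not rescaled ...
assert get_dtype_conversion_atol(torch.uint8, torch.uint16, scale=False) == 255
# ... with scale=True the comparison must match exactly.
assert get_dtype_conversion_atol(torch.uint8, torch.uint16, scale=True) == 0
# Crossing between integer and floating-point dtypes allows 1 unit of round-off.
assert get_dtype_conversion_atol(torch.uint8, torch.float32, scale=True) == 1
# Float -> float conversions are compared exactly.
assert get_dtype_conversion_atol(torch.float32, torch.float64, scale=True) == 0
```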