From 2b2d10ae99c022edd76cecb98e1fb4bcd44d4739 Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Mon, 31 Jul 2023 15:35:24 +0100
Subject: [PATCH] format

---
 torchvision/prototype/transforms/_geometry.py     | 4 +++-
 torchvision/transforms/v2/functional/_geometry.py | 4 +++-
 torchvision/transforms/v2/functional/_meta.py     | 4 +++-
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/torchvision/prototype/transforms/_geometry.py b/torchvision/prototype/transforms/_geometry.py
index d1d1a5ebc74..b328c132070 100644
--- a/torchvision/prototype/transforms/_geometry.py
+++ b/torchvision/prototype/transforms/_geometry.py
@@ -115,7 +115,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
         elif isinstance(inpt, datapoints.BoundingBoxes):
             inpt = datapoints.BoundingBoxes.wrap_like(
                 inpt,
-                F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, spatial_size=inpt.spatial_size),
+                F.clamp_bounding_boxes(
+                    inpt[params["is_valid"]], format=inpt.format, spatial_size=inpt.spatial_size
+                ),
             )
 
         if params["needs_pad"]:
diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py
index 512383ce273..469e58ff9c4 100644
--- a/torchvision/transforms/v2/functional/_geometry.py
+++ b/torchvision/transforms/v2/functional/_geometry.py
@@ -1826,7 +1826,9 @@ def center_crop_bounding_boxes(
 ) -> Tuple[torch.Tensor, Tuple[int, int]]:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
     crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *spatial_size)
-    return crop_bounding_boxes(bounding_boxes, format, top=crop_top, left=crop_left, height=crop_height, width=crop_width)
+    return crop_bounding_boxes(
+        bounding_boxes, format, top=crop_top, left=crop_left, height=crop_height, width=crop_width
+    )
 
 
 def center_crop_mask(mask: torch.Tensor, output_size: List[int]) -> torch.Tensor:
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 7bfc5bc3278..f564b180389 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -272,7 +272,9 @@ def clamp_bounding_boxes(
     elif isinstance(inpt, datapoints.BoundingBoxes):
         if format is not None or spatial_size is not None:
             raise ValueError("For bounding box datapoint inputs, `format` and `spatial_size` must not be passed.")
-        output = _clamp_bounding_boxes(inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size)
+        output = _clamp_bounding_boxes(
+            inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
+        )
         return datapoints.BoundingBoxes.wrap_like(inpt, output)
     else:
         raise TypeError(
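
Note for reviewers (editor's addition, not part of the patch): all three
hunks only re-wrap long call sites; behavior is unchanged. As a sanity
check, here is a minimal usage sketch of the clamp_bounding_boxes kernel
touched in _meta.py, assuming the July-2023 prototype API as used in the
hunks above (torchvision.datapoints.BoundingBoxes carrying a `format` and a
`spatial_size` attribute):

    import torch
    from torchvision import datapoints
    from torchvision.transforms.v2 import functional as F

    # One XYXY box that overflows a 4x6 (height x width) image.
    boxes = datapoints.BoundingBoxes(
        torch.tensor([[1.0, 1.0, 10.0, 10.0]]),
        format=datapoints.BoundingBoxFormat.XYXY,
        spatial_size=(4, 6),
    )

    # Per the _meta.py hunk: for datapoint inputs, `format` and
    # `spatial_size` are read off the input and must NOT be passed.
    clamped = F.clamp_bounding_boxes(boxes)
    print(clamped)  # coordinates clamped to the canvas: [[1., 1., 6., 4.]]

Passing `format=` or `spatial_size=` alongside a BoundingBoxes datapoint
raises the ValueError shown in the hunk; the explicit arguments are only
for plain torch.Tensor inputs.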