diff --git a/references/classification/README.md b/references/classification/README.md index 65ee416bf89..bc481f421ed 100644 --- a/references/classification/README.md +++ b/references/classification/README.md @@ -135,7 +135,7 @@ torchrun --nproc_per_node=8 train.py\ --lr-scheduler=cosineannealinglr --lr-warmup-method=linear\ --lr-warmup-epochs=5 --lr-warmup-decay=0.1 ``` -Here `$MODEL` is one of `regnet_x_400mf`, `regnet_x_800mf`, `regnet_x_1_6gf`, `regnet_y_400mf`, `regnet_y_800mf` and `regnet_y_1_6gf`. Please note we used learning rate 0.4 for `regent_y_400mf` to get the same Acc@1 as [the paper)(https://arxiv.org/abs/2003.13678). +Here `$MODEL` is one of `regnet_x_400mf`, `regnet_x_800mf`, `regnet_x_1_6gf`, `regnet_y_400mf`, `regnet_y_800mf` and `regnet_y_1_6gf`. Please note we used learning rate 0.4 for `regnet_y_400mf` to get the same Acc@1 as [the paper](https://arxiv.org/abs/2003.13678). #### Medium models ``` diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 07e3d75df6d..3cf7d89de77 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -394,7 +394,15 @@ def _check_transform_sample_input_smoke(transform, input, *, adapter): def check_transform(transform, input, check_v1_compatibility=True, check_sample_input=True): - pickle.loads(pickle.dumps(transform)) + # TODO: remove this cm once https://github.com/pytorch/vision/issues/8517 + # and https://github.com/pytorch/pytorch/issues/130242 are resolved. 
+ if isinstance(transform, (transforms.RandomResizedCrop, transforms.LinearTransformation)): + cm = pytest.warns(FutureWarning, match="You are using `torch.load`") + else: + cm = contextlib.nullcontext() + + with cm: + pickle.loads(pickle.dumps(transform)) output = transform(input) assert isinstance(output, type(input)) diff --git a/torchvision/datasets/_optical_flow.py b/torchvision/datasets/_optical_flow.py index 40d25583942..6d8b852a5d0 100644 --- a/torchvision/datasets/_optical_flow.py +++ b/torchvision/datasets/_optical_flow.py @@ -43,7 +43,7 @@ def __init__(self, root: Union[str, Path], transforms: Optional[Callable] = None def _read_img(self, file_name: str) -> Image.Image: img = Image.open(file_name) if img.mode != "RGB": - img = img.convert("RGB") + img = img.convert("RGB") # type: ignore[assignment] return img @abstractmethod diff --git a/torchvision/datasets/_stereo_matching.py b/torchvision/datasets/_stereo_matching.py index 6a3f563a2da..1deaab7e2f3 100644 --- a/torchvision/datasets/_stereo_matching.py +++ b/torchvision/datasets/_stereo_matching.py @@ -58,7 +58,7 @@ def __init__(self, root: Union[str, Path], transforms: Optional[Callable] = None def _read_img(self, file_path: Union[str, Path]) -> Image.Image: img = Image.open(file_path) if img.mode != "RGB": - img = img.convert("RGB") + img = img.convert("RGB") # type: ignore[assignment] return img def _scan_pairs( diff --git a/torchvision/datasets/widerface.py b/torchvision/datasets/widerface.py index 90f80b7175b..71f4ce313c3 100644 --- a/torchvision/datasets/widerface.py +++ b/torchvision/datasets/widerface.py @@ -92,7 +92,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]: """ # stay consistent with other datasets and return a PIL Image - img = Image.open(self.img_info[index]["img_path"]) + img = Image.open(self.img_info[index]["img_path"]) # type: ignore[arg-type] if self.transform is not None: img = self.transform(img) diff --git a/torchvision/utils.py b/torchvision/utils.py index 
6b2d19ec3dd..33ac826e5ce 100644 --- a/torchvision/utils.py +++ b/torchvision/utils.py @@ -612,8 +612,8 @@ def _parse_colors( colors = [ImageColor.getrgb(color) if isinstance(color, str) else color for color in colors] if dtype.is_floating_point: # [0, 255] -> [0, 1] - colors = [tuple(v / 255 for v in color) for color in colors] - return colors + colors = [tuple(v / 255 for v in color) for color in colors] # type: ignore[union-attr] + return colors # type: ignore[return-value] def _log_api_usage_once(obj: Any) -> None: