1 change: 1 addition & 0 deletions changelog.d/18831.feature
@@ -0,0 +1 @@
Add animated image support to the media thumbnailer.
125 changes: 110 additions & 15 deletions synapse/media/thumbnailer.py
@@ -22,9 +22,9 @@
import logging
from io import BytesIO
from types import TracebackType
from typing import TYPE_CHECKING, List, Optional, Tuple, Type
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, cast

from PIL import Image
from PIL import Image, ImageSequence

from synapse.api.errors import Codes, NotFoundError, SynapseError, cs_error
from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
@@ -152,20 +152,24 @@ def aspect(self, max_width: int, max_height: int) -> Tuple[int, int]:
else:
return max((max_height * self.width) // self.height, 1), max_height

def _resize(self, width: int, height: int) -> Image.Image:
def _resize_image(self, image: Image.Image, width: int, height: int) -> Image.Image:
# 1-bit or 8-bit color palette images need converting to RGB
# otherwise they will be scaled using nearest neighbour which
# looks awful.
#
# If the image has transparency, use RGBA instead.
if self.image.mode in ["1", "L", "P"]:
if self.image.info.get("transparency", None) is not None:
with self.image:
self.image = self.image.convert("RGBA")
if image.mode in ["1", "L", "P"]:
if image.info.get("transparency", None) is not None:
converted = image.convert("RGBA")
else:
with self.image:
self.image = self.image.convert("RGB")
return self.image.resize((width, height), Image.LANCZOS)
converted = image.convert("RGB")
else:
converted = image
return converted.resize((width, height), Image.LANCZOS)

def _resize(self, width: int, height: int) -> Image.Image:
# Backwards-compatible single-image resize: operate on self.image
return self._resize_image(self.image, width, height)

@trace
def scale(self, width: int, height: int, output_type: str) -> BytesIO:
@@ -174,8 +178,34 @@ def scale(self, width: int, height: int, output_type: str) -> BytesIO:
Returns:
The bytes of the encoded image ready to be written to disk
"""
with self._resize(width, height) as scaled:
return self._encode_image(scaled, output_type)
# If it's an animated image, generate an animated thumbnail (preserve
# animation). Otherwise, fall back to static processing using the first
# frame.
if getattr(self.image, "is_animated", False):
frames = []
durations = []
loop = self.image.info.get("loop", 0)
transparency = self.image.info.get("transparency", None)
for frame in ImageSequence.Iterator(self.image):
# Copy the frame to avoid referencing the original image memory
f = frame.copy()
if f.mode != "RGBA":
f = f.convert("RGBA")
resized = self._resize_image(f, width, height)
frames.append(resized)
durations.append(
frame.info.get("duration") or self.image.info.get("duration") or 100
)
return self._encode_animated(frames, durations, loop, transparency)
Comment on lines +185 to +199

Copilot AI Aug 27, 2025

The animated processing logic is duplicated between scale() and crop() methods. Consider extracting this into a shared helper method to reduce code duplication.

Suggested change
frames = []
durations = []
loop = self.image.info.get("loop", 0)
transparency = self.image.info.get("transparency", None)
for frame in ImageSequence.Iterator(self.image):
# Copy the frame to avoid referencing the original image memory
f = frame.copy()
if f.mode != "RGBA":
f = f.convert("RGBA")
resized = self._resize_image(f, width, height)
frames.append(resized)
durations.append(
frame.info.get("duration") or self.image.info.get("duration") or 100
)
return self._encode_animated(frames, durations, loop, transparency)
return self._process_animated(width, height, self._resize_image)

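For illustration, a shared helper along the lines of that suggestion might look like the sketch below. This is not code from the PR: the name _process_animated and its callable-based signature are assumptions (and differ from the signature used in the suggested change above), and Callable would need adding to the module's typing imports.

def _process_animated(
    self, transform: Callable[[Image.Image], Image.Image]
) -> BytesIO:
    """Apply `transform` to each frame and encode the result as an
    animated GIF, preserving durations, loop count and transparency."""
    frames: List[Image.Image] = []
    durations: List[int] = []
    loop = self.image.info.get("loop", 0)
    transparency = self.image.info.get("transparency", None)
    for frame in ImageSequence.Iterator(self.image):
        # Copy each frame so the original image memory is never mutated.
        f = frame.copy()
        if f.mode != "RGBA":
            f = f.convert("RGBA")
        frames.append(transform(f))
        durations.append(
            frame.info.get("duration") or self.image.info.get("duration") or 100
        )
    return self._encode_animated(frames, durations, loop, transparency)

scale() would then end with return self._process_animated(lambda f: self._resize_image(f, width, height)), and crop() with return self._process_animated(lambda f: self._resize_image(f, scaled_width, scaled_height).crop(crop)).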

else:
# Static processing
first = next(ImageSequence.Iterator(self.image))
if first is not self.image:
base = first.copy()
else:
base = self.image
with self._resize_image(base, width, height) as scaled:
return self._encode_image(scaled, output_type)

@trace
def crop(self, width: int, height: int, output_type: str) -> BytesIO:
@@ -205,16 +235,81 @@
crop_right = width + crop_left
crop = (crop_left, 0, crop_right, height)

with self._resize(scaled_width, scaled_height) as scaled_image:
with scaled_image.crop(crop) as cropped:
return self._encode_image(cropped, output_type)
# If it's an animated image, generate an animated thumbnail (preserve
# animation). Otherwise, fall back to static processing using the first
# frame.
if getattr(self.image, "is_animated", False):
frames = []
durations = []
loop = self.image.info.get("loop", 0)
transparency = self.image.info.get("transparency", None)
for frame in ImageSequence.Iterator(self.image):
f = frame.copy()
if f.mode != "RGBA":
f = f.convert("RGBA")
scaled = self._resize_image(f, scaled_width, scaled_height)
cropped = scaled.crop(crop)
frames.append(cropped)
durations.append(
frame.info.get("duration") or self.image.info.get("duration") or 100
)
return self._encode_animated(frames, durations, loop, transparency)
else:
# Static processing
first = next(ImageSequence.Iterator(self.image))
if first is not self.image:
base = first.copy()
else:
base = self.image
with self._resize_image(base, scaled_width, scaled_height) as scaled_image:
with scaled_image.crop(crop) as cropped:
return self._encode_image(cropped, output_type)

def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO:
output_bytes_io = BytesIO()
fmt = self.FORMATS[output_type]
if fmt == "JPEG" or fmt == "PNG" and output_image.mode == "CMYK":
output_image = output_image.convert("RGB")
output_image.save(output_bytes_io, fmt, quality=80)
output_bytes_io.seek(0)
return output_bytes_io

def _encode_animated(
self,
frames: List[Image.Image],
durations: List[int],
loop: int,
transparency: Optional[int],
) -> BytesIO:
"""
Encode a list of RGBA frames into an animated GIF, attempting to
preserve durations, loop count and transparency where possible.
"""
output_bytes_io = BytesIO()
if not frames:
raise ThumbnailError("No frames to encode for animated GIF")

# Ensure all frames are in 'P' mode with adaptive palette.
paletted_frames = []
for f in frames:
if f.mode != "RGBA":
f = f.convert("RGBA")
p = f.convert("P", palette=Image.ADAPTIVE)
paletted_frames.append(p)

save_kwargs = {
"format": "GIF",
"save_all": True,
"append_images": paletted_frames[1:],
"loop": loop,
"duration": durations,
"disposal": 2,
}
if transparency is not None:
save_kwargs["transparency"] = transparency

paletted_frames[0].save(output_bytes_io, **cast(dict, save_kwargs))
Copilot AI Aug 27, 2025

The cast(dict, save_kwargs) is unnecessary since save_kwargs is already declared as a dict. This cast adds complexity without benefit.

Suggested change
paletted_frames[0].save(output_bytes_io, **cast(dict, save_kwargs))
paletted_frames[0].save(output_bytes_io, **save_kwargs)


Author

Doesn't pass the linter

Contributor

MadLittleMods Sep 9, 2025

When you try the suggestion, you run into:

$ poetry run mypy synapse/media/thumbnailer.py
synapse/media/thumbnailer.py:311: note: Revealed type is "builtins.dict[builtins.str, builtins.object]"
synapse/media/thumbnailer.py:313: error: Argument 2 to "save" of "Image" has incompatible type "**dict[str, object]"; expected "Optional[str]"  [arg-type]
synapse/media/thumbnailer.py:313: error: Argument 2 to "save" of "Image" has incompatible type "**dict[str, object]"; expected "bool"  [arg-type]
synapse/media/thumbnailer.py:313: error: Argument 2 to "save" of "Image" has incompatible type "**dict[str, object]"; expected "Literal['bmp', 'png']"  [arg-type]

But overall, the cast isn't very good.

It would be better to annotate the correct type where it is declared: `save_kwargs: Dict[str, Any] = {`


Pillow types:

* We're using `types-Pillow` but it has since been removed from the `typeshed` in favor of the native types: https://github.com/python/typeshed/pull/12732/files#diff-202ba5663fbebd32c9087d7510b31f3ff145d155c304a876ff10e554dd49de74L266-L275
* The native types (but we're not using these yet): https://github.com/python-pillow/Pillow/blob/b7e0570cb11901fbfdc9d5bc1ae2918cb32cc9c8/src/PIL/Image.py#L2459-L2461
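A minimal sketch of that fix, assuming Dict and Any are added to the module's existing typing imports; with the declaration annotated, the cast() at the save() call site can be dropped:

save_kwargs: Dict[str, Any] = {
    "format": "GIF",
    "save_all": True,
    "append_images": paletted_frames[1:],
    "loop": loop,
    "duration": durations,
    "disposal": 2,
}
if transparency is not None:
    save_kwargs["transparency"] = transparency

# mypy accepts **kwargs typed as Dict[str, Any], so no cast() is needed.
paletted_frames[0].save(output_bytes_io, **save_kwargs)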

Author

> It would be better to annotate the correct type where it is declared: `save_kwargs: Dict[str, Any] = {`

Feel free to modify my PR however you like; I'm no expert.

Contributor

@catfromplan9 Are you interested in continuing the PR further?

There's bound to be more review points to address once this gets a full round of review. While improving the media repo in Synapse itself is desirable, this isn't necessarily something the team would take over and put effort into ourselves. Happy to review and push this along if you plan on making the changes though 🙂

Author

> Are you interested in continuing the PR further?

Can you outline to me what exactly you would need me to change? I will do so.

output_bytes_io.seek(0)
return output_bytes_io

def close(self) -> None:
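For reference, the technique this PR implements can be sketched standalone with plain Pillow, independent of Synapse's Thumbnailer class. The file name and thumbnail size below are made up for illustration:

from io import BytesIO

from PIL import Image, ImageSequence

with Image.open("animated.gif") as src:
    frames = []
    durations = []
    for frame in ImageSequence.Iterator(src):
        # Work on a copy of each frame, normalised to RGBA before scaling.
        f = frame.copy().convert("RGBA")
        resized = f.resize((64, 64), Image.LANCZOS)
        frames.append(resized.convert("P", palette=Image.ADAPTIVE))
        durations.append(frame.info.get("duration") or src.info.get("duration") or 100)
    out = BytesIO()
    frames[0].save(
        out,
        format="GIF",
        save_all=True,
        append_images=frames[1:],
        loop=src.info.get("loop", 0),
        duration=durations,
        disposal=2,
    )
    out.seek(0)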