Merge branch 'master' into version-2.10
jrief committed Sep 11, 2024
2 parents 7bdbd7c + acb666f commit 409457b
Showing 4 changed files with 193 additions and 23 deletions.
5 changes: 3 additions & 2 deletions CHANGES.rst
@@ -1,11 +1,12 @@
Changes
=======

2.10
----
2.10 (2024-09-11)
-----------------
* Drop support for Python-3.8.
* Drop support for Django-4.1 and earlier.
* Add support for Django-5.1.
* Experimental support for animated image formats. See the documentation for more information.


2.9 (2024-07-25)
29 changes: 29 additions & 0 deletions docs/ref/animated_formats.rst
@@ -0,0 +1,29 @@
=======================
Animated images support
=======================

Support for animated image formats in easy-thumbnails is experimental and must be activated
manually via your Django settings, as shown below.

Example settings that preserve the GIF, WEBP and PNG formats, but don't allow animations for
PNGs:

.. code-block:: python

    THUMBNAIL_IMAGE_SAVE_OPTIONS = {
        "GIF": {"save_all": True},  # save all available frames
        "WEBP": {"save_all": True},
        "PNG": {"save_all": False},  # don't allow animated PNGs
    }
    THUMBNAIL_PRESERVE_EXTENSIONS = ("webp", "gif", "png")

There have been issues with conversion from GIF to WEBP, so it's currently not recommended to
enable this specific conversion for animated images.
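
With these settings in place, thumbnails generated from an animated source can keep their
frames. A minimal usage sketch, assuming a Django model instance ``profile`` with an ``image``
field (both names are purely illustrative):

.. code-block:: python

    from easy_thumbnails.files import get_thumbnailer

    # ``profile.image`` refers to an uploaded animated GIF or WEBP file.
    thumbnailer = get_thumbnailer(profile.image)
    thumbnail = thumbnailer.get_thumbnail({"size": (200, 200), "crop": True})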


Remark
======

In the future, Easy Thumbnails might preserve animated images by default, and/or provide the
option to enable/disable animations for each generated thumbnail.
99 changes: 78 additions & 21 deletions easy_thumbnails/processors.py
@@ -1,5 +1,7 @@
import itertools
import re
from functools import partial
from io import BytesIO

from PIL import Image, ImageChops, ImageFilter
from easy_thumbnails import utils
@@ -35,6 +37,31 @@ def _points_table():
            yield j


class FrameAware:
    """
    Wrap a multi-frame (animated) image so that image operations are applied
    to every frame. Single-frame images are returned unchanged by ``__new__``.
    """
    def __new__(cls, im):
        if getattr(im, "n_frames", 1) > 1:
            return super().__new__(cls)
        return im

    def __init__(self, im):
        self.im = im

    def apply_to_frames(self, method, *args, **kwargs):
        new_frames = []
        for i in range(self.im.n_frames):
            self.im.seek(i)
            new_frames.append(method(*args, **kwargs))
        write_to = BytesIO()
        new_frames[0].save(
            write_to, format=self.im.format, save_all=True, append_images=new_frames[1:]
        )
        return Image.open(write_to)

    def __getattr__(self, key):
        method = getattr(self.im, key)
        return partial(self.apply_to_frames, method)


def colorspace(im, bw=False, replace_alpha=False, **kwargs):
    """
    Convert images to the correct color space.
@@ -57,7 +84,7 @@ def colorspace(im, bw=False, replace_alpha=False, **kwargs):
    if im.mode == 'I':
        # PIL (and pillow) can't convert 16 bit grayscale images to lower
        # modes, so manually convert them to an 8 bit grayscale.
        im = im.point(list(_points_table()), 'L')
        im = FrameAware(im).point(list(_points_table()), "L")

    is_transparent = utils.is_transparent(im)
    is_grayscale = im.mode in ('L', 'LA')
@@ -69,17 +96,31 @@ def colorspace(im, bw=False, replace_alpha=False, **kwargs):

    if is_transparent:
        if replace_alpha:
            if im.mode != 'RGBA':
                im = im.convert('RGBA')
            base = Image.new('RGBA', im.size, replace_alpha)
            base.paste(im, mask=im)
            im = base
            if not getattr(im, 'is_animated', False):
                if im.mode != 'RGBA':
                    im = FrameAware(im).convert('RGBA')
                base = Image.new('RGBA', im.size, replace_alpha)
                base.paste(im, mask=im)
                im = base
            else:
                frames = []
                for i in range(im.n_frames):
                    im.seek(i)
                    if im.mode != 'RGBA':
                        im = FrameAware(im).convert('RGBA')
                    base = Image.new('RGBA', im.size, replace_alpha)
                    base.paste(im, mask=im)
                    frames.append(base)
                write_to = BytesIO()
                frames[0].save(
                    write_to, format=im.format, save_all=True, append_images=frames[1:]
                )
                return Image.open(write_to)
        else:
            new_mode = new_mode + 'A'

    if im.mode != new_mode:
        im = im.convert(new_mode)

        im = FrameAware(im).convert(new_mode)
    return im


@@ -108,7 +149,7 @@ def autocrop(im, autocrop=False, **kwargs):
        bg = Image.new('L', im.size, 255)
        bbox = ImageChops.difference(bw, bg).getbbox()
        if bbox:
            im = im.crop(bbox)
            im = FrameAware(im).crop(bbox)
    return im


@@ -202,9 +243,10 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
    if scale < 1.0 or (scale > 1.0 and upscale):
        # Resize the image to the target size boundary. Round the scaled
        # boundary sizes to avoid floating point errors.
        im = im.resize((int(round(source_x * scale)),
                        int(round(source_y * scale))),
                       resample=Image__Resampling__LANCZOS)
        im = FrameAware(im).resize(
            (int(round(source_x * scale)), int(round(source_y * scale))),
            resample=Image__Resampling__LANCZOS,
        )

    if crop:
        # Use integer values now.
@@ -232,8 +274,9 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
        box.append(int(min(source_x, box[0] + target_x)))
        box.append(int(min(source_y, box[1] + target_y)))
        # See if an edge cropping argument was provided.
        edge_crop = (isinstance(crop, str) and
                     re.match(r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop))
        edge_crop = isinstance(crop, str) and re.match(
            r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop
        )
        if edge_crop and filter(None, edge_crop.groups()):
            x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
            if x_crop:
@@ -252,7 +295,7 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
                else:
                    box[1] = offset
                    box[3] = source_y - (diff_y - offset)
        # See if the image should be "smart cropped".
        # See if the image should be 'smart cropped".
        elif crop == 'smart':
            left = top = 0
            right, bottom = source_x, source_y
@@ -274,7 +317,7 @@ def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None,
            diff_y = diff_y - add - remove
            box = (left, top, right, bottom)
        # Finally, crop the image!
        im = im.crop(box)
        im = FrameAware(im).crop(box)
    return im


@@ -291,9 +334,9 @@ def filters(im, detail=False, sharpen=False, **kwargs):
"""
if detail:
im = im.filter(ImageFilter.DETAIL)
im = FrameAware(im).filter(ImageFilter.DETAIL)
if sharpen:
im = im.filter(ImageFilter.SHARPEN)
im = FrameAware(im).filter(ImageFilter.SHARPEN)
return im


@@ -320,6 +363,20 @@ def background(im, size, background=None, **kwargs):
    new_im = Image.new('RGB', size, background)
    if new_im.mode != im.mode:
        new_im = new_im.convert(im.mode)
    offset = (size[0]-x)//2, (size[1]-y)//2
    new_im.paste(im, offset)
    return new_im
    offset = (size[0] - x) // 2, (size[1] - y) // 2
    # animated format (gif/webp/...) support manually added.
    if not getattr(im, 'is_animated', False):
        new_im.paste(im, offset)
        return new_im
    else:
        frames = []
        for i in range(im.n_frames):
            im.seek(i)
            copied_new_im = new_im.copy()
            copied_new_im.paste(im, offset)
            frames.append(copied_new_im)
        write_to = BytesIO()
        frames[0].save(
            write_to, format=im.format, save_all=True, append_images=frames[1:]
        )
        return Image.open(write_to)
83 changes: 83 additions & 0 deletions easy_thumbnails/tests/test_animated_formats.py
@@ -0,0 +1,83 @@
from io import BytesIO
from PIL import Image, ImageChops, ImageDraw
from easy_thumbnails import processors
from unittest import TestCase


def create_animated_image(mode='RGB', format="gif", size=(1000, 1000), no_frames=6):
    frames = []
    for i in range(no_frames):
        image = Image.new(mode, size, (255, 255, 255))
        draw = ImageDraw.Draw(image)
        x_bit, y_bit = size[0] // 40 * i, size[1] // 40 * i
        draw.rectangle((x_bit, y_bit * 2, x_bit * 7, y_bit * 3), 'red')
        draw.rectangle((x_bit * 2, y_bit, x_bit * 3, y_bit * 8), 'yellow')
        frames.append(image)
    write_to = BytesIO()
    frames[0].save(
        write_to, format=format, save_all=True, append_images=frames[1:]
    )
    im = Image.open(write_to)
    # for debugging
    # with open(f"animated{no_frames}.{format}", "wb") as f:
    #     write_to.seek(0)
    #     f.write(write_to.read())
    return im


class AnimatedFormatProcessorsTests(TestCase):

    def test_scale(self):
        no_frames = 20
        im = create_animated_image(no_frames=no_frames)
        frames_count = im.n_frames
        self.assertEqual(frames_count, no_frames)
        processed = processors.scale_and_crop(im, (100, 100))
        processed_frames_count = processed.n_frames
        self.assertEqual(frames_count, processed_frames_count)
        self.assertEqual(processed.size, (100, 100))

    def test_scale_crop(self):
        frames = 9
        im = create_animated_image(no_frames=frames)
        frames_count = im.n_frames
        self.assertEqual(frames_count, frames)
        processed = processors.scale_and_crop(im, (900, 950), crop=True)
        processed_frames_count = processed.n_frames
        self.assertEqual(frames_count, processed_frames_count)
        self.assertEqual(processed.size, (900, 950))

    def test_colorspace(self):
        # to have a color conversion
        no_frames = 6
        im = create_animated_image(format="png")
        frames_count = im.n_frames
        self.assertEqual(frames_count, no_frames)
        processed = processors.colorspace(im, bw=True)
        processed_frames_count = processed.n_frames
        # indeed processed?
        self.assertEqual(frames_count, processed_frames_count)
        self.assertEqual(processed.mode, "L")
        self.assertEqual(processed.size, (1000, 1000))

    def test_filter(self):
        no_frames = 12
        im = create_animated_image(format="webp", no_frames=no_frames)
        frames_count = im.n_frames
        self.assertEqual(frames_count, no_frames)
        processed = processors.filters(im, detail=True, sharpen=True)
        processed_frames_count = processed.n_frames
        # indeed processed?
        self.assertEqual(frames_count, processed_frames_count)
        self.assertEqual(processed.size, (1000, 1000))

    def test_background(self):
        no_frames = 9
        im = create_animated_image(format="webp", no_frames=no_frames)
        frames_count = im.n_frames
        self.assertEqual(frames_count, no_frames)
        processed = processors.background(im, background="#ff00ff", size=(1000, 1800))
        processed_frames_count = processed.n_frames
        # indeed processed?
        self.assertEqual(frames_count, processed_frames_count)
        self.assertEqual(processed.size, (1000, 1800))
