Functional transforms (augmentations.functional)

def add_fog (img, fog_intensity, alpha_coef, fog_particle_positions, fog_particle_radiuses) [view source on GitHub]

Add fog to the input image.

Parameters:

Name Type Description
img np.ndarray

Input image.

fog_intensity float

Intensity of the fog effect, between 0 and 1.

alpha_coef float

Base alpha (transparency) value for fog particles.

fog_particle_positions list[tuple[int, int]]

List of (x, y) coordinates for fog particles.

fog_particle_radiuses list[int]

List of radii for each fog particle.

Returns:

Type Description
np.ndarray

Image with added fog effect.
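
A minimal usage sketch (not part of the original documentation; the particle positions and radii below are illustrative values, which the library normally samples for you):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> positions = [(20, 30), (50, 50), (80, 40)]
>>> radii = [10, 15, 12]
>>> foggy_image = A.functional.add_fog(
...     image, fog_intensity=0.8, alpha_coef=0.1,
...     fog_particle_positions=positions, fog_particle_radiuses=radii
... )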

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@clipped
@preserve_channel_dim
def add_fog(
    img: np.ndarray,
    fog_intensity: float,
    alpha_coef: float,
    fog_particle_positions: list[tuple[int, int]],
    fog_particle_radiuses: list[int],
) -> np.ndarray:
    """Add fog to the input image.

    Args:
        img (np.ndarray): Input image.
        fog_intensity (float): Intensity of the fog effect, between 0 and 1.
        alpha_coef (float): Base alpha (transparency) value for fog particles.
        fog_particle_positions (list[tuple[int, int]]): List of (x, y) coordinates for fog particles.
        fog_particle_radiuses (list[int]): List of radii for each fog particle.

    Returns:
        np.ndarray: Image with added fog effect.
    """
    height, width = img.shape[:2]
    num_channels = get_num_channels(img)

    fog_layer = np.zeros((height, width, num_channels), dtype=np.uint8)
    max_value = MAX_VALUES_BY_DTYPE[np.uint8]

    for (x, y), radius in zip(fog_particle_positions, fog_particle_radiuses):
        color = max_value if num_channels == 1 else (max_value,) * num_channels
        cv2.circle(
            fog_layer,
            center=(x, y),
            radius=radius,
            color=color,
            thickness=-1,
        )

    # Apply gaussian blur to the fog layer
    fog_layer = cv2.GaussianBlur(fog_layer, (25, 25), 0)

    # Blend the fog layer with the original image
    alpha = np.mean(fog_layer, axis=2, keepdims=True) / max_value * alpha_coef * fog_intensity

    result = img * (1 - alpha) + fog_layer * alpha

    return clip(result, np.uint8, inplace=True)

def add_rain (img, slant, drop_length, drop_width, drop_color, blur_value, brightness_coefficient, rain_drops) [view source on GitHub]

Adds rain drops to the image.

Parameters:

Name Type Description
img np.ndarray

Input image.

slant int

The slant of the rain drops, given as a horizontal pixel offset between each drop's start and end points.

drop_length int

The length of each rain drop.

drop_width int

The width of each rain drop.

drop_color tuple[int, int, int]

The color of the rain drops in RGB format.

blur_value int

The size of the kernel used to blur the image. Rainy views are blurry.

brightness_coefficient float

Coefficient to adjust the brightness of the image. Rainy days are usually shady.

rain_drops list[tuple[int, int]]

A list of tuples where each tuple represents the (x, y) coordinates of the starting point of a rain drop.

Returns:

Type Description
np.ndarray

Image with rain effect added.
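
A minimal usage sketch (not part of the original documentation; the drop coordinates are illustrative values, which the library normally samples for you):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> drops = [(10, 0), (40, 10), (70, 5)]
>>> rainy_image = A.functional.add_rain(
...     image, slant=5, drop_length=15, drop_width=1,
...     drop_color=(200, 200, 200), blur_value=3,
...     brightness_coefficient=0.8, rain_drops=drops
... )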

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@preserve_channel_dim
def add_rain(
    img: np.ndarray,
    slant: int,
    drop_length: int,
    drop_width: int,
    drop_color: tuple[int, int, int],
    blur_value: int,
    brightness_coefficient: float,
    rain_drops: list[tuple[int, int]],
) -> np.ndarray:
    """Adds rain drops to the image.

    Args:
        img (np.ndarray): Input image.
        slant (int): The slant of the rain drops, given as a horizontal pixel offset
            between each drop's start and end points.
        drop_length (int): The length of each rain drop.
        drop_width (int): The width of each rain drop.
        drop_color (tuple[int, int, int]): The color of the rain drops in RGB format.
        blur_value (int): The size of the kernel used to blur the image. Rainy views are blurry.
        brightness_coefficient (float): Coefficient to adjust the brightness of the image. Rainy days are usually shady.
        rain_drops (list[tuple[int, int]]): A list of tuples where each tuple represents the (x, y)
            coordinates of the starting point of a rain drop.

    Returns:
        np.ndarray: Image with rain effect added.

    Reference:
        https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
    """
    for rain_drop_x0, rain_drop_y0 in rain_drops:
        rain_drop_x1 = rain_drop_x0 + slant
        rain_drop_y1 = rain_drop_y0 + drop_length

        cv2.line(
            img,
            (rain_drop_x0, rain_drop_y0),
            (rain_drop_x1, rain_drop_y1),
            drop_color,
            drop_width,
        )

    img = cv2.blur(img, (blur_value, blur_value))  # rainy views are blurry
    image_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    image_hsv[:, :, 2] *= brightness_coefficient

    return cv2.cvtColor(image_hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)

def add_shadow (img, vertices_list, intensities) [view source on GitHub]

Add shadows to the image by reducing the intensity of the pixel values in specified regions.

Parameters:

Name Type Description
img np.ndarray

Input image. Multichannel images are supported.

vertices_list list[np.ndarray]

List of vertices for shadow polygons.

intensities np.ndarray

Array of shadow intensities. Range is [0, 1].

Returns:

Type Description
np.ndarray

Image with shadows added.
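
A minimal usage sketch (not part of the original documentation; the polygon and intensity are illustrative values):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> vertices = [np.array([[10, 10], [80, 10], [80, 80], [10, 80]], dtype=np.int32)]
>>> intensities = np.array([0.5])
>>> shadowed_image = A.functional.add_shadow(image, vertices, intensities)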

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@preserve_channel_dim
def add_shadow(img: np.ndarray, vertices_list: list[np.ndarray], intensities: np.ndarray) -> np.ndarray:
    """Add shadows to the image by reducing the intensity of the pixel values in specified regions.

    Args:
        img (np.ndarray): Input image. Multichannel images are supported.
        vertices_list (list[np.ndarray]): List of vertices for shadow polygons.
        intensities (np.ndarray): Array of shadow intensities. Range is [0, 1].

    Returns:
        np.ndarray: Image with shadows added.

    Reference:
        https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
    """
    num_channels = get_num_channels(img)
    max_value = MAX_VALUES_BY_DTYPE[np.uint8]

    img_shadowed = img.copy()

    # Iterate over the vertices and intensity list
    for vertices, shadow_intensity in zip(vertices_list, intensities):
        # Create mask for the current shadow polygon
        mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
        cv2.fillPoly(mask, [vertices], (max_value,))

        # Duplicate the mask to have the same number of channels as the image
        mask = np.repeat(mask, num_channels, axis=2)

        # Apply shadow to the channels directly
        # It could be tempting to convert to HLS and apply the shadow to the L channel, but it creates artifacts
        shadowed_indices = mask[:, :, 0] == max_value
        darkness = 1 - shadow_intensity
        img_shadowed[shadowed_indices] = clip(
            img_shadowed[shadowed_indices] * darkness,
            np.uint8,
            inplace=True,
        )

    return img_shadowed

def add_snow_bleach (img, snow_point, brightness_coeff) [view source on GitHub]

Adds a simple snow effect to the image by bleaching out pixels.

This function simulates a basic snow effect by increasing the brightness of pixels whose lightness falls below a certain threshold (snow_point). It operates in the HLS color space to modify the lightness channel.

Parameters:

Name Type Description
img np.ndarray

Input image. Can be either RGB uint8 or float32.

snow_point float

A float in the range [0, 1], scaled and adjusted to determine the threshold for pixel modification. Higher values raise the threshold and produce a stronger snow effect.

brightness_coeff float

Coefficient applied to increase the brightness of pixels below the snow_point threshold. Larger values lead to more pronounced snow effects. Should be greater than 1.0 for a visible effect.

Returns:

Type Description
np.ndarray

Image with simulated snow effect. The output has the same dtype as the input.

Note

  • This function converts the image to the HLS color space to modify the lightness channel.
  • The snow effect is created by selectively increasing the brightness of pixels.
  • This method tends to create a 'bleached' look, which may not be as realistic as more advanced snow simulation techniques.
  • The function automatically handles both uint8 and float32 input images.

The snow effect is created through the following steps:

  1. Convert the image from RGB to HLS color space.
  2. Adjust the snow_point threshold.
  3. Increase the lightness of pixels below the threshold.
  4. Convert the image back to RGB.

Mathematical Formulation: Let L be the lightness channel in HLS space. For each pixel (i, j), if L[i, j] < snow_point, then L[i, j] = L[i, j] * brightness_coeff.

Examples:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> snowy_image = A.functional.add_snow_bleach(image, snow_point=0.5, brightness_coeff=1.5)
Source code in albumentations/augmentations/functional.py
Python
@uint8_io
def add_snow_bleach(img: np.ndarray, snow_point: float, brightness_coeff: float) -> np.ndarray:
    """Adds a simple snow effect to the image by bleaching out pixels.

    This function simulates a basic snow effect by increasing the brightness of pixels
    that are below a certain threshold (snow_point). It operates in the HLS color space
    to modify the lightness channel.

    Args:
        img (np.ndarray): Input image. Can be either RGB uint8 or float32.
        snow_point (float): A float in the range [0, 1], scaled and adjusted to determine
            the threshold for pixel modification. Higher values result in a stronger snow effect.
        brightness_coeff (float): Coefficient applied to increase the brightness of pixels
            below the snow_point threshold. Larger values lead to more pronounced snow effects.
            Should be greater than 1.0 for a visible effect.

    Returns:
        np.ndarray: Image with simulated snow effect. The output has the same dtype as the input.

    Note:
        - This function converts the image to the HLS color space to modify the lightness channel.
        - The snow effect is created by selectively increasing the brightness of pixels.
        - This method tends to create a 'bleached' look, which may not be as realistic as more
          advanced snow simulation techniques.
        - The function automatically handles both uint8 and float32 input images.

    The snow effect is created through the following steps:
    1. Convert the image from RGB to HLS color space.
    2. Adjust the snow_point threshold.
    3. Increase the lightness of pixels below the threshold.
    4. Convert the image back to RGB.

    Mathematical Formulation:
        Let L be the lightness channel in HLS space.
        For each pixel (i, j):
        If L[i, j] < snow_point:
            L[i, j] = L[i, j] * brightness_coeff

    Examples:
        >>> import numpy as np
        >>> import albumentations as A
        >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
        >>> snowy_image = A.functional.add_snow_bleach(image, snow_point=0.5, brightness_coeff=1.5)

    References:
        - HLS Color Space: https://en.wikipedia.org/wiki/HSL_and_HSV
        - Original implementation: https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
    """
    max_value = MAX_VALUES_BY_DTYPE[np.uint8]

    snow_point *= max_value / 2
    snow_point += max_value / 3

    image_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    image_hls = np.array(image_hls, dtype=np.float32)

    image_hls[:, :, 1][image_hls[:, :, 1] < snow_point] *= brightness_coeff

    image_hls[:, :, 1] = clip(image_hls[:, :, 1], np.uint8, inplace=True)

    image_hls = np.array(image_hls, dtype=np.uint8)

    return cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)

def add_snow_texture (img, snow_point, brightness_coeff, snow_texture, sparkle_mask) [view source on GitHub]

Add a realistic snow effect to the input image.

This function simulates snowfall by applying multiple visual effects to the image, including brightness adjustment, snow texture overlay, depth simulation, and color tinting. The result is a more natural-looking snow effect compared to simple pixel bleaching methods.

Parameters:

Name Type Description
img np.ndarray

Input image in RGB format.

snow_point float

Coefficient that controls the amount and intensity of snow. Should be in the range [0, 1], where 0 means no snow and 1 means maximum snow effect.

brightness_coeff float

Coefficient for brightness adjustment to simulate the reflective nature of snow. Should be in the range [0, 1], where higher values result in a brighter image.

snow_texture np.ndarray

Snow texture.

sparkle_mask np.ndarray

Sparkle mask.

Returns:

Type Description
np.ndarray

Image with added snow effect. The output has the same dtype as the input.

Note

  • The function first converts the image to HSV color space for better control over brightness and color adjustments.
  • A snow texture is generated using Gaussian noise and then filtered for a more natural appearance.
  • A depth effect is simulated, with more snow at the top of the image and less at the bottom.
  • A slight blue tint is added to simulate the cool color of snow.
  • Random sparkle effects are added to simulate light reflecting off snow crystals.

The snow effect is created through the following steps:

  1. Brightness adjustment in HSV space
  2. Generation of a snow texture using Gaussian noise
  3. Application of a depth effect to the snow texture
  4. Blending of the snow texture with the original image
  5. Addition of a cool blue tint
  6. Addition of sparkle effects

Examples:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> snow_texture = np.random.rand(100, 100).astype(np.float32)
>>> sparkle_mask = np.random.rand(100, 100) > 0.99
>>> snowy_image = A.functional.add_snow_texture(
...     image, snow_point=0.5, brightness_coeff=0.2,
...     snow_texture=snow_texture, sparkle_mask=sparkle_mask
... )

Note

This function works with both uint8 and float32 image types, automatically handling the conversion between them.

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
def add_snow_texture(
    img: np.ndarray,
    snow_point: float,
    brightness_coeff: float,
    snow_texture: np.ndarray,
    sparkle_mask: np.ndarray,
) -> np.ndarray:
    """Add a realistic snow effect to the input image.

    This function simulates snowfall by applying multiple visual effects to the image,
    including brightness adjustment, snow texture overlay, depth simulation, and color tinting.
    The result is a more natural-looking snow effect compared to simple pixel bleaching methods.

    Args:
        img (np.ndarray): Input image in RGB format.
        snow_point (float): Coefficient that controls the amount and intensity of snow.
            Should be in the range [0, 1], where 0 means no snow and 1 means maximum snow effect.
        brightness_coeff (float): Coefficient for brightness adjustment to simulate the
            reflective nature of snow. Should be in the range [0, 1], where higher values
            result in a brighter image.
        snow_texture (np.ndarray): Snow texture.
        sparkle_mask (np.ndarray): Sparkle mask.

    Returns:
        np.ndarray: Image with added snow effect. The output has the same dtype as the input.

    Note:
        - The function first converts the image to HSV color space for better control over
          brightness and color adjustments.
        - A snow texture is generated using Gaussian noise and then filtered for a more
          natural appearance.
        - A depth effect is simulated, with more snow at the top of the image and less at the bottom.
        - A slight blue tint is added to simulate the cool color of snow.
        - Random sparkle effects are added to simulate light reflecting off snow crystals.

    The snow effect is created through the following steps:
    1. Brightness adjustment in HSV space
    2. Generation of a snow texture using Gaussian noise
    3. Application of a depth effect to the snow texture
    4. Blending of the snow texture with the original image
    5. Addition of a cool blue tint
    6. Addition of sparkle effects

    Examples:
        >>> import numpy as np
        >>> import albumentations as A
        >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
        >>> snow_texture = np.random.rand(100, 100).astype(np.float32)
        >>> sparkle_mask = np.random.rand(100, 100) > 0.99
        >>> snowy_image = A.functional.add_snow_texture(
        ...     image, snow_point=0.5, brightness_coeff=0.2,
        ...     snow_texture=snow_texture, sparkle_mask=sparkle_mask
        ... )

    Note:
        This function works with both uint8 and float32 image types, automatically
        handling the conversion between them.

    References:
        - Perlin Noise: https://en.wikipedia.org/wiki/Perlin_noise
        - HSV Color Space: https://en.wikipedia.org/wiki/HSL_and_HSV
    """
    max_value = MAX_VALUES_BY_DTYPE[np.uint8]

    # Convert to HSV for better color control
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)

    # Increase brightness
    img_hsv[:, :, 2] = np.clip(img_hsv[:, :, 2] * (1 + brightness_coeff * snow_point), 0, max_value)

    # Generate snow texture
    snow_texture = cv2.GaussianBlur(snow_texture, (0, 0), sigmaX=1, sigmaY=1)

    # Create depth effect for snow simulation
    # More snow accumulates at the top of the image, gradually decreasing towards the bottom
    # This simulates natural snow distribution on surfaces
    # The effect is achieved using a linear gradient from 1 (full snow) to 0.2 (less snow)
    rows = img.shape[0]
    depth_effect = np.linspace(1, 0.2, rows)[:, np.newaxis]
    snow_texture *= depth_effect

    # Apply snow texture
    snow_layer = (np.dstack([snow_texture] * 3) * max_value * snow_point).astype(np.float32)

    # Blend snow with original image
    img_with_snow = cv2.add(img_hsv, snow_layer)

    # Add a slight blue tint to simulate cool snow color
    blue_tint = np.full_like(img_with_snow, (0.6, 0.75, 1))  # Slight blue in HSV

    img_with_snow = cv2.addWeighted(img_with_snow, 0.85, blue_tint, 0.15 * snow_point, 0)

    # Convert back to RGB
    img_with_snow = cv2.cvtColor(img_with_snow.astype(np.uint8), cv2.COLOR_HSV2RGB)

    # Add some sparkle effects for snow glitter
    img_with_snow[sparkle_mask] = [max_value, max_value, max_value]

    return img_with_snow

def add_sun_flare_overlay (img, flare_center, src_radius, src_color, circles) [view source on GitHub]

Add a sun flare effect to an image using a simple overlay technique.

This function creates a basic sun flare effect by overlaying multiple semi-transparent circles of varying sizes and intensities on the input image. The effect simulates a simple lens flare caused by bright light sources.

Parameters:

Name Type Description
img np.ndarray

The input image.

flare_center tuple[float, float]

(x, y) coordinates of the flare center in pixel coordinates.

src_radius int

The radius of the main sun circle in pixels.

src_color ColorType

The color of the sun, represented as a tuple of RGB values.

circles list[Any]

A list of tuples, each representing a circle that contributes to the flare effect. Each tuple contains:

  • alpha (float): The transparency of the circle (0.0 to 1.0).
  • center (tuple[int, int]): (x, y) coordinates of the circle center.
  • radius (int): The radius of the circle.
  • color (tuple[int, int, int]): RGB color of the circle.

Returns:

Type Description
np.ndarray

The output image with the sun flare effect added.

Note

  • This function uses a simple alpha blending technique to overlay flare elements.
  • The main sun is created as a gradient circle, fading from the center outwards.
  • Additional flare circles are added along an imaginary line from the sun's position.
  • This method is computationally efficient but may produce less realistic results compared to more advanced techniques.

The flare effect is created through the following steps:

  1. Create an overlay image and output image as copies of the input.
  2. Add smaller flare circles to the overlay.
  3. Blend the overlay with the output image using alpha compositing.
  4. Add the main sun circle with a radial gradient.

Examples:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> flare_center = (50, 50)
>>> src_radius = 20
>>> src_color = (255, 255, 200)
>>> circles = [
...     (0.1, (60, 60), 5, (255, 200, 200)),
...     (0.2, (70, 70), 3, (200, 255, 200))
... ]
>>> flared_image = A.functional.add_sun_flare_overlay(
...     image, flare_center, src_radius, src_color, circles
... )
Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@preserve_channel_dim
def add_sun_flare_overlay(
    img: np.ndarray,
    flare_center: tuple[float, float],
    src_radius: int,
    src_color: ColorType,
    circles: list[Any],
) -> np.ndarray:
    """Add a sun flare effect to an image using a simple overlay technique.

    This function creates a basic sun flare effect by overlaying multiple semi-transparent
    circles of varying sizes and intensities on the input image. The effect simulates
    a simple lens flare caused by bright light sources.

    Args:
        img (np.ndarray): The input image.
        flare_center (tuple[float, float]): (x, y) coordinates of the flare center
            in pixel coordinates.
        src_radius (int): The radius of the main sun circle in pixels.
        src_color (ColorType): The color of the sun, represented as a tuple of RGB values.
        circles (list[Any]): A list of tuples, each representing a circle that contributes
            to the flare effect. Each tuple contains:
            - alpha (float): The transparency of the circle (0.0 to 1.0).
            - center (tuple[int, int]): (x, y) coordinates of the circle center.
            - radius (int): The radius of the circle.
            - color (tuple[int, int, int]): RGB color of the circle.

    Returns:
        np.ndarray: The output image with the sun flare effect added.

    Note:
        - This function uses a simple alpha blending technique to overlay flare elements.
        - The main sun is created as a gradient circle, fading from the center outwards.
        - Additional flare circles are added along an imaginary line from the sun's position.
        - This method is computationally efficient but may produce less realistic results
          compared to more advanced techniques.

    The flare effect is created through the following steps:
    1. Create an overlay image and output image as copies of the input.
    2. Add smaller flare circles to the overlay.
    3. Blend the overlay with the output image using alpha compositing.
    4. Add the main sun circle with a radial gradient.

    Examples:
        >>> import numpy as np
        >>> import albumentations as A
        >>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
        >>> flare_center = (50, 50)
        >>> src_radius = 20
        >>> src_color = (255, 255, 200)
        >>> circles = [
        ...     (0.1, (60, 60), 5, (255, 200, 200)),
        ...     (0.2, (70, 70), 3, (200, 255, 200))
        ... ]
        >>> flared_image = A.functional.add_sun_flare_overlay(
        ...     image, flare_center, src_radius, src_color, circles
        ... )

    References:
        - Alpha compositing: https://en.wikipedia.org/wiki/Alpha_compositing
        - Lens flare: https://en.wikipedia.org/wiki/Lens_flare
    """
    overlay = img.copy()
    output = img.copy()

    for alpha, (x, y), rad3, (r_color, g_color, b_color) in circles:
        cv2.circle(overlay, (x, y), rad3, (r_color, g_color, b_color), -1)
        output = add_weighted(overlay, alpha, output, 1 - alpha)

    point = [int(x) for x in flare_center]

    overlay = output.copy()
    num_times = src_radius // 10
    alpha = np.linspace(0.0, 1, num=num_times)
    rad = np.linspace(1, src_radius, num=num_times)

    for i in range(num_times):
        cv2.circle(overlay, point, int(rad[i]), src_color, -1)
        alp = alpha[num_times - i - 1] * alpha[num_times - i - 1] * alpha[num_times - i - 1]
        output = add_weighted(overlay, alp, output, 1 - alp)

    return output

def add_sun_flare_physics_based (img, flare_center, src_radius, src_color, circles) [view source on GitHub]

Add a more realistic sun flare effect to the image.

This function creates a complex sun flare effect by simulating various optical phenomena that occur in real camera lenses when capturing bright light sources. The result is a more realistic and physically plausible lens flare effect.

Parameters:

Name Type Description
img np.ndarray

Input image.

flare_center tuple[int, int]

(x, y) coordinates of the sun's center in pixels.

src_radius int

Radius of the main sun circle in pixels.

src_color tuple[int, int, int]

Color of the sun in RGB format.

circles list[Any]

List of tuples, each representing a flare circle with parameters (alpha, center, size, color):

  • alpha (float): Transparency of the circle (0.0 to 1.0).
  • center (tuple[int, int]): (x, y) coordinates of the circle center.
  • size (float): Size factor for the circle radius.
  • color (tuple[int, int, int]): RGB color of the circle.

Returns:

Type Description
np.ndarray

Image with added sun flare effect.

Note

This function implements several techniques to create a more realistic flare:

  1. Separate flare layer: Allows for complex manipulations of the flare effect.
  2. Lens diffraction spikes: Simulates light diffraction in camera aperture.
  3. Radial gradient mask: Creates natural fading of the flare from the center.
  4. Gaussian blur: Softens the flare for a more natural glow effect.
  5. Chromatic aberration: Simulates color fringing often seen in real lens flares.
  6. Screen blending: Provides a more realistic blending of the flare with the image.

The flare effect is created through the following steps:

  1. Create a separate flare layer.
  2. Add the main sun circle and diffraction spikes to the flare layer.
  3. Add additional flare circles based on the input parameters.
  4. Apply Gaussian blur to soften the flare.
  5. Create and apply a radial gradient mask for natural fading.
  6. Simulate chromatic aberration by applying different blurs to color channels.
  7. Blend the flare with the original image using screen blending mode.

Examples:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [1000, 1000, 3], dtype=np.uint8)
>>> flare_center = (500, 500)
>>> src_radius = 50
>>> src_color = (255, 255, 200)
>>> circles = [
...     (0.1, (550, 550), 10, (255, 200, 200)),
...     (0.2, (600, 600), 5, (200, 255, 200))
... ]
>>> flared_image = A.functional.add_sun_flare_physics_based(
...     image, flare_center, src_radius, src_color, circles
... )
Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@clipped
def add_sun_flare_physics_based(
    img: np.ndarray,
    flare_center: tuple[int, int],
    src_radius: int,
    src_color: tuple[int, int, int],
    circles: list[Any],
) -> np.ndarray:
    """Add a more realistic sun flare effect to the image.

    This function creates a complex sun flare effect by simulating various optical phenomena
    that occur in real camera lenses when capturing bright light sources. The result is a
    more realistic and physically plausible lens flare effect.

    Args:
        img (np.ndarray): Input image.
        flare_center (tuple[int, int]): (x, y) coordinates of the sun's center in pixels.
        src_radius (int): Radius of the main sun circle in pixels.
        src_color (tuple[int, int, int]): Color of the sun in RGB format.
        circles (list[Any]): List of tuples, each representing a flare circle with parameters:
            (alpha, center, size, color)
            - alpha (float): Transparency of the circle (0.0 to 1.0).
            - center (tuple[int, int]): (x, y) coordinates of the circle center.
            - size (float): Size factor for the circle radius.
            - color (tuple[int, int, int]): RGB color of the circle.

    Returns:
        np.ndarray: Image with added sun flare effect.

    Note:
        This function implements several techniques to create a more realistic flare:
        1. Separate flare layer: Allows for complex manipulations of the flare effect.
        2. Lens diffraction spikes: Simulates light diffraction in camera aperture.
        3. Radial gradient mask: Creates natural fading of the flare from the center.
        4. Gaussian blur: Softens the flare for a more natural glow effect.
        5. Chromatic aberration: Simulates color fringing often seen in real lens flares.
        6. Screen blending: Provides a more realistic blending of the flare with the image.

    The flare effect is created through the following steps:
    1. Create a separate flare layer.
    2. Add the main sun circle and diffraction spikes to the flare layer.
    3. Add additional flare circles based on the input parameters.
    4. Apply Gaussian blur to soften the flare.
    5. Create and apply a radial gradient mask for natural fading.
    6. Simulate chromatic aberration by applying different blurs to color channels.
    7. Blend the flare with the original image using screen blending mode.

    Examples:
        >>> import numpy as np
        >>> import albumentations as A
        >>> image = np.random.randint(0, 256, [1000, 1000, 3], dtype=np.uint8)
        >>> flare_center = (500, 500)
        >>> src_radius = 50
        >>> src_color = (255, 255, 200)
        >>> circles = [
        ...     (0.1, (550, 550), 10, (255, 200, 200)),
        ...     (0.2, (600, 600), 5, (200, 255, 200))
        ... ]
        >>> flared_image = A.functional.add_sun_flare_physics_based(
        ...     image, flare_center, src_radius, src_color, circles
        ... )

    References:
        - Lens flare: https://en.wikipedia.org/wiki/Lens_flare
        - Diffraction: https://en.wikipedia.org/wiki/Diffraction
        - Chromatic aberration: https://en.wikipedia.org/wiki/Chromatic_aberration
        - Screen blending: https://en.wikipedia.org/wiki/Blend_modes#Screen
    """
    output = img.copy()
    height, width = img.shape[:2]

    # Create a separate flare layer
    flare_layer = np.zeros_like(img, dtype=np.float32)

    # Add the main sun
    cv2.circle(flare_layer, flare_center, src_radius, src_color, -1)

    # Add lens diffraction spikes
    for angle in [0, 45, 90, 135]:
        end_point = (
            int(flare_center[0] + np.cos(np.radians(angle)) * max(width, height)),
            int(flare_center[1] + np.sin(np.radians(angle)) * max(width, height)),
        )
        cv2.line(flare_layer, flare_center, end_point, src_color, 2)

    # Add flare circles
    for _, center, size, color in circles:
        cv2.circle(flare_layer, center, int(size**0.33), color, -1)

    # Apply gaussian blur to soften the flare
    flare_layer = cv2.GaussianBlur(flare_layer, (0, 0), sigmaX=15, sigmaY=15)

    # Create a radial gradient mask
    y, x = np.ogrid[:height, :width]
    mask = np.sqrt((x - flare_center[0]) ** 2 + (y - flare_center[1]) ** 2)
    mask = 1 - np.clip(mask / (max(width, height) * 0.7), 0, 1)
    mask = np.dstack([mask] * 3)

    # Apply the mask to the flare layer
    flare_layer *= mask

    # Add chromatic aberration
    channels = list(cv2.split(flare_layer))
    channels[0] = cv2.GaussianBlur(channels[0], (0, 0), sigmaX=3, sigmaY=3)  # Blue channel
    channels[2] = cv2.GaussianBlur(channels[2], (0, 0), sigmaX=5, sigmaY=5)  # Red channel
    flare_layer = cv2.merge(channels)

    # Blend the flare with the original image using screen blending
    return 255 - ((255 - output) * (255 - flare_layer) / 255)

def apply_corner_illumination (img, intensity, corner) [view source on GitHub]

Apply corner-based illumination effect.
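
A minimal usage sketch (not part of the original documentation; judging from the source below, corner 0 is top-left, 1 top-right, 2 bottom-right, and 3 bottom-left):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> lit_image = A.functional.apply_corner_illumination(image, intensity=0.2, corner=0)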

Source code in albumentations/augmentations/functional.py
Python
@clipped
def apply_corner_illumination(
    img: np.ndarray,
    intensity: float,
    corner: Literal[0, 1, 2, 3],
) -> np.ndarray:
    """Apply corner-based illumination effect."""
    result, height, width = prepare_illumination_input(img)

    # Create distance map coordinates
    y, x = np.ogrid[:height, :width]

    # Adjust coordinates based on corner
    if corner == 1:  # top-right
        x = width - 1 - x
    elif corner == 2:  # bottom-right  # noqa: PLR2004
        x = width - 1 - x
        y = height - 1 - y
    elif corner == 3:  # bottom-left  # noqa: PLR2004
        y = height - 1 - y

    # Calculate normalized distance
    distance = np.sqrt(x * x + y * y) / np.sqrt(height * height + width * width)
    pattern = 1 - distance  # Invert so corner is brightest

    return apply_illumination_pattern(result, pattern, intensity)

def apply_gaussian_illumination (img, intensity, center, sigma) [view source on GitHub]

Apply gaussian illumination effect.
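
A minimal usage sketch (not part of the original documentation; judging from the source below, center is given as relative (x, y) coordinates in [0, 1] and sigma as a fraction of the larger image side):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> lit_image = A.functional.apply_gaussian_illumination(image, intensity=0.2, center=(0.5, 0.5), sigma=0.25)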

Source code in albumentations/augmentations/functional.py
Python
@clipped
def apply_gaussian_illumination(
    img: np.ndarray,
    intensity: float,
    center: tuple[float, float],
    sigma: float,
) -> np.ndarray:
    """Apply gaussian illumination effect."""
    result, height, width = prepare_illumination_input(img)

    # Create coordinate grid
    y, x = np.ogrid[:height, :width]

    # Calculate gaussian pattern
    center_x = width * center[0]
    center_y = height * center[1]
    sigma_pixels = max(height, width) * sigma
    gaussian = np.exp(-((x - center_x) ** 2 + (y - center_y) ** 2) / (2 * sigma_pixels**2))

    return apply_illumination_pattern(result, gaussian, intensity)

def apply_illumination_pattern (img, pattern, intensity) [view source on GitHub]

Apply illumination pattern to image.

Parameters:

Name Type Description
img np.ndarray

Input image

pattern np.ndarray

Illumination pattern of shape (H, W)

intensity float

Effect strength (-0.2 to 0.2)

Returns:

Type Description
np.ndarray

Image with applied illumination
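
A minimal worked example of the underlying blend, img * (1 + intensity * pattern), using an illustrative 2x2 pattern:

Python
>>> import numpy as np
>>> img = np.full((2, 2), 100.0)
>>> pattern = np.array([[0.0, 1.0], [0.5, 0.0]])
>>> img * (1 + 0.2 * pattern)
array([[100., 120.],
       [110., 100.]])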

Source code in albumentations/augmentations/functional.py
Python
def apply_illumination_pattern(
    img: np.ndarray,
    pattern: np.ndarray,
    intensity: float,
) -> np.ndarray:
    """Apply illumination pattern to image.

    Args:
        img: Input image
        pattern: Illumination pattern of shape (H, W)
        intensity: Effect strength (-0.2 to 0.2)

    Returns:
        Image with applied illumination
    """
    if img.ndim == NUM_MULTI_CHANNEL_DIMENSIONS:
        pattern = pattern[..., np.newaxis]
    return img * (1 + intensity * pattern)

def apply_linear_illumination (img, intensity, angle) [view source on GitHub]

Apply linear gradient illumination effect.
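
A minimal usage sketch (not part of the original documentation; judging from the source below, angle is given in degrees):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> lit_image = A.functional.apply_linear_illumination(image, intensity=0.2, angle=45)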

Source code in albumentations/augmentations/functional.py
Python
@clipped
def apply_linear_illumination(
    img: np.ndarray,
    intensity: float,
    angle: float,
) -> np.ndarray:
    """Apply linear gradient illumination effect."""
    result, height, width = prepare_illumination_input(img)

    # Create gradient coordinates
    y, x = np.ogrid[:height, :width]

    # Calculate gradient direction
    angle_rad = np.deg2rad(angle)
    dx, dy = np.cos(angle_rad), np.sin(angle_rad)

    # Create normalized gradient
    gradient = (x * dx + y * dy) / np.sqrt(height * height + width * width)
    gradient = (gradient + 1) / 2  # Normalize to [0, 1]

    return apply_illumination_pattern(result, gradient, intensity)

def apply_plasma_brightness_contrast (img, brightness_factor, contrast_factor, plasma_pattern) [view source on GitHub]

Apply plasma-based brightness and contrast adjustments.

The plasma pattern is used to create spatially-varying adjustments:

  1. Brightness is modified by adding the pattern * brightness_factor
  2. Contrast is modified by interpolating between mean and original using the pattern * contrast_factor
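
A minimal usage sketch (not part of the original documentation), pairing this function with generate_plasma_pattern from this module:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> rng = np.random.default_rng(0)
>>> pattern = A.functional.generate_plasma_pattern((100, 100), size=64, roughness=3.0, random_generator=rng)
>>> result = A.functional.apply_plasma_brightness_contrast(
...     image, brightness_factor=0.2, contrast_factor=0.2, plasma_pattern=pattern
... )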

Source code in albumentations/augmentations/functional.py
Python
@clipped
def apply_plasma_brightness_contrast(
    img: np.ndarray,
    brightness_factor: float,
    contrast_factor: float,
    plasma_pattern: np.ndarray,
) -> np.ndarray:
    """Apply plasma-based brightness and contrast adjustments.

    The plasma pattern is used to create spatially-varying adjustments:
    1. Brightness is modified by adding the pattern * brightness_factor
    2. Contrast is modified by interpolating between mean and original
       using the pattern * contrast_factor
    """
    result = img.copy()

    max_value = MAX_VALUES_BY_DTYPE[img.dtype]

    # Expand plasma pattern to match image dimensions
    plasma_pattern = plasma_pattern[..., np.newaxis] if img.ndim > MONO_CHANNEL_DIMENSIONS else plasma_pattern

    # Apply brightness adjustment
    if brightness_factor != 0:
        brightness_adjustment = plasma_pattern * brightness_factor * max_value
        result = np.clip(result + brightness_adjustment, 0, max_value)

    # Apply contrast adjustment
    if contrast_factor != 0:
        mean = result.mean()
        contrast_weights = plasma_pattern * contrast_factor + 1
        result = np.clip(mean + (result - mean) * contrast_weights, 0, max_value)

    return result

def apply_plasma_shadow (img, intensity, plasma_pattern) [view source on GitHub]

Apply plasma-based shadow effect by darkening.

Parameters:

Name Type Description
img np.ndarray

Input image

intensity float

Shadow intensity in [0, 1]

plasma_pattern np.ndarray

Generated plasma pattern of shape (H, W)

Returns:

Type Description
np.ndarray

Image with applied shadow effect
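
A minimal usage sketch (not part of the original documentation), reusing generate_plasma_pattern from this module for the pattern:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> rng = np.random.default_rng(0)
>>> pattern = A.functional.generate_plasma_pattern((100, 100), size=64, roughness=3.0, random_generator=rng)
>>> shadowed = A.functional.apply_plasma_shadow(image, intensity=0.5, plasma_pattern=pattern)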

Source code in albumentations/augmentations/functional.py
Python
@clipped
def apply_plasma_shadow(
    img: np.ndarray,
    intensity: float,
    plasma_pattern: np.ndarray,
) -> np.ndarray:
    """Apply plasma-based shadow effect by darkening.

    Args:
        img: Input image
        intensity: Shadow intensity in [0, 1]
        plasma_pattern: Generated plasma pattern of shape (H, W)

    Returns:
        Image with applied shadow effect
    """
    result = img.copy()

    # Expand dimensions to match image
    plasma_pattern = plasma_pattern[..., np.newaxis] if img.ndim > MONO_CHANNEL_DIMENSIONS else plasma_pattern

    # Apply shadow by darkening (multiplying by values < 1)
    shadow_mask = 1 - plasma_pattern * intensity

    return result * shadow_mask

def apply_salt_and_pepper (img, salt_mask, pepper_mask) [view source on GitHub]

Apply salt and pepper noise to image using pre-computed masks.

Parameters:

Name Type Description
img np.ndarray

Input image

salt_mask np.ndarray

Boolean mask for salt (white) noise

pepper_mask np.ndarray

Boolean mask for pepper (black) noise

Returns:

Type Description
np.ndarray

Image with applied salt and pepper noise
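
A minimal usage sketch (not part of the original documentation; the masks below are illustrative Bernoulli samples):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> rng = np.random.default_rng(0)
>>> salt_mask = rng.random((100, 100)) < 0.02
>>> pepper_mask = ~salt_mask & (rng.random((100, 100)) < 0.02)
>>> noisy_image = A.functional.apply_salt_and_pepper(image, salt_mask, pepper_mask)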

Source code in albumentations/augmentations/functional.py
Python
def apply_salt_and_pepper(
    img: np.ndarray,
    salt_mask: np.ndarray,
    pepper_mask: np.ndarray,
) -> np.ndarray:
    """Apply salt and pepper noise to image using pre-computed masks.

    Args:
        img: Input image
        salt_mask: Boolean mask for salt (white) noise
        pepper_mask: Boolean mask for pepper (black) noise

    Returns:
        Image with applied salt and pepper noise
    """
    result = img.copy()

    result[salt_mask] = MAX_VALUES_BY_DTYPE[img.dtype]
    result[pepper_mask] = 0
    return result

def auto_contrast (img) [view source on GitHub]

Apply auto contrast to the image.

Auto contrast enhances image contrast by stretching the intensity range to use the full range while preserving relative intensities.

Parameters:

Name Type Description
img np.ndarray

Input image in uint8 or float32 format.

Returns:

Type Description
np.ndarray

Contrast-enhanced image in the same dtype as input.

Note

The function:

  1. Computes histogram for each channel
  2. Creates cumulative distribution
  3. Normalizes to full intensity range
  4. Uses lookup table for scaling
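
A minimal usage sketch (not part of the original documentation):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(30, 200, [100, 100, 3], dtype=np.uint8)  # low-contrast input
>>> enhanced = A.functional.auto_contrast(image)
>>> assert enhanced.shape == image.shape and enhanced.dtype == image.dtype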

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
def auto_contrast(img: np.ndarray) -> np.ndarray:
    """Apply auto contrast to the image.

    Auto contrast enhances image contrast by stretching the intensity range
    to use the full range while preserving relative intensities.

    Args:
        img: Input image in uint8 or float32 format.

    Returns:
        Contrast-enhanced image in the same dtype as input.

    Note:
        The function:
        1. Computes histogram for each channel
        2. Creates cumulative distribution
        3. Normalizes to full intensity range
        4. Uses lookup table for scaling
    """
    result = img.copy()

    num_channels = get_num_channels(img)

    max_value = MAX_VALUES_BY_DTYPE[img.dtype]

    for i in range(num_channels):
        channel = img[..., i] if img.ndim > MONO_CHANNEL_DIMENSIONS else img

        # Compute histogram
        hist = np.histogram(channel.flatten(), bins=256, range=(0, max_value))[0]

        # Calculate cumulative distribution
        cdf = hist.cumsum()

        # Use separate names so the dtype-level max_value above is not shadowed
        cdf_min = cdf.min()
        cdf_max = cdf.max()

        if cdf_min == cdf_max:
            continue

        # Normalize CDF to the full intensity range
        cdf = (cdf - cdf_min) * max_value / (cdf_max - cdf_min + 1e-6)

        # Create lookup table
        lut = clip(np.around(cdf), np.uint8)

        # Apply lookup table
        if img.ndim > MONO_CHANNEL_DIMENSIONS:
            result[..., i] = sz_lut(channel, lut)
        else:
            result = sz_lut(channel, lut)

    return result

def clahe (img, clip_limit, tile_grid_size) [view source on GitHub]

Apply Contrast Limited Adaptive Histogram Equalization (CLAHE) to the input image.

This function enhances the contrast of the input image using CLAHE. For color images, it converts the image to the LAB color space, applies CLAHE to the L channel, and then converts the image back to RGB.

Parameters:

Name Type Description
img np.ndarray

Input image. Can be grayscale (2D array) or RGB (3D array).

clip_limit float

Threshold for contrast limiting. Higher values give more contrast.

tile_grid_size tuple[int, int]

Size of grid for histogram equalization. Width and height of the grid.

Returns:

Type Description
np.ndarray

Image with CLAHE applied. The output has the same dtype as the input.

Note

  • If the input image is float32, it's temporarily converted to uint8 for processing and then converted back to float32.
  • For color images, CLAHE is applied only to the luminance channel in the LAB color space.

Exceptions:

Type Description
ValueError

If the input image is not 2D or 3D.

Examples:

Python
>>> import numpy as np
>>> img = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
>>> result = clahe(img, clip_limit=2.0, tile_grid_size=(8, 8))
>>> assert result.shape == img.shape
>>> assert result.dtype == img.dtype
Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@preserve_channel_dim
def clahe(img: np.ndarray, clip_limit: float, tile_grid_size: tuple[int, int]) -> np.ndarray:
    """Apply Contrast Limited Adaptive Histogram Equalization (CLAHE) to the input image.

    This function enhances the contrast of the input image using CLAHE. For color images,
    it converts the image to the LAB color space, applies CLAHE to the L channel, and then
    converts the image back to RGB.

    Args:
        img (np.ndarray): Input image. Can be grayscale (2D array) or RGB (3D array).
        clip_limit (float): Threshold for contrast limiting. Higher values give more contrast.
        tile_grid_size (tuple[int, int]): Size of grid for histogram equalization.
            Width and height of the grid.

    Returns:
        np.ndarray: Image with CLAHE applied. The output has the same dtype as the input.

    Note:
        - If the input image is float32, it's temporarily converted to uint8 for processing
          and then converted back to float32.
        - For color images, CLAHE is applied only to the luminance channel in the LAB color space.

    Raises:
        ValueError: If the input image is not 2D or 3D.

    Example:
        >>> import numpy as np
        >>> img = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
        >>> result = clahe(img, clip_limit=2.0, tile_grid_size=(8, 8))
        >>> assert result.shape == img.shape
        >>> assert result.dtype == img.dtype
    """
    img = img.copy()
    clahe_mat = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)

    if is_grayscale_image(img):
        return clahe_mat.apply(img)

    img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)

    img[:, :, 0] = clahe_mat.apply(img[:, :, 0])

    return cv2.cvtColor(img, cv2.COLOR_LAB2RGB)

def diamond_step (pattern, y, x, half, grid_size, roughness, random_generator) [view source on GitHub]

Compute edge value during diamond step.

Source code in albumentations/augmentations/functional.py
Python
def diamond_step(
    pattern: np.ndarray,
    y: int,
    x: int,
    half: int,
    grid_size: int,
    roughness: float,
    random_generator: np.random.Generator,
) -> float:
    """Compute edge value during diamond step."""
    points = []
    if y >= half:
        points.append(pattern[y - half, x])
    if y + half <= grid_size:
        points.append(pattern[y + half, x])
    if x >= half:
        points.append(pattern[y, x - half])
    if x + half <= grid_size:
        points.append(pattern[y, x + half])

    return sum(points) / len(points) + random_offset(half * 2, grid_size, roughness, random_generator)

def equalize (img, mask=None, mode='cv', by_channels=True) [view source on GitHub]

Apply histogram equalization to the input image.

This function enhances the contrast of the input image by equalizing its histogram. It supports both grayscale and color images, and can operate on individual channels or on the luminance channel of the image.

Parameters:

Name Type Description
img np.ndarray

Input image. Can be grayscale (2D array) or RGB (3D array).

mask np.ndarray | None

Optional mask to apply the equalization selectively. If provided, must have the same shape as the input image. Default: None.

mode ImageMode

The backend to use for equalization. Can be either "cv" for OpenCV or "pil" for Pillow-style equalization. Default: "cv".

by_channels bool

If True, applies equalization to each channel independently. If False, converts the image to YCrCb color space and equalizes only the luminance channel. Only applicable to color images. Default: True.

Returns:

Type Description
np.ndarray

Equalized image. The output has the same dtype as the input.

Exceptions:

Type Description
ValueError

If the input image or mask have invalid shapes or types.

Note

  • If the input image is not uint8, it will be temporarily converted to uint8 for processing and then converted back to its original dtype.
  • For color images, when by_channels=False, the image is converted to YCrCb color space, equalized on the Y channel, and then converted back to RGB.
  • The function preserves the original number of channels in the image.

Examples:

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
>>> equalized = A.equalize(image, mode="cv", by_channels=True)
>>> assert equalized.shape == image.shape
>>> assert equalized.dtype == image.dtype
Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@preserve_channel_dim
def equalize(
    img: np.ndarray,
    mask: np.ndarray | None = None,
    mode: ImageMode = "cv",
    by_channels: bool = True,
) -> np.ndarray:
    """Apply histogram equalization to the input image.

    This function enhances the contrast of the input image by equalizing its histogram.
    It supports both grayscale and color images, and can operate on individual channels
    or on the luminance channel of the image.

    Args:
        img (np.ndarray): Input image. Can be grayscale (2D array) or RGB (3D array).
        mask (np.ndarray | None): Optional mask to apply the equalization selectively.
            If provided, must have the same shape as the input image. Default: None.
        mode (ImageMode): The backend to use for equalization. Can be either "cv" for
            OpenCV or "pil" for Pillow-style equalization. Default: "cv".
        by_channels (bool): If True, applies equalization to each channel independently.
            If False, converts the image to YCrCb color space and equalizes only the
            luminance channel. Only applicable to color images. Default: True.

    Returns:
        np.ndarray: Equalized image. The output has the same dtype as the input.

    Raises:
        ValueError: If the input image or mask have invalid shapes or types.

    Note:
        - If the input image is not uint8, it will be temporarily converted to uint8
          for processing and then converted back to its original dtype.
        - For color images, when by_channels=False, the image is converted to YCrCb
          color space, equalized on the Y channel, and then converted back to RGB.
        - The function preserves the original number of channels in the image.

    Example:
        >>> import numpy as np
        >>> import albumentations as A
        >>> image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
        >>> equalized = A.equalize(image, mode="cv", by_channels=True)
        >>> assert equalized.shape == image.shape
        >>> assert equalized.dtype == image.dtype
    """
    _check_preconditions(img, mask, by_channels)

    function = _equalize_pil if mode == "pil" else _equalize_cv

    if is_grayscale_image(img):
        return function(img, _handle_mask(mask))

    if not by_channels:
        result_img = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
        result_img[..., 0] = function(result_img[..., 0], _handle_mask(mask))
        return cv2.cvtColor(result_img, cv2.COLOR_YCrCb2RGB)

    result_img = np.empty_like(img)
    for i in range(NUM_RGB_CHANNELS):
        _mask = _handle_mask(mask, i)
        result_img[..., i] = function(img[..., i], _mask)

    return result_img

def fancy_pca (img, alpha_vector) [view source on GitHub]

Perform 'Fancy PCA' augmentation on an image with any number of channels.

Parameters:

Name Type Description
img np.ndarray

Input image

alpha_vector np.ndarray

Vector of scale factors for each principal component. Should have the same length as the number of channels in the image.

Returns:

Type Description
np.ndarray

Augmented image of the same shape, type, and range as the input.

Image types: uint8, float32

Number of channels: Any

Note

  • This function generalizes the Fancy PCA augmentation to work with any number of channels.
  • It preserves the original range of the image ([0, 255] for uint8, [0, 1] for float32).
  • For single-channel images, the augmentation is applied as a simple scaling of pixel intensity variation.
  • For multi-channel images, PCA is performed on the entire image, treating each pixel as a point in N-dimensional space (where N is the number of channels).
  • The augmentation preserves the correlation between channels while adding controlled noise.
  • Computation time may increase significantly for images with a large number of channels.

Reference

Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). ImageNet classification with deep convolutional neural networks. In Advances in neural information processing systems (pp. 1097-1105).
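
A minimal usage sketch (not part of the original documentation; following the reference above, the alpha values are small draws from N(0, 0.1)):

Python
>>> import numpy as np
>>> import albumentations as A
>>> image = np.random.randint(0, 256, [100, 100, 3], dtype=np.uint8)
>>> alpha_vector = np.random.normal(0, 0.1, size=3)  # one factor per channel
>>> augmented = A.functional.fancy_pca(image, alpha_vector)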

Source code in albumentations/augmentations/functional.py
Python
@float32_io
@clipped
@preserve_channel_dim
def fancy_pca(img: np.ndarray, alpha_vector: np.ndarray) -> np.ndarray:
    """Perform 'Fancy PCA' augmentation on an image with any number of channels.

    Args:
        img (np.ndarray): Input image
        alpha_vector (np.ndarray): Vector of scale factors for each principal component.
                                   Should have the same length as the number of channels in the image.

    Returns:
        np.ndarray: Augmented image of the same shape, type, and range as the input.

    Image types:
        uint8, float32

    Number of channels:
        Any

    Note:
        - This function generalizes the Fancy PCA augmentation to work with any number of channels.
        - It preserves the original range of the image ([0, 255] for uint8, [0, 1] for float32).
        - For single-channel images, the augmentation is applied as a simple scaling of pixel intensity variation.
        - For multi-channel images, PCA is performed on the entire image, treating each pixel
          as a point in N-dimensional space (where N is the number of channels).
        - The augmentation preserves the correlation between channels while adding controlled noise.
        - Computation time may increase significantly for images with a large number of channels.

    Reference:
        Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012).
        ImageNet classification with deep convolutional neural networks.
        In Advances in neural information processing systems (pp. 1097-1105).
    """
    orig_shape = img.shape
    num_channels = get_num_channels(img)

    # Reshape image to 2D array of pixels
    img_reshaped = img.reshape(-1, num_channels)

    # Center the pixel values
    img_mean = np.mean(img_reshaped, axis=0)
    img_centered = img_reshaped - img_mean

    if num_channels == 1:
        # For grayscale images, apply a simple scaling
        std_dev = np.std(img_centered)
        noise = alpha_vector[0] * std_dev * img_centered
    else:
        # Compute covariance matrix
        img_cov = np.cov(img_centered, rowvar=False)

        # Compute eigenvectors & eigenvalues of the covariance matrix
        eig_vals, eig_vecs = np.linalg.eigh(img_cov)

        # Sort eigenvectors by eigenvalues in descending order
        sort_perm = eig_vals[::-1].argsort()
        eig_vals = eig_vals[sort_perm]
        eig_vecs = eig_vecs[:, sort_perm]

        # Create noise vector
        noise = np.dot(np.dot(eig_vecs, np.diag(alpha_vector * eig_vals)), img_centered.T).T

    # Add noise to the image
    img_pca = img_reshaped + noise

    # Reshape back to original shape
    img_pca = img_pca.reshape(orig_shape)

    # Clip values to [0, 1] range
    return np.clip(img_pca, 0, 1, out=img_pca)

def generate_constant_noise (noise_type, shape, params, max_value, random_generator) [view source on GitHub]

Generate one value per channel.

Source code in albumentations/augmentations/functional.py
Python
def generate_constant_noise(
    noise_type: Literal["uniform", "gaussian", "laplace", "beta", "poisson"],
    shape: tuple[int, ...],
    params: dict[str, Any],
    max_value: float,
    random_generator: np.random.Generator,
) -> np.ndarray:
    """Generate one value per channel."""
    num_channels = shape[-1] if len(shape) > MONO_CHANNEL_DIMENSIONS else 1
    return sample_noise(noise_type, (num_channels,), params, max_value, random_generator)

def generate_per_pixel_noise (noise_type, shape, params, max_value, random_generator) [view source on GitHub]

Generate separate noise map for each channel.

Source code in albumentations/augmentations/functional.py
Python
def generate_per_pixel_noise(
    noise_type: Literal["uniform", "gaussian", "laplace", "beta", "poisson"],
    shape: tuple[int, ...],
    params: dict[str, Any],
    max_value: float,
    random_generator: np.random.Generator,
) -> np.ndarray:
    """Generate separate noise map for each channel."""
    return sample_noise(noise_type, shape, params, max_value, random_generator)

def generate_plasma_pattern (target_shape, size, roughness, random_generator) [view source on GitHub]

Generate a plasma fractal pattern using the Diamond-Square algorithm.

The Diamond-Square algorithm creates a natural-looking noise pattern by recursively subdividing a grid and adding random displacements at each step. The roughness parameter controls how quickly the random displacements decrease with each iteration.

Parameters:

Name Type Description
target_shape tuple[int, int]

Final shape (height, width) of the pattern

size int

Initial size of the pattern grid. Will be rounded up to nearest power of 2. Larger values create more detailed patterns.

roughness float

Controls pattern roughness. Higher values create more rough/sharp transitions. Typical values are between 1.0 and 5.0.

random_generator np.random.Generator

NumPy random generator.

Returns:

Type Description
np.ndarray

Normalized plasma pattern array of shape target_shape with values in [0, 1]
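
A minimal usage sketch (not part of the original documentation):

Python
>>> import numpy as np
>>> import albumentations as A
>>> rng = np.random.default_rng(0)
>>> pattern = A.functional.generate_plasma_pattern((100, 100), size=64, roughness=3.0, random_generator=rng)
>>> assert pattern.shape == (100, 100)
>>> assert 0.0 <= pattern.min() and pattern.max() <= 1.0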

Source code in albumentations/augmentations/functional.py
Python
def generate_plasma_pattern(
    target_shape: tuple[int, int],
    size: int,
    roughness: float,
    random_generator: np.random.Generator,
) -> np.ndarray:
    """Generate a plasma fractal pattern using the Diamond-Square algorithm.

    The Diamond-Square algorithm creates a natural-looking noise pattern by recursively
    subdividing a grid and adding random displacements at each step. The roughness
    parameter controls how quickly the random displacements decrease with each iteration.

    Args:
        target_shape: Final shape (height, width) of the pattern
        size: Initial size of the pattern grid. Will be rounded up to nearest power of 2.
            Larger values create more detailed patterns.
        roughness: Controls pattern roughness. Higher values create more rough/sharp transitions.
            Typical values are between 1.0 and 5.0.
        random_generator: NumPy random generator.

    Returns:
        Normalized plasma pattern array of shape target_shape with values in [0, 1]
    """
    # Initialize grid
    grid_size = get_grid_size(size, target_shape)
    pattern = initialize_grid(grid_size, random_generator)

    # Diamond-Square algorithm
    step_size = grid_size
    while step_size > 1:
        half_step = step_size // 2

        # Square step
        for y in range(0, grid_size, step_size):
            for x in range(0, grid_size, step_size):
                if half_step > 0:
                    pattern[y + half_step, x + half_step] = square_step(
                        pattern,
                        y,
                        x,
                        step_size,
                        half_step,
                        roughness,
                        random_generator,
                    )

        # Diamond step
        for y in range(0, grid_size + 1, half_step):
            for x in range((y + half_step) % step_size, grid_size + 1, step_size):
                pattern[y, x] = diamond_step(pattern, y, x, half_step, grid_size, roughness, random_generator)

        step_size = half_step

    min_pattern = pattern.min()

    # Normalize to [0, 1] range
    pattern = (pattern - min_pattern) / (pattern.max() - min_pattern)

    return (
        fgeometric.resize(pattern, target_shape, interpolation=cv2.INTER_LINEAR)
        if pattern.shape != target_shape
        else pattern
    )
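
A minimal usage sketch (shape, size, and roughness values are illustrative; the import path follows the source location noted above):

Python
import numpy as np

from albumentations.augmentations import functional as F

rng = np.random.default_rng(0)

# Higher roughness -> sharper transitions; size is rounded up to a power of 2
pattern = F.generate_plasma_pattern(
    target_shape=(256, 256),
    size=64,
    roughness=3.0,
    random_generator=rng,
)

assert pattern.shape == (256, 256)
assert 0.0 <= pattern.min() <= pattern.max() <= 1.0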

def generate_shared_noise (noise_type, shape, params, max_value, random_generator) [view source on GitHub]

Generate one noise map and broadcast to all channels.

Parameters:

Name Type Description
noise_type Literal['uniform', 'gaussian', 'laplace', 'beta', 'poisson']

Type of noise distribution to use

shape tuple[int, ...]

Shape of the input image (H, W) or (H, W, C)

params dict[str, Any]

Parameters for the noise distribution

max_value float

Maximum value for the noise distribution

random_generator np.random.Generator

NumPy random generator instance

Returns:

Type Description
np.ndarray

Noise array of shape (H, W) or (H, W, C) where the same noise pattern is shared across all channels

Source code in albumentations/augmentations/functional.py
Python
def generate_shared_noise(
    noise_type: Literal["uniform", "gaussian", "laplace", "beta", "poisson"],
    shape: tuple[int, ...],
    params: dict[str, Any],
    max_value: float,
    random_generator: np.random.Generator,
) -> np.ndarray:
    """Generate one noise map and broadcast to all channels.

    Args:
        noise_type: Type of noise distribution to use
        shape: Shape of the input image (H, W) or (H, W, C)
        params: Parameters for the noise distribution
        max_value: Maximum value for the noise distribution
        random_generator: NumPy random generator instance

    Returns:
        Noise array of shape (H, W) or (H, W, C) where the same noise
        pattern is shared across all channels
    """
    # Generate noise for (H, W)
    height, width = shape[:2]
    noise_map = sample_noise(noise_type, (height, width), params, max_value, random_generator)

    # If input is multichannel, broadcast noise to all channels
    if len(shape) > MONO_CHANNEL_DIMENSIONS:
        return np.broadcast_to(noise_map[..., None], shape)
    return noise_map
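
A sketch contrasting the three generation modes documented here (the params keys follow sample_gaussian, documented further below; shapes and ranges are illustrative):

Python
import numpy as np

from albumentations.augmentations import functional as F

rng = np.random.default_rng(0)
params = {"mean_range": (0.0, 0.0), "std_range": (0.05, 0.05)}

shared = F.generate_shared_noise("gaussian", (32, 32, 3), params, 1.0, rng)
per_pixel = F.generate_per_pixel_noise("gaussian", (32, 32, 3), params, 1.0, rng)
constant = F.generate_constant_noise("gaussian", (32, 32, 3), params, 1.0, rng)

# Shared noise repeats the same (H, W) map across channels; per-pixel does not
assert np.array_equal(shared[..., 0], shared[..., 1])
assert not np.array_equal(per_pixel[..., 0], per_pixel[..., 1])
assert constant.shape == (3,)  # one value per channel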

def generate_snow_textures (img_shape, random_generator) [view source on GitHub]

Generate snow texture and sparkle mask.

Parameters:

Name Type Description
img_shape tuple[int, int]

Image shape.

random_generator np.random.Generator

Random generator to use.

Returns:

Type Description
tuple[np.ndarray, np.ndarray]

Tuple of (snow_texture, sparkle_mask) arrays.

Source code in albumentations/augmentations/functional.py
Python
def generate_snow_textures(
    img_shape: tuple[int, int],
    random_generator: np.random.Generator,
) -> tuple[np.ndarray, np.ndarray]:
    """Generate snow texture and sparkle mask.

    Args:
        img_shape (tuple[int, int]): Image shape.
        random_generator (np.random.Generator): Random generator to use.

    Returns:
        tuple[np.ndarray, np.ndarray]: Tuple of (snow_texture, sparkle_mask) arrays.
    """
    # Generate base snow texture
    snow_texture = random_generator.normal(size=img_shape[:2], loc=0.5, scale=0.3)
    snow_texture = cv2.GaussianBlur(snow_texture, (0, 0), sigmaX=1, sigmaY=1)

    # Generate sparkle mask
    sparkle_mask = random_generator.random(img_shape[:2]) > 0.99  # noqa: PLR2004

    return snow_texture, sparkle_mask

def get_fog_particle_radiuses (img_shape, num_particles, fog_intensity, random_generator) [view source on GitHub]

Generate radiuses for fog particles.

Parameters:

Name Type Description
img_shape tuple[int, int]

Image shape.

num_particles int

Number of fog particles.

fog_intensity float

Intensity of the fog effect, between 0 and 1.

random_generator np.random.Generator

Random generator to use.

Returns:

Type Description
list[int]

List of radiuses for each fog particle.

Source code in albumentations/augmentations/functional.py
Python
def get_fog_particle_radiuses(
    img_shape: tuple[int, int],
    num_particles: int,
    fog_intensity: float,
    random_generator: np.random.Generator,
) -> list[int]:
    """Generate radiuses for fog particles.

    Args:
        img_shape (tuple[int, int]): Image shape.
        num_particles (int): Number of fog particles.
        fog_intensity (float): Intensity of the fog effect, between 0 and 1.
        random_generator (np.random.Generator): Random generator to use.

    Returns:
        list[int]: List of radiuses for each fog particle.
    """
    height, width = img_shape[:2]
    max_fog_radius = max(2, int(min(height, width) * 0.1 * fog_intensity))
    min_radius = max(1, max_fog_radius // 2)

    return [random_generator.integers(min_radius, max_fog_radius) for _ in range(num_particles)]

def get_grid_size (size, target_shape) [view source on GitHub]

Round up to nearest power of 2.

Source code in albumentations/augmentations/functional.py
Python
def get_grid_size(size: int, target_shape: tuple[int, int]) -> int:
    """Round up to nearest power of 2."""
    return 2 ** int(np.ceil(np.log2(max(size, *target_shape))))
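
A worked example: with size=100 and target_shape=(256, 300), the largest dimension is 300, and the next power of 2 is 512.

Python
from albumentations.augmentations.functional import get_grid_size

# max(100, 256, 300) = 300; ceil(log2(300)) = 9; 2 ** 9 = 512
assert get_grid_size(100, (256, 300)) == 512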

def get_safe_brightness_contrast_params (alpha, beta, max_value) [view source on GitHub]

Calculate safe alpha and beta values to prevent overflow/underflow.

For any pixel value x, we want: 0 <= alpha * x + beta <= max_value

Parameters:

Name Type Description
alpha float

Contrast factor (1 means no change)

beta float

Brightness offset

max_value float

Maximum allowed value (255 for uint8, 1 for float32)

Returns:

Type Description
tuple[float, float]

Safe (alpha, beta) values that prevent overflow/underflow

Source code in albumentations/augmentations/functional.py
Python
def get_safe_brightness_contrast_params(
    alpha: float,
    beta: float,
    max_value: float,
) -> tuple[float, float]:
    """Calculate safe alpha and beta values to prevent overflow/underflow.

    For any pixel value x, we want: 0 <= alpha * x + beta <= max_value

    Args:
        alpha: Contrast factor (1 means no change)
        beta: Brightness offset
        max_value: Maximum allowed value (255 for uint8, 1 for float32)

    Returns:
        tuple[float, float]: Safe (alpha, beta) values that prevent overflow/underflow
    """
    if alpha > 0:
        # For x = max_value: alpha * max_value + beta <= max_value
        # For x = 0: beta >= 0
        safe_beta = np.clip(beta, 0, max_value)
        # From alpha * max_value + safe_beta <= max_value
        safe_alpha = min(alpha, (max_value - safe_beta) / max_value)
    else:
        # For x = 0: beta <= max_value
        # For x = max_value: alpha * max_value + beta >= 0
        safe_beta = min(beta, max_value)
        # From alpha * max_value + safe_beta >= 0
        safe_alpha = max(alpha, -safe_beta / max_value)

    return safe_alpha, safe_beta
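
A worked example (values are illustrative): with alpha=1.5, beta=50, and max_value=255, beta is already within [0, 255], so safe_beta = 50 and safe_alpha = min(1.5, (255 - 50) / 255) ≈ 0.804, which keeps alpha * 255 + beta at exactly 255.

Python
from albumentations.augmentations.functional import get_safe_brightness_contrast_params

safe_alpha, safe_beta = get_safe_brightness_contrast_params(1.5, 50.0, 255.0)

# No pixel can overflow: 0 <= safe_alpha * x + safe_beta <= 255 for x in [0, 255]
assert safe_alpha * 255.0 + safe_beta <= 255.0
assert safe_beta >= 0.0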

def grayscale_to_multichannel (grayscale_image, num_output_channels=3) [view source on GitHub]

Convert a grayscale image to a multi-channel image.

This function takes a 2D grayscale image or a 3D image with a single channel and converts it to a multi-channel image by repeating the grayscale data across the specified number of channels.

Parameters:

Name Type Description
grayscale_image np.ndarray

Input grayscale image. Can be 2D (height, width) or 3D (height, width, 1).

num_output_channels int

Number of channels in the output image. Defaults to 3.

Returns:

Type Description
np.ndarray

Multi-channel image with shape (height, width, num_channels)

Source code in albumentations/augmentations/functional.py
Python
def grayscale_to_multichannel(grayscale_image: np.ndarray, num_output_channels: int = 3) -> np.ndarray:
    """Convert a grayscale image to a multi-channel image.

    This function takes a 2D grayscale image or a 3D image with a single channel
    and converts it to a multi-channel image by repeating the grayscale data
    across the specified number of channels.

    Args:
        grayscale_image (np.ndarray): Input grayscale image. Can be 2D (height, width)
                                      or 3D (height, width, 1).
        num_output_channels (int, optional): Number of channels in the output image. Defaults to 3.

    Returns:
        np.ndarray: Multi-channel image with shape (height, width, num_channels)
    """
    # If output should be single channel, just squeeze and return
    if num_output_channels == 1:
        return grayscale_image

    # For multi-channel output, squeeze and stack
    squeezed = np.squeeze(grayscale_image)

    return cv2.merge([squeezed] * num_output_channels)

def image_compression (img, quality, image_type) [view source on GitHub]

Apply compression to image.

Parameters:

Name Type Description
img np.ndarray

Input image

quality int

Compression quality (0-100)

image_type Literal['.jpg', '.webp']

Type of compression ('.jpg' or '.webp')

Returns:

Type Description
np.ndarray

Compressed image with same number of channels as input

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@preserve_channel_dim
def image_compression(img: np.ndarray, quality: int, image_type: Literal[".jpg", ".webp"]) -> np.ndarray:
    """Apply compression to image.

    Args:
        img: Input image
        quality: Compression quality (0-100)
        image_type: Type of compression ('.jpg' or '.webp')

    Returns:
        Compressed image with same number of channels as input
    """
    quality_flag = cv2.IMWRITE_JPEG_QUALITY if image_type == ".jpg" else cv2.IMWRITE_WEBP_QUALITY

    num_channels = get_num_channels(img)

    if num_channels == 1:
        # For grayscale, ensure we read back as single channel
        _, encoded_img = cv2.imencode(image_type, img, (int(quality_flag), quality))
        decoded = cv2.imdecode(encoded_img, cv2.IMREAD_GRAYSCALE)
        return decoded[..., np.newaxis]  # Add channel dimension back

    if num_channels == NUM_RGB_CHANNELS:
        # Standard RGB image
        _, encoded_img = cv2.imencode(image_type, img, (int(quality_flag), quality))
        return cv2.imdecode(encoded_img, cv2.IMREAD_UNCHANGED)

    # For 2, 4, or more channels, we need to handle alpha/extra channels separately
    if num_channels == 2:  # noqa: PLR2004
        # For 2 channels, pad to 3 channels and take only first 2 after compression
        padded = np.pad(img, ((0, 0), (0, 0), (0, 1)), mode="constant")
        _, encoded_bgr = cv2.imencode(image_type, padded, (int(quality_flag), quality))
        decoded_bgr = cv2.imdecode(encoded_bgr, cv2.IMREAD_UNCHANGED)
        return decoded_bgr[..., :2]

    # Process first 3 channels together
    bgr = img[..., :NUM_RGB_CHANNELS]
    _, encoded_bgr = cv2.imencode(image_type, bgr, (int(quality_flag), quality))
    decoded_bgr = cv2.imdecode(encoded_bgr, cv2.IMREAD_UNCHANGED)

    if num_channels > NUM_RGB_CHANNELS:
        # Process additional channels one by one
        extra_channels = []
        for i in range(NUM_RGB_CHANNELS, num_channels):
            channel = img[..., i]
            _, encoded = cv2.imencode(image_type, channel, (int(quality_flag), quality))
            decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)
            if len(decoded.shape) == 2:  # noqa: PLR2004
                decoded = decoded[..., np.newaxis]
            extra_channels.append(decoded)

        # Combine BGR with extra channels
        return np.dstack([decoded_bgr, *extra_channels])

    return decoded_bgr
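
A minimal usage sketch (the random image stands in for real data):

Python
import numpy as np

from albumentations.augmentations import functional as F

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)

# Strong JPEG compression; the output keeps the input's shape and channel count
compressed = F.image_compression(img, quality=10, image_type=".jpg")
assert compressed.shape == img.shape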

def initialize_grid (grid_size, random_generator) [view source on GitHub]

Initialize grid with random corners.

Source code in albumentations/augmentations/functional.py
Python
def initialize_grid(grid_size: int, random_generator: np.random.Generator) -> np.ndarray:
    """Initialize grid with random corners."""
    pattern = np.zeros((grid_size + 1, grid_size + 1), dtype=np.float32)
    for corner in [(0, 0), (0, -1), (-1, 0), (-1, -1)]:
        pattern[corner] = random_generator.random()
    return pattern

def iso_noise (image, color_shift, intensity, random_generator) [view source on GitHub]

Apply Poisson noise to an image to simulate camera sensor noise.

Parameters:

Name Type Description
image np.ndarray

Input image. Currently, only RGB images are supported.

color_shift float

The amount of color shift to apply.

intensity float

Multiplication factor for noise values. Values of ~0.5 produce a noticeable, yet acceptable level of noise.

random_generator np.random.Generator

Random generator used for noise generation.

Returns:

Type Description
np.ndarray

The noised image.

Image types: uint8, float32

Number of channels: 3

Source code in albumentations/augmentations/functional.py
Python
@float32_io
@clipped
def iso_noise(
    image: np.ndarray,
    color_shift: float,
    intensity: float,
    random_generator: np.random.Generator,
) -> np.ndarray:
    """Apply poisson noise to an image to simulate camera sensor noise.

    Args:
        image (np.ndarray): Input image. Currently, only RGB images are supported.
        color_shift (float): The amount of color shift to apply.
        intensity (float): Multiplication factor for noise values. Values of ~0.5 produce a noticeable,
                           yet acceptable level of noise.
        random_generator (np.random.Generator): Random generator used for noise generation.

    Returns:
        np.ndarray: The noised image.

    Image types:
        uint8, float32

    Number of channels:
        3
    """
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    _, stddev = cv2.meanStdDev(hls)

    luminance_noise = random_generator.poisson(stddev[1] * intensity, size=hls.shape[:2])
    color_noise = random_generator.normal(0, color_shift * intensity, size=hls.shape[:2])

    hls[..., 0] += color_noise
    hls[..., 1] = add_array(hls[..., 1], luminance_noise * intensity * (1.0 - hls[..., 1]))

    noised_hls = cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)
    return np.clip(noised_hls, 0, 1, out=noised_hls)  # Ensure output is in [0, 1] range
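
A minimal usage sketch (a float32 RGB image in [0, 1] is assumed; the float32_io decorator suggests uint8 input is converted internally):

Python
import numpy as np

from albumentations.augmentations import functional as F

rng = np.random.default_rng(0)
img = rng.random((64, 64, 3), dtype=np.float32)  # RGB in [0, 1]

noised = F.iso_noise(img, color_shift=0.05, intensity=0.5, random_generator=rng)
assert noised.shape == img.shape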

def move_tone_curve (img, low_y, high_y) [view source on GitHub]

Rescales the relationship between bright and dark areas of the image by manipulating its tone curve.

Parameters:

Name Type Description
img np.ndarray

np.ndarray. Any number of channels

low_y float | np.ndarray

per-channel or single y-position of a Bezier control point used to adjust the tone curve, must be in range [0, 1]

high_y float | np.ndarray

per-channel or single y-position of a Bezier control point used to adjust the image tone curve, must be in range [0, 1]

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
def move_tone_curve(
    img: np.ndarray,
    low_y: float | np.ndarray,
    high_y: float | np.ndarray,
) -> np.ndarray:
    """Rescales the relationship between bright and dark areas of the image by manipulating its tone curve.

    Args:
        img: np.ndarray. Any number of channels
        low_y: per-channel or single y-position of a Bezier control point used
            to adjust the tone curve, must be in range [0, 1]
        high_y: per-channel or single y-position of a Bezier control point used
            to adjust the image tone curve, must be in range [0, 1]

    """
    t = np.linspace(0.0, 1.0, 256)

    def evaluate_bez(t: np.ndarray, low_y: float | np.ndarray, high_y: float | np.ndarray) -> np.ndarray:
        one_minus_t = 1 - t
        return (3 * one_minus_t**2 * t * low_y + 3 * one_minus_t * t**2 * high_y + t**3) * 255

    num_channels = get_num_channels(img)

    if np.isscalar(low_y) and np.isscalar(high_y):
        lut = clip(np.rint(evaluate_bez(t, low_y, high_y)), np.uint8, inplace=False)
        return sz_lut(img, lut, inplace=False)
    if isinstance(low_y, np.ndarray) and isinstance(high_y, np.ndarray):
        luts = clip(np.rint(evaluate_bez(t[:, np.newaxis], low_y, high_y).T), np.uint8, inplace=False)
        return cv2.merge(
            [sz_lut(img[:, :, i], np.ascontiguousarray(luts[i]), inplace=False) for i in range(num_channels)],
        )

    raise TypeError(
        f"low_y and high_y must both be of type float or np.ndarray. Got {type(low_y)} and {type(high_y)}",
    )
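
A usage sketch (control-point values are illustrative): a single pair of control points applies one curve to every channel, while arrays apply per-channel curves.

Python
import numpy as np

from albumentations.augmentations import functional as F

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)

# One curve for all channels: lift shadows, compress highlights
brightened = F.move_tone_curve(img, low_y=0.4, high_y=0.9)

# Independent per-channel control points for a 3-channel image
per_channel = F.move_tone_curve(
    img,
    low_y=np.array([0.2, 0.3, 0.4]),
    high_y=np.array([0.8, 0.85, 0.9]),
)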

def posterize (img, bits) [view source on GitHub]

Reduce the number of bits for each color channel.

Parameters:

Name Type Description
img np.ndarray

image to posterize.

bits Literal[0, 1, 2, 3, 4, 5, 6, 7, 8]

number of high bits. Must be in range [0, 8]

Returns:

Type Description
np.ndarray

Image with reduced color channels.

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@clipped
def posterize(img: np.ndarray, bits: Literal[0, 1, 2, 3, 4, 5, 6, 7, 8]) -> np.ndarray:
    """Reduce the number of bits for each color channel.

    Args:
        img: image to posterize.
        bits: number of high bits. Must be in range [0, 8]

    Returns:
        Image with reduced color channels.

    """
    bits_array = np.uint8(bits)

    if not bits_array.shape or len(bits_array) == 1:
        if bits_array == 0:
            return np.zeros_like(img)
        if bits_array == EIGHT:
            return img

        lut = np.arange(0, 256, dtype=np.uint8)
        mask = ~np.uint8(2 ** (8 - bits_array) - 1)
        lut &= mask

        return sz_lut(img, lut, inplace=False)

    result_img = np.empty_like(img)
    for i, channel_bits in enumerate(bits_array):
        if channel_bits == 0:
            result_img[..., i] = np.zeros_like(img[..., i])
        elif channel_bits == EIGHT:
            result_img[..., i] = img[..., i].copy()
        else:
            lut = np.arange(0, 256, dtype=np.uint8)
            mask = ~np.uint8(2 ** (8 - channel_bits) - 1)
            lut &= mask

            result_img[..., i] = sz_lut(img[..., i], lut, inplace=True)

    return result_img
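
A usage sketch: with bits=4 the lookup table keeps only the top four bits of every value, which is equivalent to masking with 0b11110000.

Python
import numpy as np

from albumentations.augmentations.functional import posterize

img = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)

out = posterize(img, bits=4)
assert np.array_equal(out, img & 0b11110000)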

def prepare_illumination_input (img) [view source on GitHub]

Prepare image for illumination effect.

Parameters:

Name Type Description
img np.ndarray

Input image

Returns:

Type Description
tuple of
  • float32 image
  • height
  • width
Source code in albumentations/augmentations/functional.py
Python
def prepare_illumination_input(img: np.ndarray) -> tuple[np.ndarray, int, int]:
    """Prepare image for illumination effect.

    Args:
        img: Input image

    Returns:
        tuple of:
        - float32 image
        - height
        - width
    """
    result = img.astype(np.float32)
    height, width = img.shape[:2]
    return result, height, width

def random_offset (current_size, total_size, roughness, random_generator) [view source on GitHub]

Calculate random offset based on current grid size.

Source code in albumentations/augmentations/functional.py
Python
def random_offset(current_size: int, total_size: int, roughness: float, random_generator: np.random.Generator) -> float:
    """Calculate random offset based on current grid size."""
    return (random_generator.random() - 0.5) * (current_size / total_size) ** (roughness / 2)

def sample_beta (size, params, random_generator) [view source on GitHub]

Sample from Beta distribution.

The Beta distribution is bounded by [0, 1] and then scaled and shifted to [-scale, scale]. Alpha and beta parameters control the shape of the distribution.

Source code in albumentations/augmentations/functional.py
Python
def sample_beta(size: tuple[int, ...], params: dict[str, Any], random_generator: np.random.Generator) -> np.ndarray:
    """Sample from Beta distribution.

    The Beta distribution is bounded by [0, 1] and then scaled and shifted to [-scale, scale].
    Alpha and beta parameters control the shape of the distribution.
    """
    alpha = random_generator.uniform(*params["alpha_range"])
    beta = random_generator.uniform(*params["beta_range"])
    scale = random_generator.uniform(*params["scale_range"])

    # Sample from Beta[0,1] and transform to [-scale,scale]
    samples = random_generator.beta(alpha, beta, size=size)
    return (2 * samples - 1) * scale
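
A sketch of the transform (parameter ranges are illustrative): Beta samples live in [0, 1], so 2 * s - 1 maps them to [-1, 1], and multiplying by scale bounds them to [-scale, scale].

Python
import numpy as np

from albumentations.augmentations.functional import sample_beta

rng = np.random.default_rng(0)
params = {"alpha_range": (2.0, 2.0), "beta_range": (2.0, 2.0), "scale_range": (0.1, 0.1)}

samples = sample_beta((10_000,), params, rng)
assert samples.min() >= -0.1 and samples.max() <= 0.1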

def sample_gaussian (size, params, random_generator) [view source on GitHub]

Sample from Gaussian distribution.

Source code in albumentations/augmentations/functional.py
Python
def sample_gaussian(size: tuple[int, ...], params: dict[str, Any], random_generator: np.random.Generator) -> np.ndarray:
    """Sample from Gaussian distribution."""
    mean = random_generator.uniform(*params["mean_range"])
    std = random_generator.uniform(*params["std_range"])
    return random_generator.normal(mean, std, size=size)

def sample_laplace (size, params, random_generator) [view source on GitHub]

Sample from Laplace distribution.

The Laplace distribution is also known as the double exponential distribution. It has heavier tails than the Gaussian distribution.

Source code in albumentations/augmentations/functional.py
Python
def sample_laplace(size: tuple[int, ...], params: dict[str, Any], random_generator: np.random.Generator) -> np.ndarray:
    """Sample from Laplace distribution.

    The Laplace distribution is also known as the double exponential distribution.
    It has heavier tails than the Gaussian distribution.
    """
    loc = random_generator.uniform(*params["mean_range"])
    scale = random_generator.uniform(*params["scale_range"])
    return random_generator.laplace(loc=loc, scale=scale, size=size)

def sample_noise (noise_type, size, params, max_value, random_generator) [view source on GitHub]

Sample from specific noise distribution.

Source code in albumentations/augmentations/functional.py
Python
def sample_noise(
    noise_type: Literal["uniform", "gaussian", "laplace", "beta", "poisson"],
    size: tuple[int, ...],
    params: dict[str, Any],
    max_value: float,
    random_generator: np.random.Generator,
) -> np.ndarray:
    """Sample from specific noise distribution."""
    if noise_type == "uniform":
        return sample_uniform(size, params, random_generator) * max_value
    if noise_type == "gaussian":
        return sample_gaussian(size, params, random_generator) * max_value
    if noise_type == "laplace":
        return sample_laplace(size, params, random_generator) * max_value
    if noise_type == "beta":
        return sample_beta(size, params, random_generator) * max_value
    if noise_type == "poisson":
        return sample_poisson(size, params, random_generator, max_value)

    raise ValueError(f"Unknown noise type: {noise_type}")

def sample_poisson (size, params, random_generator, max_value) [view source on GitHub]

Sample from Poisson distribution.

For uint8 images (max_value=255), lambda is scaled accordingly as Poisson noise is intensity-dependent.

Source code in albumentations/augmentations/functional.py
Python
def sample_poisson(
    size: tuple[int, ...],
    params: dict[str, Any],
    random_generator: np.random.Generator,
    max_value: float,
) -> np.ndarray:
    """Sample from Poisson distribution.

    For uint8 images (max_value=255), lambda is scaled accordingly as Poisson noise
    is intensity-dependent.
    """
    lam = random_generator.uniform(*params["lambda_range"])

    # Scale lambda based on max_value as Poisson noise is intensity-dependent
    scaled_lam = lam * max_value

    # Generate Poisson samples
    samples = random_generator.poisson(lam=scaled_lam, size=size)

    # Center around 0 and normalize by standard deviation
    # For Poisson, variance = lambda
    noise = (samples - scaled_lam) / np.sqrt(scaled_lam)

    # Scale to match max_value range
    return np.clip(noise * max_value, -max_value, max_value)
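
A sketch of the normalization (the lambda range is illustrative): a Poisson variable with rate lambda has mean and variance lambda, so (samples - lambda) / sqrt(lambda) is approximately zero-mean with unit variance before the final scaling by max_value and clipping.

Python
import numpy as np

from albumentations.augmentations.functional import sample_poisson

rng = np.random.default_rng(0)
params = {"lambda_range": (5.0, 5.0)}

noise = sample_poisson((100_000,), params, rng, max_value=255.0)
assert abs(noise.mean()) < 5.0  # close to zero after centering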

def sample_uniform (size, params, random_generator) [view source on GitHub]

Sample from uniform distribution.

Source code in albumentations/augmentations/functional.py
Python
def sample_uniform(size: tuple[int, ...], params: dict[str, Any], random_generator: np.random.Generator) -> np.ndarray:
    """Sample from uniform distribution."""
    if len(size) == 1:  # constant mode
        if len(params["ranges"]) < size[0]:
            raise ValueError(f"Not enough ranges provided. Expected {size[0]}, got {len(params['ranges'])}")
        return np.array([random_generator.uniform(low, high) for low, high in params["ranges"][: size[0]]])

    # Use the first range for spatial noise
    low, high = params["ranges"][0]
    return random_generator.uniform(low, high, size=size)

def sharpen_gaussian (img, alpha, kernel_size, sigma) [view source on GitHub]

Sharpen image using Gaussian blur.

Source code in albumentations/augmentations/functional.py
Python
@preserve_channel_dim
def sharpen_gaussian(img: np.ndarray, alpha: float, kernel_size: int, sigma: float) -> np.ndarray:
    """Sharpen image using Gaussian blur."""
    blurred = cv2.GaussianBlur(img, ksize=(kernel_size, kernel_size), sigmaX=sigma, sigmaY=sigma)
    return add_weighted(blurred, 1 - alpha, img, alpha)
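
The blend above is algebraically img + (alpha - 1) * (img - blurred), a form of unsharp masking: alpha = 1 returns the input unchanged, alpha > 1 amplifies high-frequency detail, and alpha < 1 moves the result toward the blur. A usage sketch (parameter values are illustrative, assuming add_weighted permits weights outside [0, 1] as cv2.addWeighted does):

Python
import numpy as np

from albumentations.augmentations.functional import sharpen_gaussian

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)

# alpha > 1 amplifies the difference between the image and its blur
sharpened = sharpen_gaussian(img, alpha=1.5, kernel_size=5, sigma=1.0)
assert sharpened.shape == img.shape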

def shot_noise (img, scale, random_generator) [view source on GitHub]

Apply shot noise to the image by simulating photon counting in linear light space.

This function simulates photon shot noise, which occurs due to the quantum nature of light. The process:

1. Converts image to linear light space (removes gamma correction)
2. Scales pixel values to represent expected photon counts
3. Samples actual photon counts from Poisson distribution
4. Converts back to display space (reapplies gamma)

The simulation is performed in linear light space because photon shot noise is a physical process that occurs before gamma correction is applied by cameras/displays.

Parameters:

Name Type Description
img np.ndarray

Input image in range [0, 1]. Can be single or multi-channel.

scale float

Reciprocal of the number of photons (noise intensity).

  • Larger values = fewer photons = more noise
  • Smaller values = more photons = less noise

For example:

  • scale = 0.1 simulates ~100 photons per unit intensity
  • scale = 10.0 simulates ~0.1 photons per unit intensity

random_generator np.random.Generator

NumPy random generator for Poisson sampling

Returns:

Type Description
Image with shot noise applied, same shape and range [0, 1] as input. The noise characteristics will follow Poisson statistics in linear space:
  • Variance equals mean in linear space
  • More noise in brighter regions (but less relative noise)
  • Less noise in darker regions (but more relative noise)

Note

  • Uses gamma value of 2.2 for linear/display space conversion
  • Adds small constant (1e-6) to avoid issues with zero values
  • Clips final values to [0, 1] range
  • Operates on the image in-place for memory efficiency
  • Preserves float32 precision throughout calculations
Source code in albumentations/augmentations/functional.py
Python
@preserve_channel_dim
@float32_io
def shot_noise(img: np.ndarray, scale: float, random_generator: np.random.Generator) -> np.ndarray:
    """Apply shot noise to the image by simulating photon counting in linear light space.

    This function simulates photon shot noise, which occurs due to the quantum nature of light.
    The process:
    1. Converts image to linear light space (removes gamma correction)
    2. Scales pixel values to represent expected photon counts
    3. Samples actual photon counts from Poisson distribution
    4. Converts back to display space (reapplies gamma)

    The simulation is performed in linear light space because photon shot noise is a physical
    process that occurs before gamma correction is applied by cameras/displays.

    Args:
        img: Input image in range [0, 1]. Can be single or multi-channel.
        scale: Reciprocal of the number of photons (noise intensity).
            - Larger values = fewer photons = more noise
            - Smaller values = more photons = less noise
            For example:
            - scale = 0.1 simulates ~100 photons per unit intensity
            - scale = 10.0 simulates ~0.1 photons per unit intensity
        random_generator: NumPy random generator for Poisson sampling

    Returns:
        Image with shot noise applied, same shape and range [0, 1] as input.
        The noise characteristics will follow Poisson statistics in linear space:
        - Variance equals mean in linear space
        - More noise in brighter regions (but less relative noise)
        - Less noise in darker regions (but more relative noise)

    Note:
        - Uses gamma value of 2.2 for linear/display space conversion
        - Adds small constant (1e-6) to avoid issues with zero values
        - Clips final values to [0, 1] range
        - Operates on the image in-place for memory efficiency
        - Preserves float32 precision throughout calculations

    References:
        - https://en.wikipedia.org/wiki/Shot_noise
        - https://en.wikipedia.org/wiki/Gamma_correction
    """
    # Apply inverse gamma correction to work in linear space
    img_linear = cv2.pow(img, 2.2)

    # Scale image values and add small constant to avoid zero values
    scaled_img = (img_linear + scale * 1e-6) / scale

    # Generate Poisson noise
    noisy_img = multiply_by_constant(random_generator.poisson(scaled_img).astype(np.float32), scale, inplace=True)

    # Scale back and apply gamma correction
    return power(np.clip(noisy_img, 0, 1, out=noisy_img), 1 / 2.2)
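
A usage sketch (scale values are illustrative; a float32 image in [0, 1] is assumed, which the float32_io decorator also provides for uint8 input):

Python
import numpy as np

from albumentations.augmentations.functional import shot_noise

rng = np.random.default_rng(0)
img = rng.random((64, 64, 3), dtype=np.float32)

subtle = shot_noise(img, scale=0.01, random_generator=rng)  # many photons, little noise
heavy = shot_noise(img, scale=1.0, random_generator=rng)    # few photons, strong noise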

def slic (image, n_segments, compactness=10.0, max_iterations=10) [view source on GitHub]

Simple Linear Iterative Clustering (SLIC) superpixel segmentation using OpenCV and NumPy.

Parameters:

Name Type Description
image np.ndarray

Input image (2D or 3D numpy array).

n_segments int

Approximate number of superpixels to generate.

compactness float

Balance between color proximity and space proximity.

max_iterations int

Maximum number of iterations for k-means.

Returns:

Type Description
np.ndarray

Segmentation mask where each superpixel has a unique label.

Source code in albumentations/augmentations/functional.py
Python
def slic(image: np.ndarray, n_segments: int, compactness: float = 10.0, max_iterations: int = 10) -> np.ndarray:
    """Simple Linear Iterative Clustering (SLIC) superpixel segmentation using OpenCV and NumPy.

    Args:
        image (np.ndarray): Input image (2D or 3D numpy array).
        n_segments (int): Approximate number of superpixels to generate.
        compactness (float): Balance between color proximity and space proximity.
        max_iterations (int): Maximum number of iterations for k-means.

    Returns:
        np.ndarray: Segmentation mask where each superpixel has a unique label.
    """
    if image.ndim == MONO_CHANNEL_DIMENSIONS:
        image = image[..., np.newaxis]

    height, width = image.shape[:2]
    num_pixels = height * width

    # Normalize image to [0, 1] range
    image_normalized = image.astype(np.float32) / np.max(image)

    # Initialize cluster centers
    grid_step = int((num_pixels / n_segments) ** 0.5)
    x_range = np.arange(grid_step // 2, width, grid_step)
    y_range = np.arange(grid_step // 2, height, grid_step)
    centers = np.array([(x, y) for y in y_range for x in x_range if x < width and y < height])

    # Initialize labels and distances
    labels = -1 * np.ones((height, width), dtype=np.int32)
    distances = np.full((height, width), np.inf)

    for _ in range(max_iterations):
        for i, center in enumerate(centers):
            y, x = int(center[1]), int(center[0])

            # Define the neighborhood
            y_low, y_high = max(0, y - grid_step), min(height, y + grid_step + 1)
            x_low, x_high = max(0, x - grid_step), min(width, x + grid_step + 1)

            # Compute distances
            crop = image_normalized[y_low:y_high, x_low:x_high]
            color_diff = crop - image_normalized[y, x]
            color_distance = np.sum(color_diff**2, axis=-1)

            yy, xx = np.ogrid[y_low:y_high, x_low:x_high]
            spatial_distance = ((yy - y) ** 2 + (xx - x) ** 2) / (grid_step**2)

            distance = color_distance + compactness * spatial_distance

            mask = distance < distances[y_low:y_high, x_low:x_high]
            distances[y_low:y_high, x_low:x_high][mask] = distance[mask]
            labels[y_low:y_high, x_low:x_high][mask] = i

        # Update centers
        for i in range(len(centers)):
            mask = labels == i
            if np.any(mask):
                centers[i] = np.mean(np.argwhere(mask), axis=0)[::-1]

    return labels
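
A usage sketch (segment count and compactness are illustrative):

Python
import numpy as np

from albumentations.augmentations.functional import slic

img = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)

labels = slic(img, n_segments=100, compactness=10.0)
assert labels.shape == (128, 128)

# Each superpixel carries a distinct integer label
num_superpixels = len(np.unique(labels))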

def solarize (img, threshold) [view source on GitHub]

Invert all pixel values above a threshold.

Parameters:

Name Type Description
img np.ndarray

The image to solarize. Can be uint8 or float32.

threshold float

Normalized threshold value in range [0, 1]. For uint8 images: pixels above threshold * 255 are inverted. For float32 images: pixels above threshold are inverted.

Returns:

Type Description
np.ndarray

Solarized image.

Note

The threshold is normalized to [0, 1] range for both uint8 and float32 images. For uint8 images, the threshold is internally scaled by 255.

Source code in albumentations/augmentations/functional.py
Python
@clipped
def solarize(img: np.ndarray, threshold: float) -> np.ndarray:
    """Invert all pixel values above a threshold.

    Args:
        img: The image to solarize. Can be uint8 or float32.
        threshold: Normalized threshold value in range [0, 1].
            For uint8 images: pixels above threshold * 255 are inverted
            For float32 images: pixels above threshold are inverted

    Returns:
        Solarized image.

    Note:
        The threshold is normalized to [0, 1] range for both uint8 and float32 images.
        For uint8 images, the threshold is internally scaled by 255.
    """
    dtype = img.dtype
    max_val = MAX_VALUES_BY_DTYPE[dtype]

    if dtype == np.uint8:
        lut = [(max_val - i if i >= threshold * max_val else i) for i in range(int(max_val) + 1)]

        prev_shape = img.shape
        img = sz_lut(img, np.array(lut, dtype=dtype), inplace=False)

        return np.expand_dims(img, -1) if len(prev_shape) != img.ndim else img

    img = img.copy()

    cond = img >= threshold
    img[cond] = max_val - img[cond]
    return img
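
A worked usage sketch: for a uint8 ramp, threshold=0.5 scales to 127.5, so values of 128 and above are inverted while darker values pass through.

Python
import numpy as np

from albumentations.augmentations.functional import solarize

img = np.arange(256, dtype=np.uint8).reshape(16, 16)

out = solarize(img, threshold=0.5)
assert out.flat[0] == 0      # 0 is below the threshold and unchanged
assert out.flat[127] == 127  # just below 127.5, unchanged
assert out.flat[255] == 0    # 255 is inverted to 255 - 255 = 0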

def square_step (pattern, y, x, step, grid_size, roughness, random_generator) [view source on GitHub]

Compute center value during square step.

Source code in albumentations/augmentations/functional.py
Python
def square_step(
    pattern: np.ndarray,
    y: int,
    x: int,
    step: int,
    grid_size: int,
    roughness: float,
    random_generator: np.random.Generator,
) -> float:
    """Compute center value during square step."""
    corners = [
        pattern[y, x],  # top-left
        pattern[y, x + step],  # top-right
        pattern[y + step, x],  # bottom-left
        pattern[y + step, x + step],  # bottom-right
    ]
    return sum(corners) / 4.0 + random_offset(step, grid_size, roughness, random_generator)

def to_gray_average (img) [view source on GitHub]

Convert an image to grayscale using the average method.

This function computes the arithmetic mean across all channels for each pixel, resulting in a grayscale representation of the image.

Key aspects of this method:

1. It treats all channels equally, regardless of their perceptual importance.
2. Works with any number of channels, making it versatile for various image types.
3. Simple and fast to compute, but may not accurately represent perceived brightness.
4. For RGB images, the formula is: Gray = (R + G + B) / 3

Note: This method may produce different results compared to weighted methods (like RGB weighted average) which account for human perception of color brightness. It may also produce unexpected results for images with alpha channels or non-color data in additional channels.

Parameters:

Name Type Description
img np.ndarray

Input image as a numpy array. Can be any number of channels.

Returns:

Type Description
np.ndarray

Grayscale image as a 2D numpy array. The output data type matches the input data type.

Image types: uint8, float32

Number of channels: any

Source code in albumentations/augmentations/functional.py
Python
def to_gray_average(img: np.ndarray) -> np.ndarray:
    """Convert an image to grayscale using the average method.

    This function computes the arithmetic mean across all channels for each pixel,
    resulting in a grayscale representation of the image.

    Key aspects of this method:
    1. It treats all channels equally, regardless of their perceptual importance.
    2. Works with any number of channels, making it versatile for various image types.
    3. Simple and fast to compute, but may not accurately represent perceived brightness.
    4. For RGB images, the formula is: Gray = (R + G + B) / 3

    Note: This method may produce different results compared to weighted methods
    (like RGB weighted average) which account for human perception of color brightness.
    It may also produce unexpected results for images with alpha channels or
    non-color data in additional channels.

    Args:
        img (np.ndarray): Input image as a numpy array. Can be any number of channels.

    Returns:
        np.ndarray: Grayscale image as a 2D numpy array. The output data type
                    matches the input data type.

    Image types:
        uint8, float32

    Number of channels:
        any
    """
    return np.mean(img, axis=-1).astype(img.dtype)

def to_gray_desaturation (img) [view source on GitHub]

Convert an image to grayscale using the desaturation method.

Parameters:

Name Type Description
img np.ndarray

Input image as a numpy array.

Returns:

Type Description
np.ndarray

Grayscale image as a 2D numpy array.

Image types: uint8, float32

Number of channels: any

Source code in albumentations/augmentations/functional.py
Python
@clipped
def to_gray_desaturation(img: np.ndarray) -> np.ndarray:
    """Convert an image to grayscale using the desaturation method.

    Args:
        img (np.ndarray): Input image as a numpy array.

    Returns:
        np.ndarray: Grayscale image as a 2D numpy array.

    Image types:
        uint8, float32

    Number of channels:
        any
    """
    float_image = img.astype(np.float32)
    return (np.max(float_image, axis=-1) + np.min(float_image, axis=-1)) / 2

def to_gray_from_lab (img) [view source on GitHub]

Convert an RGB image to grayscale using the L channel from the LAB color space.

This function converts the RGB image to the LAB color space and extracts the L channel. The LAB color space is designed to approximate human vision, where L represents lightness.

Key aspects of this method:

1. The L channel represents the lightness of each pixel, ranging from 0 (black) to 100 (white).
2. It's more perceptually uniform than RGB, meaning equal changes in L values correspond to roughly equal changes in perceived lightness.
3. The L channel is independent of the color information (A and B channels), making it suitable for grayscale conversion.

This method can be particularly useful when you want a grayscale image that closely matches human perception of lightness, potentially preserving more perceived contrast than simple RGB-based methods.

Parameters:

Name Type Description
img np.ndarray

Input RGB image as a numpy array.

Returns:

Type Description
np.ndarray

Grayscale image as a 2D numpy array, representing the L (lightness) channel. Values are scaled to match the input image's data type range.

Image types: uint8, float32

Number of channels: 3

Source code in albumentations/augmentations/functional.py
Python
@uint8_io
@clipped
def to_gray_from_lab(img: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the L channel from the LAB color space.

    This function converts the RGB image to the LAB color space and extracts the L channel.
    The LAB color space is designed to approximate human vision, where L represents lightness.

    Key aspects of this method:
    1. The L channel represents the lightness of each pixel, ranging from 0 (black) to 100 (white).
    2. It's more perceptually uniform than RGB, meaning equal changes in L values correspond to
       roughly equal changes in perceived lightness.
    3. The L channel is independent of the color information (A and B channels), making it
       suitable for grayscale conversion.

    This method can be particularly useful when you want a grayscale image that closely
    matches human perception of lightness, potentially preserving more perceived contrast
    than simple RGB-based methods.

    Args:
        img (np.ndarray): Input RGB image as a numpy array.

    Returns:
        np.ndarray: Grayscale image as a 2D numpy array, representing the L (lightness) channel.
                    Values are scaled to match the input image's data type range.

    Image types:
        uint8, float32

    Number of channels:
        3
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2LAB)[..., 0]

def to_gray_max (img) [view source on GitHub]

Convert an image to grayscale using the maximum channel value method.

This function takes the maximum value across all channels for each pixel, resulting in a grayscale image that preserves the brightest parts of the original image.

Key aspects of this method:

1. Works with any number of channels, making it versatile for various image types.
2. For 3-channel (e.g., RGB) images, this method is equivalent to extracting the V (Value) channel from the HSV color space.
3. Preserves the brightest parts of the image but may lose some color contrast information.
4. Simple and fast to compute.

Note:

  • This method tends to produce brighter grayscale images compared to other conversion methods, as it always selects the highest intensity value from the channels.
  • For RGB images, it may not accurately represent perceived brightness as it doesn't account for human color perception.

Parameters:

Name Type Description
img np.ndarray

Input image as a numpy array. Can be any number of channels.

Returns:

Type Description
np.ndarray

Grayscale image as a 2D numpy array. The output data type matches the input data type.

Image types: uint8, float32

Number of channels: any

Source code in albumentations/augmentations/functional.py
Python
def to_gray_max(img: np.ndarray) -> np.ndarray:
    """Convert an image to grayscale using the maximum channel value method.

    This function takes the maximum value across all channels for each pixel,
    resulting in a grayscale image that preserves the brightest parts of the original image.

    Key aspects of this method:
    1. Works with any number of channels, making it versatile for various image types.
    2. For 3-channel (e.g., RGB) images, this method is equivalent to extracting the V (Value)
       channel from the HSV color space.
    3. Preserves the brightest parts of the image but may lose some color contrast information.
    4. Simple and fast to compute.

    Note:
    - This method tends to produce brighter grayscale images compared to other conversion methods,
      as it always selects the highest intensity value from the channels.
    - For RGB images, it may not accurately represent perceived brightness as it doesn't
      account for human color perception.

    Args:
        img (np.ndarray): Input image as a numpy array. Can be any number of channels.

    Returns:
        np.ndarray: Grayscale image as a 2D numpy array. The output data type
                    matches the input data type.

    Image types:
        uint8, float32

    Number of channels:
        any
    """
    return np.max(img, axis=-1)

def to_gray_pca (img) [view source on GitHub]

Convert an image to grayscale using Principal Component Analysis (PCA).

This function applies PCA to reduce a multi-channel image to a single channel, effectively creating a grayscale representation that captures the maximum variance in the color data.

Parameters:

Name Type Description
img np.ndarray

Input image as a numpy array with shape (height, width, channels).

Returns:

Type Description
np.ndarray

Grayscale image as a 2D numpy array with shape (height, width). If input is uint8, output is uint8 in range [0, 255]. If input is float32, output is float32 in range [0, 1].

Note

This method can potentially preserve more information from the original image compared to standard weighted average methods, as it accounts for the correlations between color channels.

Image types: uint8, float32

Number of channels: any

Source code in albumentations/augmentations/functional.py
Python
@clipped
def to_gray_pca(img: np.ndarray) -> np.ndarray:
    """Convert an image to grayscale using Principal Component Analysis (PCA).

    This function applies PCA to reduce a multi-channel image to a single channel,
    effectively creating a grayscale representation that captures the maximum variance
    in the color data.

    Args:
        img (np.ndarray): Input image as a numpy array with shape (height, width, channels).

    Returns:
        np.ndarray: Grayscale image as a 2D numpy array with shape (height, width).
                    If input is uint8, output is uint8 in range [0, 255].
                    If input is float32, output is float32 in range [0, 1].

    Note:
        This method can potentially preserve more information from the original image
        compared to standard weighted average methods, as it accounts for the
        correlations between color channels.

    Image types:
        uint8, float32

    Number of channels:
        any
    """
    dtype = img.dtype
    # Reshape the image to a 2D array of pixels
    pixels = img.reshape(-1, img.shape[2])

    # Perform PCA
    pca = PCA(n_components=1)
    pca_result = pca.fit_transform(pixels)

    # Reshape back to image dimensions and scale to 0-255
    grayscale = pca_result.reshape(img.shape[:2])
    grayscale = normalize_per_image(grayscale, "min_max")

    return from_float(grayscale, target_dtype=dtype) if dtype == np.uint8 else grayscale

def to_gray_weighted_average (img) [view source on GitHub]

Convert an RGB image to grayscale using the weighted average method.

This function uses OpenCV's cvtColor function with COLOR_RGB2GRAY conversion, which applies the following formula: Y = 0.299*R + 0.587*G + 0.114*B

Parameters:

Name Type Description
img np.ndarray

Input RGB image as a numpy array.

Returns:

Type Description
np.ndarray

Grayscale image as a 2D numpy array.

Image types: uint8, float32

Number of channels: 3

Source code in albumentations/augmentations/functional.py
Python
def to_gray_weighted_average(img: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the weighted average method.

    This function uses OpenCV's cvtColor function with COLOR_RGB2GRAY conversion,
    which applies the following formula:
    Y = 0.299*R + 0.587*G + 0.114*B

    Args:
        img (np.ndarray): Input RGB image as a numpy array.

    Returns:
        np.ndarray: Grayscale image as a 2D numpy array.

    Image types:
        uint8, float32

    Number of channels:
        3
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
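
A sketch comparing the conversion methods documented above on the same input (the random image is illustrative):

Python
import numpy as np

from albumentations.augmentations import functional as F

img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)

gray_avg = F.to_gray_average(img)            # (R + G + B) / 3
gray_max = F.to_gray_max(img)                # equivalent to the HSV V channel
gray_wavg = F.to_gray_weighted_average(img)  # 0.299*R + 0.587*G + 0.114*B

assert gray_avg.shape == gray_max.shape == gray_wavg.shape == (32, 32)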