Source code for timagetk.synthetic_data.util

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#  Copyright (c) 2022 Univ. Lyon, ENS de Lyon, UCB Lyon 1, CNRS, INRAe, Inria
#  All rights reserved.
#  This file is part of the TimageTK library, and is released under the "GPLv3"
#  license. Please see the LICENSE.md file that should have been included as
#  part of this package.
# ------------------------------------------------------------------------------

from collections.abc import Iterable

import numpy as np
import scipy.ndimage as nd

from timagetk.components.spatial_image import SpatialImage


def point_position_optimization(points, omega_attraction=1., omega_repulsion=1., target_distance=1,
                                sigma_deformation=0.1, n_iterations=100, force_centering=True,
                                center=np.zeros(3), constraint_radius=False, radius=50.):
    """Optimize the positions of a dictionary of 3D points.

    The function performs an optimization of an initial dictionary of 3D points by an
    attraction-repulsion energy minimization process. The result is a new dictionary in
    which the 3D points tend to be evenly distributed in space.

    Parameters
    ----------
    points : dict
        Dictionary with 3D positions as values.
    omega_attraction : float
        Coefficient of the attraction term of the energy.
    omega_repulsion : float
        Coefficient of the repulsion term of the energy.
    target_distance : float
        Optimal distance between points for the energy definition.
    sigma_deformation : float
        Maximal displacement of the points at each iteration.
    n_iterations : int
        Number of iterations in the optimization process.
    force_centering : bool
        Whether to constrain the positions to remain centered.
    center : numpy.ndarray
        Length-3 array representing the 3D position of the center to use.
    constraint_radius : bool
        Whether to constrain the positions to a fixed distance from the center.
    radius : float
        The fixed distance between the points and the center.

    Returns
    -------
    dict
        The dictionary of optimized 3D positions.
    """
    for _iteration in range(n_iterations):
        points_array = np.array(list(points.values()))
        # Unit vectors and pairwise distances between all points:
        point_vectors = np.array([points[p] - points_array for p in points.keys()])
        point_distances = np.linalg.norm(points_array[np.newaxis] - points_array[:, np.newaxis], axis=-1)
        point_vectors = point_vectors / (point_distances[..., np.newaxis] + 1e-7)
        # Attraction term: pulls pairs of points towards the target distance.
        point_distance_forces = omega_attraction * (
                (target_distance - point_distances)[..., np.newaxis] * point_vectors / target_distance).sum(axis=1)
        # Repulsion term: pushes points apart, with a strength decaying with the squared distance.
        point_repulsion_forces = omega_repulsion * np.power(target_distance, 2) * (
                point_vectors / (np.power(point_distances, 2) + 1e-7)[..., np.newaxis]).sum(axis=1)
        point_forces = np.zeros((len(points), 3))
        point_forces += point_distance_forces
        point_forces += point_repulsion_forces
        # Limit the displacement of each point to `sigma_deformation`.
        point_forces = np.minimum(1.0, sigma_deformation / np.linalg.norm(point_forces, axis=1))[:, np.newaxis] * point_forces
        new_points = points_array + point_forces
        if constraint_radius:
            # Project the points back onto the sphere of given `radius` around `center`.
            radial_vectors = (new_points - center) / np.linalg.norm((new_points - center), axis=1)[:, np.newaxis]
            new_points = center + radius * radial_vectors
        if force_centering:
            # Translate the point cloud so that its barycenter coincides with `center`.
            new_points += center - np.mean(new_points, axis=0)
        points = dict(zip(points.keys(), new_points))

    return points

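
# A minimal usage sketch for ``point_position_optimization`` (illustrative only, not part
# of the original module; the point ids, coordinates and parameter values are made up):
# >>> import numpy as np
# >>> from timagetk.synthetic_data.util import point_position_optimization
# >>> np.random.seed(0)
# >>> points = {i: np.random.uniform(-50., 50., 3) for i in range(20)}
# >>> # Spread the points evenly over a sphere of radius 50 centered on the origin:
# >>> points = point_position_optimization(points, target_distance=30., n_iterations=200,
# ...                                      force_centering=False, constraint_radius=True, radius=50.)
# >>> np.linalg.norm(np.array(list(points.values())), axis=1)  # all norms equal to 50
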
def pseudo_gradient_norm(seg_img, dtype='uint8', wall_sigma=None, intensity=None, count=True):
    """Create an image with signal at wall interfaces of a labelled image.

    The method starts by counting the number of different label values around each voxel,
    and assigns an intensity value where this number is greater than 1. The signal is then
    smoothed by a Gaussian kernel to simulate a wall-marker intensity image.

    Parameters
    ----------
    seg_img : timagetk.LabelledImage
        The cell image on which to compute the wall signal.
    dtype : str or numpy.dtype, optional
        Set the ``dtype`` of the returned image. Defaults to ``'uint8'``.
    wall_sigma : float, optional
        Sigma value determining the wall width, in real units.
        Defaults to the image voxelsize ``seg_img.voxelsize``.
    intensity : float, optional
        Signal intensity of the wall interfaces.
        Defaults to 2/3 of the maximum value based on `dtype`.
    count : bool, optional
        Whether to have higher signal at wall junctions. Defaults to ``True``.

    Returns
    -------
    timagetk.SpatialImage
        The wall signal intensity image.

    Examples
    --------
    >>> from timagetk.synthetic_data.util import pseudo_gradient_norm
    >>> from timagetk import LabelledImage
    >>> from timagetk.io import imread
    >>> from timagetk.io.util import shared_dataset
    >>> from timagetk.visu.stack import orthogonal_view
    >>> img_path = shared_dataset("p58", "segmented")[0]
    >>> seg_image = imread(img_path, LabelledImage, not_a_label=0)
    >>> v = orthogonal_view(seg_image, cmap='glasbey', val_range='auto')
    >>> # Example 1 - Automatic creation of the wall signal intensity image:
    >>> int_image = pseudo_gradient_norm(seg_image)
    >>> v = orthogonal_view(int_image, cmap='gray', val_range='dtype')
    >>> # Example 2 - Automatic creation of the wall signal intensity image, without Gaussian smoothing:
    >>> int_image = pseudo_gradient_norm(seg_image, wall_sigma=0)
    >>> v = orthogonal_view(int_image, cmap='gray', val_range='dtype')
    """
    from timagetk.components.image import get_image_attributes
    img_kwargs = get_image_attributes(seg_img)
    img_kwargs['dtype'] = dtype

    shape = np.array(seg_img.shape)
    voxelsize = np.array(seg_img.voxelsize)
    dimension = seg_img.ndim

    if intensity is None:
        intensity = 2 / 3 * np.iinfo(dtype).max
    if wall_sigma is None:
        wall_sigma = voxelsize

    # Inner domain on which the label neighborhoods are computed (a 1-voxel margin is excluded).
    neighborhood_shape = np.array([s - (2 if k < dimension else 0) for k, s in enumerate(shape)])
    neighborhood_img = []
    if dimension == 3:
        # Gather, for each inner voxel, the labels of its 3x3x3 neighborhood.
        for x in np.arange(-1, 2):
            for y in np.arange(-1, 2):
                for z in np.arange(-1, 2):
                    neighborhood_img.append(
                        seg_img.get_array()[1 + x:shape[0] - 1 + x, 1 + y:shape[1] - 1 + y, 1 + z:shape[2] - 1 + z])
        neighborhood_img = np.sort(np.transpose(neighborhood_img, (1, 2, 3, 0))).reshape(neighborhood_shape.prod(), 27)
    elif dimension == 2:
        # Gather, for each inner pixel, the labels of its 3x3 neighborhood.
        for x in np.arange(-1, 2):
            for y in np.arange(-1, 2):
                neighborhood_img.append(seg_img.get_array()[1 + x:shape[0] - 1 + x, 1 + y:shape[1] - 1 + y])
        neighborhood_img = np.sort(np.transpose(neighborhood_img, (1, 2, 0))).reshape(neighborhood_shape.prod(), 9)

    if count:
        # Number of distinct labels in each neighborhood (higher at wall junctions).
        neighborhoods = np.array(list(map(np.unique, neighborhood_img)), dtype=object)
        neighborhood_size = np.array(list(map(len, neighborhoods))).reshape(tuple(neighborhood_shape))
    else:
        # Binary variant: 2 if the neighborhood spans more than one label, 1 otherwise.
        neighborhood_size = 2 - np.all(neighborhood_img == neighborhood_img[..., :1], axis=-1).astype(int)
        neighborhood_size = neighborhood_size.reshape(tuple(neighborhood_shape))

    # Non-zero values wherever at least two labels meet:
    gradient_img = np.zeros_like(seg_img.get_array()).astype(float)
    if dimension == 3:
        gradient_img[1:shape[0] - 1, 1:shape[1] - 1, 1:shape[2] - 1] += (neighborhood_size - 1)
    elif dimension == 2:
        gradient_img[1:shape[0] - 1, 1:shape[1] - 1] += (neighborhood_size - 1)

    if isinstance(wall_sigma, (int, float)) and wall_sigma != 0:
        smoothing = True
    elif isinstance(wall_sigma, Iterable) and all(sigma != 0 for sigma in wall_sigma):
        smoothing = True
    else:
        smoothing = False

    if smoothing:
        # Smooth the wall signal with a Gaussian kernel of `wall_sigma`, converted to voxel units.
        # gradient_img = np.pi * intensity * nd.gaussian_filter(gradient_img, sigma=wall_sigma / voxelsize)
        gradient_img = intensity * nd.gaussian_filter(gradient_img, sigma=wall_sigma / voxelsize)
        gradient_img = np.minimum(gradient_img, np.iinfo(dtype).max)

    return SpatialImage(gradient_img.astype(dtype), **img_kwargs)
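
# A self-contained sketch of ``pseudo_gradient_norm`` on a tiny synthetic 2D label image
# (illustrative only, not part of the original module; it assumes ``LabelledImage`` can be
# built directly from a NumPy array with ``voxelsize`` and ``not_a_label`` keywords, as
# suggested by the docstring example above):
# >>> import numpy as np
# >>> from timagetk import LabelledImage
# >>> from timagetk.synthetic_data.util import pseudo_gradient_norm
# >>> arr = np.ones((32, 32), dtype='uint16')
# >>> arr[16:, :] = 2  # two cells separated by a horizontal wall
# >>> seg_image = LabelledImage(arr, voxelsize=[0.5, 0.5], not_a_label=0)
# >>> wall_image = pseudo_gradient_norm(seg_image, wall_sigma=1.)
# >>> # The signal is concentrated around the row-16 interface between the two labels.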