import os
import tempfile
import itertools as IT
import numpy as np
from scipy import ndimage
#import elasticdeform
from skimage.filters import *
from skimage.morphology import skeletonize_3d
from scipy.ndimage import gaussian_filter
import sknw
import SimpleITK as sitk
import cv2
from scipy.ndimage import map_coordinates
import scipy.ndimage as ndi
#########################################################################################################
def uniquify(path):
    """Return a path that does not exist yet by appending ' (n)' before the file extension."""
    filename, extension = os.path.splitext(path)
    counter = 1
    while os.path.exists(path):
        path = filename + " (" + str(counter) + ")" + extension
        counter += 1
    return path
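# Usage sketch (illustrative, not from the original script; the path below is hypothetical):
#   out_path = uniquify("/tmp/result.nrrd")
#   # If "/tmp/result.nrrd" already exists this returns "/tmp/result (1).nrrd", then "/tmp/result (2).nrrd", and so on.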
#########################################################################################################
def Make_Sphere_1(x, y, z):
    """Build a binary 3D sphere (1 inside, 0 outside) centred in a volume of shape (z, y, x)."""
    x1D = np.linspace(0, x-1, x)
    y1D = np.linspace(0, y-1, y)
    x2D, y2D = np.meshgrid(x1D, y1D)
    x_3D = np.zeros([z, y, x], dtype=np.float64)
    y_3D = np.zeros([z, y, x], dtype=np.float64)
    z_3D = np.zeros([z, y, x], dtype=np.float64)
    for i in range(z):
        x_3D[i, :, :] = x2D
        y_3D[i, :, :] = y2D
        z_3D[i, :, :] = np.float64(i)
    # Squared distance to the volume centre; voxels within the radius become 1, the rest 0.
    Sph_3D = np.power((x_3D-int(x/2)), 2) + np.power((y_3D-int(y/2)), 2) + np.power((z_3D-int(z/2)), 2)
    Sph_3D[Sph_3D <= ((int(x/2)-1)*(int(x/2)-1))] = 1
    Sph_3D[Sph_3D != 1] = 0
    return Sph_3D
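# Usage sketch (illustrative; the size 64 is an arbitrary choice, not from the original script):
#   sphere = Make_Sphere_1(64, 64, 64)   # binary volume of shape (z, y, x) = (64, 64, 64) with a centred sphere of 1s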
#########################################################################################################
def Display_dual_3D(Model, GTruth):
    from mayavi import mlab
    zm, ym, xm = Model.shape
    zg, yg, xg = GTruth.shape
    Model[Model > 0] = 255
    """
    Edges1 = np.zeros(Model.shape)
    Edges2 = np.zeros(Model.shape)
    Edges3 = np.zeros(Model.shape)
    Edges4 = np.zeros(Model.shape)
    Edges5 = np.zeros(Model.shape)
    Edges6 = np.zeros(Model.shape)
    Edges1[0:2,:,:] = 1
    Edges2[zm-2:zm,:,:] = 1
    Edges3[:,0:2,:] = 1
    Edges4[:,ym-2:ym,:] = 1
    Edges5[:,:,0:2] = 1
    Edges6[:,:,xm-2:xm] = 1
    Edges = Edges1 + Edges2 + Edges3 + Edges4 + Edges5 + Edges6
    Edges[Edges>=2] = 255
    Model[Edges==255] = 255
    #sitk.WriteImage(sitk.GetImageFromArray(np.uint16(Model)), "/Users/florent/Desktop/Model.nrrd")
    """
    """
    Model[Model>0] = 1
    Model2 = ndi.binary_dilation(Model).astype(Model.dtype)
    Model2[Model2>0] = 1
    Model = Model2 - Model
    #sitk.WriteImage(sitk.GetImageFromArray(np.uint16(data)), "/Users/florent/Desktop/data.nrrd")
    """
    Model = np.uint16(Model.T * 20.0)
    mlab.figure(bgcolor=(0.63, 0.63, 0.63), size=(600, 600))
    src1 = mlab.pipeline.scalar_field(Model)
    #src1.spacing = [1, 1, 1.5]
    src1.update_image_data = True
    blur1 = mlab.pipeline.user_defined(src1, filter='ImageGaussianSmooth')
    voi1 = mlab.pipeline.extract_grid(blur1)
    voi1.trait_set(x_min=1, x_max=xm-1, y_min=1, y_max=ym-1, z_min=1, z_max=zm-1)
    mlab.pipeline.iso_surface(voi1, colormap='hot')
    mlab.title("Model", size=0.6)
    #voi1b = mlab.pipeline.extract_grid(src1)
    #voi1b.trait_set(y_max=int(ym/3), z_max=int(zm*2/3))
    ##voi1b.trait_set(y_max=12, z_max=53)
    ##outer1b = mlab.pipeline.iso_surface(voi1b, contours=[1776, ], color=(0.8, 0.7, 0.6))
    #outer1b = mlab.pipeline.iso_surface(voi1b, color=(0.8, 0.7, 0.6))
    """
    GTruth[GTruth > 0] = 255
    GTruth[Edges==255] = 255
    """
    """
    GTruth[GTruth>0] = 1
    GTruth2 = ndi.binary_dilation(GTruth).astype(GTruth.dtype)
    GTruth2[GTruth2>0] = 1
    GTruth = GTruth2 - GTruth
    #sitk.WriteImage(sitk.GetImageFromArray(np.uint16(data)), "/Users/florent/Desktop/data.nrrd")
    """
    GTruth = np.uint16(GTruth.T * 20.0)
    mlab.figure(bgcolor=(0.63, 0.63, 0.63), size=(600, 600))
    src2 = mlab.pipeline.scalar_field(GTruth)
    #src2.spacing = [1, 1, 1.5]
    src2.update_image_data = True
    blur2 = mlab.pipeline.user_defined(src2, filter='ImageGaussianSmooth')
    voi2 = mlab.pipeline.extract_grid(blur2)
    voi2.trait_set(x_min=1, x_max=xg-1, y_min=1, y_max=yg-1, z_min=1, z_max=zg-1)
    mlab.pipeline.iso_surface(voi2, colormap='hot')
    mlab.title("Ground Truth", size=0.6)
    #voi2b = mlab.pipeline.extract_grid(src2)
    #voi2b.trait_set(y_max=int(yg/3), z_max=int(zg*2/3))
    ##voi2b.trait_set(y_max=12, z_max=53)
    ##outer2b = mlab.pipeline.iso_surface(voi2b, contours=[1776, ], color=(0.8, 0.7, 0.6))
    #outer2b = mlab.pipeline.iso_surface(voi2b, color=(0.8, 0.7, 0.6))
    mlab.show()
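# Usage sketch (illustrative; assumes mayavi is installed and both arrays are 3D numpy volumes):
#   Display_dual_3D(model_volume.copy(), gtruth_volume.copy())   # pass copies, the inputs are modified in place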
#########################################################################################################
def Display_3D(SplineModelTOF):
    from mayavi import mlab
    SplineModelTOF[SplineModelTOF > 0] = 255
    SplineModelTOF = np.uint16(SplineModelTOF.T * 20.0)
    mlab.figure(bgcolor=(0, 0, 0), size=(600, 600))
    src = mlab.pipeline.scalar_field(SplineModelTOF)
    # Our data is not equally spaced in all directions:
    src.spacing = [1, 1, 1.5]
    src.update_image_data = True
    z, y, x = SplineModelTOF.shape
    # Extract some inner structures: the ventricles and the inter-hemisphere
    # fibers. We define a volume of interest (VOI) that restricts the
    # iso-surfaces to the inner of the brain. We do this with the ExtractGrid
    # filter.
    blur = mlab.pipeline.user_defined(src, filter='ImageGaussianSmooth')
    voi = mlab.pipeline.extract_grid(blur)
    voi.trait_set(x_min=1, x_max=x-1, y_min=1, y_max=y-1, z_min=1, z_max=z-1)
    #voi = mlab.pipeline.extract_grid(src)
    #mlab.pipeline.iso_surface(voi, contours=[1610, 2480], colormap='autumn')
    mlab.pipeline.iso_surface(voi, colormap='autumn')
    # Extract two views of the outside surface. We need to define VOIs in
    # order to leave out a cut in the head.
    """
    voi2 = mlab.pipeline.extract_grid(src)
    #voi2.trait_set(y_min=1)
    #outer = mlab.pipeline.iso_surface(voi2, contours=[1776, ], color=(0.8, 0.4, 0.3))
    outer = mlab.pipeline.iso_surface(voi2, color=(0.8, 0.4, 0.3))
    """
    """ """
    voi3 = mlab.pipeline.extract_grid(src)
    voi3.trait_set(y_max=12, z_max=53)
    outer3 = mlab.pipeline.iso_surface(voi3, contours=[1776, ], color=(0.8, 0.7, 0.6))
    """ """
    #mlab.view(-125, 54, 326, (145.5, 138, 66.5))
    #mlab.roll(-175)
    mlab.show()
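# Usage sketch (illustrative; assumes mayavi, and note the hard-coded src.spacing = [1, 1, 1.5] above):
#   Display_3D(spline_model_tof.copy())   # pass a copy, the input is thresholded in place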
#########################################################################################################
def resample_image2(itk_image, is_label=False):
    """Resample an image to isotropic voxels, reusing the x-spacing along all three axes."""
    original_spacing = itk_image.GetSpacing()
    out_spacing = [original_spacing[0], original_spacing[0], original_spacing[0]]
    original_size = itk_image.GetSize()
    out_size = [
        int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
        int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
        int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))
    ]
    resample = sitk.ResampleImageFilter()
    resample.SetOutputSpacing(out_spacing)
    resample.SetSize(out_size)
    resample.SetOutputDirection(itk_image.GetDirection())
    resample.SetOutputOrigin(itk_image.GetOrigin())
    resample.SetTransform(sitk.Transform())
    resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
    # Label maps must be resampled with nearest-neighbour interpolation to keep integer values.
    if is_label:
        resample.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resample.SetInterpolator(sitk.sitkLinear)
    return resample.Execute(itk_image)
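# Usage sketch (illustrative; the file names are hypothetical): resample an anisotropic
# volume to isotropic voxels, using nearest-neighbour interpolation for label maps.
#   img = sitk.ReadImage("/path/to/TOF.nrrd")
#   iso = resample_image2(img, is_label=False)
#   sitk.WriteImage(iso, "/path/to/TOF_iso.nrrd")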
#########################################################################################################
# From : https://www.kaggle.com/code/ori226/data-augmentation-with-elastic-deformations/notebook
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    shape_size = shape[:2]
    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size, center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Random smooth displacement fields along x and y (no displacement along the last axis).
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)
    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
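# Usage sketch (illustrative, following the Kaggle notebook cited above; the parameter values are
# conventional choices, not from the original script). Image and mask are stacked along the last
# axis so both receive the same deformation:
#   im = np.random.rand(128, 128).astype(np.float32)
#   mask = (im > 0.5).astype(np.float32)
#   im_merge = np.concatenate((im[..., None], mask[..., None]), axis=2)   # shape (128, 128, 2)
#   im_merge_t = elastic_transform(im_merge, alpha=im_merge.shape[1] * 2,
#                                  sigma=im_merge.shape[1] * 0.08,
#                                  alpha_affine=im_merge.shape[1] * 0.08)
#   im_t, mask_t = im_merge_t[..., 0], im_merge_t[..., 1]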
########################################################################################################################
def max_entropy(data):  # used for segmentation
    """
    Implements the Kapur-Sahoo-Wong (Maximum Entropy) thresholding method.
    Kapur J.N., Sahoo P.K., and Wong A.K.C. (1985) "A New Method for Gray-Level Picture Thresholding Using the Entropy
    of the Histogram", Computer Vision, Graphics, and Image Processing, 29(3): 273-285
    M. Emre Celebi
    06.15.2007
    Ported to an ImageJ plugin by G. Landini from E. Celebi's fourier_0.8 routines
    2016-04-28: Adapted for Python 2.7 by Robert Metchev from the Java source of MaxEntropy() in the Autothresholder plugin
    http://rsb.info.nih.gov/ij/plugins/download/AutoThresholder.java
    :param data: Sequence representing the histogram of the image
    :return threshold: Resulting maximum entropy threshold
    """
    # calculate the CDF (cumulative distribution function)
    cdf = data.astype(np.float64).cumsum()
    # find the histogram's nonzero area
    valid_idx = np.nonzero(data)[0]
    first_bin = valid_idx[0]
    last_bin = valid_idx[-1]
    # initialize the search for the maximum
    max_ent, threshold = 0, 0
    for it in range(first_bin, last_bin + 1):
        # Background (dark)
        hist_range = data[:it + 1]
        hist_range = hist_range[hist_range != 0] / cdf[it]  # normalize within the selected range & remove all 0 elements
        tot_ent = -np.sum(hist_range * np.log(hist_range))  # background entropy
        # Foreground/Object (bright)
        hist_range = data[it + 1:]
        # normalize within the selected range & remove all 0 elements
        hist_range = hist_range[hist_range != 0] / (cdf[last_bin] - cdf[it])
        tot_ent -= np.sum(hist_range * np.log(hist_range))  # accumulate object entropy
        # keep the threshold that maximizes the total entropy
        if tot_ent > max_ent:
            max_ent, threshold = tot_ent, it
    return threshold
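# Usage sketch (illustrative; assumes an 8-bit image or volume, the 256-bin histogram is a conventional choice):
#   hist, _ = np.histogram(volume, bins=256, range=(0, 256))
#   thr = max_entropy(hist)
#   binary = (volume > thr).astype(np.uint8)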
#########################################################################################################
def CropStack(stack):
    """Crop a 3D stack to the tight bounding box of its nonzero voxels."""
    z, y, x = stack.shape
    ### Find min & max position of the ROI along the z-axis: ###
    s_z = []
    for i in range(z):
        s_z.append(stack[i, :, :].sum())
    s_z = np.asarray(s_z)
    pos_z = np.asarray(np.where(s_z > 0))
    len_pos_z = pos_z.shape[1]
    pz0 = pos_z[0][0]
    pz1 = pos_z[0][len_pos_z-1] + 1
    ### Find min & max position of the ROI along the x-axis: ###
    proj_x = []
    pos_x0 = []
    pos_x1 = []
    for i in range(pz0, pz1):
        proj_x.append(np.sum(stack[i, :, :], axis=0))
        s_x = np.asarray(proj_x[i-pz0])
        pos_x = np.asarray(np.where(s_x > 0))
        len_pos_x = pos_x.shape[1]
        pos_x0.append(pos_x[0][0])
        pos_x1.append(pos_x[0][len_pos_x-1] + 1)
    px0 = min(pos_x0)
    px1 = max(pos_x1)
    ### Find min & max position of the ROI along the y-axis: ###
    proj_y = []
    pos_y0 = []
    pos_y1 = []
    for i in range(pz0, pz1):
        proj_y.append(np.sum(stack[i, :, :], axis=1))
        s_y = np.asarray(proj_y[i-pz0])
        pos_y = np.asarray(np.where(s_y > 0))
        len_pos_y = pos_y.shape[1]
        pos_y0.append(pos_y[0][0])
        pos_y1.append(pos_y[0][len_pos_y-1] + 1)
    py0 = min(pos_y0)
    py1 = max(pos_y1)
    CroppedStack = stack[pz0:pz1, py0:py1, px0:px1]
    return CroppedStack
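# Usage sketch (illustrative; assumes the stack contains at least one nonzero voxel):
#   cropped = CropStack(segmentation)   # tight bounding box of the nonzero region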
#########################################################################################################
def CropStack_XYZ(stack, CropCenter, CropSize):
    """Extract a cubic crop of side CropSize centred on CropCenter = (Xc, Yc, Zc)."""
    z, y, x = stack.shape
    Xc = int(CropCenter[0])
    Yc = int(CropCenter[1])
    Zc = int(CropCenter[2])
    hc = int(CropSize/2.)
    CroppedStack = stack[Zc-hc:Zc+hc, Yc-hc:Yc+hc, Xc-hc:Xc+hc]
    return CroppedStack
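# Usage sketch (illustrative; the centre coordinates and size are arbitrary examples):
#   patch = CropStack_XYZ(volume, CropCenter=(120, 96, 40), CropSize=64)
#   # No bounds checking is performed: near the borders the returned patch can be smaller than CropSize (or empty).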