changes for multichannel, small adjustments
plot.py (20 lines changed)
@@ -14,22 +14,26 @@ parser = argparse.ArgumentParser()
 parser.add_argument('--infile', type = str, required = True, help = 'file name of the .h5 file used for the names')
 parser.add_argument('--data', type = str, required = True, help = "give only the part of the file name before the _(nr-s).npy suffix; files for creating the plots")
 parser.add_argument('--images', type = str, default = "mse_mssim", help = "base name for the output images, default = mse_mssim")
+parser.add_argument('-sp', '--sigma_plots', action = argparse.BooleanOptionalAction, help = 'shows the plots where all intensity values of one sigma value are on a single plot')
 parser.add_argument('-q', '--squared', action = argparse.BooleanOptionalAction, help = "squares the intensity values, otherwise the raw values are used")
 
 args = parser.parse_args()
 
 group_to_datasets = {}
 
 with h5py.File(args.infile, 'r') as hdf5_file:
-    for group_name in hdf5_file:  # loop over the group names
-        group = hdf5_file[group_name]  # store the group
+    for group_name in hdf5_file:  # store the group name
+        group = hdf5_file[group_name]
+        if isinstance(group, h5py.Group):
+            datasets = []  # if the name refers to an h5py.Group
+            for ds_name in group:  # go over all datasets inside the group
+                if isinstance(group[ds_name], h5py.Dataset):  # if it is a dataset
+                    datasets.append(ds_name)
+            group_to_datasets[group_name] = datasets  # each group gets its own datasets
+            for subgroup_name in group:  # store the subgroup name
+                subgroup = group[subgroup_name]
+                if isinstance(subgroup, h5py.Group):
+                    datasets = []  # if the name refers to an h5py.Group
+                    for ds_name in subgroup:  # go over all datasets inside the subgroup
+                        if isinstance(subgroup[ds_name], h5py.Dataset):  # if it is a dataset
+                            datasets.append(ds_name)
+                    group_to_datasets[subgroup_name] = datasets  # each subgroup gets its own datasets
+# added lines for recognising groups and subgroups
 
 group_names = list(group_to_datasets.keys())  # holds all the groups, which are the different intensities
 # all the dataset names are there with sigma and minimum, but only sigma is used; the rest are not used at all
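The two nested loops above enumerate groups and subgroups explicitly, so the mapping stops two levels deep. A sketch of the same collection at arbitrary depth with h5py's visititems (not part of the commit; the file name is a placeholder for args.infile, and the flat parent-keyed dictionary shape is an assumption):

    import h5py

    group_to_datasets = {}

    def collect(name, obj):
        # visititems walks every object below the root; record each dataset
        # under the name of its immediate parent group
        if isinstance(obj, h5py.Dataset):
            parent, _, ds_name = name.rpartition('/')
            group_to_datasets.setdefault(parent or '/', []).append(ds_name)

    with h5py.File('names.h5', 'r') as hdf5_file:   # 'names.h5' is a placeholder
        hdf5_file.visititems(collect)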
pohi.py (110 lines changed)
@@ -9,11 +9,11 @@ from deconvolve_func import DeconvolveWithBead as DWB
 parser = argparse.ArgumentParser()
 parser.add_argument('bead', type = str, help = "original file")
-parser.add_argument('--output', type = str, required = True, help = "file name for the output of images")
-parser.add_argument('-pd', '--plot_data', type = str, required = True, help = "data storage file name, iterations added automatically")
+parser.add_argument('-o', '--output', type = str, required = True, help = ".h5 file name for the output of images")
+parser.add_argument('-pd', '--plot_data', type = str, required = True, help = ".npy data storage file name, iterations added automatically")
 parser.add_argument('-it', '--iterations', type = int, required = True, help = "nr of iterations")
-parser.add_argument('-in', '--intensity', type = float, nargs = '+', required = True, help = "image intensity; division of the signal by intensity")
-parser.add_argument('-k', '--kernel', type = float, nargs = '+', required = True, help = "kernel g sigma values")
+parser.add_argument('-in', '--intensity', type = float, nargs = '+', required = True, help = "[x y ..], space as delimiter; image intensity; division of the signal by intensity")
+parser.add_argument('-k', '--kernel', type = float, nargs = '+', required = True, help = "[x y ..], space as delimiter; kernel g sigma values")
 parser.add_argument('--vox', type = float, nargs = '+', default = [1.0, 1.0, 1.0], help = "voxel values in micrometers, default [1.0, 1.0, 1.0]")
 
 args = parser.parse_args()
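A hypothetical invocation with the renamed flags (every file name below is made up for illustration):

    python pohi.py bead.h5 -o deconvolved.h5 -pd plot_data -it 50 -in 1 10 100 -k 0.5 1.0 2.0

Because --intensity and --kernel are declared with nargs = '+', the values are passed space-separated and argparse collects each of them into a Python list of floats.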
@@ -21,16 +21,31 @@ args = parser.parse_args()
 #--------- importing the image, that will become the GROUND TRUTH ------
 with h5py.File(args.bead, 'r') as hdf5_file:
-    original = hdf5_file['t0/channel0'][:]
-    # dset = hdf5_file['t0/channel0']  # allows access to the attributes
-    # vox = reverse(dset.attrs.get('element_size_um'))
-    # ch1 = dset[:]  # reads the whole data from the dataset
-    # ch2 = hdf5_file['t0/channel1'][:]
-    # ch3 = hdf5_file['t0/channel2'][:]
-
-print("integral of the original image", np.sum(original))
+    #original = hdf5_file['t0/channel0'][:]
+    group = hdf5_file['t0']
+    ch = []
+    for ch_name in group:
+        dset = group[ch_name]
+        vox = dset.attrs.get('element_size_um')
+        vox = vox[::-1]  # reversing, because the attribute is stored as z,y,x
+        channel = dset[:]
+        ch.append(channel)
+
+#print("integral of the original image", np.sum(original))
 
 #--------- creating the 2D gaussian PSF----------------------------------
-points = np.max(original)
-point_sources = np.zeros(original.shape)
+points = np.max(ch[0])
+point_sources = np.zeros(ch[0].shape)
 
-center_x = original.shape[0] // 2
-center_y = original.shape[1] // 2
+center_x = ch[0].shape[0] // 2
+center_y = ch[0].shape[1] // 2
 
 point_sources[center_x, center_y] = points
 #point_sources[90, 110] = points
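Note that vox is reassigned on every pass of the channel loop, so after the loop it holds the element_size_um of whichever channel was read last; and since attrs.get returns None for a missing attribute, the unconditional vox[::-1] would raise before the later "if vox is None" fallback ever runs. A minimal sketch of the same read with a first-found fallback, under the assumption that all channels share one voxel size (the file name stands in for args.bead):

    import h5py

    with h5py.File('bead.h5', 'r') as hdf5_file:
        group = hdf5_file['t0']
        ch = [group[name][:] for name in group]    # read every channel into memory
        vox = None
        for name in group:
            attr = group[name].attrs.get('element_size_um')
            if attr is not None:
                vox = attr[::-1]                   # stored as z,y,x; reverse to x,y,z
                break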
@@ -46,49 +61,54 @@ with h5py.File(args.output, 'w') as hdf5_file:
 'different noise levels ### signal intensity #################################################'
 intensity = args.intensity
 
-for i in intensity:
-    scaled_original = original / i if i != 0 else original.copy()
-    scaled_original = scaled_original.astype(np.float64)
-    image = gaussian_filter(scaled_original, sigma = 2.4)  #convolve(scaled_original, psf, mode='same')
-    image[image <= 0] = 0
-
-    "applying Poisson noise to the image #################################################"
-    if i == 0:
-        noisy_image = image.copy()
-    else:
-        noisy_image = np.random.poisson(lam = image, size = image.shape).astype(np.float64)
-
-    hdf5_file.create_dataset(f"noise_level_{i:08.3f}", data = noisy_image)
-    hdf5_file.create_dataset(f"scaled_original_{i:08.3f}", data = scaled_original)
-
-    '''kernels with different sigmas - restoring the image ##################'''
-    g_sigma = args.kernel
-    vox = np.array(args.vox)
-    mses = []
-    mssim = []
-
-    for j in g_sigma:
-        image_kerneled = gaussian_filter(noisy_image, sigma = j)
-        psf_kerneled = gaussian_filter(psf, sigma = j)
-        print(np.sum(image_kerneled), np.sum(noisy_image), np.sum(psf_kerneled), np.sum(psf))
-        'deconvolving - restoring the image ######################################'
-        deconv = DWB(image = image_kerneled, voxel_size = vox, bead = scaled_original)
-
-        dec = deconv.deconvolve_rl(n_iterations = args.iterations,
-                                   bead = scaled_original,
-                                   psf = psf_kerneled,
-                                   im = image_kerneled)
-
-        hdf5_file.create_dataset(f'intensity_{i:08.3f}/SIGMA_{j:03.1f}', data = dec[0])
-        hdf5_file.create_dataset(f"intensity_{i:08.3f}/minimum_image_{j:03.1f}", data = dec[4])
-
-        mses.append(dec[1])
-        mssim.append(dec[2])
-
-    print("--------------- integral of the final image", np.sum(dec[0]))
-    data = np.column_stack((np.column_stack(mses), np.column_stack(mssim)))
-    np.save(f"{args.plot_data}_{args.iterations}_{i:08.3f}.npy", np.column_stack((dec[3], data)))
+for w, channel in enumerate(ch):  # 'channel' as the loop variable, so it does not shadow the channel list 'ch'
+
+    for i in intensity:
+        scaled_original = channel / i if i != 0 else channel.copy()
+        scaled_original = scaled_original.astype(np.float64)
+        image = gaussian_filter(scaled_original, sigma = 2.4)  #convolve(scaled_original, psf, mode='same')
+        image[image <= 0] = 0
+
+        "applying Poisson noise to the image #################################################"
+        if i == 0:
+            noisy_image = image.copy()
+        else:
+            noisy_image = np.random.poisson(lam = image, size = image.shape).astype(np.float64)
+
+        hdf5_file.create_dataset(f"ch{w:03d}/noise_level_{i:08.3f}", data = noisy_image)
+        hdf5_file.create_dataset(f"ch{w:03d}/scaled_original_{i:08.3f}", data = scaled_original)
+
+        '''kernels with different sigmas - restoring the image ##################'''
+        g_sigma = args.kernel
+        if vox is None:  # no element_size_um attribute was found, fall back to the --vox argument
+            vox = np.array(args.vox)
+        else:
+            vox = np.array(vox)
+        mses = []
+        mssim = []
+
+        for j in g_sigma:
+            image_kerneled = gaussian_filter(noisy_image, sigma = j)
+            psf_kerneled = gaussian_filter(psf, sigma = j)
+            print(np.sum(image_kerneled), np.sum(noisy_image), np.sum(psf_kerneled), np.sum(psf))
+            'deconvolving - restoring the image ######################################'
+            deconv = DWB(image = image_kerneled, voxel_size = vox, bead = scaled_original)
+
+            dec = deconv.deconvolve_rl(n_iterations = args.iterations,
+                                       bead = scaled_original,
+                                       psf = psf_kerneled,
+                                       im = image_kerneled)
+
+            hdf5_file.create_dataset(f'ch{w:03d}/intensity_{i:08.3f}/SIGMA_{j:03.1f}', data = dec[0])
+            hdf5_file.create_dataset(f"ch{w:03d}/intensity_{i:08.3f}/minimum_image_{j:03.1f}", data = dec[4])
+
+            mses.append(dec[1])
+            mssim.append(dec[2])
+
+        print("--------------- integral of the final image", np.sum(dec[0]))
+        data = np.column_stack((np.column_stack(mses), np.column_stack(mssim)))
+        np.save(f"{args.plot_data}_{args.iterations}_ch{w:03d}_{i:08.3f}.npy", np.column_stack((dec[3], data)))
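Given how data is assembled, each saved .npy file has dec[3] (presumably the per-iteration axis) as its first column, followed by one MSE column per kernel sigma and then one MSSIM column per kernel sigma. A sketch of reading one of these files back (the file name matches the f-string format for plot_data='pd', iterations=50, channel 0, intensity 1; all of those values are made up):

    import numpy as np

    arr = np.load('pd_50_ch000_0001.000.npy')
    n_sigma = (arr.shape[1] - 1) // 2   # half of the remaining columns are MSE, half MSSIM
    iters = arr[:, 0]                   # dec[3], the iteration axis
    mses = arr[:, 1:1 + n_sigma]        # one column per --kernel sigma
    mssims = arr[:, 1 + n_sigma:]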