Add files via upload

David Rotermund 2023-07-14 22:33:18 +02:00 committed by GitHub
parent eedae1ae44
commit d6e2c7432b
6 changed files with 115 additions and 50 deletions


@@ -54,10 +54,7 @@ class Anime:
if mask_np is not None:
image[mask_np] = float("NaN")
image_handle = plt.imshow(
image,
cmap=cmap,
vmin=vmin,
vmax=vmax,
image, cmap=cmap, vmin=vmin, vmax=vmax, interpolation="nearest"
)
if colorbar:
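The consolidated plt.imshow call now also pins the interpolation explicitly, so every pixel of the displayed frame is drawn as a sharp block instead of being resampled smoothly. A minimal sketch of that call outside the class (illustrative values, not part of the commit):

import numpy as np
import matplotlib.pyplot as plt

# illustrative frame and mask
image = np.random.rand(32, 32).astype(np.float32)
mask_np = image < 0.1
image[mask_np] = float("NaN")  # NaN pixels are drawn with the colormap's "bad" color

image_handle = plt.imshow(
    image, cmap="hot", vmin=0.0, vmax=1.0, interpolation="nearest"
)
plt.colorbar(image_handle)
plt.show()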


@@ -3,6 +3,7 @@ import torch
import matplotlib.pyplot as plt
import skimage
filename: str = "example_data_crop"
threshold: float = 0.8
tolerance: float | None = None
@@ -18,15 +19,22 @@ data = torch.tensor(input, device=torch_device)
del input
print("loading done")
data = data.nan_to_num(nan=0.0)
data -= data.mean(dim=0, keepdim=True)
# master_image = data.std(dim=0).nan_to_num(nan=0.0).clone()
data /= data.std(dim=0, keepdim=True)
master_image = (data.max(dim=0)[0] - data.min(dim=0)[0]).nan_to_num(nan=0.0).clone()
temp_image = master_image.clone()
master_mask = torch.ones_like(temp_image)
stored_contours: list = []
perimenter: list = []
area: list = []
counter: int = 0
contours_found: int = 0
while int(master_mask.sum()) > 0:
@@ -74,10 +82,13 @@ while int(master_mask.sum()) > 0:
# check if this is the contour in which the original point was
if mask[x, y]:
found_something = True
if mask.sum() > minimum_area:
mask_sum = mask.sum()
if mask_sum > minimum_area:
perimenter.append(skimage.measure.perimeter(mask))
area.append(mask_sum)
stored_contours.append(coords)
contours_found += 1
idx_set_mask = torch.where(torch.tensor(mask, device=torch_device) > 0)
master_mask[idx_set_mask] = 0.0
@@ -87,6 +98,14 @@ while int(master_mask.sum()) > 0:
master_mask[x, y] = 0.0
print("-==- DONE -==-")
np.save("cells.npy", np.array(stored_contours, dtype=object))
np.save("perimenter.npy", np.array(perimenter, dtype=object))
np.save("area.npy", np.array(area, dtype=object))
print()
for i in range(0, len(stored_contours)):
print(
f"ID:{i} area:{area[i]:.3e} perimenter:{perimenter[i]:.3e} ration:{area[i] / perimenter[i]:.3e}"
)
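For reference, skimage.measure.perimeter takes the boolean mask and returns the approximate boundary length in pixels, while mask.sum() is the enclosed area in pixels, so the printed ratio is a rough compactness measure per contour. A small self-contained sketch with a made-up contour (not repository data):

import numpy as np
import skimage.draw
import skimage.measure

# hypothetical contour in (row, col) coordinates
coords = np.array([[5, 5], [5, 25], [25, 25], [25, 5]])

# rasterize the closed contour into a boolean mask of a given image shape
mask = skimage.draw.polygon2mask((32, 32), coords)

area = mask.sum()                            # number of enclosed pixels
perimeter = skimage.measure.perimeter(mask)  # approximate boundary length
print(f"area: {area}  perimeter: {perimeter:.1f}  ratio: {area / perimeter:.2f}")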
plt.imshow(master_image.cpu(), cmap="hot")
for i in range(0, len(stored_contours)):


@ -6,6 +6,9 @@ from scipy.stats import skew
filename: str = "example_data_crop"
use_svd: bool = True
show_movie: bool = True
from Anime import Anime
torch_device: torch.device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu"
@@ -64,6 +67,26 @@ for id in range(0, stored_contours.shape[0]):
) / mask.sum()
to_plot[:, id] = ts
with torch.no_grad():
if show_movie:
print("Calculate movie")
# Clean tensor
data *= 0.0
for id in range(0, stored_contours.shape[0]):
mask = torch.tensor(
skimage.draw.polygon2mask(
(int(data.shape[1]), int(data.shape[2])), stored_contours[id]
),
device=torch_device,
dtype=torch.float32,
)
# * 1.0 - mask: otherwise the overlapping outlines look bad
# Yes... reshape and indices would be faster...
data *= 1.0 - mask.unsqueeze(0)
data += mask.unsqueeze(0) * to_plot[:, id].unsqueeze(1).unsqueeze(2)
ani = Anime()
ani.show(data)
skew_value = skew(to_plot.cpu().numpy(), axis=0)
skew_idx = np.flip(skew_value.argsort())
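The comments above already note that reshaping/indexing would be faster than the multiply-and-add overlay. A sketch of that indexed variant (a judgment call, not the commit's code), assuming data is (frames, height, width) and ts stands in for to_plot[:, id]:

import torch

# illustrative shapes
data = torch.zeros((100, 64, 64))   # (frames, height, width)
mask = torch.zeros((64, 64))
mask[10:20, 10:20] = 1.0            # stand-in for a polygon2mask result
ts = torch.rand(100)                # stand-in for to_plot[:, id]

# advanced indexing writes the time series into every masked pixel at once,
# instead of data *= 1.0 - mask; data += mask * ts
rows, cols = torch.where(mask > 0)
data[:, rows, cols] = ts.unsqueeze(1)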


@@ -3,15 +3,16 @@ import torch
import matplotlib.pyplot as plt
import skimage
from scipy.stats import skew
from svd import calculate_svd, to_remove, calculate_translation
from svd import to_remove
import torchvision as tv
from ImageAlignment import ImageAlignment
# from Anime import Anime
from Anime import Anime
filename: str = "example_data_crop"
use_svd: bool = True
show_movie: bool = True
torch_device: torch.device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu"
@@ -20,43 +21,25 @@ torch_device: torch.device = torch.device(
with torch.no_grad():
print("Load data")
input = np.load(filename + str(".npy")) # str("_decorrelated.npy"))
data = torch.tensor(input, device=torch_device)
# del input
print("loading done")
fill_value: float = 0.0
print("Movement compensation [BROKEN!!!!]")
print("During development, information about what could move was missing.")
print("Thus the preprocessing before shift determination may not work.")
data -= data.min(dim=0)[0]
data /= data.std(dim=0, keepdim=True) + 1e-20
stored_contours = np.load("cells.npy", allow_pickle=True)
kernel_size_pooling = int(np.load("kernel_size_pooling.npy"))
fill_value = float(np.load("fill_value.npy"))
image_alignment = ImageAlignment(default_dtype=torch.float32, device=torch_device)
tvec = calculate_translation(
input=data,
reference_image=data[0, ...].clone(),
image_alignment=image_alignment,
tvec = torch.tensor(np.load(filename + "_tvec.npy"), device=torch_device)
np_svd_data = np.load(
filename + "_svd.npz",
)
tvec_media = tvec.median(dim=0)[0]
print(f"Median of movement: {tvec_media[0]}, {tvec_media[1]}")
whiten_mean = torch.tensor(np_svd_data["whiten_mean"], device=torch_device)
whiten_k = torch.tensor(np_svd_data["whiten_k"], device=torch_device)
eigenvalues = torch.tensor(np_svd_data["eigenvalues"], device=torch_device)
del np_svd_data
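The whitening matrices are no longer recomputed here; they come from the filename + "_svd.npz" archive that the preprocessing script writes with np.savez (see that hunk further below). A minimal sketch of the save/load round trip (array shapes are illustrative placeholders):

import numpy as np
import torch

# placeholder SVD results
whiten_mean = np.zeros((64, 64), dtype=np.float32)
whiten_k = np.zeros((64, 64, 1), dtype=np.float32)
eigenvalues = np.zeros((1,), dtype=np.float32)

# writer side: named arrays go into one .npz archive
np.savez(
    "example_data_crop_svd.npz",
    whiten_mean=whiten_mean,
    whiten_k=whiten_k,
    eigenvalues=eigenvalues,
)

# reader side: the same names come back as keys of the loaded archive
np_svd_data = np.load("example_data_crop_svd.npz")
whiten_mean_t = torch.tensor(np_svd_data["whiten_mean"])
whiten_k_t = torch.tensor(np_svd_data["whiten_k"])
eigenvalues_t = torch.tensor(np_svd_data["eigenvalues"])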
data = torch.tensor(input, device=torch_device)
data -= data.min(dim=0, keepdim=True)[0]
for id in range(0, data.shape[0]):
data[id, ...] = tv.transforms.functional.affine(
img=data[id, ...].unsqueeze(0),
angle=0,
translate=[tvec[id, 1], tvec[id, 0]],
scale=1.0,
shear=0,
fill=fill_value,
).squeeze(0)
print("SVD")
whiten_mean, whiten_k, eigenvalues = calculate_svd(data)
# ----
data = torch.tensor(input, device=torch_device)
for id in range(0, data.shape[0]):
data[id, ...] = tv.transforms.functional.affine(
@@ -68,12 +51,19 @@ with torch.no_grad():
fill=fill_value,
).squeeze(0)
data -= data.min(dim=0, keepdim=True)[0]
to_remove_data = to_remove(data, whiten_k, whiten_mean)
data -= to_remove_data
del to_remove_data
stored_contours = np.load("cells.npy", allow_pickle=True)
print("Pooling")
# Warning: The contour masks have the same size as the binned data!!!
avage_pooling = torch.nn.AvgPool2d(
kernel_size=(kernel_size_pooling, kernel_size_pooling),
stride=(kernel_size_pooling, kernel_size_pooling),
)
data = avage_pooling(data)
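torch.nn.AvgPool2d performs the spatial binning here: with a (frames, height, width) tensor the frame axis is treated as the channel dimension, so each kernel_size_pooling x kernel_size_pooling block of pixels is averaged within every frame. A short sketch under that assumption (sizes are illustrative):

import torch

kernel_size_pooling = 4  # illustrative; the real value is read from kernel_size_pooling.npy

pooling = torch.nn.AvgPool2d(
    kernel_size=(kernel_size_pooling, kernel_size_pooling),
    stride=(kernel_size_pooling, kernel_size_pooling),
)

data = torch.rand((100, 64, 64))  # (frames, height, width)
data = pooling(data)
print(data.shape)                 # torch.Size([100, 16, 16])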
if use_svd:
data_flat = torch.flatten(
@@ -125,6 +115,26 @@ with torch.no_grad():
) / mask.sum()
to_plot[:, id] = ts
if show_movie:
print("Calculate movie")
# Clean tensor
data *= 0.0
for id in range(0, stored_contours.shape[0]):
mask = torch.tensor(
skimage.draw.polygon2mask(
(int(data.shape[1]), int(data.shape[2])), stored_contours[id]
),
device=torch_device,
dtype=torch.float32,
)
# * 1.0 - mask: otherwise the overlapping outlines look bad
# Yes... reshape and indices would be faster...
data *= 1.0 - mask.unsqueeze(0)
data += mask.unsqueeze(0) * to_plot[:, id].unsqueeze(1).unsqueeze(2)
ani = Anime()
ani.show(data)
exit()
skew_value = skew(to_plot.cpu().numpy(), axis=0)
skew_idx = np.flip(skew_value.argsort())
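scipy.stats.skew runs along the time axis of to_plot (frames x contours), and flipping the argsort gives contour indices ordered from largest to smallest skewness. A short sketch with synthetic traces (not repository data):

import numpy as np
from scipy.stats import skew

rng = np.random.default_rng(0)

# (frames, contours): one roughly symmetric trace, one with rare large excursions
to_plot = np.stack([rng.normal(size=1000), rng.exponential(size=1000)], axis=1)

skew_value = skew(to_plot, axis=0)        # one skewness value per contour
skew_idx = np.flip(skew_value.argsort())  # contour indices, most skewed first
print(skew_value, skew_idx)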


@@ -4,6 +4,7 @@ import os
import torchvision as tv
from svd import (
calculate_svd,
to_remove,
@@ -25,31 +26,33 @@ if __name__ == "__main__":
filtfilt_chuck_size: int = 10
bp_low_frequency: float = 0.1
bp_high_frequency: float = 1.0
convert_overwrite: bool | None = None
fill_value: float = 0.0
convert_overwrite: bool | None = None
torch_device: torch.device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu"
)
np.save("kernel_size_pooling.npy", np.array(kernel_size_pooling))
np.save("fill_value.npy", np.array(fill_value))
if (
(convert_overwrite is None)
and (os.path.isfile("example_data_crop" + ".npy") is False)
(convert_overwrite is None) and (os.path.isfile(filename + ".npy") is False)
) or (convert_overwrite):
print("Convert AVI file to npy file.")
convert_avi_to_npy(filename)
input = convert_avi_to_npy(filename)
print("--==-- DONE --==--")
with torch.no_grad():
else:
print("Load data")
input = np.load(filename + str(".npy"))
with torch.no_grad():
data = torch.tensor(input, device=torch_device)
print("Movement compensation [BROKEN!!!!]")
print("During development, information about what could move was missing.")
print("Thus the preprocessing before shift determination may not work.")
# TODO:
data -= data.min(dim=0)[0]
data /= data.std(dim=0, keepdim=True) + 1e-20
@@ -62,9 +65,12 @@ if __name__ == "__main__":
reference_image=data[0, ...].clone(),
image_alignment=image_alignment,
)
np.save(filename + "_tvec.npy", tvec.cpu().numpy())
tvec_media = tvec.median(dim=0)[0]
print(f"Median of movement: {tvec_media[0]}, {tvec_media[1]}")
del data
data = torch.tensor(input, device=torch_device)
for id in range(0, data.shape[0]):
@@ -76,13 +82,21 @@ if __name__ == "__main__":
shear=0,
fill=fill_value,
).squeeze(0)
data -= data.min(dim=0)[0]
print("SVD")
whiten_mean, whiten_k, eigenvalues = calculate_svd(data)
np.savez(
filename + "_svd.npz",
whiten_mean=whiten_mean.cpu().numpy(),
whiten_k=whiten_k.cpu().numpy(),
eigenvalues=eigenvalues.cpu().numpy(),
)
print("Calculate to_remove")
del data
data = torch.tensor(input, device=torch_device)
for id in range(0, data.shape[0]):
data[id, ...] = tv.transforms.functional.affine(
img=data[id, ...].unsqueeze(0),
@@ -92,7 +106,8 @@ if __name__ == "__main__":
shear=0,
fill=fill_value,
).squeeze(0)
data -= data.min(dim=0)[0]
to_remove_data = to_remove(data, whiten_k, whiten_mean)
data -= to_remove_data

svd.py

@@ -5,7 +5,7 @@ import numpy as np
from tqdm import trange
def convert_avi_to_npy(filename: str) -> None:
def convert_avi_to_npy(filename: str) -> np.ndarray:
capture_from_file = cv2.VideoCapture(filename + str(".avi"))
avi_length = int(capture_from_file.get(cv2.CAP_PROP_FRAME_COUNT))
@@ -26,12 +26,13 @@ def convert_avi_to_npy(filename: str) -> None:
assert data is not None
np.save(filename + str(".npy"), data)
return data
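With the changed signature, convert_avi_to_npy still writes filename + ".npy" to disk but now also returns the frames, so the caller can use them directly instead of reloading the file. A usage sketch mirroring the call in the main script (the import path assumes this file is svd.py, as shown above):

import torch
from svd import convert_avi_to_npy

filename = "example_data_crop"

# converts example_data_crop.avi, saves example_data_crop.npy, and returns the array
input = convert_avi_to_npy(filename)
data = torch.tensor(input)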
@torch.no_grad()
def to_remove(
data: torch.Tensor, whiten_k: torch.Tensor, whiten_mean: torch.Tensor
) -> torch.Tensor:
whiten_mean = whiten_mean
whiten_k = whiten_k[:, :, 0]
data = (data - whiten_mean.unsqueeze(0)) * whiten_k.unsqueeze(0)