Add files via upload

David Rotermund 2024-02-26 18:57:33 +01:00 committed by GitHub
parent 6b3146be0f
commit 675e9743b6
8 changed files with 1377 additions and 0 deletions

File diff suppressed because it is too large

@@ -0,0 +1,57 @@
import torch
import torchvision as tv  # type: ignore
import logging

from functions.ImageAlignment import ImageAlignment
from functions.calculate_translation import calculate_translation
from functions.calculate_rotation import calculate_rotation


@torch.no_grad()
def align_refref(
    mylogger: logging.Logger,
    ref_image_acceptor: torch.Tensor,
    ref_image_donor: torch.Tensor,
    image_alignment: ImageAlignment,
    batch_size: int,
    fill_value: float = 0,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:

    mylogger.info("Rotate ref image acceptor onto donor")
    angle_refref = calculate_rotation(
        image_alignment=image_alignment,
        input=ref_image_acceptor.unsqueeze(0),
        reference_image=ref_image_donor,
        batch_size=batch_size,
    )

    ref_image_acceptor = tv.transforms.functional.affine(
        img=ref_image_acceptor.unsqueeze(0),
        angle=-float(angle_refref),
        translate=[0, 0],
        scale=1.0,
        shear=0,
        interpolation=tv.transforms.InterpolationMode.BILINEAR,
        fill=fill_value,
    )

    mylogger.info("Translate ref image acceptor onto donor")
    tvec_refref = calculate_translation(
        image_alignment=image_alignment,
        input=ref_image_acceptor,
        reference_image=ref_image_donor,
        batch_size=batch_size,
    )

    tvec_refref = tvec_refref[0, :]

    ref_image_acceptor = tv.transforms.functional.affine(
        img=ref_image_acceptor,
        angle=0,
        translate=[tvec_refref[1], tvec_refref[0]],
        scale=1.0,
        shear=0,
        interpolation=tv.transforms.InterpolationMode.BILINEAR,
        fill=fill_value,
    ).squeeze(0)

    return angle_refref, tvec_refref, ref_image_acceptor, ref_image_donor
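
The returned angle_refref and tvec_refref follow the torchvision conventions used inside the function (negated angle, translate passed as [tvec[1], tvec[0]]), so they can be re-applied to another acceptor-channel image in the same two steps. A minimal sketch with placeholder values standing in for real align_refref output:

# Sketch: re-apply an align_refref result to another 2-D acceptor image.
# The image and the angle/translation values below are placeholders, not real data.
import torch
import torchvision as tv  # type: ignore

other_image = torch.rand((512, 512))
angle_refref = torch.tensor(1.5)  # degrees, placeholder for the returned angle
tvec_refref = torch.tensor([2.0, -3.0])  # placeholder, used as [tvec[1], tvec[0]] below

# Step 1: rotation, with the same sign convention as in align_refref
rotated = tv.transforms.functional.affine(
    img=other_image.unsqueeze(0),
    angle=-float(angle_refref),
    translate=[0, 0],
    scale=1.0,
    shear=0,
    interpolation=tv.transforms.InterpolationMode.BILINEAR,
    fill=0.0,
)

# Step 2: translation, with the two vector components swapped as in align_refref
aligned = tv.transforms.functional.affine(
    img=rotated,
    angle=0,
    translate=[float(tvec_refref[1]), float(tvec_refref[0])],
    scale=1.0,
    shear=0,
    interpolation=tv.transforms.InterpolationMode.BILINEAR,
    fill=0.0,
).squeeze(0)
print(aligned.shape)  # torch.Size([512, 512])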

@@ -0,0 +1,21 @@
import torch


def binning(
    data: torch.Tensor,
    kernel_size: int = 4,
    stride: int = 4,
    divisor_override: int | None = 1,
) -> torch.Tensor:

    assert data.ndim == 4

    # Move the leading two dimensions to the back, pool them with avg_pool2d,
    # then move them to the front again. With divisor_override=1 each output
    # value is the sum over a bin rather than the mean.
    return (
        torch.nn.functional.avg_pool2d(
            input=data.movedim(0, -1).movedim(0, -1),
            kernel_size=kernel_size,
            stride=stride,
            divisor_override=divisor_override,
        )
        .movedim(-1, 0)
        .movedim(-1, 0)
    )
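
A quick self-contained check of the pooling trick above: because the leading two axes are moved into the pooling positions of avg_pool2d, it is the first two dimensions of the 4-D input that get binned. The tensor shape and the module path functions.binning are assumptions for illustration only; the real data layout is not specified in this diff.

# Illustration of binning() on a random 4-D tensor (shapes are made up).
import torch

from functions.binning import binning  # assumed module path for the hunk above

data = torch.rand((480, 640, 100, 2))
binned = binning(data, kernel_size=4, stride=4, divisor_override=1)
print(binned.shape)  # torch.Size([120, 160, 100, 2]) - only the first two axes shrink

# With divisor_override=1 each output value is the sum over a 4x4 bin;
# pass divisor_override=None to get a true average instead.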

@@ -0,0 +1,40 @@
import torch

from functions.ImageAlignment import ImageAlignment


@torch.no_grad()
def calculate_rotation(
    image_alignment: ImageAlignment,
    input: torch.Tensor,
    reference_image: torch.Tensor,
    batch_size: int,
) -> torch.Tensor:
    angle = torch.zeros((input.shape[0]))

    data_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(input),
        batch_size=batch_size,
        shuffle=False,
    )
    start_position: int = 0
    for input_batch in data_loader:
        assert len(input_batch) == 1

        end_position = start_position + input_batch[0].shape[0]

        angle_temp = image_alignment.dry_run_angle(
            input=input_batch[0],
            new_reference_image=reference_image,
        )

        assert angle_temp is not None

        angle[start_position:end_position] = angle_temp

        start_position += input_batch[0].shape[0]

    # Keep the estimated angles within a +/-180 degree range
    angle = torch.where(angle >= 180, 360.0 - angle, angle)
    angle = torch.where(angle <= -180, 360.0 + angle, angle)

    return angle

@@ -0,0 +1,37 @@
import torch

from functions.ImageAlignment import ImageAlignment


@torch.no_grad()
def calculate_translation(
    image_alignment: ImageAlignment,
    input: torch.Tensor,
    reference_image: torch.Tensor,
    batch_size: int,
) -> torch.Tensor:
    tvec = torch.zeros((input.shape[0], 2))

    data_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(input),
        batch_size=batch_size,
        shuffle=False,
    )
    start_position: int = 0
    for input_batch in data_loader:
        assert len(input_batch) == 1

        end_position = start_position + input_batch[0].shape[0]

        tvec_temp = image_alignment.dry_run_translation(
            input=input_batch[0],
            new_reference_image=reference_image,
        )

        assert tvec_temp is not None

        tvec[start_position:end_position, :] = tvec_temp

        start_position += input_batch[0].shape[0]

    return tvec
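
Both helpers above share the same pattern: a DataLoader slices the frame stack into batches, the per-batch estimates are written into a preallocated result tensor, and only the angle version folds its result into a +/-180 degree range. The stand-in class below is purely illustrative and is NOT the real functions.ImageAlignment from this commit; it only mimics the two dry_run_* methods these helpers call, and the module paths are assumed from the import pattern of the other files.

# Illustrative stand-in; not the real ImageAlignment implementation.
import torch

from functions.calculate_rotation import calculate_rotation  # assumed module paths
from functions.calculate_translation import calculate_translation


class DummyImageAlignment:
    """Returns zero rotation / zero translation for every frame in a batch."""

    def dry_run_angle(
        self, input: torch.Tensor, new_reference_image: torch.Tensor
    ) -> torch.Tensor:
        return torch.zeros((input.shape[0],))

    def dry_run_translation(
        self, input: torch.Tensor, new_reference_image: torch.Tensor
    ) -> torch.Tensor:
        return torch.zeros((input.shape[0], 2))


frames = torch.rand((10, 64, 64))  # stack of frames; exact layout is an assumption
reference = torch.rand((64, 64))

angles = calculate_rotation(
    image_alignment=DummyImageAlignment(),  # type: ignore[arg-type]
    input=frames,
    reference_image=reference,
    batch_size=4,
)
tvecs = calculate_translation(
    image_alignment=DummyImageAlignment(),  # type: ignore[arg-type]
    input=frames,
    reference_image=reference,
    batch_size=4,
)
print(angles.shape, tvecs.shape)  # torch.Size([10]) torch.Size([10, 2])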

@@ -0,0 +1,16 @@
import json
import os
import logging

from jsmin import jsmin  # type:ignore


def load_config(mylogger: logging.Logger, filename: str = "config.json") -> dict:
    mylogger.info("loading config file")
    if os.path.isfile(filename) is False:
        mylogger.info(f"{filename} is missing")

    with open(filename, "r") as file:
        config = json.loads(jsmin(file.read()))

    return config
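
load_config runs the file through jsmin before json.loads, so the config file may contain // comments. A small sketch; the key names below are invented for illustration and the module path functions.load_config is assumed from the import pattern of the other files:

# Hypothetical example config and loader call; key names are made up.
import logging

from functions.load_config import load_config  # assumed module path

with open("config.json", "w") as file:
    file.write(
        """
        {
            // comments are allowed: jsmin removes them before json.loads()
            "batch_size": 200,
            "fill_value": 0.0
        }
        """
    )

mylogger = logging.getLogger("load_config_example")
logging.basicConfig(level=logging.INFO)

config = load_config(mylogger=mylogger, filename="config.json")
print(config["batch_size"])  # 200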

@@ -0,0 +1,95 @@
import torch
import torchvision as tv  # type: ignore
import logging

from functions.calculate_rotation import calculate_rotation
from functions.ImageAlignment import ImageAlignment


@torch.no_grad()
def perform_donor_volume_rotation(
    mylogger: logging.Logger,
    acceptor: torch.Tensor,
    donor: torch.Tensor,
    oxygenation: torch.Tensor,
    volume: torch.Tensor,
    ref_image_donor: torch.Tensor,
    ref_image_volume: torch.Tensor,
    image_alignment: ImageAlignment,
    batch_size: int,
    fill_value: float = 0,
) -> tuple[
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
]:

    mylogger.info("Calculate rotation between donor data and donor ref image")
    angle_donor = calculate_rotation(
        input=donor,
        reference_image=ref_image_donor,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    mylogger.info("Calculate rotation between volume data and volume ref image")
    angle_volume = calculate_rotation(
        input=volume,
        reference_image=ref_image_volume,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    mylogger.info("Average over both rotations")
    angle_donor_volume = (angle_donor + angle_volume) / 2.0

    mylogger.info("Rotate acceptor data based on the average rotation")
    for frame_id in range(0, angle_donor_volume.shape[0]):
        acceptor[frame_id, ...] = tv.transforms.functional.affine(
            img=acceptor[frame_id, ...].unsqueeze(0),
            angle=-float(angle_donor_volume[frame_id]),
            translate=[0, 0],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    mylogger.info("Rotate donor data based on the average rotation")
    for frame_id in range(0, angle_donor_volume.shape[0]):
        donor[frame_id, ...] = tv.transforms.functional.affine(
            img=donor[frame_id, ...].unsqueeze(0),
            angle=-float(angle_donor_volume[frame_id]),
            translate=[0, 0],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    mylogger.info("Rotate oxygenation data based on the average rotation")
    for frame_id in range(0, angle_donor_volume.shape[0]):
        oxygenation[frame_id, ...] = tv.transforms.functional.affine(
            img=oxygenation[frame_id, ...].unsqueeze(0),
            angle=-float(angle_donor_volume[frame_id]),
            translate=[0, 0],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    mylogger.info("Rotate volume data based on the average rotation")
    for frame_id in range(0, angle_donor_volume.shape[0]):
        volume[frame_id, ...] = tv.transforms.functional.affine(
            img=volume[frame_id, ...].unsqueeze(0),
            angle=-float(angle_donor_volume[frame_id]),
            translate=[0, 0],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    return (acceptor, donor, oxygenation, volume, angle_donor_volume)

@@ -0,0 +1,96 @@
import torch
import torchvision as tv  # type: ignore
import logging

from functions.calculate_translation import calculate_translation
from functions.ImageAlignment import ImageAlignment


@torch.no_grad()
def perform_donor_volume_translation(
    mylogger: logging.Logger,
    acceptor: torch.Tensor,
    donor: torch.Tensor,
    oxygenation: torch.Tensor,
    volume: torch.Tensor,
    ref_image_donor: torch.Tensor,
    ref_image_volume: torch.Tensor,
    image_alignment: ImageAlignment,
    batch_size: int,
    fill_value: float = 0,
) -> tuple[
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
    torch.Tensor,
]:

    mylogger.info("Calculate translation between donor data and donor ref image")
    tvec_donor = calculate_translation(
        input=donor,
        reference_image=ref_image_donor,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    mylogger.info("Calculate translation between volume data and volume ref image")
    tvec_volume = calculate_translation(
        input=volume,
        reference_image=ref_image_volume,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    mylogger.info("Average over both translations")
    tvec_donor_volume = (tvec_donor + tvec_volume) / 2.0

    mylogger.info("Translate acceptor data based on the average translation vector")
    for frame_id in range(0, tvec_donor_volume.shape[0]):
        acceptor[frame_id, ...] = tv.transforms.functional.affine(
            img=acceptor[frame_id, ...].unsqueeze(0),
            angle=0,
            translate=[tvec_donor_volume[frame_id, 1], tvec_donor_volume[frame_id, 0]],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    mylogger.info("Translate donor data based on the average translation vector")
    for frame_id in range(0, tvec_donor_volume.shape[0]):
        donor[frame_id, ...] = tv.transforms.functional.affine(
            img=donor[frame_id, ...].unsqueeze(0),
            angle=0,
            translate=[tvec_donor_volume[frame_id, 1], tvec_donor_volume[frame_id, 0]],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    mylogger.info("Translate oxygenation data based on the average translation vector")
    for frame_id in range(0, tvec_donor_volume.shape[0]):
        oxygenation[frame_id, ...] = tv.transforms.functional.affine(
            img=oxygenation[frame_id, ...].unsqueeze(0),
            angle=0,
            translate=[tvec_donor_volume[frame_id, 1], tvec_donor_volume[frame_id, 0]],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    mylogger.info("Translate volume data based on the average translation vector")
    for frame_id in range(0, tvec_donor_volume.shape[0]):
        volume[frame_id, ...] = tv.transforms.functional.affine(
            img=volume[frame_id, ...].unsqueeze(0),
            angle=0,
            translate=[tvec_donor_volume[frame_id, 1], tvec_donor_volume[frame_id, 0]],
            scale=1.0,
            shear=0,
            interpolation=tv.transforms.InterpolationMode.BILINEAR,
            fill=fill_value,
        ).squeeze(0)

    return (acceptor, donor, oxygenation, volume, tvec_donor_volume)
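
Taken together, the hunks in this commit suggest a registration flow: align the acceptor reference image onto the donor reference image once, then rotate and translate every frame of the four recorded channels with the donor/volume averaged motion. A hedged sketch of that chaining; the module paths for align_refref and the perform_* helpers, the tensor layouts in the comments, and the pre-built ImageAlignment instance are assumptions rather than facts taken from this diff.

# Sketch only: module paths are inferred, and ImageAlignment construction is left
# to the caller because it is not part of the hunks shown in this commit.
import logging

import torch

from functions.ImageAlignment import ImageAlignment
from functions.align_refref import align_refref
from functions.perform_donor_volume_rotation import perform_donor_volume_rotation
from functions.perform_donor_volume_translation import perform_donor_volume_translation


def register_channels(
    mylogger: logging.Logger,
    acceptor: torch.Tensor,  # assumed (frames, height, width)
    donor: torch.Tensor,
    oxygenation: torch.Tensor,
    volume: torch.Tensor,
    ref_image_acceptor: torch.Tensor,  # assumed (height, width)
    ref_image_donor: torch.Tensor,
    ref_image_volume: torch.Tensor,
    image_alignment: ImageAlignment,
    batch_size: int = 200,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    # One-time alignment of the acceptor reference image onto the donor reference image
    angle_refref, tvec_refref, ref_image_acceptor, ref_image_donor = align_refref(
        mylogger=mylogger,
        ref_image_acceptor=ref_image_acceptor,
        ref_image_donor=ref_image_donor,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    # Frame-wise rotation of all four channels (donor/volume averaged angle)
    acceptor, donor, oxygenation, volume, angle_donor_volume = perform_donor_volume_rotation(
        mylogger=mylogger,
        acceptor=acceptor,
        donor=donor,
        oxygenation=oxygenation,
        volume=volume,
        ref_image_donor=ref_image_donor,
        ref_image_volume=ref_image_volume,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    # Frame-wise translation of all four channels (donor/volume averaged vector)
    acceptor, donor, oxygenation, volume, tvec_donor_volume = perform_donor_volume_translation(
        mylogger=mylogger,
        acceptor=acceptor,
        donor=donor,
        oxygenation=oxygenation,
        volume=volume,
        ref_image_donor=ref_image_donor,
        ref_image_volume=ref_image_volume,
        image_alignment=image_alignment,
        batch_size=batch_size,
    )

    return acceptor, donor, oxygenation, volume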