diff --git a/4_show.py b/4_show.py
new file mode 100644
index 0000000..7276dd6
--- /dev/null
+++ b/4_show.py
@@ -0,0 +1,24 @@
+import numpy as np
+import torch
+from Anime import Anime
+
+# Show the decorrelated movie as-is (no per-pixel normalization).
+filename: str = "example_data_crop"
+
+
+torch_device: torch.device = torch.device(
+    "cuda:0" if torch.cuda.is_available() else "cpu"
+)
+
+print("Load data")
+raw = np.load(filename + "_decorrelated.npy")  # renamed: 'input' shadowed the builtin
+data = torch.tensor(raw, device=torch_device)
+del raw  # release the host-side copy before animating
+print("loading done")
+
+data = data.nan_to_num(nan=0.0)
+# data -= data.min(dim=0, keepdim=True)[0]
+
+
+ani = Anime()
+ani.show(data, vmin=0.0)
diff --git a/4b_show.py b/4b_show.py
new file mode 100644
index 0000000..82c070d
--- /dev/null
+++ b/4b_show.py
@@ -0,0 +1,24 @@
+import numpy as np
+import torch
+from Anime import Anime
+
+# Show the decorrelated movie with per-pixel min-shift / std normalization.
+filename: str = "example_data_crop"
+
+
+torch_device: torch.device = torch.device(
+    "cuda:0" if torch.cuda.is_available() else "cpu"
+)
+
+print("Load data")
+raw = np.load(filename + "_decorrelated.npy")  # renamed: 'input' shadowed the builtin
+data = torch.tensor(raw, device=torch_device)
+del raw  # release the host-side copy before animating
+print("loading done")
+
+data = data.nan_to_num(nan=0.0)
+data -= data.min(dim=0, keepdim=True)[0]
+data /= data.std(dim=0, keepdim=True) + 1e-9  # BUGFIX: was '*='; normalize by std (eps guards zero-variance pixels)
+
+ani = Anime()
+ani.show(data)