Add files via upload

This commit is contained in:
David Rotermund 2024-07-10 16:01:26 +02:00 committed by GitHub
parent 9e28e73468
commit 533f54f212
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 6 additions and 76 deletions

View file

@@ -7,7 +7,6 @@ class NNMF2d(torch.nn.Module):
     in_channels: int
     out_channels: int
     weight: torch.Tensor
-    bias: None | torch.Tensor
     iterations: int
     epsilon: float | None
     init_min: float
@@ -16,8 +15,6 @@ class NNMF2d(torch.nn.Module):
     positive_function_type: int
     local_learning: bool
     local_learning_kl: bool
-    use_reconstruction: bool
-    skip_connection: bool

     def __init__(
         self,

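Note on the layer above: the class name together with the remaining weight, iterations, and epsilon attributes points at an iterative non-negative matrix factorization step. The following is a minimal, self-contained sketch of the standard multiplicative NNMF update for that kind of layer; it is not copied from this repository, and the tensor shapes, normalization, and default values are assumptions.

import torch

# Sketch only: multiplicative update for h in x ~ h @ W under the KL divergence,
# assuming the rows of W are non-negative and normalized to sum to one.
# Illustrates the technique the class name suggests, not the repository code.
def nnmf_step(x: torch.Tensor, weight: torch.Tensor, iterations: int = 20,
              epsilon: float = 1e-6) -> torch.Tensor:
    # x: (batch, in_channels), non-negative
    # weight: (out_channels, in_channels), non-negative, rows sum to 1
    h = torch.full((x.shape[0], weight.shape[0]), 1.0 / weight.shape[0])
    for _ in range(iterations):
        reconstruction = h @ weight                        # (batch, in_channels)
        h = h * ((x / (reconstruction + epsilon)) @ weight.T)
        h = h / (h.sum(dim=1, keepdim=True) + epsilon)     # keep h normalized
    return h

x = torch.rand(8, 32)
w = torch.rand(64, 32)
w = w / w.sum(dim=1, keepdim=True)
print(nnmf_step(x, w).shape)  # torch.Size([8, 64])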
View file

@@ -9,8 +9,6 @@ def append_block(
     out_channels: int,
     test_image: torch.Tensor,
     parameter_cnn_top: list[torch.nn.parameter.Parameter],
-    parameter_cnn_skip: list[torch.nn.parameter.Parameter],
-    parameter_cnn: list[torch.nn.parameter.Parameter],
     parameter_nnmf: list[torch.nn.parameter.Parameter],
     parameter_norm: list[torch.nn.parameter.Parameter],
     torch_device: torch.device,
@@ -24,8 +22,6 @@ def append_block(
     iterations: int = 20,
     local_learning: bool = False,
     local_learning_kl: bool = False,
-    use_nnmf: bool = True,
-    use_identity: bool = False,
     momentum: float = 0.1,
     track_running_stats: bool = False,
 ) -> torch.Tensor:
@@ -39,7 +35,6 @@ def append_block(
     kernel_size_internal[1] = test_image.shape[-1]

     # Main
     network.append(torch.nn.ReLU())
     test_image = network[-1](test_image)

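Because use_nnmf, use_identity, parameter_cnn_skip, and parameter_cnn disappear from the signature, any caller that still passes them fails at call time; this is why make_network and main are adjusted in the same commit. A generic, self-contained illustration of that failure mode (hypothetical stand-in function, not the repository's append_block):

# Hypothetical trimmed signature; only the failure mode is the point here.
def append_block_sketch(out_channels: int, iterations: int = 20) -> None:
    print(f"block with {out_channels} channels, {iterations} NNMF iterations")

append_block_sketch(out_channels=32)  # fine with the new signature
try:
    append_block_sketch(out_channels=32, use_nnmf=True)  # removed keyword
except TypeError as error:
    print(error)  # ... got an unexpected keyword argument 'use_nnmf'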
View file

@@ -19,7 +19,8 @@ def get_data(path: str = "log_cnn"):
     for id in range(0, len(te)):
         np_temp[id, 0] = te[id].step
         np_temp[id, 1] = te[id].value
-    print(np_temp[:, 1]/100)
+    print(np_temp[:, 1] / 100)
     return np_temp

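The helper above collects step/value pairs from a TensorBoard log directory into a NumPy array. A plausible full version using TensorBoard's EventAccumulator is sketched below; the scalar tag "test accuracy" and the loading path are assumptions, only the loop and the corrected print come from this diff.

import numpy as np
from tensorboard.backend.event_processing import event_accumulator

def get_data(path: str = "log_cnn", tag: str = "test accuracy") -> np.ndarray:
    ea = event_accumulator.EventAccumulator(path)
    ea.Reload()
    te = ea.Scalars(tag)  # scalar events expose .step and .value
    np_temp = np.zeros((len(te), 2))
    for id in range(0, len(te)):
        np_temp[id, 0] = te[id].step
        np_temp[id, 1] = te[id].value
    print(np_temp[:, 1] / 100)
    return np_temp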
View file

@@ -1,5 +1,4 @@
 import torch
-from SplitOnOffLayer import SplitOnOffLayer
 from append_block import append_block
 from L1NormLayer import L1NormLayer
 from NNMF2d import NNMF2d
@@ -7,7 +6,6 @@ from append_parameter import append_parameter

 def make_network(
-    use_nnmf: bool,
     input_dim_x: int,
     input_dim_y: int,
     input_number_of_channel: int,
@@ -67,11 +65,7 @@ def make_network(
         (1, 1),
         (1, 1),
     ],
-    local_learning: list[bool] = [False, False, False, False],
-    local_learning_kl: bool = True,
-    max_pool: bool = True,
     enable_onoff: bool = False,
-    use_identity: bool = False,
 ) -> tuple[
     torch.nn.Sequential,
     list[list[torch.nn.parameter.Parameter]],
@@ -86,14 +80,11 @@ def make_network(
     assert len(number_of_output_channels) == len(stride_pool)
     assert len(number_of_output_channels) == len(padding_pool)
     assert len(number_of_output_channels) == len(dilation_pool)
-    assert len(number_of_output_channels) == len(local_learning)

     if enable_onoff:
         input_number_of_channel *= 2

     parameter_cnn_top: list[torch.nn.parameter.Parameter] = []
-    parameter_cnn_skip: list[torch.nn.parameter.Parameter] = []
-    parameter_cnn: list[torch.nn.parameter.Parameter] = []
     parameter_nnmf: list[torch.nn.parameter.Parameter] = []
     parameter_norm: list[torch.nn.parameter.Parameter] = []
@@ -104,10 +95,6 @@ def make_network(
     network = torch.nn.Sequential()
     network = network.to(torch_device)

-    if enable_onoff:
-        network.append(SplitOnOffLayer())
-        test_image = network[-1](test_image)
-
     for block_id in range(0, len(number_of_output_channels)):

         test_image = append_block(
@@ -122,16 +109,10 @@ def make_network(
             positive_function_type=positive_function_type,
             beta=beta,
             iterations=iterations,
-            local_learning=local_learning[block_id],
-            local_learning_kl=local_learning_kl,
             torch_device=torch_device,
             parameter_cnn_top=parameter_cnn_top,
-            parameter_cnn_skip=parameter_cnn_skip,
-            parameter_cnn=parameter_cnn,
             parameter_nnmf=parameter_nnmf,
             parameter_norm=parameter_norm,
-            use_nnmf=use_nnmf,
-            use_identity=use_identity,
         )

         if (kernel_size_pool[block_id][0] > 0) and (kernel_size_pool[block_id][1] > 0):
@@ -214,16 +195,12 @@ def make_network(
     parameters: list[list[torch.nn.parameter.Parameter]] = [
         parameter_cnn_top,
-        parameter_cnn_skip,
-        parameter_cnn,
         parameter_nnmf,
         parameter_norm,
     ]

     name_list: list[str] = [
         "cnn_top",
-        "cnn_skip",
-        "cnn",
         "nnmf",
         "batchnorm2d",
     ]

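After the removals, the call surface of make_network shrinks to the arguments still visible above. A minimal usage sketch, assuming CIFAR10-sized input and that the remaining defaults (output channels, pooling, and so on) are acceptable; the argument values are placeholders, not taken from this diff.

import torch
from make_network import make_network  # module from this repository

torch_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

network, parameters, name_list = make_network(
    input_dim_x=32,                # placeholder: CIFAR10 width
    input_dim_y=32,                # placeholder: CIFAR10 height
    input_number_of_channel=3,     # placeholder: RGB input
    iterations=20,
    torch_device=torch_device,
)
print(name_list)  # ["cnn_top", "nnmf", "batchnorm2d"] after this commit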
View file

@@ -23,51 +23,26 @@ from make_optimize import make_optimize
 def main(
     lr_initial_nnmf: float = 0.01,
-    lr_initial_cnn: float = 0.001,
     lr_initial_cnn_top: float = 0.001,
-    lr_initial_cnn_skip: float = 0.001,
     lr_initial_norm: float = 0.001,
     iterations: int = 20,
-    use_nnmf: bool = True,
     dataset: str = "CIFAR10",  # "CIFAR10", "FashionMNIST", "MNIST"
-    enable_onoff: bool = False,
-    local_learning_all: bool = False,
-    local_learning_0: bool = False,
-    local_learning_1: bool = False,
-    local_learning_2: bool = False,
-    local_learning_3: bool = False,
-    local_learning_kl: bool = False,
-    max_pool: bool = False,
     only_print_network: bool = False,
-    use_identity: bool = False,
-    da_auto_mode: bool = False,
 ) -> None:
-    if local_learning_all:
-        local_learning_0 = True
-        local_learning_1 = True
-        local_learning_2 = True
-        local_learning_3 = True
+    da_auto_mode: bool = False  # Automatic Data Augmentation from TorchVision

     lr_limit: float = 1e-9
-    if use_identity:
-        use_nnmf = True

     torch_device: torch.device = (
         torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
     )
     torch.set_default_dtype(torch.float32)

     # Some parameters
-    batch_size_train: int = 50#0
-    batch_size_test: int = 50#0
+    batch_size_train: int = 50  # 0
+    batch_size_test: int = 50  # 0
     number_of_epoch: int = 5000
-    if use_nnmf:
-        prefix: str = "nnmf"
-    else:
-        prefix = "cnn"
     loss_mode: int = 0
     loss_coeffs_mse: float = 0.5
     loss_coeffs_kldiv: float = 1.0
@@ -111,22 +86,11 @@ def main(
         parameters,
         name_list,
     ) = make_network(
-        use_nnmf=use_nnmf,
         input_dim_x=input_dim_x,
         input_dim_y=input_dim_y,
         input_number_of_channel=input_number_of_channel,
         iterations=iterations,
-        enable_onoff=enable_onoff,
-        local_learning=[
-            local_learning_0,
-            local_learning_1,
-            local_learning_2,
-            local_learning_3,
-        ],
-        local_learning_kl=local_learning_kl,
-        max_pool=max_pool,
         torch_device=torch_device,
-        use_identity=use_identity,
     )

     print(network)
@@ -152,8 +116,6 @@ def main(
         parameters=parameters,
         lr_initial=[
             lr_initial_cnn_top,
-            lr_initial_cnn_skip,
-            lr_initial_cnn,
             lr_initial_nnmf,
             lr_initial_norm,
         ],
@@ -166,9 +128,7 @@ def main(
     else:
         my_string += "-_"

-    default_path: str = (
-        f"{prefix}_iter{iterations}{my_string}0{local_learning_0}_1{local_learning_1}_2{local_learning_2}_3{local_learning_3}_kl{local_learning_kl}_max{max_pool}"
-    )
+    default_path: str = f"iter{iterations}{my_string}"
     log_dir: str = f"log_{default_path}"

     tb = SummaryWriter(log_dir=log_dir)
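The shortened lr_initial list stays aligned by position with the parameter groups and names returned by make_network (cnn_top, nnmf, batchnorm2d). make_optimize itself is not part of this commit; the sketch below only illustrates the per-group pattern the two parallel lists imply, with Adam as an assumed optimizer.

import torch

# Assumed pattern, not the repository's make_optimize: one optimizer per
# parameter group, learning rates matched to groups by position.
def make_optimize_sketch(
    parameters: list[list[torch.nn.parameter.Parameter]],
    lr_initial: list[float],
) -> list[torch.optim.Adam | None]:
    optimizers: list[torch.optim.Adam | None] = []
    for group, lr in zip(parameters, lr_initial):
        # Skip empty groups (e.g. when the network has no layer of that kind).
        optimizers.append(torch.optim.Adam(group, lr=lr) if len(group) > 0 else None)
    return optimizers

dummy_groups = [[torch.nn.parameter.Parameter(torch.rand(3, 3))], []]
print(make_optimize_sketch(dummy_groups, [0.001, 0.01]))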