From 832128acc97ec452174de3438982bbed94ca6175 Mon Sep 17 00:00:00 2001 From: David Rotermund <54365609+davrot@users.noreply.github.com> Date: Fri, 5 Jan 2024 15:38:20 +0100 Subject: [PATCH] Update README.md Signed-off-by: David Rotermund <54365609+davrot@users.noreply.github.com> --- pytorch/own_layer/README.md | 82 ++++++++++++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/pytorch/own_layer/README.md b/pytorch/own_layer/README.md index 13d3b23..0e216ef 100644 --- a/pytorch/own_layer/README.md +++ b/pytorch/own_layer/README.md @@ -21,7 +21,7 @@ reset_parameters(): I copied it from the original code. forward(): We get an input tensor and need to produce an output tensor. Please remember that dimension 0 contains the batch. Here we just multiply the input with the weights and add the bias to it (if available). -extra_repr(): Plots the information about the layer in a nice fashion. +extra_repr(): Prints the information about the layer in a nice fashion. 
```python @@ -81,4 +81,84 @@ class MyOwnLayer(torch.nn.Module): return f"in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}" ``` +I just add it to the network as any other layer: + +```python +network = torch.nn.Sequential( + torch.nn.Conv2d( + in_channels=input_number_of_channel, + out_channels=number_of_output_channels_conv1, + kernel_size=kernel_size_conv1, + stride=stride_conv1, + padding=padding_conv1, + ), + torch.nn.ReLU(), + torch.nn.MaxPool2d( + kernel_size=kernel_size_pool1, stride=stride_pool1, padding=padding_pool1 + ), + torch.nn.Conv2d( + in_channels=number_of_output_channels_conv1, + out_channels=number_of_output_channels_conv2, + kernel_size=kernel_size_conv2, + stride=stride_conv2, + padding=padding_conv2, + ), + torch.nn.ReLU(), + torch.nn.MaxPool2d( + kernel_size=kernel_size_pool2, stride=stride_pool2, padding=padding_pool2 + ), + torch.nn.Flatten( + start_dim=1, + ), + MyOwnLayer( + in_features=number_of_output_channels_flatten1, + out_features=number_of_output_channels_full1, + bias=True, + ), + torch.nn.ReLU(), + torch.nn.Linear( + in_features=number_of_output_channels_full1, + out_features=number_of_output_channels_output, + bias=True, + ), + torch.nn.Softmax(dim=1), +).to(device=device_gpu) +``` + +We can print information as usual: + +```python +print(network[-4]) +``` + +Output: + +```python +MyOwnLayer(in_features=576, out_features=1024, bias=True) +``` + +We can check the stored parameters: + +```python +for parameter in network[-4].parameters(): + print(type(parameter), parameter.shape) + +print() + +for name, parameter in network[-4].named_parameters(): + print(name, type(parameter), parameter.shape) +``` + +Output: + +```python +<class 'torch.nn.parameter.Parameter'> torch.Size([1024, 576]) +<class 'torch.nn.parameter.Parameter'> torch.Size([1024]) + +weight <class 'torch.nn.parameter.Parameter'> torch.Size([1024, 576]) +bias <class 'torch.nn.parameter.Parameter'> torch.Size([1024]) +``` + +And train the network as usual: + ![Figure_1.png](Figure_1.png)