Add files via upload

parent 20c14f75be
commit 190666a067

11 changed files with 1652 additions and 0 deletions

RenderStimuli/CPPExtensions/Makefile (new file, 43 lines)

PYBIN = /home/kk/P3.10/bin/
CC = /usr/lib64/ccache/clang++
NVCC = /usr/local/cuda-12/bin/nvcc -allow-unsupported-compiler

O_DIRS = o/

PARAMETERS_O_CPU = -O3 -std=c++14 -fPIC -Wall -fopenmp=libomp
PARAMETERS_Linker_CPU = -shared -lm -lomp -lstdc++ -Wall

PARAMETERS_O_GPU = -O3 -std=c++14 -ccbin=$(CC) \
	-Xcompiler "-fPIC -Wall -fopenmp=libomp"
PARAMETERS_Linker_GPU = -Xcompiler "-shared -lm -lomp -lstdc++ -Wall"

name = TCopy
type = CPU

PYPOSTFIX := $(shell $(PYBIN)python3-config --extension-suffix)
PYBIND11INCLUDE := $(shell $(PYBIN)python3 -m pybind11 --includes)
PARAMETERS_O = $(PARAMETERS_O_CPU) $(PYBIND11INCLUDE)
PARAMETERS_Linker = $(PARAMETERS_Linker_CPU)

so_file = Py$(name)$(type)$(PYPOSTFIX)
pyi_file = Py$(name)$(type).pyi

all: $(so_file)

$(O_DIRS)$(name)$(type).o: $(name)$(type).h $(name)$(type).cpp
	mkdir -p $(O_DIRS)
	$(CC) $(PARAMETERS_O) -c $(name)$(type).cpp -o $(O_DIRS)$(name)$(type).o

$(O_DIRS)Py$(name)$(type).o: $(name)$(type).h Py$(name)$(type).cpp
	mkdir -p $(O_DIRS)
	$(CC) $(PARAMETERS_O) -c Py$(name)$(type).cpp -o $(O_DIRS)Py$(name)$(type).o

$(so_file): $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o
	$(CC) $(PARAMETERS_Linker) -o $(so_file) $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o

#######################
clean:
	rm -rf $(O_DIRS)
	rm -f $(so_file)
	rm -f $(pyi_file)
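
Running make builds the CPU variant (name = TCopy, type = CPU) and links it into Py$(name)$(type)$(PYPOSTFIX), e.g. PyTCopyCPU.cpython-310-x86_64-linux-gnu.so, importable as PyTCopyCPU. A minimal import smoke test, assuming the build succeeded and the .so sits on the Python path:

# check_build.py (a hypothetical helper, not part of the commit)
import PyTCopyCPU                # the module name bound in PyTCopyCPU.cpp
copier = PyTCopyCPU.TCopyCPU()   # default-constructed worker object
print(copier.process.__doc__)    # pybind11 reports the bound signature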

RenderStimuli/CPPExtensions/PyTCopyCPU.cpp (new file, 15 lines)

#include <pybind11/pybind11.h>

#include "TCopyCPU.h"

namespace py = pybind11;

PYBIND11_MODULE(PyTCopyCPU, m)
{
    m.doc() = "TCopyCPU Module";
    py::class_<TCopyCPU>(m, "TCopyCPU")
        .def(py::init<>())
        .def("process",
             &TCopyCPU::process);
}
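
The Makefile's clean target also removes PyTCopyCPU.pyi, though no rule in this commit generates that stub; presumably it is produced out of band, e.g. with the pybind11-stubgen tool (an assumption about the intended workflow):

# assumption: stub generation is done manually after building, roughly:
#   pybind11-stubgen PyTCopyCPU
# which emits a PyTCopyCPU.pyi describing TCopyCPU and its process() method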

RenderStimuli/CPPExtensions/TCopyCPU.cpp (new file, 196 lines)

#include "TCopyCPU.h"
|
||||
|
||||
#include <omp.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
#include <iostream>
|
||||
|
||||
|
||||
TCopyCPU::TCopyCPU()
|
||||
{
|
||||
|
||||
};
|
||||
|
||||
TCopyCPU::~TCopyCPU()
|
||||
{
|
||||
|
||||
};
|
||||
|
||||
|
||||
void TCopyCPU::process(
|
||||
int64_t sparse_pointer_addr, // int64 *
|
||||
int64_t sparse_dim_0, // Gabor ID
|
||||
int64_t sparse_dim_1, // Gabor Parameter
|
||||
|
||||
int64_t gabor_pointer_addr, // float32 *
|
||||
int64_t gabor_dim_0, // Gabor ID
|
||||
int64_t gabor_dim_1, // X
|
||||
int64_t gabor_dim_2, // Y
|
||||
|
||||
int64_t output_pointer_addr, // float32 *
|
||||
int64_t output_dim_0, // Pattern ID
|
||||
int64_t output_dim_1, // X
|
||||
int64_t output_dim_2, // Y
|
||||
|
||||
int64_t number_of_cpu_processes
|
||||
){
|
||||
int64_t* sparse_pointer = (int64_t*)sparse_pointer_addr;
|
||||
float* gabor_pointer = (float*)gabor_pointer_addr;
|
||||
float* output_pointer = (float*)output_pointer_addr;
|
||||
|
||||
// Sparse Matrix
|
||||
assert((sparse_pointer != nullptr));
|
||||
assert((sparse_dim_0 > 0));
|
||||
assert((sparse_dim_1 > 0));
|
||||
|
||||
// I assume three parameters: Pattern ID, Type, X, Y
|
||||
assert((sparse_dim_1 == 4));
|
||||
|
||||
// Gabor Matrix
|
||||
assert((gabor_pointer != nullptr));
|
||||
assert((gabor_dim_0 > 0));
|
||||
assert((gabor_dim_1 > 0));
|
||||
assert((gabor_dim_2 > 0));
|
||||
|
||||
// Output Matrix
|
||||
assert((output_pointer != nullptr));
|
||||
assert((output_dim_0 > 0));
|
||||
assert((output_dim_1 > 0));
|
||||
|
||||
|
||||
// Cache data for the pointer calculations
|
||||
size_t sparse_dim_c0 = sparse_dim_1;
|
||||
|
||||
size_t gabor_dim_c0 = gabor_dim_1 * gabor_dim_2;
|
||||
// size_t gabor_dim_c1 = gabor_dim_2;
|
||||
|
||||
size_t output_dim_c0 = output_dim_1 * output_dim_2;
|
||||
size_t output_dim_c1 = output_dim_2;
|
||||
|
||||
assert((number_of_cpu_processes > 0));
|
||||
|
||||
omp_set_num_threads(number_of_cpu_processes);
|
||||
|
||||
// DEBUG:
|
||||
// omp_set_num_threads(1);
|
||||
|
||||
#pragma omp parallel for
|
||||
for (size_t gabor_id = 0; gabor_id < sparse_dim_0; gabor_id++)
|
||||
{
|
||||
process_sub(gabor_id,
|
||||
sparse_pointer,
|
||||
sparse_dim_c0,
|
||||
|
||||
gabor_pointer,
|
||||
gabor_dim_0,
|
||||
gabor_dim_1,
|
||||
gabor_dim_2,
|
||||
gabor_dim_c0,
|
||||
|
||||
output_pointer,
|
||||
output_dim_0,
|
||||
output_dim_1,
|
||||
output_dim_2,
|
||||
output_dim_c0,
|
||||
output_dim_c1
|
||||
);
|
||||
}
|
||||
|
||||
return;
|
||||
};
|
||||
|
||||
|
||||
void TCopyCPU::process_sub(
|
||||
size_t gabor_id,
|
||||
int64_t* sparse_pointer,
|
||||
size_t sparse_dim_c0,
|
||||
|
||||
float* gabor_pointer,
|
||||
int64_t gabor_dim_0,
|
||||
int64_t gabor_dim_1,
|
||||
int64_t gabor_dim_2,
|
||||
size_t gabor_dim_c0,
|
||||
|
||||
float* output_pointer,
|
||||
int64_t output_dim_0,
|
||||
int64_t output_dim_1,
|
||||
int64_t output_dim_2,
|
||||
size_t output_dim_c0,
|
||||
size_t output_dim_c1
|
||||
|
||||
){
|
||||
int64_t* sparse_offset = sparse_pointer + gabor_id * sparse_dim_c0;
|
||||
// Extract the gabor parameter
|
||||
int64_t gabor_pattern_id = sparse_offset[0];
|
||||
int64_t gabor_type = sparse_offset[1];
|
||||
int64_t gabor_x = sparse_offset[2];
|
||||
int64_t gabor_y = sparse_offset[3];
|
||||
|
||||
// Filter out non valid stimulus ids -- we don't do anything
|
||||
if ((gabor_pattern_id < 0) || (gabor_pattern_id >= output_dim_0)) {
|
||||
printf("Stimulus ID=%li outside range [0, %li]!\n",
|
||||
(long int)gabor_pattern_id, (long int)output_dim_0);
|
||||
return;
|
||||
}
|
||||
|
||||
// Filter out non valid patch types -- we don't do anything
|
||||
if ((gabor_type < 0) || (gabor_type >= gabor_dim_0)) {
|
||||
printf("Patch ID=%li outside range [0, %li]!\n",
|
||||
(long int)gabor_type, (long int)gabor_dim_0);
|
||||
return;
|
||||
}
|
||||
|
||||
// X position is too big -- we don't do anything
|
||||
if (gabor_x >= output_dim_1) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Y position is too big -- we don't do anything
|
||||
if (gabor_y >= output_dim_2){
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
// Get the offset to the gabor patch
|
||||
float* gabor_offset = gabor_pointer + gabor_type * gabor_dim_c0;
|
||||
|
||||
// Get the offset to the output image with the id pattern_id
|
||||
float* output_offset = output_pointer + gabor_pattern_id * output_dim_c0;
|
||||
|
||||
float* output_position_x = nullptr;
|
||||
int64_t gabor_y_start = gabor_y;
|
||||
|
||||
for (int64_t g_x = 0; g_x < gabor_dim_1; g_x ++) {
|
||||
|
||||
// Start at the first y (i.e. last dimension) position
|
||||
gabor_y = gabor_y_start;
|
||||
|
||||
// We copy only if we are on the output canvas -- X dimension
|
||||
if ((gabor_x >= 0) && (gabor_x < output_dim_1)) {
|
||||
|
||||
// Where is our x line in memory?
|
||||
output_position_x = output_offset + gabor_x * output_dim_c1;
|
||||
|
||||
for (int64_t g_y = 0; g_y < gabor_dim_2; g_y ++) {
|
||||
|
||||
// We copy only if we are on the output canvas -- Y dimension
|
||||
if ((gabor_y >= 0) && (gabor_y < output_dim_2)) {
|
||||
output_position_x[gabor_y] += *gabor_offset;
|
||||
}
|
||||
gabor_offset++;
|
||||
gabor_y++;
|
||||
}
|
||||
}
|
||||
// We skip an x line
|
||||
else
|
||||
{
|
||||
gabor_offset += gabor_dim_2;
|
||||
}
|
||||
gabor_x++;
|
||||
}
|
||||
|
||||
return;
|
||||
};
|
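
For cross-checking the extension, the clipped additive copy that process_sub implements fits in a few lines of torch slicing. A reference sketch (copy_patch_reference is a made-up name, not part of the extension; tensor layouts follow test.py below):

import torch

def copy_patch_reference(output: torch.Tensor, gabor: torch.Tensor,
                         pattern_id: int, gabor_type: int,
                         x0: int, y0: int) -> None:
    # Same validity filters as process_sub: invalid rows are skipped.
    if not (0 <= pattern_id < output.shape[0]):
        return
    if not (0 <= gabor_type < gabor.shape[0]):
        return
    # Clip the patch rectangle against the canvas; negative starts are allowed.
    x_lo, y_lo = max(x0, 0), max(y0, 0)
    x_hi = min(x0 + gabor.shape[1], output.shape[1])
    y_hi = min(y0 + gabor.shape[2], output.shape[2])
    if x_lo >= x_hi or y_lo >= y_hi:
        return
    # Additive copy, exactly like the inner x/y loops above.
    output[pattern_id, x_lo:x_hi, y_lo:y_hi] += gabor[
        gabor_type, x_lo - x0 : x_hi - x0, y_lo - y0 : y_hi - y0
    ]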

RenderStimuli/CPPExtensions/TCopyCPU.h (new file, 52 lines)

#ifndef TCOPYCPU
#define TCOPYCPU

#include <unistd.h>

#include <cctype>
#include <cstdint>
#include <iostream>

class TCopyCPU
{
   public:
    TCopyCPU();
    ~TCopyCPU();

    void process(
        int64_t sparse_pointer_addr,  // int64 *
        int64_t sparse_dim_0,         // Gabor ID
        int64_t sparse_dim_1,         // Gabor parameter

        int64_t gabor_pointer_addr,  // float32 *
        int64_t gabor_dim_0,         // Gabor ID
        int64_t gabor_dim_1,         // X
        int64_t gabor_dim_2,         // Y

        int64_t output_pointer_addr,  // float32 *
        int64_t output_dim_0,         // Pattern ID
        int64_t output_dim_1,         // X
        int64_t output_dim_2,         // Y

        int64_t number_of_cpu_processes);

   private:
    void process_sub(
        size_t gabor_id,
        int64_t* sparse_pointer,
        size_t sparse_dim_c0,
        float* gabor_pointer,
        int64_t gabor_dim_0,
        int64_t gabor_dim_1,
        int64_t gabor_dim_2,
        size_t gabor_dim_c0,
        float* output_pointer,
        int64_t output_dim_0,
        int64_t output_dim_1,
        int64_t output_dim_2,
        size_t output_dim_c0,
        size_t output_dim_c1);
};

#endif /* TCOPYCPU */

RenderStimuli/CPPExtensions/test.py (new file, 78 lines)

import torch
import matplotlib.pyplot as plt
import os

from PyTCopyCPU import TCopyCPU


copier = TCopyCPU()

# Output canvas
output_pattern: int = 10
output_x: int = 160
output_y: int = 200  # Last dim

output = torch.zeros(
    (output_pattern, output_x, output_y), device="cpu", dtype=torch.float32
)

# The "Gabors"
gabor_amount: int = 3
gabor_x: int = 11
gabor_y: int = 13  # Last dim

gabor = torch.arange(
    1, gabor_amount * gabor_x * gabor_y + 1, device="cpu", dtype=torch.float32
).reshape((gabor_amount, gabor_x, gabor_y))

# The sparse control matrix

sparse_matrix = torch.zeros((3, 4), device="cpu", dtype=torch.int64)

sparse_matrix[0, 0] = 0  # pattern_id -> output dim 0
sparse_matrix[0, 1] = 2  # gabor_type -> gabor dim 0
sparse_matrix[0, 2] = 0  # gabor_x -> output dim 1 start point
sparse_matrix[0, 3] = 0  # gabor_y -> output dim 2 start point

sparse_matrix[1, 0] = 0  # pattern_id -> output dim 0
sparse_matrix[1, 1] = 1  # gabor_type -> gabor dim 0
sparse_matrix[1, 2] = 40  # gabor_x -> output dim 1 start point
sparse_matrix[1, 3] = 60  # gabor_y -> output dim 2 start point

sparse_matrix[2, 0] = 1  # pattern_id -> output dim 0
sparse_matrix[2, 1] = 0  # gabor_type -> gabor dim 0
sparse_matrix[2, 2] = 0  # gabor_x -> output dim 1 start point
sparse_matrix[2, 3] = 0  # gabor_y -> output dim 2 start point

# ###########################################

assert sparse_matrix.ndim == 2
assert int(sparse_matrix.shape[1]) == 4

assert gabor.ndim == 3
assert output.ndim == 3

number_of_cpu_processes = os.cpu_count()
assert number_of_cpu_processes is not None

copier.process(
    sparse_matrix.data_ptr(),
    int(sparse_matrix.shape[0]),
    int(sparse_matrix.shape[1]),
    gabor.data_ptr(),
    int(gabor.shape[0]),
    int(gabor.shape[1]),
    int(gabor.shape[2]),
    output.data_ptr(),
    int(output.shape[0]),
    int(output.shape[1]),
    int(output.shape[2]),
    int(number_of_cpu_processes),
)

plt.imshow(output[0, :, :])
plt.title("Pattern 0")
plt.show()

plt.imshow(output[1, :, :])
plt.title("Pattern 1")
plt.show()
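
The result can be verified against the pure-Python reference sketched after TCopyCPU.cpp above (copy_patch_reference is that hypothetical helper):

expected = torch.zeros_like(output)
for row in sparse_matrix:
    copy_patch_reference(
        expected, gabor, int(row[0]), int(row[1]), int(row[2]), int(row[3])
    )
assert torch.allclose(output, expected)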

RenderStimuli/contours.py (new file, 603 lines)

# %%
#
# contours.py
#
# Tools for contour integration studies
#
# Version 2.0, 28.04.2023:
# phases can now be randomized...
#

#
# Coordinate system assumptions:
#
# for arrays:
#   [..., HEIGHT, WIDTH], origin is on TOP LEFT
#   HEIGHT indices *decrease* with increasing y-coordinates (reversed)
#   WIDTH indices *increase* with increasing x-coordinates (normal)
#
# Orientations:
#   0 is horizontal, orientations *increase* counter-clockwise
#
# Corner elements, quantified by [dir_source, dir_change]:
#   - consist of two legs
#   - contour *enters* corner from *source direction* at one leg
#     and goes from border to its center...
#   - contour path changes by *direction change* and goes
#     from center to the border
#

import torch
import time
import matplotlib.pyplot as plt
import math
import scipy.io
import numpy as np
import os

torch_device = "cuda"
default_dtype = torch.float32
torch.set_default_dtype(default_dtype)
torch.device(torch_device)


#
# performs a coordinate transform (rotation with phi around origin)
# rotation is performed CLOCKWISE with increasing phi
#
# remark: rotating a mesh grid by phi and orienting an image element
# along the new x-axis is EQUIVALENT to rotating the image element
# by -phi (so this realizes a rotation COUNTER-CLOCKWISE with
# increasing phi)
#
def rotate_CW(x: torch.Tensor, y: torch.Tensor, phi: torch.float32):
    xr = +x * torch.cos(phi) + y * torch.sin(phi)
    yr = -x * torch.sin(phi) + y * torch.cos(phi)

    return xr, yr

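# Quick check of the convention above: the point (1, 0) rotated by
# phi = pi/2 lands at (0, -1), i.e. the grid itself turns clockwise:
#
#   xr, yr = rotate_CW(
#       torch.tensor(1.0), torch.tensor(0.0), torch.tensor(math.pi / 2)
#   )
#   # xr ~ 0.0, yr ~ -1.0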

#
# renders a Gabor with (or without) corner
#
def gaborner(
    r_gab: int,  # radius, size will be 2*r_gab+1
    dir_source: float,  # contour enters in this dir
    dir_change: float,  # contour turns around by this dir
    lambdah: float,  # wavelength of Gabor
    sigma: float,  # half-width of Gabor
    phase: float,  # phase of Gabor
    normalize: bool,  # normalize patch to zero
    torch_device: str,  # GPU or CPU...
) -> torch.Tensor:
    # incoming dir: change to outgoing dir
    dir1 = dir_source + torch.pi
    nook = dir_change - torch.pi

    # create coordinate grids
    d_gab = 2 * r_gab + 1
    x = -r_gab + torch.arange(d_gab, device=torch_device)
    yg, xg = torch.meshgrid(x, x, indexing="ij")

    # put into tensor for performing vectorized scalar products
    xyg = torch.zeros([d_gab, d_gab, 1, 2], device=torch_device)
    xyg[:, :, 0, 0] = xg
    xyg[:, :, 0, 1] = yg

    # create Gaussian hull
    gauss = torch.exp(-(xg**2 + yg**2) / 2 / sigma**2)
    gabor_corner = gauss.clone()

    if (dir_change == 0) or (dir_change == torch.pi):
        # handle special case of straight Gabor or change by 180 deg

        # vector orth to Gabor axis
        ev1_orth = torch.tensor(
            [math.cos(-dir1 + math.pi / 2), math.sin(-dir1 + math.pi / 2)],
            device=torch_device,
        )
        # project coords to orth vector to get distance
        legs = torch.cos(
            2
            * torch.pi
            * torch.matmul(xyg, ev1_orth.unsqueeze(1).unsqueeze(0).unsqueeze(0))
            / lambdah
            + phase
        )
        gabor_corner *= legs[:, :, 0, 0]

    else:
        dir2 = dir1 + nook

        # compute separation line between corner's legs
        ev1 = torch.tensor([math.cos(-dir1), math.sin(-dir1)], device=torch_device)
        ev2 = torch.tensor([math.cos(-dir2), math.sin(-dir2)], device=torch_device)
        v_towards_1 = (ev1 - ev2).unsqueeze(1).unsqueeze(0).unsqueeze(0)

        # which coords belong to which leg?
        which_side = torch.matmul(xyg, v_towards_1)[:, :, 0, 0]
        towards_1y, towards_1x = torch.where(which_side > 0)
        towards_2y, towards_2x = torch.where(which_side <= 0)

        # compute orth distance to legs; the direction change is wrapped
        # into [0, 2*pi) before the comparison
        side_sign = -1 + 2 * ((dir_change % (2 * torch.pi)) > torch.pi)
        ev12 = ev1 + ev2
        v1_orth = ev12 - ev1 * torch.matmul(ev12, ev1)
        v2_orth = ev12 - ev2 * torch.matmul(ev12, ev2)
        ev1_orth = side_sign * v1_orth / torch.sqrt((v1_orth**2).sum())
        ev2_orth = side_sign * v2_orth / torch.sqrt((v2_orth**2).sum())

        leg1 = torch.cos(
            2
            * torch.pi
            * torch.matmul(xyg, ev1_orth.unsqueeze(1).unsqueeze(0).unsqueeze(0))
            / lambdah
            + phase
        )
        leg2 = torch.cos(
            2
            * torch.pi
            * torch.matmul(xyg, ev2_orth.unsqueeze(1).unsqueeze(0).unsqueeze(0))
            / lambdah
            + phase
        )
        gabor_corner[towards_1y, towards_1x] *= leg1[towards_1y, towards_1x, 0, 0]
        gabor_corner[towards_2y, towards_2x] *= leg2[towards_2y, towards_2x, 0, 0]

    # depending on phase, Gabor might not be normalized...
    if normalize:
        s = gabor_corner.sum()
        s0 = gauss.sum()
        gabor_corner -= s / s0 * gauss

    return gabor_corner

#
# creates a filter bank of Gabor corners
#
# outputs:
#   filters: [n_source, n_change, HEIGHT, WIDTH]
#   dirs_source: [n_source]
#   dirs_change: [n_change]
#
def gaborner_filterbank(
    r_gab: int,  # radius, size will be 2*r_gab+1
    n_source: int,  # number of source orientations
    n_change: int,  # number of direction changes
    lambdah: float,  # wavelength of Gabor
    sigma: float,  # half-width of Gabor
    phase: float,  # phase of Gabor
    normalize: bool,  # normalize patch to zero
    torch_device: str,  # GPU or CPU...
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    kernels = torch.zeros(
        [n_source, n_change, 2 * r_gab + 1, 2 * r_gab + 1],
        device=torch_device,
        requires_grad=False,
    )
    dirs_source = 2 * torch.pi * torch.arange(n_source, device=torch_device) / n_source
    dirs_change = 2 * torch.pi * torch.arange(n_change, device=torch_device) / n_change

    for i_source in range(n_source):
        for i_change in range(n_change):
            gabor_corner = gaborner(
                r_gab=r_gab,
                dir_source=dirs_source[i_source],
                dir_change=dirs_change[i_change],
                lambdah=lambdah,
                sigma=sigma,
                phase=phase,
                normalize=normalize,
                torch_device=torch_device,
            )
            kernels[i_source, i_change] = gabor_corner

            # check = torch.isnan(gabor_corner).sum()
            # if check > 0:
            #     print(i_source, i_change, check)
            #     kernels[i_source, i_change] = 1

    return kernels, dirs_source, dirs_change
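
# A minimal usage sketch: build a small bank on the CPU and look at one
# corner element (the parameter values here are illustrative only):
#
#   kernels, dirs_source, dirs_change = gaborner_filterbank(
#       r_gab=10, n_source=8, n_change=8, lambdah=4.0, sigma=3.0,
#       phase=0.0, normalize=True, torch_device="cpu",
#   )
#   plt.imshow(kernels[2, 5].cpu(), cmap="gray")
#   plt.show()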

def discretize_stimuli(
    posori,
    x_range: tuple,
    y_range: tuple,
    scale_factor: float,
    r_gab_PIX: int,
    n_source: int,
    n_change: int,
    torch_device: str,
    n_phase: int = 1,
) -> tuple[list, list, list, int, int]:
    # check correct input size
    s = posori.shape
    assert len(s) == 2, "posori should be NDARRAY with N x 1 entries"
    assert s[1] == 1, "posori should be NDARRAY with N x 1 entries"

    # determine size of (extended) canvas
    x_canvas_PIX = torch.tensor(
        (x_range[1] - x_range[0]) * scale_factor, device=torch_device
    ).ceil()
    y_canvas_PIX = torch.tensor(
        (y_range[1] - y_range[0]) * scale_factor, device=torch_device
    ).ceil()
    x_canvas_ext_PIX = int(x_canvas_PIX + 2 * r_gab_PIX)
    y_canvas_ext_PIX = int(y_canvas_PIX + 2 * r_gab_PIX)

    # get number of contours
    n_contours = s[0]
    index_phasrcchg = []
    index_y = []
    index_x = []
    for i_contour in range(n_contours):
        x_y_src_chg = torch.asarray(posori[i_contour, 0][1:, :].copy())
        x_y_src_chg[2] += torch.pi

        # if i_contour == 0:
        #     print(x_y_src_chg[2][:3])

        # compute integer coordinates and find all visible elements
        x = ((x_y_src_chg[0] - x_range[0]) * scale_factor + r_gab_PIX).type(torch.long)
        y = y_canvas_ext_PIX - (
            (x_y_src_chg[1] - y_range[0]) * scale_factor + r_gab_PIX
        ).type(torch.long)
        i_visible = torch.where(
            (x >= 0) * (y >= 0) * (x < x_canvas_ext_PIX) * (y < y_canvas_ext_PIX)
        )[0]

        # compute integer (changes of) directions
        i_source = (
            ((((x_y_src_chg[2]) / (2 * torch.pi)) + 1 / (2 * n_source)) % 1) * n_source
        ).type(torch.long)
        i_change = (
            (((x_y_src_chg[3] / (2 * torch.pi)) + 1 / (2 * n_change)) % 1) * n_change
        ).type(torch.long)

        i_phase = torch.randint(n_phase, i_visible.size())
        index_phasrcchg.append(
            (i_phase * n_source + i_source[i_visible]) * n_change + i_change[i_visible]
        )
        # index_change.append(i_change[i_visible])
        index_y.append(y[i_visible])
        index_x.append(x[i_visible])

    return (
        index_phasrcchg,
        index_x,
        index_y,
        x_canvas_ext_PIX,
        y_canvas_ext_PIX,
    )
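
# The combined index packs (phase, source, change) into one number:
#   index = (i_phase * n_source + i_source) * n_change + i_change
# so a flat filter bank of size n_phase * n_source * n_change can be
# indexed directly; the components can be recovered with divmod:
#
#   rest, i_change = divmod(int(index), n_change)
#   i_phase, i_source = divmod(rest, n_source)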

def render_stimulus(
    kernels, index_element, index_y, index_x, y_canvas, x_canvas, torch_device
):
    s = kernels.shape
    kx = s[-1]
    ky = s[-2]

    stimulus = torch.zeros((y_canvas + ky - 1, x_canvas + kx - 1), device=torch_device)
    n = index_element.size()[0]
    for i in torch.arange(n, device=torch_device):
        x = index_x[i]
        y = index_y[i]
        stimulus[y : y + ky, x : x + kx] += kernels[index_element[i]]

    return stimulus[ky - 1 : -(ky - 1), kx - 1 : -(kx - 1)]
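
# Note on the margins: the canvas is allocated with kx - 1 and ky - 1 extra
# pixels, so every (x, y) index from the extended canvas fits without bounds
# checks, and the final slice crops back to the visible region; the C++
# extension path below relies on the same allocate-large-then-crop scheme.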

if __name__ == "__main__":
    VERBOSE = True
    BENCH_CONVOLVE = True
    BENCH_GPU = True
    BENCH_CPU = True
    BENCH_DAVID = True

    print("Testing contour rendering speed:")
    print("================================")

    # load contours, multiplex coordinates to simulate a larger set of contours
    n_multiplex = 1
    mat = scipy.io.loadmat("z.mat")
    posori = np.tile(mat["z"], (n_multiplex, 1))
    n_contours = posori.shape[0]
    print(f"Processing {n_contours} contour stimuli")

    # how many contours to render simultaneously?
    n_simultaneous = 5
    n_simultaneous_chunks, n_remaining = divmod(n_contours, n_simultaneous)
    assert n_remaining == 0, "Check parameters for simultaneous contour rendering!"

    # repeat some times for speed testing
    n_repeat = 10
    t_dis = torch.zeros((n_repeat + 2), device=torch_device)
    t_con = torch.zeros((n_repeat + 2), device=torch_device)
    t_rsg = torch.zeros((n_repeat + 2), device=torch_device)
    t_rsc = torch.zeros((n_repeat + 2), device="cpu")

    # cutout for stimuli, and Gabor parameters
    x_range = [140, 940]
    y_range = [140, 940]
    d_gab = 40
    lambdah = 12
    sigma = 8
    phase = 0.0
    normalize = True

    # scale to convert coordinates to pixel values
    scale_factor = 0.25

    # number of directions for dictionary
    n_source = 32
    n_change = 32

    # convert sizes to pixel units
    lambdah_PIX = lambdah * scale_factor
    sigma_PIX = sigma * scale_factor
    r_gab_PIX = int(d_gab * scale_factor / 2)
    d_gab_PIX = r_gab_PIX * 2 + 1

    # make filterbank
    kernels, dirs_source, dirs_change = gaborner_filterbank(
        r_gab=r_gab_PIX,
        n_source=n_source,
        n_change=n_change,
        lambdah=lambdah_PIX,
        sigma=sigma_PIX,
        phase=phase,
        normalize=normalize,
        torch_device=torch_device,
    )
    kernels = kernels.reshape([1, n_source * n_change, d_gab_PIX, d_gab_PIX])
    kernels_flip = kernels.flip(dims=(-1, -2))

    # define "network" and put to cuda
    conv = torch.nn.Conv2d(
        in_channels=n_source * n_change,
        out_channels=1,
        kernel_size=d_gab_PIX,
        stride=1,
        device=torch_device,
    )
    conv.weight.data = kernels_flip

    print("Discretizing START!!!")
    t_dis[0] = time.perf_counter()
    for i_rep in range(n_repeat):
        # discretize
        (
            index_srcchg,
            index_x,
            index_y,
            x_canvas,
            y_canvas,
        ) = discretize_stimuli(
            posori=posori,
            x_range=x_range,
            y_range=y_range,
            scale_factor=scale_factor,
            r_gab_PIX=r_gab_PIX,
            n_source=n_source,
            n_change=n_change,
            torch_device=torch_device,
        )
        t_dis[i_rep + 1] = time.perf_counter()
    t_dis[-1] = time.perf_counter()
    print("Discretizing END!!!")

    if BENCH_CONVOLVE:
        print("Allocating!")
        stimuli = torch.zeros(
            [n_simultaneous, n_source * n_change, y_canvas, x_canvas],
            device=torch_device,
            requires_grad=False,
        )

        print("Generation by CONVOLUTION start!")
        t_con[0] = time.perf_counter()
        for i_rep in torch.arange(n_repeat):
            for i_simultaneous_chunks in torch.arange(n_simultaneous_chunks):
                i_ofs = i_simultaneous_chunks * n_simultaneous

                for i_sim in torch.arange(n_simultaneous):
                    stimuli[
                        i_sim,
                        index_srcchg[i_sim + i_ofs],
                        index_y[i_sim + i_ofs],
                        index_x[i_sim + i_ofs],
                    ] = 1

                output = conv(stimuli)

                for i_sim in range(n_simultaneous):
                    stimuli[
                        i_sim,
                        index_srcchg[i_sim + i_ofs],
                        index_y[i_sim + i_ofs],
                        index_x[i_sim + i_ofs],
                    ] = 0

            t_con[i_rep + 1] = time.perf_counter()
        t_con[-1] = time.perf_counter()
        print("Generation by CONVOLUTION stop!")

    if BENCH_GPU:
        print("Generation by GPU start!")
        output_gpu = torch.zeros(
            (
                n_contours,
                y_canvas - d_gab_PIX + 1,
                x_canvas - d_gab_PIX + 1,
            ),
            device=torch_device,
        )
        t_rsg[0] = time.perf_counter()
        for i_rep in torch.arange(n_repeat):
            for i_con in torch.arange(n_contours):
                output_gpu[i_con] = render_stimulus(
                    kernels=kernels[0],
                    index_element=index_srcchg[i_con],
                    index_y=index_y[i_con],
                    index_x=index_x[i_con],
                    y_canvas=y_canvas,
                    x_canvas=x_canvas,
                    torch_device=torch_device,
                )
            # output_gpu = torch.clip(output_gpu, -1, +1)

            t_rsg[i_rep + 1] = time.perf_counter()
        t_rsg[-1] = time.perf_counter()
        print("Generation by GPU stop!")

    if BENCH_CPU:
        print("Generation by CPU start!")
        output_cpu = torch.zeros(
            (
                n_contours,
                y_canvas - d_gab_PIX + 1,
                x_canvas - d_gab_PIX + 1,
            ),
            device="cpu",
        )
        kernels_cpu = kernels.detach().cpu()
        t_rsc[0] = time.perf_counter()
        for i_rep in range(n_repeat):
            for i_con in range(n_contours):
                output_cpu[i_con] = render_stimulus(
                    kernels=kernels_cpu[0],
                    index_element=index_srcchg[i_con],
                    index_y=index_y[i_con],
                    index_x=index_x[i_con],
                    y_canvas=y_canvas,
                    x_canvas=x_canvas,
                    torch_device="cpu",
                )
            # output_cpu = torch.clip(output_cpu, -1, +1)

            t_rsc[i_rep + 1] = time.perf_counter()
        t_rsc[-1] = time.perf_counter()
        print("Generation by CPU stop!")

    if BENCH_DAVID:
        print("Generation by DAVID start!")
        from CPPExtensions.PyTCopyCPU import TCopyCPU as render_stimulus_CPP

        copier = render_stimulus_CPP()

        number_of_cpu_processes = os.cpu_count()
        output_dav_tmp = torch.zeros(
            (
                n_contours,
                y_canvas + 2 * r_gab_PIX,
                x_canvas + 2 * r_gab_PIX,
            ),
            device="cpu",
            dtype=torch.float,
        )
        gabor = kernels[0].detach().cpu()

        # re-sort everything into one sparse control matrix
        n_elements_total = 0
        for i_con in range(n_contours):
            n_elements_total += len(index_x[i_con])
        sparse_matrix = torch.zeros(
            (n_elements_total, 4), device="cpu", dtype=torch.int64
        )
        i_elements_total = 0
        for i_con in range(n_contours):
            n_add = len(index_x[i_con])
            sparse_matrix[i_elements_total : i_elements_total + n_add, 0] = i_con
            sparse_matrix[
                i_elements_total : i_elements_total + n_add, 1
            ] = index_srcchg[i_con]
            sparse_matrix[i_elements_total : i_elements_total + n_add, 2] = index_y[
                i_con
            ]
            sparse_matrix[i_elements_total : i_elements_total + n_add, 3] = index_x[
                i_con
            ]
            i_elements_total += n_add
        assert i_elements_total == n_elements_total, "Element count mismatch!"

        t_dav = torch.zeros((n_repeat + 2), device="cpu")
        t_dav[0] = time.perf_counter()
        for i_rep in range(n_repeat):
            output_dav_tmp.fill_(0.0)
            copier.process(
                sparse_matrix.data_ptr(),
                int(sparse_matrix.shape[0]),
                int(sparse_matrix.shape[1]),
                gabor.data_ptr(),
                int(gabor.shape[0]),
                int(gabor.shape[1]),
                int(gabor.shape[2]),
                output_dav_tmp.data_ptr(),
                int(output_dav_tmp.shape[0]),
                int(output_dav_tmp.shape[1]),
                int(output_dav_tmp.shape[2]),
                int(number_of_cpu_processes),
            )
            output_dav = output_dav_tmp[
                :,
                d_gab_PIX - 1 : -(d_gab_PIX - 1),
                d_gab_PIX - 1 : -(d_gab_PIX - 1),
            ].clone()
            t_dav[i_rep + 1] = time.perf_counter()
        t_dav[-1] = time.perf_counter()
        print("Generation by DAVID done!")

    if VERBOSE:  # show last stimulus
        if BENCH_CONVOLVE:
            plt.subplot(2, 2, 1)
            plt.imshow(output[-1, 0].detach().cpu(), cmap="gray", vmin=-1, vmax=+1)
            plt.title("convolve")
        if BENCH_GPU:
            plt.subplot(2, 2, 2)
            plt.imshow(output_gpu[-1].detach().cpu(), cmap="gray", vmin=-1, vmax=+1)
            plt.title("gpu")
        if BENCH_CPU:
            plt.subplot(2, 2, 3)
            plt.imshow(output_cpu[-1], cmap="gray", vmin=-1, vmax=+1)
            plt.title("cpu")
        if BENCH_DAVID:
            plt.subplot(2, 2, 4)
            plt.imshow(output_dav[-1], cmap="gray", vmin=-1, vmax=+1)
            plt.title("david")
        plt.show()

    dt_discretize = t_dis.diff() / n_contours
    plt.plot(dt_discretize.detach().cpu())
    dt_convolve = t_con.diff() / n_contours
    plt.plot(dt_convolve.detach().cpu())
    dt_gpu = t_rsg.diff() / n_contours
    plt.plot(dt_gpu.detach().cpu())
    dt_cpu = t_rsc.diff() / n_contours
    plt.plot(dt_cpu.detach().cpu())
    dt_david = t_dav.diff() / n_contours
    plt.plot(dt_david.detach().cpu())

    plt.legend(["discretize", "convolve", "gpu", "cpu", "david"])
    plt.show()
    print(
        f"Average discretize for 1k stims: {1000*dt_discretize[:-1].detach().cpu().mean()} secs."
    )
    print(
        f"Average convolve for 1k stims: {1000*dt_convolve[:-1].detach().cpu().mean()} secs."
    )
    print(f"Average gpu for 1k stims: {1000*dt_gpu[:-1].detach().cpu().mean()} secs.")
    print(f"Average cpu for 1k stims: {1000*dt_cpu[:-1].detach().cpu().mean()} secs.")
    print(
        f"Average david for 1k stims: {1000*dt_david[:-1].detach().cpu().mean()} secs."
    )

    if BENCH_GPU and BENCH_CPU and BENCH_DAVID:
        df1 = (torch.abs(output_gpu[-1].detach().cpu() - output_cpu[-1])).mean()
        df2 = (torch.abs(output_gpu[-1].detach().cpu() - output_dav[-1])).mean()
        df3 = (torch.abs(output_dav[-1].cpu() - output_cpu[-1])).mean()
        print(f"Differences: CPU-GPU:{df1}, GPU-David:{df2}, David-CPU:{df3}")

# %%

RenderStimuli/meanGabors.py (new file, 73 lines)

import scipy.io
import os
import matplotlib.pyplot as plt
import numpy as np
import glob
import logging


# this code calculates the mean number of Gabor patches inside a stimulus for each class

logging.basicConfig(
    filename="AngularAvrg.txt", filemode="w", format="%(message)s", level=logging.INFO
)

avg_avg_size: list = []
n_contours = 0
x_range = [140, 940]
y_range = [140, 940]
for i in range(10):
    path = f"/data_1/kk/StimulusGeneration/Alicorn/Angular/Ang0{i}0_n10000"
    files = glob.glob(path + os.sep + "*.mat")

    n_files = len(files)
    print(f"Going through {n_files} contour files...")
    logging.info(f"Going through {n_files} contour files...")
    varname = f"Table_intr_crn0{i}0"
    varname_dist = f"Table_intr_crn0{i}0_dist"
    for i_file in range(n_files):
        # get path, basename, suffix...
        full = files[i_file]
        path, file = os.path.split(full)
        base, suffix = os.path.splitext(file)

        # load file
        print(full)
        mat = scipy.io.loadmat(full)
        if "dist" in full:
            posori = mat[varname_dist]
        else:
            posori = mat[varname]

        sec_dim_sizes = []  # [posori[i][0].shape[1] for i in range(posori.shape[0])]

        for s in range(posori.shape[0]):
            # Extract the entry
            entry = posori[s][0]

            # Get the x and y coordinates
            x = entry[1]
            y = entry[2]

            # Find the indices of the coordinates that fall within the specified range
            idx = np.where(
                (x >= x_range[0])
                & (x <= x_range[1])
                & (y >= y_range[0])
                & (y <= y_range[1])
            )[0]

            # Size of the second dimension, counting only coordinates in range
            sec_dim_size = len(idx)

            # Append the size to the list
            sec_dim_sizes.append(sec_dim_size)

        avg_size = np.mean(sec_dim_sizes)
        print(f"Average 2nd dim of posori: {avg_size}")
        logging.info(f"Average 2nd dim of posori: {avg_size}")
        avg_avg_size.append(avg_size)
        n_contours += posori.shape[0]

    print(f"...overall {n_contours} contours so far.")
    logging.info(f"...overall {n_contours} contours so far.")


# calculate the average number of Gabors over the whole condition
overall = np.mean(avg_avg_size)
print(f"OVERALL average 2nd dim of posori: {overall}")
logging.info(f"OVERALL average 2nd dim of posori: {overall}")

RenderStimuli/render.py (new file, 269 lines)

# %%

import torch
import time
import scipy.io
import os
import matplotlib.pyplot as plt
import numpy as np
import contours
import glob

USE_CEXT_FROM_DAVID = True
if USE_CEXT_FROM_DAVID:
    from CPPExtensions.PyTCopyCPU import TCopyCPU as render_stimulus_CPP


def render_gaborfield(posori, params, verbose=False):
    scale_factor = params["scale_factor"]
    n_source = params["n_source"]
    n_change = params["n_change"]
    n_phase = params["n_phase"]

    # convert sizes to pixel units
    lambda_PIX = params["lambda_gabor"] * scale_factor
    sigma_PIX = params["sigma_gabor"] * scale_factor
    r_gab_PIX = int(params["d_gabor"] * scale_factor / 2)
    d_gab_PIX = r_gab_PIX * 2 + 1

    # make filterbank
    gabors = torch.zeros(
        [n_phase, n_source, n_change, d_gab_PIX, d_gab_PIX], dtype=torch.float32
    )
    for i_phase in range(n_phase):
        phase = (torch.pi * 2 * i_phase) / n_phase
        gabors[i_phase], dirs_source, dirs_change = contours.gaborner_filterbank(
            r_gab=r_gab_PIX,
            n_source=n_source,
            n_change=n_change,
            lambdah=lambda_PIX,
            sigma=sigma_PIX,
            phase=phase,
            normalize=params["normalize_gabor"],
            torch_device="cpu",
        )
    gabors = gabors.reshape([n_phase * n_source * n_change, d_gab_PIX, d_gab_PIX])

    n_contours = posori.shape[0]

    # discretize ALL stimuli
    if verbose:
        print("Discretizing START!!!")
    t_dis0 = time.perf_counter()
    (
        index_srcchg,
        index_x,
        index_y,
        x_canvas,
        y_canvas,
    ) = contours.discretize_stimuli(
        posori=posori,
        x_range=params["x_range"],
        y_range=params["y_range"],
        scale_factor=scale_factor,
        r_gab_PIX=r_gab_PIX,
        n_source=n_source,
        n_change=n_change,
        n_phase=n_phase,
        torch_device="cpu",
    )
    t_dis1 = time.perf_counter()
    if verbose:
        print(f"Discretizing END, took {t_dis1 - t_dis0} seconds!!!")

    if verbose:
        print("Generation START!!!")
    t0 = time.perf_counter()

    if not USE_CEXT_FROM_DAVID:
        if verbose:
            print(" (using the torch CPU loop...)")
        output = torch.zeros(
            (
                n_contours,
                y_canvas - d_gab_PIX + 1,
                x_canvas - d_gab_PIX + 1,
            ),
            device="cpu",
            dtype=torch.float32,
        )
        kernels_cpu = gabors.detach().cpu()
        for i_con in range(n_contours):
            output[i_con] = contours.render_stimulus(
                kernels=kernels_cpu,
                index_element=index_srcchg[i_con],
                index_y=index_y[i_con],
                index_x=index_x[i_con],
                y_canvas=y_canvas,
                x_canvas=x_canvas,
                torch_device="cpu",
            )
        output = torch.clip(output, -1, +1)

    else:
        if verbose:
            print(" (using C++...)")
        copier = render_stimulus_CPP()
        number_of_cpu_processes = os.cpu_count()
        output_dav_tmp = torch.zeros(
            (
                n_contours,
                y_canvas + 2 * r_gab_PIX,
                x_canvas + 2 * r_gab_PIX,
            ),
            device="cpu",
            dtype=torch.float32,
        )

        # re-sort everything into one sparse control matrix
        n_elements_total = 0
        for i_con in range(n_contours):
            n_elements_total += len(index_x[i_con])
        sparse_matrix = torch.zeros(
            (n_elements_total, 4), device="cpu", dtype=torch.int64
        )
        i_elements_total = 0
        for i_con in range(n_contours):
            n_add = len(index_x[i_con])
            sparse_matrix[i_elements_total : i_elements_total + n_add, 0] = i_con
            sparse_matrix[
                i_elements_total : i_elements_total + n_add, 1
            ] = index_srcchg[i_con]
            sparse_matrix[i_elements_total : i_elements_total + n_add, 2] = index_y[
                i_con
            ]
            sparse_matrix[i_elements_total : i_elements_total + n_add, 3] = index_x[
                i_con
            ]
            i_elements_total += n_add
        assert i_elements_total == n_elements_total, "Element count mismatch!"

        # output_dav_tmp.fill_(0.0)
        copier.process(
            sparse_matrix.data_ptr(),
            int(sparse_matrix.shape[0]),
            int(sparse_matrix.shape[1]),
            gabors.data_ptr(),
            int(gabors.shape[0]),
            int(gabors.shape[1]),
            int(gabors.shape[2]),
            output_dav_tmp.data_ptr(),
            int(output_dav_tmp.shape[0]),
            int(output_dav_tmp.shape[1]),
            int(output_dav_tmp.shape[2]),
            int(number_of_cpu_processes),
        )
        output = torch.clip(
            output_dav_tmp[
                :,
                d_gab_PIX - 1 : -(d_gab_PIX - 1),
                d_gab_PIX - 1 : -(d_gab_PIX - 1),
            ],
            -1,
            +1,
        )

    t1 = time.perf_counter()
    if verbose:
        print(f"Generating END, took {t1 - t0} seconds!!!")

    if verbose:
        print("Showing first and last stimulus generated...")
        plt.imshow(output[0], cmap="gray", vmin=-1, vmax=+1)
        plt.show()
        plt.imshow(output[-1], cmap="gray", vmin=-1, vmax=+1)
        plt.show()
        print(f"Processed {n_contours} stimuli in {t1 - t_dis0} seconds!")

    return output


def render_gaborfield_frommatfiles(
    files, params, varname, varname_dist, altpath=None, verbose=False
):
    n_total = 0
    n_files = len(files)
    print(f"Going through {n_files} contour files...")

    for i_file in range(n_files):
        # get path, basename, suffix...
        full = files[i_file]
        path, file = os.path.split(full)
        base, suffix = os.path.splitext(file)

        # load file
        mat = scipy.io.loadmat(full)
        if "dist" in full:
            posori = mat[varname_dist]
        else:
            posori = mat[varname]
        n_contours = posori.shape[0]
        n_total += n_contours
        print(f"  ...file {file} contains {n_contours} contours.")

        # process...
        gaborfield = render_gaborfield(posori, params=params, verbose=verbose)

        # save
        if altpath:
            savepath = altpath
        else:
            savepath = path
        savefull = savepath + os.sep + base + "_RENDERED.npz"
        print(f"  ...saving under {savefull}...")
        gaborfield = (torch.clip(gaborfield, -1, 1) * 127 + 128).type(torch.uint8)
        np.savez_compressed(savefull, gaborfield=gaborfield)

    return n_total


if __name__ == "__main__":
    TESTMODE = "files"  # "files" or "posori"

    # cutout for stimuli, and Gabor parameters
    params = {
        "x_range": [140, 940],
        "y_range": [140, 940],
        "scale_factor": 0.25,  # scale to convert coordinates to pixel values
        "d_gabor": 40,
        "lambda_gabor": 16,
        "sigma_gabor": 8,
        "n_phase": 4,
        "normalize_gabor": True,
        # number of directions for dictionary
        "n_source": 32,
        "n_change": 32,
    }

    if TESTMODE == "files":
        num = 9
        path = f"/data_1/kk/StimulusGeneration/Alicorn/Coignless/Base0{num}0_n100"
        files = glob.glob(path + os.sep + "*.mat")

        t0 = time.perf_counter()
        n_total = render_gaborfield_frommatfiles(
            files=files,
            params=params,
            varname=f"Table_base_crn0{num}0",  # intr / crn / base
            varname_dist=f"Table_base_crn0{num}0_dist",
            altpath="./Output100/Coignless",
        )
        t1 = time.perf_counter()
        dt = t1 - t0
        print(
            f"Rendered {n_total} contours in {dt} secs, yielding {n_total/dt} contours/sec."
        )

    if TESTMODE == "posori":
        print("Sample stimulus generation:")
        print("===========================")

        # load contours, multiplex coordinates to simulate a larger set of contours
        n_multiplex = 500
        mat = scipy.io.loadmat("z.mat")
        posori = np.tile(mat["z"], (n_multiplex, 1))
        n_contours = posori.shape[0]
        print(f"Processing {n_contours} contour stimuli")

        output = render_gaborfield(posori, params=params, verbose=True)
        # output8 = (torch.clip(output, -1, 1) * 127 + 128).type(torch.uint8)
        # np.savez_compressed("output8_compressed.npz", output8=output8)


# %%

RenderStimuli/renderLess.py (new file, 299 lines)

# %%

import torch
import time
import scipy.io
import os
import matplotlib.pyplot as plt
import numpy as np
import contours
import glob

USE_CEXT_FROM_DAVID = False
if USE_CEXT_FROM_DAVID:
    from CPPExtensions.PyTCopyCPU import TCopyCPU as render_stimulus_CPP


def render_gaborfield(posori, params, verbose=False):
    scale_factor = params["scale_factor"]
    n_source = params["n_source"]
    n_change = params["n_change"]
    n_phase = params["n_phase"]

    # convert sizes to pixel units
    lambda_PIX = params["lambda_gabor"] * scale_factor
    sigma_PIX = params["sigma_gabor"] * scale_factor
    r_gab_PIX = int(params["d_gabor"] * scale_factor / 2)
    d_gab_PIX = r_gab_PIX * 2 + 1

    # make filterbank
    gabors = torch.zeros(
        [n_phase, n_source, n_change, d_gab_PIX, d_gab_PIX], dtype=torch.float32
    )
    for i_phase in range(n_phase):
        phase = (torch.pi * 2 * i_phase) / n_phase
        gabors[i_phase], dirs_source, dirs_change = contours.gaborner_filterbank(
            r_gab=r_gab_PIX,
            n_source=n_source,
            n_change=n_change,
            lambdah=lambda_PIX,
            sigma=sigma_PIX,
            phase=phase,
            normalize=params["normalize_gabor"],
            torch_device="cpu",
        )
    gabors = gabors.reshape([n_phase * n_source * n_change, d_gab_PIX, d_gab_PIX])

    n_contours = posori.shape[0]

    # discretize ALL stimuli
    if verbose:
        print("Discretizing START!!!")
    t_dis0 = time.perf_counter()
    (
        index_srcchg,
        index_x,
        index_y,
        x_canvas,
        y_canvas,
    ) = contours.discretize_stimuli(
        posori=posori,
        x_range=params["x_range"],
        y_range=params["y_range"],
        scale_factor=scale_factor,
        r_gab_PIX=r_gab_PIX,
        n_source=n_source,
        n_change=n_change,
        n_phase=n_phase,
        torch_device="cpu",
    )
    t_dis1 = time.perf_counter()
    if verbose:
        print(f"Discretizing END, took {t_dis1 - t_dis0} seconds!!!")

    if verbose:
        print("Generation START!!!")
    t0 = time.perf_counter()

    if not USE_CEXT_FROM_DAVID:
        if verbose:
            print(" (using the torch CPU loop...)")
        output = torch.zeros(
            (
                n_contours,
                y_canvas - d_gab_PIX + 1,
                x_canvas - d_gab_PIX + 1,
            ),
            device="cpu",
            dtype=torch.float32,
        )
        kernels_cpu = gabors.detach().cpu()
        for i_con in range(n_contours):
            output[i_con] = contours.render_stimulus(
                kernels=kernels_cpu,
                index_element=index_srcchg[i_con],
                index_y=index_y[i_con],
                index_x=index_x[i_con],
                y_canvas=y_canvas,
                x_canvas=x_canvas,
                torch_device="cpu",
            )
        output = torch.clip(output, -1, +1)

    else:
        if verbose:
            print(" (using C++...)")
        copier = render_stimulus_CPP()
        number_of_cpu_processes = os.cpu_count()
        output_dav_tmp = torch.zeros(
            (
                n_contours,
                y_canvas + 2 * r_gab_PIX,
                x_canvas + 2 * r_gab_PIX,
            ),
            device="cpu",
            dtype=torch.float32,
        )

        # re-sort everything into one sparse control matrix
        n_elements_total = 0
        for i_con in range(n_contours):
            n_elements_total += len(index_x[i_con])
        sparse_matrix = torch.zeros(
            (n_elements_total, 4), device="cpu", dtype=torch.int64
        )
        i_elements_total = 0
        for i_con in range(n_contours):
            n_add = len(index_x[i_con])
            sparse_matrix[i_elements_total : i_elements_total + n_add, 0] = i_con
            sparse_matrix[
                i_elements_total : i_elements_total + n_add, 1
            ] = index_srcchg[i_con]
            sparse_matrix[i_elements_total : i_elements_total + n_add, 2] = index_y[
                i_con
            ]
            sparse_matrix[i_elements_total : i_elements_total + n_add, 3] = index_x[
                i_con
            ]
            i_elements_total += n_add
        assert i_elements_total == n_elements_total, "Element count mismatch!"

        # output_dav_tmp.fill_(0.0)
        copier.process(
            sparse_matrix.data_ptr(),
            int(sparse_matrix.shape[0]),
            int(sparse_matrix.shape[1]),
            gabors.data_ptr(),
            int(gabors.shape[0]),
            int(gabors.shape[1]),
            int(gabors.shape[2]),
            output_dav_tmp.data_ptr(),
            int(output_dav_tmp.shape[0]),
            int(output_dav_tmp.shape[1]),
            int(output_dav_tmp.shape[2]),
            int(number_of_cpu_processes),  # type: ignore
        )
        output = torch.clip(
            output_dav_tmp[
                :,
                d_gab_PIX - 1 : -(d_gab_PIX - 1),
                d_gab_PIX - 1 : -(d_gab_PIX - 1),
            ],
            -1,
            +1,
        )

    t1 = time.perf_counter()
    if verbose:
        print(f"Generating END, took {t1 - t0} seconds!!!")

    if verbose:
        print("Showing first and last stimulus generated...")
        plt.imshow(output[0], cmap="gray", vmin=-1, vmax=+1)
        plt.show()
        plt.imshow(output[-1], cmap="gray", vmin=-1, vmax=+1)
        plt.show()
        print(f"Processed {n_contours} stimuli in {t1 - t_dis0} seconds!")

    return output


def render_gaborfield_frommatfiles(
    files, params, varname, num_make_background, altpath=None, verbose=False
):
    n_total = 0
    n_files = len(files)
    print(f"Going through {n_files} contour files...")

    # how many elements remain in the contour path
    num_c_elems = 7 - num_make_background
    print(f"Number of contour elements: {num_c_elems}")

    for i_file in range(n_files):
        # get path, basename, suffix...
        full = files[i_file]
        path, file = os.path.split(full)
        base, suffix = os.path.splitext(file)

        # ...skip distractor files
        if "dist" in full:
            continue

        # load file
        mat = scipy.io.loadmat(full)
        posori = mat[varname]
        n_contours = posori.shape[0]
        n_total += n_contours
        print(f"  ...file {file} contains {n_contours} contours.")

        # adjust number of contour path elements + their angles
        ang_range = [np.pi / 3, np.pi / 4, np.pi / 2]
        ang_range = ang_range + [-x for x in ang_range]
        for i in range(posori.shape[0]):
            for b in range(num_make_background):
                # change contour element to background element
                elem_idx = 6 - b
                posori[i][0][0][elem_idx] = 0

                # add random orientation
                ang = np.random.choice(ang_range)
                posori[i][0][3][elem_idx] += ang

        # process...
        gaborfield = render_gaborfield(posori, params=params, verbose=verbose)

        # # plot some
        # for i in range(5):
        #     plt.imshow(gaborfield[i], cmap="gray")
        #     plt.show(block=True)

        # save
        if altpath:
            savepath = altpath
        else:
            savepath = path
        savefull = (
            savepath + os.sep + base + "_" + str(num_c_elems) + "conElems_RENDERED.npz"
        )
        print(f"  ...saving under {savefull}...")
        gaborfield = (torch.clip(gaborfield, -1, 1) * 127 + 128).type(torch.uint8)
        np.savez_compressed(savefull, gaborfield=gaborfield)

    return n_total


if __name__ == "__main__":
    TESTMODE = "files"  # "files" or "posori"

    # cutout for stimuli, and Gabor parameters
    params = {
        "x_range": [140, 940],
        "y_range": [140, 940],
        "scale_factor": 0.25,  # scale to convert coordinates to pixel values
        "d_gabor": 40,
        "lambda_gabor": 16,
        "sigma_gabor": 8,
        "n_phase": 4,
        "normalize_gabor": True,
        # number of directions for dictionary
        "n_source": 32,
        "n_change": 32,
    }

    if TESTMODE == "files":
        num_make_background: int = 5
        path = "/data_1/kk/StimulusGeneration/Alicorn/Coignless/Base000_n10000/"
        files = glob.glob(path + os.sep + "*.mat")

        t0 = time.perf_counter()
        n_total = render_gaborfield_frommatfiles(
            files=files,
            params=params,
            varname="Table_base_crn000",
            num_make_background=num_make_background,
            altpath="/home/kk/Documents/Semester4/code/RenderStimuli/OutputLess/CoignLess/",
        )
        t1 = time.perf_counter()
        dt = t1 - t0
        print(
            f"Rendered {n_total} contours in {dt} secs, yielding {n_total/dt} contours/sec."
        )

    if TESTMODE == "posori":
        print("Sample stimulus generation:")
        print("===========================")

        # load contours, multiplex coordinates to simulate a larger set of contours
        n_multiplex = 500
        mat = scipy.io.loadmat("z.mat")
        posori = np.tile(mat["z"], (n_multiplex, 1))
        n_contours = posori.shape[0]
        print(f"Processing {n_contours} contour stimuli")

        output = render_gaborfield(posori, params=params, verbose=True)
        # output8 = (torch.clip(output, -1, 1) * 127 + 128).type(torch.uint8)
        # np.savez_compressed("output8_compressed.npz", output8=output8)


# %%

RenderStimuli/test_readgaborfield.py (new file, 24 lines)

import numpy as np
import time
import matplotlib.pyplot as plt
import os

file = "/home/kk/Documents/Semester4/code/RenderStimuli/Output/Coignless/base_angle_090_b001_n10000_RENDERED.npz"
t0 = time.perf_counter()
with np.load(file) as data:
    a = data["gaborfield"]
t1 = time.perf_counter()
print(t1 - t0)

for i in range(5):
    plt.imshow(a[i], cmap="gray", vmin=0, vmax=255)
    # note: the same PDF path is reused, so only the last figure survives
    plt.savefig(
        os.path.join(
            "./poster",
            "classic_90_stimulus.pdf",
        ),
        dpi=300,
        bbox_inches="tight",
    )
    plt.show(block=True)

RenderStimuli/z.mat (new binary file, not shown)