diff --git a/network/Makefile b/network/Makefile new file mode 100644 index 0000000..b573d4e --- /dev/null +++ b/network/Makefile @@ -0,0 +1,20 @@ +include .env +export + +all: + cd h_dynamic_cnn_cpu_cpp && $(MAKE) all + cd h_dynamic_cnn_gpu_cpp && $(MAKE) all + cd spike_generation_cpu_cpp && $(MAKE) all + cd spike_generation_gpu_cpp && $(MAKE) all + cd multiplication_approximation_cpu_cpp && $(MAKE) all + cd multiplication_approximation_gpu_cpp && $(MAKE) all + $(PYBIN)python3 pybind11_auto_pyi.py + +clean: + cd h_dynamic_cnn_cpu_cpp && $(MAKE) clean + cd h_dynamic_cnn_gpu_cpp && $(MAKE) clean + cd spike_generation_cpu_cpp && $(MAKE) clean + cd spike_generation_gpu_cpp && $(MAKE) clean + cd multiplication_approximation_cpu_cpp && $(MAKE) clean + cd multiplication_approximation_gpu_cpp && $(MAKE) clean + diff --git a/network/h_dynamic_cnn_cpu_cpp/HDynamicCNNCPU.cpp b/network/h_dynamic_cnn_cpu_cpp/HDynamicCNNCPU.cpp new file mode 100644 index 0000000..3722929 --- /dev/null +++ b/network/h_dynamic_cnn_cpu_cpp/HDynamicCNNCPU.cpp @@ -0,0 +1,326 @@ +#include "HDynamicCNNCPU.h" + +#include +#include +#include + +#include +#include +#include + + +HDynamicCNNCPU::HDynamicCNNCPU() +{ + +}; + +HDynamicCNNCPU::~HDynamicCNNCPU() +{ + +}; + +void HDynamicCNNCPU::entrypoint( + int64_t h_pointer_addr, + int64_t h_dim_0, + int64_t h_dim_1, + int64_t h_dim_2, + int64_t h_dim_3, + int64_t epsilon_xy_pointer_addr, + int64_t epsilon_xy_dim_0, + int64_t epsilon_xy_dim_1, + int64_t epsilon_xy_dim_2, + int64_t epsilon_t_pointer_addr, + int64_t epsilon_t_dim_0, + int64_t weights_pointer_addr, + int64_t weights_dim_0, + int64_t weights_dim_1, + int64_t input_pointer_addr, + int64_t input_dim_0, + int64_t input_dim_1, + int64_t input_dim_2, + int64_t input_dim_3, + int64_t init_vector_pointer_addr, + int64_t init_vector_dim_0, + int64_t number_of_processes, + float forgetting_offset, + int64_t gpu_tuning_factor) +{ + + size_t number_of_pattern = input_dim_0; + + size_t h_dim = 
init_vector_dim_0; + float* h_init_ptr = (float*)init_vector_pointer_addr; + assert((h_init_ptr != nullptr)); + assert((h_dim > 0)); + + float* h_pointer = (float*)h_pointer_addr; + assert((h_pointer != nullptr)); + assert((h_dim_0 > 0)); + assert((h_dim_1 > 0)); + assert((h_dim_2 > 0)); + assert((h_dim_3 > 0)); + + size_t h_dim_c0 = h_dim_1 * h_dim_2 * h_dim_3; + size_t h_dim_c1 = h_dim_2 * h_dim_3; + size_t h_dim_c2 = h_dim_3; + + float* epsilon_xy_pointer = (float*)epsilon_xy_pointer_addr; + assert((epsilon_xy_pointer != nullptr)); + assert((epsilon_xy_dim_0 > 0)); + assert((epsilon_xy_dim_1 > 0)); + + size_t epsilon_xy_dim_c0 = epsilon_xy_dim_2 * epsilon_xy_dim_1; + size_t epsilon_xy_dim_c1 = epsilon_xy_dim_2; + + float* epsilon_t_pointer = (float*)epsilon_t_pointer_addr; + assert((epsilon_t_pointer != nullptr)); + assert((epsilon_t_dim_0 > 0)); + + float* weights_pointer = (float*)weights_pointer_addr; + assert((weights_pointer != nullptr)); + assert((weights_dim_0 > 0)); + assert((weights_dim_1 > 0)); + + size_t weights_dim_c0 = weights_dim_1; + + int64_t* input_pointer = (int64_t*)input_pointer_addr; + assert((input_pointer != nullptr)); + assert((input_dim_0 > 0)); + assert((input_dim_1 > 0)); + assert((input_dim_2 > 0)); + assert((input_dim_3 > 0)); + + size_t input_dim_c0 = input_dim_1 * input_dim_2 * input_dim_3; + size_t input_dim_c1 = input_dim_2 * input_dim_3; + size_t input_dim_c2 = input_dim_3; + + assert((h_dim == weights_dim_1)); + size_t number_of_spikes = input_dim_1; + size_t dim_x = input_dim_2; + size_t dim_y = input_dim_3; + + float forgetting_offset_local = forgetting_offset / static_cast(h_dim); + + + // -------------------- + assert((number_of_processes > 0)); + omp_set_num_threads(number_of_processes); + +#pragma omp parallel for + for (size_t pattern_id = 0; pattern_id < number_of_pattern; pattern_id++) + { + update( + h_init_ptr, + h_pointer, + h_dim_c0, + h_dim_c1, + h_dim_c2, + h_dim, + epsilon_xy_pointer, + epsilon_xy_dim_c0, + 
epsilon_xy_dim_c1, + epsilon_t_pointer, + weights_pointer, + weights_dim_c0, + input_pointer, + input_dim_c0, + input_dim_c1, + input_dim_c2, + number_of_spikes, + dim_x, + dim_y, + forgetting_offset, + forgetting_offset_local, + pattern_id); + } + + return; +}; + + +void HDynamicCNNCPU::update( + float* h_init_ptr, + float* h_pointer, + size_t h_dim_c0, + size_t h_dim_c1, + size_t h_dim_c2, + size_t h_dim, + float* epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + size_t epsilon_xy_dim_c1, + float* epsilon_t_pointer, + float* weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c0, + size_t input_dim_c1, + size_t input_dim_c2, + size_t number_of_spikes, + size_t dim_x, + size_t dim_y, + float forgetting_offset, + float forgetting_offset_local, + size_t pattern_id) +{ + + float* h_ptr; + float* epsilon_xy_ptr; + int64_t* input_ptr; + + for (size_t counter_x = 0; counter_x < dim_x; counter_x++) + { + for (size_t counter_y = 0; counter_y < dim_y; counter_y++) + { + epsilon_xy_ptr = epsilon_xy_pointer + + counter_x * epsilon_xy_dim_c1 + counter_y; + + h_ptr = h_pointer + + pattern_id * h_dim_c0 + counter_x * h_dim_c2 + counter_y; + + input_ptr = input_pointer + + pattern_id * input_dim_c0 + counter_x * input_dim_c2 + counter_y; + + update_one_ip( + h_init_ptr, + h_ptr, + h_dim_c1, + h_dim, + weights_pointer, + weights_dim_c0, + input_ptr, + input_dim_c1, + epsilon_xy_ptr, + epsilon_xy_dim_c0, + epsilon_t_pointer, + number_of_spikes, + forgetting_offset, + forgetting_offset_local); + + } + } + + return; +}; + +void HDynamicCNNCPU::update_one_ip( + float* h_init_ptr, + float* h_pointer, + size_t h_dim_c1, + size_t h_dim, + float* weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c1, + float* epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + float* epsilon_t_pointer, + size_t number_of_spikes, + float forgetting_offset, + float forgetting_offset_local) +{ + + float* h_temp = new float[h_dim]; + float* 
h_subsegment = new float[h_dim]; + + memcpy(h_subsegment, h_init_ptr, sizeof(float) * h_dim); + + float h_temp_sum; + float temp_value; + + float epsilon_subsegment; + float epsilon_scale = 1.0; + + int64_t* spike; + float* w_ptr; + + for (size_t counter_spike = 0; counter_spike < number_of_spikes; counter_spike++) + { + if (epsilon_scale > 1E10) + { + temp_value = 1.0 / epsilon_scale; + +#pragma omp simd + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] *= temp_value; + } + + epsilon_scale = 1.0; + } + + spike = input_pointer + counter_spike * input_dim_c1; + + if (*spike >= 0) + { + epsilon_subsegment = + epsilon_xy_pointer[*spike * epsilon_xy_dim_c0] * epsilon_t_pointer[counter_spike]; + + w_ptr = weights_pointer + *spike * weights_dim_c0; + + memcpy(h_temp, h_subsegment, sizeof(float) * h_dim); + +#pragma omp simd + for (size_t counter = 0; counter < h_dim; counter++) + { + h_temp[counter] *= w_ptr[counter]; + } + + h_temp_sum = 0.0; +#pragma omp simd reduction(+ : h_temp_sum) + for (size_t counter = 0; counter < h_dim; counter++) + { + h_temp_sum += h_temp[counter]; + } + + if (h_temp_sum > 1E-10) + { + temp_value = epsilon_scale * epsilon_subsegment / h_temp_sum; + +#pragma omp simd + for (size_t counter = 0; counter < h_dim; counter++) + { + h_temp[counter] *= temp_value; + } + +#pragma omp simd + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] += h_temp[counter]; + } + + if (forgetting_offset_local > 0.0) + { + temp_value = + epsilon_scale * epsilon_subsegment * forgetting_offset_local; + +#pragma omp simd + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] += temp_value; + } + + epsilon_scale *= + 1.0 + epsilon_subsegment * (1.0 + forgetting_offset); + } + else + { + epsilon_scale *= 1.0 + epsilon_subsegment * 1.0; + } + } + } + } + + temp_value = 1.0 / epsilon_scale; +#pragma omp simd + for (size_t counter = 0; counter < h_dim; counter++) + { + 
h_pointer[counter * h_dim_c1] = + h_subsegment[counter] * temp_value; + } + + delete[] h_temp; + delete[] h_subsegment; + + return; +}; + diff --git a/network/h_dynamic_cnn_cpu_cpp/HDynamicCNNCPU.h b/network/h_dynamic_cnn_cpu_cpp/HDynamicCNNCPU.h new file mode 100644 index 0000000..e041e0b --- /dev/null +++ b/network/h_dynamic_cnn_cpu_cpp/HDynamicCNNCPU.h @@ -0,0 +1,85 @@ +#ifndef HDYNAMICCNNCPU +#define HDYNAMICCNNCPU + +#include + +#include +#include + +class HDynamicCNNCPU +{ +public: + HDynamicCNNCPU(); + ~HDynamicCNNCPU(); + + void entrypoint( + int64_t h_pointer_addr, + int64_t h_dim_0, + int64_t h_dim_1, + int64_t h_dim_2, + int64_t h_dim_3, + int64_t epsilon_xy_pointer_addr, + int64_t epsilon_xy_dim_0, + int64_t epsilon_xy_dim_1, + int64_t epsilon_xy_dim_2, + int64_t epsilon_t_pointer_addr, + int64_t epsilon_t_dim_0, + int64_t weights_pointer_addr, + int64_t weights_dim_0, + int64_t weights_dim_1, + int64_t input_pointer_addr, + int64_t input_dim_0, + int64_t input_dim_1, + int64_t input_dim_2, + int64_t input_dim_3, + int64_t init_vector_pointer_addr, + int64_t init_vector_dim_0, + int64_t number_of_processes, + float forgetting_offset, + int64_t gpu_tuning_factor); + +private: + + void update( + float* h_init_ptr, + float* h_pointer, + size_t h_dim_c0, + size_t h_dim_c1, + size_t h_dim_c2, + size_t h_dim, + float* epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + size_t epsilon_xy_dim_c1, + float* epsilon_t_pointer, + float* weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c0, + size_t input_dim_c1, + size_t input_dim_c2, + size_t number_of_spikes, + size_t dim_x, + size_t dim_y, + float forgetting_offset, + float forgetting_offset_local, + size_t pattern_id); + + void update_one_ip( + float* h_init_ptr, + float* h_pointer, + size_t h_dim_c1, + size_t h_dim, + float* weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c1, + float* epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + 
float* epsilon_t_pointer, + size_t number_of_spikes, + float forgetting_offset, + float forgetting_offset_local); + +}; + +#endif /* HDYNAMICCNNCPU */ diff --git a/network/h_dynamic_cnn_cpu_cpp/Makefile b/network/h_dynamic_cnn_cpu_cpp/Makefile new file mode 100644 index 0000000..47f439b --- /dev/null +++ b/network/h_dynamic_cnn_cpu_cpp/Makefile @@ -0,0 +1,33 @@ +include ../.env +export + +name = HDynamicCNN +type = CPU + +PYPOSTFIX := $(shell $(PYBIN)python3-config --extension-suffix) +PYBIND11INCLUDE := $(shell $(PYBIN)python3 -m pybind11 --includes) +PARAMETERS_O = $(PARAMETERS_O_CPU) $(PYBIND11INCLUDE) +PARAMETERS_Linker = $(PARAMETERS_Linker_CPU) + +so_file = Py$(name)$(type)$(PYPOSTFIX) +pyi_file = Py$(name)$(type).pyi +all: ../$(so_file) + +$(O_DIRS)$(name)$(type).o: $(name)$(type).h $(name)$(type).cpp + mkdir -p $(O_DIRS) + $(CC) $(PARAMETERS_O) -c $(name)$(type).cpp -o $(O_DIRS)$(name)$(type).o + +$(O_DIRS)Py$(name)$(type).o: $(name)$(type).h Py$(name)$(type).cpp + mkdir -p $(O_DIRS) + $(CC) $(PARAMETERS_O) -c Py$(name)$(type).cpp -o $(O_DIRS)Py$(name)$(type).o + +../$(so_file): $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o + $(CC) $(PARAMETERS_Linker) -o ../$(so_file) $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o + + +####################### +clean: + rm -rf $(O_DIRS) + rm -f ../$(so_file) + rm -f ../$(pyi_file) + diff --git a/network/h_dynamic_cnn_cpu_cpp/PyHDynamicCNNCPU.cpp b/network/h_dynamic_cnn_cpu_cpp/PyHDynamicCNNCPU.cpp new file mode 100644 index 0000000..38f5f72 --- /dev/null +++ b/network/h_dynamic_cnn_cpu_cpp/PyHDynamicCNNCPU.cpp @@ -0,0 +1,14 @@ +#include + +#include "HDynamicCNNCPU.h" + +namespace py = pybind11; + +PYBIND11_MODULE(PyHDynamicCNNCPU, m) +{ + m.doc() = "HDynamicCNNCPU Module"; + py::class_(m, "HDynamicCNNCPU") + .def(py::init<>()) + .def("update", + &HDynamicCNNCPU::entrypoint); +} \ No newline at end of file diff --git a/network/h_dynamic_cnn_gpu_cpp/HDynamicCNNGPU.cu 
b/network/h_dynamic_cnn_gpu_cpp/HDynamicCNNGPU.cu new file mode 100644 index 0000000..4237406 --- /dev/null +++ b/network/h_dynamic_cnn_gpu_cpp/HDynamicCNNGPU.cu @@ -0,0 +1,472 @@ +#include "HDynamicCNNGPU.h" + +#include +#include +#include + +#include +#include +#include + + +HDynamicCNNGPU::HDynamicCNNGPU() +{ + +}; + +HDynamicCNNGPU::~HDynamicCNNGPU() +{ + +}; + +void HDynamicCNNGPU::entrypoint( + int64_t h_pointer_addr, + int64_t h_dim_0, + int64_t h_dim_1, + int64_t h_dim_2, + int64_t h_dim_3, + int64_t epsilon_xy_pointer_addr, + int64_t epsilon_xy_dim_0, + int64_t epsilon_xy_dim_1, + int64_t epsilon_xy_dim_2, + int64_t epsilon_t_pointer_addr, + int64_t epsilon_t_dim_0, + int64_t weights_pointer_addr, + int64_t weights_dim_0, + int64_t weights_dim_1, + int64_t input_pointer_addr, + int64_t input_dim_0, + int64_t input_dim_1, + int64_t input_dim_2, + int64_t input_dim_3, + int64_t init_vector_pointer_addr, + int64_t init_vector_dim_0, + int64_t number_of_processes, + float forgetting_offset, + int64_t gpu_tuning_factor) +{ + + size_t number_of_pattern = input_dim_0; + + size_t h_dim = init_vector_dim_0; + float* h_init_ptr = (float*)init_vector_pointer_addr; + assert((h_init_ptr != nullptr)); + assert((h_dim > 0)); + + float* h_pointer = (float*)h_pointer_addr; + assert((h_pointer != nullptr)); + assert((h_dim_0 > 0)); + assert((h_dim_1 > 0)); + assert((h_dim_2 > 0)); + assert((h_dim_3 > 0)); + + size_t h_dim_c0 = h_dim_1 * h_dim_2 * h_dim_3; + size_t h_dim_c1 = h_dim_2 * h_dim_3; + size_t h_dim_c2 = h_dim_3; + + float* epsilon_xy_pointer = (float*)epsilon_xy_pointer_addr; + assert((epsilon_xy_pointer != nullptr)); + assert((epsilon_xy_dim_0 > 0)); + assert((epsilon_xy_dim_1 > 0)); + + size_t epsilon_xy_dim_c0 = epsilon_xy_dim_2 * epsilon_xy_dim_1; + size_t epsilon_xy_dim_c1 = epsilon_xy_dim_2; + + float* epsilon_t_pointer = (float*)epsilon_t_pointer_addr; + assert((epsilon_t_pointer != nullptr)); + assert((epsilon_t_dim_0 > 0)); + + float* weights_pointer = 
(float*)weights_pointer_addr; + assert((weights_pointer != nullptr)); + assert((weights_dim_0 > 0)); + assert((weights_dim_1 > 0)); + + size_t weights_dim_c0 = weights_dim_1; + + int64_t* input_pointer = (int64_t*)input_pointer_addr; + assert((input_pointer != nullptr)); + assert((input_dim_0 > 0)); + assert((input_dim_1 > 0)); + assert((input_dim_2 > 0)); + assert((input_dim_3 > 0)); + + size_t input_dim_c0 = input_dim_1 * input_dim_2 * input_dim_3; + size_t input_dim_c1 = input_dim_2 * input_dim_3; + size_t input_dim_c2 = input_dim_3; + + assert((h_dim == weights_dim_1)); + size_t number_of_spikes = input_dim_1; + size_t dim_x = input_dim_2; + size_t dim_y = input_dim_3; + + float forgetting_offset_local = forgetting_offset / static_cast(h_dim); + + // -------------------- + assert ((number_of_processes <= 0)); + + gpu_update( + h_init_ptr, + h_pointer, + h_dim_c0, + h_dim_c1, + h_dim_c2, + h_dim, + epsilon_xy_pointer, + epsilon_xy_dim_c0, + epsilon_xy_dim_c1, + epsilon_t_pointer, + weights_pointer, + weights_dim_c0, + input_pointer, + input_dim_c0, + input_dim_c1, + input_dim_c2, + number_of_spikes, + dim_x, + dim_y, + forgetting_offset, + forgetting_offset_local, + number_of_pattern, + gpu_tuning_factor); + + return; +}; + +__device__ void gpu_update_one_ip( + float* __restrict__ h_init_ptr, + float* __restrict__ h_pointer, + size_t h_dim_c1, + size_t h_dim, + float* __restrict__ weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c1, + float* __restrict__ epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + float* __restrict__ epsilon_t_pointer, + size_t number_of_spikes, + float forgetting_offset, + float forgetting_offset_local, + float* __restrict__ h_temp, + float* __restrict__ h_subsegment +) +{ + + float h_temp_sum; + float temp_value; + + float epsilon_subsegment; + float epsilon_scale = 1.0; + + int64_t* spike; + float* w_ptr; + + // float* h_temp = new float[h_dim]; + // float* h_subsegment = new float[h_dim]; + + // 
Initialize the sub-segement + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] = h_init_ptr[counter]; + } + + for (size_t counter_spike = 0; counter_spike < number_of_spikes; counter_spike++) + { + if (epsilon_scale > 1E10) + { + temp_value = 1.0 / epsilon_scale; + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] *= temp_value; + } + + epsilon_scale = 1.0; + } + + spike = input_pointer + counter_spike * input_dim_c1; + + if (*spike >= 0) + { + epsilon_subsegment = + epsilon_xy_pointer[*spike *epsilon_xy_dim_c0] * epsilon_t_pointer[counter_spike]; + + w_ptr = weights_pointer + *spike * weights_dim_c0; + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_temp[counter] = h_subsegment[counter] * w_ptr[counter]; + } + + h_temp_sum = 0.0; + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_temp_sum += h_temp[counter]; + } + + if (h_temp_sum > 1E-10) + { + temp_value = epsilon_scale * epsilon_subsegment / h_temp_sum; + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_temp[counter] *= temp_value; + } + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] += h_temp[counter]; + } + + if (forgetting_offset_local > 0.0) + { + temp_value = + epsilon_scale * epsilon_subsegment * forgetting_offset_local; + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_subsegment[counter] += temp_value; + } + + epsilon_scale *= + 1.0 + epsilon_subsegment * (1.0 + forgetting_offset); + } + else + { + epsilon_scale *= 1.0 + epsilon_subsegment * 1.0; + } + } + } + } + + temp_value = 1.0 / epsilon_scale; + + for (size_t counter = 0; counter < h_dim; counter++) + { + h_pointer[counter * h_dim_c1] = + h_subsegment[counter] * temp_value; + } + + // delete[] h_temp; + // delete[] h_subsegment; + + return; +}; + +__global__ void kernel_spike_generation( + float* __restrict__ h_init_ptr, + float* __restrict__ h_pointer, + size_t h_dim_c0, + size_t h_dim_c1, 
+ size_t h_dim_c2, + size_t h_dim, + float* __restrict__ weights_pointer, + size_t weights_dim_c0, + int64_t* __restrict__ input_pointer, + size_t input_dim_c0, + size_t input_dim_c1, + size_t input_dim_c2, + float* __restrict__ epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + size_t epsilon_xy_dim_c1, + float* __restrict__ epsilon_t_pointer, + size_t number_of_spikes, + float forgetting_offset, + float forgetting_offset_local, + size_t dim_x, + size_t dim_y, + size_t dim_xy, + size_t max_threadable_tasks, + float* __restrict__ temp_memory_a, + float* __restrict__ temp_memory_b +) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + + if (idx < max_threadable_tasks) + { + float* h_ptr; + float* epsilon_xy_ptr; + int64_t* input_ptr; + + float* temp_memory_ptr_a = temp_memory_a + idx * h_dim; + float* temp_memory_ptr_b = temp_memory_b + idx * h_dim; + + // int pattern_id = idx; + int pattern_id = idx / dim_xy; + int position_xy = idx - (pattern_id * dim_xy); + + // size_t position_x = blockIdx.y; + // size_t position_y = blockIdx.z; + size_t position_x = position_xy / dim_y; + size_t position_y = position_xy - (position_x * dim_y); + + epsilon_xy_ptr = epsilon_xy_pointer + + position_x * epsilon_xy_dim_c1 + position_y; + + h_ptr = h_pointer + + pattern_id * h_dim_c0 + position_x * h_dim_c2 + position_y; + + input_ptr = input_pointer + + pattern_id * input_dim_c0 + position_x * input_dim_c2 + position_y; + + gpu_update_one_ip( + h_init_ptr, + h_ptr, + h_dim_c1, + h_dim, + weights_pointer, + weights_dim_c0, + input_ptr, + input_dim_c1, + epsilon_xy_ptr, + epsilon_xy_dim_c0, + epsilon_t_pointer, + number_of_spikes, + forgetting_offset, + forgetting_offset_local, + temp_memory_ptr_a, + temp_memory_ptr_b + ); + + } + +}; + +// Let's face it... We need a better way to paralelize it... 
+void HDynamicCNNGPU::gpu_update( + float* h_init_ptr, + float* h_pointer, + size_t h_dim_c0, + size_t h_dim_c1, + size_t h_dim_c2, + size_t h_dim, + float* epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + size_t epsilon_xy_dim_c1, + float* epsilon_t_pointer, + float* weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c0, + size_t input_dim_c1, + size_t input_dim_c2, + size_t number_of_spikes, + size_t dim_x, + size_t dim_y, + float forgetting_offset, + float forgetting_offset_local, + size_t number_of_pattern, + size_t gpu_tuning_factor) +{ + + cudaError_t status; + assert((dim_x < 65535)); + assert((dim_y < 65535)); + + // ////////////////////////////////////// + // Calculate the distribution on the GPU + // ////////////////////////////////////// + + int min_grid_size; + int block_size; + int grid_size; + + size_t dynamic_s_mem_size = 0; + size_t max_threadable_tasks = number_of_pattern * dim_x * dim_y; + + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html?highlight=blocksize#occupancy-calculator + status = cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, + (void*)kernel_spike_generation, + dynamic_s_mem_size, max_threadable_tasks); + assert((status == cudaSuccess)); + + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications + // Maximum dimensionality of grid of thread blocks: 3 + // Maximum x -dimension of a grid of thread blocks: (2^31)-1 + // Maximum y- or z-dimension of a grid of thread blocks: 65535 + + // Reduce the automatic block size with our guess + if ((gpu_tuning_factor > 0) && (gpu_tuning_factor < block_size)) + { + block_size = int(gpu_tuning_factor); + } + // Round up according to array size + // (I will separate x and y into other grid dimentsions soon) + // grid_size = (number_of_pattern + block_size - 1) / block_size; + grid_size = (max_threadable_tasks + block_size - 1) / block_size; + + float* temp_memory_a = nullptr; + 
status = cudaMalloc((void**)&temp_memory_a, h_dim * max_threadable_tasks * sizeof(float)); + assert((status == cudaSuccess)); + + float* temp_memory_b = nullptr; + status = cudaMalloc((void**)&temp_memory_b, h_dim * max_threadable_tasks * sizeof(float)); + assert((status == cudaSuccess)); + + + //kernel_spike_generation<<>>( + kernel_spike_generation<<>>( + h_init_ptr, + h_pointer, + h_dim_c0, + h_dim_c1, + h_dim_c2, + h_dim, + weights_pointer, + weights_dim_c0, + input_pointer, + input_dim_c0, + input_dim_c1, + input_dim_c2, + epsilon_xy_pointer, + epsilon_xy_dim_c0, + epsilon_xy_dim_c1, + epsilon_t_pointer, + number_of_spikes, + forgetting_offset, + forgetting_offset_local, + dim_x, + dim_y, + (dim_x * dim_y), + //number_of_pattern + max_threadable_tasks, + temp_memory_a, + temp_memory_b + ); + + status = cudaDeviceSynchronize(); + assert((status == cudaSuccess)); + + status = cudaFree(temp_memory_a); + assert((status == cudaSuccess)); + + status = cudaFree(temp_memory_b); + assert((status == cudaSuccess)); + + return; +}; + + +void HDynamicCNNGPU::gpu_occupancy_export( + size_t dim_x, + size_t dim_y, + size_t number_of_pattern, + size_t h_dim, + int64_t setting_memory_addr, + size_t setting_dim_0, + size_t setting_dim_1) +{ + return; +}; + +void HDynamicCNNGPU::gpu_occupancy_import( + int64_t setting_memory_addr, + size_t setting_dim_0, + size_t setting_dim_1 +) +{ + return; +}; \ No newline at end of file diff --git a/network/h_dynamic_cnn_gpu_cpp/HDynamicCNNGPU.h b/network/h_dynamic_cnn_gpu_cpp/HDynamicCNNGPU.h new file mode 100644 index 0000000..235bad4 --- /dev/null +++ b/network/h_dynamic_cnn_gpu_cpp/HDynamicCNNGPU.h @@ -0,0 +1,84 @@ +#ifndef HDYNAMICCNNGPU +#define HDYNAMICCNNGPU + +#include + +#include +#include + +class HDynamicCNNGPU +{ +public: + HDynamicCNNGPU(); + ~HDynamicCNNGPU(); + + void entrypoint( + int64_t h_pointer_addr, + int64_t h_dim_0, + int64_t h_dim_1, + int64_t h_dim_2, + int64_t h_dim_3, + int64_t epsilon_xy_pointer_addr, + int64_t 
epsilon_xy_dim_0, + int64_t epsilon_xy_dim_1, + int64_t epsilon_xy_dim_2, + int64_t epsilon_t_pointer_addr, + int64_t epsilon_t_dim_0, + int64_t weights_pointer_addr, + int64_t weights_dim_0, + int64_t weights_dim_1, + int64_t input_pointer_addr, + int64_t input_dim_0, + int64_t input_dim_1, + int64_t input_dim_2, + int64_t input_dim_3, + int64_t init_vector_pointer_addr, + int64_t init_vector_dim_0, + int64_t number_of_processes, + float forgetting_offset, + int64_t gpu_tuning_factor); + + void gpu_occupancy_export( + size_t dim_x, + size_t dim_y, + size_t number_of_pattern, + size_t h_dim, + int64_t setting_memory_addr, + size_t setting_dim_0, + size_t setting_dim_1); + + void gpu_occupancy_import( + int64_t setting_memory_addr, + size_t setting_dim_0, + size_t setting_dim_1); + +private: + + void gpu_update( + float* h_init_ptr, + float* h_pointer, + size_t h_dim_c0, + size_t h_dim_c1, + size_t h_dim_c2, + size_t h_dim, + float* epsilon_xy_pointer, + size_t epsilon_xy_dim_c0, + size_t epsilon_xy_dim_c1, + float* epsilon_t_pointer, + float* weights_pointer, + size_t weights_dim_c0, + int64_t* input_pointer, + size_t input_dim_c0, + size_t input_dim_c1, + size_t input_dim_c2, + size_t number_of_spikes, + size_t dim_x, + size_t dim_y, + float forgetting_offset, + float forgetting_offset_local, + size_t number_of_pattern, + size_t gpu_tuning_factor); + +}; + +#endif /* HDYNAMICCNNGPU */ diff --git a/network/h_dynamic_cnn_gpu_cpp/Makefile b/network/h_dynamic_cnn_gpu_cpp/Makefile new file mode 100644 index 0000000..acdc799 --- /dev/null +++ b/network/h_dynamic_cnn_gpu_cpp/Makefile @@ -0,0 +1,33 @@ +include ../.env +export + +name = HDynamicCNN +type = GPU + +PYPOSTFIX := $(shell $(PYBIN)python3-config --extension-suffix) +PYBIND11INCLUDE := $(shell $(PYBIN)python3 -m pybind11 --includes) +PARAMETERS_O = $(PARAMETERS_O_GPU) $(PYBIND11INCLUDE) +PARAMETERS_Linker = $(PARAMETERS_Linker_GPU) + +so_file = Py$(name)$(type)$(PYPOSTFIX) +pyi_file = Py$(name)$(type).pyi +all: 
../$(so_file) + +$(O_DIRS)$(name)$(type).o: $(name)$(type).h $(name)$(type).cu + mkdir -p $(O_DIRS) + $(NVCC) $(PARAMETERS_O) -c $(name)$(type).cu -o $(O_DIRS)$(name)$(type).o + +$(O_DIRS)Py$(name)$(type).o: $(name)$(type).h Py$(name)$(type).cpp + mkdir -p $(O_DIRS) + $(NVCC) $(PARAMETERS_O) -c Py$(name)$(type).cpp -o $(O_DIRS)Py$(name)$(type).o + +../$(so_file): $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o + $(NVCC) $(PARAMETERS_Linker) -o ../$(so_file) $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o + + +####################### +clean: + rm -rf $(O_DIRS) + rm -f ../$(so_file) + rm -f ../$(pyi_file) + diff --git a/network/h_dynamic_cnn_gpu_cpp/PyHDynamicCNNGPU.cpp b/network/h_dynamic_cnn_gpu_cpp/PyHDynamicCNNGPU.cpp new file mode 100644 index 0000000..999b6a6 --- /dev/null +++ b/network/h_dynamic_cnn_gpu_cpp/PyHDynamicCNNGPU.cpp @@ -0,0 +1,18 @@ +#include + +#include "HDynamicCNNGPU.h" + +namespace py = pybind11; + +PYBIND11_MODULE(PyHDynamicCNNGPU, m) +{ + m.doc() = "HDynamicCNNGPU Module"; + py::class_(m, "HDynamicCNNGPU") + .def(py::init<>()) + .def("gpu_occupancy_export", + &HDynamicCNNGPU::gpu_occupancy_export) + .def("gpu_occupancy_import", + &HDynamicCNNGPU::gpu_occupancy_import) + .def("update", + &HDynamicCNNGPU::entrypoint); +} \ No newline at end of file diff --git a/network/multiplication_approximation_cpu_cpp/Makefile b/network/multiplication_approximation_cpu_cpp/Makefile new file mode 100644 index 0000000..303fef9 --- /dev/null +++ b/network/multiplication_approximation_cpu_cpp/Makefile @@ -0,0 +1,61 @@ +include ../.env +export + +name = MultiplicationApproximation +type = CPU + +PYPOSTFIX := $(shell $(PYBIN)python3-config --extension-suffix) +PYBIND11INCLUDE := $(shell $(PYBIN)python3 -m pybind11 --includes) +PARAMETERS_O = $(PARAMETERS_O_CPU) $(PYBIND11INCLUDE) +PARAMETERS_Linker = $(PARAMETERS_Linker_CPU) + +so_file = Py$(name)$(type)$(PYPOSTFIX) +pyi_file = Py$(name)$(type).pyi + +all: ../$(so_file) + + +$(O_DIRS)error_term.o: \ + 
error_term.h \ + error_term.cpp + mkdir -p $(O_DIRS) + $(CC) $(PARAMETERS_O) -c error_term.cpp -o $(O_DIRS)error_term.o + + +$(O_DIRS)approximation_multiplication_function.o: \ + approximation_multiplication_function.h \ + approximation_multiplication_function.cpp \ + error_term.h + mkdir -p $(O_DIRS) + $(CC) $(PARAMETERS_O) -c approximation_multiplication_function.cpp -o $(O_DIRS)approximation_multiplication_function.o + + +$(O_DIRS)$(name)$(type).o: \ + $(name)$(type).h \ + $(name)$(type).cpp \ + approximation_multiplication_function.h + mkdir -p $(O_DIRS) + $(CC) $(PARAMETERS_O) -c $(name)$(type).cpp -o $(O_DIRS)$(name)$(type).o + +$(O_DIRS)Py$(name)$(type).o: $(name)$(type).h Py$(name)$(type).cpp + mkdir -p $(O_DIRS) + $(CC) $(PARAMETERS_O) -c Py$(name)$(type).cpp -o $(O_DIRS)Py$(name)$(type).o + +../$(so_file): \ + $(O_DIRS)$(name)$(type).o \ + $(O_DIRS)Py$(name)$(type).o \ + $(O_DIRS)approximation_multiplication_function.o \ + $(O_DIRS)error_term.o + $(CC) $(PARAMETERS_Linker) -o ../$(so_file) \ + $(O_DIRS)$(name)$(type).o \ + $(O_DIRS)Py$(name)$(type).o \ + $(O_DIRS)approximation_multiplication_function.o \ + $(O_DIRS)error_term.o + + +####################### +clean: + rm -rf $(O_DIRS) + rm -f ../$(so_file) + rm -f ../$(pyi_file) + diff --git a/network/multiplication_approximation_cpu_cpp/MultiplicationApproximationCPU.cpp b/network/multiplication_approximation_cpu_cpp/MultiplicationApproximationCPU.cpp new file mode 100644 index 0000000..6e688ce --- /dev/null +++ b/network/multiplication_approximation_cpu_cpp/MultiplicationApproximationCPU.cpp @@ -0,0 +1,193 @@ +#include "MultiplicationApproximationCPU.h" + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "approximation_multiplication_function.h" + +MultiplicationApproximationCPU::MultiplicationApproximationCPU() +{ + +}; + +MultiplicationApproximationCPU::~MultiplicationApproximationCPU() +{ + +}; + +void MultiplicationApproximationCPU::entrypoint( + int64_t 
np_input_pointer_addr, + int64_t np_weight_pointer_addr, + int64_t np_output_pointer_addr, + int64_t pattern_dim, + int64_t feature_dim, + int64_t x_dim, + int64_t y_dim, + int64_t input_channel_dim, + int64_t number_of_processes, + bool approximation_enable, + int64_t number_of_trunc_bits, + int64_t number_of_frac) +{ + + size_t number_of_pattern = pattern_dim; + + + float* np_input_pointer = (float*)np_input_pointer_addr; + float* np_weight_pointer = (float*)np_weight_pointer_addr; + float* np_output_pointer = (float*)np_output_pointer_addr; + + assert((np_input_pointer != nullptr)); + assert((np_output_pointer != nullptr)); + assert((np_weight_pointer != nullptr)); + + assert((pattern_dim > 0)); + assert((feature_dim > 0)); + assert((x_dim > 0)); + assert((y_dim > 0)); + assert((input_channel_dim > 0)); + + assert((number_of_processes > 0)); + + omp_set_num_threads(number_of_processes); + // For debugging: Only one thread + // omp_set_num_threads(1); + +#pragma omp parallel for + for (size_t pattern_id = 0; pattern_id < number_of_pattern; pattern_id++) + { + + calculate(np_input_pointer, np_weight_pointer, + np_output_pointer, pattern_dim, feature_dim, x_dim, y_dim, + input_channel_dim, pattern_id, approximation_enable, + number_of_trunc_bits, number_of_frac); + } + return; +}; + +void MultiplicationApproximationCPU::calculate( + float* np_input_pointer, + float* np_weight_pointer, + float* np_output_pointer, + size_t pattern_dim, + size_t feature_dim, + size_t x_dim, + size_t y_dim, + size_t input_channel_dim, + size_t id_pattern, + bool approximation_enable, + size_t number_of_trunc_bits, + size_t number_of_frac_bits) +{ + + assert((id_pattern >= 0)); + assert((id_pattern < pattern_dim)); + + float* np_input_pointer_pattern; + float* np_output_pointer_pattern; + + float* input_ptr; + float* output_ptr; + float* w_ptr; + + size_t pattern_size = input_channel_dim; + + std::vector ap_h_vector; + ap_h_vector.resize(pattern_size); + float* ap_h_ptr = 
ap_h_vector.data(); + + std::vector ap_x_vector; + ap_x_vector.resize(pattern_size); + uint32_t* ap_x_ptr = ap_x_vector.data(); + + std::vector ap_y_vector; + ap_y_vector.resize(pattern_size); + uint32_t* ap_y_ptr = ap_y_vector.data(); + + std::vector ap_x_exponent_vector; + ap_x_exponent_vector.resize(pattern_size); + uint32_t* ap_x_exponent_ptr = ap_x_exponent_vector.data(); + + std::vector ap_y_exponent_vector; + ap_y_exponent_vector.resize(pattern_size); + uint32_t* ap_y_exponent_ptr = ap_y_exponent_vector.data(); + + std::vector ap_h_exponent_vector; + ap_h_exponent_vector.resize(pattern_size); + uint32_t* ap_h_exponent_ptr = ap_h_exponent_vector.data(); + + std::vector ap_res_vector; + ap_res_vector.resize(pattern_size); + uint64_t* ap_res_ptr = ap_res_vector.data(); + + uint32_t ap_mask = static_cast(pow(2, number_of_trunc_bits)) - 1; + + std::vector sign_temp_vector; + sign_temp_vector.resize(pattern_size); + uint32_t* sign_temp_ptr = sign_temp_vector.data(); + + size_t input_pattern_size = input_channel_dim * x_dim * y_dim; + size_t output_pattern_size = feature_dim * x_dim * y_dim; + + np_input_pointer_pattern = np_input_pointer + id_pattern * input_pattern_size; + np_output_pointer_pattern = + np_output_pointer + id_pattern * output_pattern_size; + + size_t pos_xy; + size_t pos_xy_if; + + float temp_sum; + + size_t pattern_c_2 = x_dim * y_dim; + + for (size_t counter_x = 0; counter_x < x_dim; counter_x++) + { + for (size_t counter_y = 0; counter_y < y_dim; counter_y++) + { + pos_xy = counter_y + counter_x * y_dim; + for (size_t counter_feature = 0; counter_feature < feature_dim; + counter_feature++) + { + pos_xy_if = counter_feature * pattern_c_2 + pos_xy; + + input_ptr = np_input_pointer_pattern + pos_xy; + output_ptr = np_output_pointer_pattern + pos_xy_if; + w_ptr = np_weight_pointer + counter_feature * input_channel_dim; + +#pragma omp simd + for (size_t counter = 0; counter < pattern_size; counter++) + { + ap_h_ptr[counter] = input_ptr[counter * 
pattern_c_2]; + } + + approximation_multiplication_function( + ap_h_ptr, w_ptr, pattern_size, number_of_trunc_bits, + number_of_frac_bits, ap_x_ptr, ap_y_ptr, ap_x_exponent_ptr, + ap_y_exponent_ptr, ap_h_exponent_ptr, ap_mask, ap_res_ptr, + sign_temp_ptr, approximation_enable); + + temp_sum = 0.0; +#pragma omp simd reduction(+ \ + : temp_sum) + for (size_t counter = 0; counter < pattern_size; counter++) + { + temp_sum += ap_h_ptr[counter]; + } + + output_ptr[0] = temp_sum; + } + } + } + + return; +}; + + + diff --git a/network/multiplication_approximation_cpu_cpp/MultiplicationApproximationCPU.h b/network/multiplication_approximation_cpu_cpp/MultiplicationApproximationCPU.h new file mode 100644 index 0000000..65f7e49 --- /dev/null +++ b/network/multiplication_approximation_cpu_cpp/MultiplicationApproximationCPU.h @@ -0,0 +1,47 @@ +#ifndef MULTIPLICATIONAPPROXIMATIONCPU +#define MULTIPLICATIONAPPROXIMATIONCPU + +#include + +#include +#include + +class MultiplicationApproximationCPU +{ +public: + MultiplicationApproximationCPU(); + ~MultiplicationApproximationCPU(); + + void entrypoint( + int64_t np_input_pointer_addr, + int64_t np_weight_pointer_addr, + int64_t np_output_pointer_addr, + int64_t pattern_dim, + int64_t feature_dim, + int64_t x_dim, + int64_t y_dim, + int64_t input_channel_dim, + int64_t number_of_processes, + bool approximation_enable, + int64_t number_of_trunc_bits, + int64_t number_of_frac); + +private: + + void calculate( + float* np_input_pointer, + float* np_weight_pointer, + float* np_output_pointer, + size_t pattern_dim, + size_t feature_dim, + size_t x_dim, + size_t y_dim, + size_t input_channel_dim, + size_t id_pattern, + bool approximation_enable, + size_t number_of_trunc_bits, + size_t number_of_frac); + +}; + +#endif /* MULTIPLICATIONAPPROXIMATIONCPU */ diff --git a/network/multiplication_approximation_cpu_cpp/PyMultiplicationApproximationCPU.cpp b/network/multiplication_approximation_cpu_cpp/PyMultiplicationApproximationCPU.cpp new file 
mode 100644 index 0000000..e43b12f --- /dev/null +++ b/network/multiplication_approximation_cpu_cpp/PyMultiplicationApproximationCPU.cpp @@ -0,0 +1,14 @@ + +#include + +#include "MultiplicationApproximationCPU.h" + +namespace py = pybind11; + +PYBIND11_MODULE(PyMultiplicationApproximationCPU, m) { + m.doc() = "MultiplicationApproximationCPU Module"; + py::class_(m, "MultiplicationApproximationCPU") + .def(py::init<>()) + .def("update_entrypoint", + &MultiplicationApproximationCPU::entrypoint); +} \ No newline at end of file diff --git a/network/multiplication_approximation_cpu_cpp/approximation_multiplication_function.cpp b/network/multiplication_approximation_cpu_cpp/approximation_multiplication_function.cpp new file mode 100644 index 0000000..749c8f9 --- /dev/null +++ b/network/multiplication_approximation_cpu_cpp/approximation_multiplication_function.cpp @@ -0,0 +1,148 @@ +#include + +#include +#include +#include + +#include "error_term.h" + +// Best way to plot the bits +// std::cout << std::bitset<32>(ap_y_ptr[1]) << "\n"; + +// The result needs to be written back into h_pointer (which contains h) +// Don't write to w_pointer. 
+void approximation_multiplication_function( + float* h_pointer, + float* w_pointer, + size_t pattern_length, + size_t number_of_trunc_bits, + size_t number_of_frac_bits, + uint32_t* ap_x_ptr, + uint32_t* ap_y_ptr, + uint32_t* ap_x_exponent_ptr, + uint32_t* ap_y_exponent_ptr, + uint32_t* ap_h_exponent_ptr, + uint32_t ap_mask, + uint64_t* ap_res_ptr, + uint32_t* sign_temp_ptr, + bool approximation_enable) { + + uint32_t* w_pointer_mod = (uint32_t*)w_pointer; + uint32_t* h_pointer_mod = (uint32_t*)h_pointer; + + // Calculate the new sign +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + sign_temp_ptr[counter] = (w_pointer_mod[counter] & 0x80000000) ^ + (h_pointer_mod[counter] & 0x80000000); + } + + // Extract the exponent +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_x_exponent_ptr[counter] = (h_pointer_mod[counter] << 1) >> 24; + } +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_y_exponent_ptr[counter] = (w_pointer_mod[counter] << 1) >> 24; + } + + // Cast and "normalize" + uint64_t shift_value = 32 - number_of_frac_bits; +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_x_ptr[counter] = + ((h_pointer_mod[counter] << 8) | 0x80000000) >> shift_value; + } + +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_y_ptr[counter] = + ((w_pointer_mod[counter] << 8) | 0x80000000) >> shift_value; + } + + // Make the zero -g-r-e-a-t- correct again +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + if (h_pointer[counter] == 0) { + ap_x_ptr[counter] = 0; + } + } + +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + if (w_pointer[counter] == 0) { + ap_y_ptr[counter] = 0; + } + } + + // res = x*y +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_res_ptr[counter] = 
static_cast(ap_x_ptr[counter]) * static_cast(ap_y_ptr[counter]); + } + + uint32_t temp; + if (approximation_enable == true) { + // Go through the vector values + for (size_t counter = 0; counter < pattern_length; counter++) { + temp = error_term(ap_y_ptr[counter], ap_x_ptr[counter], ap_mask, + number_of_trunc_bits); + if (temp > ap_res_ptr[counter]) { + ap_res_ptr[counter] = 0; + } + else { + ap_res_ptr[counter] -= temp; + } + } + } + // Cast from int to float +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + h_pointer[counter] = static_cast(ap_res_ptr[counter]); + } + +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_h_exponent_ptr[counter] = (h_pointer_mod[counter] << 1) >> 24; + } + + // devide by the 2^number_of_frac_bits +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + ap_h_exponent_ptr[counter] -= 2 * number_of_frac_bits; + } + +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + temp = ap_x_exponent_ptr[counter] + ap_y_exponent_ptr[counter] + + ap_h_exponent_ptr[counter]; + if (temp > 252) { + ap_h_exponent_ptr[counter] = temp - 252; + } + else { + // Here I try to catch the case that the new exponent is too small + ap_h_exponent_ptr[counter] = 0; + } + } + + // Remove the old exponent +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + h_pointer_mod[counter] = (h_pointer_mod[counter] << 9) >> 9; + } + + // Install the new exponent +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + h_pointer_mod[counter] += ap_h_exponent_ptr[counter] << 23; + } + + // Add the sign back +#pragma omp simd + for (size_t counter = 0; counter < pattern_length; counter++) { + h_pointer_mod[counter] += sign_temp_ptr[counter]; + } + + return; +} diff --git a/network/multiplication_approximation_cpu_cpp/approximation_multiplication_function.h 
// Error term of a truncated multiplier: for each of the low
// number_of_trunc_bits bits that is set in `a`, accumulate the
// correspondingly left-shifted slice of `b` restricted to ap_mask.
// The caller subtracts this value from the exact product to model the
// dropped partial products.
uint32_t error_term(uint32_t a,
    uint32_t b,
    uint32_t ap_mask,
    uint32_t number_of_trunc_bits)
{
    uint32_t accumulated = 0;

    uint32_t a_bits = a;
    uint32_t b_shifted = b & ap_mask;

    // Walk the truncated bit positions of `a`, shifting `b` in lockstep.
    for (uint32_t bit_index = 0; bit_index < number_of_trunc_bits; ++bit_index)
    {
        if ((a_bits & 1u) != 0u)
        {
            accumulated += b_shifted & ap_mask;
        }
        a_bits >>= 1;
        b_shifted <<= 1;
    }

    return accumulated;
}
+uint32_t error_term(uint32_t a, + uint32_t b, + uint32_t ap_mask, + uint32_t number_of_trunc_bits); + +#endif /* ERROR_TERM */ diff --git a/network/multiplication_approximation_gpu_cpp/Makefile b/network/multiplication_approximation_gpu_cpp/Makefile new file mode 100644 index 0000000..9454744 --- /dev/null +++ b/network/multiplication_approximation_gpu_cpp/Makefile @@ -0,0 +1,40 @@ +include ../.env +export + +name = MultiplicationApproximation +type = GPU + +PYPOSTFIX := $(shell $(PYBIN)python3-config --extension-suffix) +PYBIND11INCLUDE := $(shell $(PYBIN)python3 -m pybind11 --includes) +PARAMETERS_O = $(PARAMETERS_O_GPU) $(PYBIND11INCLUDE) +PARAMETERS_Linker = $(PARAMETERS_Linker_GPU) + +so_file = Py$(name)$(type)$(PYPOSTFIX) +pyi_file = Py$(name)$(type).pyi +all: ../$(so_file) + +$(O_DIRS)$(name)$(type).o: $(name)$(type).h \ + $(name)$(type).cu \ + gpu_error_term.cu \ + gpu_approximation_multiplication_function.cu + mkdir -p $(O_DIRS) + $(NVCC) $(PARAMETERS_O) -c $(name)$(type).cu -o $(O_DIRS)$(name)$(type).o + +$(O_DIRS)Py$(name)$(type).o: $(name)$(type).h Py$(name)$(type).cpp + mkdir -p $(O_DIRS) + $(NVCC) $(PARAMETERS_O) -c Py$(name)$(type).cpp -o $(O_DIRS)Py$(name)$(type).o + +../$(so_file): \ + $(O_DIRS)$(name)$(type).o \ + $(O_DIRS)Py$(name)$(type).o + $(NVCC) $(PARAMETERS_Linker) -o ../$(so_file) \ + $(O_DIRS)$(name)$(type).o \ + $(O_DIRS)Py$(name)$(type).o + + +####################### +clean: + rm -rf $(O_DIRS) + rm -f ../$(so_file) + rm -f ../$(pyi_file) + diff --git a/network/multiplication_approximation_gpu_cpp/MultiplicationApproximationGPU.cu b/network/multiplication_approximation_gpu_cpp/MultiplicationApproximationGPU.cu new file mode 100644 index 0000000..32beb1c --- /dev/null +++ b/network/multiplication_approximation_gpu_cpp/MultiplicationApproximationGPU.cu @@ -0,0 +1,182 @@ +#include "MultiplicationApproximationGPU.h" + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include 
"gpu_approximation_multiplication_function.cu" + +MultiplicationApproximationGPU::MultiplicationApproximationGPU() +{ + +}; + +MultiplicationApproximationGPU::~MultiplicationApproximationGPU() +{ + +}; + +void MultiplicationApproximationGPU::entrypoint( + int64_t np_input_pointer_addr, + int64_t np_weight_pointer_addr, + int64_t np_output_pointer_addr, + int64_t pattern_dim, + int64_t feature_dim, + int64_t x_dim, + int64_t y_dim, + int64_t input_channel_dim, + int64_t number_of_processes, + bool approximation_enable, + int64_t number_of_trunc_bits, + int64_t number_of_frac) +{ + + // int64_t number_of_pattern = pattern_dim; + + float* np_input_pointer = (float*)np_input_pointer_addr; + float* np_weight_pointer = (float*)np_weight_pointer_addr; + float* np_output_pointer = (float*)np_output_pointer_addr; + + assert((np_input_pointer != nullptr)); + assert((np_output_pointer != nullptr)); + assert((np_weight_pointer != nullptr)); + + assert((pattern_dim > 0)); + assert((feature_dim > 0)); + assert((x_dim > 0)); + assert((y_dim > 0)); + assert((input_channel_dim > 0)); + + assert ((number_of_processes <= 0)); + + calculate_gpu(np_input_pointer, np_weight_pointer, + np_output_pointer, pattern_dim, feature_dim, x_dim, y_dim, + input_channel_dim, approximation_enable, + number_of_trunc_bits, number_of_frac); + + return; +}; + +__global__ void kernel_approx_multiplication( + float* __restrict__ input_pointer, + float* __restrict__ weight_pointer, + float* __restrict__ output_pointer, + uint64_t pattern_dim, + uint64_t feature_dim, + uint64_t x_dim, + uint64_t y_dim, + uint64_t input_channel_dim, + size_t max_threadable_tasks, + uint64_t input_index_scale, + uint64_t number_of_frac_bits, + bool approximation_enable, + uint64_t number_of_trunc_bits, + uint32_t ap_mask) +{ + int idx = threadIdx.x + blockIdx.x * blockDim.x; + + if (idx < max_threadable_tasks) + { + int pattern_id = idx / feature_dim; + int feature_id = idx - (pattern_id * feature_dim); + int x_id = 
blockIdx.y; + int y_id = blockIdx.z; + + float* weight_pointer_sub = weight_pointer + feature_id * input_channel_dim; + float* input_pointer_sub = input_pointer + pattern_id * input_channel_dim * x_dim * y_dim + x_id * y_dim + y_id; + float* output_pointer_sub = output_pointer + + pattern_id * feature_dim * x_dim * y_dim + + feature_id * x_dim * y_dim + x_id * y_dim + y_id; + *output_pointer_sub = 0.0; + + for (size_t counter = 0; counter < input_channel_dim; counter++) + { + *output_pointer_sub += gpu_approximation_multiplication_function( + weight_pointer_sub[counter], + input_pointer_sub[counter * input_index_scale], + number_of_frac_bits, approximation_enable, + number_of_trunc_bits, ap_mask); + } + } +}; + +void MultiplicationApproximationGPU::calculate_gpu( + float* np_input_pointer, + float* np_weight_pointer, + float* np_output_pointer, + size_t pattern_dim, + size_t feature_dim, + size_t x_dim, + size_t y_dim, + size_t input_channel_dim, + bool approximation_enable, + size_t number_of_trunc_bits, + size_t number_of_frac_bits) +{ + + uint32_t ap_mask = static_cast(pow(2, number_of_trunc_bits)) - 1; + // std::cout << approximation_enable << std::endl; + // std::cout << number_of_trunc_bits << std::endl; + // std::cout << number_of_frac_bits << std::endl; + + cudaError_t status; + assert((x_dim < 65535)); + assert((y_dim < 65535)); + + // ////////////////////////////////////// + // Calculate the distribution on the GPU + // ////////////////////////////////////// + + int min_grid_size; + int block_size; + int grid_size; + + size_t dynamic_s_mem_size = 0; + size_t max_threadable_tasks = pattern_dim * feature_dim * x_dim * y_dim; + + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html?highlight=blocksize#occupancy-calculator + status = cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, + (void*)kernel_approx_multiplication, + dynamic_s_mem_size, max_threadable_tasks); + assert((status == cudaSuccess)); + + // 
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications + // Maximum dimensionality of grid of thread blocks: 3 + // Maximum x -dimension of a grid of thread blocks: (2^31)-1 + // Maximum y- or z-dimension of a grid of thread blocks: 65535 + + // Round up according to array size + grid_size = ((pattern_dim * feature_dim) + block_size - 1) / block_size; + + // std::cout << min_grid_size << std::endl; + // std::cout << grid_size << std::endl; + // std::cout << block_size << std::endl; + // std::cout << max_threadable_tasks << std::endl; + + dim3 grid(grid_size, x_dim, y_dim); + + kernel_approx_multiplication<<>>(np_input_pointer, + np_weight_pointer, + np_output_pointer, + pattern_dim, + feature_dim, + x_dim, + y_dim, + input_channel_dim, + (pattern_dim * feature_dim), + (x_dim * y_dim), + number_of_frac_bits, + approximation_enable, + number_of_trunc_bits, + ap_mask); + + status = cudaDeviceSynchronize(); + assert((status == cudaSuccess)); + return; +}; diff --git a/network/multiplication_approximation_gpu_cpp/MultiplicationApproximationGPU.h b/network/multiplication_approximation_gpu_cpp/MultiplicationApproximationGPU.h new file mode 100644 index 0000000..a985b1b --- /dev/null +++ b/network/multiplication_approximation_gpu_cpp/MultiplicationApproximationGPU.h @@ -0,0 +1,44 @@ +#ifndef MULTIPLICATIONAPPROXIMATIONGPU +#define MULTIPLICATIONAPPROXIMATIONGPU + +#include +#include +#include + +class MultiplicationApproximationGPU +{ +public: + MultiplicationApproximationGPU(); + ~MultiplicationApproximationGPU(); + + void entrypoint( + int64_t np_input_pointer_addr, + int64_t np_weight_pointer_addr, + int64_t np_output_pointer_addr, + int64_t pattern_dim, + int64_t feature_dim, + int64_t x_dim, + int64_t y_dim, + int64_t input_channel_dim, + int64_t number_of_processes, + bool approximation_enable, + int64_t number_of_trunc_bits, + int64_t number_of_frac); + +private: + void calculate_gpu( + float* input_pointer, + float* 
// Device-side approximate float multiply: returns weight * input computed
// through the truncated fixed-point multiplier model. Scalar counterpart
// of the host-side vectorized approximation_multiplication_function.
__device__ float gpu_approximation_multiplication_function(
    float weight,
    float input,
    size_t number_of_frac_bits,
    bool approximation_enable,
    size_t number_of_trunc_bits,
    uint32_t ap_mask)
{
    float weight_copy = weight;
    float input_copy = input;

    // Bit-level views of the two operands.
    uint32_t* weight_pointer_mod = (uint32_t*)&weight_copy;
    uint32_t* input_pointer_mod = (uint32_t*)&input_copy;

    // Product sign: XOR of the operand sign bits.
    uint32_t sign_temp = (*weight_pointer_mod & 0x80000000) ^
        (*input_pointer_mod & 0x80000000);

    // Biased 8-bit exponents.
    uint32_t ap_input_exponent = (*input_pointer_mod << 1) >> 24;
    uint32_t ap_weight_exponent = (*weight_pointer_mod << 1) >> 24;

    // Mantissas with the implicit leading 1 restored, reduced to
    // number_of_frac_bits fixed-point bits.
    uint64_t shift_value = 32 - number_of_frac_bits;

    uint32_t ap_input_mantissa =
        ((*input_pointer_mod << 8) | 0x80000000) >> shift_value;

    uint32_t ap_weight_mantissa =
        ((*weight_pointer_mod << 8) | 0x80000000) >> shift_value;

    // Zero has no implicit leading 1 — force its mantissa back to zero.
    if (input == 0)
    {
        ap_input_mantissa = 0;
    }
    if (weight == 0)
    {
        ap_weight_mantissa = 0;
    }

    // Exact integer product of the fixed-point mantissas.
    uint64_t ap_result = static_cast<uint64_t>(ap_input_mantissa) *
        static_cast<uint64_t>(ap_weight_mantissa);

    uint32_t temp;

    // Truncated-multiplier correction: subtract the dropped partial
    // products, clamping the result at zero.
    if (approximation_enable == true)
    {
        temp = gpu_error_term(ap_weight_mantissa, ap_input_mantissa, ap_mask,
            number_of_trunc_bits);
        if (temp > ap_result)
        {
            ap_result = 0;
        }
        else
        {
            ap_result -= temp;
        }
    }

    // Convert back to float and patch exponent and sign.
    float output = static_cast<float>(ap_result);

    if (ap_result == 0)
    {
        output = 0.0;
    }
    else
    {
        uint32_t* output_pointer_mod = (uint32_t*)&output;

        uint32_t ap_output_exponent = (*output_pointer_mod << 1) >> 24;
        // Undo the fixed-point scaling of both factors (may wrap; the
        // > 252 check below catches the underflow).
        ap_output_exponent -= 2 * number_of_frac_bits;
        temp = ap_input_exponent + ap_weight_exponent + ap_output_exponent;
        if (temp > 252)
        {
            // 252 = 2 * 126 compensates the double exponent bias.
            ap_output_exponent = temp - 252;
        }
        else
        {
            // New exponent would underflow — clamp to zero.
            ap_output_exponent = 0;
        }

        // Strip the converted value's own sign and exponent...
        *output_pointer_mod = (*output_pointer_mod << 9) >> 9;

        // ...install the corrected exponent...
        *output_pointer_mod += ap_output_exponent << 23;

        // ...and the product's sign.
        *output_pointer_mod += sign_temp;
    }
    return output;
};
// Device-side truncated-multiplier error term: for each of the low
// number_of_trunc_bits bits set in the weight mantissa, accumulate the
// correspondingly left-shifted, mask-restricted slice of the input
// mantissa. Mirrors the host-side error_term().
__device__ uint32_t gpu_error_term(
    uint32_t ap_weight_mantissa,
    uint32_t ap_input_mantissa,
    uint32_t ap_mask,
    uint32_t number_of_trunc_bits)
{
    uint32_t accumulated = 0;

    uint32_t weight_bits = ap_weight_mantissa;
    uint32_t input_shifted = ap_input_mantissa & ap_mask;

    // Walk the truncated bit positions, shifting the input in lockstep.
    for (uint32_t bit_index = 0; bit_index < number_of_trunc_bits;
        bit_index++)
    {
        if ((weight_bits & 1) == 1)
        {
            accumulated += input_shifted & ap_mask;
        }
        weight_bits >>= 1;
        input_shifted <<= 1;
    }

    return accumulated;
}
a/network/spike_generation_cpu_cpp/PySpikeGenerationCPU.cpp b/network/spike_generation_cpu_cpp/PySpikeGenerationCPU.cpp new file mode 100644 index 0000000..0897c39 --- /dev/null +++ b/network/spike_generation_cpu_cpp/PySpikeGenerationCPU.cpp @@ -0,0 +1,14 @@ + +#include + +#include "SpikeGenerationCPU.h" + +namespace py = pybind11; + +PYBIND11_MODULE(PySpikeGenerationCPU, m) { + m.doc() = "SpikeGenerationCPU Module"; + py::class_(m, "SpikeGenerationCPU") + .def(py::init<>()) + .def("spike_generation", + &SpikeGenerationCPU::entrypoint); +} diff --git a/network/spike_generation_cpu_cpp/SpikeGenerationCPU.cpp b/network/spike_generation_cpu_cpp/SpikeGenerationCPU.cpp new file mode 100644 index 0000000..104fe94 --- /dev/null +++ b/network/spike_generation_cpu_cpp/SpikeGenerationCPU.cpp @@ -0,0 +1,200 @@ +#include "SpikeGenerationCPU.h" + +#include +#include +#include + +#include +#include +#include + + +SpikeGenerationCPU::SpikeGenerationCPU() +{ + +}; + +SpikeGenerationCPU::~SpikeGenerationCPU() +{ + +}; + +void SpikeGenerationCPU::entrypoint( + int64_t input_pointer_addr, + int64_t input_dim_0, + int64_t input_dim_1, + int64_t input_dim_2, + int64_t input_dim_3, + int64_t random_values_pointer_addr, + int64_t random_values_dim_0, + int64_t random_values_dim_1, + int64_t random_values_dim_2, + int64_t random_values_dim_3, + int64_t output_pointer_addr, + int64_t output_dim_0, + int64_t output_dim_1, + int64_t output_dim_2, + int64_t output_dim_3, + int64_t number_of_cpu_processes) +{ + + float* input_pointer = (float*)input_pointer_addr; + float* random_values_pointer = (float*)random_values_pointer_addr; + int64_t* output_pointer = (int64_t*)output_pointer_addr; + + // Input + assert((input_pointer != nullptr)); + assert((input_dim_0 > 0)); + assert((input_dim_1 > 0)); + assert((input_dim_2 > 0)); + assert((input_dim_3 > 0)); + + // Random + assert((random_values_pointer != nullptr)); + assert((random_values_dim_0 > 0)); + assert((random_values_dim_1 > 0)); + 
assert((random_values_dim_2 > 0)); + assert((random_values_dim_3 > 0)); + + // Output + assert((output_pointer != nullptr)); + assert((output_dim_0 > 0)); + assert((output_dim_1 > 0)); + assert((output_dim_2 > 0)); + assert((output_dim_3 > 0)); + + // Input + size_t input_dim_c0 = input_dim_1 * input_dim_2 * input_dim_3; + size_t input_dim_c1 = input_dim_2 * input_dim_3; + size_t input_dim_c2 = input_dim_3; + + // Random + size_t random_values_dim_c0 = + random_values_dim_1 * random_values_dim_2 * random_values_dim_3; + size_t random_values_dim_c1 = + random_values_dim_2 * random_values_dim_3; + size_t random_values_dim_c2 = random_values_dim_3; + + // Output + size_t output_dim_c0 = + output_dim_1 * output_dim_2 * output_dim_3; + size_t output_dim_c1 = output_dim_2 * output_dim_3; + size_t output_dim_c2 = output_dim_3; + + size_t number_of_pattern = input_dim_0; + size_t h_dim = input_dim_1; + size_t spike_dim = output_dim_1; + size_t x_dim = output_dim_2; + size_t y_dim = output_dim_2; + + assert((number_of_cpu_processes > 0)); + + omp_set_num_threads(number_of_cpu_processes); + // DEBUG: + // omp_set_num_threads(1); + +#pragma omp parallel for + for (size_t pattern_id = 0; pattern_id < number_of_pattern; pattern_id++) + { + spike_generation( + input_pointer, + input_dim_c0, + input_dim_c1, + input_dim_c2, + random_values_pointer, + random_values_dim_c0, + random_values_dim_c1, + random_values_dim_c2, + output_pointer, + output_dim_c0, + output_dim_c1, + output_dim_c2, + x_dim, + y_dim, + spike_dim, + h_dim, + pattern_id); + } + + return; +}; + +void SpikeGenerationCPU::spike_generation( + float* input_pointer, + size_t input_dim_c0, + size_t input_dim_c1, + size_t input_dim_c2, + float* random_values_pointer, + size_t random_values_dim_c0, + size_t random_values_dim_c1, + size_t random_values_dim_c2, + int64_t* output_pointer, + size_t output_dim_c0, + size_t output_dim_c1, + size_t output_dim_c2, + size_t x_dim, + size_t y_dim, + size_t spike_dim, + size_t 
h_dim, + size_t pattern_id) +{ + + float* p_ptr = nullptr; + int64_t* out_ptr = nullptr; + float* rand_ptr = nullptr; + + for (size_t counter_x = 0; counter_x < x_dim; counter_x++) + { + for (size_t counter_y = 0; counter_y < y_dim; counter_y++) + { + p_ptr = input_pointer + pattern_id * input_dim_c0 + + counter_x * input_dim_c2 + counter_y; + // + counter * input_dim_c1 + + out_ptr = output_pointer + pattern_id * output_dim_c0 + + counter_x * output_dim_c2 + counter_y; + // + counter * output_dim_c1 + + rand_ptr = random_values_pointer + + pattern_id * random_values_dim_c0 + + counter_x * random_values_dim_c2 + counter_y; + // + counter * random_values_dim_c1 + + for (size_t counter = 0; counter < spike_dim; counter++) + { + out_ptr[counter * output_dim_c1] = lower_bound(p_ptr, + h_dim, + input_dim_c1, + rand_ptr[counter * random_values_dim_c1]); + } + } + } + + return; +}; + +// algorithmic idea stolen from libc++ +size_t SpikeGenerationCPU::lower_bound(float* data_ptr, + size_t data_length, + size_t data_ptr_stride, + float compare_to_value) +{ + + size_t start_of_range = 0; + size_t length_of_range = data_length; + + while (length_of_range != 0) + { + size_t half_length = length_of_range >> 1; + size_t actual_position = start_of_range + half_length; + + if (data_ptr[actual_position * data_ptr_stride] < compare_to_value) + { + start_of_range = ++actual_position; + length_of_range -= half_length + 1; + } + else + length_of_range = half_length; + } + return start_of_range; +}; diff --git a/network/spike_generation_cpu_cpp/SpikeGenerationCPU.h b/network/spike_generation_cpu_cpp/SpikeGenerationCPU.h new file mode 100644 index 0000000..f8b731a --- /dev/null +++ b/network/spike_generation_cpu_cpp/SpikeGenerationCPU.h @@ -0,0 +1,38 @@ +#ifndef SPIKEGENERATIONCPU +#define SPIKEGENERATIONCPU + +#include + +#include +#include + +class SpikeGenerationCPU { +public: + SpikeGenerationCPU(); + ~SpikeGenerationCPU(); + + void entrypoint( + int64_t input_pointer_addr, int64_t 
input_dim_0, int64_t input_dim_1, + int64_t input_dim_2, int64_t input_dim_3, + int64_t random_values_pointer_addr, int64_t random_values_dim_0, + int64_t random_values_dim_1, int64_t random_values_dim_2, + int64_t random_values_dim_3, int64_t output_pointer_addr, + int64_t output_dim_0, int64_t output_dim_1, int64_t output_dim_2, + int64_t output_dim_3, int64_t number_of_cpu_processes); + +private: + void spike_generation(float* input_pointer, size_t input_dim_c0, + size_t input_dim_c1, size_t input_dim_c2, + float* random_values_pointer, + size_t random_values_dim_c0, + size_t random_values_dim_c1, + size_t random_values_dim_c2, int64_t* output_pointer, + size_t output_dim_c0, size_t output_dim_c1, + size_t output_dim_c2, size_t x_dim, size_t y_dim, + size_t spike_dim, size_t h_dim, size_t pattern_id); + + size_t lower_bound(float* data_ptr, size_t data_length, + size_t data_ptr_stride, float compare_to_value); +}; + +#endif /* SPIKEGENERATIONCPU */ diff --git a/network/spike_generation_gpu_cpp/Makefile b/network/spike_generation_gpu_cpp/Makefile new file mode 100644 index 0000000..5817c60 --- /dev/null +++ b/network/spike_generation_gpu_cpp/Makefile @@ -0,0 +1,33 @@ +include ../.env +export + +name = SpikeGeneration +type = GPU + +PYPOSTFIX := $(shell $(PYBIN)python3-config --extension-suffix) +PYBIND11INCLUDE := $(shell $(PYBIN)python3 -m pybind11 --includes) +PARAMETERS_O = $(PARAMETERS_O_GPU) $(PYBIND11INCLUDE) +PARAMETERS_Linker = $(PARAMETERS_Linker_GPU) + +so_file = Py$(name)$(type)$(PYPOSTFIX) +pyi_file = Py$(name)$(type).pyi +all: ../$(so_file) + +$(O_DIRS)$(name)$(type).o: $(name)$(type).h $(name)$(type).cu + mkdir -p $(O_DIRS) + $(NVCC) $(PARAMETERS_O) -c $(name)$(type).cu -o $(O_DIRS)$(name)$(type).o + +$(O_DIRS)Py$(name)$(type).o: $(name)$(type).h Py$(name)$(type).cpp + mkdir -p $(O_DIRS) + $(NVCC) $(PARAMETERS_O) -c Py$(name)$(type).cpp -o $(O_DIRS)Py$(name)$(type).o + +../$(so_file): $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o + $(NVCC) 
$(PARAMETERS_Linker) -o ../$(so_file) $(O_DIRS)$(name)$(type).o $(O_DIRS)Py$(name)$(type).o + + +####################### +clean: + rm -rf $(O_DIRS) + rm -f ../$(so_file) + rm -f ../$(pyi_file) + diff --git a/network/spike_generation_gpu_cpp/PySpikeGenerationGPU.cpp b/network/spike_generation_gpu_cpp/PySpikeGenerationGPU.cpp new file mode 100644 index 0000000..70f4581 --- /dev/null +++ b/network/spike_generation_gpu_cpp/PySpikeGenerationGPU.cpp @@ -0,0 +1,19 @@ + +#include + +#include "SpikeGenerationGPU.h" + +namespace py = pybind11; + +PYBIND11_MODULE(PySpikeGenerationGPU, m) +{ + m.doc() = "SpikeGenerationGPU Module"; + py::class_(m, "SpikeGenerationGPU") + .def(py::init<>()) + .def("gpu_occupancy_export", + &SpikeGenerationGPU::gpu_occupancy_export) + .def("gpu_occupancy_import", + &SpikeGenerationGPU::gpu_occupancy_import) + .def("spike_generation", + &SpikeGenerationGPU::entrypoint); +} diff --git a/network/spike_generation_gpu_cpp/SpikeGenerationGPU.cu b/network/spike_generation_gpu_cpp/SpikeGenerationGPU.cu new file mode 100644 index 0000000..d439207 --- /dev/null +++ b/network/spike_generation_gpu_cpp/SpikeGenerationGPU.cu @@ -0,0 +1,275 @@ +#include "SpikeGenerationGPU.h" + +#include +#include +#include + +#include +#include +#include + + +SpikeGenerationGPU::SpikeGenerationGPU() +{ + +}; + +SpikeGenerationGPU::~SpikeGenerationGPU() +{ + +}; + +void SpikeGenerationGPU::entrypoint( + int64_t input_pointer_addr, + int64_t input_dim_0, + int64_t input_dim_1, + int64_t input_dim_2, + int64_t input_dim_3, + int64_t random_values_pointer_addr, + int64_t random_values_dim_0, + int64_t random_values_dim_1, + int64_t random_values_dim_2, + int64_t random_values_dim_3, + int64_t output_pointer_addr, + int64_t output_dim_0, + int64_t output_dim_1, + int64_t output_dim_2, + int64_t output_dim_3, + int64_t number_of_cpu_processes) +{ + + float* input_pointer = (float*)input_pointer_addr; + float* random_values_pointer = (float*)random_values_pointer_addr; + int64_t* 
output_pointer = (int64_t*)output_pointer_addr; + + // Input + assert((input_pointer != nullptr)); + assert((input_dim_0 > 0)); + assert((input_dim_1 > 0)); + assert((input_dim_2 > 0)); + assert((input_dim_3 > 0)); + + // Random + assert((random_values_pointer != nullptr)); + assert((random_values_dim_0 > 0)); + assert((random_values_dim_1 > 0)); + assert((random_values_dim_2 > 0)); + assert((random_values_dim_3 > 0)); + + // Output + assert((output_pointer != nullptr)); + assert((output_dim_0 > 0)); + assert((output_dim_1 > 0)); + assert((output_dim_2 > 0)); + assert((output_dim_3 > 0)); + + // Input + size_t input_dim_c0 = input_dim_1 * input_dim_2 * input_dim_3; + size_t input_dim_c1 = input_dim_2 * input_dim_3; + size_t input_dim_c2 = input_dim_3; + + // Random + size_t random_values_dim_c0 = + random_values_dim_1 * random_values_dim_2 * random_values_dim_3; + size_t random_values_dim_c1 = + random_values_dim_2 * random_values_dim_3; + size_t random_values_dim_c2 = random_values_dim_3; + + // Output + size_t output_dim_c0 = + output_dim_1 * output_dim_2 * output_dim_3; + size_t output_dim_c1 = output_dim_2 * output_dim_3; + size_t output_dim_c2 = output_dim_3; + + size_t number_of_pattern = input_dim_0; + size_t h_dim = input_dim_1; + size_t spike_dim = output_dim_1; + size_t x_dim = output_dim_2; + size_t y_dim = output_dim_2; + + assert ((number_of_cpu_processes <= 0)); + + gpu_spike_generation( + input_pointer, + input_dim_c0, + input_dim_c1, + input_dim_c2, + random_values_pointer, + random_values_dim_c0, + random_values_dim_c1, + random_values_dim_c2, + output_pointer, + output_dim_c0, + output_dim_c1, + output_dim_c2, + x_dim, + y_dim, + spike_dim, + h_dim, + number_of_pattern); + + return; +}; + +__device__ size_t gpu_lower_bound(float* __restrict__ data_ptr, + size_t data_length, + size_t data_ptr_stride, + float compare_to_value) +{ + + size_t start_of_range = 0; + size_t length_of_range = data_length; + + while (length_of_range != 0) + { + size_t 
half_length = length_of_range >> 1;
    size_t actual_position = start_of_range + half_length;

    if (data_ptr[actual_position * data_ptr_stride] < compare_to_value)
    {
      // Target is in the upper half; skip past the probed element.
      start_of_range = ++actual_position;
      length_of_range -= half_length + 1;
    }
    else
      // Target is at or below the probe; keep the lower half.
      length_of_range = half_length;
  }
  return start_of_range;
};

// One thread per (pattern, spike) pair along grid.x; blockIdx.y / blockIdx.z
// select the x / y position. Each thread resolves one spike by a lower-bound
// search of the input's h-channel column against its own random value.
__global__ void kernel_spike_generation(
  float* __restrict__ input_pointer,
  size_t input_dim_c0,
  size_t input_dim_c1,
  size_t input_dim_c2,
  float* __restrict__ random_values_pointer,
  size_t random_values_dim_c0,
  size_t random_values_dim_c1,
  size_t random_values_dim_c2,
  int64_t* __restrict__ output_pointer,
  size_t output_dim_c0,
  size_t output_dim_c1,
  size_t output_dim_c2,
  size_t x_dim,
  size_t y_dim,
  size_t spike_dim,
  size_t h_dim,
  size_t max_threadable_tasks)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;

  // Guard: grid.x is rounded up to whole blocks, so trailing threads idle.
  if (idx < max_threadable_tasks)
  {

    // Unflatten idx into (pattern, spike).
    size_t pattern_id = idx / spike_dim;
    size_t position_spike = idx - (pattern_id * spike_dim);

    size_t position_x = blockIdx.y;
    size_t position_y = blockIdx.z;

    // Start of the h-channel column at (pattern, :, x, y); element stride
    // along h is input_dim_c1.
    float* p_ptr = input_pointer + pattern_id * input_dim_c0 +
      position_x * input_dim_c2 + position_y;

    // Output cell (pattern, spike, x, y).
    int64_t* out_ptr = output_pointer + pattern_id * output_dim_c0 +
      position_x * output_dim_c2 + position_y +
      position_spike * output_dim_c1;

    // Random draw for this (pattern, spike, x, y).
    float* rand_ptr = random_values_pointer +
      pattern_id * random_values_dim_c0 +
      position_x * random_values_dim_c2 + position_y +
      position_spike * random_values_dim_c1;

    // NOTE(review): lower_bound sampling assumes the h column is a
    // non-decreasing (cumulative) distribution — confirm against the caller.
    *out_ptr = gpu_lower_bound(p_ptr,
      h_dim,
      input_dim_c1,
      *rand_ptr);
  }
};

// Host-side launcher: picks a block size via the CUDA occupancy API and runs
// kernel_spike_generation over a (pattern*spike, x, y) grid.
void SpikeGenerationGPU::gpu_spike_generation(
  float* input_pointer,
  size_t input_dim_c0,
  size_t input_dim_c1,
  size_t input_dim_c2,
  float* random_values_pointer,
  size_t random_values_dim_c0,
  size_t random_values_dim_c1,
  size_t random_values_dim_c2,
  int64_t* output_pointer,
  size_t output_dim_c0,
  size_t output_dim_c1,
  size_t output_dim_c2,
  size_t x_dim,
  size_t y_dim,
size_t spike_dim, + size_t h_dim, + size_t number_of_pattern) +{ + cudaError_t status; + assert((x_dim < 65535)); + assert((y_dim < 65535)); + + // ////////////////////////////////////// + // Calculate the distribution on the GPU + // ////////////////////////////////////// + + int min_grid_size; + int block_size; + int grid_size; + + size_t dynamic_s_mem_size = 0; + size_t max_threadable_tasks = number_of_pattern * spike_dim * x_dim * y_dim; + + status = cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, + (void*)kernel_spike_generation, + dynamic_s_mem_size, max_threadable_tasks); + assert((status == cudaSuccess)); + + grid_size = ((number_of_pattern * spike_dim) + block_size - 1) / block_size; + + dim3 grid(grid_size, x_dim, y_dim); + + + kernel_spike_generation<<>>( + input_pointer, + input_dim_c0, + input_dim_c1, + input_dim_c2, + random_values_pointer, + random_values_dim_c0, + random_values_dim_c1, + random_values_dim_c2, + output_pointer, + output_dim_c0, + output_dim_c1, + output_dim_c2, + x_dim, + y_dim, + spike_dim, + h_dim, + (number_of_pattern * spike_dim)); + + cudaDeviceSynchronize(); + + return; +}; + + +void SpikeGenerationGPU::gpu_occupancy_export( + size_t dim_x, + size_t dim_y, + size_t number_of_pattern, + size_t spike_dim, + int64_t setting_memory_addr, + size_t setting_dim_0, + size_t setting_dim_1) +{ + return; +}; + +void SpikeGenerationGPU::gpu_occupancy_import( + int64_t setting_memory_addr, + size_t setting_dim_0, + size_t setting_dim_1) +{ + return; +}; \ No newline at end of file diff --git a/network/spike_generation_gpu_cpp/SpikeGenerationGPU.h b/network/spike_generation_gpu_cpp/SpikeGenerationGPU.h new file mode 100644 index 0000000..7059b4b --- /dev/null +++ b/network/spike_generation_gpu_cpp/SpikeGenerationGPU.h @@ -0,0 +1,70 @@ +#ifndef SPIKEGENERATIONGPU +#define SPIKEGENERATIONGPU + +#include + +#include +#include + +class SpikeGenerationGPU +{ +public: + SpikeGenerationGPU(); + ~SpikeGenerationGPU(); + + void 
entrypoint(
    // Raw tensor addresses (reinterpreted as device pointers) and their 4D
    // shapes [dim_0..dim_3]; number_of_cpu_processes selects the backend and
    // must be <= 0 for this GPU implementation.
    int64_t input_pointer_addr,
    int64_t input_dim_0,
    int64_t input_dim_1,
    int64_t input_dim_2,
    int64_t input_dim_3,
    int64_t random_values_pointer_addr,
    int64_t random_values_dim_0,
    int64_t random_values_dim_1,
    int64_t random_values_dim_2,
    int64_t random_values_dim_3,
    int64_t output_pointer_addr,
    int64_t output_dim_0,
    int64_t output_dim_1,
    int64_t output_dim_2,
    int64_t output_dim_3,
    int64_t number_of_cpu_processes);

  // No-op in this implementation (occupancy is determined at launch time);
  // kept for interface parity with other backends.
  void gpu_occupancy_export(
    size_t dim_x,
    size_t dim_y,
    size_t number_of_pattern,
    size_t spike_dim,
    int64_t setting_memory_addr,
    size_t setting_dim_0,
    size_t setting_dim_1);

  // No-op in this implementation; see gpu_occupancy_export.
  void gpu_occupancy_import(
    int64_t setting_memory_addr,
    size_t setting_dim_0,
    size_t setting_dim_1);

private:

  // Computes the launch configuration and runs the spike-generation kernel;
  // the *_dim_c* arguments are flat strides of the 4D tensors.
  void gpu_spike_generation(
    float* input_pointer,
    size_t input_dim_c0,
    size_t input_dim_c1,
    size_t input_dim_c2,
    float* random_values_pointer,
    size_t random_values_dim_c0,
    size_t random_values_dim_c1,
    size_t random_values_dim_c2,
    int64_t* output_pointer,
    size_t output_dim_c0,
    size_t output_dim_c1,
    size_t output_dim_c2,
    size_t x_dim,
    size_t y_dim,
    size_t spike_dim,
    size_t h_dim,
    size_t number_of_pattern);

};

#endif /* SPIKEGENERATIONGPU */