Skip to content

Instantly share code, notes, and snippets.

@ebraminio
Last active December 19, 2016 15:13
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save ebraminio/dcf773cc059ff4fb758cfe670e63a784 to your computer and use it in GitHub Desktop.
CUDA capabilities gist
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <stdio.h>
void print(char*, int[], int);
// Squares each element of `a` in place.
// Launch contract: a 1-D block where threadIdx.x indexes directly into `a`,
// i.e. the caller launches exactly one thread per array entry (single block).
__global__ void square(int *a) {
	int i = threadIdx.x;
	a[i] *= a[i];
	// No __syncthreads() needed: every thread touches only its own element,
	// so there is no inter-thread dependency to order within the block.
	// (The original barrier here was a no-op, and its comment — claiming a
	// barrier is needed when the thread count exceeds "available" threads —
	// was incorrect.)
}
// Squares a small host array on the GPU and prints it before and after.
// Returns 0 on success, 1 if any CUDA API call or the kernel fails.
int main() {
	int arr[] = { 1, 2, 3, 4, 5, 6 };
	int arraySizeOnMemory = sizeof arr;                      // total bytes in arr
	int arrayEntriesCount = arraySizeOnMemory / sizeof(int); // number of elements
	print("arr: ", arr, arrayEntriesCount); // our custom array printer

	int *arrGPU = NULL; // handle of GPU-allocated memory
	cudaError_t err = cudaMalloc((void**)&arrGPU, arraySizeOnMemory);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	// copy our array into the allocated GPU memory
	err = cudaMemcpy(arrGPU, arr, arraySizeOnMemory, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy (host-to-device) failed: %s\n", cudaGetErrorString(err));
		cudaFree(arrGPU);
		return 1;
	}

	dim3 dimBlock(arrayEntriesCount, 1); // one thread per array entry
	dim3 dimGrid(1, 1);                  // a single block covers all 6 elements
	square<<<dimGrid, dimBlock>>>(arrGPU); // actual GPU processing happens here
	// Kernel launches return no status directly; bad launch configs surface here.
	err = cudaGetLastError();
	if (err != cudaSuccess) {
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
		cudaFree(arrGPU);
		return 1;
	}
	// The launch is asynchronous: wait for completion and surface any
	// execution error. (cudaThreadSynchronize is deprecated; its replacement
	// is cudaDeviceSynchronize.)
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
		cudaFree(arrGPU);
		return 1;
	}

	// copy back the results
	err = cudaMemcpy(arr, arrGPU, arraySizeOnMemory, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy (device-to-host) failed: %s\n", cudaGetErrorString(err));
		cudaFree(arrGPU);
		return 1;
	}
	cudaFree(arrGPU); // free the GPU-allocated memory

	print("\nResult of GPU processing (x .^ 2): ", arr, arrayEntriesCount);
	getchar(); // don't exit immediately
	return 0;
}
// Writes `str` followed by the n entries of `arr` formatted as "{ a, b, c }".
void print(char* str, int arr[], int n) {
	printf("%s{ ", str);
	for (int idx = 0; idx < n; ++idx) {
		// Comma-separate every entry except the last one.
		const char* separator = (idx == n - 1) ? "" : ", ";
		printf("%d%s", arr[idx], separator);
	}
	printf(" }\n");
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment