#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
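// Compile with, e.g.: nvcc diffuse.cu -o diffuse
// (the file name "diffuse.cu" is assumed; it is not given in the original)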
template<int Nthreads>
__global__ void diffuse(float *uold_d, float *unew_d, int Ncells)
{
    // thread index within the block
    int itx = threadIdx.x;
    int ity = threadIdx.y;
    // global index (over all blocks)
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    // allocate shared memory and copy this block's tile over from global memory.
    // Shared memory is much faster than global memory: the copy costs each
    // thread a single read and write, while the stencil below needs 5 reads
    // to update one cell, so staging the tile in shared memory pays off.
    __shared__ float u_s[Nthreads][Nthreads];
    u_s[ity][itx] = uold_d[idy*Ncells+idx];
    // wait until all threads have filled the tile before reading neighbours
    __syncthreads();
    // 5-point stencil average; skip threads on the tile boundary, since their
    // neighbours are not in shared memory
    if(itx>0 && itx<Nthreads-1 && ity>0 && ity<Nthreads-1) {
        unew_d[idy*Ncells+idx] = (u_s[ity][itx] + u_s[ity-1][itx] + u_s[ity+1][itx] +
                                  u_s[ity][itx-1] + u_s[ity][itx+1])/5.f;
    }
    // debug
    //printf("%i %i %.2e\n", idx, idy, unew_d[idy*Ncells+idx]);
}
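// Note: because the shared tile carries no halo, cells on a block's edge are
// never written by diffuse and keep whatever unew_d already holds. A more
// complete version would load an (Nthreads+2) x (Nthreads+2) tile including
// the neighbouring cells; that extension is not shown here.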
// copy unew into uold so the next timestep reads the freshly computed values
__global__ void updateMem(float *uold_d, float *unew_d, int Ncells)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    uold_d[idy*Ncells+idx] = unew_d[idy*Ncells+idx];
}
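// An alternative to updateMem (a sketch, not part of the original): swap the
// uold_d/unew_d pointers on the host between timesteps, e.g.
//   float *tmp = uold_d; uold_d = unew_d; unew_d = tmp;
// which avoids launching an extra copy kernel.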
int main()
{
    int Ncells = 64; // number of cells ALONG ONE DIMENSION
    // we have Ncells x Ncells cells in total
    float *u_h, *uold_d, *unew_d; // host and device memory pointers
    // select a CUDA device
    cudaSetDevice(0);
    // allocate host memory
    u_h = (float *)malloc(sizeof(float)*Ncells*Ncells);
    // allocate device memory
    cudaMalloc((void **) &uold_d, sizeof(float)*Ncells*Ncells);
    cudaMalloc((void **) &unew_d, sizeof(float)*Ncells*Ncells);
    // zero the new array on the device (all-zero bytes give 0.0f)
    cudaMemset(unew_d, 0, sizeof(float)*Ncells*Ncells);
    // initialise the host array to zero
    for(int i=0; i<Ncells; i++) {
        for(int j=0; j<Ncells; j++) {
            u_h[j*Ncells+i] = 0.f;
        }
    }
    // seed a single hot cell
    u_h[22*Ncells+22] = 1.f;
    // copy the initial condition to the device
    cudaMemcpy(uold_d, u_h, sizeof(float)*Ncells*Ncells, cudaMemcpyHostToDevice);
    // this is how we define a 2D grid of threads and blocks:
    // 2x2 blocks of 32x32 threads cover the 64x64 cells exactly
    dim3 nThreads(32,32,1);
    dim3 nBlocks(2,2,1);
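    // For a general Ncells one would compute the block count instead of
    // hard-coding it (a sketch, not in the original):
    //   dim3 nBlocks((Ncells + nThreads.x - 1)/nThreads.x,
    //                (Ncells + nThreads.y - 1)/nThreads.y, 1);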
    // main loop: 120 timesteps
    for(int i=0; i<120; i++) {
        // run our physics
        // the template parameter <32> must match the block size;
        // 32x32 = 1024 threads is the largest block most devices allow
        // alt: diffuse<32> <<< dim3(2,2,1), dim3(32,32,1) >>> (...)
        diffuse<32> <<< nBlocks, nThreads >>> (uold_d, unew_d, Ncells);
        // copy unew into uold before the next step
        updateMem <<< nBlocks, nThreads >>> (uold_d, unew_d, Ncells);
    }
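    // (optional, not in the original) kernel launches do not report errors by
    // themselves; one could check after the loop with, e.g.:
    //   cudaError_t err = cudaGetLastError();
    //   if(err != cudaSuccess) printf("CUDA error: %s\n", cudaGetErrorString(err));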
    // copy back into host memory
    cudaMemcpy(u_h, unew_d, sizeof(float)*Ncells*Ncells, cudaMemcpyDeviceToHost);
    // output array
    for(int i=0; i<Ncells; i++) {
        for(int j=0; j<Ncells; j++) {
            printf("%i %i %.16f \n", i, j, u_h[j*Ncells+i]);
        }
    }
    // free device and host memory
    cudaFree(uold_d);
    cudaFree(unew_d);
    free(u_h);
    return 0;
}
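// To inspect the result one could redirect the "i j value" output to a file
// and plot it, e.g.: ./diffuse > out.txt  (assumed usage; the original gives
// no run command)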