SAXPY benchmark CUDA.jl (broadcasting)
@carstenbauer, created August 9, 2021 18:22
print("Loading modules...")
using BenchmarkTools
using CUDA
using DataFrames
using CSV
using Printf
println("done!")
const a = 3.1415f0
function saxpy_julia!(z,a,x,y)
z .= a .* x .+ y
return z
end
df = DataFrame(n=Int[], var"GFLOPS/s"=Float64[], var"GB/s"=Float64[], time=Float64[])
println("Running measurements...")
for i in 8:128
n = 1024*1024*i
x, y, z = CUDA.ones(n), CUDA.ones(n), CUDA.zeros(n)
t_saxpy = @belapsed CUDA.@sync saxpy_julia!($z,$a,$x,$y)
gflops = 2.0 * n * (1000)^(-3) / t_saxpy
bandwidth = 3.0 * sizeof(Float32) * n * (1000)^(-3) / t_saxpy
@printf("saxpy (julia): n= %12d %7.3f GFLOP/s %7.3f GB/s %7.3f s\n",
n, gflops, bandwidth, t_saxpy);
push!(df, (n, gflops, bandwidth, t_saxpy))
flush(stdout) # force immediate printing to .out logfile
# free memory (to be safe)
x, y, z = nothing, nothing, nothing
GC.gc(true)
end
println("done!")
print("Writing results to disk...")
CSV.write("bench_results.csv", df)
println("done!")
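As a quick sanity check (my addition, not part of the original gist), one can verify on small inputs that the broadcast version really computes a .* x .+ y, assuming the definitions above are loaded:

# with x = y = 1, every element of z must equal a + 1
x, y, z = CUDA.ones(1024), CUDA.ones(1024), CUDA.zeros(1024)
saxpy_julia!(z, a, x, y)
@assert Array(z) ≈ fill(a + 1.0f0, 1024)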
carstenbauer (Author) commented:
What is particularly curious is that for some values of n we essentially match the C performance, in a spike-like fashion. For example, at n = 15728640 we get 764.813 GB/s for C and 754.453 GB/s for Julia.

Interestingly, the positive spikes appear with a constant period: every fifth run is fast?!
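One quick way to check this impression from the saved results — a minimal sketch, assuming the bench_results.csv written by the script above:

using CSV, DataFrames, Statistics

df = CSV.read("bench_results.csv", DataFrame)
# recover the loop index i from n = 1024^2 * i and bucket by i mod 5
transform!(df, :n => ByRow(n -> (n ÷ 1024^2) % 5) => :residue)
# mean bandwidth per residue class; one class standing out would
# confirm the "every fifth run is fast" pattern
combine(groupby(df, :residue), "GB/s" => mean => :mean_GBs)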

carstenbauer commented Aug 9, 2021

Alright, using an explicit kernel closes the gap to C.

print("Loading modules...")
using BenchmarkTools
using CUDA
using DataFrames
using CSV
using Printf
println("done!")

const a = 3.1415f0

function saxpy_gpu_kernel!(z, a, x, y)
    # 1-based global thread index
    i = (blockIdx().x - 1) * blockDim().x + threadIdx().x
    if i <= length(z) # guard against out-of-bounds threads in the last block
        @inbounds z[i] = a * x[i] + y[i]
    end
    return nothing
end

# launch the kernel; CUDA.@sync blocks until the kernel has finished,
# so @belapsed measures the actual execution time
function saxpy_gpu!(z, a, x, y; nthreads, nblocks)
    CUDA.@sync @cuda(
        threads = nthreads,
        blocks = nblocks,
        saxpy_gpu_kernel!(z, a, x, y)
    )
end

# query how many threads per block are available on the GPU
nthreads = CUDA.attribute(
    device(),
    CUDA.DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK
)

df = DataFrame(n=Int[], var"GFLOP/s"=Float64[], var"GB/s"=Float64[], time=Float64[])

println("Running measurements...")
for i in 8:20 # truncated for this test; the full run uses 8:128
    n = 1024*1024*i
    x, y, z = CUDA.ones(n), CUDA.ones(n), CUDA.zeros(n)
    # compute how many blocks we need to use for the given `dim`
    nblocks = cld(n, nthreads)

    t_saxpy = @belapsed saxpy_gpu!($z, $a, $x, $y; nblocks=$nblocks, nthreads=$nthreads)

    # 2 flops (multiply + add) per element
    gflops = 2.0 * n * 1e-9 / t_saxpy
    # 3 arrays touched per element (read x, read y, write z), 4 bytes each
    bandwidth = 3.0 * sizeof(Float32) * n * 1e-9 / t_saxpy
    @printf("saxpy (julia): n= %12d %7.3f GFLOP/s %7.3f GB/s %7.3f s\n",
            n, gflops, bandwidth, t_saxpy)
    push!(df, (n, gflops, bandwidth, t_saxpy))
    flush(stdout) # force immediate printing to .out logfile
    # free memory (to be safe)
    x, y, z = nothing, nothing, nothing
    GC.gc(true)
end
println("done!")
print("Writing results to disk...")
CSV.write("bench_results.csv", df)
println("done!")

Output:

saxpy (julia): n=      8388608 125.526 GFLOP/s 753.158 GB/s   0.000 s
saxpy (julia): n=      9437184 124.185 GFLOP/s 745.109 GB/s   0.000 s
saxpy (julia): n=     10485760 126.514 GFLOP/s 759.086 GB/s   0.000 s
saxpy (julia): n=     11534336 127.279 GFLOP/s 763.674 GB/s   0.000 s
saxpy (julia): n=     12582912 126.933 GFLOP/s 761.601 GB/s   0.000 s
saxpy (julia): n=     13631488 127.728 GFLOP/s 766.366 GB/s   0.000 s
saxpy (julia): n=     14680064 128.188 GFLOP/s 769.127 GB/s   0.000 s
saxpy (julia): n=     15728640 128.661 GFLOP/s 771.967 GB/s   0.000 s
saxpy (julia): n=     16777216 128.307 GFLOP/s 769.844 GB/s   0.000 s
saxpy (julia): n=     17825792 129.633 GFLOP/s 777.799 GB/s   0.000 s
saxpy (julia): n=     18874368 129.656 GFLOP/s 777.934 GB/s   0.000 s
saxpy (julia): n=     19922944 129.482 GFLOP/s 776.895 GB/s   0.000 s
saxpy (julia): n=     20971520 129.418 GFLOP/s 776.510 GB/s   0.000 s
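For reference (my addition, not part of the benchmark above): instead of hard-coding the device's maximum threads per block, CUDA.jl can also suggest a launch configuration via its occupancy API. A minimal sketch, assuming the kernel definition above:

n = 1024^2 * 8
x, y, z = CUDA.ones(n), CUDA.ones(n), CUDA.zeros(n)

# compile the kernel without launching it, then query the occupancy API
kernel = @cuda launch=false saxpy_gpu_kernel!(z, a, x, y)
config = launch_configuration(kernel.fun)
nthreads = min(n, config.threads)
nblocks = cld(n, nthreads)

# launch with the suggested configuration
CUDA.@sync kernel(z, a, x, y; threads=nthreads, blocks=nblocks)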

carstenbauer commented Aug 10, 2021

Benchmark results:

[plot: saxpy_julia — benchmark results for the kernel and broadcasting versions]

Benchmark code (without the plotting part):

print("Loading modules...");
flush(stdout);
using CUDA
using DataFrames
using CSV
using Printf
println("done!");
flush(stdout);

# kernel definition
function saxpy_gpu_kernel!(z, a, x, y)
    i = (blockIdx().x - 1) * blockDim().x + threadIdx().x
    if i <= length(z)
        @inbounds z[i] = a * x[i] + y[i]
    end
    return nothing
end

# calling the kernel (no explicit sync here; CUDA.@elapsed below
# times the launch with CUDA events)
function saxpy_kernel!(z, a, x, y; nthreads, nblocks)
    @cuda(threads = nthreads, blocks = nblocks, saxpy_gpu_kernel!(z, a, x, y))
    return nothing
end

# high-level broadcasting version (nthreads/nblocks are unused;
# the keyword arguments only keep the call signature uniform)
function saxpy_broadcasting!(z, a, x, y; nthreads, nblocks)
    z .= a .* x .+ y
    return z
end

function run_benchmarks(saxpy!)
    # for storing the results
    df = DataFrame(; n=Int[], var"GFLOP/s"=Float64[], var"GB/s"=Float64[], kind=String[])
    fname = string(saxpy!)

    # query how many threads per block are available on the GPU
    nthreads = CUDA.attribute(device(), CUDA.DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK)

    for i in 8:128
        # vector length:
        # always a multiple of nthreads per block (1024)
        # such that blocks are fully busy (no remainder).
        n = 1024 * 1024 * i
        # arbitrary constant
        a = 3.1415f0
        # allocate GPU memory
        x, y, z = CUDA.ones(n), CUDA.ones(n), CUDA.zeros(n)
        # compute how many blocks we need to use for the given `dim`
        nblocks = cld(n, nthreads)
        # benchmark: minimum time of 10 trials
        t_saxpy = 1e6
        for j in 1:10
            t = CUDA.@elapsed saxpy!(z, a, x, y; nthreads, nblocks)
            t_saxpy = min(t_saxpy, t)
        end
        # print and save results
        # 2 flops (multiply + add) per element
        flops = 2.0 * n * 1e-9 / t_saxpy
        # 3 arrays touched per element (read x, read y, write z), 4 bytes each
        bandwidth = 3.0 * sizeof(Float32) * n * 1e-9 / t_saxpy
        @printf(
            "%s (julia): n= %12d %7.3f GFLOP/s %7.3f GB/s \n",
            fname,
            n,
            flops,
            bandwidth,
        )
        # force immediate printing to .out logfile
        flush(stdout)
        push!(df, (n, flops, bandwidth, fname))
        # explicitly free memory (to be safe)
        x, y, z = nothing, nothing, nothing
        GC.gc(true)
    end
    return df
end

println("Running measurements (kernel)...");
flush(stdout);
df_kernel = run_benchmarks(saxpy_kernel!)
df_broadcasting = run_benchmarks(saxpy_broadcasting!)
df = vcat(df_kernel, df_broadcasting)

println("done!")
print("Writing results to disk...")
CSV.write("bench_results.csv", df)
println("done!")
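For completeness, here is a hypothetical sketch of what the omitted plotting part could look like. This is my assumption, not the author's code; the Plots.jl styling is arbitrary:

# hypothetical plotting sketch, not the author's code
using CSV, DataFrames, Plots

df = CSV.read("bench_results.csv", DataFrame)
plt = plot(; xlabel="vector length n", ylabel="memory bandwidth [GB/s]",
           title="SAXPY benchmark (CUDA.jl)", legend=:bottomright)
# one line per benchmark kind (saxpy_kernel! vs saxpy_broadcasting!)
for sub in groupby(df, :kind)
    plot!(plt, sub.n, sub[!, "GB/s"]; label=first(sub.kind), marker=:circle, ms=2)
end
savefig(plt, "saxpy_julia.png") # name matching the figure shown above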
