Created
March 3, 2022 12:23
-
-
Save mfalt/b5f8461b74eb533b61ef9105030d1283 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
ERROR: LoadError: TaskFailedException
Stacktrace:
[1] wait
@ ./task.jl:334 [inlined]
[2] fetch(t::Task)
@ Base ./task.jl:349
[3] top-level scope
@ ~/knightvision/piece_recognition/KnightVisionServer/test/test_cuda.jl:39
nested task error: CUDNNError: CUDNN_STATUS_INTERNAL_ERROR (code 4)
Stacktrace:
[1] throw_api_error(res::CUDA.CUDNN.cudnnStatus_t)
@ CUDA.CUDNN ~/.julia/packages/CUDA/0IDh2/lib/cudnn/error.jl:22
[2] macro expansion
@ ~/.julia/packages/CUDA/0IDh2/lib/cudnn/error.jl:35 [inlined]
[3] cudnnCreate()
@ CUDA.CUDNN ~/.julia/packages/CUDA/0IDh2/lib/cudnn/base.jl:3
[4] #1218
@ ~/.julia/packages/CUDA/0IDh2/lib/cudnn/CUDNN.jl:72 [inlined]
[5] (::CUDA.APIUtils.var"#8#11"{CUDA.CUDNN.var"#1218#1224", CUDA.APIUtils.HandleCache{CUDA.CuContext, Ptr{Nothing}}, CUDA.CuContext})()
@ CUDA.APIUtils ~/.julia/packages/CUDA/0IDh2/lib/utils/cache.jl:24
[6] lock(f::CUDA.APIUtils.var"#8#11"{CUDA.CUDNN.var"#1218#1224", CUDA.APIUtils.HandleCache{CUDA.CuContext, Ptr{Nothing}}, CUDA.CuContext}, l::ReentrantLock)
@ Base ./lock.jl:190
[7] (::CUDA.APIUtils.var"#check_cache#9"{CUDA.APIUtils.HandleCache{CUDA.CuContext, Ptr{Nothing}}, CUDA.CuContext})(f::CUDA.CUDNN.var"#1218#1224")
@ CUDA.APIUtils ~/.julia/packages/CUDA/0IDh2/lib/utils/cache.jl:22
[8] pop!
@ ~/.julia/packages/CUDA/0IDh2/lib/utils/cache.jl:46 [inlined]
[9] (::CUDA.CUDNN.var"#new_state#1223")(cuda::NamedTuple{(:device, :context, :stream, :math_mode, :math_precision), Tuple{CUDA.CuDevice, CUDA.CuContext, CUDA.CuStream, CUDA.MathMode, Symbol}})
@ CUDA.CUDNN ~/.julia/packages/CUDA/0IDh2/lib/cudnn/CUDNN.jl:71
[10] #1221
@ ~/.julia/packages/CUDA/0IDh2/lib/cudnn/CUDNN.jl:86 [inlined]
[11] get!(default::CUDA.CUDNN.var"#1221#1227"{CUDA.CUDNN.var"#new_state#1223", NamedTuple{(:device, :context, :stream, :math_mode, :math_precision), Tuple{CUDA.CuDevice, CUDA.CuContext, CUDA.CuStream, CUDA.MathMode, Symbol}}}, h::Dict{CUDA.CuContext, NamedTuple{(:handle, :stream), Tuple{Ptr{Nothing}, CUDA.CuStream}}}, key::CUDA.CuContext)
@ Base ./dict.jl:464
[12] handle()
@ CUDA.CUDNN ~/.julia/packages/CUDA/0IDh2/lib/cudnn/CUDNN.jl:85
[13] (::CUDA.CUDNN.var"#1145#1147"{CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CUDNN.cudnnActivationMode_t, CUDA.CUDNN.cudnnConvolutionDescriptor, CUDA.CUDNN.cudnnFilterDescriptor, CUDA.CUDNN.cudnnTensorDescriptor, CUDA.CUDNN.cudnnTensorDescriptor, Base.RefValue{Float32}, Base.RefValue{Float32}, CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CUDNN.cudnnConvolutionFwdAlgoPerfStruct})(workspace::CUDA.CuArray{UInt8, 1, CUDA.Mem.DeviceBuffer})
@ CUDA.CUDNN ~/.julia/packages/CUDA/0IDh2/lib/cudnn/convolution.jl:105
[14] with_workspace(f::CUDA.CUDNN.var"#1145#1147"{CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CUDNN.cudnnActivationMode_t, CUDA.CUDNN.cudnnConvolutionDescriptor, CUDA.CUDNN.cudnnFilterDescriptor, CUDA.CUDNN.cudnnTensorDescriptor, CUDA.CUDNN.cudnnTensorDescriptor, Base.RefValue{Float32}, Base.RefValue{Float32}, CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CUDNN.cudnnConvolutionFwdAlgoPerfStruct}, eltyp::Type{UInt8}, size::CUDA.APIUtils.var"#2#3"{UInt64}, fallback::Nothing; keep::Bool)
@ CUDA.APIUtils ~/.julia/packages/CUDA/0IDh2/lib/utils/call.jl:77
[15] with_workspace
@ ~/.julia/packages/CUDA/0IDh2/lib/utils/call.jl:58 [inlined]
[16] #with_workspace#1
@ ~/.julia/packages/CUDA/0IDh2/lib/utils/call.jl:53 [inlined]
[17] with_workspace (repeats 2 times)
@ ~/.julia/packages/CUDA/0IDh2/lib/utils/call.jl:53 [inlined]
[18] cudnnConvolutionForwardAD(w::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, bias::Nothing, z::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}; y::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, activation::CUDA.CUDNN.cudnnActivationMode_t, convDesc::CUDA.CUDNN.cudnnConvolutionDescriptor, wDesc::CUDA.CUDNN.cudnnFilterDescriptor, xDesc::CUDA.CUDNN.cudnnTensorDescriptor, yDesc::CUDA.CUDNN.cudnnTensorDescriptor, zDesc::CUDA.CUDNN.cudnnTensorDescriptor, biasDesc::Nothing, alpha::Base.RefValue{Float32}, beta::Base.RefValue{Float32}, dw::Base.RefValue{Any}, dx::Base.RefValue{Any}, dz::Base.RefValue{Any}, dbias::Base.RefValue{Any}, dready::Base.RefValue{Bool})
@ CUDA.CUDNN ~/.julia/packages/CUDA/0IDh2/lib/cudnn/convolution.jl:103
[19] #cudnnConvolutionForwardWithDefaults#1143
@ ~/.julia/packages/CUDA/0IDh2/lib/cudnn/convolution.jl:96 [inlined]
[20] #cudnnConvolutionForward!#1142
@ ~/.julia/packages/CUDA/0IDh2/lib/cudnn/convolution.jl:53 [inlined]
[21] conv!(y::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, w::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{2, (5, 5), 3, 6, 1, (1, 1), (0, 0, 0, 0), (1, 1), false}; alpha::Int64, beta::Int64, algo::Int64)
@ NNlibCUDA ~/.julia/packages/NNlibCUDA/IeeBk/src/cudnn/conv.jl:34
[22] conv!
@ ~/.julia/packages/NNlibCUDA/IeeBk/src/cudnn/conv.jl:27 [inlined]
[23] conv(x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, w::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{2, (5, 5), 3, 6, 1, (1, 1), (0, 0, 0, 0), (1, 1), false}; kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ NNlib ~/.julia/packages/NNlib/tvMmZ/src/conv.jl:91
[24] conv(x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, w::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{2, (5, 5), 3, 6, 1, (1, 1), (0, 0, 0, 0), (1, 1), false})
@ NNlib ~/.julia/packages/NNlib/tvMmZ/src/conv.jl:89
[25] (::Conv{2, 4, typeof(relu), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}})(x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
@ Flux ~/.julia/packages/Flux/BPPNj/src/layers/conv.jl:166
[26] applychain(fs::Tuple{Conv{2, 4, typeof(relu), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, MaxPool{2, 4}, Conv{2, 4, typeof(relu), CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, MaxPool{2, 4}, typeof(flatten), Dense{typeof(relu), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Dense{typeof(relu), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, Dense{typeof(identity), CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}, CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}}, x::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
@ Flux ~/.julia/packages/Flux/BPPNj/src/layers/basic.jl:47
[27] Chain
@ ~/.julia/packages/Flux/BPPNj/src/layers/basic.jl:49 [inlined]
[28] inference(imgs::CUDA.CuArray{Float32, 4, CUDA.Mem.DeviceBuffer})
@ Main ~/knightvision/piece_recognition/KnightVisionServer/test/test_cuda.jl:15
[29] (::var"#15#16")()
@ Main ./threadingconstructs.jl:178
in expression starting at /home/mattias/knightvision/piece_recognition/KnightVisionServer/test/test_cuda.jl:36
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment