From 6e9bdfa95e8053a338cf3a3802566cc5c5b31bee Mon Sep 17 00:00:00 2001
From: Chris Rackauckas
Date: Mon, 7 Aug 2023 11:47:49 -0400
Subject: [PATCH 01/11] Support Apple Accelerate and improve MKL integration

---
 ext/LinearSolveMKLExt.jl |  2 +-
 src/LinearSolve.jl       |  5 ++++
 src/appleaccelerate.jl   | 62 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 68 insertions(+), 1 deletion(-)
 create mode 100644 src/appleaccelerate.jl

diff --git a/ext/LinearSolveMKLExt.jl b/ext/LinearSolveMKLExt.jl
index bc40d049d..71e19e44a 100644
--- a/ext/LinearSolveMKLExt.jl
+++ b/ext/LinearSolveMKLExt.jl
@@ -39,7 +39,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization;
     A = convert(AbstractMatrix, A)
     if cache.isfresh
         cacheval = @get_cacheval(cache, :MKLLUFactorization)
-        fact = LU(getrf!(A)...)
+        fact = LU(getrf!(A)...; ipiv = fact.ipiv)
         cache.cacheval = fact
         cache.isfresh = false
     end
diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl
index 272639e34..107a027fc 100644
--- a/src/LinearSolve.jl
+++ b/src/LinearSolve.jl
@@ -23,6 +23,10 @@ using EnumX
 using Requires
 import InteractiveUtils
 
+using LinearAlgebra: BlasInt, LU
+using LinearAlgebra.LAPACK: require_one_based_indexing, chkfinite, chkstride1,
+    @blasfunc, chkargsok
+
 import GPUArraysCore
 import Preferences
 
@@ -87,6 +91,7 @@ include("solve_function.jl")
 include("default.jl")
 include("init.jl")
 include("extension_algs.jl")
+include("appleaccelerate.jl")
 include("deprecated.jl")
 
 @generated function SciMLBase.solve!(cache::LinearCache, alg::AbstractFactorization;
diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl
new file mode 100644
index 000000000..11b34cf22
--- /dev/null
+++ b/src/appleaccelerate.jl
@@ -0,0 +1,62 @@
+# For now, only use BLAS from Accelerate (that is to say, vecLib)
+global const libacc = "/System/Library/Frameworks/Accelerate.framework/Accelerate"
+global const libacc_info_plist = "/System/Library/Frameworks/Accelerate.framework/Versions/Current/Resources/Info.plist"
+
+"""
+```julia
+AppleAccelerateLUFactorization()
+```
+
+A wrapper over Apple's Accelerate Library. Uses direct calls to Accelerate in a way that pre-allocates workspace
+to avoid allocations and does not require libblastrampoline.
+"""
+struct AppleAccelerateLUFactorization <: AbstractFactorization end
+
+function is_new_accelerate_available()
+    libacc_hdl = dlopen_e(libacc)
+    if libacc_hdl == C_NULL
+        return false
+    end
+
+    if dlsym_e(libacc_hdl, "dgemm\$NEWLAPACK\$ILP64") == C_NULL
+        return false
+    end
+    return true
+end
+
+function aa_getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, BlasInt, min(size(A,1),size(A,2))), info = Ref{BlasInt}(), check = false)
+    require_one_based_indexing(A)
+    check && chkfinite(A)
+    chkstride1(A)
+    m, n = size(A)
+    lda = max(1,stride(A, 2))
+    ccall(("dgemm\$NEWLAPACK\$ILP64", libacc), Cvoid,
+        (Ref{BlasInt}, Ref{BlasInt}, Ptr{Float64},
+            Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}),
+        m, n, A, lda, ipiv, info)
+    chkargsok(info[])
+    A, ipiv, info[] #Error code is stored in LU factorization type
+end
+
+default_alias_A(::AppleAccelerateLUFactorization, ::Any, ::Any) = false
+default_alias_b(::AppleAccelerateLUFactorization, ::Any, ::Any) = false
+
+function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A, b, u, Pl, Pr,
+    maxiters::Int, abstol, reltol, verbose::Bool,
+    assumptions::OperatorAssumptions)
+    ArrayInterface.lu_instance(convert(AbstractMatrix, A))
+end
+
+function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorization;
+    kwargs...)
+ A = cache.A + A = convert(AbstractMatrix, A) + if cache.isfresh + cacheval = @get_cacheval(cache, :AppleAccelerateLUFactorization) + fact = LU(aa_getrf!(A)...; ipiv = fact.ipiv) + cache.cacheval = fact + cache.isfresh = false + end + y = ldiv!(cache.u, @get_cacheval(cache, :AppleAccelerateLUFactorization), cache.b) + SciMLBase.build_linear_solution(alg, y, nothing, cache) +end \ No newline at end of file From 2ea5488b27516be4a6ae57f103834c35798d6f7c Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 12:12:32 -0400 Subject: [PATCH 02/11] Update src/appleaccelerate.jl Co-authored-by: Elliot Saba --- src/appleaccelerate.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index 11b34cf22..ae5c63da7 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -30,7 +30,7 @@ function aa_getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, BlasInt, min( chkstride1(A) m, n = size(A) lda = max(1,stride(A, 2)) - ccall(("dgemm\$NEWLAPACK\$ILP64", libacc), Cvoid, + ccall(("dgetrf\$NEWLAPACK\$ILP64", libacc), Cvoid, (Ref{BlasInt}, Ref{BlasInt}, Ptr{Float64}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), m, n, A, lda, ipiv, info) From 42af75b3f51ce29bbcbd0bfd395a20153e8bfe12 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 12:12:36 -0400 Subject: [PATCH 03/11] Update src/appleaccelerate.jl Co-authored-by: Elliot Saba --- src/appleaccelerate.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index ae5c63da7..82f679865 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -1,6 +1,5 @@ # For now, only use BLAS from Accelerate (that is to say, vecLib) global const libacc = "/System/Library/Frameworks/Accelerate.framework/Accelerate" -global const libacc_info_plist = "/System/Library/Frameworks/Accelerate.framework/Versions/Current/Resources/Info.plist" """ ```julia From 946a45e1b7c3a0847fd8479070f134fc70ec8adc Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 14:48:01 -0400 Subject: [PATCH 04/11] Fix the dispatch --- ext/LinearSolveMKLExt.jl | 5 ++++- src/LinearSolve.jl | 1 + src/appleaccelerate.jl | 6 +++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ext/LinearSolveMKLExt.jl b/ext/LinearSolveMKLExt.jl index 71e19e44a..56b2d78a8 100644 --- a/ext/LinearSolveMKLExt.jl +++ b/ext/LinearSolveMKLExt.jl @@ -16,6 +16,9 @@ function getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, BlasInt, min(siz chkstride1(A) m, n = size(A) lda = max(1,stride(A, 2)) + if isempty(ipiv) + ipiv = similar(A, BlasInt, min(size(A,1),size(A,2))) + end ccall((@blasfunc(dgetrf_), MKL_jll.libmkl_rt), Cvoid, (Ref{BlasInt}, Ref{BlasInt}, Ptr{Float64}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), @@ -39,7 +42,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization; A = convert(AbstractMatrix, A) if cache.isfresh cacheval = @get_cacheval(cache, :MKLLUFactorization) - fact = LU(getrf!(A)...; ipiv = fact.ipiv) + fact = LU(getrf!(A; ipiv = cacheval.ipiv)...) 
cache.cacheval = fact cache.isfresh = false end diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 107a027fc..2c049f11e 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -190,6 +190,7 @@ export CudaOffloadFactorization export MKLPardisoFactorize, MKLPardisoIterate export PardisoJL export MKLLUFactorization +export AppleAccelerateLUFactorization export OperatorAssumptions, OperatorCondition diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index 82f679865..36fe1dc38 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -29,6 +29,10 @@ function aa_getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, BlasInt, min( chkstride1(A) m, n = size(A) lda = max(1,stride(A, 2)) + if isempty(ipiv) + ipiv = similar(A, BlasInt, min(size(A,1),size(A,2))) + end + ccall(("dgetrf\$NEWLAPACK\$ILP64", libacc), Cvoid, (Ref{BlasInt}, Ref{BlasInt}, Ptr{Float64}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), @@ -52,7 +56,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorizatio A = convert(AbstractMatrix, A) if cache.isfresh cacheval = @get_cacheval(cache, :AppleAccelerateLUFactorization) - fact = LU(aa_getrf!(A)...; ipiv = fact.ipiv) + fact = LU(aa_getrf!(A; ipiv = cacheval.ipiv)...) cache.cacheval = fact cache.isfresh = false end From 9ef71342f2b1f4ebd0454f392322b24958dda49d Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 14:49:35 -0400 Subject: [PATCH 05/11] fix up tests --- src/appleaccelerate.jl | 2 +- test/basictests.jl | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index 36fe1dc38..f8ec65160 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -11,7 +11,7 @@ to avoid allocations and does not require libblastrampoline. """ struct AppleAccelerateLUFactorization <: AbstractFactorization end -function is_new_accelerate_available() +function appleaccelerate_isavailable() libacc_hdl = dlopen_e(libacc) if libacc_hdl == C_NULL return false diff --git a/test/basictests.jl b/test/basictests.jl index e24b87a6e..a58d4987d 100644 --- a/test/basictests.jl +++ b/test/basictests.jl @@ -213,6 +213,9 @@ end test_interface(alg, prob1, prob2) end end + if LinearSolve.appleaccelerate_isavailable() + test_interface(AppleAccelerateLUFactorization(), prob1, prob2) + end end @testset "Generic Factorizations" begin From 94be1069b7f4beb493f9d5da3695a55e8ef54377 Mon Sep 17 00:00:00 2001 From: "Viral B. Shah" Date: Mon, 7 Aug 2023 17:25:48 -0400 Subject: [PATCH 06/11] Use LP64 BLAS and LAPACK from Accelerate These should always be available everywhere. 
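The ILP64 (64-bit integer) entry points are only exported by the newer Accelerate
LAPACK under `$NEWLAPACK$ILP64`-suffixed symbols, whereas the classic LP64 names
such as `dgetrf_` ship with every Accelerate, so the wrapper switches to `Cint`
arguments and the plain underscored symbols. As a rough sketch of the distinction
(mirroring `appleaccelerate_isavailable`; the helper name and return shape below
are illustrative only, not part of this patch):

```julia
using Libdl

const libacc = "/System/Library/Frameworks/Accelerate.framework/Accelerate"

# Probe which LAPACK symbol families Accelerate exports. The LP64 names
# (32-bit integers, trailing underscore) exist on every macOS that ships
# Accelerate; the ILP64 names (64-bit integers) only exist in the newer
# Accelerate LAPACK and carry the `$NEWLAPACK$ILP64` suffix.
function accelerate_lapack_flavors()
    hdl = Libdl.dlopen_e(libacc)
    hdl == C_NULL && return (lp64 = false, ilp64 = false)
    (lp64 = Libdl.dlsym_e(hdl, "dgetrf_") != C_NULL,
        ilp64 = Libdl.dlsym_e(hdl, "dgetrf\$NEWLAPACK\$ILP64") != C_NULL)
end
```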
--- Project.toml | 1 + src/appleaccelerate.jl | 23 +++++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/Project.toml b/Project.toml index 9ddde9976..09deed21d 100644 --- a/Project.toml +++ b/Project.toml @@ -12,6 +12,7 @@ GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" KLU = "ef3ab10e-7fda-4108-b977-705223b18434" Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" +Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" Preferences = "21216c6a-2e73-6563-6e65-726566657250" diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index f8ec65160..e0f6a5959 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -1,3 +1,6 @@ +using LinearAlgebra +using Libdl + # For now, only use BLAS from Accelerate (that is to say, vecLib) global const libacc = "/System/Library/Frameworks/Accelerate.framework/Accelerate" @@ -12,33 +15,33 @@ to avoid allocations and does not require libblastrampoline. struct AppleAccelerateLUFactorization <: AbstractFactorization end function appleaccelerate_isavailable() - libacc_hdl = dlopen_e(libacc) + libacc_hdl = Libdl.dlopen_e(libacc) if libacc_hdl == C_NULL return false end - if dlsym_e(libacc_hdl, "dgemm\$NEWLAPACK\$ILP64") == C_NULL + if dlsym_e(libacc_hdl, "dgetrf_") == C_NULL return false end return true end -function aa_getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, BlasInt, min(size(A,1),size(A,2))), info = Ref{BlasInt}(), check = false) +function aa_getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, Cint, min(size(A,1),size(A,2))), info = Ref{Cint}(), check = false) require_one_based_indexing(A) check && chkfinite(A) chkstride1(A) m, n = size(A) lda = max(1,stride(A, 2)) if isempty(ipiv) - ipiv = similar(A, BlasInt, min(size(A,1),size(A,2))) + ipiv = similar(A, Cint, min(size(A,1),size(A,2))) end - ccall(("dgetrf\$NEWLAPACK\$ILP64", libacc), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{Float64}, - Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), + ccall(("dgetrf_", libacc), Cvoid, + (Ref{Cint}, Ref{Cint}, Ptr{Float64}, + Ref{Cint}, Ptr{Cint}, Ptr{Cint}), m, n, A, lda, ipiv, info) - chkargsok(info[]) - A, ipiv, info[] #Error code is stored in LU factorization type + info[] < 0 && throw(ArgumentError("Invalid arguments sent to LAPACK dgetrf_")) + A, Vector{BlasInt}(ipiv), BlasInt(info[]) #Error code is stored in LU factorization type end default_alias_A(::AppleAccelerateLUFactorization, ::Any, ::Any) = false @@ -62,4 +65,4 @@ function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorizatio end y = ldiv!(cache.u, @get_cacheval(cache, :AppleAccelerateLUFactorization), cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) -end \ No newline at end of file +end From f78754f28d52bba0d2070f41be2bab6fddd83b85 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 18:14:13 -0400 Subject: [PATCH 07/11] handle the getrs as well and support more lapack installations --- ext/LinearSolveMKLExt.jl | 9 ++++---- src/appleaccelerate.jl | 44 +++++++++++++++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/ext/LinearSolveMKLExt.jl b/ext/LinearSolveMKLExt.jl index 56b2d78a8..da9f4673d 100644 --- a/ext/LinearSolveMKLExt.jl +++ b/ext/LinearSolveMKLExt.jl @@ -24,7 +24,7 @@ function getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, BlasInt, min(siz Ref{BlasInt}, Ptr{BlasInt}, 
Ptr{BlasInt}), m, n, A, lda, ipiv, info) chkargsok(info[]) - A, ipiv, info[] #Error code is stored in LU factorization type + A, ipiv, info[], info #Error code is stored in LU factorization type end default_alias_A(::MKLLUFactorization, ::Any, ::Any) = false @@ -33,7 +33,7 @@ default_alias_b(::MKLLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MKLLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - ArrayInterface.lu_instance(convert(AbstractMatrix, A)) + ArrayInterface.lu_instance(convert(AbstractMatrix, A)), Ref{BlasInt}() end function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization; @@ -42,11 +42,12 @@ function SciMLBase.solve!(cache::LinearCache, alg::MKLLUFactorization; A = convert(AbstractMatrix, A) if cache.isfresh cacheval = @get_cacheval(cache, :MKLLUFactorization) - fact = LU(getrf!(A; ipiv = cacheval.ipiv)...) + res = getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) + fact = LU(res[1:3]...), res[4] cache.cacheval = fact cache.isfresh = false end - y = ldiv!(cache.u, @get_cacheval(cache, :MKLLUFactorization), cache.b) + y = ldiv!(cache.u, @get_cacheval(cache, :MKLLUFactorization)[1], cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache) end diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index e0f6a5959..9b9cc32db 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -41,7 +41,27 @@ function aa_getrf!(A::AbstractMatrix{<:Float64}; ipiv = similar(A, Cint, min(siz Ref{Cint}, Ptr{Cint}, Ptr{Cint}), m, n, A, lda, ipiv, info) info[] < 0 && throw(ArgumentError("Invalid arguments sent to LAPACK dgetrf_")) - A, Vector{BlasInt}(ipiv), BlasInt(info[]) #Error code is stored in LU factorization type + A, ipiv, BlasInt(info[]), info #Error code is stored in LU factorization type +end + +function aa_getrs!(trans::AbstractChar, A::AbstractMatrix{<:Float64}, ipiv::AbstractVector{Cint}, B::AbstractVecOrMat{<:Float64}; info = Ref{Cint}()) + require_one_based_indexing(A, ipiv, B) + LinearAlgebra.LAPACK.chktrans(trans) + chkstride1(A, B, ipiv) + n = LinearAlgebra.checksquare(A) + if n != size(B, 1) + throw(DimensionMismatch("B has leading dimension $(size(B,1)), but needs $n")) + end + if n != length(ipiv) + throw(DimensionMismatch("ipiv has length $(length(ipiv)), but needs to be $n")) + end + nrhs = size(B, 2) + ccall(("dgetrs_", libacc), Cvoid, + (Ref{UInt8}, Ref{Cint}, Ref{Cint}, Ptr{Float64}, Ref{Cint}, + Ptr{Cint}, Ptr{Float64}, Ref{Cint}, Ptr{Cint}, Clong), + trans, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) + LinearAlgebra.LAPACK.chklapackerror(BlasInt(info[])) + B end default_alias_A(::AppleAccelerateLUFactorization, ::Any, ::Any) = false @@ -50,7 +70,8 @@ default_alias_b(::AppleAccelerateLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - ArrayInterface.lu_instance(convert(AbstractMatrix, A)) + luinst = ArrayInterface.lu_instance(convert(AbstractMatrix, A)) + LU(luinst.factors,similar(A, Cint, 0), luinst.info), Ref{Cint}() end function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorization; @@ -59,10 +80,23 @@ function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorizatio A = convert(AbstractMatrix, A) if cache.isfresh cacheval = @get_cacheval(cache, :AppleAccelerateLUFactorization) - fact = LU(aa_getrf!(A; ipiv = 
cacheval.ipiv)...) + res = aa_getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) + fact = LU(res[1:3]...), res[4] cache.cacheval = fact cache.isfresh = false end - y = ldiv!(cache.u, @get_cacheval(cache, :AppleAccelerateLUFactorization), cache.b) - SciMLBase.build_linear_solution(alg, y, nothing, cache) + + A, info = @get_cacheval(cache, :AppleAccelerateLUFactorization) + LinearAlgebra.require_one_based_indexing(cache.u, cache.b) + m, n = size(A, 1), size(A, 2) + if m > n + Bc = copy(cache.b) + aa_getrs!('N', A.factors, A.ipiv, Bc; info) + return copyto!(cache.u, 1, Bc, 1, n) + else + copyto!(cache.u, cache.b) + aa_getrs!('N', A.factors, A.ipiv, cache.u; info) + end + + SciMLBase.build_linear_solution(alg, cache.u, nothing, cache) end From 93382d1b7423a9f7c00923c4b08f5652c44b3ac7 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 18:22:47 -0400 Subject: [PATCH 08/11] skip apple test on non-apple platforms --- test/resolve.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/resolve.jl b/test/resolve.jl index c058e527c..e3fc9b501 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -2,6 +2,9 @@ using LinearSolve, LinearAlgebra, SparseArrays, InteractiveUtils, Test for alg in subtypes(LinearSolve.AbstractFactorization) @show alg + if alg isa AppleAccelerateLUFactorization && !LinearSolve.appleaccelerate_isavailable() + continue + end if !(alg in [DiagonalFactorization, CudaOffloadFactorization]) A = [1.0 2.0; 3.0 4.0] alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] && From d8d0849c33df781847f71ce80a6609e61e5ec68c Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 18:30:24 -0400 Subject: [PATCH 09/11] better skip apple test? --- test/resolve.jl | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/resolve.jl b/test/resolve.jl index e3fc9b501..a56c50dda 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -2,10 +2,9 @@ using LinearSolve, LinearAlgebra, SparseArrays, InteractiveUtils, Test for alg in subtypes(LinearSolve.AbstractFactorization) @show alg - if alg isa AppleAccelerateLUFactorization && !LinearSolve.appleaccelerate_isavailable() - continue - end - if !(alg in [DiagonalFactorization, CudaOffloadFactorization]) + if !(alg in [DiagonalFactorization, CudaOffloadFactorization]) && + !(alg isa AppleAccelerateLUFactorization) || LinearSolve.appleaccelerate_isavailable() + A = [1.0 2.0; 3.0 4.0] alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] && (A = sparse(A)) From 83e1edbf3629d4d784d232a58e13da37f77045a4 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 18:38:09 -0400 Subject: [PATCH 10/11] missing paren --- test/resolve.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/resolve.jl b/test/resolve.jl index a56c50dda..91d1cbfd7 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -3,7 +3,7 @@ using LinearSolve, LinearAlgebra, SparseArrays, InteractiveUtils, Test for alg in subtypes(LinearSolve.AbstractFactorization) @show alg if !(alg in [DiagonalFactorization, CudaOffloadFactorization]) && - !(alg isa AppleAccelerateLUFactorization) || LinearSolve.appleaccelerate_isavailable() + (!(alg isa AppleAccelerateLUFactorization) || LinearSolve.appleaccelerate_isavailable()) A = [1.0 2.0; 3.0 4.0] alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] && From 6d5aeb4c4d150ec9f150f46fb55e9b5a75312d02 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 7 Aug 2023 19:40:55 -0400 Subject: 
[PATCH 11/11] dope it's not a subtype --- test/resolve.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/resolve.jl b/test/resolve.jl index 91d1cbfd7..d622a8865 100644 --- a/test/resolve.jl +++ b/test/resolve.jl @@ -2,8 +2,8 @@ using LinearSolve, LinearAlgebra, SparseArrays, InteractiveUtils, Test for alg in subtypes(LinearSolve.AbstractFactorization) @show alg - if !(alg in [DiagonalFactorization, CudaOffloadFactorization]) && - (!(alg isa AppleAccelerateLUFactorization) || LinearSolve.appleaccelerate_isavailable()) + if !(alg in [DiagonalFactorization, CudaOffloadFactorization, AppleAccelerateLUFactorization]) && + (!(alg == AppleAccelerateLUFactorization) || LinearSolve.appleaccelerate_isavailable()) A = [1.0 2.0; 3.0 4.0] alg in [KLUFactorization, UMFPACKFactorization, SparspakFactorization] &&