diff --git a/Example/Tests/MatrixTests.swift b/Example/Tests/MatrixTests.swift
index c6a9aea..d9301ce 100644
--- a/Example/Tests/MatrixTests.swift
+++ b/Example/Tests/MatrixTests.swift
@@ -574,6 +574,25 @@ class MatrixSpec: QuickSpec {
                 let usv = svd(m1)
                 expect(usv.U * usv.S * usv.V′) == m1
             }
+            it("gsvd") {
+                let m1 = Matrix([[1.0, 6.0, 11.0],
+                                 [2.0, 7.0, 12.0],
+                                 [3.0, 8.0, 13.0],
+                                 [4.0, 9.0, 14.0],
+                                 [5.0, 10.0, 15.0]])
+                let m2 = Matrix([[8.0, 1.0, 6.0],
+                                 [3.0, 5.0, 7.0],
+                                 [4.0, 9.0, 2.0]])
+                // Expected U — presumably taken from MATLAB's gsvd(A, B) for
+                // the same inputs; TODO confirm the sign of the first entry.
+                let m3 = Matrix([[0.5700, -0.6457, -0.4279],
+                                 [-0.7455, -0.3296, -0.4375],
+                                 [-0.1702, -0.0135, -0.4470],
+                                 [0.2966, 0.3026, -0.4566],
+                                 [0.0490, 0.6187, -0.4661]])
+                let (U, _, _, _, _, success) = gsvd(m1, m2)
+                expect(success) == true
+                expect(U) == m3
+            }
             it("chol") {
                 let m1 = Matrix([[1, 1, 1, 1, 1],
                                  [1, 2, 3, 4, 5],
diff --git a/Sources/Matrix.swift b/Sources/Matrix.swift
index 05e81ed..a781331 100644
--- a/Sources/Matrix.swift
+++ b/Sources/Matrix.swift
@@ -136,7 +136,7 @@ public enum Dim {
 /// Matrix of Double values
 public class Matrix {
 
-    internal var flat = Vector()
+    public internal(set) var flat = Vector()
     internal var _rows: Int = 0
     internal var _cols: Int = 0
 
@@ -150,14 +150,14 @@ public class Matrix {
         return _cols
     }
 
-    internal init(_ r: Int, _ c: Int, _ value: Double = 0.0) {
+    public init(_ r: Int, _ c: Int, _ value: Double = 0.0) {
         precondition(r > 0 && c > 0, "Matrix dimensions must be positive")
         flat = Vector(repeating: value, count: r * c)
         _rows = r
         _cols = c
     }
 
-    internal init(_ r: Int, _ c: Int, _ f: Vector) {
+    public init(_ r: Int, _ c: Int, _ f: Vector) {
         precondition(r * c == f.count, "Matrix dimensions must agree")
         flat = f
         _rows = r
@@ -290,7 +290,7 @@ extension Matrix {
     }
 }
 
-internal func toRows(_ A: Matrix, _ d: Dim) -> Matrix {
+public func toRows(_ A: Matrix, _ d: Dim) -> Matrix {
     switch d {
     case .Row:
         return A
@@ -299,7 +299,7 @@ internal func toRows(_ A: Matrix, _ d: Dim) -> Matrix {
     }
 }
 
-internal func toCols(_ A: Matrix, _ d: Dim) -> Matrix {
+public func toCols(_ A: Matrix, _ d: Dim) -> Matrix {
     switch d {
     case .Row:
         return transpose(A)
diff --git a/Sources/MatrixAlgebra.swift b/Sources/MatrixAlgebra.swift
index cda547c..0daf161 100644
--- a/Sources/MatrixAlgebra.swift
+++ b/Sources/MatrixAlgebra.swift
@@ -269,6 +269,60 @@ public func svd(_ A: Matrix) -> (U: Matrix, S: Matrix, V: Matrix) {
     return (toRows(U, .Column), diag(Int(M), Int(N), s), VT)
 }
 
+/// Perform a generalized singular value decomposition of 2 given matrices.
+///
+/// - Parameters:
+///     - A: first matrix
+///     - B: second matrix; must have the same number of columns as A
+/// - Returns: matrices U, V, and Q, the generalized singular value vectors
+///            alpha and beta, and a success flag. On failure, alpha and beta
+///            are empty and the matrix contents are unspecified.
+public func gsvd(_ A: Matrix, _ B: Matrix) -> (U: Matrix, V: Matrix, Q: Matrix, alpha: Vector, beta: Vector, success: Bool) {
+    precondition(A.cols == B.cols, "Matrices must have the same number of columns")
+
+    /* LAPACK is using column-major order */
+    let _A = toCols(A, .Row)
+    let _B = toCols(B, .Row)
+
+    var jobu = Int8(UInt8(ascii: "U"))
+    var jobv = Int8(UInt8(ascii: "V"))
+    var jobq = Int8(UInt8(ascii: "Q"))
+
+    var M = __CLPK_integer(A.rows)
+    var N = __CLPK_integer(A.cols)
+    var P = __CLPK_integer(B.rows)
+
+    var LDA = M
+    var LDB = P
+    var LDU = M
+    var LDV = P
+    var LDQ = N
+
+    // Workspace size required by dggsvd: max(3N, M, P) + N
+    let lWork = max(max(Int(3 * N), Int(M)), Int(P)) + Int(N)
+    var iWork = [__CLPK_integer](repeating: 0, count: Int(N))
+    var work = Vector(repeating: 0.0, count: lWork)
+    var error = __CLPK_integer(0)
+
+    var k = __CLPK_integer()
+    var l = __CLPK_integer()
+
+    let U = Matrix(Int(LDU), Int(M))
+    let V = Matrix(Int(LDV), Int(P))
+    let Q = Matrix(Int(LDQ), Int(N))
+    var alpha = Vector(repeating: 0.0, count: Int(N))
+    var beta = Vector(repeating: 0.0, count: Int(N))
+
+    dggsvd_(&jobu, &jobv, &jobq, &M, &N, &P, &k, &l, &_A.flat, &LDA, &_B.flat, &LDB, &alpha, &beta, &U.flat, &LDU, &V.flat, &LDV, &Q.flat, &LDQ, &work, &iWork, &error)
+
+    // On failure, k and l are not meaningful — never slice alpha/beta with them.
+    guard error == 0 else {
+        return (toRows(U, .Column), toRows(V, .Column), toRows(Q, .Column), Vector(), Vector(), false)
+    }
+    let sv = Int(k)..<Int(k + l)
+    return (toRows(U, .Column), toRows(V, .Column), toRows(Q, .Column), Vector(alpha[sv]), Vector(beta[sv]), true)
+}
+
 /// Compute the Cholesky factorization of a real symmetric positive definite matrix.
 ///
 /// A precondition error is thrown if the algorithm fails to converge.