Skip to content

Commit 705fb88

Browse files
dmbates authored and palday committed
Use 5-argument mul! instead of mulαβ! (#231)
* Changes for julia v1.3.0 and reformat
* Require julia v1.3.0 or later
* Remove badge for coveralls
* Move `fit` methods from `mixed.jl` to other files
* Use 5-argument `mul!` methods
* Drop 2-stage optimization for GLMM fit with `fast=false`
* Reinstate the `ltriindprs` vector
* Clean up badges, drop unused function, add tests.
* Add tests for simulate-related functions.
* Update test results
* Delete commented-out code
1 parent 3799c25 commit 705fb88

24 files changed

+701
-519
lines changed

.travis.yml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ os:
33
- linux
44
- osx
55
julia:
6-
- 1.2
76
- 1.3
87
codecov: true
98
notifications:
@@ -15,7 +14,7 @@ jobs:
1514
include:
1615
- stage: "Documentation"
1716
os: linux
18-
julia: 1.2
17+
julia: 1.3
1918
script:
2019
- julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd()));
2120
Pkg.instantiate()'

Project.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ StatsFuns = "0.8, 0.9"
3939
StatsModels = "0.6"
4040
Tables = "0.2"
4141
TypedTables = "1"
42-
julia = "1.2"
42+
julia = "1.3"
4343

4444
[extras]
4545
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"

README.md

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
|**Documentation**|**Citation**|**Build Status**|**Code Coverage**|
44
|:-:|:-:|:-:|:-:|
5-
|[![][docs-stable-img]][docs-stable-url] [![][docs-latest-img]][docs-latest-url] | [![][doi-img]][doi-url] | [![][travis-img]][travis-url] [![][appveyor-img]][appveyor-url] | [![][coveralls-img]][coveralls-url] [![][codecov-img]][codecov-url]|
5+
|[![][docs-stable-img]][docs-stable-url] [![][docs-latest-img]][docs-latest-url] | [![][doi-img]][doi-url] | [![][travis-img]][travis-url] [![][appveyor-img]][appveyor-url] | [![][codecov-img]][codecov-url]|
66

77
[doi-img]: https://zenodo.org/badge/9106942.svg
88
[doi-url]: https://zenodo.org/badge/latestdoi/9106942
@@ -19,9 +19,6 @@
1919
[appveyor-img]: https://ci.appveyor.com/api/projects/status/github/JuliaStats/MixedModels.jl?svg=true
2020
[appveyor-url]: https://ci.appveyor.com/project/JuliaStats/mixedmodels-jl
2121

22-
[coveralls-img]: https://coveralls.io/repos/github/JuliaStats/MixedModels.jl/badge.svg?branch=master
23-
[coveralls-url]: https://coveralls.io/github/JuliaStats/MixedModels.jl?branch=master
24-
2522
[codecov-img]: https://codecov.io/github/JuliaStats/MixedModels.jl/badge.svg?branch=master
2623
[codecov-url]: https://codecov.io/github/JuliaStats/MixedModels.jl?branch=master
2724

appveyor.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
environment:
22
matrix:
3-
- julia_version: 1.2
3+
- julia_version: 1.3
44
- julia_version: nightly
55

66
platform:

src/MixedModels.jl

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,6 @@ include("randomeffectsterm.jl")
116116
include("linearmixedmodel.jl")
117117
include("gausshermite.jl")
118118
include("generalizedlinearmixedmodel.jl")
119-
include("mixed.jl")
120119
include("linalg/statschol.jl")
121120
include("linalg/cholUnblocked.jl")
122121
include("linalg/rankUpdate.jl")

src/arraytypes.jl

Lines changed: 19 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,21 @@
1-
using StaticArrays, SparseArrays, LinearAlgebra
2-
31
"""
42
UniformBlockDiagonal{T}
53
64
Homogeneous block diagonal matrices. `k` diagonal blocks each of size `m×m`
75
"""
86
struct UniformBlockDiagonal{T} <: AbstractMatrix{T}
9-
data::Array{T, 3}
7+
data::Array{T,3}
108
facevec::Vector{SubArray{T,2,Array{T,3}}}
119
end
1210

1311
function UniformBlockDiagonal(dat::Array{T,3}) where {T}
14-
UniformBlockDiagonal(dat,
15-
SubArray{T,2,Array{T,3}}[view(dat,:,:,i) for i in 1:size(dat, 3)])
12+
UniformBlockDiagonal(
13+
dat,
14+
SubArray{T,2,Array{T,3}}[view(dat, :, :, i) for i = 1:size(dat, 3)],
15+
)
1616
end
1717

18-
function Base.copyto!(dest::UniformBlockDiagonal{T}, src::UniformBlockDiagonal{T}) where{T}
18+
function Base.copyto!(dest::UniformBlockDiagonal{T}, src::UniformBlockDiagonal{T}) where {T}
1919
sdat = src.data
2020
ddat = dest.data
2121
size(ddat) == size(sdat) || throw(DimensionMismatch(""))
@@ -28,13 +28,13 @@ function Base.copyto!(dest::Matrix{T}, src::UniformBlockDiagonal{T}) where {T}
2828
fill!(dest, zero(T))
2929
sdat = src.data
3030
m, n, l = size(sdat)
31-
for k in 1:l
31+
for k = 1:l
3232
ioffset = (k - 1) * m
3333
joffset = (k - 1) * n
34-
for j in 1:n
34+
for j = 1:n
3535
jind = joffset + j
36-
for i in 1:m
37-
dest[ioffset + i, jind] = sdat[i,j,k]
36+
for i = 1:m
37+
dest[ioffset+i, jind] = sdat[i, j, k]
3838
end
3939
end
4040
end
@@ -45,7 +45,7 @@ function Base.getindex(A::UniformBlockDiagonal{T}, i::Int, j::Int) where {T}
4545
Ad = A.data
4646
m, n, l = size(Ad)
4747
    (0 < i ≤ l * m && 0 < j ≤ l * n) ||
48-
throw(IndexError("attempt to access $(l*m) × $(l*n) array at index [$i, $j]"))
48+
throw(IndexError("attempt to access $(l*m) × $(l*n) array at index [$i, $j]"))
4949
iblk, ioffset = divrem(i - 1, m)
5050
jblk, joffset = divrem(j - 1, n)
5151
iblk == jblk ? Ad[ioffset+1, joffset+1, iblk+1] : zero(T)
@@ -54,15 +54,15 @@ end
5454
function LinearAlgebra.Matrix(A::UniformBlockDiagonal{T}) where {T}
5555
Ad = A.data
5656
m, n, l = size(Ad)
57-
mat = zeros(T, (m*l, n*l))
57+
mat = zeros(T, (m * l, n * l))
5858
@inbounds for k = 0:(l-1)
5959
kp1 = k + 1
6060
km = k * m
6161
kn = k * n
6262
for j = 1:n
6363
knpj = kn + j
6464
for i = 1:m
65-
mat[km + i, knpj] = Ad[i, j, kp1]
65+
mat[km+i, knpj] = Ad[i, j, kp1]
6666
end
6767
end
6868
end
@@ -81,7 +81,10 @@ A `SparseMatrixCSC` whose nonzeros form blocks of rows or columns or both.
8181
8282
# Members
8383
* `cscmat`: `SparseMatrixCSC{Tv, Int32}` representation for general calculations
84-
* `blkpattern`: `SparseMatrixCSC{Bool,Int32}` pattern of blocks of size (S,P)
84+
* `nzasmat`: nonzeros of `cscmat` as a dense matrix
85+
* `colblkptr`: pattern of blocks of columns
86+
87+
The only time these are created are as products of `ReMat`s.
8588
"""
8689
mutable struct BlockedSparse{T,S,P} <: AbstractMatrix{T}
8790
cscmat::SparseMatrixCSC{T,Int32}
@@ -102,7 +105,8 @@ SparseArrays.sparse(A::BlockedSparse) = A.cscmat
102105
SparseArrays.nnz(A::BlockedSparse) = nnz(A.cscmat)
103106

104107
function Base.copyto!(L::BlockedSparse{T}, A::SparseMatrixCSC{T}) where {T}
105-
size(L) == size(A) && nnz(L) == nnz(A) || throw(DimensionMismatch("size(L) ≠ size(A) or nnz(L) ≠ nnz(A"))
108+
size(L) == size(A) && nnz(L) == nnz(A) ||
109+
throw(DimensionMismatch("size(L) ≠ size(A) or nnz(L) ≠ nnz(A"))
106110
copyto!(nonzeros(L.cscmat), nonzeros(A))
107111
L
108112
end

src/femat.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ Base.size(A::FeMat, i) = size(A.wtx, i)
5555
Base.copyto!(A::FeMat{T}, src::AbstractVecOrMat{T}) where {T} = copyto!(A.x, src)
5656

5757
*(adjA::Adjoint{T,<:FeMat{T}}, B::FeMat{T}) where {T} =
58-
fullrankwtx(adjA.parent)'fullrankwtx(B)
58+
fullrankwtx(adjA.parent)' * fullrankwtx(B)
5959

6060
LinearAlgebra.mul!(R::StridedVecOrMat{T}, A::FeMat{T}, B::StridedVecOrMat{T}) where {T} =
6161
mul!(R, A.x, B)

src/gausshermite.jl

Lines changed: 19 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
using StaticArrays, LinearAlgebra
2-
31
"""
42
GaussHermiteQuadrature
53
@@ -28,8 +26,8 @@ gn5 = GHnorm(5)
2826
sum(@. abs2(σ*gn5.z + μ)*gn5.w) # E[X^2] where X ∼ N(μ, σ)
2927
```
3028
31-
For evaluation of the log-likelihood of a GLMM the integral to evaluate for each level of the grouping
32-
factor is approximately Gaussian shaped.
29+
For evaluation of the log-likelihood of a GLMM the integral to evaluate for each level of
30+
the grouping factor is approximately Gaussian shaped.
3331
"""
3432
GaussHermiteQuadrature
3533
"""
@@ -40,18 +38,20 @@ A struct with 2 SVector{K,Float64} members
4038
- `wt`: Gauss-Hermite weights normalized to sum to unity
4139
"""
4240
struct GaussHermiteNormalized{K}
43-
z::SVector{K, Float64}
41+
z::SVector{K,Float64}
4442
w::SVector{K,Float64}
4543
end
4644
function GaussHermiteNormalized(k::Integer)
4745
ev = eigen(SymTridiagonal(zeros(k), sqrt.(1:k-1)))
48-
w = abs2.(ev.vectors[1,:])
46+
w = abs2.(ev.vectors[1, :])
4947
GaussHermiteNormalized(
5048
SVector{k}((ev.values .- reverse(ev.values)) ./ 2),
51-
SVector{k}(LinearAlgebra.normalize((w .+ reverse(w)) ./ 2, 1)))
49+
SVector{k}(LinearAlgebra.normalize((w .+ reverse(w)) ./ 2, 1)),
50+
)
5251
end
5352

54-
Base.iterate(g::GaussHermiteNormalized{K}, i=1) where {K} = (K < i ? nothing : ((z = g.z[i], w = g.w[i]), i + 1))
53+
Base.iterate(g::GaussHermiteNormalized{K}, i = 1) where {K} =
54+
(K < i ? nothing : ((z = g.z[i], w = g.w[i]), i + 1))
5555

5656
Base.length(g::GaussHermiteNormalized{K}) where {K} = K
5757

@@ -61,10 +61,13 @@ Base.length(g::GaussHermiteNormalized{K}) where {K} = K
6161
Memoized values of `GHnorm`{@ref} stored as a `Dict{Int,GaussHermiteNormalized}`
6262
"""
6363
const GHnormd = Dict{Int,GaussHermiteNormalized}(
64-
1 => GaussHermiteNormalized(SVector{1}(0.),SVector{1}(1.)),
65-
2 => GaussHermiteNormalized(SVector{2}(-1.0,1.0),SVector{2}(0.5,0.5)),
66-
3 => GaussHermiteNormalized(SVector{3}(-sqrt(3),0.,sqrt(3)),SVector{3}(1/6,2/3,1/6))
67-
)
64+
1 => GaussHermiteNormalized(SVector{1}(0.0), SVector{1}(1.0)),
65+
2 => GaussHermiteNormalized(SVector{2}(-1.0, 1.0), SVector{2}(0.5, 0.5)),
66+
3 => GaussHermiteNormalized(
67+
SVector{3}(-sqrt(3), 0.0, sqrt(3)),
68+
SVector{3}(1 / 6, 2 / 3, 1 / 6),
69+
),
70+
)
6871

6972
"""
7073
GHnorm(k::Int)
@@ -74,7 +77,8 @@ Return the (unique) GaussHermiteNormalized{k} object.
7477
The function values are stored (memoized) when first evaluated. Subsequent evaluations
7578
for the same `k` have very low overhead.
7679
"""
77-
GHnorm(k::Int) = get!(GHnormd, k) do
78-
GaussHermiteNormalized(k)
79-
end
80+
GHnorm(k::Int) =
81+
get!(GHnormd, k) do
82+
GaussHermiteNormalized(k)
83+
end
8084
GHnorm(k) = GHnorm(Int(k))

0 commit comments

Comments
 (0)