From 0989e769691fba16db7ae4cdd0bf27607ac7f01a Mon Sep 17 00:00:00 2001
From: PharmCat
Date: Tue, 16 Jul 2024 22:59:44 +0300
Subject: [PATCH 1/9] cosmetics

---
 src/Metida.jl        | 1 +
 src/linearalgebra.jl | 4 ++--
 src/rmat.jl          | 6 +++---
 src/sweep.jl         | 4 ++--
 test/profile.pb.gz   | Bin 11360 -> 0 bytes
 5 files changed, 8 insertions(+), 7 deletions(-)
 delete mode 100644 test/profile.pb.gz

diff --git a/src/Metida.jl b/src/Metida.jl
index a86b2143..b95c8339 100644
--- a/src/Metida.jl
+++ b/src/Metida.jl
@@ -17,6 +17,7 @@ import StatsBase: fit, fit!, coef, coefnames, confint, nobs, dof_residual, dof,
 import Base: show, rand, ht_keyindex, getproperty
 import Random: default_rng, AbstractRNG, rand!
 
+
 export @formula, @covstr, @lmmformula,
 SI, ScaledIdentity,
 DIAG, Diag,
diff --git a/src/linearalgebra.jl b/src/linearalgebra.jl
index 79d5093f..4de882a3 100644
--- a/src/linearalgebra.jl
+++ b/src/linearalgebra.jl
@@ -108,8 +108,8 @@ use only upper triangle of V
         end
     end
     for m = 2:q
-        @inbounds ycm2 = (y[m] - c[m])*2
-        @simd for n = 1:m-1
+        @inbounds ycm2 = (y[m] - c[m]) * 2
+        @simd for n = 1:m - 1
             @inbounds θ -= V[n, m] * (y[n] - c[n]) * ycm2
         end
     end
diff --git a/src/rmat.jl b/src/rmat.jl
index 7681c7e0..958c3ac6 100644
--- a/src/rmat.jl
+++ b/src/rmat.jl
@@ -71,8 +71,8 @@ end
 #CS
 function rmat!(mx, θ, ::AbstractMatrix, ::CS_)
     s = size(mx, 1)
-    θsq = θ[1]*θ[1]
-    θsqp = θsq*θ[2]
+    θsq = θ[1] * θ[1]
+    θsqp = θsq * θ[2]
     @inbounds @simd for i = 1:size(mx, 1)
         mx[i, i] += θsq
     end
@@ -93,7 +93,7 @@ function rmat!(mx, θ, rz, ::CSH_)
     θend = last(θ)
     for n = 2:s
         @inbounds vecnθend = vec[n] * θend
-        @inbounds @simd for m = 1:n-1
+        @inbounds @simd for m = 1:n - 1
             mx[m, n] += vec[m] * vecnθend
         end
     end
diff --git a/src/sweep.jl b/src/sweep.jl
index 3610621b..1d4d781e 100644
--- a/src/sweep.jl
+++ b/src/sweep.jl
@@ -26,7 +26,7 @@ function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, k::Integer, i
     @simd for j in 1:k
         @inbounds akk[j] = A[j, k]
     end
-    @simd for j in (k+1):p
+    @simd for j in (k + 1):p
         @inbounds akk[j] = A[k, j]
     end
     # syrk!(uplo, trans, alpha, A, beta, C)
@@ -57,7 +57,7 @@ function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, ks::AbstractV
     if logdet
         ld = 0
         for k in ks
-            @inbounds Akk = A[k,k]
+            @inbounds Akk = A[k, k]
             if Akk > 0
                 ld += log(Akk)
             else
diff --git a/test/profile.pb.gz b/test/profile.pb.gz
deleted file mode 100644
index fd252a96d5b5be9e6bc12e3cd8c06cafffd88bd1..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 11360 — binary delta for the deleted test/profile.pb.gz omitted]
ztWX#Z3iGwDr&L5M#EMRxuo}8y#khPgXr{G`-Y$5&5ETpJ&R0(EreNTfaOzJI$ypE2$fQ#1YZG{6=rUAN` zA<(vJVH|^AvWn#u7sYaDMRp82zXlA#BaPr!4B__LioIy1DjeZ0rJokRex3&>ujn>K z7V$`mOOAuX2iAIV^&idO7#TuywhhKMELxE0pMo1~N zVtK{cE3SiJOjw?joy4nsHLqt^4O~$v>`AOdD=LJjqw6rSEQKwix784CFzPI#pp7(H zbzLP{UBcY2&@>N6@RyR#VxMWACq-w3)3qv zYUO*QHZlqbe;=Gr5Q?dV1$+-^c8EQZqc}Z05)vLQCV}ExAg&1P2({#uMb=%cVwZdb zL}U&g8%!`Qh`%dx-DI{`cZs#lDUoE|ZMzSs|-y&~1+`{2NvGZ~VN_ zy@fd{ghISXizK&@quN@cceikzElSdhpE-VLo@tja-|0&RWH=fHWXKr_rKjz#!{UKO z{Nt-g#$vO_WO}h2O!{377OSnA7+Co8*F^w6HcKC84ADsb{p&iZ;0$K$Q<=}_ZM0u? z*0yQi;tP=z(Y*~e$E+uXX)TZmgUbH-mC9?*h#q4p$4uNTEDX1vG=o_l?W2vPi;~>! z}+3@8S`qpB67L@iBBT3uI^FQRGf zZZ?LA73BqAjBQ^{;ysM`EJ~ch5+DF(^UelP1f8)h1VxrU;#-bWx+@Aw|Ba5|)mHhs z>FeUdw91D|>eLgdv5WCTsHQhZ^s$*~6}_7Hcb8My%AS!CDw3<0YD7P3p+ms|s)=PF z;OZs{G;@FN*ku%MFp1@?4!>UQ^~9$TZcA2!j^fjI+~|BjcdrIA@=vL{5Zh5l|%KaKQ%(1UhN*bPbB zK&;;=z)x>9nxWQk&BI3@m)ae;mU iDdO9-#x2|2cV$#%gtC9%IPi}V?EeG8093W-$p8Qh3r5rc From 5f05ec7744d110b407339a3ab072a046622b93aa Mon Sep 17 00:00:00 2001 From: PharmCat Date: Wed, 21 Aug 2024 16:56:31 +0300 Subject: [PATCH 2/9] matrix wts, docs, swc cov type, chnges in rmat! --- Project.toml | 2 +- docs/src/custom.md | 6 ++--- docs/src/details.md | 53 ++++++++++++++++++++++++++++++++++++++++++--- src/lmm.jl | 34 ++++++++++++++++++++++------- src/lmmdata.jl | 15 +++++++++---- src/rmat.jl | 51 ++++++++++++++++++++++++++++--------------- src/utils.jl | 5 ++++- src/varstruct.jl | 5 +++++ src/vartypes.jl | 37 +++++++++++++++++++++++++++++++ test/test.jl | 14 +++++++++++- 10 files changed, 183 insertions(+), 39 deletions(-) diff --git a/Project.toml b/Project.toml index 86d81023..3f7ad971 100644 --- a/Project.toml +++ b/Project.toml @@ -3,7 +3,7 @@ uuid = "a1dec852-9fe5-11e9-361f-8d9fde67cfa2" keywords = ["lenearmodel", "mixedmodel"] desc = "Mixed-effects models with flexible covariance structure." authors = ["Vladimir Arnautov "] -version = "0.15.1" +version = "0.16.0" [deps] DiffResults = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" diff --git a/docs/src/custom.md b/docs/src/custom.md index 7789d090..6ad8de76 100644 --- a/docs/src/custom.md +++ b/docs/src/custom.md @@ -26,10 +26,10 @@ function Metida.gmat!(mx, θ, ::YourCovarianceStruct) end ``` -Function `rmat!` have 4 arguments and add repeated effect to V': V = V' + R (so V = Z * G * Z' + R), `mx` - V' matrix, `θ` - theta vector for this effect, `rz` - subject effect matrix, `ct` - your covariance type object. For example, `rmat!` for Heterogeneous Toeplitz Parameterized structure is specified bellow (`TOEPHP_ <: AbstractCovarianceType`). +Function `rmat!` have 5 arguments and add repeated effect to V': V = V' + R (so V = Z * G * Z' + R), `mx` - V' matrix, `θ` - theta vector for this effect, `rz` - subject effect matrix, `ct` - your covariance type object, `sb` = block number. For example, `rmat!` for Heterogeneous Toeplitz Parameterized structure is specified bellow (`TOEPHP_ <: AbstractCovarianceType`). 
 
 ```
-function Metida.rmat!(mx, θ, rz, ct::TOEPHP_)
+function Metida.rmat!(mx, θ, rz, ct::TOEPHP_, ::Int)
     l = size(rz, 2)
     vec = rz * (θ[1:l])
     s = size(mx, 1)
@@ -123,7 +123,7 @@ Metida.fit!(lmm)
 
 
 # for R matrix
-function Metida.rmat!(mx, θ, rz, ::CustomCovarianceStructure)
+function Metida.rmat!(mx, θ, rz, ::CustomCovarianceStructure, ::Int)
     vec = Metida.tmul_unsafe(rz, θ)
     rn = size(mx, 1)
     if rn > 1
diff --git a/docs/src/details.md b/docs/src/details.md
index 4fceee5c..a6959f5c 100644
--- a/docs/src/details.md
+++ b/docs/src/details.md
@@ -13,6 +13,33 @@ In matrix notation a mixed effect model can be represented as:
 y = X\beta + Zu + \epsilon
 ```
 
+where:
+
+
+```math
+\epsilon \sim N(0, R)
+
+\\
+
+u \sim N(0, G)
+
+\\
+
+y \sim N(X\beta, V)
+
+```
+
+where V depends on the covariance structure and parameters ``\theta``:
+
+```math
+V = CovStruct(\theta)
+
+```
+
+The unknown parameters include the regression parameters in ``\beta`` and covariance parameters in ``\theta``.
+
+Estimation of these model parameters relies on a Newton-Raphson algorithm (by default). When we use this algorithm for finding REML solutions, we need to compute ``V^{-1}`` and its derivatives with respect to ``\theta``, which are computationally difficult for large ``n``; therefore the SWEEP algorithm (see https://github.com/joshday/SweepOperator.jl) is used to make optimization less computationally expensive.
+
 #### V
 
 ```math
@@ -33,7 +60,7 @@ logREML(\theta,\beta) = -\frac{N-p}{2} - \frac{1}{2}\sum_{i=1}^nlog|V_{\theta, i
 -\frac{1}{2}log|\sum_{i=1}^nX_i'V_{\theta, i}^{-1}X_i|-\frac{1}{2}\sum_{i=1}^n(y_i - X_{i}\beta)'V_{\theta, i}^{-1}(y_i - X_{i}\beta)
 ```
 
-Actually ```L(\theta) = -2logREML = L_1(\theta) + L_2(\theta) + L_3(\theta) + c``` used for optimization, where:
+Actually ``L(\theta) = -2logREML = L_1(\theta) + L_2(\theta) + L_3(\theta) + c`` is used for optimization, where:
 
 ```math
 L_1(\theta) = \frac{1}{2}\sum_{i=1}^nlog|V_{i}| \\
@@ -51,16 +78,36 @@ L_3(\theta) = \frac{1}{2}\sum_{i=1}^n(y_i - X_{i}\beta)'V_i^{-1}(y_i - X_{i}\bet
 \mathcal{H}\mathcal{L}(\theta) = \mathcal{H}L_1(\theta) + \mathcal{H}L_2(\theta) + \mathcal{H} L_3(\theta)
 ```
 
-#### Weights
+#### [Weights](@id weights_header)
 
 If weights defined:
 
 ```math
+
+W_{i} = diag(wts_{i})
+
+\\
+
 V_{i} = Z_{i} G Z_i'+ W^{- \frac{1}{2}}_i R_{i} W^{- \frac{1}{2}}_i
 ```
+
+where ``W`` - diagonal matrix of weights.
+
+
+If `wts` is a matrix then:
+
+```math
+
+W_{i} = wts_{i}
+
+\\
+
+V_{i} = Z_{i} G Z_i'+ R_{i} \circ W_{i}
+```
+
+where ``\circ`` - element-wise multiplication.
 
-where ```W``` - diagonal matrix of weights.
 
 
 #### Multiple random and repeated effects
 
diff --git a/src/lmm.jl b/src/lmm.jl
index cdeb5c7f..9edbcc2c 100644
--- a/src/lmm.jl
+++ b/src/lmm.jl
@@ -11,7 +11,7 @@ struct ModelStructure
 end
 
 """
-    LMM(model, data; contrasts=Dict{Symbol,Any}(), random::Union{Nothing, VarEffect, Vector{VarEffect}} = nothing, repeated::Union{Nothing, VarEffect} = nothing, wts::Union{Nothing, AbstractVector, AbstractString, Symbol} = nothing)
+    LMM(model, data; contrasts=Dict{Symbol,Any}(), random::Union{Nothing, VarEffect, Vector{VarEffect}} = nothing, repeated::Union{Nothing, VarEffect} = nothing, wts::Union{Nothing, AbstractVector, AbstractMatrix, AbstractString, Symbol} = nothing)
 
 Make Linear-Mixed Model object.
 
@@ -27,6 +27,10 @@ Make Linear-Mixed Model object.
 
 `wts`: regression weights (residuals).
 
+Weights can be set as `Symbol` or `String`; in this case weights are taken from the tabular data.
+If weights is vector then this vector applyed to R-side part of covariance matrix (see [Weights details](@ref weights_header)). +If weights is matrix then R-side part of covariance matrix multiplied by corresponding part of weight-matrix. + See also: [`@lmmformula`](@ref) """ struct LMM{T <: AbstractFloat, W <: Union{LMMWts, Nothing}} <: MetidaModel @@ -57,7 +61,11 @@ struct LMM{T <: AbstractFloat, W <: Union{LMMWts, Nothing}} <: MetidaModel log::Vector{LMMLogMsg}) where T where W <: Union{LMMWts, Nothing} new{T, W}(model, f, modstr, covstr, data, dv, nfixed, rankx, result, maxvcbl, wts, log) end - function LMM(model, data; contrasts=Dict{Symbol,Any}(), random::Union{Nothing, VarEffect, Vector{VarEffect}} = nothing, repeated::Union{Nothing, VarEffect, Vector{VarEffect}} = nothing, wts::Union{Nothing, AbstractVector, AbstractString, Symbol} = nothing) + function LMM(model, data; + contrasts=Dict{Symbol,Any}(), + random::Union{Nothing, VarEffect, Vector{VarEffect}} = nothing, + repeated::Union{Nothing, VarEffect, Vector{VarEffect}} = nothing, + wts::Union{Nothing, AbstractVector, AbstractMatrix, AbstractString, Symbol} = nothing) #need check responce - Float if !Tables.istable(data) error("Data not a table!") end if repeated === nothing && random === nothing @@ -122,12 +130,22 @@ struct LMM{T <: AbstractFloat, W <: Union{LMMWts, Nothing}} <: MetidaModel if wts isa Symbol wts = Tables.getcolumn(data, wts) end - if length(lmmdata.yv) == length(wts) - if any(x -> x <= zero(x), wts) error("Only cases with positive weights allowed!") end - lmmwts = LMMWts(wts, covstr.vcovblock) - else - @warn "wts count not equal observations count! wts not used." - lmmwts = nothing + if wts isa AbstractVector + if length(lmmdata.yv) == length(wts) + if any(x -> x <= zero(x), wts) error("Only cases with positive weights allowed!") end + lmmwts = LMMWts(wts, covstr.vcovblock) + else + @warn "wts count not equal observations count! wts not used." + lmmwts = nothing + end + elseif wts isa AbstractMatrix + if length(lmmdata.yv) == LinearAlgebra.checksquare(wts) + if any(x -> x <= zero(x), wts) error("Only positive values allowed!") end + lmmwts = LMMWts(wts, covstr.vcovblock) + else + @warn "wts count not equal observations count! wts not used." + lmmwts = nothing + end end end diff --git a/src/lmmdata.jl b/src/lmmdata.jl index bf07fa7b..98f6805a 100644 --- a/src/lmmdata.jl +++ b/src/lmmdata.jl @@ -35,16 +35,23 @@ struct LMMDataViews{T<:AbstractFloat} <: AbstractLMMDataBlocks end end -struct LMMWts{T<:AbstractFloat} - sqrtwts::Vector{Vector{T}} - function LMMWts(sqrtwts::Vector{Vector{T}}) where T +struct LMMWts{T} + sqrtwts::Vector{T} + function LMMWts(sqrtwts::Vector{T}) where T new{T}(sqrtwts) end - function LMMWts(wts::Vector{T}, vcovblock) where T + function LMMWts(wts::AbstractVector{T}, vcovblock) where T sqrtwts = Vector{Vector{T}}(undef, length(vcovblock)) for i in eachindex(vcovblock) sqrtwts[i] = @. 
inv(sqrt($(view(wts, vcovblock[i])))) end LMMWts(sqrtwts) end + function LMMWts(wts::AbstractMatrix{T}, vcovblock) where T + sqrtwts = Vector{Matrix{T}}(undef, length(vcovblock)) + for i in eachindex(vcovblock) + sqrtwts[i] = wts[vcovblock[i], vcovblock[i]] + end + LMMWts(sqrtwts) + end end \ No newline at end of file diff --git a/src/rmat.jl b/src/rmat.jl index 958c3ac6..5cac108a 100644 --- a/src/rmat.jl +++ b/src/rmat.jl @@ -10,25 +10,40 @@ zblock = view(covstr.rz[j], block, :) @simd for i = 1:subjn(covstr, en, bi) sb = getsubj(covstr, en, bi, i) - rmat!(view(mx, sb, sb), view(θ, rθ[j]), view(zblock, sb, :), covstr.repeated[j].covtype.s) + rmat!(view(mx, sb, sb), view(θ, rθ[j]), view(zblock, sb, :), covstr.repeated[j].covtype.s, bi) end end mx end ################################################################################ -function rmat!(::Any, ::Any, ::Any, ::AbstractCovarianceType) +function rmat!(::Any, ::Any, ::Any, ::AbstractCovarianceType, ::Int) error("No rmat! method defined for thit structure!") end #SI -Base.@propagate_inbounds function rmat!(mx, θ, ::AbstractMatrix, ::SI_) +Base.@propagate_inbounds function rmat!(mx, θ, ::AbstractMatrix, ::SI_, ::Int) val = θ[1]^2 @inbounds @simd for i ∈ axes(mx, 1) mx[i, i] += val end mx end +#SWC +function rmat!(mx, θ, ::AbstractMatrix, ct::SWC_, sbj::Int) + s = size(mx, 1) + de = θ[1] ^ 2 + if s > 1 + for n = 1:s + @inbounds @simd for m = 1:n + mx[m, n] += de * ct.wtsb[sbj][m, n] + end + end + else + @inbounds mx[1, 1] += de * ct.wtsb[sbj][1, 1] + end + mx +end #DIAG -function rmat!(mx, θ, rz, ::DIAG_) +function rmat!(mx, θ, rz, ::DIAG_, ::Int) for i ∈ axes(mx, 1) @inbounds @simd for c ∈ axes(θ, 1) mx[i, i] += rz[i, c] * θ[c] ^ 2 @@ -37,7 +52,7 @@ function rmat!(mx, θ, rz, ::DIAG_) mx end #AR -function rmat!(mx, θ, ::AbstractMatrix, ::AR_) +function rmat!(mx, θ, ::AbstractMatrix, ::AR_, ::Int) s = size(mx, 1) de = θ[1] ^ 2 @inbounds @simd for m = 1:s @@ -53,7 +68,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ::AR_) mx end #ARH -function rmat!(mx, θ, rz, ::ARH_) +function rmat!(mx, θ, rz, ::ARH_, ::Int) vec = tmul_unsafe(rz, θ) s = size(mx, 1) if s > 1 @@ -69,7 +84,7 @@ function rmat!(mx, θ, rz, ::ARH_) mx end #CS -function rmat!(mx, θ, ::AbstractMatrix, ::CS_) +function rmat!(mx, θ, ::AbstractMatrix, ::CS_, ::Int) s = size(mx, 1) θsq = θ[1] * θ[1] θsqp = θsq * θ[2] @@ -86,7 +101,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ::CS_) mx end #CSH -function rmat!(mx, θ, rz, ::CSH_) +function rmat!(mx, θ, rz, ::CSH_, ::Int) vec = tmul_unsafe(rz, θ) s = size(mx, 1) if s > 1 @@ -105,7 +120,7 @@ function rmat!(mx, θ, rz, ::CSH_) end ################################################################################ #ARMA -function rmat!(mx, θ, ::AbstractMatrix, ::ARMA_) +function rmat!(mx, θ, ::AbstractMatrix, ::ARMA_, ::Int) s = size(mx, 1) de = θ[1] ^ 2 @inbounds @simd for m = 1:s @@ -123,7 +138,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ::ARMA_) end ################################################################################ #TOEPP -function rmat!(mx, θ, ::AbstractMatrix, ct::TOEPP_) +function rmat!(mx, θ, ::AbstractMatrix, ct::TOEPP_, ::Int) de = θ[1] ^ 2 #diagonal element s = size(mx, 1) #size @inbounds @simd for i = 1:s @@ -140,7 +155,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ct::TOEPP_) end ################################################################################ #TOEPHP -function rmat!(mx, θ, rz, ct::TOEPHP_) +function rmat!(mx, θ, rz, ct::TOEPHP_, ::Int) l = size(rz, 2) vec = rz * (θ[1:l]) s = size(mx, 1) #size @@ -181,7 
+196,7 @@ function edistance(mx::AbstractMatrix{T}, i::Int, j::Int) where T end ################################################################################ #SPEXP -function rmat!(mx, θ, rz, ::SPEXP_) +function rmat!(mx, θ, rz, ::SPEXP_, ::Int) σ² = θ[1]^2 #θe = exp(θ[2]) θe = θ[2] @@ -202,7 +217,7 @@ function rmat!(mx, θ, rz, ::SPEXP_) end ################################################################################ #SPPOW -function rmat!(mx, θ, rz, ::SPPOW_) +function rmat!(mx, θ, rz, ::SPPOW_, ::Int) σ² = θ[1]^2 ρ = θ[2] s = size(mx, 1) @@ -222,7 +237,7 @@ function rmat!(mx, θ, rz, ::SPPOW_) end #SPGAU -function rmat!(mx, θ, rz, ::SPGAU_) +function rmat!(mx, θ, rz, ::SPGAU_, ::Int) σ² = θ[1]^2 #θe = exp(θ[2]) θe = θ[2] @@ -244,7 +259,7 @@ function rmat!(mx, θ, rz, ::SPGAU_) end ################################################################################ #SPEXPD cos(pidij) -function rmat!(mx, θ, rz, ::SPEXPD_) +function rmat!(mx, θ, rz, ::SPEXPD_, ::Int) σ² = θ[2]^2 σ²d = θ[1]^2 + σ² θe = θ[3] @@ -263,7 +278,7 @@ function rmat!(mx, θ, rz, ::SPEXPD_) mx end #SPPOWD -function rmat!(mx, θ, rz, ::SPPOWD_) +function rmat!(mx, θ, rz, ::SPPOWD_, ::Int) σ² = θ[2]^2 σ²d = θ[1]^2 + σ² ρ = θ[3] @@ -281,7 +296,7 @@ function rmat!(mx, θ, rz, ::SPPOWD_) mx end #SPGAUD -function rmat!(mx, θ, rz, ::SPGAUD_) +function rmat!(mx, θ, rz, ::SPGAUD_, ::Int) σ² = θ[2]^2 σ²d = θ[1]^2 + σ² θe = θ[3] @@ -320,7 +335,7 @@ function unrmat(θ::AbstractVector{T}, rz) where T end Symmetric(mx) end -function rmat!(mx, θ, rz::AbstractMatrix, ::UN_) +function rmat!(mx, θ, rz::AbstractMatrix, ::UN_, ::Int) vec = tmul_unsafe(rz, θ) rm = size(mx, 1) rcov = unrmat(θ, rz) diff --git a/src/utils.jl b/src/utils.jl index 3a8efad4..f2de44ce 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -272,10 +272,13 @@ function applywts!(::Any, ::Int, ::Nothing) nothing end -function applywts!(V::AbstractMatrix, i::Int, wts::LMMWts) +function applywts!(V::AbstractMatrix, i::Int, wts::LMMWts{<:Vector}) mulβdαβd!(V, wts.sqrtwts[i]) end +function applywts!(V::AbstractMatrix, i::Int, wts::LMMWts{<:Matrix}) + V .*= wts.sqrtwts[i] +end ##################################################################### diff --git a/src/varstruct.jl b/src/varstruct.jl index 49a426a3..1fbb96b4 100644 --- a/src/varstruct.jl +++ b/src/varstruct.jl @@ -394,6 +394,11 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure end esb = EffectSubjectBlock(sblock, nblock) ####################################################################### + # Modify repeated effect covariance type for some types + for r in repeated + applycovschema!(r.covtype.s, blocks) + end + ####################################################################### new{eltype(z), T2}(random, repeated, schema, rcnames, blocks, rn, rtn, rpn, z, esb, zrndur, rz, q, t, tr, tl, ct, emap, sn, maxn) end end diff --git a/src/vartypes.jl b/src/vartypes.jl index 1b4707b4..d4abda2d 100644 --- a/src/vartypes.jl +++ b/src/vartypes.jl @@ -5,6 +5,10 @@ ################################################################################ struct SI_ <: AbstractCovarianceType end +mutable struct SWC_{W<:AbstractMatrix, B<:Vector{<:AbstractMatrix}} <: AbstractCovarianceType + wtsm::W + wtsb::B +end struct DIAG_ <: AbstractCovarianceType end struct AR_ <: AbstractCovarianceType end struct ARH_ <: AbstractCovarianceType end @@ -64,6 +68,15 @@ function ScaledIdentity() CovarianceType(SI_()) end const SI = ScaledIdentity() + +# docs need +# Experimental +function ScaledWeightedCov(wtsm::AbstractMatrix{T}) where T + 
wtsb = Matrix{T}[] + CovarianceType(SWC_(wtsm, wtsb)) +end +const SWC = ScaledWeightedCov + """ Diag() @@ -362,6 +375,9 @@ end function covstrparam(ct::SI_, ::Int)::Tuple{Int, Int} return (1, 0) end +function covstrparam(ct::SWC_, ::Int)::Tuple{Int, Int} + return (1, 0) +end function covstrparam(ct::DIAG_, t::Int, )::Tuple{Int, Int} return (t, 0) end @@ -412,6 +428,9 @@ end function rcoefnames(s, t, ct::SI_) return ["σ² "] end +function rcoefnames(s, t, ct::SWC_) + return ["σ² "] +end function rcoefnames(s, t, ct::DIAG_) if isa(coefnames(s), AbstractArray{T,1} where T) l = length(coefnames(s)) else l = 1 end return fill!(Vector{String}(undef, l), "σ² ") .* string.(coefnames(s)) @@ -510,6 +529,21 @@ function rcoefnames(s, t, ct::AbstractCovarianceType) v .= "Val " return v end +################################################################################ +# APPLY COV SCHEMA +################################################################################ +function applycovschema!(::AbstractCovarianceType, ::Any) + nothing +end + +function applycovschema!(ct::SWC_{<:AbstractMatrix{T}}, vcovblock) where T + if length(ct.wtsb) == 0 + for i in eachindex(vcovblock) + push!(ct.wtsb, ct.wtsm[vcovblock[i], vcovblock[i]]) + end + end + ct +end ################################################################################ # SHOW @@ -524,6 +558,9 @@ end function Base.show(io::IO, ct::SI_) print(io, "SI") end +function Base.show(io::IO, ct::SWC_) + print(io, "SWC") +end function Base.show(io::IO, ct::DIAG_) print(io, "DIAG") end diff --git a/test/test.jl b/test/test.jl index afcf3bea..0913efd2 100644 --- a/test/test.jl +++ b/test/test.jl @@ -209,6 +209,18 @@ include("testdata.jl") fit!(lmm) @test Metida.m2logreml(lmm) ≈ 17.823729 atol=1E-6 # TEST WITH SPSS 28 + # Matrix wts + matwts = Symmetric(rand(size(df0, 1), size(df0, 1))) + lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; + random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), + wts = matwts) + @test_nowarn fit!(lmm) + + # experimental weighted covariance + lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; + repeated = Metida.VarEffect(Metida.@covstr(1|subject), Metida.SWC(matwts))) + @test_nowarn fit!(lmm) + # Repeated vector lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; @@ -654,7 +666,7 @@ end reml = Metida.m2logreml(lmm) @test reml_c ≈ reml - function Metida.rmat!(mx, θ, rz, ::CustomCovarianceStructure) + function Metida.rmat!(mx, θ, rz, ::CustomCovarianceStructure, ::Int) vec = Metida.tmul_unsafe(rz, θ) rn = size(mx, 1) if rn > 1 From 766ee62a41487508010a21bab145009d6f8d7c4e Mon Sep 17 00:00:00 2001 From: PharmCat Date: Wed, 21 Aug 2024 23:08:15 +0300 Subject: [PATCH 3/9] update ga --- .github/workflows/Documenter.yml | 7 +++---- .github/workflows/Tier1.yml | 22 +++++++++++++++++----- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml index 5ca7853f..21695a8e 100644 --- a/.github/workflows/Documenter.yml +++ b/.github/workflows/Documenter.yml @@ -23,12 +23,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 45 steps: - - uses: actions/checkout@v3 - - uses: julia-actions/setup-julia@v1 + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 with: version: "1.8" - - uses: julia-actions/julia-buildpkg@v1 - - uses: julia-actions/julia-docdeploy@v1 + - uses: julia-actions/cache@v1 - name: Install dependencies run: julia --project=docs/ -e 'using Pkg; 
Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Build and deploy diff --git a/.github/workflows/Tier1.yml b/.github/workflows/Tier1.yml index 7639b18b..dbb7ac80 100644 --- a/.github/workflows/Tier1.yml +++ b/.github/workflows/Tier1.yml @@ -30,6 +30,7 @@ jobs: strategy: matrix: version: + - 'lts' - '1.8' - '1' os: @@ -39,15 +40,26 @@ jobs: arch: - x64 steps: - - uses: actions/checkout@v3 - - uses: julia-actions/setup-julia@v1 + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} arch: ${{ matrix.arch }} - - uses: julia-actions/cache@v1 + - uses: actions/cache@v4 + env: + cache-name: cache-artifacts + with: + path: ~/.julia/artifacts + key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} + restore-keys: | + ${{ runner.os }}-test-${{ env.cache-name }}- + ${{ runner.os }}-test- + ${{ runner.os }}- - uses: julia-actions/julia-buildpkg@v1 - uses: julia-actions/julia-runtest@v1 - uses: julia-actions/julia-processcoverage@v1 - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 + if: ${{ matrix.os == 'ubuntu-latest' && matrix.version == '1' && matrix.arch == 'x64' }} with: - files: lcov.info + file: lcov.info + token: ${{ secrets.CODECOV_TOKEN }} From 966096052dca6c5818854a240bdc6820e680f0ed Mon Sep 17 00:00:00 2001 From: PharmCat Date: Wed, 21 Aug 2024 23:23:12 +0300 Subject: [PATCH 4/9] fix --- .github/workflows/Tier1.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/Tier1.yml b/.github/workflows/Tier1.yml index dbb7ac80..94311708 100644 --- a/.github/workflows/Tier1.yml +++ b/.github/workflows/Tier1.yml @@ -30,7 +30,6 @@ jobs: strategy: matrix: version: - - 'lts' - '1.8' - '1' os: From a303064452126d43e5b33c859cd220ecbe17053f Mon Sep 17 00:00:00 2001 From: PharmCat Date: Wed, 21 Aug 2024 23:57:40 +0300 Subject: [PATCH 5/9] docs, clean code --- docs/src/api.md | 6 ++++++ src/varstruct.jl | 17 +++++------------ src/vartypes.jl | 32 +++++++++++++++++++++++++++++++- 3 files changed, 42 insertions(+), 13 deletions(-) diff --git a/docs/src/api.md b/docs/src/api.md index b2c824bd..ee1300e8 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -124,6 +124,12 @@ Metida.ToeplitzParameterized Metida.Unstructured ``` +### Metida.ScaledWeightedCov + +```@docs +Metida.ScaledWeightedCov +``` + ### Methods ### Metida.caic diff --git a/src/varstruct.jl b/src/varstruct.jl index 1fbb96b4..bf6ae3a4 100644 --- a/src/varstruct.jl +++ b/src/varstruct.jl @@ -16,8 +16,6 @@ function StatsModels.ContrastsMatrix(contrasts::RawCoding, levels::AbstractVecto contrasts) end function StatsModels.modelcols(t::CategoricalTerm{RawCoding, T, N}, d::NamedTuple) where T where N - #v = d[t.sym] - #reshape(v, length(v), 1) d[t.sym] end @@ -255,7 +253,6 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure end end # RANDOM EFFECTS - #if random[1].covtype.z #IF NOT ZERO @inbounds for i = 1:rn if length(random[i].coding) == 0 fill_coding_dict!(random[i].model, random[i].coding, data) @@ -283,7 +280,7 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure fillur!(tr, i, t) symbs = StatsModels.termvars(random[i].subj) if length(symbs) > 0 - cdata = tabcols(data, symbs) # Tuple(Tables.getcolumn(Tables.columns(data_), x) for x in symbs) + cdata = tabcols(data, symbs) dicts[i] = Dict{Tuple{eltype.(cdata)...}, Vector{Int}}() indsdict!(dicts[i], cdata) else @@ -310,11 +307,10 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure end schema[rn + i] = apply_schema(repeated[i].model, 
StatsModels.schema(data_, repeated[i].coding)) - #rz_[i] = reduce(hcat, modelcols(schema[rn+i], data)) rz_[i] = modelcols(MatrixTerm(schema[rn+i]), data_) symbs = StatsModels.termvars(repeated[i].subj) if length(symbs) > 0 - cdata = tabcols(data, symbs) # Tuple(Tables.getcolumn(Tables.columns(data), x) for x in symbs) + cdata = tabcols(data, symbs) dicts[rn + i] = Dict{Tuple{eltype.(cdata)...}, Vector{Int}}() indsdict!(dicts[rn + i], cdata) else @@ -336,9 +332,6 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure # Theta length tl = sum(t) ######################################################################## - #if any(x-> 1 in keys(x), dicts[1:end-1]) - # blocks = [first(dicts)[1]] - #else if random[1].covtype.z # if first random effect not null subjblockdict = dicts[1] if length(dicts) > 2 # if more than 2 random effects @@ -363,9 +356,7 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure dicts[rn+i] = subjblockdict end - blocks = collect(values(subjblockdict)) - #end sblock = Matrix{Vector{Tuple{Vector{Int}, Int}}}(undef, length(blocks), alleffl) nblock = [] @@ -394,7 +385,9 @@ struct CovStructure{T, T2} <: AbstractCovarianceStructure end esb = EffectSubjectBlock(sblock, nblock) ####################################################################### - # Modify repeated effect covariance type for some types + # Postprocessing + # Modify repeated effect covariance type for some types + # Maybe it will be removed for r in repeated applycovschema!(r.covtype.s, blocks) end diff --git a/src/vartypes.jl b/src/vartypes.jl index d4abda2d..a22ca849 100644 --- a/src/vartypes.jl +++ b/src/vartypes.jl @@ -69,8 +69,38 @@ function ScaledIdentity() end const SI = ScaledIdentity() -# docs need # Experimental +""" + ScaledWeightedCov(wtsm::AbstractMatrix{T}) + +!!! warning + Experimental + +Scaled weighted covariance matrix, where `wtsm` - `NxN` within block correlation matrix (N - total number of observations). +Used only for repeated effect. + +SWC = ScaledWeightedCov + +```math +R = Corr(W) * \\sigma_c^2 +``` + +where ``Corr(W)`` - diagonal correlation matrix. + +example: + +```julia +matwts = Symmetric(UnitUpperTriangular(rand(size(df0,1), size(df0,1)))) +lmm = LMM(@formula(var~sequence+period+formulation), df0; + repeated = VarEffect(@covstr(1|subject), SWC(matwts))) +fit!(lmm) + +``` +!!! note + +There is no `wtsm` checks for symmetricity or values. + +""" function ScaledWeightedCov(wtsm::AbstractMatrix{T}) where T wtsb = Matrix{T}[] CovarianceType(SWC_(wtsm, wtsb)) From b76211ba883fd5bdc1f16b9ac11891b29d66b3cb Mon Sep 17 00:00:00 2001 From: PharmCat Date: Thu, 22 Aug 2024 00:09:09 +0300 Subject: [PATCH 6/9] update test --- test/test.jl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/test.jl b/test/test.jl index 0913efd2..fff428ac 100644 --- a/test/test.jl +++ b/test/test.jl @@ -189,7 +189,7 @@ include("testdata.jl") # Int dependent variable, function Term in random part df0.varint = Int.(ceil.(df0.var2)) - lmmint = Metida.fit(Metida.LMM, Metida.@lmmformula(varint~formulation, + @test_warn "Response variable not <: AbstractFloat" lmmint = Metida.fit(Metida.LMM, Metida.@lmmformula(varint~formulation, random = 1+var^2|subject:Metida.SI), df0) Metida.fit!(lmmint) @test Metida.m2logreml(lmmint) ≈ 84.23373276096902 atol=1E-6 @@ -209,8 +209,13 @@ include("testdata.jl") fit!(lmm) @test Metida.m2logreml(lmm) ≈ 17.823729 atol=1E-6 # TEST WITH SPSS 28 + + @test_warn "wts count not equal observations count! wts not used." 
lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; + random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), + wts = ones(10)) + # Matrix wts - matwts = Symmetric(rand(size(df0, 1), size(df0, 1))) + matwts = Symmetric(rand(size(df0,1), size(df0,1))) lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), wts = matwts) @@ -220,7 +225,7 @@ include("testdata.jl") lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; repeated = Metida.VarEffect(Metida.@covstr(1|subject), Metida.SWC(matwts))) @test_nowarn fit!(lmm) - + # Repeated vector lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; From 8e7229b52082611702f40fc4b4b940c5cf6fcca6 Mon Sep 17 00:00:00 2001 From: PharmCat Date: Thu, 22 Aug 2024 01:29:12 +0300 Subject: [PATCH 7/9] monor opt, clean, docs, test --- docs/src/index.md | 2 +- src/Metida.jl | 1 + src/fvalue.jl | 3 - src/reml.jl | 7 +- src/utils.jl | 5 +- test/devtest.jl | 388 ++++++++-------------------------------------- test/test.jl | 28 ++-- 7 files changed, 89 insertions(+), 345 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index a364593e..a42bf860 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -38,7 +38,7 @@ Implemented covariance structures: Actually Metida can fit datasets with wore than 160k observation and 40k subjects levels on PC with 64 GB RAM. This is not "hard-coded" limitation, but depends on your model and data structure. Fitting of big datasets can take a lot of time. Optimal dataset size is less than 100k observations with maximum length of block less than 400. -!!! note +!!! warning Julia v1.8 or higher required. diff --git a/src/Metida.jl b/src/Metida.jl index b95c8339..e8950ebd 100644 --- a/src/Metida.jl +++ b/src/Metida.jl @@ -20,6 +20,7 @@ import Random: default_rng, AbstractRNG, rand! 
export @formula, @covstr, @lmmformula, SI, ScaledIdentity, +SWC, ScaledWeightedCov, DIAG, Diag, AR, Autoregressive, ARH, HeterogeneousAutoregressive, diff --git a/src/fvalue.jl b/src/fvalue.jl index 1a69e698..1d8e6ba5 100644 --- a/src/fvalue.jl +++ b/src/fvalue.jl @@ -1,8 +1,5 @@ # fvalue.jl -#= -Metida.fvalue(lmm, [0 0 1 0 0 0; 0 0 0 1 0 0; 0 0 0 0 1 0]) -=# """ fvalue(lmm::LMM, l::Matrix) diff --git a/src/reml.jl b/src/reml.jl index 10c29e51..bc30de59 100644 --- a/src/reml.jl +++ b/src/reml.jl @@ -88,7 +88,12 @@ function reml_sweep_β(lmm, data, θ::Vector{T}; maxthreads::Int = 4) where T # end θ₁ = sum(accθ₁) θ₂ = sum(accθ₂) - βm = sum(accβm) + if length(accβm) > 1 + for i = 2:length(accβm) + accβm[1] += accβm[i] + end + end + βm = accβm[1] noerror = all(erroracc) noerror = noerror * checkmatrix!(θ₂) θs₂ = Symmetric(θ₂) diff --git a/src/utils.jl b/src/utils.jl index f2de44ce..a142c874 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -16,10 +16,7 @@ end function fixedeffn(lmm::LMM) fixedeffn(lmm.f) end -#= -function nterms(mf::ModelFrame) - mf.schema.schema.count -=# + function nterms(rhs::Union{Tuple{Vararg{AbstractTerm}}, Nothing, AbstractTerm}) if isa(rhs, Term) p = 1 diff --git a/test/devtest.jl b/test/devtest.jl index d70c1149..e1e52d80 100644 --- a/test/devtest.jl +++ b/test/devtest.jl @@ -1,9 +1,8 @@ #using NLopt using Metida -using DataFrames, CSV, StatsModels, LinearAlgebra, ForwardDiff, ForwardDiff, Optim, Distributions, CategoricalArrays -#using SnoopCompile -#using LineSearches +using DataFrames, CSV, StatsModels, LinearAlgebra, CategoricalArrays, Dates + using BenchmarkTools path = dirname(@__FILE__) cd(path) @@ -13,349 +12,86 @@ ftdf = CSV.File(path*"/csv/1fptime.csv"; types = [String, String, Float6 ftdf2 = CSV.File(path*"/csv/1freparma.csv"; types = [String, String, Float64, Float64]) |> DataFrame ftdf3 = CSV.File(path*"/csv/ftdf3.csv"; types = [String, Float64, Float64, String, String, String, String, String, Float64]) |> DataFrame +hdp = CSV.File("hdp.csv") |> DataFrame +transform!(hdp, :DID => categorical); +transform!(hdp, :HID=> categorical); +transform!(hdp, :Sex=> categorical); +transform!(hdp, :School=> categorical); +transform!(hdp, :pain=> categorical); pkgversion(m::Module) = Pkg.TOML.parsefile(joinpath(dirname(string(first(methods(m.eval)).file)), "..", "Project.toml"))["version"] -# MODEL 1 + + +results = DataFrame(datetime =[], model = [], mintime =[], memory = [], allocs = []) +b = Vector{Any}(undef, 4) ################################################################################ # Metida ################################################################################ -#nt = LinearAlgebra.BLAS.get_num_threads() -#LinearAlgebra.BLAS.set_num_threads(16) -#LinearAlgebra.BLAS.set_num_threads(nt) - +# MODEL 1 lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; random = Metida.VarEffect(Metida.@covstr(1 + time|subject&factor), Metida.CSH), ) -b11 = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 - -#= -lmm = Metida.LMM(@formula(response ~1 + factor2), ftdf3; -repeated = Metida.VarEffect(Metida.@covstr(p|subject), Metida.CSH), -) -@benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 -=# -#:LN_BOBYQA :LN_NEWUOA -#@benchmark Metida.fit!($lmm, hes = false; maxthreads = 16, solver = :nlopt, optmethod = :LN_NEWUOA) seconds = 15 -#@benchmark Metida.fit!($lmm, optmethod = Metida.LBFGS_OM, hes = false; maxthreads = 16) seconds = 15 -#@benchmark Metida.fit!($lmm, optmethod = Metida.BFGS_OM, hes = false; maxthreads = 16) 
seconds = 15 -#@benchmark Metida.fit!($lmm, optmethod = Metida.CG_OM, hes = false; maxthreads = 16) seconds = 15 -#@benchmark Metida.fit!($lmm, optmethod = Optim.NelderMead(), hes = false; maxthreads = 16) seconds = 15 - - -#@time Metida.fit!(lmm, hes = false) - - - -################################################################################ -# MetidaNLopt -################################################################################ -using MetidaNLopt -b12 = @benchmark Metida.fit!($lmm, hes = false, solver = :nlopt) seconds = 15 - -################################################################################ -# MetidaCu -################################################################################ -using MetidaCu -b13 = @benchmark Metida.fit!($lmm, hes = false, solver = :cuda) seconds = 15 - - +b[1] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 # MODEL 2 - lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; random = Metida.VarEffect(Metida.@covstr(1 + time|factor), Metida.ARH), ) -b21 = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 -b22 = @benchmark Metida.fit!($lmm, hes = false, solver = :nlopt) seconds = 15 -b23 = @benchmark Metida.fit!($lmm, hes = false, solver = :cuda) seconds = 15 - +b[2] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 # MODEL 3 - lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; random = Metida.VarEffect(Metida.@covstr(1 + time|subject&factor), Metida.CSH), +repeated = Metida.VarEffect(Metida.@covstr(1|subject), Metida.AR), ) - -b31 = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 -b32 = @benchmark Metida.fit!($lmm, hes = false, solver = :nlopt) seconds = 15 -b33 = @benchmark Metida.fit!($lmm, hes = false, solver = :cuda) seconds = 15 - +b[3] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 # MODEL 4 - -hdp = CSV.File("hdp.csv") |> DataFrame -transform!(hdp, :DID => categorical); -transform!(hdp, :HID=> categorical); -transform!(hdp, :Sex=> categorical); -transform!(hdp, :School=> categorical); -transform!(hdp, :pain=> categorical); -################################################################################ -# Metida -################################################################################ - lmm = Metida.LMM(@formula(tumorsize ~ 1 + CancerStage), hdp; random = Metida.VarEffect(Metida.@covstr(1|HID), Metida.DIAG), ) +b[4] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 -b41 = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 -b42 = @benchmark Metida.fit!($lmm, hes = false, solver = :nlopt) seconds = 15 -b43 = @benchmark Metida.fit!($lmm, hes = false, solver = :cuda) seconds = 15 - -# MODEL 5 maximum 1437 observation-per-subject (10 subjects) -lmm = Metida.LMM(@formula(tumorsize ~ 1 + CancerStage), hdp; -random = Metida.VarEffect(Metida.@covstr(1|ntumors), Metida.SI), -) - -#b51 = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 -b52 = @benchmark Metida.fit!($lmm, hes = false, solver = :nlopt) seconds = 15 -b53 = @benchmark Metida.fit!($lmm, hes = false, solver = :cuda) seconds = 15 - -# MODEL 6: maximum 3409 observation-per-subject (4 subjects) - - -println("Metida version: ", pkgversion(Metida)) -println("MetidaNLopt version: ", pkgversion(MetidaNLopt)) -println("MetidaCu version: ", pkgversion(MetidaCu)) - -println("MODEL 1") -println("# Metida") -display(b11) -println("# MetidaNLopt") -display(b12) -println("# MetidaCu") 
-display(b13) -println() -println() - -println("MODEL 2") -println("# Metida") -display(b21) -println("# MetidaNLopt") -display(b22) -println("# MetidaCu") -display(b23) -println() -println() - -println("MODEL 3") -println("# Metida") -display(b31) -println("# MetidaNLopt") -display(b32) -println("# MetidaCu") -display(b33) -println() -println() - -println("MODEL 4") -println("# Metida") -display(b41) -println("# MetidaNLopt") -display(b42) -println("# MetidaCu") -display(b43) -println() -println() - -println("MODEL 5") -#println("# Metida") -#display(b51) -println("# MetidaNLopt") -display(b52) -println("# MetidaCu") -display(b53) -println() -println() - -#Julia 1.6.3 -#= -julia> -Metida version: 0.12.0 -MetidaNLopt version: 0.4.0 -MetidaCu version: 0.4.1 -MODEL 1 -# Metida -BenchmarkTools.Trial: 1184 samples with 1 evaluation. - Range (min … max): 5.489 ms … 161.314 ms ┊ GC (min … max): 0.00% … 94.86% - Time (median): 8.535 ms ┊ GC (median): 0.00% - Time (mean ± σ): 12.700 ms ± 22.994 ms ┊ GC (mean ± σ): 33.38% ± 16.90% - - ▇█▅ - ███▄▄▄▁▁▁▁▁▁▁▁▁▁▁▄▁▁▄▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▄▁▄▄▁▅▄▆▁▄▄▄▄▅▄▆▄ █ - 5.49 ms Histogram: log(frequency) by time 144 ms < - - Memory estimate: 22.63 MiB, allocs estimate: 37225. -# MetidaNLopt -BenchmarkTools.Trial: 173 samples with 1 evaluation. - Range (min … max): 74.294 ms … 186.791 ms ┊ GC (min … max): 0.00% … 58.09% - Time (median): 78.964 ms ┊ GC (median): 0.00% - Time (mean ± σ): 86.794 ms ± 22.516 ms ┊ GC (mean ± σ): 9.19% ± 14.57% - - █▃ - ▅▆██▆▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▅█▁▁▁▁▁▁▄▁▁▄▁▄▁▄▁▁▁▁▁▁▁▄▁▁▁▄▁▄▁▄▄▁▁▄▁▄ ▄ - 74.3 ms Histogram: log(frequency) by time 176 ms < - - Memory estimate: 56.61 MiB, allocs estimate: 481209. -# MetidaCu -BenchmarkTools.Trial: 2 samples with 1 evaluation. - Range (min … max): 9.128 s … 9.323 s ┊ GC (min … max): 0.00% … 0.58% - Time (median): 9.226 s ┊ GC (median): 0.29% - Time (mean ± σ): 9.226 s ± 137.689 ms ┊ GC (mean ± σ): 0.29% ± 0.41% - - █ █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 9.13 s Histogram: frequency by time 9.32 s < - - Memory estimate: 143.46 MiB, allocs estimate: 2524366. - - -MODEL 2 -# Metida -BenchmarkTools.Trial: 18 samples with 1 evaluation. - Range (min … max): 829.008 ms … 850.212 ms ┊ GC (min … max): 0.00% … 1.35% - Time (median): 839.290 ms ┊ GC (median): 0.58% - Time (mean ± σ): 839.688 ms ± 7.197 ms ┊ GC (mean ± σ): 0.62% ± 0.63% - - ▁ █ ▁ ▁▁ ▁ █ ▁ ▁█▁▁ ▁ ▁ ▁ - █▁▁▁█▁▁▁▁█▁▁▁██▁▁█▁▁▁█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█▁▁▁▁▁▁▁████▁▁▁▁▁▁█▁▁█▁█ ▁ - 829 ms Histogram: frequency by time 850 ms < - - Memory estimate: 140.68 MiB, allocs estimate: 7919. -# MetidaNLopt -BenchmarkTools.Trial: 135 samples with 1 evaluation. - Range (min … max): 108.482 ms … 119.274 ms ┊ GC (min … max): 0.00% … 4.83% - Time (median): 110.394 ms ┊ GC (median): 0.00% - Time (mean ± σ): 111.933 ms ± 2.719 ms ┊ GC (mean ± σ): 1.80% ± 2.26% - - ▂ ▂ █▂▄▃ ▂▂ - █▅▆▃▅▅███▇█████▆▅▁▃▁▃▃▃▁▁▁▁▃▁▁▁▁▁▃▁▁▁▁▃▃▃▅▁▆▃▇▇██▇▇▇▅▅▁▆▁▁▁▃▃ ▃ - 108 ms Histogram: frequency by time 117 ms < - - Memory estimate: 91.48 MiB, allocs estimate: 33646. -# MetidaCu -BenchmarkTools.Trial: 29 samples with 1 evaluation. - Range (min … max): 507.681 ms … 536.903 ms ┊ GC (min … max): 0.00% … 1.58% - Time (median): 518.749 ms ┊ GC (median): 0.00% - Time (mean ± σ): 521.751 ms ± 10.164 ms ┊ GC (mean ± σ): 0.58% ± 0.74% - - █ ▃ ▃ ▃ ▃ - ▇▁▁▁▇▇▁▇▁█▇▇▇▇▁▇▁▁▁▇▇▁▁█▁▇▁▇▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▇▁█▁▇▇▁▇█▇▁▁█ ▁ - 508 ms Histogram: frequency by time 537 ms < - - Memory estimate: 93.94 MiB, allocs estimate: 95784. - - -MODEL 3 -# Metida -BenchmarkTools.Trial: 1100 samples with 1 evaluation. 
- Range (min … max): 5.554 ms … 213.611 ms ┊ GC (min … max): 0.00% … 95.95% - Time (median): 8.962 ms ┊ GC (median): 0.00% - Time (mean ± σ): 13.684 ms ± 28.997 ms ┊ GC (mean ± σ): 37.50% ± 16.47% - - ▇█ - ██▄▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▄▆▄▁▆▄▄▁▄▄▄▆ ▇ - 5.55 ms Histogram: log(frequency) by time 181 ms < - - Memory estimate: 22.63 MiB, allocs estimate: 37224. -# MetidaNLopt -BenchmarkTools.Trial: 174 samples with 1 evaluation. - Range (min … max): 75.122 ms … 186.340 ms ┊ GC (min … max): 0.00% … 57.72% - Time (median): 79.034 ms ┊ GC (median): 0.00% - Time (mean ± σ): 86.617 ms ± 22.856 ms ┊ GC (mean ± σ): 8.85% ± 14.37% - - ▁█ - ▇██▇▄▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▇▆▁▄▁▁▁▅▄▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▁▅▁▁▄▁▆ ▄ - 75.1 ms Histogram: log(frequency) by time 175 ms < - - Memory estimate: 56.61 MiB, allocs estimate: 481212. -# MetidaCu -BenchmarkTools.Trial: 2 samples with 1 evaluation. - Range (min … max): 9.221 s … 9.255 s ┊ GC (min … max): 0.00% … 0.29% - Time (median): 9.238 s ┊ GC (median): 0.14% - Time (mean ± σ): 9.238 s ± 24.138 ms ┊ GC (mean ± σ): 0.14% ± 0.20% - - █ █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 9.22 s Histogram: frequency by time 9.26 s < - - Memory estimate: 143.47 MiB, allocs estimate: 2524496. - - -MODEL 4 -# Metida -BenchmarkTools.Trial: 3 samples with 1 evaluation. - Range (min … max): 6.738 s … 6.852 s ┊ GC (min … max): 1.36% … 0.52% - Time (median): 6.745 s ┊ GC (median): 1.09% - Time (mean ± σ): 6.779 s ± 63.739 ms ┊ GC (mean ± σ): 0.99% ± 0.43% - - █ █ █ - █▁▁█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 6.74 s Histogram: frequency by time 6.85 s < - - Memory estimate: 2.33 GiB, allocs estimate: 41657. -# MetidaNLopt -BenchmarkTools.Trial: 11 samples with 1 evaluation. - Range (min … max): 1.317 s … 1.438 s ┊ GC (min … max): 3.19% … 8.44% - Time (median): 1.365 s ┊ GC (median): 6.01% - Time (mean ± σ): 1.365 s ± 35.318 ms ┊ GC (mean ± σ): 5.52% ± 2.00% - - ▁▁ ▁ ▁ █▁ ▁▁ ▁ ▁ - ██▁▁▁▁▁▁█▁▁▁█▁▁▁▁▁▁▁▁▁██▁▁▁▁▁▁▁██▁█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 1.32 s Histogram: frequency by time 1.44 s < - - Memory estimate: 2.00 GiB, allocs estimate: 138279. -# MetidaCu -BenchmarkTools.Trial: 3 samples with 1 evaluation. - Range (min … max): 7.391 s … 7.432 s ┊ GC (min … max): 1.10% … 1.43% - Time (median): 7.426 s ┊ GC (median): 1.30% - Time (mean ± σ): 7.416 s ± 21.787 ms ┊ GC (mean ± σ): 1.28% ± 0.17% - - █ █ █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█▁▁▁▁▁▁▁█ ▁ - 7.39 s Histogram: frequency by time 7.43 s < - - Memory estimate: 1.87 GiB, allocs estimate: 919345. - - -MODEL 5 -# Metida -BenchmarkTools.Trial: 1 sample with 1 evaluation. - Single result which took 216.945 s (0.04% GC) to evaluate, - with a memory estimate of 3.91 GiB, over 7229 allocations. -# MetidaNLopt -BenchmarkTools.Trial: 2 samples with 1 evaluation. - Range (min … max): 8.973 s … 9.139 s ┊ GC (min … max): 3.06% … 2.77% - Time (median): 9.056 s ┊ GC (median): 2.91% - Time (mean ± σ): 9.056 s ± 117.413 ms ┊ GC (mean ± σ): 2.91% ± 0.20% - - █ █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 8.97 s Histogram: frequency by time 9.14 s < - - Memory estimate: 7.86 GiB, allocs estimate: 57558. -# MetidaCu -BenchmarkTools.Trial: 2 samples with 1 evaluation. 
- Range (min … max): 12.039 s … 12.084 s ┊ GC (min … max): 1.85% … 1.78% - Time (median): 12.062 s ┊ GC (median): 1.81% - Time (mean ± σ): 12.062 s ± 31.774 ms ┊ GC (mean ± σ): 1.81% ± 0.05% - - █ █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 12 s Histogram: frequency by time 12.1 s < - - Memory estimate: 8.31 GiB, allocs estimate: 365031. - -=# +for i = 1:4 + display(b[i]) + push!(results, (now(), "Model $i", minimum(b[i]).time, minimum(b[i]).memory, minimum(b[i]).allocs)) +end #= -lmm = Metida.LMM(@formula(r2 ~ f), spatdf; -repeated = Metida.VarEffect(Metida.@covstr(x+y|1), Metida.SPEXP), -) -@benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 - - -spatdf.ci = map(x -> CartesianIndex(x[:x], x[:y]), eachrow(spatdf)) -function Metida.edistance(mx::AbstractMatrix{<:CartesianIndex}, i::Int, j::Int) - return sqrt((mx[i, 1][1] - mx[j, 1][1])^2 + (mx[i, 1][2] - mx[j, 1][2])^2) -end -lmm = Metida.LMM(@formula(r2 ~ f), spatdf; -repeated = Metida.VarEffect(Metida.@covstr(ci|1), Metida.SPEXP; coding = Dict(:ci => Metida.RawCoding())), -) -@benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 +BenchmarkTools.Trial: 411 samples with 1 evaluation. + Range (min … max): 11.681 ms … 1.511 s ┊ GC (min … max): 0.00% … 99.24% + Time (median): 14.456 ms ┊ GC (median): 0.00% + Time (mean ± σ): 36.240 ms ± 170.372 ms ┊ GC (mean ± σ): 57.08% ± 11.90% + + █ + █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▁▁▁▁▁▁▁▁▁▄ ▆ + 11.7 ms Histogram: log(frequency) by time 1.42 s < + + Memory estimate: 19.38 MiB, allocs estimate: 31265. +BenchmarkTools.Trial: 23 samples with 1 evaluation. + Range (min … max): 657.256 ms … 719.396 ms ┊ GC (min … max): 0.00% … 4.31% + Time (median): 670.301 ms ┊ GC (median): 0.00% + Time (mean ± σ): 677.402 ms ± 16.735 ms ┊ GC (mean ± σ): 1.46% ± 2.20% + + █ ▁ + ▆▆▆▁▁▆█▆█▁▁▁▆▁▁▁▁▁▁▁▁▁▁▆▆▆▁▁▁▆▆▆▁▁▁▁▆▁▆▆▁▆▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▆ ▁ + 657 ms Histogram: frequency by time 719 ms < + + Memory estimate: 132.83 MiB, allocs estimate: 5086. +BenchmarkTools.Trial: 297 samples with 1 evaluation. + Range (min … max): 17.368 ms … 1.552 s ┊ GC (min … max): 0.00% … 98.55% + Time (median): 20.409 ms ┊ GC (median): 0.00% + Time (mean ± σ): 54.268 ms ± 197.857 ms ┊ GC (mean ± σ): 60.26% ± 15.96% + + █ + █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▁▁▅▅ ▅ + 17.4 ms Histogram: log(frequency) by time 1.19 s < + + Memory estimate: 42.28 MiB, allocs estimate: 23492. +BenchmarkTools.Trial: 4 samples with 1 evaluation. + Range (min … max): 4.696 s … 4.993 s ┊ GC (min … max): 2.54% … 8.32% + Time (median): 4.727 s ┊ GC (median): 2.74% + Time (mean ± σ): 4.786 s ± 138.981 ms ┊ GC (mean ± σ): 4.11% ± 2.86% + + █ ██ █ + █▁▁▁▁██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ + 4.7 s Histogram: frequency by time 4.99 s < + + Memory estimate: 2.56 GiB, allocs estimate: 40772. 
=# \ No newline at end of file diff --git a/test/test.jl b/test/test.jl index fff428ac..bd6ecc0b 100644 --- a/test/test.jl +++ b/test/test.jl @@ -32,15 +32,15 @@ include("testdata.jl") Metida.fit!(lmm) @test Metida.m2logreml(lmm) ≈ 25.00077786912235 atol=1E-6 - #Missing + # Missing lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0m; random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), ) Metida.fit!(lmm) @test Metida.m2logreml(lmm) ≈ 16.636012616466203 atol=1E-6 - #milmm = Metida.MILMM(lmm, df0m) - #Basic, Subject block + # milmm = Metida.MILMM(lmm, df0m) + # Basic, Subject block lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), ) @@ -130,21 +130,26 @@ include("testdata.jl") # AI like algo Metida.fit!(lmm; aifirst = true, init = Metida.theta(lmm)) @test Metida.m2logreml(lmm) ≈ 16.241112644506067 atol=1E-6 + # Score Metida.fit!(lmm; aifirst = :score) @test Metida.m2logreml(lmm) ≈ 16.241112644506067 atol=1E-6 + # AI Metida.fit!(lmm; aifirst = :ai) @test Metida.m2logreml(lmm) ≈ 16.241112644506067 atol=1E-6 - #Set user coding + + # Set user coding lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; random = Metida.VarEffect(Metida.@covstr(1 + formulation|subject), Metida.CSH; coding = Dict(:formulation => StatsModels.DummyCoding())), ) + # Test varlink/rholinkf Metida.fit!(lmm; rholinkf = :sqsigm) @test Metida.dof_satter(lmm, [0, 0, 0, 0, 0, 1]) ≈ 6.043195705464293 atol=1E-2 @test Metida.m2logreml(lmm) ≈ 10.314822559210157 atol=1E-6 @test_nowarn Metida.fit!(lmm; varlinkf = :sq) + # Repeated effect only lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; repeated = Metida.VarEffect(Metida.@covstr(formulation|nosubj)), @@ -157,14 +162,19 @@ include("testdata.jl") random = formulation|subject:Metida.DIAG), df0); @test Metida.responsename(lmm) == "log(var)" - #BE like + # BE like lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.CSH), repeated = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), ) Metida.fit!(lmm; aifirst = :score) @test Metida.m2logreml(lmm) ≈ 10.065238626765524 atol=1E-6 - #incomplete + + # One thread + Metida.fit!(lmm; maxthreads = 1) + @test Metida.m2logreml(lmm) ≈ 10.065238626765524 atol=1E-6 + + # incomplete lmm = Metida.LMM(@formula(var~sequence+period+formulation), df1; random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.CSH), repeated = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), @@ -178,6 +188,7 @@ include("testdata.jl") ) Metida.fit!(lmm) @test Metida.m2logreml(lmm, [0.222283, 0.444566]) ≈ Metida.m2logreml(lmm) atol=1E-6 + # EXPERIMENTAL @test Metida.dof_contain(lmm, 1) == 12 @test Metida.dof_contain(lmm, 5) == 8 @@ -189,13 +200,12 @@ include("testdata.jl") # Int dependent variable, function Term in random part df0.varint = Int.(ceil.(df0.var2)) - @test_warn "Response variable not <: AbstractFloat" lmmint = Metida.fit(Metida.LMM, Metida.@lmmformula(varint~formulation, + lmmint = @test_warn "Response variable not <: AbstractFloat" Metida.fit(Metida.LMM, Metida.@lmmformula(varint~formulation, random = 1+var^2|subject:Metida.SI), df0) Metida.fit!(lmmint) @test Metida.m2logreml(lmmint) ≈ 84.23373276096902 atol=1E-6 # Wts - df0.wtsc = fill(0.5, size(df0, 1)) lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; random = 
Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), @@ -209,7 +219,6 @@ include("testdata.jl") fit!(lmm) @test Metida.m2logreml(lmm) ≈ 17.823729 atol=1E-6 # TEST WITH SPSS 28 - @test_warn "wts count not equal observations count! wts not used." lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; random = Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), wts = ones(10)) @@ -227,7 +236,6 @@ include("testdata.jl") @test_nowarn fit!(lmm) # Repeated vector - lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; repeated = [Metida.VarEffect(Metida.@covstr(formulation|subject), Metida.DIAG), Metida.VarEffect(Metida.@covstr(1|subject), Metida.SI)]) fit!(lmm) From 6085659c5d190b746788028f318586de1087c377 Mon Sep 17 00:00:00 2001 From: PharmCat Date: Thu, 22 Aug 2024 14:29:29 +0300 Subject: [PATCH 8/9] test cover, minor opt --- src/reml.jl | 25 ++++++++++++++++--- src/rmat.jl | 2 +- src/sweep.jl | 4 +-- test/devtest.jl | 65 ++++++++++++++++++++++++++++--------------------- test/test.jl | 31 ++++++++++++++++++++++- 5 files changed, 92 insertions(+), 35 deletions(-) diff --git a/src/reml.jl b/src/reml.jl index bc30de59..3da0e5d7 100644 --- a/src/reml.jl +++ b/src/reml.jl @@ -87,7 +87,14 @@ function reml_sweep_β(lmm, data, θ::Vector{T}; maxthreads::Int = 4) where T # #----------------------------------------------------------------------- end θ₁ = sum(accθ₁) - θ₂ = sum(accθ₂) + #θ₂ = sum(accθ₂) + if length(accθ₂) > 1 + for i = 2:length(accθ₂) + accθ₂[1] += accθ₂[i] + end + end + θ₂ = accθ₂[1] + if length(accβm) > 1 for i = 2:length(accβm) accβm[1] += accβm[i] @@ -194,8 +201,20 @@ function reml_sweep_β_nlopt(lmm, data, θ::Vector{T}; maxthreads::Int = 16) whe return Inf, β, θ₂, Inf, false end θ₁ = sum(accθ₁) - θ₂tc = sum(accθ₂) - βtc = sum(accβm) + #θ₂tc = sum(accθ₂) + if length(accθ₂) > 1 + for i = 2:length(accθ₂) + accθ₂[1] += accθ₂[i] + end + end + θ₂tc = accθ₂[1] + #βtc = sum(accβm) + if length(accβm) > 1 + for i = 2:length(accβm) + accβm[1] += accβm[i] + end + end + βtc = accβm[1] # Beta calculation copyto!(θ₂, θ₂tc) ldθ₂, info = LinearAlgebra.LAPACK.potrf!('U', θ₂tc) diff --git a/src/rmat.jl b/src/rmat.jl index 5cac108a..cb8d024d 100644 --- a/src/rmat.jl +++ b/src/rmat.jl @@ -25,7 +25,7 @@ Base.@propagate_inbounds function rmat!(mx, θ, ::AbstractMatrix, ::SI_, ::Int) @inbounds @simd for i ∈ axes(mx, 1) mx[i, i] += val end - mx + return mx end #SWC function rmat!(mx, θ, ::AbstractMatrix, ct::SWC_, sbj::Int) diff --git a/src/sweep.jl b/src/sweep.jl index 1d4d781e..acd5bff8 100644 --- a/src/sweep.jl +++ b/src/sweep.jl @@ -10,7 +10,7 @@ function nsyrk!(α, x, A) @inbounds A[i, j] += x[i] * xjα end end - A + return A end function nsyrk!(α, x, A::AbstractArray{T}) where T <: AbstractFloat BLAS.syrk!('U', 'N', α, x, one(T), A) @@ -44,7 +44,7 @@ function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, k::Integer, i @inbounds A[k, j] = akk[j] end @inbounds A[k, k] = -d - A + return A end function sweep!(A::AbstractArray{T, 2}, ks::AbstractVector{I}, inv::Bool = false; logdet::Bool = false) where {T <: Number, I <: Integer} akk = Vector{T}(undef, size(A,2)) diff --git a/test/devtest.jl b/test/devtest.jl index e1e52d80..db017c17 100644 --- a/test/devtest.jl +++ b/test/devtest.jl @@ -27,71 +27,80 @@ b = Vector{Any}(undef, 4) # Metida ################################################################################ # MODEL 1 +println("MODEL 1") lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; random = Metida.VarEffect(Metida.@covstr(1 + 
time|subject&factor), Metida.CSH), ) b[1] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 # MODEL 2 +println("MODEL 2") lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; random = Metida.VarEffect(Metida.@covstr(1 + time|factor), Metida.ARH), ) b[2] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 # MODEL 3 +println("MODEL 3") lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; random = Metida.VarEffect(Metida.@covstr(1 + time|subject&factor), Metida.CSH), repeated = Metida.VarEffect(Metida.@covstr(1|subject), Metida.AR), ) b[3] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 # MODEL 4 +println("MODEL 4") lmm = Metida.LMM(@formula(tumorsize ~ 1 + CancerStage), hdp; random = Metida.VarEffect(Metida.@covstr(1|HID), Metida.DIAG), ) b[4] = @benchmark Metida.fit!($lmm, hes = false; maxthreads = 16) seconds = 15 for i = 1:4 + println("MODEL $i") display(b[i]) push!(results, (now(), "Model $i", minimum(b[i]).time, minimum(b[i]).memory, minimum(b[i]).allocs)) end #= -BenchmarkTools.Trial: 411 samples with 1 evaluation. - Range (min … max): 11.681 ms … 1.511 s ┊ GC (min … max): 0.00% … 99.24% - Time (median): 14.456 ms ┊ GC (median): 0.00% - Time (mean ± σ): 36.240 ms ± 170.372 ms ┊ GC (mean ± σ): 57.08% ± 11.90% +MODEL 1 +BenchmarkTools.Trial: 675 samples with 1 evaluation. + Range (min … max): 8.194 ms … 798.405 ms ┊ GC (min … max): 0.00% … 98.64% + Time (median): 10.882 ms ┊ GC (median): 0.00% + Time (mean ± σ): 22.823 ms ± 81.034 ms ┊ GC (mean ± σ): 52.70% ± 14.48% - █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▁▁▁▁▁▁▁▁▁▄ ▆ - 11.7 ms Histogram: log(frequency) by time 1.42 s < + █ + █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▃▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▄▁▄▃ ▆ + 8.19 ms Histogram: log(frequency) by time 533 ms < Memory estimate: 19.38 MiB, allocs estimate: 31265. +MODEL 2 BenchmarkTools.Trial: 23 samples with 1 evaluation. - Range (min … max): 657.256 ms … 719.396 ms ┊ GC (min … max): 0.00% … 4.31% - Time (median): 670.301 ms ┊ GC (median): 0.00% - Time (mean ± σ): 677.402 ms ± 16.735 ms ┊ GC (mean ± σ): 1.46% ± 2.20% + Range (min … max): 657.833 ms … 704.073 ms ┊ GC (min … max): 0.00% … 3.23% + Time (median): 675.148 ms ┊ GC (median): 0.00% + Time (mean ± σ): 677.822 ms ± 12.920 ms ┊ GC (mean ± σ): 1.41% ± 1.62% - █ ▁ - ▆▆▆▁▁▆█▆█▁▁▁▆▁▁▁▁▁▁▁▁▁▁▆▆▆▁▁▁▆▆▆▁▁▁▁▆▁▆▆▁▆▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▆ ▁ - 657 ms Histogram: frequency by time 719 ms < + ▁ ▁▁ ▁ ██ ▁ ▁ ▁▁ ▁ ▁ ▁ ▁ █ ▁ █ ▁ ▁ + █▁▁▁▁▁██▁█▁██▁▁█▁█▁▁▁██▁▁▁█▁▁█▁▁█▁▁▁█▁▁▁█▁▁▁▁█▁█▁▁▁█▁▁▁▁▁▁▁▁█ ▁ + 658 ms Histogram: frequency by time 704 ms < - Memory estimate: 132.83 MiB, allocs estimate: 5086. -BenchmarkTools.Trial: 297 samples with 1 evaluation. - Range (min … max): 17.368 ms … 1.552 s ┊ GC (min … max): 0.00% … 98.55% - Time (median): 20.409 ms ┊ GC (median): 0.00% - Time (mean ± σ): 54.268 ms ± 197.857 ms ┊ GC (mean ± σ): 60.26% ± 15.96% + Memory estimate: 132.83 MiB, allocs estimate: 5085. +MODEL 3 +BenchmarkTools.Trial: 440 samples with 1 evaluation. + Range (min … max): 13.693 ms … 644.360 ms ┊ GC (min … max): 0.00% … 97.15% + Time (median): 17.967 ms ┊ GC (median): 0.00% + Time (mean ± σ): 34.118 ms ± 86.080 ms ┊ GC (mean ± σ): 47.99% ± 18.02% - █ - █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▁▁▅▅ ▅ - 17.4 ms Histogram: log(frequency) by time 1.19 s < + █ + █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▄▄▄▁▅▄▄▄▄▁▁▁▁▁▁▄ ▆ + 13.7 ms Histogram: log(frequency) by time 510 ms < - Memory estimate: 42.28 MiB, allocs estimate: 23492. 
+ Memory estimate: 42.28 MiB, allocs estimate: 23491. +MODEL 4 BenchmarkTools.Trial: 4 samples with 1 evaluation. - Range (min … max): 4.696 s … 4.993 s ┊ GC (min … max): 2.54% … 8.32% - Time (median): 4.727 s ┊ GC (median): 2.74% - Time (mean ± σ): 4.786 s ± 138.981 ms ┊ GC (mean ± σ): 4.11% ± 2.86% + Range (min … max): 4.635 s … 4.880 s ┊ GC (min … max): 1.77% … 4.69% + Time (median): 4.791 s ┊ GC (median): 3.57% + Time (mean ± σ): 4.774 s ± 102.530 ms ┊ GC (mean ± σ): 3.42% ± 1.34% - █ ██ █ - █▁▁▁▁██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ - 4.7 s Histogram: frequency by time 4.99 s < + █ █ █ █ + █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█▁▁▁▁▁▁█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁ + 4.64 s Histogram: frequency by time 4.88 s < Memory estimate: 2.56 GiB, allocs estimate: 40772. =# \ No newline at end of file diff --git a/test/test.jl b/test/test.jl index bd6ecc0b..46092792 100644 --- a/test/test.jl +++ b/test/test.jl @@ -109,6 +109,8 @@ include("testdata.jl") @test Metida.confint(lmm, 6)[1] ≈ -0.7630380758015894 atol=1E-4 @test Metida.confint(lmm; ddf = :residual)[end][1] ≈ -0.6740837049617738 atol=1E-4 @test Metida.responsename(lmm) == "var" + @test Metida.nblocks(lmm) == 5 + @test Metida.msgnum(lmm.log) == 3 Metida.confint(lmm; ddf = :contain)[end][1] #NOT VALIDATED @test size(crossmodelmatrix(lmm), 1) == 6 @@ -148,7 +150,15 @@ include("testdata.jl") Metida.fit!(lmm; rholinkf = :sqsigm) @test Metida.dof_satter(lmm, [0, 0, 0, 0, 0, 1]) ≈ 6.043195705464293 atol=1E-2 @test Metida.m2logreml(lmm) ≈ 10.314822559210157 atol=1E-6 - @test_nowarn Metida.fit!(lmm; varlinkf = :sq) + + Metida.fit!(lmm; rholinkf = :atan) + @test Metida.m2logreml(lmm) ≈ 10.314837309793571 atol=1E-6 + + Metida.fit!(lmm; rholinkf = :psigm) + @test Metida.m2logreml(lmm) ≈ 10.86212458333098 atol=1E-6 + + Metida.fit!(lmm; varlinkf = :sq) + @test Metida.m2logreml(lmm) ≈ 10.314822479530243 atol=1E-6 # Repeated effect only lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; @@ -234,6 +244,7 @@ include("testdata.jl") lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; repeated = Metida.VarEffect(Metida.@covstr(1|subject), Metida.SWC(matwts))) @test_nowarn fit!(lmm) + @test_nowarn show(io, lmm) # Repeated vector lmm = Metida.LMM(@formula(var~sequence+period+formulation), df0; @@ -808,6 +819,24 @@ end random = Metida.VarEffect(Metida.@covstr(factor|subject), Metida.DIAG), repeated = Metida.VarEffect(Metida.@covstr(1|subject+factor), Metida.ARMA), ) + + @test_throws ErrorException Metida.LMM(@formula(var~sequence+period+formulation), df0;) + + + @test_throws ErrorException begin + # make cov type + struct NewCCS <: Metida.AbstractCovarianceType end + function Metida.covstrparam(ct::NewCCS, t::Int)::Tuple{Int, Int} + return (t, 1) + end + # try to apply to repeated effect + lmm = Metida.LMM(@formula(response ~1 + factor*time), ftdf; + repeated = Metida.VarEffect(Metida.@covstr(1 + time|subject&factor), Metida.CovarianceType(NewCCS())), + ) + # try to get V + Metida.vmatrix([1.0, 1.0, 1.0], lmm, 1) + end + # Error messages io = IOBuffer(); lmm = Metida.LMM(@formula(response ~ 1 + factor*time), ftdf2; From 8833bb0b7383ec2214755b1f3708635e81015335 Mon Sep 17 00:00:00 2001 From: PharmCat Date: Thu, 22 Aug 2024 14:48:09 +0300 Subject: [PATCH 9/9] cosmetics --- src/gmat.jl | 31 +++++++++---------- src/linearalgebra.jl | 14 ++++----- src/reml.jl | 6 ++-- src/rmat.jl | 36 +++++++++++----------- src/statsbase.jl | 22 +++++++------- src/sweep.jl | 8 ++--- src/typeiii.jl | 2 +- src/utils.jl | 71 
++++++++++++++++++++++---------------------- src/varstruct.jl | 14 ++++----- src/vartypes.jl | 4 +-- 10 files changed, 104 insertions(+), 104 deletions(-) diff --git a/src/gmat.jl b/src/gmat.jl index 452cf9f1..d4b04188 100644 --- a/src/gmat.jl +++ b/src/gmat.jl @@ -9,7 +9,7 @@ gmat!(gt[r].data, view(θ, covstr.tr[r]), covstr.random[r].covtype.s) end end - gt + return gt end # Main @noinline function zgz_base_inc!(mx::AbstractArray, G, covstr, bi) @@ -23,7 +23,7 @@ end end end end - mx + return mx end ################################################################################ ################################################################################ @@ -32,7 +32,7 @@ function gmat!(::Any, ::Any, ::AbstractCovarianceType) error("No gmat! method defined for thit structure!") end function gmat!(mx, ::Any, ::ZERO) - mx + return mx end #SI Base.@propagate_inbounds function gmat!(mx, θ, ::SI_) @@ -40,14 +40,14 @@ Base.@propagate_inbounds function gmat!(mx, θ, ::SI_) @inbounds @simd for i = 1:size(mx, 1) mx[i, i] = val end - mx + return mx end #DIAG function gmat!(mx, θ, ::DIAG_) @inbounds @simd for i = 1:size(mx, 1) mx[i, i] = θ[i] ^ 2 end - mx + return mx end #AR function gmat!(mx, θ, ::AR_) @@ -64,7 +64,7 @@ function gmat!(mx, θ, ::AR_) end end end - mx + return mx end #ARH function gmat!(mx, θ, ::ARH_) @@ -84,7 +84,7 @@ function gmat!(mx, θ, ::ARH_) @inbounds @simd for m = 1:s mx[m, m] *= mx[m, m] end - mx + return mx end #CS function gmat!(mx, θ, ::CS_) @@ -99,7 +99,7 @@ function gmat!(mx, θ, ::CS_) end end end - mx + return mx end #CSH function gmat!(mx, θ, ::CSH_) @@ -118,7 +118,7 @@ function gmat!(mx, θ, ::CSH_) @inbounds @simd for m = 1:s mx[m, m] *= mx[m, m] end - mx + return mx end ################################################################################ #ARMA @@ -136,7 +136,7 @@ function gmat!(mx, θ, ::ARMA_) end end end - mx + return mx end #TOEP function gmat!(mx, θ, ::TOEP_) @@ -152,7 +152,7 @@ function gmat!(mx, θ, ::TOEP_) end end end - mx + return mx end function gmat!(mx, θ, ct::TOEPP_) de = θ[1] ^ 2 #diagonal element @@ -167,7 +167,7 @@ function gmat!(mx, θ, ct::TOEPP_) end end end - mx + return mx end #TOEPH function gmat!(mx, θ, ::TOEPH_) @@ -186,7 +186,7 @@ function gmat!(mx, θ, ::TOEPH_) @inbounds @simd for m = 1:s mx[m, m] *= mx[m, m] end - mx + return mx end #TOEPHP function gmat!(mx, θ, ct::TOEPHP_) @@ -205,7 +205,7 @@ function gmat!(mx, θ, ct::TOEPHP_) @inbounds @simd for m = 1:s mx[m, m] *= mx[m, m] end - mx + return mx end #UN function gmat!(mx, θ, ::UN_) @@ -224,7 +224,7 @@ function gmat!(mx, θ, ::UN_) @inbounds @simd for m = 1:s mx[m, m] *= mx[m, m] end - mx + return mx end function tpnum(m, n, s) @@ -233,4 +233,5 @@ function tpnum(m, n, s) b += s - i end b -= s - n + return b end diff --git a/src/linearalgebra.jl b/src/linearalgebra.jl index 4de882a3..1f5de27a 100644 --- a/src/linearalgebra.jl +++ b/src/linearalgebra.jl @@ -23,7 +23,7 @@ Change θ (only upper triangle). B is symmetric. end end end - θ + return θ end #= function mulαβαtinc!(θ::AbstractMatrix{T}, A::AbstractMatrix{T}, B::AbstractMatrix{T}) where T <: AbstractFloat @@ -55,7 +55,7 @@ Change θ (only upper triangle). B is symmetric. end end end - θ + return θ end """ mulαβαtinc!(θ::AbstractVector{T}, A::AbstractMatrix, B::AbstractMatrix, a::AbstractVector, b::AbstractVector, alpha) where T @@ -78,7 +78,7 @@ Change θ (only upper triangle). B is symmetric. end end end - θ + return θ end """ @@ -137,7 +137,7 @@ Change θ. 
end @inbounds θ[n] += θn end - θ + return θ end # Diagonal(b) * A * Diagonal(b) - chnage only A upper triangle @noinline function mulβdαβd!(A::AbstractMatrix, b::AbstractVector) @@ -149,7 +149,7 @@ end @inbounds A[m, n] *= b[m] * b[n] end end - A + return A end @@ -162,7 +162,7 @@ end @inbounds vec[r] += rz[r, i] * θi end end - vec + return vec end @inline function diag!(f, v, m) @@ -171,5 +171,5 @@ end @simd for i = 1:l @inbounds v[i] = f(m[i, i]) end - v + return v end diff --git a/src/reml.jl b/src/reml.jl index 3da0e5d7..84c5fe19 100644 --- a/src/reml.jl +++ b/src/reml.jl @@ -7,7 +7,7 @@ function subutri!(a, b) @inbounds a[m,n] -= b[m,n] end end - a + return a end function fillzeroutri!(a::AbstractArray{T}) where T @@ -28,7 +28,7 @@ function checkmatrix!(mx::AbstractMatrix{T}) where T e = false end end - e + return e end ################################################################################ # REML without provided β @@ -270,7 +270,7 @@ function core_sweep_β(lmm, data, θ::Vector{T}, β, n; maxthreads::Int = 16) wh accθ₃[t] += mulθ₃(data.yv[i], data.xv[i], β, V) end end - sum(accθ₁), sum(accθ₂), sum(accθ₃), all(erroracc) + return sum(accθ₁), sum(accθ₂), sum(accθ₃), all(erroracc) end ### function reml_sweep_β(lmm, data, θ::Vector{T}, β; kwargs...) where T diff --git a/src/rmat.jl b/src/rmat.jl index cb8d024d..8c4a0980 100644 --- a/src/rmat.jl +++ b/src/rmat.jl @@ -13,7 +13,7 @@ rmat!(view(mx, sb, sb), view(θ, rθ[j]), view(zblock, sb, :), covstr.repeated[j].covtype.s, bi) end end - mx + return mx end ################################################################################ function rmat!(::Any, ::Any, ::Any, ::AbstractCovarianceType, ::Int) @@ -40,7 +40,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ct::SWC_, sbj::Int) else @inbounds mx[1, 1] += de * ct.wtsb[sbj][1, 1] end - mx + return mx end #DIAG function rmat!(mx, θ, rz, ::DIAG_, ::Int) @@ -49,7 +49,7 @@ function rmat!(mx, θ, rz, ::DIAG_, ::Int) mx[i, i] += rz[i, c] * θ[c] ^ 2 end end - mx + return mx end #AR function rmat!(mx, θ, ::AbstractMatrix, ::AR_, ::Int) @@ -65,7 +65,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ::AR_, ::Int) end end end - mx + return mx end #ARH function rmat!(mx, θ, rz, ::ARH_, ::Int) @@ -81,7 +81,7 @@ function rmat!(mx, θ, rz, ::ARH_, ::Int) @inbounds for m ∈ axes(mx, 1) mx[m, m] += vec[m] * vec[m] end - mx + return mx end #CS function rmat!(mx, θ, ::AbstractMatrix, ::CS_, ::Int) @@ -98,7 +98,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ::CS_, ::Int) end end end - mx + return mx end #CSH function rmat!(mx, θ, rz, ::CSH_, ::Int) @@ -116,7 +116,7 @@ function rmat!(mx, θ, rz, ::CSH_, ::Int) @inbounds for m ∈ axes(mx, 1) mx[m, m] += vec[m] * vec[m] end - mx + return mx end ################################################################################ #ARMA @@ -134,7 +134,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ::ARMA_, ::Int) end end end - mx + return mx end ################################################################################ #TOEPP @@ -151,7 +151,7 @@ function rmat!(mx, θ, ::AbstractMatrix, ct::TOEPP_, ::Int) end end end - mx + return mx end ################################################################################ #TOEPHP @@ -169,7 +169,7 @@ function rmat!(mx, θ, rz, ct::TOEPHP_, ::Int) @inbounds @simd for m = 1:s mx[m, m] += vec[m] * vec[m] end - mx + return mx end ################################################################################ #= @@ -213,7 +213,7 @@ function rmat!(mx, θ, rz, ::SPEXP_, ::Int) end end end - mx + return mx end 
################################################################################ #SPPOW @@ -233,7 +233,7 @@ function rmat!(mx, θ, rz, ::SPPOW_, ::Int) end end end - mx + return mx end #SPGAU @@ -255,7 +255,7 @@ function rmat!(mx, θ, rz, ::SPGAU_, ::Int) end end end - mx + return mx end ################################################################################ #SPEXPD cos(pidij) @@ -275,7 +275,7 @@ function rmat!(mx, θ, rz, ::SPEXPD_, ::Int) end end end - mx + return mx end #SPPOWD function rmat!(mx, θ, rz, ::SPPOWD_, ::Int) @@ -293,7 +293,7 @@ function rmat!(mx, θ, rz, ::SPPOWD_, ::Int) end end end - mx + return mx end #SPGAUD function rmat!(mx, θ, rz, ::SPGAUD_, ::Int) @@ -312,7 +312,7 @@ function rmat!(mx, θ, rz, ::SPGAUD_, ::Int) end end end - mx + return mx end #UN @@ -333,14 +333,14 @@ function unrmat(θ::AbstractVector{T}, rz) where T @inbounds @simd for m = 1:rm mx[m, m] *= mx[m, m] end - Symmetric(mx) + return Symmetric(mx) end function rmat!(mx, θ, rz::AbstractMatrix, ::UN_, ::Int) vec = tmul_unsafe(rz, θ) rm = size(mx, 1) rcov = unrmat(θ, rz) mulαβαtinc!(mx, rz, rcov) - mx + return mx end ############################################################################### ############################################################################### diff --git a/src/statsbase.jl b/src/statsbase.jl index 888be62f..a29b318c 100644 --- a/src/statsbase.jl +++ b/src/statsbase.jl @@ -75,7 +75,7 @@ Model coefficients (β). StatsBase.coef(lmm::LMM) = copy(coef_(lmm)) function coef_(lmm::LMM) - lmm.result.beta + return lmm.result.beta end """ StatsBase.coefnames(lmm::LMM) = StatsBase.coefnames(lmm.mf) @@ -99,7 +99,7 @@ end DOF residuals: N - rank(X), where N - total number of observations. """ function StatsBase.dof_residual(lmm::LMM) - nobs(lmm) - lmm.rankx + return nobs(lmm) - lmm.rankx end """ @@ -108,7 +108,7 @@ end DOF. """ function StatsBase.dof(lmm::LMM) - lmm.nfixed + lmm.covstr.tl + return lmm.nfixed + lmm.covstr.tl end """ @@ -117,7 +117,7 @@ end Return loglikelihood value. """ function StatsBase.loglikelihood(lmm::LMM) - -lmm.result.reml/2 + return -lmm.result.reml/2 end """ @@ -128,7 +128,7 @@ Akaike Information Criterion. function StatsBase.aic(lmm::LMM) l = loglikelihood(lmm) d = lmm.covstr.tl - -2l + 2d + return -2l + 2d end """ @@ -140,7 +140,7 @@ function StatsBase.bic(lmm::LMM) l = loglikelihood(lmm) d = lmm.covstr.tl n = nobs(lmm) - lmm.nfixed - -2l + d * log(n) + return -2l + d * log(n) end """ @@ -152,7 +152,7 @@ function StatsBase.aicc(lmm::LMM) l = loglikelihood(lmm) d = lmm.covstr.tl n = nobs(lmm) - lmm.nfixed - -2l + (2d * n) / (n - d - 1.0) + return -2l + (2d * n) / (n - d - 1.0) end """ @@ -164,14 +164,14 @@ function caic(lmm::LMM) l = loglikelihood(lmm) d = lmm.covstr.tl n = nobs(lmm) - lmm.nfixed - -2l + d * (log(n) + 1.0) + return -2l + d * (log(n) + 1.0) end """ StatsBase.isfitted(lmm::LMM) """ function StatsBase.isfitted(lmm::LMM) - lmm.result.fit + return lmm.result.fit end """ StatsBase.vcov(lmm::LMM) @@ -187,7 +187,7 @@ Standard error StatsBase.stderror(lmm::LMM) = copy(stderror_(lmm)) function stderror_(lmm::LMM) - lmm.result.se + return lmm.result.se end stderror!(v, lmm::LMM) = copyto!(v, lmm.result.se) @@ -239,7 +239,7 @@ end Responce varible name. 
""" function StatsBase.responsename(lmm::LMM) - StatsBase.coefnames(lmm.f)[1] + return StatsBase.coefnames(lmm.f)[1] end diff --git a/src/sweep.jl b/src/sweep.jl index acd5bff8..744e21a1 100644 --- a/src/sweep.jl +++ b/src/sweep.jl @@ -13,11 +13,11 @@ function nsyrk!(α, x, A) return A end function nsyrk!(α, x, A::AbstractArray{T}) where T <: AbstractFloat - BLAS.syrk!('U', 'N', α, x, one(T), A) + return BLAS.syrk!('U', 'N', α, x, one(T), A) end function sweep!(A::AbstractArray{T}, k::Integer, inv::Bool = false) where T - sweepb!(Vector{T}(undef, size(A, 2)), A, k, inv) + return sweepb!(Vector{T}(undef, size(A, 2)), A, k, inv) end function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, k::Integer, inv::Bool = false) where T <: Number p = checksquare(A) @@ -48,7 +48,7 @@ function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, k::Integer, i end function sweep!(A::AbstractArray{T, 2}, ks::AbstractVector{I}, inv::Bool = false; logdet::Bool = false) where {T <: Number, I <: Integer} akk = Vector{T}(undef, size(A,2)) - sweepb!(akk, A, ks, inv; logdet = logdet) + return sweepb!(akk, A, ks, inv; logdet = logdet) end function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, ks::AbstractVector{I}, inv::Bool = false; logdet::Bool = false) where {T <: Number, I<:Integer} @@ -75,5 +75,5 @@ function sweepb!(akk::AbstractArray{T, 1}, A::AbstractArray{T, 2}, ks::AbstractV sweepb!(akk, A, k, inv) end end - A, ld, noerror + return A, ld, noerror end diff --git a/src/typeiii.jl b/src/typeiii.jl index 7128330b..c7e23450 100644 --- a/src/typeiii.jl +++ b/src/typeiii.jl @@ -84,7 +84,7 @@ function contrast(lmm, l::AbstractMatrix; name::String = "Contrast", ddf = :satt else pval = NaN end - ContrastTable([name], [F], [ndf], [df], [pval]) + return ContrastTable([name], [F], [ndf], [df], [pval]) end function Base.show(io::IO, at::ContrastTable) diff --git a/src/utils.jl b/src/utils.jl index a142c874..b714afae 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -7,7 +7,7 @@ function initvar(y::Vector, X::Matrix{T}) where T r = copy(y) LinearAlgebra.BLAS.gemv!('N', one(T), X, β, -one(T), r) #r = y .- X * β - dot(r, r)/(length(r) - size(X, 2)), β + return dot(r, r)/(length(r) - size(X, 2)), β end ################################################################################ function fixedeffn(f::FormulaTerm) @@ -25,7 +25,7 @@ function nterms(rhs::Union{Tuple{Vararg{AbstractTerm}}, Nothing, AbstractTerm}) else p = 0 end - p + return p end """ Term name. 
@@ -68,7 +68,7 @@ function lcontrast(lmm::LMM, i::Int) mx[j, inds[j]] = 1 end end - mx + return mx end ################################################################################ # VAR LINK @@ -76,17 +76,17 @@ end function vlink(σ::T) where T <: Real if σ < -21.0 return one(T)*7.582560427911907e-10 end #Experimental - exp(σ) + return exp(σ) end function vlinkr(σ::T) where T <: Real - log(σ) + return log(σ) end function vlinksq(σ::T) where T <: Real - σ*σ + return σ*σ end function vlinksqr(σ::T) where T <: Real - sqrt(σ) + return sqrt(σ) end function rholinkpsigmoid(ρ::T) where T <: Real @@ -141,7 +141,7 @@ function varlinkvecapply!(v, p; varlinkf = :exp, rholinkf = :sigm) end end end - v + return v end function varlinkrvecapply!(v, p; varlinkf = :exp, rholinkf = :sigm) @inbounds for i = 1:length(v) @@ -165,7 +165,7 @@ function varlinkrvecapply!(v, p; varlinkf = :exp, rholinkf = :sigm) end end end - v + return v end function varlinkvecapply(v, p; varlinkf = :exp, rholinkf = :sigm) s = similar(v) @@ -192,22 +192,22 @@ function varlinkvecapply(v, p; varlinkf = :exp, rholinkf = :sigm) s[i] = v[i] end end - s + return s end ################################################################################ function m2logreml(lmm) - lmm.result.reml + return lmm.result.reml end function logreml(lmm) - -m2logreml(lmm)/2 + return -m2logreml(lmm)/2 end function m2logreml(lmm, theta; maxthreads::Int = num_cores()) - reml_sweep_β(lmm, LMMDataViews(lmm), theta; maxthreads = maxthreads)[1] + return reml_sweep_β(lmm, LMMDataViews(lmm), theta; maxthreads = maxthreads)[1] end ################################################################################ function optim_callback(os) - false + return false end ################################################################################ """ @@ -216,7 +216,7 @@ end Return true if CovarianceType is ZERO. """ function zeroeff(eff) - isa(eff.covtype.s, ZERO) + return isa(eff.covtype.s, ZERO) end """ raneffn(lmm) @@ -226,7 +226,7 @@ function raneffn(lmm) if zeroeff(lmm.covstr.random[1]) return 0 end - length(lmm.covstr.random) + return length(lmm.covstr.random) end """ @@ -237,7 +237,7 @@ function gmatrix(lmm::LMM{T}, r::Int) where T if r > length(lmm.covstr.random) error("Invalid random effect number: $(r)!") end G = zeros(T, lmm.covstr.q[r], lmm.covstr.q[r]) gmat!(G, view(lmm.result.theta, lmm.covstr.tr[r]), lmm.covstr.random[r].covtype.s) - Symmetric(G) + return Symmetric(G) end @@ -247,7 +247,7 @@ end Return true if all variance-covariance matrix (G) of random effect is positive definite. 
""" function gmatrixipd(lmm::LMM) - lmm.result.ipd + return lmm.result.ipd end """ @@ -260,7 +260,7 @@ function rmatrix(lmm::LMM{T}, i::Int) where T R = zeros(T, q, q) rθ = lmm.covstr.tr[lmm.covstr.rn + 1:end] rmat_base_inc!(R, lmm.result.theta, rθ, lmm.covstr, i) - Symmetric(R) + return Symmetric(R) end ##################################################################### @@ -270,11 +270,12 @@ function applywts!(::Any, ::Int, ::Nothing) end function applywts!(V::AbstractMatrix, i::Int, wts::LMMWts{<:Vector}) - mulβdαβd!(V, wts.sqrtwts[i]) + return mulβdαβd!(V, wts.sqrtwts[i]) end function applywts!(V::AbstractMatrix, i::Int, wts::LMMWts{<:Matrix}) V .*= wts.sqrtwts[i] + return V end ##################################################################### @@ -290,15 +291,14 @@ function vmatrix!(V, θ, lmm::LMM, i::Int) # pub API rθ = lmm.covstr.tr[lmm.covstr.rn + 1:end] rmat_base_inc!(V, θ, rθ, lmm.covstr, i) # Repeated vector applywts!(V, i, lmm.wts) - zgz_base_inc!(V, gvec, lmm.covstr, i) - + return zgz_base_inc!(V, gvec, lmm.covstr, i) end # !!! Main function REML used @noinline function vmatrix!(V, G, θ, rθ, lmm::LMM, i::Int) rmat_base_inc!(V, θ, rθ, lmm.covstr, i) # Repeated vector applywts!(V, i, lmm.wts) - zgz_base_inc!(V, G, lmm.covstr, i) + return zgz_base_inc!(V, G, lmm.covstr, i) end """ @@ -307,7 +307,7 @@ end Return variance-covariance matrix V for i bolock. """ function vmatrix(lmm::LMM, i::Int) - vmatrix(lmm.result.theta, lmm, i) + return vmatrix(lmm.result.theta, lmm, i) end function vmatrix(θ::AbstractVector{T}, lmm::LMM, i::Int) where T @@ -315,7 +315,7 @@ function vmatrix(θ::AbstractVector{T}, lmm::LMM, i::Int) where T gvec = gmatvec(θ, lmm.covstr) rθ = lmm.covstr.tr[lmm.covstr.rn + 1:end] vmatrix!(V, gvec, θ, rθ, lmm, i) # Repeated vector - Symmetric(V) + return Symmetric(V) end # For Multiple Imputation function vmatrix(θ::Vector, covstr::CovStructure, lmmwts, i::Int) @@ -325,11 +325,11 @@ function vmatrix(θ::Vector, covstr::CovStructure, lmmwts, i::Int) rmat_base_inc!(V, θ, rθ, covstr, i) # Repeated vector applywts!(V, i, lmmwts) zgz_base_inc!(V, gvec, covstr, i) - Symmetric(V) + return Symmetric(V) end function blockgmatrix(lmm::LMM{T}) where T - blockgmatrix(lmm, (1, 1)) + return blockgmatrix(lmm, (1, 1)) end function blockgmatrix(lmm::LMM{T}, v) where T @@ -348,7 +348,7 @@ function blockgmatrix(lmm::LMM{T}, v) where T s = e + 1 end end - G + return G end function blockzmatrix(lmm::LMM{T}, i) where T @@ -368,7 +368,7 @@ function blockzmatrix(lmm::LMM{T}, i) where T s = e + 1 mx[j] = smx end - hcat(mx...) + return hcat(mx...) 
end """ raneff(lmm::LMM{T}, i) @@ -399,8 +399,7 @@ function raneff(lmm::LMM{T}, block) where T rvsbj[i][j] = subjname => rv[s:e] end end - rvsbj - + return rvsbj end """ raneff(lmm::LMM{T}) @@ -418,7 +417,7 @@ function raneff(lmm::LMM) end end end - fb + return fb end """ @@ -438,19 +437,19 @@ function hessian(lmm, theta) vloptf(x) = reml_sweep_β(lmm, lmm.dv, x, lmm.result.beta)[1] chunk = ForwardDiff.Chunk{min(8, length(theta))}() hcfg = ForwardDiff.HessianConfig(vloptf, theta, chunk) - ForwardDiff.hessian(vloptf, theta, hcfg) + return ForwardDiff.hessian(vloptf, theta, hcfg) end function hessian(lmm) if !lmm.result.fit error("Model not fitted!") end - hessian(lmm, lmm.result.theta) + return hessian(lmm, lmm.result.theta) end ################################################################################ ################################################################################ function StatsModels.termvars(ve::VarEffect) - termvars(ve.formula) + return termvars(ve.formula) end function StatsModels.termvars(ve::Vector{VarEffect}) - union(termvars.(ve)...) + return union(termvars.(ve)...) end ################################################################################ diff --git a/src/varstruct.jl b/src/varstruct.jl index bf6ae3a4..b2bb5941 100644 --- a/src/varstruct.jl +++ b/src/varstruct.jl @@ -16,7 +16,7 @@ function StatsModels.ContrastsMatrix(contrasts::RawCoding, levels::AbstractVecto contrasts) end function StatsModels.modelcols(t::CategoricalTerm{RawCoding, T, N}, d::NamedTuple) where T where N - d[t.sym] + return d[t.sym] end ################################################################################ @@ -40,7 +40,7 @@ end function modelparse(term::FunctionTerm{typeof(|)}) eff, subj = term.args if !isa(subj, AbstractTerm) || isa(subj, FunctionTerm{typeof(*), Vector{Term}}) throw(FormulaException("Subject term type not <: AbstractTerm. Use `term` or `interaction term` only. Maybe you are using something like this: `@covstr(factor|term1*term2)` or `@covstr(factor|(term1+term2))`. Use only `@covstr(factor|term)` or `@covstr(factor|term1&term2)`.")) end - eff, subj + return eff, subj end function modelparse(term) throw(FormulaException("Model term type not <: FunctionTerm{typeof(|)}. Use model like this: `@covstr(factor|subject)`. Maybe you are using something like this: `@covstr(factor|term1+term2)`. Use only `@covstr(factor|term)` or `@covstr(factor|term1&term2)`.")) @@ -138,7 +138,7 @@ function sabjcrossdicts(d1, d2) end end end - v + return v end tabcols(data, symbs) = Tuple(Tables.getcolumn(Tables.columns(data), x) for x in symbs) @@ -168,7 +168,7 @@ function raneflenv(covstr, block) for i = 1:l v[i] = length(covstr.esb.sblock[block, i]) end - v + return v end """ Covarince structure. 
@@ -420,13 +420,13 @@ end ################################################################################ function fill_coding_dict!(t::T, d::Dict, data) where T <: Union{ConstantTerm, InterceptTerm, FunctionTerm} - d + return d end function fill_coding_dict!(t::T, d::Dict, data) where T <: Term if typeof(Tables.getcolumn(data, t.sym)) <: AbstractCategoricalVector || !(typeof(Tables.getcolumn(data, t.sym)) <: AbstractVector{V} where V <: Real) d[t.sym] = StatsModels.FullDummyCoding() end - d + return d end #= function fill_coding_dict!(t::T, d::Dict, data) where T <: InteractionTerm @@ -448,7 +448,7 @@ function fill_coding_dict_ct!(t, d, data) fill_coding_dict!(i, d, data) end end - d + return d end #= function fill_coding_dict!(t::T, d::Dict, data) where T <: Tuple{Vararg{AbstractTerm}} diff --git a/src/vartypes.jl b/src/vartypes.jl index a22ca849..d8bf8708 100644 --- a/src/vartypes.jl +++ b/src/vartypes.jl @@ -532,7 +532,7 @@ function indfromtn(ind, s) break end end - m, s + ind - b + return m, s + ind - b end function rcoefnames(s, t, ct::UN_) @@ -572,7 +572,7 @@ function applycovschema!(ct::SWC_{<:AbstractMatrix{T}}, vcovblock) where T push!(ct.wtsb, ct.wtsm[vcovblock[i], vcovblock[i]]) end end - ct + return ct end ################################################################################
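The variance-link helpers in the utils.jl hunk above (vlink/vlinkr and vlinksq/vlinksqr)
are mutual inverses on the admissible range; the -21.0 floor in vlink only matters for
extreme arguments. A small sketch, assuming the internal (non-exported) names stay as
shown in that hunk:

    using Metida
    s = 0.5
    @assert Metida.vlink(Metida.vlinkr(s)) ≈ s        # exp after log
    @assert Metida.vlinksq(Metida.vlinksqr(s)) ≈ s    # square after sqrt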