diff --git a/docs/tensortrax.rst b/docs/tensortrax.rst
index 1887df9..71332ea 100644
--- a/docs/tensortrax.rst
+++ b/docs/tensortrax.rst
@@ -7,6 +7,4 @@ API Reference
:maxdepth: 1
:caption: Modules:
- tensortrax/tensor
- tensortrax/evaluate
- tensortrax/math
+ tensortrax/tensor
\ No newline at end of file
diff --git a/docs/tensortrax/evaluate.rst b/docs/tensortrax/evaluate.rst
deleted file mode 100644
index 1a4bc51..0000000
--- a/docs/tensortrax/evaluate.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-.. _api-evaluate:
-
-Evaluate Functions
-~~~~~~~~~~~~~~~~~~
-This module provides evaluation methods for (the gradient, Hessian or Jacobian) of a
-function.
-
-**Core**
-
-.. currentmodule:: tensortrax
-
-.. autosummary::
-
- function
- gradient
- hessian
- jacobian
- gradient_vector_product
- hessian_vector_product
- hessian_vectors_product
-
-
-**Detailed API Reference**
-
-.. autofunction:: tensortrax.function
-
-.. autofunction:: tensortrax.gradient
-
-.. autofunction:: tensortrax.hessian
-
-.. autofunction:: tensortrax.jacobian
-
-.. autofunction:: tensortrax.gradient_vector_product
-
-.. autofunction:: tensortrax.hessian_vector_product
-
-.. autofunction:: tensortrax.hessian_vectors_product
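
For reference, the vector-product helpers listed above take the seed arrays as
keyword arguments, and tensor-valued functions use jacobian() instead of
gradient(); the pattern below is taken from the example removed later in this
diff:

    import numpy as np
    import tensortrax as tr
    from tensortrax.math import trace

    x = np.eye(3) + np.arange(9).reshape(3, 3) / 10

    # tensor-valued function: use jacobian() instead of gradient()
    jac = tr.jacobian(lambda F: F.T @ F)(x)

    # scalar-valued function: gradient- and hessian-vector(s)-products
    fun = lambda F: trace(F.T @ F)
    gvp = tr.gradient_vector_product(fun)(x, δx=x)
    hvp = tr.hessian_vector_product(fun)(x, δx=x)
    hvsp = tr.hessian_vectors_product(fun)(x, δx=x, Δx=x)
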
diff --git a/docs/tensortrax/math.rst b/docs/tensortrax/math.rst
deleted file mode 100644
index 56bc7ba..0000000
--- a/docs/tensortrax/math.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-.. _api-math:
-
-Math
-~~~~
-This module provides NumPy-like math functions.
-
-.. toctree::
- :maxdepth: 1
- :caption: Modules:
-
- math/linalg
- math/special
-
-.. currentmodule:: tensortrax.math
-
-.. autosummary::
-
- abs
- array
- broadcast_to
- concatenate
- cos
- cosh
- diagonal
- dot
- dual_to_real
- einsum
- exp
- external
- hstack
- if_else
- log
- log10
- matmul
- maximum
- minimum
- ravel
- real_to_dual
- repeat
- reshape
- sign
- sin
- sinh
- split
- squeeze
- sqrt
- stack
- sum
- tan
- tanh
- tile
- trace
- transpose
- vstack
-
-
-**Detailed API Reference**
-
-.. automodule:: tensortrax.math
- :members: abs, array, broadcast_to, concatenate, cos, cosh, diagonal, dot, dual_to_real, einsum, exp, external, hstack, if_else, log, log10, matmul, maximum, minimum, ravel, real_to_dual, repeat, reshape, sign, sin, sinh, split, squeeze, sqrt, stack, sum, tan, tanh, tile, trace, transpose, vstack
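
The functions listed in the removed page mirror their NumPy counterparts but
operate on a Tensor; a minimal sketch of that usage, assembled from the
examples elsewhere in this diff:

    import numpy as np
    from tensortrax import Tensor, f
    import tensortrax.math as tm

    F = Tensor(np.eye(3) + np.arange(9).reshape(3, 3) / 10)
    I1 = tm.trace(F.T @ F)  # NumPy-like call, Tensor-aware result
    print(f(I1))            # f() extracts the real (function-value) part
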
diff --git a/docs/tensortrax/math/linalg.rst b/docs/tensortrax/math/linalg.rst
deleted file mode 100644
index 25048c6..0000000
--- a/docs/tensortrax/math/linalg.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-.. _api-math-linalg:
-
-Linear Algebra
-~~~~~~~~~~~~~~
-This module provides NumPy-like linear algebra math functions.
-
-.. currentmodule:: tensortrax.math.linalg
-
-.. autosummary::
-
- det
- eigh
- eigvalsh
- expm
- inv
- pinv
- sqrtm
-
-**Detailed API Reference**
-
-.. automodule:: tensortrax.math.linalg
- :members: det, eigh, eigvalsh, expm, inv, pinv, sqrtm
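
A short sketch of the removed linear-algebra helpers on a Tensor, following
the call patterns used in the docstrings removed later in this diff:

    import numpy as np
    from tensortrax import Tensor, f
    import tensortrax.math as tm

    F = Tensor(np.eye(3) + np.arange(9).reshape(3, 3) / 10)
    J = tm.linalg.det(F)     # scalar-valued Tensor
    Finv = tm.linalg.inv(F)
    assert np.allclose(f(Finv @ F), np.eye(3))
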
diff --git a/docs/tensortrax/math/special.rst b/docs/tensortrax/math/special.rst
deleted file mode 100644
index 1a69a94..0000000
--- a/docs/tensortrax/math/special.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-.. _api-math-special:
-
-Special
-~~~~~~~
-This module provides special (linear algebra) math functions.
-
-.. currentmodule:: tensortrax.math.special
-
-.. autosummary::
-
- ddot
- dev
- erf
- from_triu_1d
- from_triu_2d
- sym
- tresca
- triu_1d
- try_stack
- von_mises
-
-**Detailed API Reference**
-
-.. automodule:: tensortrax.math.special
- :members: ddot, dev, erf, from_triu_1d, from_triu_2d, sym, tresca, triu_1d, try_stack, von_mises
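
A sketch for dev, assuming it returns the deviator A - tr(A)/3 * I (the usual
continuum-mechanics convention; the implementation itself is not part of this
diff):

    import numpy as np
    from tensortrax import Tensor, f
    import tensortrax.math as tm

    C = Tensor(np.diag([2.0, 3.0, 4.0]))
    dev_C = tm.special.dev(C)  # assumed: deviatoric part C - tr(C)/3 * I
    assert np.allclose(f(tm.trace(dev_C)), 0.0)  # deviators are trace-free
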
diff --git a/examples/ex01_usage.py b/examples/ex01_usage.py
index 5a7bc7e..36d08d0 100644
--- a/examples/ex01_usage.py
+++ b/examples/ex01_usage.py
@@ -1,25 +1,10 @@
r"""
Quickstart
----------
-Let's define a scalar-valued function which operates on a :class:`~tensortrax.Tensor`.
-The math module :mod:`tensortrax.math` provides some essential NumPy-like functions
-including linear algebra. We take the strain energy density function of the Neo-Hookean
-isotropic hyperelastic material formulation as a reference example, see Eq.
-:eq:`tutorial-nh`.
-
-.. math::
- :label: tutorial-nh
-
- C &= \boldsymbol{F}^T \boldsymbol{F}
-
- I_1 &= \text{tr} (\boldsymbol{C})
-
- J &= \det (\boldsymbol{F})
-
- \psi(\boldsymbol{F}) &= \frac{\mu}{2} \left( J^{-2/3}\ I_1 - 3 \right)
-
+Let's define a scalar-valued function which operates on a tensor. The math module
+:mod:`tensortrax.math` provides some essential NumPy-like functions including linear
+algebra.
"""
-
import tensortrax as tr
import tensortrax.math as tm
@@ -98,7 +83,7 @@ def W(F, p, J):
# %%
# In a similar way, the gradient may be obtained by initiating a Tensor with the
-# gradient instead of the hessian argument.
+# gradient argument.
# init Tensors to be used with first partial derivatives
F.init(gradient=True, δx=False)
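
For reference, the Neo-Hookean function dropped from this docstring survives
in compact form; a sketch combining it with the gradient evaluation shown
above (the function body and call pattern are taken from docstrings removed
later in this diff):

    import numpy as np
    import tensortrax as tr
    import tensortrax.math as tm

    def W(F, mu=1):
        # ψ(F) = μ/2 (J^(-2/3) I1 - 3) with C = FᵀF, I1 = tr(C), J = det(F)
        C = F.T @ F
        I1 = tm.trace(C)
        J = tm.linalg.det(F)
        return mu / 2 * (J ** (-2 / 3) * I1 - 3)

    F = np.eye(3) + np.arange(9).reshape(3, 3) / 10
    dWdF = tr.gradient(W)(F)  # gradient w.r.t. F, shape (3, 3)
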
diff --git a/examples/ex02_numeric_variation.py b/examples/ex02_numeric_variation.py
deleted file mode 100644
index 4b530ac..0000000
--- a/examples/ex02_numeric_variation.py
+++ /dev/null
@@ -1,62 +0,0 @@
-r"""
-Numeric calculus of variation
------------------------------
-Each Tensor has four attributes: the (real) tensor array and the (hyper-dual)
-variational arrays. To obtain the :math:`12` - component of the gradient and the
-:math:`1223` - component of the hessian, a tensor has to be created with the appropriate
-small-changes of the tensor components (dual arrays).
-"""
-import numpy as np
-
-import tensortrax as tr
-from tensortrax import Tensor, Δ, Δδ, f, δ
-from tensortrax.math import trace
-
-δF_12 = np.array(
- [
- [0, 1, 0],
- [0, 0, 0],
- [0, 0, 0],
- ],
- dtype=float,
-)
-
-ΔF_23 = np.array(
- [
- [0, 0, 0],
- [0, 0, 1],
- [0, 0, 0],
- ],
- dtype=float,
-)
-
-x = np.eye(3) + np.arange(9).reshape(3, 3) / 10
-F = Tensor(x=x, δx=δF_12, Δx=ΔF_23, Δδx=None)
-I1_C = trace(F.T @ F)
-
-# %%
-# The function as well as the gradient and hessian components are accessible with
-# helpers.
-ψ = f(I1_C)
-P_12 = δ(I1_C)
-A_1223 = Δδ(I1_C)
-
-# %%
-# To obtain full gradients and hessians of scalar-valued functions in one function call,
-# ``tensortrax`` provides helpers (decorators) which handle the multiple function calls.
-fun = lambda F: trace(F.T @ F)
-
-func = tr.function(fun)(x)
-grad = tr.gradient(fun)(x)
-hess = tr.hessian(fun)(x)
-
-# %%
-# For tensor-valued functions, use ``jacobian()`` instead of ``gradient()``.
-fun = lambda F: F.T @ F
-jac = tr.jacobian(fun)(x)
-
-# %%
-# Evaluate the gradient- as well as the hessian-vector(s)-product.
-gvp = tr.gradient_vector_product(fun)(x, δx=x)
-hvp = tr.hessian_vector_product(fun)(x, δx=x)
-hvsp = tr.hessian_vectors_product(fun)(x, δx=x, Δx=x)
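
As a cross-check on the removed example: tr(FᵀF) is the sum of squared
components of F, so its gradient is 2 F and the seeded 12-component δ(I1_C)
equals 2 * x[0, 1]; a minimal sketch using the helpers shown above:

    import numpy as np
    import tensortrax as tr
    from tensortrax.math import trace

    fun = lambda F: trace(F.T @ F)
    x = np.eye(3) + np.arange(9).reshape(3, 3) / 10

    # d(tr(FᵀF))/dF = 2 F holds component-wise
    assert np.allclose(tr.gradient(fun)(x), 2 * x)
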
diff --git a/examples/ex03_custom_extension.py b/examples/ex03_custom_extension.py
deleted file mode 100644
index 4912d2f..0000000
--- a/examples/ex03_custom_extension.py
+++ /dev/null
@@ -1,33 +0,0 @@
-r"""
-Custom Extensions
------------------
-Custom functions (extensions) are easy to implement in `tensortrax`. Beside the function
-expression, three additional (dual) variation expressions have to be defined.
-"""
-import numpy as np
-
-from tensortrax import Tensor, Δ, Δδ, f, δ
-
-
-def sin(A):
- return Tensor(
- x=np.sin(f(A)),
- δx=np.cos(f(A)) * δ(A),
- Δx=np.cos(f(A)) * Δ(A),
- Δδx=-np.sin(f(A)) * δ(A) * Δ(A) + np.cos(f(A)) * Δδ(A),
- ntrax=A.ntrax,
- )
-
-
-x = np.eye(3)
-y = sin(Tensor(x))
-
-# %%
-# .. note::
-# Contrary to NumPy's ``w, v = np.linalg.eigh(C)``, which returns eigenvalues and
-# -vectors, the differentiable ``w, M = tm.linalg.eigh(C)`` function returns
-# eigenvalues and eigenbases of symmetric real-valued tensors.
-#
-# .. tip::
-# Feel free to `contribute `_ missing
-# math-functions to `src/tensortrax/math/_math_tensor.py `_ 📃 ✏️.
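
The removed template generalizes directly to other elementwise functions; a
sketch for exp, mirroring the sin extension above (the dual parts follow from
the first- and second-order chain rule, not from the library source):

    import numpy as np
    from tensortrax import Tensor, Δ, Δδ, f, δ

    def exp(A):
        x = np.exp(f(A))
        return Tensor(
            x=x,
            δx=x * δ(A),
            Δx=x * Δ(A),
            # second-order chain rule: Δδ(exp A) = exp(A) δA ΔA + exp(A) ΔδA
            Δδx=x * δ(A) * Δ(A) + x * Δδ(A),
            ntrax=A.ntrax,
        )

    y = exp(Tensor(np.eye(3)))
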
diff --git a/pyproject.toml b/pyproject.toml
index 01951ec..3753bef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ profile = "black"
[project]
name = "tensortrax"
-description = "Differentiable Tensors based on NumPy Arrays"
+description = "Math on (Hyper-Dual) Tensors with Trailing Axes"
readme = "README.md"
authors = [
{name = "Andreas Dutzler"},
diff --git a/src/tensortrax/__about__.py b/src/tensortrax/__about__.py
index 62db274..c6ee375 100644
--- a/src/tensortrax/__about__.py
+++ b/src/tensortrax/__about__.py
@@ -2,4 +2,4 @@
tensorTRAX: Math on (Hyper-Dual) Tensors with Trailing Axes.
"""
-__version__ = "0.25.3"
+__version__ = "0.26.0"
diff --git a/src/tensortrax/_evaluate.py b/src/tensortrax/_evaluate.py
index 56697a4..6a48882 100644
--- a/src/tensortrax/_evaluate.py
+++ b/src/tensortrax/_evaluate.py
@@ -143,50 +143,7 @@ def concat(arrays, axis):
def function(fun, wrt=0, ntrax=0, parallel=False):
- r"""Evaluate a function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0).
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the function in parallel (threaded).
-
- Returns
- -------
- ndarray
- NumPy array containing the function result.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(F, mu=1):
- >>> C = F.T @ F
- >>> I1 = tm.trace(C)
- >>> J = tm.linalg.det(F)
- >>> return mu / 2 * (J ** (-2 / 3) * I1 - 3)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>>
- >>> F.shape
- (3, 3, 8, 20)
-
- >>> W = tr.function(fun, wrt=0, ntrax=2)(F)
- >>> W = tr.function(fun, wrt="F", ntrax=2)(F=F)
- >>>
- >>> W.shape
- >>> (8, 20)
- """
+ "Evaluate a function."
@wraps(fun)
def evaluate_function(*args, **kwargs):
@@ -213,57 +170,7 @@ def kernel(args, kwargs):
def gradient(fun, wrt=0, ntrax=0, parallel=False, full_output=False, sym=False):
- r"""Evaluate the gradient of a scalar-valued function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0). The gradient is carried out with respect to this argument.
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the gradient in parallel (threaded).
- full_output: bool, optional
- Return the gradient and the function (default is False).
- sym : bool, optional
- Apply the variations only on the upper-triangle entries of a symmetric second
- order tensor. This is a performance feature and requires no modification of the
- callable ``fun`` and the input arguments, including ``wrt``. Default is False.
-
- Returns
- -------
- ndarray or list of ndarray
- NumPy array containing the gradient result. If ``full_output=True``, the
- function is also returned.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(F, mu=1):
- >>> C = F.T @ F
- >>> I1 = tm.trace(C)
- >>> J = tm.linalg.det(F)
- >>> return mu / 2 * (J ** (-2 / 3) * I1 - 3)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>>
- >>> F.shape
- (3, 3, 8, 20)
-
- >>> dWdF = tr.gradient(fun, wrt=0, ntrax=2)(F)
- >>> dWdF = tr.gradient(fun, wrt="F", ntrax=2)(F=F)
- >>>
- >>> dWdF.shape
- >>> (3, 3, 8, 20)
- """
+ "Evaluate the gradient of a scalar-valued function."
@wraps(fun)
def evaluate_gradient(*args, **kwargs):
@@ -295,57 +202,7 @@ def kernel(args, kwargs):
def hessian(fun, wrt=0, ntrax=0, parallel=False, full_output=False, sym=False):
- r"""Evaluate the Hessian of a scalar-valued function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0). The Hessian is carried out with respect to this argument.
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the Hessian in parallel (threaded).
- full_output: bool, optional
- Return the hessian, the gradient and the function (default is False).
- sym : bool, optional
- Apply the variations only on the upper-triangle entries of a symmetric second
- order tensor. This is a performance feature and requires no modification of the
- callable ``fun`` and the input arguments, including ``wrt``. Default is False.
-
- Returns
- -------
- ndarray or list of ndarray
- NumPy array containing the Hessian result. If ``full_output=True``, the
- gradient and the function are also returned.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(F, mu=1):
- >>> C = F.T @ F
- >>> I1 = tm.trace(C)
- >>> J = tm.linalg.det(F)
- >>> return mu / 2 * (J ** (-2 / 3) * I1 - 3)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>>
- >>> F.shape
- (3, 3, 8, 20)
-
- >>> d2WdFdF = tr.hessian(fun, wrt=0, ntrax=2)(F)
- >>> d2WdFdF = tr.hessian(fun, wrt="F", ntrax=2)(F=F)
- >>>
- >>> d2WdFdF.shape
- >>> (3, 3, 3, 3, 8, 20)
- """
+ "Evaluate the hessian of a scalar-valued function."
@wraps(fun)
def evaluate_hessian(*args, **kwargs):
@@ -385,51 +242,7 @@ def kernel(args, kwargs):
def jacobian(fun, wrt=0, ntrax=0, parallel=False, full_output=False):
- r"""Evaluate the Jacobian of a tensor-valued function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0). The Jacobian is carried out with respect to this argument.
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the Jacobian in parallel (threaded).
- full_output: bool, optional
- Return the Jacobian and the function (default is False).
-
- Returns
- -------
- ndarray
- NumPy array containing the Jacobian result.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(C, mu=1):
- >>> I3 = tm.linalg.det(C)
- >>> return mu * tm.special.dev(I3 ** (-1 / 3) * C) @ tm.linalg.inv(C)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>> C = np.einsum("ki...,kj...->ij...", F, F)
- >>>
- >>> C.shape
- (3, 3, 8, 20)
-
- >>> dSdC = tr.jacobian(fun, wrt=0, ntrax=2)(C)
- >>> dSdC = tr.jacobian(fun, wrt="C", ntrax=2)(C=C)
- >>>
- >>> dSdC.shape
- >>> (3, 3, 3, 3, 8, 20)
- """
+ "Evaluate the jacobian of a function."
@wraps(fun)
def evaluate_jacobian(*args, **kwargs):
@@ -456,60 +269,7 @@ def kernel(args, kwargs):
def gradient_vector_product(fun, wrt=0, ntrax=0, parallel=False):
- r"""Evaluate the gradient-vector-product of a scalar-valued function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated. Its signature is extended to
- :func:`fun(*args, δx, **kwargs)`, where the added ``δx``-argument is the vector
- of the gradient-vector product.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0). The gradient-vector-product is carried out with respect to this argument.
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the gradient-vector-product in parallel (threaded).
-
- Returns
- -------
- ndarray
- NumPy array containing the gradient-vector-product result.
-
- Notes
- -----
- The *vector* :math:`\delta x` and the tensor-argument ``wrt`` must have equal or
- broadcast-compatible shapes. This means that the *vector* is not restricted to be a
- one-dimensional array but must be an array with compatible shape instead.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(F, mu=1):
- >>> C = F.T @ F
- >>> I1 = tm.trace(C)
- >>> J = tm.linalg.det(F)
- >>> return mu / 2 * (J ** (-2 / 3) * I1 - 3)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>> F.shape
- (3, 3, 8, 20)
-
- >>> np.random.seed(63254)
- >>> δF = np.random.rand(3, 3, 8, 20) / 10
- >>> δF.shape
- (3, 3, 8, 20)
-
- >>> dW = tr.gradient_vector_product(fun, wrt=0, ntrax=2)(F, δx=δF)
- >>> dW.shape
- >>> (8, 20)
- """
+ "Evaluate the gradient-vector-product of a function."
@wraps(fun)
def evaluate_gradient_vector_product(*args, δx, **kwargs):
@@ -524,60 +284,7 @@ def evaluate_gradient_vector_product(*args, δx, **kwargs):
def hessian_vector_product(fun, wrt=0, ntrax=0, parallel=False):
- r"""Evaluate the Hessian-vector-product of a scalar-valued function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated. Its signature is extended to
- :func:`fun(*args, δx, **kwargs)`, where the added ``δx``-argument is the vector
- of the Hessian-vector product.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0). The Hessian-vector-product is carried out with respect to this argument.
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the gradient-vector-product in parallel (threaded).
-
- Returns
- -------
- ndarray
- NumPy array containing the Hessian-vector-product result.
-
- Notes
- -----
- The *vector* :math:`\delta x` and the tensor-argument ``wrt`` must have equal or
- broadcast-compatible shapes. This means that the *vector* is not restricted to be a
- one-dimensional array but must be an array with compatible shape instead.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(F, mu=1):
- >>> C = F.T @ F
- >>> I1 = tm.trace(C)
- >>> J = tm.linalg.det(F)
- >>> return mu / 2 * (J ** (-2 / 3) * I1 - 3)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>> F.shape
- (3, 3, 8, 20)
-
- >>> np.random.seed(63254)
- >>> δF = np.random.rand(3, 3, 8, 20) / 10
- >>> δF.shape
- (3, 3, 8, 20)
-
- >>> dP = tr.hessian_vector_product(fun, wrt=0, ntrax=2)(F, δx=δF)
- >>> dP.shape
- >>> (3, 3, 8, 20)
- """
+ "Evaluate the hessian-vector-product of a function."
@wraps(fun)
def evaluate_hessian_vector_product(*args, δx, **kwargs):
@@ -592,67 +299,7 @@ def evaluate_hessian_vector_product(*args, δx, **kwargs):
def hessian_vectors_product(fun, wrt=0, ntrax=0, parallel=False):
- r"""Evaluate the vector-Hessian-vector- or Hessian-vectors-product of a scalar-
- valued function.
-
- Parameters
- ----------
- fun : callable
- The function to be evaluated. Its signature is extended to
- :func:`fun(*args, δx, Δx, **kwargs)`, where the added ``δx``- and ``Δx``-
- arguments are the vectors of the Hessian-vectors product.
- wrt : int or str, optional
- The input argument which will be treated as :class:`~tensortrax.Tensor` (default
- is 0). The Hessian-vectors-product is carried out with respect to this argument.
- ntrax : int, optional
- Number of elementwise-operating trailing axes (batch dimensions). Default is 0.
- parallel : bool, optional
- Flag to evaluate the gradient-vector-product in parallel (threaded).
-
- Returns
- -------
- ndarray
- NumPy array containing the Hessian-vectors-product result.
-
- Notes
- -----
- The *vectors* :math:`\delta x` and :math:`\Delta x` as well as the tensor-argument
- ``wrt`` must have equal or broadcast-compatible shapes. This means that the
- *vectors* are not restricted to be one-dimensional arrays but must be arrays with
- compatible shapes instead.
-
- Examples
- --------
-
- >>> import numpy as np
- >>> import tensortrax as tr
- >>> import tensortrax.math as tm
- >>>
- >>> def fun(F, mu=1):
- >>> C = F.T @ F
- >>> I1 = tm.trace(C)
- >>> J = tm.linalg.det(F)
- >>> return mu / 2 * (J ** (-2 / 3) * I1 - 3)
- >>>
- >>> np.random.seed(125161)
- >>> F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T
- >>> F.shape
- (3, 3, 8, 20)
-
- >>> np.random.seed(63254)
- >>> δF = np.random.rand(3, 3, 8, 20) / 10
- >>> δF.shape
- (3, 3, 8, 20)
-
- >>> np.random.seed(85476)
- >>> ΔF = np.random.rand(3, 3, 8, 20) / 10
- >>> ΔF.shape
- (3, 3, 8, 20)
-
- >>> ΔδW = tr.hessian_vectors_product(fun, wrt=0, ntrax=2)(F, δx=δF, Δx=ΔF)
- >>> ΔδW.shape
- >>> (8, 20)
- """
+ "Evaluate the hessian-vectors-product of a function."
@wraps(fun)
def evaluate_hessian_vectors_product(*args, δx, Δx, **kwargs):
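
The removed docstrings remain the reference for the call signatures;
condensed, with the batched shapes they documented:

    import numpy as np
    import tensortrax as tr
    import tensortrax.math as tm

    def fun(F, mu=1):
        C = F.T @ F
        I1 = tm.trace(C)
        J = tm.linalg.det(F)
        return mu / 2 * (J ** (-2 / 3) * I1 - 3)

    np.random.seed(125161)
    F = (np.eye(3) + np.random.rand(20, 8, 3, 3) / 10).T  # (3, 3, 8, 20)

    W = tr.function(fun, wrt=0, ntrax=2)(F)                 # (8, 20)
    dWdF = tr.gradient(fun, wrt="F", ntrax=2)(F=F)          # (3, 3, 8, 20)
    d2WdFdF = tr.hessian(fun, ntrax=2)(F)                   # (3, 3, 3, 3, 8, 20)
    dW = tr.gradient_vector_product(fun, ntrax=2)(F, δx=F)  # (8, 20)
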
diff --git a/src/tensortrax/_tensor.py b/src/tensortrax/_tensor.py
index bbe6890..c9c7f23 100644
--- a/src/tensortrax/_tensor.py
+++ b/src/tensortrax/_tensor.py
@@ -449,15 +449,12 @@ def T(self):
return transpose(self)
def ravel(self, order="C"):
- "Return a contiguous flattened array."
return ravel(self, order=order)
def reshape(self, *shape, order="C"):
- "Gives a new shape to an array without changing its data."
return reshape(self, newshape=shape, order=order)
def squeeze(self, axis=None):
- "Remove axes of length one."
return squeeze(self, axis=axis)
def dual2real(self, like):
@@ -537,7 +534,6 @@ def mul(A, B):
def ravel(A, order="C"):
- "Return a contiguous flattened array."
if isinstance(A, Tensor):
δtrax = δ(A).shape[len(A.shape) :]
Δtrax = Δ(A).shape[len(A.shape) :]
@@ -554,7 +550,6 @@ def ravel(A, order="C"):
def squeeze(A, axis=None):
- "Remove axes of length one."
if isinstance(A, Tensor):
if axis is None:
if 1 in A.shape:
@@ -573,7 +568,6 @@ def squeeze(A, axis=None):
def reshape(A, newshape, order="C"):
- "Gives a new shape to an array without changing its data."
if isinstance(A, Tensor):
δtrax = δ(A).shape[len(A.shape) :]
Δtrax = Δ(A).shape[len(A.shape) :]
@@ -755,14 +749,12 @@ def einsum(subscripts, *operands):
def transpose(A):
- "Returns an array with axes transposed."
ij = "abcdefghijklmnopqrstuvwxyz"[: len(A.shape)]
ji = ij[::-1]
return einsum(f"{ij}...->{ji}...", A)
def matmul(A, B):
- "Matrix product of two arrays."
ik = "ik"[2 - len(A.shape) :]
kj = "kj"[: len(B.shape)]
ij = (ik + kj).replace("k", "")
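
The trailing-axes convention behind transpose and matmul is plain einsum
broadcasting over the leading (tensor) axes; a NumPy-only illustration:

    import numpy as np

    A = np.random.rand(3, 3, 8, 20)    # 3x3 tensors with trailing axes (8, 20)
    At = np.einsum("ij...->ji...", A)  # transposes the tensor axes only

    assert At.shape == (3, 3, 8, 20)
    assert np.allclose(At[:, :, 0, 0], A[:, :, 0, 0].T)
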
diff --git a/src/tensortrax/math/__init__.py b/src/tensortrax/math/__init__.py
index 907df8b..27c71a7 100644
--- a/src/tensortrax/math/__init__.py
+++ b/src/tensortrax/math/__init__.py
@@ -1,3 +1,7 @@
+"""
+tensorTRAX: Math on (Hyper-Dual) Tensors with Trailing Axes.
+"""
+
from .._tensor import broadcast_to
from .._tensor import dual_to_real
from .._tensor import dual_to_real as dual2real
diff --git a/src/tensortrax/math/_math_tensor.py b/src/tensortrax/math/_math_tensor.py
index 3f22ba7..ba70b83 100644
--- a/src/tensortrax/math/_math_tensor.py
+++ b/src/tensortrax/math/_math_tensor.py
@@ -41,17 +41,14 @@ def array(object, dtype=None, like=None, shape=None):
def trace(A):
- "Return the sum along diagonals of the array."
return einsum("ii...->...", A)
def transpose(A):
- "Returns an array with axes transposed."
return einsum("ij...->ji...", A)
def sum(A, axis=0):
- "Sum of array elements over a given axis."
if isinstance(A, Tensor):
return Tensor(
x=np.sum(f(A), axis=axis),
@@ -65,7 +62,6 @@ def sum(A, axis=0):
def sign(A):
- "Returns an element-wise indication of the sign of a number."
if isinstance(A, Tensor):
return Tensor(
x=np.sign(f(A)),
@@ -79,7 +75,6 @@ def sign(A):
def abs(A):
- "Calculate the absolute value element-wise."
if isinstance(A, Tensor):
return Tensor(
x=np.abs(f(A)),
@@ -100,7 +95,6 @@ def sqrt(A):
def sin(A):
- "Trigonometric sine, element-wise."
if isinstance(A, Tensor):
return Tensor(
x=np.sin(f(A)),
@@ -114,7 +108,6 @@ def sin(A):
def cos(A):
- "Cosine element-wise."
if isinstance(A, Tensor):
return Tensor(
x=np.cos(f(A)),
@@ -128,7 +121,6 @@ def cos(A):
def tan(A):
- "Compute tangent element-wise."
if isinstance(A, Tensor):
return Tensor(
x=np.tan(f(A)),
@@ -143,7 +135,6 @@ def tan(A):
def sinh(A):
- "Hyperbolic sine, element-wise."
if isinstance(A, Tensor):
return Tensor(
x=np.sinh(f(A)),
@@ -157,7 +148,6 @@ def sinh(A):
def cosh(A):
- "Hyperbolic cosine, element-wise."
if isinstance(A, Tensor):
return Tensor(
x=np.cosh(f(A)),
@@ -171,7 +161,6 @@ def cosh(A):
def tanh(A):
- "Compute hyperbolic tangent element-wise."
if isinstance(A, Tensor):
x = np.tanh(f(A))
return Tensor(
@@ -186,7 +175,6 @@ def tanh(A):
def exp(A):
- "Calculate the exponential of all elements in the input array."
if isinstance(A, Tensor):
x = np.exp(f(A))
return Tensor(
@@ -201,7 +189,6 @@ def exp(A):
def log(A):
- "Natural logarithm, element-wise."
if isinstance(A, Tensor):
x = np.log(f(A))
return Tensor(
@@ -216,7 +203,6 @@ def log(A):
def log10(A):
- "Return the base 10 logarithm of the input array, element-wise."
if isinstance(A, Tensor):
x = np.log10(f(A))
return Tensor(
diff --git a/src/tensortrax/math/linalg/__init__.py b/src/tensortrax/math/linalg/__init__.py
index d85ec5c..fe8b8f1 100644
--- a/src/tensortrax/math/linalg/__init__.py
+++ b/src/tensortrax/math/linalg/__init__.py
@@ -1,3 +1,7 @@
+"""
+tensorTRAX: Math on (Hyper-Dual) Tensors with Trailing Axes.
+"""
+
from ._linalg_array import det as _det
from ._linalg_array import inv as _inv
from ._linalg_array import pinv as _pinv
diff --git a/src/tensortrax/math/special/__init__.py b/src/tensortrax/math/special/__init__.py
index 7e06f7d..a9bac1b 100644
--- a/src/tensortrax/math/special/__init__.py
+++ b/src/tensortrax/math/special/__init__.py
@@ -1,3 +1,7 @@
+"""
+tensorTRAX: Math on (Hyper-Dual) Tensors with Trailing Axes.
+"""
+
from ._special_tensor import (
ddot,
dev,
diff --git a/tox.ini b/tox.ini
index 688150b..902ebb9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist = py3
isolated_build = True
[tool:pytest]
-addopts = --doctest-modules
+addopts = --cov tensortrax --cov-report xml --cov-report term
[testenv]
deps =
@@ -11,4 +11,4 @@ deps =
pytest-cov
extras = all
commands =
- pytest tests {posargs}
+ pytest