From 382446fd5bc93bc1133dde0859115282da8f4889 Mon Sep 17 00:00:00 2001
From: novice <44259234+novice03@users.noreply.github.com>
Date: Thu, 13 May 2021 19:03:46 +0530
Subject: [PATCH] Added all relevant files

---
 README.md                                 |  21 ++++++++++
 build/lib/tucker_conv/__init__.py         |   1 +
 build/lib/tucker_conv/conv.py             |  47 ++++++++++++++++++++++
 build/lib/tucker_conv/tucker_conv.py      |  47 ++++++++++++++++++++++
 dist/tucker_conv-1.0.0-py3-none-any.whl   | Bin 0 -> 3032 bytes
 dist/tucker_conv-1.0.0.tar.gz             | Bin 0 -> 1799 bytes
 setup.cfg                                 |  15 +++++++
 tucker_conv.egg-info/PKG-INFO             |  32 +++++++++++++++
 tucker_conv.egg-info/SOURCES.txt          |   8 ++++
 tucker_conv.egg-info/dependency_links.txt |   1 +
 tucker_conv.egg-info/top_level.txt        |   1 +
 tucker_conv/__init__.py                   |   1 +
 tucker_conv/conv.py                       |  47 ++++++++++++++++++++++
 13 files changed, 221 insertions(+)
 create mode 100644 README.md
 create mode 100644 build/lib/tucker_conv/__init__.py
 create mode 100644 build/lib/tucker_conv/conv.py
 create mode 100644 build/lib/tucker_conv/tucker_conv.py
 create mode 100644 dist/tucker_conv-1.0.0-py3-none-any.whl
 create mode 100644 dist/tucker_conv-1.0.0.tar.gz
 create mode 100644 setup.cfg
 create mode 100644 tucker_conv.egg-info/PKG-INFO
 create mode 100644 tucker_conv.egg-info/SOURCES.txt
 create mode 100644 tucker_conv.egg-info/dependency_links.txt
 create mode 100644 tucker_conv.egg-info/top_level.txt
 create mode 100644 tucker_conv/__init__.py
 create mode 100644 tucker_conv/conv.py

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f052b4f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,21 @@
+## Tucker Convolutional Layers
+
+PyTorch implementation of Tucker Convolutional Layers as introduced in [MobileDets: Searching for Object Detection Architectures for Mobile Accelerators](https://arxiv.org/abs/2004.14525v3). Ross Wightman's timm library is used for some helper functions and served as inspiration for the coding style.
+
+## Installation
+
+```bash
+$ pip install tucker-conv
+```
+
+## Usage
+
+```python
+from tucker_conv.conv import TuckerConv
+import torch
+
+tucker = TuckerConv(30, 30, in_comp_ratio = 0.25, out_comp_ratio = 0.75)
+input = torch.randn([1, 30, 512, 512])
+
+output = tucker(input)
+```
\ No newline at end of file
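For the usage example above, the two compression ratios determine the internal channel counts of the block. A minimal sketch of that arithmetic, assuming timm's `make_divisible` follows the standard MobileNet rounding rule with its default divisor of 8 (re-implemented here only for illustration):

```python
# Standard MobileNet-style channel rounding; timm's make_divisible is assumed
# to behave like this with its default divisor of 8 (illustrative only).
def make_divisible(v, divisor = 8, min_value = None):
    min_value = min_value or divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:  # avoid rounding down by more than ~10%
        new_v += divisor
    return new_v

in_chs, out_chs = 30, 30
comp_chs = make_divisible(0.25 * in_chs)   # 7.5  -> 8 compressed channels
reg_chs = make_divisible(0.75 * out_chs)   # 22.5 -> 24 channels for the regular conv
print(comp_chs, reg_chs)                   # 8 24
```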
diff --git a/build/lib/tucker_conv/__init__.py b/build/lib/tucker_conv/__init__.py
new file mode 100644
index 0000000..ccbad98
--- /dev/null
+++ b/build/lib/tucker_conv/__init__.py
@@ -0,0 +1 @@
+from tucker_conv.conv import TuckerConv
\ No newline at end of file
diff --git a/build/lib/tucker_conv/conv.py b/build/lib/tucker_conv/conv.py
new file mode 100644
index 0000000..96b1ddb
--- /dev/null
+++ b/build/lib/tucker_conv/conv.py
@@ -0,0 +1,47 @@
+import torch.nn as nn
+from timm.models.efficientnet_blocks import make_divisible
+from timm.models.layers import create_conv2d
+
+class TuckerConv(nn.Module):
+    def __init__(self, in_chs, out_chs, in_comp_ratio = 0.25, out_comp_ratio = 0.75, act_layer = nn.ReLU6,
+                 norm_layer = nn.BatchNorm2d, comp_kernel_size = 1, reg_kernel_size = 3, pad_type = '', residual = True):
+        super(TuckerConv, self).__init__()
+        self.residual = residual
+        comp_chs = make_divisible(in_comp_ratio * in_chs)
+        reg_chs = make_divisible(out_comp_ratio * out_chs)
+
+        # Point - wise compression
+        self.conv_pw = create_conv2d(in_chs, comp_chs, comp_kernel_size, padding = pad_type)
+        self.bn1 = norm_layer(comp_chs)
+        self.act1 = act_layer(inplace = True)
+
+        # Regular convolution
+        self.conv_reg = create_conv2d(comp_chs, reg_chs, reg_kernel_size, padding = pad_type)
+        self.bn2 = norm_layer(reg_chs)
+        self.act2 = act_layer(inplace = True)
+
+        # Point - wise linear projection
+        self.conv_pwl = create_conv2d(reg_chs, out_chs, comp_kernel_size, padding = pad_type)
+        self.bn3 = norm_layer(out_chs)
+
+    def forward(self, x):
+        shortcut = x
+
+        # Point - wise compression
+        x = self.conv_pw(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        # Regular convolution
+        x = self.conv_reg(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        # Point - wise linear projection
+        x = self.conv_pwl(x)
+        x = self.bn3(x)
+
+        if self.residual:
+            x = x + shortcut
+
+        return x
\ No newline at end of file
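One thing worth noting about `forward` above: with the default `residual = True`, the shortcut is added to a tensor with `out_chs` channels, so the layer generally expects `in_chs == out_chs` (as in the README example). A hedged sketch of how a caller might guard against mismatched channel counts, assuming the stride-1, 'same'-padded defaults of `create_conv2d`:

```python
import torch
from tucker_conv.conv import TuckerConv

def build_tucker(in_chs, out_chs, **kwargs):
    # Only keep the identity shortcut when the add is shape-compatible.
    kwargs.setdefault("residual", in_chs == out_chs)
    return TuckerConv(in_chs, out_chs, **kwargs)

layer = build_tucker(30, 60)           # residual disabled automatically
out = layer(torch.randn(1, 30, 64, 64))
print(out.shape)                       # torch.Size([1, 60, 64, 64])
```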
diff --git a/build/lib/tucker_conv/tucker_conv.py b/build/lib/tucker_conv/tucker_conv.py
new file mode 100644
index 0000000..96b1ddb
--- /dev/null
+++ b/build/lib/tucker_conv/tucker_conv.py
@@ -0,0 +1,47 @@
+import torch.nn as nn
+from timm.models.efficientnet_blocks import make_divisible
+from timm.models.layers import create_conv2d
+
+class TuckerConv(nn.Module):
+    def __init__(self, in_chs, out_chs, in_comp_ratio = 0.25, out_comp_ratio = 0.75, act_layer = nn.ReLU6,
+                 norm_layer = nn.BatchNorm2d, comp_kernel_size = 1, reg_kernel_size = 3, pad_type = '', residual = True):
+        super(TuckerConv, self).__init__()
+        self.residual = residual
+        comp_chs = make_divisible(in_comp_ratio * in_chs)
+        reg_chs = make_divisible(out_comp_ratio * out_chs)
+
+        # Point - wise compression
+        self.conv_pw = create_conv2d(in_chs, comp_chs, comp_kernel_size, padding = pad_type)
+        self.bn1 = norm_layer(comp_chs)
+        self.act1 = act_layer(inplace = True)
+
+        # Regular convolution
+        self.conv_reg = create_conv2d(comp_chs, reg_chs, reg_kernel_size, padding = pad_type)
+        self.bn2 = norm_layer(reg_chs)
+        self.act2 = act_layer(inplace = True)
+
+        # Point - wise linear projection
+        self.conv_pwl = create_conv2d(reg_chs, out_chs, comp_kernel_size, padding = pad_type)
+        self.bn3 = norm_layer(out_chs)
+
+    def forward(self, x):
+        shortcut = x
+
+        # Point - wise compression
+        x = self.conv_pw(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        # Regular convolution
+        x = self.conv_reg(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        # Point - wise linear projection
+        x = self.conv_pwl(x)
+        x = self.bn3(x)
+
+        if self.residual:
+            x = x + shortcut
+
+        return x
\ No newline at end of file
diff --git a/dist/tucker_conv-1.0.0-py3-none-any.whl b/dist/tucker_conv-1.0.0-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..a76a465753dd97eb955dad8b8e3f06ae49b3bbc3
GIT binary patch
literal 3032
[base85-encoded binary wheel data omitted]

literal 0
HcmV?d00001

diff --git a/dist/tucker_conv-1.0.0.tar.gz b/dist/tucker_conv-1.0.0.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..3cb2a7f62b39180ab9c6ddd2ce40922156c35511
GIT binary patch
literal 1799
[base85-encoded binary sdist data omitted]

literal 0
HcmV?d00001

diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..19caf44
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,15 @@
+[metadata]
+name = tucker_conv
+version = 1.0.0
+author = Pranav Pulijala
+description = Implementation of Tucker Convolution Layer
+long_description = file: README.md
+long_description_content_type = text/markdown
+url = https://github.com/novice03/tucker-conv
+classifiers =
+    Programming Language :: Python :: 3
+    Operating System :: OS Independent
+
+[options]
+packages = find:
+include_package_data = True
\ No newline at end of file
diff --git a/tucker_conv.egg-info/PKG-INFO b/tucker_conv.egg-info/PKG-INFO
new file mode 100644
index 0000000..2f771d8
--- /dev/null
+++ b/tucker_conv.egg-info/PKG-INFO
@@ -0,0 +1,32 @@
+Metadata-Version: 2.1
+Name: tucker-conv
+Version: 1.0.0
+Summary: Implementation of Tucker Convolution Layer
+Home-page: https://github.com/novice03/tucker-conv
+Author: Pranav Pulijala
+License: UNKNOWN
+Description: ## Tucker Convolutional Layers
+
+        PyTorch implementation of Tucker Convolutional Layers as introduced in [MobileDets: Searching for Object Detection Architectures for Mobile Accelerators](https://arxiv.org/abs/2004.14525v3). Ross Wightman's timm library is used for some helper functions and served as inspiration for the coding style.
+
+        ## Installation
+
+        ```bash
+        $ pip install tucker-conv
+        ```
+
+        ## Usage
+
+        ```python
+        from tucker_conv.conv import TuckerConv
+        import torch
+
+        tucker = TuckerConv(30, 30, in_comp_ratio = 0.25, out_comp_ratio = 0.75)
+        input = torch.randn([1, 30, 512, 512])
+
+        output = tucker(input)
+        ```
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+Description-Content-Type: text/markdown
diff --git a/tucker_conv.egg-info/SOURCES.txt b/tucker_conv.egg-info/SOURCES.txt
new file mode 100644
index 0000000..312d6c8
--- /dev/null
+++ b/tucker_conv.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+README.md
+setup.cfg
+tucker_conv/__init__.py
+tucker_conv/conv.py
+tucker_conv.egg-info/PKG-INFO
+tucker_conv.egg-info/SOURCES.txt
+tucker_conv.egg-info/dependency_links.txt
+tucker_conv.egg-info/top_level.txt
\ No newline at end of file
diff --git a/tucker_conv.egg-info/dependency_links.txt b/tucker_conv.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tucker_conv.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/tucker_conv.egg-info/top_level.txt b/tucker_conv.egg-info/top_level.txt
new file mode 100644
index 0000000..f0baafb
--- /dev/null
+++ b/tucker_conv.egg-info/top_level.txt
@@ -0,0 +1 @@
+tucker_conv
diff --git a/tucker_conv/__init__.py b/tucker_conv/__init__.py
new file mode 100644
index 0000000..ccbad98
--- /dev/null
+++ b/tucker_conv/__init__.py
@@ -0,0 +1 @@
+from tucker_conv.conv import TuckerConv
\ No newline at end of file
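Because `tucker_conv/__init__.py` above re-exports the class, the shorter import works alongside the one shown in the README:

```python
from tucker_conv import TuckerConv  # equivalent to: from tucker_conv.conv import TuckerConv

tucker = TuckerConv(30, 30)  # defaults: in_comp_ratio = 0.25, out_comp_ratio = 0.75
```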
diff --git a/tucker_conv/conv.py b/tucker_conv/conv.py
new file mode 100644
index 0000000..96b1ddb
--- /dev/null
+++ b/tucker_conv/conv.py
@@ -0,0 +1,47 @@
+import torch.nn as nn
+from timm.models.efficientnet_blocks import make_divisible
+from timm.models.layers import create_conv2d
+
+class TuckerConv(nn.Module):
+    def __init__(self, in_chs, out_chs, in_comp_ratio = 0.25, out_comp_ratio = 0.75, act_layer = nn.ReLU6,
+                 norm_layer = nn.BatchNorm2d, comp_kernel_size = 1, reg_kernel_size = 3, pad_type = '', residual = True):
+        super(TuckerConv, self).__init__()
+        self.residual = residual
+        comp_chs = make_divisible(in_comp_ratio * in_chs)
+        reg_chs = make_divisible(out_comp_ratio * out_chs)
+
+        # Point - wise compression
+        self.conv_pw = create_conv2d(in_chs, comp_chs, comp_kernel_size, padding = pad_type)
+        self.bn1 = norm_layer(comp_chs)
+        self.act1 = act_layer(inplace = True)
+
+        # Regular convolution
+        self.conv_reg = create_conv2d(comp_chs, reg_chs, reg_kernel_size, padding = pad_type)
+        self.bn2 = norm_layer(reg_chs)
+        self.act2 = act_layer(inplace = True)
+
+        # Point - wise linear projection
+        self.conv_pwl = create_conv2d(reg_chs, out_chs, comp_kernel_size, padding = pad_type)
+        self.bn3 = norm_layer(out_chs)
+
+    def forward(self, x):
+        shortcut = x
+
+        # Point - wise compression
+        x = self.conv_pw(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        # Regular convolution
+        x = self.conv_reg(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        # Point - wise linear projection
+        x = self.conv_pwl(x)
+        x = self.bn3(x)
+
+        if self.residual:
+            x = x + shortcut
+
+        return x
\ No newline at end of file
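For readers who want to see what the block boils down to without the timm helpers, here is a minimal, dependency-free sketch of the same three-stage bottleneck. Plain `nn.Conv2d` with explicit 'same' padding stands in for `create_conv2d`, the rounded channel counts are passed in by hand, and the residual is guarded for shape compatibility; this is an approximation for illustration, not the packaged implementation:

```python
import torch
import torch.nn as nn

class TuckerBlockSketch(nn.Module):
    """Pointwise compression -> regular 3x3 conv -> pointwise linear projection."""
    def __init__(self, in_chs, out_chs, comp_chs, reg_chs, residual = True):
        super().__init__()
        # Keep the identity shortcut only when the add is shape-compatible.
        self.residual = residual and in_chs == out_chs
        self.compress = nn.Sequential(          # point-wise compression
            nn.Conv2d(in_chs, comp_chs, 1, bias = False),
            nn.BatchNorm2d(comp_chs),
            nn.ReLU6(inplace = True),
        )
        self.regular = nn.Sequential(           # regular convolution
            nn.Conv2d(comp_chs, reg_chs, 3, padding = 1, bias = False),
            nn.BatchNorm2d(reg_chs),
            nn.ReLU6(inplace = True),
        )
        self.project = nn.Sequential(           # point-wise linear projection (no activation)
            nn.Conv2d(reg_chs, out_chs, 1, bias = False),
            nn.BatchNorm2d(out_chs),
        )

    def forward(self, x):
        out = self.project(self.regular(self.compress(x)))
        return out + x if self.residual else out

# Channel counts mirror TuckerConv(30, 30) with the default ratios (8 and 24 after rounding).
block = TuckerBlockSketch(30, 30, comp_chs = 8, reg_chs = 24)
print(block(torch.randn(1, 30, 64, 64)).shape)  # torch.Size([1, 30, 64, 64])
```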