
Commit f8d9059

add top5 acc
1 parent c99dcd7

4 files changed: +39 -31 lines


README.md (+17 -17)
@@ -48,27 +48,27 @@ We also provide a simple demo to quantize these models to specified bit-width wi
`python quantize.py --type cifar10 --quant_method linear --param_bits 8 --fwd_bits 8 --bn_bits 8 --ngpu 1`

## Top1 Accuracy
-We evaluate the performance of popular datasets and models with the linear quantization method. The bit-width of running mean and running variance in BN is 10 bits for all results.
+We evaluate the performance of popular datasets and models with the linear quantization method. The bit-width of running mean and running variance in BN is 10 bits for all results (except for 32-float).

|Model|32-float |12-bit |10-bit |8-bit |6-bit |
|:----|:--------:|:------:|:-----:|:-----:|:-----:|
-|[MNIST](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/mnist-b07bb66b.pth)|98.42%|98.43%|98.44%|98.44%|98.32|
-|[SVHN](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/svhn-f564f3d8.pth)|96.03%|96.03%|96.04%|96.02%|95.46%|
-|[CIFAR10](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth)|93.78%|93.79%|93.80%|93.58%|90.86%|
-|[CIFAR100](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth)|74.27%|74.21%|74.19%|73.70%|66.32%|
-|[STL10](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/stl10-866321e9.pth)|77.59%|77.65%|77.70%|77.59%|73.40%|
-|[AlexNet](https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth)|55.70%|55.66%|55.54%|54.17%|18.19%|
-|[VGG16](https://download.pytorch.org/models/vgg16-397923af.pth)|70.44%|70.45%|70.44%|69.99%|53.33%|
-|[VGG19](https://download.pytorch.org/models/vgg19-dcbb9e9d.pth)|71.36%|71.35%|71.34%|70.88%|56.00%|
-|[ResNet18](https://download.pytorch.org/models/resnet18-5c106cde.pth)|68.63%|68.62%|68.49%|66.80%|19.14%|
-|[ResNet34](https://download.pytorch.org/models/resnet34-333f7ec4.pth)|72.50%|72.46%|72.45%|71.47%|32.25%|
-|[ResNet50](https://download.pytorch.org/models/resnet50-19c8e357.pth)|74.98%|74.94%|74.91%|72.54%|2.43%|
-|[ResNet101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth)|76.69%|76.66%|76.22%|65.69%|1.41%|
-|[ResNet152](https://download.pytorch.org/models/resnet152-b121ed2d.pth)|77.55%|77.51%|77.40%|74.95%|9.29%|
-|[SqueezeNetV0](https://download.pytorch.org/models/squeezenet1_0-a815701f.pth)|56.73%|56.75%|56.70%|53.93%|14.21%|
-|[SqueezeNetV1](https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth)|56.52%|56.52%|56.24%|54.56%|17.10%|
-|[InceptionV3](https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth)|76.41%|76.43%|76.44%|73.67%|1.50%|
+|[MNIST](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/mnist-b07bb66b.pth)|98.42|98.43|98.44|98.44|98.32|
+|[SVHN](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/svhn-f564f3d8.pth)|96.03|96.03|96.04|96.02|95.46|
+|[CIFAR10](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth)|93.78|93.79|93.80|93.58|90.86|
+|[CIFAR100](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth)|74.27|74.21|74.19|73.70|66.32|
+|[STL10](http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/stl10-866321e9.pth)|77.59|77.65|77.70|77.59|73.40|
+|[AlexNet](https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth)|55.70/78.42|55.66/78.41|55.54/78.39|54.17/77.29|18.19/36.25|
+|[VGG16](https://download.pytorch.org/models/vgg16-397923af.pth)|70.44/89.43|70.45/89.43|70.44/89.33|69.99/89.17|53.33/76.32|
+|[VGG19](https://download.pytorch.org/models/vgg19-dcbb9e9d.pth)|71.36/89.94|71.35/89.93|71.34/89.88|70.88/89.62|56.00/78.62|
+|[ResNet18](https://download.pytorch.org/models/resnet18-5c106cde.pth)|68.63/88.31|68.62/88.33|68.49/88.25|66.80/87.20|19.14/36.49|
+|[ResNet34](https://download.pytorch.org/models/resnet34-333f7ec4.pth)|72.50/90.86|72.46/90.82|72.45/90.85|71.47/90.00|32.25/55.71|
+|[ResNet50](https://download.pytorch.org/models/resnet50-19c8e357.pth)|74.98/92.17|74.94/92.12|74.91/92.09|72.54/90.44|2.43/5.36|
+|[ResNet101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth)|76.69/93.30|76.66/93.25|76.22/92.90|65.69/79.54|1.41/1.18|
+|[ResNet152](https://download.pytorch.org/models/resnet152-b121ed2d.pth)|77.55/93.59|77.51/93.62|77.40/93.54|74.95/92.46|9.29/16.75|
+|[SqueezeNetV0](https://download.pytorch.org/models/squeezenet1_0-a815701f.pth)|56.73/79.39|56.75/79.40|56.70/79.27|53.93/77.04|14.21/29.74|
+|[SqueezeNetV1](https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth)|56.52/79.13|56.52/79.15|56.24/79.03|54.56/77.33|17.10/32.46|
+|[InceptionV3](https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth)|76.41/92.78|76.43/92.71|76.44/92.73|73.67/91.34|1.50/4.82|

**Note: ImageNet 32-float models are directly from torchvision**

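The table above is produced by running quantize.py at several bit-widths. As a rough illustration only, such a sweep could be scripted as below; the flag names are taken from the README command shown in the diff, while the driver loop, the chosen values, and the fixed 10-bit BN setting are assumptions of this sketch, not part of the commit.

```python
# Hypothetical bit-width sweep using the quantize.py flags from the README command.
# The loop, the value list, and the fixed 10-bit BN setting are illustrative assumptions.
import subprocess

for bits in (12, 10, 8, 6):
    subprocess.run([
        "python", "quantize.py",
        "--type", "cifar10",
        "--quant_method", "linear",
        "--param_bits", str(bits),
        "--fwd_bits", str(bits),
        "--bn_bits", "10",   # BN running mean/var kept at 10 bits, per the README text
        "--ngpu", "1",
    ], check=True)
```

Each run appends one result line to the text file written at the end of quantize.py (acc1_acc5.txt after this commit).
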
quantize.py (+4 -4)
@@ -83,14 +83,14 @@
# eval model
val_ds = ds_fetcher(args.batch_size, data_root=args.data_root, train=False, input_size=args.input_size)
-acc = misc.eval_model(model_raw, val_ds, ngpu=args.ngpu, is_imagenet=is_imagenet)
+acc1, acc5 = misc.eval_model(model_raw, val_ds, ngpu=args.ngpu, is_imagenet=is_imagenet)

# print sf
print(model_raw)
-res_str = "type={}, quant_method={}, param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={}, acc={:.4f}".format(
-    args.type, args.quant_method, args.param_bits, args.bn_bits, args.fwd_bits, args.overflow_rate, acc)
+res_str = "type={}, quant_method={}, param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={}, acc1={:.4f}, acc5={:.4f}".format(
+    args.type, args.quant_method, args.param_bits, args.bn_bits, args.fwd_bits, args.overflow_rate, acc1, acc5)
print(res_str)
-with open('overflow_rate_6810.txt', 'a') as f:
+with open('acc1_acc5.txt', 'a') as f:
    f.write(res_str + '\n')

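With this change every run appends one comma-separated line of `key=value` pairs (type, quant_method, param_bits, bn_bits, fwd_bits, overflow_rate, acc1, acc5) to acc1_acc5.txt. As a small illustration that is not part of the commit, such lines can be read back like this, assuming they follow the res_str format built above:

```python
# Illustrative parser for the appended result lines; assumes each line follows
# the comma-separated "key=value" format produced by res_str in quantize.py.
def parse_results(path="acc1_acc5.txt"):
    rows = []
    with open(path) as f:
        for line in f:
            fields = dict(item.split("=", 1) for item in line.strip().split(", "))
            for key in ("acc1", "acc5"):
                if key in fields:
                    fields[key] = float(fields[key])
            rows.append(fields)
    return rows
```
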
utee/misc.py (+14 -6)
@@ -6,6 +6,8 @@
import numpy as np
import hashlib

+from IPython import embed
+
class Logger(object):
    def __init__(self):
        self._logger = None
@@ -167,7 +169,7 @@ def forward(self, input):
            input.data[:, 2, :, :].sub_(self.mean[2]).div_(self.std[2])
            return self.model(input)

-    correct = 0
+    correct1, correct5 = 0, 0
    n_passed = 0
    if is_imagenet:
        model = ModelWrapper(model)
@@ -179,16 +181,22 @@ def forward(self, input):
        n_passed += len(data)
        data = Variable(torch.FloatTensor(data)).cuda()
        indx_target = torch.LongTensor(target)
-
        output = model(data)
-        pred = output.data.max(1)[1]  # get the index of the max log-probability
-        correct += pred.cpu().eq(indx_target).sum()
+        bs = output.size(0)
+        idx_pred = output.data.sort(1, descending=True)[1]
+
+        idx_gt1 = indx_target.expand(1, bs).transpose_(0, 1)
+        idx_gt5 = idx_gt1.expand(bs, 5)
+
+        correct1 += idx_pred[:, :1].cpu().eq(idx_gt1).sum()
+        correct5 += idx_pred[:, :5].cpu().eq(idx_gt5).sum()

        if idx >= n_sample - 1:
            break

-    acc = correct * 1.0 / n_passed
-    return acc
+    acc1 = correct1 * 1.0 / n_passed
+    acc5 = correct5 * 1.0 / n_passed
+    return acc1, acc5

def load_state_dict(model, model_urls, model_root):
    from torch.utils import model_zoo

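The new code in eval_model sorts the scores once per batch and compares the first one and five columns against the broadcast targets to count top-1 and top-5 hits. For reference, a compact alternative based on torch.topk is sketched below; it illustrates the same idea but is not the code this commit adds.

```python
import torch

def topk_correct(output, target, ks=(1, 5)):
    """Count targets that appear among the top-k predictions, for each k.

    output: (batch, num_classes) scores; target: (batch,) class indices.
    Illustrative helper only, not part of utee/misc.py.
    """
    maxk = max(ks)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)   # (batch, maxk) indices
    hits = pred.eq(target.view(-1, 1).expand_as(pred))              # (batch, maxk) matches
    return [hits[:, :k].sum().item() for k in ks]

# Usage sketch: accumulate per batch, mirroring correct1/correct5 above.
# c1, c5 = topk_correct(output.data, indx_target)
# correct1 += c1; correct5 += c5
```
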
utee/selector.py (+4 -4)
@@ -56,7 +56,7 @@ def stl10(cuda=True, model_root=None):
    return m, dataset.get, False

def alexnet(cuda=True, model_root=None):
-    print("Building and initializing vgg16 parameters")
+    print("Building and initializing alexnet parameters")
    from imagenet import alexnet as alx
    m = alx.alexnet(True, model_root)
    if cuda:
@@ -96,7 +96,7 @@ def vgg19_bn(cuda=True, model_root=None):
    return m, dataset.get, True

def inception_v3(cuda=True, model_root=None):
-    print("Building and initializing inception v3 parameters")
+    print("Building and initializing inception_v3 parameters")
    from imagenet import inception
    m = inception.inception_v3(True, model_root)
    if cuda:
@@ -144,15 +144,15 @@ def resnet152(cuda=True, model_root=None):
    return m, dataset.get, True

def squeezenet_v0(cuda=True, model_root=None):
-    print("Building and initializing squeezenet parameters")
+    print("Building and initializing squeezenet_v0 parameters")
    from imagenet import squeezenet
    m = squeezenet.squeezenet1_0(True, model_root)
    if cuda:
        m = m.cuda()
    return m, dataset.get, True

def squeezenet_v1(cuda=True, model_root=None):
-    print("Building and initializing squeezenet parameters")
+    print("Building and initializing squeezenet_v1 parameters")
    from imagenet import squeezenet
    m = squeezenet.squeezenet1_1(True, model_root)
    if cuda:
