
Commit 9defba8

Author: xyliao
Commit message: update
1 parent f79f286

3 files changed: +290 -1 lines

Diff for: README.md

+2 -1
@@ -26,7 +26,7 @@ Learn Deep Learning with PyTorch
 - [线性模型与梯度下降](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/linear-regression-gradient-descend.ipynb)
 - [Logistic 回归与优化器](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/logistic-regression/logistic-regression.ipynb)
 - [多层神经网络,Sequential 和 Module](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/nn-sequential-module.ipynb)
-- [深度神经网络](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/deep-nn.ipynb)
+- [深层神经网络](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/deep-nn.ipynb)
 - [参数初始化方法](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/param_initialize.ipynb)
 - 优化算法
 - [SGD](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter3_NN/optimizer/sgd.ipynb)

@@ -75,6 +75,7 @@ Learn Deep Learning with PyTorch
 ### part2: 深度学习的应用
 - Chapter 9: 计算机视觉
 - [Fine-tuning: 通过微调进行迁移学习](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/blob/master/chapter9_Computer-Vision/fine_tune/)
+- kaggle初体验:猫狗大战
 - [语义分割: 通过 FCN 实现像素级别的分类](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch/tree/master/chapter9_Computer-Vision/segmentation)
 - Pixel to Pixel 生成对抗网络
 - Neural Transfer: 通过卷积网络实现风格迁移

Diff for: chapter4_CNN/utils.py

+144
@@ -0,0 +1,144 @@

from datetime import datetime

import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable


def get_acc(output, label):
    total = output.shape[0]
    _, pred_label = output.max(1)
    num_correct = (pred_label == label).sum().data[0]
    return num_correct / total


def train(net, train_data, valid_data, num_epochs, optimizer, criterion):
    if torch.cuda.is_available():
        net = net.cuda()
    prev_time = datetime.now()
    for epoch in range(num_epochs):
        train_loss = 0
        train_acc = 0
        net = net.train()
        for im, label in train_data:
            if torch.cuda.is_available():
                im = Variable(im.cuda())  # (bs, 3, h, w)
                label = Variable(label.cuda())  # (bs, h, w)
            else:
                im = Variable(im)
                label = Variable(label)
            # forward
            output = net(im)
            loss = criterion(output, label)
            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0]
            train_acc += get_acc(output, label)

        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        if valid_data is not None:
            valid_loss = 0
            valid_acc = 0
            net = net.eval()
            for im, label in valid_data:
                if torch.cuda.is_available():
                    im = Variable(im.cuda(), volatile=True)
                    label = Variable(label.cuda(), volatile=True)
                else:
                    im = Variable(im, volatile=True)
                    label = Variable(label, volatile=True)
                output = net(im)
                loss = criterion(output, label)
                valid_loss += loss.data[0]
                valid_acc += get_acc(output, label)
            epoch_str = (
                "Epoch %d. Train Loss: %f, Train Acc: %f, Valid Loss: %f, Valid Acc: %f, "
                % (epoch, train_loss / len(train_data),
                   train_acc / len(train_data), valid_loss / len(valid_data),
                   valid_acc / len(valid_data)))
        else:
            epoch_str = ("Epoch %d. Train Loss: %f, Train Acc: %f, " %
                         (epoch, train_loss / len(train_data),
                          train_acc / len(train_data)))
        prev_time = cur_time
        print(epoch_str + time_str)
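As a quick sanity check of get_acc (illustrative only, not part of this commit; it relies on the same PyTorch 0.3-era Variable and .data[0] semantics the file itself uses):

# Illustrative example: a batch of two predictions, one correct.
# output.max(1) picks class 1 for the first row and class 0 for the second,
# and only the first prediction matches its label.
output = Variable(torch.Tensor([[0.1, 0.9], [0.8, 0.2]]))
label = Variable(torch.LongTensor([1, 1]))
print(get_acc(output, label))  # 1 of 2 correct -> 0.5 under Python 3 division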
def conv3x3(in_channel, out_channel, stride=1):
    return nn.Conv2d(
        in_channel, out_channel, 3, stride=stride, padding=1, bias=False)


class residual_block(nn.Module):
    def __init__(self, in_channel, out_channel, same_shape=True):
        super(residual_block, self).__init__()
        self.same_shape = same_shape
        stride = 1 if self.same_shape else 2

        self.conv1 = conv3x3(in_channel, out_channel, stride=stride)
        self.bn1 = nn.BatchNorm2d(out_channel)

        self.conv2 = conv3x3(out_channel, out_channel)
        self.bn2 = nn.BatchNorm2d(out_channel)
        if not self.same_shape:
            self.conv3 = nn.Conv2d(in_channel, out_channel, 1, stride=stride)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu(self.bn1(out), True)
        out = self.conv2(out)
        out = F.relu(self.bn2(out), True)

        if not self.same_shape:
            x = self.conv3(x)
        return F.relu(x + out, True)
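A small shape check for residual_block (again illustrative, not from the commit): with same_shape=False both the main path and the 1x1 shortcut convolution use stride 2, so spatial resolution is halved while the channel count changes:

# Hypothetical check: 96x96 input, 32 -> 64 channels, stride-2 block.
# conv3x3 with stride 2 and padding 1 maps 96 -> 48, and the 1x1 conv3
# shortcut (stride 2, no padding) also maps 96 -> 48, so the sum works.
block = residual_block(32, 64, same_shape=False)
x = Variable(torch.zeros(1, 32, 96, 96))
print(block(x).shape)  # torch.Size([1, 64, 48, 48])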
class resnet(nn.Module):
    def __init__(self, in_channel, num_classes, verbose=False):
        super(resnet, self).__init__()
        self.verbose = verbose

        self.block1 = nn.Conv2d(in_channel, 64, 7, 2)

        self.block2 = nn.Sequential(
            nn.MaxPool2d(3, 2), residual_block(64, 64), residual_block(64, 64))

        self.block3 = nn.Sequential(
            residual_block(64, 128, False), residual_block(128, 128))

        self.block4 = nn.Sequential(
            residual_block(128, 256, False), residual_block(256, 256))

        self.block5 = nn.Sequential(
            residual_block(256, 512, False),
            residual_block(512, 512), nn.AvgPool2d(3))

        self.classifier = nn.Linear(512, num_classes)

    def forward(self, x):
        x = self.block1(x)
        if self.verbose:
            print('block 1 output: {}'.format(x.shape))
        x = self.block2(x)
        if self.verbose:
            print('block 2 output: {}'.format(x.shape))
        x = self.block3(x)
        if self.verbose:
            print('block 3 output: {}'.format(x.shape))
        x = self.block4(x)
        if self.verbose:
            print('block 4 output: {}'.format(x.shape))
        x = self.block5(x)
        if self.verbose:
            print('block 5 output: {}'.format(x.shape))
        x = x.view(x.shape[0], -1)
        x = self.classifier(x)
        return x
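
Putting the pieces together, a rough driver script might look like the sketch below. Nothing in it comes from this commit: the dataset, transforms, batch sizes, and learning rate are illustrative assumptions, and the file above is assumed importable as utils. Images are resized to 96x96 so that the feature map reaching nn.AvgPool2d(3) is exactly 3x3:

# Hypothetical usage sketch (PyTorch 0.3-era API; torchvision assumed installed).
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

from utils import train, resnet  # assumes this file is on the import path

# Spatial sizes: 96 -> 45 (conv 7/2) -> 22 (maxpool 3/2) -> 11 -> 6 -> 3
# (stride-2 blocks), giving the 3x3 map that nn.AvgPool2d(3) reduces to 1x1.
tf = transforms.Compose([
    transforms.Resize(96),
    transforms.ToTensor(),
])
train_set = datasets.CIFAR10('./data', train=True, transform=tf, download=True)
valid_set = datasets.CIFAR10('./data', train=False, transform=tf, download=True)
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
valid_data = DataLoader(valid_set, batch_size=128, shuffle=False)

net = resnet(3, 10)  # 3 input channels, 10 CIFAR-10 classes
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

train(net, train_data, valid_data, 20, optimizer, criterion)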

Diff for: chapter5_RNN/utils.py

+144

(Added with the same @@ -0,0 +1,144 @@ hunk as above: the file is byte-for-byte identical to chapter4_CNN/utils.py, duplicating get_acc, train, conv3x3, residual_block, and resnet.)
