Skip to content

Commit 0b2ee91

Browse files
Remove fluid API (#1783)
1 parent bb02b10 commit 0b2ee91

29 files changed

+201
-554
lines changed

ce_tests/dygraph/quant/src/save_quant_model.py

+6-4
Original file line number | Diff line number | Diff line change
@@ -22,7 +22,7 @@
2222
import numpy as np
2323
import time
2424
import paddle
25-
from paddle.fluid.framework import IrGraph
25+
from paddle.framework import IrGraph
2626
from paddle.static.quantization import Quant2Int8MkldnnPass
2727
from paddle.framework import core
2828

@@ -45,7 +45,8 @@ def parse_args():
4545
'--ops_to_quantize',
4646
type=str,
4747
default='',
48-
help='A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.'
48+
help=
49+
'A comma separated list of operators to quantize. Only quantizable operators are taken into account. If the option is not used, an attempt to quantize all quantizable operators will be made.'
4950
)
5051
parser.add_argument(
5152
'--op_ids_to_skip',
@@ -70,8 +71,9 @@ def transform_and_save_int8_model(original_path, save_path):
7071

7172
with paddle.static.scope_guard(inference_scope):
7273
if os.path.exists(os.path.join(original_path, '__model__')):
73-
[inference_program, feed_target_names, fetch_targets
74-
] = paddle.static.load_inference_model(original_path, exe)
74+
[inference_program, feed_target_names,
75+
fetch_targets] = paddle.static.load_inference_model(
76+
original_path, exe)
7577
else:
7678
[inference_program, feed_target_names,
7779
fetch_targets] = paddle.static.load_inference_model(

demo/darts/README.md

-4
Original file line number | Diff line number | Diff line change
@@ -16,8 +16,6 @@
1616
1717
├── search.py 模型结构搜索入口
1818
19-
├── train.py CIFAR10数据集评估训练入口
20-
2119
├── train_imagenet.py ImageNet数据集评估训练入口
2220
2321
├── visualize.py 模型结构可视化入口
@@ -67,14 +65,12 @@ python -m paddle.distributed.launch --selected_gpus=0,1,2,3 --log_dir ./mylog se
6765
在得到搜索结构Genotype之后,可以对其进行评估训练,从而获得它在特定数据集上的真实性能
6866

6967
```bash
70-
python train.py --arch='PC_DARTS' # 在CIFAR10数据集上对搜索到的结构评估训练
7168
python train_imagenet.py --arch='PC_DARTS' # 在ImageNet数据集上对搜索得到的结构评估训练
7269
```
7370

7471
同样,也支持用多卡进行评估训练, 以4卡为例(GPU id: 0-3), 启动命令如下:
7572

7673
```bash
77-
python -m paddle.distributed.launch --selected_gpus=0,1,2,3 --log_dir ./mylog train.py --use_data_parallel 1 --arch='DARTS_V2'
7874
python -m paddle.distributed.launch --selected_gpus=0,1,2,3 --log_dir ./mylog train_imagenet.py --use_data_parallel 1 --arch='DARTS_V2'
7975
```
8076

demo/darts/train.py

-217
This file was deleted.

demo/darts/train_imagenet.py

+24-20
Original file line number | Diff line number | Diff line change
@@ -24,7 +24,6 @@
2424
import functools
2525

2626
import paddle
27-
import paddle.fluid as fluid
2827
from paddleslim.common import AvgrageMeter, get_logger
2928
from paddleslim.nas.darts import count_parameters_in_MB
3029

@@ -166,8 +165,8 @@ def main(args):
166165

167166
device_num = paddle.distributed.parallel.ParallelEnv().nranks
168167
step_per_epoch = int(args.trainset_num / (args.batch_size * device_num))
169-
learning_rate = paddle.optimizer.lr.ExponentialDecay(args.learning_rate,
170-
args.decay_rate)
168+
learning_rate = paddle.optimizer.lr.ExponentialDecay(
169+
args.learning_rate, args.decay_rate)
171170

172171
clip = paddle.nn.ClipGradByGlobalNorm(args.grad_clip)
173172
optimizer = paddle.optimizer.Momentum(
@@ -181,24 +180,29 @@ def main(args):
181180
strategy = paddle.distributed.init_parallel_env()
182181
model = paddle.DataParallel(model, strategy)
183182

184-
train_loader = paddle.io.DataLoader.from_generator(
185-
capacity=64, use_double_buffer=True, iterable=True, return_list=True)
186-
valid_loader = paddle.io.DataLoader.from_generator(
187-
capacity=64, use_double_buffer=True, iterable=True, return_list=True)
188-
189-
train_reader = paddle.batch(
190-
reader.imagenet_reader(args.data_dir, 'train'),
183+
train_dataset = reader.imagenet_reader(args.data_dir, 'train')
184+
valid_dataset = reader.imagenet_reader(args.data_dir, 'val')
185+
valid_loader = paddle.io.DataLoader(
186+
valid_dataset,
191187
batch_size=args.batch_size,
192-
drop_last=True)
193-
valid_reader = paddle.batch(
194-
reader.imagenet_reader(args.data_dir, 'val'),
195-
batch_size=args.batch_size)
196-
if args.use_data_parallel:
197-
train_reader = fluid.contrib.reader.distributed_batch_reader(
198-
train_reader)
199-
200-
train_loader.set_sample_list_generator(train_reader, places=place)
201-
valid_loader.set_sample_list_generator(valid_reader, places=place)
188+
use_buffer_reader=True,
189+
return_list=True)
190+
191+
if not args.use_data_parallel:
192+
train_loader = paddle.io.DataLoader(
193+
train_dataset,
194+
batch_size=args.batch_size,
195+
use_buffer_reader=True,
196+
return_list=True,
197+
drop_last=True)
198+
else:
199+
sampler = paddle.io.DistributedBatchSampler(
200+
train_dataset, args.batch_size, drop_last=True)
201+
train_loader = paddle.io.DataLoader(
202+
train_dataset,
203+
batch_sampler=sampler,
204+
use_buffer_reader=True,
205+
return_list=True)
202206

203207
save_parameters = (not args.use_data_parallel) or (
204208
args.use_data_parallel and

0 commit comments

Comments (0)