Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

change some dygraph models to use data loader #4595

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 36 additions & 25 deletions dygraph/mnist/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,11 +99,13 @@ def __init__(self):
self.pool_2_shape = 50 * 4 * 4
SIZE = 10
scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
self._fc = Linear(self.pool_2_shape, 10,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")
self._fc = Linear(
self.pool_2_shape,
10,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")

def forward(self, inputs, label=None):
x = self._simple_img_conv_pool_1(inputs)
Expand All @@ -117,17 +119,21 @@ def forward(self, inputs, label=None):
return x


def reader_decorator(reader):
    """Wrap a raw sample reader into a (image, label) numpy-pair generator.

    Each item produced by ``reader()`` is converted to a float32 image of
    shape (1, 28, 28) and an int64 label of shape (1,), matching the layout
    expected by ``DataLoader.set_sample_list_generator``.
    """

    def __reader__():
        for sample in reader():
            image = np.reshape(
                np.asarray(sample[0], dtype='float32'), (1, 28, 28))
            target = np.reshape(np.asarray(sample[1], dtype='int64'), (1,))
            yield image, target

    return __reader__


def test_mnist(reader, model, batch_size):
acc_set = []
avg_loss_set = []
for batch_id, data in enumerate(reader()):
dy_x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(batch_size, 1)

img = to_variable(dy_x_data)
label = to_variable(y_data)
img, label = data
label.stop_gradient = True
prediction, acc = model(img, label)
loss = fluid.layers.cross_entropy(input=prediction, label=label)
Expand Down Expand Up @@ -187,28 +193,33 @@ def train_mnist(args):
if args.use_data_parallel:
strategy = fluid.dygraph.parallel.prepare_context()
mnist = MNIST()
adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters())
adam = AdamOptimizer(
learning_rate=0.001, parameter_list=mnist.parameters())
if args.use_data_parallel:
mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy)

train_reader = paddle.batch(
paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
reader_decorator(paddle.dataset.mnist.train()),
batch_size=BATCH_SIZE,
drop_last=True)
if args.use_data_parallel:
train_reader = fluid.contrib.reader.distributed_batch_reader(
train_reader)

test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE, drop_last=True)
reader_decorator(paddle.dataset.mnist.test()),
batch_size=BATCH_SIZE,
drop_last=True)

train_loader = fluid.io.DataLoader.from_generator(capacity=10)
train_loader.set_sample_list_generator(train_reader, places=place)

test_loader = fluid.io.DataLoader.from_generator(capacity=10)
test_loader.set_sample_list_generator(test_reader, places=place)

for epoch in range(epoch_num):
for batch_id, data in enumerate(train_reader()):
dy_x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(-1, 1)

img = to_variable(dy_x_data)
label = to_variable(y_data)
for batch_id, data in enumerate(train_loader()):
img, label = data
label.stop_gradient = True

cost, acc = mnist(img, label)
Expand All @@ -231,7 +242,7 @@ def train_mnist(args):
epoch, batch_id, avg_loss.numpy()))

mnist.eval()
test_cost, test_acc = test_mnist(test_reader, mnist, BATCH_SIZE)
test_cost, test_acc = test_mnist(test_loader, mnist, BATCH_SIZE)
mnist.train()
if args.ce:
print("kpis\ttest_acc\t%s" % test_acc)
Expand All @@ -244,7 +255,7 @@ def train_mnist(args):
fluid.dygraph.parallel.Env().local_rank == 0)
if save_parameters:
fluid.save_dygraph(mnist.state_dict(), "save_temp")

print("checkpoint saved")

inference_mnist()
Expand Down
2 changes: 1 addition & 1 deletion dygraph/mobilenet/reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,7 @@ def process_image(sample, settings, mode, color_jitter, rotate):
img /= img_std

if mode == 'train' or mode == 'val':
return (img, sample[1])
return (img, [sample[1]])
elif mode == 'test':
return (img, )

Expand Down
8 changes: 2 additions & 6 deletions dygraph/mobilenet/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,10 +116,8 @@ def train_mobilenet():
optimizer.set_dict(opti_dict)

# 3. reader
train_data_loader, train_data = utility.create_data_loader(
is_train=True, args=args)
test_data_loader, test_data = utility.create_data_loader(
is_train=False, args=args)
train_data_loader = utility.create_data_loader(is_train=True, args=args)
test_data_loader = utility.create_data_loader(is_train=False, args=args)
num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
imagenet_reader = reader.ImageNetReader(seed=0, place_num=place_num)
train_reader = imagenet_reader.train(settings=args)
Expand All @@ -145,8 +143,6 @@ def train_mobilenet():
t1 = time.time()
if args.max_iter and total_batch_num == args.max_iter:
return
label = to_variable(label.numpy().astype('int64').reshape(
int(args.batch_size // place_num), 1))
t_start = time.time()

# 4.1.1 call net()
Expand Down
22 changes: 2 additions & 20 deletions dygraph/mobilenet/utils/utility.py
Original file line number Diff line number Diff line change
Expand Up @@ -309,40 +309,22 @@ def create_data_loader(is_train, args):
Returns:
data_loader and the input data of net,
"""
image_shape = [int(m) for m in args.image_shape.split(",")]

feed_image = fluid.data(
name="feed_image",
shape=[None] + image_shape,
dtype="float32",
lod_level=0)

feed_label = fluid.data(
name="feed_label", shape=[None, 1], dtype="int64", lod_level=0)
feed_y_a = fluid.data(
name="feed_y_a", shape=[None, 1], dtype="int64", lod_level=0)

if is_train and args.use_mixup:
feed_y_b = fluid.data(
name="feed_y_b", shape=[None, 1], dtype="int64", lod_level=0)
feed_lam = fluid.data(
name="feed_lam", shape=[None, 1], dtype="float32", lod_level=0)

data_loader = fluid.io.DataLoader.from_generator(
capacity=64,
use_double_buffer=True,
iterable=True,
return_list=True)

return data_loader, [feed_image, feed_y_a, feed_y_b, feed_lam]
return data_loader
else:
data_loader = fluid.io.DataLoader.from_generator(
capacity=64,
use_double_buffer=True,
iterable=True,
return_list=True)

return data_loader, [feed_image, feed_label]
return data_loader


def print_info(pass_id, batch_id, print_step, metrics, time_info, info_mode):
Expand Down
Loading