这会是一个好的实现吗?
@torch.no_grad()
def evaluate_loss(model, data_loader, device):
    """Return the mean per-batch validation loss of ``model`` over ``data_loader``.

    Parameters
    ----------
    model : a torchvision-style detection model that, in train mode, returns a
        dict of loss tensors when called as ``model(images, targets)``.
    data_loader : iterable yielding ``(images, targets)`` batches.
    device : device the images/targets are moved to before the forward pass.

    NOTE(review): ``model.train()`` is kept deliberately — torchvision detection
    models only return the loss dict in training mode. However, train mode also
    updates BatchNorm running statistics on validation data even under
    ``torch.no_grad()``; confirm this is acceptable for your model. Gradients
    are disabled by the decorator, so no weights are updated here.
    """
    model.train()
    val_loss = 0.0
    for images, targets in data_loader:
        images = [image.to(device) for image in images]
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        loss_dict = model(images, targets)
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        val_loss += sum(loss for loss in loss_dict_reduced.values())
    # Mean over the number of batches (raises ZeroDivisionError on an
    # empty loader, same as the original).
    return val_loss / len(data_loader)
然后我在下面的训练循环中调用它:
for epoch in range(args.num_epochs):
    # train for one epoch, printing every 10 iterations
    train_one_epoch(model, optimizer, train_data_loader, device, epoch, print_freq=10)
    # update the learning rate
    lr_scheduler.step()
    # Compute the validation loss AFTER the scheduler step, so it measures the
    # exact weights/LR state the next epoch will start from.
    validation_loss = evaluate_loss(model, valid_data_loader, device=device)
    print('validation loss', validation_loss)
    # Run the COCO-style evaluation on the VALIDATION dataset.
    # (The original comment said "test dataset", but valid_data_loader is passed.)
    evaluate(model, valid_data_loader, device=device)
我想知道这是否是一个正确的实现:即在每个 epoch 中调用 lr_scheduler.step() 之后,再计算验证损失,这样的顺序是否合理?