[Tune] Report at every epoch as well as after all epochs

I am running an experiment where some metrics are reported every epoch, while other metrics are reported only once, after all epochs have finished.

Here is the sample code:

from ray import tune, train
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim


class ToyModel(nn.Module):

    def __init__(self, l1, l2):
        super().__init__()
        self.net1 = nn.Linear(l1, l2)
        self.relu = nn.ReLU()
        self.net2 = nn.Linear(l2, 1)

    def forward(self, x):
        return self.net2(self.relu(self.net1(x)))


def main():

    def objective(config):
        loss_fn = nn.MSELoss()
        net = ToyModel(config['l1'], config['l2'])
        optimizer = optim.SGD(net.parameters(), lr=0.001)

        for i in range(config['nb_epoch']):
            optimizer.zero_grad()
            data = torch.randn(100, config['l1'])
            labels = torch.randn(100, 1)
            out = net(data)
            train_loss = loss_fn(out, labels)
            train_loss.backward()
            optimizer.step()
            metrics = {
                'iterations': i,
                'train_loss': train_loss.item()
            }
            # Reported every epoch.
            tune.report(metrics)
        # Reported only once, after all epochs.
        tune.report({'eval_metric': np.random.uniform(0, 5)})

    config = {
        "l1": tune.choice([4, 8]),
        "l2": tune.choice([2, 4]),
        'nb_epoch': tune.choice([30, 40]),
    }

    tune_config = tune.TuneConfig(mode='min', num_samples=5, metric='train_loss')
    run_config = train.RunConfig(name="test_restore")
    trainable = tune.with_resources(objective, {'cpu': 0.5, 'gpu': 0})
    tuner = tune.Tuner(trainable=trainable,
                       tune_config=tune_config,
                       run_config=run_config,
                       param_space=config)
    result_grid = tuner.fit()

    # FIXME: throws an error - eval_metric not found
    best_result = result_grid.get_best_result(metric='eval_metric', mode='max')
    print(best_result.config)


if __name__ == "__main__":
    main()

How can I report eval_metric once at the end of each trial, while still reporting train_loss and iterations every epoch?
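
One workaround I am considering (a minimal, untested sketch of just the reporting pattern; the dummy train_loss stands in for the real training step above) is to merge eval_metric into the trial's final report, so that the last result row of every trial contains all metric keys. Since get_best_result looks at each trial's last reported result by default, it should then be able to find eval_metric:

from ray import tune
import numpy as np


def objective(config):
    metrics = {}
    for i in range(config['nb_epoch']):
        # ... real training step goes here, as in the code above ...
        metrics = {'iterations': i, 'train_loss': float(np.random.rand())}
        if i < config['nb_epoch'] - 1:
            # Intermediate epochs: report only the per-epoch metrics.
            tune.report(metrics)
    # Final report: the last epoch's metrics plus the end-of-training
    # metric, so every trial's last result contains 'eval_metric'.
    metrics['eval_metric'] = np.random.uniform(0, 5)
    tune.report(metrics)

Is this the intended way to mix per-epoch and end-of-trial metrics, or is there a cleaner built-in mechanism?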