Tuning hyperparameters of a numpy function with hyperopt and Ray Tune

I am optimizing the hyperparameters for a numpy function that performs some template matching.

My function looks like this:

import numpy as np
from hyperopt import hp, STATUS_OK
from ray import tune
from ray.tune.suggest.hyperopt import HyperOptSearch

def img_reg(im1, im2, config):
    grad_step = config["grad_step"]
    gc_metric_weight = config["gc_metric_weight"]
    ### ... some computation that produces `loss` ###
    return {"loss": loss, "status": STATUS_OK}

def trainable_hyperopt(config):
    for i in range(config["iterations"]):
        # img_reg returns a dict, so the reported metric shows up nested as "loss/loss"
        score = img_reg(im1, im2, config)
        tune.report(iterations=i, loss=score)

# note: this searcher is created here but is not the one passed to tune.run below
hyperopt_search = HyperOptSearch(metric="loss/loss", mode="min", n_initial_points=20)


space = {"grad_step": hp.loguniform("grad_step",np.log(1e-4), np.log(1e-1)),
          "gc_metric_weight": hp.normal("gc_metric_weight", .1, 1e-2),
          "update_field_variance": hp.normal("update_field_variance", 10, 2)
        }
# kwargs passed through to tune.run below: 2 trials, each reporting 10 iterations
config = {
    "num_samples": 2,
    "config": {
        "iterations": 10
    }
}
algo = HyperOptSearch(
    space,
    metric="loss/loss",
    mode="min",
)

analysis = tune.run(trainable_hyperopt, search_alg=algo, **config, metric="loss/loss", mode="min")

When I run this code, it fires up two samples with some initial values, but no hyperopt optimization of those values seems to happen. At the end of the run it just returns whichever of the two trials had the lower score. Can you please help me figure out what might be going wrong?
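
To check whether HyperOptSearch is actually proposing different values, I also tried a variant of the trainable that just prints the sampled hyperparameters at the start of each trial (the trainable_hyperopt_debug name and the print are only for debugging; everything else is the same as the trainable above):

def trainable_hyperopt_debug(config):
    # print the hyperparameters this trial received, to see whether the
    # search algorithm is proposing new values or just repeating the initial draws
    print({k: config[k] for k in ("grad_step", "gc_metric_weight", "update_field_variance")})
    for i in range(config["iterations"]):
        score = img_reg(im1, im2, config)
        tune.report(iterations=i, loss=score)

Both trials print values that look like independent draws from the space, which matches what I see in the final results.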