GarbageCollected error related to points_to_evaluate

The argument ‘points_to_evaluate’ of HyperOptSearch can’t contain list-valued parameters. For example, the following runs fine:

algorithm = HyperOptSearch(...., points_to_evaluate = [{"a": 1, "b": 1}])

but when the value of “a” is a list, it throws the GarbageCollected error:

algorithm = HyperOptSearch(...., points_to_evaluate = [{"a": [1], "b": 1}])

The following is the reproduction script. Any hints would be appreciated.

 import os, time, ray 
 import pandas as pd
 import multiprocessing
 from ray import tune, air
 from hyperopt import hp
 from import HyperOptSearch
 from import ConcurrencyLimiter
 from ray.tune.schedulers import ASHAScheduler

 # check whether we are running on a cluster
 ip_head = os.getenv("ip_head")
 if ip_head is None:
     print('-- running without a cluster ---')

 n_epoch = 1

 # 1. Define an objective function.
 def objective(config, data = None):
     f = open(os.path.join(os.getcwd(), "oo.root"), "w")
     score = None
     for i in range(n_epoch):
         score = config["a"][0] ** 2 + config["b"]
     f.close()
     # the first reported key defines the metric name, i.e. metric="SCORE"
     return {"SCORE": score}

 # 2. Define a search space.
 search_space = {
     "a": [hp.uniform("a", 0, 1)],
     "b": hp.uniform("b", 0, 1),
 }

 raw_log_dir = "./ray_log"
 raw_log_name = "example"

 initial_params = [
         {"a": [1], "b": 1},
 ]
 algorithm = HyperOptSearch(search_space, metric="SCORE", mode="max", n_initial_points=8, points_to_evaluate=initial_params)
 #algorithm = HyperOptSearch(search_space, metric="SCORE", mode="max", n_initial_points=8)
 algorithm = ConcurrencyLimiter(algorithm, max_concurrent=8)
 if not os.path.exists(os.path.join(raw_log_dir, raw_log_name)):
     print('--- this is the 1st run ----')
 else: # note: restoring as described in the docs doesn't work
     print('--- previous run exists, continue the tuning ----')
     algorithm.restore_from_dir(os.path.join(raw_log_dir, raw_log_name))

 # 3. Start a Tune run and print the best result.

 trainable_with_resources = tune.with_resources(objective, {"cpu": 1})
 #trainable_with_resources = tune.with_resources(objective, {"cpu": multiprocessing.cpu_count()})
 tuner = tune.Tuner(
         trainable_with_resources,
         #tune.with_parameters(trainable_with_resources, data = "aaaa"),
         tune_config = tune.TuneConfig(
             search_alg = algorithm,
             num_samples = 10, # number of trials; too expensive for Brian2
             time_budget_s = 10000, # total running time in seconds
             scheduler = ASHAScheduler(metric="SCORE", mode="max"),
         ),
         run_config = air.RunConfig(local_dir = raw_log_dir, name = raw_log_name, verbose=2), # where to save the log which will be loaded later
 )

 results =
 best_result = results.get_best_result(metric="SCORE", mode="max")
 print('--1: config: ', best_result.config)
 print('--2: log_dir: ', best_result.log_dir)
 print('--3: SCORE: ', best_result.metrics['SCORE'])
 print('--4: trial_id: ', best_result.metrics['trial_id'])
 print('--6: all metrics: ', best_result.metrics) # contains all metrics 

Turns out the solution is to pass the search space through tune.Tuner(param_space = search_space, ...) instead of loading it directly into the search algorithm. Ray Tune then translates it into the form specific to each search algorithm. Pretty cool!
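
For anyone hitting the same error, here is a minimal sketch of the working pattern (my assumption: Ray 2.x with the air API; the objective is simplified from the script above). The search space is written with Tune's own sampling API and handed to the Tuner via param_space, while HyperOptSearch gets no space argument:

 from ray import tune
 from import HyperOptSearch

 def objective(config):
     return {"SCORE": config["a"][0] ** 2 + config["b"]}

 # search space in Tune's native API; Tune converts it to HyperOpt's format
 search_space = {
     "a": [tune.uniform(0, 1)],  # list-valued parameter
     "b": tune.uniform(0, 1),
 }

 # no search-space argument here, only metric, mode and the initial points
 algorithm = HyperOptSearch(
     metric="SCORE", mode="max",
     points_to_evaluate=[{"a": [1], "b": 1}],  # no GarbageCollected error
 )

 tuner = tune.Tuner(
     objective,
     tune_config=tune.TuneConfig(search_alg=algorithm, num_samples=10),
     param_space=search_space,  # Tune translates this for the searcher
 )
 results =
 print(results.get_best_result(metric="SCORE", mode="max").config)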