I am having trouble making my Ray Tune results reproducible. I went through the FAQ and essentially all of the Google results that come up regarding this issue, but the results remain stochastic.

My code looks as follows:

```
# trainable
def trainable(config, data):
    """Train a small regression MLP and report ``val_mse`` to Ray Tune.

    Args:
        config: Hyperparameter dict; reads ``config["seed"]`` and
            ``config["learning_rate"]``.
        data: Sequence ``[X_train, Y_train, X_val, Y_val]`` passed in via
            ``tune.with_parameters``.
    """
    # make it reproducible: seed NumPy, Python's `random`, and TensorFlow.
    # NOTE(review): these calls seed only the process running this trainable;
    # Ray may run trials in separate worker processes, and GPU/cuDNN op
    # nondeterminism is not covered by seeding alone — confirm whether
    # tf.config.experimental.enable_op_determinism() is also needed.
    np.random.seed(config["seed"])
    seed(config["seed"])  # presumably random.seed — verify the import
    tf.random.set_seed(config["seed"])
    # initialize model
    model = tf.keras.Sequential([
        tf.keras.layers.Dropout(0.1, input_shape=(471,), seed=config['seed']),
        # NOTE(review): input_shape here is redundant — the Dropout layer above
        # already fixes the input shape; Keras ignores this second one.
        tf.keras.layers.Dense(32, activation=tf.keras.layers.LeakyReLU(alpha=0.1), input_shape=(471,), kernel_regularizer=regularizers.L1L2(l1=0.1, l2=0)),  # manually change input shape
        tf.keras.layers.Dense(16, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_regularizer=regularizers.L1L2(l1=0.1, l2=0)),
        tf.keras.layers.Dense(8, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_regularizer=regularizers.L1L2(l1=0.1, l2=0)),
        tf.keras.layers.Dense(4, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_regularizer=regularizers.L1L2(l1=0.1, l2=0)),
        tf.keras.layers.Dense(1, activation=tf.keras.layers.LeakyReLU(alpha=0.1), kernel_regularizer=regularizers.L1L2(l1=0.1, l2=0)),
    ])
    # compile model
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=config["learning_rate"]),
        loss='mse',
        metrics=[RSquare(), 'mse'],
    )
    # fit model; ReportCheckpointCallback streams val_mse back to Tune
    # at the end of every epoch.
    model.fit(
        data[0], data[1],
        validation_data=(data[2], data[3]),
        epochs=3,
        batch_size=128,
        verbose=0,
        callbacks=[ReportCheckpointCallback(
            metrics={'val_mse': 'val_mse'},
            checkpoint_on='epoch_end')]
    )
# define search space
# NOTE(review): tune.choice over a single value and a fixed seed means every
# sample draws the same config — fine for a reproducibility test.
space = {'learning_rate': tune.choice([0.1]), 'seed': 1234}
# tuning configuration
analysis = tune.Tuner(
    # with_parameters ships the dataset to each trial without re-serializing
    # it into every trial's config
    tune.with_parameters(
        trainable,
        data=[X_train, Y_train, X_val, Y_val]
    ),
    param_space=space,
    tune_config=tune.TuneConfig(
        metric="val_mse",
        mode="min",
        num_samples=1,
    ),
    run_config=air.RunConfig(
        local_dir=path,
        name=name,
    ),
)
# NOTE(review): the Tuner is only constructed here — analysis.fit() is never
# called in this snippet, so no trial would actually run. Confirm the fit()
# call exists in the omitted part of the script.
```

So setting seeds in the trainable function does not work. How do I resolve this issue?