Error: RuntimeError: No best trial found for the given metric: mean_accuracy. This means that no trial has reported this metric, or all values reported for this metric are NaN. To not ignore NaN values, you can set the filter_nan_and_inf arg to False.
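For reference, this RuntimeError is raised by ResultGrid.get_best_result() after the run finishes. A minimal sketch of what the message itself suggests, assuming `results` is the ResultGrid returned by tuner.fit() (this only stops NaN/inf results from being filtered out; it does not address why the trials fail):

results = tuner.fit()
# pass the metric/mode explicitly and keep NaN/inf results instead of filtering them out
best = results.get_best_result(metric="mean_accuracy", mode="max", filter_nan_and_inf=False)
print(best.config)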

Here is my code:

import argparse
import os

from filelock import FileLock
from tensorflow.keras.datasets import mnist
import tensorflow as tf
#import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
tf.keras.backend.set_floatx('float64')
import pennylane as qml
import numpy as np

import matplotlib.pyplot as plt

import ray
from ray import train, tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.air.integrations.keras import ReportCheckpointCallback

mnist = keras.datasets.mnist

# datasets are numpy.ndarrays
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()     

# normalize the image data
X_train, X_test = X_train / 255.0, X_test / 255.0

def one_hot(labels):  
       
    depth =  2**4                       # 10 classes + 6 zeros for padding
    indices = labels.astype(np.int32)    
    one_hot_labels = np.eye(depth)[indices].astype(np.float32) 
    
    return one_hot_labels

# one-hot encoded labels, each of length cutoff_dim**num_modes = 2**4 = 16
y_train, y_test = one_hot(Y_train), one_hot(Y_test)

# using only 600 samples for training in this experiment
n_samples = 600
test_samples = 100
X_train, X_test, y_train, y_test = X_train[:n_samples], X_test[:test_samples], y_train[:n_samples], y_test[:test_samples]

def encode_data(x):
    qml.Squeezing(x[0], x[1], wires=0)
    qml.Squeezing(x[2], x[3], wires=1)
    qml.Squeezing(x[4], x[5], wires=2)
    qml.Squeezing(x[6], x[7], wires=3)
    
    qml.Beamsplitter(x[8], x[9], wires=[0,1])
    qml.Beamsplitter(x[10], x[11], wires=[1,2])
    qml.Beamsplitter(x[12], x[13], wires=[2,3])
    
    qml.Rotation(x[14], wires=0)
    qml.Rotation(x[15], wires=1)
    qml.Rotation(x[16], wires=2)
    qml.Rotation(x[17], wires=3)    
    
    qml.Displacement(x[18], x[19], wires=0)
    qml.Displacement(x[20], x[21], wires=1)
    qml.Displacement(x[22], x[23], wires=2)
    qml.Displacement(x[24], x[25], wires=3) 
    
    qml.Kerr(x[26], wires=0)
    qml.Kerr(x[27], wires=1)
    qml.Kerr(x[28], wires=2)
    qml.Kerr(x[29], wires=3)

def layer(v):
    
    # Linear transformation W = interferometer, squeezers, interferometer
    # Interferometer 1
    qml.Beamsplitter(v[0], v[1], wires=[0,1])
    qml.Beamsplitter(v[2], v[3], wires=[1,2])
    qml.Beamsplitter(v[4], v[5], wires=[2,3])
    
    qml.Rotation(v[6], wires=0)
    qml.Rotation(v[7], wires=1)
    qml.Rotation(v[8], wires=2)
    qml.Rotation(v[9], wires=3)
    
    # Squeezers
    qml.Squeezing(v[10], 0.0, wires=0)
    qml.Squeezing(v[11], 0.0, wires=1)
    qml.Squeezing(v[12], 0.0, wires=2)
    qml.Squeezing(v[13], 0.0, wires=3) 
    
    # Interferometer 2
    qml.Beamsplitter(v[14], v[15], wires=[0,1])
    qml.Beamsplitter(v[16], v[17], wires=[1,2])
    qml.Beamsplitter(v[18], v[19], wires=[2,3])
    
    qml.Rotation(v[20], wires=0)
    qml.Rotation(v[21], wires=1)
    qml.Rotation(v[22], wires=2)
    qml.Rotation(v[23], wires=3)
    
    # Bias addition
    qml.Displacement(v[24], 0.0, wires=0)
    qml.Displacement(v[25], 0.0, wires=1)
    qml.Displacement(v[26], 0.0, wires=2)
    qml.Displacement(v[27], 0.0, wires=3)
    
    # Non-linear activation
    qml.Kerr(v[28], wires=0)
    qml.Kerr(v[29], wires=1)
    qml.Kerr(v[30], wires=2)
    qml.Kerr(v[31], wires=3)

def init_weights(layers, modes, active_sd=0.0001, passive_sd=0.1):
    
    # Number of interferometer parameters per layer: 3 beamsplitters (2 params each) + 4 rotations = 10
    M = 10

    int1_weights = tf.random.normal(shape=[layers, M], stddev=passive_sd)
    s_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)
    int2_weights = tf.random.normal(shape=[layers, M], stddev=passive_sd)
    dr_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)
    k_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)

    weights = tf.concat([int1_weights, s_weights, int2_weights, dr_weights, k_weights], axis=1)
    weights = tf.Variable(weights)

    return weights

num_modes = 4
cutoff_dim = 2

# select a device
dev = qml.device("strawberryfields.fock", wires=num_modes, cutoff_dim=cutoff_dim) 

@qml.qnode(dev, interface="tf")
def quantum_nn(inputs, var):
    # Encode input x into quantum state
    encode_data(inputs)

    # iterative quantum layers
    for v in var:
        layer(v)

    return qml.probs(wires=[0, 1, 2, 3])  # Measurement



weight_shape = {'var': (4,32)}          # 4 layers and 32 parameters per layer, Keras layer will initialize.

num_layers = 4

weights = init_weights(num_layers, num_modes)

# convert the quantum layer to a Keras layer
shape_tup = weights.shape
weight_shapes = {'var': shape_tup}
print(weight_shapes)

def train_mnist(config):
    model = keras.models.Sequential()
    qlayer = qml.qnn.KerasLayer(quantum_nn, weight_shapes, output_dim = 4)
    model.add(qlayer)
    #opt = keras.optimizers.SGD(learning_rate = 0.02)
    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=tf.keras.optimizers.SGD(learning_rate=config["lr"], momentum=config["momentum"]),
        metrics=["accuracy"],
    )
    model.fit(X_train, y_train, epochs = 200, batch_size = 64, shuffle = True, verbose=0, validation_data = (X_test, y_test), 
              callbacks=[ReportCheckpointCallback(metrics={"mean_accuracy": "accuracy"})],)


def tune_mnist():
    sched = AsyncHyperBandScheduler(
        time_attr="training_iteration", max_t=400, grace_period=20
    )

    tuner = tune.Tuner(
        tune.with_resources(train_mnist, resources={"cpu": 2, "gpu": 0}),
        tune_config=tune.TuneConfig(
            metric="mean_accuracy",
            mode="max",
            scheduler=sched,
            num_samples=10,
        ),
        run_config=train.RunConfig(
            name="exp",
            stop={"mean_accuracy": 0.99},
        ),
        param_space={
            "threads": 2,
            "lr": tune.uniform(0.001, 0.1),
            "momentum": tune.uniform(0.1, 0.9),
            "hidden": tune.randint(32, 512),
        },
    )
    results = tuner.fit()

    print("Best hyperparameters found were: ", results.get_best_result().config)

tune_mnist()

Full traceback: every trial fails with the same InvalidArgumentError before it ever reports mean_accuracy.


ray.exceptions.RayTaskError(InvalidArgumentError): ray::ImplicitFunc.train() (pid=90629, ip=172.17.0.8, actor_id=8ed433a12c307f77a1a7124a01000000, repr=train_mnist)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/trainable.py", line 342, in train
    raise skipped from exception_cause(skipped)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 528, in minimize
    grads_and_vars = self._compute_gradients(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 580, in _compute_gradients
    grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 473, in _get_gradients
    grads = tape.gradient(loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 1074, in gradient
    flat_grad = imperative_grad.imperative_grad(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/imperative_grad.py", line 71, in imperative_grad
    return pywrap_tfe.TFE_Py_TapeGradient(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 159, in _gradient_function
    return grad_fn(mock_op, *out_grads)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/math_grad.py", line 1385, in _MulGrad
    math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/gen_math_ops.py", line 6240, in mul
    _ops.raise_from_not_ok_status(e, name)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/framework/ops.py", line 6897, in raise_from_not_ok_status
    six.raise_from(core._status_to_exception(e.code, message), None)
  File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: cannot compute Mul as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:Mul]
2024-03-14 03:59:35,551	ERROR tune_controller.py:1374 -- Trial task failed for trial train_mnist_c62da_00008
Traceback (most recent call last):
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/air/execution/_internal/event_manager.py", line 110, in resolve_future
    result = ray.get(future)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/auto_init_hook.py", line 22, in auto_init_wrapper
    return fn(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/client_mode_hook.py", line 103, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/worker.py", line 2624, in get
    raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::ImplicitFunc.train() (pid=90630, ip=172.17.0.8, actor_id=5e261c453848631029b14d6e01000000, repr=train_mnist)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/trainable.py", line 342, in train
    raise skipped from exception_cause(skipped)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/air/_internal/util.py", line 88, in run
    self._ret = self._target(*self._args, **self._kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 115, in <lambda>
    training_func=lambda: self._trainable_func(self.config),
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 332, in _trainable_func
    output = fn()
  File "/tmp/ipykernel_87521/3106224546.py", line 43, in train_mnist
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 1183, in fit
    tmp_logs = self.train_function(iterator)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 855, in train_function
    return step_function(self, iterator)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 845, in step_function
    outputs = model.distribute_strategy.run(run_step, args=(data,))
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 1285, in run
    return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 2833, in call_for_each_replica
    return self._call_for_each_replica(fn, args, kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 3608, in _call_for_each_replica
    return fn(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/autograph/impl/api.py", line 597, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 838, in run_step
    outputs = model.train_step(data)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 799, in train_step
    self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 528, in minimize
    grads_and_vars = self._compute_gradients(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 580, in _compute_gradients
    grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 473, in _get_gradients
    grads = tape.gradient(loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 1074, in gradient
    flat_grad = imperative_grad.imperative_grad(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/imperative_grad.py", line 71, in imperative_grad
    return pywrap_tfe.TFE_Py_TapeGradient(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 159, in _gradient_function
    return grad_fn(mock_op, *out_grads)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/math_grad.py", line 1385, in _MulGrad
    math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/gen_math_ops.py", line 6240, in mul
    _ops.raise_from_not_ok_status(e, name)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/framework/ops.py", line 6897, in raise_from_not_ok_status
    six.raise_from(core._status_to_exception(e.code, message), None)
  File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: cannot compute Mul as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:Mul]
2024-03-14 03:59:45,318	ERROR tune_controller.py:1374 -- Trial task failed for trial train_mnist_c62da_00004
Traceback (most recent call last):
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/air/execution/_internal/event_manager.py", line 110, in resolve_future
    result = ray.get(future)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/auto_init_hook.py", line 22, in auto_init_wrapper
    return fn(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/client_mode_hook.py", line 103, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/worker.py", line 2624, in get
    raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::ImplicitFunc.train() (pid=90627, ip=172.17.0.8, actor_id=1f7bb91e4ca698ce1eeed45601000000, repr=train_mnist)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/trainable.py", line 342, in train
    raise skipped from exception_cause(skipped)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/air/_internal/util.py", line 88, in run
    self._ret = self._target(*self._args, **self._kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 115, in <lambda>
    training_func=lambda: self._trainable_func(self.config),
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 332, in _trainable_func
    output = fn()
  File "/tmp/ipykernel_87521/3106224546.py", line 43, in train_mnist
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 1183, in fit
    tmp_logs = self.train_function(iterator)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 855, in train_function
    return step_function(self, iterator)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 845, in step_function
    outputs = model.distribute_strategy.run(run_step, args=(data,))
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 1285, in run
    return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 2833, in call_for_each_replica
    return self._call_for_each_replica(fn, args, kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 3608, in _call_for_each_replica
    return fn(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/autograph/impl/api.py", line 597, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 838, in run_step
    outputs = model.train_step(data)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 799, in train_step
    self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 528, in minimize
    grads_and_vars = self._compute_gradients(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 580, in _compute_gradients
    grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 473, in _get_gradients
    grads = tape.gradient(loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 1074, in gradient
    flat_grad = imperative_grad.imperative_grad(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/imperative_grad.py", line 71, in imperative_grad
    return pywrap_tfe.TFE_Py_TapeGradient(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 159, in _gradient_function
    return grad_fn(mock_op, *out_grads)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/math_grad.py", line 1385, in _MulGrad
    math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/gen_math_ops.py", line 6240, in mul
    _ops.raise_from_not_ok_status(e, name)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/framework/ops.py", line 6897, in raise_from_not_ok_status
    six.raise_from(core._status_to_exception(e.code, message), None)
  File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: cannot compute Mul as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:Mul]
2024-03-14 03:59:53,336	ERROR tune_controller.py:1374 -- Trial task failed for trial train_mnist_c62da_00003
Traceback (most recent call last):
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/air/execution/_internal/event_manager.py", line 110, in resolve_future
    result = ray.get(future)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/auto_init_hook.py", line 22, in auto_init_wrapper
    return fn(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/client_mode_hook.py", line 103, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/_private/worker.py", line 2624, in get
    raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::ImplicitFunc.train() (pid=90626, ip=172.17.0.8, actor_id=dbae4261d722e06cbfb2131101000000, repr=train_mnist)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/trainable.py", line 342, in train
    raise skipped from exception_cause(skipped)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/air/_internal/util.py", line 88, in run
    self._ret = self._target(*self._args, **self._kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 115, in <lambda>
    training_func=lambda: self._trainable_func(self.config),
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/trainable/function_trainable.py", line 332, in _trainable_func
    output = fn()
  File "/tmp/ipykernel_87521/3106224546.py", line 43, in train_mnist
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 1183, in fit
    tmp_logs = self.train_function(iterator)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 855, in train_function
    return step_function(self, iterator)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 845, in step_function
    outputs = model.distribute_strategy.run(run_step, args=(data,))
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 1285, in run
    return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 2833, in call_for_each_replica
    return self._call_for_each_replica(fn, args, kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py", line 3608, in _call_for_each_replica
    return fn(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/autograph/impl/api.py", line 597, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 838, in run_step
    outputs = model.train_step(data)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py", line 799, in train_step
    self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 528, in minimize
    grads_and_vars = self._compute_gradients(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 580, in _compute_gradients
    grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py", line 473, in _get_gradients
    grads = tape.gradient(loss, var_list, grad_loss)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 1074, in gradient
    flat_grad = imperative_grad.imperative_grad(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/imperative_grad.py", line 71, in imperative_grad
    return pywrap_tfe.TFE_Py_TapeGradient(
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/eager/backprop.py", line 159, in _gradient_function
    return grad_fn(mock_op, *out_grads)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/math_grad.py", line 1385, in _MulGrad
    math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/ops/gen_math_ops.py", line 6240, in mul
    _ops.raise_from_not_ok_status(e, name)
  File "/opt/conda/envs/tf1/lib/python3.9/site-packages/tensorflow/python/framework/ops.py", line 6897, in raise_from_not_ok_status
    six.raise_from(core._status_to_exception(e.code, message), None)
  File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: cannot compute Mul as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:Mul]
2024-03-14 03:59:53,348	ERROR tune.py:1038 -- Trials did not complete: [train_mnist_c62da_00000, train_mnist_c62da_00001, train_mnist_c62da_00002, train_mnist_c62da_00003, train_mnist_c62da_00004, train_mnist_c62da_00005, train_mnist_c62da_00006, train_mnist_c62da_00007, train_mnist_c62da_00008, train_mnist_c62da_00009]
2024-03-14 03:59:53,348	INFO tune.py:1042 -- Total run time: 231.37 seconds (231.17 seconds for the tuning loop).
2024-03-14 03:59:53,360	WARNING experiment_analysis.py:584 -- Could not find best trial. Did you pass the correct `metric` parameter?
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[7], line 29
     25     results = tuner.fit()
     27     print("Best hyperparameters found were: ", results.get_best_result().config)
---> 29 tune_mnist()

Cell In[7], line 27, in tune_mnist()
      6 tuner = tune.Tuner(
      7     tune.with_resources(train_mnist, resources={"cpu": 2, "gpu": 0}),
      8     tune_config=tune.TuneConfig(
   (...)
     23     },
     24 )
     25 results = tuner.fit()
---> 27 print("Best hyperparameters found were: ", results.get_best_result().config)

File /opt/conda/envs/tf1/lib/python3.9/site-packages/ray/tune/result_grid.py:162, in ResultGrid.get_best_result(self, metric, mode, scope, filter_nan_and_inf)
    151     error_msg = (
    152         "No best trial found for the given metric: "
    153         f"{metric or self._experiment_analysis.default_metric}. "
    154         "This means that no trial has reported this metric"
    155     )
    156     error_msg += (
    157         ", or all values reported for this metric are NaN. To not ignore NaN "
    158         "values, you can set the `filter_nan_and_inf` arg to False."
    159         if filter_nan_and_inf
    160         else "."
    161     )
--> 162     raise RuntimeError(error_msg)
    164 return self._trial_to_result(best_trial)

RuntimeError: No best trial found for the given metric: mean_accuracy. This means that no trial has reported this metric, or all values reported for this metric are NaN. To not ignore NaN values, you can set the `filter_nan_and_inf` arg to False.