High: Completely blocks me.
- Ray version: 2.51.0
- Python version: 3.12.12
- OS: Windows
- Cloud/Infrastructure: Colab
I keep getting this error when I try to run the code below.
TypeError: self.handle cannot be converted to a Python object for pickling
The above exception was the direct cause of the following exception:
TypeError Traceback (most recent call last)
/tmp/ipython-input-73983139.py in <cell line: 0>()
9
10 @ray.serve.deployment
---> 11 @ray.serve.ingress(app)
12 class Ensemble:
13 def __init__(self, model1, model2):
/usr/local/lib/python3.12/dist-packages/ray/serve/api.py in decorator(cls)
302 ensure_serialization_context()
303 frozen_app_or_func = cloudpickle.loads(
--> 304 pickle_dumps(app, error_msg="Failed to serialize the ASGI app.")
305 )
306
/usr/local/lib/python3.12/dist-packages/ray/_common/serialization.py in pickle_dumps(obj, error_msg)
30 msg = f"{error_msg}:\n{sio.getvalue()}"
31 if isinstance(e, TypeError):
---> 32 raise TypeError(msg) from e
33 else:
34 raise ray.exceptions.OutOfBandObjectRefSerializationException(msg)
TypeError: Failed to serialize the ASGI app.:
# FastAPI app used as the Serve ingress for the Ensemble deployment below.
app = fastapi.FastAPI()


class Payload(BaseModel):
    """Request body for the /predict endpoint.

    Field names match the feature columns the XGBoost model was trained on.
    """

    passenger_count: int
    trip_distance: float
    fare_amount: float
    tolls_amount: float
@ray.serve.deployment
@ray.serve.ingress(app)
class Ensemble:
    """Serve ingress that averages the predictions of two model deployments.

    NOTE(review): the reported pickling error ("self.handle cannot be
    converted to a Python object") means the FastAPI ``app`` captured an
    unpicklable object (e.g. a leftover DeploymentHandle from a previous
    ``serve.run`` in the same notebook session) — restart the runtime or
    define ``app`` in a fresh cell before redeploying. TODO confirm.
    """

    def __init__(self, model1, model2):
        # model1/model2 are DeploymentHandles produced by Model.bind(...).
        self.model1 = model1
        self.model2 = model2

    @app.post("/predict")
    async def predict(self, data: Payload) -> dict:
        # Fan out to both model deployments concurrently and await both.
        model1_prediction, model2_prediction = await asyncio.gather(
            self.model1.predict.remote([data.model_dump()]),
            self.model2.predict.remote([data.model_dump()]),
        )
        # NOTE(review): assumes each result is a length-1 numeric array so
        # that elementwise addition and float(...) succeed — confirm against
        # Model.predict's output type.
        return {"prediction": float(model1_prediction + model2_prediction) / 2}
@ray.serve.deployment
class Model:
    """Serve deployment wrapping one XGBoost booster loaded from disk."""

    def __init__(self, path: str):
        # Load the serialized booster once at deployment start-up.
        self._model = xgboost.Booster()
        self._model.load_model(path)

    def predict(self, data: list[dict]) -> list[float]:
        # Build a DMatrix from the row dicts and run inference.
        dmatrix = xgboost.DMatrix(pd.DataFrame(data))
        model_prediction = self._model.predict(dmatrix)
        # NOTE(review): Booster.predict returns a numpy array, not list[float];
        # Ensemble.predict relies on numeric (array) addition, so it is
        # deliberately returned as-is rather than converted with .tolist().
        return model_prediction
# Deploy the two-model ensemble graph and keep a handle for local requests.
handle = ray.serve.run(
    Ensemble.bind(
        model1=Model.bind(model_path),
        model2=Model.bind(model_path),
    ),
    route_prefix="/ensemble",
)