# Hugging Face Inference Endpoint handler
# Expects {"inputs": {"image_base64": "<...>"}} or {"inputs": "<base64>"}
| from inference import load_model, predict_from_base64 | |
class EndpointHandler:
    """Hugging Face Inference Endpoint handler for base64-encoded images.

    Accepted request payload shapes:
      {"inputs": "<base64>"}
      {"inputs": {"image_base64": "<base64>"}}

    Malformed payloads get a structured {"error": ...} response instead of
    an unhandled exception (which the endpoint would surface as a 500).
    """

    def __init__(self, path: str = ""):
        # path points to the repo dir inside the container
        self.model = load_model(path)

    def __call__(self, data: dict) -> dict:
        """Run inference on one request payload.

        Returns {"predictions": <result of predict_from_base64>} on success,
        or {"error": "<message>"} when no base64 image can be extracted.
        """
        inputs = data.get("inputs", {})
        if isinstance(inputs, str):
            b64 = inputs
        elif isinstance(inputs, dict):
            b64 = inputs.get("image_base64")
        else:
            # Any other type (list, number, None, ...) is a malformed
            # payload; fall through to the error response rather than
            # raising AttributeError on .get().
            b64 = None
        if not b64:
            return {"error": "Provide base64 image under 'inputs' or 'inputs.image_base64'."}
        preds = predict_from_base64(b64)
        # predict_from_base64 is expected to return a {label: prob} mapping;
        # it is passed through unchanged.
        return {"predictions": preds}