mirror of
https://github.com/mudler/LocalAI.git
synced 2025-12-30 22:20:20 -06:00
feat(rfdetr): add object detection API (#5923)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
committed by
GitHub
parent
73ecb7f90b
commit
949e5b9be8
@@ -8,4 +8,6 @@ else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
ensureVenv
|
||||
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
20
backend/python/rfdetr/Makefile
Normal file
20
backend/python/rfdetr/Makefile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Makefile for the rfdetr Python backend.
.DEFAULT_GOAL := install

.PHONY: install protogen protogen-clean clean

# Create the virtualenv, install requirements, then generate gRPC stubs.
install:
	bash install.sh
	$(MAKE) protogen

# The protobuf/gRPC stubs are generated files; rebuild only when missing.
protogen: backend_pb2_grpc.py backend_pb2.py

backend_pb2_grpc.py backend_pb2.py:
	bash protogen.sh

# Delete the generated gRPC stubs.
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

# Delete generated stubs, the virtualenv and Python bytecode caches.
clean: protogen-clean
	rm -rf venv __pycache__
|
||||
174
backend/python/rfdetr/backend.py
Executable file
174
backend/python/rfdetr/backend.py
Executable file
@@ -0,0 +1,174 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
gRPC server for RFDETR object detection models.
|
||||
"""
|
||||
from concurrent import futures
|
||||
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import base64
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import grpc
|
||||
|
||||
import requests
|
||||
|
||||
import supervision as sv
|
||||
from inference import get_model
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
    """
    A gRPC servicer for the RFDETR backend service.

    This class implements the gRPC methods for object detection using
    RFDETR models: health checks, model loading, inference and status
    reporting.
    """

    # Minimum confidence threshold applied to every inference call.
    DETECTION_CONFIDENCE = 0.5

    def __init__(self):
        # Both populated by LoadModel(); None until a model is loaded.
        self.model = None
        self.model_name = None

    def Health(self, request, context):
        """
        A gRPC method that returns the health status of the backend service.

        Args:
            request: A HealthMessage object that contains the request parameters.
            context: A grpc.ServicerContext object that provides information about the RPC.

        Returns:
            A Reply object that contains the health status of the backend service.
        """
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        """
        A gRPC method that loads a RFDETR model into memory.

        Args:
            request: A ModelOptions object that contains the model parameters.
            context: A grpc.ServicerContext object that provides information about the RPC.

        Returns:
            A Result object that contains the result of the LoadModel operation.
        """
        model_name = request.Model
        try:
            # get_model downloads/instantiates the RFDETR model by name.
            self.model = get_model(model_name)
            self.model_name = model_name
            print(f'Loaded RFDETR model: {model_name}', file=sys.stderr)
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Failed to load model: {err}")

        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def Detect(self, request, context):
        """
        A gRPC method that performs object detection on an image.

        Args:
            request: A DetectOptions object whose ``src`` field carries the
                image as a base64-encoded string.
            context: A grpc.ServicerContext object that provides information about the RPC.

        Returns:
            A DetectResponse object with one Detection per predicted object,
            or an empty DetectResponse if no model is loaded or inference
            fails (best-effort contract: the server never crashes here).
        """
        if self.model is None:
            print("Detect called before a model was loaded", file=sys.stderr)
            return backend_pb2.DetectResponse()
        try:
            # Decode the base64 payload and open it as a PIL image.
            # NOTE: do not log request.src — it is the full encoded image.
            image_data = base64.b64decode(request.src)
            image = Image.open(BytesIO(image_data))

            # infer() returns one result set per input image; we send one.
            predictions = self.model.infer(image, confidence=self.DETECTION_CONFIDENCE)[0]

            # Convert each prediction to its proto representation.
            proto_detections = [
                backend_pb2.Detection(
                    x=float(pred.x),
                    y=float(pred.y),
                    width=float(pred.width),
                    height=float(pred.height),
                    confidence=float(pred.confidence),
                    class_name=pred.class_name,
                )
                for pred in predictions.predictions
            ]

            return backend_pb2.DetectResponse(Detections=proto_detections)
        except Exception as err:
            print(f"Detection error: {err}", file=sys.stderr)
            return backend_pb2.DetectResponse()

    def Status(self, request, context):
        """
        A gRPC method that returns the status of the backend service.

        Args:
            request: A HealthMessage object that contains the request parameters.
            context: A grpc.ServicerContext object that provides information about the RPC.

        Returns:
            A StatusResponse object that contains the status information:
            READY once a model is loaded, UNINITIALIZED otherwise.
        """
        state = backend_pb2.StatusResponse.READY if self.model is not None else backend_pb2.StatusResponse.UNINITIALIZED
        return backend_pb2.StatusResponse(state=state)
|
||||
|
||||
def serve(address):
    """
    Start the gRPC server on *address* and block until it is terminated.

    Args:
        address: The "host:port" string to bind the insecure server port to.
    """
    # 50MB caps: detection requests carry base64-encoded images, which can
    # easily exceed gRPC's default message size limit.
    fifty_megabytes = 50 * 1024 * 1024
    size_options = [
        (option_name, fifty_megabytes)
        for option_name in (
            'grpc.max_message_length',
            'grpc.max_send_message_length',
            'grpc.max_receive_message_length',
        )
    ]
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=size_options,
    )
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("[RFDETR] Server started. Listening on: " + address, file=sys.stderr)

    def signal_handler(sig, frame):
        # Shut down cleanly on SIGINT/SIGTERM.
        print("[RFDETR] Received termination signal. Shutting down...")
        server.stop(0)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Park the main thread; gRPC worker threads service the RPCs.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: parse the bind address and run the server forever.
    arg_parser = argparse.ArgumentParser(description="Run the RFDETR gRPC server.")
    arg_parser.add_argument(
        "--addr",
        default="localhost:50051",
        help="The address to bind the server to.",
    )
    cli_args = arg_parser.parse_args()
    print(f"[RFDETR] startup: {cli_args}", file=sys.stderr)
    serve(cli_args.addr)
|
||||
|
||||
|
||||
|
||||
19
backend/python/rfdetr/install.sh
Executable file
19
backend/python/rfdetr/install.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
# Install the rfdetr backend: source the shared backend library, then
# install the Python requirements into the backend's virtualenv.
set -e

# Quote all path expansions so paths containing spaces don't word-split
# (ShellCheck SC2086/SC2046).
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

installRequirements
|
||||
13
backend/python/rfdetr/protogen.sh
Normal file
13
backend/python/rfdetr/protogen.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
# Generate the Python gRPC stubs (backend_pb2*.py) from backend.proto
# inside the backend's virtualenv.
set -e

# Quote all path expansions so paths containing spaces don't word-split
# (ShellCheck SC2086/SC2046).
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

ensureVenv

python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
7
backend/python/rfdetr/requirements-cpu.txt
Normal file
7
backend/python/rfdetr/requirements-cpu.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
peft
|
||||
inference
|
||||
torch==2.7.1
|
||||
optimum-quanto
|
||||
8
backend/python/rfdetr/requirements-cublas11.txt
Normal file
8
backend/python/rfdetr/requirements-cublas11.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
inference
|
||||
peft
|
||||
optimum-quanto
|
||||
7
backend/python/rfdetr/requirements-cublas12.txt
Normal file
7
backend/python/rfdetr/requirements-cublas12.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
torch==2.7.1
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
inference
|
||||
peft
|
||||
optimum-quanto
|
||||
9
backend/python/rfdetr/requirements-hipblas.txt
Normal file
9
backend/python/rfdetr/requirements-hipblas.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchvision==0.22.1+rocm6.3
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
inference
|
||||
peft
|
||||
optimum-quanto
|
||||
13
backend/python/rfdetr/requirements-intel.txt
Normal file
13
backend/python/rfdetr/requirements-intel.txt
Normal file
@@ -0,0 +1,13 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchvision==0.18.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
rfdetr
|
||||
inference
|
||||
opencv-python
|
||||
accelerate
|
||||
peft
|
||||
optimum-quanto
|
||||
3
backend/python/rfdetr/requirements.txt
Normal file
3
backend/python/rfdetr/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
grpcio-tools
|
||||
9
backend/python/rfdetr/run.sh
Executable file
9
backend/python/rfdetr/run.sh
Executable file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
# Launch the rfdetr backend via the shared backend library.

# Quote path expansions so paths containing spaces don't word-split
# (ShellCheck SC2086/SC2046).
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

# "$@" (quoted) preserves argument boundaries; a bare $@ re-splits
# arguments containing whitespace.
startBackend "$@"
|
||||
11
backend/python/rfdetr/test.sh
Executable file
11
backend/python/rfdetr/test.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
# Run the backend's unit tests via the shared backend library.
set -e

# Quote all path expansions so paths containing spaces don't word-split
# (ShellCheck SC2086/SC2046).
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

runUnittests
|
||||
Reference in New Issue
Block a user