diff --git a/docs/docs/install/environment-variables.md b/docs/docs/install/environment-variables.md index 78a5289bf4..55c226d507 100644 --- a/docs/docs/install/environment-variables.md +++ b/docs/docs/install/environment-variables.md @@ -149,29 +149,31 @@ Redis (Sentinel) URL example JSON before encoding: ## Machine Learning -| Variable | Description | Default | Containers | -| :---------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- | -| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning | -| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning | -| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning | -| `MACHINE_LEARNING_REQUEST_THREADS`\*1 | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning | -| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning | -| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning | -| `MACHINE_LEARNING_WORKERS`\*2 | Number of worker processes to spawn | `1` | machine learning | -| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`\*3 | HTTP Keep-alive time in seconds | `2` | machine learning | -| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning | -| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL` | Comma-separated list of (textual) CLIP model(s) to preload and cache | | machine learning | -| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL` | Comma-separated list of (visual) CLIP model(s) to preload and cache | | machine learning | -| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Comma-separated list of (recognition) facial recognition model(s) to preload and cache | | machine learning | -| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION` | Comma-separated list of (detection) facial recognition model(s) to preload and cache | | machine learning | -| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning | -| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning | -| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning | -| `MACHINE_LEARNING_DEVICE_IDS`\*4 | Device IDs to use in multi-GPU environments | `0` | machine learning | -| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning | -| `MACHINE_LEARNING_RKNN` | Enable RKNN hardware acceleration if supported | `True` | machine learning | -| `MACHINE_LEARNING_RKNN_THREADS` | How many threads of RKNN runtime should be spinned up while inferencing. 
| `1` | machine learning | -| `MACHINE_LEARNING_MODEL_ARENA` | Pre-allocates CPU memory to avoid memory fragmentation | true | machine learning | +| Variable | Description | Default | Containers | +| :---------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- | +| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning | +| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning | +| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning | +| `MACHINE_LEARNING_REQUEST_THREADS`\*1 | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning | +| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning | +| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning | +| `MACHINE_LEARNING_WORKERS`\*2 | Number of worker processes to spawn | `1` | machine learning | +| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`\*3 | HTTP Keep-alive time in seconds | `2` | machine learning | +| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning | +| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL` | Comma-separated list of (textual) CLIP model(s) to preload and cache | | machine learning | +| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL` | Comma-separated list of (visual) CLIP model(s) to preload and cache | | machine learning | +| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Comma-separated list of (recognition) facial recognition model(s) to preload and cache | | machine learning | +| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION` | Comma-separated list of (detection) facial recognition model(s) to preload and cache | | machine learning | +| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning | +| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning | +| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning | +| `MACHINE_LEARNING_DEVICE_IDS`\*4 | Device IDs to use in multi-GPU environments | `0` | machine learning | +| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning | +| `MACHINE_LEARNING_MAX_BATCH_SIZE__OCR` | Set the maximum number of boxes that will be processed at once by the OCR model | `6` | machine learning | +| `MACHINE_LEARNING_RKNN` | Enable RKNN hardware acceleration if supported | `True` | machine learning | +| `MACHINE_LEARNING_RKNN_THREADS` | How many threads of RKNN runtime should be spun up while inferencing. 
| `1` | machine learning | +| `MACHINE_LEARNING_MODEL_ARENA` | Pre-allocates CPU memory to avoid memory fragmentation | true | machine learning | +| `MACHINE_LEARNING_OPENVINO_PRECISION` | If set to FP16, uses half-precision floating-point operations for faster inference with reduced accuracy (one of [`FP16`, `FP32`], applies only to OpenVINO) | `FP32` | machine learning | \*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones. diff --git a/machine-learning/immich_ml/config.py b/machine-learning/immich_ml/config.py index 68d00625a3..19fd5300df 100644 --- a/machine-learning/immich_ml/config.py +++ b/machine-learning/immich_ml/config.py @@ -13,6 +13,8 @@ from rich.logging import RichHandler from uvicorn import Server from uvicorn.workers import UvicornWorker +from .schemas import ModelPrecision + class ClipSettings(BaseModel): textual: str | None = None @@ -24,6 +26,11 @@ class FacialRecognitionSettings(BaseModel): detection: str | None = None +class OcrSettings(BaseModel): + recognition: str | None = None + detection: str | None = None + + class PreloadModelData(BaseModel): clip_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__CLIP", None) facial_recognition_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION", None) @@ -37,6 +44,7 @@ class PreloadModelData(BaseModel): del os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION"] clip: ClipSettings = ClipSettings() facial_recognition: FacialRecognitionSettings = FacialRecognitionSettings() + ocr: OcrSettings = OcrSettings() class MaxBatchSize(BaseModel): @@ -70,6 +78,7 @@ class Settings(BaseSettings): rknn_threads: int = 1 preload: PreloadModelData | None = None max_batch_size: MaxBatchSize | None = None + openvino_precision: ModelPrecision = ModelPrecision.FP32 @property def device_id(self) -> str: diff --git a/machine-learning/immich_ml/main.py b/machine-learning/immich_ml/main.py index 35f04d77ef..3d34d9bf9d 100644 --- a/machine-learning/immich_ml/main.py +++ b/machine-learning/immich_ml/main.py @@ -103,6 +103,20 @@ async def preload_models(preload: PreloadModelData) -> None: ModelTask.FACIAL_RECOGNITION, ) + if preload.ocr.detection is not None: + await load_models( + preload.ocr.detection, + ModelType.DETECTION, + ModelTask.OCR, + ) + + if preload.ocr.recognition is not None: + await load_models( + preload.ocr.recognition, + ModelType.RECOGNITION, + ModelTask.OCR, + ) + if preload.clip_fallback is not None: log.warning( "Deprecated env variable: 'MACHINE_LEARNING_PRELOAD__CLIP'. 
" diff --git a/machine-learning/immich_ml/schemas.py b/machine-learning/immich_ml/schemas.py index bfb40b9c84..41706180de 100644 --- a/machine-learning/immich_ml/schemas.py +++ b/machine-learning/immich_ml/schemas.py @@ -46,6 +46,11 @@ class ModelSource(StrEnum): PADDLE = "paddle" +class ModelPrecision(StrEnum): + FP16 = "FP16" + FP32 = "FP32" + + ModelIdentity = tuple[ModelType, ModelTask] diff --git a/machine-learning/immich_ml/sessions/ort.py b/machine-learning/immich_ml/sessions/ort.py index b6f709a323..6c52936722 100644 --- a/machine-learning/immich_ml/sessions/ort.py +++ b/machine-learning/immich_ml/sessions/ort.py @@ -93,10 +93,12 @@ class OrtSession: case "CUDAExecutionProvider" | "ROCMExecutionProvider": options = {"arena_extend_strategy": "kSameAsRequested", "device_id": settings.device_id} case "OpenVINOExecutionProvider": + openvino_dir = self.model_path.parent / "openvino" + device = f"GPU.{settings.device_id}" options = { - "device_type": f"GPU.{settings.device_id}", - "precision": "FP32", - "cache_dir": (self.model_path.parent / "openvino").as_posix(), + "device_type": device, + "precision": settings.openvino_precision.value, + "cache_dir": openvino_dir.as_posix(), } case "CoreMLExecutionProvider": options = { diff --git a/machine-learning/test_main.py b/machine-learning/test_main.py index 582a05a950..eb8706fc19 100644 --- a/machine-learning/test_main.py +++ b/machine-learning/test_main.py @@ -26,7 +26,7 @@ from immich_ml.models.clip.textual import MClipTextualEncoder, OpenClipTextualEn from immich_ml.models.clip.visual import OpenClipVisualEncoder from immich_ml.models.facial_recognition.detection import FaceDetector from immich_ml.models.facial_recognition.recognition import FaceRecognizer -from immich_ml.schemas import ModelFormat, ModelTask, ModelType +from immich_ml.schemas import ModelFormat, ModelPrecision, ModelTask, ModelType from immich_ml.sessions.ann import AnnSession from immich_ml.sessions.ort import OrtSession from immich_ml.sessions.rknn import RknnSession, run_inference @@ -240,11 +240,16 @@ class TestOrtSession: @pytest.mark.ov_device_ids(["GPU.0", "CPU"]) def test_sets_default_provider_options(self, ov_device_ids: list[str]) -> None: - model_path = "/cache/ViT-B-32__openai/model.onnx" + model_path = "/cache/ViT-B-32__openai/textual/model.onnx" + session = OrtSession(model_path, providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"]) assert session.provider_options == [ - {"device_type": "GPU.0", "precision": "FP32", "cache_dir": "/cache/ViT-B-32__openai/openvino"}, + { + "device_type": "GPU.0", + "precision": "FP32", + "cache_dir": "/cache/ViT-B-32__openai/textual/openvino", + }, {"arena_extend_strategy": "kSameAsRequested"}, ] @@ -262,6 +267,21 @@ class TestOrtSession: } ] + def test_sets_openvino_to_fp16_if_enabled(self, mocker: MockerFixture) -> None: + model_path = "/cache/ViT-B-32__openai/textual/model.onnx" + os.environ["MACHINE_LEARNING_DEVICE_ID"] = "1" + mocker.patch.object(settings, "openvino_precision", ModelPrecision.FP16) + + session = OrtSession(model_path, providers=["OpenVINOExecutionProvider"]) + + assert session.provider_options == [ + { + "device_type": "GPU.1", + "precision": "FP16", + "cache_dir": "/cache/ViT-B-32__openai/textual/openvino", + } + ] + def test_sets_provider_options_for_cuda(self) -> None: os.environ["MACHINE_LEARNING_DEVICE_ID"] = "1" @@ -417,7 +437,7 @@ class TestRknnSession: session.run(None, input_feed) rknn_session.return_value.put.assert_called_once_with([input1, input2]) - np_spy.call_count == 2 + assert 
np_spy.call_count == 2 np_spy.assert_has_calls([mock.call(input1), mock.call(input2)]) @@ -925,11 +945,34 @@ class TestCache: any_order=True, ) + async def test_preloads_ocr_models(self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock) -> None: + os.environ["MACHINE_LEARNING_PRELOAD__OCR__DETECTION"] = "PP-OCRv5_mobile" + os.environ["MACHINE_LEARNING_PRELOAD__OCR__RECOGNITION"] = "PP-OCRv5_mobile" + + settings = Settings() + assert settings.preload is not None + assert settings.preload.ocr.detection == "PP-OCRv5_mobile" + assert settings.preload.ocr.recognition == "PP-OCRv5_mobile" + + model_cache = ModelCache() + monkeypatch.setattr("immich_ml.main.model_cache", model_cache) + + await preload_models(settings.preload) + mock_get_model.assert_has_calls( + [ + mock.call("PP-OCRv5_mobile", ModelType.DETECTION, ModelTask.OCR), + mock.call("PP-OCRv5_mobile", ModelType.RECOGNITION, ModelTask.OCR), + ], + any_order=True, + ) + async def test_preloads_all_models(self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock) -> None: os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai" os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai" os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION"] = "buffalo_s" os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION"] = "buffalo_s" + os.environ["MACHINE_LEARNING_PRELOAD__OCR__DETECTION"] = "PP-OCRv5_mobile" + os.environ["MACHINE_LEARNING_PRELOAD__OCR__RECOGNITION"] = "PP-OCRv5_mobile" settings = Settings() assert settings.preload is not None @@ -937,6 +980,8 @@ class TestCache: assert settings.preload.clip.textual == "ViT-B-32__openai" assert settings.preload.facial_recognition.recognition == "buffalo_s" assert settings.preload.facial_recognition.detection == "buffalo_s" + assert settings.preload.ocr.detection == "PP-OCRv5_mobile" + assert settings.preload.ocr.recognition == "PP-OCRv5_mobile" model_cache = ModelCache() monkeypatch.setattr("immich_ml.main.model_cache", model_cache) @@ -948,6 +993,8 @@ class TestCache: mock.call("ViT-B-32__openai", ModelType.VISUAL, ModelTask.SEARCH), mock.call("buffalo_s", ModelType.DETECTION, ModelTask.FACIAL_RECOGNITION), mock.call("buffalo_s", ModelType.RECOGNITION, ModelTask.FACIAL_RECOGNITION), + mock.call("PP-OCRv5_mobile", ModelType.DETECTION, ModelTask.OCR), + mock.call("PP-OCRv5_mobile", ModelType.RECOGNITION, ModelTask.OCR), ], any_order=True, )
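For anyone reviewing or trying this change locally, below is a minimal sketch of how the new settings are expected to be picked up, assuming the `immich_ml` package from this branch is importable and that `Settings` honors the `MACHINE_LEARNING_` prefix and `__` nesting exactly as the tests added above rely on. It is an illustration only, not part of the diff.

```python
import os

from immich_ml.config import Settings
from immich_ml.schemas import ModelPrecision

# Illustration (not part of the diff): set the newly documented variables
# before constructing Settings, mirroring the env-driven tests in test_main.py.
os.environ["MACHINE_LEARNING_OPENVINO_PRECISION"] = "FP16"
os.environ["MACHINE_LEARNING_PRELOAD__OCR__DETECTION"] = "PP-OCRv5_mobile"
os.environ["MACHINE_LEARNING_PRELOAD__OCR__RECOGNITION"] = "PP-OCRv5_mobile"

settings = Settings()

# The precision string is parsed into the new ModelPrecision enum; OrtSession
# then forwards settings.openvino_precision.value as the "precision" key of
# the OpenVINOExecutionProvider options (see sessions/ort.py above).
assert settings.openvino_precision is ModelPrecision.FP16

# The nested OCR preload variables land on the new OcrSettings model and are
# consumed by preload_models() in main.py.
assert settings.preload is not None
assert settings.preload.ocr.detection == "PP-OCRv5_mobile"
assert settings.preload.ocr.recognition == "PP-OCRv5_mobile"
```

In a Docker deployment the same values would simply be set as environment variables on the machine-learning container (e.g. `MACHINE_LEARNING_OPENVINO_PRECISION=FP16`), exactly like the other variables documented in the table above.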