immich/machine-learning/immich_ml/models/clip/visual.py

import json
from abc import abstractmethod
from functools import cached_property
from pathlib import Path
from typing import Any

import numpy as np
from numpy.typing import NDArray
from PIL import Image

from immich_ml.config import log
from immich_ml.models.base import InferenceModel
from immich_ml.models.transforms import (
    crop_pil,
    decode_pil,
    get_pil_resampling,
    normalize,
    resize_pil,
    serialize_np_array,
    to_numpy,
)
from immich_ml.schemas import ModelSession, ModelTask, ModelType


class BaseCLIPVisualEncoder(InferenceModel):
    depends = []
    identity = (ModelType.VISUAL, ModelTask.SEARCH)

    def _predict(self, inputs: Image.Image | bytes) -> str:
        # Decode raw bytes into a PIL image (a PIL image passes through),
        # run the inference session on the preprocessed tensor, and
        # serialize the resulting embedding vector to a JSON string.
        image = decode_pil(inputs)
        res: NDArray[np.float32] = self.session.run(None, self.transform(image))[0][0]
        return serialize_np_array(res)

    @abstractmethod
    def transform(self, image: Image.Image) -> dict[str, NDArray[np.float32]]:
        # Subclasses turn a PIL image into the session's input feed dict.
        pass

    @property
    def model_cfg_path(self) -> Path:
        return self.cache_dir / "config.json"

    @property
    def preprocess_cfg_path(self) -> Path:
        return self.model_dir / "preprocess_cfg.json"

    @cached_property
    def model_cfg(self) -> dict[str, Any]:
        log.debug(f"Loading model config for CLIP model '{self.model_name}'")
        model_cfg: dict[str, Any] = json.load(self.model_cfg_path.open())
        log.debug(f"Loaded model config for CLIP model '{self.model_name}'")
        return model_cfg

    @cached_property
    def preprocess_cfg(self) -> dict[str, Any]:
        log.debug(f"Loading visual preprocessing config for CLIP model '{self.model_name}'")
        preprocess_cfg: dict[str, Any] = json.load(self.preprocess_cfg_path.open())
        log.debug(f"Loaded visual preprocessing config for CLIP model '{self.model_name}'")
        return preprocess_cfg
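
# For reference, the preprocess_cfg.json consumed by OpenClipVisualEncoder
# below typically carries the standard OpenCLIP preprocessing parameters.
# An illustrative example only -- values vary per model, though the mean/std
# shown are the usual CLIP normalization constants:
#
#     {
#         "size": 224,
#         "mean": [0.48145466, 0.4578275, 0.40821073],
#         "std": [0.26862954, 0.26130258, 0.27577711],
#         "interpolation": "bicubic"
#     }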


class OpenClipVisualEncoder(BaseCLIPVisualEncoder):
    def _load(self) -> ModelSession:
        # Cache the preprocessing parameters before the session is created.
        # "size" may be a single int or a list; the crop is square, so only
        # the first dimension is kept.
        size: list[int] | int = self.preprocess_cfg["size"]
        self.size = size[0] if isinstance(size, list) else size

        self.resampling = get_pil_resampling(self.preprocess_cfg["interpolation"])
        self.mean = np.array(self.preprocess_cfg["mean"], dtype=np.float32)
        self.std = np.array(self.preprocess_cfg["std"], dtype=np.float32)

        return super()._load()

    def transform(self, image: Image.Image) -> dict[str, NDArray[np.float32]]:
        # Resize and crop to a self.size x self.size square, normalize with
        # the model's mean/std, then transpose HWC -> CHW and add a leading
        # batch dimension of 1.
        image = resize_pil(image, self.size)
        image = crop_pil(image, self.size)
        image_np = to_numpy(image)
        image_np = normalize(image_np, self.mean, self.std)
        return {"image": np.expand_dims(image_np.transpose(2, 0, 1), 0)}