feat(server,ml): remove image tagging (#5903)
* remove image tagging
* updated lock
* fixed tests, improved logging
* be nice
* fixed tests
This commit is contained in:
parent 154292242f
commit 092a23fd7f
65 changed files with 988 additions and 2930 deletions
@@ -6,7 +6,6 @@ from .base import InferenceModel
 from .clip import MCLIPEncoder, OpenCLIPEncoder
 from .constants import is_insightface, is_mclip, is_openclip
 from .facial_recognition import FaceRecognizer
-from .image_classification import ImageClassifier


 def from_model_type(model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
@@ -19,8 +18,6 @@ def from_model_type(model_type: ModelType, model_name: str, **model_kwargs: Any)
         case ModelType.FACIAL_RECOGNITION:
             if is_insightface(model_name):
                 return FaceRecognizer(model_name, **model_kwargs)
-        case ModelType.IMAGE_CLASSIFICATION:
-            return ImageClassifier(model_name, **model_kwargs)
         case _:
             raise ValueError(f"Unknown model type {model_type}")

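For context: after these two hunks the factory can only construct CLIP and facial-recognition models. A minimal sketch of a call site, assuming from_model_type is importable from the app.models package (as the relative imports suggest) and that the CLIP branch above the shown context is unchanged; the model names below are illustrative, not taken from the diff:

    from app.models import from_model_type
    from app.schemas import ModelType

    clip = from_model_type(ModelType.CLIP, "ViT-B-32__openai")
    face = from_model_type(ModelType.FACIAL_RECOGNITION, "buffalo_l")
    # Any model type without a matching case now falls through to the
    # final branch and raises ValueError(f"Unknown model type {model_type}").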
@@ -35,7 +35,7 @@ class InferenceModel(ABC):
         )
         log.debug(
             (
-                f"Setting '{self.model_name}' execution providers to {self.providers}"
+                f"Setting '{self.model_name}' execution providers to {self.providers} "
                 "in descending order of preference"
             ),
         )
@@ -55,7 +55,7 @@ class InferenceModel(ABC):
     def download(self) -> None:
         if not self.cached:
             log.info(
-                (f"Downloading {self.model_type.replace('-', ' ')} model '{self.model_name}'." "This may take a while.")
+                f"Downloading {self.model_type.replace('-', ' ')} model '{self.model_name}'. This may take a while."
             )
             self._download()

@@ -63,7 +63,7 @@ class InferenceModel(ABC):
         if self.loaded:
             return
         self.download()
-        log.info(f"Loading {self.model_type.replace('-', ' ')} model '{self.model_name}'")
+        log.info(f"Loading {self.model_type.replace('-', ' ')} model '{self.model_name}' to memory")
         self._load()
         self.loaded = True

@@ -119,11 +119,11 @@ class InferenceModel(ABC):
     def clear_cache(self) -> None:
         if not self.cache_dir.exists():
             log.warn(
-                f"Attempted to clear cache for model '{self.model_name}' but cache directory does not exist.",
+                f"Attempted to clear cache for model '{self.model_name}', but cache directory does not exist",
             )
             return
         if not rmtree.avoids_symlink_attacks:
-            raise RuntimeError("Attempted to clear cache, but rmtree is not safe on this platform.")
+            raise RuntimeError("Attempted to clear cache, but rmtree is not safe on this platform")

         if self.cache_dir.is_dir():
             log.info(f"Cleared cache directory for model '{self.model_name}'.")
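The clear_cache guard above leans on a real stdlib feature: shutil.rmtree exposes an avoids_symlink_attacks attribute that is True only on platforms whose fd-based directory APIs let rmtree resist symlink races. A standalone sketch of the same pattern, with paths and logging simplified from the class above:

    import shutil
    from pathlib import Path

    def clear_cache(cache_dir: Path) -> None:
        if not cache_dir.exists():
            return  # nothing to do; the class above logs a warning here
        # Refuse recursive deletion on platforms where rmtree can be tricked
        # into following a symlink planted mid-traversal.
        if not shutil.rmtree.avoids_symlink_attacks:
            raise RuntimeError("Attempted to clear cache, but rmtree is not safe on this platform")
        if cache_dir.is_dir():
            shutil.rmtree(cache_dir)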
@@ -8,11 +8,11 @@ from typing import Any, Literal
 import numpy as np
 import onnxruntime as ort
 from PIL import Image
-from transformers import AutoTokenizer
+from tokenizers import Encoding, Tokenizer

 from app.config import clean_name, log
 from app.models.transforms import crop, get_pil_resampling, normalize, resize, to_numpy
-from app.schemas import ModelType, ndarray_f32, ndarray_i32, ndarray_i64
+from app.schemas import ModelType, ndarray_f32, ndarray_i32

 from .base import InferenceModel

@@ -40,6 +40,7 @@ class BaseCLIPEncoder(InferenceModel):
                 providers=self.providers,
                 provider_options=self.provider_options,
             )
+            log.debug(f"Loaded clip text model '{self.model_name}'")

         if self.mode == "vision" or self.mode is None:
             log.debug(f"Loading clip vision model '{self.model_name}'")
@@ -50,6 +51,7 @@ class BaseCLIPEncoder(InferenceModel):
                 providers=self.providers,
                 provider_options=self.provider_options,
             )
+            log.debug(f"Loaded clip vision model '{self.model_name}'")

     def _predict(self, image_or_text: Image.Image | str) -> ndarray_f32:
         if isinstance(image_or_text, bytes):
@@ -99,6 +101,14 @@ class BaseCLIPEncoder(InferenceModel):
     def visual_path(self) -> Path:
         return self.visual_dir / "model.onnx"

+    @property
+    def tokenizer_file_path(self) -> Path:
+        return self.textual_dir / "tokenizer.json"
+
+    @property
+    def tokenizer_cfg_path(self) -> Path:
+        return self.textual_dir / "tokenizer_config.json"
+
     @property
     def preprocess_cfg_path(self) -> Path:
         return self.visual_dir / "preprocess_cfg.json"
@@ -107,6 +117,34 @@ class BaseCLIPEncoder(InferenceModel):
     def cached(self) -> bool:
         return self.textual_path.is_file() and self.visual_path.is_file()

+    @cached_property
+    def model_cfg(self) -> dict[str, Any]:
+        log.debug(f"Loading model config for CLIP model '{self.model_name}'")
+        model_cfg: dict[str, Any] = json.load(self.model_cfg_path.open())
+        log.debug(f"Loaded model config for CLIP model '{self.model_name}'")
+        return model_cfg
+
+    @cached_property
+    def tokenizer_file(self) -> dict[str, Any]:
+        log.debug(f"Loading tokenizer file for CLIP model '{self.model_name}'")
+        tokenizer_file: dict[str, Any] = json.load(self.tokenizer_file_path.open())
+        log.debug(f"Loaded tokenizer file for CLIP model '{self.model_name}'")
+        return tokenizer_file
+
+    @cached_property
+    def tokenizer_cfg(self) -> dict[str, Any]:
+        log.debug(f"Loading tokenizer config for CLIP model '{self.model_name}'")
+        tokenizer_cfg: dict[str, Any] = json.load(self.tokenizer_cfg_path.open())
+        log.debug(f"Loaded tokenizer config for CLIP model '{self.model_name}'")
+        return tokenizer_cfg
+
+    @cached_property
+    def preprocess_cfg(self) -> dict[str, Any]:
+        log.debug(f"Loading visual preprocessing config for CLIP model '{self.model_name}'")
+        preprocess_cfg: dict[str, Any] = json.load(self.preprocess_cfg_path.open())
+        log.debug(f"Loaded visual preprocessing config for CLIP model '{self.model_name}'")
+        return preprocess_cfg
+

 class OpenCLIPEncoder(BaseCLIPEncoder):
     def __init__(
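These accessors follow functools.cached_property's contract: the body runs once on first attribute access and the result is memoized on the instance, so each JSON config is parsed at most once per model. A minimal, self-contained illustration of the pattern (the class name and path here are hypothetical):

    import json
    from functools import cached_property
    from pathlib import Path
    from typing import Any

    class Configured:
        def __init__(self, cfg_path: Path) -> None:
            self.cfg_path = cfg_path

        @cached_property
        def cfg(self) -> dict[str, Any]:
            # Runs only on first access; later reads hit the cached value.
            with self.cfg_path.open() as f:
                return json.load(f)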
@@ -121,8 +159,8 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
     def _load(self) -> None:
         super()._load()

-        self.tokenizer = AutoTokenizer.from_pretrained(self.textual_dir)
-        self.sequence_length = self.model_cfg["text_cfg"]["context_length"]
+        context_length = self.model_cfg["text_cfg"]["context_length"]
+        pad_token = self.tokenizer_cfg["pad_token"]

         self.size = (
             self.preprocess_cfg["size"][0] if type(self.preprocess_cfg["size"]) == list else self.preprocess_cfg["size"]
@@ -131,16 +169,16 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
         self.mean = np.array(self.preprocess_cfg["mean"], dtype=np.float32)
         self.std = np.array(self.preprocess_cfg["std"], dtype=np.float32)

+        log.debug(f"Loading tokenizer for CLIP model '{self.model_name}'")
+        self.tokenizer: Tokenizer = Tokenizer.from_file(self.tokenizer_file_path.as_posix())
+        pad_id = self.tokenizer.token_to_id(pad_token)
+        self.tokenizer.enable_padding(length=context_length, pad_token=pad_token, pad_id=pad_id)
+        self.tokenizer.enable_truncation(max_length=context_length)
+        log.debug(f"Loaded tokenizer for CLIP model '{self.model_name}'")
+
     def tokenize(self, text: str) -> dict[str, ndarray_i32]:
-        input_ids: ndarray_i64 = self.tokenizer(
-            text,
-            max_length=self.sequence_length,
-            return_tensors="np",
-            return_attention_mask=False,
-            padding="max_length",
-            truncation=True,
-        ).input_ids
-        return {"text": input_ids.astype(np.int32)}
+        tokens: Encoding = self.tokenizer.encode(text)
+        return {"text": np.array([tokens.ids], dtype=np.int32)}

     def transform(self, image: Image.Image) -> dict[str, ndarray_f32]:
         image = resize(image, self.size)
@@ -149,18 +187,11 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
         image_np = normalize(image_np, self.mean, self.std)
         return {"image": np.expand_dims(image_np.transpose(2, 0, 1), 0)}

-    @cached_property
-    def model_cfg(self) -> dict[str, Any]:
-        model_cfg: dict[str, Any] = json.load(self.model_cfg_path.open())
-        return model_cfg
-
-    @cached_property
-    def preprocess_cfg(self) -> dict[str, Any]:
-        preprocess_cfg: dict[str, Any] = json.load(self.preprocess_cfg_path.open())
-        return preprocess_cfg
-

 class MCLIPEncoder(OpenCLIPEncoder):
     def tokenize(self, text: str) -> dict[str, ndarray_i32]:
-        tokens: dict[str, ndarray_i64] = self.tokenizer(text, return_tensors="np")
-        return {k: v.astype(np.int32) for k, v in tokens.items()}
+        tokens: Encoding = self.tokenizer.encode(text)
+        return {
+            "input_ids": np.array([tokens.ids], dtype=np.int32),
+            "attention_mask": np.array([tokens.attention_mask], dtype=np.int32),
+        }
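The net effect of these clip.py changes is that tokenization no longer goes through transformers.AutoTokenizer; both encoders share one tokenizers.Tokenizer configured for fixed-length output. A hedged sketch of the new flow — the tokenizer.json path, pad token, and context length of 77 are illustrative values for an OpenAI-style CLIP text encoder, not taken from the diff:

    import numpy as np
    from tokenizers import Tokenizer

    tokenizer = Tokenizer.from_file("tokenizer.json")
    pad_token = "<|endoftext|>"  # read from tokenizer_config.json in the real code
    pad_id = tokenizer.token_to_id(pad_token)
    tokenizer.enable_padding(length=77, pad_token=pad_token, pad_id=pad_id)
    tokenizer.enable_truncation(max_length=77)

    encoding = tokenizer.encode("search query")
    input_ids = np.array([encoding.ids], dtype=np.int32)  # shape (1, 77)
    attention_mask = np.array([encoding.attention_mask], dtype=np.int32)  # used by MCLIP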
@@ -1,75 +0,0 @@
-from io import BytesIO
-from pathlib import Path
-from typing import Any
-
-from huggingface_hub import snapshot_download
-from optimum.onnxruntime import ORTModelForImageClassification
-from optimum.pipelines import pipeline
-from PIL import Image
-from transformers import AutoImageProcessor
-
-from ..config import log
-from ..schemas import ModelType
-from .base import InferenceModel
-
-
-class ImageClassifier(InferenceModel):
-    _model_type = ModelType.IMAGE_CLASSIFICATION
-
-    def __init__(
-        self,
-        model_name: str,
-        min_score: float = 0.9,
-        cache_dir: Path | str | None = None,
-        **model_kwargs: Any,
-    ) -> None:
-        self.min_score = model_kwargs.pop("minScore", min_score)
-        super().__init__(model_name, cache_dir, **model_kwargs)
-
-    def _download(self) -> None:
-        snapshot_download(
-            cache_dir=self.cache_dir,
-            repo_id=self.model_name,
-            allow_patterns=["*.bin", "*.json", "*.txt"],
-            local_dir=self.cache_dir,
-            local_dir_use_symlinks=True,
-        )
-
-    def _load(self) -> None:
-        processor = AutoImageProcessor.from_pretrained(self.cache_dir, cache_dir=self.cache_dir)
-        model_path = self.cache_dir / "model.onnx"
-        model_kwargs = {
-            "cache_dir": self.cache_dir,
-            "provider": self.providers[0],
-            "provider_options": self.provider_options[0],
-            "session_options": self.sess_options,
-        }
-
-        if model_path.exists():
-            model = ORTModelForImageClassification.from_pretrained(self.cache_dir, **model_kwargs)
-            self.model = pipeline(self.model_type.value, model, feature_extractor=processor)
-        else:
-            log.info(
-                (
-                    f"ONNX model not found in cache directory for '{self.model_name}'."
-                    "Exporting optimized model for future use."
-                ),
-            )
-            self.sess_options.optimized_model_filepath = model_path.as_posix()
-            self.model = pipeline(
-                self.model_type.value,
-                self.model_name,
-                model_kwargs=model_kwargs,
-                feature_extractor=processor,
-            )
-
-    def _predict(self, image: Image.Image | bytes) -> list[str]:
-        if isinstance(image, bytes):
-            image = Image.open(BytesIO(image))
-        predictions: list[dict[str, Any]] = self.model(image)
-        tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= self.min_score]
-
-        return tags
-
-    def configure(self, **model_kwargs: Any) -> None:
-        self.min_score = model_kwargs.pop("minScore", self.min_score)
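For reference, the removed _predict flattened Hugging Face image-classification pipeline output (a list of {"label", "score"} dicts) into a flat tag list, splitting comma-separated labels and applying the min_score cutoff. The filtering step in isolation, with sample predictions invented for illustration:

    predictions = [
        {"label": "seashore, coast, seacoast", "score": 0.97},
        {"label": "sandbar", "score": 0.12},
    ]
    min_score = 0.9
    tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= min_score]
    # -> ["seashore", "coast", "seacoast"]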