Mirror of https://github.com/immich-app/immich (synced 2025-11-14 17:36:12 +00:00)
refactor(ml): model downloading (#3545)
* download facial recognition models
* download hf models
* simplified logic
* updated `predict` for facial recognition
* ensure download method is called
* fixed repo_id for clip
* fixed download destination
* use st's own `snapshot_download`
* conditional download
* fixed predict method
* check if loaded
* minor fixes
* updated mypy overrides
* added pytest-mock
* updated tests
* updated lock
parent 2f26a7edae
commit c73832bd9c

10 changed files with 350 additions and 274 deletions
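The core of the change is a lazier lifecycle for InferenceModel: the constructor gains an eager flag, the download step is split from the load step, and predict() loads the model on first use if needed. A minimal sketch of the resulting behavior (the model name, cache path, and import path here are illustrative assumptions inferred from the diff's relative imports, not taken from this page):

from app.models.clip import CLIPSTEncoder

# eager=False fetches the weights into the cache dir but defers loading.
model = CLIPSTEncoder("clip-ViT-B-32", cache_dir="/cache/clip", eager=False)

# The first predict() sees the model is not loaded yet and loads it first,
# so workers can pre-download weights without paying the load cost upfront.
embedding = model.predict("a photo of a dog")

The first diff below adds this contract to the InferenceModel base class.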
@@ -14,22 +14,43 @@ from ..schemas import ModelType
 class InferenceModel(ABC):
     _model_type: ModelType
 
-    def __init__(self, model_name: str, cache_dir: Path | str | None = None, **model_kwargs: Any) -> None:
+    def __init__(
+        self, model_name: str, cache_dir: Path | str | None = None, eager: bool = True, **model_kwargs: Any
+    ) -> None:
         self.model_name = model_name
+        self._loaded = False
         self._cache_dir = Path(cache_dir) if cache_dir is not None else get_cache_dir(model_name, self.model_type)
 
+        loader = self.load if eager else self.download
         try:
-            self.load(**model_kwargs)
+            loader(**model_kwargs)
         except (OSError, InvalidProtobuf):
             self.clear_cache()
-            self.load(**model_kwargs)
+            loader(**model_kwargs)
 
+    def download(self, **model_kwargs: Any) -> None:
+        if not self.cached:
+            self._download(**model_kwargs)
+
+    def load(self, **model_kwargs: Any) -> None:
+        self.download(**model_kwargs)
+        self._load(**model_kwargs)
+        self._loaded = True
+
+    def predict(self, inputs: Any) -> Any:
+        if not self._loaded:
+            self.load()
+        return self._predict(inputs)
+
     @abstractmethod
-    def load(self, **model_kwargs: Any) -> None:
+    def _predict(self, inputs: Any) -> Any:
         ...
 
     @abstractmethod
-    def predict(self, inputs: Any) -> Any:
+    def _download(self, **model_kwargs: Any) -> None:
         ...
 
+    @abstractmethod
+    def _load(self, **model_kwargs: Any) -> None:
+        ...
+
     @property
@@ -44,6 +65,10 @@ class InferenceModel(ABC):
     def cache_dir(self, cache_dir: Path) -> None:
         self._cache_dir = cache_dir
 
+    @property
+    def cached(self) -> bool:
+        return self.cache_dir.exists() and any(self.cache_dir.iterdir())
+
     @classmethod
     def from_model_type(cls, model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
         subclasses = {subclass._model_type: subclass for subclass in cls.__subclasses__()}
@@ -55,7 +80,11 @@ class InferenceModel(ABC):
     def clear_cache(self) -> None:
         if not self.cache_dir.exists():
             return
-        elif not rmtree.avoids_symlink_attacks:
+        if not rmtree.avoids_symlink_attacks:
             raise RuntimeError("Attempted to clear cache, but rmtree is not safe on this platform.")
 
-        rmtree(self.cache_dir)
+        if self.cache_dir.is_dir():
+            rmtree(self.cache_dir)
+        else:
+            self.cache_dir.unlink()
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
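The base class is now a template method: the public download/load/predict wrappers handle caching and lazy loading once, and subclasses only implement the _download/_load/_predict hooks. A conforming subclass might look like this (DummyModel and its file layout are hypothetical, shown only to illustrate the contract; import paths are assumed from the diff's relative imports):

from typing import Any

from app.models.base import InferenceModel
from app.schemas import ModelType


class DummyModel(InferenceModel):
    _model_type = ModelType.CLIP  # reusing an existing type purely for the sketch

    def _download(self, **model_kwargs: Any) -> None:
        # Called by download() only when self.cached is False.
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        (self.cache_dir / "weights.bin").write_bytes(b"\x00" * 16)

    def _load(self, **model_kwargs: Any) -> None:
        self.weights = (self.cache_dir / "weights.bin").read_bytes()

    def _predict(self, inputs: Any) -> Any:
        return len(self.weights)

Note that clear_cache() now recreates the cache directory after removing it, so a cleared model can immediately re-download into it. The next hunks apply the same split to the CLIP encoder.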
@@ -1,8 +1,8 @@
-from pathlib import Path
 from typing import Any
 
 from PIL.Image import Image
 from sentence_transformers import SentenceTransformer
+from sentence_transformers.util import snapshot_download
 
 from ..schemas import ModelType
 from .base import InferenceModel
@@ -11,12 +11,21 @@ from .base import InferenceModel
 class CLIPSTEncoder(InferenceModel):
     _model_type = ModelType.CLIP
 
-    def load(self, **model_kwargs: Any) -> None:
+    def _download(self, **model_kwargs: Any) -> None:
+        repo_id = self.model_name if "/" in self.model_name else f"sentence-transformers/{self.model_name}"
+        snapshot_download(
+            cache_dir=self.cache_dir,
+            repo_id=repo_id,
+            library_name="sentence-transformers",
+            ignore_files=["flax_model.msgpack", "rust_model.ot", "tf_model.h5"],
+        )
+
+    def _load(self, **model_kwargs: Any) -> None:
         self.model = SentenceTransformer(
             self.model_name,
             cache_folder=self.cache_dir.as_posix(),
             **model_kwargs,
         )
 
-    def predict(self, image_or_text: Image | str) -> list[float]:
+    def _predict(self, image_or_text: Image | str) -> list[float]:
         return self.model.encode(image_or_text).tolist()
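One detail worth noting in _download: bare model names are resolved against the sentence-transformers organization on the Hugging Face Hub, while names that already contain a slash pass through unchanged. Sketched as a standalone helper (resolve_repo_id is a hypothetical name; the expression mirrors the line in the diff above):

def resolve_repo_id(model_name: str) -> str:
    # Bare names are assumed to live under sentence-transformers/ on the Hub.
    return model_name if "/" in model_name else f"sentence-transformers/{model_name}"

resolve_repo_id("clip-ViT-B-32")   # -> "sentence-transformers/clip-ViT-B-32"
resolve_repo_id("org/custom-clip") # -> "org/custom-clip" (unchanged)

The largest change is the facial recognition model, shown next.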
@@ -1,8 +1,12 @@
+import zipfile
 from pathlib import Path
 from typing import Any
 
 import cv2
-from insightface.app import FaceAnalysis
+import numpy as np
+from insightface.model_zoo import ArcFaceONNX, RetinaFace
+from insightface.utils.face_align import norm_crop
+from insightface.utils.storage import BASE_REPO_URL, download_file
 
 from ..config import settings
 from ..schemas import ModelType
@@ -22,39 +26,62 @@ class FaceRecognizer(InferenceModel):
         self.min_score = min_score
         super().__init__(model_name, cache_dir, **model_kwargs)
 
-    def load(self, **model_kwargs: Any) -> None:
-        self.model = FaceAnalysis(
-            name=self.model_name,
-            root=self.cache_dir.as_posix(),
-            allowed_modules=["detection", "recognition"],
-            **model_kwargs,
-        )
-        self.model.prepare(
-            ctx_id=0,
+    def _download(self, **model_kwargs: Any) -> None:
+        zip_file = self.cache_dir / f"{self.model_name}.zip"
+        download_file(f"{BASE_REPO_URL}/{self.model_name}.zip", zip_file)
+        with zipfile.ZipFile(zip_file, "r") as zip:
+            members = zip.namelist()
+            det_file = next(model for model in members if model.startswith("det_"))
+            rec_file = next(model for model in members if model.startswith("w600k_"))
+            zip.extractall(self.cache_dir, members=[det_file, rec_file])
+        zip_file.unlink()
+
+    def _load(self, **model_kwargs: Any) -> None:
+        try:
+            det_file = next(self.cache_dir.glob("det_*.onnx"))
+            rec_file = next(self.cache_dir.glob("w600k_*.onnx"))
+        except StopIteration:
+            raise FileNotFoundError("Facial recognition models not found in cache directory")
+        self.det_model = RetinaFace(det_file.as_posix())
+        self.rec_model = ArcFaceONNX(rec_file.as_posix())
+
+        self.det_model.prepare(
+            ctx_id=-1,
             det_thresh=self.min_score,
-            det_size=(640, 640),
+            input_size=(640, 640),
         )
+        self.rec_model.prepare(ctx_id=-1)
 
-    def predict(self, image: cv2.Mat) -> list[dict[str, Any]]:
-        height, width, _ = image.shape
-        results = []
-        faces = self.model.get(image)
-
-        for face in faces:
-            x1, y1, x2, y2 = face.bbox
-
+    def _predict(self, image: cv2.Mat) -> list[dict[str, Any]]:
+        bboxes, kpss = self.det_model.detect(image)
+        if bboxes.size == 0:
+            return []
+        assert isinstance(kpss, np.ndarray)
+
+        scores = bboxes[:, 4].tolist()
+        bboxes = bboxes[:, :4].round().tolist()
+
+        height, width, _ = image.shape
+        results = []
+        for (x1, y1, x2, y2), score, kps in zip(bboxes, scores, kpss):
+            cropped_img = norm_crop(image, kps)
+            embedding = self.rec_model.get_feat(cropped_img)[0].tolist()
             results.append(
                 {
                     "imageWidth": width,
                     "imageHeight": height,
                     "boundingBox": {
-                        "x1": round(x1),
-                        "y1": round(y1),
-                        "x2": round(x2),
-                        "y2": round(y2),
+                        "x1": x1,
+                        "y1": y1,
+                        "x2": x2,
+                        "y2": y2,
                     },
-                    "score": face.det_score.item(),
-                    "embedding": face.normed_embedding.tolist(),
+                    "score": score,
+                    "embedding": embedding,
                 }
             )
         return results
+
+    @property
+    def cached(self) -> bool:
+        return self.cache_dir.is_dir() and any(self.cache_dir.glob("*.onnx"))
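With FaceAnalysis gone, FaceRecognizer drives the two insightface stages itself: RetinaFace proposes boxes and landmarks, norm_crop aligns each face, and ArcFaceONNX embeds the aligned crop. A usage sketch (the pack name "buffalo_l", the image path, and the keyword arguments are illustrative assumptions):

import cv2

from app.models.facial_recognition import FaceRecognizer

recognizer = FaceRecognizer("buffalo_l", min_score=0.7, cache_dir="/cache/facial-recognition")
image = cv2.imread("group_photo.jpg")  # BGR ndarray, i.e. cv2.Mat
for face in recognizer.predict(image):
    box = face["boundingBox"]
    print(face["score"], (box["x1"], box["y1"], box["x2"], box["y2"]))

The overridden cached property matters here: unlike the Hugging Face models, this cache holds loose .onnx files rather than a repo snapshot, so it checks for those specifically. The final diff gives the image classifier the same treatment.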
@@ -1,6 +1,7 @@
 from pathlib import Path
 from typing import Any
 
+from huggingface_hub import snapshot_download
 from PIL.Image import Image
 from transformers.pipelines import pipeline
 
@@ -22,14 +23,19 @@ class ImageClassifier(InferenceModel):
         self.min_score = min_score
         super().__init__(model_name, cache_dir, **model_kwargs)
 
-    def load(self, **model_kwargs: Any) -> None:
+    def _download(self, **model_kwargs: Any) -> None:
+        snapshot_download(
+            cache_dir=self.cache_dir, repo_id=self.model_name, allow_patterns=["*.bin", "*.json", "*.txt"]
+        )
+
+    def _load(self, **model_kwargs: Any) -> None:
         self.model = pipeline(
             self.model_type.value,
             self.model_name,
             model_kwargs={"cache_dir": self.cache_dir, **model_kwargs},
        )
 
-    def predict(self, image: Image) -> list[str]:
+    def _predict(self, image: Image) -> list[str]:
         predictions: list[dict[str, Any]] = self.model(image)  # type: ignore
         tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= self.min_score]