Mirror of https://github.com/immich-app/immich
feat(ml): add more search models (#11468)
* update export code
* add uuid glob, sort model names
* add new models to ml, sort names
* add new models to server, sort by dims and name
* typo in name
* update export dependencies
* onnx save function
* format
This commit is contained in:
parent 2423bb36c4
commit 41580696c7

9 changed files with 3804 additions and 2923 deletions

Three of the nine changed files are reproduced below: the mclip, openclip, and optimize export modules.
mclip.py:

@@ -1,3 +1,4 @@
+import os
 import tempfile
 import warnings
 from pathlib import Path
@@ -8,7 +9,6 @@ from transformers import AutoTokenizer
 from .openclip import OpenCLIPModelConfig
 from .openclip import to_onnx as openclip_to_onnx
-from .optimize import optimize
 from .util import get_model_path


 _MCLIP_TO_OPENCLIP = {
@@ -23,18 +23,20 @@ def to_onnx(
     model_name: str,
     output_dir_visual: Path | str,
     output_dir_textual: Path | str,
-) -> None:
+) -> tuple[Path, Path]:
     textual_path = get_model_path(output_dir_textual)
     with tempfile.TemporaryDirectory() as tmpdir:
-        model = MultilingualCLIP.from_pretrained(model_name, cache_dir=tmpdir)
+        model = MultilingualCLIP.from_pretrained(model_name, cache_dir=os.environ.get("CACHE_DIR", tmpdir))
         AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)

         model.eval()
         for param in model.parameters():
             param.requires_grad_(False)

         export_text_encoder(model, textual_path)
-        openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
-        optimize(textual_path)
+        visual_path, _ = openclip_to_onnx(_MCLIP_TO_OPENCLIP[model_name], output_dir_visual)
+        assert visual_path is not None, "Visual model export failed"
+
+    return visual_path, textual_path


 def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
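The two changes in this hunk work together: the CACHE_DIR override lets repeated export runs reuse already-downloaded weights instead of re-fetching into a throwaway temp directory, and the tuple return hands the produced ONNX paths back to the caller. A minimal usage sketch, assuming the model name is one of the keys of _MCLIP_TO_OPENCLIP (the checkpoint name and output dirs here are illustrative):

    import os

    # Point the exporter at a persistent cache instead of the TemporaryDirectory fallback.
    os.environ["CACHE_DIR"] = "/tmp/model-cache"

    visual_path, textual_path = to_onnx(
        "M-CLIP/XLM-Roberta-Large-Vit-B-32",  # illustrative checkpoint name
        output_dir_visual="models/visual",
        output_dir_textual="models/textual",
    )
    print(visual_path, textual_path)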
@@ -58,10 +60,10 @@ def export_text_encoder(model: MultilingualCLIP, output_path: Path | str) -> None:
         args,
         output_path.as_posix(),
         input_names=["input_ids", "attention_mask"],
-        output_names=["text_embedding"],
+        output_names=["embedding"],
         opset_version=17,
-        dynamic_axes={
-            "input_ids": {0: "batch_size", 1: "sequence_length"},
-            "attention_mask": {0: "batch_size", 1: "sequence_length"},
-        },
+        # dynamic_axes={
+        #     "input_ids": {0: "batch_size", 1: "sequence_length"},
+        #     "attention_mask": {0: "batch_size", 1: "sequence_length"},
+        # },
     )
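Renaming text_embedding (and image_embedding below) to a uniform embedding lets downstream code fetch the result by one well-known name regardless of which encoder it loaded; note that the dynamic batch and sequence axes are commented out, so the exported graph keeps the fixed shapes used at trace time. A quick sanity check with onnxruntime, assuming an exported textual model at an illustrative path:

    import onnxruntime as ort

    session = ort.InferenceSession("models/textual/model.onnx")  # illustrative path
    assert session.get_outputs()[0].name == "embedding"
    print([inp.name for inp in session.get_inputs()])  # ['input_ids', 'attention_mask']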
openclip.py:

@@ -1,3 +1,4 @@
+import os
 import tempfile
 import warnings
 from dataclasses import dataclass, field
@@ -7,7 +8,6 @@ import open_clip
 import torch
 from transformers import AutoTokenizer

-from .optimize import optimize
 from .util import get_model_path, save_config

@@ -23,25 +23,28 @@ class OpenCLIPModelConfig:
         if open_clip_cfg is None:
             raise ValueError(f"Unknown model {self.name}")
         self.image_size = open_clip_cfg["vision_cfg"]["image_size"]
-        self.sequence_length = open_clip_cfg["text_cfg"]["context_length"]
+        self.sequence_length = open_clip_cfg["text_cfg"].get("context_length", 77)


 def to_onnx(
     model_cfg: OpenCLIPModelConfig,
     output_dir_visual: Path | str | None = None,
     output_dir_textual: Path | str | None = None,
-) -> None:
+) -> tuple[Path | None, Path | None]:
+    visual_path = None
+    textual_path = None
     with tempfile.TemporaryDirectory() as tmpdir:
         model = open_clip.create_model(
             model_cfg.name,
             pretrained=model_cfg.pretrained,
             jit=False,
-            cache_dir=tmpdir,
+            cache_dir=os.environ.get("CACHE_DIR", tmpdir),
             require_pretrained=True,
         )

         text_vision_cfg = open_clip.get_model_config(model_cfg.name)

         model.eval()
         for param in model.parameters():
             param.requires_grad_(False)
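Indexing context_length directly raises a KeyError for model configs that omit the field; .get with a default of 77, the classic CLIP context length, keeps those models exportable. The lookup pattern in isolation:

    import open_clip

    cfg = open_clip.get_model_config("ViT-B-32")
    # Fall back to CLIP's traditional context length when the config omits it.
    seq_len = cfg["text_cfg"].get("context_length", 77)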
@@ -53,8 +56,6 @@ def to_onnx(
             save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
             export_image_encoder(model, model_cfg, visual_path)

-            optimize(visual_path)
-
         if output_dir_textual is not None:
             output_dir_textual = Path(output_dir_textual)
             textual_path = get_model_path(output_dir_textual)
@@ -62,7 +63,7 @@ def to_onnx(
             tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
             AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
             export_text_encoder(model, model_cfg, textual_path)
-            optimize(textual_path)
+    return visual_path, textual_path


 def export_image_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
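With both optimize(...) calls removed, to_onnx now only exports; optimization becomes the caller's responsibility, and the returned paths (either of which is None when that side was skipped) tell it what to optimize. A sketch of the resulting flow, assuming the caller imports optimize from the sibling module whose import was removed above:

    from .optimize import optimize  # sibling module, as in the removed import

    visual_path, textual_path = to_onnx(model_cfg, "out/visual", "out/textual")
    for path in (visual_path, textual_path):
        if path is not None:  # a side is None when its output dir was not given
            optimize(path)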
@@ -83,9 +84,9 @@ def export_image_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
         args,
         output_path.as_posix(),
         input_names=["image"],
-        output_names=["image_embedding"],
+        output_names=["embedding"],
         opset_version=17,
-        dynamic_axes={"image": {0: "batch_size"}},
+        # dynamic_axes={"image": {0: "batch_size"}},
     )
@@ -107,7 +108,7 @@ def export_text_encoder(model: open_clip.CLIP, model_cfg: OpenCLIPModelConfig, output_path: Path | str) -> None:
         args,
         output_path.as_posix(),
         input_names=["text"],
-        output_names=["text_embedding"],
+        output_names=["embedding"],
         opset_version=17,
-        dynamic_axes={"text": {0: "batch_size"}},
+        # dynamic_axes={"text": {0: "batch_size"}},
     )
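Commenting out dynamic_axes here (and in the image encoder above) pins every tensor dimension to the shape seen during tracing, so the exported model accepts exactly one input shape. A toy export demonstrating the difference, using an illustrative one-layer module and file names:

    import onnx
    import torch

    model = torch.nn.Linear(4, 2)
    args = torch.randn(1, 4)

    # Without dynamic_axes, dim 0 is baked in as 1; with it, dim 0 stays symbolic.
    torch.onnx.export(model, args, "static.onnx", input_names=["x"], output_names=["embedding"])
    torch.onnx.export(
        model,
        args,
        "dynamic.onnx",
        input_names=["x"],
        output_names=["embedding"],
        dynamic_axes={"x": {0: "batch_size"}},
    )

    dim0 = onnx.load("static.onnx").graph.input[0].type.tensor_type.shape.dim[0]
    print(dim0.dim_value)  # 1: the batch size is fixed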
optimize.py:

@@ -5,13 +5,26 @@ import onnxruntime as ort
 import onnxsim


+def save_onnx(model: onnx.ModelProto, output_path: Path | str) -> None:
+    try:
+        onnx.save(model, output_path)
+    except ValueError as e:
+        if "The proto size is larger than the 2 GB limit." in str(e):
+            onnx.save(model, output_path, save_as_external_data=True, size_threshold=1_000_000)
+        else:
+            raise e
+
+
 def optimize_onnxsim(model_path: Path | str, output_path: Path | str) -> None:
     model_path = Path(model_path)
     output_path = Path(output_path)
     model = onnx.load(model_path.as_posix())
-    model, check = onnxsim.simplify(model, skip_shape_inference=True)
+    model, check = onnxsim.simplify(model)
     assert check, "Simplified ONNX model could not be validated"
-    onnx.save(model, output_path.as_posix())
+    for file in model_path.parent.iterdir():
+        if file.name.startswith("Constant") or "onnx" in file.name or file.suffix == ".weight":
+            file.unlink()
+    save_onnx(model, output_path)


 def optimize_ort(
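ONNX models are protobuf messages, which are capped at 2 GiB; anything larger must spill tensor data into external files next to the model. save_onnx tries the plain save first and falls back to external data only on that specific failure, while the unlink loop in optimize_onnxsim clears stale spill files from a previous save (the freshly simplified model is already in memory, so deleting the old files before re-saving is safe). The external-data round trip in isolation, with illustrative paths:

    import onnx

    model = onnx.load("model.onnx")  # illustrative input
    # Tensors larger than size_threshold bytes are written to side files;
    # onnx.load resolves them transparently on the way back in.
    onnx.save(model, "out/model.onnx", save_as_external_data=True, size_threshold=1_000_000)
    reloaded = onnx.load("out/model.onnx")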
@@ -33,6 +46,4 @@ def optimize(model_path: Path | str) -> None:
     model_path = Path(model_path)

     optimize_ort(model_path, model_path)
-    # onnxsim serializes large models as a blob, which uses much more memory when loading the model at runtime
-    if not any(file.name.startswith("Constant") for file in model_path.parent.iterdir()):
-        optimize_onnxsim(model_path, model_path)
+    optimize_onnxsim(model_path, model_path)
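The removed comment explains the old guard: onnxsim used to re-serialize large models as a single blob, so the simplifier was skipped whenever external-data files (Constant*) were already present. Now that save_onnx spills oversized results to external data, both passes run unconditionally. A smoke test for the in-place pipeline (path illustrative):

    from pathlib import Path

    import onnxruntime as ort

    model_path = Path("models/visual/model.onnx")  # illustrative
    optimize(model_path)  # optimize_ort, then optimize_onnxsim, both in place
    # The optimized model should still load and expose the renamed output.
    session = ort.InferenceSession(model_path.as_posix())
    assert session.get_outputs()[0].name == "embedding"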