Mirror of https://github.com/immich-app/immich (synced 2025-11-14 17:36:12 +00:00)
feat(ml): ML on Rockchip NPUs (#15241)
commit 14c3b99c0f
parent 1e184a70f1
43 changed files with 2417 additions and 4726 deletions
98  machine-learning/export/immich_model_exporter/export.py  Normal file
@@ -0,0 +1,98 @@
from pathlib import Path

import typer
from tenacity import retry, stop_after_attempt, wait_fixed
from typing_extensions import Annotated

from .exporters.constants import DELETE_PATTERNS, SOURCE_TO_METADATA, ModelSource
from .exporters.onnx import export as onnx_export
from .exporters.rknn import export as rknn_export

app = typer.Typer(pretty_exceptions_show_locals=False)


def generate_readme(model_name: str, model_source: ModelSource) -> str:
    (name, link, type) = SOURCE_TO_METADATA[model_source]
    match model_source:
        case ModelSource.MCLIP:
            tags = ["immich", "clip", "multilingual"]
        case ModelSource.OPENCLIP:
            tags = ["immich", "clip"]
            lowered = model_name.lower()
            if "xlm" in lowered or "nllb" in lowered:
                tags.append("multilingual")
        case ModelSource.INSIGHTFACE:
            tags = ["immich", "facial-recognition"]
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    return f"""---
tags:
{" - " + "\n - ".join(tags)}
---
# Model Description

This repo contains ONNX exports for the associated {type} model by {name}. See the [{name}]({link}) repo for more info.

This repo is specifically intended for use with [Immich](https://immich.app/), a self-hosted photo library.
"""


@app.command()
def main(
    model_name: str,
    model_source: ModelSource,
    output_dir: Path = Path("./models"),
    no_cache: bool = False,
    hf_organization: str = "immich-app",
    hf_auth_token: Annotated[str | None, typer.Option(envvar="HF_AUTH_TOKEN")] = None,
) -> None:
    hf_model_name = model_name.split("/")[-1]
    hf_model_name = hf_model_name.replace("xlm-roberta-large", "XLM-Roberta-Large")
    hf_model_name = hf_model_name.replace("xlm-roberta-base", "XLM-Roberta-Base")
    output_dir = output_dir / hf_model_name
    match model_source:
        case ModelSource.MCLIP | ModelSource.OPENCLIP:
            output_dir.mkdir(parents=True, exist_ok=True)
            onnx_export(model_name, model_source, output_dir, no_cache=no_cache)
        case ModelSource.INSIGHTFACE:
            from huggingface_hub import snapshot_download

            # TODO: start from insightface dump instead of downloading from HF
            snapshot_download(f"immich-app/{hf_model_name}", local_dir=output_dir)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")

    try:
        rknn_export(output_dir, no_cache=no_cache)
    except Exception as e:
        print(f"Failed to export model {model_name} to rknn: {e}")
        (output_dir / "rknpu").unlink(missing_ok=True)

    readme_path = output_dir / "README.md"
    if no_cache or not readme_path.exists():
        with open(readme_path, "w") as f:
            f.write(generate_readme(model_name, model_source))

    if hf_auth_token is not None:
        from huggingface_hub import create_repo, upload_folder

        repo_id = f"{hf_organization}/{hf_model_name}"

        @retry(stop=stop_after_attempt(5), wait=wait_fixed(5))
        def upload_model() -> None:
            create_repo(repo_id, exist_ok=True, token=hf_auth_token)
            upload_folder(
                repo_id=repo_id,
                folder_path=output_dir,
                # remote repo files to be deleted before uploading
                # deletion is in the same commit as the upload, so it's atomic
                delete_patterns=DELETE_PATTERNS,
                token=hf_auth_token,
            )

        upload_model()


if __name__ == "__main__":
    typer.run(main)
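A minimal usage sketch (not part of this diff) of the README generator above; the immich_model_exporter.* import paths are assumptions derived from the file layout and relative imports in this commit.

# Hypothetical sketch: an OpenCLIP model name containing "nllb" should pick up
# the extra "multilingual" tag in the generated README front matter.
from immich_model_exporter.export import generate_readme
from immich_model_exporter.exporters.constants import ModelSource

readme = generate_readme("nllb-clip-base-siglip__v1", ModelSource.OPENCLIP)
assert "multilingual" in readme  # "nllb" in the lowered name adds the tag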
@@ -0,0 +1,42 @@
from enum import StrEnum
from typing import NamedTuple


class ModelSource(StrEnum):
    INSIGHTFACE = "insightface"
    MCLIP = "mclip"
    OPENCLIP = "openclip"


class SourceMetadata(NamedTuple):
    name: str
    link: str
    type: str


SOURCE_TO_METADATA = {
    ModelSource.MCLIP: SourceMetadata("M-CLIP", "https://huggingface.co/M-CLIP", "CLIP"),
    ModelSource.OPENCLIP: SourceMetadata("OpenCLIP", "https://github.com/mlfoundations/open_clip", "CLIP"),
    ModelSource.INSIGHTFACE: SourceMetadata(
        "InsightFace", "https://github.com/deepinsight/insightface/tree/master", "facial recognition"
    ),
}

RKNN_SOCS = ["rk3566", "rk3568", "rk3576", "rk3588"]


# glob to delete old UUID blobs when reuploading models
_uuid_char = "[a-fA-F0-9]"
_uuid_glob = _uuid_char * 8 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 4 + "-" + _uuid_char * 12
DELETE_PATTERNS = [
    "**/*onnx*",
    "**/Constant*",
    "**/*.weight",
    "**/*.bias",
    "**/*.proj",
    "**/*in_proj_bias",
    "**/*.npy",
    "**/*.latent",
    "**/*.pos_embed",
    f"**/{_uuid_glob}",
]
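A hedged illustration (not part of this diff) of how the UUID glob behaves: each "[a-fA-F0-9]" class matches one hex character, so a blob named after a UUID matches the last DELETE_PATTERNS entry (the "**/" directory prefix is ignored in this standalone check).

# Hypothetical check using fnmatch on the UUID portion of the pattern.
import fnmatch

uuid_char = "[a-fA-F0-9]"
uuid_glob = uuid_char * 8 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 4 + "-" + uuid_char * 12
print(fnmatch.fnmatch("9e107d9d-372b-4c6e-8f1a-0b2c3d4e5f6a", uuid_glob))  # True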
@@ -0,0 +1,20 @@
from pathlib import Path

from ..constants import ModelSource
from .models import mclip, openclip


def export(
    model_name: str, model_source: ModelSource, output_dir: Path, opset_version: int = 19, no_cache: bool = False
) -> None:
    visual_dir = output_dir / "visual"
    textual_dir = output_dir / "textual"
    match model_source:
        case ModelSource.MCLIP:
            mclip.to_onnx(model_name, opset_version, visual_dir, textual_dir, no_cache=no_cache)
        case ModelSource.OPENCLIP:
            name, _, pretrained = model_name.partition("__")
            config = openclip.OpenCLIPModelConfig(name, pretrained)
            openclip.to_onnx(config, opset_version, visual_dir, textual_dir, no_cache=no_cache)
        case _:
            raise ValueError(f"Unsupported model source {model_source}")
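A small illustration (not part of this diff) of the naming convention the OpenCLIP branch relies on: model names encode "<architecture>__<pretrained tag>", and partition("__") splits at the first double underscore.

# Hypothetical example using a name from run.py below.
name, _, pretrained = "ViT-B-32__laion2b_e16".partition("__")
print(name, pretrained)  # ViT-B-32 laion2b_e16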
@@ -0,0 +1,79 @@
import warnings
from pathlib import Path
from typing import Any

from .openclip import OpenCLIPModelConfig
from .openclip import to_onnx as openclip_to_onnx
from .util import get_model_path

_MCLIP_TO_OPENCLIP = {
    "M-CLIP/XLM-Roberta-Large-Vit-B-32": OpenCLIPModelConfig("ViT-B-32", "openai"),
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus": OpenCLIPModelConfig("ViT-B-16-plus-240", "laion400m_e32"),
    "M-CLIP/LABSE-Vit-L-14": OpenCLIPModelConfig("ViT-L-14", "openai"),
    "M-CLIP/XLM-Roberta-Large-Vit-L-14": OpenCLIPModelConfig("ViT-L-14", "openai"),
}


def to_onnx(
    model_name: str,
    opset_version: int,
    output_dir_visual: Path | str,
    output_dir_textual: Path | str,
    no_cache: bool = False,
) -> tuple[Path, Path]:
    textual_path = get_model_path(output_dir_textual)
    if no_cache or not textual_path.exists():
        import torch
        from multilingual_clip.pt_multilingual_clip import MultilingualCLIP
        from transformers import AutoTokenizer

        torch.backends.mha.set_fastpath_enabled(False)

        model = MultilingualCLIP.from_pretrained(model_name)
        AutoTokenizer.from_pretrained(model_name).save_pretrained(output_dir_textual)

        model.eval()
        for param in model.parameters():
            param.requires_grad_(False)

        _export_text_encoder(model, textual_path, opset_version)
    else:
        print(f"Model {textual_path} already exists, skipping")
    visual_path, _ = openclip_to_onnx(
        _MCLIP_TO_OPENCLIP[model_name], opset_version, output_dir_visual, no_cache=no_cache
    )
    assert visual_path is not None, "Visual model export failed"
    return visual_path, textual_path


def _export_text_encoder(model: Any, output_path: Path | str, opset_version: int) -> None:
    import torch
    from multilingual_clip.pt_multilingual_clip import MultilingualCLIP

    output_path = Path(output_path)

    def forward(self: MultilingualCLIP, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        embs = self.transformer(input_ids, attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        embs = self.LinearTransformation(embs)
        return torch.nn.functional.normalize(embs, dim=-1)

    # unfortunately need to monkeypatch for tracing to work here
    # otherwise it hits the 2GiB protobuf serialization limit
    MultilingualCLIP.forward = forward

    args = (torch.ones(1, 77, dtype=torch.int32), torch.ones(1, 77, dtype=torch.int32))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["input_ids", "attention_mask"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={
            #     "input_ids": {0: "batch_size", 1: "sequence_length"},
            #     "attention_mask": {0: "batch_size", 1: "sequence_length"},
            # },
        )
@@ -0,0 +1,153 @@
import warnings
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Any

from .util import get_model_path, save_config


@dataclass
class OpenCLIPModelConfig:
    name: str
    pretrained: str

    @cached_property
    def model_config(self) -> dict[str, Any]:
        import open_clip

        config: dict[str, Any] | None = open_clip.get_model_config(self.name)
        if config is None:
            raise ValueError(f"Unknown model {self.name}")
        return config

    @property
    def image_size(self) -> int:
        image_size: int = self.model_config["vision_cfg"]["image_size"]
        return image_size

    @property
    def sequence_length(self) -> int:
        context_length: int = self.model_config["text_cfg"].get("context_length", 77)
        return context_length


def to_onnx(
    model_cfg: OpenCLIPModelConfig,
    opset_version: int,
    output_dir_visual: Path | str | None = None,
    output_dir_textual: Path | str | None = None,
    no_cache: bool = False,
) -> tuple[Path | None, Path | None]:
    visual_path = None
    textual_path = None
    if output_dir_visual is not None:
        output_dir_visual = Path(output_dir_visual)
        visual_path = get_model_path(output_dir_visual)

    if output_dir_textual is not None:
        output_dir_textual = Path(output_dir_textual)
        textual_path = get_model_path(output_dir_textual)

    if not no_cache and (
        (textual_path is None or textual_path.exists()) and (visual_path is None or visual_path.exists())
    ):
        print(f"Models {textual_path} and {visual_path} already exist, skipping")
        return visual_path, textual_path

    import open_clip
    import torch
    from transformers import AutoTokenizer

    torch.backends.mha.set_fastpath_enabled(False)

    model = open_clip.create_model(
        model_cfg.name,
        pretrained=model_cfg.pretrained,
        jit=False,
        require_pretrained=True,
    )

    text_vision_cfg = open_clip.get_model_config(model_cfg.name)

    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)

    if visual_path is not None and output_dir_visual is not None:
        if no_cache or not visual_path.exists():
            save_config(
                open_clip.get_model_preprocess_cfg(model),
                output_dir_visual / "preprocess_cfg.json",
            )
            save_config(text_vision_cfg, output_dir_visual.parent / "config.json")
            _export_image_encoder(model, model_cfg, visual_path, opset_version)
        else:
            print(f"Model {visual_path} already exists, skipping")

    if textual_path is not None and output_dir_textual is not None:
        if no_cache or not textual_path.exists():
            tokenizer_name = text_vision_cfg["text_cfg"].get("hf_tokenizer_name", "openai/clip-vit-base-patch32")
            AutoTokenizer.from_pretrained(tokenizer_name).save_pretrained(output_dir_textual)
            _export_text_encoder(model, model_cfg, textual_path, opset_version)
        else:
            print(f"Model {textual_path} already exists, skipping")
    return visual_path, textual_path


def _export_image_encoder(
    model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_image(image: torch.Tensor) -> torch.Tensor:
        output = model.encode_image(image, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    model.forward = encode_image

    args = (torch.randn(1, 3, model_cfg.image_size, model_cfg.image_size),)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["image"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"image": {0: "batch_size"}},
        )


def _export_text_encoder(
    model: Any, model_cfg: OpenCLIPModelConfig, output_path: Path | str, opset_version: int
) -> None:
    import torch

    output_path = Path(output_path)

    def encode_text(text: torch.Tensor) -> torch.Tensor:
        output = model.encode_text(text, normalize=True)
        assert isinstance(output, torch.Tensor)
        return output

    model.forward = encode_text

    args = (torch.ones(1, model_cfg.sequence_length, dtype=torch.int32),)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        torch.onnx.export(
            model,
            args,
            output_path.as_posix(),
            input_names=["text"],
            output_names=["embedding"],
            opset_version=opset_version,
            # dynamic_axes={"text": {0: "batch_size"}},
        )
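A hedged sketch (not part of this diff) of how the config properties above resolve export input shapes from open_clip's model registry; it assumes open_clip is installed, and the import path is inferred from the relative imports in this commit.

# Hypothetical example: ViT-B-32 should report a 224px image size and the
# default 77-token context length used to shape the dummy export inputs.
from immich_model_exporter.exporters.onnx.models.openclip import OpenCLIPModelConfig

cfg = OpenCLIPModelConfig("ViT-B-32", "openai")
print(cfg.image_size, cfg.sequence_length)  # expected: 224 77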
@@ -0,0 +1,15 @@
import json
from pathlib import Path
from typing import Any


def get_model_path(output_dir: Path | str) -> Path:
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    return output_dir / "model.onnx"


def save_config(config: Any, output_path: Path | str) -> None:
    output_path = Path(output_path)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    json.dump(config, output_path.open("w"))
@@ -0,0 +1,96 @@
from pathlib import Path

from .constants import RKNN_SOCS


def _export_platform(
    model_dir: Path,
    target_platform: str,
    inputs: list[str] | None = None,
    input_size_list: list[list[int]] | None = None,
    fuse_matmul_softmax_matmul_to_sdpa: bool = True,
    no_cache: bool = False,
) -> None:
    from rknn.api import RKNN

    input_path = model_dir / "model.onnx"
    output_path = model_dir / "rknpu" / target_platform / "model.rknn"
    if not no_cache and output_path.exists():
        print(f"Model {input_path} already exists at {output_path}, skipping")
        return

    print(f"Exporting model {input_path} to {output_path}")

    rknn = RKNN(verbose=False)

    rknn.config(
        target_platform=target_platform,
        disable_rules=["fuse_matmul_softmax_matmul_to_sdpa"] if not fuse_matmul_softmax_matmul_to_sdpa else [],
        enable_flash_attention=False,
        model_pruning=True,
    )
    ret = rknn.load_onnx(model=input_path.as_posix(), inputs=inputs, input_size_list=input_size_list)

    if ret != 0:
        raise RuntimeError("Load failed!")

    ret = rknn.build(do_quantization=False)

    if ret != 0:
        raise RuntimeError("Build failed!")

    output_path.parent.mkdir(parents=True, exist_ok=True)
    ret = rknn.export_rknn(output_path.as_posix())
    if ret != 0:
        raise RuntimeError("Export rknn model failed!")


def _export_platforms(
    model_dir: Path,
    inputs: list[str] | None = None,
    input_size_list: list[list[int]] | None = None,
    no_cache: bool = False,
) -> None:
    fuse_matmul_softmax_matmul_to_sdpa = True
    for soc in RKNN_SOCS:
        try:
            _export_platform(
                model_dir,
                soc,
                inputs=inputs,
                input_size_list=input_size_list,
                fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
                no_cache=no_cache,
            )
        except Exception as e:
            print(f"Failed to export model for {soc}: {e}")
            if "inputs or 'outputs' must be set" in str(e):
                print("Retrying without fuse_matmul_softmax_matmul_to_sdpa")
                fuse_matmul_softmax_matmul_to_sdpa = False
                _export_platform(
                    model_dir,
                    soc,
                    inputs=inputs,
                    input_size_list=input_size_list,
                    fuse_matmul_softmax_matmul_to_sdpa=fuse_matmul_softmax_matmul_to_sdpa,
                    no_cache=no_cache,
                )


def export(model_dir: Path, no_cache: bool = False) -> None:
    textual = model_dir / "textual"
    visual = model_dir / "visual"
    detection = model_dir / "detection"
    recognition = model_dir / "recognition"

    if textual.is_dir():
        _export_platforms(textual, no_cache=no_cache)

    if visual.is_dir():
        _export_platforms(visual, no_cache=no_cache)

    if detection.is_dir():
        _export_platforms(detection, inputs=["input.1"], input_size_list=[[1, 3, 640, 640]], no_cache=no_cache)

    if recognition.is_dir():
        _export_platforms(recognition, inputs=["input.1"], input_size_list=[[1, 3, 112, 112]], no_cache=no_cache)
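A minimal usage sketch (not part of this diff) for the RKNN export entry point above; it assumes rknn-toolkit2 is installed and that the ONNX models were exported first, and the model directory name is purely illustrative.

# Hypothetical call: converts each subdirectory's model.onnx into
# rknpu/<soc>/model.rknn for every SoC listed in RKNN_SOCS.
from pathlib import Path

from immich_model_exporter.exporters.rknn import export as rknn_export

rknn_export(Path("models/ViT-B-32__openai"), no_cache=False)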
88  machine-learning/export/immich_model_exporter/run.py  Normal file
@@ -0,0 +1,88 @@
import subprocess

from exporters.constants import ModelSource

mclip = [
    "M-CLIP/LABSE-Vit-L-14",
    "M-CLIP/XLM-Roberta-Large-Vit-B-16Plus",
    "M-CLIP/XLM-Roberta-Large-Vit-B-32",
    "M-CLIP/XLM-Roberta-Large-Vit-L-14",
]

openclip = [
    "RN101__openai",
    "RN101__yfcc15m",
    "RN50__cc12m",
    "RN50__openai",
    "RN50__yfcc15m",
    "RN50x16__openai",
    "RN50x4__openai",
    "RN50x64__openai",
    "ViT-B-16-SigLIP-256__webli",
    "ViT-B-16-SigLIP-384__webli",
    "ViT-B-16-SigLIP-512__webli",
    "ViT-B-16-SigLIP-i18n-256__webli",
    "ViT-B-16-SigLIP2__webli",
    "ViT-B-16-SigLIP__webli",
    "ViT-B-16-plus-240__laion400m_e31",
    "ViT-B-16-plus-240__laion400m_e32",
    "ViT-B-16__laion400m_e31",
    "ViT-B-16__laion400m_e32",
    "ViT-B-16__openai",
    "ViT-B-32-SigLIP2-256__webli",
    "ViT-B-32__laion2b-s34b-b79k",
    "ViT-B-32__laion2b_e16",
    "ViT-B-32__laion400m_e31",
    "ViT-B-32__laion400m_e32",
    "ViT-B-32__openai",
    "ViT-H-14-378-quickgelu__dfn5b",
    "ViT-H-14-quickgelu__dfn5b",
    "ViT-H-14__laion2b-s32b-b79k",
    "ViT-L-14-336__openai",
    "ViT-L-14-quickgelu__dfn2b",
    "ViT-L-14__laion2b-s32b-b82k",
    "ViT-L-14__laion400m_e31",
    "ViT-L-14__laion400m_e32",
    "ViT-L-14__openai",
    "ViT-L-16-SigLIP-256__webli",
    "ViT-L-16-SigLIP-384__webli",
    "ViT-L-16-SigLIP2-256__webli",
    "ViT-L-16-SigLIP2-384__webli",
    "ViT-L-16-SigLIP2-512__webli",
    "ViT-SO400M-14-SigLIP-384__webli",
    "ViT-SO400M-14-SigLIP2-378__webli",
    "ViT-SO400M-14-SigLIP2__webli",
    "ViT-SO400M-16-SigLIP2-256__webli",
    "ViT-SO400M-16-SigLIP2-384__webli",
    "ViT-SO400M-16-SigLIP2-512__webli",
    "ViT-gopt-16-SigLIP2-256__webli",
    "ViT-gopt-16-SigLIP2-384__webli",
    "nllb-clip-base-siglip__mrl",
    "nllb-clip-base-siglip__v1",
    "nllb-clip-large-siglip__mrl",
    "nllb-clip-large-siglip__v1",
    "xlm-roberta-base-ViT-B-32__laion5b_s13b_b90k",
    "xlm-roberta-large-ViT-H-14__frozen_laion5b_s13b_b90k",
]

insightface = [
    "antelopev2",
    "buffalo_l",
    "buffalo_m",
    "buffalo_s",
]


def export_models(models: list[str], source: ModelSource) -> None:
    for model in models:
        try:
            print(f"Exporting model {model}")
            subprocess.check_call(["python", "-m", "immich_model_exporter.export", model, source])
        except Exception as e:
            print(f"Failed to export model {model}: {e}")


if __name__ == "__main__":
    export_models(mclip, ModelSource.MCLIP)
    export_models(openclip, ModelSource.OPENCLIP)
    export_models(insightface, ModelSource.INSIGHTFACE)