Mirror of https://github.com/immich-app/immich, synced 2025-11-07 17:27:20 +00:00
feat: Add additional env variables for Machine Learning (#15326)
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Update config.py
* Add additional variables to preload part of ML models
* Add additional variables to preload part of ML models
* Apply formatting
* minor update
* formatting
* root validator
* minor update
* minor update
* minor update
* change to support explicit models
* minor update
* minor change
* minor change
* minor change
* minor update
* add logs, resolve errors
* minor change
* add new environment variables
* minor revisions
* remove comments
This commit is contained in:
parent
5d2e421800
commit
c5476a99b1
4 changed files with 87 additions and 34 deletions
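The commit splits each single preload variable into per-task ones: textual/visual for CLIP and recognition/detection for facial recognition. Below is a minimal, hedged sketch (not the project's actual Settings class) of how nested variables such as MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL could populate the new per-task models via pydantic-settings; the "MACHINE_LEARNING_" prefix, the "__" nested delimiter, the Settings wrapper, and the model name are illustrative assumptions, not taken from this diff.

# Hedged sketch: nested env variables mapped onto the new per-task models.
import os

from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class ClipSettings(BaseModel):
    textual: str | None = None
    visual: str | None = None


class FacialRecognitionSettings(BaseModel):
    recognition: str | None = None
    detection: str | None = None


class PreloadModelData(BaseModel):
    clip: ClipSettings = ClipSettings()
    facial_recognition: FacialRecognitionSettings = FacialRecognitionSettings()


class Settings(BaseSettings):
    # Assumed prefix and nested delimiter, chosen to match the variable names
    # used in this commit; not confirmed by the diff below.
    model_config = SettingsConfigDict(env_prefix="MACHINE_LEARNING_", env_nested_delimiter="__")

    preload: PreloadModelData | None = None


if __name__ == "__main__":
    # Preload only the textual CLIP model; the visual one stays unset (None).
    os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
    print(Settings().preload)
    # clip=ClipSettings(textual='ViT-B-32__openai', visual=None) facial_recognition=...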
@@ -14,9 +14,41 @@ from uvicorn import Server
 from uvicorn.workers import UvicornWorker
 
 
+class ClipSettings(BaseModel):
+    textual: str | None = None
+    visual: str | None = None
+
+
+class FacialRecognitionSettings(BaseModel):
+    recognition: str | None = None
+    detection: str | None = None
+
+
 class PreloadModelData(BaseModel):
-    clip: str | None = None
-    facial_recognition: str | None = None
+    clip: ClipSettings = ClipSettings()
+    facial_recognition: FacialRecognitionSettings = FacialRecognitionSettings()
+
+    clip_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__CLIP", None)
+    facial_recognition_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION", None)
+
+    def update_from_fallbacks(self) -> None:
+        if self.clip_model_fallback:
+            self.clip.textual = self.clip_model_fallback
+            self.clip.visual = self.clip_model_fallback
+            log.warning(
+                "Deprecated env variable: MACHINE_LEARNING_PRELOAD__CLIP. "
+                "Use MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL and "
+                "MACHINE_LEARNING_PRELOAD__CLIP__VISUAL instead."
+            )
+
+        if self.facial_recognition_model_fallback:
+            self.facial_recognition.recognition = self.facial_recognition_model_fallback
+            self.facial_recognition.detection = self.facial_recognition_model_fallback
+            log.warning(
+                "Deprecated environment variable: MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION. "
+                "Use MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION and "
+                "MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION instead."
+            )
+
+
 class MaxBatchSize(BaseModel):
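For the deprecated single variables, update_from_fallbacks() fans one legacy value out to both new per-task fields and logs a warning. A short usage sketch follows, assuming the classes in this diff are importable; the app.config module path and the model name are guesses for illustration only.

from app.config import PreloadModelData  # assumed import path, not confirmed by this diff

# Legacy-style configuration: one value covers both facial-recognition tasks.
preload = PreloadModelData(facial_recognition_model_fallback="buffalo_l")
preload.update_from_fallbacks()  # logs the deprecation warning shown above

assert preload.facial_recognition.recognition == "buffalo_l"
assert preload.facial_recognition.detection == "buffalo_l"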