mirror of https://github.com/immich-app/immich.git, synced 2025-01-06 03:46:47 +01:00
87a0ba3db3
* export clip models
* export to hf
* refactored export code
* export mclip, general refactoring
* cleanup
* updated conda deps
* do transforms with pillow and numpy, add tokenization config to export, general refactoring
* moved conda dockerfile, re-added poetry
* minor fixes
* updated link
* updated tests
* removed `requirements.txt` from workflow
* fixed mimalloc path
* removed torchvision
* cleaner np typing
* review suggestions
* update default model name
* update test
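One bullet above replaces torchvision transforms with Pillow and NumPy. As a rough sketch of what CLIP-style image preprocessing looks like in those terms (the function and constants below are illustrative assumptions, not Immich's actual export code):

from PIL import Image
import numpy as np

# OpenAI CLIP normalization constants; assumed here for illustration.
_MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
_STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)


def preprocess(image: Image.Image, size: int = 224) -> np.ndarray:
    # Resize so the short side equals `size`, then center-crop to size x size.
    scale = size / min(image.size)
    image = image.resize((round(image.width * scale), round(image.height * scale)), Image.BICUBIC)
    left = (image.width - size) // 2
    top = (image.height - size) // 2
    image = image.crop((left, top, left + size, top + size))
    # Scale to [0, 1], normalize per channel, and emit a 1x3xHxW float32 batch.
    array = np.asarray(image.convert("RGB"), dtype=np.float32) / 255.0
    array = (array - _MEAN) / _STD
    return np.expand_dims(array.transpose(2, 0, 1), 0)

Dropping torchvision in favor of this style keeps the inference image free of a heavyweight dependency, since resizing, cropping, and normalization need nothing beyond Pillow and NumPy.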
25 lines
1,004 B
Python
from typing import Any

from app.schemas import ModelType

from .base import InferenceModel
from .clip import MCLIPEncoder, OpenCLIPEncoder, is_mclip, is_openclip
from .facial_recognition import FaceRecognizer
from .image_classification import ImageClassifier


def from_model_type(model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
    match model_type:
        case ModelType.CLIP:
            if is_openclip(model_name):
                return OpenCLIPEncoder(model_name, **model_kwargs)
            elif is_mclip(model_name):
                return MCLIPEncoder(model_name, **model_kwargs)
            else:
                raise ValueError(f"Unknown CLIP model {model_name}")
        case ModelType.FACIAL_RECOGNITION:
            return FaceRecognizer(model_name, **model_kwargs)
        case ModelType.IMAGE_CLASSIFICATION:
            return ImageClassifier(model_name, **model_kwargs)
        case _:
            raise ValueError(f"Unknown model type {model_type}")
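A minimal usage sketch of the factory above (the model names are illustrative examples, not documented defaults):

from app.models import from_model_type
from app.schemas import ModelType

# Hypothetical names: any name accepted by is_openclip() dispatches to
# OpenCLIPEncoder, and an unrecognized CLIP name raises ValueError.
clip_encoder = from_model_type(ModelType.CLIP, "ViT-B-32__openai")
face_recognizer = from_model_type(ModelType.FACIAL_RECOGNITION, "buffalo_l")

The match on ModelType keeps model construction in one place, so callers never need to import the concrete encoder classes directly.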