immich/machine-learning/app/schemas.py
Mert 87a0ba3db3
feat(ml): export clip models to ONNX and host models on Hugging Face (#4700)

* export clip models
* export to hf
  refactored export code
* export mclip, general refactoring
  cleanup
* updated conda deps
* do transforms with pillow and numpy, add tokenization config to export, general refactoring
* moved conda dockerfile, re-added poetry
* minor fixes
* updated link
* updated tests
* removed `requirements.txt` from workflow
* fixed mimalloc path
* removed torchvision
* cleaner np typing
* review suggestions
* update default model name
* update test

2023-10-31 05:02:04 -05:00

Python · 40 lines · 837 B

from enum import StrEnum
from typing import TypeAlias

import numpy as np
from pydantic import BaseModel


def to_lower_camel(string: str) -> str:
    tokens = [token.capitalize() if i > 0 else token for i, token in enumerate(string.split("_"))]
    return "".join(tokens)


class TextModelRequest(BaseModel):
    text: str


class TextResponse(BaseModel):
    __root__: str


class MessageResponse(BaseModel):
    message: str


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int


class ModelType(StrEnum):
    IMAGE_CLASSIFICATION = "image-classification"
    CLIP = "clip"
    FACIAL_RECOGNITION = "facial-recognition"


ndarray_f32: TypeAlias = np.ndarray[int, np.dtype[np.float32]]
ndarray_i64: TypeAlias = np.ndarray[int, np.dtype[np.int64]]
ndarray_i32: TypeAlias = np.ndarray[int, np.dtype[np.int32]]
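
These are plain pydantic v1 models (the `__root__` field on TextResponse implies pydantic < 2). A helper like `to_lower_camel` is the kind of function typically wired up as an `alias_generator` so that snake_case Python fields serialize as camelCase JSON for the web client. The sketch below illustrates that pattern; the `DetectedFace` model and the import path are hypothetical examples, not part of this file.

# Minimal usage sketch, assuming pydantic v1. DetectedFace and the import
# path below are hypothetical, for illustration only.
from pydantic import BaseModel

from app.schemas import BoundingBox, to_lower_camel  # hypothetical import path


class DetectedFace(BaseModel):
    # snake_case fields serialize as camelCase ("boundingBox", "imageWidth", ...)
    # because to_lower_camel generates the field aliases.
    bounding_box: BoundingBox
    image_width: int
    image_height: int

    class Config:
        alias_generator = to_lower_camel
        allow_population_by_field_name = True


face = DetectedFace(
    bounding_box=BoundingBox(x1=10, y1=20, x2=110, y2=220),
    image_width=640,
    image_height=480,
)
print(face.json(by_alias=True))
# {"boundingBox": {"x1": 10, "y1": 20, "x2": 110, "y2": 220}, "imageWidth": 640, "imageHeight": 480}

The ndarray_* aliases are simple typing shims for annotating numpy arrays of a given dtype, e.g. a hypothetical signature `def encode(image: ndarray_f32) -> ndarray_f32: ...`.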