mirror of https://github.com/immich-app/immich.git synced 2024-12-28 06:31:58 +00:00

optimized scrfd code

This commit is contained in:
mertalev 2024-06-09 23:03:34 -04:00
parent fb4fe5d40b
commit 8d2a849edc
No known key found for this signature in database
GPG key ID: 9181CD92C0A1C5E3
6 changed files with 411 additions and 32 deletions


@@ -1,44 +1,33 @@
from pathlib import Path
from typing import Any

import numpy as np
import onnxruntime as ort
from insightface.model_zoo import RetinaFace
from numpy.typing import NDArray

from app.models.base import InferenceModel
from app.models.session import ort_has_batch_dim, ort_squeeze_outputs
from app.models.transforms import decode_cv2
from app.models.session import ort_has_batch_dim, ort_expand_outputs
from app.models.transforms import decode_pil
from app.schemas import FaceDetectionOutput, ModelSession, ModelTask, ModelType
from .scrfd import SCRFD
from PIL import Image
from PIL.ImageOps import pad


class FaceDetector(InferenceModel):
    depends = []
    identity = (ModelType.DETECTION, ModelTask.FACIAL_RECOGNITION)

    def __init__(
        self,
        model_name: str,
        min_score: float = 0.7,
        cache_dir: Path | str | None = None,
        **model_kwargs: Any,
    ) -> None:
        self.min_score = model_kwargs.pop("minScore", min_score)
        super().__init__(model_name, cache_dir, **model_kwargs)

    def _load(self) -> ModelSession:
        session = self._make_session(self.model_path)
        if isinstance(session, ort.InferenceSession) and ort_has_batch_dim(session):
            ort_squeeze_outputs(session)
        self.model = RetinaFace(session=session)
        self.model.prepare(ctx_id=0, det_thresh=self.min_score, input_size=(640, 640))
        if isinstance(session, ort.InferenceSession) and not ort_has_batch_dim(session):
            ort_expand_outputs(session)
        self.model = SCRFD(session=session)
        return session

    def _predict(self, inputs: NDArray[np.uint8] | bytes, **kwargs: Any) -> FaceDetectionOutput:
        inputs = decode_cv2(inputs)
    def _predict(self, inputs: NDArray[np.uint8] | bytes | Image.Image, **kwargs: Any) -> FaceDetectionOutput:
        inputs = self._transform(inputs)
        bboxes, landmarks = self._detect(inputs)
        [bboxes], [landmarks] = self.model.detect(inputs, threshold=kwargs.pop("minScore", 0.7))
        return {
            "boxes": bboxes[:, :4].round(),
            "scores": bboxes[:, 4],

@@ -48,5 +37,7 @@ class FaceDetector(InferenceModel):
    def _detect(self, inputs: NDArray[np.uint8] | bytes) -> tuple[NDArray[np.float32], NDArray[np.float32]]:
        return self.model.detect(inputs)  # type: ignore

    def configure(self, **kwargs: Any) -> None:
        self.model.det_thresh = kwargs.pop("minScore", self.model.det_thresh)

    def _transform(self, inputs: NDArray[np.uint8] | bytes | Image.Image) -> NDArray[np.uint8]:
        image = decode_pil(inputs)
        padded = pad(image, (640, 640), method=Image.Resampling.BICUBIC)
        return np.array(padded, dtype=np.uint8)[None, ...]
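For orientation, the new _transform path letterboxes the decoded image to the fixed 640×640 detector input and prepends a batch dimension before SCRFD sees it. A minimal sketch of that preprocessing, assuming Pillow is available and using a hypothetical local file face.jpg in place of the request bytes:

import numpy as np
from PIL import Image
from PIL.ImageOps import pad

# Hypothetical input file; in the service the image comes from the request instead.
image = Image.open("face.jpg").convert("RGB")

# Letterbox to 640x640 rather than stretching, so face proportions are preserved.
padded = pad(image, (640, 640), method=Image.Resampling.BICUBIC)

# Add the leading batch dimension expected by SCRFD.detect: shape (1, 640, 640, 3).
batch = np.array(padded, dtype=np.uint8)[None, ...]
print(batch.shape)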


@@ -0,0 +1,325 @@
# Based on InsightFace-REST by SthPhoenix https://github.com/SthPhoenix/InsightFace-REST/blob/master/src/api_trt/modules/model_zoo/detectors/scrfd.py
# Primary changes made:
# 1. Removed CuPy-related code
# 2. Adapted proposal generation to be thread-safe
# 3. Added typing
# 4. Assume RGB input
# 5. Removed unused variables

# Copyright 2021 SthPhoenix
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- coding: utf-8 -*-
# Based on Jia Guo reference implementation at
# https://github.com/deepinsight/insightface/blob/master/detection/scrfd/tools/scrfd.py

from __future__ import division

import cv2
import numpy as np
from numba import njit

from app.schemas import ModelSession
from numpy.typing import NDArray


@njit(cache=True, nogil=True)
def nms(dets, threshold: float = 0.4) -> NDArray[np.float32]:
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= threshold)[0]
        order = order[inds + 1]

    return np.asarray(keep)


@njit(fastmath=True, cache=True, nogil=True)
def single_distance2bbox(point: NDArray[np.float32], distance: NDArray[np.float32], stride: int) -> NDArray[np.float32]:
    """
    Fast conversion of single bbox distances to coordinates
    :param point: Anchor point
    :param distance: Bbox distances from anchor point
    :param stride: Current stride scale
    :return: bbox
    """
    distance[0] = point[0] - distance[0] * stride
    distance[1] = point[1] - distance[1] * stride
    distance[2] = point[0] + distance[2] * stride
    distance[3] = point[1] + distance[3] * stride
    return distance


@njit(fastmath=True, cache=True, nogil=True)
def single_distance2kps(point: NDArray[np.float32], distance: NDArray[np.float32], stride: int) -> NDArray[np.float32]:
    """
    Fast conversion of single keypoint distances to coordinates
    :param point: Anchor point
    :param distance: Keypoint distances from anchor point
    :param stride: Current stride scale
    :return: keypoint
    """
    for ix in range(0, distance.shape[0], 2):
        distance[ix] = distance[ix] * stride + point[0]
        distance[ix + 1] = distance[ix + 1] * stride + point[1]
    return distance


@njit(fastmath=True, cache=True, nogil=True)
def generate_proposals(
    score_blob: NDArray[np.float32],
    bbox_blob: NDArray[np.float32],
    kpss_blob: NDArray[np.float32],
    stride: int,
    anchors: NDArray[np.float32],
    threshold: float,
) -> tuple[NDArray[np.float32], NDArray[np.float32], NDArray[np.float32]]:
    """
    Convert distances from anchors to actual coordinates on source image
    and filter proposals by confidence threshold.
    :param score_blob: Raw scores for stride
    :param bbox_blob: Raw bbox distances for stride
    :param kpss_blob: Raw keypoints distances for stride
    :param stride: Stride scale
    :param anchors: Precomputed anchors for stride
    :param threshold: Confidence threshold
    :return: Filtered scores, bboxes and keypoints
    """
    idxs = []
    for ix in range(score_blob.shape[0]):
        if score_blob[ix][0] > threshold:
            idxs.append(ix)

    score_out = np.empty((len(idxs), 1), dtype="float32")
    bbox_out = np.empty((len(idxs), 4), dtype="float32")
    kpss_out = np.empty((len(idxs), 10), dtype="float32")

    for i in range(len(idxs)):
        ix = idxs[i]
        score_out[i] = score_blob[ix]
        bbox_out[i] = single_distance2bbox(anchors[ix], bbox_blob[ix], stride)
        kpss_out[i] = single_distance2kps(anchors[ix], kpss_blob[ix], stride)

    return score_out, bbox_out, kpss_out


@njit(fastmath=True, cache=True, nogil=True)
def filter(
    bboxes_list: NDArray[np.float32],
    kpss_list: NDArray[np.float32],
    scores_list: NDArray[np.float32],
    nms_threshold: float = 0.4,
) -> tuple[NDArray[np.float32], NDArray[np.float32]]:
    """
    Filter postprocessed network outputs with NMS
    :param bboxes_list: Bboxes (np.ndarray)
    :param kpss_list: Keypoints (np.ndarray)
    :param scores_list: Scores (np.ndarray)
    :param nms_threshold: Threshold for NMS IoU
    :return: Face bboxes with scores [t,l,b,r,score], and key points
    """
    pre_det = np.hstack((bboxes_list, scores_list))
    keep = nms(pre_det, threshold=nms_threshold)
    det = pre_det[keep, :]
    kpss = kpss_list[keep, :]
    kpss = kpss.reshape((kpss.shape[0], -1, 2))
    return det, kpss


class SCRFD:
    def __init__(self, session: ModelSession):
        self.session = session
        self.center_cache: dict[tuple[int, int], NDArray[np.float32]] = {}
        self.nms_threshold = 0.4
        self.fmc = 3
        self._feat_stride_fpn = [8, 16, 32]
        self._num_anchors = 2

    def prepare(self, nms_threshold: float = 0.4) -> None:
        """
        Populate class parameters
        :param nms_threshold: Threshold for NMS IoU
        """
        self.nms_threshold = nms_threshold

    def detect(
        self, imgs: NDArray[np.uint8], threshold: float = 0.5
    ) -> tuple[list[NDArray[np.float32]], list[NDArray[np.float32]]]:
        """
        Run detection pipeline for provided images
        :param imgs: Raw images as np.ndarray with NHWC shape
        :param threshold: Confidence threshold
        :return: Face bboxes with scores [t,l,b,r,score], and key points
        """
        height, width = imgs.shape[1:3]
        blob = self._preprocess(imgs)
        net_outs = self._forward(blob)

        batch_bboxes, batch_kpss, batch_scores = self._postprocess(net_outs, height, width, threshold)

        dets_list = []
        kpss_list = []
        for e in range(imgs.shape[0]):
            if len(batch_bboxes[e]) == 0:
                det, kpss = np.zeros((0, 5), dtype="float32"), np.zeros((0, 10), dtype="float32")
            else:
                det, kpss = filter(batch_bboxes[e], batch_kpss[e], batch_scores[e], self.nms_threshold)
            dets_list.append(det)
            kpss_list.append(kpss)

        return dets_list, kpss_list

    @staticmethod
    def _build_anchors(
        input_height: int, input_width: int, strides: list[int], num_anchors: int
    ) -> NDArray[np.float32]:
        """
        Precompute anchor points for provided image size
        :param input_height: Input image height
        :param input_width: Input image width
        :param strides: Model strides
        :param num_anchors: Model num anchors
        :return: box centers
        """
        centers = []
        for stride in strides:
            height = input_height // stride
            width = input_width // stride
            anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
            anchor_centers = (anchor_centers * stride).reshape((-1, 2))
            if num_anchors > 1:
                anchor_centers = np.stack([anchor_centers] * num_anchors, axis=1).reshape((-1, 2))
            centers.append(anchor_centers)
        return centers

    def _preprocess(self, images: NDArray[np.uint8]):
        """
        Normalize images on CPU and convert them to an NCHW blob
        :param images: Raw images as np.ndarray with NHWC shape
        :return: Preprocessed NCHW image blob
        """
        input_size = tuple(images[0].shape[0:2][::-1])
        return cv2.dnn.blobFromImages(images, 1.0 / 128, input_size, (127.5, 127.5, 127.5), swapRB=False)

    def _forward(self, blob: NDArray[np.float32]) -> list[NDArray[np.float32]]:
        """
        Send input data to inference backend.
        :param blob: Preprocessed image blob of shape NCHW
        :return: network outputs
        """
        return self.session.run(None, {"input.1": blob})

    def _postprocess(
        self, net_outs: list[NDArray[np.float32]], height: int, width: int, threshold: float
    ) -> tuple[list[NDArray[np.float32]], list[NDArray[np.float32]], list[NDArray[np.float32]]]:
        """
        Precompute anchor points for provided image size and process network outputs
        :param net_outs: Network outputs
        :param height: Input image height
        :param width: Input image width
        :param threshold: Confidence threshold
        :return: filtered bboxes, keypoints and scores
        """
        key = (height, width)
        if not self.center_cache.get(key):
            self.center_cache[key] = self._build_anchors(height, width, self._feat_stride_fpn, self._num_anchors)
        anchor_centers = self.center_cache[key]
        bboxes, kpss, scores = self._process_strides(net_outs, threshold, anchor_centers)
        return bboxes, kpss, scores

    def _process_strides(
        self, net_outs: list[NDArray[np.float32]], threshold: float, anchors: NDArray[np.float32]
    ) -> tuple[list[NDArray[np.float32]], list[NDArray[np.float32]], list[NDArray[np.float32]]]:
        """
        Process network outputs by strides and return results proposals filtered by threshold
        :param net_outs: Network outputs
        :param threshold: Confidence threshold
        :param anchors: Precomputed anchor centers for all strides
        :return: filtered bboxes, keypoints and scores
        """
        batch_size = net_outs[0].shape[0]
        bboxes_by_img = []
        kpss_by_img = []
        scores_by_img = []
        for batch in range(batch_size):
            scores_strided = []
            bboxes_strided = []
            kpss_strided = []
            for idx, stride in enumerate(self._feat_stride_fpn):
                score_blob = net_outs[idx][batch]
                bbox_blob = net_outs[idx + self.fmc][batch]
                kpss_blob = net_outs[idx + self.fmc * 2][batch]
                stride_anchors = anchors[idx]
                score_list, bbox_list, kpss_list = generate_proposals(
                    score_blob,
                    bbox_blob,
                    kpss_blob,
                    stride,
                    stride_anchors,
                    threshold,
                )
                scores_strided.append(score_list)
                bboxes_strided.append(bbox_list)
                kpss_strided.append(kpss_list)
            bboxes_by_img.append(np.concatenate(bboxes_strided, axis=0))
            kpss_by_img.append(np.concatenate(kpss_strided, axis=0))
            scores_by_img.append(np.concatenate(scores_strided, axis=0))

        return bboxes_by_img, kpss_by_img, scores_by_img
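As a quick sanity check on the njit-compiled nms helper defined above, here is a toy example with made-up boxes; at the default IoU threshold of 0.4, a near-duplicate, lower-scored box is suppressed while a non-overlapping one survives:

import numpy as np

dets = np.array(
    [
        [10.0, 10.0, 100.0, 100.0, 0.95],    # kept: highest score
        [12.0, 12.0, 102.0, 102.0, 0.80],    # suppressed: IoU with the first box is ~0.92
        [200.0, 200.0, 260.0, 260.0, 0.70],  # kept: no overlap with the first box
    ],
    dtype=np.float32,
)
keep = nms(dets, threshold=0.4)
print(dets[keep])  # rows 0 and 2 remain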


@@ -12,12 +12,12 @@ def ort_has_batch_dim(session: ort.InferenceSession) -> bool:
    return session.get_inputs()[0].shape[0] == "batch"


def ort_squeeze_outputs(session: ort.InferenceSession) -> None:
def ort_expand_outputs(session: ort.InferenceSession) -> None:
    original_run = session.run

    def run(output_names: list[str], input_feed: dict[str, NDArray[np.float32]]) -> list[NDArray[np.float32]]:
        out: list[NDArray[np.float32]] = original_run(output_names, input_feed)
        out = [o.squeeze(axis=0) for o in out]
        out = [np.expand_dims(o, axis=0) for o in out]
        return out

    session.run = run
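The renamed helper reflects the new flow: the SCRFD post-processing indexes every output per batch, so sessions whose model outputs lack a batch dimension now get a leading axis added, where the old RetinaFace path squeezed one away. A rough sketch of the wrapper's effect, using a stand-in object instead of a real ort.InferenceSession (the output shape here is made up):

import numpy as np

class FakeSession:
    # Stands in for an ONNX Runtime session whose outputs lack a batch axis.
    def run(self, output_names, input_feed):
        return [np.zeros((12800, 1), dtype=np.float32)]

session = FakeSession()
original_run = session.run

def run(output_names, input_feed):
    out = original_run(output_names, input_feed)
    # Prepend the batch axis, mirroring ort_expand_outputs above.
    return [np.expand_dims(o, axis=0) for o in out]

session.run = run
print(session.run(None, {})[0].shape)  # (1, 12800, 1)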


@@ -3,6 +3,7 @@ from typing import IO

import cv2
import numpy as np
from numba import njit
from numpy.typing import NDArray
from PIL import Image

@@ -30,10 +31,11 @@ def to_numpy(img: Image.Image) -> NDArray[np.float32]:
    return np.asarray(img if img.mode == "RGB" else img.convert("RGB"), dtype=np.float32) / 255.0


@njit(cache=True, fastmath=True, nogil=True)
def normalize(
    img: NDArray[np.float32], mean: float | NDArray[np.float32], std: float | NDArray[np.float32]
) -> NDArray[np.float32]:
    return np.divide(img - mean, std, dtype=np.float32)
    return (img - mean) / std


def get_pil_resampling(resample: str) -> Image.Resampling:
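The normalize rewrite drops the dtype keyword from np.divide, likely because the function is now compiled with @njit and Numba's nopython mode does not accept that ufunc keyword; the result dtype therefore follows the inputs, so callers should pass float32 data (as to_numpy above already produces). A small usage sketch of the normalize function above, with illustrative mean and std values:

import numpy as np

# Hypothetical float32 image in [0, 1], e.g. the output of to_numpy above.
img = np.random.rand(640, 640, 3).astype(np.float32)
mean = np.float32(0.485)
std = np.float32(0.229)

out = normalize(img, mean, std)  # first call triggers JIT compilation
print(out.dtype, out.shape)      # float32 (640, 640, 3) when all inputs are float32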


@@ -1528,6 +1528,36 @@ files = [
lint = ["pre-commit (>=3.3)"]
test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"]
[[package]]
name = "llvmlite"
version = "0.42.0"
description = "lightweight wrapper around basic LLVM functionality"
optional = false
python-versions = ">=3.9"
files = [
{file = "llvmlite-0.42.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3366938e1bf63d26c34fbfb4c8e8d2ded57d11e0567d5bb243d89aab1eb56098"},
{file = "llvmlite-0.42.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c35da49666a21185d21b551fc3caf46a935d54d66969d32d72af109b5e7d2b6f"},
{file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70f44ccc3c6220bd23e0ba698a63ec2a7d3205da0d848804807f37fc243e3f77"},
{file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f8d8717a9073b9e0246998de89929071d15b47f254c10eef2310b9aac033d"},
{file = "llvmlite-0.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:8d90edf400b4ceb3a0e776b6c6e4656d05c7187c439587e06f86afceb66d2be5"},
{file = "llvmlite-0.42.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ae511caed28beaf1252dbaf5f40e663f533b79ceb408c874c01754cafabb9cbf"},
{file = "llvmlite-0.42.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81e674c2fe85576e6c4474e8c7e7aba7901ac0196e864fe7985492b737dbab65"},
{file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb3975787f13eb97629052edb5017f6c170eebc1c14a0433e8089e5db43bcce6"},
{file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5bece0cdf77f22379f19b1959ccd7aee518afa4afbd3656c6365865f84903f9"},
{file = "llvmlite-0.42.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e0c4c11c8c2aa9b0701f91b799cb9134a6a6de51444eff5a9087fc7c1384275"},
{file = "llvmlite-0.42.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:08fa9ab02b0d0179c688a4216b8939138266519aaa0aa94f1195a8542faedb56"},
{file = "llvmlite-0.42.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b2fce7d355068494d1e42202c7aff25d50c462584233013eb4470c33b995e3ee"},
{file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebe66a86dc44634b59a3bc860c7b20d26d9aaffcd30364ebe8ba79161a9121f4"},
{file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d47494552559e00d81bfb836cf1c4d5a5062e54102cc5767d5aa1e77ccd2505c"},
{file = "llvmlite-0.42.0-cp312-cp312-win_amd64.whl", hash = "sha256:05cb7e9b6ce69165ce4d1b994fbdedca0c62492e537b0cc86141b6e2c78d5888"},
{file = "llvmlite-0.42.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdd3888544538a94d7ec99e7c62a0cdd8833609c85f0c23fcb6c5c591aec60ad"},
{file = "llvmlite-0.42.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0936c2067a67fb8816c908d5457d63eba3e2b17e515c5fe00e5ee2bace06040"},
{file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a78ab89f1924fc11482209f6799a7a3fc74ddc80425a7a3e0e8174af0e9e2301"},
{file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7599b65c7af7abbc978dbf345712c60fd596aa5670496561cc10e8a71cebfb2"},
{file = "llvmlite-0.42.0-cp39-cp39-win_amd64.whl", hash = "sha256:43d65cc4e206c2e902c1004dd5418417c4efa6c1d04df05c6c5675a27e8ca90e"},
{file = "llvmlite-0.42.0.tar.gz", hash = "sha256:f92b09243c0cc3f457da8b983f67bd8e1295d0f5b3746c7a1861d7a99403854a"},
]
[[package]]
name = "locust"
version = "2.28.0"
@@ -1864,6 +1894,40 @@ doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.
extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"]
test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"]
[[package]]
name = "numba"
version = "0.59.1"
description = "compiling Python code using LLVM"
optional = false
python-versions = ">=3.9"
files = [
{file = "numba-0.59.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97385a7f12212c4f4bc28f648720a92514bee79d7063e40ef66c2d30600fd18e"},
{file = "numba-0.59.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b77aecf52040de2a1eb1d7e314497b9e56fba17466c80b457b971a25bb1576d"},
{file = "numba-0.59.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3476a4f641bfd58f35ead42f4dcaf5f132569c4647c6f1360ccf18ee4cda3990"},
{file = "numba-0.59.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:525ef3f820931bdae95ee5379c670d5c97289c6520726bc6937a4a7d4230ba24"},
{file = "numba-0.59.1-cp310-cp310-win_amd64.whl", hash = "sha256:990e395e44d192a12105eca3083b61307db7da10e093972ca285c85bef0963d6"},
{file = "numba-0.59.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:43727e7ad20b3ec23ee4fc642f5b61845c71f75dd2825b3c234390c6d8d64051"},
{file = "numba-0.59.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:411df625372c77959570050e861981e9d196cc1da9aa62c3d6a836b5cc338966"},
{file = "numba-0.59.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2801003caa263d1e8497fb84829a7ecfb61738a95f62bc05693fcf1733e978e4"},
{file = "numba-0.59.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dd2842fac03be4e5324ebbbd4d2d0c8c0fc6e0df75c09477dd45b288a0777389"},
{file = "numba-0.59.1-cp311-cp311-win_amd64.whl", hash = "sha256:0594b3dfb369fada1f8bb2e3045cd6c61a564c62e50cf1f86b4666bc721b3450"},
{file = "numba-0.59.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1cce206a3b92836cdf26ef39d3a3242fec25e07f020cc4feec4c4a865e340569"},
{file = "numba-0.59.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8c8b4477763cb1fbd86a3be7050500229417bf60867c93e131fd2626edb02238"},
{file = "numba-0.59.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d80bce4ef7e65bf895c29e3889ca75a29ee01da80266a01d34815918e365835"},
{file = "numba-0.59.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f7ad1d217773e89a9845886401eaaab0a156a90aa2f179fdc125261fd1105096"},
{file = "numba-0.59.1-cp312-cp312-win_amd64.whl", hash = "sha256:5bf68f4d69dd3a9f26a9b23548fa23e3bcb9042e2935257b471d2a8d3c424b7f"},
{file = "numba-0.59.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4e0318ae729de6e5dbe64c75ead1a95eb01fabfe0e2ebed81ebf0344d32db0ae"},
{file = "numba-0.59.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f68589740a8c38bb7dc1b938b55d1145244c8353078eea23895d4f82c8b9ec1"},
{file = "numba-0.59.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:649913a3758891c77c32e2d2a3bcbedf4a69f5fea276d11f9119677c45a422e8"},
{file = "numba-0.59.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9712808e4545270291d76b9a264839ac878c5eb7d8b6e02c970dc0ac29bc8187"},
{file = "numba-0.59.1-cp39-cp39-win_amd64.whl", hash = "sha256:8d51ccd7008a83105ad6a0082b6a2b70f1142dc7cfd76deb8c5a862367eb8c86"},
{file = "numba-0.59.1.tar.gz", hash = "sha256:76f69132b96028d2774ed20415e8c528a34e3299a40581bae178f0994a2f370b"},
]
[package.dependencies]
llvmlite = "==0.42.*"
numpy = ">=1.22,<1.27"
[[package]]
name = "numpy"
version = "1.26.3"
@@ -2037,11 +2101,8 @@ description = "ONNX Runtime is a runtime accelerator for Machine Learning models
optional = false
python-versions = "*"
files = [
{file = "onnxruntime_openvino-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ed693011b472f9a617b2d5c4785d5fa1e1b77f7cb2b02e47b899534ec6c6396"},
{file = "onnxruntime_openvino-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:5152b5e56e83e022ced2986700d68dd8ba7b1466761725ce774f679c5710ab87"},
{file = "onnxruntime_openvino-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ce3b1aa06d6b8b732d314d217028ec4735de5806215c44d3bdbcad03b9260d5"},
{file = "onnxruntime_openvino-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:21133a701bb07ea19e01f48b8c23beee575f2e879f49173843f275d7c91a625a"},
{file = "onnxruntime_openvino-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76824dac3c392ad4b812f29c18be2055ab3bba2e3c111e44baae847b33d5b081"},
]
[package.dependencies]
@@ -2601,7 +2662,6 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3572,4 +3632,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.12"
content-hash = "db51ad1e631b569e106927683a13124252bd80974def1f2edbe23ac87d89c461"
content-hash = "a44e079d565fc1166458690ca2dc5826e198cc07ccab0ebaf71b5ab5e0eed150"


@@ -23,6 +23,7 @@ orjson = ">=3.9.5"
gunicorn = ">=21.1.0"
huggingface-hub = ">=0.20.1,<1.0"
tokenizers = ">=0.15.0,<1.0"
numba = "^0.59.1"
[tool.poetry.group.dev.dependencies]
mypy = ">=1.3.0"