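// Injection token under which an IMachineLearningRepository implementation is registered.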
export const IMachineLearningRepository = 'IMachineLearningRepository';
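/** Rectangle defined by two corner points, (x1, y1) and (x2, y2), in image coordinates. */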
export interface BoundingBox {
  x1: number;
  y1: number;
  x2: number;
  y2: number;
}
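/** Tasks the machine learning server can run. SEARCH is CLIP-based, hence its 'clip' wire value. */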
export enum ModelTask {
  FACIAL_RECOGNITION = 'facial-recognition',
  SEARCH = 'clip',
}
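/** Roles a model can play within a task; see the request types below for how they combine. */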
export enum ModelType {
  DETECTION = 'detection',
  PIPELINE = 'pipeline',
  RECOGNITION = 'recognition',
  TEXTUAL = 'textual',
  VISUAL = 'visual',
}
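/** Input to a model: either a path to an image on disk or a text query. */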
export type ModelPayload = { imagePath: string } | { text: string };
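/** Options common to every model invocation. */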
type ModelOptions = { modelName: string };
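/** Face detection additionally takes a minimum confidence score for returned faces. */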
export type FaceDetectionOptions = ModelOptions & { minScore: number };
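/** Responses to image inputs also report the processed image's dimensions. */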
type VisualResponse = { imageHeight: number; imageWidth: number };
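// Request/response pairs for CLIP search. Requests are keyed by task, then by model type.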
export type ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: ModelOptions } };
export type ClipVisualResponse = { [ModelTask.SEARCH]: number[] } & VisualResponse;
export type ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: ModelOptions } };
export type ClipTextualResponse = { [ModelTask.SEARCH]: number[] };
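/** Facial recognition configures two models at once: a detector and a recognizer. */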
export type FacialRecognitionRequest = {
  [ModelTask.FACIAL_RECOGNITION]: {
    [ModelType.DETECTION]: FaceDetectionOptions;
    [ModelType.RECOGNITION]: ModelOptions;
  };
};
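/** One detected face: its location, embedding vector, and detection confidence. */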
export interface Face {
  boundingBox: BoundingBox;
  embedding: number[];
  score: number;
}
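// The task-keyed wire response for facial recognition, and a flattened shape (DetectedFaces) for consumers.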
export type FacialRecognitionResponse = { [ModelTask.FACIAL_RECOGNITION]: Face[] } & VisualResponse;
export type DetectedFaces = { faces: Face[] } & VisualResponse;
export type MachineLearningRequest = ClipVisualRequest | ClipTextualRequest | FacialRecognitionRequest;
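
// Shapes on the wire, as these types describe them (values below are illustrative only):
//   CLIP visual request:  { "clip": { "visual": { "modelName": "ViT-B-32__openai" } } }
//   matching response:    { "clip": [0.01, -0.23, ...], "imageHeight": 1080, "imageWidth": 1920 }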
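/** Client-side contract for the machine learning server; every method targets the server at the given url. */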
export interface IMachineLearningRepository {
  encodeImage(url: string, imagePath: string, config: ModelOptions): Promise<number[]>;
  encodeText(url: string, text: string, config: ModelOptions): Promise<number[]>;
  detectFaces(url: string, imagePath: string, config: FaceDetectionOptions): Promise<DetectedFaces>;
}
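
// A minimal sketch of an implementation, for illustration only. It assumes a
// hypothetical ML server exposing a single '/predict' endpoint that accepts a JSON
// body merging the payload and the task-keyed request; the real transport (endpoint
// path, multipart upload of image bytes, error handling) is not specified by this file.
export class HttpMachineLearningRepository implements IMachineLearningRepository {
  async encodeImage(url: string, imagePath: string, config: ModelOptions): Promise<number[]> {
    const request: ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: config } };
    const response = (await this.predict(url, { imagePath }, request)) as ClipVisualResponse;
    return response[ModelTask.SEARCH];
  }

  async encodeText(url: string, text: string, config: ModelOptions): Promise<number[]> {
    const request: ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: config } };
    const response = (await this.predict(url, { text }, request)) as ClipTextualResponse;
    return response[ModelTask.SEARCH];
  }

  async detectFaces(url: string, imagePath: string, config: FaceDetectionOptions): Promise<DetectedFaces> {
    const request: FacialRecognitionRequest = {
      [ModelTask.FACIAL_RECOGNITION]: {
        [ModelType.DETECTION]: config,
        // Assumption for this sketch: the detector and recognizer share a model name.
        [ModelType.RECOGNITION]: { modelName: config.modelName },
      },
    };
    const response = (await this.predict(url, { imagePath }, request)) as FacialRecognitionResponse;
    // Flatten the task-keyed wire format into the internal DetectedFaces shape.
    return {
      faces: response[ModelTask.FACIAL_RECOGNITION],
      imageHeight: response.imageHeight,
      imageWidth: response.imageWidth,
    };
  }

  // Hypothetical transport helper; '/predict' is an assumed endpoint name.
  private async predict(url: string, payload: ModelPayload, request: MachineLearningRequest): Promise<unknown> {
    const response = await fetch(new URL('/predict', url), {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ ...payload, ...request }),
    });
    if (!response.ok) {
      throw new Error(`Machine learning request to ${url} failed with status ${response.status}`);
    }
    return response.json();
  }
}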