Merge branch 'main' into openvino-cpu-fix

Aleksander Pejcic, 2025-10-16 02:30:19 -07:00, committed by GitHub
commit c49899e1b4
11 changed files with 179 additions and 25 deletions

@@ -134,7 +134,7 @@ services:
   redis:
     container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
+    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
     healthcheck:
       test: redis-cli ping || exit 1

@@ -56,7 +56,7 @@ services:
   redis:
     container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
+    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
     healthcheck:
       test: redis-cli ping || exit 1
     restart: always

@@ -49,7 +49,7 @@ services:
   redis:
     container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
+    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
     healthcheck:
       test: redis-cli ping || exit 1
     restart: always

@@ -54,7 +54,7 @@ You do not need to redo any machine learning jobs after enabling hardware acceleration.
 #### OpenVINO

 - Integrated GPUs are more likely to experience issues than discrete GPUs, especially for older processors or servers with low RAM.
-- Ensure the server's kernel version is new enough to use the device for hardware accceleration.
+- Ensure the server's kernel version is new enough to use the device for hardware acceleration.
 - Expect higher RAM usage when using OpenVINO compared to CPU processing.

 #### RKNN

@@ -28,7 +28,7 @@ You can read this guide to learn more about [partner sharing](/features/partner-
 ## Public sharing

-You can create a public link to share a group of photos or videos, or an album, with anyone. The public link can be shared via email, social media, or any other method. There are a varierity of options to customize the public link, such as setting an expiration date, password protection, and more. A public shared link is handy when you want to share a group of photos or videos with someone who doesn't have an Immich account and allow the shared user to upload their photos or videos to your account.
+You can create a public link to share a group of photos or videos, or an album, with anyone. The public link can be shared via email, social media, or any other method. There are a variety of options to customize the public link, such as setting an expiration date, password protection, and more. A public shared link is handy when you want to share a group of photos or videos with someone who doesn't have an Immich account and allow the shared user to upload their photos or videos to your account.

 The public shared link is generated with a random URL, which acts as a secret to avoid the link being guessed by unwanted parties.

@@ -37,7 +37,7 @@ In the Immich web UI:
   <img src={require('./img/create-external-library.webp').default} width="50%" title="Create Library button" />

 - In the dialog, select which user should own the new library
-  <img src={require('./img/library-owner.webp').default} width="50%" title="Library owner diaglog" />
+  <img src={require('./img/library-owner.webp').default} width="50%" title="Library owner dialog" />

 - Click the three-dots menu and select **Edit Import Paths**
   <img src={require('./img/edit-import-paths.webp').default} width="50%" title="Edit Import Paths menu option" />

@@ -40,7 +40,7 @@ In the settings of your new project, set "**Project name**" to a name you'll remember.
 ![Set path](../../static/img/synology-container-manager-set-path.png)

-The following screen will give you the option to further customize your `docker-compose.yml` file. Take note of `DB_STORAGE_TYPE: 'HDD'`and uncomment if applicable for your Synology setup.
+The following screen will give you the option to further customize your `docker-compose.yml` file. Take note of `DB_STORAGE_TYPE: 'HDD'` and uncomment if applicable for your Synology setup.

 ![DB storage](../../static/img/synology-container-manager-customize-docker-compose.png)

@@ -87,7 +87,7 @@ After making a backup, please modify your `docker-compose.yml` file with the following changes:
 If you deviated from the defaults of pg14 or pgvectors0.2.0, you must adjust the pg major version and pgvecto.rs version. If you are still using the default `docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0` image, you can just follow the changes above. For example, if the previous image is `docker.io/tensorchord/pgvecto-rs:pg16-v0.3.0`, the new image should be `ghcr.io/immich-app/postgres:16-vectorchord0.3.0-pgvectors0.3.0` instead of the image specified in the diff.
 :::

-After making these changes, you can start Immich as normal. Immich will make some changes to the DB during startup, which can take seconds to minutes to finish, depending on hardware and library size. In particular, it's normal for the server logs to be seemingly stuck at `Reindexing clip_index` and `Reindexing face_index`for some time if you have over 100k assets in Immich and/or Immich is on a relatively weak server. If you see these logs and there are no errors, just give it time.
+After making these changes, you can start Immich as normal. Immich will make some changes to the DB during startup, which can take seconds to minutes to finish, depending on hardware and library size. In particular, it's normal for the server logs to be seemingly stuck at `Reindexing clip_index` and `Reindexing face_index` for some time if you have over 100k assets in Immich and/or Immich is on a relatively weak server. If you see these logs and there are no errors, just give it time.

 :::danger
 After switching to VectorChord, you should not downgrade Immich below 1.133.0.
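
The image-name mapping in the note above is mechanical, so it can be expressed as a tiny helper. This is a sketch only, not code from this commit: `toVectorChordImage` is a hypothetical name, and the `vectorchord0.3.0` component is assumed to stay fixed as in the examples.

```typescript
// Sketch: derive the VectorChord image name from a pgvecto.rs image name.
// Assumes the vectorchord component is pinned at 0.3.0, per the examples above.
const toVectorChordImage = (oldImage: string): string => {
  // Matches e.g. docker.io/tensorchord/pgvecto-rs:pg16-v0.3.0
  const match = /pgvecto-rs:pg(\d+)-v([\d.]+)$/.exec(oldImage);
  if (!match) {
    throw new Error(`Unrecognized pgvecto.rs image: ${oldImage}`);
  }
  const [, pgMajor, pgvectors] = match;
  return `ghcr.io/immich-app/postgres:${pgMajor}-vectorchord0.3.0-pgvectors${pgvectors}`;
};

// toVectorChordImage('docker.io/tensorchord/pgvecto-rs:pg16-v0.3.0')
//   -> 'ghcr.io/immich-app/postgres:16-vectorchord0.3.0-pgvectors0.3.0'
```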

@@ -163,22 +163,22 @@ export class TagRepository {
   }

   async deleteEmptyTags() {
-    // TODO rewrite as a single statement
-    await this.db.transaction().execute(async (tx) => {
-      const result = await tx
-        .selectFrom('asset')
-        .innerJoin('tag_asset', 'tag_asset.assetsId', 'asset.id')
-        .innerJoin('tag_closure', 'tag_closure.id_descendant', 'tag_asset.tagsId')
-        .innerJoin('tag', 'tag.id', 'tag_closure.id_descendant')
-        .select((eb) => ['tag.id', eb.fn.count<number>('asset.id').as('count')])
-        .groupBy('tag.id')
-        .execute();
-
-      const ids = result.filter(({ count }) => count === 0).map(({ id }) => id);
-      if (ids.length > 0) {
-        await this.db.deleteFrom('tag').where('id', 'in', ids).execute();
-        this.logger.log(`Deleted ${ids.length} empty tags`);
-      }
-    });
+    const result = await this.db
+      .deleteFrom('tag')
+      .where(({ not, exists, selectFrom }) =>
+        not(
+          exists(
+            selectFrom('tag_closure')
+              .whereRef('tag.id', '=', 'tag_closure.id_ancestor')
+              .innerJoin('tag_asset', 'tag_closure.id_descendant', 'tag_asset.tagsId'),
+          ),
+        ),
+      )
+      .executeTakeFirst();
+
+    const deletedRows = Number(result.numDeletedRows);
+    if (deletedRows > 0) {
+      this.logger.log(`Deleted ${deletedRows} empty tags`);
+    }
   }
 }
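
For context on this rewrite: a closure table normally stores a self-row for every tag (each tag is its own ancestor), so the `NOT EXISTS` matches exactly the tags whose whole subtree, the tag itself included, has no assets attached. Below is a rough raw-SQL equivalent of what the Kysely builder compiles to, assuming Postgres and Immich's table names; it is a sketch, not output captured from this commit, and `deleteEmptyTagsRaw` is a hypothetical name.

```typescript
import { sql, type Kysely } from 'kysely';
import { DB } from 'src/schema';

// Sketch: approximate raw-SQL form of the new deleteEmptyTags() query.
// A tag survives if any row of its closure subtree (including its self-row)
// joins to tag_asset; otherwise NOT EXISTS matches and the tag is deleted.
const deleteEmptyTagsRaw = (db: Kysely<DB>) =>
  sql`
    DELETE FROM tag
    WHERE NOT EXISTS (
      SELECT 1
      FROM tag_closure
      INNER JOIN tag_asset ON tag_asset."tagsId" = tag_closure.id_descendant
      WHERE tag_closure.id_ancestor = tag.id
    )
  `.execute(db);
```

This is what the old `// TODO rewrite as a single statement` asked for: one round trip, and no select-then-delete transaction to keep consistent.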

@@ -39,6 +39,7 @@ import { StorageRepository } from 'src/repositories/storage.repository';
 import { SyncCheckpointRepository } from 'src/repositories/sync-checkpoint.repository';
 import { SyncRepository } from 'src/repositories/sync.repository';
 import { SystemMetadataRepository } from 'src/repositories/system-metadata.repository';
+import { TagRepository } from 'src/repositories/tag.repository';
 import { TelemetryRepository } from 'src/repositories/telemetry.repository';
 import { UserRepository } from 'src/repositories/user.repository';
 import { VersionHistoryRepository } from 'src/repositories/version-history.repository';
@@ -52,6 +53,8 @@ import { MemoryTable } from 'src/schema/tables/memory.table';
 import { PersonTable } from 'src/schema/tables/person.table';
 import { SessionTable } from 'src/schema/tables/session.table';
 import { StackTable } from 'src/schema/tables/stack.table';
+import { TagAssetTable } from 'src/schema/tables/tag-asset.table';
+import { TagTable } from 'src/schema/tables/tag.table';
 import { UserTable } from 'src/schema/tables/user.table';
 import { BASE_SERVICE_DEPENDENCIES, BaseService } from 'src/services/base.service';
 import { SyncService } from 'src/services/sync.service';
@@ -240,6 +243,18 @@ export class MediumTestContext<S extends BaseService = BaseService> {
       user,
     };
   }
+
+  async newTagAsset(tagBulkAssets: { tagIds: string[]; assetIds: string[] }) {
+    const tagsAssets: Insertable<TagAssetTable>[] = [];
+    for (const tagsId of tagBulkAssets.tagIds) {
+      for (const assetsId of tagBulkAssets.assetIds) {
+        tagsAssets.push({ tagsId, assetsId });
+      }
+    }
+
+    const result = await this.get(TagRepository).upsertAssetIds(tagsAssets);
+    return { tagsAssets, result };
+  }
 }

 export class SyncTestContext extends MediumTestContext<SyncService> {
@@ -318,6 +333,10 @@ const newRealRepository = <T>(key: ClassConstructor<T>, db: Kysely<DB>): T => {
       return new key(LoggingRepository.create());
     }

+    case TagRepository: {
+      return new key(db, LoggingRepository.create());
+    }
+
     case LoggingRepository as unknown as ClassConstructor<LoggingRepository>: {
       return new key() as unknown as T;
     }
@@ -345,7 +364,8 @@ const newMockRepository = <T>(key: ClassConstructor<T>) => {
     case SyncCheckpointRepository:
     case SystemMetadataRepository:
     case UserRepository:
-    case VersionHistoryRepository: {
+    case VersionHistoryRepository:
+    case TagRepository: {
       return automock(key);
     }
@@ -567,6 +587,23 @@ const memoryInsert = (memory: Partial<Insertable<MemoryTable>> = {}) => {
   return { ...defaults, ...memory, id };
 };

+const tagInsert = (tag: Partial<Insertable<TagTable>>) => {
+  const id = tag.id || newUuid();
+  const defaults: Insertable<TagTable> = {
+    id,
+    userId: '',
+    value: '',
+    createdAt: newDate(),
+    updatedAt: newDate(),
+    color: '',
+    parentId: null,
+    updateId: newUuid(),
+  };
+
+  return { ...defaults, ...tag, id };
+};
+
 class CustomWritable extends Writable {
   private data = '';
@@ -619,4 +656,5 @@ export const mediumFactory = {
   memoryInsert,
   loginDetails,
   loginResponse,
+  tagInsert,
 };
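
Taken together, the new helpers compose roughly as follows in a medium test. This is a usage sketch, not code from this commit: `seedTaggedAsset` is a hypothetical name, and the direct Kysely insert of the factory row is an assumption (the new spec below seeds tags through `upsertTags` instead).

```typescript
import { Kysely } from 'kysely';
import { DB } from 'src/schema';
import { mediumFactory, MediumTestContext } from 'test/medium.factory';

// Sketch: seed one asset tagged with one tag using the new helpers.
async function seedTaggedAsset(ctx: MediumTestContext, db: Kysely<DB>) {
  const { user } = await ctx.newUser();
  const { asset } = await ctx.newAsset({ ownerId: user.id });

  // tagInsert fills id, timestamps, and updateId with defaults;
  // callers override only the fields the test cares about.
  const tag = mediumFactory.tagInsert({ userId: user.id, value: 'tag-1' });
  await db.insertInto('tag').values(tag).execute();

  // newTagAsset upserts the full cross-product of tagIds x assetIds
  // through TagRepository.upsertAssetIds.
  await ctx.newTagAsset({ tagIds: [tag.id], assetIds: [asset.id] });

  return { user, asset, tag };
}
```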

@@ -0,0 +1,116 @@
import { Kysely } from 'kysely';
import { JobStatus } from 'src/enum';
import { AccessRepository } from 'src/repositories/access.repository';
import { LoggingRepository } from 'src/repositories/logging.repository';
import { TagRepository } from 'src/repositories/tag.repository';
import { DB } from 'src/schema';
import { TagService } from 'src/services/tag.service';
import { upsertTags } from 'src/utils/tag';
import { newMediumService } from 'test/medium.factory';
import { getKyselyDB } from 'test/utils';

let defaultDatabase: Kysely<DB>;

const setup = (db?: Kysely<DB>) => {
  return newMediumService(TagService, {
    database: db || defaultDatabase,
    real: [TagRepository, AccessRepository],
    mock: [LoggingRepository],
  });
};

beforeAll(async () => {
  defaultDatabase = await getKyselyDB();
});

describe(TagService.name, () => {
  describe('deleteEmptyTags', () => {
    it('single tag exists, not connected to any assets, and is deleted', async () => {
      const { sut, ctx } = setup();
      const { user } = await ctx.newUser();
      const tagRepo = ctx.get(TagRepository);
      const [tag] = await upsertTags(tagRepo, { userId: user.id, tags: ['tag-1'] });

      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toEqual(expect.objectContaining({ id: tag.id }));
      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toBeUndefined();
    });

    it('single tag exists, connected to one asset, and is not deleted', async () => {
      const { sut, ctx } = setup();
      const { user } = await ctx.newUser();
      const { asset } = await ctx.newAsset({ ownerId: user.id });
      const tagRepo = ctx.get(TagRepository);
      const [tag] = await upsertTags(tagRepo, { userId: user.id, tags: ['tag-1'] });
      await ctx.newTagAsset({ tagIds: [tag.id], assetIds: [asset.id] });

      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toEqual(expect.objectContaining({ id: tag.id }));
      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toEqual(expect.objectContaining({ id: tag.id }));
    });

    it('hierarchical tag exists, and the parent is connected to an asset, and the child is deleted', async () => {
      const { sut, ctx } = setup();
      const { user } = await ctx.newUser();
      const { asset } = await ctx.newAsset({ ownerId: user.id });
      const tagRepo = ctx.get(TagRepository);
      const [parentTag, childTag] = await upsertTags(tagRepo, { userId: user.id, tags: ['parent', 'parent/child'] });
      await ctx.newTagAsset({ tagIds: [parentTag.id], assetIds: [asset.id] });

      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
        expect.objectContaining({ id: parentTag.id }),
      );
      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
        expect.objectContaining({ id: childTag.id }),
      );
      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
        expect.objectContaining({ id: parentTag.id }),
      );
      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toBeUndefined();
    });

    it('hierarchical tag exists, and only the child is connected to an asset, and nothing is deleted', async () => {
      const { sut, ctx } = setup();
      const { user } = await ctx.newUser();
      const { asset } = await ctx.newAsset({ ownerId: user.id });
      const tagRepo = ctx.get(TagRepository);
      const [parentTag, childTag] = await upsertTags(tagRepo, { userId: user.id, tags: ['parent', 'parent/child'] });
      await ctx.newTagAsset({ tagIds: [childTag.id], assetIds: [asset.id] });

      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
        expect.objectContaining({ id: parentTag.id }),
      );
      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
        expect.objectContaining({ id: childTag.id }),
      );
      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
        expect.objectContaining({ id: parentTag.id }),
      );
      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
        expect.objectContaining({ id: childTag.id }),
      );
    });

    it('hierarchical tag exists, and neither parent nor child is connected to an asset, and both are deleted', async () => {
      const { sut, ctx } = setup();
      const { user } = await ctx.newUser();
      const tagRepo = ctx.get(TagRepository);
      const [parentTag, childTag] = await upsertTags(tagRepo, { userId: user.id, tags: ['parent', 'parent/child'] });

      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
        expect.objectContaining({ id: parentTag.id }),
      );
      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
        expect.objectContaining({ id: childTag.id }),
      );
      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toBeUndefined();
      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toBeUndefined();
    });
  });
});