From 26e877cba725df00fa3fb0cd42d8cbf2ab8d4426 Mon Sep 17 00:00:00 2001
From: 0xflotus <0xflotus@gmail.com>
Date: Wed, 15 Oct 2025 23:30:33 +0200
Subject: [PATCH 1/3] docs: fix small error (#22890)

* docs: fix small error

* docs: fix small error

* docs: fix small error

* docs: fix small error

* docs: fix small error

* docs: fix small error

* docs: fix small error
---
 docs/docs/features/ml-hardware-acceleration.md | 2 +-
 docs/docs/features/sharing.md                  | 2 +-
 docs/docs/guides/external-library.md           | 2 +-
 docs/docs/install/synology.md                  | 2 +-
 docs/docs/install/upgrading.md                 | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/docs/features/ml-hardware-acceleration.md b/docs/docs/features/ml-hardware-acceleration.md
index 086f93a000..7f92c25449 100644
--- a/docs/docs/features/ml-hardware-acceleration.md
+++ b/docs/docs/features/ml-hardware-acceleration.md
@@ -54,7 +54,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
 #### OpenVINO
 
 - Integrated GPUs are more likely to experience issues than discrete GPUs, especially for older processors or servers with low RAM.
-- Ensure the server's kernel version is new enough to use the device for hardware accceleration.
+- Ensure the server's kernel version is new enough to use the device for hardware acceleration.
 - Expect higher RAM usage when using OpenVINO compared to CPU processing.
 
 #### RKNN
diff --git a/docs/docs/features/sharing.md b/docs/docs/features/sharing.md
index 9ba7470407..c19b4f48e1 100644
--- a/docs/docs/features/sharing.md
+++ b/docs/docs/features/sharing.md
@@ -28,7 +28,7 @@ You can read this guide to learn more about [partner sharing](/features/partner-
 
 ## Public sharing
 
-You can create a public link to share a group of photos or videos, or an album, with anyone. The public link can be shared via email, social media, or any other method. There are a varierity of options to customize the public link, such as setting an expiration date, password protection, and more. Public shared link is handy when you want to share a group of photos or videos with someone who doesn't have an Immich account and allow the shared user to upload their photos or videos to your account.
+You can create a public link to share a group of photos or videos, or an album, with anyone. The public link can be shared via email, social media, or any other method. There are a variety of options to customize the public link, such as setting an expiration date, password protection, and more. Public shared link is handy when you want to share a group of photos or videos with someone who doesn't have an Immich account and allow the shared user to upload their photos or videos to your account.
 
 The public shared link is generated with a random URL, which acts as as a secret to avoid the link being guessed by unwanted parties, for instance.
diff --git a/docs/docs/guides/external-library.md b/docs/docs/guides/external-library.md
index 8ff45f2806..3f366bb0d4 100644
--- a/docs/docs/guides/external-library.md
+++ b/docs/docs/guides/external-library.md
@@ -37,7 +37,7 @@ In the Immich web UI:
 
 - In the dialog, select which user should own the new library
 
-
+
 
 - Click the three-dots menu and select **Edit Import Paths**
diff --git a/docs/docs/install/synology.md b/docs/docs/install/synology.md
index 6cf90b1619..3e5b780db2 100644
--- a/docs/docs/install/synology.md
+++ b/docs/docs/install/synology.md
@@ -40,7 +40,7 @@ In the settings of your new project, set "**Project name**" to a name you'll rem
 
 ![Set path](../../static/img/synology-container-manager-set-path.png)
 
-The following screen will give you the option to further customize your `docker-compose.yml` file. Take note of `DB_STORAGE_TYPE: 'HDD'`and uncomment if applicable for your Synology setup.
+The following screen will give you the option to further customize your `docker-compose.yml` file. Take note of `DB_STORAGE_TYPE: 'HDD'` and uncomment if applicable for your Synology setup.
 
 ![DB storage](../../static/img/synology-container-manager-customize-docker-compose.png)
diff --git a/docs/docs/install/upgrading.md b/docs/docs/install/upgrading.md
index da95222911..bf788cb680 100644
--- a/docs/docs/install/upgrading.md
+++ b/docs/docs/install/upgrading.md
@@ -87,7 +87,7 @@ After making a backup, please modify your `docker-compose.yml` file with the fol
 If you deviated from the defaults of pg14 or pgvectors0.2.0, you must adjust the pg major version and pgvecto.rs version. If you are still using the default `docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0` image, you can just follow the changes above. For example, if the previous image is `docker.io/tensorchord/pgvecto-rs:pg16-v0.3.0`, the new image should be `ghcr.io/immich-app/postgres:16-vectorchord0.3.0-pgvectors0.3.0` instead of the image specified in the diff.
 :::
 
-After making these changes, you can start Immich as normal. Immich will make some changes to the DB during startup, which can take seconds to minutes to finish, depending on hardware and library size. In particular, it’s normal for the server logs to be seemingly stuck at `Reindexing clip_index` and `Reindexing face_index`for some time if you have over 100k assets in Immich and/or Immich is on a relatively weak server. If you see these logs and there are no errors, just give it time.
+After making these changes, you can start Immich as normal. Immich will make some changes to the DB during startup, which can take seconds to minutes to finish, depending on hardware and library size. In particular, it’s normal for the server logs to be seemingly stuck at `Reindexing clip_index` and `Reindexing face_index` for some time if you have over 100k assets in Immich and/or Immich is on a relatively weak server. If you see these logs and there are no errors, just give it time.
 
 :::danger
 After switching to VectorChord, you should not downgrade Immich below 1.133.0.
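The upgrading note above describes a mechanical image rename: keep the pg major version and the pgvecto.rs version, and move onto the VectorChord 0.3.0 image line. As a sketch only, assuming the pattern generalizes exactly as the guide's single example suggests (the `newPostgresImage` helper below is hypothetical, not part of Immich):

```ts
// Hypothetical helper illustrating the rename described in the upgrading
// guide: the pg major version and pgvecto.rs version carry over, while the
// image moves to ghcr.io/immich-app/postgres with VectorChord 0.3.0.
const newPostgresImage = (pgMajor: number, pgvectorsVersion: string): string =>
  `ghcr.io/immich-app/postgres:${pgMajor}-vectorchord0.3.0-pgvectors${pgvectorsVersion}`;

// docker.io/tensorchord/pgvecto-rs:pg16-v0.3.0 maps to:
console.log(newPostgresImage(16, '0.3.0'));
// -> ghcr.io/immich-app/postgres:16-vectorchord0.3.0-pgvectors0.3.0
```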
From 74a9be4a0e0a884ee1c8c90711b76043501f5937 Mon Sep 17 00:00:00 2001
From: Matthew Momjian <50788000+mmomjian@users.noreply.github.com>
Date: Wed, 15 Oct 2025 18:48:36 -0400
Subject: [PATCH 2/3] fix(server): bump valkey to 8 (#22911)

* unpin valkey

* pin
---
 docker/docker-compose.dev.yml  | 2 +-
 docker/docker-compose.prod.yml | 2 +-
 docker/docker-compose.yml      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 0dadaa2cc7..53d64dd0c8 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -134,7 +134,7 @@ services:
 
   redis:
     container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
+    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
     healthcheck:
       test: redis-cli ping || exit 1
diff --git a/docker/docker-compose.prod.yml b/docker/docker-compose.prod.yml
index 7ec77cda92..78ba0653ac 100644
--- a/docker/docker-compose.prod.yml
+++ b/docker/docker-compose.prod.yml
@@ -56,7 +56,7 @@ services:
 
   redis:
     container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
+    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
     healthcheck:
       test: redis-cli ping || exit 1
     restart: always
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 9f0935e17c..e4e0f964d3 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -49,7 +49,7 @@ services:
 
   redis:
     container_name: immich_redis
-    image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
+    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always
From 9d639607c79f2179271550f7e2bfb9d1312c07aa Mon Sep 17 00:00:00 2001
From: Jorge Montejo
Date: Thu, 16 Oct 2025 00:51:57 +0200
Subject: [PATCH 3/3] fix: tag clean up query and add tests (#22633)

* fix delete empty tags query

* rewrite as a single statement

* create tag service medium test

* single tag exists, connected to one asset, and is not deleted

* do not delete parent tag if children have an asset

* hierarchical tag tests

* fix query to match 3 test

* remove transaction and format:fix

* remove transaction and format:fix

* simplify query, handle nested empty tag

* unused helper

---------

Co-authored-by: mertalev <101130780+mertalev@users.noreply.github.com>
---
 server/src/repositories/tag.repository.ts     |  32 ++---
 server/test/medium.factory.ts                 |  40 +++++-
 .../medium/specs/services/tag.service.spec.ts | 116 ++++++++++++++++++
 3 files changed, 171 insertions(+), 17 deletions(-)
 create mode 100644 server/test/medium/specs/services/tag.service.spec.ts

diff --git a/server/src/repositories/tag.repository.ts b/server/src/repositories/tag.repository.ts
index 9bbb62bd8b..d9c44f4ba4 100644
--- a/server/src/repositories/tag.repository.ts
+++ b/server/src/repositories/tag.repository.ts
@@ -163,22 +163,22 @@ export class TagRepository {
   }
 
   async deleteEmptyTags() {
-    // TODO rewrite as a single statement
-    await this.db.transaction().execute(async (tx) => {
-      const result = await tx
-        .selectFrom('asset')
-        .innerJoin('tag_asset', 'tag_asset.assetsId', 'asset.id')
-        .innerJoin('tag_closure', 'tag_closure.id_descendant', 'tag_asset.tagsId')
-        .innerJoin('tag', 'tag.id', 'tag_closure.id_descendant')
-        .select((eb) => ['tag.id', eb.fn.count('asset.id').as('count')])
-        .groupBy('tag.id')
-        .execute();
+    const result = await this.db
+      .deleteFrom('tag')
+      .where(({ not, exists, selectFrom }) =>
+        not(
+          exists(
+            selectFrom('tag_closure')
+              .whereRef('tag.id', '=', 'tag_closure.id_ancestor')
+              .innerJoin('tag_asset', 'tag_closure.id_descendant', 'tag_asset.tagsId'),
+          ),
+        ),
+      )
+      .executeTakeFirst();
 
-      const ids = result.filter(({ count }) => count === 0).map(({ id }) => id);
-      if (ids.length > 0) {
-        await this.db.deleteFrom('tag').where('id', 'in', ids).execute();
-        this.logger.log(`Deleted ${ids.length} empty tags`);
-      }
-    });
+    const deletedRows = Number(result.numDeletedRows);
+    if (deletedRows > 0) {
+      this.logger.log(`Deleted ${deletedRows} empty tags`);
+    }
   }
 }
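The rewritten `deleteEmptyTags` above folds the old select-then-delete transaction into a single `DELETE ... WHERE NOT EXISTS`. Because the closure table holds a self-referencing row for every tag, the subquery finds an asset attached either to the tag itself or to any of its descendants, which is exactly what the hierarchical specs below assert. A rough sketch of the equivalent SQL, expressed here with Kysely's raw `sql` tag (illustrative only; identifier quoting and layout of the real generated query are assumptions):

```ts
import { sql, type Kysely } from 'kysely';

// Illustrative equivalent of the new single-statement cleanup. A tag is kept
// when some tag_closure row links it (as ancestor) to a descendant present
// in tag_asset; every tag is its own descendant via the closure self-row, so
// directly tagged tags survive, while an empty child under a tagged parent
// is deleted.
async function deleteEmptyTagsSketch(db: Kysely<unknown>): Promise<void> {
  await sql`
    DELETE FROM tag
    WHERE NOT EXISTS (
      SELECT FROM tag_closure
      INNER JOIN tag_asset ON tag_closure.id_descendant = tag_asset."tagsId"
      WHERE tag.id = tag_closure.id_ancestor
    )
  `.execute(db);
}
```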
diff --git a/server/test/medium.factory.ts b/server/test/medium.factory.ts
index c7356e2f1b..f802e3113e 100644
--- a/server/test/medium.factory.ts
+++ b/server/test/medium.factory.ts
@@ -39,6 +39,7 @@ import { StorageRepository } from 'src/repositories/storage.repository';
 import { SyncCheckpointRepository } from 'src/repositories/sync-checkpoint.repository';
 import { SyncRepository } from 'src/repositories/sync.repository';
 import { SystemMetadataRepository } from 'src/repositories/system-metadata.repository';
+import { TagRepository } from 'src/repositories/tag.repository';
 import { TelemetryRepository } from 'src/repositories/telemetry.repository';
 import { UserRepository } from 'src/repositories/user.repository';
 import { VersionHistoryRepository } from 'src/repositories/version-history.repository';
@@ -52,6 +53,8 @@ import { MemoryTable } from 'src/schema/tables/memory.table';
 import { PersonTable } from 'src/schema/tables/person.table';
 import { SessionTable } from 'src/schema/tables/session.table';
 import { StackTable } from 'src/schema/tables/stack.table';
+import { TagAssetTable } from 'src/schema/tables/tag-asset.table';
+import { TagTable } from 'src/schema/tables/tag.table';
 import { UserTable } from 'src/schema/tables/user.table';
 import { BASE_SERVICE_DEPENDENCIES, BaseService } from 'src/services/base.service';
 import { SyncService } from 'src/services/sync.service';
@@ -240,6 +243,18 @@ export class MediumTestContext {
       user,
     };
   }
+
+  async newTagAsset(tagBulkAssets: { tagIds: string[]; assetIds: string[] }) {
+    const tagsAssets: Insertable<TagAssetTable>[] = [];
+    for (const tagsId of tagBulkAssets.tagIds) {
+      for (const assetsId of tagBulkAssets.assetIds) {
+        tagsAssets.push({ tagsId, assetsId });
+      }
+    }
+
+    const result = await this.get(TagRepository).upsertAssetIds(tagsAssets);
+    return { tagsAssets, result };
+  }
 }
 
 export class SyncTestContext extends MediumTestContext {
@@ -318,6 +333,10 @@ const newRealRepository = <T>(key: ClassConstructor<T>, db: Kysely<DB>): T => {
       return new key(LoggingRepository.create());
     }
 
+    case TagRepository: {
+      return new key(db, LoggingRepository.create());
+    }
+
     case LoggingRepository as unknown as ClassConstructor<T>: {
       return new key() as unknown as T;
     }
@@ -345,7 +364,8 @@ const newMockRepository = <T>(key: ClassConstructor<T>) => {
     case SyncCheckpointRepository:
     case SystemMetadataRepository:
     case UserRepository:
-    case VersionHistoryRepository: {
+    case VersionHistoryRepository:
+    case TagRepository: {
       return automock(key);
     }
 
@@ -567,6 +587,23 @@ const memoryInsert = (memory: Partial<Insertable<MemoryTable>> = {}) => {
   return { ...defaults, ...memory, id };
 };
 
+const tagInsert = (tag: Partial<Insertable<TagTable>>) => {
+  const id = tag.id || newUuid();
+
+  const defaults: Insertable<TagTable> = {
+    id,
+    userId: '',
+    value: '',
+    createdAt: newDate(),
+    updatedAt: newDate(),
+    color: '',
+    parentId: null,
+    updateId: newUuid(),
+  };
+
+  return { ...defaults, ...tag, id };
+};
+
 class CustomWritable extends Writable {
   private data = '';
 
@@ -619,4 +656,5 @@ export const mediumFactory = {
   memoryInsert,
   loginDetails,
   loginResponse,
+  tagInsert,
 };
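`tagInsert` is exported through `mediumFactory` but is not exercised by the new specs below, which create tags via `upsertTags` instead. A minimal usage sketch (hypothetical; assumes a `Kysely<DB>` handle like the one `getKyselyDB()` returns in the tests, and an existing user row):

```ts
import { Kysely } from 'kysely';
import { DB } from 'src/schema';
import { mediumFactory } from 'test/medium.factory';

// Hypothetical direct-insert usage of the new tagInsert factory helper;
// userId must reference an existing user, and `value` is the tag path.
const insertTag = async (db: Kysely<DB>, userId: string) => {
  const row = mediumFactory.tagInsert({ userId, value: 'tag-1' });
  await db.insertInto('tag').values(row).execute();
  return row;
};
```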
diff --git a/server/test/medium/specs/services/tag.service.spec.ts b/server/test/medium/specs/services/tag.service.spec.ts
new file mode 100644
index 0000000000..2ec498e56d
--- /dev/null
+++ b/server/test/medium/specs/services/tag.service.spec.ts
@@ -0,0 +1,116 @@
+import { Kysely } from 'kysely';
+import { JobStatus } from 'src/enum';
+import { AccessRepository } from 'src/repositories/access.repository';
+import { LoggingRepository } from 'src/repositories/logging.repository';
+import { TagRepository } from 'src/repositories/tag.repository';
+import { DB } from 'src/schema';
+import { TagService } from 'src/services/tag.service';
+import { upsertTags } from 'src/utils/tag';
+import { newMediumService } from 'test/medium.factory';
+import { getKyselyDB } from 'test/utils';
+
+let defaultDatabase: Kysely<DB>;
+
+const setup = (db?: Kysely<DB>) => {
+  return newMediumService(TagService, {
+    database: db || defaultDatabase,
+    real: [TagRepository, AccessRepository],
+    mock: [LoggingRepository],
+  });
+};
+
+beforeAll(async () => {
+  defaultDatabase = await getKyselyDB();
+});
+
+describe(TagService.name, () => {
+  describe('deleteEmptyTags', () => {
+    it('single tag exists, not connected to any assets, and is deleted', async () => {
+      const { sut, ctx } = setup();
+      const { user } = await ctx.newUser();
+      const tagRepo = ctx.get(TagRepository);
+      const [tag] = await upsertTags(tagRepo, { userId: user.id, tags: ['tag-1'] });
+
+      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toEqual(expect.objectContaining({ id: tag.id }));
+      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
+      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toBeUndefined();
+    });
+
+    it('single tag exists, connected to one asset, and is not deleted', async () => {
+      const { sut, ctx } = setup();
+      const { user } = await ctx.newUser();
+      const { asset } = await ctx.newAsset({ ownerId: user.id });
+      const tagRepo = ctx.get(TagRepository);
+      const [tag] = await upsertTags(tagRepo, { userId: user.id, tags: ['tag-1'] });
+
+      await ctx.newTagAsset({ tagIds: [tag.id], assetIds: [asset.id] });
+
+      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toEqual(expect.objectContaining({ id: tag.id }));
+      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
+      await expect(tagRepo.getByValue(user.id, 'tag-1')).resolves.toEqual(expect.objectContaining({ id: tag.id }));
+    });
+
+    it('hierarchical tag exists, and the parent is connected to an asset, and the child is deleted', async () => {
+      const { sut, ctx } = setup();
+      const { user } = await ctx.newUser();
+      const { asset } = await ctx.newAsset({ ownerId: user.id });
+      const tagRepo = ctx.get(TagRepository);
+      const [parentTag, childTag] = await upsertTags(tagRepo, { userId: user.id, tags: ['parent', 'parent/child'] });
+
+      await ctx.newTagAsset({ tagIds: [parentTag.id], assetIds: [asset.id] });
+
+      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
+        expect.objectContaining({ id: parentTag.id }),
+      );
+      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
+        expect.objectContaining({ id: childTag.id }),
+      );
+      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
+      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
+        expect.objectContaining({ id: parentTag.id }),
+      );
+      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toBeUndefined();
+    });
+
+    it('hierarchical tag exists, and only the child is connected to an asset, and nothing is deleted', async () => {
+      const { sut, ctx } = setup();
+      const { user } = await ctx.newUser();
+      const { asset } = await ctx.newAsset({ ownerId: user.id });
+      const tagRepo = ctx.get(TagRepository);
+      const [parentTag, childTag] = await upsertTags(tagRepo, { userId: user.id, tags: ['parent', 'parent/child'] });
+
+      await ctx.newTagAsset({ tagIds: [childTag.id], assetIds: [asset.id] });
+
+      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
+        expect.objectContaining({ id: parentTag.id }),
+      );
+      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
+        expect.objectContaining({ id: childTag.id }),
+      );
+      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
+      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
+        expect.objectContaining({ id: parentTag.id }),
+      );
+      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
+        expect.objectContaining({ id: childTag.id }),
+      );
+    });
+
+    it('hierarchical tag exists, and neither parent nor child is connected to an asset, and both are deleted', async () => {
+      const { sut, ctx } = setup();
+      const { user } = await ctx.newUser();
+      const tagRepo = ctx.get(TagRepository);
+      const [parentTag, childTag] = await upsertTags(tagRepo, { userId: user.id, tags: ['parent', 'parent/child'] });
+
+      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toEqual(
+        expect.objectContaining({ id: parentTag.id }),
+      );
+      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toEqual(
+        expect.objectContaining({ id: childTag.id }),
+      );
+      await expect(sut.handleTagCleanup()).resolves.toBe(JobStatus.Success);
+      await expect(tagRepo.getByValue(user.id, 'parent/child')).resolves.toBeUndefined();
+      await expect(tagRepo.getByValue(user.id, 'parent')).resolves.toBeUndefined();
+    });
+  });
+});
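The specs call `sut.handleTagCleanup()`, which is not part of this diff. Presumably it is a thin job handler on `TagService`; a hypothetical sketch of its shape, inferred only from the assertions above:

```ts
import { JobStatus } from 'src/enum';

// Hypothetical shape of the handler the specs exercise (not in this patch;
// the body is inferred from the call sites, not taken from the real service).
class TagServiceCleanupSketch {
  constructor(private tagRepository: { deleteEmptyTags(): Promise<void> }) {}

  async handleTagCleanup(): Promise<JobStatus> {
    await this.tagRepository.deleteEmptyTags();
    return JobStatus.Success;
  }
}
```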