feat: Use postgres as a queue

We've been keen to try this for a while as it means we can remove redis as a
dependency, which makes Immich easier to set up and run.

This replaces bullmq with a bespoke postgres queue. Jobs in the queue are
processed either immediately via triggers and notifications, or eventually if a
notification is missed.
This commit is contained in:
Thomas Way
2025-04-30 20:43:51 +01:00
parent b845184c80
commit 8c0c8a8d0e
46 changed files with 731 additions and 915 deletions

446
server/package-lock.json generated
View File

@@ -10,7 +10,6 @@
"hasInstallScript": true,
"license": "GNU Affero General Public License version 3",
"dependencies": {
"@nestjs/bullmq": "^11.0.1",
"@nestjs/common": "^11.0.4",
"@nestjs/core": "^11.0.4",
"@nestjs/event-emitter": "^3.0.0",
@@ -24,11 +23,11 @@
"@opentelemetry/exporter-prometheus": "^0.200.0",
"@opentelemetry/sdk-node": "^0.200.0",
"@react-email/components": "^0.0.36",
"@socket.io/redis-adapter": "^8.3.0",
"@socket.io/postgres-adapter": "^0.4.0",
"@types/pg": "^8.11.14",
"archiver": "^7.0.0",
"async-lock": "^1.4.0",
"bcrypt": "^5.1.1",
"bullmq": "^4.8.0",
"chokidar": "^3.5.3",
"class-transformer": "^0.5.1",
"class-validator": "^0.14.0",
@@ -39,9 +38,9 @@
"fast-glob": "^3.3.2",
"fluent-ffmpeg": "^2.1.2",
"geo-tz": "^8.0.0",
"graphile-worker": "^0.16.6",
"handlebars": "^4.7.8",
"i18n-iso-countries": "^7.6.0",
"ioredis": "^5.3.2",
"joi": "^17.10.0",
"js-yaml": "^4.1.0",
"kysely": "^0.28.0",
@@ -54,7 +53,7 @@
"nestjs-otel": "^6.0.0",
"nodemailer": "^6.9.13",
"openid-client": "^6.3.3",
"pg": "^8.11.3",
"pg": "^8.15.6",
"picomatch": "^4.0.2",
"react": "^19.0.0",
"react-dom": "^19.0.0",
@@ -80,7 +79,6 @@
"@nestjs/testing": "^11.0.4",
"@swc/core": "^1.4.14",
"@testcontainers/postgresql": "^10.2.1",
"@testcontainers/redis": "^10.18.0",
"@types/archiver": "^6.0.0",
"@types/async-lock": "^1.4.2",
"@types/bcrypt": "^5.0.0",
@@ -1072,6 +1070,12 @@
"@nestjs/core": "^10.x || ^11.0.0"
}
},
"node_modules/@graphile/logger": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/@graphile/logger/-/logger-0.2.0.tgz",
"integrity": "sha512-jjcWBokl9eb1gVJ85QmoaQ73CQ52xAaOCF29ukRbYNl6lY+ts0ErTaDYOBlejcbUs2OpaiqYLO5uDhyLFzWw4w==",
"license": "MIT"
},
"node_modules/@grpc/grpc-js": {
"version": "1.13.3",
"resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.3.tgz",
@@ -1883,7 +1887,9 @@
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@ioredis/commands/-/commands-1.2.0.tgz",
"integrity": "sha512-Sx1pU8EM64o2BrqNpEO1CNLtKQwyhuXuqyfH7oGKCk+1a33d2r5saW8zNwm3j6BTExtjrv2BxTgzzkMwts6vGg==",
"license": "MIT"
"license": "MIT",
"optional": true,
"peer": true
},
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
@@ -2118,45 +2124,13 @@
"integrity": "sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==",
"license": "MIT"
},
"node_modules/@msgpackr-extract/msgpackr-extract-linux-x64": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-x64/-/msgpackr-extract-linux-x64-3.0.3.tgz",
"integrity": "sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/@nestjs/bull-shared": {
"version": "11.0.2",
"resolved": "https://registry.npmjs.org/@nestjs/bull-shared/-/bull-shared-11.0.2.tgz",
"integrity": "sha512-dFlttJvBqIFD6M8JVFbkrR4Feb39OTAJPJpFVILU50NOJCM4qziRw3dSNG84Q3v+7/M6xUGMFdZRRGvBBKxoSA==",
"license": "MIT",
"dependencies": {
"tslib": "2.8.1"
},
"peerDependencies": {
"@nestjs/common": "^10.0.0 || ^11.0.0",
"@nestjs/core": "^10.0.0 || ^11.0.0"
}
},
"node_modules/@nestjs/bullmq": {
"version": "11.0.2",
"resolved": "https://registry.npmjs.org/@nestjs/bullmq/-/bullmq-11.0.2.tgz",
"integrity": "sha512-Lq6lGpKkETsm0RDcUktlzsthFoE3A5QTMp2FwPi1eztKqKD6/90KS1TcnC9CJFzjpUaYnQzIMrlNs55e+/wsHA==",
"license": "MIT",
"dependencies": {
"@nestjs/bull-shared": "^11.0.2",
"tslib": "2.8.1"
},
"peerDependencies": {
"@nestjs/common": "^10.0.0 || ^11.0.0",
"@nestjs/core": "^10.0.0 || ^11.0.0",
"bullmq": "^3.0.0 || ^4.0.0 || ^5.0.0"
"node_modules/@msgpack/msgpack": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/@msgpack/msgpack/-/msgpack-2.8.0.tgz",
"integrity": "sha512-h9u4u/jiIRKbq25PM+zymTyW6bhTzELvOoUd+AvYriWOAKpLGnIamaET3pnHYoI5iYphAHBI4ayx0MehR+VVPQ==",
"license": "ISC",
"engines": {
"node": ">= 10"
}
},
"node_modules/@nestjs/cli": {
@@ -3787,6 +3761,17 @@
"@opentelemetry/api": "^1.3.0"
}
},
"node_modules/@opentelemetry/instrumentation-pg/node_modules/@types/pg": {
"version": "8.6.1",
"resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz",
"integrity": "sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==",
"license": "MIT",
"dependencies": {
"@types/node": "*",
"pg-protocol": "*",
"pg-types": "^2.2.0"
}
},
"node_modules/@opentelemetry/instrumentation-pino": {
"version": "0.47.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-pino/-/instrumentation-pino-0.47.0.tgz",
@@ -4763,24 +4748,25 @@
"integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==",
"license": "MIT"
},
"node_modules/@socket.io/redis-adapter": {
"version": "8.3.0",
"resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-8.3.0.tgz",
"integrity": "sha512-ly0cra+48hDmChxmIpnESKrc94LjRL80TEmZVscuQ/WWkRP81nNj8W8cCGMqbI4L6NCuAaPRSzZF1a9GlAxxnA==",
"node_modules/@socket.io/postgres-adapter": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/@socket.io/postgres-adapter/-/postgres-adapter-0.4.0.tgz",
"integrity": "sha512-FJQslCIchoT4oMHk0D8HeSi9nhAOE8/snId65zI10ykZsk3MQJnUH45+Jqd75IuQhtxxwrvNxqHmzLJEPw9PnA==",
"license": "MIT",
"dependencies": {
"debug": "~4.3.1",
"notepack.io": "~3.0.1",
"uid2": "1.0.0"
"@msgpack/msgpack": "~2.8.0",
"@types/pg": "^8.6.6",
"debug": "~4.3.4",
"pg": "^8.9.0"
},
"engines": {
"node": ">=10.0.0"
"node": ">=12.0.0"
},
"peerDependencies": {
"socket.io-adapter": "^2.5.4"
}
},
"node_modules/@socket.io/redis-adapter/node_modules/debug": {
"node_modules/@socket.io/postgres-adapter/node_modules/debug": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
"integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
@@ -4914,16 +4900,6 @@
"testcontainers": "^10.24.2"
}
},
"node_modules/@testcontainers/redis": {
"version": "10.24.2",
"resolved": "https://registry.npmjs.org/@testcontainers/redis/-/redis-10.24.2.tgz",
"integrity": "sha512-m4/FZW5ltZPaK9pQTKNipjpBk73Vdj7Ql3sFr26A9dOr0wJyM3Wnc9jeHTNRal7RDnY5rvumXAIUWbBlvKMJEw==",
"dev": true,
"license": "MIT",
"dependencies": {
"testcontainers": "^10.24.2"
}
},
"node_modules/@tokenizer/inflate": {
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz",
@@ -5089,6 +5065,15 @@
"@types/node": "*"
}
},
"node_modules/@types/debug": {
"version": "4.1.12",
"resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
"integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
"license": "MIT",
"dependencies": {
"@types/ms": "*"
}
},
"node_modules/@types/docker-modem": {
"version": "3.0.6",
"resolved": "https://registry.npmjs.org/@types/docker-modem/-/docker-modem-3.0.6.tgz",
@@ -5201,6 +5186,15 @@
"rxjs": "^7.2.0"
}
},
"node_modules/@types/interpret": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@types/interpret/-/interpret-1.1.3.tgz",
"integrity": "sha512-uBaBhj/BhilG58r64mtDb/BEdH51HIQLgP5bmWzc5qCtFMja8dCk/IOJmk36j0lbi9QHwI6sbtUNGuqXdKCAtQ==",
"license": "MIT",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/js-yaml": {
"version": "4.0.9",
"resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz",
@@ -5261,6 +5255,12 @@
"@types/node": "*"
}
},
"node_modules/@types/ms": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
"integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
"license": "MIT"
},
"node_modules/@types/multer": {
"version": "1.4.12",
"resolved": "https://registry.npmjs.org/@types/multer/-/multer-1.4.12.tgz",
@@ -5317,14 +5317,14 @@
"license": "MIT"
},
"node_modules/@types/pg": {
"version": "8.6.1",
"resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz",
"integrity": "sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==",
"version": "8.11.14",
"resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.11.14.tgz",
"integrity": "sha512-qyD11E5R3u0eJmd1lB0WnWKXJGA7s015nyARWljfz5DcX83TKAIlY+QrmvzQTsbIe+hkiFtkyL2gHC6qwF6Fbg==",
"license": "MIT",
"dependencies": {
"@types/node": "*",
"pg-protocol": "*",
"pg-types": "^2.2.0"
"pg-types": "^4.0.1"
}
},
"node_modules/@types/pg-pool": {
@@ -5336,6 +5336,63 @@
"@types/pg": "*"
}
},
"node_modules/@types/pg/node_modules/pg-types": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/pg-types/-/pg-types-4.0.2.tgz",
"integrity": "sha512-cRL3JpS3lKMGsKaWndugWQoLOCoP+Cic8oseVcbr0qhPzYD5DWXK+RZ9LY9wxRf7RQia4SCwQlXk0q6FCPrVng==",
"license": "MIT",
"dependencies": {
"pg-int8": "1.0.1",
"pg-numeric": "1.0.2",
"postgres-array": "~3.0.1",
"postgres-bytea": "~3.0.0",
"postgres-date": "~2.1.0",
"postgres-interval": "^3.0.0",
"postgres-range": "^1.1.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/@types/pg/node_modules/postgres-array": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-3.0.4.tgz",
"integrity": "sha512-nAUSGfSDGOaOAEGwqsRY27GPOea7CNipJPOA7lPbdEpx5Kg3qzdP0AaWC5MlhTWV9s4hFX39nomVZ+C4tnGOJQ==",
"license": "MIT",
"engines": {
"node": ">=12"
}
},
"node_modules/@types/pg/node_modules/postgres-bytea": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-3.0.0.tgz",
"integrity": "sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==",
"license": "MIT",
"dependencies": {
"obuf": "~1.1.2"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/@types/pg/node_modules/postgres-date": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-2.1.0.tgz",
"integrity": "sha512-K7Juri8gtgXVcDfZttFKVmhglp7epKb1K4pgrkLxehjqkrgPhfG6OO8LHLkfaqkbpjNRnra018XwAr1yQFWGcA==",
"license": "MIT",
"engines": {
"node": ">=12"
}
},
"node_modules/@types/pg/node_modules/postgres-interval": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-3.0.0.tgz",
"integrity": "sha512-BSNDnbyZCXSxgA+1f5UU2GmwhoI0aU5yMxRGO8CdFEcY2BQF9xm/7MqKnYoM1nJDk8nONNWDk9WeSmePFhQdlw==",
"license": "MIT",
"engines": {
"node": ">=12"
}
},
"node_modules/@types/picomatch": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/picomatch/-/picomatch-3.0.2.tgz",
@@ -5401,7 +5458,6 @@
"version": "7.7.0",
"resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.7.0.tgz",
"integrity": "sha512-k107IF4+Xr7UHjwDc7Cfd6PRQfbdkiRabXGRjo07b4WyPahFBZCZ1sE+BNxYIJPPg73UkfOsVOLwqVc/6ETrIA==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/send": {
@@ -6885,64 +6941,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/bullmq": {
"version": "4.18.2",
"resolved": "https://registry.npmjs.org/bullmq/-/bullmq-4.18.2.tgz",
"integrity": "sha512-Cx0O98IlGiFw7UBa+zwGz+nH0Pcl1wfTvMVBlsMna3s0219hXroVovh1xPRgomyUcbyciHiugGCkW0RRNZDHYQ==",
"license": "MIT",
"dependencies": {
"cron-parser": "^4.6.0",
"glob": "^8.0.3",
"ioredis": "^5.3.2",
"lodash": "^4.17.21",
"msgpackr": "^1.6.2",
"node-abort-controller": "^3.1.1",
"semver": "^7.5.4",
"tslib": "^2.0.0",
"uuid": "^9.0.0"
}
},
"node_modules/bullmq/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/bullmq/node_modules/glob": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
"integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
"deprecated": "Glob versions prior to v9 are no longer supported",
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^5.0.1",
"once": "^1.3.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/bullmq/node_modules/minimatch": {
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
"integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
"license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/busboy": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
@@ -7530,6 +7528,8 @@
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz",
"integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==",
"license": "Apache-2.0",
"optional": true,
"peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -7932,18 +7932,6 @@
"luxon": "~3.5.0"
}
},
"node_modules/cron-parser": {
"version": "4.9.0",
"resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.9.0.tgz",
"integrity": "sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==",
"license": "MIT",
"dependencies": {
"luxon": "^3.2.1"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/cron/node_modules/luxon": {
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.5.0.tgz",
@@ -8170,6 +8158,8 @@
"resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz",
"integrity": "sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==",
"license": "Apache-2.0",
"optional": true,
"peer": true,
"engines": {
"node": ">=0.10"
}
@@ -10059,6 +10049,64 @@
"dev": true,
"license": "MIT"
},
"node_modules/graphile-config": {
"version": "0.0.1-beta.15",
"resolved": "https://registry.npmjs.org/graphile-config/-/graphile-config-0.0.1-beta.15.tgz",
"integrity": "sha512-J+hYqhZlx5yY7XdU7XjOAqNCAUZU33fEx3PdkNc1cfAAbo1TNMWiib4DFH5XkT8BagJtTyFrMnDCuKxnphCu+g==",
"license": "MIT",
"dependencies": {
"@types/interpret": "^1.1.1",
"@types/node": "^20.5.7",
"@types/semver": "^7.5.1",
"chalk": "^4.1.2",
"debug": "^4.3.4",
"interpret": "^3.1.1",
"semver": "^7.5.4",
"tslib": "^2.6.2",
"yargs": "^17.7.2"
},
"engines": {
"node": ">=16"
}
},
"node_modules/graphile-config/node_modules/@types/node": {
"version": "20.17.32",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.32.tgz",
"integrity": "sha512-zeMXFn8zQ+UkjK4ws0RiOC9EWByyW1CcVmLe+2rQocXRsGEDxUCwPEIVgpsGcLHS/P8JkT0oa3839BRABS0oPw==",
"license": "MIT",
"dependencies": {
"undici-types": "~6.19.2"
}
},
"node_modules/graphile-config/node_modules/undici-types": {
"version": "6.19.8",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz",
"integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==",
"license": "MIT"
},
"node_modules/graphile-worker": {
"version": "0.16.6",
"resolved": "https://registry.npmjs.org/graphile-worker/-/graphile-worker-0.16.6.tgz",
"integrity": "sha512-e7gGYDmGqzju2l83MpzX8vNG/lOtVJiSzI3eZpAFubSxh/cxs7sRrRGBGjzBP1kNG0H+c95etPpNRNlH65PYhw==",
"license": "MIT",
"dependencies": {
"@graphile/logger": "^0.2.0",
"@types/debug": "^4.1.10",
"@types/pg": "^8.10.5",
"cosmiconfig": "^8.3.6",
"graphile-config": "^0.0.1-beta.4",
"json5": "^2.2.3",
"pg": "^8.11.3",
"tslib": "^2.6.2",
"yargs": "^17.7.2"
},
"bin": {
"graphile-worker": "dist/cli.js"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/handlebars": {
"version": "4.7.8",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
@@ -10501,11 +10549,22 @@
"node": ">=8"
}
},
"node_modules/interpret": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/interpret/-/interpret-3.1.1.tgz",
"integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==",
"license": "MIT",
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/ioredis": {
"version": "5.6.1",
"resolved": "https://registry.npmjs.org/ioredis/-/ioredis-5.6.1.tgz",
"integrity": "sha512-UxC0Yv1Y4WRJiGQxQkP0hfdL0/5/6YvdfOOClRgJ0qppSarkhneSa6UvkMkms0AkdGimSH3Ikqm+6mkMmX7vGA==",
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"@ioredis/commands": "^1.1.1",
"cluster-key-slot": "^1.1.0",
@@ -11375,13 +11434,17 @@
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz",
"integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==",
"license": "MIT"
"license": "MIT",
"optional": true,
"peer": true
},
"node_modules/lodash.isarguments": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz",
"integrity": "sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg==",
"license": "MIT"
"license": "MIT",
"optional": true,
"peer": true
},
"node_modules/lodash.merge": {
"version": "4.6.2",
@@ -11908,37 +11971,6 @@
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/msgpackr": {
"version": "1.11.2",
"resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.11.2.tgz",
"integrity": "sha512-F9UngXRlPyWCDEASDpTf6c9uNhGPTqnTeLVt7bN+bU1eajoR/8V9ys2BRaV5C/e5ihE6sJ9uPIKaYt6bFuO32g==",
"license": "MIT",
"optionalDependencies": {
"msgpackr-extract": "^3.0.2"
}
},
"node_modules/msgpackr-extract": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/msgpackr-extract/-/msgpackr-extract-3.0.3.tgz",
"integrity": "sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==",
"hasInstallScript": true,
"license": "MIT",
"optional": true,
"dependencies": {
"node-gyp-build-optional-packages": "5.2.2"
},
"bin": {
"download-msgpackr-prebuilds": "bin/download-prebuilds.js"
},
"optionalDependencies": {
"@msgpackr-extract/msgpackr-extract-darwin-arm64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-darwin-x64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-linux-arm": "3.0.3",
"@msgpackr-extract/msgpackr-extract-linux-arm64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-linux-x64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-win32-x64": "3.0.3"
}
},
"node_modules/multer": {
"version": "1.4.5-lts.2",
"resolved": "https://registry.npmjs.org/multer/-/multer-1.4.5-lts.2.tgz",
@@ -12299,6 +12331,7 @@
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz",
"integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==",
"dev": true,
"license": "MIT"
},
"node_modules/node-addon-api": {
@@ -12366,21 +12399,6 @@
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/node-gyp-build-optional-packages": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.2.2.tgz",
"integrity": "sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==",
"license": "MIT",
"optional": true,
"dependencies": {
"detect-libc": "^2.0.1"
},
"bin": {
"node-gyp-build-optional-packages": "bin.js",
"node-gyp-build-optional-packages-optional": "optional.js",
"node-gyp-build-optional-packages-test": "build-test.js"
}
},
"node_modules/node-gyp/node_modules/abbrev": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz",
@@ -12554,12 +12572,6 @@
"node": ">=0.10.0"
}
},
"node_modules/notepack.io": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-3.0.1.tgz",
"integrity": "sha512-TKC/8zH5pXIAMVQio2TvVDTtPRX+DJPHDqjRbxogtFiByHyzKmy96RA0JtCQJ+WouyyL4A10xomQzgbUT+1jCg==",
"license": "MIT"
},
"node_modules/npmlog": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz",
@@ -12629,6 +12641,12 @@
"node": ">= 0.4"
}
},
"node_modules/obuf": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
"integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==",
"license": "MIT"
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
@@ -13151,13 +13169,13 @@
}
},
"node_modules/pg": {
"version": "8.15.5",
"resolved": "https://registry.npmjs.org/pg/-/pg-8.15.5.tgz",
"integrity": "sha512-EpAhHFQc+aH9VfeffWIVC+XXk6lmAhS9W1FxtxcPXs94yxhrI1I6w/zkWfIOII/OkBv3Be04X3xMOj0kQ78l6w==",
"version": "8.15.6",
"resolved": "https://registry.npmjs.org/pg/-/pg-8.15.6.tgz",
"integrity": "sha512-yvao7YI3GdmmrslNVsZgx9PfntfWrnXwtR+K/DjI0I/sTKif4Z623um+sjVZ1hk5670B+ODjvHDAckKdjmPTsg==",
"license": "MIT",
"dependencies": {
"pg-connection-string": "^2.8.5",
"pg-pool": "^3.9.5",
"pg-pool": "^3.9.6",
"pg-protocol": "^1.9.5",
"pg-types": "^2.1.0",
"pgpass": "1.x"
@@ -13199,6 +13217,15 @@
"node": ">=4.0.0"
}
},
"node_modules/pg-numeric": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/pg-numeric/-/pg-numeric-1.0.2.tgz",
"integrity": "sha512-BM/Thnrw5jm2kKLE5uJkXqqExRUY/toLHda65XgFTBTFYZyopbKjBe29Ii3RbkvlsMoFwD+tHeGaCjjv0gHlyw==",
"license": "ISC",
"engines": {
"node": ">=4"
}
},
"node_modules/pg-pool": {
"version": "3.9.6",
"resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.9.6.tgz",
@@ -13508,6 +13535,12 @@
"node": ">=0.10.0"
}
},
"node_modules/postgres-range": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/postgres-range/-/postgres-range-1.1.4.tgz",
"integrity": "sha512-i/hbxIE9803Alj/6ytL7UHQxRvZkI9O4Sy+J3HGc4F4oo/2eQAjTSNJ0bfxyse3bH0nuVesCk+3IRLaMtG3H6w==",
"license": "MIT"
},
"node_modules/prelude-ls": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
@@ -14219,6 +14252,8 @@
"resolved": "https://registry.npmjs.org/redis-errors/-/redis-errors-1.2.0.tgz",
"integrity": "sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==",
"license": "MIT",
"optional": true,
"peer": true,
"engines": {
"node": ">=4"
}
@@ -14228,6 +14263,8 @@
"resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-3.0.0.tgz",
"integrity": "sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==",
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"redis-errors": "^1.0.0"
},
@@ -15346,7 +15383,9 @@
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/standard-as-callback/-/standard-as-callback-2.1.0.tgz",
"integrity": "sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A==",
"license": "MIT"
"license": "MIT",
"optional": true,
"peer": true
},
"node_modules/statuses": {
"version": "2.0.1",
@@ -16965,15 +17004,6 @@
"node": ">=8"
}
},
"node_modules/uid2": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/uid2/-/uid2-1.0.0.tgz",
"integrity": "sha512-+I6aJUv63YAcY9n4mQreLUt0d4lvwkkopDNmpomkAUz0fAkEMV9pRWxN0EjhW1YfRhcuyHg2v3mwddCDW1+LFQ==",
"license": "MIT",
"engines": {
"node": ">= 4.0.0"
}
},
"node_modules/uint8array-extras": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.4.0.tgz",

View File

@@ -35,7 +35,6 @@
"postinstall": "patch-package"
},
"dependencies": {
"@nestjs/bullmq": "^11.0.1",
"@nestjs/common": "^11.0.4",
"@nestjs/core": "^11.0.4",
"@nestjs/event-emitter": "^3.0.0",
@@ -49,11 +48,11 @@
"@opentelemetry/exporter-prometheus": "^0.200.0",
"@opentelemetry/sdk-node": "^0.200.0",
"@react-email/components": "^0.0.36",
"@socket.io/redis-adapter": "^8.3.0",
"@socket.io/postgres-adapter": "^0.4.0",
"@types/pg": "^8.11.14",
"archiver": "^7.0.0",
"async-lock": "^1.4.0",
"bcrypt": "^5.1.1",
"bullmq": "^4.8.0",
"chokidar": "^3.5.3",
"class-transformer": "^0.5.1",
"class-validator": "^0.14.0",
@@ -64,9 +63,9 @@
"fast-glob": "^3.3.2",
"fluent-ffmpeg": "^2.1.2",
"geo-tz": "^8.0.0",
"graphile-worker": "^0.16.6",
"handlebars": "^4.7.8",
"i18n-iso-countries": "^7.6.0",
"ioredis": "^5.3.2",
"joi": "^17.10.0",
"js-yaml": "^4.1.0",
"kysely": "^0.28.0",
@@ -79,7 +78,7 @@
"nestjs-otel": "^6.0.0",
"nodemailer": "^6.9.13",
"openid-client": "^6.3.3",
"pg": "^8.11.3",
"pg": "^8.15.6",
"picomatch": "^4.0.2",
"react": "^19.0.0",
"react-dom": "^19.0.0",
@@ -105,7 +104,6 @@
"@nestjs/testing": "^11.0.4",
"@swc/core": "^1.4.14",
"@testcontainers/postgresql": "^10.2.1",
"@testcontainers/redis": "^10.18.0",
"@types/archiver": "^6.0.0",
"@types/async-lock": "^1.4.2",
"@types/bcrypt": "^5.0.0",

View File

@@ -1,4 +1,3 @@
import { BullModule } from '@nestjs/bullmq';
import { Inject, Module, OnModuleDestroy, OnModuleInit, ValidationPipe } from '@nestjs/common';
import { APP_FILTER, APP_GUARD, APP_INTERCEPTOR, APP_PIPE } from '@nestjs/core';
import { ScheduleModule, SchedulerRegistry } from '@nestjs/schedule';
@@ -37,11 +36,9 @@ export const middleware = [
];
const configRepository = new ConfigRepository();
const { bull, cls, database, otel } = configRepository.getEnv();
const { cls, database, otel } = configRepository.getEnv();
const imports = [
BullModule.forRoot(bull.config),
BullModule.registerQueue(...bull.queues),
ClsModule.forRoot(cls.config),
OpenTelemetryModule.forRoot(otel),
KyselyModule.forRoot(getKyselyConfig(database.config)),

27
server/src/db.d.ts vendored
View File

@@ -236,6 +236,30 @@ export interface GeodataPlaces {
name: string;
}
export interface GraphileWorkerJobs {
id: Generated<string>;
task_identifier: string;
locked_at: Timestamp | null;
locked_by: string | null;
run_at: Timestamp | null;
attempts: number;
max_attempts: number;
}
export interface GraphileWorkerPrivateJobs {
id: Generated<string>;
task_id: string;
locked_at: Timestamp | null;
locked_by: string | null;
attempts: number;
max_attempts: number;
}
export interface GraphileWorkerPrivateTasks {
id: Generated<string>;
identifier: string;
}
export interface Libraries {
createdAt: Generated<Timestamp>;
deletedAt: Timestamp | null;
@@ -476,6 +500,9 @@ export interface DB {
exif: Exif;
face_search: FaceSearch;
geodata_places: GeodataPlaces;
'graphile_worker.jobs': GraphileWorkerJobs;
'graphile_worker._private_jobs': GraphileWorkerPrivateJobs;
'graphile_worker._private_tasks': GraphileWorkerPrivateTasks;
libraries: Libraries;
memories: Memories;
memories_assets_assets: MemoriesAssetsAssets;

View File

@@ -157,34 +157,4 @@ export class EnvDto {
@IsString()
@Optional()
NO_COLOR?: string;
@IsString()
@Optional()
REDIS_HOSTNAME?: string;
@IsInt()
@Optional()
@Type(() => Number)
REDIS_PORT?: number;
@IsInt()
@Optional()
@Type(() => Number)
REDIS_DBINDEX?: number;
@IsString()
@Optional()
REDIS_USERNAME?: string;
@IsString()
@Optional()
REDIS_PASSWORD?: string;
@IsString()
@Optional()
REDIS_SOCKET?: string;
@IsString()
@Optional()
REDIS_URL?: string;
}

View File

@@ -30,20 +30,15 @@ export class JobCountsDto {
@ApiProperty({ type: 'integer' })
active!: number;
@ApiProperty({ type: 'integer' })
completed!: number;
@ApiProperty({ type: 'integer' })
failed!: number;
waiting!: number;
@ApiProperty({ type: 'integer' })
delayed!: number;
@ApiProperty({ type: 'integer' })
waiting!: number;
@ApiProperty({ type: 'integer' })
paused!: number;
failed!: number;
}
export class QueueStatusDto {
isActive!: boolean;
isPaused!: boolean;
paused!: boolean;
}
export class JobStatusDto {

View File

@@ -204,6 +204,7 @@ export enum SystemMetadataKey {
SYSTEM_FLAGS = 'system-flags',
VERSION_CHECK_STATE = 'version-check-state',
LICENSE = 'license',
QUEUES_STATE = 'queues-state',
}
export enum UserMetadataKey {
@@ -533,10 +534,20 @@ export enum JobName {
}
export enum JobCommand {
// The behavior of start depends on the queue. Usually it is a request to
// reprocess everything associated with the queue from scratch.
START = 'start',
// Pause prevents workers from processing jobs.
PAUSE = 'pause',
// Resume allows workers to continue processing jobs.
RESUME = 'resume',
EMPTY = 'empty',
// Clear removes all pending jobs.
CLEAR = 'clear',
// ClearFailed removes all failed jobs.
CLEAR_FAILED = 'clear-failed',
}

View File

@@ -1,9 +1,10 @@
import { INestApplicationContext } from '@nestjs/common';
import { IoAdapter } from '@nestjs/platform-socket.io';
import { createAdapter } from '@socket.io/redis-adapter';
import { Redis } from 'ioredis';
import { createAdapter } from '@socket.io/postgres-adapter';
import pg, { PoolConfig } from 'pg';
import { ServerOptions } from 'socket.io';
import { ConfigRepository } from 'src/repositories/config.repository';
import { asPostgresConnectionConfig } from 'src/utils/database';
export class WebSocketAdapter extends IoAdapter {
constructor(private app: INestApplicationContext) {
@@ -11,11 +12,11 @@ export class WebSocketAdapter extends IoAdapter {
}
createIOServer(port: number, options?: ServerOptions): any {
const { redis } = this.app.get(ConfigRepository).getEnv();
const server = super.createIOServer(port, options);
const pubClient = new Redis(redis);
const subClient = pubClient.duplicate();
server.adapter(createAdapter(pubClient, subClient));
const configRepository = new ConfigRepository();
const { database } = configRepository.getEnv();
const pool = new pg.Pool(asPostgresConnectionConfig(database.config) as PoolConfig);
server.adapter(createAdapter(pool));
return server;
}
}

View File

@@ -26,38 +26,12 @@ const resetEnv = () => {
'DB_SKIP_MIGRATIONS',
'DB_VECTOR_EXTENSION',
'REDIS_HOSTNAME',
'REDIS_PORT',
'REDIS_DBINDEX',
'REDIS_USERNAME',
'REDIS_PASSWORD',
'REDIS_SOCKET',
'REDIS_URL',
'NO_COLOR',
]) {
delete process.env[env];
}
};
const sentinelConfig = {
sentinels: [
{
host: 'redis-sentinel-node-0',
port: 26_379,
},
{
host: 'redis-sentinel-node-1',
port: 26_379,
},
{
host: 'redis-sentinel-node-2',
port: 26_379,
},
],
name: 'redis-sentinel',
};
describe('getEnv', () => {
beforeEach(() => {
resetEnv();
@@ -108,34 +82,6 @@ describe('getEnv', () => {
});
});
describe('redis', () => {
it('should use defaults', () => {
const { redis } = getEnv();
expect(redis).toEqual({
host: 'redis',
port: 6379,
db: 0,
username: undefined,
password: undefined,
path: undefined,
});
});
it('should parse base64 encoded config, ignore other env', () => {
process.env.REDIS_URL = `ioredis://${Buffer.from(JSON.stringify(sentinelConfig)).toString('base64')}`;
process.env.REDIS_HOSTNAME = 'redis-host';
process.env.REDIS_USERNAME = 'redis-user';
process.env.REDIS_PASSWORD = 'redis-password';
const { redis } = getEnv();
expect(redis).toEqual(sentinelConfig);
});
it('should reject invalid json', () => {
process.env.REDIS_URL = `ioredis://${Buffer.from('{ "invalid json"').toString('base64')}`;
expect(() => getEnv()).toThrowError('Failed to decode redis options');
});
});
describe('noColor', () => {
beforeEach(() => {
delete process.env.NO_COLOR;

View File

@@ -1,25 +1,14 @@
import { RegisterQueueOptions } from '@nestjs/bullmq';
import { Inject, Injectable, Optional } from '@nestjs/common';
import { QueueOptions } from 'bullmq';
import { plainToInstance } from 'class-transformer';
import { validateSync } from 'class-validator';
import { Request, Response } from 'express';
import { RedisOptions } from 'ioredis';
import { CLS_ID, ClsModuleOptions } from 'nestjs-cls';
import { OpenTelemetryModuleOptions } from 'nestjs-otel/lib/interfaces';
import { join } from 'node:path';
import { join, resolve } from 'node:path';
import { citiesFile, excludePaths, IWorker } from 'src/constants';
import { Telemetry } from 'src/decorators';
import { EnvDto } from 'src/dtos/env.dto';
import {
DatabaseExtension,
ImmichEnvironment,
ImmichHeader,
ImmichTelemetry,
ImmichWorker,
LogLevel,
QueueName,
} from 'src/enum';
import { DatabaseExtension, ImmichEnvironment, ImmichHeader, ImmichTelemetry, ImmichWorker, LogLevel } from 'src/enum';
import { DatabaseConnectionParams, VectorExtension } from 'src/types';
import { setDifference } from 'src/utils/set';
@@ -46,11 +35,6 @@ export interface EnvData {
thirdPartySupportUrl?: string;
};
bull: {
config: QueueOptions;
queues: RegisterQueueOptions[];
};
cls: {
config: ClsModuleOptions;
};
@@ -87,8 +71,6 @@ export interface EnvData {
};
};
redis: RedisOptions;
telemetry: {
apiPort: number;
microservicesPort: number;
@@ -149,28 +131,12 @@ const getEnv = (): EnvData => {
const isProd = environment === ImmichEnvironment.PRODUCTION;
const buildFolder = dto.IMMICH_BUILD_DATA || '/build';
const folders = {
// eslint-disable-next-line unicorn/prefer-module
dist: resolve(`${__dirname}/..`),
geodata: join(buildFolder, 'geodata'),
web: join(buildFolder, 'www'),
};
let redisConfig = {
host: dto.REDIS_HOSTNAME || 'redis',
port: dto.REDIS_PORT || 6379,
db: dto.REDIS_DBINDEX || 0,
username: dto.REDIS_USERNAME || undefined,
password: dto.REDIS_PASSWORD || undefined,
path: dto.REDIS_SOCKET || undefined,
};
const redisUrl = dto.REDIS_URL;
if (redisUrl && redisUrl.startsWith('ioredis://')) {
try {
redisConfig = JSON.parse(Buffer.from(redisUrl.slice(10), 'base64').toString());
} catch (error) {
throw new Error(`Failed to decode redis options: ${error}`);
}
}
const includedTelemetries =
dto.IMMICH_TELEMETRY_INCLUDE === 'all'
? new Set(Object.values(ImmichTelemetry))
@@ -218,19 +184,6 @@ const getEnv = (): EnvData => {
thirdPartySupportUrl: dto.IMMICH_THIRD_PARTY_SUPPORT_URL,
},
bull: {
config: {
prefix: 'immich_bull',
connection: { ...redisConfig },
defaultJobOptions: {
attempts: 3,
removeOnComplete: true,
removeOnFail: false,
},
},
queues: Object.values(QueueName).map((name) => ({ name })),
},
cls: {
config: {
middleware: {
@@ -269,8 +222,6 @@ const getEnv = (): EnvData => {
},
},
redis: redisConfig,
resourcePaths: {
lockFile: join(buildFolder, 'build-lock.json'),
geodata: {

View File

@@ -64,6 +64,9 @@ type EventMap = {
'assets.delete': [{ assetIds: string[]; userId: string }];
'assets.restore': [{ assetIds: string[]; userId: string }];
'queue.pause': [QueueName];
'queue.resume': [QueueName];
'job.start': [QueueName, JobItem];
'job.failed': [{ job: JobItem; error: Error | any }];
@@ -85,7 +88,7 @@ type EventMap = {
'websocket.connect': [{ userId: string }];
};
export const serverEvents = ['config.update'] as const;
export const serverEvents = ['config.update', 'queue.pause', 'queue.resume'] as const;
export type ServerEvents = (typeof serverEvents)[number];
export type EmitEvent = keyof EventMap;

View File

@@ -1,15 +1,20 @@
import { getQueueToken } from '@nestjs/bullmq';
import { Injectable } from '@nestjs/common';
import { ModuleRef, Reflector } from '@nestjs/core';
import { JobsOptions, Queue, Worker } from 'bullmq';
import { ClassConstructor } from 'class-transformer';
import { setTimeout } from 'node:timers/promises';
import { JobConfig } from 'src/decorators';
import { JobName, JobStatus, MetadataKey, QueueCleanType, QueueName } from 'src/enum';
import { makeWorkerUtils, run, Runner, TaskSpec, WorkerUtils } from 'graphile-worker';
import { Kysely } from 'kysely';
import { DateTime, Duration } from 'luxon';
import { InjectKysely } from 'nestjs-kysely';
import pg, { PoolConfig } from 'pg';
import { DB } from 'src/db';
import { GenerateSql, JobConfig } from 'src/decorators';
import { JobName, JobStatus, MetadataKey, QueueName, SystemMetadataKey } from 'src/enum';
import { ConfigRepository } from 'src/repositories/config.repository';
import { EventRepository } from 'src/repositories/event.repository';
import { LoggingRepository } from 'src/repositories/logging.repository';
import { IEntityJob, JobCounts, JobItem, JobOf, QueueStatus } from 'src/types';
import { SystemMetadataRepository } from 'src/repositories/system-metadata.repository';
import { JobCounts, JobItem, JobOf, QueueStatus } from 'src/types';
import { asPostgresConnectionConfig } from 'src/utils/database';
import { getKeyByValue, getMethodNames, ImmichStartupError } from 'src/utils/misc';
type JobMapItem = {
@@ -19,26 +24,38 @@ type JobMapItem = {
label: string;
};
type QueueConfiguration = {
paused: boolean;
concurrency: number;
};
@Injectable()
export class JobRepository {
private workers: Partial<Record<QueueName, Worker>> = {};
private handlers: Partial<Record<JobName, JobMapItem>> = {};
// todo inject the pg pool
private pool?: pg.Pool;
// todo inject worker utils?
private workerUtils?: WorkerUtils;
private queueConfig: Record<string, QueueConfiguration> = {};
private runners: Record<string, Runner> = {};
constructor(
private moduleRef: ModuleRef,
private configRepository: ConfigRepository,
private eventRepository: EventRepository,
@InjectKysely() private db: Kysely<DB>,
private logger: LoggingRepository,
private moduleRef: ModuleRef,
private eventRepository: EventRepository,
private configRepository: ConfigRepository,
private systemMetadataRepository: SystemMetadataRepository,
) {
this.logger.setContext(JobRepository.name);
logger.setContext(JobRepository.name);
}
setup(services: ClassConstructor<unknown>[]) {
async setup(services: ClassConstructor<unknown>[]) {
const reflector = this.moduleRef.get(Reflector, { strict: false });
// discovery
for (const Service of services) {
const instance = this.moduleRef.get<any>(Service);
for (const service of services) {
const instance = this.moduleRef.get<any>(service);
for (const methodName of getMethodNames(instance)) {
const handler = instance[methodName];
const config = reflector.get<JobConfig>(MetadataKey.JOB_CONFIG, handler);
@@ -47,7 +64,7 @@ export class JobRepository {
}
const { name: jobName, queue: queueName } = config;
const label = `${Service.name}.${handler.name}`;
const label = `${service.name}.${handler.name}`;
// one handler per job
if (this.handlers[jobName]) {
@@ -70,176 +87,217 @@ export class JobRepository {
}
}
// no missing handlers
for (const [jobKey, jobName] of Object.entries(JobName)) {
const item = this.handlers[jobName];
if (!item) {
const errorMessage = `Failed to find job handler for Job.${jobKey} ("${jobName}")`;
this.logger.error(
`${errorMessage}. Make sure to add the @OnJob({ name: JobName.${jobKey}, queue: QueueName.XYZ }) decorator for the new job.`,
);
throw new ImmichStartupError(errorMessage);
}
const { database } = this.configRepository.getEnv();
const pool = new pg.Pool({
...asPostgresConnectionConfig(database.config),
max: 100,
} as PoolConfig);
// todo: remove debug info
setInterval(() => {
this.logger.log(`connections:
total: ${pool.totalCount}
idle: ${pool.idleCount}
waiting: ${pool.waitingCount}`);
}, 5000);
pool.on('connect', (client) => {
client.setMaxListeners(200);
});
this.pool = pool;
this.workerUtils = await makeWorkerUtils({ pgPool: pool });
}
async start(queueName: QueueName, concurrency?: number): Promise<void> {
if (concurrency) {
this.queueConfig[queueName] = {
...this.queueConfig[queueName],
concurrency,
};
} else {
concurrency = this.queueConfig[queueName].concurrency;
}
if (this.queueConfig[queueName].paused) {
return;
}
await this.stop(queueName);
this.runners[queueName] = await run({
concurrency,
taskList: {
[queueName]: async (payload: unknown): Promise<void> => {
this.logger.log(`Job ${queueName} started with payload: ${JSON.stringify(payload)}`);
await this.eventRepository.emit('job.start', queueName, payload as JobItem);
},
},
pgPool: this.pool,
});
}
async stop(queueName: QueueName): Promise<void> {
const runner = this.runners[queueName];
if (runner) {
await runner.stop();
delete this.runners[queueName];
}
}
startWorkers() {
const { bull } = this.configRepository.getEnv();
for (const queueName of Object.values(QueueName)) {
this.logger.debug(`Starting worker for queue: ${queueName}`);
this.workers[queueName] = new Worker(
queueName,
(job) => this.eventRepository.emit('job.start', queueName, job as JobItem),
{ ...bull.config, concurrency: 1 },
);
}
async pause(queueName: QueueName): Promise<void> {
await this.setState(queueName, true);
await this.stop(queueName);
}
async run({ name, data }: JobItem) {
async resume(queueName: QueueName): Promise<void> {
await this.setState(queueName, false);
await this.start(queueName);
}
private async setState(queueName: QueueName, paused: boolean): Promise<void> {
const state = await this.systemMetadataRepository.get(SystemMetadataKey.QUEUES_STATE);
await this.systemMetadataRepository.set(SystemMetadataKey.QUEUES_STATE, {
...state,
[queueName]: { paused },
});
this.queueConfig[queueName] = {
...this.queueConfig[queueName],
paused,
};
}
  // todo: we should consolidate queue and job names and have queues be
  // homogeneous.
  //
  // The reason there are multiple kinds of jobs per queue is so that one
  // concurrency setting applies to all of them. We could instead introduce
  // "queue groups", where a worker runs against a group of queues rather
  // than a single queue, and achieve the same outcome.
private getQueueName(name: JobName) {
return (this.handlers[name] as JobMapItem).queueName;
}
async run({ name, data }: JobItem): Promise<JobStatus> {
const item = this.handlers[name as JobName];
if (!item) {
this.logger.warn(`Skipping unknown job: "${name}"`);
return JobStatus.SKIPPED;
}
return item.handler(data);
}
setConcurrency(queueName: QueueName, concurrency: number) {
const worker = this.workers[queueName];
if (!worker) {
this.logger.warn(`Unable to set queue concurrency, worker not found: '${queueName}'`);
return;
}
worker.concurrency = concurrency;
}
async getQueueStatus(name: QueueName): Promise<QueueStatus> {
const queue = this.getQueue(name);
return {
isActive: !!(await queue.getActiveCount()),
isPaused: await queue.isPaused(),
};
}
pause(name: QueueName) {
return this.getQueue(name).pause();
}
resume(name: QueueName) {
return this.getQueue(name).resume();
}
empty(name: QueueName) {
return this.getQueue(name).drain();
}
clear(name: QueueName, type: QueueCleanType) {
return this.getQueue(name).clean(0, 1000, type);
}
getJobCounts(name: QueueName): Promise<JobCounts> {
return this.getQueue(name).getJobCounts(
'active',
'completed',
'failed',
'delayed',
'waiting',
'paused',
) as unknown as Promise<JobCounts>;
}
private getQueueName(name: JobName) {
return (this.handlers[name] as JobMapItem).queueName;
async queue(item: JobItem): Promise<void> {
this.logger.log(`Queueing job: ${this.getQueueName(item.name)}, data: ${JSON.stringify(item)}`);
await this.workerUtils!.addJob(this.getQueueName(item.name), item, this.getJobOptions(item));
}
async queueAll(items: JobItem[]): Promise<void> {
if (items.length === 0) {
return;
}
const promises = [];
const itemsByQueue = {} as Record<string, (JobItem & { data: any; options: JobsOptions | undefined })[]>;
for (const item of items) {
const queueName = this.getQueueName(item.name);
const job = {
name: item.name,
data: item.data || {},
options: this.getJobOptions(item) || undefined,
} as JobItem & { data: any; options: JobsOptions | undefined };
if (job.options?.jobId) {
// need to use add() instead of addBulk() for jobId deduplication
promises.push(this.getQueue(queueName).add(item.name, item.data, job.options));
} else {
itemsByQueue[queueName] = itemsByQueue[queueName] || [];
itemsByQueue[queueName].push(job);
}
}
for (const [queueName, jobs] of Object.entries(itemsByQueue)) {
const queue = this.getQueue(queueName as QueueName);
promises.push(queue.addBulk(jobs));
}
await Promise.all(promises);
}
async queue(item: JobItem): Promise<void> {
return this.queueAll([item]);
}
async waitForQueueCompletion(...queues: QueueName[]): Promise<void> {
let activeQueue: QueueStatus | undefined;
do {
const statuses = await Promise.all(queues.map((name) => this.getQueueStatus(name)));
activeQueue = statuses.find((status) => status.isActive);
} while (activeQueue);
{
this.logger.verbose(`Waiting for ${activeQueue} queue to stop...`);
await setTimeout(1000);
await this.queue(item);
}
}
private getJobOptions(item: JobItem): JobsOptions | null {
// todo: are we actually generating sql
async clear(name: QueueName): Promise<void> {
await this.db
.deleteFrom('graphile_worker._private_jobs')
.where(({ eb, selectFrom }) =>
eb('task_id', 'in', selectFrom('graphile_worker._private_tasks').select('id').where('identifier', '=', name)),
)
.execute();
const workers = await this.db
.selectFrom('graphile_worker.jobs')
.select('locked_by')
.where('locked_by', 'is not', null)
.distinct()
.execute();
    // Potentially dangerous, but useful when jobs get stuck in the active
    // state. The graphile-worker documentation says stuck jobs are unlocked
    // automatically after 4 hours, but it is confusing to click "clear" in
    // the UI and see nothing happen — especially since the UI treats the
    // queue as busy, so new jobs usually cannot be scheduled until both the
    // active and waiting counts are zero.
await this.workerUtils!.forceUnlockWorkers(workers.map((worker) => worker.locked_by!));
}
async clearFailed(name: QueueName): Promise<void> {
await this.db
.deleteFrom('graphile_worker._private_jobs')
.where(({ eb, selectFrom }) =>
eb(
'task_id',
'in',
selectFrom('graphile_worker._private_tasks')
.select('id')
.where((eb) => eb.and([eb('identifier', '=', name), eb('attempts', '>=', eb.ref('max_attempts'))])),
),
)
.execute();
}
  // TODO(review): confirm @GenerateSql actually captures this query — it is
  // unclear whether the decorator runs for this repository.
@GenerateSql({ params: [] })
async getJobCounts(name: QueueName): Promise<JobCounts> {
return await this.db
.selectFrom('graphile_worker.jobs')
.select((eb) => [
eb.fn
.countAll<number>()
.filterWhere((eb) => eb.and([eb('task_identifier', '=', name), eb('locked_by', 'is not', null)]))
.as('active'),
eb.fn
.countAll<number>()
.filterWhere((eb) =>
eb.and([
eb('task_identifier', '=', name),
eb('locked_by', 'is', null),
eb('run_at', '<=', eb.fn<Date>('now')),
]),
)
.as('waiting'),
eb.fn
.countAll<number>()
.filterWhere((eb) =>
eb.and([
eb('task_identifier', '=', name),
eb('locked_by', 'is', null),
eb('run_at', '>', eb.fn<Date>('now')),
]),
)
.as('delayed'),
eb.fn
.countAll<number>()
.filterWhere((eb) => eb.and([eb('task_identifier', '=', name), eb('attempts', '>=', eb.ref('max_attempts'))]))
.as('failed'),
])
.executeTakeFirstOrThrow();
}
async getQueueStatus(queueName: QueueName): Promise<QueueStatus> {
const state = await this.systemMetadataRepository.get(SystemMetadataKey.QUEUES_STATE);
return { paused: state?.[queueName]?.paused ?? false };
}
private getJobOptions(item: JobItem): TaskSpec | undefined {
switch (item.name) {
case JobName.NOTIFY_ALBUM_UPDATE: {
return { jobId: item.data.id, delay: item.data?.delay };
let runAt: Date | undefined;
if (item.data?.delay) {
runAt = DateTime.now().plus(Duration.fromMillis(item.data.delay)).toJSDate();
}
return { jobKey: item.data.id, runAt };
}
case JobName.STORAGE_TEMPLATE_MIGRATION_SINGLE: {
return { jobId: item.data.id };
return { jobKey: QueueName.STORAGE_TEMPLATE_MIGRATION };
}
case JobName.GENERATE_PERSON_THUMBNAIL: {
return { priority: 1 };
}
case JobName.QUEUE_FACIAL_RECOGNITION: {
return { jobId: JobName.QUEUE_FACIAL_RECOGNITION };
}
default: {
return null;
return { jobKey: JobName.QUEUE_FACIAL_RECOGNITION };
}
}
}
private getQueue(queue: QueueName): Queue {
return this.moduleRef.get<Queue>(getQueueToken(queue), { strict: false });
}
public async removeJob(jobId: string, name: JobName): Promise<IEntityJob | undefined> {
const existingJob = await this.getQueue(this.getQueueName(name)).getJob(jobId);
if (!existingJob) {
return;
}
try {
await existingJob.remove();
} catch (error: any) {
if (error.message?.includes('Missing key for job')) {
return;
}
throw error;
}
return existingJob.data;
}
}

View File

@@ -4,7 +4,6 @@ import { MetricOptions } from '@opentelemetry/api';
import { AsyncLocalStorageContextManager } from '@opentelemetry/context-async-hooks';
import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';
import { HttpInstrumentation } from '@opentelemetry/instrumentation-http';
import { IORedisInstrumentation } from '@opentelemetry/instrumentation-ioredis';
import { NestInstrumentation } from '@opentelemetry/instrumentation-nestjs-core';
import { PgInstrumentation } from '@opentelemetry/instrumentation-pg';
import { resourceFromAttributes } from '@opentelemetry/resources';
@@ -68,12 +67,7 @@ export const bootstrapTelemetry = (port: number) => {
}),
metricReader: new PrometheusExporter({ port }),
contextManager: new AsyncLocalStorageContextManager(),
instrumentations: [
new HttpInstrumentation(),
new IORedisInstrumentation(),
new NestInstrumentation(),
new PgInstrumentation(),
],
instrumentations: [new HttpInstrumentation(), new NestInstrumentation(), new PgInstrumentation()],
views: [
{
instrumentName: '*',

View File

@@ -2,7 +2,7 @@ import { BadRequestException } from '@nestjs/common';
import { defaults, SystemConfig } from 'src/config';
import { ImmichWorker, JobCommand, JobName, JobStatus, QueueName } from 'src/enum';
import { JobService } from 'src/services/job.service';
import { JobItem } from 'src/types';
import { JobCounts, JobItem } from 'src/types';
import { assetStub } from 'test/fixtures/asset.stub';
import { newTestService, ServiceMocks } from 'test/utils';
@@ -21,14 +21,14 @@ describe(JobService.name, () => {
});
describe('onConfigUpdate', () => {
it('should update concurrency', () => {
sut.onConfigUpdate({ newConfig: defaults, oldConfig: {} as SystemConfig });
it('should update concurrency', async () => {
await sut.onConfigUpdate({ newConfig: defaults, oldConfig: {} as SystemConfig });
expect(mocks.job.setConcurrency).toHaveBeenCalledTimes(15);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(5, QueueName.FACIAL_RECOGNITION, 1);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(7, QueueName.DUPLICATE_DETECTION, 1);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(8, QueueName.BACKGROUND_TASK, 5);
expect(mocks.job.setConcurrency).toHaveBeenNthCalledWith(9, QueueName.STORAGE_TEMPLATE_MIGRATION, 1);
expect(mocks.job.start).toHaveBeenCalledTimes(15);
expect(mocks.job.start).toHaveBeenNthCalledWith(5, QueueName.FACIAL_RECOGNITION, 1);
expect(mocks.job.start).toHaveBeenNthCalledWith(7, QueueName.DUPLICATE_DETECTION, 1);
expect(mocks.job.start).toHaveBeenNthCalledWith(8, QueueName.BACKGROUND_TASK, 5);
expect(mocks.job.start).toHaveBeenNthCalledWith(9, QueueName.STORAGE_TEMPLATE_MIGRATION, 1);
});
});
@@ -55,29 +55,20 @@ describe(JobService.name, () => {
it('should get all job statuses', async () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
completed: 1,
failed: 1,
delayed: 1,
waiting: 1,
paused: 1,
});
mocks.job.getQueueStatus.mockResolvedValue({
isActive: true,
isPaused: true,
delayed: 1,
failed: 1,
});
const expectedJobStatus = {
jobCounts: {
active: 1,
completed: 1,
waiting: 1,
delayed: 1,
failed: 1,
waiting: 1,
paused: 1,
},
queueStatus: {
isActive: true,
isPaused: true,
paused: true,
},
};
@@ -114,14 +105,20 @@ describe(JobService.name, () => {
expect(mocks.job.resume).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
});
it('should handle an empty command', async () => {
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.EMPTY, force: false });
it('should handle a clear command', async () => {
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.CLEAR, force: false });
expect(mocks.job.empty).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
expect(mocks.job.clear).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
});
it('should handle a clear-failed command', async () => {
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.CLEAR_FAILED, force: false });
expect(mocks.job.clearFailed).toHaveBeenCalledWith(QueueName.METADATA_EXTRACTION);
});
it('should not start a job that is already running', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: true, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 1 } as JobCounts);
await expect(
sut.handleCommand(QueueName.VIDEO_CONVERSION, { command: JobCommand.START, force: false }),
@@ -132,7 +129,7 @@ describe(JobService.name, () => {
});
it('should handle a start video conversion command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.VIDEO_CONVERSION, { command: JobCommand.START, force: false });
@@ -140,7 +137,7 @@ describe(JobService.name, () => {
});
it('should handle a start storage template migration command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.STORAGE_TEMPLATE_MIGRATION, { command: JobCommand.START, force: false });
@@ -148,7 +145,7 @@ describe(JobService.name, () => {
});
it('should handle a start smart search command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.SMART_SEARCH, { command: JobCommand.START, force: false });
@@ -156,7 +153,7 @@ describe(JobService.name, () => {
});
it('should handle a start metadata extraction command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.METADATA_EXTRACTION, { command: JobCommand.START, force: false });
@@ -164,7 +161,7 @@ describe(JobService.name, () => {
});
it('should handle a start sidecar command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.SIDECAR, { command: JobCommand.START, force: false });
@@ -172,7 +169,7 @@ describe(JobService.name, () => {
});
it('should handle a start thumbnail generation command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.THUMBNAIL_GENERATION, { command: JobCommand.START, force: false });
@@ -180,7 +177,7 @@ describe(JobService.name, () => {
});
it('should handle a start face detection command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.FACE_DETECTION, { command: JobCommand.START, force: false });
@@ -188,7 +185,7 @@ describe(JobService.name, () => {
});
it('should handle a start facial recognition command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.FACIAL_RECOGNITION, { command: JobCommand.START, force: false });
@@ -196,7 +193,7 @@ describe(JobService.name, () => {
});
it('should handle a start backup database command', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await sut.handleCommand(QueueName.BACKUP_DATABASE, { command: JobCommand.START, force: false });
@@ -204,7 +201,7 @@ describe(JobService.name, () => {
});
it('should throw a bad request when an invalid queue is used', async () => {
mocks.job.getQueueStatus.mockResolvedValue({ isActive: false, isPaused: false });
mocks.job.getJobCounts.mockResolvedValue({ active: 0 } as JobCounts);
await expect(
sut.handleCommand(QueueName.BACKGROUND_TASK, { command: JobCommand.START, force: false }),

View File

@@ -12,7 +12,6 @@ import {
JobName,
JobStatus,
ManualJobName,
QueueCleanType,
QueueName,
} from 'src/enum';
import { ArgOf, ArgsOf } from 'src/repositories/event.repository';
@@ -56,7 +55,7 @@ export class JobService extends BaseService {
private services: ClassConstructor<unknown>[] = [];
@OnEvent({ name: 'config.init', workers: [ImmichWorker.MICROSERVICES] })
onConfigInit({ newConfig: config }: ArgOf<'config.init'>) {
async onConfigInit({ newConfig: config }: ArgOf<'config.init'>) {
this.logger.debug(`Updating queue concurrency settings`);
for (const queueName of Object.values(QueueName)) {
let concurrency = 1;
@@ -64,21 +63,18 @@ export class JobService extends BaseService {
concurrency = config.job[queueName].concurrency;
}
this.logger.debug(`Setting ${queueName} concurrency to ${concurrency}`);
this.jobRepository.setConcurrency(queueName, concurrency);
await this.jobRepository.start(queueName, concurrency);
}
}
@OnEvent({ name: 'config.update', server: true, workers: [ImmichWorker.MICROSERVICES] })
onConfigUpdate({ newConfig: config }: ArgOf<'config.update'>) {
this.onConfigInit({ newConfig: config });
async onConfigUpdate({ newConfig: config }: ArgOf<'config.update'>) {
await this.onConfigInit({ newConfig: config });
}
@OnEvent({ name: 'app.bootstrap', priority: BootstrapEventPriority.JobService })
onBootstrap() {
this.jobRepository.setup(this.services);
if (this.worker === ImmichWorker.MICROSERVICES) {
this.jobRepository.startWorkers();
}
async onBootstrap() {
await this.jobRepository.setup(this.services);
}
setServices(services: ClassConstructor<unknown>[]) {
@@ -97,25 +93,20 @@ export class JobService extends BaseService {
await this.start(queueName, dto);
break;
}
case JobCommand.PAUSE: {
await this.jobRepository.pause(queueName);
this.eventRepository.serverSend('queue.pause', queueName);
break;
}
case JobCommand.RESUME: {
await this.jobRepository.resume(queueName);
this.eventRepository.serverSend('queue.resume', queueName);
break;
}
case JobCommand.EMPTY: {
await this.jobRepository.empty(queueName);
case JobCommand.CLEAR: {
await this.jobRepository.clear(queueName);
break;
}
case JobCommand.CLEAR_FAILED: {
const failedJobs = await this.jobRepository.clear(queueName, QueueCleanType.FAILED);
this.logger.debug(`Cleared failed jobs: ${failedJobs}`);
await this.jobRepository.clearFailed(queueName);
break;
}
}
@@ -141,9 +132,9 @@ export class JobService extends BaseService {
}
private async start(name: QueueName, { force }: JobCommandDto): Promise<void> {
const { isActive } = await this.jobRepository.getQueueStatus(name);
if (isActive) {
throw new BadRequestException(`Job is already running`);
const { active } = await this.jobRepository.getJobCounts(name);
if (active > 0) {
throw new BadRequestException(`Jobs are already running`);
}
this.telemetryRepository.jobs.addToCounter(`immich.queues.${snakeCase(name)}.started`, 1);
@@ -203,6 +194,16 @@ export class JobService extends BaseService {
}
}
@OnEvent({ name: 'queue.pause', server: true, workers: [ImmichWorker.MICROSERVICES] })
async pause(...[queueName]: ArgsOf<'queue.pause'>): Promise<void> {
await this.jobRepository.pause(queueName);
}
@OnEvent({ name: 'queue.resume', server: true, workers: [ImmichWorker.MICROSERVICES] })
async resume(...[queueName]: ArgsOf<'queue.resume'>): Promise<void> {
await this.jobRepository.resume(queueName);
}
@OnEvent({ name: 'job.start' })
async onJobStart(...[queueName, job]: ArgsOf<'job.start'>) {
const queueMetric = `immich.queues.${snakeCase(queueName)}.active`;

View File

@@ -67,16 +67,12 @@ describe(MetadataService.name, () => {
});
describe('onBootstrapEvent', () => {
it('should pause and resume queue during init', async () => {
mocks.job.pause.mockResolvedValue();
it('should init', async () => {
mocks.map.init.mockResolvedValue();
mocks.job.resume.mockResolvedValue();
await sut.onBootstrap();
expect(mocks.job.pause).toHaveBeenCalledTimes(1);
expect(mocks.map.init).toHaveBeenCalledTimes(1);
expect(mocks.job.resume).toHaveBeenCalledTimes(1);
});
});

View File

@@ -121,9 +121,7 @@ export class MetadataService extends BaseService {
this.logger.log('Initializing metadata service');
try {
await this.jobRepository.pause(QueueName.METADATA_EXTRACTION);
await this.databaseRepository.withLock(DatabaseLock.GeodataImport, () => this.mapRepository.init());
await this.jobRepository.resume(QueueName.METADATA_EXTRACTION);
this.logger.log(`Initialized local reverse geocoder`);
} catch (error: Error | any) {

View File

@@ -499,14 +499,13 @@ describe(NotificationService.name, () => {
});
it('should add new recipients for new images if job is already queued', async () => {
mocks.job.removeJob.mockResolvedValue({ id: '1', recipientIds: ['2', '3', '4'] } as INotifyAlbumUpdateJob);
await sut.onAlbumUpdate({ id: '1', recipientIds: ['1', '2', '3'] } as INotifyAlbumUpdateJob);
expect(mocks.job.queue).toHaveBeenCalledWith({
name: JobName.NOTIFY_ALBUM_UPDATE,
data: {
id: '1',
delay: 300_000,
recipientIds: ['1', '2', '3', '4'],
recipientIds: ['1', '2', '3'],
},
});
});

View File

@@ -196,14 +196,15 @@ export class NotificationService extends BaseService {
data: { id, recipientIds, delay: NotificationService.albumUpdateEmailDelayMs },
};
const previousJobData = await this.jobRepository.removeJob(id, JobName.NOTIFY_ALBUM_UPDATE);
if (previousJobData && this.isAlbumUpdateJob(previousJobData)) {
for (const id of previousJobData.recipientIds) {
if (!recipientIds.includes(id)) {
recipientIds.push(id);
}
}
}
// todo: https://github.com/immich-app/immich/pull/17879
// const previousJobData = await this.jobRepository.removeJob(id, JobName.NOTIFY_ALBUM_UPDATE);
// if (previousJobData && this.isAlbumUpdateJob(previousJobData)) {
// for (const id of previousJobData.recipientIds) {
// if (!recipientIds.includes(id)) {
// recipientIds.push(id);
// }
// }
// }
await this.jobRepository.queue(job);
}

View File

@@ -529,10 +529,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0,
failed: 0,
});
mocks.systemMetadata.get.mockResolvedValue(systemConfigStub.machineLearningDisabled);
@@ -546,10 +544,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
waiting: 1,
paused: 0,
completed: 0,
failed: 0,
delayed: 0,
failed: 0,
});
await expect(sut.handleQueueRecognizeFaces({})).resolves.toBe(JobStatus.SKIPPED);
@@ -561,10 +557,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0,
failed: 0,
});
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));
mocks.person.getAllWithoutFaces.mockResolvedValue([]);
@@ -590,10 +584,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0,
failed: 0,
});
mocks.person.getAll.mockReturnValue(makeStream());
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));
@@ -619,10 +611,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0,
failed: 0,
});
mocks.person.getAll.mockReturnValue(makeStream());
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));
@@ -666,10 +656,8 @@ describe(PersonService.name, () => {
mocks.job.getJobCounts.mockResolvedValue({
active: 1,
waiting: 0,
paused: 0,
completed: 0,
failed: 0,
delayed: 0,
failed: 0,
});
mocks.person.getAll.mockReturnValue(makeStream([faceStub.face1.person, personStub.randomPerson]));
mocks.person.getAllFaces.mockReturnValue(makeStream([faceStub.face1]));

View File

@@ -392,7 +392,8 @@ export class PersonService extends BaseService {
return JobStatus.SKIPPED;
}
await this.jobRepository.waitForQueueCompletion(QueueName.THUMBNAIL_GENERATION, QueueName.FACE_DETECTION);
// todo
// await this.jobRepository.waitForQueueCompletion(QueueName.THUMBNAIL_GENERATION, QueueName.FACE_DETECTION);
if (nightly) {
const [state, latestFaceDate] = await Promise.all([

View File

@@ -256,16 +256,13 @@ export interface INotifyAlbumUpdateJob extends IEntityJob, IDelayedJob {
export interface JobCounts {
active: number;
completed: number;
failed: number;
delayed: number;
waiting: number;
paused: number;
delayed: number;
failed: number;
}
export interface QueueStatus {
isActive: boolean;
isPaused: boolean;
paused: boolean;
}
export type JobItem =
@@ -450,6 +447,14 @@ export type MemoriesState = {
lastOnThisDayDate: string;
};
export type QueueState = {
paused: boolean;
};
export type QueuesState = {
[key in QueueName]?: QueueState;
};
export interface SystemMetadata extends Record<SystemMetadataKey, Record<string, any>> {
[SystemMetadataKey.ADMIN_ONBOARDING]: { isOnboarded: boolean };
[SystemMetadataKey.FACIAL_RECOGNITION_STATE]: { lastRun?: string };
@@ -459,6 +464,7 @@ export interface SystemMetadata extends Record<SystemMetadataKey, Record<string,
[SystemMetadataKey.SYSTEM_FLAGS]: DeepPartial<SystemFlags>;
[SystemMetadataKey.VERSION_CHECK_STATE]: VersionCheckMetadata;
[SystemMetadataKey.MEMORIES_STATE]: MemoriesState;
[SystemMetadataKey.QUEUES_STATE]: QueuesState;
}
export type UserMetadataItem<T extends keyof UserMetadata = UserMetadataKey> = {

View File

@@ -32,7 +32,7 @@ export const asPostgresConnectionConfig = (params: DatabaseConnectionParams) =>
return {
host: params.host,
port: params.port,
username: params.username,
user: params.username,
password: params.password,
database: params.database,
ssl: undefined,
@@ -51,7 +51,7 @@ export const asPostgresConnectionConfig = (params: DatabaseConnectionParams) =>
return {
host: host ?? undefined,
port: port ? Number(port) : undefined,
username: user,
user,
password,
database: database ?? undefined,
ssl,
@@ -92,7 +92,7 @@ export const getKyselyConfig = (
},
host: config.host,
port: config.port,
username: config.username,
user: config.user,
password: config.password,
database: config.database,
ssl: config.ssl,

View File

@@ -18,7 +18,6 @@ read_file_and_export "DB_HOSTNAME_FILE" "DB_HOSTNAME"
read_file_and_export "DB_DATABASE_NAME_FILE" "DB_DATABASE_NAME"
read_file_and_export "DB_USERNAME_FILE" "DB_USERNAME"
read_file_and_export "DB_PASSWORD_FILE" "DB_PASSWORD"
read_file_and_export "REDIS_PASSWORD_FILE" "REDIS_PASSWORD"
export CPU_CORES="${CPU_CORES:=$(./get-cpus.sh)}"
echo "Detected CPU Cores: $CPU_CORES"

View File

@@ -8,12 +8,6 @@ const envData: EnvData = {
environment: ImmichEnvironment.PRODUCTION,
buildMetadata: {},
bull: {
config: {
prefix: 'immich_bull',
},
queues: [{ name: 'queue-1' }],
},
cls: {
config: {},
@@ -52,12 +46,6 @@ const envData: EnvData = {
},
},
redis: {
host: 'redis',
port: 6379,
db: 0,
},
resourcePaths: {
lockFile: 'build-lock.json',
geodata: {

View File

@@ -5,18 +5,16 @@ import { Mocked, vitest } from 'vitest';
export const newJobRepositoryMock = (): Mocked<RepositoryInterface<JobRepository>> => {
return {
setup: vitest.fn(),
startWorkers: vitest.fn(),
run: vitest.fn(),
setConcurrency: vitest.fn(),
empty: vitest.fn(),
start: vitest.fn(),
stop: vitest.fn(),
pause: vitest.fn(),
resume: vitest.fn(),
run: vitest.fn(),
queue: vitest.fn().mockImplementation(() => Promise.resolve()),
queueAll: vitest.fn().mockImplementation(() => Promise.resolve()),
getQueueStatus: vitest.fn(),
getJobCounts: vitest.fn(),
clear: vitest.fn(),
waitForQueueCompletion: vitest.fn(),
removeJob: vitest.fn(),
clearFailed: vitest.fn(),
getJobCounts: vitest.fn(),
getQueueStatus: vitest.fn(),
};
};