* feat: ProcessRepository#createSpawnDuplexStream
* test: write tests for ProcessRepository#createSpawnDuplexStream
* feat: StorageRepository#createGzip,createGunzip,createPlainReadStream
* feat: backups util (args, create, restore, progress)
* feat: wait on maintenance operation lock on boot
* chore: use backup util from backup.service.ts
test: update backup.service.ts tests with new util
* feat: list/delete backups (maintenance services)
* chore: open api
fix: missing action in cli.service.ts
* chore: add missing repositories to MaintenanceModule
* refactor: move logSecret into module init
* feat: initialise StorageCore in maintenance mode
* feat: authenticate websocket requests in maintenance mode
* test: add mock for new storage fns
* feat: add MaintenanceEphemeralStateRepository
refactor: cache the secret in memory
* test: update service worker tests
* feat: add external maintenance mode status
* feat: synchronised status, restore db action
* test: backup restore service tests
* refactor: DRY end maintenance
* feat: list and delete backup routes
* feat: start action on boot
* fix: should set status on restore end
* refactor: add maintenanceStore to hold writables
* feat: sync status to web app
* feat: web impl.
* test: various utils for testing
* test: web e2e tests
* test: e2e maintenance spec
* test: update cli spec
* chore: e2e lint
* chore: lint fixes
* chore: lint fixes
* feat: start restore flow route
* test: update e2e tests
* chore: remove neon lights on maintenance action pages
* fix: use 'startRestoreFlow' on onboarding page
* chore: ignore any library folder in `docker/`
* fix: load status on boot
* feat: upload backups
* refactor: permit any .sql(.gz) to be listed/restored
* feat: download backups from list
* fix: permit uploading just .sql files
* feat: restore just .sql files
* fix: don't show backups list if logged out
* feat: system integrity check in restore flow
* test: not providing failed backups in API anymore
* test: util should also not try to use failedBackups
* fix: actually assign inputStream
* test: correct test backup prep.
* fix: ensure task is defined to show error
* test: fix docker cp command
* test: update e2e web spec to select next button
* test: update e2e api tests
* test: refactor timeouts
* chore: remove `showDelete` from maint. settings
* chore: lint
* chore: lint
* fix: make sure backups are correctly sorted for clean up
* test: update service spec
* test: adjust e2e timeout
* test: increase web timeouts for ci
* chore: move gitignore changes
* chore: additional filename validation
* refactor: better typings for integrity API
* feat: higher accuracy progress tracking
* chore: delay lock retry
* refactor: remove old maintenance settings
* refactor: clean up tailwind classes
* refactor: use while loop rather than recursive calls
* test: update service specs
* chore: check canParse too
* chore: lint
* fix: logic error causing infinite loop
* refactor: use <ProgressBar /> from ui library
* fix: create or overwrite file
* chore: i18n pass, update progress bar
* fix: wrong translation string
* chore: update colour variables
* test: update web test for new maint. page
* chore: format, fix key
* test: update tests to be more linter compliant & use new routines
* chore: update onClick -> onAction, title -> breadcrumbs
* fix: use wrench icon in admin settings sidebar
* chore: add translation strings to accordion
* chore: lint
* refactor: move maintenance worker init into service
* refactor: `maintenanceStatus` -> `getMaintenanceStatus`
refactor: `integrityCheck` -> `detectPriorInstall`
chore: add `v2.4.0` version
refactor: `/backups/list` -> `/backups`
refactor: use sendFile in download route
refactor: use separate backups permissions
chore: correct descriptions
refactor: permit handler that doesn't return a promise for sendFile
* refactor: move status impl into service
refactor: add active flag to maintenance status
* refactor: split into database backup controller
* test: split api e2e tests and passing
* fix: move end button into authed default maint page
* fix: also show in restore flow
* fix: import getMaintenanceStatus
* test: split web e2e tests
* refactor: ensure detect install is consistently named
* chore: ensure admin for detect install while out of maint.
* refactor: remove state repository
* test: update maint. worker service spec
* test: split backup service spec
* refactor: rename db backup routes
* refactor: instead of param, allow bulk backup deletion
* test: update sdk use in e2e test
* test: correct deleteBackup call
* fix: correct type for serverinstall response dto
* chore: validate filename for deletion
* test: wip
* test: backups no longer take path param
* refactor: scope util to database-backups instead of backups
* fix: update worker controller with new route
* chore: use new admin page actions
* chore: remove stray comment
* test: rename outdated test
* refactor: getter pattern for maintenance secret
* refactor: `createSpawnDuplexStream` -> `spawnDuplexStream`
* refactor: prefer `Object.assign`
* refactor: remove useless try {} block
* refactor: prefer `type Props`
refactor: prefer arrow function
* refactor: use luxon API for minutesAgo
* chore: remove change to gitignore
* refactor: prefer `type Props`
* refactor: remove async from onMount
* refactor: use luxon toRelative for relative time
* refactor: duplicate logic check
* chore: open api
* refactor: begin moving code into web//services
* refactor: don't use template string with $t
* test: use dialog role to match prompt
* refactor: split actions into flow/restore
* test: fix action value
* refactor: move more service calls into web//services
* chore: should void fn return
* chore: bump 2.4.0 to 2.5.0 in controller
* chore: bump 2.4.0 to 2.5.0 in controller
* refactor: use events for web//services
* chore: open api
* chore: open api
* refactor: don't await returned promise
* refactor: remove redundant check
* refactor: add `type: command` to actions
* refactor: split backup entries into own component
* refactor: split restore flow into separate components
* refactor(web): split BackupDelete event
* chore: stylings
* chore: stylings
* fix: don't log query failure on first boot
* feat: support pg_dumpall backups
* feat: display information about each backup
* chore: i18n
* feat: rollback to restore point on migrations failure
* feat: health check after restore
* chore: format
* refactor: split health check into separate function
* refactor: split health into repository
test: write tests covering rollbacks
* fix: omit 'health' requirement from createDbBackup
* test(e2e): rollback test
* fix: wrap text in backup entry
* fix: don't shrink context menu button
* fix: correct CREATE DB syntax for postgres
* test: rename backups generated by test
* feat: add filesize to backup response dto
* feat: restore list
* feat: ui work
* fix: e2e test
* fix: e2e test
* pr feedback
* pr feedback
---------
Co-authored-by: Alex <alex.tran1502@gmail.com>
Co-authored-by: Jason Rasmussen <jason@rasm.me>
495 lines · 14 KiB · TypeScript
import { BadRequestException } from '@nestjs/common';
import { debounce } from 'lodash';
import { DateTime } from 'luxon';
import path, { basename, join } from 'node:path';
import { PassThrough, Readable, Writable } from 'node:stream';
import { pipeline } from 'node:stream/promises';
import semver from 'semver';
import { serverVersion } from 'src/constants';
import { StorageCore } from 'src/cores/storage.core';
import { CacheControl, StorageFolder } from 'src/enum';
import { MaintenanceHealthRepository } from 'src/maintenance/maintenance-health.repository';
import { ConfigRepository } from 'src/repositories/config.repository';
import { DatabaseRepository } from 'src/repositories/database.repository';
import { LoggingRepository } from 'src/repositories/logging.repository';
import { ProcessRepository } from 'src/repositories/process.repository';
import { StorageRepository } from 'src/repositories/storage.repository';

export function isValidDatabaseBackupName(filename: string) {
  return filename.match(/^[\d\w-.]+\.sql(?:\.gz)?$/);
}

export function isValidDatabaseRoutineBackupName(filename: string) {
  const oldBackupStyle = filename.match(/^immich-db-backup-\d+\.sql\.gz$/);
  // immich-db-backup-20250729T114018-v1.136.0-pg14.17.sql.gz
  const newBackupStyle = filename.match(/^immich-db-backup-\d{8}T\d{6}-v.*-pg.*\.sql\.gz$/);
  return oldBackupStyle || newBackupStyle;
}

export function isFailedDatabaseBackupName(filename: string) {
  return filename.match(/^immich-db-backup-.*\.sql\.gz\.tmp$/);
}

export function findVersion(filename: string) {
  return /-v(.*)-/.exec(filename)?.[1];
}

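// Added example (not part of the original file): names accepted by
// isValidDatabaseBackupName. The pattern permits word characters, digits,
// hyphens, and dots, followed by `.sql` or `.sql.gz`:
//
//   isValidDatabaseBackupName('immich-db-backup-20250729T114018-v1.136.0-pg14.17.sql.gz'); // matches
//   isValidDatabaseBackupName('my-dump.sql');    // matches
//   isValidDatabaseBackupName('../../evil.sql'); // rejected, '/' is not allowed
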
type BackupRepos = {
  logger: LoggingRepository;
  storage: StorageRepository;
  config: ConfigRepository;
  process: ProcessRepository;
  database: DatabaseRepository;
  health: MaintenanceHealthRepository;
};

export class UnsupportedPostgresError extends Error {
  constructor(databaseVersion: string) {
    super(`Unsupported PostgreSQL version: ${databaseVersion}`);
  }
}

export async function buildPostgresLaunchArguments(
  { logger, config, database }: Pick<BackupRepos, 'logger' | 'config' | 'database'>,
  bin: 'pg_dump' | 'pg_dumpall' | 'psql',
  options: {
    singleTransaction?: boolean;
    username?: string;
  } = {},
): Promise<{
  bin: string;
  args: string[];
  databasePassword: string;
  databaseVersion: string;
  databaseMajorVersion?: number;
}> {
  const {
    database: { config: databaseConfig },
  } = config.getEnv();
  const isUrlConnection = databaseConfig.connectionType === 'url';

  const databaseVersion = await database.getPostgresVersion();
  const databaseSemver = semver.coerce(databaseVersion);
  const databaseMajorVersion = databaseSemver?.major;

  const args: string[] = [];

  if (isUrlConnection) {
    if (bin !== 'pg_dump') {
      args.push('--dbname');
    }

    let url = databaseConfig.url;
    if (URL.canParse(databaseConfig.url)) {
      const parsedUrl = new URL(databaseConfig.url);
      // remove known bad parameters
      parsedUrl.searchParams.delete('uselibpqcompat');

      if (options.username) {
        parsedUrl.username = options.username;
      }

      url = parsedUrl.toString();
    }

    args.push(url);
  } else {
    args.push(
      '--username',
      options.username ?? databaseConfig.username,
      '--host',
      databaseConfig.host,
      '--port',
      databaseConfig.port.toString(),
    );

    switch (bin) {
      case 'pg_dumpall': {
        args.push('--database');
        break;
      }
      case 'psql': {
        args.push('--dbname');
        break;
      }
    }

    args.push(databaseConfig.database);
  }

  switch (bin) {
    case 'pg_dump':
    case 'pg_dumpall': {
      args.push('--clean', '--if-exists');
      break;
    }
    case 'psql': {
      if (options.singleTransaction) {
        args.push(
          // don't commit any transaction on failure
          '--single-transaction',
          // exit with non-zero code on error
          '--set',
          'ON_ERROR_STOP=on',
        );
      }

      args.push(
        // used for progress monitoring
        '--echo-all',
        '--output=/dev/null',
      );
      break;
    }
  }

  if (!databaseMajorVersion || !databaseSemver || !semver.satisfies(databaseSemver, '>=14.0.0 <19.0.0')) {
    logger.error(`Database Restore Failure: Unsupported PostgreSQL version: ${databaseVersion}`);
    throw new UnsupportedPostgresError(databaseVersion);
  }

  return {
    bin: `/usr/lib/postgresql/${databaseMajorVersion}/bin/${bin}`,
    args,
    databasePassword: isUrlConnection ? new URL(databaseConfig.url).password : databaseConfig.password,
    databaseVersion,
    databaseMajorVersion,
  };
}

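// Added sketch (values are illustrative, not from the original): for a
// host/port connection with a hypothetical config of postgres@database:5432
// and database 'immich', buildPostgresLaunchArguments(repos, 'pg_dump')
// resolves to roughly:
//
//   {
//     bin: '/usr/lib/postgresql/14/bin/pg_dump',
//     args: ['--username', 'postgres', '--host', 'database', '--port', '5432', 'immich', '--clean', '--if-exists'],
//     databasePassword: '...',
//     databaseVersion: '14.17 ...',
//     databaseMajorVersion: 14,
//   }
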
export async function createDatabaseBackup(
  { logger, storage, process: processRepository, ...pgRepos }: Omit<BackupRepos, 'health'>,
  filenamePrefix: string = '',
): Promise<string> {
  logger.debug(`Database Backup Started`);

  const { bin, args, databasePassword, databaseVersion, databaseMajorVersion } = await buildPostgresLaunchArguments(
    { logger, ...pgRepos },
    'pg_dump',
  );

  logger.log(`Database Backup Starting. Database Version: ${databaseMajorVersion}`);

  const filename = `${filenamePrefix}immich-db-backup-${DateTime.now().toFormat("yyyyLLdd'T'HHmmss")}-v${serverVersion.toString()}-pg${databaseVersion.split(' ')[0]}.sql.gz`;
  const backupFilePath = join(StorageCore.getBaseFolder(StorageFolder.Backups), filename);
  const temporaryFilePath = `${backupFilePath}.tmp`;

  try {
    const pgdump = processRepository.spawnDuplexStream(bin, args, {
      env: {
        PATH: process.env.PATH,
        PGPASSWORD: databasePassword,
      },
    });

    const gzip = processRepository.spawnDuplexStream('gzip', ['--rsyncable']);
    const fileStream = storage.createWriteStream(temporaryFilePath);

    await pipeline(pgdump, gzip, fileStream);
    await storage.rename(temporaryFilePath, backupFilePath);
  } catch (error) {
    logger.error(`Database Backup Failure: ${error}`);
    await storage
      .unlink(temporaryFilePath)
      .catch((error) => logger.error(`Failed to delete failed backup file: ${error}`));
    throw error;
  }

  logger.log(`Database Backup Success`);
  return backupFilePath;
}

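// Note (added comment): the dump is streamed through `gzip --rsyncable` into a
// `.tmp` file and renamed to its final name only after the pipeline succeeds.
// An interrupted run therefore leaves a `*.sql.gz.tmp` file behind, which is
// exactly what isFailedDatabaseBackupName above matches.
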
const SQL_DROP_CONNECTIONS = `
-- drop all other database connections
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = current_database()
AND pid <> pg_backend_pid();
`;

const SQL_RESET_SCHEMA = `
-- re-create the default schema
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;

-- restore access to schema
GRANT ALL ON SCHEMA public TO postgres;
GRANT ALL ON SCHEMA public TO public;
`;

async function* sql(inputStream: Readable, isPgClusterDump: boolean) {
  yield SQL_DROP_CONNECTIONS;
  yield isPgClusterDump
    ? String.raw`
\c postgres
`
    : SQL_RESET_SCHEMA;

  for await (const chunk of inputStream) {
    yield chunk;
  }
}

async function* sqlRollback(inputStream: Readable, isPgClusterDump: boolean) {
  yield SQL_DROP_CONNECTIONS;

  if (isPgClusterDump) {
    yield String.raw`
-- try to create database
-- may fail but script will continue running
CREATE DATABASE immich;

-- switch to database / newly created database
\c immich
`;
  }

  yield SQL_RESET_SCHEMA;

  for await (const chunk of inputStream) {
    yield chunk;
  }
}

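// For orientation (added comment): the preamble yielded ahead of the backup
// contents differs by dump type. A plain pg_dump restore first drops other
// connections and resets the public schema in the current database; a backup
// from server versions <= 2.4 is treated as a pg_dumpall (cluster) dump, which
// only needs `\c postgres` in front of it since the dump manages databases
// itself.
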
export async function restoreDatabaseBackup(
  { logger, storage, process: processRepository, database: databaseRepository, health, ...pgRepos }: BackupRepos,
  filename: string,
  progressCb?: (action: 'backup' | 'restore' | 'migrations' | 'rollback', progress: number) => void,
): Promise<void> {
  logger.debug(`Database Restore Started`);

  let complete = false;
  try {
    if (!isValidDatabaseBackupName(filename)) {
      throw new Error('Invalid backup file format!');
    }

    const backupFilePath = path.join(StorageCore.getBaseFolder(StorageFolder.Backups), filename);
    await storage.stat(backupFilePath); // => check file exists

    let isPgClusterDump = false;
    const version = findVersion(filename);
    if (version && semver.satisfies(version, '<= 2.4')) {
      isPgClusterDump = true;
    }

    const { bin, args, databasePassword, databaseMajorVersion } = await buildPostgresLaunchArguments(
      { logger, database: databaseRepository, ...pgRepos },
      'psql',
      {
        singleTransaction: !isPgClusterDump,
        username: isPgClusterDump ? 'postgres' : undefined,
      },
    );

    progressCb?.('backup', 0.05);

    const restorePointFilePath = await createDatabaseBackup(
      { logger, storage, process: processRepository, database: databaseRepository, ...pgRepos },
      'restore-point-',
    );

    logger.log(`Database Restore Starting. Database Version: ${databaseMajorVersion}`);

    let inputStream: Readable;
    if (backupFilePath.endsWith('.gz')) {
      const fileStream = storage.createPlainReadStream(backupFilePath);
      const gunzip = storage.createGunzip();
      fileStream.pipe(gunzip);
      inputStream = gunzip;
    } else {
      inputStream = storage.createPlainReadStream(backupFilePath);
    }

    const sqlStream = Readable.from(sql(inputStream, isPgClusterDump));
    const psql = processRepository.spawnDuplexStream(bin, args, {
      env: {
        PATH: process.env.PATH,
        PGPASSWORD: databasePassword,
      },
    });

    const [progressSource, progressSink] = createSqlProgressStreams((progress) => {
      if (complete) {
        return;
      }

      logger.log(`Restore progress ~ ${(progress * 100).toFixed(2)}%`);
      progressCb?.('restore', progress);
    });

    await pipeline(sqlStream, progressSource, psql, progressSink);

    try {
      progressCb?.('migrations', 0.9);
      await databaseRepository.runMigrations();
      await health.checkApiHealth();
    } catch (error) {
      progressCb?.('rollback', 0);

      const fileStream = storage.createPlainReadStream(restorePointFilePath);
      const gunzip = storage.createGunzip();
      fileStream.pipe(gunzip);
      inputStream = gunzip;

      const sqlStream = Readable.from(sqlRollback(inputStream, isPgClusterDump));
      const psql = processRepository.spawnDuplexStream(bin, args, {
        env: {
          PATH: process.env.PATH,
          PGPASSWORD: databasePassword,
        },
      });

      const [progressSource, progressSink] = createSqlProgressStreams((progress) => {
        if (complete) {
          return;
        }

        logger.log(`Rollback progress ~ ${(progress * 100).toFixed(2)}%`);
        progressCb?.('rollback', progress);
      });

      await pipeline(sqlStream, progressSource, psql, progressSink);

      throw error;
    }
  } catch (error) {
    logger.error(`Database Restore Failure: ${error}`);
    throw error;
  } finally {
    complete = true;
  }

  logger.log(`Database Restore Success`);
}

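// Hypothetical usage sketch (not in the original file); `repos` stands in for
// an object providing the repositories listed in BackupRepos:
//
//   await restoreDatabaseBackup(repos, 'immich-db-backup-20250729T114018-v1.136.0-pg14.17.sql.gz', (action, progress) => {
//     console.log(`${action}: ${Math.round(progress * 100)}%`);
//   });
//
// On a migrations or health-check failure the function replays the
// 'restore-point-' backup it took up front, then rethrows the original error.
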
export async function deleteDatabaseBackup({ storage }: Pick<BackupRepos, 'storage'>, files: string[]): Promise<void> {
  const backupsFolder = StorageCore.getBaseFolder(StorageFolder.Backups);

  if (files.some((filename) => !isValidDatabaseBackupName(filename))) {
    throw new BadRequestException('Invalid backup name!');
  }

  await Promise.all(files.map((filename) => storage.unlink(path.join(backupsFolder, filename))));
}

export async function listDatabaseBackups({
  storage,
}: Pick<BackupRepos, 'storage'>): Promise<{ filename: string; filesize: number }[]> {
  const backupsFolder = StorageCore.getBaseFolder(StorageFolder.Backups);
  const files = await storage.readdir(backupsFolder);

  const validFiles = files
    .filter((fn) => isValidDatabaseBackupName(fn))
    .toSorted((a, b) => (a.startsWith('uploaded-') === b.startsWith('uploaded-') ? a.localeCompare(b) : 1))
    .toReversed();

  const backups = await Promise.all(
    validFiles.map(async (filename) => {
      const stats = await storage.stat(path.join(backupsFolder, filename));
      return { filename, filesize: stats.size };
    }),
  );

  return backups;
}

export async function uploadDatabaseBackup(
  { storage }: Pick<BackupRepos, 'storage'>,
  file: Express.Multer.File,
): Promise<void> {
  const backupsFolder = StorageCore.getBaseFolder(StorageFolder.Backups);
  const fn = basename(file.originalname);
  if (!isValidDatabaseBackupName(fn)) {
    throw new BadRequestException('Invalid backup name!');
  }

  const path = join(backupsFolder, `uploaded-${fn}`);
  await storage.createOrOverwriteFile(path, file.buffer);
}

export function downloadDatabaseBackup(fileName: string) {
  if (!isValidDatabaseBackupName(fileName)) {
    throw new BadRequestException('Invalid backup name!');
  }

  const path = join(StorageCore.getBaseFolder(StorageFolder.Backups), fileName);

  return {
    path,
    fileName,
    cacheControl: CacheControl.PrivateWithoutCache,
    contentType: fileName.endsWith('.gz') ? 'application/gzip' : 'application/sql',
  };
}

function createSqlProgressStreams(cb: (progress: number) => void) {
  const STDIN_START_MARKER = new TextEncoder().encode('FROM stdin');
  const STDIN_END_MARKER = new TextEncoder().encode(String.raw`\.`);

  let readingStdin = false;
  let sequenceIdx = 0;

  let linesSent = 0;
  let linesProcessed = 0;

  const startedAt = +Date.now();
  const cbDebounced = debounce(
    () => {
      const progress = source.writableEnded
        ? Math.min(1, linesProcessed / linesSent)
        : // progress simulation while we're in an indeterminate state
          Math.min(0.3, 0.1 + (Date.now() - startedAt) / 1e4);
      cb(progress);
    },
    100,
    {
      maxWait: 100,
    },
  );

  let lastByte = -1;
  const source = new PassThrough({
    transform(chunk, _encoding, callback) {
      for (const byte of chunk) {
        if (!readingStdin && byte === 10 && lastByte !== 10) {
          linesSent += 1;
        }

        lastByte = byte;

        const sequence = readingStdin ? STDIN_END_MARKER : STDIN_START_MARKER;
        if (sequence[sequenceIdx] === byte) {
          sequenceIdx += 1;

          if (sequence.length === sequenceIdx) {
            sequenceIdx = 0;
            readingStdin = !readingStdin;
          }
        } else {
          sequenceIdx = 0;
        }
      }

      cbDebounced();
      this.push(chunk);
      callback();
    },
  });

  const sink = new Writable({
    write(chunk, _encoding, callback) {
      for (const byte of chunk) {
        if (byte === 10) {
          linesProcessed++;
        }
      }

      cbDebounced();
      callback();
    },
  });

  return [source, sink];
}

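// How the estimate works (added comment): `source` sits in front of psql and
// counts the newline-delimited lines flowing in, skipping COPY payload between
// a 'FROM stdin' marker and its terminating `\.`; `sink` consumes the
// --echo-all output and counts the lines psql has echoed back. Once the source
// has ended, progress is linesProcessed / linesSent; before that, a time-based
// ramp simulates progress up to 0.3. Minimal hedged usage:
//
//   const [progressSource, progressSink] = createSqlProgressStreams((p) => console.log(p));
//   await pipeline(inputStream, progressSource, psql, progressSink);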