diff --git a/backup.db b/backup.db
index 4ab710b9306dc1..a1c75f32be3e58 100644
Binary files a/backup.db and b/backup.db differ
diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc
index 5a3ef62b3b6430..ba1df9116e5f91 100644
--- a/src/node_sqlite.cc
+++ b/src/node_sqlite.cc
@@ -141,7 +141,6 @@ class BackupJob : public ThreadPoolWork {
             std::string destination_name,
             std::string dest_db,
             int pages,
-            int sleep,
             Local<Function> progressFunc)
       : ThreadPoolWork(env, "node_sqlite3.BackupJob"),
         env_(env),
@@ -149,8 +148,7 @@ class BackupJob : public ThreadPoolWork {
         source_db_(source_db),
         destination_name_(destination_name),
         dest_db_(dest_db),
-        pages_(pages),
-        sleep_(sleep) {
+        pages_(pages) {
     resolver_.Reset(env->isolate(), resolver);
     progressFunc_.Reset(env->isolate(), progressFunc);
   }
@@ -179,16 +177,14 @@ class BackupJob : public ThreadPoolWork {
       return;
     }
 
-    std::cout << "Scheduling thread pool work" << std::endl;
-
     this->ScheduleWork();
   }
 
   void DoThreadPoolWork() override {
     int status = sqlite3_backup_step(pBackup_, pages_);
 
-    if (status == SQLITE_OK || status == SQLITE_BUSY || status == SQLITE_LOCKED) {
-      sqlite3_sleep(sleep_);
+    if (status == SQLITE_OK || status == SQLITE_BUSY ||
+        status == SQLITE_LOCKED) {
     } else {
       // return some error code
       backup_status_ = -1;
@@ -198,55 +194,82 @@ class BackupJob : public ThreadPoolWork {
   void AfterThreadPoolWork(int status) override {
     HandleScope handle_scope(env()->isolate());
 
+    if (resolver_.IsEmpty()) {
+      Cleanup();
+
+      return;
+    }
+
+    Local<Promise::Resolver> resolver =
+        Local<Promise::Resolver>::New(env()->isolate(), resolver_);
+
     int total_pages = sqlite3_backup_pagecount(pBackup_);
     int remaining_pages = sqlite3_backup_remaining(pBackup_);
 
     if (remaining_pages != 0) {
       Local<Function> fn =
-         Local<Function>::New(env()->isolate(), progressFunc_);
+          Local<Function>::New(env()->isolate(), progressFunc_);
 
-      // There's still work to do
-      Local<Value> argv[] = {
-          Integer::New(env()->isolate(), total_pages),
-          Integer::New(env()->isolate(), remaining_pages),
-      };
-      // handle possible exceptions
-      Local<Value> result =
+      if (!fn.IsEmpty()) {
+        Local<Value> argv[] = {
+            Integer::New(env()->isolate(), total_pages),
+            Integer::New(env()->isolate(), remaining_pages),
+        };
+
+        // TODO: handle possible exceptions
+        TryCatch try_catch(env()->isolate());
         fn->Call(env()->context(), Null(env()->isolate()), 2, argv)
-              .FromMaybe(Local<Value>());
+            .FromMaybe(Local<Value>());
+
+        if (try_catch.HasCaught()) {
+          Cleanup();
+          resolver->Reject(env()->context(), try_catch.Exception()).ToChecked();
+
+          return;
+        }
+      }
+
+      // There's still work to do
       this->ScheduleWork();
 
       return;
     }
 
-    if (!resolver_.IsEmpty()) {
-      Local<Promise::Resolver> resolver =
-          Local<Promise::Resolver>::New(env()->isolate(), resolver_);
-      Local<String> message =
-          String::NewFromUtf8(
-              env()->isolate(), "Backup completed", NewStringType::kNormal)
-              .ToLocalChecked();
-      Local<String> error_message =
-          String::NewFromUtf8(
-              env()->isolate(), "Could not finish backup", NewStringType::kNormal)
-              .ToLocalChecked();
-
-      sqlite3_backup_finish(pBackup_);
-      int status = sqlite3_errcode(pDest_);
-      sqlite3_close(pDest_);
+    Local<String> message =
+        String::NewFromUtf8(
+            env()->isolate(), "Backup completed", NewStringType::kNormal)
+            .ToLocalChecked();
+    Local<String> error_message =
+        String::NewFromUtf8(
+            env()->isolate(), "Could not finish backup", NewStringType::kNormal)
+            .ToLocalChecked();
 
-      if (status == SQLITE_OK) {
-        resolver->Resolve(env()->context(), message).ToChecked();
-      } else {
-        resolver->Reject(env()->context(), error_message).ToChecked();
-      }
+    int dest_status = Cleanup();
 
-      /* resolver->Resolve(env()->context(), message).ToChecked(); */
+    if (dest_status == SQLITE_OK) {
+      resolver->Resolve(env()->context(), message).ToChecked();
+    } else {
+      resolver->Reject(env()->context(), error_message).ToChecked();
     }
   }
 
  private:
+  int Cleanup() {
+    int status = 0;
+
+    if (pBackup_) {
+      sqlite3_backup_finish(pBackup_);
+    }
+
+    if (pDest_) {
+      status = sqlite3_errcode(pDest_);
+      sqlite3_close(pDest_);
+    }
+
+    return status;
+  }
+
   // https://github.com/nodejs/node/blob/649da3b8377e030ea7b9a1bc0308451e26e28740/src/crypto/crypto_keygen.h#L126
   int backup_status_;
 
@@ -263,7 +286,6 @@ class BackupJob : public ThreadPoolWork {
   std::string destination_name_;
   std::string dest_db_;
   int pages_;
-  int sleep_;
 };
 
 class UserDefinedFunction {
@@ -671,13 +693,14 @@ void DatabaseSync::Exec(const FunctionCallbackInfo<Value>& args) {
   CHECK_ERROR_OR_THROW(env->isolate(), db->connection_, r, SQLITE_OK, void());
 }
 
-// database.backup(destination, { sourceDb, targetDb, rate, progress: (total, remaining) => {} )
+// database.backup(destination, { sourceDb, targetDb, rate, progress: (total,
+// remaining) => {} )
 void DatabaseSync::Backup(const FunctionCallbackInfo<Value>& args) {
   Environment* env = Environment::GetCurrent(args);
 
   if (!args[0]->IsString()) {
-    THROW_ERR_INVALID_ARG_TYPE(env->isolate(),
-                               "The \"destination\" argument must be a string.");
+    THROW_ERR_INVALID_ARG_TYPE(
+        env->isolate(), "The \"destination\" argument must be a string.");
     return;
   }
 
@@ -697,26 +720,26 @@ void DatabaseSync::Backup(const FunctionCallbackInfo<Value>& args) {
   if (args.Length() > 1) {
     if (!args[1]->IsObject()) {
       THROW_ERR_INVALID_ARG_TYPE(env->isolate(),
-                                "The \"options\" argument must be an object.");
+                                 "The \"options\" argument must be an object.");
       return;
     }
 
     Local<Object> options = args[1].As<Object>();
     Local<String> progress_string =
-       FIXED_ONE_BYTE_STRING(env->isolate(), "progress");
-    Local<String> rate_string =
-       FIXED_ONE_BYTE_STRING(env->isolate(), "rate");
+        FIXED_ONE_BYTE_STRING(env->isolate(), "progress");
+    Local<String> rate_string = FIXED_ONE_BYTE_STRING(env->isolate(), "rate");
     Local<String> target_db_string =
-       FIXED_ONE_BYTE_STRING(env->isolate(), "targetDb");
+        FIXED_ONE_BYTE_STRING(env->isolate(), "targetDb");
     Local<String> source_db_string =
-       FIXED_ONE_BYTE_STRING(env->isolate(), "sourceDb");
+        FIXED_ONE_BYTE_STRING(env->isolate(), "sourceDb");
 
     Local<Value> rateValue =
-       options->Get(env->context(), rate_string).ToLocalChecked();
+        options->Get(env->context(), rate_string).ToLocalChecked();
 
     if (!rateValue->IsUndefined()) {
       if (!rateValue->IsInt32()) {
-        THROW_ERR_INVALID_ARG_TYPE(env->isolate(),
+        THROW_ERR_INVALID_ARG_TYPE(
+            env->isolate(),
             "The \"options.rate\" argument must be an integer.");
         return;
       }
@@ -725,37 +748,42 @@ void DatabaseSync::Backup(const FunctionCallbackInfo<Value>& args) {
     }
 
     Local<Value> sourceDbValue =
-       options->Get(env->context(), source_db_string).ToLocalChecked();
+        options->Get(env->context(), source_db_string).ToLocalChecked();
 
     if (!sourceDbValue->IsUndefined()) {
       if (!sourceDbValue->IsString()) {
-        THROW_ERR_INVALID_ARG_TYPE(env->isolate(),
+        THROW_ERR_INVALID_ARG_TYPE(
+            env->isolate(),
             "The \"options.sourceDb\" argument must be a string.");
         return;
       }
 
-      source_db = Utf8Value(env->isolate(), sourceDbValue.As<String>()).ToString();
+      source_db =
+          Utf8Value(env->isolate(), sourceDbValue.As<String>()).ToString();
     }
 
     Local<Value> targetDbValue =
-       options->Get(env->context(), target_db_string).ToLocalChecked();
+        options->Get(env->context(), target_db_string).ToLocalChecked();
 
     if (!targetDbValue->IsUndefined()) {
      if (!targetDbValue->IsString()) {
-        THROW_ERR_INVALID_ARG_TYPE(env->isolate(),
+        THROW_ERR_INVALID_ARG_TYPE(
+            env->isolate(),
             "The \"options.targetDb\" argument must be a string.");
         return;
       }
 
-      dest_db = Utf8Value(env->isolate(), targetDbValue.As<String>()).ToString();
+      dest_db =
+          Utf8Value(env->isolate(), targetDbValue.As<String>()).ToString();
     }
 
     Local<Value> progressValue =
-       options->Get(env->context(), progress_string).ToLocalChecked();
+        options->Get(env->context(), progress_string).ToLocalChecked();
 
     if (!progressValue->IsUndefined()) {
       if (!progressValue->IsFunction()) {
-        THROW_ERR_INVALID_ARG_TYPE(env->isolate(),
+        THROW_ERR_INVALID_ARG_TYPE(
+            env->isolate(),
             "The \"options.progress\" argument must be a function.");
         return;
       }
@@ -767,7 +795,7 @@ void DatabaseSync::Backup(const FunctionCallbackInfo<Value>& args) {
   args.GetReturnValue().Set(resolver->GetPromise());
 
   BackupJob* job = new BackupJob(
-      env, db, resolver, source_db, *destFilename, dest_db, rate, 0, progressFunc);
+      env, db, resolver, source_db, *destFilename, dest_db, rate, progressFunc);
   job->ScheduleBackup();
 }
 
diff --git a/test/parallel/test-sqlite-backup.mjs b/test/parallel/test-sqlite-backup.mjs
index debd8585e70009..502adba177792c 100644
--- a/test/parallel/test-sqlite-backup.mjs
+++ b/test/parallel/test-sqlite-backup.mjs
@@ -22,10 +22,13 @@ test('database backup', async () => {
   await database.backup('backup.db', {
     sourceDb: 'main',
     targetDb: 'main',
-    // progress: 'hello'
     progress: (totalPages, remainingPages) => {
       console.log('Backup progress:', { totalPages, remainingPages });
     }
+  }).catch(error => {
+    console.log('Something went wrong:', error)
+
+    return error;
   });
 
   const backup = new DatabaseSync('backup.db');