Fix "database is locked" when performing auto cleanup #4971

Open · wants to merge 2 commits into master
117 changes: 83 additions & 34 deletions Duplicati/Library/Main/Operation/BackupHandler.cs
@@ -52,6 +52,8 @@ internal class BackupHandler : IDisposable
private readonly string m_backendurl;

private LocalBackupDatabase m_database;
private BackupDatabase m_backupDb;
private BackendManager m_backendManager;
private System.Data.IDbTransaction m_transaction;

private Library.Utility.IFilter m_filter;
@@ -141,31 +143,42 @@ private UsnJournalService GetJournalService(IEnumerable<string> sources, ISnapsh
return service;
}

private void PreBackupVerify(BackendManager backend, string protectedfile)
private async Task PreBackupVerifyAsync(string protectedfile)
{
m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
using(new Logging.Timer(LOGTAG, "PreBackupVerify", "PreBackupVerify"))
using (new Logging.Timer(LOGTAG, "PreBackupVerify", "PreBackupVerify"))
{
try
{
if (m_options.NoBackendverification)
{
FilelistProcessor.VerifyLocalList(backend, m_database);
FilelistProcessor.VerifyLocalList(m_backendManager, m_database);
UpdateStorageStatsFromDatabase();
}
else
FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, new string[] { protectedfile });
FilelistProcessor.VerifyRemoteList(m_backendManager, m_options, m_database, m_result.BackendWriter, new string[] { protectedfile }, !m_options.AutoCleanup);
}
catch (RemoteListVerificationException ex)
{
if (m_options.AutoCleanup)
{
Logging.Log.WriteWarningMessage(LOGTAG, "BackendVerifyFailedAttemptingCleanup", ex, "Backend verification failed, attempting automatic cleanup");

// Repair might run a database recreate, so the database connection must be closed
await m_backupDb.CommitTransactionAsync("CommitBeforeRepair", false);
new DatabaseDisposeHelper(this, true).Dispose();
m_result.SetDatabase(null);
m_result.RepairResults = new RepairResults(m_result);
new RepairHandler(backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();
new RepairHandler(m_backendurl, m_options, (RepairResults)m_result.RepairResults).Run();
// Reopen connection
m_database = new LocalBackupDatabase(m_options.Dbpath, m_options);
m_result.SetDatabase(m_database);
m_backupDb = new BackupDatabase(m_database, m_options);
m_backendManager = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database);


Logging.Log.WriteInformationMessage(LOGTAG, "BackendCleanupFinished", "Backend cleanup finished, retrying verification");
FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, new string[] { protectedfile });
FilelistProcessor.VerifyRemoteList(m_backendManager, m_options, m_database, m_result.BackendWriter, new string[] { protectedfile });
}
else
throw;
@@ -395,12 +408,13 @@ private static async Task<long> FlushBackend(BackupResults result, IWriteChannel

private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, CancellationToken token)
{
m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);
m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

// New isolated scope for each operation
using(new IsolatedChannelScope())
using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
using (new IsolatedChannelScope())
using (new DatabaseDisposeHelper(this, true))
{
m_database = new LocalBackupDatabase(m_options.Dbpath, m_options);
m_result.SetDatabase(m_database);
m_result.Dryrun = m_options.Dryrun;

@@ -428,22 +442,24 @@ private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, Ca
try
{
// Setup runners and instances here
using(var db = new Backup.BackupDatabase(m_database, m_options))
using(var backendManager = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
using(var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
using(var stats = new Backup.BackupStatsCollector(m_result))
using (new DatabaseDisposeHelper(this, false))
using (var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
using (var stats = new Backup.BackupStatsCollector(m_result))
// Keep a reference to these channels to avoid shutdown
using(var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite))
using (var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite))
{
m_backupDb = new Backup.BackupDatabase(m_database, m_options);
m_backendManager = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database);

long filesetid;
var counterToken = new CancellationTokenSource();
var uploader = new Backup.BackendUploader(() => DynamicLoader.BackendLoader.GetBackend(m_backendurl, m_options.RawOptions), m_options, db, m_result.TaskReader, stats);
var uploader = new Backup.BackendUploader(() => DynamicLoader.BackendLoader.GetBackend(m_backendurl, m_options.RawOptions), m_options, m_backupDb, m_result.TaskReader, stats);
using (var snapshot = GetSnapshot(sources, m_options))
{
try
{
// Make sure the database is sane
await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, !m_options.DisableFilelistConsistencyChecks);
await m_backupDb.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, !m_options.DisableFilelistConsistencyChecks);

// Start the uploader process
uploaderTask = uploader.Run();
@@ -453,7 +469,7 @@ private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, Ca
long lastTempFilesetId = -1;
if (!m_options.DisableSyntheticFilelist)
{
var candidates = (await db.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray();
var candidates = (await m_backupDb.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray();
if (candidates.Any())
{
lastTempFilesetId = candidates.Last().Key;
@@ -463,33 +479,33 @@ private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, Ca

// TODO: Rewrite to using the uploader process, or the BackendHandler interface
// Do a remote verification, unless disabled
PreBackupVerify(backendManager, lastTempFilelist);
await PreBackupVerifyAsync(lastTempFilelist);

// If the previous backup was interrupted, send a synthetic list
await Backup.UploadSyntheticFilelist.Run(db, m_options, m_result, m_result.TaskReader, lastTempFilelist, lastTempFilesetId);
await Backup.UploadSyntheticFilelist.Run(m_backupDb, m_options, m_result, m_result.TaskReader, lastTempFilelist, lastTempFilesetId);

// Grab the previous backup ID, if any
var prevfileset = m_database.FilesetTimes.FirstOrDefault();
if (prevfileset.Value.ToUniversalTime() > m_database.OperationTimestamp.ToUniversalTime())
throw new Exception(string.Format("The previous backup has time {0}, but this backup has time {1}. Something is wrong with the clock.", prevfileset.Value.ToLocalTime(), m_database.OperationTimestamp.ToLocalTime()));

var lastfilesetid = prevfileset.Value.Ticks == 0 ? -1 : prevfileset.Key;

// Rebuild any index files that are missing
await Backup.RecreateMissingIndexFiles.Run(db, m_options, m_result.TaskReader);
await Backup.RecreateMissingIndexFiles.Run(m_backupDb, m_options, m_result.TaskReader);

// Prepare the operation by registering the filelist
m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);

var repcnt = 0;
while(repcnt < 100 && await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
while (repcnt < 100 && await m_backupDb.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++));

if (await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
if (await m_backupDb.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
throw new Exception("Unable to generate a unique fileset name");

var filesetvolumeid = await db.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);
filesetid = await db.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time);
var filesetvolumeid = await m_backupDb.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);
filesetid = await m_backupDb.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time);

// create USN-based scanner if enabled
var journalService = GetJournalService(sources, snapshot, filter, lastfilesetid);
@@ -508,7 +524,7 @@ private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, Ca
// Run the backup operation
if (await m_result.TaskReader.ProgressAsync)
{
await RunMainOperation(sources, snapshot, journalService, db, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, filesetid, lastfilesetid, token).ConfigureAwait(false);
await RunMainOperation(sources, snapshot, journalService, m_backupDb, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, filesetid, lastfilesetid, token).ConfigureAwait(false);
}
}
finally
@@ -523,29 +539,29 @@ private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, Ca

// Ensure the database is in a sane state after adding data
using (new Logging.Timer(LOGTAG, "VerifyConsistency", "VerifyConsistency"))
await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false);
await m_backupDb.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false);

// Send the actual filelist
if (await m_result.TaskReader.ProgressAsync)
await Backup.UploadRealFilelist.Run(m_result, db, m_options, filesetvolume, filesetid, m_result.TaskReader);
await Backup.UploadRealFilelist.Run(m_result, m_backupDb, m_options, filesetvolume, filesetid, m_result.TaskReader);

// Wait for upload completion
m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
var lastVolumeSize = await FlushBackend(m_result, uploadtarget, uploaderTask).ConfigureAwait(false);

// Make sure we have the database up-to-date
await db.CommitTransactionAsync("CommitAfterUpload", false);
await m_backupDb.CommitTransactionAsync("CommitAfterUpload", false);

// TODO: Remove this later
m_transaction = m_database.BeginTransaction();

if (await m_result.TaskReader.ProgressAsync)
CompactIfRequired(backendManager, lastVolumeSize);
CompactIfRequired(m_backendManager, lastVolumeSize);

if (m_options.UploadVerificationFile && await m_result.TaskReader.ProgressAsync)
{
m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
FilelistProcessor.UploadVerificationFile(backendManager.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
FilelistProcessor.UploadVerificationFile(m_backendManager.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
}

if (m_options.Dryrun)
@@ -599,7 +615,40 @@ private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, Ca
// TODO: We want to commit? always?
if (m_transaction != null)
try { m_transaction.Rollback(); }
catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); }
catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); }
}
}
}

// Using statements do not allow the object reference to change.
// This helper closes the database which may have been recreated due to a repair
private class DatabaseDisposeHelper : IDisposable
{
private readonly BackupHandler m_handler;
private readonly bool m_includeDatabase;

public DatabaseDisposeHelper(BackupHandler h, bool includeDatabase)
{
m_handler = h;
m_includeDatabase = includeDatabase;
}

public void Dispose()
{
if (m_handler.m_backendManager != null)
{
m_handler.m_backendManager.Dispose();
m_handler.m_backendManager = null;
}
if (m_handler.m_backupDb != null)
{
m_handler.m_backupDb.Dispose();
m_handler.m_backupDb = null;
}
if (m_includeDatabase && m_handler.m_database != null)
{
m_handler.m_database.Dispose();
m_handler.m_database = null;
}
}
}
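For readers unfamiliar with the constraint noted in the comment above the helper class, here is a minimal standalone sketch (not Duplicati code; all class and member names are hypothetical) of why a plain `using (m_database = new ...)` cannot survive the repair path: the `using` statement disposes the instance captured at acquisition, so a field reassigned inside the block (as happens when auto cleanup closes and recreates the database) escapes disposal, whereas a helper that reads the field at dispose time always releases the current instance.

```csharp
using System;

// Illustrative stand-in for a disposable database connection.
class Connection : IDisposable
{
    public string Name { get; }
    public Connection(string name) { Name = name; Console.WriteLine($"open {Name}"); }
    public void Dispose() => Console.WriteLine($"dispose {Name}");
}

class Handler
{
    public Connection m_conn;

    // Resolves the field at Dispose time instead of acquisition time,
    // mirroring the idea behind DatabaseDisposeHelper in the diff.
    class DisposeFieldHelper : IDisposable
    {
        private readonly Handler m_owner;
        public DisposeFieldHelper(Handler owner) { m_owner = owner; }
        public void Dispose()
        {
            m_owner.m_conn?.Dispose();
            m_owner.m_conn = null;
        }
    }

    public void RunNaive()
    {
        using (m_conn = new Connection("first"))
        {
            // Simulate a repair that closes and reopens the connection.
            m_conn.Dispose();
            m_conn = new Connection("second");
        } // the using block disposes "first" again; "second" is never disposed
    }

    public void RunWithHelper()
    {
        using (new DisposeFieldHelper(this))
        {
            m_conn = new Connection("first");
            m_conn.Dispose();
            m_conn = new Connection("second");
        } // the helper disposes whatever m_conn points to now: "second"
    }

    static void Main()
    {
        new Handler().RunNaive();
        new Handler().RunWithHelper();
    }
}
```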
18 changes: 14 additions & 4 deletions Duplicati/Library/Main/Operation/FilelistProcessor.cs
@@ -91,7 +91,8 @@ public static void VerifyRemoteList(BackendManager backend, Options options, Loc
/// <param name="database">The database to compare with</param>
/// <param name="log">The log instance to use</param>
/// <param name="protectedFiles">Filenames that should be exempted from deletion</param>
public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, IEnumerable<string> protectedFiles = null)
/// <param name="logError">Whether to log error messages on failure. When set to false, use the thrown exceptions to determine the cause</param>
public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, IEnumerable<string> protectedFiles = null, bool logError = true)
{
var tp = RemoteListAnalysis(backend, options, database, log, protectedFiles);
long extraCount = 0;
@@ -112,7 +113,10 @@ public static void VerifyRemoteList(BackendManager backend, Options options, Loc
if (extraCount > 0)
{
var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
Logging.Log.WriteErrorMessage(LOGTAG, "ExtraRemoteFiles", null, s);
if (logError)
{
Logging.Log.WriteErrorMessage(LOGTAG, "ExtraRemoteFiles", null, s);
}
throw new RemoteListVerificationException(s, "ExtraRemoteFiles");
}

@@ -122,7 +126,10 @@ public static void VerifyRemoteList(BackendManager backend, Options options, Loc
if (doubles.Count > 0)
{
var s = string.Format("Found remote files reported as duplicates, either the backend module is broken or you need to manually remove the extra copies.\nThe following files were found multiple times: {0}", string.Join(", ", doubles));
Logging.Log.WriteErrorMessage(LOGTAG, "DuplicateRemoteFiles", null, s);
if (logError)
{
Logging.Log.WriteErrorMessage(LOGTAG, "DuplicateRemoteFiles", null, s);
}
throw new RemoteListVerificationException(s, "DuplicateRemoteFiles");
}

@@ -134,7 +141,10 @@ public static void VerifyRemoteList(BackendManager backend, Options options, Loc
else
s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);

Logging.Log.WriteErrorMessage(LOGTAG, "MissingRemoteFiles", null, s);
if (logError)
{
Logging.Log.WriteErrorMessage(LOGTAG, "MissingRemoteFiles", null, s);
}
throw new RemoteListVerificationException(s, "MissingRemoteFiles");
}
}
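As a usage note, the following is a minimal sketch (not the actual Duplicati call sites; names and signatures are simplified stand-ins) of the calling pattern the new `logError` parameter supports: the first verification suppresses error logging because the caller handles the failure with an automatic repair, and only the retry reports errors normally.

```csharp
using System;

// Illustrative sketch of the logError calling convention; all names are hypothetical.
class VerificationSketch
{
    static bool s_firstRunFails = true;

    // Stand-in for FilelistProcessor.VerifyRemoteList(..., logError).
    static void VerifyRemoteList(bool logError)
    {
        if (s_firstRunFails)
        {
            s_firstRunFails = false;
            if (logError)
                Console.WriteLine("ERROR: remote files are missing, please run repair");
            throw new InvalidOperationException("MissingRemoteFiles");
        }
        Console.WriteLine("Verification succeeded");
    }

    static void RunRepair() => Console.WriteLine("Running repair ...");

    static void Main()
    {
        var autoCleanup = true;
        try
        {
            // With auto cleanup enabled the failure is expected to be handled,
            // so error logging is suppressed (logError = !autoCleanup).
            VerifyRemoteList(logError: !autoCleanup);
        }
        catch (InvalidOperationException) when (autoCleanup)
        {
            Console.WriteLine("WARNING: verification failed, attempting automatic cleanup");
            RunRepair();
            // Retry with logging enabled so a second failure is reported normally.
            VerifyRemoteList(logError: true);
        }
    }
}
```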