Simplify gRPC way of working out backup statuses
rzvoncek committed Nov 24, 2023
1 parent cdaa284 commit a5448fc
Showing 2 changed files with 57 additions and 38 deletions.
47 changes: 33 additions & 14 deletions medusa/service/grpc/server.py
@@ -153,7 +153,7 @@ def Backup(self, request, context):
                 BackupMan.register_backup(request.name, is_async=False)
                 backup_node.handle_backup(config=self.config, backup_name_arg=request.name, stagger_time=None,
                                           enable_md5_checks_flag=False, mode=mode)
-                record_status_in_response(response, request.name)
+                response = record_status_in_response(response, request.name)
                 return response
         except Exception as e:
             response.status = medusa_pb2.StatusType.FAILED
@@ -181,7 +181,7 @@ def BackupStatus(self, request, context):
             else:
                 response.finishTime = ""
             # record the status
-            record_status_in_response(response, request.backupName)
+            response = record_status_in_response(response, request.backupName)
         except KeyError:
             context.set_details("backup <{}> does not exist".format(request.backupName))
             context.set_code(grpc.StatusCode.NOT_FOUND)
@@ -191,13 +191,12 @@
 
     def GetBackup(self, request, context):
         response = medusa_pb2.GetBackupResponse()
-        last_status = medusa_pb2.StatusType.UNKNOWN
         try:
             with Storage(config=self.storage_config) as connected_storage:
                 backup = connected_storage.get_cluster_backup(request.backupName)
-                summary, response.status = get_backup_summary(backup, last_status)
+                summary = get_backup_summary(backup)
                 response.backup.CopyFrom(summary)
-                record_status_in_response(response, request.backupName)
+                response.status = summary.status
         except Exception as e:
             context.set_details("Failed to get backup due to error: {}".format(e))
             context.set_code(grpc.StatusCode.INTERNAL)
@@ -207,14 +206,14 @@ def GetBackups(self, request, context):
 
     def GetBackups(self, request, context):
         response = medusa_pb2.GetBackupsResponse()
-        last_status = medusa_pb2.StatusType.UNKNOWN
         try:
             # cluster backups
             with Storage(config=self.storage_config) as connected_storage:
                 backups = get_backups(connected_storage, self.config, True)
                 for backup in backups:
-                    summary, last_status = get_backup_summary(backup, last_status)
+                    summary = get_backup_summary(backup)
                     response.backups.append(summary)
+                set_overall_status(response)
 
         except Exception as e:
             context.set_details("Failed to get backups due to error: {}".format(e))
@@ -281,29 +280,48 @@ def PrepareRestore(self, request, context):
         return response
 
 
-def get_backup_summary(backup, last_status):
+def set_overall_status(get_backups_response):
+    get_backups_response.overallStatus = medusa_pb2.StatusType.UNKNOWN
+    backups = get_backups_response.backups
+    if all(backup.status == medusa_pb2.StatusType.SUCCESS for backup in backups):
+        get_backups_response.overallStatus = medusa_pb2.StatusType.SUCCESS
+    if any(backup.status == medusa_pb2.StatusType.IN_PROGRESS for backup in backups):
+        get_backups_response.overallStatus = medusa_pb2.StatusType.IN_PROGRESS
+    if any(backup.status == medusa_pb2.StatusType.FAILED for backup in backups):
+        get_backups_response.overallStatus = medusa_pb2.StatusType.FAILED
+    if any(backup.status == medusa_pb2.StatusType.UNKNOWN for backup in backups):
+        get_backups_response.overallStatus = medusa_pb2.StatusType.UNKNOWN
+
+
+def get_backup_summary(backup):
     summary = medusa_pb2.BackupSummary()
 
     summary.backupName = backup.name
 
     if backup.started is None:
         summary.startTime = 0
     else:
-        summary.startTime = 1234
+        summary.startTime = backup.started()
 
     if backup.finished is None:
         summary.finishTime = 0
         summary.status = medusa_pb2.StatusType.IN_PROGRESS
-        last_status = medusa_pb2.StatusType.IN_PROGRESS
     else:
-        summary.finishTime = backup.finished
-        if last_status != medusa_pb2.StatusType.IN_PROGRESS:
-            summary.status = medusa_pb2.StatusType.SUCCESS
+        summary.finishTime = backup.finished()
+        summary.status = medusa_pb2.StatusType.SUCCESS
 
     summary.totalNodes = len(backup.tokenmap)
     summary.finishedNodes = len(backup.complete_nodes())
 
     for node in backup.tokenmap:
         summary.nodes.append(create_token_map_node(backup, node))
 
     summary.backupType = backup.backup_type
 
     summary.totalSize = backup.size()
     summary.totalObjects = backup.num_objects()
-    return summary, last_status
+
+    return summary
 
 
 # Callback function for recording unique backup results
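Aside: the new set_overall_status relies on the order of its checks, so later matches override earlier ones: any UNKNOWN backup wins outright, then FAILED, then IN_PROGRESS, with SUCCESS only when every backup succeeded. Below is a minimal standalone sketch of that precedence, using plain ints as stand-ins for the medusa_pb2.StatusType enum (the real function mutates a GetBackupsResponse in place rather than returning a value):

# Stand-ins for the medusa_pb2.StatusType enum values.
SUCCESS, IN_PROGRESS, FAILED, UNKNOWN = range(4)

def overall_status(statuses):
    # Mirrors set_overall_status: each later check overrides the earlier ones.
    overall = UNKNOWN
    if all(s == SUCCESS for s in statuses):
        overall = SUCCESS
    if any(s == IN_PROGRESS for s in statuses):
        overall = IN_PROGRESS
    if any(s == FAILED for s in statuses):
        overall = FAILED
    if any(s == UNKNOWN for s in statuses):
        overall = UNKNOWN
    return overall

assert overall_status([SUCCESS, SUCCESS]) == SUCCESS
assert overall_status([SUCCESS, IN_PROGRESS]) == IN_PROGRESS
assert overall_status([IN_PROGRESS, FAILED]) == FAILED
assert overall_status([FAILED, UNKNOWN]) == UNKNOWN
assert overall_status([]) == SUCCESS  # all() over an empty sequence is vacuously True

One edge case worth noting: with no backups at all, the vacuous all() yields SUCCESS.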
@@ -350,6 +368,7 @@ def record_status_in_response(response, backup_name):
         response.status = medusa_pb2.StatusType.FAILED
     if status == BackupMan.STATUS_SUCCESS:
         response.status = medusa_pb2.StatusType.SUCCESS
+    return response
 
 
 def handle_backup_removal(backup_name):
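The other half of the simplification is in get_backup_summary: a backup's status is now derived purely from its own timestamps instead of threading a last_status accumulator across backups. A standalone sketch of the rule, with plain values standing in for the proto fields and for ClusterBackup.started()/finished() (finished being None while a backup is still running):

def summarize(started, finished):
    # startTime/finishTime fall back to 0 when unknown, as in the proto defaults.
    summary = {"startTime": started if started is not None else 0}
    if finished is None:
        # No finish timestamp yet: the backup is still running.
        summary["finishTime"] = 0
        summary["status"] = "IN_PROGRESS"
    else:
        summary["finishTime"] = finished
        summary["status"] = "SUCCESS"
    return summary

assert summarize(12345, None)["status"] == "IN_PROGRESS"
assert summarize(12345, 123456)["status"] == "SUCCESS"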
48 changes: 24 additions & 24 deletions tests/service/grpc/server_test.py
@@ -118,30 +118,30 @@ def test_get_known_incomplete_backup(self):
         tokenmap.__iter__ = lambda _: list(tokenmap_dict.keys()).__iter__()
         tokenmap.__len__ = lambda _: len(tokenmap_dict.keys())
         # we don't ever create any file, so we won't get a timestamp to get the finish time from
-        with patch('medusa.storage.ClusterBackup.finished', return_value=123456):
-            # prevent calls to the storage by faking the get_cluster_backup method
-            with patch('medusa.storage.Storage.get_cluster_backup', return_value=cluster_backup):
-                request = medusa_pb2.BackupStatusRequest(backupName='backup1')
-                context = Mock(spec=ServicerContext)
-                get_backup_response = service.GetBackup(request, context)
-
-                self.assertEqual(medusa_pb2.StatusType.IN_PROGRESS, get_backup_response.status)
-
-                self.assertEqual('backup1', get_backup_response.backup.backupName)
-                self.assertEqual(1234, get_backup_response.backup.startTime)
-                # the finishTime is 1 because it's the proto's default value. the magic mock does not set this
-                self.assertEqual(1, get_backup_response.backup.finishTime)
-                self.assertEqual(2, get_backup_response.backup.totalNodes)
-                self.assertEqual(1, get_backup_response.backup.finishedNodes)
-                # the BackupNode records ought to be more populated than this, but we test that in ITs instead
-                self.assertEqual(
-                    [medusa_pb2.BackupNode(host='node1'), medusa_pb2.BackupNode(host='node2')],
-                    get_backup_response.backup.nodes
-                )
-                # this should also be IN_PROGRESS but because the ClusterBackup.finished is a mock
-                # we cannot correctly make it be 'None' when needed (some other things break)
-                self.assertEqual(medusa_pb2.StatusType.SUCCESS, get_backup_response.backup.status)
-                self.assertEqual('differential', get_backup_response.backup.backupType)
+        with patch('medusa.storage.ClusterBackup.started', return_value=12345):
+            with patch('medusa.storage.ClusterBackup.finished', return_value=123456):
+                # prevent calls to the storage by faking the get_cluster_backup method
+                with patch('medusa.storage.Storage.get_cluster_backup', return_value=cluster_backup):
+                    request = medusa_pb2.BackupStatusRequest(backupName='backup1')
+                    context = Mock(spec=ServicerContext)
+                    get_backup_response = service.GetBackup(request, context)
+
+                    self.assertEqual(medusa_pb2.StatusType.SUCCESS, get_backup_response.status)
+
+                    self.assertEqual('backup1', get_backup_response.backup.backupName)
+                    self.assertEqual(12345, get_backup_response.backup.startTime)
+                    self.assertEqual(123456, get_backup_response.backup.finishTime)
+                    self.assertEqual(2, get_backup_response.backup.totalNodes)
+                    self.assertEqual(1, get_backup_response.backup.finishedNodes)
+                    # the BackupNode records ought to be more populated than this, but we test that in ITs instead
+                    self.assertEqual(
+                        [medusa_pb2.BackupNode(host='node1'), medusa_pb2.BackupNode(host='node2')],
+                        get_backup_response.backup.nodes
+                    )
+                    # this should also be IN_PROGRESS but because the ClusterBackup.finished is a mock
+                    # we cannot correctly make it be 'None' when needed (some other things break)
+                    self.assertEqual(medusa_pb2.StatusType.SUCCESS, get_backup_response.backup.status)
+                    self.assertEqual('differential', get_backup_response.backup.backupType)
 
     def test_get_backup_status_unknown_backup(self):
         # start the Medusa service
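The closing comment in the test above points at a unittest.mock limitation worth spelling out: patching a method with return_value replaces it with a MagicMock, so an attribute check like backup.finished is None can never be true while the patch is active. A self-contained illustration, where ClusterBackup is a hypothetical stand-in for medusa.storage.ClusterBackup:

from unittest.mock import patch

class ClusterBackup:
    def finished(self):
        return None  # stand-in: None would mean "still running"

backup = ClusterBackup()
with patch.object(ClusterBackup, 'finished', return_value=123456):
    # The patch swaps the attribute for a MagicMock, so the `is None`
    # check in get_backup_summary cannot take the IN_PROGRESS branch here...
    assert backup.finished is not None
    # ...and calling the mock yields the patched timestamp.
    assert backup.finished() == 123456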
