From ed57a3817ec6d6936152f0b786fd953dcbf93c2f Mon Sep 17 00:00:00 2001 From: John Baublitz Date: Tue, 30 Jul 2019 11:16:58 -0400 Subject: [PATCH] Changes to DBus and engine APIs for idempotence Make internal changes to the engine to better support idempotent operations using the DBus API so that the return value of DBus methods provides information on what was changed and omits resources that were not changed. Co-Authored-By: mulhern --- src/dbus_api/api.rs | 85 +++--- src/dbus_api/blockdev.rs | 27 +- src/dbus_api/filesystem.rs | 22 +- src/dbus_api/pool.rs | 166 ++++++++---- src/engine/engine.rs | 27 +- src/engine/mod.rs | 5 +- src/engine/sim_engine/engine.rs | 113 +++++--- src/engine/sim_engine/pool.rs | 245 +++++++++++------- .../strat_engine/backstore/backstore.rs | 16 +- src/engine/strat_engine/engine.rs | 60 +++-- src/engine/strat_engine/pool.rs | 75 +++--- src/engine/strat_engine/thinpool/thinpool.rs | 16 +- src/engine/types/actions.rs | 135 ++++++++++ src/engine/{types.rs => types/mod.rs} | 12 +- .../src/stratisd_client_dbus/_data.py | 16 +- .../tests/dbus/filesystem/test_properties.py | 4 +- .../tests/dbus/filesystem/test_rename.py | 9 +- .../tests/dbus/manager/test_create.py | 8 +- .../tests/dbus/manager/test_destroy.py | 10 +- .../tests/dbus/pool/test_add_cache_devs.py | 28 +- .../tests/dbus/pool/test_add_data_devs.py | 11 +- .../tests/dbus/pool/test_create_filesystem.py | 36 ++- .../tests/dbus/pool/test_create_snapshot.py | 15 +- .../dbus/pool/test_destroy_filesystem.py | 20 +- .../tests/dbus/pool/test_rename.py | 12 +- tests/client-dbus/tests/udev/test_udev.py | 6 +- 26 files changed, 785 insertions(+), 394 deletions(-) create mode 100644 src/engine/types/actions.rs rename src/engine/{types.rs => types/mod.rs} (95%) diff --git a/src/dbus_api/api.rs b/src/dbus_api/api.rs index ef4d762582..9064a6c4e1 100644 --- a/src/dbus_api/api.rs +++ b/src/dbus_api/api.rs @@ -26,7 +26,7 @@ use crate::{ engine_to_dbus_err_tuple, get_next_arg, msg_code_ok, msg_string_ok, tuple_to_option, }, }, - engine::{Engine, Pool, PoolUuid}, + engine::{CreateAction, DeleteAction, Engine, Pool, PoolUuid}, stratis::VERSION, }; @@ -47,28 +47,30 @@ fn create_pool(m: &MethodInfo, TData>) -> MethodResult { let return_message = message.method_return(); - let default_return: (dbus::Path, Vec) = (dbus::Path::default(), Vec::new()); + let default_return: (bool, (dbus::Path<'static>, Vec>)) = + (false, (dbus::Path::default(), Vec::new())); let msg = match result { - Ok(pool_uuid) => { - let (_, pool) = get_mut_pool!(engine; pool_uuid; default_return; return_message); - - let pool_object_path: dbus::Path = - create_dbus_pool(dbus_context, object_path.clone(), pool_uuid, pool); - - let bd_object_paths = pool - .blockdevs_mut() - .into_iter() - .map(|(uuid, bd)| { - create_dbus_blockdev(dbus_context, pool_object_path.clone(), uuid, bd) - }) - .collect::>(); - - return_message.append3( - (pool_object_path, bd_object_paths), - msg_code_ok(), - msg_string_ok(), - ) + Ok(pool_uuid_action) => { + let results = match pool_uuid_action { + CreateAction::Created(uuid) => { + let (_, pool) = get_mut_pool!(engine; uuid; default_return; return_message); + + let pool_object_path: dbus::Path = + create_dbus_pool(dbus_context, object_path.clone(), uuid, pool); + + let bd_paths = pool + .blockdevs_mut() + .into_iter() + .map(|(uuid, bd)| { + create_dbus_blockdev(dbus_context, pool_object_path.clone(), uuid, bd) + }) + .collect::>(); + (true, (pool_object_path, bd_paths)) + } + CreateAction::Identity => default_return, + }; + 
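// A minimal standalone sketch of the pattern used above: the CreateAction
// type introduced in src/engine/types/actions.rs is mapped onto the
// idempotent D-Bus return convention, a (bool, value) tuple whose bool is
// false when nothing changed. The enum is inlined here, and the payload is
// simplified to a String, only so the sketch compiles and runs on its own;
// the real create_pool handler returns the nested (bool, (o, ao)) tuple.
enum CreateAction<T> {
    // The thing already existed; nothing was changed.
    Identity,
    // The thing was created.
    Created(T),
}

fn to_dbus_result(action: CreateAction<String>, default: String) -> (bool, String) {
    match action {
        // Something was created; flag the change and report what changed.
        CreateAction::Created(path) => (true, path),
        // Nothing changed; return the default payload with a false flag.
        CreateAction::Identity => (false, default),
    }
}

fn main() {
    assert_eq!(
        to_dbus_result(CreateAction::Created("/pool/1".to_string()), String::new()),
        (true, "/pool/1".to_string())
    );
    assert_eq!(
        to_dbus_result(CreateAction::Identity, String::new()),
        (false, String::new())
    );
}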
return_message.append3(results, msg_code_ok(), msg_string_ok()) } Err(x) => { let (rc, rs) = engine_to_dbus_err_tuple(&x); @@ -82,15 +84,20 @@ fn destroy_pool(m: &MethodInfo, TData>) -> MethodResult { let message: &Message = m.msg; let mut iter = message.iter_init(); - let object_path: dbus::Path<'static> = get_next_arg(&mut iter, 0)?; + let pool_path: dbus::Path<'static> = get_next_arg(&mut iter, 0)?; let dbus_context = m.tree.get_data(); - let default_return = false; + let default_return = (false, uuid_to_string!(PoolUuid::nil())); let return_message = message.method_return(); - let pool_uuid = match m.tree.get(&object_path) { - Some(pool_path) => get_data!(pool_path; default_return; return_message).uuid, + let pool_uuid = match m + .tree + .get(&pool_path) + .and_then(|op| op.get_data().as_ref()) + .map(|d| d.uuid) + { + Some(uuid) => uuid, None => { return Ok(vec![return_message.append3( default_return, @@ -101,12 +108,19 @@ fn destroy_pool(m: &MethodInfo, TData>) -> MethodResult { }; let msg = match dbus_context.engine.borrow_mut().destroy_pool(pool_uuid) { - Ok(action) => { + Ok(DeleteAction::Deleted(uuid)) => { dbus_context .actions .borrow_mut() - .push_remove(&object_path, m.tree); - return_message.append3(action, msg_code_ok(), msg_string_ok()) + .push_remove(&pool_path, m.tree); + return_message.append3( + (true, uuid_to_string!(uuid)), + msg_code_ok(), + msg_string_ok(), + ) + } + Ok(DeleteAction::Identity) => { + return_message.append3(default_return, msg_code_ok(), msg_string_ok()) } Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); @@ -155,14 +169,25 @@ fn get_base_tree<'a>(dbus_context: DbusContext) -> (Tree, TData>, db .in_arg(("name", "s")) .in_arg(("redundancy", "(bq)")) .in_arg(("devices", "as")) - .out_arg(("result", "(oao)")) + // In order from left to right: + // b: true if a pool was created and object paths were returned + // o: Object path for Pool + // a(o): Array of object paths for block devices + // + // Rust representation: (bool, (dbus::Path, Vec)) + .out_arg(("result", "(b(oao))")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); let destroy_pool_method = f .method("DestroyPool", (), destroy_pool) .in_arg(("pool", "o")) - .out_arg(("action", "b")) + // In order from left to right: + // b: true if a valid UUID is returned - otherwise no action was performed + // s: String representation of pool UUID that was destroyed + // + // Rust representation: (bool, String) + .out_arg(("result", "(bs)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); diff --git a/src/dbus_api/blockdev.rs b/src/dbus_api/blockdev.rs index 225fa807d2..8fb1abe157 100644 --- a/src/dbus_api/blockdev.rs +++ b/src/dbus_api/blockdev.rs @@ -22,7 +22,7 @@ use crate::{ msg_code_ok, msg_string_ok, }, }, - engine::{BlockDev, BlockDevTier, MaybeDbusPath}, + engine::{BlockDev, BlockDevTier, DevUuid, MaybeDbusPath, RenameAction}, }; pub fn create_dbus_blockdev<'a>( @@ -36,7 +36,11 @@ pub fn create_dbus_blockdev<'a>( let set_userid_method = f .method("SetUserInfo", (), set_user_info) .in_arg(("id", "s")) - .out_arg(("changed", "b")) + // b: false if no change to the user info + // s: UUID of the changed device + // + // Rust representation: (bool, String) + .out_arg(("changed", "(bs)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); @@ -131,7 +135,7 @@ fn set_user_info(m: &MethodInfo, TData>) -> MethodResult { let dbus_context = m.tree.get_data(); let object_path = m.path.get_name(); let return_message = message.method_return(); - let 
default_return = false; + let default_return = (false, uuid_to_string!(DevUuid::nil())); let blockdev_path = m .tree @@ -148,7 +152,22 @@ fn set_user_info(m: &MethodInfo, TData>) -> MethodResult { let result = pool.set_blockdev_user_info(&pool_name, blockdev_data.uuid, new_id); let msg = match result { - Ok(id_changed) => return_message.append3(id_changed, msg_code_ok(), msg_string_ok()), + Ok(RenameAction::NoSource) => { + let error_message = format!( + "pool doesn't know about block device {}", + blockdev_data.uuid + ); + let (rc, rs) = (DbusErrorEnum::INTERNAL_ERROR as u16, error_message); + return_message.append3(default_return, rc, rs) + } + Ok(RenameAction::Renamed(uuid)) => return_message.append3( + (true, uuid_to_string!(uuid)), + msg_code_ok(), + msg_string_ok(), + ), + Ok(RenameAction::Identity) => { + return_message.append3(default_return, msg_code_ok(), msg_string_ok()) + } Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); return_message.append3(default_return, rc, rs) diff --git a/src/dbus_api/filesystem.rs b/src/dbus_api/filesystem.rs index 8fee78bf43..8a79e4f55e 100644 --- a/src/dbus_api/filesystem.rs +++ b/src/dbus_api/filesystem.rs @@ -12,8 +12,6 @@ use dbus::{ Message, }; -use uuid::Uuid; - use crate::{ dbus_api::{ consts, @@ -23,13 +21,15 @@ use crate::{ msg_code_ok, msg_string_ok, }, }, - engine::{filesystem_mount_path, Filesystem, MaybeDbusPath, Name, RenameAction}, + engine::{ + filesystem_mount_path, Filesystem, FilesystemUuid, MaybeDbusPath, Name, RenameAction, + }, }; pub fn create_dbus_filesystem<'a>( dbus_context: &DbusContext, parent: dbus::Path<'static>, - uuid: Uuid, + uuid: FilesystemUuid, filesystem: &mut dyn Filesystem, ) -> dbus::Path<'a> { let f = Factory::new_fn(); @@ -37,7 +37,11 @@ pub fn create_dbus_filesystem<'a>( let rename_method = f .method("SetName", (), rename_filesystem) .in_arg(("name", "s")) - .out_arg(("action", "b")) + // b: true if UUID of changed resource has been returned + // s: UUID of changed resource + // + // Rust representation: (bool, String) + .out_arg(("result", "(bs)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); @@ -108,7 +112,7 @@ fn rename_filesystem(m: &MethodInfo, TData>) -> MethodResult { let dbus_context = m.tree.get_data(); let object_path = m.path.get_name(); let return_message = message.method_return(); - let default_return = false; + let default_return = (false, uuid_to_string!(FilesystemUuid::nil())); let filesystem_path = m .tree @@ -134,7 +138,11 @@ fn rename_filesystem(m: &MethodInfo, TData>) -> MethodResult { Ok(RenameAction::Identity) => { return_message.append3(default_return, msg_code_ok(), msg_string_ok()) } - Ok(RenameAction::Renamed) => return_message.append3(true, msg_code_ok(), msg_string_ok()), + Ok(RenameAction::Renamed(uuid)) => return_message.append3( + (true, uuid_to_string!(uuid)), + msg_code_ok(), + msg_string_ok(), + ), Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); return_message.append3(default_return, rc, rs) diff --git a/src/dbus_api/pool.rs b/src/dbus_api/pool.rs index f0e6da5a9f..d5b1c141d1 100644 --- a/src/dbus_api/pool.rs +++ b/src/dbus_api/pool.rs @@ -13,8 +13,6 @@ use dbus::{ Message, }; -use uuid::Uuid; - use devicemapper::Sectors; use crate::{ @@ -28,7 +26,10 @@ use crate::{ msg_string_ok, }, }, - engine::{BlockDevTier, MaybeDbusPath, Name, Pool, RenameAction}, + engine::{ + BlockDevTier, CreateAction, EngineAction, FilesystemUuid, MaybeDbusPath, Name, Pool, + PoolUuid, RenameAction, + }, }; fn create_filesystems(m: &MethodInfo, TData>) -> 
MethodResult { @@ -40,7 +41,7 @@ fn create_filesystems(m: &MethodInfo, TData>) -> MethodResult { let object_path = m.path.get_name(); let return_message = message.method_return(); - let default_return: Vec<(dbus::Path, &str)> = Vec::new(); + let default_return: (bool, Vec<(dbus::Path, &str)>) = (false, Vec::new()); if filesystems.count() > 1 { let error_message = "only 1 filesystem per request allowed"; @@ -65,15 +66,23 @@ fn create_filesystems(m: &MethodInfo, TData>) -> MethodResult { .collect::)>>(), ); - let msg = match result { - Ok(ref infos) => { - let return_value = infos + let infos = match result { + Ok(created_set) => created_set.changed(), + Err(err) => { + let (rc, rs) = engine_to_dbus_err_tuple(&err); + return Ok(vec![return_message.append3(default_return, rc, rs)]); + } + }; + + let return_value = match infos { + Some(ref newly_created_filesystems) => { + let v = newly_created_filesystems .iter() .map(|&(name, uuid)| { + // FIXME: To avoid this expect, modify create_filesystem + // so that it returns a mutable reference to the + // filesystem created. ( - // FIXME: To avoid this expect, modify create_filesystem - // so that it returns a mutable reference to the - // filesystem created. create_dbus_filesystem( dbus_context, object_path.clone(), @@ -86,15 +95,16 @@ fn create_filesystems(m: &MethodInfo, TData>) -> MethodResult { ) }) .collect::>(); - - return_message.append3(return_value, msg_code_ok(), msg_string_ok()) - } - Err(err) => { - let (rc, rs) = engine_to_dbus_err_tuple(&err); - return_message.append3(default_return, rc, rs) + (true, v) } + None => default_return, }; - Ok(vec![msg]) + + Ok(vec![return_message.append3( + return_value, + msg_code_ok(), + msg_string_ok(), + )]) } fn destroy_filesystems(m: &MethodInfo, TData>) -> MethodResult { @@ -106,7 +116,7 @@ fn destroy_filesystems(m: &MethodInfo, TData>) -> MethodResult { let dbus_context = m.tree.get_data(); let object_path = m.path.get_name(); let return_message = message.method_return(); - let default_return: Vec<&str> = Vec::new(); + let default_return: (bool, Vec) = (false, Vec::new()); let pool_path = m .tree @@ -117,29 +127,39 @@ fn destroy_filesystems(m: &MethodInfo, TData>) -> MethodResult { let mut engine = dbus_context.engine.borrow_mut(); let (pool_name, pool) = get_mut_pool!(engine; pool_uuid; default_return; return_message); - let mut filesystem_map: HashMap> = HashMap::new(); - for op in filesystems { - if let Some(filesystem_path) = m.tree.get(&op) { - let filesystem_uuid = get_data!(filesystem_path; default_return; return_message).uuid; - filesystem_map.insert(filesystem_uuid, op); - } - } + let filesystem_map: HashMap> = filesystems + .filter_map(|path| { + m.tree.get(&path).and_then(|op| { + op.get_data() + .as_ref() + .map(|d| (d.uuid, op.get_name().clone())) + }) + }) + .collect(); let result = pool.destroy_filesystems( &pool_name, - &filesystem_map.keys().cloned().collect::>(), + &filesystem_map.keys().cloned().collect::>(), ); let msg = match result { - Ok(ref uuids) => { - for uuid in uuids { - let op = filesystem_map - .get(uuid) - .expect("'uuids' is a subset of filesystem_map.keys()"); - dbus_context.actions.borrow_mut().push_remove(op, m.tree); - } - - let return_value: Vec = uuids.iter().map(|n| uuid_to_string!(n)).collect(); - return_message.append3(return_value, msg_code_ok(), msg_string_ok()) + Ok(uuids) => { + // Only get changed values here as non-existant filesystems will have been filtered out + // before calling destroy_filesystems + let uuid_vec: Vec = if let Some(ref 
changed_uuids) = uuids.changed() { + for uuid in changed_uuids { + let op = filesystem_map + .get(uuid) + .expect("'uuids' is a subset of filesystem_map.keys()"); + dbus_context.actions.borrow_mut().push_remove(op, m.tree); + } + changed_uuids + .iter() + .map(|uuid| uuid_to_string!(uuid)) + .collect() + } else { + Vec::new() + }; + return_message.append3((true, uuid_vec), msg_code_ok(), msg_string_ok()) } Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); @@ -159,7 +179,7 @@ fn snapshot_filesystem(m: &MethodInfo, TData>) -> MethodResult { let dbus_context = m.tree.get_data(); let object_path = m.path.get_name(); let return_message = message.method_return(); - let default_return = dbus::Path::default(); + let default_return = (false, dbus::Path::default()); let pool_path = m .tree @@ -180,10 +200,13 @@ fn snapshot_filesystem(m: &MethodInfo, TData>) -> MethodResult { let (pool_name, pool) = get_mut_pool!(engine; pool_uuid; default_return; return_message); let msg = match pool.snapshot_filesystem(pool_uuid, &pool_name, fs_uuid, snapshot_name) { - Ok((uuid, fs)) => { + Ok(CreateAction::Created((uuid, fs))) => { let fs_object_path: dbus::Path = create_dbus_filesystem(dbus_context, object_path.clone(), uuid, fs); - return_message.append3(fs_object_path, msg_code_ok(), msg_string_ok()) + return_message.append3((true, fs_object_path), msg_code_ok(), msg_string_ok()) + } + Ok(CreateAction::Identity) => { + return_message.append3(default_return, msg_code_ok(), msg_string_ok()) } Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); @@ -203,7 +226,7 @@ fn add_blockdevs(m: &MethodInfo, TData>, tier: BlockDevTier) -> Meth let dbus_context = m.tree.get_data(); let object_path = m.path.get_name(); let return_message = message.method_return(); - let default_return: Vec = Vec::new(); + let default_return: (bool, Vec) = (false, Vec::new()); let pool_path = m .tree @@ -217,8 +240,8 @@ fn add_blockdevs(m: &MethodInfo, TData>, tier: BlockDevTier) -> Meth let blockdevs = devs.map(|x| Path::new(x)).collect::>(); let result = pool.add_blockdevs(pool_uuid, &*pool_name, &blockdevs, tier); - let msg = match result { - Ok(uuids) => { + let msg = match result.map(|bds| bds.changed()) { + Ok(Some(uuids)) => { let return_value = uuids .iter() .map(|uuid| { @@ -236,8 +259,9 @@ fn add_blockdevs(m: &MethodInfo, TData>, tier: BlockDevTier) -> Meth }) .collect::>(); - return_message.append3(return_value, msg_code_ok(), msg_string_ok()) + return_message.append3((true, return_value), msg_code_ok(), msg_string_ok()) } + Ok(None) => return_message.append3(default_return, msg_code_ok(), msg_string_ok()), Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); return_message.append3(default_return, rc, rs) @@ -264,7 +288,7 @@ fn rename_pool(m: &MethodInfo, TData>) -> MethodResult { let dbus_context = m.tree.get_data(); let object_path = m.path.get_name(); let return_message = message.method_return(); - let default_return = false; + let default_return = (false, uuid_to_string!(PoolUuid::nil())); let pool_path = m .tree @@ -278,12 +302,18 @@ fn rename_pool(m: &MethodInfo, TData>) -> MethodResult { .rename_pool(pool_uuid, new_name) { Ok(RenameAction::NoSource) => { - let error_message = format!("engine doesn't know about pool {}", &pool_uuid); + let error_message = format!("engine doesn't know about pool {}", pool_uuid); let (rc, rs) = (DbusErrorEnum::INTERNAL_ERROR as u16, error_message); return_message.append3(default_return, rc, rs) } - Ok(RenameAction::Identity) => return_message.append3(false, 
msg_code_ok(), msg_string_ok()), - Ok(RenameAction::Renamed) => return_message.append3(true, msg_code_ok(), msg_string_ok()), + Ok(RenameAction::Identity) => { + return_message.append3(default_return, msg_code_ok(), msg_string_ok()) + } + Ok(RenameAction::Renamed(uuid)) => return_message.append3( + (true, uuid_to_string!(uuid)), + msg_code_ok(), + msg_string_ok(), + ), Err(err) => { let (rc, rs) = engine_to_dbus_err_tuple(&err); return_message.append3(default_return, rc, rs) @@ -301,7 +331,7 @@ fn get_pool_property( getter: F, ) -> Result<(), MethodErr> where - F: Fn((Name, Uuid, &dyn Pool)) -> Result, + F: Fn((Name, PoolUuid, &dyn Pool)) -> Result, R: dbus::arg::Append, { let dbus_context = p.tree.get_data(); @@ -334,7 +364,7 @@ fn get_pool_total_physical_used( i: &mut IterAppend, p: &PropInfo, TData>, ) -> Result<(), MethodErr> { - fn get_used((_, uuid, pool): (Name, Uuid, &dyn Pool)) -> Result { + fn get_used((_, uuid, pool): (Name, PoolUuid, &dyn Pool)) -> Result { let err_func = |_| { MethodErr::failed(&format!( "no total physical size computed for pool with uuid {}", @@ -377,7 +407,7 @@ fn get_space_state(i: &mut IterAppend, p: &PropInfo, TData>) -> Resu pub fn create_dbus_pool<'a>( dbus_context: &DbusContext, parent: dbus::Path<'static>, - uuid: Uuid, + uuid: PoolUuid, pool: &mut dyn Pool, ) -> dbus::Path<'a> { let f = Factory::new_fn(); @@ -385,35 +415,55 @@ pub fn create_dbus_pool<'a>( let create_filesystems_method = f .method("CreateFilesystems", (), create_filesystems) .in_arg(("specs", "as")) - .out_arg(("filesystems", "a(os)")) + // b: true if filesystems were created + // a(os): Array of tuples with object paths and names + // + // Rust representation: (bool, Vec<(dbus::Path, String)>) + .out_arg(("results", "(ba(os))")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); let destroy_filesystems_method = f .method("DestroyFilesystems", (), destroy_filesystems) .in_arg(("filesystems", "ao")) - .out_arg(("results", "as")) + // b: true if filesystems were destroyed + // as: Array of UUIDs of destroyed filesystems + // + // Rust representation: (bool, Vec) + .out_arg(("results", "(bas)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); let add_blockdevs_method = f .method("AddDataDevs", (), add_datadevs) .in_arg(("devices", "as")) - .out_arg(("results", "ao")) + // b: Indicates if any data devices were added + // ao: Array of object paths of created data devices + // + // Rust representation: (bool, Vec) + .out_arg(("results", "(bao)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); let add_cachedevs_method = f .method("AddCacheDevs", (), add_cachedevs) .in_arg(("devices", "as")) - .out_arg(("results", "ao")) + // b: Indicates if any cache devices were added + // ao: Array of object paths of created cache devices + // + // Rust representation: (bool, Vec) + .out_arg(("results", "(bao)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); let rename_method = f .method("SetName", (), rename_pool) .in_arg(("name", "s")) - .out_arg(("action", "b")) + // b: false if no pool was renamed + // s: UUID of renamed pool + // + // Rust representation: (bool, String) + .out_arg(("result", "(bs)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); @@ -421,7 +471,11 @@ pub fn create_dbus_pool<'a>( .method("SnapshotFilesystem", (), snapshot_filesystem) .in_arg(("origin", "o")) .in_arg(("snapshot_name", "s")) - .out_arg(("result", "o")) + // b: false if no new snapshot was created + // s: Object path of new snapshot + // 
+ // Rust representation: (bool, String) + .out_arg(("result", "(bo)")) .out_arg(("return_code", "q")) .out_arg(("return_string", "s")); diff --git a/src/engine/engine.rs b/src/engine/engine.rs index 8ef0b7deaa..aa14c4e55e 100644 --- a/src/engine/engine.rs +++ b/src/engine/engine.rs @@ -15,8 +15,9 @@ use devicemapper::{Bytes, Device, Sectors}; use crate::{ engine::types::{ - BlockDevState, BlockDevTier, DevUuid, FilesystemUuid, FreeSpaceState, MaybeDbusPath, Name, - PoolExtendState, PoolState, PoolUuid, RenameAction, + BlockDevState, BlockDevTier, CreateAction, DeleteAction, DevUuid, FilesystemUuid, + FreeSpaceState, MaybeDbusPath, Name, PoolExtendState, PoolState, PoolUuid, RenameAction, + SetCreateAction, SetDeleteAction, }, stratis::StratisResult, }; @@ -78,7 +79,7 @@ pub trait Pool: Debug { pool_uuid: PoolUuid, pool_name: &str, specs: &[(&'b str, Option)], - ) -> StratisResult>; + ) -> StratisResult>; /// Adds blockdevs specified by paths to pool. /// Returns a list of uuids corresponding to devices actually added. @@ -90,7 +91,7 @@ pub trait Pool: Debug { pool_name: &str, paths: &[&Path], tier: BlockDevTier, - ) -> StratisResult>; + ) -> StratisResult>; /// Destroy the pool. /// Precondition: All filesystems belonging to this pool must be @@ -105,7 +106,7 @@ pub trait Pool: Debug { &'a mut self, pool_name: &str, fs_uuids: &[FilesystemUuid], - ) -> StratisResult>; + ) -> StratisResult>; /// Rename filesystem /// Rename pool with uuid to new_name. @@ -117,7 +118,7 @@ pub trait Pool: Debug { pool_name: &str, uuid: FilesystemUuid, new_name: &str, - ) -> StratisResult; + ) -> StratisResult>; /// Snapshot filesystem /// Create a CoW snapshot of the origin @@ -127,7 +128,7 @@ pub trait Pool: Debug { pool_name: &str, origin_uuid: FilesystemUuid, snapshot_name: &str, - ) -> StratisResult<(FilesystemUuid, &mut dyn Filesystem)>; + ) -> StratisResult>; /// The total number of Sectors belonging to this pool. /// There are no exclusions, so this number includes overhead sectors @@ -174,7 +175,7 @@ pub trait Pool: Debug { pool_name: &str, uuid: DevUuid, user_info: Option<&str>, - ) -> StratisResult; + ) -> StratisResult>; /// The current state of the Pool. fn state(&self) -> PoolState; @@ -202,7 +203,7 @@ pub trait Engine: Debug { name: &str, blockdev_paths: &[&Path], redundancy: Option, - ) -> StratisResult; + ) -> StratisResult>; /// Evaluate a device node & devicemapper::Device to see if it's a valid /// stratis device. If all the devices are present in the pool and the pool isn't already @@ -216,13 +217,17 @@ pub trait Engine: Debug { /// Destroy a pool. /// Ensures that the pool of the given UUID is absent on completion. /// Returns true if some action was necessary, otherwise false. - fn destroy_pool(&mut self, uuid: PoolUuid) -> StratisResult; + fn destroy_pool(&mut self, uuid: PoolUuid) -> StratisResult>; /// Rename pool with uuid to new_name. /// Raises an error if the mapping can't be applied because /// new_name is already in use. /// Returns true if it was necessary to perform an action, false if not. - fn rename_pool(&mut self, uuid: PoolUuid, new_name: &str) -> StratisResult; + fn rename_pool( + &mut self, + uuid: PoolUuid, + new_name: &str, + ) -> StratisResult>; /// Find the pool designated by uuid. 
fn get_pool(&self, uuid: PoolUuid) -> Option<(Name, &dyn Pool)>; diff --git a/src/engine/mod.rs b/src/engine/mod.rs index ce48fbf450..72ad764604 100644 --- a/src/engine/mod.rs +++ b/src/engine/mod.rs @@ -9,8 +9,9 @@ pub use self::{ sim_engine::SimEngine, strat_engine::StratEngine, types::{ - BlockDevState, BlockDevTier, DevUuid, FilesystemUuid, MaybeDbusPath, Name, PoolUuid, - Redundancy, RenameAction, + BlockDevState, BlockDevTier, CreateAction, DeleteAction, DevUuid, EngineAction, + FilesystemUuid, MaybeDbusPath, Name, PoolUuid, Redundancy, RenameAction, SetCreateAction, + SetDeleteAction, }, }; diff --git a/src/engine/sim_engine/engine.rs b/src/engine/sim_engine/engine.rs index 717501986e..994dcf39f9 100644 --- a/src/engine/sim_engine/engine.rs +++ b/src/engine/sim_engine/engine.rs @@ -17,7 +17,7 @@ use crate::{ engine::{Engine, Eventable, Pool}, sim_engine::{pool::SimPool, randomization::Randomizer}, structures::Table, - types::{Name, PoolUuid, Redundancy, RenameAction}, + types::{CreateAction, DeleteAction, Name, PoolUuid, Redundancy, RenameAction}, }, stratis::{ErrorEnum, StratisError, StratisResult}, }; @@ -36,26 +36,27 @@ impl Engine for SimEngine { name: &str, blockdev_paths: &[&Path], redundancy: Option, - ) -> StratisResult { + ) -> StratisResult> { let redundancy = calculate_redundancy!(redundancy); - if self.pools.contains_name(name) { - return Err(StratisError::Engine(ErrorEnum::AlreadyExists, name.into())); - } + match self.pools.get_by_name(name) { + Some(_) => Ok(CreateAction::Identity), + None => { + let device_set: HashSet<_, RandomState> = HashSet::from_iter(blockdev_paths); + let devices = device_set.into_iter().cloned().collect::>(); - let device_set: HashSet<_, RandomState> = HashSet::from_iter(blockdev_paths); - let devices = device_set.into_iter().cloned().collect::>(); + let (pool_uuid, pool) = SimPool::new(&Rc::clone(&self.rdm), &devices, redundancy); - let (pool_uuid, pool) = SimPool::new(&Rc::clone(&self.rdm), &devices, redundancy); + if self.rdm.borrow_mut().throw_die() { + return Err(StratisError::Engine(ErrorEnum::Error, "X".into())); + } - if self.rdm.borrow_mut().throw_die() { - return Err(StratisError::Engine(ErrorEnum::Error, "X".into())); - } + self.pools + .insert(Name::new(name.to_owned()), pool_uuid, pool); - self.pools - .insert(Name::new(name.to_owned()), pool_uuid, pool); - - Ok(pool_uuid) + Ok(CreateAction::Created(pool_uuid)) + } + } } fn block_evaluate( @@ -68,7 +69,7 @@ impl Engine for SimEngine { Ok(None) } - fn destroy_pool(&mut self, uuid: PoolUuid) -> StratisResult { + fn destroy_pool(&mut self, uuid: PoolUuid) -> StratisResult> { if let Some((_, pool)) = self.pools.get_by_uuid(uuid) { if pool.has_filesystems() { return Err(StratisError::Engine( @@ -77,17 +78,21 @@ impl Engine for SimEngine { )); }; } else { - return Ok(false); + return Ok(DeleteAction::Identity); } self.pools .remove_by_uuid(uuid) .expect("Must succeed since self.pool.get_by_uuid() returned a value") .1 .destroy()?; - Ok(true) + Ok(DeleteAction::Deleted(uuid)) } - fn rename_pool(&mut self, uuid: PoolUuid, new_name: &str) -> StratisResult { + fn rename_pool( + &mut self, + uuid: PoolUuid, + new_name: &str, + ) -> StratisResult> { rename_pool_pre!(self; uuid; new_name); let (_, pool) = self @@ -97,7 +102,7 @@ impl Engine for SimEngine { self.pools .insert(Name::new(new_name.to_owned()), uuid, pool); - Ok(RenameAction::Renamed) + Ok(RenameAction::Renamed(uuid)) } fn get_pool(&self, uuid: PoolUuid) -> Option<(Name, &dyn Pool)> { @@ -146,7 +151,10 @@ mod tests { use 
uuid::Uuid; use crate::{ - engine::{Engine, RenameAction}, + engine::{ + types::{EngineAction, RenameAction}, + Engine, + }, stratis::{ErrorEnum, StratisError}, }; @@ -179,7 +187,11 @@ mod tests { /// Destroying an empty pool should succeed. fn destroy_empty_pool() { let mut engine = SimEngine::default(); - let uuid = engine.create_pool("name", &[], None).unwrap(); + let uuid = engine + .create_pool("name", &[], None) + .unwrap() + .changed() + .unwrap(); assert_matches!(engine.destroy_pool(uuid), Ok(_)); } @@ -189,6 +201,8 @@ mod tests { let mut engine = SimEngine::default(); let uuid = engine .create_pool("name", &[Path::new("/s/d")], None) + .unwrap() + .changed() .unwrap(); assert_matches!(engine.destroy_pool(uuid), Ok(_)); } @@ -200,6 +214,8 @@ mod tests { let pool_name = "pool_name"; let uuid = engine .create_pool(pool_name, &[Path::new("/s/d")], None) + .unwrap() + .changed() .unwrap(); { let pool = engine.get_mut_pool(uuid).unwrap().1; @@ -216,9 +232,13 @@ mod tests { let name = "name"; let mut engine = SimEngine::default(); engine.create_pool(name, &[], None).unwrap(); - assert!(match engine.create_pool(name, &[], None) { - Ok(uuid) => engine.get_pool(uuid).unwrap().1.blockdevs().is_empty(), - Err(_) => false, + assert!(match engine + .create_pool(name, &[], None) + .ok() + .and_then(|uuid| uuid.changed()) + { + Some(uuid) => engine.get_pool(uuid).unwrap().1.blockdevs().is_empty(), + _ => false, }); } @@ -232,7 +252,7 @@ mod tests { .unwrap(); assert_matches!( engine.create_pool(name, &[], None), - Err(StratisError::Engine(ErrorEnum::AlreadyExists, _)) + Ok(CreateAction::Identity) ); } @@ -243,13 +263,12 @@ mod tests { let mut engine = SimEngine::default(); let devices = vec![Path::new(path), Path::new(path)]; assert_matches!( - engine.create_pool("name", &devices, None).map(|uuid| engine - .get_pool(uuid) + engine + .create_pool("name", &devices, None) .unwrap() - .1 - .blockdevs() - .len()), - Ok(1) + .changed() + .map(|uuid| engine.get_pool(uuid).unwrap().1.blockdevs().len()), + Some(1) ); } @@ -275,18 +294,29 @@ mod tests { fn rename_identity() { let name = "name"; let mut engine = SimEngine::default(); - let uuid = engine.create_pool(name, &[], None).unwrap(); - assert_matches!(engine.rename_pool(uuid, name), Ok(RenameAction::Identity)); + let uuid = engine + .create_pool(name, &[], None) + .unwrap() + .changed() + .unwrap(); + assert_eq!( + engine.rename_pool(uuid, name).unwrap(), + RenameAction::Identity + ); } #[test] /// Renaming a pool to another pool should work if new name not taken fn rename_happens() { let mut engine = SimEngine::default(); - let uuid = engine.create_pool("old_name", &[], None).unwrap(); - assert_matches!( - engine.rename_pool(uuid, "new_name"), - Ok(RenameAction::Renamed) + let uuid = engine + .create_pool("old_name", &[], None) + .unwrap() + .changed() + .unwrap(); + assert_eq!( + engine.rename_pool(uuid, "new_name").unwrap(), + RenameAction::Renamed(uuid) ); } @@ -295,7 +325,11 @@ mod tests { fn rename_fails() { let new_name = "new_name"; let mut engine = SimEngine::default(); - let uuid = engine.create_pool("old_name", &[], None).unwrap(); + let uuid = engine + .create_pool("old_name", &[], None) + .unwrap() + .changed() + .unwrap(); engine.create_pool(new_name, &[], None).unwrap(); assert_matches!( engine.rename_pool(uuid, new_name), @@ -314,5 +348,4 @@ mod tests { Ok(RenameAction::NoSource) ); } - } diff --git a/src/engine/sim_engine/pool.rs b/src/engine/sim_engine/pool.rs index f08fe516b1..0589a29cd3 100644 --- a/src/engine/sim_engine/pool.rs 
+++ b/src/engine/sim_engine/pool.rs @@ -21,8 +21,9 @@ use crate::{ sim_engine::{blockdev::SimDev, filesystem::SimFilesystem, randomization::Randomizer}, structures::Table, types::{ - BlockDevTier, DevUuid, FilesystemUuid, FreeSpaceState, MaybeDbusPath, Name, - PoolExtendState, PoolState, PoolUuid, Redundancy, RenameAction, + BlockDevTier, CreateAction, DevUuid, FilesystemUuid, FreeSpaceState, MaybeDbusPath, + Name, PoolExtendState, PoolState, PoolUuid, Redundancy, RenameAction, SetCreateAction, + SetDeleteAction, }, }, stratis::{ErrorEnum, StratisError, StratisResult}, @@ -89,27 +90,20 @@ impl Pool for SimPool { _pool_uuid: PoolUuid, _pool_name: &str, specs: &[(&'b str, Option)], - ) -> StratisResult> { + ) -> StratisResult> { let names: HashMap<_, _> = HashMap::from_iter(specs.iter().map(|&tup| (tup.0, tup.1))); - for name in names.keys() { - if self.filesystems.contains_name(name) { - return Err(StratisError::Engine( - ErrorEnum::AlreadyExists, - name.to_string(), - )); - } - } - let mut result = Vec::new(); for name in names.keys() { - let uuid = Uuid::new_v4(); - let new_filesystem = SimFilesystem::new(); - self.filesystems - .insert(Name::new((&**name).to_owned()), uuid, new_filesystem); - result.push((*name, uuid)); + if !self.filesystems.contains_name(name) { + let uuid = Uuid::new_v4(); + let new_filesystem = SimFilesystem::new(); + self.filesystems + .insert(Name::new((&**name).to_owned()), uuid, new_filesystem); + result.push((*name, uuid)); + } } - Ok(result) + Ok(SetCreateAction::new(result)) } fn add_blockdevs( @@ -118,21 +112,31 @@ impl Pool for SimPool { _pool_name: &str, paths: &[&Path], tier: BlockDevTier, - ) -> StratisResult> { + ) -> StratisResult> { let devices: HashSet<_, RandomState> = HashSet::from_iter(paths); + let device_pairs: Vec<_> = devices .iter() .map(|p| SimDev::new(Rc::clone(&self.rdm), p)) .collect(); - let ret_uuids = device_pairs.iter().map(|&(uuid, _)| uuid).collect(); let the_vec = match tier { BlockDevTier::Cache => &mut self.cache_devs, BlockDevTier::Data => &mut self.block_devs, }; - the_vec.extend(device_pairs); - Ok(ret_uuids) + let filter: Vec<_> = the_vec.values().map(|d| d.devnode()).collect(); + let filtered_device_pairs: Vec<_> = device_pairs + .into_iter() + .filter(|(_, sd)| !filter.contains(&sd.devnode())) + .collect(); + + let ret_uuids = filtered_device_pairs + .iter() + .map(|&(uuid, _)| uuid) + .collect(); + the_vec.extend(filtered_device_pairs); + Ok(SetCreateAction::new(ret_uuids)) } fn destroy(&mut self) -> StratisResult<()> { @@ -144,14 +148,14 @@ impl Pool for SimPool { &'a mut self, _pool_name: &str, fs_uuids: &[FilesystemUuid], - ) -> StratisResult> { + ) -> StratisResult> { let mut removed = Vec::new(); for &uuid in fs_uuids { if self.filesystems.remove_by_uuid(uuid).is_some() { removed.push(uuid); } } - Ok(removed) + Ok(SetDeleteAction::new(removed)) } fn rename_filesystem( @@ -159,7 +163,7 @@ impl Pool for SimPool { _pool_name: &str, uuid: FilesystemUuid, new_name: &str, - ) -> StratisResult { + ) -> StratisResult> { rename_filesystem_pre!(self; uuid; new_name); let (_, filesystem) = self @@ -170,7 +174,7 @@ impl Pool for SimPool { self.filesystems .insert(Name::new(new_name.to_owned()), uuid, filesystem); - Ok(RenameAction::Renamed) + Ok(RenameAction::Renamed(uuid)) } fn snapshot_filesystem( @@ -179,12 +183,9 @@ impl Pool for SimPool { _pool_name: &str, origin_uuid: FilesystemUuid, snapshot_name: &str, - ) -> StratisResult<(FilesystemUuid, &mut dyn Filesystem)> { + ) -> StratisResult> { if 
self.filesystems.contains_name(snapshot_name) { - return Err(StratisError::Engine( - ErrorEnum::AlreadyExists, - snapshot_name.to_string(), - )); + return Ok(CreateAction::Identity); } let uuid = Uuid::new_v4(); @@ -199,13 +200,13 @@ impl Pool for SimPool { }; self.filesystems .insert(Name::new(snapshot_name.to_owned()), uuid, snapshot); - Ok(( + Ok(CreateAction::Created(( uuid, self.filesystems .get_mut_by_uuid(uuid) .expect("just inserted") .1, - )) + ))) } fn total_physical_size(&self) -> Sectors { @@ -281,16 +282,17 @@ impl Pool for SimPool { _pool_name: &str, uuid: DevUuid, user_info: Option<&str>, - ) -> StratisResult { - self.get_mut_blockdev_internal(uuid).map_or_else( - || { - Err(StratisError::Engine( - ErrorEnum::NotFound, - format!("No blockdev for uuid {} found", uuid), - )) + ) -> StratisResult> { + Ok(self.get_mut_blockdev_internal(uuid).map_or_else( + || RenameAction::NoSource, + |(_, b)| { + if b.set_user_info(user_info) { + RenameAction::Renamed(uuid) + } else { + RenameAction::Identity + } }, - |(_, b)| Ok(b.set_user_info(user_info)), - ) + )) } fn state(&self) -> PoolState { @@ -325,6 +327,8 @@ mod tests { use crate::engine::sim_engine::SimEngine; + use crate::engine::types::EngineAction; + use super::*; #[test] @@ -332,7 +336,11 @@ mod tests { fn rename_empty() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; assert!( match pool.rename_filesystem(pool_name, Uuid::new_v4(), "new_name") { @@ -347,16 +355,21 @@ mod tests { fn rename_happens() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; let infos = pool .create_filesystems(uuid, pool_name, &[("old_name", None)]) + .unwrap() + .changed() .unwrap(); - assert!( - match pool.rename_filesystem(pool_name, infos[0].1, "new_name") { - Ok(RenameAction::Renamed) => true, - _ => false, - } + assert_matches!( + pool.rename_filesystem(pool_name, infos[0].1, "new_name") + .unwrap(), + RenameAction::Renamed(_) ); } @@ -367,10 +380,16 @@ mod tests { let new_name = "new_name"; let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; let results = pool .create_filesystems(uuid, pool_name, &[(old_name, None), (new_name, None)]) + .unwrap() + .changed() .unwrap(); let old_uuid = results.iter().find(|x| x.0 == old_name).unwrap().1; assert!( @@ -387,7 +406,11 @@ mod tests { let new_name = "new_name"; let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; assert!( match pool.rename_filesystem(pool_name, Uuid::new_v4(), new_name) { @@ -402,10 +425,14 @@ mod tests { fn destroy_fs_empty() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + 
.create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; assert!(match pool.destroy_filesystems(pool_name, &[]) { - Ok(names) => names.is_empty(), + Ok(uuids) => !uuids.is_changed(), _ => false, }); } @@ -415,7 +442,11 @@ mod tests { fn destroy_fs_some() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; assert_matches!( pool.destroy_filesystems(pool_name, &[Uuid::new_v4()]), @@ -428,18 +459,22 @@ mod tests { fn destroy_fs_any() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; let fs_results = pool .create_filesystems(uuid, pool_name, &[("fs_name", None)]) + .unwrap() + .changed() .unwrap(); let fs_uuid = fs_results[0].1; - assert!( - match pool.destroy_filesystems(pool_name, &[fs_uuid, Uuid::new_v4()]) { - Ok(filesystems) => filesystems == vec![fs_uuid], - _ => false, - } - ); + assert!(match pool.destroy_filesystems(pool_name, &[fs_uuid]) { + Ok(filesystems) => filesystems == SetDeleteAction::new(vec![fs_uuid]), + _ => false, + }); } #[test] @@ -447,12 +482,14 @@ mod tests { fn create_fs_none() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; - assert!(match pool.create_filesystems(uuid, pool_name, &[]) { - Ok(names) => names.is_empty(), - _ => false, - }); + let fs = pool.create_filesystems(uuid, pool_name, &[]).unwrap(); + assert!(!fs.is_changed()) } #[test] @@ -460,14 +497,20 @@ mod tests { fn create_fs_some() { let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; - assert!( - match pool.create_filesystems(uuid, pool_name, &[("name", None)]) { - Ok(names) => (names.len() == 1) & (names[0].0 == "name"), - _ => false, - } - ); + assert!(match pool + .create_filesystems(uuid, pool_name, &[("name", None)]) + .ok() + .and_then(|fs| fs.changed()) + { + Some(names) => (names.len() == 1) & (names[0].0 == "name"), + _ => false, + }); } #[test] @@ -476,16 +519,18 @@ mod tests { let fs_name = "fs_name"; let mut engine = SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; pool.create_filesystems(uuid, pool_name, &[(fs_name, None)]) .unwrap(); - assert!( - match pool.create_filesystems(uuid, pool_name, &[(fs_name, None)]) { - Err(StratisError::Engine(ErrorEnum::AlreadyExists, _)) => true, - _ => false, - } - ); + let set_create_action = pool + .create_filesystems(uuid, pool_name, &[(fs_name, None)]) + .unwrap(); + assert!(!set_create_action.is_changed()); } #[test] @@ -494,28 +539,40 @@ mod tests { let fs_name = "fs_name"; let mut engine = 
SimEngine::default(); let pool_name = "pool_name"; - let uuid = engine.create_pool(pool_name, &[], None).unwrap(); + let uuid = engine + .create_pool(pool_name, &[], None) + .unwrap() + .changed() + .unwrap(); let pool = engine.get_mut_pool(uuid).unwrap().1; - assert!( - match pool.create_filesystems(uuid, pool_name, &[(fs_name, None), (fs_name, None)]) { - Ok(names) => (names.len() == 1) & (names[0].0 == fs_name), - _ => false, - } - ); + assert!(match pool + .create_filesystems(uuid, pool_name, &[(fs_name, None), (fs_name, None)]) + .ok() + .and_then(|fs| fs.changed()) + { + Some(names) => (names.len() == 1) & (names[0].0 == fs_name), + _ => false, + }); } #[test] /// Adding a list of devices to an empty pool should yield list. fn add_device_empty() { let mut engine = SimEngine::default(); - let uuid = engine.create_pool("pool_name", &[], None).unwrap(); + let uuid = engine + .create_pool("pool_name", &[], None) + .unwrap() + .changed() + .unwrap(); let (pool_name, pool) = engine.get_mut_pool(uuid).unwrap(); let devices = [Path::new("/s/a"), Path::new("/s/b")]; - assert!( - match pool.add_blockdevs(uuid, &*pool_name, &devices, BlockDevTier::Data) { - Ok(devs) => devs.len() == devices.len(), - _ => false, - } - ); + assert!(match pool + .add_blockdevs(uuid, &*pool_name, &devices, BlockDevTier::Data) + .ok() + .and_then(|c| c.changed()) + { + Some(devs) => devs.len() == devices.len(), + _ => false, + }); } } diff --git a/src/engine/strat_engine/backstore/backstore.rs b/src/engine/strat_engine/backstore/backstore.rs index 2a55ba1fdb..37404c4e09 100644 --- a/src/engine/strat_engine/backstore/backstore.rs +++ b/src/engine/strat_engine/backstore/backstore.rs @@ -30,6 +30,7 @@ use crate::{ names::{format_backstore_ids, CacheRole}, serde_structs::{BackstoreSave, CapSave, Recordable}, }, + types::RenameAction, BlockDevTier, DevUuid, PoolUuid, }, stratis::{ErrorEnum, StratisError, StratisResult}, @@ -556,15 +557,16 @@ impl Backstore { &mut self, uuid: DevUuid, user_info: Option<&str>, - ) -> StratisResult { + ) -> RenameAction { self.get_mut_blockdev_by_uuid(uuid).map_or_else( - || { - Err(StratisError::Engine( - ErrorEnum::NotFound, - format!("No blockdev for uuid {} found", uuid), - )) + || RenameAction::NoSource, + |(_, b)| { + if b.set_user_info(user_info) { + RenameAction::Renamed(uuid) + } else { + RenameAction::Identity + } }, - |(_, b)| Ok(b.set_user_info(user_info)), ) } } diff --git a/src/engine/strat_engine/engine.rs b/src/engine/strat_engine/engine.rs index cf0f32d5b0..aebd962a5f 100644 --- a/src/engine/strat_engine/engine.rs +++ b/src/engine/strat_engine/engine.rs @@ -26,7 +26,8 @@ use crate::{ pool::{check_metadata, StratPool}, }, structures::Table, - Engine, EngineEvent, Name, Pool, PoolUuid, Redundancy, RenameAction, + types::{CreateAction, DeleteAction, RenameAction}, + Engine, EngineEvent, Name, Pool, PoolUuid, Redundancy, }, stratis::{ErrorEnum, StratisError, StratisResult}, }; @@ -174,21 +175,22 @@ impl Engine for StratEngine { name: &str, blockdev_paths: &[&Path], redundancy: Option, - ) -> StratisResult { + ) -> StratisResult> { let redundancy = calculate_redundancy!(redundancy); validate_name(name)?; - if self.pools.contains_name(name) { - return Err(StratisError::Engine(ErrorEnum::AlreadyExists, name.into())); - } - - let (uuid, pool) = StratPool::initialize(name, blockdev_paths, redundancy)?; + match self.pools.get_by_name(name) { + None => { + let (uuid, pool) = StratPool::initialize(name, blockdev_paths, redundancy)?; - let name = Name::new(name.to_owned()); - 
devlinks::pool_added(&name); - self.pools.insert(name, uuid, pool); - Ok(uuid) + let name = Name::new(name.to_owned()); + devlinks::pool_added(&name); + self.pools.insert(name, uuid, pool); + Ok(CreateAction::Created(uuid)) + } + Some(_) => Ok(CreateAction::Identity), + } } /// Evaluate a device node & devicemapper::Device to see if it's a valid @@ -266,7 +268,7 @@ impl Engine for StratEngine { Ok(pool_uuid) } - fn destroy_pool(&mut self, uuid: PoolUuid) -> StratisResult { + fn destroy_pool(&mut self, uuid: PoolUuid) -> StratisResult> { if let Some((_, pool)) = self.pools.get_by_uuid(uuid) { if pool.has_filesystems() { return Err(StratisError::Engine( @@ -275,7 +277,7 @@ impl Engine for StratEngine { )); }; } else { - return Ok(false); + return Ok(DeleteAction::Identity); } let (pool_name, mut pool) = self @@ -288,11 +290,15 @@ impl Engine for StratEngine { Err(err) } else { devlinks::pool_removed(&pool_name); - Ok(true) + Ok(DeleteAction::Deleted(uuid)) } } - fn rename_pool(&mut self, uuid: PoolUuid, new_name: &str) -> StratisResult { + fn rename_pool( + &mut self, + uuid: PoolUuid, + new_name: &str, + ) -> StratisResult> { validate_name(new_name)?; let old_name = rename_pool_pre!(self; uuid; new_name); @@ -314,7 +320,7 @@ impl Engine for StratEngine { self.pools.insert(new_name.clone(), uuid, pool); devlinks::pool_renamed(&old_name, &new_name); - Ok(RenameAction::Renamed) + Ok(RenameAction::Renamed(uuid)) } } @@ -382,6 +388,8 @@ mod test { use crate::engine::strat_engine::tests::{loopbacked, real}; + use crate::engine::types::EngineAction; + use super::*; /// Verify that a pool rename causes the pool metadata to get the new name. @@ -389,12 +397,16 @@ mod test { let mut engine = StratEngine::initialize().unwrap(); let name1 = "name1"; - let uuid1 = engine.create_pool(&name1, paths, None).unwrap(); + let uuid1 = engine + .create_pool(&name1, paths, None) + .unwrap() + .changed() + .unwrap(); let name2 = "name2"; let action = engine.rename_pool(uuid1, name2).unwrap(); - assert_eq!(action, RenameAction::Renamed); + assert_eq!(action, RenameAction::Renamed(uuid1)); engine.teardown().unwrap(); let engine = StratEngine::initialize().unwrap(); @@ -435,10 +447,18 @@ mod test { let mut engine = StratEngine::initialize().unwrap(); let name1 = "name1"; - let uuid1 = engine.create_pool(&name1, paths1, None).unwrap(); + let uuid1 = engine + .create_pool(&name1, paths1, None) + .unwrap() + .changed() + .unwrap(); let name2 = "name2"; - let uuid2 = engine.create_pool(&name2, paths2, None).unwrap(); + let uuid2 = engine + .create_pool(&name2, paths2, None) + .unwrap() + .changed() + .unwrap(); assert!(engine.get_pool(uuid1).is_some()); assert!(engine.get_pool(uuid2).is_some()); diff --git a/src/engine/strat_engine/pool.rs b/src/engine/strat_engine/pool.rs index f1ccd50f86..01a57c5244 100644 --- a/src/engine/strat_engine/pool.rs +++ b/src/engine/strat_engine/pool.rs @@ -25,8 +25,9 @@ use crate::{ thinpool::{ThinPool, ThinPoolSizeParams, DATA_BLOCK_SIZE}, }, types::{ - BlockDevTier, DevUuid, FilesystemUuid, FreeSpaceState, MaybeDbusPath, Name, - PoolExtendState, PoolState, PoolUuid, Redundancy, RenameAction, + BlockDevTier, CreateAction, DevUuid, EngineAction, FilesystemUuid, FreeSpaceState, + MaybeDbusPath, Name, PoolExtendState, PoolState, PoolUuid, Redundancy, RenameAction, + SetCreateAction, SetDeleteAction, }, }, stratis::{ErrorEnum, StratisError, StratisResult}, @@ -283,28 +284,25 @@ impl Pool for StratPool { pool_uuid: PoolUuid, pool_name: &str, specs: &[(&'b str, Option)], - ) -> StratisResult> { 
+ ) -> StratisResult> { let names: HashMap<_, _> = HashMap::from_iter(specs.iter().map(|&tup| (tup.0, tup.1))); - for name in names.keys() { - validate_name(name)?; - if self.thin_pool.get_mut_filesystem_by_name(*name).is_some() { - return Err(StratisError::Engine( - ErrorEnum::AlreadyExists, - name.to_string(), - )); - } - } + + names.iter().fold(Ok(()), |res, (name, _)| { + res.and_then(|()| validate_name(name)) + })?; // TODO: Roll back on filesystem initialization failure. let mut result = Vec::new(); for (name, size) in names { - let fs_uuid = self - .thin_pool - .create_filesystem(pool_uuid, pool_name, name, size)?; - result.push((name, fs_uuid)); + if self.thin_pool.get_mut_filesystem_by_name(name).is_none() { + let fs_uuid = self + .thin_pool + .create_filesystem(pool_uuid, pool_name, name, size)?; + result.push((name, fs_uuid)); + } } - Ok(result) + Ok(SetCreateAction::new(result)) } fn add_blockdevs( @@ -313,7 +311,7 @@ impl Pool for StratPool { pool_name: &str, paths: &[&Path], tier: BlockDevTier, - ) -> StratisResult> { + ) -> StratisResult> { let bdev_info = if tier == BlockDevTier::Cache { // If adding cache devices, must suspend the pool, since the cache // must be augmeneted with the new devices. @@ -321,7 +319,7 @@ impl Pool for StratPool { let bdev_info = self.backstore.add_cachedevs(pool_uuid, paths)?; self.thin_pool.set_device(self.backstore.device().expect("Since thin pool exists, space must have been allocated from the backstore, so backstore must have a cap device"))?; self.thin_pool.resume()?; - Ok(bdev_info) + Ok(SetCreateAction::new(bdev_info)) } else { // If just adding data devices, no need to suspend the pool. // No action will be taken on the DM devices. @@ -334,7 +332,7 @@ impl Pool for StratPool { // so that it can satisfy the allocation request where // previously it could not. Run check() in case that is true. 
self.thin_pool.check(pool_uuid, &mut self.backstore)?; - Ok(bdev_info) + Ok(SetCreateAction::new(bdev_info)) }; self.write_metadata(pool_name)?; bdev_info @@ -350,14 +348,16 @@ impl Pool for StratPool { &'a mut self, pool_name: &str, fs_uuids: &[FilesystemUuid], - ) -> StratisResult> { + ) -> StratisResult> { let mut removed = Vec::new(); for &uuid in fs_uuids { - self.thin_pool.destroy_filesystem(pool_name, uuid)?; - removed.push(uuid); + let changed = self.thin_pool.destroy_filesystem(pool_name, uuid)?; + if changed.is_changed() { + removed.push(uuid); + } } - Ok(removed) + Ok(SetDeleteAction::new(removed)) } fn rename_filesystem( @@ -365,7 +365,7 @@ impl Pool for StratPool { pool_name: &str, uuid: FilesystemUuid, new_name: &str, - ) -> StratisResult { + ) -> StratisResult> { validate_name(new_name)?; self.thin_pool.rename_filesystem(pool_name, uuid, new_name) } @@ -376,7 +376,7 @@ impl Pool for StratPool { pool_name: &str, origin_uuid: FilesystemUuid, snapshot_name: &str, - ) -> StratisResult<(FilesystemUuid, &mut dyn Filesystem)> { + ) -> StratisResult> { validate_name(snapshot_name)?; if self @@ -384,14 +384,12 @@ impl Pool for StratPool { .get_filesystem_by_name(snapshot_name) .is_some() { - return Err(StratisError::Engine( - ErrorEnum::AlreadyExists, - snapshot_name.to_string(), - )); + return Ok(CreateAction::Identity); } self.thin_pool .snapshot_filesystem(pool_uuid, pool_name, origin_uuid, snapshot_name) + .map(CreateAction::Created) } fn total_physical_size(&self) -> Sectors { @@ -456,13 +454,12 @@ impl Pool for StratPool { pool_name: &str, uuid: DevUuid, user_info: Option<&str>, - ) -> StratisResult { - if self.backstore.set_blockdev_user_info(uuid, user_info)? { + ) -> StratisResult> { + let result = self.backstore.set_blockdev_user_info(uuid, user_info); + if let RenameAction::Renamed(_) = result { self.write_metadata(pool_name)?; - Ok(true) - } else { - Ok(false) } + Ok(result) } fn state(&self) -> PoolState { @@ -506,7 +503,7 @@ mod tests { cmd, tests::{loopbacked, real}, }, - types::Redundancy, + types::{EngineAction, Redundancy}, }; use super::*; @@ -620,7 +617,8 @@ mod tests { let (_, fs_uuid) = pool .create_filesystems(uuid, &name, &[("stratis-filesystem", None)]) .unwrap() - .pop() + .changed() + .and_then(|mut fs| fs.pop()) .unwrap(); invariant(&pool, &name); @@ -735,7 +733,8 @@ mod tests { let (_, fs_uuid) = pool .create_filesystems(pool_uuid, &name, &[(&fs_name, None)]) .unwrap() - .pop() + .changed() + .and_then(|mut fs| fs.pop()) .expect("just created one"); let devnode = pool.get_filesystem(fs_uuid).unwrap().1.devnode(); diff --git a/src/engine/strat_engine/thinpool/thinpool.rs b/src/engine/strat_engine/thinpool/thinpool.rs index e22c2375c2..5cd35c6ee6 100644 --- a/src/engine/strat_engine/thinpool/thinpool.rs +++ b/src/engine/strat_engine/thinpool/thinpool.rs @@ -42,8 +42,8 @@ use crate::{ }, structures::Table, types::{ - FilesystemUuid, FreeSpaceState, MaybeDbusPath, Name, PoolExtendState, PoolState, - PoolUuid, RenameAction, + DeleteAction, FilesystemUuid, FreeSpaceState, MaybeDbusPath, Name, PoolExtendState, + PoolState, PoolUuid, RenameAction, }, }, stratis::{ErrorEnum, StratisError, StratisResult}, @@ -978,7 +978,7 @@ impl ThinPool { &mut self, pool_name: &str, uuid: FilesystemUuid, - ) -> StratisResult<()> { + ) -> StratisResult> { match self.filesystems.remove_by_uuid(uuid) { Some((fs_name, mut fs)) => match fs.destroy(&self.thin_pool) { Ok(_) => { @@ -990,14 +990,14 @@ impl ThinPool { err); } devlinks::filesystem_removed(pool_name, &fs_name); - Ok(()) + 
Ok(DeleteAction::Deleted(uuid)) } Err(err) => { self.filesystems.insert(fs_name, uuid, fs); Err(err) } }, - None => Ok(()), + None => Ok(DeleteAction::Identity), } } @@ -1019,7 +1019,7 @@ impl ThinPool { pool_name: &str, uuid: FilesystemUuid, new_name: &str, - ) -> StratisResult { + ) -> StratisResult> { let old_name = rename_filesystem_pre!(self; uuid; new_name); let new_name = Name::new(new_name.to_owned()); @@ -1040,7 +1040,7 @@ impl ThinPool { }); self.filesystems.insert(new_name.clone(), uuid, filesystem); devlinks::filesystem_renamed(pool_name, &old_name, &new_name); - Ok(RenameAction::Renamed) + Ok(RenameAction::Renamed(uuid)) } } @@ -1549,7 +1549,7 @@ mod tests { .unwrap(); let action = pool.rename_filesystem(pool_name, fs_uuid, name2).unwrap(); - assert_eq!(action, RenameAction::Renamed); + assert_matches!(action, RenameAction::Renamed(_)); let flexdevs: FlexDevsSave = pool.record(); let thinpoolsave: ThinPoolDevSave = pool.record(); pool.teardown().unwrap(); diff --git a/src/engine/types/actions.rs b/src/engine/types/actions.rs new file mode 100644 index 0000000000..a45ef14e22 --- /dev/null +++ b/src/engine/types/actions.rs @@ -0,0 +1,135 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. + +//! Contains types which encode the results of actions requested on an engine, +//! pool, filesystem, or blockdev. Each action type is designed to support +//! idempotency. In every case, the action type is used to indicate the +//! effect of the action at the time the action is requested. The action was +//! completed succesfully; this type indicates what changes had to be made. + +/// A trait for a generic kind of action. Defines the type of the thing to +/// be changed, and also a method to indicate what changed. +pub trait EngineAction { + type Return; + + /// Returns whether or not the action changed state. + fn is_changed(&self) -> bool; + + /// Returns the thing or things changed. + fn changed(self) -> Option; +} + +#[derive(Debug, PartialEq, Eq)] +/// A single create action. +pub enum CreateAction { + /// The thing already existed. + Identity, + /// The thing did not already exist. + Created(T), +} + +impl EngineAction for CreateAction { + type Return = T; + + fn is_changed(&self) -> bool { + match *self { + CreateAction::Identity => false, + _ => true, + } + } + + fn changed(self) -> Option { + match self { + CreateAction::Created(t) => Some(t), + _ => None, + } + } +} + +#[derive(Debug, PartialEq, Eq)] +/// An action which may create multiple things. +pub struct SetCreateAction { + changed: Vec, +} + +impl SetCreateAction { + pub fn new(changed: Vec) -> Self { + SetCreateAction { changed } + } +} + +impl EngineAction for SetCreateAction { + type Return = Vec; + + fn is_changed(&self) -> bool { + !self.changed.is_empty() + } + + fn changed(self) -> Option> { + if self.changed.is_empty() { + None + } else { + Some(self.changed) + } + } +} + +#[derive(Debug, PartialEq, Eq)] +/// An action which may rename a single thing. +pub enum RenameAction { + /// The thing already had the given name. + Identity, + /// The thing did not have the given name and was renamed. + Renamed(T), + /// The thing did not exist, so could not be renamed. 
+    NoSource,
+}
+
+impl<T> EngineAction for RenameAction<T> {
+    type Return = T;
+
+    fn is_changed(&self) -> bool {
+        match *self {
+            RenameAction::Renamed(_) => true,
+            _ => false,
+        }
+    }
+
+    fn changed(self) -> Option<T> {
+        match self {
+            RenameAction::Renamed(t) => Some(t),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+/// A single delete action.
+pub enum DeleteAction<T> {
+    /// The thing was already gone.
+    Identity,
+    /// The thing existed and was removed.
+    Deleted(T),
+}
+
+impl<T> EngineAction for DeleteAction<T> {
+    type Return = T;
+
+    fn is_changed(&self) -> bool {
+        match *self {
+            DeleteAction::Deleted(_) => true,
+            _ => false,
+        }
+    }
+
+    fn changed(self) -> Option<T> {
+        match self {
+            DeleteAction::Deleted(t) => Some(t),
+            _ => None,
+        }
+    }
+}
+
+/// An action which may delete multiple things.
+pub type SetDeleteAction<T> = SetCreateAction<T>;
diff --git a/src/engine/types.rs b/src/engine/types/mod.rs
similarity index 95%
rename from src/engine/types.rs
rename to src/engine/types/mod.rs
index c3c9a2bee0..165487f391 100644
--- a/src/engine/types.rs
+++ b/src/engine/types/mod.rs
@@ -4,6 +4,11 @@
 
 use std::{borrow::Borrow, fmt, ops::Deref, rc::Rc};
 
+mod actions;
+pub use crate::engine::types::actions::{
+    CreateAction, DeleteAction, EngineAction, RenameAction, SetCreateAction, SetDeleteAction,
+};
+
 #[cfg(feature = "dbus_enabled")]
 use dbus;
 use uuid::Uuid;
@@ -12,13 +17,6 @@
 pub type DevUuid = Uuid;
 pub type FilesystemUuid = Uuid;
 pub type PoolUuid = Uuid;
 
-#[derive(Debug, PartialEq, Eq)]
-pub enum RenameAction {
-    Identity,
-    NoSource,
-    Renamed,
-}
-
 /// A DM pool operates in 4 modes. See drivers/md/dm-thin.c (enum pool_mode).
 /// The 4 modes map to Running, OutOfDataSpace, ReadOnly and Failed - in degrading
 /// order. Stratis adds 2 additional modes - Initializing and Stopping.
The Stratis diff --git a/tests/client-dbus/src/stratisd_client_dbus/_data.py b/tests/client-dbus/src/stratisd_client_dbus/_data.py index 50be81c87e..e3d37d75c9 100644 --- a/tests/client-dbus/src/stratisd_client_dbus/_data.py +++ b/tests/client-dbus/src/stratisd_client_dbus/_data.py @@ -34,13 +34,13 @@ - + - + @@ -53,38 +53,38 @@ - + - + - + - + - + - + diff --git a/tests/client-dbus/tests/dbus/filesystem/test_properties.py b/tests/client-dbus/tests/dbus/filesystem/test_properties.py index 8ee9ab0df6..da13e06cf9 100644 --- a/tests/client-dbus/tests/dbus/filesystem/test_properties.py +++ b/tests/client-dbus/tests/dbus/filesystem/test_properties.py @@ -42,7 +42,7 @@ def setUp(self): super().setUp() self._fs_name = "fs" self._proxy = get_object(TOP_OBJECT) - ((self._pool_object_path, _), _, _) = Manager.Methods.CreatePool( + ((_, (self._pool_object_path, _)), _, _) = Manager.Methods.CreatePool( self._proxy, { "name": self._POOLNAME, @@ -51,7 +51,7 @@ def setUp(self): }, ) self._pool_object = get_object(self._pool_object_path) - (created, _, _) = Pool.Methods.CreateFilesystems( + ((_, created), _, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [self._fs_name]} ) self._filesystem_object_path = created[0][0] diff --git a/tests/client-dbus/tests/dbus/filesystem/test_rename.py b/tests/client-dbus/tests/dbus/filesystem/test_rename.py index 8c69919375..6b04fec551 100644 --- a/tests/client-dbus/tests/dbus/filesystem/test_rename.py +++ b/tests/client-dbus/tests/dbus/filesystem/test_rename.py @@ -45,7 +45,7 @@ def setUp(self): super().setUp() self._fs_name = "fs" self._proxy = get_object(TOP_OBJECT) - ((self._pool_object_path, _), _, _) = Manager.Methods.CreatePool( + ((_, (self._pool_object_path, _)), _, _) = Manager.Methods.CreatePool( self._proxy, { "name": self._POOLNAME, @@ -54,7 +54,7 @@ def setUp(self): }, ) self._pool_object = get_object(self._pool_object_path) - (created, _, _) = Pool.Methods.CreateFilesystems( + ((_, created), _, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [self._fs_name]} ) self._filesystem_object_path = created[0][0] @@ -65,12 +65,13 @@ def testNullMapping(self): Test rename to same name. """ filesystem = get_object(self._filesystem_object_path) - (result, rc, _) = Filesystem.Methods.SetName( + ((is_some, result), rc, _) = Filesystem.Methods.SetName( filesystem, {"name": self._fs_name} ) self.assertEqual(rc, StratisdErrors.OK) - self.assertFalse(result) + self.assertFalse(is_some) + self.assertEqual(result, "0" * 32) def testNewName(self): """ diff --git a/tests/client-dbus/tests/dbus/manager/test_create.py b/tests/client-dbus/tests/dbus/manager/test_create.py index d122095c50..e51d0d828e 100644 --- a/tests/client-dbus/tests/dbus/manager/test_create.py +++ b/tests/client-dbus/tests/dbus/manager/test_create.py @@ -52,7 +52,7 @@ def testCreate(self): If rc is OK, then pool must exist. 
""" devs = _DEVICE_STRATEGY() - ((poolpath, devnodes), rc, _) = Manager.Methods.CreatePool( + ((_, (poolpath, devnodes)), rc, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": devs}, ) @@ -121,7 +121,7 @@ def testCreate(self): ObjectManager.Methods.GetManagedObjects(self._proxy, {}) ) - (_, rc, _) = Manager.Methods.CreatePool( + ((is_some, _), rc, _) = Manager.Methods.CreatePool( self._proxy, { "name": self._POOLNAME, @@ -129,8 +129,8 @@ def testCreate(self): "devices": _DEVICE_STRATEGY(), }, ) - expected_rc = StratisdErrors.ALREADY_EXISTS - self.assertEqual(rc, expected_rc) + self.assertEqual(rc, StratisdErrors.OK) + self.assertFalse(is_some) managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) pools2 = list(pools().search(managed_objects)) diff --git a/tests/client-dbus/tests/dbus/manager/test_destroy.py b/tests/client-dbus/tests/dbus/manager/test_destroy.py index 62664607d9..87b0354664 100644 --- a/tests/client-dbus/tests/dbus/manager/test_destroy.py +++ b/tests/client-dbus/tests/dbus/manager/test_destroy.py @@ -127,7 +127,7 @@ def setUp(self): """ super().setUp() self._proxy = get_object(TOP_OBJECT) - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, { "name": self._POOLNAME, @@ -145,9 +145,9 @@ def testExecution(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) (pool, _) = next(pools(props={"Name": self._POOLNAME}).search(managed_objects)) - (result, rc, _) = Manager.Methods.DestroyPool(self._proxy, {"pool": pool}) + ((is_some, _), rc, _) = Manager.Methods.DestroyPool(self._proxy, {"pool": pool}) self.assertEqual(rc, StratisdErrors.BUSY) - self.assertEqual(result, False) + self.assertFalse(is_some) managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) (pool1, _) = next(pools(props={"Name": self._POOLNAME}).search(managed_objects)) @@ -181,10 +181,10 @@ def testExecution(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) (pool, _) = next(pools(props={"Name": self._POOLNAME}).search(managed_objects)) - (result, rc, _) = Manager.Methods.DestroyPool(self._proxy, {"pool": pool}) + ((is_some, _), rc, _) = Manager.Methods.DestroyPool(self._proxy, {"pool": pool}) self.assertEqual(rc, StratisdErrors.OK) - self.assertEqual(result, True) + self.assertTrue(is_some) managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) self.assertIsNone( diff --git a/tests/client-dbus/tests/dbus/pool/test_add_cache_devs.py b/tests/client-dbus/tests/dbus/pool/test_add_cache_devs.py index 358e05f2d6..efb79a27a1 100644 --- a/tests/client-dbus/tests/dbus/pool/test_add_cache_devs.py +++ b/tests/client-dbus/tests/dbus/pool/test_add_cache_devs.py @@ -44,7 +44,7 @@ def setUp(self): """ super().setUp() self._proxy = get_object(TOP_OBJECT) - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": []}, ) @@ -58,8 +58,11 @@ def testEmptyDevs(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) (pool, _) = next(pools(props={"Name": self._POOLNAME}).search(managed_objects)) - (result, rc, _) = Pool.Methods.AddCacheDevs(self._pool_object, {"devices": []}) + ((is_some, result), rc, _) = Pool.Methods.AddCacheDevs( + self._pool_object, {"devices": []} + ) + self.assertFalse(is_some) self.assertEqual(len(result), 0) 
self.assertEqual(rc, StratisdErrors.OK) @@ -77,7 +80,7 @@ def testSomeDevs(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) (pool, _) = next(pools(props={"Name": self._POOLNAME}).search(managed_objects)) - (result, rc, _) = Pool.Methods.AddCacheDevs( + ((is_some, result), rc, _) = Pool.Methods.AddCacheDevs( self._pool_object, {"devices": _DEVICE_STRATEGY()} ) @@ -85,16 +88,18 @@ def testSomeDevs(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) if rc == StratisdErrors.OK: + self.assertTrue(is_some) self.assertGreater(num_devices_added, 0) else: + self.assertFalse(is_some) self.assertEqual(num_devices_added, 0) - blockdev_object_paths = frozenset(result) + blockdev_paths = frozenset(result) # blockdevs exported on the D-Bus are exactly those added blockdevs2 = list(blockdevs(props={"Pool": pool}).search(managed_objects)) - blockdevs2_object_paths = frozenset([op for (op, _) in blockdevs2]) - self.assertEqual(blockdevs2_object_paths, blockdev_object_paths) + blockdevs2_paths = frozenset([op for (op, _) in blockdevs2]) + self.assertEqual(blockdevs2_paths, blockdev_paths) # no duplicates in the object paths self.assertEqual(len(blockdevs2), num_devices_added) @@ -121,7 +126,7 @@ def setUp(self): """ super().setUp() self._proxy = get_object(TOP_OBJECT) - ((poolpath, devpaths), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, devpaths)), _, _) = Manager.Methods.CreatePool( self._proxy, { "name": self._POOLNAME, @@ -145,8 +150,11 @@ def testEmptyDevs(self): blockdevs2 = blockdevs(props={"Pool": pool, "Tier": 1}).search(managed_objects) self.assertEqual(list(blockdevs2), []) - (result, rc, _) = Pool.Methods.AddCacheDevs(self._pool_object, {"devices": []}) + ((is_some, result), rc, _) = Pool.Methods.AddCacheDevs( + self._pool_object, {"devices": []} + ) + self.assertFalse(is_some) self.assertEqual(len(result), 0) self.assertEqual(rc, StratisdErrors.OK) @@ -163,7 +171,7 @@ def testSomeDevs(self): blockdevs1 = blockdevs(props={"Pool": pool, "Tier": 0}).search(managed_objects) self.assertEqual(self._devpaths, frozenset(op for (op, _) in blockdevs1)) - (result, rc, _) = Pool.Methods.AddCacheDevs( + ((is_some, result), rc, _) = Pool.Methods.AddCacheDevs( self._pool_object, {"devices": _DEVICE_STRATEGY()} ) @@ -171,8 +179,10 @@ def testSomeDevs(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) if rc == StratisdErrors.OK: + self.assertTrue(is_some) self.assertGreater(num_devices_added, 0) else: + self.assertFalse(is_some) self.assertEqual(num_devices_added, 0) blockdev_object_paths = frozenset(result) diff --git a/tests/client-dbus/tests/dbus/pool/test_add_data_devs.py b/tests/client-dbus/tests/dbus/pool/test_add_data_devs.py index 47984a288b..78e871cec2 100644 --- a/tests/client-dbus/tests/dbus/pool/test_add_data_devs.py +++ b/tests/client-dbus/tests/dbus/pool/test_add_data_devs.py @@ -44,7 +44,7 @@ def setUp(self): """ super().setUp() self._proxy = get_object(TOP_OBJECT) - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": []}, ) @@ -61,8 +61,11 @@ def testEmptyDevs(self): blockdevs1 = blockdevs(props={"Pool": pool}).search(managed_objects) self.assertEqual(list(blockdevs1), []) - (result, rc, _) = Pool.Methods.AddDataDevs(self._pool_object, {"devices": []}) + ((is_some, result), rc, _) = Pool.Methods.AddDataDevs( + self._pool_object, {"devices": []} + ) + 
self.assertFalse(is_some) self.assertEqual(result, []) self.assertEqual(rc, StratisdErrors.OK) @@ -84,7 +87,7 @@ def testSomeDevs(self): blockdevs1 = blockdevs(props={"Pool": pool}).search(managed_objects) self.assertEqual(list(blockdevs1), []) - (result, rc, _) = Pool.Methods.AddDataDevs( + ((is_some, result), rc, _) = Pool.Methods.AddDataDevs( self._pool_object, {"devices": _DEVICE_STRATEGY()} ) @@ -92,8 +95,10 @@ def testSomeDevs(self): managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) if rc == StratisdErrors.OK: + self.assertTrue(is_some) self.assertGreater(num_devices_added, 0) else: + self.assertFalse(is_some) self.assertEqual(num_devices_added, 0) blockdev_object_paths = frozenset(result) diff --git a/tests/client-dbus/tests/dbus/pool/test_create_filesystem.py b/tests/client-dbus/tests/dbus/pool/test_create_filesystem.py index 4acd81c671..fe58c24537 100644 --- a/tests/client-dbus/tests/dbus/pool/test_create_filesystem.py +++ b/tests/client-dbus/tests/dbus/pool/test_create_filesystem.py @@ -46,7 +46,7 @@ def setUp(self): super().setUp() self._proxy = get_object(TOP_OBJECT) self._devs = _DEVICE_STRATEGY() - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": self._devs}, ) @@ -59,10 +59,11 @@ def testCreate(self): list should always succeed, and it should not increase the number of volumes. """ - (result, rc, _) = Pool.Methods.CreateFilesystems( + ((is_some, result), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": []} ) + self.assertFalse(is_some) self.assertEqual(len(result), 0) self.assertEqual(rc, StratisdErrors.OK) @@ -78,12 +79,13 @@ def testDuplicateSpecs(self): """ new_name = "name" - (result, rc, _) = Pool.Methods.CreateFilesystems( + ((is_some, result), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [new_name, new_name]} ) - self.assertEqual(rc, StratisdErrors.OK) + self.assertTrue(is_some) self.assertEqual(len(result), 1) + self.assertEqual(rc, StratisdErrors.OK) (_, fs_name) = result[0] self.assertEqual(fs_name, new_name) @@ -109,7 +111,7 @@ def setUp(self): super().setUp() self._proxy = get_object(TOP_OBJECT) self._devs = _DEVICE_STRATEGY() - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": self._devs}, ) @@ -121,13 +123,15 @@ def testCreate(self): """ Test calling by specifying a volume name. Because there is already a volume with the given name, the creation of the new volume should - fail, and no additional volume should be created. + return the volume information as unchanged, and no additional volume + should be created. 
""" - (result, rc, _) = Pool.Methods.CreateFilesystems( + ((is_some, result), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [self._VOLNAME]} ) - self.assertEqual(rc, StratisdErrors.ALREADY_EXISTS) + self.assertEqual(rc, StratisdErrors.OK) + self.assertFalse(is_some) self.assertEqual(len(result), 0) result = filesystems().search( @@ -142,11 +146,12 @@ def testCreateOne(self): """ new_name = "newname" - (result, rc, _) = Pool.Methods.CreateFilesystems( + ((is_some, result), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [new_name]} ) self.assertEqual(rc, StratisdErrors.OK) + self.assertTrue(is_some) self.assertEqual(len(result), 1) (_, fs_name) = result[0] @@ -161,15 +166,17 @@ def testCreateOne(self): def testCreateWithConflict(self): """ Test calling by specifying several volumes. Because there is already - a volume with the given name, the creation of the new volumes should - fail, and no additional volume should be created. + a volume with the given name, only the new volumes should be created + and the command should succeed. """ - (result, rc, _) = Pool.Methods.CreateFilesystems( + ((is_some, result), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [self._VOLNAME, "newname"]} ) - self.assertEqual(rc, StratisdErrors.ALREADY_EXISTS) + self.assertEqual(rc, StratisdErrors.OK) + self.assertTrue(is_some) self.assertEqual(len(result), 0) + self.assertEqual(result[0][1], "newname") result = filesystems().search( ObjectManager.Methods.GetManagedObjects(self._proxy, {}) @@ -182,11 +189,12 @@ def testCreateMultiple(self): volume names are not supported due to possible d-bus timeouts. When multiple volume support is added back - this test should be removed. """ - (result, rc, _) = Pool.Methods.CreateFilesystems( + ((is_some, result), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": ["a", "b"]} ) self.assertEqual(rc, StratisdErrors.ERROR) + self.assertFalse(is_some) self.assertEqual(len(result), 0) result = filesystems().search( diff --git a/tests/client-dbus/tests/dbus/pool/test_create_snapshot.py b/tests/client-dbus/tests/dbus/pool/test_create_snapshot.py index d016d79b0e..dae664d48f 100644 --- a/tests/client-dbus/tests/dbus/pool/test_create_snapshot.py +++ b/tests/client-dbus/tests/dbus/pool/test_create_snapshot.py @@ -46,14 +46,14 @@ def setUp(self): super().setUp() self._proxy = get_object(TOP_OBJECT) self._devs = _DEVICE_STRATEGY() - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": self._devs}, ) self._pool_object = get_object(poolpath) Manager.Methods.ConfigureSimulator(self._proxy, {"denominator": 8}) - (fs_objects, rc, _) = Pool.Methods.CreateFilesystems( + ((_, fs_objects), rc, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [self._VOLNAME]} ) @@ -69,11 +69,12 @@ def testCreate(self): Test creating a snapshot and ensure that it works. """ - (ss_object_path, rc, _) = Pool.Methods.SnapshotFilesystem( + ((is_some, ss_object_path), rc, _) = Pool.Methods.SnapshotFilesystem( self._pool_object, {"origin": self._fs_object_path, "snapshot_name": self._SNAPSHOTNAME}, ) + self.assertTrue(is_some) self.assertEqual(rc, StratisdErrors.OK) self.assertNotEqual(ss_object_path, "/") @@ -87,20 +88,22 @@ def testDuplicateSnapshotName(self): Test creating a snapshot with duplicate name. 
""" - (ss_object_path, rc, _) = Pool.Methods.SnapshotFilesystem( + ((is_some, ss_object_path), rc, _) = Pool.Methods.SnapshotFilesystem( self._pool_object, {"origin": self._fs_object_path, "snapshot_name": self._SNAPSHOTNAME}, ) + self.assertTrue(is_some) self.assertEqual(rc, StratisdErrors.OK) self.assertNotEqual(ss_object_path, "/") - (ss_object_path_dupe_name, rc, _) = Pool.Methods.SnapshotFilesystem( + ((is_some, ss_object_path_dupe_name), rc, _) = Pool.Methods.SnapshotFilesystem( self._pool_object, {"origin": self._fs_object_path, "snapshot_name": self._SNAPSHOTNAME}, ) - self.assertEqual(rc, StratisdErrors.ALREADY_EXISTS) + self.assertFalse(is_some) + self.assertEqual(rc, StratisdErrors.OK) self.assertEqual(ss_object_path_dupe_name, "/") result = filesystems().search( diff --git a/tests/client-dbus/tests/dbus/pool/test_destroy_filesystem.py b/tests/client-dbus/tests/dbus/pool/test_destroy_filesystem.py index 621664c03e..4da60e9c55 100644 --- a/tests/client-dbus/tests/dbus/pool/test_destroy_filesystem.py +++ b/tests/client-dbus/tests/dbus/pool/test_destroy_filesystem.py @@ -44,7 +44,7 @@ def setUp(self): super().setUp() self._proxy = get_object(TOP_OBJECT) self._devs = _DEVICE_STRATEGY() - ((poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": self._devs}, ) @@ -57,11 +57,11 @@ def testDestroyNone(self): list should always succeed, and it should not decrease the number of volumes. """ - (result, rc, _) = Pool.Methods.DestroyFilesystems( + ((_, result_changed), rc, _) = Pool.Methods.DestroyFilesystems( self._pool_object, {"filesystems": []} ) - self.assertEqual(len(result), 0) + self.assertEqual(len(result_changed), 0) self.assertEqual(rc, StratisdErrors.OK) result = filesystems().search( @@ -74,11 +74,11 @@ def testDestroyOne(self): Test calling with a non-existant object path. This should succeed, because at the end the filesystem is not there. """ - (result, rc, _) = Pool.Methods.DestroyFilesystems( + ((_, result), rc, _) = Pool.Methods.DestroyFilesystems( self._pool_object, {"filesystems": ["/"]} ) - self.assertEqual(rc, StratisdErrors.OK) self.assertEqual(len(result), 0) + self.assertEqual(rc, StratisdErrors.OK) result = filesystems().search( ObjectManager.Methods.GetManagedObjects(self._proxy, {}) @@ -101,12 +101,12 @@ def setUp(self): super().setUp() self._proxy = get_object(TOP_OBJECT) self._devs = _DEVICE_STRATEGY() - ((self._poolpath, _), _, _) = Manager.Methods.CreatePool( + ((_, (self._poolpath, _)), _, _) = Manager.Methods.CreatePool( self._proxy, {"name": self._POOLNAME, "redundancy": (True, 0), "devices": self._devs}, ) self._pool_object = get_object(self._poolpath) - (self._filesystems, _, _) = Pool.Methods.CreateFilesystems( + ((_, self._filesystems), _, _) = Pool.Methods.CreateFilesystems( self._pool_object, {"specs": [(self._VOLNAME, "", None)]} ) Manager.Methods.ConfigureSimulator(self._proxy, {"denominator": 8}) @@ -117,10 +117,11 @@ def testDestroyOne(self): should always succeed. """ fs_object_path = self._filesystems[0][0] - (result, rc, _) = Pool.Methods.DestroyFilesystems( + ((is_some, result), rc, _) = Pool.Methods.DestroyFilesystems( self._pool_object, {"filesystems": [fs_object_path]} ) + self.assertTrue(is_some) self.assertEqual(len(result), 1) self.assertEqual(rc, StratisdErrors.OK) @@ -136,10 +137,11 @@ def testDestroyTwo(self): returned. 
""" fs_object_path = self._filesystems[0][0] - (result, rc, _) = Pool.Methods.DestroyFilesystems( + ((is_some, result), rc, _) = Pool.Methods.DestroyFilesystems( self._pool_object, {"filesystems": [fs_object_path, "/"]} ) + self.assertTrue(is_some) self.assertEqual(len(result), 1) self.assertEqual(rc, StratisdErrors.OK) diff --git a/tests/client-dbus/tests/dbus/pool/test_rename.py b/tests/client-dbus/tests/dbus/pool/test_rename.py index 9ee49614ec..d0f00f51f7 100644 --- a/tests/client-dbus/tests/dbus/pool/test_rename.py +++ b/tests/client-dbus/tests/dbus/pool/test_rename.py @@ -43,7 +43,7 @@ def setUp(self): """ super().setUp() self._proxy = get_object(TOP_OBJECT) - ((self._pool_object_path, _), _, _) = Manager.Methods.CreatePool( + ((_, (self._pool_object_path, _)), _, _) = Manager.Methods.CreatePool( self._proxy, { "name": self._POOLNAME, @@ -58,12 +58,12 @@ def testNullMapping(self): """ Test rename to same name. """ - (result, rc, _) = Pool.Methods.SetName( + ((is_some, _), rc, _) = Pool.Methods.SetName( self._pool_object, {"name": self._POOLNAME} ) self.assertEqual(rc, StratisdErrors.OK) - self.assertFalse(result) + self.assertFalse(is_some) managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) result = next( @@ -79,9 +79,11 @@ def testNewName(self): """ new_name = "new" - (result, rc, _) = Pool.Methods.SetName(self._pool_object, {"name": new_name}) + ((is_some, _), rc, _) = Pool.Methods.SetName( + self._pool_object, {"name": new_name} + ) - self.assertTrue(result) + self.assertTrue(is_some) self.assertEqual(rc, StratisdErrors.OK) managed_objects = ObjectManager.Methods.GetManagedObjects(self._proxy, {}) diff --git a/tests/client-dbus/tests/udev/test_udev.py b/tests/client-dbus/tests/udev/test_udev.py index 1d62099e74..a0841394a7 100644 --- a/tests/client-dbus/tests/udev/test_udev.py +++ b/tests/client-dbus/tests/udev/test_udev.py @@ -69,7 +69,11 @@ def _create_pool(name, devices): # actually exist, retry on error. error_reasons = "" for _ in range(3): - ((pool_object_path, _), exit_code, error_str) = Manager.Methods.CreatePool( + ( + (_, (pool_object_path, _)), + exit_code, + error_str, + ) = Manager.Methods.CreatePool( get_object(TOP_OBJECT), {"name": name, "redundancy": (True, 0), "devices": devices}, )