use anyhow::{anyhow, Result};
|
|
use std::{
|
|
path::{Path, PathBuf},
|
|
sync::atomic::{AtomicIsize, Ordering},
|
|
};
|
|
|
|
use crate::{container, dirsync, mount, zfs};
|
|
|
|
/// Global step counter, presumably read by the logger to tag output with the
/// current op number (name suggests "log context number" — confirm at the
/// reader side). Positive while executing ops, negative during cleanup.
pub static LOG_CTX_NUM: AtomicIsize = AtomicIsize::new(0);

/// RAII guard that advances `LOG_CTX_NUM` one step at a time and zeroes it
/// again when dropped. `negative` selects the counting direction.
struct LogCtxNum {
    negative: bool,
}

impl LogCtxNum {
    /// Reset the global counter and return a guard counting in the
    /// requested direction.
    fn new(negative: bool) -> LogCtxNum {
        LOG_CTX_NUM.store(0, Ordering::SeqCst);
        LogCtxNum { negative }
    }

    /// Move the counter one step in this guard's direction.
    fn step(&mut self) {
        // fetch_add(-1) is equivalent to fetch_sub(1); one atomic op either way.
        let delta: isize = if self.negative { -1 } else { 1 };
        LOG_CTX_NUM.fetch_add(delta, Ordering::SeqCst);
    }
}

impl Drop for LogCtxNum {
    fn drop(&mut self) {
        // Leave the counter zeroed so log lines outside a run are untagged.
        LOG_CTX_NUM.store(0, Ordering::SeqCst);
    }
}
|
|
|
|
// TODO: safe-path / cap-std stuff
/// Join `unsafe_path` onto `root`, treating an absolute path as relative to
/// `root` instead of letting it escape to the real filesystem root.
///
/// NOTE(review): `..` components are not neutralized, so a hostile path can
/// still climb out of `root` — that is what the safe-path/cap-std TODO above
/// is about.
fn scoped_join(root: &Path, unsafe_path: &Path) -> PathBuf {
    // strip_prefix("/") succeeds exactly for rooted (absolute, on Unix) paths;
    // relative paths fall through unchanged — same effect as an is_absolute()
    // branch.
    let relative = unsafe_path.strip_prefix("/").unwrap_or(unsafe_path);
    root.join(relative)
}
|
|
|
|
/// Per-run state threaded through every `Op` by `step`.
struct Context<'a> {
    // Selected store: (store root dataset, its child datasets) — tuple layout
    // inferred from get_dataset()/execute() usage; see zfs::filter_stores.
    store: &'a (zfs::Dataset, Vec<zfs::Dataset>),
    // Temporary directory all target paths are scoped-joined under for this run.
    root_path: &'a Path,
    // True once a safe devfs has been mounted at <root>/dev (set by RunCommand).
    mounted_devfs: bool,
}
|
|
|
|
impl<'a> Context<'a> {
|
|
fn get_dataset(&'a self, name: &str) -> Option<&'a zfs::Dataset> {
|
|
let dspath = format!("{}/{}", self.store.0.path, name);
|
|
self.store.1.iter().find(|ds| ds.path == dspath)
|
|
}
|
|
}
|
|
|
|
/// One action in a run plan; executed in order by `step`.
#[derive(Debug)]
pub enum Op<'a> {
    /// Bind-mount an existing dataset's mountpoint (read-only) at its
    /// recorded target path.
    UseDataset { name: &'a str },
    /// Create a fresh writable dataset named `name`; its contents are staged
    /// in a tmpfs at `target_path` and synced into the dataset at cleanup.
    CreateDataset { name: &'a str, target_path: &'a str },
    /// Create a new dataset from a snapshot of `parent_name`, staged in a
    /// tmpfs at the parent's target path.
    InheritDataset { name: &'a str, parent_name: &'a str },
    /// Mount a tmpfs at `target_path` inside the run root.
    MountTmpfs { target_path: &'a str },
    /// Bind-mount `host_path` at `target_path`; `rw` selects read-write.
    MountBind { host_path: &'a str, target_path: &'a str, rw: bool },
    /// Copy files from `host_path` into `target_path` (dirsync::sync_files).
    ImportFiles { host_path: &'a str, target_path: &'a str },
    /// Run `cmd` inside the container root; a safe /dev is mounted first if
    /// not already present.
    RunCommand { cmd: &'a str },
}
|
|
|
|
/// Teardown action produced by `step`, executed in reverse order by `cleanup`.
#[derive(Debug)]
enum Cleanup {
    /// Nothing to tear down.
    Nothing,
    /// Release a single mount.
    Unmount(mount::Mount),
    /// Sync the staged tmpfs contents into the writable dataset, release
    /// both mounts, then commit the dataset.
    Save(mount::Mount, zfs::WritableDataset),
}
|
|
|
|
fn step<'a>(op: Op<'a>, ctx: &mut Context) -> Result<Cleanup> {
|
|
Ok(match op {
|
|
Op::UseDataset { name } => {
|
|
let ds = ctx.get_dataset(name).ok_or(anyhow!("dataset not found"))?;
|
|
let target_path = ds.target_path.as_ref().ok_or(anyhow!("dataset without target"))?;
|
|
let snap_path = ds.mountpoint.as_ref().ok_or(anyhow!("dataset without mountpoint"))?;
|
|
Cleanup::Unmount(mount::Mount::mount_bind(
|
|
Path::new(snap_path),
|
|
scoped_join(ctx.root_path, Path::new(target_path)),
|
|
false,
|
|
)?)
|
|
}
|
|
Op::CreateDataset { name, target_path } => {
|
|
if ctx.get_dataset(name).is_some() {
|
|
return Err(anyhow!("Dataset '{}' already exists", name));
|
|
}
|
|
let wd = zfs::WritableDataset::from_scratch(&ctx.store.0, name, target_path)?;
|
|
let mtmp = mount::Mount::mount_tmpfs(scoped_join(ctx.root_path, Path::new(target_path)))?;
|
|
Cleanup::Save(mtmp, wd)
|
|
}
|
|
Op::InheritDataset { name, parent_name } => {
|
|
if ctx.get_dataset(name).is_some() {
|
|
return Err(anyhow!("Dataset '{}' already exists", name));
|
|
}
|
|
if let Some(parent) = ctx.get_dataset(parent_name) {
|
|
let target_path = parent
|
|
.target_path
|
|
.as_ref()
|
|
.ok_or(anyhow!("Dataset '{}' does not have a target path", parent.path))?;
|
|
let wd = zfs::WritableDataset::from_snapshot(&ctx.store.0, parent, name)?;
|
|
let mtmp = mount::Mount::mount_tmpfs(scoped_join(ctx.root_path, Path::new(target_path)))?;
|
|
Cleanup::Save(mtmp, wd)
|
|
} else {
|
|
return Err(anyhow!("Dataset '{}' does not exist", parent_name));
|
|
}
|
|
}
|
|
Op::MountTmpfs { target_path } => {
|
|
Cleanup::Unmount(mount::Mount::mount_tmpfs(scoped_join(ctx.root_path, Path::new(target_path)))?)
|
|
}
|
|
Op::MountBind { host_path, target_path, rw } => Cleanup::Unmount(mount::Mount::mount_bind(
|
|
Path::new(host_path),
|
|
scoped_join(ctx.root_path, Path::new(target_path)),
|
|
rw,
|
|
)?),
|
|
Op::ImportFiles { host_path, target_path } => {
|
|
dirsync::sync_files(Path::new(host_path), &scoped_join(ctx.root_path, Path::new(target_path)))?;
|
|
Cleanup::Nothing
|
|
}
|
|
Op::RunCommand { cmd } => {
|
|
let clean = if ctx.mounted_devfs {
|
|
Cleanup::Nothing
|
|
} else {
|
|
ctx.mounted_devfs = true;
|
|
Cleanup::Unmount(mount::Mount::mount_safe_dev(scoped_join(ctx.root_path, Path::new("/dev")))?)
|
|
};
|
|
container::run_contained(ctx.root_path, cmd)?;
|
|
clean
|
|
}
|
|
})
|
|
}
|
|
|
|
fn cleanup(op: Cleanup) -> Result<()> {
|
|
match op {
|
|
Cleanup::Nothing => Ok(()),
|
|
Cleanup::Unmount(mut m) => m.unmount(),
|
|
Cleanup::Save(mut mtmp, wd) => {
|
|
let mut mtgt = mount::Mount::mount_temp(Path::new(&wd.0), "zfs", "rw")?;
|
|
dirsync::sync_files(mtmp.path(), mtgt.path())?;
|
|
mtgt.unmount()?; // TODO: Drop to ensure this happens with failures
|
|
mtmp.unmount()?;
|
|
wd.commit()?;
|
|
Ok(())
|
|
}
|
|
}
|
|
}
|
|
|
|
pub fn execute(storepath: Option<&str>, mut todo: Vec<Op>) -> Result<()> {
|
|
let stores = zfs::filter_stores(zfs::get_datasets()?)?;
|
|
|
|
let store = if let Some(path) = storepath {
|
|
stores
|
|
.iter()
|
|
.find(|(d, _)| d.path == path)
|
|
.ok_or(anyhow!("Store '{}' not found", path))
|
|
} else {
|
|
stores.first().ok_or(anyhow!("No stores found"))
|
|
}?;
|
|
|
|
let mut toclean = Vec::new();
|
|
|
|
let root = mount::TempDir::new()?;
|
|
log::debug!("The run mountpoint is '{}'", root.path().display());
|
|
|
|
let mut ctx = Context { store, root_path: root.path(), mounted_devfs: false };
|
|
|
|
let mut ret = Ok(());
|
|
let mut lognum = LogCtxNum::new(false);
|
|
log::debug!("Actions to be executed: {:#?}", todo);
|
|
todo.reverse();
|
|
while let Some(op) = todo.pop() {
|
|
lognum.step();
|
|
match step(op, &mut ctx) {
|
|
Ok(clean) => toclean.push(clean),
|
|
Err(e) => {
|
|
log::error!("{:?}", e);
|
|
ret = Err(anyhow!("Run did not succeed"));
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
lognum = LogCtxNum::new(true);
|
|
log::debug!("Cleanup to be executed: {:#?}", toclean);
|
|
while let Some(clean) = toclean.pop() {
|
|
lognum.step();
|
|
cleanup(clean)?;
|
|
}
|
|
ret
|
|
}
|