diff --git a/lib/src/blockdev.rs b/lib/src/blockdev.rs index 7f737f15..6a0e1b36 100644 --- a/lib/src/blockdev.rs +++ b/lib/src/blockdev.rs @@ -13,7 +13,7 @@ use nix::errno::Errno; use regex::Regex; use serde::Deserialize; -use crate::install::run_in_host_mountns; +use crate::hostexec::run_in_host_mountns; use crate::task::Task; #[derive(Debug, Deserialize)] diff --git a/lib/src/cli.rs b/lib/src/cli.rs index c949b351..39d633e3 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -90,6 +90,10 @@ pub(crate) struct SwitchOpts { /// Target image to use for the next boot. pub(crate) target: String, + + /// The storage backend + #[clap(long, hide = true)] + pub(crate) backend: Option, } /// Options controlling rollback @@ -251,6 +255,14 @@ impl InternalsOpts { const GENERATOR_BIN: &'static str = "bootc-systemd-generator"; } +#[derive(Debug, clap::Parser, PartialEq, Eq)] +pub(crate) struct InternalPodmanOpts { + #[clap(long, value_parser, default_value = "/")] + root: Utf8PathBuf, + #[clap(trailing_var_arg = true, allow_hyphen_values = true)] + args: Vec, +} + /// Deploy and transactionally in-place with bootable container images. /// /// The `bootc` project currently uses ostree-containers as a backend @@ -379,6 +391,9 @@ pub(crate) enum Opt { #[clap(subcommand)] #[clap(hide = true)] Internals(InternalsOpts), + /// Execute podman in our internal configuration + #[clap(hide = true)] + InternalPodman(InternalPodmanOpts), #[clap(hide(true))] #[cfg(feature = "docgen")] Man(ManOpts), @@ -482,7 +497,7 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> { let sysroot = &get_locked_sysroot().await?; let repo = &sysroot.repo(); let (booted_deployment, _deployments, host) = - crate::status::get_status_require_booted(sysroot)?; + crate::status::get_status_require_booted(sysroot).await?; let imgref = host.spec.image.as_ref(); // If there's no specified image, let's be nice and check if the booted system is using rpm-ostree if imgref.is_none() { @@ -540,7 +555,7 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> { } } } else { - let fetched = crate::deploy::pull(sysroot, imgref, opts.quiet).await?; + let fetched = crate::deploy::pull(sysroot, spec.backend, imgref, opts.quiet).await?; let kargs = crate::kargs::get_kargs(repo, &booted_deployment, fetched.as_ref())?; let staged_digest = staged_image.as_ref().map(|s| s.image_digest.as_str()); let fetched_digest = fetched.manifest_digest.as_str(); @@ -603,6 +618,8 @@ async fn switch(opts: SwitchOpts) -> Result<()> { let target = ostree_container::OstreeImageReference { sigverify, imgref }; let target = ImageReference::from(target); + let backend = opts.backend.unwrap_or_default(); + // If we're doing an in-place mutation, we shortcut most of the rest of the work here if opts.mutate_in_place { let deployid = { @@ -610,7 +627,7 @@ async fn switch(opts: SwitchOpts) -> Result<()> { let target = target.clone(); let root = cap_std::fs::Dir::open_ambient_dir("/", cap_std::ambient_authority())?; tokio::task::spawn_blocking(move || { - crate::deploy::switch_origin_inplace(&root, &target) + crate::deploy::switch_origin_inplace(&root, &target, backend) }) .await?? 
}; @@ -623,11 +640,12 @@ async fn switch(opts: SwitchOpts) -> Result<()> { let sysroot = &get_locked_sysroot().await?; let repo = &sysroot.repo(); let (booted_deployment, _deployments, host) = - crate::status::get_status_require_booted(sysroot)?; + crate::status::get_status_require_booted(sysroot).await?; let new_spec = { let mut new_spec = host.spec.clone(); new_spec.image = Some(target.clone()); + new_spec.backend = backend; new_spec }; @@ -637,7 +655,7 @@ async fn switch(opts: SwitchOpts) -> Result<()> { } let new_spec = RequiredHostSpec::from_spec(&new_spec)?; - let fetched = crate::deploy::pull(sysroot, &target, opts.quiet).await?; + let fetched = crate::deploy::pull(sysroot, new_spec.backend, &target, opts.quiet).await?; let kargs = crate::kargs::get_kargs(repo, &booted_deployment, fetched.as_ref())?; if !opts.retain { @@ -672,7 +690,7 @@ async fn rollback(_opts: RollbackOpts) -> Result<()> { async fn edit(opts: EditOpts) -> Result<()> { let sysroot = &get_locked_sysroot().await?; let (booted_deployment, _deployments, host) = - crate::status::get_status_require_booted(sysroot)?; + crate::status::get_status_require_booted(sysroot).await?; let new_host: Host = if let Some(filename) = opts.filename { let mut r = std::io::BufReader::new(std::fs::File::open(filename)?); serde_yaml::from_reader(&mut r)? @@ -697,7 +715,8 @@ async fn edit(opts: EditOpts) -> Result<()> { return crate::deploy::rollback(sysroot).await; } - let fetched = crate::deploy::pull(sysroot, new_spec.image, opts.quiet).await?; + let fetched = + crate::deploy::pull(sysroot, new_spec.backend, new_spec.image, opts.quiet).await?; let repo = &sysroot.repo(); let kargs = crate::kargs::get_kargs(repo, &booted_deployment, fetched.as_ref())?; @@ -799,7 +818,7 @@ async fn run_from_opt(opt: Opt) -> Result<()> { }, #[cfg(feature = "install")] Opt::ExecInHostMountNamespace { args } => { - crate::install::exec_in_host_mountns(args.as_slice()) + crate::hostexec::exec_in_host_mountns(args.as_slice()) } Opt::Status(opts) => super::status::status(opts).await, Opt::Internals(opts) => match opts { @@ -813,6 +832,12 @@ async fn run_from_opt(opt: Opt) -> Result<()> { } InternalsOpts::FixupEtcFstab => crate::deploy::fixup_etc_fstab(&root), }, + Opt::InternalPodman(args) => { + prepare_for_write()?; + // This also remounts writable + let _sysroot = get_locked_sysroot().await?; + crate::podman::exec(args.root.as_path(), args.args.as_slice()) + } #[cfg(feature = "docgen")] Opt::Man(manopts) => crate::docgen::generate_manpages(&manopts.directory), } diff --git a/lib/src/deploy.rs b/lib/src/deploy.rs index 690550e2..8aa27154 100644 --- a/lib/src/deploy.rs +++ b/lib/src/deploy.rs @@ -9,17 +9,19 @@ use anyhow::{anyhow, Context, Result}; use cap_std::fs::{Dir, MetadataExt}; use cap_std_ext::cap_std; use cap_std_ext::dirext::CapStdExtDirExt; +use chrono::DateTime; use fn_error_context::context; use ostree::{gio, glib}; use ostree_container::OstreeImageReference; use ostree_ext::container as ostree_container; use ostree_ext::container::store::PrepareResult; +use ostree_ext::oci_spec; use ostree_ext::ostree; use ostree_ext::ostree::Deployment; use ostree_ext::sysroot::SysrootLock; use crate::spec::ImageReference; -use crate::spec::{BootOrder, HostSpec}; +use crate::spec::{Backend, BootOrder, HostSpec}; use crate::status::labels_of_config; // TODO use https://github.com/ostreedev/ostree-rs-ext/pull/493/commits/afc1837ff383681b947de30c0cefc70080a4f87a @@ -31,11 +33,14 @@ const BOOTC_DERIVED_KEY: &str = "bootc.derived"; /// Variant of HostSpec but required 
to be filled out
 pub(crate) struct RequiredHostSpec<'a> {
     pub(crate) image: &'a ImageReference,
+    pub(crate) backend: Backend,
 }
 
 /// State of a locally fetched image
 pub(crate) struct ImageState {
+    pub(crate) backend: Backend,
     pub(crate) manifest_digest: String,
+    pub(crate) created: Option<DateTime<chrono::Utc>>,
     pub(crate) version: Option<String>,
     pub(crate) ostree_commit: String,
 }
@@ -48,7 +53,10 @@ impl<'a> RequiredHostSpec<'a> {
             .image
             .as_ref()
             .ok_or_else(|| anyhow::anyhow!("Missing image in specification"))?;
-        Ok(Self { image })
+        Ok(Self {
+            image,
+            backend: spec.backend,
+        })
     }
 }
 
@@ -56,8 +64,32 @@ impl From<ostree_container::store::LayeredImageState> for ImageState {
     fn from(value: ostree_container::store::LayeredImageState) -> Self {
         let version = value.version().map(|v| v.to_owned());
         let ostree_commit = value.get_commit().to_owned();
+        let labels = crate::status::labels_of_config(&value.configuration);
+        let created = labels
+            .and_then(|l| {
+                l.get(oci_spec::image::ANNOTATION_CREATED)
+                    .map(|s| s.as_str())
+            })
+            .and_then(crate::status::try_deserialize_timestamp);
         Self {
+            backend: Backend::OstreeContainer,
             manifest_digest: value.manifest_digest,
+            created,
             version,
             ostree_commit,
         }
     }
 }
+
+impl From<crate::podman::PodmanInspect> for ImageState {
+    fn from(value: crate::podman::PodmanInspect) -> Self {
+        let version = None;
+        let ostree_commit = "".to_owned();
+        let created = value.created;
+        Self {
+            backend: Backend::Container,
+            manifest_digest: value.digest,
+            created,
+            version,
+            ostree_commit,
+        }
+    }
+}
@@ -70,8 +102,14 @@ impl ImageState {
         &self,
         repo: &ostree::Repo,
     ) -> Result<Option<ostree_ext::oci_spec::image::ImageManifest>> {
-        ostree_container::store::query_image_commit(repo, &self.ostree_commit)
-            .map(|v| Some(v.manifest))
+        match self.backend {
+            Backend::OstreeContainer => {
+                ostree_container::store::query_image_commit(repo, &self.ostree_commit)
+                    .map(|v| Some(v.manifest))
+            }
+            // TODO: Figure out if we can get the OCI manifest from podman
+            Backend::Container => Ok(None),
+        }
     }
 }
 
@@ -164,6 +202,31 @@ async fn handle_layer_progress_print(
 /// Wrapper for pulling a container image, wiring up status output.
 #[context("Pulling")]
 pub(crate) async fn pull(
+    sysroot: &SysrootLock,
+    backend: Backend,
+    imgref: &ImageReference,
+    quiet: bool,
+) -> Result<Box<ImageState>> {
+    match backend {
+        Backend::OstreeContainer => pull_via_ostree(sysroot, imgref, quiet).await,
+        Backend::Container => pull_via_podman(sysroot, imgref, quiet).await,
+    }
+}
+
+/// Wrapper for pulling a container image, wiring up status output.
+async fn pull_via_podman( + sysroot: &SysrootLock, + imgref: &ImageReference, + quiet: bool, +) -> Result> { + let rootfs = &Dir::reopen_dir(&crate::utils::sysroot_fd_borrowed(sysroot))?; + let fetched_imageid = crate::podman::podman_pull(rootfs, imgref, quiet).await?; + crate::podman_ostree::commit_image_to_ostree(sysroot, &fetched_imageid) + .await + .map(Box::new) +} + +async fn pull_via_ostree( sysroot: &SysrootLock, imgref: &ImageReference, quiet: bool, @@ -295,7 +358,7 @@ async fn deploy( } #[context("Generating origin")] -fn origin_from_imageref(imgref: &ImageReference) -> Result { +fn origin_from_imageref(imgref: &ImageReference, backend: Backend) -> Result { let origin = glib::KeyFile::new(); let imgref = OstreeImageReference::from(imgref.clone()); origin.set_string( @@ -303,6 +366,9 @@ fn origin_from_imageref(imgref: &ImageReference) -> Result { ostree_container::deploy::ORIGIN_CONTAINER, imgref.to_string().as_str(), ); + if backend == Backend::Container { + origin.set_string("bootc", "backend", "container"); + } Ok(origin) } @@ -316,7 +382,7 @@ pub(crate) async fn stage( opts: Option>, ) -> Result<()> { let merge_deployment = sysroot.merge_deployment(Some(stateroot)); - let origin = origin_from_imageref(spec.image)?; + let origin = origin_from_imageref(spec.image, image.backend)?; crate::deploy::deploy( sysroot, merge_deployment.as_ref(), @@ -340,7 +406,8 @@ pub(crate) async fn stage( pub(crate) async fn rollback(sysroot: &SysrootLock) -> Result<()> { const ROLLBACK_JOURNAL_ID: &str = "26f3b1eb24464d12aa5e7b544a6b5468"; let repo = &sysroot.repo(); - let (booted_deployment, deployments, host) = crate::status::get_status_require_booted(sysroot)?; + let (booted_deployment, deployments, host) = + crate::status::get_status_require_booted(sysroot).await?; let new_spec = { let mut new_spec = host.spec.clone(); @@ -415,9 +482,13 @@ fn find_newest_deployment_name(deploysdir: &Dir) -> Result { } // Implementation of `bootc switch --in-place` -pub(crate) fn switch_origin_inplace(root: &Dir, imgref: &ImageReference) -> Result { +pub(crate) fn switch_origin_inplace( + root: &Dir, + imgref: &ImageReference, + backend: Backend, +) -> Result { // First, just create the new origin file - let origin = origin_from_imageref(imgref)?; + let origin = origin_from_imageref(imgref, backend)?; let serialized_origin = origin.to_data(); // Now, we can't rely on being officially booted (e.g. with the `ostree=` karg) @@ -477,7 +548,7 @@ fn test_switch_inplace() -> Result<()> { signature: None, }; { - let origin = origin_from_imageref(&orig_imgref)?; + let origin = origin_from_imageref(&orig_imgref, Backend::OstreeContainer)?; deploydir.atomic_write( format!("{target_deployment}.origin"), origin.to_data().as_bytes(), @@ -490,7 +561,7 @@ fn test_switch_inplace() -> Result<()> { signature: None, }; - let replaced = switch_origin_inplace(&td, &target_imgref).unwrap(); + let replaced = switch_origin_inplace(&td, &target_imgref, Backend::OstreeContainer).unwrap(); assert_eq!(replaced, target_deployment); Ok(()) } diff --git a/lib/src/hostexec.rs b/lib/src/hostexec.rs new file mode 100644 index 00000000..aeb347f4 --- /dev/null +++ b/lib/src/hostexec.rs @@ -0,0 +1,38 @@ +//! 
Run a command in the host mount namespace + +use std::os::fd::AsFd; +use std::os::unix::process::CommandExt; +use std::process::Command; + +use anyhow::{Context, Result}; +use camino::Utf8Path; +use fn_error_context::context; + +/// Run a command in the host mount namespace +pub(crate) fn run_in_host_mountns(cmd: &str) -> Command { + let mut c = Command::new("/proc/self/exe"); + c.args(["exec-in-host-mount-namespace", cmd]); + c +} + +#[context("Re-exec in host mountns")] +pub(crate) fn exec_in_host_mountns(args: &[std::ffi::OsString]) -> Result<()> { + let (cmd, args) = args + .split_first() + .ok_or_else(|| anyhow::anyhow!("Missing command"))?; + tracing::trace!("{cmd:?} {args:?}"); + let pid1mountns = std::fs::File::open("/proc/1/ns/mnt").context("open pid1 mountns")?; + nix::sched::setns(pid1mountns.as_fd(), nix::sched::CloneFlags::CLONE_NEWNS).context("setns")?; + rustix::process::chdir("/").context("chdir")?; + // Work around supermin doing chroot() and not pivot_root + // https://github.com/libguestfs/supermin/blob/5230e2c3cd07e82bd6431e871e239f7056bf25ad/init/init.c#L288 + if !Utf8Path::new("/usr").try_exists().context("/usr")? + && Utf8Path::new("/root/usr") + .try_exists() + .context("/root/usr")? + { + tracing::debug!("Using supermin workaround"); + rustix::process::chroot("/root").context("chroot")?; + } + Err(Command::new(cmd).args(args).exec()).context("exec")? +} diff --git a/lib/src/image.rs b/lib/src/image.rs index 4fd7cd9b..064bad55 100644 --- a/lib/src/image.rs +++ b/lib/src/image.rs @@ -47,7 +47,7 @@ pub(crate) async fn push_entrypoint(source: Option<&str>, target: Option<&str>) let source = if let Some(source) = source { ImageReference::try_from(source).context("Parsing source image")? } else { - let status = crate::status::get_status_require_booted(&sysroot)?; + let status = crate::status::get_status_require_booted(&sysroot).await?; // SAFETY: We know it's booted let booted = status.2.status.booted.unwrap(); let booted_image = booted.image.unwrap().image; diff --git a/lib/src/install.rs b/lib/src/install.rs index b9d6a653..ddf36fed 100644 --- a/lib/src/install.rs +++ b/lib/src/install.rs @@ -12,9 +12,7 @@ pub(crate) mod osconfig; use std::io::Write; use std::os::fd::AsFd; -use std::os::unix::process::CommandExt; use std::path::Path; -use std::process::Command; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -39,6 +37,7 @@ use serde::{Deserialize, Serialize}; use self::baseline::InstallBlockDeviceOpts; use crate::containerenv::ContainerExecutionInfo; +use crate::hostexec::run_in_host_mountns; use crate::mount::Filesystem; use crate::task::Task; use crate::utils::sigpolicy_from_opts; @@ -755,35 +754,6 @@ async fn initialize_ostree_root_from_self( Ok(aleph) } -/// Run a command in the host mount namespace -pub(crate) fn run_in_host_mountns(cmd: &str) -> Command { - let mut c = Command::new("/proc/self/exe"); - c.args(["exec-in-host-mount-namespace", cmd]); - c -} - -#[context("Re-exec in host mountns")] -pub(crate) fn exec_in_host_mountns(args: &[std::ffi::OsString]) -> Result<()> { - let (cmd, args) = args - .split_first() - .ok_or_else(|| anyhow::anyhow!("Missing command"))?; - tracing::trace!("{cmd:?} {args:?}"); - let pid1mountns = std::fs::File::open("/proc/1/ns/mnt").context("open pid1 mountns")?; - nix::sched::setns(pid1mountns.as_fd(), nix::sched::CloneFlags::CLONE_NEWNS).context("setns")?; - rustix::process::chdir("/").context("chdir")?; - // Work around supermin doing chroot() and not pivot_root - // 
https://github.com/libguestfs/supermin/blob/5230e2c3cd07e82bd6431e871e239f7056bf25ad/init/init.c#L288 - if !Utf8Path::new("/usr").try_exists().context("/usr")? - && Utf8Path::new("/root/usr") - .try_exists() - .context("/root/usr")? - { - tracing::debug!("Using supermin workaround"); - rustix::process::chroot("/root").context("chroot")?; - } - Err(Command::new(cmd).args(args).exec()).context("exec")? -} - #[context("Querying skopeo version")] fn require_skopeo_with_containers_storage() -> Result<()> { let out = Task::new_cmd("skopeo --version", run_in_host_mountns("skopeo")) diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 9f8d4ac5..f54db99a 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -20,12 +20,15 @@ pub mod cli; pub(crate) mod deploy; pub(crate) mod generator; +pub(crate) mod hostexec; mod image; pub(crate) mod journal; pub(crate) mod kargs; mod lints; mod lsm; pub(crate) mod metadata; +mod podman; +mod podman_ostree; mod reboot; mod reexec; mod status; @@ -45,8 +48,6 @@ mod k8sapitypes; mod kernel; #[cfg(feature = "install")] pub(crate) mod mount; -#[cfg(feature = "install")] -mod podman; pub mod spec; #[cfg(feature = "docgen")] diff --git a/lib/src/podman.rs b/lib/src/podman.rs index f5f7fd96..7697e6e0 100644 --- a/lib/src/podman.rs +++ b/lib/src/podman.rs @@ -1,12 +1,151 @@ +//! # Helpers for interacting with podman +//! +//! Wrapper for podman which writes to a bootc-owned root. + +use std::os::unix::process::CommandExt; + use anyhow::{anyhow, Result}; +use camino::{Utf8Path, Utf8PathBuf}; +use cap_std_ext::cap_std; +use cap_std_ext::cap_std::fs::Dir; +use ostree_ext::container::OstreeImageReference; use serde::Deserialize; +use tokio::process::Command; -use crate::install::run_in_host_mountns; +use crate::hostexec::run_in_host_mountns; +use crate::spec::ImageReference; use crate::task::Task; +use crate::utils::{cmd_in_root, newline_trim_vec_to_string}; /// Where we look inside our container to find our own image /// for use with `bootc install`. pub(crate) const CONTAINER_STORAGE: &str = "/var/lib/containers"; +/// The argument for podman --root, in parallel to `ostree/repo`. +pub(crate) const STORAGE_ROOT: &str = "ostree/container-storage"; +/// The argument for podman --runroot, this is stored under /run/bootc. +pub(crate) const RUN_ROOT: &str = "run/bootc/container-storage"; +const PODMAN_ARGS: &[&str] = &["--root", STORAGE_ROOT, "--runroot", RUN_ROOT]; + +pub(crate) fn podman_in_root(rootfs: &Dir) -> Result { + let mut cmd = cmd_in_root(rootfs, "podman")?; + cmd.args(PODMAN_ARGS); + Ok(cmd) +} + +pub(crate) async fn temporary_container_for_image(rootfs: &Dir, imageid: &str) -> Result { + tracing::debug!("Creating temporary container for {imageid}"); + let st = podman_in_root(rootfs)? + .args(["create", imageid]) + .output() + .await?; + if !st.status.success() { + anyhow::bail!("Failed to create transient image: {st:?}"); + } + newline_trim_vec_to_string(st.stdout) +} + +pub(crate) async fn podman_mount(rootfs: &Dir, cid: &str) -> Result { + tracing::debug!("Mounting {cid}"); + let st = podman_in_root(rootfs)? 
+ .args(["mount", cid]) + .output() + .await?; + if !st.status.success() { + anyhow::bail!("Failed to mount transient image: {st:?}"); + } + Ok(newline_trim_vec_to_string(st.stdout)?.into()) +} + +pub(crate) async fn podman_pull( + rootfs: &Dir, + image: &ImageReference, + quiet: bool, +) -> Result { + let authfile = + ostree_ext::globals::get_global_authfile(rootfs)?.map(|(authfile, _fd)| authfile); + let mut cmd = podman_in_root(rootfs)?; + let image = OstreeImageReference::from(image.clone()); + let pull_spec_image = image.imgref.to_string(); + tracing::debug!("Pulling {pull_spec_image}"); + let child = cmd + .args(["pull"]) + .args(authfile.iter().flat_map(|v| ["--authfile", v.as_str()])) + .args(quiet.then_some("--quiet")) + .arg(&pull_spec_image) + .stdout(std::process::Stdio::piped()) + .spawn()?; + let output = child.wait_with_output().await?; + if !output.status.success() { + anyhow::bail!("Failed to pull: {:?}", output.status); + } + newline_trim_vec_to_string(output.stdout) +} + +#[derive(Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct PodmanInspect { + #[allow(dead_code)] + pub(crate) id: String, + pub(crate) digest: String, + pub(crate) created: Option>, + pub(crate) config: PodmanInspectConfig, + #[serde(rename = "RootFS")] + #[allow(dead_code)] + pub(crate) root_fs: PodmanInspectRootfs, + pub(crate) graph_driver: PodmanInspectGraphDriver, +} + +#[derive(Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct PodmanInspectConfig { + #[serde(default)] + pub(crate) labels: std::collections::BTreeMap, +} + +#[derive(Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct PodmanInspectGraphDriver { + pub(crate) name: String, + pub(crate) data: PodmanInspectGraphDriverData, +} + +#[derive(Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct PodmanInspectGraphDriverData { + pub(crate) lower_dir: String, + pub(crate) upper_dir: String, +} + +impl PodmanInspectGraphDriverData { + pub(crate) fn layers(&self) -> impl Iterator { + self.lower_dir + .split(':') + .chain(std::iter::once(self.upper_dir.as_str())) + } +} + +#[derive(Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct PodmanInspectRootfs { + #[allow(dead_code)] + pub(crate) layers: Vec, +} + +pub(crate) async fn podman_inspect(rootfs: &Dir, imgid: &str) -> Result { + let st = podman_in_root(rootfs)? + .args(["image", "inspect", imgid]) + .output() + .await?; + if !st.status.success() { + anyhow::bail!("Failed to mount transient image: {st:?}"); + } + let r: Vec = serde_json::from_slice(&st.stdout)?; + let r = r + .into_iter() + .next() + .ok_or_else(|| anyhow!("Missing output from inspect"))?; + Ok(r) +} #[derive(Deserialize)] #[serde(rename_all = "PascalCase")] @@ -27,3 +166,11 @@ pub(crate) fn imageid_to_digest(imgid: &str) -> Result { .ok_or_else(|| anyhow!("No images returned for inspect"))?; Ok(i.digest) } + +pub(crate) fn exec(root: &Utf8Path, args: &[std::ffi::OsString]) -> Result<()> { + let rootfs = &Dir::open_ambient_dir(root, cap_std::ambient_authority())?; + let mut cmd = crate::utils::sync_cmd_in_root(rootfs, "podman")?; + cmd.args(PODMAN_ARGS); + cmd.args(args); + Err(anyhow::Error::msg(cmd.exec())) +} diff --git a/lib/src/podman_ostree.rs b/lib/src/podman_ostree.rs new file mode 100644 index 00000000..251fd747 --- /dev/null +++ b/lib/src/podman_ostree.rs @@ -0,0 +1,296 @@ +//! # Mapping between podman/containers-storage: and ostree +//! +//! 
The common container storage model is to store blobs (layers) as unpacked directories, +//! and use the Linux `overlayfs` to merge them dynamically. +//! +//! However, today the `ostree-prepare-root` model as used by ostree expects a final flattened +//! filesystem tree; and crucially we need to perform SELinux labeling. At the moment, because +//! ostree again works on just a plain directory, we need to "physically" change the on-disk +//! xattrs of the target files. +//! +//! That said, there is work in ostree to use composefs, which will add a huge amount of flexibility; +//! we can generate an erofs blob dynamically with the target labels. +//! +//! Even more than that however the ostree core currently expects an ostree commit object to be backing +//! the filesystem tree; this is how it handles garbage collection, inspects metadata, etc. Parts +//! of bootc rely on this too today. +//! +//! ## Disadvantages +//! +//! One notable disadvantage of this model is that we're storing file *references* twice, +//! which means the ostree deduplication is pointless. In theory this is fixable by going back +//! and changing the containers-storage files, but... +//! +//! ## Medium term: Unify containers-storage and ostree with composefs +//! +//! Ultimately the best fix is https://github.com/containers/composefs/issues/125 + +use std::cell::OnceCell; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicI64, Ordering}; +use std::sync::Arc; + +use anyhow::{Context, Result}; + +use cap_std::fs::Dir; +use cap_std::fs::{DirBuilder, DirEntry}; +use cap_std::io_lifetimes::AsFilelike; +use cap_std_ext::cap_tempfile::{TempDir, TempFile}; +use cap_std_ext::cmdext::CapStdExtCommandExt; +use cap_std_ext::dirext::CapStdExtDirExt; +use cap_std_ext::{ + cap_primitives::{ + self, + fs::{DirBuilderExt, MetadataExt, OpenOptionsExt}, + }, + cap_std, +}; +use fn_error_context::context; +use ostree_ext::sysroot::SysrootLock; +use rustix::fd::AsFd; + +use crate::deploy::ImageState; +use crate::podman::PodmanInspectGraphDriver; +use crate::utils::sync_cmd_in_root; + +const OSTREE_CONTAINER_IMAGE_REF_PREFIX: &str = "ostree-container/image"; + +fn image_commit_ostree_ref(imageid: &str) -> String { + format!("{OSTREE_CONTAINER_IMAGE_REF_PREFIX}/{imageid}") +} + +struct MergeState<'a> { + trash: &'a Dir, + // Unique integer for naming trashed files + trashid: AtomicI64, + can_clone: bool, +} + +/// Given one directory entry, perform an overlayfs-style merge operation. +fn merge_one_entry( + layer: &Dir, + elt: DirEntry, + pathbuf: &mut std::path::PathBuf, + output: &Dir, + state: &MergeState, +) -> Result<()> { + let name = elt.file_name(); + // We operate on a shared path buffer for improved efficiency. + // Here, we append the name of the target file. + pathbuf.push(&name); + let src_meta = elt.metadata()?; + let inum = src_meta.ino(); + let src_ftype = src_meta.file_type(); + + // Helper closure which lazily initializes a "layer trash directory" and moves the target path into it. + let move_to_trash = |src: &Path| -> anyhow::Result<()> { + let id = state.trashid.fetch_add(1, Ordering::SeqCst); + let tempname = format!("t{:X}-{:X}", id, inum); + output + .rename(src, state.trash, &tempname) + .with_context(|| format!("Moving {src:?} to trash"))?; + Ok(()) + }; + + let target_meta = output + .symlink_metadata_optional(&pathbuf) + .context("Querying target")?; + if src_ftype.is_dir() { + // The source layer type is a directory. Check if we need to create it. 
+ let mut needs_create = true; + if let Some(target_meta) = target_meta { + if target_meta.is_dir() { + needs_create = false; + } else { + // The target exists and is not a directory. Trash it. + move_to_trash(&pathbuf)?; + } + } + // Create the directory if needed. + if needs_create { + let mut db = DirBuilder::new(); + db.mode(src_meta.mode()); + output + .create_dir_with(&pathbuf, &db) + .with_context(|| format!("Creating {pathbuf:?}"))?; + } + // Now recurse + merge_layer(layer, pathbuf, output, state)?; + } else if (src_meta.mode() & libc::S_IFMT) == libc::S_IFCHR && src_meta.rdev() == 0 { + // The layer specifies a whiteout entry; remove the target path. + if target_meta.is_some() { + move_to_trash(&pathbuf)?; + } + } else { + // We're operating on a non-directory. In this case if the target exists, + // it needs to be removed. + if target_meta.is_some() { + move_to_trash(&pathbuf)?; + } + if src_meta.is_symlink() { + let target = + cap_primitives::fs::read_link_contents(&layer.as_filelike_view(), &pathbuf) + .with_context(|| format!("Reading link {pathbuf:?}"))?; + cap_primitives::fs::symlink_contents(target, &output.as_filelike_view(), &pathbuf) + .with_context(|| format!("Writing symlink {pathbuf:?}"))?; + } else { + let src = layer + .open(&pathbuf) + .with_context(|| format!("Opening src {pathbuf:?}"))?; + // Use reflinks if available, otherwise we can fall back to hard linking. The hardlink + // count will "leak" into any containers spawned (until podman learns to use composefs). + if state.can_clone { + let mut openopts = cap_std::fs::OpenOptions::new(); + openopts.write(true); + openopts.create_new(true); + openopts.mode(src_meta.mode()); + let dest = output + .open_with(&pathbuf, &openopts) + .with_context(|| format!("Opening dest {pathbuf:?}"))?; + rustix::fs::ioctl_ficlone(dest.as_fd(), src.as_fd()).context("Cloning")?; + } else { + layer + .hard_link(&pathbuf, output, &pathbuf) + .context("Hard linking")?; + } + } + } + assert!(pathbuf.pop()); + Ok(()) +} + +/// This function is an "eager" implementation of computing the filesystem tree, implementing +/// the same algorithm as overlayfs, including processing whiteouts. +fn merge_layer( + layer: &Dir, + pathbuf: &mut std::path::PathBuf, + output: &Dir, + state: &MergeState, +) -> Result<()> { + for elt in layer.read_dir(&pathbuf)? { + let elt = elt?; + merge_one_entry(layer, elt, pathbuf, output, state)?; + } + Ok(()) +} + +#[context("Squashing to tempdir")] +async fn generate_squashed_dir( + rootfs: &Dir, + graph: PodmanInspectGraphDriver, +) -> Result { + let ostree_tmp = &rootfs.open_dir("ostree/repo/tmp")?; + let td = TempDir::new_in(ostree_tmp)?; + // We put files/directories which should be deleted here; they're processed asynchronously + let trashdir = TempDir::new_in(ostree_tmp)?; + anyhow::ensure!(graph.name == "overlay"); + let rootfs = rootfs.try_clone()?; + let td = tokio::task::spawn_blocking(move || { + let can_clone = OnceCell::::new(); + for layer in graph.data.layers() { + // TODO: Does this actually work when operating on a non-default root? 
+ let layer = layer.trim_start_matches('/'); + tracing::debug!("Merging layer: {layer}"); + let layer = rootfs + .open_dir(layer) + .with_context(|| format!("Opening {layer}"))?; + // Determine if we can do reflinks + if can_clone.get().is_none() { + let src = TempFile::new(&layer)?; + let dest = TempFile::new(&td)?; + let did_clone = + rustix::fs::ioctl_ficlone(dest.as_file().as_fd(), src.as_file().as_fd()) + .is_ok(); + can_clone.get_or_init(|| did_clone); + } + let mut pathbuf = PathBuf::from("."); + let mergestate = MergeState { + trash: &trashdir, + trashid: Default::default(), + can_clone: *can_clone.get().unwrap(), + }; + merge_layer(&layer, &mut pathbuf, &td, &mergestate)?; + } + anyhow::Ok(td) + }) + .await??; + Ok(td) +} + +/// Post-process target directory +pub(crate) fn prepare_squashed_root(rootfs: &Dir) -> Result<()> { + if rootfs.exists("etc") { + rootfs + .rename("etc", rootfs, "usr/etc") + .context("Renaming etc => usr/etc")?; + } + // And move everything in /var to the "factory" directory so it can be processed + // by tmpfiles.d + if let Some(ref var) = rootfs.open_dir_optional("var")? { + let factory_var_path = "usr/share/factory/var"; + rootfs.create_dir_all(factory_var_path)?; + let factory_var = &rootfs.open_dir(factory_var_path)?; + for ent in var.entries()? { + let ent = ent?; + let name = ent.file_name(); + var.rename(&name, factory_var, &name) + .with_context(|| format!("Moving var/{name:?} to {factory_var_path}"))?; + } + } + Ok(()) +} + +/// Given an image in containers-storage, generate an ostree commit from it +pub(crate) async fn commit_image_to_ostree( + sysroot: &SysrootLock, + imageid: &str, +) -> Result { + let rootfs = &Dir::reopen_dir(&crate::utils::sysroot_fd_borrowed(sysroot))?; + + // Mount the merged filesystem (via overlayfs) basically just so we can get the final + // SELinux policy in /etc/selinux which we need to compute the labels + let cid = crate::podman::temporary_container_for_image(rootfs, imageid).await?; + let mount_path = &crate::podman::podman_mount(rootfs, &cid).await?; + // Gather metadata on the image, including its constitutent layers + let mut inspect = crate::podman::podman_inspect(rootfs, imageid).await?; + let manifest_digest = inspect.digest; + + // Merge the layers into one final filesystem tree + let squashed = generate_squashed_dir(rootfs, inspect.graph_driver).await?; + // Post-process the merged tree + let squashed = tokio::task::spawn_blocking(move || { + prepare_squashed_root(&squashed)?; + anyhow::Ok(squashed) + }) + .await??; + + tracing::debug!("Writing ostree commit"); + let repo_fd = Arc::new(sysroot.repo().dfd_borrow().try_clone_to_owned()?); + let ostree_ref = image_commit_ostree_ref(imageid); + let mut cmd = sync_cmd_in_root(&squashed, "ostree")?; + cmd.args([ + "--repo=/proc/self/fd/3", + "commit", + "--consume", + "--selinux-policy", + mount_path.as_str(), + "--branch", + ostree_ref.as_str(), + "--tree=dir=.", + ]); + cmd.take_fd_n(repo_fd, 3); + let mut cmd = tokio::process::Command::from(cmd); + cmd.kill_on_drop(true); + let st = cmd.status().await?; + if !st.success() { + anyhow::bail!("Failed to ostree commit: {st:?}") + } + let ostree_commit = sysroot.repo().require_rev(&ostree_ref)?.to_string(); + Ok(ImageState { + backend: crate::spec::Backend::Container, + created: inspect.created, + manifest_digest, + version: inspect.config.labels.remove("version"), + ostree_commit, + }) +} diff --git a/lib/src/spec.rs b/lib/src/spec.rs index 5f6df932..1904ea45 100644 --- a/lib/src/spec.rs +++ b/lib/src/spec.rs @@ 
-40,6 +40,24 @@ pub enum BootOrder {
     Rollback,
 }
 
+#[derive(
+    clap::ValueEnum, Serialize, Deserialize, Copy, Clone, Debug, PartialEq, Eq, JsonSchema,
+)]
+#[serde(rename_all = "camelCase")]
+/// The storage backend
+pub enum Backend {
+    /// Use the ostree-container storage backend.
+    OstreeContainer,
+    /// Use containers-storage: backend
+    Container,
+}
+
+impl Default for Backend {
+    fn default() -> Self {
+        Self::OstreeContainer
+    }
+}
+
 #[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 /// The host specification
@@ -49,6 +67,9 @@ pub struct HostSpec {
     /// If set, and there is a rollback deployment, it will be set for the next boot.
     #[serde(default)]
     pub boot_order: BootOrder,
+    /// The storage backend
+    #[serde(default)]
+    pub backend: Backend,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, JsonSchema)]
@@ -112,6 +133,9 @@ pub struct BootEntry {
     pub incompatible: bool,
     /// Whether this entry will be subject to garbage collection
     pub pinned: bool,
+    /// The backend for this boot entry
+    #[serde(default)]
+    pub backend: Backend,
     /// If this boot entry is ostree based, the corresponding state
     pub ostree: Option<BootEntryOstree>,
 }
diff --git a/lib/src/status.rs b/lib/src/status.rs
index 65386ff3..f5541ad4 100644
--- a/lib/src/status.rs
+++ b/lib/src/status.rs
@@ -2,6 +2,7 @@ use std::collections::VecDeque;
 
 use anyhow::{Context, Result};
 use camino::Utf8Path;
+use clap::ValueEnum;
 use fn_error_context::context;
 use ostree::glib;
 use ostree_container::OstreeImageReference;
@@ -13,7 +14,11 @@ use ostree_ext::ostree;
 use ostree_ext::sysroot::SysrootLock;
 
 use crate::cli::OutputFormat;
-use crate::spec::{BootEntry, BootOrder, Host, HostSpec, HostStatus, HostType, ImageStatus};
+use crate::deploy::ImageState;
+use crate::podman;
+use crate::spec::{
+    Backend, BootEntry, BootOrder, Host, HostSpec, HostStatus, HostType, ImageStatus,
+};
 use crate::spec::{ImageReference, ImageSignature};
 
 impl From<ostree_container::SignatureSource> for ImageSignature {
@@ -109,6 +114,16 @@ pub(crate) fn try_deserialize_timestamp(t: &str) -> Option<chrono::DateTime<chrono::Utc>> {
+fn get_image_backend(origin: &glib::KeyFile) -> Result<Backend> {
+    let r = origin
+        .optional_string("bootc", "backend")?
+        .map(|v| Backend::from_str(&v, true))
+        .transpose()
+        .map_err(anyhow::Error::msg)?
+        .unwrap_or_default();
+    Ok(r)
+}
+
 pub(crate) fn labels_of_config(
     config: &oci_spec::image::ImageConfiguration,
 ) -> Option<&std::collections::HashMap<String, String>> {
@@ -140,41 +155,62 @@ pub(crate) fn create_imagestatus(
 /// Given an OSTree deployment, parse out metadata into our spec.
 #[context("Reading deployment metadata")]
-fn boot_entry_from_deployment(
+async fn boot_entry_from_deployment(
     sysroot: &SysrootLock,
     deployment: &ostree::Deployment,
 ) -> Result<BootEntry> {
     let repo = &sysroot.repo();
-    let (image, cached_update, incompatible) = if let Some(origin) = deployment.origin().as_ref() {
-        let incompatible = crate::utils::origin_has_rpmostree_stuff(origin);
-        let (image, cached) = if incompatible {
-            // If there are local changes, we can't represent it as a bootc compatible image.
-            (None, None)
-        } else if let Some(image) = get_image_origin(origin)?
{ - let image = ImageReference::from(image); - let csum = deployment.csum(); - let imgstate = ostree_container::store::query_image_commit(repo, &csum)?; - let cached = imgstate.cached_update.map(|cached| { - create_imagestatus(image.clone(), &cached.manifest_digest, &cached.config) - }); - let imagestatus = - create_imagestatus(image, &imgstate.manifest_digest, &imgstate.configuration); - // We found a container-image based deployment - (Some(imagestatus), cached) + let (image, cached_update, incompatible, backend) = + if let Some(origin) = deployment.origin().as_ref() { + let incompatible = crate::utils::origin_has_rpmostree_stuff(origin); + let backend = get_image_backend(origin)?; + let (image, cached) = if incompatible { + // If there are local changes, we can't represent it as a bootc compatible image. + (None, None) + } else if let Some(image) = get_image_origin(origin)? { + let image = ImageReference::from(image); + let csum = deployment.csum(); + let imgstate = match backend { + Backend::Container => { + // TODO: encapsulate this better + let rootfs = &cap_std_ext::cap_std::fs::Dir::reopen_dir( + &crate::utils::sysroot_fd_borrowed(sysroot), + )?; + ImageState::from(podman::podman_inspect(rootfs, &image.image).await?) + } + Backend::OstreeContainer => { + ImageState::from(*ostree_container::store::query_image_commit(repo, &csum)?) + } + }; + //let cached = imgstate.cached_update.map(|cached| { + // create_imagestatus(image.clone(), &cached.manifest_digest, &cached.config) + //}); + let cached = None; + + ( + Some(ImageStatus { + image, + version: imgstate.version, + timestamp: imgstate.created, + image_digest: imgstate.manifest_digest, + }), + cached, + ) + } else { + // The deployment isn't using a container image + (None, None) + }; + (image, cached, incompatible, backend) } else { - // The deployment isn't using a container image - (None, None) + // The deployment has no origin at all (this generally shouldn't happen) + (None, None, false, Default::default()) }; - (image, cached, incompatible) - } else { - // The deployment has no origin at all (this generally shouldn't happen) - (None, None, false) - }; let r = BootEntry { image, cached_update, incompatible, pinned: deployment.is_pinned(), + backend, ostree: Some(crate::spec::BootEntryOstree { checksum: deployment.csum().into(), // SAFETY: The deployserial is really unsigned @@ -202,18 +238,18 @@ impl BootEntry { } /// A variant of [`get_status`] that requires a booted deployment. -pub(crate) fn get_status_require_booted( +pub(crate) async fn get_status_require_booted( sysroot: &SysrootLock, ) -> Result<(ostree::Deployment, Deployments, Host)> { let booted_deployment = sysroot.require_booted_deployment()?; - let (deployments, host) = get_status(sysroot, Some(&booted_deployment))?; + let (deployments, host) = get_status(sysroot, Some(&booted_deployment)).await?; Ok((booted_deployment, deployments, host)) } /// Gather the ostree deployment objects, but also extract metadata from them into /// a more native Rust structure. 
#[context("Computing status")] -pub(crate) fn get_status( +pub(crate) async fn get_status( sysroot: &SysrootLock, booted_deployment: Option<&ostree::Deployment>, ) -> Result<(Deployments, Host)> { @@ -252,30 +288,46 @@ pub(crate) fn get_status( other, }; - let staged = deployments - .staged - .as_ref() - .map(|d| boot_entry_from_deployment(sysroot, d)) - .transpose() - .context("Staged deployment")?; - let booted = booted_deployment - .as_ref() - .map(|d| boot_entry_from_deployment(sysroot, d)) - .transpose() - .context("Booted deployment")?; - let rollback = deployments - .rollback - .as_ref() - .map(|d| boot_entry_from_deployment(sysroot, d)) - .transpose() - .context("Rollback deployment")?; + let staged = if let Some(d) = deployments.staged.as_ref() { + Some( + boot_entry_from_deployment(sysroot, d) + .await + .context("Staged deployment")?, + ) + } else { + None + }; + + let booted = if let Some(d) = booted_deployment { + Some( + boot_entry_from_deployment(sysroot, d) + .await + .context("Booted deployment")?, + ) + } else { + None + }; + + let rollback = if let Some(d) = deployments.rollback.as_ref() { + Some( + boot_entry_from_deployment(sysroot, d) + .await + .context("Rollback deployment")?, + ) + } else { + None + }; + let spec = staged .as_ref() .or(booted.as_ref()) - .and_then(|entry| entry.image.as_ref()) - .map(|img| HostSpec { - image: Some(img.image.clone()), - boot_order, + .and_then(|entry| { + let image = entry.image.as_ref(); + image.map(|image| HostSpec { + image: Some(image.image.clone()), + backend: entry.backend, + boot_order, + }) }) .unwrap_or_default(); @@ -313,7 +365,7 @@ pub(crate) async fn status(opts: super::cli::StatusOpts) -> Result<()> { } else { let sysroot = super::cli::get_locked_sysroot().await?; let booted_deployment = sysroot.booted_deployment(); - let (_deployments, host) = get_status(&sysroot, booted_deployment.as_ref())?; + let (_deployments, host) = get_status(&sysroot, booted_deployment.as_ref()).await?; host }; diff --git a/lib/src/utils.rs b/lib/src/utils.rs index bc50fdfe..9dd9614e 100644 --- a/lib/src/utils.rs +++ b/lib/src/utils.rs @@ -1,9 +1,11 @@ use std::future::Future; use std::io::Write; +use std::os::fd::BorrowedFd; use std::process::Command; use std::time::Duration; use anyhow::{Context, Result}; +use cap_std_ext::{cap_std::fs::Dir, cmdext::CapStdExtCommandExt}; use ostree::glib; use ostree_ext::container::SignatureSource; use ostree_ext::ostree; @@ -78,6 +80,46 @@ pub(crate) fn sigpolicy_from_opts( } } +#[allow(unsafe_code)] +pub(crate) fn sysroot_fd_borrowed(sysroot: &ostree_ext::ostree::Sysroot) -> BorrowedFd { + // SAFETY: Just borrowing an existing fd; there's aleady a PR to add this + // api to libostree + unsafe { BorrowedFd::borrow_raw(sysroot.fd()) } +} + +#[allow(unsafe_code)] +fn set_pdeathsig(cmd: &mut std::process::Command) { + use std::os::unix::process::CommandExt; + // SAFETY: This is a straightforward use of prctl; would be good + // to put in a crate (maybe cap-std-ext) + unsafe { + cmd.pre_exec(|| { + rustix::process::set_parent_process_death_signal(Some(rustix::process::Signal::Term)) + .map_err(Into::into) + }); + } +} + +/// Create a Command instance that has its current working directory set +/// to the target root, and is also lifecycle-bound to us. 
+pub(crate) fn sync_cmd_in_root(rootfs: &Dir, cmd: &str) -> Result<std::process::Command> {
+    let mut cmd = std::process::Command::new(cmd);
+    cmd.cwd_dir(rootfs.try_clone()?);
+    set_pdeathsig(&mut cmd);
+    Ok(cmd)
+}
+
+/// Create a Command instance that has its current working directory set
+/// to the target root, and is also lifecycle-bound to us.
+pub(crate) fn cmd_in_root(rootfs: &Dir, cmd: &str) -> Result<tokio::process::Command> {
+    let mut cmd = std::process::Command::new(cmd);
+    cmd.cwd_dir(rootfs.try_clone()?);
+    set_pdeathsig(&mut cmd);
+    let mut cmd = tokio::process::Command::from(cmd);
+    cmd.kill_on_drop(true);
+    Ok(cmd)
+}
+
 /// Output a warning message that we want to be quite visible.
 /// The process (thread) execution will be delayed for a short time.
 pub(crate) fn medium_visibility_warning(s: &str) {
@@ -118,6 +160,15 @@ where
     r
 }
 
+pub(crate) fn newline_trim_vec_to_string(mut v: Vec<u8>) -> Result<String> {
+    let mut i = v.len();
+    while i > 0 && v[i - 1] == b'\n' {
+        i -= 1;
+    }
+    v.truncate(i);
+    String::from_utf8(v).map_err(Into::into)
+}
+
 /// Given a possibly tagged image like quay.io/foo/bar:latest and a digest 0ab32..., return
 /// the digested form quay.io/foo/bar:latest@sha256:0ab32...
 /// If the image already has a digest, it will be replaced.
@@ -127,6 +178,21 @@ pub(crate) fn digested_pullspec(image: &str, digest: &str) -> String {
     format!("{image}@{digest}")
 }
 
+#[allow(dead_code)]
+pub(crate) fn require_sha256_digest(blobid: &str) -> Result<&str> {
+    let r = blobid
+        .split_once("sha256:")
+        .ok_or_else(|| anyhow::anyhow!("Missing sha256: in blob ID: {blobid}"))?
+        .1;
+    if r.len() != 64 {
+        anyhow::bail!("Invalid digest in blob ID: {blobid}");
+    }
+    if !r.chars().all(|c| char::is_ascii_alphanumeric(&c)) {
+        anyhow::bail!("Invalid checksum in blob ID: {blobid}");
+    }
+    Ok(r)
+}
+
 #[test]
 fn test_digested_pullspec() {
     let digest = "ebe3bdccc041864e5a485f1e755e242535c3b83d110c0357fe57f110b73b143e";
@@ -171,3 +237,31 @@ fn test_sigpolicy_from_opts() {
         SignatureSource::ContainerPolicyAllowInsecure
     );
 }
+
+#[test]
+fn test_newline_trim() {
+    let ident_cases = ["", "foo"].into_iter().map(|s| s.as_bytes());
+    for case in ident_cases {
+        let r = newline_trim_vec_to_string(Vec::from(case)).unwrap();
+        assert_eq!(case, r.as_bytes());
+    }
+    let cases = [("foo\n", "foo"), ("bar\n\n", "bar")];
+    for (orig, new) in cases {
+        let r = newline_trim_vec_to_string(Vec::from(orig)).unwrap();
+        assert_eq!(new.as_bytes(), r.as_bytes());
+    }
+}
+
+#[test]
+fn test_require_sha256_digest() {
+    assert_eq!(
+        require_sha256_digest(
+            "sha256:0b145899261c8a62406f697c67040cbd811f4dfaa9d778426cf1953413be8534"
+        )
+        .unwrap(),
+        "0b145899261c8a62406f697c67040cbd811f4dfaa9d778426cf1953413be8534"
+    );
+    for e in ["", "sha256:abcde", "sha256:0b145899261c8a62406f697c67040cbd811f4dfaa9d778426cf1953413b34🦀123", "sha512:9895de267ca908c36ed0031c017ba9bf85b83c21ff2bf241766a4037be81f947c68841ee75f003eba3b4bddc524c0357d7bc9ebffe499f5b72f2da3507cb170d"] {
+        assert!(require_sha256_digest(e).is_err());
+    }
+}