CVE-2025-27512
Description
Zincati is an auto-update agent for Fedora CoreOS hosts. Zincati ships a polkit rule which allows the zincati system user to use the actions org.projectatomic.rpmostree1.deploy to deploy updates to the system and org.projectatomic.rpmostree1.finalize-deployment to reboot the system into the deployed update. Since Zincati v0.0.24, this polkit rule contains a logic error which broadens access of those polkit actions to any unprivileged user rather than just the zincati system user. In practice, this means that any unprivileged user with access to the system D-Bus socket is able to deploy older Fedora CoreOS versions (which may have other known vulnerabilities). Note that rpm-ostree enforces that the selected version must be from the same branch the system is currently on so this cannot directly be used to deploy an attacker-controlled update payload. This primarily impacts users running untrusted workloads with access to the system D-Bus socket. Note that in general, untrusted workloads should not be given this access, whether containerized or not. By default, containers do not have access to the system D-Bus socket. The logic error is fixed in Zincati v0.0.30. A workaround is to manually add a following polkit rule, instructions for which are available in the GitHub Security Advisory.
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Affected versions | Patched versions |
|---|---|---|
| zincati (crates.io) | >= 0.0.24, < 0.0.30 | 0.0.30 |
Patches
Commit 01d8e89f799e — Merge commit from fork
1 file changed · +7 −6
dist/polkit-1/rules.d/zincati.rules (+7 −6, modified)

@@ -1,11 +1,12 @@
 // Allow Zincati to deploy, finalize, and cleanup a staged deployment through rpm-ostree.
 polkit.addRule(function(action, subject) {
-    if ((action.id == "org.projectatomic.rpmostree1.deploy" ||
-        action.id == "org.projectatomic.rpmostree1.finalize-deployment") ||
-        action.id == "org.projectatomic.rpmostree1.cleanup" ||
-        action.id == "org.projectatomic.rpmostree1.rebase" &&
-        subject.user == "zincati") {
-        return polkit.Result.YES;
+    if (action.id == "org.projectatomic.rpmostree1.deploy" ||
+        action.id == "org.projectatomic.rpmostree1.rebase" ||
+        action.id == "org.projectatomic.rpmostree1.finalize-deployment" ||
+        action.id == "org.projectatomic.rpmostree1.cleanup") {
+        if (subject.user == "zincati") {
+            return polkit.Result.YES;
+        }
     }
 });
Commit 28a43aa2c1ed — update_agent: skip branch check when deploying
7 files changed · +140 −14
dist/polkit-1/rules.d/zincati.rules (+3 −2, modified)

@@ -1,7 +1,8 @@
-// Allow Zincati to deploy, and finalize a staged deployment through rpm-ostree.
+// Allow Zincati to deploy, finalize, and cleanup a staged deployment through rpm-ostree.
 polkit.addRule(function(action, subject) {
     if ((action.id == "org.projectatomic.rpmostree1.deploy" ||
-        action.id == "org.projectatomic.rpmostree1.finalize-deployment") &&
+        action.id == "org.projectatomic.rpmostree1.finalize-deployment") ||
+        action.id == "org.projectatomic.rpmostree1.cleanup" &&
         subject.user == "zincati") {
         return polkit.Result.YES;
     }
src/identity/mod.rs (+1 −1, modified)

@@ -103,7 +103,7 @@ impl Identity {
             compute_node_uuid(&app_id)?
         };
         let platform = platform::read_id("/proc/cmdline")?;
-        let stream = rpm_ostree::parse_updates_stream(&status)
+        let stream = rpm_ostree::parse_booted_updates_stream(&status)
             .context("failed to introspect OS updates stream")?;
         let id = Self {
src/rpm_ostree/actor.rs (+43 −1, modified)

@@ -3,7 +3,7 @@
 use super::cli_status::StatusJson;
 use super::Release;
 use actix::prelude::*;
-use anyhow::Result;
+use anyhow::{Context, Result};
 use filetime::FileTime;
 use log::trace;
 use std::collections::BTreeSet;
@@ -107,6 +107,48 @@ impl Handler<QueryLocalDeployments> for RpmOstreeClient {
     }
 }

+/// Request: query pending deployment stream.
+#[derive(Debug, Clone)]
+pub struct QueryPendingDeploymentStream {}
+
+impl Message for QueryPendingDeploymentStream {
+    type Result = Result<String>;
+}
+
+impl Handler<QueryPendingDeploymentStream> for RpmOstreeClient {
+    type Result = Result<String>;
+
+    fn handle(
+        &mut self,
+        _msg: QueryPendingDeploymentStream,
+        _ctx: &mut Self::Context,
+    ) -> Self::Result {
+        trace!("request to get OS update stream of pending deployment");
+        let status = super::cli_status::invoke_cli_status(false)?;
+        let stream = super::cli_status::parse_pending_updates_stream(&status)
+            .context("failed to introspect OS updates stream of pending deployment")?;
+        Ok(stream)
+    }
+}
+
+/// Request: cleanup pending deployment.
+#[derive(Debug, Clone)]
+pub struct CleanupPendingDeployment {}
+
+impl Message for CleanupPendingDeployment {
+    type Result = Result<()>;
+}
+
+impl Handler<CleanupPendingDeployment> for RpmOstreeClient {
+    type Result = Result<()>;
+
+    fn handle(&mut self, _msg: CleanupPendingDeployment, _ctx: &mut Self::Context) -> Self::Result {
+        trace!("request to cleanup pending deployment");
+        super::cli_deploy::invoke_cli_cleanup()?;
+        Ok(())
+    }
+}
+
 /// Request: Register as the update driver for rpm-ostree.
 #[derive(Debug, Clone)]
 pub struct RegisterAsDriver {}
src/rpm_ostree/cli_deploy.rs (+15 −0, modified)

@@ -93,6 +93,7 @@ fn invoke_cli_deploy(release: Release, allow_downgrade: bool) -> Result<Release>
     let mut cmd = std::process::Command::new("rpm-ostree");
     cmd.arg("deploy")
         .arg("--lock-finalization")
+        .arg("--skip-branch-check")
         .arg(format!("revision={}", release.checksum))
         .env("RPMOSTREE_CLIENT_ID", "zincati");
     if !allow_downgrade {
@@ -110,6 +111,20 @@
     Ok(release)
 }

+/// CLI executor for cleaning up the pending deployment.
+pub fn invoke_cli_cleanup() -> Result<()> {
+    let mut cmd = std::process::Command::new("rpm-ostree");
+    cmd.arg("cleanup").arg("-p");
+    let out = cmd.output().context("failed to run 'rpm-ostree' binary")?;
+    if !out.status.success() {
+        bail!(
+            "rpm-ostree cleanup failed:\n{}",
+            String::from_utf8_lossy(&out.stderr)
+        )
+    };
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     #[allow(unused_imports)]
src/rpm_ostree/cli_status.rs (+12 −1, modified)

@@ -98,12 +98,23 @@ pub fn parse_booted(status: &StatusJson) -> Result<Release> {
 }

 /// Parse updates stream for booted deployment from status object.
-pub fn parse_updates_stream(status: &StatusJson) -> Result<String> {
+pub fn parse_booted_updates_stream(status: &StatusJson) -> Result<String> {
     let json = booted_json(status)?;
     ensure!(!json.base_metadata.stream.is_empty(), "empty stream value");
     Ok(json.base_metadata.stream)
 }

+/// Parse updates stream for pending deployment from status object.
+pub fn parse_pending_updates_stream(status: &StatusJson) -> Result<String> {
+    let pending_json = status.deployments[0].clone();
+    ensure!(!pending_json.booted, "no pending deployment found");
+    ensure!(
+        !pending_json.base_metadata.stream.is_empty(),
+        "empty stream value"
+    );
+    Ok(pending_json.base_metadata.stream)
+}
+
 /// Parse local deployments from a status object.
 fn parse_local_deployments(status: &StatusJson, omit_staged: bool) -> BTreeSet<Release> {
     let mut deployments = BTreeSet::<Release>::new();
src/rpm_ostree/mod.rs (+5 −2, modified)

@@ -1,11 +1,14 @@
 mod cli_deploy;
 mod cli_finalize;
 mod cli_status;
-pub use cli_status::{invoke_cli_status, parse_basearch, parse_booted, parse_updates_stream};
+pub use cli_status::{
+    invoke_cli_status, parse_basearch, parse_booted, parse_booted_updates_stream,
+};

 mod actor;
 pub use actor::{
-    FinalizeDeployment, QueryLocalDeployments, RegisterAsDriver, RpmOstreeClient, StageDeployment,
+    CleanupPendingDeployment, FinalizeDeployment, QueryLocalDeployments,
+    QueryPendingDeploymentStream, RegisterAsDriver, RpmOstreeClient, StageDeployment,
 };

 #[cfg(test)]
src/update_agent/actor.rs (+61 −7, modified)

@@ -118,7 +118,7 @@
             UpdateAgentMachineState::UpdateAvailable((release, _)) => {
                 let update = release.clone();
                 update_agent_info
-                    .tick_stage_update(&mut agent_state_guard.machine_state, update)
+                    .tick_stage_update(&mut agent_state_guard, update)
                     .await
             }
             UpdateAgentMachineState::UpdateStaged((release, _)) => {
@@ -335,23 +335,22 @@
     }

     /// Try to stage an update.
-    async fn tick_stage_update(&self, mut state: &mut UpdateAgentMachineState, release: Release) {
+    async fn tick_stage_update(&self, state: &mut UpdateAgentState, release: Release) {
         trace!("trying to stage an update");
         let target = release.clone();
         let deploy_outcome = self.attempt_deploy(target).await;
         match deploy_outcome {
             Ok(release) => {
-                let msg = format!("update staged: {}", release.version);
-                utils::update_unit_status(&msg);
-                log::info!("{}", msg);
-                state.update_staged(release);
+                // Sanity check that the deployment we just staged is from the correct stream.
+                self.check_stream(state, release.clone()).await;
             }
             Err(e) => {
                 log::error!("failed to stage deployment: {}", e);
                 let release_ver = release.version.clone();
-                let fail_count = UpdateAgentInfo::deploy_attempt_failed(release, &mut state);
+                let fail_count =
+                    UpdateAgentInfo::deploy_attempt_failed(release, &mut state.machine_state);
                 let msg = format!(
                     "trying to stage {} ({} failed deployment attempt{})",
                     release_ver,
@@ -464,6 +463,15 @@
         depls
     }

+    /// Return the update stream of the pending deployment.
+    async fn query_pending_deployment_stream(&self) -> Result<String> {
+        let msg = rpm_ostree::QueryPendingDeploymentStream {};
+        self.rpm_ostree_actor
+            .send(msg)
+            .unwrap_or_else(|e| Err(e.into()))
+            .await
+    }
+
     /// Finalize a deployment (unlock and reboot).
     async fn finalize_deployment(&self, release: Release) -> Result<Release> {
         log::info!(
@@ -491,6 +499,52 @@
             .unwrap_or_else(|e| log::error!("failed to register as driver: {}", e))
             .await
     }
+
+    /// Cleanup pending deployment.
+    async fn cleanup_pending_deployment(&self) {
+        let msg = rpm_ostree::CleanupPendingDeployment {};
+        let result = self
+            .rpm_ostree_actor
+            .send(msg)
+            .unwrap_or_else(|e| Err(e.into()))
+            .await;
+        if let Err(e) = result {
+            log::error!("failed to cleanup pending deployment: {}", e)
+        };
+    }
+
+    /// Check that the pending deployment is from the correct update stream.
+    /// If not, clean up the pending deployment.
+    async fn check_stream(&self, state: &mut UpdateAgentState, release: Release) {
+        match self.query_pending_deployment_stream().await {
+            Ok(stream) => {
+                if stream != self.identity.stream {
+                    log::error!(
+                        "deployed an update on different update stream, abandoning update {}",
+                        release.version
+                    );
+                } else {
+                    let msg = format!("update staged: {}", release.version);
+                    utils::update_unit_status(&msg);
+                    log::info!("{}", msg);
+                    state.machine_state.update_staged(release);
+                    // The release we just deployed is on the correct stream so we
+                    // return early.
+                    return;
+                }
+            }
+            Err(e) => {
+                log::error!(
+                    "failed to check pending deployment's update stream: {}, abandoning update {}",
+                    e,
+                    release.version
+                );
+            }
+        }
+        self.cleanup_pending_deployment().await;
+        state.denylist.insert(release);
+        state.machine_state.update_abandoned();
+    }
 }

 #[cfg(test)]
Vulnerability mechanics (analysis id: 7f805e064575688021f14075)
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References
- github.com/advisories/GHSA-w6fv-6gcc-x825 (GHSA: ADVISORY)
- nvd.nist.gov/vuln/detail/CVE-2025-27512 (GHSA: ADVISORY)
- github.com/coreos/zincati/commit/01d8e89f799e6ba21bdf7dc668abce23bd0d8f78 (NVD: WEB)
- github.com/coreos/zincati/commit/28a43aa2c1edda091ba659677d73c13e6e3ea99d (NVD: WEB)
- github.com/coreos/zincati/releases/tag/v0.0.24 (NVD: WEB)
- github.com/coreos/zincati/releases/tag/v0.0.30 (NVD: WEB)
- github.com/coreos/zincati/security/advisories/GHSA-w6fv-6gcc-x825 (NVD: WEB)
News mentions
No linked articles in our index yet.