CVE-2025-31130
Description
gitoxide is an implementation of git written in Rust. Before 0.42.0, gitoxide uses SHA-1 hash implementations without any collision detection, leaving it vulnerable to hash collision attacks. gitoxide uses the sha1_smol or sha1 crate, both of which implement standard SHA-1 without any mitigations for collision attacks. This means that two distinct Git objects with colliding SHA-1 hashes would break the Git object model and integrity checks when used with gitoxide. This vulnerability is fixed in 0.42.0.
Affected packages
Versions sourced from the GitHub Security Advisory.
| Package | Affected versions | Patched versions |
|---|---|---|
| gix-features (crates.io) | < 0.41.0 | 0.41.0 |
| gix-commitgraph (crates.io) | < 0.27.0 | 0.27.0 |
| gix-index (crates.io) | < 0.39.0 | 0.39.0 |
| gix-object (crates.io) | < 0.48.0 | 0.48.0 |
| gix-odb (crates.io) | < 0.68.0 | 0.68.0 |
| gix-pack (crates.io) | < 0.58.0 | 0.58.0 |
| gitoxide (crates.io) | < 0.42.0 | 0.42.0 |
| gitoxide-core (crates.io) | < 0.46.0 | 0.46.0 |
| gix (crates.io) | < 0.71.0 | 0.71.0 |
| gix-archive (crates.io) | < 0.20.0 | 0.20.0 |
| gix-blame (crates.io) | < 0.1.0 | 0.1.0 |
| gix-config (crates.io) | < 0.44.0 | 0.44.0 |
| gix-diff (crates.io) | < 0.51.0 | 0.51.0 |
| gix-dir (crates.io) | < 0.13.0 | 0.13.0 |
| gix-discover (crates.io) | < 0.39.0 | 0.39.0 |
| gix-filter (crates.io) | < 0.18.0 | 0.18.0 |
| gix-fsck (crates.io) | < 0.10.0 | 0.10.0 |
| gix-merge (crates.io) | < 0.4.0 | 0.4.0 |
| gix-negotiate (crates.io) | < 0.19.0 | 0.19.0 |
| gix-protocol (crates.io) | < 0.49.0 | 0.49.0 |
| gix-ref (crates.io) | < 0.51.0 | 0.51.0 |
| gix-revision (crates.io) | < 0.33.0 | 0.33.0 |
| gix-revwalk (crates.io) | < 0.19.0 | 0.19.0 |
| gix-status (crates.io) | < 0.18.0 | 0.18.0 |
| gix-traverse (crates.io) | < 0.45.0 | 0.45.0 |
| gix-worktree (crates.io) | < 0.40.0 | 0.40.0 |
| gix-worktree-state (crates.io) | < 0.18.0 | 0.18.0 |
Patches
Commit f253f02a6658b3b7612a50d56c71f5ae4da4ca21 — feat!: detect SHA-1 collision attacks
13 files changed · +292 −242
Cargo.lock+7 −7 modified@@ -1853,8 +1853,6 @@ dependencies = [ "once_cell", "parking_lot", "prodash 29.0.1", - "sha1", - "sha1_smol", "thiserror 2.0.12", "walkdir", ] @@ -1966,7 +1964,9 @@ dependencies = [ "faster-hex", "gix-features 0.40.0", "gix-testtools", + "prodash 29.0.1", "serde", + "sha1-checked", "thiserror 2.0.12", ] @@ -4872,16 +4872,16 @@ dependencies = [ "cfg-if", "cpufeatures", "digest", - "sha1-asm", ] [[package]] -name = "sha1-asm" -version = "0.5.3" +name = "sha1-checked" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "286acebaf8b67c1130aedffad26f594eff0c1292389158135327d2e23aed582b" +checksum = "89f599ac0c323ebb1c6082821a54962b839832b03984598375bff3975b804423" dependencies = [ - "cc", + "digest", + "sha1", ] [[package]]
Cargo.toml+1 −2 modified@@ -205,8 +205,7 @@ gix-hash = { opt-level = 3 } gix-actor = { opt-level = 3 } gix-config = { opt-level = 3 } miniz_oxide = { opt-level = 3 } -sha1 = { opt-level = 3 } -sha1_smol = { opt-level = 3 } +sha1-checked = { opt-level = 3 } [profile.release] overflow-checks = false
gix-features/Cargo.toml+7 −26 modified@@ -24,7 +24,7 @@ progress = ["prodash"] ## Provide human-readable numbers as well as easier to read byte units for progress bars. progress-unit-human-numbers = ["prodash?/unit-human"] ## Provide human readable byte units for progress bars. -progress-unit-bytes = ["dep:bytesize", "prodash?/unit-bytes"] +progress-unit-bytes = ["dep:bytesize", "prodash?/unit-bytes", "gix-hash/progress-unit-bytes"] ## Provide utilities suitable for working with the `std::fs::read_dir()`. fs-read-dir = ["dep:gix-utils"] @@ -77,40 +77,29 @@ zlib-stock = ["zlib", "flate2?/zlib"] ## may build in environments where other backends don't. zlib-rust-backend = ["zlib", "flate2?/rust_backend"] -#! ### Mutually Exclusive SHA1 -## A fast SHA1 implementation is critical to `gitoxide's` object database performance -## A multi-crate implementation that can use hardware acceleration, thus bearing the potential for up to 2Gb/s throughput on -## CPUs that support it, like AMD Ryzen or Intel Core i3, as well as Apple Silicon like M1. -## Takes precedence over `rustsha1` if both are specified. -fast-sha1 = ["dep:sha1"] -## A standard and well performing pure Rust implementation of Sha1. Will significantly slow down various git operations. -rustsha1 = ["dep:sha1_smol"] +# TODO: Remove these. +fast-sha1 = [] +rustsha1 = [] #! ### Other ## Count cache hits and misses and print that debug information on drop. 
## Caches implement this by default, which costs nothing unless this feature is enabled cache-efficiency-debug = [] -[[test]] -name = "hash" -path = "tests/hash.rs" -required-features = ["rustsha1"] - [[test]] name = "parallel" path = "tests/parallel_threaded.rs" -required-features = ["parallel", "rustsha1"] +required-features = ["parallel"] [[test]] name = "multi-threaded" path = "tests/parallel_shared_threaded.rs" -required-features = ["parallel", "rustsha1"] +required-features = ["parallel"] [[test]] name = "single-threaded" path = "tests/parallel_shared.rs" -required-features = ["rustsha1"] [[test]] name = "pipe" @@ -130,10 +119,8 @@ parking_lot = { version = "0.12.0", default-features = false, optional = true } walkdir = { version = "2.3.2", optional = true } # used when parallel is off -# hashing and 'fast-sha1' feature -sha1_smol = { version = "1.0.0", optional = true } +# hashing crc32fast = { version = "1.2.1", optional = true } -sha1 = { version = "0.10.0", optional = true } # progress prodash = { version = "29.0.1", optional = true } @@ -156,12 +143,6 @@ libc = { version = "0.2.119" } [dev-dependencies] bstr = { version = "1.3.0", default-features = false } - -# Assembly doesn't yet compile on MSVC on windows, but does on GNU, see https://github.com/RustCrypto/asm-hashes/issues/17 -# At this time, only aarch64, x86 and x86_64 are supported. -[target.'cfg(all(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64"), not(target_os = "windows")))'.dependencies] -sha1 = { version = "0.10.0", optional = true, features = ["asm"] } - [package.metadata.docs.rs] all-features = true features = ["document-features"]
gix-features/src/hash.rs+6 −177 modified@@ -1,54 +1,12 @@ //! Hash functions and hash utilities -//! -//! With the `fast-sha1` feature, the `Sha1` hash type will use a more elaborate implementation utilizing hardware support -//! in case it is available. Otherwise the `rustsha1` feature should be set. `fast-sha1` will take precedence. -//! Otherwise, a minimal yet performant implementation is used instead for a decent trade-off between compile times and run-time performance. -#[cfg(all(feature = "rustsha1", not(feature = "fast-sha1")))] -mod _impl { - use super::Digest; - - /// A implementation of the Sha1 hash, which can be used once. - #[derive(Default, Clone)] - pub struct Sha1(sha1_smol::Sha1); - - impl Sha1 { - /// Digest the given `bytes`. - pub fn update(&mut self, bytes: &[u8]) { - self.0.update(bytes); - } - /// Finalize the hash and produce a digest. - pub fn digest(self) -> Digest { - self.0.digest().bytes() - } - } -} - -/// A hash-digest produced by a [`Hasher`] hash implementation. -#[cfg(any(feature = "fast-sha1", feature = "rustsha1"))] -pub type Digest = [u8; 20]; - -#[cfg(feature = "fast-sha1")] -mod _impl { - use sha1::Digest; - - /// A implementation of the Sha1 hash, which can be used once. - #[derive(Default, Clone)] - pub struct Sha1(sha1::Sha1); - - impl Sha1 { - /// Digest the given `bytes`. - pub fn update(&mut self, bytes: &[u8]) { - self.0.update(bytes); - } - /// Finalize the hash and produce a digest. - pub fn digest(self) -> super::Digest { - self.0.finalize().into() - } - } -} +// TODO: Remove this. #[cfg(any(feature = "rustsha1", feature = "fast-sha1"))] -pub use _impl::Sha1 as Hasher; +pub use gix_hash::hasher::{ + hasher, + io::{bytes, bytes_of_file, bytes_with_hasher, Write}, + Digest, Hasher, +}; /// Compute a CRC32 hash from the given `bytes`, returning the CRC32 hash. /// @@ -71,132 +29,3 @@ pub fn crc32(bytes: &[u8]) -> u32 { h.update(bytes); h.finalize() } - -/// Produce a hasher suitable for the given kind of hash. 
-#[cfg(any(feature = "rustsha1", feature = "fast-sha1"))] -pub fn hasher(kind: gix_hash::Kind) -> Hasher { - match kind { - gix_hash::Kind::Sha1 => Hasher::default(), - } -} - -/// Compute the hash of `kind` for the bytes in the file at `path`, hashing only the first `num_bytes_from_start` -/// while initializing and calling `progress`. -/// -/// `num_bytes_from_start` is useful to avoid reading trailing hashes, which are never part of the hash itself, -/// denoting the amount of bytes to hash starting from the beginning of the file. -/// -/// # Note -/// -/// * Only available with the `gix-object` feature enabled due to usage of the [`gix_hash::Kind`] enum and the -/// [`gix_hash::ObjectId`] return value. -/// * [Interrupts][crate::interrupt] are supported. -#[cfg(all(feature = "progress", any(feature = "rustsha1", feature = "fast-sha1")))] -pub fn bytes_of_file( - path: &std::path::Path, - num_bytes_from_start: u64, - kind: gix_hash::Kind, - progress: &mut dyn crate::progress::Progress, - should_interrupt: &std::sync::atomic::AtomicBool, -) -> std::io::Result<gix_hash::ObjectId> { - bytes( - &mut std::fs::File::open(path)?, - num_bytes_from_start, - kind, - progress, - should_interrupt, - ) -} - -/// Similar to [`bytes_of_file`], but operates on a stream of bytes. -#[cfg(all(feature = "progress", any(feature = "rustsha1", feature = "fast-sha1")))] -pub fn bytes( - read: &mut dyn std::io::Read, - num_bytes_from_start: u64, - kind: gix_hash::Kind, - progress: &mut dyn crate::progress::Progress, - should_interrupt: &std::sync::atomic::AtomicBool, -) -> std::io::Result<gix_hash::ObjectId> { - bytes_with_hasher(read, num_bytes_from_start, hasher(kind), progress, should_interrupt) -} - -/// Similar to [`bytes()`], but takes a `hasher` instead of a hash kind. 
-#[cfg(all(feature = "progress", any(feature = "rustsha1", feature = "fast-sha1")))] -pub fn bytes_with_hasher( - read: &mut dyn std::io::Read, - num_bytes_from_start: u64, - mut hasher: Hasher, - progress: &mut dyn crate::progress::Progress, - should_interrupt: &std::sync::atomic::AtomicBool, -) -> std::io::Result<gix_hash::ObjectId> { - let start = std::time::Instant::now(); - // init progress before the possibility for failure, as convenience in case people want to recover - progress.init( - Some(num_bytes_from_start as prodash::progress::Step), - crate::progress::bytes(), - ); - - const BUF_SIZE: usize = u16::MAX as usize; - let mut buf = [0u8; BUF_SIZE]; - let mut bytes_left = num_bytes_from_start; - - while bytes_left > 0 { - let out = &mut buf[..BUF_SIZE.min(bytes_left as usize)]; - read.read_exact(out)?; - bytes_left -= out.len() as u64; - progress.inc_by(out.len()); - hasher.update(out); - if should_interrupt.load(std::sync::atomic::Ordering::SeqCst) { - return Err(std::io::Error::new(std::io::ErrorKind::Other, "Interrupted")); - } - } - - let id = gix_hash::ObjectId::from(hasher.digest()); - progress.show_throughput(start); - Ok(id) -} - -#[cfg(any(feature = "rustsha1", feature = "fast-sha1"))] -mod write { - use crate::hash::Hasher; - - /// A utility to automatically generate a hash while writing into an inner writer. - pub struct Write<T> { - /// The hash implementation. - pub hash: Hasher, - /// The inner writer. - pub inner: T, - } - - impl<T> std::io::Write for Write<T> - where - T: std::io::Write, - { - fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { - let written = self.inner.write(buf)?; - self.hash.update(&buf[..written]); - Ok(written) - } - - fn flush(&mut self) -> std::io::Result<()> { - self.inner.flush() - } - } - - impl<T> Write<T> - where - T: std::io::Write, - { - /// Create a new hash writer which hashes all bytes written to `inner` with a hash of `kind`. 
- pub fn new(inner: T, object_hash: gix_hash::Kind) -> Self { - match object_hash { - gix_hash::Kind::Sha1 => Write { - inner, - hash: Hasher::default(), - }, - } - } - } -} -#[cfg(any(feature = "rustsha1", feature = "fast-sha1"))] -pub use write::Write;
gix-features/tests/hash.rs+0 −16 removed@@ -1,16 +0,0 @@ -use gix_features::hash::Hasher; - -#[cfg(not(feature = "fast-sha1"))] -#[test] -fn size_of_sha1() { - assert_eq!(std::mem::size_of::<Hasher>(), 96); -} - -#[cfg(feature = "fast-sha1")] -#[test] -fn size_of_sha1() { - assert_eq!( - std::mem::size_of::<Hasher>(), - if cfg!(target_arch = "x86") { 96 } else { 104 } - ); -}
gix-hash/Cargo.toml+6 −0 modified@@ -16,13 +16,19 @@ doctest = false test = false [features] +# Temporary, to avoid a circular dependency on `gix-features`. +progress-unit-bytes = ["prodash/unit-bytes"] ## Data structures implement `serde::Serialize` and `serde::Deserialize`. serde = ["dep:serde"] [dependencies] +# Temporary, to avoid a circular dependency on `gix-features`. +prodash = "29.0.1" + thiserror = "2.0.0" faster-hex = { version = "0.9.0" } serde = { version = "1.0.114", optional = true, default-features = false, features = ["derive"] } +sha1-checked = { version = "0.10.0", default-features = false } document-features = { version = "0.2.0", optional = true }
gix-hash/src/hasher/io.rs+138 −0 added@@ -0,0 +1,138 @@ +use crate::{hasher, Hasher}; + +// Temporary, to avoid a circular dependency on `gix-features`. +/// +mod gix_features { + /// + pub mod progress { + pub use prodash::{self, unit, Progress, Unit}; + + /// + #[cfg(feature = "progress-unit-bytes")] + pub fn bytes() -> Option<Unit> { + Some(unit::dynamic_and_mode( + unit::Bytes, + unit::display::Mode::with_throughput().and_percentage(), + )) + } + + /// + #[cfg(not(feature = "progress-unit-bytes"))] + pub fn bytes() -> Option<Unit> { + Some(unit::label_and_mode( + "B", + unit::display::Mode::with_throughput().and_percentage(), + )) + } + } +} + +/// Compute the hash of `kind` for the bytes in the file at `path`, hashing only the first `num_bytes_from_start` +/// while initializing and calling `progress`. +/// +/// `num_bytes_from_start` is useful to avoid reading trailing hashes, which are never part of the hash itself, +/// denoting the amount of bytes to hash starting from the beginning of the file. +/// +/// # Note +/// +/// * Interrupts are supported. +// TODO: Fix link to `gix_features::interrupt`. +pub fn bytes_of_file( + path: &std::path::Path, + num_bytes_from_start: u64, + kind: crate::Kind, + progress: &mut dyn gix_features::progress::Progress, + should_interrupt: &std::sync::atomic::AtomicBool, +) -> std::io::Result<crate::ObjectId> { + bytes( + &mut std::fs::File::open(path)?, + num_bytes_from_start, + kind, + progress, + should_interrupt, + ) +} + +/// Similar to [`bytes_of_file`], but operates on a stream of bytes. +pub fn bytes( + read: &mut dyn std::io::Read, + num_bytes_from_start: u64, + kind: crate::Kind, + progress: &mut dyn gix_features::progress::Progress, + should_interrupt: &std::sync::atomic::AtomicBool, +) -> std::io::Result<crate::ObjectId> { + bytes_with_hasher(read, num_bytes_from_start, hasher(kind), progress, should_interrupt) +} + +/// Similar to [`bytes()`], but takes a `hasher` instead of a hash kind. 
+pub fn bytes_with_hasher( + read: &mut dyn std::io::Read, + num_bytes_from_start: u64, + mut hasher: Hasher, + progress: &mut dyn gix_features::progress::Progress, + should_interrupt: &std::sync::atomic::AtomicBool, +) -> std::io::Result<crate::ObjectId> { + let start = std::time::Instant::now(); + // init progress before the possibility for failure, as convenience in case people want to recover + progress.init( + Some(num_bytes_from_start as gix_features::progress::prodash::progress::Step), + gix_features::progress::bytes(), + ); + + const BUF_SIZE: usize = u16::MAX as usize; + let mut buf = [0u8; BUF_SIZE]; + let mut bytes_left = num_bytes_from_start; + + while bytes_left > 0 { + let out = &mut buf[..BUF_SIZE.min(bytes_left as usize)]; + read.read_exact(out)?; + bytes_left -= out.len() as u64; + progress.inc_by(out.len()); + hasher.update(out); + if should_interrupt.load(std::sync::atomic::Ordering::SeqCst) { + return Err(std::io::Error::new(std::io::ErrorKind::Other, "Interrupted")); + } + } + + let id = crate::ObjectId::from(hasher.digest()); + progress.show_throughput(start); + Ok(id) +} + +/// A utility to automatically generate a hash while writing into an inner writer. +pub struct Write<T> { + /// The hash implementation. + pub hash: Hasher, + /// The inner writer. + pub inner: T, +} + +impl<T> std::io::Write for Write<T> +where + T: std::io::Write, +{ + fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { + let written = self.inner.write(buf)?; + self.hash.update(&buf[..written]); + Ok(written) + } + + fn flush(&mut self) -> std::io::Result<()> { + self.inner.flush() + } +} + +impl<T> Write<T> +where + T: std::io::Write, +{ + /// Create a new hash writer which hashes all bytes written to `inner` with a hash of `kind`. + pub fn new(inner: T, object_hash: crate::Kind) -> Self { + match object_hash { + crate::Kind::Sha1 => Write { + inner, + hash: Hasher::default(), + }, + } + } +}
gix-hash/src/hasher/mod.rs+90 −0 added@@ -0,0 +1,90 @@ +use sha1_checked::CollisionResult; + +/// A hash-digest produced by a [`Hasher`] hash implementation. +pub type Digest = [u8; 20]; + +/// The error returned by [`Hasher::try_finalize()`]. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error { + #[error("Detected SHA-1 collision attack with digest {digest}")] + CollisionAttack { digest: crate::ObjectId }, +} + +/// A implementation of the Sha1 hash, which can be used once. +/// +/// We use [`sha1_checked`] to implement the same collision detection +/// algorithm as Git. +#[derive(Clone)] +pub struct Hasher(sha1_checked::Sha1); + +impl Default for Hasher { + #[inline] + fn default() -> Self { + // This matches the configuration used by Git, which only uses + // the collision detection to bail out, rather than computing + // alternate “safe hashes” for inputs where a collision attack + // was detected. + Self(sha1_checked::Builder::default().safe_hash(false).build()) + } +} + +impl Hasher { + /// Digest the given `bytes`. + pub fn update(&mut self, bytes: &[u8]) { + use sha1_checked::Digest; + self.0.update(bytes); + } + + /// Finalize the hash and produce an object ID. + /// + /// Returns [`Error`] if a collision attack is detected. + #[inline] + pub fn try_finalize(self) -> Result<crate::ObjectId, Error> { + match self.0.try_finalize() { + CollisionResult::Ok(digest) => Ok(crate::ObjectId::Sha1(digest.into())), + CollisionResult::Mitigated(_) => { + // SAFETY: `CollisionResult::Mitigated` is only + // returned when `safe_hash()` is on. `Hasher`’s field + // is private, and we only construct it in the + // `Default` instance, which turns `safe_hash()` off. + // + // As of Rust 1.84.1, the compiler can’t figure out + // this function cannot panic without this. 
+ #[allow(unsafe_code)] + unsafe { + std::hint::unreachable_unchecked() + } + } + CollisionResult::Collision(digest) => Err(Error::CollisionAttack { + digest: crate::ObjectId::Sha1(digest.into()), + }), + } + } + + /// Finalize the hash and produce an object ID. + #[inline] + pub fn finalize(self) -> crate::ObjectId { + self.try_finalize().expect("Detected SHA-1 collision attack") + } + + /// Finalize the hash and produce a digest. + #[inline] + pub fn digest(self) -> Digest { + self.finalize() + .as_slice() + .try_into() + .expect("SHA-1 object ID to be 20 bytes long") + } +} + +/// Produce a hasher suitable for the given kind of hash. +#[inline] +pub fn hasher(kind: crate::Kind) -> Hasher { + match kind { + crate::Kind::Sha1 => Hasher::default(), + } +} + +/// Hashing utilities for I/O operations. +pub mod io;
gix-hash/src/lib.rs+5 −0 modified@@ -13,6 +13,11 @@ mod borrowed; pub use borrowed::{oid, Error}; +/// Hash functions and hash utilities +pub mod hasher; +pub use hasher::io::{bytes, bytes_of_file, bytes_with_hasher}; +pub use hasher::{hasher, Hasher}; + mod object_id; pub use object_id::{decode, ObjectId};
gix-hash/tests/hasher/mod.rs+9 −0 added@@ -0,0 +1,9 @@ +use gix_hash::Hasher; + +#[test] +fn size_of_sha1() { + assert_eq!( + std::mem::size_of::<Hasher>(), + if cfg!(target_arch = "x86") { 820 } else { 824 }, + ); +}
gix-hash/tests/hash.rs+1 −0 modified@@ -1,5 +1,6 @@ use gix_hash::ObjectId; +mod hasher; mod kind; mod object_id; mod oid;
gix-hash/tests/object_id/mod.rs+22 −10 modified@@ -45,23 +45,28 @@ mod from_hex { mod sha1 { use std::str::FromStr as _; - use gix_features::hash::hasher; - use gix_hash::{Kind, ObjectId}; + use gix_hash::{hasher, Kind, ObjectId}; - fn hash_contents(s: &[u8]) -> ObjectId { + fn hash_contents(s: &[u8]) -> Result<ObjectId, hasher::Error> { let mut hasher = hasher(Kind::Sha1); hasher.update(s); - ObjectId::Sha1(hasher.digest()) + hasher.try_finalize() } #[test] fn empty_blob() { - assert_eq!(ObjectId::empty_blob(Kind::Sha1), hash_contents(b"blob 0\0")); + assert_eq!( + ObjectId::empty_blob(Kind::Sha1), + hash_contents(b"blob 0\0").expect("empty blob to not collide"), + ); } #[test] fn empty_tree() { - assert_eq!(ObjectId::empty_tree(Kind::Sha1), hash_contents(b"tree 0\0")); + assert_eq!( + ObjectId::empty_tree(Kind::Sha1), + hash_contents(b"tree 0\0").expect("empty tree to not collide"), + ); } /// Check the test vectors from RFC 3174. @@ -84,7 +89,7 @@ mod sha1 { ]; for (input, output) in fixtures { assert_eq!( - hash_contents(input), + hash_contents(input).expect("RFC inputs to not collide"), ObjectId::from_str(&output.to_lowercase().replace(' ', "")).expect("RFC digests to be valid"), ); } @@ -101,10 +106,17 @@ mod sha1 { let message_b = include_bytes!("../fixtures/shambles/messageB"); assert_ne!(message_a, message_b); - // BUG: These should be detected as a collision attack. let expected = ObjectId::from_str("8ac60ba76f1999a1ab70223f225aefdc78d4ddc0").expect("Shambles digest to be valid"); - assert_eq!(hash_contents(message_a), expected); - assert_eq!(hash_contents(message_b), expected); + + let Err(hasher::Error::CollisionAttack { digest }) = hash_contents(message_a) else { + panic!("expected Shambles input to collide"); + }; + assert_eq!(digest, expected); + + let Err(hasher::Error::CollisionAttack { digest }) = hash_contents(message_b) else { + panic!("expected Shambles input to collide"); + }; + assert_eq!(digest, expected); } }
SHORTCOMINGS.md+0 −4 modified@@ -35,7 +35,3 @@ This file is for tracking features that are less well implemented or less powerf * **gix-url** _might_ be more restrictive than what git allows as for the most part, it uses a browser grade URL parser. * Thus far there is no proof for this, and as _potential remedy_ we could certainly re-implement exactly what git does to handle its URLs. - -### `gix-features` - -* **sha1** isn't hardened (i.e. doesn't have collision detection). Needs [to be contributed](https://github.com/GitoxideLabs/gitoxide/issues/585).
Vulnerability mechanics
Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.
References
- github.com/advisories/GHSA-2frx-2596-x5r6 (GHSA advisory)
- nvd.nist.gov/vuln/detail/CVE-2025-31130 (GHSA advisory)
- github.com/GitoxideLabs/gitoxide/commit/f253f02a6658b3b7612a50d56c71f5ae4da4ca21 (NVD, web)
- github.com/GitoxideLabs/gitoxide/security/advisories/GHSA-2frx-2596-x5r6 (NVD, web)
- rustsec.org/advisories/RUSTSEC-2025-0021.html (GHSA, web)
News mentions
No linked articles in our index yet.