use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

use bitvec::prelude::*;
use byteorder::{BigEndian, ByteOrder};
use bytes_cast::{unaligned, BytesCast};

use super::{NodePrefix, RevlogError, RevlogIndex, REVIDX_KNOWN_FLAGS};
use crate::errors::HgError;
use crate::revlog::node::{
    Node, NODE_BYTES_LENGTH, NULL_NODE, STORED_NODE_ID_BYTES,
};
use crate::revlog::{Revision, NULL_REVISION};
use crate::{
    dagops, BaseRevision, FastHashMap, Graph, GraphError, UncheckedRevision,
};

pub const INDEX_ENTRY_SIZE: usize = 64;
pub const INDEX_HEADER_SIZE: usize = 4;
pub const COMPRESSION_MODE_INLINE: u8 = 2;

pub struct IndexHeader {
    pub(super) header_bytes: [u8; INDEX_HEADER_SIZE],
}

#[derive(Copy, Clone, Debug)]
pub struct IndexHeaderFlags {
    flags: u16,
}

/// Corresponds to the high bits of `_format_flags` in python
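///
/// A decoding sketch for the two known bits (bit 0 is inline data, bit 1 is
/// generaldelta, per the methods below):
///
/// ```ignore
/// let flags = IndexHeaderFlags { flags: 0b11 };
/// assert!(flags.is_inline());
/// assert!(flags.uses_generaldelta());
/// ```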
impl IndexHeaderFlags {
    /// Corresponds to FLAG_INLINE_DATA in python
    pub fn is_inline(self) -> bool {
        self.flags & 1 != 0
    }
    /// Corresponds to FLAG_GENERALDELTA in python
    pub fn uses_generaldelta(self) -> bool {
        self.flags & 2 != 0
    }
}

/// Corresponds to the INDEX_HEADER structure,
/// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
impl IndexHeader {
    fn format_flags(&self) -> IndexHeaderFlags {
        // No "unknown flags" check here, unlike in python. Maybe there should
        // be.
        IndexHeaderFlags {
            flags: BigEndian::read_u16(&self.header_bytes[0..2]),
        }
    }

    /// The only revlog version currently supported by rhg.
    const REVLOGV1: u16 = 1;

    /// Corresponds to `_format_version` in Python.
    fn format_version(&self) -> u16 {
        BigEndian::read_u16(&self.header_bytes[2..4])
    }

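    /// Parse the 4-byte header: a big-endian `u16` of format flags followed
    /// by a big-endian `u16` version. An illustrative sketch:
    ///
    /// ```ignore
    /// // flags = 1 (inline), version = 1 (REVLOGV1)
    /// let header = IndexHeader::parse(&[0, 1, 0, 1])?;
    /// assert!(header.format_flags().is_inline());
    /// assert_eq!(header.format_version(), 1);
    /// ```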
    pub fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> {
        if index_bytes.len() < 4 {
            return Err(HgError::corrupted(
                "corrupted revlog: can't read the index format header",
            ));
        }
        Ok(IndexHeader {
            header_bytes: {
                let bytes: [u8; 4] =
                    index_bytes[0..4].try_into().expect("impossible");
                bytes
            },
        })
    }
}

/// Abstracts the access to the index bytes since they can be spread between
/// the immutable (bytes) part and the mutable (added) part if any appends
/// happened. This makes it transparent for the callers.
struct IndexData {
    /// Immutable bytes, most likely taken from disk
    bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>,
    /// Used when stripping index contents, keeps track of the start of the
    /// first stripped revision, which is used to give a slice of the
    /// `bytes` field.
    truncation: Option<usize>,
    /// Bytes that were added after reading the index
    added: Vec<u8>,
    first_entry: [u8; INDEX_ENTRY_SIZE],
}

impl IndexData {
    pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
        let mut first_entry = [0; INDEX_ENTRY_SIZE];
        if bytes.len() >= INDEX_ENTRY_SIZE {
            first_entry[INDEX_HEADER_SIZE..]
                .copy_from_slice(&bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE])
        }
        Self {
            bytes,
            truncation: None,
            added: vec![],
            first_entry,
        }
    }

    pub fn len(&self) -> usize {
        match self.truncation {
            Some(truncation) => truncation + self.added.len(),
            None => self.bytes.len() + self.added.len(),
        }
    }

    fn remove(
        &mut self,
        rev: Revision,
        offsets: Option<&[usize]>,
    ) -> Result<(), RevlogError> {
        let rev = rev.0 as usize;
        let truncation = if let Some(offsets) = offsets {
            offsets[rev]
        } else {
            rev * INDEX_ENTRY_SIZE
        };
        if truncation < self.bytes.len() {
            self.truncation = Some(truncation);
            self.added.clear();
        } else {
            self.added.truncate(truncation - self.bytes.len());
        }
        Ok(())
    }

    fn is_new(&self) -> bool {
        self.bytes.is_empty()
    }
}

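/// Indexing into [`IndexData`] transparently serves the range from whichever
/// part holds it; a single range may not straddle the immutable and added
/// parts. A sketch with hypothetical sizes (128 immutable bytes, 64 added):
///
/// ```ignore
/// let _ = &data[0..64];    // served from `bytes`
/// let _ = &data[128..192]; // served from `added`
/// // &data[64..192] would panic: it spans both parts
/// ```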
impl std::ops::Index<std::ops::Range<usize>> for IndexData {
    type Output = [u8];

    fn index(&self, index: std::ops::Range<usize>) -> &Self::Output {
        let start = index.start;
        let end = index.end;
        let immutable_len = match self.truncation {
            Some(truncation) => truncation,
            None => self.bytes.len(),
        };
        if start < immutable_len {
            if end > immutable_len {
                panic!("index data cannot span existing and added ranges");
            }
            &self.bytes[index]
        } else {
            &self.added[start - immutable_len..end - immutable_len]
        }
    }
}

#[derive(Debug, PartialEq, Eq)]
pub struct RevisionDataParams {
    pub flags: u16,
    pub data_offset: u64,
    pub data_compressed_length: i32,
    pub data_uncompressed_length: i32,
    pub data_delta_base: i32,
    pub link_rev: i32,
    pub parent_rev_1: i32,
    pub parent_rev_2: i32,
    pub node_id: [u8; NODE_BYTES_LENGTH],
    pub _sidedata_offset: u64,
    pub _sidedata_compressed_length: i32,
    pub data_compression_mode: u8,
    pub _sidedata_compression_mode: u8,
    pub _rank: i32,
}

impl Default for RevisionDataParams {
    fn default() -> Self {
        Self {
            flags: 0,
            data_offset: 0,
            data_compressed_length: 0,
            data_uncompressed_length: 0,
            data_delta_base: -1,
            link_rev: -1,
            parent_rev_1: -1,
            parent_rev_2: -1,
            node_id: [0; NODE_BYTES_LENGTH],
            _sidedata_offset: 0,
            _sidedata_compressed_length: 0,
            data_compression_mode: COMPRESSION_MODE_INLINE,
            _sidedata_compression_mode: COMPRESSION_MODE_INLINE,
            _rank: -1,
        }
    }
}

#[derive(BytesCast)]
#[repr(C)]
pub struct RevisionDataV1 {
    data_offset_or_flags: unaligned::U64Be,
    data_compressed_length: unaligned::I32Be,
    data_uncompressed_length: unaligned::I32Be,
    data_delta_base: unaligned::I32Be,
    link_rev: unaligned::I32Be,
    parent_rev_1: unaligned::I32Be,
    parent_rev_2: unaligned::I32Be,
    node_id: [u8; STORED_NODE_ID_BYTES],
}

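/// Compile-time size check: naming this transmute only type-checks if
/// `RevisionDataV1` is exactly 64 bytes, i.e. [`INDEX_ENTRY_SIZE`].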
fn _static_assert_size_of_revision_data_v1() {
    let _ = std::mem::transmute::<RevisionDataV1, [u8; 64]>;
}

impl RevisionDataParams {
    pub fn validate(&self) -> Result<(), RevlogError> {
        if self.flags & !REVIDX_KNOWN_FLAGS != 0 {
            return Err(RevlogError::corrupted(format!(
                "unknown revlog index flags: {}",
                self.flags
            )));
        }
        if self.data_compression_mode != COMPRESSION_MODE_INLINE {
            return Err(RevlogError::corrupted(format!(
                "invalid data compression mode: {}",
                self.data_compression_mode
            )));
        }
        // FIXME isn't this only for v2 or changelog v2?
        if self._sidedata_compression_mode != COMPRESSION_MODE_INLINE {
            return Err(RevlogError::corrupted(format!(
                "invalid sidedata compression mode: {}",
                self._sidedata_compression_mode
            )));
        }
        Ok(())
    }

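    /// Pack into the on-disk v1 entry layout. The first 64-bit field
    /// multiplexes the 48-bit data offset and the 16-bit flags, as sketched
    /// below (illustrative values):
    ///
    /// ```ignore
    /// let v1 = RevisionDataParams {
    ///     data_offset: 0x1234,
    ///     flags: 1,
    ///     ..Default::default()
    /// }
    /// .into_v1();
    /// // v1.data_offset_or_flags now holds (0x1234 << 16) | 1, big-endian
    /// ```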
    pub fn into_v1(self) -> RevisionDataV1 {
        let data_offset_or_flags = self.data_offset << 16 | self.flags as u64;
        let mut node_id = [0; STORED_NODE_ID_BYTES];
        node_id[..NODE_BYTES_LENGTH].copy_from_slice(&self.node_id);
        RevisionDataV1 {
            data_offset_or_flags: data_offset_or_flags.into(),
            data_compressed_length: self.data_compressed_length.into(),
            data_uncompressed_length: self.data_uncompressed_length.into(),
            data_delta_base: self.data_delta_base.into(),
            link_rev: self.link_rev.into(),
            parent_rev_1: self.parent_rev_1.into(),
            parent_rev_2: self.parent_rev_2.into(),
            node_id,
        }
    }
}

/// A Revlog index
pub struct Index {
    bytes: IndexData,
    /// Offsets of starts of index blocks.
    /// Only needed when the index is interleaved with data.
    /// Only needed when the index is interleaved with data.
    offsets: RwLock<Option<Vec<usize>>>,
    uses_generaldelta: bool,
    is_inline: bool,
    /// Cache of (head_revisions, filtered_revisions)
    ///
    /// The head revisions in this index, kept in sync. Should
    /// be accessed via the [`Self::head_revs`] method.
    /// The last filtered revisions in this index, used to make sure
    /// we haven't changed filters when returning the cached `head_revs`.
    pub(super) head_revs: RwLock<(Vec<Revision>, HashSet<Revision>)>,
}

impl Debug for Index {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Index")
            .field("offsets", &self.offsets)
            .field("uses_generaldelta", &self.uses_generaldelta)
            .finish()
    }
}

impl Graph for Index {
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
        let err = || GraphError::ParentOutOfRange(rev);
        match self.get_entry(rev) {
            Some(entry) => {
                // The C implementation checks that the parents are valid
                // before returning
                Ok([
                    self.check_revision(entry.p1()).ok_or_else(err)?,
                    self.check_revision(entry.p2()).ok_or_else(err)?,
                ])
            }
            None => Ok([NULL_REVISION, NULL_REVISION]),
        }
    }
}

/// A cache suitable for find_snapshots
///
/// Logically equivalent to a mapping whose keys are [`BaseRevision`] and
/// values sets of [`BaseRevision`]
///
/// TODO: the dubious part is insisting that errors must be RevlogError.
/// We would probably need to sprinkle some magic here, such as an associated
/// type that would be `Into<RevlogError>`, but even that would not be
/// satisfactory, as errors potentially have nothing to do with the revlog.
pub trait SnapshotsCache {
    fn insert_for(
        &mut self,
        rev: BaseRevision,
        value: BaseRevision,
    ) -> Result<(), RevlogError>;
}

impl SnapshotsCache for FastHashMap<BaseRevision, HashSet<BaseRevision>> {
    fn insert_for(
        &mut self,
        rev: BaseRevision,
        value: BaseRevision,
    ) -> Result<(), RevlogError> {
        let all_values = self.entry(rev).or_default();
        all_values.insert(value);
        Ok(())
    }
}
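
// A usage sketch for the blanket impl above (illustrative, not part of the
// API): `find_snapshots` fills such a map with snapshot revisions keyed by
// their base revision.
//
//     let mut cache: FastHashMap<BaseRevision, HashSet<BaseRevision>> =
//         Default::default();
//     index.find_snapshots(
//         UncheckedRevision(0),
//         UncheckedRevision(index.len() as BaseRevision - 1),
//         &mut cache,
//     )?;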

impl Index {
    /// Create an index from bytes.
    /// Calculate the start of each entry when is_inline is true.
    pub fn new(
        bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>,
        default_header: IndexHeader,
    ) -> Result<Self, HgError> {
        let header = if bytes.len() < INDEX_ENTRY_SIZE {
            default_header
        } else {
            IndexHeader::parse(bytes.as_ref())?
        };

        if header.format_version() != IndexHeader::REVLOGV1 {
            // A proper new version should have had a repo/store
            // requirement.
            return Err(HgError::corrupted("unsupported revlog version"));
        }

        let uses_generaldelta = header.format_flags().uses_generaldelta();

        if header.format_flags().is_inline() {
            let mut offset: usize = 0;
            let mut offsets = Vec::new();

            while offset + INDEX_ENTRY_SIZE <= bytes.len() {
                offsets.push(offset);
                let end = offset + INDEX_ENTRY_SIZE;
                let entry = IndexEntry {
                    bytes: &bytes[offset..end],
                };

                offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
            }

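            // Illustrative sketch of what the scan above walks (hypothetical
            // compressed lengths of 10 and 20 bytes):
            //
            //   rev 0 at offset 0:  64-byte entry + 10 bytes of data
            //   rev 1 at offset 74: 64-byte entry + 20 bytes of data
            //
            // yielding `offsets == [0, 74]` and a final `offset` of 158,
            // which must equal `bytes.len()` for a well-formed inline
            // revlog (checked below).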
            if offset == bytes.len() {
                Ok(Self {
                    bytes: IndexData::new(bytes),
                    offsets: RwLock::new(Some(offsets)),
                    uses_generaldelta,
                    is_inline: true,
                    head_revs: RwLock::new((vec![], HashSet::new())),
                })
            } else {
                Err(HgError::corrupted("unexpected inline revlog length"))
            }
        } else {
            Ok(Self {
                bytes: IndexData::new(bytes),
                offsets: RwLock::new(None),
                uses_generaldelta,
                is_inline: false,
                head_revs: RwLock::new((vec![], HashSet::new())),
            })
        }
    }

    pub fn uses_generaldelta(&self) -> bool {
        self.uses_generaldelta
    }

    /// Value of the inline flag.
    pub fn is_inline(&self) -> bool {
        self.is_inline
    }

    /// Return a slice of bytes if `revlog` is inline. Panics if not.
    pub fn data(&self, start: usize, end: usize) -> &[u8] {
        if !self.is_inline() {
            panic!("tried to access data in the index of a revlog that is not inline");
        }
        &self.bytes[start..end]
    }

    /// Return number of entries of the revlog index.
    pub fn len(&self) -> usize {
        if self.is_inline() {
            (*self.get_offsets())
                .as_ref()
                .expect("inline should have offsets")
                .len()
        } else {
            self.bytes.len() / INDEX_ENTRY_SIZE
        }
    }

    /// Same as `rev_from_node`, without using a persistent nodemap
    ///
    /// This is used as fallback when a persistent nodemap is not present.
    /// This happens when the persistent-nodemap experimental feature is not
    /// enabled, or for small revlogs.
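    ///
    /// A usage sketch (hypothetical prefix; error variants as in this file):
    ///
    /// ```ignore
    /// let prefix = NodePrefix::from_hex("dead")?;
    /// match index.rev_from_node_no_persistent_nodemap(prefix) {
    ///     Ok(rev) => { /* unique match */ }
    ///     Err(RevlogError::AmbiguousPrefix(_)) => { /* several matches */ }
    ///     Err(e) => return Err(e),
    /// }
    /// ```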
    pub fn rev_from_node_no_persistent_nodemap(
        &self,
        node: NodePrefix,
    ) -> Result<Revision, RevlogError> {
        // Linear scan of the revlog
        // TODO: consider building a non-persistent nodemap in memory to
        // optimize these cases.
        let mut found_by_prefix = None;
        for rev in (-1..self.len() as BaseRevision).rev() {
            let rev = Revision(rev);
            let candidate_node = if rev == Revision(-1) {
                NULL_NODE
            } else {
                let index_entry = self.get_entry(rev).ok_or_else(|| {
                    HgError::corrupted(
                        "revlog references a revision not in the index",
                    )
                })?;
                *index_entry.hash()
            };
            if node == candidate_node {
                return Ok(rev);
            }
            if node.is_prefix_of(&candidate_node) {
                if found_by_prefix.is_some() {
                    return Err(RevlogError::AmbiguousPrefix(format!(
                        "{:x}",
                        node
                    )));
                }
                found_by_prefix = Some(rev)
            }
        }
        found_by_prefix
            .ok_or_else(|| RevlogError::InvalidRevision(format!("{:x}", node)))
    }

    pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
        assert!(self.is_inline());
        {
            // Wrap in a block to drop the write guard before read-locking
            let mut offsets = self.offsets.write().unwrap();
            if offsets.is_none() {
                offsets.replace(inline_scan(&self.bytes.bytes).1);
            }
        }
        self.offsets.read().unwrap()
    }

    pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
        assert!(self.is_inline());
        let mut offsets = self.offsets.write().unwrap();
        if offsets.is_none() {
            offsets.replace(inline_scan(&self.bytes.bytes).1);
        }
        offsets
    }

    /// Returns `true` if the `Index` has zero entries.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the index entry corresponding to the given revision or `None`
    /// for [`NULL_REVISION`]
    ///
    /// Since the specified revision is of the checked type, it always
    /// exists if it was validated by this index.
    pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
        if rev == NULL_REVISION {
            return None;
        }
        if rev.0 == 0 {
            Some(IndexEntry {
                bytes: &self.bytes.first_entry[..],
            })
        } else {
            Some(if self.is_inline() {
                self.get_entry_inline(rev)
            } else {
                self.get_entry_separated(rev)
            })
        }
    }

    /// Return the binary content of the index entry for the given revision
    ///
    /// See [`Self::get_entry`] for cases when `None` is returned.
    pub fn entry_binary(&self, rev: Revision) -> Option<&[u8]> {
        self.get_entry(rev).map(|e| {
            let bytes = e.as_bytes();
            if rev.0 == 0 {
                &bytes[4..]
            } else {
                bytes
            }
        })
    }

    pub fn entry_as_params(
        &self,
        rev: UncheckedRevision,
    ) -> Option<RevisionDataParams> {
        let rev = self.check_revision(rev)?;
        self.get_entry(rev).map(|e| RevisionDataParams {
            flags: e.flags(),
            data_offset: if rev.0 == 0 && !self.bytes.is_new() {
                e.flags() as u64
            } else {
                e.raw_offset()
            },
            data_compressed_length: e
                .compressed_len()
                .try_into()
                .unwrap_or_else(|_| {
                    // Python's `unionrepo` sets the compressed length to
                    // `-1` (or `u32::MAX` if transmuted to `u32`) because
                    // it cannot know the correct compressed length of a
                    // given revision. I'm not sure if this is true, but
                    // handling this edge case won't hurt other use cases,
                    // so let's handle it.
                    assert_eq!(e.compressed_len(), u32::MAX);
                    NULL_REVISION.0
                }),
            data_uncompressed_length: e.uncompressed_len(),
            data_delta_base: e.base_revision_or_base_of_delta_chain().0,
            link_rev: e.link_revision().0,
            parent_rev_1: e.p1().0,
            parent_rev_2: e.p2().0,
            node_id: e.hash().as_bytes().try_into().unwrap(),
            ..Default::default()
        })
    }

    fn get_entry_inline(&self, rev: Revision) -> IndexEntry {
        let offsets = &self.get_offsets();
        let offsets = offsets.as_ref().expect("inline should have offsets");
        let start = offsets[rev.0 as usize];
        let end = start + INDEX_ENTRY_SIZE;
        let bytes = &self.bytes[start..end];

        IndexEntry { bytes }
    }

    fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
        let start = rev.0 as usize * INDEX_ENTRY_SIZE;
        let end = start + INDEX_ENTRY_SIZE;
        let bytes = &self.bytes[start..end];

        IndexEntry { bytes }
    }

    fn null_entry(&self) -> IndexEntry {
        IndexEntry {
            bytes: &[0; INDEX_ENTRY_SIZE],
        }
    }

    /// Return the head revisions of this index
    pub fn head_revs(&self) -> Result<Vec<Revision>, GraphError> {
        self.head_revs_advanced(&HashSet::new(), None, false)
            .map(|h| h.expect("py_shortcut is false"))
    }

    /// Return the head revisions of this index, taking filtered revisions
    /// into account and optionally stopping short of `stop_rev`
    pub fn head_revs_advanced(
        &self,
        filtered_revs: &HashSet<Revision>,
        stop_rev: Option<Revision>,
        py_shortcut: bool,
    ) -> Result<Option<Vec<Revision>>, GraphError> {
        {
            let guard = self
                .head_revs
                .read()
                .expect("RwLock on Index.head_revs should not be poisoned");
            let self_head_revs = &guard.0;
            let self_filtered_revs = &guard.1;
            if !self_head_revs.is_empty()
                && filtered_revs == self_filtered_revs
                && stop_rev.is_none()
            {
                if py_shortcut {
                    // Don't copy the revs since we've already cached them
                    // on the Python side.
                    return Ok(None);
                } else {
                    return Ok(Some(self_head_revs.to_owned()));
                }
            }
        }

        let (as_vec, cachable) = if self.is_empty() {
            (vec![NULL_REVISION], true)
        } else {
            let length: usize = match stop_rev {
                Some(r) => r.0 as usize,
                None => self.len(),
            };
            let cachable = self.len() == length;
            let mut not_heads = bitvec![0; length];
            dagops::retain_heads_fast(
                self,
                not_heads.as_mut_bitslice(),
                filtered_revs,
            )?;
            (
                not_heads
                    .into_iter()
                    .enumerate()
                    .filter_map(|(idx, is_not_head)| {
                        if is_not_head {
                            None
                        } else {
                            Some(Revision(idx as BaseRevision))
                        }
                    })
                    .collect(),
                cachable,
            )
        };
        if cachable {
            *self
                .head_revs
                .write()
                .expect("RwLock on Index.head_revs should not be poisoned") =
                (as_vec.to_owned(), filtered_revs.to_owned());
        }
        Ok(Some(as_vec))
    }

    /// Python-specific shortcut to save on PyList creation
    pub fn head_revs_shortcut(
        &self,
    ) -> Result<Option<Vec<Revision>>, GraphError> {
        self.head_revs_advanced(&HashSet::new(), None, true)
    }

    /// Return the heads removed and added by advancing from `begin` to `end`.
    /// In revset language, we compute:
    /// - `heads(:begin)-heads(:end)`
    /// - `heads(:end)-heads(:begin)`
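    ///
    /// For example (an illustrative linear history `0..=5`), advancing from
    /// revision 3 to 5 removes head 3 and adds head 5:
    ///
    /// ```ignore
    /// let (removed, added) = index.head_revs_diff(Revision(3), Revision(5))?;
    /// assert_eq!(removed, vec![Revision(3)]);
    /// assert_eq!(added, vec![Revision(5)]);
    /// ```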
    pub fn head_revs_diff(
        &self,
        begin: Revision,
        end: Revision,
    ) -> Result<(Vec<Revision>, Vec<Revision>), GraphError> {
        let mut heads_added = vec![];
        let mut heads_removed = vec![];

        let mut acc = HashSet::new();
        let Revision(begin) = begin;
        let Revision(end) = end;
        let mut i = end;

        while i > begin {
            // acc invariant:
            // `j` is in the set iff `j <= i` and it has children
            // among `i+1..end` (inclusive)
            if !acc.remove(&i) {
                heads_added.push(Revision(i));
            }
            for Revision(parent) in self.parents(Revision(i))? {
                acc.insert(parent);
            }
            i -= 1;
        }

        // At this point `acc` contains old revisions that gained new children.
        // We need to check if they had any children before. If not, those
        // revisions are the removed heads.
        while !acc.is_empty() {
            // acc invariant:
            // `j` is in the set iff `j <= i` and it has children
            // among `begin+1..end`, but not among `i+1..begin` (inclusive)

            assert!(i >= -1); // yes, `-1` can also be a head if the repo is empty
            if acc.remove(&i) {
                heads_removed.push(Revision(i));
            }
            for Revision(parent) in self.parents(Revision(i))? {
                acc.remove(&parent);
            }
            i -= 1;
        }

        Ok((heads_removed, heads_added))
    }

    /// Obtain the delta chain for a revision.
    ///
    /// `stop_rev` specifies a revision to stop at. If not specified, we
    /// stop at the base of the chain.
    ///
    /// Returns a 2-tuple of (chain, stopped) where `chain` is a vec of
    /// revs in ascending order and `stopped` is a bool indicating whether
    /// `stop_rev` was hit.
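    ///
    /// For example (illustrative, with generaldelta): if revision 2 deltas
    /// against 1, which deltas against the full snapshot 0:
    ///
    /// ```ignore
    /// let (chain, stopped) = index.delta_chain(Revision(2), None)?;
    /// assert_eq!(chain, vec![Revision(0), Revision(1), Revision(2)]);
    /// assert!(!stopped);
    /// ```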
    pub fn delta_chain(
        &self,
        rev: Revision,
        stop_rev: Option<Revision>,
    ) -> Result<(Vec<Revision>, bool), HgError> {
        let mut current_rev = rev;
        let mut entry = self.get_entry(rev).unwrap();
        let mut chain = vec![];
        let using_general_delta = self.uses_generaldelta();
        while current_rev.0 != entry.base_revision_or_base_of_delta_chain().0
            && stop_rev.map(|r| r != current_rev).unwrap_or(true)
        {
            chain.push(current_rev);
            let new_rev = if using_general_delta {
                entry.base_revision_or_base_of_delta_chain()
            } else {
                UncheckedRevision(current_rev.0 - 1)
            };
            current_rev = self.check_revision(new_rev).ok_or_else(|| {
                HgError::corrupted(format!("Revision {new_rev} out of range"))
            })?;
            if current_rev.0 == NULL_REVISION.0 {
                break;
            }
            entry = self.get_entry(current_rev).unwrap()
        }

        let stopped = if stop_rev.map(|r| current_rev == r).unwrap_or(false) {
            true
        } else {
            chain.push(current_rev);
            false
        };
        chain.reverse();
        Ok((chain, stopped))
    }

    pub fn find_snapshots(
        &self,
        start_rev: UncheckedRevision,
        end_rev: UncheckedRevision,
        cache: &mut impl SnapshotsCache,
    ) -> Result<(), RevlogError> {
        let mut start_rev = start_rev.0;
        let mut end_rev = end_rev.0;
        end_rev += 1;
        let len = self.len().try_into().unwrap();
        if end_rev > len {
            end_rev = len;
        }
        if start_rev < 0 {
            start_rev = 0;
        }
        for rev in start_rev..end_rev {
            if !self.is_snapshot_unchecked(Revision(rev))? {
                continue;
            }
            let mut base = self
                .get_entry(Revision(rev))
                .unwrap()
                .base_revision_or_base_of_delta_chain();
            if base.0 == rev {
                base = NULL_REVISION.into();
            }
            cache.insert_for(base.0, rev)?;
        }
        Ok(())
    }

    fn clear_head_revs(&self) {
        self.head_revs
            .write()
            .expect("RwLock on Index.head_revs should not be poisoined")
            .0
            .clear()
    }

    /// TODO move this to the trait probably, along with other things
    pub fn append(
        &mut self,
        revision_data: RevisionDataParams,
    ) -> Result<(), RevlogError> {
        revision_data.validate()?;
        let entry_v1 = revision_data.into_v1();
        let entry_bytes = entry_v1.as_bytes();
        if self.bytes.len() == 0 {
            self.bytes.first_entry[INDEX_HEADER_SIZE..].copy_from_slice(
                &entry_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE],
            )
        }
        if self.is_inline() {
            let new_offset = self.bytes.len();
            if let Some(offsets) = &mut *self.get_offsets_mut() {
                offsets.push(new_offset)
            }
        }
        self.bytes.added.extend(entry_bytes);
        self.clear_head_revs();
        Ok(())
    }

    pub fn pack_header(&self, header: i32) -> [u8; 4] {
        header.to_be_bytes()
    }

    pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
        let offsets = if self.is_inline() {
            self.get_offsets().clone()
        } else {
            None
        };
        self.bytes.remove(rev, offsets.as_deref())?;
        if self.is_inline() {
            if let Some(offsets) = &mut *self.get_offsets_mut() {
                offsets.truncate(rev.0 as usize)
            }
        }
        self.clear_head_revs();
        Ok(())
    }

    pub fn clear_caches(&self) {
        // We need to get the 'inline' value from Python at init and use this
        // instead of offsets to determine whether we're inline since we might
        // clear caches. This implies re-populating the offsets on-demand.
        *self
            .offsets
            .write()
            .expect("RwLock on Index.offsets should not be poisoed") = None;
        self.clear_head_revs();
    }

    /// Unchecked version of `is_snapshot`.
    /// Assumes the caller checked that `rev` is within a valid revision range.
    pub fn is_snapshot_unchecked(
        &self,
        mut rev: Revision,
    ) -> Result<bool, RevlogError> {
        while rev.0 >= 0 {
            let entry = self.get_entry(rev).unwrap();
            let mut base = entry.base_revision_or_base_of_delta_chain().0;
            if base == rev.0 {
                base = NULL_REVISION.0;
            }
            if base == NULL_REVISION.0 {
                return Ok(true);
            }
            let [mut p1, mut p2] = self
                .parents(rev)
                .map_err(|e| RevlogError::InvalidRevision(e.to_string()))?;
            while let Some(p1_entry) = self.get_entry(p1) {
                if p1_entry.compressed_len() != 0 || p1.0 == 0 {
                    break;
                }
                let parent_base =
                    p1_entry.base_revision_or_base_of_delta_chain();
                if parent_base.0 == p1.0 {
                    break;
                }
                p1 = self.check_revision(parent_base).ok_or_else(|| {
                    RevlogError::InvalidRevision(parent_base.to_string())
                })?;
            }
            while let Some(p2_entry) = self.get_entry(p2) {
                if p2_entry.compressed_len() != 0 || p2.0 == 0 {
                    break;
                }
                let parent_base =
                    p2_entry.base_revision_or_base_of_delta_chain();
                if parent_base.0 == p2.0 {
                    break;
                }
                p2 = self.check_revision(parent_base).ok_or_else(|| {
                    RevlogError::InvalidRevision(parent_base.to_string())
                })?;
            }
            if base == p1.0 || base == p2.0 {
                return Ok(false);
            }
            rev = self.check_revision(base.into()).ok_or_else(|| {
                RevlogError::InvalidRevision(base.to_string())
            })?;
        }
        Ok(rev == NULL_REVISION)
    }

    /// Return whether the given revision is a snapshot. Returns an error if
    /// `rev` is not within a valid revision range.
    pub fn is_snapshot(
        &self,
        rev: UncheckedRevision,
    ) -> Result<bool, RevlogError> {
        let rev = self.check_revision(rev).ok_or_else(|| {
            RevlogError::corrupted(format!("invalid revision: {rev}"))
        })?;
        self.is_snapshot_unchecked(rev)
    }

    /// Slice revs to reduce the amount of unrelated data to be read from disk.
    ///
    /// The index is sliced into groups that should be read in one time.
    ///
    /// The initial chunk is sliced until the overall density
    /// (payload/chunks-span ratio) is above `target_density`.
    /// No gap smaller than `min_gap_size` is skipped.
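    ///
    /// E.g. (illustrative): with a large on-disk gap between two groups of
    /// revisions, each group comes back as its own chunk:
    ///
    /// ```ignore
    /// let chunks = index.slice_chunk_to_density(&revs, 0.5, 4);
    /// // e.g. vec![vec![Revision(0), Revision(1)], vec![Revision(7)]]
    /// ```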
    pub fn slice_chunk_to_density(
        &self,
        revs: &[Revision],
        target_density: f64,
        min_gap_size: usize,
    ) -> Vec<Vec<Revision>> {
        if revs.is_empty() {
            return vec![];
        }
        if revs.len() == 1 {
            return vec![revs.to_owned()];
        }
        let delta_chain_span = self.segment_span(revs);
        if delta_chain_span < min_gap_size {
            return vec![revs.to_owned()];
        }
        let entries: Vec<_> = revs
            .iter()
            .map(|r| {
                (*r, self.get_entry(*r).unwrap_or_else(|| self.null_entry()))
            })
            .collect();

        let mut read_data = delta_chain_span;
        let chain_payload: u32 =
            entries.iter().map(|(_r, e)| e.compressed_len()).sum();
        let mut density = if delta_chain_span > 0 {
            chain_payload as f64 / delta_chain_span as f64
        } else {
            1.0
        };

        if density >= target_density {
            return vec![revs.to_owned()];
        }

        // Collect the gaps so they can be sorted and popped from largest to
        // smallest
        let mut gaps = Vec::new();
        let mut previous_end = None;

        for (i, (rev, entry)) in entries.iter().enumerate() {
            let start = self.start(*rev, entry);
            let length = entry.compressed_len();

            // Skip empty revisions to form larger holes
            if length == 0 {
                continue;
            }

            if let Some(end) = previous_end {
                let gap_size = start - end;
                // Only consider holes that are large enough
                if gap_size > min_gap_size {
                    gaps.push((gap_size, i));
                }
            }
            previous_end = Some(start + length as usize);
        }
        if gaps.is_empty() {
            return vec![revs.to_owned()];
        }
        // sort the gaps to pop them from largest to smallest
        gaps.sort_unstable();

        // Collect the indices of the largest holes until
        // the density is acceptable
        let mut selected = vec![];
        while let Some((gap_size, gap_id)) = gaps.pop() {
            if density >= target_density {
                break;
            }
            selected.push(gap_id);