Commit 13486a81 authored by Georges Racinet

rhgitaly::streaming::stream_with_pagination

This generic helper in `rhgitaly::streaming` is for the case
of streamed responses with `repeated` items, the whole stream
being the current page. In all cases known to us, GitLab derives
some information from the last item of the last chunk to be
the `next_cursor`, hence we introduce a trait for the item
to represent that, and use it in the implementation.

The first use-case will be the implementation of
`CommitService.GetTreeEntries`, but this also makes
the likes of `RefService.FindLocalBranches` essentially trivial.
parent d124dd52
Related merge requests: !189 "Merged stable branch into default and bumped VERSION", !188 "RHGitaly CommitService.GetTreeEntries implementation"
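
For context before the diff below, here is a minimal sketch of the pagination round trip this helper standardizes. It is illustration only: the struct shapes merely mirror the fields used in the new code (`page_token`, `limit`, `next_cursor`), and the exact prost-generated messages in `crate::gitaly` may differ.

// Illustration only: hand-rolled stand-ins for the generated Gitaly messages.
#[derive(Clone, Debug, Default)]
struct PaginationParameter {
    page_token: String,
    limit: i32,
}

#[derive(Clone, Debug)]
struct PaginationCursor {
    next_cursor: String,
}

fn main() {
    // First call: empty token, client-chosen page size.
    let first = PaginationParameter {
        page_token: String::new(),
        limit: 2,
    };
    // The first chunk of the response carries a cursor derived from the last
    // item of the page (e.g. a path or a branch name).
    let cursor = PaginationCursor {
        next_cursor: "dir/last-entry".to_string(),
    };
    // Next call: resume right after the item matching that token.
    let second = PaginationParameter {
        page_token: cursor.next_cursor,
        limit: first.limit,
    };
    println!("next request: {:?}", second);
}
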
@@ -4,6 +4,7 @@
// GNU General Public License version 2 or any later version.
// SPDX-License-Identifier: GPL-2.0-or-later
use std::env;
use std::fmt::Debug;
use std::str::FromStr;
use lazy_static::lazy_static;
@@ -12,6 +13,7 @@
use tonic::{Response, Status};
use tracing::error;
use crate::gitaly::{PaginationCursor, PaginationParameter};
const DEFAULT_WRITE_BUFFER_SIZE: usize = 131072;
lazy_static! {
@@ -155,3 +157,151 @@
}
}
}
/// This trait is for elements of responses which rely on the Pagination protocol
///
/// Typically, the response will include a repeated field, and the trait is to be
/// implemented on individual items of that field.
///
/// For an example, see [`crate::gitaly::GetTreeEntriesResponse`]
pub trait PaginableMessageItem: Debug {
    /// Provide the `next_cursor` field of `PaginationCursor`
    ///
    /// This is the value telling clients that the next page of results would
    /// start after the current item, which is typically the last one returned
    /// by the current call.
    fn next_cursor(&self) -> String;

    /// Tell whether this item is the one bearing the given token.
    ///
    /// This typically means that the wanted results start right afterwards.
    fn match_token(&self, token: &str) -> bool;
}
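
// --- Illustration only, not part of this commit ---
// A minimal sketch of what an implementation of `PaginableMessageItem` could
// look like, assuming a hypothetical item type whose stable identifier is a
// path; the concrete implementations for actual Gitaly messages are not shown
// in this diff.
#[derive(Debug)]
struct DemoEntry {
    path: String,
}

impl PaginableMessageItem for DemoEntry {
    fn next_cursor(&self) -> String {
        // The next page should start right after this entry.
        self.path.clone()
    }

    fn match_token(&self, token: &str) -> bool {
        self.path == token
    }
}
// --- End of illustration ---
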
/// Stream responses by grouping them in chunks and abiding by the pagination parameters.
///
/// This is meant to take into account an incoming [`PaginationParameter`] and provide in turn
/// the appropriate responses so that the client can request the next page. These together
/// represent an effort in the Gitaly protocol to standardize a client-driven second level of
/// batching on top of the server-driven streaming responses.
///
/// This function takes several closure arguments:
///
/// - `resp_builder` is a closure responsible for building a response from collected items and a
///   [`PaginationCursor`] message.
/// - `err_handler` is the generic transformation from the errors that `iterator` may yield.
/// - `token_not_found` provides the error [`Status`] in case no item matches
///   `pagination.page_token`. It is expected to differ among gRPC methods and may need
///   some contextual information.
pub fn stream_with_pagination<Iter, Resp, Item, E>(
    tx: &BlockingResponseSender<Resp>,
    pagination: &Option<PaginationParameter>,
    mut iterator: Iter,
    resp_builder: impl FnOnce(Vec<Item>, Option<PaginationCursor>) -> Resp + Copy,
    err_handler: impl FnOnce(E) -> Status + Copy,
    token_not_found: impl FnOnce(&str) -> Status + Copy,
) where
    Iter: Iterator<Item = Result<Item, E>>,
    Item: PaginableMessageItem,
{
    match pagination {
        None => {
            // no pagination param: in particular, no limit
            stream_chunks_with_cursor(tx, iterator, "", resp_builder, err_handler);
        }
        Some(pagination) => {
            let token = &pagination.page_token;
            if !token.is_empty() {
                let mut found = false;
                for res in iterator.by_ref() {
                    match res {
                        Ok(item) => {
                            if item.match_token(token) {
                                found = true;
                                break;
                            }
                        }
                        Err(e) => {
                            tx.send(Err(err_handler(e)));
                            return;
                        }
                    }
                }
                if !found {
                    tx.send(Err(token_not_found(token)));
                    return;
                }
            }
            if pagination.limit < 0 {
                return stream_chunks_with_cursor(tx, iterator, "", resp_builder, err_handler);
            }
            // No other choice than collecting to truncate and derive `next_cursor`
            let limit = pagination.limit as usize;
            let mut limited: Vec<Result<Item, E>> = Vec::new();
            for (i, item) in iterator.enumerate() {
                if i >= limit {
                    break;
                }
                limited.push(item);
            }
            match limited.last() {
                None => {}
                // In case it is an error, we need to take ownership, since `err_handler` does
                // not work on references. We cannot clone because some of the errors we have to
                // deal with do not implement `Clone` (e.g., [`HgError`] does not)
                Some(Err(_e)) => tx.send(Err(err_handler(
                    limited
                        .pop()
                        .expect("Last element already known to exist and to be an error")
                        .unwrap_err(),
                ))),
                Some(Ok(ref item)) => {
                    // We need to take a ref for next_cursor, so that the closure used in the
                    // underlying `stream_chunks()` is `Copy`, which leads us to clone in the
                    // actual `PaginationCursor` instantiation (this is bearable).
                    // We also need to take the ref right now, so that the borrow checker
                    // accepts its use in the call below.
                    let next_cursor = &item.next_cursor();
                    stream_chunks_with_cursor(
                        tx,
                        limited.into_iter(),
                        next_cursor,
                        resp_builder,
                        err_handler,
                    );
                }
            }
        }
    }
}
/// Shortcut to reduce code duplication in `stream_with_pagination`
fn stream_chunks_with_cursor<Iter, Resp, Item, E>(
    tx: &BlockingResponseSender<Resp>,
    iter: Iter,
    next_cursor: &str,
    resp_builder: impl FnOnce(Vec<Item>, Option<PaginationCursor>) -> Resp + Copy,
    err_handler: impl FnOnce(E) -> Status + Copy,
) where
    Iter: Iterator<Item = Result<Item, E>>,
{
    stream_chunks(
        tx,
        iter,
        |chunk, first| {
            resp_builder(
                chunk,
                if first {
                    Some(PaginationCursor {
                        next_cursor: next_cursor.to_string(),
                    })
                } else {
                    None
                },
            )
        },
        err_handler,
    );
}
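
As a hedged usage sketch (not part of this diff), a gRPC method implementation could drive the helper roughly as follows. `DemoEntry` is the hypothetical item type from the illustration above, `DemoResponse` is a made-up response message with the usual repeated-field-plus-cursor shape, and the error type and messages are placeholders; only `stream_with_pagination`, `BlockingResponseSender`, `PaginationParameter`, `PaginationCursor` and `Status` come from the code above.

// Hypothetical response message mirroring the repeated-field + cursor shape.
struct DemoResponse {
    entries: Vec<DemoEntry>,
    pagination_cursor: Option<PaginationCursor>,
}

// Hypothetical caller: turn an iterator of fallible items into paginated,
// chunked responses on the blocking sender used by the service implementation.
fn stream_demo_entries(
    tx: &BlockingResponseSender<DemoResponse>,
    pagination: &Option<PaginationParameter>,
    entries: impl Iterator<Item = Result<DemoEntry, std::io::Error>>,
) {
    stream_with_pagination(
        tx,
        pagination,
        entries,
        // Non-capturing closures, hence `Copy` as the helper requires.
        |entries, pagination_cursor| DemoResponse {
            entries,
            pagination_cursor,
        },
        |e| Status::internal(format!("error reading entries: {e}")),
        |token| Status::invalid_argument(format!("could not find page token {token:?}")),
    );
}
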