Commit

implement the one-shot serialize with backrefs in terms of the incremental serializer
arvidn committed Dec 26, 2024
1 parent 110d69c commit a8d86f0
Showing 2 changed files with 6 additions and 90 deletions.
2 changes: 1 addition & 1 deletion src/serde/mod.rs
@@ -22,7 +22,7 @@ pub use de_tree::{parse_triples, ParsedTriple};
 pub use incremental::{Serializer, UndoState};
 pub use object_cache::{serialized_length, treehash, ObjectCache};
 pub use ser::{node_to_bytes, node_to_bytes_limit};
-pub use ser_br::{node_to_bytes_backrefs, node_to_bytes_backrefs_limit};
+pub use ser_br::node_to_bytes_backrefs;
 pub use tools::{
     serialized_length_from_bytes, serialized_length_from_bytes_trusted, tree_hash_from_stream,
 };
94 changes: 5 additions & 89 deletions src/serde/ser_br.rs
@@ -1,93 +1,14 @@
 // Serialization with "back-references"

 use std::io;
-use std::io::Cursor;

-use super::object_cache::{serialized_length, treehash, ObjectCache};
-use super::read_cache_lookup::ReadCacheLookup;
-use super::write_atom::write_atom;
-use crate::allocator::{Allocator, NodePtr, SExp};
-use crate::serde::ser::LimitedWriter;
-
-const BACK_REFERENCE: u8 = 0xfe;
-const CONS_BOX_MARKER: u8 = 0xff;
-
-#[derive(PartialEq, Eq)]
-enum ReadOp {
-    Parse,
-    Cons,
-}
-
-pub fn node_to_stream_backrefs<W: io::Write>(
-    allocator: &Allocator,
-    node: NodePtr,
-    f: &mut W,
-) -> io::Result<()> {
-    let mut read_op_stack: Vec<ReadOp> = vec![ReadOp::Parse];
-    let mut write_stack: Vec<NodePtr> = vec![node];
-
-    let mut read_cache_lookup = ReadCacheLookup::new();
-
-    let mut thc = ObjectCache::new(treehash);
-    let mut slc = ObjectCache::new(serialized_length);
-
-    while let Some(node_to_write) = write_stack.pop() {
-        let op = read_op_stack.pop();
-        assert!(op == Some(ReadOp::Parse));
-
-        let node_serialized_length = *slc
-            .get_or_calculate(allocator, &node_to_write, None)
-            .expect("couldn't calculate serialized length");
-        let node_tree_hash = thc
-            .get_or_calculate(allocator, &node_to_write, None)
-            .expect("can't get treehash");
-        match read_cache_lookup.find_path(node_tree_hash, node_serialized_length) {
-            Some(path) => {
-                f.write_all(&[BACK_REFERENCE])?;
-                write_atom(f, &path)?;
-                read_cache_lookup.push(*node_tree_hash);
-            }
-            None => match allocator.sexp(node_to_write) {
-                SExp::Pair(left, right) => {
-                    f.write_all(&[CONS_BOX_MARKER])?;
-                    write_stack.push(right);
-                    write_stack.push(left);
-                    read_op_stack.push(ReadOp::Cons);
-                    read_op_stack.push(ReadOp::Parse);
-                    read_op_stack.push(ReadOp::Parse);
-                }
-                SExp::Atom => {
-                    let atom = allocator.atom(node_to_write);
-                    write_atom(f, atom.as_ref())?;
-                    read_cache_lookup.push(*node_tree_hash);
-                }
-            },
-        }
-        while !read_op_stack.is_empty() && read_op_stack[read_op_stack.len() - 1] == ReadOp::Cons {
-            read_op_stack.pop();
-            read_cache_lookup.pop2_and_cons();
-        }
-    }
-    Ok(())
-}
-
-pub fn node_to_bytes_backrefs_limit(
-    a: &Allocator,
-    node: NodePtr,
-    limit: usize,
-) -> io::Result<Vec<u8>> {
-    let buffer = Cursor::new(Vec::new());
-    let mut writer = LimitedWriter::new(buffer, limit);
-    node_to_stream_backrefs(a, node, &mut writer)?;
-    let vec = writer.into_inner().into_inner();
-    Ok(vec)
-}
+use crate::allocator::{Allocator, NodePtr};
+use crate::serde::incremental::Serializer;

 pub fn node_to_bytes_backrefs(a: &Allocator, node: NodePtr) -> io::Result<Vec<u8>> {
-    let mut buffer = Cursor::new(Vec::new());
-    node_to_stream_backrefs(a, node, &mut buffer)?;
-    let vec = buffer.into_inner();
-    Ok(vec)
+    let mut ser = Serializer::new();
+    ser.add(&a, node, None)?;
+    Ok(ser.into_inner())
 }

 #[cfg(test)]
@@ -107,10 +28,5 @@ mod tests {
         let expected = &[255, 255, 255, 133, 1, 2, 3, 4, 5, 254, 2, 254, 2, 254, 2];

         assert_eq!(node_to_bytes_backrefs(&a, l3).unwrap(), expected);
-        assert_eq!(node_to_bytes_backrefs_limit(&a, l3, 15).unwrap(), expected);
-        assert_eq!(
-            node_to_bytes_backrefs_limit(&a, l3, 14).unwrap_err().kind(),
-            io::ErrorKind::OutOfMemory
-        );
     }
 }
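
For reference, a minimal sketch of what this change amounts to: node_to_bytes_backrefs is now a thin wrapper over the incremental Serializer, so the two paths below are assumed to produce the same back-reference-compressed output for a single root node. The helper serialize_both_ways is purely illustrative (not part of this commit), and the Serializer calls simply mirror the new wrapper body shown above.

use std::io;

use crate::allocator::{Allocator, NodePtr};
use crate::serde::incremental::Serializer;
use crate::serde::node_to_bytes_backrefs;

// Hypothetical helper, for illustration only.
fn serialize_both_ways(a: &Allocator, node: NodePtr) -> io::Result<(Vec<u8>, Vec<u8>)> {
    // The one-shot wrapper, as re-implemented by this commit.
    let one_shot = node_to_bytes_backrefs(a, node)?;

    // Driving the incremental serializer directly, mirroring the wrapper's body:
    // a single add() call for the root node, then take the finished buffer.
    let mut ser = Serializer::new();
    ser.add(a, node, None)?;
    let incremental = ser.into_inner();

    Ok((one_shot, incremental))
}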
