Skip to content

Commit

Permalink
Make docs Rust-style
Browse files Browse the repository at this point in the history
  • Loading branch information
slarse committed Nov 11, 2023
1 parent 8038742 commit a862193
Show file tree
Hide file tree
Showing 9 changed files with 117 additions and 183 deletions.
88 changes: 39 additions & 49 deletions src/diff.rs
Original file line number Diff line number Diff line change
Expand Up @@ -394,21 +394,19 @@ fn drain_context_into_chunk<'b, 'a: 'b, S: Eq>(
chunk_content.extend(context.drain(..).skip(context_to_skip));
}

/**
* Computes a diff between two arbitrary sequences. The typical thing to use would be two lists of
* strings, where each element represents a line.
*
* ```
* use rut::diff;
*
* let a = "First line\nSecond line\nThird line".split('\n').collect::<Vec<&str>>();
* let b = "Second line\nThird line\nFourth line".split('\n').collect::<Vec<&str>>();
*
* let diff = diff::diff(&a, &b);
*
* assert_eq!(diff, "-First line\n Second line\n Third line\n+Fourth line\n");
* ```
*/
/// Computes a diff between two arbitrary sequences. The typical thing to use would be two lists of
/// strings, where each element represents a line.
///
/// ```
/// use rut::diff;
///
/// let a = "First line\nSecond line\nThird line".split('\n').collect::<Vec<&str>>();
/// let b = "Second line\nThird line\nFourth line".split('\n').collect::<Vec<&str>>();
///
/// let diff = diff::diff(&a, &b);
///
/// assert_eq!(diff, "-First line\n Second line\n Third line\n+Fourth line\n");
/// ```
pub fn diff<S: Eq + Copy + Display>(a: &[S], b: &[S]) -> String {
let edit_script = edit_script(a, b);
let mut result = String::new();
Expand All @@ -430,30 +428,28 @@ pub fn diff<S: Eq + Copy + Display>(a: &[S], b: &[S]) -> String {
result
}

/**
* Computes an edit script between two arbitrary sequences.
*
* Example:
* ```
* use rut::diff;
* use rut::diff::{Edit, EditKind};
*
* let a = "ABC".chars().collect::<Vec<char>>();
* let b = "BBD".chars().collect::<Vec<char>>();
*
* let expected_edits = vec![
* Edit::deletion('A', 0),
* Edit::equal('B', 1, 0),
* Edit::deletion('C', 2),
* Edit::addition('B', 1),
* Edit::addition('D', 2),
* ];
*
* let edit_script = diff::edit_script(&a, &b);
*
* assert_eq!(edit_script, expected_edits);
* ```
*/
/// Computes an edit script between two arbitrary sequences.
///
/// Example:
/// ```
/// use rut::diff;
/// use rut::diff::{Edit, EditKind};
///
/// let a = "ABC".chars().collect::<Vec<char>>();
/// let b = "BBD".chars().collect::<Vec<char>>();
///
/// let expected_edits = vec![
/// Edit::deletion('A', 0),
/// Edit::equal('B', 1, 0),
/// Edit::deletion('C', 2),
/// Edit::addition('B', 1),
/// Edit::addition('D', 2),
/// ];
///
/// let edit_script = diff::edit_script(&a, &b);
///
/// assert_eq!(edit_script, expected_edits);
/// ```
pub fn edit_script<S: Eq + Copy>(a: &[S], b: &[S]) -> Vec<Edit<S>> {
let (final_k_value, edit_path_graph) = compute_edit_path_graph(a, b);
let reversed_edit_trace = trace_edit_points(final_k_value, edit_path_graph);
Expand Down Expand Up @@ -577,10 +573,8 @@ fn trace_edit_points(final_k: i32, trace: Vec<Vec<usize>>) -> Vec<(i32, i32)> {
edit_points
}

/**
* Compute the previous k-value in the edit path graph. This function is optimized for
* understandability rather than performance, it can easily be compressed into a single condition.
*/
/// Compute the previous k-value in the edit path graph. This function is optimized for
/// understandability rather than performance, it can easily be compressed into a single condition.
fn compute_previous_k(k: i32, d: i32, v: &[usize]) -> i32 {
if k == -d {
// the previous move must have been from a larger k as abs(k) <= d
Expand Down Expand Up @@ -631,17 +625,13 @@ fn compute_edit_script<S: Eq + Copy>(
edits
}

/**
* Get a value from the vector with support for negative indexing.
*/
/// Get a value from the vector with support for negative indexing.
///
/// Panics if the (adjusted) index is out of bounds.
fn get<S>(iterable: &[S], index: i32) -> &S {
    iterable
        .get(adjust_index(iterable, index))
        .unwrap()
}

/**
* Set a value in the vector with support for negative indexing.
*/
/// Set a value in the vector with support for negative indexing.
fn set<S>(iterable: &mut [S], index: i32, value: S) {
let adjusted_index = adjust_index(iterable, index);
iterable[adjusted_index] = value
Expand Down
42 changes: 16 additions & 26 deletions src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,8 @@ pub fn read_file<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
Ok(bytes)
}

/**
* Atomically write to a file by first writing to a temporary file and then renaming it to the
* target file.
*/
/// Atomically write to a file by first writing to a temporary file and then renaming it to the
/// target file.
pub fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
let mut buffer_file = PathBuf::from(path);
let buffer_file_extension = format!(
Expand All @@ -35,23 +33,19 @@ pub fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
fs::rename(&buffer_file, path)
}

/**
* Create a new file and write the content to it. Fail if the file already exists.
*/
/// Create a new file and write the content to it. Fail if the file already exists.
pub fn create_file(path: &Path, content: &[u8]) -> io::Result<()> {
let mut file = OpenOptions::new().create_new(true).write(true).open(path)?;
file.write_all(content)
}

/**
* Struct that enables synchronized atomic writing to files. On acquiring with a lock with
* [`LockFile::acquire`] an empty lockfile is created in the file system. You can then use
* [`LockFile::write`] to write content to the lockfile.
*
* When the [`LockFile`] goes out of scope, the lockfile itself is renamed to the target file for
* which the lock was acquired. Renames are atomic operations, so there is no risk that someone
* reading the file without acquiring the lock gets a partially written result.
*/
/// Struct that enables synchronized atomic writing to files. On acquiring with a lock with
/// [`LockFile::acquire`] an empty lockfile is created in the file system. You can then use
/// [`LockFile::write`] to write content to the lockfile.
///
/// When the [`LockFile`] goes out of scope, the lockfile itself is renamed to the target file for
/// which the lock was acquired. Renames are atomic operations, so there is no risk that someone
/// reading the file without acquiring the lock gets a partially written result.
pub struct LockFile {
path: PathBuf,
lockfile: File,
Expand Down Expand Up @@ -195,12 +189,10 @@ pub trait AsVec<T> {
fn as_vec(&self) -> Vec<T>;
}

/**
* A resource backed by a lockfile. The final write is atomically transferred to the original file
* when this struct is destroyed.
*
* Do note that any intermediate writes are simply discarded.
*/
/// A resource backed by a lockfile. The final write is atomically transferred to the original file
/// when this struct is destroyed.
///
/// Do note that any intermediate writes are simply discarded.
pub struct LockFileResource<T: AsVec<u8>> {
lockfile: LockFile,
resource: T,
Expand All @@ -211,10 +203,8 @@ impl<T: AsVec<u8>> LockFileResource<T> {
LockFileResource { lockfile, resource }
}

/**
* Write the resource to the lockfile. The final write to the lockfile are committed to the
* original resource once this struct is destroyed.
*/
/// Write the resource to the lockfile. The final write to the lockfile is committed to the
/// original resource once this struct is destroyed.
///
/// # Errors
/// Propagates any I/O error from writing to the underlying lockfile.
pub fn write(&mut self) -> io::Result<()> {
    self.lockfile.write(&self.resource.as_vec())
}
Expand Down
22 changes: 8 additions & 14 deletions src/index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -175,14 +175,12 @@ impl Index {
}
}

/**
* We need to discard any new entries that conflict with existing ones. For example, given an
* existing entry `file.txt`, adding a new entry for `file.txt/nested.txt` (i.e. there's now a
* directory called `file.txt` with a file `nested.txt` in it), we need to remove `file.txt`.
*
* Similarly, given an existing entry `nested/dir/file.txt` and adding an entry `nested`, we
* expect `nested/dir/file.txt` to be removed from the index.
*/
/// We need to discard any existing entries that conflict with new ones. For example, given an
/// existing entry `file.txt`, adding a new entry for `file.txt/nested.txt` (i.e. there's now a
/// directory called `file.txt` with a file `nested.txt` in it), we need to remove `file.txt`.
///
/// Similarly, given an existing entry `nested/dir/file.txt` and adding an entry `nested`, we
/// expect `nested/dir/file.txt` to be removed from the index.
fn discard_conflicting_entries<P: AsRef<Path>>(&mut self, path: P) {
self.remove_directory(&path);
for parent in path.as_ref().ancestors() {
Expand All @@ -199,16 +197,12 @@ impl Index {
}
}

/**
* Check whether a path exists as an entry in the index.
*/
/// Check whether a path exists as an entry in the index.
pub fn has_entry<P: AsRef<Path>>(&self, path: P) -> bool {
    let key = path.as_ref();
    self.entries.contains_key(key)
}

/**
* Check whether a path is a tracked directory.
*/
/// Check whether a path is a tracked directory.
pub fn is_tracked_directory<P: AsRef<Path>>(&self, path: P) -> bool {
    let dir = path.as_ref();
    self.directories.contains_key(dir)
}
Expand Down
12 changes: 3 additions & 9 deletions src/object_resolver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,7 @@ impl<'a> ObjectResolver<'a> {
Ok(ObjectResolver::new(root_tree, &repository.database))
}

/**
* Find a blob by its path, relative to the root tree of this ObjectResolver.
*/
/// Find a blob by its path, relative to the root tree of this ObjectResolver.
pub fn find_blob_by_path(&mut self, path: &Path) -> io::Result<Blob> {
if let Some(blob) = self.blobs.get(path) {
return Ok(blob.clone());
Expand Down Expand Up @@ -73,9 +71,7 @@ impl<'a> ObjectResolver<'a> {
self.find_blob_in_subtree(parent_path, remaining_path)
}

/**
* Recursively find a blob in a subtree. Cache any trees found along the way.
*/
/// Recursively find a blob in a subtree. Cache any trees found along the way.
fn find_blob_in_subtree(
&mut self,
parent_path: &Path,
Expand Down Expand Up @@ -104,9 +100,7 @@ impl<'a> ObjectResolver<'a> {
self.find_blob_in_tree_(&current_path, &curent_remaining_path)
}

/**
* Get a blob assuming its parent tree is already cached.
*/
/// Get a blob assuming its parent tree is already cached.
fn get_blob(&mut self, blob_path: &Path) -> io::Result<Blob> {
let file_name = blob_path.file_name().unwrap().to_str().unwrap();
let tree = &self.trees[blob_path.parent().unwrap()];
Expand Down
72 changes: 32 additions & 40 deletions src/objects.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,62 +25,54 @@ pub trait GitObject<'a> {
fn to_object_format(&self) -> Vec<u8>;
}

/**
* A Git object id is the sha1 hash of the object's content, which is represented as a 40 byte
* hexadecimal string. This struct encapsulates this concept and provides some utility methods
* related to common operations on object ids, such as finding out the filepath in the object
* database.
*/
/// A Git object id is the sha1 hash of the object's content, which is represented as a 40 byte
/// hexadecimal string. This struct encapsulates this concept and provides some utility methods
/// related to common operations on object ids, such as finding out the filepath in the object
/// database.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct ObjectId {
bytes: Vec<u8>,
}

impl ObjectId {
/**
* Turn a hexadecimal string into an ObjectId. This is the inverse of to_string().
*
* # Examples
* ```
* use rut::objects::ObjectId;
*
* let id = ObjectId::from_sha("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3").unwrap();
* assert_eq!(id.to_string(), "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3");
* ```
*/
/// Turn a hexadecimal string into an ObjectId. This is the inverse of to_string().
///
/// # Examples
/// ```
/// use rut::objects::ObjectId;
///
/// let id = ObjectId::from_sha("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3").unwrap();
/// assert_eq!(id.to_string(), "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3");
/// ```
pub fn from_sha(s: &str) -> Result<ObjectId, String> {
    hex::from_hex_string(s)
        .map_err(|e| e.to_string())
        .and_then(|bytes| Self::from_sha_bytes(&bytes))
}

/**
* Turn a string that is the utf8 encoded version of a sha1 hash into an ObjectId.
*
* # Examples
* ```
* use rut::objects::ObjectId;
*
* let bytes = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3".as_bytes();
* let id = ObjectId::from_utf8_encoded_sha(bytes).unwrap();
* assert_eq!(id.to_string(), "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3");
* ```
*/
/// Turn a string that is the utf8 encoded version of a sha1 hash into an ObjectId.
///
/// # Examples
/// ```
/// use rut::objects::ObjectId;
///
/// let bytes = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3".as_bytes();
/// let id = ObjectId::from_utf8_encoded_sha(bytes).unwrap();
/// assert_eq!(id.to_string(), "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3");
/// ```
pub fn from_utf8_encoded_sha(bytes: &[u8]) -> Result<ObjectId, String> {
    match str::from_utf8(bytes) {
        Ok(decoded) => Self::from_sha(decoded),
        Err(e) => Err(e.to_string()),
    }
}

/**
* Turn bytes into an ObjectId. This is the inverse of bytes().
*
* # Examples
* ```
* use rut::objects::ObjectId;
*
* let bytes = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3".as_bytes();
* let id = ObjectId::from_sha_bytes(bytes).unwrap();
* assert_eq!(id.bytes(), bytes);
*/
/// Turn bytes into an ObjectId. This is the inverse of bytes().
///
/// # Examples
/// ```
/// use rut::objects::ObjectId;
///
/// let bytes = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3".as_bytes();
/// let id = ObjectId::from_sha_bytes(bytes).unwrap();
/// assert_eq!(id.bytes(), bytes);
/// ```
pub fn from_sha_bytes(bytes: &[u8]) -> Result<ObjectId, String> {
let unhexlified_bytes = if bytes.len() == 20 {
hex::unhexlify(bytes)
Expand Down
Loading

0 comments on commit a862193

Please sign in to comment.