chore: resolve clippy::doc_markdown lints in proof-of-sql (#214)
# Rationale for this change

We have `cargo clippy` running in our CI to enforce code quality. To raise our standards further, we should enable the `clippy::pedantic` lint group.

# What changes are included in this PR?

This PR fixes `clippy::doc_markdown` warnings for the `proof-of-sql` lib.

# Are these changes tested?

Yes. The following commands should run without any warnings.

- `cargo clippy --lib -p proof-of-sql -- -W clippy::doc_markdown`
- `cargo doc`
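
For context, `clippy::doc_markdown` flags identifier-like words in doc comments that are not wrapped in backticks, which also keeps `cargo doc` from rendering them as code. A minimal sketch of the pattern applied throughout this PR (hypothetical item, not taken from the diff):

```rust
/// Maps a BigInt column to i64.     // warns: doc_markdown wants `BigInt` backticked
pub struct Before;

/// Maps a `BigInt` column to `i64`. // compliant
pub struct After;
```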

---------

Co-authored-by: Jay White <JayWhite2357@users.noreply.github.com>
mehulmathur16 and JayWhite2357 authored Oct 5, 2024
1 parent 5238673 commit d05636a
Showing 67 changed files with 252 additions and 249 deletions.
1 change: 1 addition & 0 deletions clippy.toml
@@ -1 +1,2 @@
missing-docs-in-crate-items = true
doc-valid-idents = ["DeFi"]
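
The new `doc-valid-idents` entry tells `doc_markdown` to accept the ident `DeFi` appearing bare in doc comments. A rough illustration of the effect (hypothetical doc comments):

```rust
/// Proof of SQL supports DeFi use cases. // accepted: "DeFi" is whitelisted
/// Results are stored in KeyDB.          // would still warn: `KeyDB` needs backticks
pub struct Example;
```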
2 changes: 1 addition & 1 deletion crates/proof-of-sql-parser/src/lib.rs
@@ -37,7 +37,7 @@ pub use resource_id::ResourceId;
// lalrpop-generated code is not clippy-compliant
lalrpop_mod!(#[allow(clippy::all, missing_docs, clippy::missing_docs_in_private_items, clippy::pedantic)] pub sql);

/// Implement [`Deserialize`] through [`FromStr`] to avoid invalid identifiers.
/// Implement [`Deserialize`](serde::Deserialize) through [`FromStr`](core::str::FromStr) to avoid invalid identifiers.
#[macro_export]
macro_rules! impl_serde_from_str {
($type:ty) => {
2 changes: 1 addition & 1 deletion crates/proof-of-sql-parser/src/resource_id.rs
@@ -59,7 +59,7 @@ impl ResourceId {
///
/// Space and time APIs accept a `.` separator in resource ids.
/// However, when a resource id is stored in `KeyDB`, or used as a key, a `:` separator is used.
/// This method differs from [`ResourceId::to_string`] by using the latter format.
/// This method differs from [`ToString::to_string`] by using the latter format.
///
/// Furthermore, while space and time APIs accept lowercase resource identifiers,
/// all resource identifiers are stored internally in uppercase.
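
As a rough illustration of the two separator conventions described in this doc comment (hypothetical resource id values):

```rust
// Space and time APIs accept the `.` separator:
let api_form = "my_schema.my_table";
// Storage/key format: `:` separator, stored internally in uppercase:
let storage_form = api_form.to_uppercase().replace('.', ":");
assert_eq!(storage_form, "MY_SCHEMA:MY_TABLE");
```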
4 changes: 2 additions & 2 deletions crates/proof-of-sql/README.md
@@ -31,7 +31,7 @@ Get started with Proof of SQL by using the published crate on [crates.io](https:

### Prerequisites

* Linux x86_64 (NOTE: Most of the codebase _should_ work for most rust targets. However, proofs are accelerated using NVIDIA GPUs, so other targets would run very slowly and may require modification.)
* Linux `x86_64` (NOTE: Most of the codebase _should_ work for most rust targets. However, proofs are accelerated using NVIDIA GPUs, so other targets would run very slowly and may require modification.)
* NVIDIA GPU & Drivers (Strongly Recommended)
* lld (`sudo apt install lld`)
* clang (`sudo apt install clang`)
@@ -145,7 +145,7 @@ An example result for the 3rd query looks like this:
<p align="center">
b | sum_a | c
`b` | `sum_a` | `c`
---|---|---
1 | -45585 | 301
2 | -137574, | 300
16 changes: 8 additions & 8 deletions crates/proof-of-sql/src/base/bit/bit_distribution.rs
@@ -8,10 +8,10 @@ use serde::{Deserialize, Serialize};
pub struct BitDistribution {
/// We use two arrays to track which bits vary
/// and the constant bit values. If
/// {x_1, ..., x_n} represents the values BitDistribution describes, then
/// or_all = abs(x_1) | abs(x_2) | ... | abs(x_n)
/// vary_mask & (1 << i) =
/// 1 if x_s & (1 << i) != x_t & (1 << i) for some s != t
/// `{x_1, ..., x_n}` represents the values [`BitDistribution`] describes, then:
/// `or_all = abs(x_1) | abs(x_2) | ... | abs(x_n)`
/// `vary_mask & (1 << i) =`
/// `1` if `x_s & (1 << i) != x_t & (1 << i)` for some `s != t`
/// 0 otherwise
pub or_all: [u64; 4],
pub vary_mask: [u64; 4],
@@ -54,8 +54,8 @@ impl BitDistribution {
self.or_all[3] & (1 << 63) != 0
}

/// Check if this instance represents a valid bit distribution. is_valid
/// can be used after deserializing a BitDistribution from an untrusted
/// Check if this instance represents a valid bit distribution. `is_valid`
/// can be used after deserializing a [`BitDistribution`] from an untrusted
/// source.
pub fn is_valid(&self) -> bool {
for (m, o) in self.vary_mask.iter().zip(self.or_all) {
@@ -66,8 +66,8 @@
true
}

/// If {b_i} represents the non-varying 1-bits of the absolute values, return the value
/// sum_i b_i 2 ^ i
/// If `{b_i}` represents the non-varying 1-bits of the absolute values, return the value
/// `sum_i b_i 2 ^ i`
pub fn constant_part(&self) -> [u64; 4] {
let mut val = [0; 4];
self.for_each_abs_constant_bit(|i: usize, bit: usize| {
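
For intuition, a minimal one-limb sketch of the invariants documented in this file, assuming plain `i64` inputs instead of the crate's four-limb `[u64; 4]` representation (hypothetical helper, not part of the crate):

```rust
/// Computes `or_all` and `vary_mask` for a single 64-bit limb.
fn bit_distribution_one_limb(values: &[i64]) -> (u64, u64) {
    // or_all = abs(x_1) | abs(x_2) | ... | abs(x_n)
    let or_all = values.iter().fold(0u64, |acc, v| acc | v.unsigned_abs());
    // and_all keeps only bits set in *every* value; a bit varies iff it
    // is set in some value but not in all of them.
    let and_all = values.iter().fold(u64::MAX, |acc, v| acc & v.unsigned_abs());
    let vary_mask = or_all & !and_all;
    (or_all, vary_mask)
}
```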
8 changes: 4 additions & 4 deletions crates/proof-of-sql/src/base/bit/bit_matrix.rs
@@ -5,12 +5,12 @@ use crate::base::{
use alloc::vec::Vec;
use bumpalo::Bump;

/// Let x1, ..., xn denote the values of a data column. Let
/// b1, ..., bk denote the bit positions of abs(x1), ..., abs(xn)
/// Let `x1, ..., xn` denote the values of a data column. Let
/// `b1, ..., bk` denote the bit positions of `abs(x1), ..., abs(xn)`
/// that vary.
///
/// compute_varying_bit_matrix returns the matrix M where
/// M_ij = abs(xi) & (1 << bj) == 1
/// `compute_varying_bit_matrix` returns the matrix M where
/// `M_ij = abs(xi) & (1 << bj) == 1`
/// The last column of M corresponds to the sign bit if it varies.
pub fn compute_varying_bit_matrix<'a, S: Scalar>(
alloc: &'a Bump,
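
A rough sketch of the matrix construction documented above, again assuming `i64` values rather than the crate's `Scalar` type (hypothetical helper):

```rust
/// Returns one column per varying bit position b_j, where
/// M[j][i] = (abs(x_i) >> b_j) & 1 == 1.
fn varying_bit_matrix(values: &[i64], varying_bits: &[u32]) -> Vec<Vec<bool>> {
    varying_bits
        .iter()
        .map(|&bit| {
            values
                .iter()
                .map(|v| (v.unsigned_abs() >> bit) & 1 == 1)
                .collect()
        })
        .collect()
}
```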
6 changes: 3 additions & 3 deletions crates/proof-of-sql/src/base/commitment/column_bounds.rs
@@ -75,7 +75,7 @@ where
/// The source collection is empty so has no bounds.
#[default]
Empty,
/// After some operation (like [`Bounds::difference`]), the bounds cannot be determined exactly.
/// After some operation (like `Bounds::difference`), the bounds cannot be determined exactly.
///
/// Instead, this variant underestimates the minimum and overestimates the maximum.
Bounded(BoundsInner<T>),
@@ -203,11 +203,11 @@ pub struct ColumnBoundsMismatch {
pub enum ColumnBounds {
/// Column does not have order.
NoOrder,
/// The bounds of a SmallInt column.
/// The bounds of a `SmallInt` column.
SmallInt(Bounds<i16>),
/// The bounds of an Int column.
Int(Bounds<i32>),
/// The bounds of a BigInt column.
/// The bounds of a `BigInt` column.
BigInt(Bounds<i64>),
/// The bounds of an Int128 column.
Int128(Bounds<i128>),
@@ -23,7 +23,7 @@ pub struct DuplicateIdentifiers {
id: String,
}

/// Errors that can occur when attempting to append rows to ColumnCommitments.
/// Errors that can occur when attempting to append rows to [`ColumnCommitments`].
#[derive(Debug, Snafu)]
pub enum AppendColumnCommitmentsError {
/// Metadata between new and old columns are mismatched.
@@ -26,7 +26,7 @@ pub trait CommitmentEvaluationProof {
type VerifierPublicSetup<'a>: Copy;
/// Create a new proof.
///
/// Note: b_point must have length `nu`, where `2^nu` is at least the length of `a`.
/// Note: `b_point` must have length `nu`, where `2^nu` is at least the length of `a`.
/// `b_point` are the values for the variables that are being evaluated.
/// The resulting evaluation is the inner product of `a` and `b`, where `b` is the expanded vector form of `b_point`.
fn new(
@@ -38,7 +38,7 @@
) -> Self;
/// Verify a proof.
///
/// Note: b_point must have length `nu`, where `2^nu` is at least the length of `a`.
/// Note: `b_point` must have length `nu`, where `2^nu` is at least the length of `a`.
/// `b_point` are the values for the variables that are being evaluated.
/// The resulting evaluation is the inner product of `a` and `b`, where `b` is the expanded vector form of `b_point`.
#[allow(clippy::too_many_arguments)]
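
For reference, a sketch of how `b_point` (length `nu`) could expand into the `2^nu`-element vector `b` whose inner product with `a` gives the evaluation. This assumes a multilinear Lagrange-basis expansion with low bits first, and uses `f64` purely for illustration; the crate's actual scalar type and ordering may differ:

```rust
/// Hypothetical helper: expand nu coordinates into 2^nu basis weights.
fn expand_b_point(b_point: &[f64]) -> Vec<f64> {
    let mut b = vec![1.0];
    for &x in b_point {
        // each coordinate doubles the vector, splitting every entry
        // into a (1 - x) branch and an x branch
        b = b.iter().flat_map(|&p| [p * (1.0 - x), p * x]).collect();
    }
    b
}
```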
10 changes: 5 additions & 5 deletions crates/proof-of-sql/src/base/commitment/committable_column.rs
@@ -25,23 +25,23 @@ use proof_of_sql_parser::posql_time::{PoSQLTimeUnit, PoSQLTimeZone};
pub enum CommittableColumn<'a> {
/// Borrowed Bool column, mapped to `bool`.
Boolean(&'a [bool]),
/// Borrowed SmallInt column, mapped to `i16`.
/// Borrowed `SmallInt` column, mapped to `i16`.
SmallInt(&'a [i16]),
/// Borrowed SmallInt column, mapped to `i32`.
/// Borrowed `Int` column, mapped to `i32`.
Int(&'a [i32]),
/// Borrowed BigInt column, mapped to `i64`.
/// Borrowed `BigInt` column, mapped to `i64`.
BigInt(&'a [i64]),
/// Borrowed Int128 column, mapped to `i128`.
Int128(&'a [i128]),
/// Borrowed Decimal75(precion, scale, column), mapped to 'i256'
Decimal75(Precision, i8, Vec<[u64; 4]>),
/// Column of big ints for committing to, montgomery-reduced from a Scalar column.
Scalar(Vec<[u64; 4]>),
/// Column of limbs for committing to scalars, hashed from a VarChar column.
/// Column of limbs for committing to scalars, hashed from a `VarChar` column.
VarChar(Vec<[u64; 4]>),
/// Borrowed Timestamp column with Timezone, mapped to `i64`.
TimestampTZ(PoSQLTimeUnit, PoSQLTimeZone, &'a [i64]),
/// Borrowed byte column, mapped to `u8`. This is not a PoSQL
/// Borrowed byte column, mapped to `u8`. This is not a `PoSQL`
/// type, we need this to commit to words in the range check.
RangeCheckWord(&'a [u8]),
}
2 changes: 1 addition & 1 deletion crates/proof-of-sql/src/base/commitment/mod.rs
@@ -81,7 +81,7 @@ pub trait Commitment:
///
/// The resulting commitments are written to the slice in `commitments`, which is a buffer.
/// `commitments` is expected to have the same length as `committable_columns` and the behavior is undefined if it does not.
/// The length of each CommittableColumn should be the same.
/// The length of each [`CommittableColumn`] should be the same.
///
/// `offset` is the amount that `committable_columns` is "offset" by. Logically adding `offset` many 0s to the beginning of each of the `committable_columns`.
fn compute_commitments(
@@ -70,7 +70,7 @@ pub enum TableCommitmentArithmeticError {
/// The underlying source error
source: ColumnCommitmentsMismatch,
},
/// Cannot perform TableCommitment arithmetic that would result in a negative range.
/// Cannot perform [`TableCommitment`] arithmetic that would result in a negative range.
#[snafu(transparent)]
NegativeRange {
/// The underlying source error
8 changes: 4 additions & 4 deletions crates/proof-of-sql/src/base/database/accessor.rs
@@ -27,7 +27,7 @@ pub trait MetadataAccessor {
///
/// Verifier uses this information to process a query.
///
/// In pseudo-code, here is a sketch of how CommitmentAccessor fits in
/// In pseudo-code, here is a sketch of how [`CommitmentAccessor`] fits in
/// with the verification workflow:
///
/// ```ignore
@@ -57,7 +57,7 @@ pub trait CommitmentAccessor<C: Commitment>: MetadataAccessor {
///
/// Prover uses this information to process a query.
///
/// In pseudo-code, here is a sketch of how DataAccessor fits in
/// In pseudo-code, here is a sketch of how [`DataAccessor`] fits in
/// with the prove workflow:
///
/// ```ignore
@@ -90,8 +90,8 @@ pub trait DataAccessor<S: Scalar>: MetadataAccessor {
/// Access tables and their schemas in a database.
///
/// This accessor should be implemented by both the prover and verifier
/// and then used by the Proof of SQL code to convert an IntermediateAst
/// into a ProofPlan.
/// and then used by the Proof of SQL code to convert an `IntermediateAst`
/// into a [`ProofPlan`](crate::sql::proof::ProofPlan).
pub trait SchemaAccessor {
/// Lookup the column's data type in the specified table
///
@@ -47,15 +47,15 @@ pub enum ArrowArrayToColumnConversionError {
/// The out of bounds index requested
index: usize,
},
/// Using TimeError to handle all time-related errors
/// Using `TimeError` to handle all time-related errors
#[snafu(transparent)]
TimestampConversionError {
/// The underlying source error
source: PoSQLTimestampError,
},
}

/// This trait is used to provide utility functions to convert ArrayRefs into proof types (Column, Scalars, etc.)
/// This trait is used to provide utility functions to convert [`ArrayRef`]s into proof types (Column, Scalars, etc.)
pub trait ArrayRefExt {
/// Convert an ArrayRef into a Proof of SQL Vec<Scalar>
///
@@ -67,16 +67,16 @@ pub trait ArrayRefExt {
&self,
) -> Result<Vec<crate::base::scalar::Curve25519Scalar>, ArrowArrayToColumnConversionError>;

/// Convert an ArrayRef into a Proof of SQL Column type
/// Convert an [`ArrayRef`] into a Proof of SQL Column type
///
/// Parameters:
/// - `alloc`: used to allocate a slice of data when necessary
/// (vide StringArray into Column::HashedBytes((_,_)).
/// (vide [`StringArray`] into `Column::HashedBytes((_,_))`.
///
/// - `range`: used to get a subslice out of ArrayRef.
/// - `range`: used to get a subslice out of [`ArrayRef`].
///
/// - `scals`: scalar representation of each element in the ArrayRef.
/// Some types don't require this slice (see Column::BigInt). But for types requiring it,
/// - `scals`: scalar representation of each element in the [`ArrayRef`].
/// Some types don't require this slice (see [`Column::BigInt`]). But for types requiring it,
/// `scals` must be provided and have a length equal to `range.len()`.
///
/// Note: this function must not be called from unsupported or nullable arrays as it will panic.
@@ -178,14 +178,14 @@ impl ArrayRefExt for ArrayRef {
})
}

/// Converts the given ArrowArray into a `Column` data type based on its `DataType`. Returns an
/// empty `Column` for any empty range if it is in-bounds.
/// Converts the given `ArrowArray` into a [`Column`] data type based on its [`DataType`]. Returns an
/// empty [`Column`] for any empty range if it is in-bounds.
///
/// # Parameters
/// - `alloc`: Reference to a `Bump` allocator used for memory allocation during the conversion.
/// - `range`: Reference to a `Range<usize>` specifying the slice of the array to convert.
/// - `precomputed_scals`: Optional reference to a slice of `Curve25519Scalar` values.
/// VarChar columns store hashes to their values as scalars, which can be provided here.
/// `VarChar` columns store hashes to their values as scalars, which can be provided here.
///
/// # Supported types
/// - For `DataType::Int64` and `DataType::Decimal128(38, 0)`, it slices the array
22 changes: 11 additions & 11 deletions crates/proof-of-sql/src/base/database/column.rs
@@ -39,16 +39,16 @@ pub enum Column<'a, S: Scalar> {
/// i128 columns
Int128(&'a [i128]),
/// Decimal columns with a max width of 252 bits
/// - the backing store maps to the type [crate::base::scalar::Curve25519Scalar]
/// - the backing store maps to the type [`crate::base::scalar::Curve25519Scalar`]
Decimal75(Precision, i8, &'a [S]),
/// Scalar columns
Scalar(&'a [S]),
/// String columns
/// - the first element maps to the str values.
/// - the second element maps to the str hashes (see [crate::base::scalar::Scalar]).
/// - the second element maps to the str hashes (see [`crate::base::scalar::Scalar`]).
VarChar((&'a [&'a str], &'a [S])),
/// Timestamp columns with timezone
/// - the first element maps to the stored [`TimeUnit`]
/// - the first element maps to the stored `TimeUnit`
/// - the second element maps to a timezone
/// - the third element maps to columns of timeunits since unix epoch
TimestampTZ(PoSQLTimeUnit, PoSQLTimeZone, &'a [i64]),
@@ -247,7 +247,7 @@ pub enum ColumnType {
/// Mapped to i64
#[serde(alias = "TIMESTAMP", alias = "timestamp")]
TimestampTZ(PoSQLTimeUnit, PoSQLTimeZone),
/// Mapped to Curve25519Scalar
/// Mapped to [`Curve25519Scalar`](crate::base::scalar::Curve25519Scalar)
#[serde(alias = "SCALAR", alias = "scalar")]
Scalar,
}
@@ -287,7 +287,7 @@ impl ColumnType {
}
}

/// Returns the ColumnType of the integer type with the given number of bits if it is a valid integer type.
/// Returns the [`ColumnType`] of the integer type with the given number of bits if it is a valid integer type.
///
/// Otherwise, return None.
fn from_integer_bits(bits: usize) -> Option<Self> {
@@ -300,7 +300,7 @@ impl ColumnType {
}
}

/// Returns the larger integer type of two ColumnTypes if they are both integers.
/// Returns the larger integer type of two [`ColumnType`]s if they are both integers.
///
/// If either of the columns is not an integer, return None.
#[must_use]
@@ -316,7 +316,7 @@ impl ColumnType {
})
}

/// Returns the precision of a ColumnType if it is converted to a decimal wrapped in Some(). If it can not be converted to a decimal, return None.
/// Returns the precision of a [`ColumnType`] if it is converted to a decimal wrapped in `Some()`. If it can not be converted to a decimal, return None.
#[must_use]
pub fn precision_value(&self) -> Option<u8> {
match self {
@@ -331,7 +331,7 @@
Self::Boolean | Self::VarChar => None,
}
}
/// Returns scale of a ColumnType if it is convertible to a decimal wrapped in Some(). Otherwise return None.
/// Returns scale of a [`ColumnType`] if it is convertible to a decimal wrapped in `Some()`. Otherwise return None.
#[must_use]
pub fn scale(&self) -> Option<i8> {
match self {
@@ -378,7 +378,7 @@ impl ColumnType {
}
}

/// Convert ColumnType values to some arrow DataType
/// Convert [`ColumnType`] values to some arrow [`DataType`]
#[cfg(feature = "arrow")]
impl From<&ColumnType> for DataType {
fn from(column_type: &ColumnType) -> Self {
@@ -407,7 +407,7 @@ impl From<&ColumnType> for DataType {
}
}

/// Convert arrow DataType values to some ColumnType
/// Convert arrow [`DataType`] values to some [`ColumnType`]
#[cfg(feature = "arrow")]
impl TryFrom<DataType> for ColumnType {
type Error = String;
@@ -533,7 +533,7 @@ impl ColumnField {
}
}

/// Convert ColumnField values to arrow Field
/// Convert [`ColumnField`] values to arrow Field
#[cfg(feature = "arrow")]
impl From<&ColumnField> for Field {
fn from(column_field: &ColumnField) -> Self {
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/base/database/column_operation.rs
@@ -840,8 +840,8 @@ where
/// 2. We use floor division for rounding.
/// 3. If division by zero occurs, we return an error.
/// 4. Precision and scale follow T-SQL rules. That is,
/// - new_scale = max(6, right_precision + left_scale + 1)
/// - new_precision = left_precision - left_scale + right_scale + new_scale
/// - `new_scale = max(6, right_precision + left_scale + 1)`
/// - `new_precision = left_precision - left_scale + right_scale + new_scale`
pub(crate) fn try_divide_decimal_columns<S, T0, T1>(
lhs: &[T0],
rhs: &[T1],
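
A small worked sketch of the T-SQL-style precision/scale rules documented above (hypothetical helper, not part of the diff):

```rust
/// new_scale = max(6, right_precision + left_scale + 1)
/// new_precision = left_precision - left_scale + right_scale + new_scale
fn decimal_division_type(
    left_precision: i16, left_scale: i16,
    right_precision: i16, right_scale: i16,
) -> (i16, i16) {
    let new_scale = (right_precision + left_scale + 1).max(6);
    let new_precision = left_precision - left_scale + right_scale + new_scale;
    (new_precision, new_scale)
}

// e.g. DECIMAL(10, 2) / DECIMAL(5, 1):
// new_scale = max(6, 5 + 2 + 1) = 8, new_precision = 10 - 2 + 1 + 8 = 17
```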