
Commit

chore: refactor of main input
ImkoMarijnissen committed Nov 15, 2024
1 parent 5e5dd34 commit a1af32e
Showing 7 changed files with 27 additions and 48 deletions.
@@ -2,9 +2,10 @@ use clap::ValueEnum;

/// Specifies the type of sequence which is used to generate conflict limits before a restart
/// occurs.
#[derive(Clone, Copy, Debug, ValueEnum)]
#[derive(Default, Clone, Copy, Debug, ValueEnum)]
pub enum SequenceGeneratorType {
/// Indicates that the restart strategy should restart every `x` conflicts.
#[default]
Constant,
/// Indicates that the restart strategy should use geometric restarts.
///
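The pattern applied throughout this commit replaces explicit `default_value_t = <Variant>` expressions with `#[arg(long, value_enum, default_value_t)]` and moves the default onto the enum itself via `#[derive(Default)]` and `#[default]`. A minimal stand-alone sketch of how the pieces fit together, assuming clap 4 with the `derive` feature (`Mode` and `Cli` are illustrative names, not items from this repository):

use clap::{Parser, ValueEnum};

// The `#[default]` variant supplies the `Default` impl that a bare
// `default_value_t` (without `= <expr>`) falls back to.
#[derive(Default, Clone, Copy, Debug, ValueEnum)]
enum Mode {
    #[default]
    Constant,
    Geometric,
    Luby,
}

#[derive(Parser)]
struct Cli {
    // With `value_enum`, clap renders the default through `ValueEnum`,
    // so the enum does not need to implement `Display`.
    #[arg(long, value_enum, default_value_t)]
    mode: Mode,
}

fn main() {
    let cli = Cli::parse();
    println!("selected mode: {:?}", cli.mode);
}

Running such a binary without `--mode` prints `Constant`, mirroring how the solver's arguments below pick up their enum defaults.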
40 changes: 10 additions & 30 deletions pumpkin-solver/src/bin/pumpkin-solver/main.rs
@@ -80,7 +80,7 @@ struct Args {
/// What type of proof to log.
///
/// If the `proof_path` option is not provided, this is ignored.
#[arg(long, default_value_t = ProofType::Scaffold)]
#[arg(long, value_enum, default_value_t)]
proof_type: ProofType,

/// The number of high lbd learned clauses that are kept in the database.
@@ -110,11 +110,7 @@ struct Args {
/// Decides which clauses will be removed when cleaning up the learned clauses. Can either be
/// based on the LBD of a clause (the number of different decision levels) or on the activity
/// of a clause (how often it is used in conflict analysis).
#[arg(
short = 'l',
long = "learning-sorting-strategy",
default_value_t = LearnedNogoodSortingStrategy::Activity, verbatim_doc_comment
)]
#[arg(long, value_enum, default_value_t)]
learning_sorting_strategy: LearnedNogoodSortingStrategy,

/// Decides whether learned clauses are minimised as a post-processing step after computing the
@@ -128,18 +124,9 @@ struct Args {
no_learning_clause_minimisation: bool,

/// Decides the sequence based on which the restarts are performed.
/// - The "constant" approach uses a constant number of conflicts before another restart is
/// triggered
/// - The "geometric" approach uses a geometrically increasing sequence
/// - The "luby" approach uses a recursive sequence of the form 1, 1, 2, 1, 1, 2, 4, 1, 1, 2,
/// 1, 1, 2, 4, 8, 1, 1, 2.... (see "Optimal speedup of Las Vegas algorithms - Luby et al.
/// (1993)")
///
/// To be used in combination with "--restarts-base-interval".
#[arg(
long = "restart-sequence",
default_value_t = SequenceGeneratorType::Constant, verbatim_doc_comment
)]
#[arg(long, value_enum, default_value_t)]
restart_sequence_generator_type: SequenceGeneratorType,

/// The base interval length is used as a multiplier to the restart sequence.
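For reference, the Luby sequence removed from the doc comment above is the standard one from "Optimal speedup of Las Vegas algorithms" (Luby et al., 1993). A small stand-alone sketch of how it can be computed (illustrative only, not a helper from this repository):

// Returns the i-th element (1-indexed) of the Luby sequence
// 1, 1, 2, 1, 1, 2, 4, 1, 1, 2, 1, 1, 2, 4, 8, ...
fn luby(mut i: u64) -> u64 {
    assert!(i >= 1);
    loop {
        // Find the smallest k with i <= 2^k - 1.
        let mut k = 1u32;
        while (1u64 << k) - 1 < i {
            k += 1;
        }
        if i == (1u64 << k) - 1 {
            return 1u64 << (k - 1);
        }
        // Otherwise restart the search on the tail of the current block.
        i -= (1u64 << (k - 1)) - 1;
    }
}

fn main() {
    let prefix: Vec<u64> = (1..=15).map(luby).collect();
    assert_eq!(prefix, vec![1, 1, 2, 1, 1, 2, 4, 1, 1, 2, 1, 1, 2, 4, 8]);
}

In a restart scheme, each element would typically be multiplied by the base interval described directly above.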
@@ -300,16 +287,7 @@ struct Args {
omit_call_site: bool,

/// The encoding to use for the upper bound constraint in a MaxSAT optimisation problem.
///
/// The "gte" value specifies that the solver should use the Generalized Totalizer Encoding
/// (see "Generalized totalizer encoding for pseudo-boolean constraints - Saurabh et al.
/// (2015)"), and the "cne" value specifies that the solver should use the Cardinality Network
/// Encoding (see "Cardinality networks: a theoretical and empirical study - Asín et al.
/// (2011)").
#[arg(
long = "upper-bound-encoding",
default_value_t = PseudoBooleanEncoding::GeneralizedTotalizer, verbatim_doc_comment
)]
#[arg(long, value_enum, default_value_t)]
upper_bound_encoding: PseudoBooleanEncoding,

/// Determines that the cumulative propagator(s) are allowed to create holes in the domain.
@@ -323,16 +301,17 @@ struct Args {
/// Possible values: bool
#[arg(long = "no-restarts", verbatim_doc_comment)]
no_restarts: bool,

/// Determines the type of explanation used by the cumulative propagator(s) to explain
/// propagations/conflicts.
#[arg(long = "cumulative-explanation-type", default_value_t = CumulativeExplanationType::default())]
#[arg(long, value_enum, default_value_t)]
cumulative_explanation_type: CumulativeExplanationType,

/// Determines the type of propagator which is used by the cumulative propagator(s) to
/// propagate the constraint.
///
/// Currently, the solver only supports variations on time-tabling methods.
#[arg(long = "cumulative-propagation-method", default_value_t = CumulativePropagationMethod::default())]
#[arg(long, value_enum, default_value_t)]
cumulative_propagation_method: CumulativePropagationMethod,

/// Determines whether a sequence of profiles is generated when explaining a propagation for
Expand All @@ -343,7 +322,7 @@ struct Args {
cumulative_generate_sequence: bool,

/// Determines the conflict resolver.
#[arg(long = "conflict-resolver")]
#[arg(long, value_enum, default_value_t)]
conflict_resolver: ConflictResolver,

/// Determines whether incremental backtracking is applied or whether the cumulative
@@ -623,9 +602,10 @@ fn stringify_solution(
.collect::<String>()
}

#[derive(Clone, Copy, Debug, ValueEnum)]
#[derive(Default, Clone, Copy, Debug, ValueEnum)]
enum ProofType {
/// Log only the proof scaffold.
#[default]
Scaffold,
/// Log the full proof without hints.
Full,
@@ -67,14 +67,15 @@ pub(crate) trait PseudoBooleanConstraintEncoderInterface

/// Specifies the type of pseudo-boolean encoding which is used by the
/// [`PseudoBooleanConstraintEncoder`].
#[derive(Clone, Copy, Debug, ValueEnum)]
#[derive(Default, Clone, Copy, Debug, ValueEnum)]
pub(crate) enum PseudoBooleanEncoding {
/// Specifies the usage of the generalized totalizer encoding for pseudo-boolean constraints
/// \[1\].
///
/// # Bibliography
/// \[1] "Generalized totalizer encoding for pseudo-boolean constraints.", Joshi Saurabh, Ruben
/// Martins, Vasco Manquinho; CP '15
#[default]
GeneralizedTotalizer,
/// Specifies the usage of the cardinality network \[1\] encoding for unweighted cardinality
/// constraints in the form `x1 + ... + xn <= k`. The encoding is arc-consistent and
15 changes: 9 additions & 6 deletions pumpkin-solver/src/branching/branchers/autonomous_search.rs
@@ -249,12 +249,15 @@ impl<BackupSelector> AutonomousSearch<BackupSelector> {

impl<BackupBrancher: Brancher> Brancher for AutonomousSearch<BackupBrancher> {
fn next_decision(&mut self, context: &mut SelectionContext) -> Option<Predicate> {
self.next_candidate_predicate(context)
.map(|predicate| self.determine_polarity(predicate))
.or_else(|| {
// There are variables for which we do not have a predicate, rely on the backup
self.backup_brancher.next_decision(context)
})
let result = self
.next_candidate_predicate(context)
.map(|predicate| self.determine_polarity(predicate));
if result.is_none() && !context.are_all_variables_assigned() {
// There are variables for which we do not have a predicate, rely on the backup
self.backup_brancher.next_decision(context)
} else {
result
}
}

fn on_backtrack(&mut self) {
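The change above stops falling back to the backup brancher unconditionally: it is only consulted when no candidate predicate was found and unassigned variables remain. A toy, self-contained illustration of that control flow (the names and types are made up, not the solver's API):

fn next_decision(primary: Option<u32>, backup: Option<u32>, all_assigned: bool) -> Option<u32> {
    match primary {
        Some(p) => Some(p),
        // Only fall back while there is still something left to decide on.
        None if !all_assigned => backup,
        None => None,
    }
}

fn main() {
    assert_eq!(next_decision(Some(3), Some(7), false), Some(3));
    assert_eq!(next_decision(None, Some(7), false), Some(7)); // backup consulted
    assert_eq!(next_decision(None, Some(7), true), None); // everything assigned: no fallback
}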
@@ -190,7 +190,7 @@ impl ConflictResolver for ResolutionResolver {
context.proof_log,
context.unit_nogood_step_ids,
);
pumpkin_assert_simple!(predicate.is_lower_bound_predicate() , "If the final predicate in the conflict nogood is not a decision predicate then it should be a lower-bound predicate");
pumpkin_assert_simple!(predicate.is_lower_bound_predicate() , "If the final predicate in the conflict nogood is not a decision predicate then it should be a lower-bound predicate but was {predicate}");
pumpkin_assert_simple!(
reason.len() == 1 && reason[0].is_lower_bound_predicate(),
"The reason for the decision predicate should be a lower-bound predicate but was {}", reason[0]
3 changes: 2 additions & 1 deletion pumpkin-solver/src/propagators/nogoods/learning_options.rs
@@ -35,10 +35,11 @@ impl Default for LearningOptions {
}

/// The sorting strategy which is used when considering removal from the clause database.
#[derive(ValueEnum, Debug, Clone, Copy, PartialEq, Eq)]
#[derive(ValueEnum, Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum LearnedNogoodSortingStrategy {
/// Sorts based on the activity, the activity is bumped when a literal is encountered during
/// conflict analysis.
#[default]
Activity,
/// Sorts based on the literal block distance (LBD) which is an indication of how "good" a
/// learned clause is.
9 changes: 1 addition & 8 deletions pumpkin-solver/src/propagators/nogoods/nogood_propagator.rs
@@ -112,16 +112,9 @@ impl Propagator for NogoodPropagator {

let old_trail_position = context.assignments.trail.len() - 1;

// Because drain lazily removes and updates internal data structures, in case a conflict is
// detected and the loop exits, some elements might not get cleaned up properly.
//
// So we eager call each elements here by copying. Could think about avoiding this in the
// future.
let events: Vec<(IntDomainEvent, DomainId)> = self.enqueued_updates.drain().collect();

// We go over all of the events which we have been notified of to determine whether the
// watchers should be updated or whether a propagation can take place
for (update_event, updated_domain_id) in events {
for (update_event, updated_domain_id) in self.enqueued_updates.drain() {
NogoodPropagator::propagate_or_find_new_watcher(
&mut self.nogoods,
update_event,
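The comment removed above explains that the eager `collect()` existed because a lazily consumed drain might leave elements behind if the loop exits early. For comparison, the standard library's `Vec::drain` does remove the remaining elements when the iterator is dropped mid-iteration; whether the solver's `enqueued_updates` drain behaves the same way is not visible in this diff. A small stand-alone illustration of the `Vec::drain` behaviour:

fn main() {
    let mut events = vec![1, 2, 3, 4, 5];
    for e in events.drain(..) {
        if e == 3 {
            break; // simulate exiting the loop early, e.g. on a conflict
        }
    }
    // Dropping the Drain iterator also removed the elements never visited.
    assert!(events.is_empty());
}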
