From dfe426780680f85f70192dc9643cf7347ec76456 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 05:17:35 -0300 Subject: [PATCH 01/70] Rename `SymbolTable` to `HashMapStack` --- carcara/src/ast/deep_eq.rs | 20 ++++++++++---------- carcara/src/checker/elaboration/deep_eq.rs | 6 +++--- carcara/src/checker/elaboration/mod.rs | 6 +++--- carcara/src/parser/mod.rs | 6 +++--- carcara/src/utils.rs | 13 +++++-------- 5 files changed, 24 insertions(+), 27 deletions(-) diff --git a/carcara/src/ast/deep_eq.rs b/carcara/src/ast/deep_eq.rs index c518cc94..66206900 100644 --- a/carcara/src/ast/deep_eq.rs +++ b/carcara/src/ast/deep_eq.rs @@ -11,7 +11,7 @@ use super::{ BindingList, Identifier, Operator, ProofArg, ProofCommand, ProofStep, Rc, Sort, Subproof, Term, Terminal, }; -use crate::utils::SymbolTable; +use crate::utils::HashMapStack; use std::time::{Duration, Instant}; /// A trait that represents objects that can be compared for equality modulo reordering of @@ -88,12 +88,12 @@ pub struct DeepEqualityChecker { // are comparing the second argument of each term, `(< x y)` will again be `(< $0 $1)` in `a`, // but it will be `(< $1 $0)` in `b`. If we just rely on the cache, we will incorrectly // determine that `a` and `b` are alpha-equivalent. To account for that, we use a more - // complicated caching system, based on a `SymbolTable`. We push a new scope every time we enter - // a binder term, and pop it as we exit. This unfortunately means that equalities derived + // complicated caching system, based on a `HashMapStack`. We push a new scope every time we + // enter a binder term, and pop it as we exit. This unfortunately means that equalities derived // inside a binder term can't be reused outside of it, degrading performance. If we are not - // checking for alpha-equivalence, we never push an additional scope to this `SymbolTable`, - // meaning it functions as a simple hash set. 
- cache: SymbolTable<(Rc, Rc), ()>, + // checking for alpha-equivalence, we never push an additional scope to this `HashMapStack`, + // meaning it functions as a simple hash map. + cache: HashMapStack<(Rc, Rc), ()>, is_mod_reordering: bool, alpha_equiv_checker: Option, @@ -110,7 +110,7 @@ impl DeepEqualityChecker { pub fn new(is_mod_reordering: bool, is_alpha_equivalence: bool) -> Self { Self { is_mod_reordering, - cache: SymbolTable::new(), + cache: HashMapStack::new(), alpha_equiv_checker: if is_alpha_equivalence { Some(AlphaEquivalenceChecker::new()) } else { @@ -367,14 +367,14 @@ struct AlphaEquivalenceChecker { // that is bound second are assigned `$1`, etc. The given term would then be represented like // this: // `(forall ((x Int)) (and (exists ((y Int)) (> $0 $1)) (> $0 5)))` - indices: (SymbolTable, SymbolTable), + indices: (HashMapStack, HashMapStack), counter: Vec, // Holds the count of how many variables were bound before each depth } impl AlphaEquivalenceChecker { fn new() -> Self { Self { - indices: (SymbolTable::new(), SymbolTable::new()), + indices: (HashMapStack::new(), HashMapStack::new()), counter: vec![0], } } @@ -390,7 +390,7 @@ impl AlphaEquivalenceChecker { self.indices.0.pop_scope(); self.indices.1.pop_scope(); - // If we successfully popped the scopes from the symbol tables, that means that there was + // If we successfully popped the scopes from the indices stacks, that means that there was // at least one scope, so we can safely pop from the counter stack as well self.counter.pop(); } diff --git a/carcara/src/checker/elaboration/deep_eq.rs b/carcara/src/checker/elaboration/deep_eq.rs index 3b3809e1..08df6a6e 100644 --- a/carcara/src/checker/elaboration/deep_eq.rs +++ b/carcara/src/checker/elaboration/deep_eq.rs @@ -2,13 +2,13 @@ use super::*; use crate::{ ast::*, checker::context::ContextStack, - utils::{DedupIterator, SymbolTable}, + utils::{DedupIterator, HashMapStack}, }; pub struct DeepEqElaborator<'a> { inner: &'a mut Elaborator, 
root_id: &'a str, - cache: SymbolTable<(Rc, Rc), (usize, usize)>, + cache: HashMapStack<(Rc, Rc), (usize, usize)>, checker: DeepEqualityChecker, context: Option, } @@ -18,7 +18,7 @@ impl<'a> DeepEqElaborator<'a> { Self { inner, root_id, - cache: SymbolTable::new(), + cache: HashMapStack::new(), checker: DeepEqualityChecker::new(true, is_alpha_equivalence), context: is_alpha_equivalence.then(ContextStack::new), } diff --git a/carcara/src/checker/elaboration/mod.rs b/carcara/src/checker/elaboration/mod.rs index 46eb967d..959eb04d 100644 --- a/carcara/src/checker/elaboration/mod.rs +++ b/carcara/src/checker/elaboration/mod.rs @@ -3,7 +3,7 @@ mod deep_eq; mod diff; mod pruning; -use crate::{ast::*, utils::SymbolTable}; +use crate::{ast::*, utils::HashMapStack}; use accumulator::Accumulator; use deep_eq::DeepEqElaborator; use diff::{apply_diff, CommandDiff, ProofDiff}; @@ -33,7 +33,7 @@ impl Frame { #[derive(Debug)] pub struct Elaborator { stack: Vec, - seen_clauses: SymbolTable>, usize>, + seen_clauses: HashMapStack>, usize>, accumulator: Accumulator, } @@ -48,7 +48,7 @@ impl Elaborator { Self { stack: vec![Frame::default()], accumulator: Accumulator::new(), - seen_clauses: SymbolTable::new(), + seen_clauses: HashMapStack::new(), } } diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index bba32e6e..9748d992 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -9,7 +9,7 @@ pub use lexer::{Lexer, Position, Reserved, Token}; use crate::{ ast::*, - utils::{HashCache, SymbolTable}, + utils::{HashCache, HashMapStack}, CarcaraResult, Error, }; use ahash::{AHashMap, AHashSet}; @@ -72,10 +72,10 @@ enum AnchorArg { /// pool used by the parser. #[derive(Default)] struct ParserState { - symbol_table: SymbolTable, Rc>, + symbol_table: HashMapStack, Rc>, function_defs: AHashMap, sort_declarations: AHashMap, - step_ids: SymbolTable, usize>, + step_ids: HashMapStack, usize>, } /// A parser for the Alethe proof format. 
diff --git a/carcara/src/utils.rs b/carcara/src/utils.rs index 10f0a2ae..25dd7c44 100644 --- a/carcara/src/utils.rs +++ b/carcara/src/utils.rs @@ -101,11 +101,11 @@ impl AsRef for HashCache { } #[derive(Debug)] -pub struct SymbolTable { +pub struct HashMapStack { scopes: Vec>, } -impl SymbolTable { +impl HashMapStack { pub fn new() -> Self { Self { scopes: vec![AHashMap::new()] } } @@ -117,10 +117,7 @@ impl SymbolTable { pub fn pop_scope(&mut self) { match self.scopes.len() { 0 => unreachable!(), - 1 => { - log::error!("cannot pop last scope in symbol table"); - panic!(); - } + 1 => panic!("trying to pop last scope in `HashMapStack`"), _ => { self.scopes.pop().unwrap(); } @@ -128,7 +125,7 @@ impl SymbolTable { } } -impl SymbolTable { +impl HashMapStack { pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, @@ -161,7 +158,7 @@ impl SymbolTable { } } -impl Default for SymbolTable { +impl Default for HashMapStack { fn default() -> Self { Self::new() } From 5b65e5ee5d3de576d24dc4733f15e7524ff78747 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 05:18:09 -0300 Subject: [PATCH 02/70] Fix typo --- carcara/src/parser/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index 9748d992..a591e37c 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -709,7 +709,7 @@ impl<'a, R: BufRead> Parser<'a, R> { Vec::new() }; - // For some rules (notable the `subproof` rule), there is also a `:discharge` attribute that + // For some rules (notably the `subproof` rule), there is also a `:discharge` attribute that // takes a series of command ids, in addition to the regular premises let discharge = if self.current_token == Token::Keyword("discharge".into()) { self.next_token()?; From c5a23ac14e03d4d92924c30ced331fc6c115e9f3 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 06:14:50 -0300 Subject: [PATCH 03/70] Change terminology for polyequality Now, 
instead of using "deep equality", the code matches the terminology used in the paper. --- carcara/src/ast/mod.rs | 6 +- carcara/src/ast/{deep_eq.rs => polyeq.rs} | 218 +++++++++--------- carcara/src/ast/tests.rs | 12 +- carcara/src/benchmarking/mod.rs | 50 ++-- carcara/src/checker/elaboration/mod.rs | 10 +- .../elaboration/{deep_eq.rs => polyeq.rs} | 35 +-- carcara/src/checker/mod.rs | 24 +- carcara/src/checker/rules/clausification.rs | 9 +- carcara/src/checker/rules/extras.rs | 2 +- carcara/src/checker/rules/mod.rs | 14 +- carcara/src/checker/rules/quantifier.rs | 13 +- carcara/src/checker/rules/reflexivity.rs | 48 ++-- carcara/src/checker/rules/simplification.rs | 8 +- carcara/src/checker/rules/subproof.rs | 4 +- carcara/src/checker/rules/tautology.rs | 16 +- cli/src/benchmarking.rs | 6 +- cli/src/main.rs | 24 +- 17 files changed, 238 insertions(+), 261 deletions(-) rename carcara/src/ast/{deep_eq.rs => polyeq.rs} (63%) rename carcara/src/checker/elaboration/{deep_eq.rs => polyeq.rs} (91%) diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 424e59d6..0941737a 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -4,8 +4,8 @@ #[macro_use] mod macros; -mod deep_eq; mod iter; +mod polyeq; mod pool; pub(crate) mod printer; mod rc; @@ -13,14 +13,14 @@ mod substitution; #[cfg(test)] mod tests; -pub use deep_eq::{are_alpha_equivalent, deep_eq, tracing_deep_eq}; pub use iter::ProofIter; +pub use polyeq::{alpha_equiv, polyeq, tracing_polyeq}; pub use pool::TermPool; pub use printer::print_proof; pub use rc::Rc; pub use substitution::{Substitution, SubstitutionError}; -pub(crate) use deep_eq::{DeepEq, DeepEqualityChecker}; +pub(crate) use polyeq::{Polyeq, PolyeqComparator}; use crate::checker::error::CheckerError; use ahash::AHashSet; diff --git a/carcara/src/ast/deep_eq.rs b/carcara/src/ast/polyeq.rs similarity index 63% rename from carcara/src/ast/deep_eq.rs rename to carcara/src/ast/polyeq.rs index 66206900..6fe1af6f 100644 --- 
a/carcara/src/ast/deep_eq.rs +++ b/carcara/src/ast/polyeq.rs @@ -1,11 +1,11 @@ //! This module implements less strict definitions of equality for terms. In particular, it //! contains two definitions of equality that differ from `PartialEq`: //! -//! - `deep_eq` considers `=` terms that are reflections of each other as equal, meaning the terms +//! - `polyeq` considers `=` terms that are reflections of each other as equal, meaning the terms //! `(= a b)` and `(= b a)` are considered equal by this method. //! -//! - `are_alpha_equivalent` compares terms by alpha-equivalence, meaning it implements equality of -//! terms modulo renaming of bound variables. +//! - `alpha_equiv` compares terms by alpha-equivalence, meaning it implements equality of terms +//! modulo renaming of bound variables. use super::{ BindingList, Identifier, Operator, ProofArg, ProofCommand, ProofStep, Rc, Sort, Subproof, Term, @@ -16,8 +16,8 @@ use std::time::{Duration, Instant}; /// A trait that represents objects that can be compared for equality modulo reordering of /// equalities or alpha equivalence. -pub trait DeepEq { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool; +pub trait Polyeq { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool; } /// Computes whether the two given terms are equal, modulo reordering of equalities. @@ -26,28 +26,28 @@ pub trait DeepEq { /// equal, meaning terms like `(and p (= a b))` and `(and p (= b a))` are considered equal. /// /// This function records how long it takes to run, and adds that duration to the `time` argument. 
-pub fn deep_eq(a: &Rc, b: &Rc, time: &mut Duration) -> bool { +pub fn polyeq(a: &Rc, b: &Rc, time: &mut Duration) -> bool { let start = Instant::now(); - let result = DeepEq::eq(&mut DeepEqualityChecker::new(true, false), a, b); + let result = Polyeq::eq(&mut PolyeqComparator::new(true, false), a, b); *time += start.elapsed(); result } -/// Similar to `deep_eq`, but also records the maximum depth the deep equality checker reached when +/// Similar to `polyeq`, but also records the maximum depth the polyequal comparator reached when /// comparing the terms. /// /// This function records how long it takes to run, and adds that duration to the `time` argument. -pub fn tracing_deep_eq(a: &Rc, b: &Rc, time: &mut Duration) -> (bool, usize) { +pub fn tracing_polyeq(a: &Rc, b: &Rc, time: &mut Duration) -> (bool, usize) { let start = Instant::now(); - let mut checker = DeepEqualityChecker::new(true, false); - let result = DeepEq::eq(&mut checker, a, b); + let mut comp = PolyeqComparator::new(true, false); + let result = Polyeq::eq(&mut comp, a, b); *time += start.elapsed(); - (result, checker.max_depth) + (result, comp.max_depth) } -/// Similar to `deep_eq`, but instead compares terms for alpha equivalence. +/// Similar to `polyeq`, but instead compares terms for alpha equivalence. /// /// This means that two terms which are the same, except for the renaming of a bound variable, are /// considered equivalent. This functions still considers equality modulo reordering of equalities. @@ -55,21 +55,21 @@ pub fn tracing_deep_eq(a: &Rc, b: &Rc, time: &mut Duration) -> (bool /// Int)) (= 0 y))` as equivalent. /// /// This function records how long it takes to run, and adds that duration to the `time` argument. 
-pub fn are_alpha_equivalent(a: &Rc, b: &Rc, time: &mut Duration) -> bool { +pub fn alpha_equiv(a: &Rc, b: &Rc, time: &mut Duration) -> bool { let start = Instant::now(); // When we are checking for alpha-equivalence, we can't always assume that if `a` and `b` are - // identical, they are alpha-equivalent, so that optimization is not used in `DeepEq::eq`. + // identical, they are alpha-equivalent, so that optimization is not used in `Polyeq::eq`. // However, here at the "root" level this assumption is valid, so we check if the terms are // directly equal before doing anything else - let result = a == b || DeepEq::eq(&mut DeepEqualityChecker::new(true, true), a, b); + let result = a == b || Polyeq::eq(&mut PolyeqComparator::new(true, true), a, b); *time += start.elapsed(); result } -/// A configurable checker for equality modulo reordering of equalities and alpha equivalence. -pub struct DeepEqualityChecker { +/// A configurable comparator for polyequality and alpha equivalence. +pub struct PolyeqComparator { // In order to check alpha-equivalence, we can't use a simple global cache. For instance, let's // say we are comparing the following terms for alpha equivalence: // ``` @@ -95,24 +95,24 @@ pub struct DeepEqualityChecker { // meaning it functions as a simple hash map. cache: HashMapStack<(Rc, Rc), ()>, is_mod_reordering: bool, - alpha_equiv_checker: Option, + de_bruijn_map: Option, current_depth: usize, max_depth: usize, } -impl DeepEqualityChecker { - /// Constructs a new `DeepEqualityChecker`. +impl PolyeqComparator { + /// Constructs a new `PolyeqComparator`. /// - /// If `is_mod_reordering` is `true`, the checker will compare terms modulo reordering of - /// equalities. If `is_alpha_equivalence` is `true`, the checker will compare terms for alpha + /// If `is_mod_reordering` is `true`, the comparator will compare terms modulo reordering of + /// equalities. If `is_alpha_equivalence` is `true`, the comparator will compare terms for alpha /// equivalence. 
pub fn new(is_mod_reordering: bool, is_alpha_equivalence: bool) -> Self { Self { is_mod_reordering, cache: HashMapStack::new(), - alpha_equiv_checker: if is_alpha_equivalence { - Some(AlphaEquivalenceChecker::new()) + de_bruijn_map: if is_alpha_equivalence { + Some(DeBruijnMap::new()) } else { None }, @@ -121,109 +121,108 @@ impl DeepEqualityChecker { } } - fn check_binder( + fn compare_binder( &mut self, a_binds: &BindingList, b_binds: &BindingList, a_inner: &Rc, b_inner: &Rc, ) -> bool { - if let Some(alpha_checker) = self.alpha_equiv_checker.as_mut() { - // First, we push new scopes into the alpha-equivalence checker and the cache stack - alpha_checker.push(); + if let Some(de_bruijn_map) = self.de_bruijn_map.as_mut() { + // First, we push new scopes into the De Bruijn map and the cache stack + de_bruijn_map.push(); self.cache.push_scope(); // Then, we check that the binding lists and the inner terms are equivalent for (a_var, b_var) in a_binds.iter().zip(b_binds.iter()) { - if !DeepEq::eq(self, &a_var.1, &b_var.1) { - // We must remember to pop the frames from the alpha equivalence checker and - // cache stack here, so as not to leave them in a corrupted state - self.alpha_equiv_checker.as_mut().unwrap().pop(); + if !Polyeq::eq(self, &a_var.1, &b_var.1) { + // We must remember to pop the frames from the De Bruijn map and cache stack + // here, so as not to leave them in a corrupted state + self.de_bruijn_map.as_mut().unwrap().pop(); self.cache.pop_scope(); return false; } - // We also insert each variable in the binding lists into the alpha-equivalence - // checker - self.alpha_equiv_checker + // We also insert each variable in the binding lists into the De Bruijn map + self.de_bruijn_map .as_mut() .unwrap() .insert(a_var.0.clone(), b_var.0.clone()); } - let result = DeepEq::eq(self, a_inner, b_inner); + let result = Polyeq::eq(self, a_inner, b_inner); // Finally, we pop the scopes we pushed - self.alpha_equiv_checker.as_mut().unwrap().pop(); + 
self.de_bruijn_map.as_mut().unwrap().pop(); self.cache.pop_scope(); result } else { - DeepEq::eq(self, a_binds, b_binds) && DeepEq::eq(self, a_inner, b_inner) + Polyeq::eq(self, a_binds, b_binds) && Polyeq::eq(self, a_inner, b_inner) } } } -impl DeepEq for Rc { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for Rc { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { // If the two `Rc`s are directly equal, and we are not checking for alpha-equivalence, we // can return `true`. // Note that if we are checking for alpha-equivalence, identical terms may be considered // different, if the bound variables in them have different meanings. For example, in the // terms `(forall ((x Int) (y Int)) (< x y))` and `(forall ((y Int) (x Int)) (< x y))`, // even though both instances of `(< x y)` are identical, they are not alpha-equivalent. - if checker.alpha_equiv_checker.is_none() && a == b { + if comp.de_bruijn_map.is_none() && a == b { return true; } // We first check the cache to see if these terms were already determined to be equal - if checker.cache.get(&(a.clone(), b.clone())).is_some() { + if comp.cache.get(&(a.clone(), b.clone())).is_some() { return true; } - checker.current_depth += 1; - checker.max_depth = std::cmp::max(checker.max_depth, checker.current_depth); - let result = DeepEq::eq(checker, a.as_ref(), b.as_ref()); + comp.current_depth += 1; + comp.max_depth = std::cmp::max(comp.max_depth, comp.current_depth); + let result = Polyeq::eq(comp, a.as_ref(), b.as_ref()); if result { - checker.cache.insert((a.clone(), b.clone()), ()); + comp.cache.insert((a.clone(), b.clone()), ()); } - checker.current_depth -= 1; + comp.current_depth -= 1; result } } -impl DeepEq for Term { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for Term { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { match (a, b) { (Term::App(f_a, args_a), Term::App(f_b, args_b)) => { - 
DeepEq::eq(checker, f_a, f_b) && DeepEq::eq(checker, args_a, args_b) + Polyeq::eq(comp, f_a, f_b) && Polyeq::eq(comp, args_a, args_b) } (Term::Op(op_a, args_a), Term::Op(op_b, args_b)) => { - if checker.is_mod_reordering { + if comp.is_mod_reordering { if let (Operator::Equals, [a_1, a_2], Operator::Equals, [b_1, b_2]) = (op_a, args_a.as_slice(), op_b, args_b.as_slice()) { // If the term is an equality of two terms, we also check if they would be // equal if one of them was flipped - return DeepEq::eq(checker, &(a_1, a_2), &(b_1, b_2)) - || DeepEq::eq(checker, &(a_1, a_2), &(b_2, b_1)); + return Polyeq::eq(comp, &(a_1, a_2), &(b_1, b_2)) + || Polyeq::eq(comp, &(a_1, a_2), &(b_2, b_1)); } } // General case - op_a == op_b && DeepEq::eq(checker, args_a, args_b) + op_a == op_b && Polyeq::eq(comp, args_a, args_b) } - (Term::Sort(a), Term::Sort(b)) => DeepEq::eq(checker, a, b), + (Term::Sort(a), Term::Sort(b)) => Polyeq::eq(comp, a, b), (Term::Terminal(a), Term::Terminal(b)) => match (a, b) { // If we are checking for alpha-equivalence, and we encounter two variables, we - // check that they are equivalent using the alpha-equivalence checker + // check that they are equivalent using the De Bruijn map ( Terminal::Var(Identifier::Simple(a_var), a_sort), Terminal::Var(Identifier::Simple(b_var), b_sort), - ) if checker.alpha_equiv_checker.is_some() => { - let alpha = checker.alpha_equiv_checker.as_mut().unwrap(); - alpha.check(a_var, b_var) && DeepEq::eq(checker, a_sort, b_sort) + ) if comp.de_bruijn_map.is_some() => { + let alpha = comp.de_bruijn_map.as_mut().unwrap(); + alpha.compare(a_var, b_var) && Polyeq::eq(comp, a_sort, b_sort) } (Terminal::Var(iden_a, sort_a), Terminal::Var(iden_b, sort_b)) => { - iden_a == iden_b && DeepEq::eq(checker, sort_a, sort_b) + iden_a == iden_b && Polyeq::eq(comp, sort_a, sort_b) } (a, b) => a == b, }, @@ -231,124 +230,121 @@ impl DeepEq for Term { (Term::Quant(_, a_binds, a), Term::Quant(_, b_binds, b)) | (Term::Let(a_binds, a), 
Term::Let(b_binds, b)) | (Term::Lambda(a_binds, a), Term::Lambda(b_binds, b)) => { - checker.check_binder(a_binds, b_binds, a, b) + comp.compare_binder(a_binds, b_binds, a, b) } (Term::Choice(a_var, a), Term::Choice(b_var, b)) => { let a_binds = BindingList(vec![a_var.clone()]); let b_binds = BindingList(vec![b_var.clone()]); - checker.check_binder(&a_binds, &b_binds, a, b) + comp.compare_binder(&a_binds, &b_binds, a, b) } _ => false, } } } -impl DeepEq for BindingList { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { - DeepEq::eq(checker, &a.0, &b.0) +impl Polyeq for BindingList { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { + Polyeq::eq(comp, &a.0, &b.0) } } -impl DeepEq for Sort { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for Sort { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { match (a, b) { (Sort::Function(sorts_a), Sort::Function(sorts_b)) => { - DeepEq::eq(checker, sorts_a, sorts_b) + Polyeq::eq(comp, sorts_a, sorts_b) } (Sort::Atom(a, sorts_a), Sort::Atom(b, sorts_b)) => { - a == b && DeepEq::eq(checker, sorts_a, sorts_b) + a == b && Polyeq::eq(comp, sorts_a, sorts_b) } (Sort::Bool, Sort::Bool) | (Sort::Int, Sort::Int) | (Sort::Real, Sort::Real) | (Sort::String, Sort::String) => true, (Sort::Array(x_a, y_a), Sort::Array(x_b, y_b)) => { - DeepEq::eq(checker, x_a, x_b) && DeepEq::eq(checker, y_a, y_b) + Polyeq::eq(comp, x_a, x_b) && Polyeq::eq(comp, y_a, y_b) } _ => false, } } } -impl DeepEq for &T { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { - DeepEq::eq(checker, *a, *b) +impl Polyeq for &T { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { + Polyeq::eq(comp, *a, *b) } } -impl DeepEq for [T] { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { - a.len() == b.len() - && a.iter() - .zip(b.iter()) - .all(|(a, b)| DeepEq::eq(checker, a, b)) +impl Polyeq for [T] { + fn eq(comp: &mut 
PolyeqComparator, a: &Self, b: &Self) -> bool { + a.len() == b.len() && a.iter().zip(b.iter()).all(|(a, b)| Polyeq::eq(comp, a, b)) } } -impl DeepEq for Vec { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { - DeepEq::eq(checker, a.as_slice(), b.as_slice()) +impl Polyeq for Vec { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { + Polyeq::eq(comp, a.as_slice(), b.as_slice()) } } -impl DeepEq for (T, U) { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { - DeepEq::eq(checker, &a.0, &b.0) && DeepEq::eq(checker, &a.1, &b.1) +impl Polyeq for (T, U) { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { + Polyeq::eq(comp, &a.0, &b.0) && Polyeq::eq(comp, &a.1, &b.1) } } -impl DeepEq for String { - fn eq(_: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for String { + fn eq(_: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { a == b } } -impl DeepEq for ProofArg { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for ProofArg { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { match (a, b) { - (ProofArg::Term(a), ProofArg::Term(b)) => DeepEq::eq(checker, a, b), + (ProofArg::Term(a), ProofArg::Term(b)) => Polyeq::eq(comp, a, b), (ProofArg::Assign(sa, ta), ProofArg::Assign(sb, tb)) => { - sa == sb && DeepEq::eq(checker, ta, tb) + sa == sb && Polyeq::eq(comp, ta, tb) } _ => false, } } } -impl DeepEq for ProofCommand { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for ProofCommand { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { match (a, b) { ( ProofCommand::Assume { id: a_id, term: a_term }, ProofCommand::Assume { id: b_id, term: b_term }, - ) => a_id == b_id && DeepEq::eq(checker, a_term, b_term), - (ProofCommand::Step(a), ProofCommand::Step(b)) => DeepEq::eq(checker, a, b), - (ProofCommand::Subproof(a), ProofCommand::Subproof(b)) => DeepEq::eq(checker, a, b), 
+ ) => a_id == b_id && Polyeq::eq(comp, a_term, b_term), + (ProofCommand::Step(a), ProofCommand::Step(b)) => Polyeq::eq(comp, a, b), + (ProofCommand::Subproof(a), ProofCommand::Subproof(b)) => Polyeq::eq(comp, a, b), _ => false, } } } -impl DeepEq for ProofStep { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { +impl Polyeq for ProofStep { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { a.id == b.id - && DeepEq::eq(checker, &a.clause, &b.clause) + && Polyeq::eq(comp, &a.clause, &b.clause) && a.rule == b.rule && a.premises == b.premises - && DeepEq::eq(checker, &a.args, &b.args) + && Polyeq::eq(comp, &a.args, &b.args) && a.discharge == b.discharge } } -impl DeepEq for Subproof { - fn eq(checker: &mut DeepEqualityChecker, a: &Self, b: &Self) -> bool { - DeepEq::eq(checker, &a.commands, &b.commands) - && DeepEq::eq(checker, &a.assignment_args, &b.assignment_args) - && DeepEq::eq(checker, &a.variable_args, &b.variable_args) +impl Polyeq for Subproof { + fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { + Polyeq::eq(comp, &a.commands, &b.commands) + && Polyeq::eq(comp, &a.assignment_args, &b.assignment_args) + && Polyeq::eq(comp, &a.variable_args, &b.variable_args) } } -struct AlphaEquivalenceChecker { +struct DeBruijnMap { // To check for alpha-equivalence, we make use of De Bruijn indices. The idea is to map each // bound variable to an integer depending on the order in which they were bound. 
As we compare // the two terms, if we encounter two bound variables, we need only to check if the associated @@ -368,10 +364,12 @@ struct AlphaEquivalenceChecker { // this: // `(forall ((x Int)) (and (exists ((y Int)) (> $0 $1)) (> $0 5)))` indices: (HashMapStack, HashMapStack), - counter: Vec, // Holds the count of how many variables were bound before each depth + + // Holds the count of how many variables were bound before each depth + counter: Vec, } -impl AlphaEquivalenceChecker { +impl DeBruijnMap { fn new() -> Self { Self { indices: (HashMapStack::new(), HashMapStack::new()), @@ -402,7 +400,7 @@ impl AlphaEquivalenceChecker { *current += 1; } - fn check(&self, a: &str, b: &str) -> bool { + fn compare(&self, a: &str, b: &str) -> bool { match (self.indices.0.get(a), self.indices.1.get(b)) { // If both a and b are free variables, they need to have the same name (None, None) => a == b, diff --git a/carcara/src/ast/tests.rs b/carcara/src/ast/tests.rs index 5343c524..35f032b8 100644 --- a/carcara/src/ast/tests.rs +++ b/carcara/src/ast/tests.rs @@ -37,9 +37,9 @@ fn test_free_vars() { } #[test] -fn test_deep_eq() { +fn test_polyeq() { enum TestType { - ModReordering, + Polyeq, AlphaEquiv, } @@ -49,11 +49,11 @@ fn test_deep_eq() { let [a, b] = parse_terms(&mut pool, definitions, [a, b]); let mut time = std::time::Duration::ZERO; match test_type { - TestType::ModReordering => { - assert!(super::deep_eq::deep_eq(&a, &b, &mut time)); + TestType::Polyeq => { + assert!(super::polyeq::polyeq(&a, &b, &mut time)); } TestType::AlphaEquiv => { - assert!(super::deep_eq::are_alpha_equivalent(&a, &b, &mut time)); + assert!(super::polyeq::alpha_equiv(&a, &b, &mut time)); } } } @@ -77,7 +77,7 @@ fn test_deep_eq() { "(ite (= b a) (= (+ x y) x) (and p (not (= y x))))", ), ], - TestType::ModReordering, + TestType::Polyeq, ); run_tests( definitions, diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index fc36fb35..21b6013e 100644 --- 
a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -50,7 +50,7 @@ pub struct RunMeasurement { pub checking: Duration, pub elaboration: Duration, pub total: Duration, - pub deep_eq: Duration, + pub polyeq: Duration, pub assume: Duration, pub assume_core: Duration, } @@ -58,7 +58,7 @@ pub struct RunMeasurement { // Higher kinded types would be very useful here. Ideally, I would like `BenchmarkResults` to be // generic on any kind that implements `Metrics`, like `OnlineMetrics` or `OfflineMetrics`. #[derive(Debug, Default)] -pub struct BenchmarkResults { +pub struct BenchmarkResults { pub parsing: ByRun, pub checking: ByRun, pub elaborating: ByRun, @@ -68,13 +68,13 @@ pub struct BenchmarkResults { pub step_time_by_file: AHashMap, pub step_time_by_rule: AHashMap, - pub deep_eq_time: ByRun, - pub deep_eq_time_ratio: ByRunF64, + pub polyeq_time: ByRun, + pub polyeq_time_ratio: ByRunF64, pub assume_time: ByRun, pub assume_time_ratio: ByRunF64, pub assume_core_time: ByRun, - pub deep_eq_depths: ByDeepEq, + pub polyeq_depths: ByPolyeq, pub num_assumes: usize, pub num_easy_assumes: usize, @@ -96,12 +96,12 @@ pub type OfflineBenchmarkResults = BenchmarkResults< OfflineMetrics<(), usize>, >; -impl BenchmarkResults +impl BenchmarkResults where ByRun: Metrics + Default, ByStep: Metrics + Default, ByRunF64: Metrics + Default, - ByDeepEq: Metrics<(), usize> + Default, + ByPolyeq: Metrics<(), usize> + Default, { pub fn new() -> Self { Default::default() @@ -190,12 +190,12 @@ impl CsvBenchmarkResults { writeln!( dest, "proof_file,run_id,parsing,checking,elaboration,total_accounted_for,\ - total,deep_eq,deep_eq_ratio,assume,assume_ratio" + total,polyeq,polyeq_ratio,assume,assume_ratio" )?; for (id, m) in data { let total_accounted_for = m.parsing + m.checking; - let deep_eq_ratio = m.deep_eq.as_secs_f64() / m.checking.as_secs_f64(); + let polyeq_ratio = m.polyeq.as_secs_f64() / m.checking.as_secs_f64(); let assume_ratio = m.assume.as_secs_f64() / 
m.checking.as_secs_f64(); writeln!( dest, @@ -207,8 +207,8 @@ impl CsvBenchmarkResults { m.elaboration.as_nanos(), total_accounted_for.as_nanos(), m.total.as_nanos(), - m.deep_eq.as_nanos(), - deep_eq_ratio, + m.polyeq.as_nanos(), + polyeq_ratio, m.assume.as_nanos(), assume_ratio, )?; @@ -252,7 +252,7 @@ impl CsvBenchmarkResults { pub trait CollectResults { fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration); fn add_assume_measurement(&mut self, file: &str, id: &str, is_easy: bool, time: Duration); - fn add_deep_eq_depth(&mut self, depth: usize); + fn add_polyeq_depth(&mut self, depth: usize); fn add_run_measurement(&mut self, id: &RunId, measurement: RunMeasurement); fn register_holey(&mut self); fn register_error(&mut self, error: &crate::Error); @@ -262,13 +262,13 @@ pub trait CollectResults { Self: Sized; } -impl CollectResults - for BenchmarkResults +impl CollectResults + for BenchmarkResults where ByRun: Metrics + Default, ByStep: Metrics + Default, ByRunF64: Metrics + Default, - ByDeepEq: Metrics<(), usize> + Default, + ByPolyeq: Metrics<(), usize> + Default, { fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration) { let file = file.to_owned(); @@ -295,8 +295,8 @@ where self.add_step_measurement(file, id, "assume", time); } - fn add_deep_eq_depth(&mut self, depth: usize) { - self.deep_eq_depths.add_sample(&(), depth); + fn add_polyeq_depth(&mut self, depth: usize) { + self.polyeq_depths.add_sample(&(), depth); } fn add_run_measurement(&mut self, id: &RunId, measurement: RunMeasurement) { @@ -305,7 +305,7 @@ where checking, elaboration, total, - deep_eq, + polyeq, assume, assume_core, } = measurement; @@ -316,13 +316,13 @@ where self.total_accounted_for.add_sample(id, parsing + checking); self.total.add_sample(id, total); - self.deep_eq_time.add_sample(id, deep_eq); + self.polyeq_time.add_sample(id, polyeq); self.assume_time.add_sample(id, assume); self.assume_core_time.add_sample(id, 
assume_core); - let deep_eq_ratio = deep_eq.as_secs_f64() / checking.as_secs_f64(); + let polyeq_ratio = polyeq.as_secs_f64() / checking.as_secs_f64(); let assume_ratio = assume.as_secs_f64() / checking.as_secs_f64(); - self.deep_eq_time_ratio.add_sample(id, deep_eq_ratio); + self.polyeq_time_ratio.add_sample(id, polyeq_ratio); self.assume_time_ratio.add_sample(id, assume_ratio); } @@ -337,13 +337,13 @@ where step_time_by_file: combine_map(a.step_time_by_file, b.step_time_by_file), step_time_by_rule: combine_map(a.step_time_by_rule, b.step_time_by_rule), - deep_eq_time: a.deep_eq_time.combine(b.deep_eq_time), - deep_eq_time_ratio: a.deep_eq_time_ratio.combine(b.deep_eq_time_ratio), + polyeq_time: a.polyeq_time.combine(b.polyeq_time), + polyeq_time_ratio: a.polyeq_time_ratio.combine(b.polyeq_time_ratio), assume_time: a.assume_time.combine(b.assume_time), assume_time_ratio: a.assume_time_ratio.combine(b.assume_time_ratio), assume_core_time: a.assume_core_time.combine(b.assume_core_time), - deep_eq_depths: a.deep_eq_depths.combine(b.deep_eq_depths), + polyeq_depths: a.polyeq_depths.combine(b.polyeq_depths), num_assumes: a.num_assumes + b.num_assumes, num_easy_assumes: a.num_easy_assumes + b.num_easy_assumes, is_holey: a.is_holey || b.is_holey, @@ -377,7 +377,7 @@ impl CollectResults for CsvBenchmarkResults { self.add_step_measurement(file, id, "assume", time); } - fn add_deep_eq_depth(&mut self, _: usize) {} + fn add_polyeq_depth(&mut self, _: usize) {} fn add_run_measurement(&mut self, id: &RunId, measurement: RunMeasurement) { self.runs.insert(id.clone(), measurement); diff --git a/carcara/src/checker/elaboration/mod.rs b/carcara/src/checker/elaboration/mod.rs index 959eb04d..90c8305b 100644 --- a/carcara/src/checker/elaboration/mod.rs +++ b/carcara/src/checker/elaboration/mod.rs @@ -1,12 +1,12 @@ mod accumulator; -mod deep_eq; mod diff; +mod polyeq; mod pruning; use crate::{ast::*, utils::HashMapStack}; use accumulator::Accumulator; -use deep_eq::DeepEqElaborator; 
use diff::{apply_diff, CommandDiff, ProofDiff}; +use polyeq::PolyeqElaborator; use pruning::prune_proof; #[derive(Debug, Default)] @@ -236,7 +236,7 @@ impl Elaborator { self.add_new_step(step) } - pub fn elaborate_deep_eq( + pub fn elaborate_polyeq( &mut self, pool: &mut TermPool, root_id: &str, @@ -244,7 +244,7 @@ impl Elaborator { b: Rc, is_alpha_equivalence: bool, ) -> (usize, usize) { - DeepEqElaborator::new(self, root_id, is_alpha_equivalence).elaborate(pool, a, b) + PolyeqElaborator::new(self, root_id, is_alpha_equivalence).elaborate(pool, a, b) } pub fn elaborate_assume( @@ -261,7 +261,7 @@ impl Elaborator { }, false, ); - let equality_step = self.elaborate_deep_eq(pool, id, premise.clone(), term.clone(), false); + let equality_step = self.elaborate_polyeq(pool, id, premise.clone(), term.clone(), false); let equiv1_step = { let new_id = self.get_new_id(id); let clause = vec![build_term!(pool, (not {premise.clone()})), term.clone()]; diff --git a/carcara/src/checker/elaboration/deep_eq.rs b/carcara/src/checker/elaboration/polyeq.rs similarity index 91% rename from carcara/src/checker/elaboration/deep_eq.rs rename to carcara/src/checker/elaboration/polyeq.rs index 08df6a6e..cca43bbc 100644 --- a/carcara/src/checker/elaboration/deep_eq.rs +++ b/carcara/src/checker/elaboration/polyeq.rs @@ -5,21 +5,21 @@ use crate::{ utils::{DedupIterator, HashMapStack}, }; -pub struct DeepEqElaborator<'a> { +pub struct PolyeqElaborator<'a> { inner: &'a mut Elaborator, root_id: &'a str, cache: HashMapStack<(Rc, Rc), (usize, usize)>, - checker: DeepEqualityChecker, + checker: PolyeqComparator, context: Option, } -impl<'a> DeepEqElaborator<'a> { +impl<'a> PolyeqElaborator<'a> { pub fn new(inner: &'a mut Elaborator, root_id: &'a str, is_alpha_equivalence: bool) -> Self { Self { inner, root_id, cache: HashMapStack::new(), - checker: DeepEqualityChecker::new(true, is_alpha_equivalence), + checker: PolyeqComparator::new(true, is_alpha_equivalence), context: 
is_alpha_equivalence.then(ContextStack::new), } } @@ -48,7 +48,7 @@ impl<'a> DeepEqElaborator<'a> { if let Some((a_left, a_right)) = match_term!((= x y) = a) { if let Some((b_left, b_right)) = match_term!((= x y) = b) { - if self.deep_eq(pool, a_left, b_right) && self.deep_eq(pool, a_right, b_left) { + if self.polyeq(pool, a_left, b_right) && self.polyeq(pool, a_right, b_left) { let [a_left, a_right, b_left, b_right] = [a_left, a_right, b_left, b_right].map(Clone::clone); return self.flip_equality(pool, (a, a_left, a_right), (b, b_left, b_right)); @@ -173,14 +173,14 @@ impl<'a> DeepEqElaborator<'a> { } // Since `choice` and `lambda` terms are not in the SMT-LIB standard, they cannot appear - // in the premises of a proof, so we would never need to elaborate deep equalities that + // in the premises of a proof, so we would never need to elaborate polyequalities that // use these terms. (Term::Choice(_, _), Term::Choice(_, _)) => { - log::error!("Trying to elaborate deep equality between `choice` terms"); + log::error!("Trying to elaborate polyequality between `choice` terms"); panic!() } (Term::Lambda(_, _), Term::Lambda(_, _)) => { - log::error!("Trying to elaborate deep equality between `lambda` terms"); + log::error!("Trying to elaborate polyequality between `lambda` terms"); panic!() } _ => panic!("terms not equal!"), @@ -197,10 +197,10 @@ impl<'a> DeepEqElaborator<'a> { /// Returns `true` if the terms are equal modulo reordering of inequalities, and modulo /// application of the current context. 
- fn deep_eq(&mut self, pool: &mut TermPool, a: &Rc, b: &Rc) -> bool { + fn polyeq(&mut self, pool: &mut TermPool, a: &Rc, b: &Rc) -> bool { match &mut self.context { - Some(c) => DeepEq::eq(&mut self.checker, &c.apply(pool, a), b), - None => DeepEq::eq(&mut self.checker, a, b), + Some(c) => Polyeq::eq(&mut self.checker, &c.apply(pool, a), b), + None => Polyeq::eq(&mut self.checker, a, b), } } @@ -253,12 +253,13 @@ impl<'a> DeepEqElaborator<'a> { // reordering of equalities, or if they are equal modulo the application of the current // context (in the case of alpha equivalence). // - // In this case, we need to elaborate the deep equality between x and x' (or y and y'), and - // from that, prove that `(= (= x y) (= y' x))`. We do that by first proving that `(= x x')` - // (1) and `(= y y')` (2). Then, we introduce a `cong` step that uses (1) and (2) to show - // that `(= (= x y) (= x' y'))` (3). After that, we add an `equiv_simplify` step that - // derives `(= (= x' y') (= y' x'))` (4). Finally, we introduce a `trans` step with premises - // (3) and (4) that proves `(= (= x y) (= y' x'))`. The general format looks like this: + // In this case, we need to elaborate the polyequality between x and x' (or y and y'), and + // from that, prove that `(= (= x y) (= y' x'))`. We do that by first proving that + // `(= x x')` (1) and `(= y y')` (2). Then, we introduce a `cong` step that uses (1) and (2) + // to show that `(= (= x y) (= x' y'))` (3). After that, we add an `equiv_simplify` step + // that derives `(= (= x' y') (= y' x'))` (4). Finally, we introduce a `trans` step with + // premises (3) and (4) that proves `(= (= x y) (= y' x'))`. The general format looks like + // this: // // ... // (step t1 (cl (= x x')) ...) 
diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 18f9f3d1..56af21c4 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -18,7 +18,7 @@ use std::{ pub struct CheckerStatistics<'s> { pub file_name: &'s str, pub elaboration_time: &'s mut Duration, - pub deep_eq_time: &'s mut Duration, + pub polyeq_time: &'s mut Duration, pub assume_time: &'s mut Duration, // This is the time to compare the `assume` term with the `assert` that matches it. That is, @@ -34,7 +34,7 @@ impl fmt::Debug for CheckerStatistics<'_> { f.debug_struct("CheckerStatistics") .field("file_name", &self.file_name) .field("elaboration_time", &self.elaboration_time) - .field("deep_eq_time", &self.deep_eq_time) + .field("polyeq_time", &self.polyeq_time) .field("assume_time", &self.assume_time) .field("assume_core_time", &self.assume_core_time) .finish() @@ -243,18 +243,18 @@ impl<'c> ProofChecker<'c> { } let mut found = None; - let mut deep_eq_time = Duration::ZERO; + let mut polyeq_time = Duration::ZERO; let mut core_time = Duration::ZERO; for p in premises { - let mut this_deep_eq_time = Duration::ZERO; - let (result, depth) = tracing_deep_eq(term, p, &mut this_deep_eq_time); - deep_eq_time += this_deep_eq_time; + let mut this_polyeq_time = Duration::ZERO; + let (result, depth) = tracing_polyeq(term, p, &mut this_polyeq_time); + polyeq_time += this_polyeq_time; if let Some(s) = &mut self.config.statistics { - s.results.add_deep_eq_depth(depth); + s.results.add_polyeq_depth(depth); } if result { - core_time = this_deep_eq_time; + core_time = this_polyeq_time; found = Some(p.clone()); break; } @@ -276,7 +276,7 @@ impl<'c> ProofChecker<'c> { let time = time.elapsed(); *s.assume_time += time; *s.assume_core_time += core_time; - *s.deep_eq_time += deep_eq_time; + *s.polyeq_time += polyeq_time; s.results .add_assume_measurement(s.file_name, id, false, time); } @@ -291,7 +291,7 @@ impl<'c> ProofChecker<'c> { iter: &'a ProofIter<'a>, ) -> RuleResult { let time = 
Instant::now(); - let mut deep_eq_time = Duration::ZERO; + let mut polyeq_time = Duration::ZERO; let mut elaborated = false; if step.rule == "lia_generic" { @@ -351,7 +351,7 @@ impl<'c> ProofChecker<'c> { context: &mut self.context, previous_command, discharge: &discharge, - deep_eq_time: &mut deep_eq_time, + polyeq_time: &mut polyeq_time, }; if let Some(elaborator) = &mut self.elaborator { @@ -371,7 +371,7 @@ impl<'c> ProofChecker<'c> { let time = time.elapsed(); s.results .add_step_measurement(s.file_name, &step.id, &step.rule, time); - *s.deep_eq_time += deep_eq_time; + *s.polyeq_time += polyeq_time; if elaborated { *s.elaboration_time += time; } diff --git a/carcara/src/checker/rules/clausification.rs b/carcara/src/checker/rules/clausification.rs index c21b0550..60cb5b8f 100644 --- a/carcara/src/checker/rules/clausification.rs +++ b/carcara/src/checker/rules/clausification.rs @@ -1,7 +1,6 @@ use super::{ - assert_clause_len, assert_deep_eq_is_expected, assert_eq, assert_is_expected, - assert_num_premises, assert_operation_len, get_premise_term, CheckerError, EqualityError, - RuleArgs, RuleResult, + assert_clause_len, assert_eq, assert_is_expected, assert_num_premises, assert_operation_len, + assert_polyeq_expected, get_premise_term, CheckerError, EqualityError, RuleArgs, RuleResult, }; use crate::ast::*; use ahash::AHashMap; @@ -385,7 +384,7 @@ pub fn bfun_elim( conclusion, premises, pool, - deep_eq_time, + polyeq_time, .. 
}: RuleArgs, ) -> RuleResult { @@ -395,7 +394,7 @@ pub fn bfun_elim( let psi = get_premise_term(&premises[0])?; let expected = apply_bfun_elim(pool, psi, &mut AHashMap::new())?; - assert_deep_eq_is_expected(&conclusion[0], expected, deep_eq_time) + assert_polyeq_expected(&conclusion[0], expected, polyeq_time) } #[cfg(test)] diff --git a/carcara/src/checker/rules/extras.rs b/carcara/src/checker/rules/extras.rs index a7afd7aa..5358af3c 100644 --- a/carcara/src/checker/rules/extras.rs +++ b/carcara/src/checker/rules/extras.rs @@ -317,7 +317,7 @@ mod tests { (step t1.t1 (cl (= x y)) :rule hole) (step t1 (cl (= (let ((a 0)) x) (let ((b 0)) y))) :rule bind_let)": false, } - "Deep equality in variable values" { + "Polyequality in variable values" { "(anchor :step t1 :args ((x Int) (y Int))) (step t1.t1 (cl (= (= 0 1) (= 1 0))) :rule hole) (step t1.t2 (cl (= x y)) :rule hole) diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index e244c6b9..e6ef561b 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -27,7 +27,7 @@ pub struct RuleArgs<'a> { pub(super) previous_command: Option>, pub(super) discharge: &'a [&'a ProofCommand], - pub(super) deep_eq_time: &'a mut Duration, + pub(super) polyeq_time: &'a mut Duration, } #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] @@ -128,19 +128,15 @@ where Ok(()) } -fn assert_deep_eq(a: &Rc, b: &Rc, time: &mut Duration) -> Result<(), CheckerError> { - if !deep_eq(a, b, time) { +fn assert_polyeq(a: &Rc, b: &Rc, time: &mut Duration) -> Result<(), CheckerError> { + if !polyeq(a, b, time) { return Err(EqualityError::ExpectedEqual(a.clone(), b.clone()).into()); } Ok(()) } -fn assert_deep_eq_is_expected( - got: &Rc, - expected: Rc, - time: &mut Duration, -) -> RuleResult { - if !deep_eq(got, &expected, time) { +fn assert_polyeq_expected(got: &Rc, expected: Rc, time: &mut Duration) -> RuleResult { + if !polyeq(got, &expected, time) { return Err(EqualityError::ExpectedToBe { 
expected, got: got.clone() }.into()); } Ok(()) diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index be63a46b..02740237 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -1,5 +1,5 @@ use super::{ - assert_clause_len, assert_deep_eq_is_expected, assert_eq, assert_is_expected, assert_num_args, + assert_clause_len, assert_eq, assert_is_expected, assert_num_args, assert_polyeq_expected, CheckerError, RuleArgs, RuleResult, }; use crate::{ast::*, checker::error::QuantifierError, utils::DedupIterator}; @@ -7,11 +7,7 @@ use ahash::{AHashMap, AHashSet}; pub fn forall_inst( RuleArgs { - conclusion, - args, - pool, - deep_eq_time, - .. + conclusion, args, pool, polyeq_time, .. }: RuleArgs, ) -> RuleResult { assert_clause_len(conclusion, 1)?; @@ -46,10 +42,9 @@ pub fn forall_inst( QuantifierError::NoArgGivenForBinding(bindings.iter().next().unwrap().0.clone()) ); - // Equalities may be reordered in the final term, so we need to use deep equality modulo - // reordering + // Equalities may be reordered in the final term, so we need to compare for polyequality here let expected = substitution.apply(pool, original); - assert_deep_eq_is_expected(substituted, expected, deep_eq_time) + assert_polyeq_expected(substituted, expected, polyeq_time) } pub fn qnt_join(RuleArgs { conclusion, .. }: RuleArgs) -> RuleResult { diff --git a/carcara/src/checker/rules/reflexivity.rs b/carcara/src/checker/rules/reflexivity.rs index a826f302..c5e9ad4e 100644 --- a/carcara/src/checker/rules/reflexivity.rs +++ b/carcara/src/checker/rules/reflexivity.rs @@ -12,7 +12,7 @@ pub fn refl( conclusion, pool, context, - deep_eq_time, + polyeq_time, .. }: RuleArgs, ) -> RuleResult { @@ -23,7 +23,7 @@ pub fn refl( // If the two terms are directly identical, we don't need to do any more work. 
We make sure to // do this check before we try to get the context substitution, because `refl` can be used // outside of any subproof - if are_alpha_equivalent(left, right, deep_eq_time) { + if alpha_equiv(left, right, polyeq_time) { return Ok(()); } @@ -36,10 +36,10 @@ pub fn refl( // don't compute the new left and right terms until they are needed, to avoid doing unnecessary // work let new_left = context.apply(pool, left); - let result = are_alpha_equivalent(&new_left, right, deep_eq_time) || { + let result = alpha_equiv(&new_left, right, polyeq_time) || { let new_right = context.apply(pool, right); - are_alpha_equivalent(left, &new_right, deep_eq_time) - || are_alpha_equivalent(&new_left, &new_right, deep_eq_time) + alpha_equiv(left, &new_right, polyeq_time) + || alpha_equiv(&new_left, &new_right, polyeq_time) }; rassert!( result, @@ -79,10 +79,10 @@ fn elaborate_equality( left: &Rc, right: &Rc, id: &str, - deep_eq_time: &mut std::time::Duration, + polyeq_time: &mut std::time::Duration, ) -> (usize, usize) { - let is_alpha_equivalence = !deep_eq(left, right, deep_eq_time); - elaborator.elaborate_deep_eq(pool, id, left.clone(), right.clone(), is_alpha_equivalence) + let is_alpha_equivalence = !polyeq(left, right, polyeq_time); + elaborator.elaborate_polyeq(pool, id, left.clone(), right.clone(), is_alpha_equivalence) } pub fn elaborate_refl( @@ -90,7 +90,7 @@ pub fn elaborate_refl( conclusion, pool, context, - deep_eq_time, + polyeq_time, .. }: RuleArgs, command_id: String, @@ -122,12 +122,12 @@ pub fn elaborate_refl( // directly. In the second case, we need to first apply the context to the left term, using a // `refl` step, and then prove the equivalence of the new left term with the right term. In the // third case, we also need to apply the context to the right term, using another `refl` step. 
- if are_alpha_equivalent(left, right, deep_eq_time) { + if alpha_equiv(left, right, polyeq_time) { let equality_step = - elaborate_equality(elaborator, pool, left, right, &command_id, deep_eq_time); + elaborate_equality(elaborator, pool, left, right, &command_id, polyeq_time); let id = elaborator.get_new_id(&command_id); - // TODO: Elaborating the deep equality will add new commands to the accumulator, but + // TODO: Elaborating the polyequality will add new commands to the accumulator, but // currently we can't push them as the elaborated step directly, so we need to add this // dummy `reordering` step. elaborator.push_elaborated_step(ProofStep { @@ -142,15 +142,9 @@ pub fn elaborate_refl( let id = elaborator.get_new_id(&command_id); let first_step = elaborator.add_refl_step(pool, left.clone(), new_left.clone(), id); - if are_alpha_equivalent(&new_left, right, deep_eq_time) { - let second_step = elaborate_equality( - elaborator, - pool, - &new_left, - right, - &command_id, - deep_eq_time, - ); + if alpha_equiv(&new_left, right, polyeq_time) { + let second_step = + elaborate_equality(elaborator, pool, &new_left, right, &command_id, polyeq_time); let id = elaborator.get_new_id(&command_id); elaborator.push_elaborated_step(ProofStep { id, @@ -160,15 +154,9 @@ pub fn elaborate_refl( args: Vec::new(), discharge: Vec::new(), }); - } else if are_alpha_equivalent(&new_left, &new_right, deep_eq_time) { - let second_step = elaborate_equality( - elaborator, - pool, - &new_left, - right, - &command_id, - deep_eq_time, - ); + } else if alpha_equiv(&new_left, &new_right, polyeq_time) { + let second_step = + elaborate_equality(elaborator, pool, &new_left, right, &command_id, polyeq_time); let id = elaborator.get_new_id(&command_id); let third_step = elaborator.add_refl_step(pool, new_right.clone(), right.clone(), id); diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index 9e33e096..f53a4062 100644 --- 
a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -349,10 +349,10 @@ pub fn equiv_simplify(args: RuleArgs) -> RuleResult { (= phi_1 false): (phi_1, _) => build_term!(pool, (not {phi_1.clone()})), // This is a special case for the `equiv_simplify` rule that was added to make - // elaboration of deep equalities less verbose. This transformation can very easily lead - // to cycles, so it must always be the last transformation rule. Unfortunately, this - // means that failed simplifications in the `equiv_simplify` rule will frequently reach - // this transformation and reach a cycle, in which case the error message may be a bit + // elaboration of polyequality less verbose. This transformation can very easily lead to + // cycles, so it must always be the last transformation rule. Unfortunately, this means + // that failed simplifications in the `equiv_simplify` rule will frequently reach this + // transformation and reach a cycle, in which case the error message may be a bit // confusing. // // phi_1 = phi_2 => phi_2 = phi_1 diff --git a/carcara/src/checker/rules/subproof.rs b/carcara/src/checker/rules/subproof.rs index 060b083b..909c5c3c 100644 --- a/carcara/src/checker/rules/subproof.rs +++ b/carcara/src/checker/rules/subproof.rs @@ -330,7 +330,7 @@ fn generic_skolemization_rule( pool, context, previous_command, - deep_eq_time, + polyeq_time, .. 
}: RuleArgs, ) -> RuleResult { @@ -382,7 +382,7 @@ fn generic_skolemization_rule( } pool.add(Term::Choice(x.clone(), inner)) }; - if !are_alpha_equivalent(t, &expected, deep_eq_time) { + if !alpha_equiv(t, &expected, polyeq_time) { return Err(EqualityError::ExpectedEqual(t.clone(), expected).into()); } diff --git a/carcara/src/checker/rules/tautology.rs b/carcara/src/checker/rules/tautology.rs index 508127f5..ecbd17db 100644 --- a/carcara/src/checker/rules/tautology.rs +++ b/carcara/src/checker/rules/tautology.rs @@ -1,5 +1,5 @@ use super::{ - assert_clause_len, assert_deep_eq, assert_eq, assert_num_premises, get_premise_term, + assert_clause_len, assert_eq, assert_num_premises, assert_polyeq, get_premise_term, CheckerError, RuleArgs, RuleResult, }; use crate::{ast::*, checker::rules::assert_operation_len}; @@ -258,7 +258,7 @@ pub fn not_ite2(RuleArgs { conclusion, premises, .. }: RuleArgs) -> RuleResult { assert_eq(phi_2, conclusion[1].remove_negation_err()?) } -pub fn ite_intro(RuleArgs { conclusion, deep_eq_time, .. }: RuleArgs) -> RuleResult { +pub fn ite_intro(RuleArgs { conclusion, polyeq_time, .. }: RuleArgs) -> RuleResult { assert_clause_len(conclusion, 1)?; let (root_term, right_side) = match_term_err!((= t u) = &conclusion[0])?; @@ -278,13 +278,13 @@ pub fn ite_intro(RuleArgs { conclusion, deep_eq_time, .. }: RuleArgs) -> RuleRes // ``` // For cases like this, we first check if `t` equals the right side term modulo reordering of // equalities. If not, we unwrap the conjunction and continue checking the rule normally. - if deep_eq(root_term, right_side, deep_eq_time) { + if polyeq(root_term, right_side, polyeq_time) { return Ok(()); } let us = match_term_err!((and ...) = right_side)?; // `us` must be a conjunction where the first term is the root term - assert_deep_eq(&us[0], root_term, deep_eq_time)?; + assert_polyeq(&us[0], root_term, polyeq_time)?; // The remaining terms in `us` should be of the correct form for u_i in &us[1..] 
{ @@ -292,11 +292,11 @@ pub fn ite_intro(RuleArgs { conclusion, deep_eq_time, .. }: RuleArgs) -> RuleRes let mut is_valid = |r_1, s_1, r_2, s_2| { // s_1 == s_2 == (ite cond r_1 r_2) - if deep_eq(s_1, s_2, deep_eq_time) { + if polyeq(s_1, s_2, polyeq_time) { if let Some((a, b, c)) = match_term!((ite a b c) = s_1) { - return deep_eq(a, cond, deep_eq_time) - && deep_eq(b, r_1, deep_eq_time) - && deep_eq(c, r_2, deep_eq_time); + return polyeq(a, cond, polyeq_time) + && polyeq(b, r_1, polyeq_time) + && polyeq(c, r_2, polyeq_time); } } false diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index a1efa273..a17ae353 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -41,7 +41,7 @@ fn run_job( let parsing = parsing.elapsed(); let mut elaboration = Duration::ZERO; - let mut deep_eq = Duration::ZERO; + let mut polyeq = Duration::ZERO; let mut assume = Duration::ZERO; let mut assume_core = Duration::ZERO; @@ -52,7 +52,7 @@ fn run_job( .statistics(checker::CheckerStatistics { file_name: proof_file_name, elaboration_time: &mut elaboration, - deep_eq_time: &mut deep_eq, + polyeq_time: &mut polyeq, assume_time: &mut assume, assume_core_time: &mut assume_core, results, @@ -79,7 +79,7 @@ fn run_job( checking, elaboration, total, - deep_eq, + polyeq, assume, assume_core, }, diff --git a/cli/src/main.rs b/cli/src/main.rs index 3549669f..b75733fb 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -395,11 +395,11 @@ fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) println!("on assume (core): {}", results.assume_core_time); println!("assume ratio: {}", results.assume_time_ratio); println!( - "on deep equality: {} ({:.02}% of checking time)", - results.deep_eq_time, - 100.0 * results.deep_eq_time.mean().as_secs_f64() / results.checking().mean().as_secs_f64(), + "on polyeq: {} ({:.02}% of checking time)", + results.polyeq_time, + 100.0 * results.polyeq_time.mean().as_secs_f64() / results.checking().mean().as_secs_f64(), 
); - println!("deep equality ratio: {}", results.deep_eq_time_ratio); + println!("polyeq ratio: {}", results.polyeq_time_ratio); println!("total accounted for: {}", accounted_for); println!("total: {}", total); @@ -440,11 +440,11 @@ fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) worst_file_assume.1 * 100.0 ); - let worst_file_deep_eq = results.deep_eq_time_ratio.max(); + let worst_file_polyeq = results.polyeq_time_ratio.max(); println!( - " file (deep_eq): {} ({:.04}%)", - worst_file_deep_eq.0 .0, - worst_file_deep_eq.1 * 100.0 + " file (polyeq): {} ({:.04}%)", + worst_file_polyeq.0 .0, + worst_file_polyeq.1 * 100.0 ); let worst_file_total = results.total().max(); @@ -466,11 +466,11 @@ fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) num_hard_assumes, percent_hard ); - let depths = results.deep_eq_depths; + let depths = results.polyeq_depths; if !depths.is_empty() { - println!(" max deep equality depth: {}", depths.max().1); - println!(" total deep equality depth: {}", depths.total()); - println!(" number of deep equalities: {}", depths.count()); + println!(" max polyeq depth: {}", depths.max().1); + println!(" total polyeq depth: {}", depths.total()); + println!(" number of polyeq checks: {}", depths.count()); println!(" mean depth: {:.4}", depths.mean()); println!( "standard deviation of depth: {:.4}", From c864d1bd1501d046ca8d9f0cf8b2208de8a4b531 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 06:27:36 -0300 Subject: [PATCH 04/70] Move context to `ast` module --- carcara/src/{checker => ast}/context.rs | 0 carcara/src/ast/mod.rs | 2 ++ carcara/src/checker/elaboration/polyeq.rs | 1 - carcara/src/checker/mod.rs | 2 -- 4 files changed, 2 insertions(+), 3 deletions(-) rename carcara/src/{checker => ast}/context.rs (100%) diff --git a/carcara/src/checker/context.rs b/carcara/src/ast/context.rs similarity index 100% rename from carcara/src/checker/context.rs rename to 
carcara/src/ast/context.rs diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 0941737a..f85dcd80 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -4,6 +4,7 @@ #[macro_use] mod macros; +mod context; mod iter; mod polyeq; mod pool; @@ -13,6 +14,7 @@ mod substitution; #[cfg(test)] mod tests; +pub use context::{Context, ContextStack}; pub use iter::ProofIter; pub use polyeq::{alpha_equiv, polyeq, tracing_polyeq}; pub use pool::TermPool; diff --git a/carcara/src/checker/elaboration/polyeq.rs b/carcara/src/checker/elaboration/polyeq.rs index cca43bbc..e57c9289 100644 --- a/carcara/src/checker/elaboration/polyeq.rs +++ b/carcara/src/checker/elaboration/polyeq.rs @@ -1,7 +1,6 @@ use super::*; use crate::{ ast::*, - checker::context::ContextStack, utils::{DedupIterator, HashMapStack}, }; diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 56af21c4..56de6b92 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -1,4 +1,3 @@ -mod context; mod elaboration; pub mod error; mod lia_generic; @@ -6,7 +5,6 @@ mod rules; use crate::{ast::*, benchmarking::CollectResults, CarcaraResult, Error}; use ahash::AHashSet; -use context::*; use elaboration::Elaborator; use error::CheckerError; use rules::{ElaborationRule, Premise, Rule, RuleArgs, RuleResult}; From bfd768705078ceeef08616b10534e5efc3f3a455 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 06:39:13 -0300 Subject: [PATCH 05/70] Move elaboration module out of `checker` module --- carcara/src/checker/mod.rs | 4 +--- .../src/{checker/elaboration => elaborator}/accumulator.rs | 0 carcara/src/{checker/elaboration => elaborator}/diff.rs | 0 carcara/src/{checker/elaboration => elaborator}/mod.rs | 0 carcara/src/{checker/elaboration => elaborator}/polyeq.rs | 0 carcara/src/{checker/elaboration => elaborator}/pruning.rs | 0 carcara/src/lib.rs | 1 + 7 files changed, 2 insertions(+), 3 deletions(-) rename carcara/src/{checker/elaboration => 
elaborator}/accumulator.rs (100%) rename carcara/src/{checker/elaboration => elaborator}/diff.rs (100%) rename carcara/src/{checker/elaboration => elaborator}/mod.rs (100%) rename carcara/src/{checker/elaboration => elaborator}/polyeq.rs (100%) rename carcara/src/{checker/elaboration => elaborator}/pruning.rs (100%) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 56de6b92..32646f34 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -1,11 +1,9 @@ -mod elaboration; pub mod error; mod lia_generic; mod rules; -use crate::{ast::*, benchmarking::CollectResults, CarcaraResult, Error}; +use crate::{ast::*, benchmarking::CollectResults, elaborator::Elaborator, CarcaraResult, Error}; use ahash::AHashSet; -use elaboration::Elaborator; use error::CheckerError; use rules::{ElaborationRule, Premise, Rule, RuleArgs, RuleResult}; use std::{ diff --git a/carcara/src/checker/elaboration/accumulator.rs b/carcara/src/elaborator/accumulator.rs similarity index 100% rename from carcara/src/checker/elaboration/accumulator.rs rename to carcara/src/elaborator/accumulator.rs diff --git a/carcara/src/checker/elaboration/diff.rs b/carcara/src/elaborator/diff.rs similarity index 100% rename from carcara/src/checker/elaboration/diff.rs rename to carcara/src/elaborator/diff.rs diff --git a/carcara/src/checker/elaboration/mod.rs b/carcara/src/elaborator/mod.rs similarity index 100% rename from carcara/src/checker/elaboration/mod.rs rename to carcara/src/elaborator/mod.rs diff --git a/carcara/src/checker/elaboration/polyeq.rs b/carcara/src/elaborator/polyeq.rs similarity index 100% rename from carcara/src/checker/elaboration/polyeq.rs rename to carcara/src/elaborator/polyeq.rs diff --git a/carcara/src/checker/elaboration/pruning.rs b/carcara/src/elaborator/pruning.rs similarity index 100% rename from carcara/src/checker/elaboration/pruning.rs rename to carcara/src/elaborator/pruning.rs diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 
2e8ceddb..84a181dd 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -38,6 +38,7 @@ pub mod ast; pub mod benchmarking; pub mod checker; +pub mod elaborator; pub mod parser; mod utils; From bbcdac0826aaadd6ee9bb314cd1ff314a17976b1 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 06:45:29 -0300 Subject: [PATCH 06/70] Make `unwrap_*` methods naming more consistent --- carcara/src/ast/mod.rs | 18 +++++++++--------- carcara/src/checker/rules/extras.rs | 6 +++--- carcara/src/checker/rules/quantifier.rs | 16 ++++++++-------- carcara/src/checker/rules/simplification.rs | 2 +- carcara/src/checker/rules/subproof.rs | 12 ++++++------ 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index f85dcd80..b17b8d8e 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -559,7 +559,7 @@ impl Term { /// Tries to unwrap an operation term, returning the `Operator` and the arguments. Returns /// `None` if the term is not an operation term. - pub fn unwrap_op(&self) -> Option<(Operator, &[Rc])> { + pub fn as_op(&self) -> Option<(Operator, &[Rc])> { match self { Term::Op(op, args) => Some((*op, args.as_slice())), _ => None, @@ -568,7 +568,7 @@ impl Term { /// Tries to unwrap a quantifier term, returning the `Quantifier`, the bindings and the inner /// term. Returns `None` if the term is not a quantifier term. - pub fn unwrap_quant(&self) -> Option<(Quantifier, &BindingList, &Rc)> { + pub fn as_quant(&self) -> Option<(Quantifier, &BindingList, &Rc)> { match self { Term::Quant(q, b, t) => Some((*q, b, t)), _ => None, @@ -577,7 +577,7 @@ impl Term { /// Tries to unwrap a `let` term, returning the bindings and the inner term. Returns `None` if /// the term is not a `let` term. 
- pub fn unwrap_let(&self) -> Option<(&BindingList, &Rc)> { + pub fn as_let(&self) -> Option<(&BindingList, &Rc)> { match self { Term::Let(b, t) => Some((b, t)), _ => None, @@ -662,22 +662,22 @@ impl Rc { /// Tries to unwrap an operation term, returning the `Operator` and the arguments. Returns a /// `CheckerError` if the term is not an operation term. - pub fn unwrap_op_err(&self) -> Result<(Operator, &[Rc]), CheckerError> { - self.unwrap_op() + pub fn as_op_err(&self) -> Result<(Operator, &[Rc]), CheckerError> { + self.as_op() .ok_or_else(|| CheckerError::ExpectedOperationTerm(self.clone())) } /// Tries to unwrap a quantifier term, returning the `Quantifier`, the bindings and the inner /// term. Returns a `CheckerError` if the term is not a quantifier term. - pub fn unwrap_quant_err(&self) -> Result<(Quantifier, &BindingList, &Rc), CheckerError> { - self.unwrap_quant() + pub fn as_quant_err(&self) -> Result<(Quantifier, &BindingList, &Rc), CheckerError> { + self.as_quant() .ok_or_else(|| CheckerError::ExpectedQuantifierTerm(self.clone())) } /// Tries to unwrap a `let` term, returning the bindings and the inner /// term. Returns a `CheckerError` if the term is not a `let` term. 
- pub fn unwrap_let_err(&self) -> Result<(&BindingList, &Rc), CheckerError> { - self.unwrap_let() + pub fn as_let_err(&self) -> Result<(&BindingList, &Rc), CheckerError> { + self.as_let() .ok_or_else(|| CheckerError::ExpectedLetTerm(self.clone())) } } diff --git a/carcara/src/checker/rules/extras.rs b/carcara/src/checker/rules/extras.rs index 5358af3c..e1f8f275 100644 --- a/carcara/src/checker/rules/extras.rs +++ b/carcara/src/checker/rules/extras.rs @@ -80,8 +80,8 @@ pub fn bind_let( let (left, right) = match_term_err!((= l r) = &conclusion[0])?; - let (l_bindings, left) = left.unwrap_let_err()?; - let (r_bindings, right) = right.unwrap_let_err()?; + let (l_bindings, left) = left.as_let_err()?; + let (r_bindings, right) = right.as_let_err()?; if l_bindings.len() != r_bindings.len() { return Err(EqualityError::ExpectedEqual(l_bindings.clone(), r_bindings.clone()).into()); @@ -149,7 +149,7 @@ fn la_mult_generic(conclusion: &[Rc], is_pos: bool) -> RuleResult { CheckerError::ExpectedNumber(Rational::new(), zero.clone()) ); - let (op, args) = original.unwrap_op_err()?; + let (op, args) = original.as_op_err()?; assert_operation_len(op, args, 2)?; let (l, r) = (&args[0], &args[1]); diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index 02740237..38f5df85 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -52,9 +52,9 @@ pub fn qnt_join(RuleArgs { conclusion, .. 
}: RuleArgs) -> RuleResult { let (left, right) = match_term_err!((= l r) = &conclusion[0])?; - let (q_1, bindings_1, left) = left.unwrap_quant_err()?; - let (q_2, bindings_2, left) = left.unwrap_quant_err()?; - let (q_3, bindings_3, right) = right.unwrap_quant_err()?; + let (q_1, bindings_1, left) = left.as_quant_err()?; + let (q_2, bindings_2, left) = left.as_quant_err()?; + let (q_3, bindings_3, right) = right.as_quant_err()?; assert_eq(&q_1, &q_2)?; assert_eq(&q_2, &q_3)?; @@ -76,9 +76,9 @@ pub fn qnt_rm_unused(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult assert_clause_len(conclusion, 1)?; let (left, right) = match_term_err!((= l r) = &conclusion[0])?; - let (q_1, bindings_1, phi_1) = left.unwrap_quant_err()?; + let (q_1, bindings_1, phi_1) = left.as_quant_err()?; - let (bindings_2, phi_2) = match right.unwrap_quant() { + let (bindings_2, phi_2) = match right.as_quant() { Some((q_2, b, t)) => { assert_eq(&q_1, &q_2)?; (b, t) @@ -148,7 +148,7 @@ fn negation_normal_form( true => build_term!(pool, (and (or {a} {b}) (or {c} {d}))), false => build_term!(pool, (or (and {a} {b}) (and {c} {d}))), } - } else if let Some((quant, bindings, inner)) = term.unwrap_quant() { + } else if let Some((quant, bindings, inner)) = term.as_quant() { let quant = if polarity { quant } else { !quant }; let inner = negation_normal_form(pool, inner, polarity, cache); pool.add(Term::Quant(quant, bindings.clone(), inner)) @@ -260,8 +260,8 @@ pub fn qnt_cnf(RuleArgs { conclusion, pool, .. 
}: RuleArgs) -> RuleResult { let (l_bindings, phi, r_bindings, phi_prime) = { let (l, r) = match_term_err!((or (not l) r) = &conclusion[0])?; - let (l_q, l_b, phi) = l.unwrap_quant_err()?; - let (r_q, r_b, phi_prime) = r.unwrap_quant_err()?; + let (l_q, l_b, phi) = l.as_quant_err()?; + let (r_q, r_b, phi_prime) = r.as_quant_err()?; // We expect both quantifiers to be `forall` assert_is_expected(&l_q, Quantifier::Forall)?; diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index f53a4062..1711203a 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -407,7 +407,7 @@ pub fn bool_simplify(args: RuleArgs) -> RuleResult { pub fn qnt_simplify(RuleArgs { conclusion, .. }: RuleArgs) -> RuleResult { assert_clause_len(conclusion, 1)?; let (left, right) = match_term_err!((= l r) = &conclusion[0])?; - let (_, _, inner) = left.unwrap_quant_err()?; + let (_, _, inner) = left.as_quant_err()?; rassert!( inner.is_bool_false() || inner.is_bool_true(), CheckerError::ExpectedAnyBoolConstant(inner.clone()) diff --git a/carcara/src/checker/rules/subproof.rs b/carcara/src/checker/rules/subproof.rs index 909c5c3c..5d451bb2 100644 --- a/carcara/src/checker/rules/subproof.rs +++ b/carcara/src/checker/rules/subproof.rs @@ -64,8 +64,8 @@ pub fn bind( // While the documentation indicates this rule is only called with `forall` quantifiers, in // some of the tests examples it is also called with the `exists` quantifier - let (l_quant, l_bindings, left) = left.unwrap_quant_err()?; - let (r_quant, r_bindings, right) = right.unwrap_quant_err()?; + let (l_quant, l_bindings, left) = left.as_quant_err()?; + let (r_quant, r_bindings, right) = right.as_quant_err()?; assert_eq(&l_quant, &r_quant)?; let [l_bindings, r_bindings] = [l_bindings, r_bindings].map(|b| { @@ -199,7 +199,7 @@ fn extract_points(quant: Quantifier, term: &Rc) -> AHashSet<(Rc, Rc< if let Some(inner) = term.remove_negation() { 
return find_points(acc, !polarity, inner); } - if let Some((_, _, inner)) = term.unwrap_quant() { + if let Some((_, _, inner)) = term.as_quant() { return find_points(acc, polarity, inner); } match polarity { @@ -244,8 +244,8 @@ pub fn onepoint( assert_clause_len(conclusion, 1)?; let (left, right) = match_term_err!((= l r) = &conclusion[0])?; - let (quant, l_bindings, left) = left.unwrap_quant_err()?; - let (r_bindings, right) = match right.unwrap_quant() { + let (quant, l_bindings, left) = left.as_quant_err()?; + let (r_bindings, right) = match right.as_quant() { Some((q, b, t)) => { assert_eq(&q, &quant)?; (b, t) @@ -340,7 +340,7 @@ fn generic_skolemization_rule( let (left, psi) = match_term_err!((= l r) = &conclusion[0])?; - let (quant, bindings, phi) = left.unwrap_quant_err()?; + let (quant, bindings, phi) = left.as_quant_err()?; assert_is_expected(&quant, rule_type)?; let previous_term = get_premise_term(&previous_command)?; From ba125f031b960f30d2238a5589e3130af332ddf7 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 08:32:49 -0300 Subject: [PATCH 07/70] Split `Term::Terminal` variant into `Const` and `Var` --- carcara/src/ast/mod.rs | 54 +++++++++------------ carcara/src/ast/polyeq.rs | 31 +++++------- carcara/src/ast/pool.rs | 27 ++++------- carcara/src/ast/printer.rs | 29 +++++------ carcara/src/ast/substitution.rs | 2 +- carcara/src/checker/rules/simplification.rs | 2 +- carcara/src/parser/error.rs | 4 +- carcara/src/parser/mod.rs | 14 +++--- 8 files changed, 72 insertions(+), 91 deletions(-) diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index b17b8d8e..833a21bf 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -403,8 +403,11 @@ impl BindingList { /// Many additional methods are implemented in [`Rc`]. #[derive(Clone, PartialEq, Eq, Hash)] pub enum Term { - /// A terminal. This can be a constant or a variable. - Terminal(Terminal), + /// A constant term. 
+ Const(Constant), + + /// A variable, consisting of an identifier and a sort. + Var(Ident, Rc), /// An application of a function to one or more terms. App(Rc, Vec>), @@ -430,29 +433,29 @@ pub enum Term { impl From for Term { fn from(var: SortedVar) -> Self { - Term::Terminal(Terminal::Var(Identifier::Simple(var.0), var.1)) + Term::Var(Ident::Simple(var.0), var.1) } } impl Term { /// Constructs a new integer term. pub fn integer(value: impl Into) -> Self { - Term::Terminal(Terminal::Integer(value.into())) + Term::Const(Constant::Integer(value.into())) } /// Constructs a new real term. pub fn real(value: impl Into) -> Self { - Term::Terminal(Terminal::Real(value.into())) + Term::Const(Constant::Real(value.into())) } /// Constructs a new string term. pub fn string(value: impl Into) -> Self { - Term::Terminal(Terminal::String(value.into())) + Term::Const(Constant::String(value.into())) } /// Constructs a new variable term. pub fn var(name: impl Into, sort: Rc) -> Self { - Term::Terminal(Terminal::Var(Identifier::Simple(name.into()), sort)) + Term::Var(Ident::Simple(name.into()), sort) } /// Returns the sort of this term. This does not make use of a cache --- if possible, prefer to @@ -463,17 +466,14 @@ impl Term { pool.sort(&added).clone() } - /// Returns `true` if the term is a terminal. + /// Returns `true` if the term is a terminal, that is, if it is a constant or a variable. pub fn is_terminal(&self) -> bool { - matches!(self, Term::Terminal(_)) + matches!(self, Term::Const(_) | Term::Var(..)) } /// Returns `true` if the term is an integer or real constant. pub fn is_number(&self) -> bool { - matches!( - self, - Term::Terminal(Terminal::Real(_) | Terminal::Integer(_)) - ) + matches!(self, Term::Const(Constant::Real(_) | Constant::Integer(_))) } /// Returns `true` if the term is an integer or real constant, or one such constant negated @@ -489,8 +489,8 @@ impl Term { /// constant. 
pub fn as_number(&self) -> Option { match self { - Term::Terminal(Terminal::Real(r)) => Some(r.clone()), - Term::Terminal(Terminal::Integer(i)) => Some(i.clone().into()), + Term::Const(Constant::Real(r)) => Some(r.clone()), + Term::Const(Constant::Integer(i)) => Some(i.clone().into()), _ => None, } } @@ -529,17 +529,14 @@ impl Term { /// Returns `true` if the term is a variable. pub fn is_var(&self) -> bool { - matches!( - self, - Term::Terminal(Terminal::Var(Identifier::Simple(_), _)) - ) + matches!(self, Term::Var(Ident::Simple(_), _)) } /// Tries to extract the variable name from a term. Returns `Some` if the term is a variable /// with a simple identifier. pub fn as_var(&self) -> Option<&str> { match self { - Term::Terminal(Terminal::Var(Identifier::Simple(var), _)) => Some(var.as_str()), + Term::Var(Ident::Simple(var), _) => Some(var.as_str()), _ => None, } } @@ -586,7 +583,7 @@ impl Term { /// Returns `true` if the term is the boolean constant `true`. pub fn is_bool_true(&self) -> bool { - if let Term::Terminal(Terminal::Var(Identifier::Simple(name), sort)) = self { + if let Term::Var(Ident::Simple(name), sort) = self { sort.as_sort() == Some(&Sort::Bool) && name == "true" } else { false @@ -595,7 +592,7 @@ impl Term { /// Returns `true` if the term is the boolean constant `false`. pub fn is_bool_false(&self) -> bool { - if let Term::Terminal(Terminal::Var(Identifier::Simple(name), sort)) = self { + if let Term::Var(Ident::Simple(name), sort) = self { sort.as_sort() == Some(&Sort::Bool) && name == "false" } else { false @@ -682,9 +679,9 @@ impl Rc { } } -/// A terminal term. +/// A constant term. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum Terminal { +pub enum Constant { /// An integer constant term. Integer(Integer), @@ -693,24 +690,21 @@ pub enum Terminal { /// A string literal term. String(String), - - /// A variable, consisting of an identifier and a sort. - Var(Identifier, Rc), } /// An identifier. 
#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum Identifier { +pub enum Ident { /// A simple identifier, consisting of a symbol. Simple(String), /// An indexed identifier, consisting of a symbol and one or more indices. - Indexed(String, Vec), + Indexed(String, Vec), } /// An index for an indexed identifier. This can be either a numeral or a symbol. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum IdentifierIndex { +pub enum IdentIndex { Numeral(u64), Symbol(String), } diff --git a/carcara/src/ast/polyeq.rs b/carcara/src/ast/polyeq.rs index 6fe1af6f..7ba7f592 100644 --- a/carcara/src/ast/polyeq.rs +++ b/carcara/src/ast/polyeq.rs @@ -8,8 +8,7 @@ //! modulo renaming of bound variables. use super::{ - BindingList, Identifier, Operator, ProofArg, ProofCommand, ProofStep, Rc, Sort, Subproof, Term, - Terminal, + BindingList, Ident, Operator, ProofArg, ProofCommand, ProofStep, Rc, Sort, Subproof, Term, }; use crate::utils::HashMapStack; use std::time::{Duration, Instant}; @@ -192,6 +191,18 @@ impl Polyeq for Rc { impl Polyeq for Term { fn eq(comp: &mut PolyeqComparator, a: &Self, b: &Self) -> bool { match (a, b) { + (Term::Const(a), Term::Const(b)) => a == b, + (Term::Var(Ident::Simple(a), a_sort), Term::Var(Ident::Simple(b), b_sort)) + if comp.de_bruijn_map.is_some() => + { + // If we are checking for alpha-equivalence, and we encounter two variables, we + // check that they are equivalent using the De Bruijn map + let db = comp.de_bruijn_map.as_mut().unwrap(); + db.compare(a, b) && Polyeq::eq(comp, a_sort, b_sort) + } + (Term::Var(a, a_sort), Term::Var(b, b_sort)) => { + a == b && Polyeq::eq(comp, a_sort, b_sort) + } (Term::App(f_a, args_a), Term::App(f_b, args_b)) => { Polyeq::eq(comp, f_a, f_b) && Polyeq::eq(comp, args_a, args_b) } @@ -210,22 +221,6 @@ impl Polyeq for Term { op_a == op_b && Polyeq::eq(comp, args_a, args_b) } (Term::Sort(a), Term::Sort(b)) => Polyeq::eq(comp, a, b), - (Term::Terminal(a), Term::Terminal(b)) => match (a, b) { - // If we are 
checking for alpha-equivalence, and we encounter two variables, we - // check that they are equivalent using the De Bruijn map - ( - Terminal::Var(Identifier::Simple(a_var), a_sort), - Terminal::Var(Identifier::Simple(b_var), b_sort), - ) if comp.de_bruijn_map.is_some() => { - let alpha = comp.de_bruijn_map.as_mut().unwrap(); - alpha.compare(a_var, b_var) && Polyeq::eq(comp, a_sort, b_sort) - } - - (Terminal::Var(iden_a, sort_a), Terminal::Var(iden_b, sort_b)) => { - iden_a == iden_b && Polyeq::eq(comp, sort_a, sort_b) - } - (a, b) => a == b, - }, (Term::Quant(q_a, _, _), Term::Quant(q_b, _, _)) if q_a != q_b => false, (Term::Quant(_, a_binds, a), Term::Quant(_, b_binds, b)) | (Term::Let(a_binds, a), Term::Let(b_binds, b)) diff --git a/carcara/src/ast/pool.rs b/carcara/src/ast/pool.rs index 7e9cb27d..2c3da6de 100644 --- a/carcara/src/ast/pool.rs +++ b/carcara/src/ast/pool.rs @@ -1,6 +1,6 @@ //! This module implements `TermPool`, a structure that stores terms and implements hash consing. -use super::{Identifier, Rc, Sort, Term, Terminal}; +use super::{Constant, Rc, Sort, Term}; use ahash::{AHashMap, AHashSet}; /// A structure to store and manage all allocated terms. 
@@ -35,15 +35,8 @@ impl TermPool { let mut sorts_cache = AHashMap::new(); let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); - let [bool_true, bool_false] = ["true", "false"].map(|b| { - Self::add_term_to_map( - &mut terms, - Term::Terminal(Terminal::Var( - Identifier::Simple(b.into()), - bool_sort.clone(), - )), - ) - }); + let [bool_true, bool_false] = ["true", "false"] + .map(|b| Self::add_term_to_map(&mut terms, Term::var(b, bool_sort.clone()))); sorts_cache.insert(bool_false.clone(), Sort::Bool); sorts_cache.insert(bool_true.clone(), Sort::Bool); @@ -121,12 +114,12 @@ impl TermPool { } let result = match term.as_ref() { - Term::Terminal(t) => match t { - Terminal::Integer(_) => Sort::Int, - Terminal::Real(_) => Sort::Real, - Terminal::String(_) => Sort::String, - Terminal::Var(_, sort) => sort.as_sort().unwrap().clone(), + Term::Const(c) => match c { + Constant::Integer(_) => Sort::Int, + Constant::Real(_) => Sort::Real, + Constant::String(_) => Sort::String, }, + Term::Var(_, sort) => sort.as_sort().unwrap().clone(), Term::Op(op, args) => match op { Operator::Not | Operator::Implies @@ -239,12 +232,12 @@ impl TermPool { vars.remove(&term); vars } - Term::Terminal(Terminal::Var(Identifier::Simple(_), _)) => { + Term::Var(..) => { let mut set = AHashSet::with_capacity(1); set.insert(term.clone()); set } - Term::Terminal(_) | Term::Sort(_) => AHashSet::new(), + Term::Const(_) | Term::Sort(_) => AHashSet::new(), }; self.free_vars_cache.insert(term.clone(), set); self.free_vars_cache.get(term).unwrap() diff --git a/carcara/src/ast/printer.rs b/carcara/src/ast/printer.rs index 6f068fae..b1d38916 100644 --- a/carcara/src/ast/printer.rs +++ b/carcara/src/ast/printer.rs @@ -57,7 +57,7 @@ impl PrintWithSharing for Rc { if let Some(indices) = &mut p.term_indices { // There are three cases where we don't use sharing when printing a term: // - // - Terminal terms (e.g., integers, reals, variables, etc.) 
could in theory be shared, + // - Terminal terms (i.e., constants or variables) could in theory be shared, // but, since they are very small, it's not worth it to give them a name. // // - Sorts are represented as terms, but they are not actually terms in the grammar, so @@ -173,7 +173,8 @@ impl<'a> AlethePrinter<'a> { fn write_raw_term(&mut self, term: &Rc) -> io::Result<()> { match term.as_ref() { - Term::Terminal(t) => write!(self.inner, "{}", t), + Term::Const(c) => write!(self.inner, "{}", c), + Term::Var(iden, _) => write!(self.inner, "{}", iden), Term::App(func, args) => self.write_s_expr(func, args), Term::Op(op, args) => self.write_s_expr(op, args), Term::Sort(sort) => write!(self.inner, "{}", sort), @@ -316,7 +317,8 @@ fn escape_string(string: &str) -> Cow { impl fmt::Display for Term { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Term::Terminal(t) => write!(f, "{}", t), + Term::Const(c) => write!(f, "{}", c), + Term::Var(iden, _) => write!(f, "{}", iden), Term::App(func, args) => write_s_expr(f, func, args), Term::Op(op, args) => write_s_expr(f, op, args), Term::Sort(sort) => write!(f, "{}", sort), @@ -342,39 +344,38 @@ impl fmt::Debug for Term { } } -impl fmt::Display for Terminal { +impl fmt::Display for Constant { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Terminal::Integer(i) => write!(f, "{}", i), - Terminal::Real(r) => { + Constant::Integer(i) => write!(f, "{}", i), + Constant::Real(r) => { if r.is_integer() { write!(f, "{:?}.0", r.numer()) } else { write!(f, "{:?}", r.to_f64()) } } - Terminal::String(s) => write!(f, "\"{}\"", escape_string(s)), - Terminal::Var(iden, _) => write!(f, "{}", iden), + Constant::String(s) => write!(f, "\"{}\"", escape_string(s)), } } } -impl fmt::Display for Identifier { +impl fmt::Display for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Identifier::Simple(s) => write!(f, "{}", quote_symbol(s)), - Identifier::Indexed(s, indices) => 
{ + Ident::Simple(s) => write!(f, "{}", quote_symbol(s)), + Ident::Indexed(s, indices) => { write_s_expr(f, format!("_ {}", quote_symbol(s)), indices) } } } } -impl fmt::Display for IdentifierIndex { +impl fmt::Display for IdentIndex { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - IdentifierIndex::Numeral(n) => write!(f, "{}", n), - IdentifierIndex::Symbol(s) => write!(f, "{}", quote_symbol(s)), + IdentIndex::Numeral(n) => write!(f, "{}", n), + IdentIndex::Symbol(s) => write!(f, "{}", quote_symbol(s)), } } } diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index 532f2d8b..9ba5d2b5 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -201,7 +201,7 @@ impl Substitution { Term::Lambda(b, t) => { self.apply_to_binder(pool, term, b.as_ref(), t, true, Term::Lambda) } - Term::Terminal(_) | Term::Sort(_) => term.clone(), + Term::Const(_) | Term::Var(..) | Term::Sort(_) => term.clone(), }; // Since frequently a term will have more than one identical subterms, we insert the diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index 1711203a..6b344ba8 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -531,7 +531,7 @@ fn generic_sum_prod_simplify_rule( // Finally, we verify that the constant and the remaining arguments are what we expect rassert!(u_constant == constant_total && u_args.iter().eq(result), { let expected = { - let mut expected_args = vec![pool.add(Term::Terminal(Terminal::Real(constant_total)))]; + let mut expected_args = vec![pool.add(Term::real(constant_total))]; expected_args.extend(u_args.iter().cloned()); pool.add(Term::Op(rule_kind, expected_args)) }; diff --git a/carcara/src/parser/error.rs b/carcara/src/parser/error.rs index fedb2d27..e21f1f63 100644 --- a/carcara/src/parser/error.rs +++ b/carcara/src/parser/error.rs @@ -1,7 +1,7 @@ //! 
The types for parser errors. use crate::{ - ast::{Identifier, Sort}, + ast::{Ident, Sort}, parser::Token, utils::Range, }; @@ -55,7 +55,7 @@ pub enum ParserError { /// The parser encountered an identifier that was not defined. #[error("identifier '{0}' is not defined")] - UndefinedIden(Identifier), + UndefinedIden(Ident), /// The parser encountered a sort that was not defined. #[error("sort '{0}' is not defined")] diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index a591e37c..c3e35cbc 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -72,7 +72,7 @@ enum AnchorArg { /// pool used by the parser. #[derive(Default)] struct ParserState { - symbol_table: HashMapStack, Rc>, + symbol_table: HashMapStack, Rc>, function_defs: AHashMap, sort_declarations: AHashMap, step_ids: HashMapStack, usize>, @@ -106,7 +106,7 @@ impl<'a, R: BufRead> Parser<'a, R> { let mut state = ParserState::default(); let bool_sort = pool.add(Term::Sort(Sort::Bool)); for iden in ["true", "false"] { - let iden = HashCache::new(Identifier::Simple(iden.to_owned())); + let iden = HashCache::new(Ident::Simple(iden.to_owned())); state.symbol_table.insert(iden, bool_sort.clone()); } let mut lexer = Lexer::new(input)?; @@ -150,7 +150,7 @@ impl<'a, R: BufRead> Parser<'a, R> { fn insert_sorted_var(&mut self, (symbol, sort): SortedVar) { self.state .symbol_table - .insert(HashCache::new(Identifier::Simple(symbol)), sort); + .insert(HashCache::new(Ident::Simple(symbol)), sort); } /// Shortcut for `self.problem.as_mut().unwrap().0` @@ -164,15 +164,13 @@ impl<'a, R: BufRead> Parser<'a, R> { } /// Constructs and sort checks a variable term. 
- fn make_var(&mut self, iden: Identifier) -> Result, ParserError> { + fn make_var(&mut self, iden: Ident) -> Result, ParserError> { let cached = HashCache::new(iden); let sort = match self.state.symbol_table.get(&cached) { Some(s) => s.clone(), None => return Err(ParserError::UndefinedIden(cached.unwrap())), }; - Ok(self - .pool - .add(Term::Terminal(Terminal::Var(cached.unwrap(), sort)))) + Ok(self.pool.add(Term::Var(cached.unwrap(), sort))) } /// Constructs and sort checks an operation term. @@ -939,7 +937,7 @@ impl<'a, R: BufRead> Parser<'a, R> { )); } } else { - self.make_var(Identifier::Simple(s)) + self.make_var(Ident::Simple(s)) .map_err(|err| Error::Parser(err, pos))? }); } From 8daf6af11637edc0ab6c47d75c3a58ae01bdd3fc Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 08:37:38 -0300 Subject: [PATCH 08/70] Rename term constructor methods --- carcara/src/ast/context.rs | 2 +- carcara/src/ast/macros.rs | 8 +-- carcara/src/ast/mod.rs | 8 +-- carcara/src/ast/pool.rs | 2 +- carcara/src/checker/rules/simplification.rs | 2 +- carcara/src/parser/mod.rs | 12 ++--- carcara/src/parser/tests.rs | 58 +++++++++++---------- 7 files changed, 48 insertions(+), 44 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 65fed185..b51a4089 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -54,7 +54,7 @@ impl ContextStack { // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` for (var, value) in assignment_args.iter() { let sort = Term::Sort(pool.sort(value).clone()); - let var_term = Term::var(var, pool.add(sort)); + let var_term = Term::new_var(var, pool.add(sort)); let var_term = pool.add(var_term); substitution.insert(pool, var_term.clone(), value.clone())?; let new_value = substitution_until_fixed_point.apply(pool, value); diff --git a/carcara/src/ast/macros.rs b/carcara/src/ast/macros.rs index ffd55e62..3147da49 100644 --- a/carcara/src/ast/macros.rs +++ 
b/carcara/src/ast/macros.rs @@ -255,7 +255,7 @@ mod tests { #[test] fn test_match_term() { let mut p = TermPool::new(); - let [one, two, five] = [1, 2, 5].map(|n| p.add(Term::integer(n))); + let [one, two, five] = [1, 2, 5].map(|n| p.add(Term::new_int(n))); let term = parse_term(&mut p, "(= (= (not false) (= true false)) (not true))"); let ((a, (b, c)), d) = match_term!((= (= (not a) (= b c)) (not d)) = &term).unwrap(); @@ -307,9 +307,9 @@ mod tests { let bool_sort = pool.add(Term::Sort(Sort::Bool)); let int_sort = pool.add(Term::Sort(Sort::Int)); - let [one, two, three] = [1, 2, 3].map(|n| pool.add(Term::integer(n))); - let [a, b] = ["a", "b"].map(|s| pool.add(Term::var(s, int_sort.clone()))); - let [p, q] = ["p", "q"].map(|s| pool.add(Term::var(s, bool_sort.clone()))); + let [one, two, three] = [1, 2, 3].map(|n| pool.add(Term::new_int(n))); + let [a, b] = ["a", "b"].map(|s| pool.add(Term::new_var(s, int_sort.clone()))); + let [p, q] = ["p", "q"].map(|s| pool.add(Term::new_var(s, bool_sort.clone()))); let cases = [ ("(= a b)", build_term!(pool, (= {a} {b}))), diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 833a21bf..a17deb3b 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -439,22 +439,22 @@ impl From for Term { impl Term { /// Constructs a new integer term. - pub fn integer(value: impl Into) -> Self { + pub fn new_int(value: impl Into) -> Self { Term::Const(Constant::Integer(value.into())) } /// Constructs a new real term. - pub fn real(value: impl Into) -> Self { + pub fn new_real(value: impl Into) -> Self { Term::Const(Constant::Real(value.into())) } /// Constructs a new string term. - pub fn string(value: impl Into) -> Self { + pub fn new_string(value: impl Into) -> Self { Term::Const(Constant::String(value.into())) } /// Constructs a new variable term. 
- pub fn var(name: impl Into, sort: Rc) -> Self { + pub fn new_var(name: impl Into, sort: Rc) -> Self { Term::Var(Ident::Simple(name.into()), sort) } diff --git a/carcara/src/ast/pool.rs b/carcara/src/ast/pool.rs index 2c3da6de..0b25399f 100644 --- a/carcara/src/ast/pool.rs +++ b/carcara/src/ast/pool.rs @@ -36,7 +36,7 @@ impl TermPool { let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); let [bool_true, bool_false] = ["true", "false"] - .map(|b| Self::add_term_to_map(&mut terms, Term::var(b, bool_sort.clone()))); + .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); sorts_cache.insert(bool_false.clone(), Sort::Bool); sorts_cache.insert(bool_true.clone(), Sort::Bool); diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index 6b344ba8..3f2de8a8 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -531,7 +531,7 @@ fn generic_sum_prod_simplify_rule( // Finally, we verify that the constant and the remaining arguments are what we expect rassert!(u_constant == constant_total && u_args.iter().eq(result), { let expected = { - let mut expected_args = vec![pool.add(Term::real(constant_total))]; + let mut expected_args = vec![pool.add(Term::new_real(constant_total))]; expected_args.extend(u_args.iter().cloned()); pool.add(Term::Op(rule_kind, expected_args)) }; diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index c3e35cbc..8da113d3 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -921,10 +921,10 @@ impl<'a, R: BufRead> Parser<'a, R> { /// Parses a term. pub fn parse_term(&mut self) -> CarcaraResult> { let term = match self.next_token()? 
{ - (Token::Numeral(n), _) if self.interpret_integers_as_reals => Term::real(n), - (Token::Numeral(n), _) => Term::integer(n), - (Token::Decimal(r), _) => Term::real(r), - (Token::String(s), _) => Term::string(s), + (Token::Numeral(n), _) if self.interpret_integers_as_reals => Term::new_real(n), + (Token::Numeral(n), _) => Term::new_int(n), + (Token::Decimal(r), _) => Term::new_real(r), + (Token::String(s), _) => Term::new_string(s), (Token::Symbol(s), pos) => { // Check to see if there is a nullary function defined with this name return Ok(if let Some(func_def) = self.state.function_defs.get(&s) { @@ -1034,7 +1034,7 @@ impl<'a, R: BufRead> Parser<'a, R> { .into_iter() .map(|(name, value)| { let sort = Term::Sort(self.pool.sort(&value).clone()); - let var = Term::var(name, self.pool.add(sort)); + let var = Term::new_var(name, self.pool.add(sort)); (self.pool.add(var), value) }) .collect(); @@ -1143,7 +1143,7 @@ impl<'a, R: BufRead> Parser<'a, R> { .params .iter() .zip(args) - .map(|((name, sort), arg)| (self.pool.add(Term::var(name, sort.clone())), arg)) + .map(|((n, s), arg)| (self.pool.add(Term::new_var(n, s.clone())), arg)) .collect(); // Since we already checked the sorts of the arguments, creating this substitution diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index 9442b812..e6d29822 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -103,15 +103,15 @@ fn test_hash_consing() { #[test] fn test_constant_terms() { let mut p = TermPool::new(); - assert_eq!(Term::integer(42), *parse_term(&mut p, "42")); - assert_eq!(Term::real((3, 2)), *parse_term(&mut p, "1.5")); - assert_eq!(Term::string("foo"), *parse_term(&mut p, "\"foo\"")); + assert_eq!(Term::new_int(42), *parse_term(&mut p, "42")); + assert_eq!(Term::new_real((3, 2)), *parse_term(&mut p, "1.5")); + assert_eq!(Term::new_string("foo"), *parse_term(&mut p, "\"foo\"")); } #[test] fn test_arithmetic_ops() { let mut p = TermPool::new(); - let [one, two, three, 
five, seven] = [1, 2, 3, 5, 7].map(|n| p.add(Term::integer(n))); + let [one, two, three, five, seven] = [1, 2, 3, 5, 7].map(|n| p.add(Term::new_int(n))); let cases = [ ( "(+ 2 3)", @@ -141,7 +141,7 @@ fn test_arithmetic_ops() { #[test] fn test_logic_ops() { let mut p = TermPool::new(); - let [zero, one, two, three, four] = [0, 1, 2, 3, 4].map(|n| p.add(Term::integer(n))); + let [zero, one, two, three, four] = [0, 1, 2, 3, 4].map(|n| p.add(Term::new_int(n))); let cases = [ ( "(and true false)", @@ -223,7 +223,7 @@ fn test_logic_ops() { #[test] fn test_ite() { let mut p = TermPool::new(); - let [one, two, three] = [1, 2, 3].map(|n| p.add(Term::integer(n))); + let [one, two, three] = [1, 2, 3].map(|n| p.add(Term::new_int(n))); let cases = [ ( "(ite true 2 3)", @@ -264,7 +264,7 @@ fn test_quantifiers() { let real_sort = p.add(Term::Sort(Sort::Real)); let cases = [ ("(exists ((p Bool)) p)", { - let inner = p.add(Term::var("p", bool_sort.clone())); + let inner = p.add(Term::new_var("p", bool_sort.clone())); p.add(Term::Quant( Quantifier::Exists, BindingList(vec![("p".into(), bool_sort)]), @@ -272,9 +272,9 @@ fn test_quantifiers() { )) }), ("(forall ((x Real) (y Real)) (= (+ x y) 0.0))", { - let [x, y] = ["x", "y"].map(|s| p.add(Term::var(s, real_sort.clone()))); + let [x, y] = ["x", "y"].map(|s| p.add(Term::new_var(s, real_sort.clone()))); let x_plus_y = p.add(Term::Op(Operator::Add, vec![x, y])); - let zero = p.add(Term::real(0)); + let zero = p.add(Term::new_real(0)); let inner = p.add(Term::Op(Operator::Equals, vec![x_plus_y, zero])); p.add(Term::Quant( Quantifier::Forall, @@ -304,12 +304,12 @@ fn test_choice_terms() { let int_sort = p.add(Term::Sort(Sort::Int)); let cases = [ ("(choice ((p Bool)) p)", { - let inner = p.add(Term::var("p", bool_sort.clone())); + let inner = p.add(Term::new_var("p", bool_sort.clone())); p.add(Term::Choice(("p".into(), bool_sort), inner)) }), ("(choice ((x Int)) (= x 0))", { - let x = p.add(Term::var("x", int_sort.clone())); - let zero = 
p.add(Term::integer(0)); + let x = p.add(Term::new_var("x", int_sort.clone())); + let zero = p.add(Term::new_int(0)); let inner = p.add(Term::Op(Operator::Equals, vec![x, zero])); p.add(Term::Choice(("x".into(), int_sort), inner)) }), @@ -332,15 +332,15 @@ fn test_let_terms() { let bool_sort = p.add(Term::Sort(Sort::Bool)); let cases = [ ("(let ((p false)) p)", { - let inner = p.add(Term::var("p", bool_sort)); + let inner = p.add(Term::new_var("p", bool_sort)); p.add(Term::Let( BindingList(vec![("p".into(), p.bool_false())]), inner, )) }), ("(let ((x 1) (y 2)) (+ x y))", { - let [one, two] = [1, 2].map(|n| p.add(Term::integer(n))); - let [x, y] = ["x", "y"].map(|s| p.add(Term::var(s, int_sort.clone()))); + let [one, two] = [1, 2].map(|n| p.add(Term::new_int(n))); + let [x, y] = ["x", "y"].map(|s| p.add(Term::new_var(s, int_sort.clone()))); let inner = p.add(Term::Op(Operator::Add, vec![x, y])); p.add(Term::Let( BindingList(vec![("x".into(), one), ("y".into(), two)]), @@ -361,14 +361,14 @@ fn test_lambda_terms() { let int_sort = p.add(Term::Sort(Sort::Int)); let cases = [ ("(lambda ((x Int)) x)", { - let x = p.add(Term::var("x", int_sort.clone())); + let x = p.add(Term::new_var("x", int_sort.clone())); p.add(Term::Lambda( BindingList(vec![("x".into(), int_sort.clone())]), x, )) }), ("(lambda ((x Int) (y Int)) (+ x y))", { - let [x, y] = ["x", "y"].map(|s| p.add(Term::var(s, int_sort.clone()))); + let [x, y] = ["x", "y"].map(|s| p.add(Term::new_var(s, int_sort.clone()))); let inner = p.add(Term::Op(Operator::Add, vec![x, y])); p.add(Term::Lambda( BindingList(vec![("x".into(), int_sort.clone()), ("y".into(), int_sort)]), @@ -390,7 +390,7 @@ fn test_lambda_terms() { #[test] fn test_annotated_terms() { let mut p = TermPool::new(); - let [zero, two, three] = [0, 2, 3].map(|n| p.add(Term::integer(n))); + let [zero, two, three] = [0, 2, 3].map(|n| p.add(Term::new_int(n))); let cases = [ ("(! 0 :named foo)", zero.clone()), ("(! (! 
0 :named foo) :named bar)", zero.clone()), @@ -437,7 +437,7 @@ fn test_declare_fun() { let [got] = parse_terms(&mut p, "(declare-fun x () Real)", ["x"]); let real_sort = p.add(Term::Sort(Sort::Real)); - assert_eq!(p.add(Term::var("x", real_sort)), got); + assert_eq!(p.add(Term::new_var("x", real_sort)), got); } #[test] @@ -462,7 +462,7 @@ fn test_declare_sort() { ["x"], ); let expected_sort = p.add(Term::Sort(Sort::Atom("T".to_owned(), Vec::new()))); - assert_eq!(p.add(Term::var("x", expected_sort)), got); + assert_eq!(p.add(Term::new_var("x", expected_sort)), got); } #[test] @@ -534,10 +534,14 @@ fn test_step() { rule: "rule-name".into(), premises: Vec::new(), args: { - vec![Term::integer(1), Term::real(2), Term::string("three")] - .into_iter() - .map(|term| ProofArg::Term(p.add(term))) - .collect() + vec![ + Term::new_int(1), + Term::new_real(2), + Term::new_string("three"), + ] + .into_iter() + .map(|term| ProofArg::Term(p.add(term))) + .collect() }, discharge: Vec::new(), }) @@ -552,8 +556,8 @@ premises: Vec::new(), args: { vec![ - ProofArg::Assign("a".into(), p.add(Term::integer(12))), - ProofArg::Assign("b".into(), p.add(Term::real((314, 100)))), + ProofArg::Assign("a".into(), p.add(Term::new_int(12))), + ProofArg::Assign("b".into(), p.add(Term::new_real((314, 100)))), ProofArg::Assign("c".into(), parse_term(&mut p, "(* 6 7)")), ] }, @@ -568,7 +572,7 @@ clause: Vec::new(), rule: "rule-name".into(), premises: vec![(0, 0), (0, 1), (0, 2)], - args: vec![ProofArg::Term(p.add(Term::integer(42)))], + args: vec![ProofArg::Term(p.add(Term::new_int(42)))], discharge: Vec::new(), }) ); From ec47310b08cf46e741cdb2b5eee674d9e33d1e89 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 5 May 2023 09:12:12 -0300 Subject: [PATCH 09/70] Improve `Deref` of `BindingList` --- carcara/src/ast/mod.rs | 7 +------ carcara/src/elaborator/polyeq.rs | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/carcara/src/ast/mod.rs 
b/carcara/src/ast/mod.rs index a17deb3b..a3ffda50 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -372,7 +372,7 @@ impl AsRef<[SortedVar]> for BindingList { } impl Deref for BindingList { - type Target = [SortedVar]; + type Target = Vec; fn deref(&self) -> &Self::Target { &self.0 @@ -391,11 +391,6 @@ impl<'a> IntoIterator for &'a BindingList { impl BindingList { pub const EMPTY: &'static Self = &BindingList(Vec::new()); - - /// Extract a slice of the binding list's contents. - pub fn as_slice(&self) -> &[SortedVar] { - self.0.as_slice() - } } /// A term. diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index e57c9289..114fe331 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -82,7 +82,7 @@ impl<'a> PolyeqElaborator<'a> { }) .collect(); - (a_bindings.as_slice().to_vec(), assignment_args) + (a_bindings.to_vec(), assignment_args) } Some(c) => { assert!(a_bindings From c6e5df0ea51dfd86f306c2d91ddbdb98ef08c437 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 22 May 2023 13:59:15 -0300 Subject: [PATCH 10/70] Implement (experimental) `carcara slice` command --- carcara/src/elaborator/diff.rs | 5 +- carcara/src/elaborator/mod.rs | 5 +- carcara/src/elaborator/pruning.rs | 83 ++++++++++++++++++++----------- cli/src/error.rs | 2 + cli/src/main.rs | 46 +++++++++++++++++ 5 files changed, 106 insertions(+), 35 deletions(-) diff --git a/carcara/src/elaborator/diff.rs b/carcara/src/elaborator/diff.rs index 65f25c60..26f8d84d 100644 --- a/carcara/src/elaborator/diff.rs +++ b/carcara/src/elaborator/diff.rs @@ -57,10 +57,7 @@ pub fn apply_diff(root: ProofDiff, proof: Vec) -> Vec { + (_, CommandDiff::Step(mut elaboration)) => { f.result.commands.append(&mut elaboration); } (_, CommandDiff::Delete) => (), diff --git a/carcara/src/elaborator/mod.rs b/carcara/src/elaborator/mod.rs index 90c8305b..d20cf7b6 100644 --- a/carcara/src/elaborator/mod.rs +++ b/carcara/src/elaborator/mod.rs @@ 
-3,11 +3,12 @@ mod diff; mod polyeq; mod pruning; +pub use diff::{apply_diff, CommandDiff, ProofDiff}; +pub use pruning::{prune_proof, slice_proof}; + use crate::{ast::*, utils::HashMapStack}; use accumulator::Accumulator; -use diff::{apply_diff, CommandDiff, ProofDiff}; use polyeq::PolyeqElaborator; -use pruning::prune_proof; #[derive(Debug, Default)] struct Frame { diff --git a/carcara/src/elaborator/pruning.rs b/carcara/src/elaborator/pruning.rs index a55ae7a6..0c7d81b7 100644 --- a/carcara/src/elaborator/pruning.rs +++ b/carcara/src/elaborator/pruning.rs @@ -10,77 +10,92 @@ struct Frame<'a> { /// The index of the subproof that this frame represents, in the outer subproof index_of_subproof: usize, - visited: Vec, + + /// For each command, the distance between it and the source. + distance_to_source: Vec, + + /// The queue of commands to visit, represented as a tuple of (command index, distance to + /// source) + queue: VecDeque<(usize, usize)>, } pub fn prune_proof(proof: &[ProofCommand]) -> ProofDiff { - assert!(!proof.is_empty(), "cannot prune an empty proof"); - let end_step = proof .iter() .position(|c| c.clause().is_empty()) .expect("proof does not reach empty clause"); - let root = Frame { + slice_proof(proof, end_step, None) +} + +pub fn slice_proof( + proof: &[ProofCommand], + source: usize, + max_distance: Option, +) -> ProofDiff { + assert!(proof.len() > source, "invalid slice index"); + + let mut stack = vec![Frame { commands: proof, subproof_diffs: vec![None; proof.len()], - visited: vec![false; proof.len()], + distance_to_source: vec![usize::MAX; proof.len()], index_of_subproof: 0, // For the root proof, this value is irrelevant - }; - let mut stack = vec![root]; - let mut to_visit = vec![VecDeque::from([end_step])]; + queue: VecDeque::from([(source, 0usize)]), + }]; loop { 'inner: loop { let frame = stack.last_mut().unwrap(); - let Some(current) = to_visit.last_mut().unwrap().pop_front() else { + let Some((current, current_dist)) = 
frame.queue.pop_front() else { break 'inner; }; - if frame.visited[current] { + if frame.distance_to_source[current] < usize::MAX { + continue; + } + frame.distance_to_source[current] = + std::cmp::min(frame.distance_to_source[current], current_dist); + + if max_distance.map_or(false, |max| current_dist > max) { continue; } - frame.visited[current] = true; match &frame.commands[current] { ProofCommand::Assume { .. } => (), ProofCommand::Step(s) => { - for &(depth, i) in &s.premises { - to_visit[depth].push_back(i); + for &(_, i) in &s.premises { + frame.queue.push_back((i, current_dist + 1)); } } ProofCommand::Subproof(s) => { let n = s.commands.len(); - let mut visited = vec![false; n]; let mut new_queue = VecDeque::new(); - new_queue.push_back(n - 1); - - // Since the second to last command in a subproof may be implicitly referenced - // by the last command, we have to add it to the `to_visit` queue if it exists - if n >= 2 { - new_queue.push_back(n - 2); - } + new_queue.push_back((n - 1, current_dist)); - // Since `assume` commands in the subproof cannot be removed we need to always - // visit them. 
As they don't have any premises, we can just mark them as visited - // now + // Since `assume` commands in a subproof are implicitly referenced by the last + // step in the subproof, we must add them to the queue now for (i, command) in s.commands.iter().enumerate() { if command.is_assume() { - visited[i] = true; + new_queue.push_back((i, current_dist + 1)); } } + // The second to last command in a subproof is also implicitly referenced by the + // last step, so we also add it to the queue + if n >= 2 { + new_queue.push_back((n - 2, current_dist + 1)); + } + let frame = Frame { commands: &s.commands, subproof_diffs: vec![None; n], - visited, + distance_to_source: vec![usize::MAX; n], index_of_subproof: current, + queue: new_queue, }; stack.push(frame); - to_visit.push(new_queue); } } } - to_visit.pop(); let mut frame = stack.pop().unwrap(); let mut result_diff = Vec::new(); @@ -90,9 +105,19 @@ pub fn prune_proof(proof: &[ProofCommand]) -> ProofDiff { for i in 0..frame.commands.len() { new_indices.push((depth, i - num_pruned)); - if !frame.visited[i] { + if frame.distance_to_source[i] == usize::MAX { result_diff.push((i, CommandDiff::Delete)); num_pruned += 1; + } else if max_distance.map_or(false, |max| frame.distance_to_source[i] == max + 1) { + let new_command = ProofCommand::Step(ProofStep { + id: frame.commands[i].id().to_owned(), + clause: frame.commands[i].clause().to_vec(), + rule: "hole".to_owned(), + premises: Vec::new(), + args: Vec::new(), + discharge: Vec::new(), + }); + result_diff.push((i, CommandDiff::Step(vec![new_command]))); } else if let Some(diff) = frame.subproof_diffs[i].take() { result_diff.push((i, CommandDiff::Subproof(diff))); } diff --git a/cli/src/error.rs b/cli/src/error.rs index 53073039..ab70fec4 100644 --- a/cli/src/error.rs +++ b/cli/src/error.rs @@ -4,6 +4,7 @@ use std::{fmt, io, path::PathBuf}; pub enum CliError { CarcaraError(carcara::Error), CantInferProblemFile(PathBuf), + InvalidSliceId(String), BothFilesStdin, } @@ -29,6 
+30,7 @@ impl fmt::Display for CliError { write!(f, "can't infer problem file: {}", p.display()) } CliError::BothFilesStdin => write!(f, "problem and proof files can't both be `-`"), + CliError::InvalidSliceId(id) => write!(f, "invalid id for slice: {}", id), } } } diff --git a/cli/src/main.rs b/cli/src/main.rs index b75733fb..ccd32eb8 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -70,6 +70,9 @@ enum Command { /// Checks a series of proof files and records performance statistics. Bench(BenchCommandOptions), + + /// Given a step, takes a slice of a proof consisting of all its transitive premises. + Slice(SliceCommandOption), } #[derive(Args)] @@ -218,6 +221,24 @@ struct BenchCommandOptions { files: Vec, } +#[derive(Args)] +struct SliceCommandOption { + #[clap(flatten)] + input: Input, + + #[clap(flatten)] + parsing: ParsingOptions, + + #[clap(flatten)] + printing: PrintingOptions, + + #[clap(long)] + from: String, + + #[clap(long, short = 'd')] + max_distance: Option, +} + #[derive(ArgEnum, Clone)] enum LogLevel { Off, @@ -258,6 +279,7 @@ fn main() { } Command::Elaborate(options) => elaborate_command(options), Command::Bench(options) => bench_command(options), + Command::Slice(options) => slice_command(options), }; if let Err(e) = result { log::error!("{}", e); @@ -479,3 +501,27 @@ fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) } Ok(()) } + +fn slice_command(options: SliceCommandOption) -> CliResult<()> { + let (problem, proof) = get_instance(&options.input)?; + let (_, proof, _) = parser::parse_instance( + problem, + proof, + options.parsing.apply_function_defs, + options.parsing.expand_let_bindings, + options.parsing.allow_int_real_subtyping, + ) + .map_err(carcara::Error::from)?; + + let source_index = proof + .commands + .iter() + .position(|c| c.id() == options.from) + .ok_or_else(|| CliError::InvalidSliceId(options.from.to_owned()))?; + + let diff = + carcara::elaborator::slice_proof(&proof.commands, source_index, 
options.max_distance); + let slice = carcara::elaborator::apply_diff(diff, proof.commands); + print_proof(&slice, options.printing.use_sharing)?; + Ok(()) +} From d8a863aad0af36be2ec39012cdabeab84a547e07 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Tue, 11 Jul 2023 13:53:04 -0300 Subject: [PATCH 11/70] Added basic flags --- carcara/src/lib.rs | 11 ++++++++++- cli/src/main.rs | 43 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 84a181dd..94860df7 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -91,6 +91,10 @@ pub struct CarcaraOptions { /// If `true`, Carcara will skip any rules that it does not recognize, and will consider them as /// holes. Normally, using an unknown rule is considered an error. pub skip_unknown_rules: bool, + + /// If `true`, Carcará will log the check and elaboration statistics of any + /// `check` or `check_and_elaborate` run. If `false` no statistics are logged. + pub stats: bool, } impl CarcaraOptions { @@ -130,7 +134,12 @@ pub enum Error { DoesNotReachEmptyClause, } -pub fn check(problem: T, proof: T, options: CarcaraOptions) -> Result { +pub fn check( + problem: T, + proof: T, + options: CarcaraOptions, + num_threads: usize, +) -> Result { let (prelude, proof, mut pool) = parser::parse_instance( problem, proof, diff --git a/cli/src/main.rs b/cli/src/main.rs index ccd32eb8..93618309 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -85,6 +85,13 @@ struct Input { problem_file: Option, } +#[derive(Args)] +struct StatsOptions { + /// Enables the gathering of performance statistics + #[clap(long)] + stats: bool, +} + #[derive(Args, Clone, Copy)] struct ParsingOptions { /// Expand function definitions introduced by `define-fun`s in the SMT problem. 
If this flag is @@ -137,6 +144,7 @@ fn build_carcara_options( skip_unknown_rules, lia_via_cvc5, }: CheckingOptions, + StatsOptions { stats }: StatsOptions, ) -> CarcaraOptions { CarcaraOptions { apply_function_defs, @@ -145,6 +153,7 @@ fn build_carcara_options( lia_via_cvc5, strict, skip_unknown_rules, + stats, } } @@ -170,6 +179,23 @@ struct CheckCommandOptions { #[clap(flatten)] checking: CheckingOptions, + + /// Defines the number of cores for proof checking. + #[clap(short = 'u', long, required = false, default_value = "1", validator = |s: &str| -> Result<(), String> { + if let Ok(n) = s.to_string().parse() as Result { + if n < 1 { + Err(format!("The threads number can't be {n}.")) + } else { + Ok(()) + } + } else { + Err(String::from("Not a number.")) + } + })] + num_threads: usize, + + #[clap(flatten)] + stats: StatsOptions, } #[derive(Args)] @@ -185,6 +211,9 @@ struct ElaborateCommandOptions { #[clap(flatten)] printing: PrintingOptions, + + #[clap(flatten)] + stats: StatsOptions, } #[derive(Args)] @@ -323,7 +352,8 @@ fn check_command(options: CheckCommandOptions) -> CliResult { check( problem, proof, - build_carcara_options(options.parsing, options.checking), + build_carcara_options(options.parsing, options.checking, options.stats), + options.num_threads, ) .map_err(Into::into) } @@ -334,7 +364,7 @@ fn elaborate_command(options: ElaborateCommandOptions) -> CliResult<()> { let (_, elaborated) = check_and_elaborate( problem, proof, - build_carcara_options(options.parsing, options.checking), + build_carcara_options(options.parsing, options.checking, options.stats), )?; print_proof(&elaborated.commands, options.printing.use_sharing)?; Ok(()) @@ -353,12 +383,17 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { options.num_runs ); + let carc_options = build_carcara_options( + options.parsing, + options.checking, + StatsOptions { stats: false }, + ); if options.dump_to_csv { benchmarking::run_csv_benchmark( &instances, options.num_runs, 
options.num_threads, - &build_carcara_options(options.parsing, options.checking), + &carc_options, options.elaborate, &mut File::create("runs.csv")?, &mut File::create("by-rule.csv")?, @@ -370,7 +405,7 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { &instances, options.num_runs, options.num_threads, - &build_carcara_options(options.parsing, options.checking), + &carc_options, options.elaborate, ); if results.is_empty() { From acd33a6044244b35756dd7d1856c1708c08c8837 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Wed, 12 Jul 2023 12:31:26 -0300 Subject: [PATCH 12/70] Add GitHub Actions CI workflow Copied over from `main` branch. --- .github/workflows/ci.yml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..9825cfa2 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,36 @@ +name: CI + +on: [push, pull_request] + +jobs: + setup: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: install toolchain + run: rustup default 1.67 + - name: add components + run: rustup component add clippy && rustup component add rustfmt + build: + runs-on: ubuntu-latest + needs: setup + steps: + - uses: actions/checkout@v3 + - name: lint + run: cargo clippy --all-targets --all-features --tests --no-deps -- -D warnings + - name: build + run: cargo build + test: + runs-on: ubuntu-latest + needs: setup + steps: + - uses: actions/checkout@v3 + - name: test + run: cargo test --release + format: + runs-on: ubuntu-latest + needs: setup + steps: + - uses: actions/checkout@v3 + - name: check formatting + run: cargo fmt --check From c1210c901c11c9cf85a3af56a00d491093ba01c1 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 12 Jul 2023 14:34:27 -0300 Subject: [PATCH 13/70] Added load scheduler, added comments and improved some interfaces 
--- carcara/src/ast/mod.rs | 8 + carcara/src/ast/printer.rs | 1 + carcara/src/checker/lia_generic.rs | 1 + carcara/src/checker/mod.rs | 3 + carcara/src/checker/scheduler/iter.rs | 79 +++++++++ carcara/src/checker/scheduler/mod.rs | 211 +++++++++++++++++++++++ carcara/src/checker/scheduler/weights.rs | 131 ++++++++++++++ carcara/src/elaborator/accumulator.rs | 1 + carcara/src/elaborator/pruning.rs | 1 + carcara/src/lib.rs | 3 + carcara/src/parser/mod.rs | 6 + 11 files changed, 445 insertions(+) create mode 100644 carcara/src/checker/scheduler/iter.rs create mode 100644 carcara/src/checker/scheduler/mod.rs create mode 100644 carcara/src/checker/scheduler/weights.rs diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index a3ffda50..b07c9944 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -75,6 +75,9 @@ pub enum ProofCommand { /// A subproof. Subproof(Subproof), + + /// A subproof closing step + Closing, } impl ProofCommand { @@ -86,6 +89,7 @@ impl ProofCommand { ProofCommand::Assume { id, .. } => id, ProofCommand::Step(s) => &s.id, ProofCommand::Subproof(s) => s.commands.last().unwrap().id(), + ProofCommand::Closing => "", } } @@ -99,6 +103,7 @@ impl ProofCommand { ProofCommand::Assume { id: _, term } => std::slice::from_ref(term), ProofCommand::Step(ProofStep { clause, .. }) => clause, ProofCommand::Subproof(s) => s.commands.last().unwrap().clause(), + ProofCommand::Closing => &[], } } @@ -160,6 +165,9 @@ pub struct Subproof { /// The "variable" style arguments of the subproof, of the form `( )`. pub variable_args: Vec, + + /// Subproof id used for context hashing purpose + pub context_id: usize, } /// An argument for a `step` command. 
diff --git a/carcara/src/ast/printer.rs b/carcara/src/ast/printer.rs index b1d38916..46632ea0 100644 --- a/carcara/src/ast/printer.rs +++ b/carcara/src/ast/printer.rs @@ -148,6 +148,7 @@ impl<'a> PrintProof for AlethePrinter<'a> { } write!(self.inner, ")")?; } + ProofCommand::Closing => {} } writeln!(self.inner)?; } diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index b71e24a7..3a7473be 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -134,6 +134,7 @@ fn update_premises(commands: &mut [ProofCommand], delta: usize, root_id: &str) { ProofCommand::Subproof(s) => { update_premises(&mut s.commands, delta, root_id); } + ProofCommand::Closing => {} } } } diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 32646f34..03c818bf 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -1,11 +1,13 @@ pub mod error; mod lia_generic; mod rules; +mod scheduler; use crate::{ast::*, benchmarking::CollectResults, elaborator::Elaborator, CarcaraResult, Error}; use ahash::AHashSet; use error::CheckerError; use rules::{ElaborationRule, Premise, Rule, RuleArgs, RuleResult}; +pub use scheduler::Scheduler; use std::{ fmt, time::{Duration, Instant}, @@ -174,6 +176,7 @@ impl<'c> ProofChecker<'c> { }); } } + ProofCommand::Closing => {} } } if self.config.is_running_test || self.reached_empty_clause { diff --git a/carcara/src/checker/scheduler/iter.rs b/carcara/src/checker/scheduler/iter.rs new file mode 100644 index 00000000..e5559414 --- /dev/null +++ b/carcara/src/checker/scheduler/iter.rs @@ -0,0 +1,79 @@ +use crate::ast::ProofCommand; + +/// Iterates through schedule steps +pub struct ScheduleIter<'a> { + proof_stack: Vec<&'a [ProofCommand]>, + steps: &'a Vec<(usize, usize)>, + step_id: usize, +} + +impl<'a> ScheduleIter<'a> { + pub fn new(proof_commands: &'a [ProofCommand], steps: &'a Vec<(usize, usize)>) -> Self { + Self { + proof_stack: vec![proof_commands], + 
steps, + step_id: 0, + } + } + + /// Returns the current nesting depth of the iterator, or more precisely, + /// the nesting depth of the last step that was returned. This depth starts + /// at zero, for steps in the root proof. + pub fn depth(&self) -> usize { + self.proof_stack.len() - 1 + } + + /// Returns `true` if the iterator is currently in a subproof, that is, if + /// its depth is greater than zero. + pub fn is_in_subproof(&self) -> bool { + self.depth() > 0 + } + + /// Returns a slice to the commands of the inner-most open subproof. + pub fn current_subproof(&self) -> Option<&[ProofCommand]> { + self.is_in_subproof() + .then(|| *self.proof_stack.last().unwrap()) + } + + /// Returns `true` if the most recently returned step is the last step of + /// the current subproof. + pub fn is_end_step(&self) -> bool { + self.is_in_subproof() + && self.steps[self.step_id - 1].1 == self.proof_stack.last().unwrap().len() - 1 + } + + /// Returns the command referenced by a premise index of the form (depth, index in subproof). + /// This method may panic if the premise index does not refer to a valid command. 
+ pub fn get_premise(&self, (depth, index): (usize, usize)) -> &ProofCommand { + &self.proof_stack[depth][index] + } +} + +impl<'a> Iterator for ScheduleIter<'a> { + type Item = &'a ProofCommand; + + fn next(&mut self) -> Option { + // If it isn't the end of the steps + if self.step_id < self.steps.len() { + let cur_step = self.steps[self.step_id]; + self.step_id += 1; + // If current step is an closing subproof + if let (_, usize::MAX) = cur_step { + return Some(&ProofCommand::Closing); + } + while cur_step.0 != self.proof_stack.len() - 1 { + self.proof_stack.pop(); + } + + let top = self.proof_stack.last().unwrap(); + let command = &top[cur_step.1]; + // Opens a new subproof + if let ProofCommand::Subproof(subproof) = command { + self.proof_stack.push(&subproof.commands); + } + Some(command) + } else { + None + } + } +} diff --git a/carcara/src/checker/scheduler/mod.rs b/carcara/src/checker/scheduler/mod.rs new file mode 100644 index 00000000..020475eb --- /dev/null +++ b/carcara/src/checker/scheduler/mod.rs @@ -0,0 +1,211 @@ +pub(crate) mod iter; +pub(crate) mod weights; + +use crate::{ + ast::{Proof, ProofCommand}, + checker::scheduler::weights::get_step_weight, +}; +use iter::ScheduleIter; +use std::{ + cmp::Ordering, + collections::{BinaryHeap, HashSet}, +}; + +/// Struct responsible for storing a thread work schedule. +/// +/// Here, each step from the original proof is represented as a tuple: +/// (depth, subproof index). The first element is the subproof nesting `depth` +/// (in the subproof stack) and `subproof index` is the index where this step is +/// located in the subproof vector. 
+#[derive(Clone)] +pub struct Schedule { + steps: Vec<(usize, usize)>, +} + +impl Schedule { + pub fn new() -> Self { + Schedule { steps: vec![] } + } + + /// Inserts a new step into the end of the schedule steps vector + pub fn push(&mut self, cmd: (usize, usize)) { + self.steps.push(cmd); + } + + /// Removes the last step from the end of the steps vector + pub fn pop(&mut self) { + self.steps.pop(); + } + + /// Returns the last schedule step + pub fn last(&self) -> Option<&(usize, usize)> { + self.steps.last() + } + + /// Returns an iterator over the proof commands. See [`ProofIter`]. + pub fn iter<'a>(&'a self, proof: &'a [ProofCommand]) -> ScheduleIter { + ScheduleIter::new(proof, &self.steps) + } +} + +// ============================================================================= + +/// Represents the current load assigned for an specific schedule. +/// `0`: Current work load +/// `1`: Schedule index +#[derive(Eq)] +struct AssignedLoad(u64, usize); + +impl Ord for AssignedLoad { + fn cmp(&self, other: &Self) -> Ordering { + if self.0 > other.0 { + return Ordering::Less; + } else if self.0 < other.0 { + return Ordering::Greater; + } + return Ordering::Equal; + } +} + +impl PartialOrd for AssignedLoad { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for AssignedLoad { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +/// Represents a level in the proof stack. It holds the subproof itself, +/// its prerequisite step (anchor) and which schedules used any step inside +/// this layer +struct StackLevel<'a> { + id: usize, + cmds: &'a [ProofCommand], + pre_req: Option<(usize, usize)>, + used_by: HashSet, +} + +impl<'a> StackLevel<'a> { + pub fn new(id: usize, cmds: &'a [ProofCommand], pre_req: Option<(usize, usize)>) -> Self { + Self { + id, + cmds, + pre_req, + used_by: HashSet::new(), + } + } +} + +/// Struct that stores the schedules for each thread. 
+pub struct Scheduler { + pub loads: Vec, +} + +impl Scheduler { + /// Creates a thread scheduler for this proof using a specific number of + /// workers. This scheduler is responsible for balancing the load (the + /// proof steps have different costs to be checked) aiming the minimum + /// amount of async overhead + pub fn new(num_workers: usize, proof: &Proof) -> (Self, Vec) { + // Initializes the control and result variables + let cmds = &proof.commands; + let mut loads = vec![Schedule::new(); num_workers]; + let mut stack = vec![StackLevel::new(0, cmds, None)]; + let mut pq = BinaryHeap::::new(); + let mut context_usage = vec![]; + for i in 0..num_workers { + pq.push(AssignedLoad { 0: 0, 1: i }); + } + + loop { + // Pop the finished subproofs + while stack.len() != 0 && { + let top = stack.last().unwrap(); + top.id == top.cmds.len() + } { + for schedule_id in &stack.last().unwrap().used_by { + let last = loads[*schedule_id].last().unwrap(); + // If it's an useless context insertion + if last.0 <= stack.len() - 1 + && matches!(stack[last.0].cmds[last.1], ProofCommand::Subproof(_)) + { + // Make sure this context usage count is reduced + let subproof_id = match &stack[last.0].cmds[last.1] { + ProofCommand::Subproof(s) => s.context_id, + _ => unreachable!(), + }; + context_usage[subproof_id] -= 1; + + loads[*schedule_id].pop(); + } + // Creates a closing step for each schedule that used this subproof + else { + loads[*schedule_id].push((stack.len() - 1, usize::MAX)); + } + } + stack.pop(); + } + if stack.len() == 0 { + break; + } + // + let AssignedLoad { 0: mut load, 1: load_index } = pq.pop().unwrap(); + { + let top = stack.last().unwrap(); + let step_weight = get_step_weight(&top.cmds[top.id]); + assert!(u64::MAX - step_weight >= load, "Weight balancing overflow!"); + load += step_weight; + pq.push(AssignedLoad { 0: load, 1: load_index }); + } + + let depth = stack.len() - 1; + let (mut i, initial_layer) = (1, { + let tmp = 
loads[load_index].last().unwrap_or_else(|| &(0, 0)); + if tmp.1 == usize::MAX { + tmp.0 - 1 + } else { + tmp.0 + } + }); + // If this step needs the context of the subproof oppening step + // but it was not assigned to this schedule yet + while initial_layer + i <= depth { + let subproof_oppening = stack[initial_layer + i].pre_req.unwrap(); + let last_inserted = *loads[load_index].last().unwrap_or_else(|| &(usize::MAX, 0)); + + if last_inserted != subproof_oppening { + loads[load_index].push(subproof_oppening); + stack[subproof_oppening.0].used_by.insert(load_index); + + // Now this subproof is used by another schedule + let subproof_id = match &stack[subproof_oppening.0].cmds[subproof_oppening.1] { + ProofCommand::Subproof(s) => s.context_id, + _ => unreachable!(), + }; + context_usage[subproof_id] += 1; + } + i += 1; + } + + let top = stack.last_mut().unwrap(); + // Assign a step to some Schedule + loads[load_index].push((depth, top.id)); + top.used_by.insert(load_index); + + // Go to next step + let last_id = top.id; + top.id += 1; + if let ProofCommand::Subproof(s) = &top.cmds[last_id] { + stack.push(StackLevel::new(0, &s.commands, Some((depth, last_id)))); + stack.last_mut().unwrap().used_by.insert(load_index); + // First schedule using this subproof + context_usage.push(1); + } + } + (Scheduler { loads }, context_usage) + } +} diff --git a/carcara/src/checker/scheduler/weights.rs b/carcara/src/checker/scheduler/weights.rs new file mode 100644 index 00000000..61d11c99 --- /dev/null +++ b/carcara/src/checker/scheduler/weights.rs @@ -0,0 +1,131 @@ +use crate::ast::ProofCommand; + +/// Function that returns a weight associated with a specific rule. These +/// weights are directly correlated to carcara (Single Thread/previous version) +/// median performance while solving each of those rules. 
+/// +/// Even though subproofs should have a weight (since it has a high cost to be +/// computed), it's for better of scheduler architecture that subproofs have a +/// null weight. +/// +/// If you're interested in these weight values, take a look at Carcara's paper +/// published at TACAS in April 2023 +/// (https://hanielbarbosa.com/papers/tacas2023.pdf) and its benchmark data. +/// +/// The rules with null weight are rules that we had no info about the median +/// performance, since the solver used in the paper dataset does not generate +/// these rules. +pub fn get_step_weight(step: &ProofCommand) -> u64 { + match step { + ProofCommand::Assume { .. } => 230, + ProofCommand::Subproof(_) => 0, + ProofCommand::Step(s) => { + match &s.rule as &str { + "assume" => 230, + "true" => 0, //-1 + "false" => 263, + "not_not" => 574, + "and_pos" => 361, + "and_neg" => 607, + "or_pos" => 640, + "or_neg" => 460, + "xor_pos1" => 763, + "xor_pos2" => 345, + "xor_neg1" => 0, //-1 + "xor_neg2" => 0, //-1 + "implies_pos" => 394, + "implies_neg1" => 214, + "implies_neg2" => 287, + "equiv_pos1" => 763, + "equiv_pos2" => 541, + "equiv_neg1" => 434, + "equiv_neg2" => 476, + "ite_pos1" => 804, + "ite_pos2" => 344, + "ite_neg1" => 566, + "ite_neg2" => 542, + "eq_reflexive" => 451, + "eq_transitive" => 780, + "eq_congruent" => 722, + "eq_congruent_pred" => 632, + "distinct_elim" => 812, + "la_rw_eq" => 1091, + "la_generic" => 87564, + "la_disequality" => 919, + "la_totality" => 0, //-1 + "la_tautology" => 4291, + "forall_inst" => 7877, + "qnt_join" => 2347, + "qnt_rm_unused" => 3659, + "resolution" => 7491, + "th_resolution" => 2462, + "refl" => 1305, + "trans" => 575, + "cong" => 984, + "ho_cong" => 0, //-1 + "and" => 493, + "tautology" => 0, //-1 + "not_or" => 476, + "or" => 426, + "not_and" => 927, + "xor1" => 0, //-1 + "xor2" => 0, //-1 + "not_xor1" => 0, //-1 + "not_xor2" => 0, //-1 + "implies" => 788, + "not_implies1" => 402, + "not_implies2" => 484, + "equiv1" => 837, + "equiv2" 
=> 812, + "not_equiv1" => 418, + "not_equiv2" => 451, + "ite1" => 509, + "ite2" => 493, + "not_ite1" => 722, + "not_ite2" => 476, + "ite_intro" => 3192, + "contraction" => 1731, + "connective_def" => 705, + "ite_simplify" => 1797, + "eq_simplify" => 845, + "and_simplify" => 1165, + "or_simplify" => 1133, + "not_simplify" => 787, + "implies_simplify" => 1231, + "equiv_simplify" => 1337, + "bool_simplify" => 1436, + "qnt_simplify" => 517, + "div_simplify" => 2117, + "prod_simplify" => 2527, + "unary_minus_simplify" => 0, //-1 + "minus_simplify" => 1059, + "sum_simplify" => 2248, + "comp_simplify" => 1781, + "nary_elim" => 0, //-1 + "ac_simp" => 9781, + "bfun_elim" => 8558, + "bind" => 5924, + "qnt_cnf" => 14244, + "subproof" => 262, + "let" => 4718, + "onepoint" => 7787, + "sko_ex" => 9321, + "sko_forall" => 12242, + "reordering" => 1452, + "symm" => 682, + "not_symm" => 0, //-1 + "eq_symmetric" => 673, + "or_intro" => 508, + "bind_let" => 2324, + "la_mult_pos" => 1446, + "la_mult_neg" => 1447, + "hole" => 185, //Debug only + "trust" => 185, //Debug only + "strict_resolution" => 1276, + + _ => 0, + } + } + ProofCommand::Closing => 0, + } +} diff --git a/carcara/src/elaborator/accumulator.rs b/carcara/src/elaborator/accumulator.rs index 4525a8d3..1c145ba4 100644 --- a/carcara/src/elaborator/accumulator.rs +++ b/carcara/src/elaborator/accumulator.rs @@ -66,6 +66,7 @@ impl Accumulator { commands, assignment_args, variable_args, + context_id: 0, }) } diff --git a/carcara/src/elaborator/pruning.rs b/carcara/src/elaborator/pruning.rs index 0c7d81b7..84ad03ec 100644 --- a/carcara/src/elaborator/pruning.rs +++ b/carcara/src/elaborator/pruning.rs @@ -94,6 +94,7 @@ pub fn slice_proof( }; stack.push(frame); } + ProofCommand::Closing => {} } } let mut frame = stack.pop().unwrap(); diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 94860df7..1e50147e 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -140,6 +140,8 @@ pub fn check( options: CarcaraOptions, 
num_threads: usize, ) -> Result { + use crate::checker::Scheduler; + let (prelude, proof, mut pool) = parser::parse_instance( problem, proof, @@ -152,6 +154,7 @@ pub fn check( .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) .lia_via_cvc5(options.lia_via_cvc5); + let (scheduler, schedule_context_usage) = Scheduler::new(num_threads, &proof); checker::ProofChecker::new(&mut pool, config, prelude).check(&proof) } diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index 8da113d3..421b79b5 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -557,6 +557,8 @@ impl<'a, R: BufRead> Parser<'a, R> { let mut commands_stack = vec![Vec::new()]; let mut end_step_stack = Vec::new(); let mut subproof_args_stack = Vec::new(); + let mut subproof_id_stack = Vec::new(); + let mut last_subproof_id: i64 = -1; let mut finished_assumes = false; @@ -594,6 +596,8 @@ impl<'a, R: BufRead> Parser<'a, R> { commands_stack.push(Vec::new()); end_step_stack.push(anchor.end_step_id); subproof_args_stack.push((anchor.assignment_args, anchor.variable_args)); + last_subproof_id += 1; + subproof_id_stack.push(last_subproof_id as usize); continue; } _ => return Err(Error::Parser(ParserError::UnexpectedToken(token), position)), @@ -615,6 +619,7 @@ impl<'a, R: BufRead> Parser<'a, R> { let commands = commands_stack.pop().unwrap(); end_step_stack.pop().unwrap(); let (assignment_args, variable_args) = subproof_args_stack.pop().unwrap(); + let subproof_id = subproof_id_stack.pop().unwrap(); // The subproof must contain at least two commands: the end step and the previous // command it implicitly references @@ -643,6 +648,7 @@ impl<'a, R: BufRead> Parser<'a, R> { commands, assignment_args, variable_args, + context_id: subproof_id, })); } self.state From 0fa6659a33e70b97fc15d81c4401b2e226c3da24 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 14 Jul 2023 09:48:43 -0300 Subject: [PATCH 14/70] Added parallel 
pool --- carcara/src/ast/context.rs | 4 +- carcara/src/ast/mod.rs | 6 +- carcara/src/ast/pool.rs | 892 +++++++++++++++----- carcara/src/ast/rc.rs | 14 +- carcara/src/ast/substitution.rs | 29 +- carcara/src/ast/tests.rs | 12 +- carcara/src/checker/lia_generic.rs | 2 +- carcara/src/checker/mod.rs | 65 +- carcara/src/checker/parallel.rs | 455 ++++++++++ carcara/src/checker/rules/clausification.rs | 6 +- carcara/src/checker/rules/mod.rs | 2 +- carcara/src/checker/rules/quantifier.rs | 5 +- carcara/src/checker/rules/subproof.rs | 2 +- carcara/src/elaborator/polyeq.rs | 2 +- carcara/src/lib.rs | 6 +- carcara/src/parser/mod.rs | 73 +- carcara/src/parser/tests.rs | 18 +- carcara/tests/test_example_files.rs | 8 +- cli/src/benchmarking.rs | 12 +- 19 files changed, 1278 insertions(+), 335 deletions(-) create mode 100644 carcara/src/checker/parallel.rs diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index b51a4089..60bd05ad 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -53,7 +53,7 @@ impl ContextStack { // we use the current state of the hash map to transform `(f y)` into `(f z)`. 
The // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` for (var, value) in assignment_args.iter() { - let sort = Term::Sort(pool.sort(value).clone()); + let sort = pool.sort(value).as_ref().clone(); let var_term = Term::new_var(var, pool.add(sort)); let var_term = pool.add(var_term); substitution.insert(pool, var_term.clone(), value.clone())?; @@ -64,7 +64,7 @@ impl ContextStack { let mappings = assignment_args .iter() .map(|(var, value)| { - let sort = Term::Sort(pool.sort(value).clone()); + let sort = pool.sort(value).as_ref().clone(); let var_term = (var.clone(), pool.add(sort)).into(); (pool.add(var_term), value.clone()) }) diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index b07c9944..c60d5f17 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -17,7 +17,9 @@ mod tests; pub use context::{Context, ContextStack}; pub use iter::ProofIter; pub use polyeq::{alpha_equiv, polyeq, tracing_polyeq}; +pub use pool::TPool; pub use pool::TermPool; +pub use pool::{AdvancedPools, PrimitivePool}; pub use printer::print_proof; pub use rc::Rc; pub use substitution::{Substitution, SubstitutionError}; @@ -464,9 +466,9 @@ impl Term { /// Returns the sort of this term. This does not make use of a cache --- if possible, prefer to /// use `TermPool::sort`. pub fn raw_sort(&self) -> Sort { - let mut pool = TermPool::new(); + let mut pool = PrimitivePool::TermPool::new(); let added = pool.add(self.clone()); - pool.sort(&added).clone() + pool.sort(&added).as_sort().unwrap().clone() } /// Returns `true` if the term is a terminal, that is, if it is a constant or a variable. diff --git a/carcara/src/ast/pool.rs b/carcara/src/ast/pool.rs index 0b25399f..f7a1fe06 100644 --- a/carcara/src/ast/pool.rs +++ b/carcara/src/ast/pool.rs @@ -1,245 +1,725 @@ //! This module implements `TermPool`, a structure that stores terms and implements hash consing. 
-use super::{Constant, Rc, Sort, Term}; -use ahash::{AHashMap, AHashSet}; - -/// A structure to store and manage all allocated terms. -/// -/// You can add a `Term` to the pool using [`TermPool::add`], which will return an `Rc`. This -/// struct ensures that, if two equal terms are added to a pool, they will be in the same -/// allocation. This invariant allows terms to be safely compared and hashed by reference, instead -/// of by value (see [`Rc`]). -/// -/// This struct also provides other utility methods, like computing the sort of a term (see -/// [`TermPool::sort`]) or its free variables (see [`TermPool::free_vars`]). -pub struct TermPool { - /// A map of the terms in the pool. - pub(crate) terms: AHashMap>, - free_vars_cache: AHashMap, AHashSet>>, - sorts_cache: AHashMap, Sort>, - bool_true: Rc, - bool_false: Rc, -} - -impl Default for TermPool { - fn default() -> Self { - Self::new() - } -} - -impl TermPool { - /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` - /// and `false`, as well as the `Bool` sort. - pub fn new() -> Self { - let mut terms = AHashMap::new(); - let mut sorts_cache = AHashMap::new(); - let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); - - let [bool_true, bool_false] = ["true", "false"] - .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); +use super::{Rc, Term}; +use ahash::AHashSet; - sorts_cache.insert(bool_false.clone(), Sort::Bool); - sorts_cache.insert(bool_true.clone(), Sort::Bool); - sorts_cache.insert(bool_sort, Sort::Bool); - - Self { - terms, - free_vars_cache: AHashMap::new(), - sorts_cache, - bool_true, - bool_false, - } - } +pub type TermPool = AdvancedPools::LocalPool; +pub trait TPool { /// Returns the term corresponding to the boolean constant `true`. - pub fn bool_true(&self) -> Rc { - self.bool_true.clone() - } - + fn bool_true(&self) -> Rc; /// Returns the term corresponding to the boolean constant `false`. 
- pub fn bool_false(&self) -> Rc { - self.bool_false.clone() - } - + fn bool_false(&self) -> Rc; /// Returns the term corresponding to the boolean constant determined by `value`. - pub fn bool_constant(&self, value: bool) -> Rc { + fn bool_constant(&self, value: bool) -> Rc { match value { true => self.bool_true(), false => self.bool_false(), } } - - fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { - use std::collections::hash_map::Entry; - - match terms_map.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - vacant_entry.insert(Rc::new(term)).clone() - } - } - } - /// Takes a term and returns a possibly newly allocated `Rc` that references it. /// /// If the term was not originally in the term pool, it is added to it. Otherwise, this method /// just returns an `Rc` pointing to the existing allocation. This method also computes the /// term's sort, and adds it to the sort cache. - pub fn add(&mut self, term: Term) -> Rc { - let term = Self::add_term_to_map(&mut self.terms, term); - self.compute_sort(&term); - term - } - + fn add(&mut self, term: Term) -> Rc; /// Takes a vector of terms and calls [`TermPool::add`] on each. - pub fn add_all(&mut self, terms: Vec) -> Vec> { + fn add_all(&mut self, terms: Vec) -> Vec> { terms.into_iter().map(|t| self.add(t)).collect() } - /// Returns the sort of the given term. /// /// This method assumes that the sorts of any subterms have already been checked, and are /// correct. If `term` is itself a sort, this simply returns that sort. - pub fn sort(&self, term: &Rc) -> &Sort { - &self.sorts_cache[term] + fn sort(&self, term: &Rc) -> Rc; + /// Returns an `AHashSet` containing all the free variables in the given term. + /// + /// This method uses a cache, so there is no additional cost to computing the free variables of + /// a term multiple times. 
+ fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet>; +} + +#[allow(non_snake_case)] +pub mod PrimitivePool { + use crate::ast::Constant; + + use super::{ + super::{Rc, Sort, Term}, + TPool, + }; + use ahash::{AHashMap, AHashSet}; + + /// A structure to store and manage all allocated terms. + /// + /// You can add a `Term` to the pool using [`TermPool::add`], which will return an `Rc`. This + /// struct ensures that, if two equal terms are added to a pool, they will be in the same + /// allocation. This invariant allows terms to be safely compared and hashed by reference, instead + /// of by value (see [`Rc`]). + /// + /// This struct also provides other utility methods, like computing the sort of a term (see + /// [`TermPool::sort`]) or its free variables (see [`TermPool::free_vars`]). + pub struct TermPool { + /// A map of the terms in the pool. + pub(crate) terms: AHashMap>, + pub(crate) free_vars_cache: AHashMap, AHashSet>>, + pub(crate) sorts_cache: AHashMap, Rc>, + pub(crate) bool_true: Rc, + pub(crate) bool_false: Rc, + } + + impl Default for TermPool { + fn default() -> Self { + Self::new() + } } - /// Computes the sort of a term and adds it to the sort cache. 
- fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> &'a Sort { - use super::Operator; - - if self.sorts_cache.contains_key(term) { - return &self.sorts_cache[term]; - } - - let result = match term.as_ref() { - Term::Const(c) => match c { - Constant::Integer(_) => Sort::Int, - Constant::Real(_) => Sort::Real, - Constant::String(_) => Sort::String, - }, - Term::Var(_, sort) => sort.as_sort().unwrap().clone(), - Term::Op(op, args) => match op { - Operator::Not - | Operator::Implies - | Operator::And - | Operator::Or - | Operator::Xor - | Operator::Equals - | Operator::Distinct - | Operator::LessThan - | Operator::GreaterThan - | Operator::LessEq - | Operator::GreaterEq - | Operator::IsInt => Sort::Bool, - Operator::Ite => self.compute_sort(&args[1]).clone(), - Operator::Add | Operator::Sub | Operator::Mult => { - if args.iter().any(|a| *self.compute_sort(a) == Sort::Real) { - Sort::Real - } else { - Sort::Int - } - } - Operator::RealDiv | Operator::ToReal => Sort::Real, - Operator::IntDiv | Operator::Mod | Operator::Abs | Operator::ToInt => Sort::Int, - Operator::Select => match self.compute_sort(&args[0]) { - Sort::Array(_, y) => y.as_sort().unwrap().clone(), - _ => unreachable!(), + impl TermPool { + /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` + /// and `false`, as well as the `Bool` sort. 
+ pub fn new() -> Self { + let mut terms = AHashMap::new(); + let mut sorts_cache = AHashMap::new(); + let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); + + let [bool_true, bool_false] = ["true", "false"] + .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); + + sorts_cache.insert(bool_false.clone(), bool_sort.clone()); + sorts_cache.insert(bool_true.clone(), bool_sort.clone()); + sorts_cache.insert(bool_sort.clone(), bool_sort.clone()); + + Self { + terms, + free_vars_cache: AHashMap::new(), + sorts_cache, + bool_true, + bool_false, + } + } + + fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { + use std::collections::hash_map::Entry; + + match terms_map.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + vacant_entry.insert(Rc::new(term)).clone() + } + } + } + + /// Computes the sort of a term and adds it to the sort cache. 
+ pub(super) fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> Rc { + use super::super::Operator; + + if self.sorts_cache.contains_key(term) { + return self.sorts_cache[term].clone(); + } + + let result: Sort = match term.as_ref() { + Term::Const(c) => match c { + Constant::Integer(_) => Sort::Int, + Constant::Real(_) => Sort::Real, + Constant::String(_) => Sort::String, }, - Operator::Store => self.compute_sort(&args[0]).clone(), - }, - Term::App(f, _) => { - match self.compute_sort(f) { - Sort::Function(sorts) => sorts.last().unwrap().as_sort().unwrap().clone(), - _ => unreachable!(), // We assume that the function is correctly sorted - } - } - Term::Sort(sort) => sort.clone(), - Term::Quant(_, _, _) => Sort::Bool, - Term::Choice((_, sort), _) => sort.as_sort().unwrap().clone(), - Term::Let(_, inner) => self.compute_sort(inner).clone(), - Term::Lambda(bindings, body) => { - let mut result: Vec<_> = - bindings.iter().map(|(_name, sort)| sort.clone()).collect(); - let return_sort = Term::Sort(self.compute_sort(body).clone()); - result.push(self.add(return_sort)); - Sort::Function(result) - } - }; - self.sorts_cache.insert(term.clone(), result); - &self.sorts_cache[term] + Term::Var(_, sort) => sort.as_sort().unwrap().clone(), + Term::Op(op, args) => match op { + Operator::Not + | Operator::Implies + | Operator::And + | Operator::Or + | Operator::Xor + | Operator::Equals + | Operator::Distinct + | Operator::LessThan + | Operator::GreaterThan + | Operator::LessEq + | Operator::GreaterEq + | Operator::IsInt => Sort::Bool, + Operator::Ite => self.compute_sort(&args[1]).as_sort().unwrap().clone(), + Operator::Add | Operator::Sub | Operator::Mult => { + if args + .iter() + .any(|a| self.compute_sort(a).as_sort().unwrap() == &Sort::Real) + { + Sort::Real + } else { + Sort::Int + } + } + Operator::RealDiv | Operator::ToReal => Sort::Real, + Operator::IntDiv | Operator::Mod | Operator::Abs | Operator::ToInt => Sort::Int, + Operator::Select => match 
self.compute_sort(&args[0]).as_sort().unwrap() { + Sort::Array(_, y) => y.as_sort().unwrap().clone(), + _ => unreachable!(), + }, + Operator::Store => self.compute_sort(&args[0]).as_sort().unwrap().clone(), + }, + Term::App(f, _) => { + match self.compute_sort(f).as_sort().unwrap() { + Sort::Function(sorts) => sorts.last().unwrap().as_sort().unwrap().clone(), + _ => unreachable!(), // We assume that the function is correctly sorted + } + } + Term::Sort(sort) => sort.clone(), + Term::Quant(_, _, _) => Sort::Bool, + Term::Choice((_, sort), _) => sort.as_sort().unwrap().clone(), + Term::Let(_, inner) => self.compute_sort(inner).as_sort().unwrap().clone(), + Term::Lambda(bindings, body) => { + let mut result: Vec<_> = + bindings.iter().map(|(_name, sort)| sort.clone()).collect(); + let return_sort = self.compute_sort(body).as_ref().clone(); + result.push(self.add(return_sort)); + Sort::Function(result) + } + }; + let sorted_term = Self::add_term_to_map(&mut self.terms, Term::Sort(result)); + self.sorts_cache.insert(term.clone(), sorted_term); + self.sorts_cache[term].clone() + } } - /// Returns an `AHashSet` containing all the free variables in the given term. - /// - /// This method uses a cache, so there is no additional cost to computing the free variables of - /// a term multiple times. - pub fn free_vars(&mut self, term: &Rc) -> &AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. 
This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if self.free_vars_cache.contains_key(term) { - return self.free_vars_cache.get(term).unwrap(); - } - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = self.free_vars(f).clone(); - for a in args { - set.extend(self.free_vars(a).iter().cloned()); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend(self.free_vars(a).iter().cloned()); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); - for bound_var in bindings { + impl TPool for TermPool { + fn bool_true(&self) -> Rc { + self.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + let term = Self::add_term_to_map(&mut self.terms, term); + self.compute_sort(&term); + term + } + + fn sort(&self, term: &Rc) -> Rc { + self.sorts_cache[term].clone() + } + + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. 
This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if self.free_vars_cache.contains_key(term) { + return self.free_vars_cache.get(term).unwrap().clone(); + } + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = self.free_vars(f).clone(); + for a in args { + set.extend(self.free_vars(a).iter().cloned()); + } + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend(self.free_vars(a).iter().cloned()); + } + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = self.free_vars(inner).clone(); + for bound_var in bindings { + let term = self.add(bound_var.clone().into()); + vars.remove(&term); + } + vars + } + Term::Let(bindings, inner) => { + let mut vars = self.free_vars(inner).clone(); + for (var, value) in bindings { + let sort = self.sort(value).as_ref().clone(); + let sort = self.add(sort); + let term = self.add((var.clone(), sort).into()); + vars.remove(&term); + } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = self.free_vars(inner).clone(); let term = self.add(bound_var.clone().into()); vars.remove(&term); + vars + } + Term::Var(..) 
=> { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set } - vars + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + self.free_vars_cache.insert(term.clone(), set); + self.free_vars_cache.get(term).unwrap().clone() + } + } +} + +#[allow(non_snake_case)] +pub mod AdvancedPools { + use super::super::{Rc, Term}; + use super::{PrimitivePool, TPool}; + use ahash::AHashSet; + use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; + + pub struct ContextPool { + pub(crate) global_pool: Arc, + pub(crate) storage: Arc>, + } + + impl Default for ContextPool { + fn default() -> Self { + Self::new() + } + } + + impl ContextPool { + pub fn new() -> Self { + Self { + global_pool: Arc::new(PrimitivePool::TermPool::new()), + storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), } - Term::Let(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); - for (var, value) in bindings { - let sort = Term::Sort(self.sort(value).clone()); - let sort = self.add(sort); - let term = self.add((var.clone(), sort).into()); - vars.remove(&term); + } + + pub fn from_global(global_pool: &Arc) -> Self { + Self { + global_pool: global_pool.clone(), + storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), + } + } + + pub fn from_previous(ctx_pool: &Self) -> Self { + Self { + global_pool: ctx_pool.global_pool.clone(), + storage: ctx_pool.storage.clone(), + } + } + + /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. 
+ fn add_by_ref<'d, 'c: 'd>( + ctx_pool: &mut RwLockWriteGuard, + global_pool: &'d Arc, + term: Term, + ) -> Rc { + use std::collections::hash_map::Entry; + + // If the global pool has the term + if let Some(entry) = global_pool.terms.get(&term) { + entry.clone() + } else { + match ctx_pool.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + ctx_pool.compute_sort(&t); + t + } } - vars } - Term::Choice(bound_var, inner) => { - let mut vars = self.free_vars(inner).clone(); - let term = self.add(bound_var.clone().into()); - vars.remove(&term); - vars + } + + /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. + fn sort_by_ref<'d: 't, 'c: 'd, 't>( + ctx_pool: &RwLockWriteGuard, + global_pool: &'d Arc, + term: &'t Rc, + ) -> Rc { + if let Some(sort) = global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else { + ctx_pool.sorts_cache[term].clone() + } + } + } + + impl TPool for ContextPool { + fn bool_true(&self) -> Rc { + self.global_pool.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.global_pool.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + use std::collections::hash_map::Entry; + + // If the global pool has the term + if let Some(entry) = self.global_pool.terms.get(&term) { + entry.clone() + } else { + let mut ctx_guard = self.storage.write().unwrap(); + match ctx_guard.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + ctx_guard.compute_sort(&t); + t + } + } + } + } + + fn sort(&self, term: &Rc) -> Rc { + if let Some(sort) = self.global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + 
else { + self.storage.read().unwrap().sorts_cache[term].clone() } - Term::Var(..) => { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set + } + + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + fn internal<'d: 't, 'c: 'd, 't>( + ctx_pool: &'d mut RwLockWriteGuard<'_, PrimitivePool::TermPool>, + global_pool: &'c Arc, + term: &'t Rc, + ) -> &'t AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. 
This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if let Some(set) = global_pool.free_vars_cache.get(term) { + return set; + } + if ctx_pool.free_vars_cache.contains_key(term) { + return ctx_pool.free_vars_cache.get(term).unwrap(); + } + + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = internal(ctx_pool, global_pool, f).clone(); + for a in args { + set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); + } + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); + } + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = internal(ctx_pool, global_pool, inner).clone(); + for bound_var in bindings { + let term = ContextPool::add_by_ref( + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + } + vars + } + Term::Let(bindings, inner) => { + let mut vars = internal(ctx_pool, global_pool, inner).clone(); + for (var, value) in bindings { + let sort = ContextPool::sort_by_ref(ctx_pool, global_pool, value) + .as_ref() + .clone(); + let sort = ContextPool::add_by_ref(ctx_pool, global_pool, sort); + let term = ContextPool::add_by_ref( + ctx_pool, + global_pool, + (var.clone(), sort).into(), + ); + vars.remove(&term); + } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = internal(ctx_pool, global_pool, inner).clone(); + let term = ContextPool::add_by_ref( + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + vars + } + Term::Var(..) 
=> { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set + } + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + ctx_pool.free_vars_cache.insert(term.clone(), set); + ctx_pool.free_vars_cache.get(term).unwrap() + } + let mut ctx_guard = self.storage.write(); + internal(ctx_guard.as_mut().unwrap(), &self.global_pool, term).clone() + } + } + + // ========================================================================= + + pub struct LocalPool { + pub(crate) ctx_pool: ContextPool, + pub(crate) storage: PrimitivePool::TermPool, + } + + impl Default for LocalPool { + fn default() -> Self { + Self::new() + } + } + + impl LocalPool { + pub fn new() -> Self { + Self { + ctx_pool: ContextPool::new(), + storage: PrimitivePool::TermPool::new(), } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - self.free_vars_cache.insert(term.clone(), set); - self.free_vars_cache.get(term).unwrap() + } + + /// Instantiates a new `LocalPool` from a previous `ContextPool` (makes + /// sure the context is shared between threads). + pub fn from_previous(ctx_pool: &ContextPool) -> Self { + Self { + ctx_pool: ContextPool::from_previous(ctx_pool), + storage: PrimitivePool::TermPool::new(), + } + } + + /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. 
+ fn add_by_ref<'d, 'c: 'd>( + local_pool: &'d mut PrimitivePool::TermPool, + ctx_pool: &RwLockReadGuard, + global_pool: &'d Arc, + term: Term, + ) -> Rc { + use std::collections::hash_map::Entry; + + // If the global pool has the term + if let Some(entry) = global_pool.terms.get(&term) { + entry.clone() + } + // If this term was inserted by the context + else if let Some(entry) = ctx_pool.terms.get(&term) { + entry.clone() + } else { + match local_pool.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + local_pool.compute_sort(&t); + t + } + } + } + } + + /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. + fn sort_by_ref<'d: 't, 'c: 'd, 't>( + local_pool: &'d mut PrimitivePool::TermPool, + ctx_pool: &RwLockReadGuard, + global_pool: &'d Arc, + term: &'t Rc, + ) -> Rc { + if let Some(sort) = global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else if let Some(entry) = ctx_pool.terms.get(&term) { + entry.clone() + } else { + local_pool.sorts_cache[term].clone() + } + } + } + + impl TPool for LocalPool { + fn bool_true(&self) -> Rc { + self.ctx_pool.global_pool.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.ctx_pool.global_pool.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + use std::collections::hash_map::Entry; + + // If there is a constant pool and has the term + if let Some(entry) = self.ctx_pool.global_pool.terms.get(&term) { + entry.clone() + } + // If this term was inserted by the context + else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { + entry.clone() + } else { + match self.storage.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = 
vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + self.storage.compute_sort(&t); + t + } + } + } + } + + fn sort(&self, term: &Rc) -> Rc { + if let Some(sort) = self.ctx_pool.global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { + entry.clone() + } else { + self.storage.sorts_cache[term].clone() + } + } + + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + fn internal<'d: 't, 'c: 'd, 't>( + local_pool: &'d mut PrimitivePool::TermPool, + ctx_pool: &'t RwLockReadGuard<'t, PrimitivePool::TermPool>, + global_pool: &'d Arc, + term: &'t Rc, + ) -> &'t AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. 
This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if let Some(set) = global_pool.free_vars_cache.get(term) { + return set; + } + if let Some(set) = ctx_pool.free_vars_cache.get(term) { + return set; + } + if local_pool.free_vars_cache.contains_key(term) { + return local_pool.free_vars_cache.get(term).unwrap(); + } + + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = internal(local_pool, ctx_pool, global_pool, f).clone(); + for a in args { + set.extend( + internal(local_pool, ctx_pool, global_pool, a) + .iter() + .cloned(), + ); + } + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend( + internal(local_pool, ctx_pool, global_pool, a) + .iter() + .cloned(), + ); + } + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); + for bound_var in bindings { + let term = LocalPool::add_by_ref( + local_pool, + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + } + vars + } + Term::Let(bindings, inner) => { + let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); + for (var, value) in bindings { + let sort = + LocalPool::sort_by_ref(local_pool, ctx_pool, global_pool, value) + .as_ref() + .clone(); + let sort = + LocalPool::add_by_ref(local_pool, ctx_pool, global_pool, sort); + let term = LocalPool::add_by_ref( + local_pool, + ctx_pool, + global_pool, + (var.clone(), sort).into(), + ); + vars.remove(&term); + } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); + let term = LocalPool::add_by_ref( + local_pool, + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + vars + } + Term::Var(..) 
=> { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set + } + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + local_pool.free_vars_cache.insert(term.clone(), set); + local_pool.free_vars_cache.get(term).unwrap() + } + + internal( + &mut self.storage, + &self.ctx_pool.storage.read().unwrap(), + &self.ctx_pool.global_pool, + term, + ) + .clone() + } } } diff --git a/carcara/src/ast/rc.rs b/carcara/src/ast/rc.rs index 197e2564..c9a119e9 100644 --- a/carcara/src/ast/rc.rs +++ b/carcara/src/ast/rc.rs @@ -1,6 +1,6 @@ //! This module implements a variant of `Rc` where equality and hashing are done by reference. -use std::{fmt, hash::Hash, ops::Deref, rc}; +use std::{fmt, hash::Hash, ops::Deref, sync}; /// An `Rc` where equality and hashing are done by reference, instead of by value. /// @@ -36,7 +36,7 @@ use std::{fmt, hash::Hash, ops::Deref, rc}; /// assert!(set.contains(&c)); /// ``` #[derive(Eq)] -pub struct Rc(rc::Rc); +pub struct Rc(sync::Arc); // If we simply `#[derive(Clone)]`, it would require that the type parameter `T` also implements // `Clone`, even though it is of course not needed. For more info, see: @@ -49,13 +49,13 @@ impl Clone for Rc { impl PartialEq for Rc { fn eq(&self, other: &Self) -> bool { - rc::Rc::ptr_eq(&self.0, &other.0) + sync::Arc::ptr_eq(&self.0, &other.0) } } impl Hash for Rc { fn hash(&self, state: &mut H) { - rc::Rc::as_ptr(&self.0).hash(state); + sync::Arc::as_ptr(&self.0).hash(state); } } @@ -78,7 +78,7 @@ impl AsRef for Rc { // Implements `From` for every `U` that can be converted into an `rc::Rc` impl From for Rc where - rc::Rc: From, + sync::Arc: From, { fn from(inner: U) -> Self { Self(inner.into()) @@ -108,11 +108,11 @@ impl Rc { /// Constructs a new `Rc`. pub fn new(value: T) -> Self { #[allow(clippy::disallowed_methods)] - Self(rc::Rc::new(value)) + Self(sync::Arc::new(value)) } /// Similar to [`std::rc::Rc::strong_count`]. 
pub fn strong_count(this: &Self) -> usize { - rc::Rc::strong_count(&this.0) + sync::Arc::strong_count(&this.0) } } diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index 9ba5d2b5..e13f404b 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -1,6 +1,6 @@ //! Algorithms for creating and applying capture-avoiding substitutions over terms. -use super::{BindingList, Rc, SortedVar, Term, TermPool}; +use super::{BindingList, Rc, SortedVar, TPool, Term, TermPool}; use ahash::{AHashMap, AHashSet}; use thiserror::Error; @@ -65,7 +65,10 @@ impl Substitution { /// Constructs a new substitution from an arbitrary mapping of terms to other terms. This /// returns an error if any term in the left-hand side is not a variable, or if any term is /// mapped to a term of a different sort. - pub fn new(pool: &mut TermPool, map: AHashMap, Rc>) -> SubstitutionResult { + pub fn new( + pool: &mut P, + map: AHashMap, Rc>, + ) -> SubstitutionResult { for (k, v) in map.iter() { if !k.is_var() { return Err(SubstitutionError::NotAVariable(k.clone())); @@ -89,9 +92,9 @@ impl Substitution { /// Extends the substitution by adding a new mapping from `x` to `t`. This returns an error if /// the sorts of the given terms are not the same, or if `x` is not a variable term. - pub(crate) fn insert( + pub(crate) fn insert( &mut self, - pool: &mut TermPool, + pool: &mut P, x: Rc, t: Rc, ) -> SubstitutionResult<()> { @@ -122,7 +125,7 @@ impl Substitution { /// Computes which binder variables will need to be renamed, and stores the result in /// `self.should_be_renamed`. - fn compute_should_be_renamed(&mut self, pool: &mut TermPool) { + fn compute_should_be_renamed(&mut self, pool: &mut P) { if self.should_be_renamed.is_some() { return; } @@ -157,7 +160,7 @@ impl Substitution { } /// Applies the substitution to `term`, and returns the result as a new term. 
- pub fn apply(&mut self, pool: &mut TermPool, term: &Rc) -> Rc { + pub fn apply(&mut self, pool: &mut P, term: &Rc) -> Rc { macro_rules! apply_to_sequence { ($sequence:expr) => { $sequence @@ -212,9 +215,9 @@ impl Substitution { result } - fn can_skip_instead_of_renaming( + fn can_skip_instead_of_renaming( &self, - pool: &mut TermPool, + pool: &mut P, binding_list: &[SortedVar], ) -> bool { // Note: this method assumes that `binding_list` is a "sort" binding list. "Value" lists add @@ -243,9 +246,9 @@ impl Substitution { /// Applies the substitution to a binder term, renaming any bound variables as needed. This /// method uses the function `build_function` to construct the resulting binder term. If the /// binder is a `let` or `lambda` term, `is_value_list` should be true. - fn apply_to_binder) -> Term>( + fn apply_to_binder) -> Term, P: TPool>( &mut self, - pool: &mut TermPool, + pool: &mut P, original_term: &Rc, binding_list: &[SortedVar], inner: &Rc, @@ -287,9 +290,9 @@ impl Substitution { /// returns a clone of the binding list and an empty substitution. The name chosen when renaming /// a variable is the old name with `'` appended. If the binding list is a "value" list, like in /// a `let` or `lambda` term, `is_value_list` should be true. - fn rename_binding_list( + fn rename_binding_list( &mut self, - pool: &mut TermPool, + pool: &mut P, binding_list: &[SortedVar], is_value_list: bool, ) -> (BindingList, Self) { @@ -301,7 +304,7 @@ impl Substitution { // If the binding list is a "sort" binding list, then `value` will be the variable's // sort. 
Otherwise, we need to get the sort of `value` let sort = if is_value_list { - pool.add(Term::Sort(pool.sort(value).clone())) + pool.add(pool.sort(value).as_ref().clone()) } else { value.clone() }; diff --git a/carcara/src/ast/tests.rs b/carcara/src/ast/tests.rs index 35f032b8..a6db2818 100644 --- a/carcara/src/ast/tests.rs +++ b/carcara/src/ast/tests.rs @@ -1,4 +1,7 @@ -use crate::{ast::TermPool, parser::tests::parse_terms}; +use crate::{ + ast::{TPool, TermPool}, + parser::tests::parse_terms, +}; use ahash::AHashSet; #[test] @@ -8,11 +11,8 @@ fn test_free_vars() { let mut pool = TermPool::new(); let [root] = parse_terms(&mut pool, definitions, [term]); let expected: AHashSet<_> = expected.iter().copied().collect(); - let got: AHashSet<_> = pool - .free_vars(&root) - .iter() - .map(|t| t.as_var().unwrap()) - .collect(); + let set = pool.free_vars(&root); + let got: AHashSet<_> = set.iter().map(|t| t.as_var().unwrap()).collect(); assert_eq!(expected, got); } diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index 3a7473be..3c0b7784 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -112,7 +112,7 @@ fn parse_and_check_cvc5_proof( let commands = parser.parse_proof()?; let proof = Proof { premises, commands }; - ProofChecker::new(pool, Config::new(), prelude).check(&proof)?; + ProofChecker::new(pool, Config::new(), &prelude).check(&proof)?; Ok(proof.commands) } diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 03c818bf..66e61606 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -1,11 +1,13 @@ pub mod error; mod lia_generic; +mod parallel; mod rules; mod scheduler; use crate::{ast::*, benchmarking::CollectResults, elaborator::Elaborator, CarcaraResult, Error}; use ahash::AHashSet; use error::CheckerError; +pub use parallel::ParallelProofChecker; use rules::{ElaborationRule, Premise, Rule, RuleArgs, RuleResult}; pub use scheduler::Scheduler; use 
std::{ @@ -39,16 +41,16 @@ impl fmt::Debug for CheckerStatistics<'_> { } } -#[derive(Debug, Default)] -pub struct Config<'c> { +#[derive(Debug, Default, Clone)] +pub struct Config { strict: bool, skip_unknown_rules: bool, is_running_test: bool, - statistics: Option>, + statistics: Option<()>, lia_via_cvc5: bool, } -impl<'c> Config<'c> { +impl Config { pub fn new() -> Self { Self::default() } @@ -68,7 +70,7 @@ impl<'c> Config<'c> { self } - pub fn statistics(mut self, value: CheckerStatistics<'c>) -> Self { + pub fn statistics(mut self, value: ()) -> Self { self.statistics = Some(value); self } @@ -76,8 +78,8 @@ impl<'c> Config<'c> { pub struct ProofChecker<'c> { pool: &'c mut TermPool, - config: Config<'c>, - prelude: ProblemPrelude, + config: Config, + prelude: &'c ProblemPrelude, context: ContextStack, elaborator: Option, reached_empty_clause: bool, @@ -85,7 +87,7 @@ pub struct ProofChecker<'c> { } impl<'c> ProofChecker<'c> { - pub fn new(pool: &'c mut TermPool, config: Config<'c>, prelude: ProblemPrelude) -> Self { + pub fn new(pool: &'c mut TermPool, config: Config, prelude: &'c ProblemPrelude) -> Self { ProofChecker { pool, config, @@ -153,19 +155,6 @@ impl<'c> ProofChecker<'c> { if let Some(elaborator) = &mut self.elaborator { elaborator.open_subproof(s.commands.len()); } - - if let Some(stats) = &mut self.config.statistics { - let rule_name = match s.commands.last() { - Some(ProofCommand::Step(step)) => format!("anchor({})", &step.rule), - _ => "anchor".to_owned(), - }; - stats.results.add_step_measurement( - stats.file_name, - step_id, - &rule_name, - time.elapsed(), - ); - } } ProofCommand::Assume { id, term } => { if !self.check_assume(id, term, &proof.premises, &iter) { @@ -197,9 +186,6 @@ impl<'c> ProofChecker<'c> { let elaboration_time = Instant::now(); proof.commands = elaborator.end(proof.commands); - if let Some(stats) = &mut self.config.statistics { - *stats.elaboration_time += elaboration_time.elapsed(); - } Ok((self.is_holey, proof)) } @@ -225,12 
+211,6 @@ impl<'c> ProofChecker<'c> { } if premises.contains(term) { - if let Some(s) = &mut self.config.statistics { - let time = time.elapsed(); - *s.assume_time += time; - s.results - .add_assume_measurement(s.file_name, id, true, time); - } if let Some(elaborator) = &mut self.elaborator { elaborator.assume(term); } @@ -249,9 +229,6 @@ impl<'c> ProofChecker<'c> { let mut this_polyeq_time = Duration::ZERO; let (result, depth) = tracing_polyeq(term, p, &mut this_polyeq_time); polyeq_time += this_polyeq_time; - if let Some(s) = &mut self.config.statistics { - s.results.add_polyeq_depth(depth); - } if result { core_time = this_polyeq_time; found = Some(p.clone()); @@ -265,19 +242,6 @@ impl<'c> ProofChecker<'c> { let elaboration_time = Instant::now(); elaborator.elaborate_assume(self.pool, p, term.clone(), id); - - if let Some(s) = &mut self.config.statistics { - *s.elaboration_time += elaboration_time.elapsed(); - } - } - - if let Some(s) = &mut self.config.statistics { - let time = time.elapsed(); - *s.assume_time += time; - *s.assume_core_time += core_time; - *s.polyeq_time += polyeq_time; - s.results - .add_assume_measurement(s.file_name, id, false, time); } true @@ -366,15 +330,6 @@ impl<'c> ProofChecker<'c> { } } - if let Some(s) = &mut self.config.statistics { - let time = time.elapsed(); - s.results - .add_step_measurement(s.file_name, &step.id, &step.rule, time); - *s.polyeq_time += polyeq_time; - if elaborated { - *s.elaboration_time += time; - } - } Ok(()) } diff --git a/carcara/src/checker/parallel.rs b/carcara/src/checker/parallel.rs new file mode 100644 index 00000000..ffdef1b6 --- /dev/null +++ b/carcara/src/checker/parallel.rs @@ -0,0 +1,455 @@ +use super::error::CheckerError; +use super::rules::{Premise, Rule, RuleArgs, RuleResult}; +use super::scheduler::{iter::ScheduleIter, Scheduler}; +use super::{lia_generic, CheckerStatistics, Config}; +use crate::{ + ast::{AdvancedPools::LocalPool, *}, + CarcaraResult, Error, +}; +use ahash::AHashSet; +use 
std::{ + sync::{Arc, RwLock}, + thread, + time::{Duration, Instant}, +}; + +unsafe impl Sync for CheckerStatistics<'_> {} +unsafe impl Send for CheckerStatistics<'_> {} + +pub struct ParallelProofChecker<'c> { + pool: Arc, + config: Config, + prelude: &'c ProblemPrelude, + context: ContextStack, + reached_empty_clause: bool, + is_holey: bool, +} + +impl<'c> ParallelProofChecker<'c> { + pub fn new( + pool: Arc, + config: Config, + prelude: &'c ProblemPrelude, + ) -> Self { + ParallelProofChecker { + pool, + config, + prelude, + context: ContextStack::new(), + reached_empty_clause: false, + is_holey: false, + } + } + + /// Copies the proof checker and instantiate parallel fields + /// TODO: Change function name + pub fn parallelize_self(&self) -> Self { + ParallelProofChecker { + pool: self.pool.clone(), + config: self.config.clone(), + prelude: self.prelude, + context: ContextStack::new(), + reached_empty_clause: false, + is_holey: false, + } + } + + pub fn check<'s, 'p>( + &'s mut self, + proof: &'p Proof, + scheduler: &'s Scheduler, + ) -> CarcaraResult { + // Used to estimulate threads to abort prematurely (only happens when a + // thread already found out an invalid step) + let premature_abort = Arc::new(RwLock::new(false)); + let context_pool = AdvancedPools::ContextPool::from_global(&self.pool); + // TODO: Add stack size flag + const STACK_SIZE: usize = 128 * 1024 * 1024; + // + thread::scope(|s| { + let threads: Vec<_> = (&scheduler.loads) + .into_iter() + .enumerate() + .map(|(i, schedule)| { + // Creates a local statistics collector, allowing the collection + // of this threads statistics and then the merge + let mut local_stats = None; + let mut local_self = self.parallelize_self(); + let mut local_pool = LocalPool::from_previous(&context_pool); + let should_abort = premature_abort.clone(); + + thread::Builder::new() + .name(format!("worker-{i}")) + .stack_size(STACK_SIZE) + .spawn_scoped(s, move || -> CarcaraResult<(bool, bool, Option<()>)> { + let mut 
iter = schedule.iter(&proof.commands[..]); + + while let Some(command) = iter.next() { + match command { + ProofCommand::Step(step) => { + // If this step ends a subproof, it might need to implicitly reference the + // previous command in the subproof + let previous_command = if iter.is_end_step() { + let subproof = iter.current_subproof().unwrap(); + let index = subproof.len() - 2; + subproof.get(index).map(|command| { + Premise::new((iter.depth(), index), command) + }) + } else { + None + }; + + local_self + .check_step( + step, + previous_command, + &iter, + &mut local_pool, + &mut local_stats, + ) + .map_err(|e| { + // Signalize to other threads to stop the proof checking + *should_abort.write().unwrap() = true; + Error::Checker { + inner: e, + rule: step.rule.clone(), + step: step.id.clone(), + } + })?; + + if step.clause.is_empty() { + local_self.reached_empty_clause = true; + } + } + ProofCommand::Subproof(s) => { + let time = Instant::now(); + let step_id = command.id(); + + local_self + .context + .push( + &mut local_pool, + &s.assignment_args, + &s.variable_args, + ) + .map_err(|e| { + // Signalize to other threads to stop the proof checking + *should_abort.write().unwrap() = true; + Error::Checker { + inner: e.into(), + rule: "anchor".into(), + step: step_id.to_owned(), + } + })?; + } + ProofCommand::Assume { id, term } => { + if !local_self.check_assume( + id, + term, + &proof.premises, + &iter, + &mut local_stats, + ) { + // Signalize to other threads to stop the proof checking + *should_abort.write().unwrap() = true; + return Err(Error::Checker { + inner: CheckerError::Assume(term.clone()), + rule: "assume".into(), + step: id.clone(), + }); + } + } + ProofCommand::Closing => { + // If this is the last command of a subproof, we have to pop off the subproof + // commands of the stack. 
The parser already ensures that the last command + // in a subproof is always a `step` command + local_self.context.pop(); + } + } + // Verify if any of the other threads found an error and abort in case of positive + if *should_abort.read().unwrap() { + break; + } + } + + // Returns Ok(reached empty clause, isHoley, current thread statistics) + if local_self.config.is_running_test || local_self.reached_empty_clause + { + Ok((true, local_self.is_holey, Some(()))) + } else { + Ok((false, local_self.is_holey, Some(()))) + } + }) + .unwrap() + }) + .collect(); + + // Unify the results of all threads and generate the final result based on them + let (mut reached, mut holey) = (false, false); + let mut err: Result<_, Error> = Ok(()); + + // Wait until the threads finish and merge the results and statistics + threads + .into_iter() + .map(|t| t.join().unwrap()) + .for_each(|opt| { + match opt { + Ok((_reached, _holey, local_stats)) => { + // Mask the result booleans + (reached, holey) = (reached | _reached, holey | _holey); + } + Err(e) => { + // Since we want the statistics of the whole run + // (even in a error case) we cannot abort at this + // point, since we can have more threads to be + // evaluated and their statistics colleted + err = Err(e); + } + } + }); + + // If an error happend + if let Err(x) = err { + return Err(x); + } + + if reached { + Ok(holey) + } else { + Err(Error::DoesNotReachEmptyClause) + } + }) + } + + // TODO: Remove statistics as an argument since we are going to pass it through config in the local_self + fn check_assume( + &mut self, + id: &str, + term: &Rc, + premises: &AHashSet>, + iter: &ScheduleIter, + statistics: &mut Option, + ) -> bool { + let time = Instant::now(); + + // Some subproofs contain `assume` commands inside them. These don't refer + // to the original problem premises, so we ignore the `assume` command if + // it is inside a subproof. 
Since the unit tests for the rules don't define the + // original problem, but sometimes use `assume` commands, we also skip the + // command if we are in a testing context. + if self.config.is_running_test || iter.is_in_subproof() { + return true; + } + + if premises.contains(term) { + return true; + } + + if self.config.strict { + return false; + } + + let mut found = None; + let mut polyeq_time = Duration::ZERO; + let mut core_time = Duration::ZERO; + + for p in premises { + let mut this_polyeq_time = Duration::ZERO; + let (result, depth) = tracing_polyeq(term, p, &mut this_polyeq_time); + polyeq_time += this_polyeq_time; + if result { + core_time = this_polyeq_time; + found = Some(p.clone()); + break; + } + } + + let Some(_) = found else { return false }; + + true + } + + // TODO: Ditto + fn check_step<'a>( + &mut self, + step: &'a ProofStep, + previous_command: Option>, + iter: &'a ScheduleIter<'a>, + pool: &mut TermPool, + statistics: &mut Option, + ) -> RuleResult { + let time = Instant::now(); + let mut polyeq_time = Duration::ZERO; + + if step.rule == "lia_generic" { + if self.config.lia_via_cvc5 { + let is_hole = + lia_generic::lia_generic(pool, &step.clause, &self.prelude, None, &step.id); + self.is_holey = self.is_holey || is_hole; + } else { + log::warn!("encountered \"lia_generic\" rule, ignoring"); + self.is_holey = true; + } + } else { + let rule = match Self::get_rule(&step.rule, self.config.strict) { + Some(r) => r, + None if self.config.skip_unknown_rules => { + self.is_holey = true; + return Ok(()); + } + None => return Err(CheckerError::UnknownRule), + }; + + if step.rule == "hole" { + self.is_holey = true; + } + + let premises: Vec<_> = step + .premises + .iter() + .map(|&p| { + let command = iter.get_premise(p); + Premise::new(p, command) + }) + .collect(); + let discharge: Vec<_> = step + .discharge + .iter() + .map(|&i| iter.get_premise(i)) + .collect(); + + let rule_args = RuleArgs { + conclusion: &step.clause, + premises: &premises, + 
args: &step.args, + pool, + context: &mut self.context, + previous_command, + discharge: &discharge, + polyeq_time: &mut polyeq_time, + }; + + rule(rule_args)?; + } + + Ok(()) + } + + pub fn get_rule(rule_name: &str, strict: bool) -> Option { + use super::rules::*; + + Some(match rule_name { + "true" => tautology::r#true, + "false" => tautology::r#false, + "not_not" => tautology::not_not, + "and_pos" => tautology::and_pos, + "and_neg" => tautology::and_neg, + "or_pos" => tautology::or_pos, + "or_neg" => tautology::or_neg, + "xor_pos1" => tautology::xor_pos1, + "xor_pos2" => tautology::xor_pos2, + "xor_neg1" => tautology::xor_neg1, + "xor_neg2" => tautology::xor_neg2, + "implies_pos" => tautology::implies_pos, + "implies_neg1" => tautology::implies_neg1, + "implies_neg2" => tautology::implies_neg2, + "equiv_pos1" => tautology::equiv_pos1, + "equiv_pos2" => tautology::equiv_pos2, + "equiv_neg1" => tautology::equiv_neg1, + "equiv_neg2" => tautology::equiv_neg2, + "ite_pos1" => tautology::ite_pos1, + "ite_pos2" => tautology::ite_pos2, + "ite_neg1" => tautology::ite_neg1, + "ite_neg2" => tautology::ite_neg2, + "eq_reflexive" => reflexivity::eq_reflexive, + "eq_transitive" => transitivity::eq_transitive, + "eq_congruent" => congruence::eq_congruent, + "eq_congruent_pred" => congruence::eq_congruent_pred, + "distinct_elim" => clausification::distinct_elim, + "la_rw_eq" => linear_arithmetic::la_rw_eq, + "la_generic" => linear_arithmetic::la_generic, + "la_disequality" => linear_arithmetic::la_disequality, + "la_totality" => linear_arithmetic::la_totality, + "la_tautology" => linear_arithmetic::la_tautology, + "forall_inst" => quantifier::forall_inst, + "qnt_join" => quantifier::qnt_join, + "qnt_rm_unused" => quantifier::qnt_rm_unused, + "resolution" | "th_resolution" if strict => resolution::resolution_with_args, + "resolution" | "th_resolution" => resolution::resolution, + "refl" if strict => reflexivity::strict_refl, + "refl" => reflexivity::refl, + "trans" => 
transitivity::trans, + "cong" => congruence::cong, + "ho_cong" => congruence::ho_cong, + "and" => clausification::and, + "tautology" => resolution::tautology, + "not_or" => clausification::not_or, + "or" => clausification::or, + "not_and" => clausification::not_and, + "xor1" => clausification::xor1, + "xor2" => clausification::xor2, + "not_xor1" => clausification::not_xor1, + "not_xor2" => clausification::not_xor2, + "implies" => clausification::implies, + "not_implies1" => clausification::not_implies1, + "not_implies2" => clausification::not_implies2, + "equiv1" => tautology::equiv1, + "equiv2" => tautology::equiv2, + "not_equiv1" => tautology::not_equiv1, + "not_equiv2" => tautology::not_equiv2, + "ite1" => tautology::ite1, + "ite2" => tautology::ite2, + "not_ite1" => tautology::not_ite1, + "not_ite2" => tautology::not_ite2, + "ite_intro" => tautology::ite_intro, + "contraction" => resolution::contraction, + "connective_def" => tautology::connective_def, + "ite_simplify" => simplification::ite_simplify, + "eq_simplify" => simplification::eq_simplify, + "and_simplify" => simplification::and_simplify, + "or_simplify" => simplification::or_simplify, + "not_simplify" => simplification::not_simplify, + "implies_simplify" => simplification::implies_simplify, + "equiv_simplify" => simplification::equiv_simplify, + "bool_simplify" => simplification::bool_simplify, + "qnt_simplify" => simplification::qnt_simplify, + "div_simplify" => simplification::div_simplify, + "prod_simplify" => simplification::prod_simplify, + // Despite being separate rules in the specification, proofs generated by veriT don't + // differentiate between `unary_minus_simplify` and `minus_simplify`. To account for + // that, `simplification::minus_simplify` implements both rules in the same function. 
+ "unary_minus_simplify" | "minus_simplify" => simplification::minus_simplify, + "sum_simplify" => simplification::sum_simplify, + "comp_simplify" => simplification::comp_simplify, + "nary_elim" => clausification::nary_elim, + "ac_simp" => simplification::ac_simp, + "bfun_elim" => clausification::bfun_elim, + "bind" => subproof::bind, + "qnt_cnf" => quantifier::qnt_cnf, + "subproof" => subproof::subproof, + "let" => subproof::r#let, + "onepoint" => subproof::onepoint, + "sko_ex" => subproof::sko_ex, + "sko_forall" => subproof::sko_forall, + "reordering" => extras::reordering, + "symm" => extras::symm, + "not_symm" => extras::not_symm, + "eq_symmetric" => extras::eq_symmetric, + "or_intro" => extras::or_intro, + "bind_let" => extras::bind_let, + "la_mult_pos" => extras::la_mult_pos, + "la_mult_neg" => extras::la_mult_neg, + + // Special rules that always check as valid, and are used to indicate holes in the + // proof. + "hole" => |_| Ok(()), + + // The Alethe specification does not yet describe how this more strict version of the + // resolution rule will be called. Until that is decided and added to the specification, + // we define a new specialized rule that calls it + "strict_resolution" => resolution::strict_resolution, + + _ => return None, + }) + } +} diff --git a/carcara/src/checker/rules/clausification.rs b/carcara/src/checker/rules/clausification.rs index 60cb5b8f..c675c013 100644 --- a/carcara/src/checker/rules/clausification.rs +++ b/carcara/src/checker/rules/clausification.rs @@ -22,7 +22,7 @@ pub fn distinct_elim(RuleArgs { conclusion, pool, .. 
}: RuleArgs) -> RuleResult } // If there are more than two boolean arguments to the distinct operator, the // second term must be `false` - args if *pool.sort(&args[0]) == Sort::Bool => { + args if pool.sort(&args[0]).as_sort().unwrap() == &Sort::Bool => { if second_term.is_bool_false() { Ok(()) } else { @@ -289,7 +289,9 @@ fn bfun_elim_second_step( processed: usize, ) -> Rc { for i in processed..args.len() { - if *pool.sort(&args[i]) == Sort::Bool && !args[i].is_bool_false() && !args[i].is_bool_true() + if pool.sort(&args[i]).as_sort().unwrap() == &Sort::Bool + && !args[i].is_bool_false() + && !args[i].is_bool_true() { let mut ite_args = Vec::with_capacity(3); ite_args.push(args[i].clone()); diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index e6ef561b..7fd32ea7 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -176,7 +176,7 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { statistics: None, lia_via_cvc5: false, }, - prelude, + &prelude, ); let got = checker.check(&parsed).is_ok(); assert_eq!( diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index 38f5df85..dce21b84 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -24,7 +24,7 @@ pub fn forall_inst( .iter() .map(|arg| { let (arg_name, arg_value) = arg.as_assign()?; - let arg_sort = pool.add(Term::Sort(pool.sort(arg_value).clone())); + let arg_sort = pool.sort(arg_value).clone(); rassert!( bindings.remove(&(arg_name.clone(), arg_sort.clone())), QuantifierError::NoBindingMatchesArg(arg_name.clone()) @@ -91,6 +91,7 @@ pub fn qnt_rm_unused(RuleArgs { conclusion, pool, .. 
}: RuleArgs) -> RuleResult assert_eq(phi_1, phi_2)?; // Cloning here may be unnecessary + // TODO: Remove the clone from similar situations let free_vars = pool.free_vars(phi_1).clone(); let expected: Vec<_> = bindings_1 @@ -154,7 +155,7 @@ fn negation_normal_form( pool.add(Term::Quant(quant, bindings.clone(), inner)) } else { match match_term!((= p q) = term) { - Some((left, right)) if *pool.sort(left) == Sort::Bool => { + Some((left, right)) if pool.sort(left).as_sort().unwrap() == &Sort::Bool => { let a = negation_normal_form(pool, left, !polarity, cache); let b = negation_normal_form(pool, right, polarity, cache); let c = negation_normal_form(pool, right, !polarity, cache); diff --git a/carcara/src/checker/rules/subproof.rs b/carcara/src/checker/rules/subproof.rs index 5d451bb2..f6925d46 100644 --- a/carcara/src/checker/rules/subproof.rs +++ b/carcara/src/checker/rules/subproof.rs @@ -166,7 +166,7 @@ pub fn r#let( let mut pairs: Vec<_> = let_bindings .iter() .map(|(x, t)| { - let sort = pool.add(Term::Sort(pool.sort(t).clone())); + let sort = pool.add(pool.sort(t).as_ref().clone()); let x_term = pool.add((x.clone(), sort).into()); let s = substitution .get(&x_term) diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index 114fe331..2dbb60a9 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -133,7 +133,7 @@ impl<'a> PolyeqElaborator<'a> { let variable_args: Vec<_> = a_bindings .iter() .map(|(name, value)| { - let sort = Term::Sort(pool.sort(value).clone()); + let sort = pool.sort(value).as_ref().clone(); (name.clone(), pool.add(sort)) }) .collect(); diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 1e50147e..e9cbc6b3 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -142,7 +142,7 @@ pub fn check( ) -> Result { use crate::checker::Scheduler; - let (prelude, proof, mut pool) = parser::parse_instance( + let (prelude, proof, pool) = parser::parse_instance_multithread( problem, 
proof, options.apply_function_defs, @@ -155,7 +155,7 @@ pub fn check( .skip_unknown_rules(options.skip_unknown_rules) .lia_via_cvc5(options.lia_via_cvc5); let (scheduler, schedule_context_usage) = Scheduler::new(num_threads, &proof); - checker::ProofChecker::new(&mut pool, config, prelude).check(&proof) + checker::ParallelProofChecker::new(pool, config, &prelude).check(&proof, &scheduler) } pub fn check_and_elaborate( @@ -175,5 +175,5 @@ pub fn check_and_elaborate( .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) .lia_via_cvc5(options.lia_via_cvc5); - checker::ProofChecker::new(&mut pool, config, prelude).check_and_elaborate(proof) + checker::ProofChecker::new(&mut pool, config, &prelude).check_and_elaborate(proof) } diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index 421b79b5..e04b0de2 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -15,7 +15,7 @@ use crate::{ use ahash::{AHashMap, AHashSet}; use error::assert_num_args; use rug::Integer; -use std::{io::BufRead, str::FromStr}; +use std::{io::BufRead, str::FromStr, sync::Arc}; /// Parses an SMT problem instance (in the SMT-LIB format) and its associated proof (in the Alethe /// format). @@ -45,6 +45,39 @@ pub fn parse_instance( Ok((prelude, proof, pool)) } +/// Parses an SMT problem instance (in the SMT-LIB format) and its associated proof (in the Alethe +/// format). +/// +/// This returns the parsed proof, as well as the `TermPool` used in parsing. Can take any type +/// that implements `BufRead`. +/// +/// Returns an atomic reference counter of an primitive pool. +/// TODO: Unify the two parsing methods? 
+pub fn parse_instance_multithread( + problem: T, + proof: T, + apply_function_defs: bool, + expand_lets: bool, + allow_int_real_subtyping: bool, +) -> CarcaraResult<(ProblemPrelude, Proof, Arc)> { + let mut pool = Arc::new(PrimitivePool::TermPool::new()); + let mut_pool = Arc::get_mut(&mut pool).unwrap(); + + let mut parser = Parser::new( + mut_pool, + problem, + apply_function_defs, + expand_lets, + allow_int_real_subtyping, + )?; + let (prelude, premises) = parser.parse_problem()?; + parser.reset(proof)?; + let commands = parser.parse_proof()?; + + let proof = Proof { premises, commands }; + Ok((prelude, proof, pool)) +} + /// A function definition, from a `define-fun` command. struct FunctionDef { params: Vec, @@ -79,8 +112,8 @@ struct ParserState { } /// A parser for the Alethe proof format. -pub struct Parser<'a, R> { - pool: &'a mut TermPool, +pub struct Parser<'a, R, P> { + pool: &'a mut P, lexer: Lexer, current_token: Token, current_position: Position, @@ -92,12 +125,12 @@ pub struct Parser<'a, R> { allow_int_real_subtyping: bool, } -impl<'a, R: BufRead> Parser<'a, R> { +impl<'a, R: BufRead, P: TPool> Parser<'a, R, P> { /// Constructs a new `Parser` from a type that implements `BufRead`. /// /// This operation can fail if there is an IO or lexer error on the first token. pub fn new( - pool: &'a mut TermPool, + pool: &'a mut P, input: R, apply_function_defs: bool, expand_lets: bool, @@ -175,7 +208,8 @@ impl<'a, R: BufRead> Parser<'a, R> { /// Constructs and sort checks an operation term. 
fn make_op(&mut self, op: Operator, args: Vec>) -> Result, ParserError> { - let sorts: Vec<_> = args.iter().map(|t| self.pool.sort(t)).collect(); + let terms: Vec<_> = args.iter().map(|t| self.pool.sort(t)).collect(); + let sorts: Vec<_> = terms.iter().map(|op| op.as_sort().unwrap()).collect(); match op { Operator::Not => { assert_num_args(&args, 1)?; @@ -318,8 +352,9 @@ impl<'a, R: BufRead> Parser<'a, R> { function: Rc, args: Vec>, ) -> Result, ParserError> { + let sort = self.pool.sort(&function); let sorts = { - let function_sort = self.pool.sort(&function); + let function_sort = sort.as_sort().unwrap(); if let Sort::Function(sorts) = function_sort { sorts } else { @@ -329,7 +364,10 @@ impl<'a, R: BufRead> Parser<'a, R> { }; assert_num_args(&args, sorts.len() - 1)?; for i in 0..args.len() { - SortError::assert_eq(sorts[i].as_sort().unwrap(), self.pool.sort(&args[i]))?; + SortError::assert_eq( + sorts[i].as_sort().unwrap(), + self.pool.sort(&args[i]).as_sort().unwrap(), + )?; } Ok(self.pool.add(Term::App(function, args))) } @@ -511,9 +549,7 @@ impl<'a, R: BufRead> Parser<'a, R> { self.pool .add(Term::Lambda(BindingList(func_def.params), func_def.body)) }; - let sort = self - .pool - .add(Term::Sort(self.pool.sort(&lambda_term).clone())); + let sort = self.pool.add(self.pool.sort(&lambda_term).as_ref().clone()); let var = (name, sort); self.insert_sorted_var(var.clone()); let var_term = self.pool.add(var.into()); @@ -811,7 +847,7 @@ impl<'a, R: BufRead> Parser<'a, R> { self.next_token()?; let var = self.expect_symbol()?; let value = self.parse_term()?; - let sort = Term::Sort(self.pool.sort(&value).clone()); + let sort = self.pool.sort(&value).as_ref().clone(); let sort = self.pool.add(sort); self.insert_sorted_var((var.clone(), sort)); self.expect_token(Token::CloseParen)?; @@ -957,7 +993,7 @@ impl<'a, R: BufRead> Parser<'a, R> { fn parse_term_expecting_sort(&mut self, expected_sort: &Sort) -> CarcaraResult> { let pos = self.current_position; let term = 
self.parse_term()?; - SortError::assert_eq(expected_sort, self.pool.sort(&term)) + SortError::assert_eq(expected_sort, self.pool.sort(&term).as_sort().unwrap()) .map_err(|e| Error::Parser(e.into(), pos))?; Ok(term) } @@ -1024,7 +1060,7 @@ impl<'a, R: BufRead> Parser<'a, R> { p.expect_token(Token::OpenParen)?; let name = p.expect_symbol()?; let value = p.parse_term()?; - let sort = p.pool.add(Term::Sort(p.pool.sort(&value).clone())); + let sort = p.pool.add(p.pool.sort(&value).as_ref().clone()); p.insert_sorted_var((name.clone(), sort)); p.expect_token(Token::CloseParen)?; Ok((name, value)) @@ -1039,7 +1075,7 @@ impl<'a, R: BufRead> Parser<'a, R> { let substitution = bindings .into_iter() .map(|(name, value)| { - let sort = Term::Sort(self.pool.sort(&value).clone()); + let sort = self.pool.sort(&value).as_ref().clone(); let var = Term::new_var(name, self.pool.add(sort)); (self.pool.add(var), value) }) @@ -1139,8 +1175,11 @@ impl<'a, R: BufRead> Parser<'a, R> { assert_num_args(&args, func.params.len()) .map_err(|err| Error::Parser(err, head_pos))?; for (arg, param) in args.iter().zip(func.params.iter()) { - SortError::assert_eq(param.1.as_sort().unwrap(), self.pool.sort(arg)) - .map_err(|err| Error::Parser(err.into(), head_pos))?; + SortError::assert_eq( + param.1.as_sort().unwrap(), + self.pool.sort(arg).as_sort().unwrap(), + ) + .map_err(|err| Error::Parser(err.into(), head_pos))?; } // Build a hash map of all the parameter names and the values they will diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index e6d29822..0fa66ba0 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -92,9 +92,23 @@ fn test_hash_consing() { .into_iter() .collect::>(); - assert_eq!(pool.terms.len(), expected.len()); + let l = &mut pool.storage; + let g = &pool.ctx_pool.global_pool; + let c = &pool.ctx_pool.storage.read().unwrap(); + assert_eq!( + l.terms.len() + g.terms.len() + c.terms.len() - 6, + expected.len() + ); - for got in 
pool.terms.keys() { + for got in l.terms.keys() { + let formatted: &str = &format!("{}", got); + assert!(expected.contains(formatted), "{}", formatted); + } + for got in g.terms.keys() { + let formatted: &str = &format!("{}", got); + assert!(expected.contains(formatted), "{}", formatted); + } + for got in c.terms.keys() { let formatted: &str = &format!("{}", got); assert!(expected.contains(formatted), "{}", formatted); } diff --git a/carcara/tests/test_example_files.rs b/carcara/tests/test_example_files.rs index fff74c62..4178ca9c 100644 --- a/carcara/tests/test_example_files.rs +++ b/carcara/tests/test_example_files.rs @@ -16,19 +16,19 @@ fn run_test(problem_path: &Path, proof_path: &Path) -> CarcaraResult<()> { )?; // First, we check the proof normally - checker::ProofChecker::new(&mut pool, Config::new(), prelude.clone()).check(&proof)?; + checker::ProofChecker::new(&mut pool, Config::new(), &prelude).check(&proof)?; // Then, we check it while elaborating the proof - let mut checker = checker::ProofChecker::new(&mut pool, Config::new(), prelude.clone()); + let mut checker = checker::ProofChecker::new(&mut pool, Config::new(), &prelude); let (_, elaborated) = checker.check_and_elaborate(proof)?; // After that, we check the elaborated proof normally, to make sure it is valid - checker::ProofChecker::new(&mut pool, Config::new().strict(true), prelude.clone()) + checker::ProofChecker::new(&mut pool, Config::new().strict(true), &prelude) .check(&elaborated)?; // Finally, we elaborate the already elaborated proof, to make sure the elaboration step is // idempotent - let mut checker = checker::ProofChecker::new(&mut pool, Config::new().strict(true), prelude); + let mut checker = checker::ProofChecker::new(&mut pool, Config::new().strict(true), &prelude); let (_, elaborated_twice) = checker.check_and_elaborate(elaborated.clone())?; assert!( elaborated.commands == elaborated_twice.commands, diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index 
a17ae353..6eadb2ba 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -48,16 +48,8 @@ fn run_job( let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_via_cvc5(options.lia_via_cvc5) - .statistics(checker::CheckerStatistics { - file_name: proof_file_name, - elaboration_time: &mut elaboration, - polyeq_time: &mut polyeq, - assume_time: &mut assume, - assume_core_time: &mut assume_core, - results, - }); - let mut checker = checker::ProofChecker::new(&mut pool, config, prelude); + .lia_via_cvc5(options.lia_via_cvc5); + let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); let checking = Instant::now(); From 68510ecc1d9a79949c458d5be317576a6f4b0c04 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Thu, 20 Jul 2023 09:44:20 -0300 Subject: [PATCH 15/70] Added statistics and benchmark fix --- carcara/src/ast/pool.rs | 16 +- carcara/src/benchmarking/metrics.rs | 2 +- carcara/src/benchmarking/mod.rs | 210 +++++++++++---- carcara/src/checker/mod.rs | 193 ++++++++++++-- carcara/src/checker/parallel.rs | 397 ++++++++++++++++++---------- carcara/src/checker/rules/mod.rs | 1 - carcara/src/lib.rs | 94 ++++++- cli/src/benchmarking.rs | 32 ++- cli/src/main.rs | 116 +------- 9 files changed, 697 insertions(+), 364 deletions(-) diff --git a/carcara/src/ast/pool.rs b/carcara/src/ast/pool.rs index f7a1fe06..37a3ea3e 100644 --- a/carcara/src/ast/pool.rs +++ b/carcara/src/ast/pool.rs @@ -39,7 +39,6 @@ pub trait TPool { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet>; } -#[allow(non_snake_case)] pub mod PrimitivePool { use crate::ast::Constant; @@ -269,12 +268,11 @@ pub mod PrimitivePool { } } -#[allow(non_snake_case)] pub mod AdvancedPools { use super::super::{Rc, Term}; use super::{PrimitivePool, TPool}; use ahash::AHashSet; - use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; + use std::sync::{Arc, RwLock}; pub 
struct ContextPool { pub(crate) global_pool: Arc, @@ -311,7 +309,7 @@ pub mod AdvancedPools { /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. fn add_by_ref<'d, 'c: 'd>( - ctx_pool: &mut RwLockWriteGuard, + ctx_pool: &mut PrimitivePool::TermPool, global_pool: &'d Arc, term: Term, ) -> Rc { @@ -335,7 +333,7 @@ pub mod AdvancedPools { /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. fn sort_by_ref<'d: 't, 'c: 'd, 't>( - ctx_pool: &RwLockWriteGuard, + ctx_pool: &PrimitivePool::TermPool, global_pool: &'d Arc, term: &'t Rc, ) -> Rc { @@ -390,7 +388,7 @@ pub mod AdvancedPools { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { fn internal<'d: 't, 'c: 'd, 't>( - ctx_pool: &'d mut RwLockWriteGuard<'_, PrimitivePool::TermPool>, + ctx_pool: &'d mut PrimitivePool::TermPool, global_pool: &'c Arc, term: &'t Rc, ) -> &'t AHashSet> { @@ -516,7 +514,7 @@ pub mod AdvancedPools { /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. fn add_by_ref<'d, 'c: 'd>( local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &RwLockReadGuard, + ctx_pool: &PrimitivePool::TermPool, global_pool: &'d Arc, term: Term, ) -> Rc { @@ -545,7 +543,7 @@ pub mod AdvancedPools { /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. 
fn sort_by_ref<'d: 't, 'c: 'd, 't>( local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &RwLockReadGuard, + ctx_pool: &PrimitivePool::TermPool, global_pool: &'d Arc, term: &'t Rc, ) -> Rc { @@ -608,7 +606,7 @@ pub mod AdvancedPools { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { fn internal<'d: 't, 'c: 'd, 't>( local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &'t RwLockReadGuard<'t, PrimitivePool::TermPool>, + ctx_pool: &'t PrimitivePool::TermPool, global_pool: &'d Arc, term: &'t Rc, ) -> &'t AHashSet> { diff --git a/carcara/src/benchmarking/metrics.rs b/carcara/src/benchmarking/metrics.rs index 0afe3a61..aa5be487 100644 --- a/carcara/src/benchmarking/metrics.rs +++ b/carcara/src/benchmarking/metrics.rs @@ -142,7 +142,7 @@ where } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct OnlineMetrics { total: T, count: usize, diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index 21b6013e..1478e46a 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -55,26 +55,24 @@ pub struct RunMeasurement { pub assume_core: Duration, } -// Higher kinded types would be very useful here. Ideally, I would like `BenchmarkResults` to be -// generic on any kind that implements `Metrics`, like `OnlineMetrics` or `OfflineMetrics`. 
-#[derive(Debug, Default)] -pub struct BenchmarkResults { - pub parsing: ByRun, - pub checking: ByRun, - pub elaborating: ByRun, - pub total_accounted_for: ByRun, - pub total: ByRun, - pub step_time: ByStep, - pub step_time_by_file: AHashMap, - pub step_time_by_rule: AHashMap, - - pub polyeq_time: ByRun, - pub polyeq_time_ratio: ByRunF64, - pub assume_time: ByRun, - pub assume_time_ratio: ByRunF64, - pub assume_core_time: ByRun, - - pub polyeq_depths: ByPolyeq, +#[derive(Debug, Default, Clone)] +pub struct OnlineBenchmarkResults { + pub parsing: OnlineMetrics, + pub checking: OnlineMetrics, + pub elaborating: OnlineMetrics, + pub total_accounted_for: OnlineMetrics, + pub total: OnlineMetrics, + pub step_time: OnlineMetrics, + pub step_time_by_file: AHashMap>, + pub step_time_by_rule: AHashMap>, + + pub polyeq_time: OnlineMetrics, + pub polyeq_time_ratio: OnlineMetrics, + pub assume_time: OnlineMetrics, + pub assume_time_ratio: OnlineMetrics, + pub assume_core_time: OnlineMetrics, + + pub polyeq_depths: OnlineMetrics<(), usize>, pub num_assumes: usize, pub num_easy_assumes: usize, @@ -82,27 +80,7 @@ pub struct BenchmarkResults { pub had_error: bool, } -pub type OnlineBenchmarkResults = BenchmarkResults< - OnlineMetrics, - OnlineMetrics, - OnlineMetrics, - OnlineMetrics<(), usize>, ->; - -pub type OfflineBenchmarkResults = BenchmarkResults< - OfflineMetrics, - OfflineMetrics, - OfflineMetrics, - OfflineMetrics<(), usize>, ->; - -impl BenchmarkResults -where - ByRun: Metrics + Default, - ByStep: Metrics + Default, - ByRunF64: Metrics + Default, - ByPolyeq: Metrics<(), usize> + Default, -{ +impl OnlineBenchmarkResults { pub fn new() -> Self { Default::default() } @@ -113,44 +91,160 @@ where } /// The time per run to completely parse the proof. - pub fn parsing(&self) -> &ByRun { + pub fn parsing(&self) -> &OnlineMetrics { &self.parsing } /// The time per run to check all the steps in the proof. 
- pub fn checking(&self) -> &ByRun { + pub fn checking(&self) -> &OnlineMetrics { &self.checking } /// The time per run to elaborate the proof. - pub fn elaborating(&self) -> &ByRun { + pub fn elaborating(&self) -> &OnlineMetrics { &self.elaborating } /// The combined time per run to parse, check, and elaborate all the steps in the proof. - pub fn total_accounted_for(&self) -> &ByRun { + pub fn total_accounted_for(&self) -> &OnlineMetrics { &self.total_accounted_for } /// The total time spent per run. Should be pretty similar to `total_accounted_for`. - pub fn total(&self) -> &ByRun { + pub fn total(&self) -> &OnlineMetrics { &self.total } /// The time spent checking each step. - pub fn step_time(&self) -> &ByStep { + pub fn step_time(&self) -> &OnlineMetrics { &self.step_time } /// For each file, the time spent checking each step in the file. - pub fn step_time_by_file(&self) -> &AHashMap { + pub fn step_time_by_file(&self) -> &AHashMap> { &self.step_time_by_file } /// For each rule, the time spent checking each step that uses that rule. 
- pub fn step_time_by_rule(&self) -> &AHashMap { + pub fn step_time_by_rule(&self) -> &AHashMap> { &self.step_time_by_rule } + + /// Prints the benchmark results + pub fn print(&self, sort_by_total: bool) { + let [parsing, checking, elaborating, accounted_for, total] = [ + self.parsing(), + self.checking(), + self.elaborating(), + self.total_accounted_for(), + self.total(), + ] + .map(|m| { + if sort_by_total { + format!("{:#}", m) + } else { + format!("{}", m) + } + }); + + println!("parsing: {}", parsing); + println!("checking: {}", checking); + if !elaborating.is_empty() { + println!("elaborating: {}", elaborating); + } + println!( + "on assume: {} ({:.02}% of checking time)", + self.assume_time, + 100.0 * self.assume_time.mean().as_secs_f64() / self.checking().mean().as_secs_f64(), + ); + println!("on assume (core): {}", self.assume_core_time); + println!("assume ratio: {}", self.assume_time_ratio); + println!( + "on polyeq: {} ({:.02}% of checking time)", + self.polyeq_time, + 100.0 * self.polyeq_time.mean().as_secs_f64() / self.checking().mean().as_secs_f64(), + ); + println!("polyeq ratio: {}", self.polyeq_time_ratio); + println!("total accounted for: {}", accounted_for); + println!("total: {}", total); + + let data_by_rule = self.step_time_by_rule(); + let mut data_by_rule: Vec<_> = data_by_rule.iter().collect(); + data_by_rule.sort_by_key(|(_, m)| if sort_by_total { m.total() } else { m.mean() }); + + println!("by rule:"); + for (rule, data) in data_by_rule { + print!(" {: <18}", rule); + if sort_by_total { + println!("{:#}", data) + } else { + println!("{}", data) + } + } + + println!("worst cases:"); + if !self.step_time().is_empty() { + let worst_step = self.step_time().max(); + println!(" step: {} ({:?})", worst_step.0, worst_step.1); + } + + let worst_file_parsing = self.parsing().max(); + println!( + " file (parsing): {} ({:?})", + worst_file_parsing.0 .0, worst_file_parsing.1 + ); + + let worst_file_checking = self.checking().max(); + println!( + " 
file (checking): {} ({:?})", + worst_file_checking.0 .0, worst_file_checking.1 + ); + + let worst_file_assume = self.assume_time_ratio.max(); + println!( + " file (assume): {} ({:.04}%)", + worst_file_assume.0 .0, + worst_file_assume.1 * 100.0 + ); + + let worst_file_polyeq = self.polyeq_time_ratio.max(); + println!( + " file (polyeq): {} ({:.04}%)", + worst_file_polyeq.0 .0, + worst_file_polyeq.1 * 100.0 + ); + + let worst_file_total = self.total().max(); + println!( + " file overall: {} ({:?})", + worst_file_total.0 .0, worst_file_total.1 + ); + + let num_hard_assumes = self.num_assumes - self.num_easy_assumes; + let percent_easy = (self.num_easy_assumes as f64) * 100.0 / (self.num_assumes as f64); + let percent_hard = (num_hard_assumes as f64) * 100.0 / (self.num_assumes as f64); + println!(" number of assumes: {}", self.num_assumes); + println!( + " (easy): {} ({:.02}%)", + self.num_easy_assumes, percent_easy + ); + println!( + " (hard): {} ({:.02}%)", + num_hard_assumes, percent_hard + ); + + let depths = &self.polyeq_depths; + if !depths.is_empty() { + println!(" max polyeq depth: {}", depths.max().1); + println!(" total polyeq depth: {}", depths.total()); + println!(" number of polyeq checks: {}", depths.count()); + println!(" mean depth: {:.4}", depths.mean()); + println!( + "standard deviation of depth: {:.4}", + depths.standard_deviation() + ); + } + } } #[derive(Default)] @@ -250,6 +344,7 @@ impl CsvBenchmarkResults { } pub trait CollectResults { + fn new() -> Self; fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration); fn add_assume_measurement(&mut self, file: &str, id: &str, is_easy: bool, time: Duration); fn add_polyeq_depth(&mut self, depth: usize); @@ -262,14 +357,11 @@ pub trait CollectResults { Self: Sized; } -impl CollectResults - for BenchmarkResults -where - ByRun: Metrics + Default, - ByStep: Metrics + Default, - ByRunF64: Metrics + Default, - ByPolyeq: Metrics<(), usize> + Default, -{ +impl CollectResults 
for OnlineBenchmarkResults { + fn new() -> Self { + Default::default() + } + fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration) { let file = file.to_owned(); let rule = rule.to_owned(); @@ -361,6 +453,10 @@ where } impl CollectResults for CsvBenchmarkResults { + fn new() -> Self { + Default::default() + } + fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration) { let id = StepId { file: file.into(), diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 66e61606..ac026881 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -4,7 +4,12 @@ mod parallel; mod rules; mod scheduler; -use crate::{ast::*, benchmarking::CollectResults, elaborator::Elaborator, CarcaraResult, Error}; +use crate::{ + ast::*, + benchmarking::{CollectResults, OnlineBenchmarkResults}, + elaborator::Elaborator, + CarcaraResult, Error, +}; use ahash::AHashSet; use error::CheckerError; pub use parallel::ParallelProofChecker; @@ -15,19 +20,20 @@ use std::{ time::{Duration, Instant}, }; -pub struct CheckerStatistics<'s> { +#[derive(Clone)] +pub struct CheckerStatistics<'s, CR: CollectResults + Send + Default> { pub file_name: &'s str, - pub elaboration_time: &'s mut Duration, - pub polyeq_time: &'s mut Duration, - pub assume_time: &'s mut Duration, + pub elaboration_time: Duration, + pub polyeq_time: Duration, + pub assume_time: Duration, // This is the time to compare the `assume` term with the `assert` that matches it. That is, // this excludes the time spent searching for the correct `assert` premise. - pub assume_core_time: &'s mut Duration, - pub results: &'s mut dyn CollectResults, + pub assume_core_time: Duration, + pub results: CR, } -impl fmt::Debug for CheckerStatistics<'_> { +impl fmt::Debug for CheckerStatistics<'_, CR> { // Since `self.results` does not implement `Debug`, we can't just `#[derive(Debug)]` and instead // have to implement it manually, removing that field. 
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -46,7 +52,6 @@ pub struct Config { strict: bool, skip_unknown_rules: bool, is_running_test: bool, - statistics: Option<()>, lia_via_cvc5: bool, } @@ -69,11 +74,6 @@ impl Config { self.lia_via_cvc5 = value; self } - - pub fn statistics(mut self, value: ()) -> Self { - self.statistics = Some(value); - self - } } pub struct ProofChecker<'c> { @@ -119,7 +119,97 @@ impl<'c> ProofChecker<'c> { } else { None }; - self.check_step(step, previous_command, &iter) + self.check_step( + step, + previous_command, + &iter, + None::<&mut CheckerStatistics>, + ) + .map_err(|e| Error::Checker { + inner: e, + rule: step.rule.clone(), + step: step.id.clone(), + })?; + + // If this is the last command of a subproof, we have to pop the subproof + // commands off of the stack. The parser already ensures that the last command + // in a subproof is always a `step` command + if is_end_of_subproof { + self.context.pop(); + if let Some(elaborator) = &mut self.elaborator { + elaborator.close_subproof(); + } + } + + if step.clause.is_empty() { + self.reached_empty_clause = true; + } + } + ProofCommand::Subproof(s) => { + let step_id = command.id(); + + self.context + .push(self.pool, &s.assignment_args, &s.variable_args) + .map_err(|e| Error::Checker { + inner: e.into(), + rule: "anchor".into(), + step: step_id.to_owned(), + })?; + + if let Some(elaborator) = &mut self.elaborator { + elaborator.open_subproof(s.commands.len()); + } + } + ProofCommand::Assume { id, term } => { + if !self.check_assume( + id, + term, + &proof.premises, + &iter, + None::<&mut CheckerStatistics>, + ) { + return Err(Error::Checker { + inner: CheckerError::Assume(term.clone()), + rule: "assume".into(), + step: id.clone(), + }); + } + } + ProofCommand::Closing => {} + } + } + if self.config.is_running_test || self.reached_empty_clause { + Ok(self.is_holey) + } else { + Err(Error::DoesNotReachEmptyClause) + } + } + + pub fn check_with_stats<'s, 'p, 'a, CR: 
CollectResults + Send + Default>( + &'s mut self, + proof: &'p Proof, + stats: &'s mut CheckerStatistics<'a, CR>, + ) -> CarcaraResult { + // Similarly to the parser, to avoid stack overflows in proofs with many nested subproofs, + // we check the subproofs iteratively, instead of recursively + let mut iter = proof.iter(); + while let Some(command) = iter.next() { + match command { + ProofCommand::Step(step) => { + let is_end_of_subproof = iter.is_end_step(); + + // If this step ends a subproof, it might need to implicitly reference the + // previous command in the subproof + let previous_command = if is_end_of_subproof { + let subproof = iter.current_subproof().unwrap(); + let index = subproof.len() - 2; + subproof + .get(index) + .map(|command| Premise::new((iter.depth(), index), command)) + } else { + None + }; + self.check_step(step, previous_command, &iter, Some(stats)) .map_err(|e| Error::Checker { inner: e, rule: step.rule.clone(), @@ -155,9 +245,20 @@ impl<'c> ProofChecker<'c> { if let Some(elaborator) = &mut self.elaborator { elaborator.open_subproof(s.commands.len()); } + + let rule_name = match s.commands.last() { + Some(ProofCommand::Step(step)) => format!("anchor({})", &step.rule), + _ => "anchor".to_owned(), + }; + stats.results.add_step_measurement( + stats.file_name, + step_id, + &rule_name, + time.elapsed(), + ); } ProofCommand::Assume { id, term } => { - if !self.check_assume(id, term, &proof.premises, &iter) { + if !self.check_assume(id, term, &proof.premises, &iter, Some(stats)) { return Err(Error::Checker { inner: CheckerError::Assume(term.clone()), rule: "assume".into(), @@ -184,17 +285,38 @@ impl<'c> ProofChecker<'c> { let mut elaborator = self.elaborator.take().unwrap(); result?; + proof.commands = elaborator.end(proof.commands); + + Ok((self.is_holey, proof)) + } + + pub fn check_and_elaborate_with_stats<'s, 'a, CR: CollectResults + Send + Default>( + &'s mut self, + mut proof: Proof, + stats: &'s mut CheckerStatistics<'a, CR>, + ) -> 
CarcaraResult<(bool, Proof)> { + self.elaborator = Some(Elaborator::new()); + let result = self.check_with_stats(&proof, stats); + + // We reset `self.elaborator` before returning any errors encountered while checking so we + // don't leave the checker in an invalid state + let mut elaborator = self.elaborator.take().unwrap(); + result?; + let elaboration_time = Instant::now(); proof.commands = elaborator.end(proof.commands); + stats.elaboration_time += elaboration_time.elapsed(); + Ok((self.is_holey, proof)) } - fn check_assume( + fn check_assume<'a, CR: CollectResults + Send + Default>( &mut self, id: &str, term: &Rc, premises: &AHashSet>, iter: &ProofIter, + mut stats: Option<&'a mut CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -211,6 +333,13 @@ impl<'c> ProofChecker<'c> { } if premises.contains(term) { + if let Some(s) = stats { + let time = time.elapsed(); + + s.assume_time += time; + s.results + .add_assume_measurement(s.file_name, id, true, time); + } if let Some(elaborator) = &mut self.elaborator { elaborator.assume(term); } @@ -229,6 +358,9 @@ impl<'c> ProofChecker<'c> { let mut this_polyeq_time = Duration::ZERO; let (result, depth) = tracing_polyeq(term, p, &mut this_polyeq_time); polyeq_time += this_polyeq_time; + if let Some(s) = &mut stats { + s.results.add_polyeq_depth(depth); + } if result { core_time = this_polyeq_time; found = Some(p.clone()); @@ -242,16 +374,31 @@ impl<'c> ProofChecker<'c> { let elaboration_time = Instant::now(); elaborator.elaborate_assume(self.pool, p, term.clone(), id); + + if let Some(s) = &mut stats { + s.elaboration_time += elaboration_time.elapsed(); + } + } + + if let Some(s) = &mut stats { + let time = time.elapsed(); + + s.assume_time += time; + s.assume_core_time += core_time; + s.polyeq_time += polyeq_time; + s.results + .add_assume_measurement(s.file_name, id, false, time); } true } - fn check_step<'a>( + fn check_step<'a, CR: CollectResults + Send + Default>( &mut self, step: &'a ProofStep, 
previous_command: Option>, iter: &'a ProofIter<'a>, + stats: Option<&'a mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; @@ -330,6 +477,16 @@ impl<'c> ProofChecker<'c> { } } + if let Some(s) = stats { + let time = time.elapsed(); + + s.results + .add_step_measurement(s.file_name, &step.id, &step.rule, time); + s.polyeq_time += polyeq_time; + if elaborated { + s.elaboration_time += time; + } + } Ok(()) } diff --git a/carcara/src/checker/parallel.rs b/carcara/src/checker/parallel.rs index ffdef1b6..090aced0 100644 --- a/carcara/src/checker/parallel.rs +++ b/carcara/src/checker/parallel.rs @@ -1,21 +1,21 @@ use super::error::CheckerError; -use super::rules::{Premise, Rule, RuleArgs, RuleResult}; +use super::rules::{Premise, RuleArgs, RuleResult}; use super::scheduler::{iter::ScheduleIter, Scheduler}; -use super::{lia_generic, CheckerStatistics, Config}; +use super::{lia_generic, Config}; +use crate::benchmarking::{CollectResults, OnlineBenchmarkResults}; +use crate::checker::CheckerStatistics; use crate::{ ast::{AdvancedPools::LocalPool, *}, CarcaraResult, Error, }; use ahash::AHashSet; use std::{ + ops::ControlFlow, sync::{Arc, RwLock}, thread, time::{Duration, Instant}, }; -unsafe impl Sync for CheckerStatistics<'_> {} -unsafe impl Send for CheckerStatistics<'_> {} - pub struct ParallelProofChecker<'c> { pool: Arc, config: Config, @@ -41,9 +41,8 @@ impl<'c> ParallelProofChecker<'c> { } } - /// Copies the proof checker and instantiate parallel fields - /// TODO: Change function name - pub fn parallelize_self(&self) -> Self { + /// Copies the proof checker and instantiate parallel fields to be shared between threads + pub fn share(&self) -> Self { ParallelProofChecker { pool: self.pool.clone(), config: self.config.clone(), @@ -71,17 +70,15 @@ impl<'c> ParallelProofChecker<'c> { .into_iter() .enumerate() .map(|(i, schedule)| { - // Creates a local statistics collector, allowing the collection - // of this 
threads statistics and then the merge - let mut local_stats = None; - let mut local_self = self.parallelize_self(); + // Shares the self between threads + let mut local_self = self.share(); let mut local_pool = LocalPool::from_previous(&context_pool); let should_abort = premature_abort.clone(); thread::Builder::new() .name(format!("worker-{i}")) .stack_size(STACK_SIZE) - .spawn_scoped(s, move || -> CarcaraResult<(bool, bool, Option<()>)> { + .spawn_scoped(s, move || -> CarcaraResult<(bool, bool)> { let mut iter = schedule.iter(&proof.commands[..]); while let Some(command) = iter.next() { @@ -105,7 +102,9 @@ impl<'c> ParallelProofChecker<'c> { previous_command, &iter, &mut local_pool, - &mut local_stats, + None::< + &mut CheckerStatistics, + >, ) .map_err(|e| { // Signalize to other threads to stop the proof checking @@ -122,7 +121,6 @@ impl<'c> ParallelProofChecker<'c> { } } ProofCommand::Subproof(s) => { - let time = Instant::now(); let step_id = command.id(); local_self @@ -148,7 +146,7 @@ impl<'c> ParallelProofChecker<'c> { term, &proof.premises, &iter, - &mut local_stats, + None::<&mut CheckerStatistics>, ) { // Signalize to other threads to stop the proof checking *should_abort.write().unwrap() = true; @@ -175,9 +173,9 @@ impl<'c> ParallelProofChecker<'c> { // Returns Ok(reached empty clause, isHoley, current thread statistics) if local_self.config.is_running_test || local_self.reached_empty_clause { - Ok((true, local_self.is_holey, Some(()))) + Ok((true, local_self.is_holey)) } else { - Ok((false, local_self.is_holey, Some(()))) + Ok((false, local_self.is_holey)) } }) .unwrap() @@ -188,13 +186,216 @@ impl<'c> ParallelProofChecker<'c> { let (mut reached, mut holey) = (false, false); let mut err: Result<_, Error> = Ok(()); + // Wait until the threads finish and merge the results and statistics + threads + .into_iter() + .map(|t| t.join().unwrap()) + .try_for_each(|opt| { + match opt { + Ok((_reached, _holey)) => { + // Mask the result booleans + (reached, 
holey) = (reached | _reached, holey | _holey); + ControlFlow::Continue(()) + } + Err(e) => { + err = Err(e); + ControlFlow::Break(()) + } + } + }); + + // If an error happend + if let Err(x) = err { + return Err(x); + } + + if reached { + Ok(holey) + } else { + Err(Error::DoesNotReachEmptyClause) + } + }) + } + + pub fn check_with_stats<'s, 'p, 'a, CR: CollectResults + Send + Default>( + &'s mut self, + proof: &'p Proof, + scheduler: &'s Scheduler, + stats: &'s mut CheckerStatistics<'a, CR>, + ) -> CarcaraResult { + // Used to estimulate threads to abort prematurely (only happens when a + // thread already found out an invalid step) + let premature_abort = Arc::new(RwLock::new(false)); + let context_pool = AdvancedPools::ContextPool::from_global(&self.pool); + // TODO: Add stack size flag + const STACK_SIZE: usize = 128 * 1024 * 1024; + // + thread::scope(|s| { + let threads: Vec<_> = (&scheduler.loads) + .into_iter() + .enumerate() + .map(|(i, schedule)| { + let mut local_stats = CheckerStatistics { + file_name: "", + elaboration_time: Duration::ZERO, + polyeq_time: Duration::ZERO, + assume_time: Duration::ZERO, + assume_core_time: Duration::ZERO, + results: CR::new(), + }; + // Shares the proof checker between threads + let mut local_self = self.share(); + let mut local_pool = LocalPool::from_previous(&context_pool); + let should_abort = premature_abort.clone(); + + thread::Builder::new() + .name(format!("worker-{i}")) + .stack_size(STACK_SIZE) + .spawn_scoped( + s, + move || -> CarcaraResult<(bool, bool, CheckerStatistics)> { + let mut iter = schedule.iter(&proof.commands[..]); + + while let Some(command) = iter.next() { + match command { + ProofCommand::Step(step) => { + // If this step ends a subproof, it might need to implicitly reference the + // previous command in the subproof + let previous_command = if iter.is_end_step() { + let subproof = iter.current_subproof().unwrap(); + let index = subproof.len() - 2; + subproof.get(index).map(|command| { + 
Premise::new((iter.depth(), index), command) + }) + } else { + None + }; + + local_self + .check_step( + step, + previous_command, + &iter, + &mut local_pool, + Some(&mut local_stats), + ) + .map_err(|e| { + // Signalize to other threads to stop the proof checking + *should_abort.write().unwrap() = true; + Error::Checker { + inner: e, + rule: step.rule.clone(), + step: step.id.clone(), + } + })?; + + if step.clause.is_empty() { + local_self.reached_empty_clause = true; + } + } + ProofCommand::Subproof(s) => { + let time = Instant::now(); + let step_id = command.id(); + + local_self + .context + .push( + &mut local_pool, + &s.assignment_args, + &s.variable_args, + ) + .map_err(|e| { + // Signalize to other threads to stop the proof checking + *should_abort.write().unwrap() = true; + Error::Checker { + inner: e.into(), + rule: "anchor".into(), + step: step_id.to_owned(), + } + })?; + + // Collects statistics + let rule_name = match s.commands.last() { + Some(ProofCommand::Step(step)) => { + format!("anchor({})", &step.rule) + } + _ => "anchor".to_owned(), + }; + + local_stats.results.add_step_measurement( + local_stats.file_name, + step_id, + &rule_name, + time.elapsed(), + ); + } + ProofCommand::Assume { id, term } => { + if !local_self.check_assume( + id, + term, + &proof.premises, + &iter, + Some(&mut local_stats), + ) { + // Signalize to other threads to stop the proof checking + *should_abort.write().unwrap() = true; + return Err(Error::Checker { + inner: CheckerError::Assume(term.clone()), + rule: "assume".into(), + step: id.clone(), + }); + } + } + ProofCommand::Closing => { + // If this is the last command of a subproof, we have to pop off the subproof + // commands of the stack. 
The parser already ensures that the last command + // in a subproof is always a `step` command + local_self.context.pop(); + } + } + // Verify if any of the other threads found an error and abort in case of positive + if *should_abort.read().unwrap() { + break; + } + } + + // Returns Ok(reached empty clause, isHoley, current thread statistics) + if local_self.config.is_running_test + || local_self.reached_empty_clause + { + Ok((true, local_self.is_holey, local_stats)) + } else { + Ok((false, local_self.is_holey, local_stats)) + } + }, + ) + .unwrap() + }) + .collect(); + + // Unify the results of all threads and generate the final result based on them + let (mut reached, mut holey) = (false, false); + let mut err: Result<_, Error> = Ok(()); + // Wait until the threads finish and merge the results and statistics threads .into_iter() .map(|t| t.join().unwrap()) .for_each(|opt| { match opt { - Ok((_reached, _holey, local_stats)) => { + Ok((_reached, _holey, mut local_stats)) => { + // Combine the statistics + // Takes the external and local benchmark results to local variables and combine them + let main = std::mem::take(&mut stats.results); + let to_merge = std::mem::take(&mut local_stats.results); + stats.results = CR::combine(main, to_merge); + + // Make sure other times are updated + stats.elaboration_time += local_stats.elaboration_time; + stats.polyeq_time += local_stats.polyeq_time; + stats.assume_time += local_stats.assume_time; + stats.assume_core_time += local_stats.assume_core_time; + // Mask the result booleans (reached, holey) = (reached | _reached, holey | _holey); } @@ -221,14 +422,13 @@ impl<'c> ParallelProofChecker<'c> { }) } - // TODO: Remove statistics as an argument since we are going to pass it through config in the local_self - fn check_assume( + fn check_assume<'a, CR: CollectResults + Send + Default>( &mut self, id: &str, term: &Rc, premises: &AHashSet>, iter: &ScheduleIter, - statistics: &mut Option, + mut stats: Option<&'a mut 
CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -242,6 +442,12 @@ impl<'c> ParallelProofChecker<'c> { } if premises.contains(term) { + if let Some(s) = stats { + let time = time.elapsed(); + s.assume_time += time; + s.results + .add_assume_measurement(s.file_name, id, true, time); + } return true; } @@ -257,6 +463,9 @@ impl<'c> ParallelProofChecker<'c> { let mut this_polyeq_time = Duration::ZERO; let (result, depth) = tracing_polyeq(term, p, &mut this_polyeq_time); polyeq_time += this_polyeq_time; + if let Some(s) = &mut stats { + s.results.add_polyeq_depth(depth); + } if result { core_time = this_polyeq_time; found = Some(p.clone()); @@ -264,19 +473,29 @@ impl<'c> ParallelProofChecker<'c> { } } - let Some(_) = found else { return false }; + if found.is_none() { + return false; + } + + if let Some(s) = stats { + let time = time.elapsed(); + s.assume_time += time; + s.assume_core_time += core_time; + s.polyeq_time += polyeq_time; + s.results + .add_assume_measurement(s.file_name, id, false, time); + } true } - // TODO: Ditto - fn check_step<'a>( + fn check_step<'a, CR: CollectResults + Send + Default>( &mut self, step: &'a ProofStep, previous_command: Option>, iter: &'a ScheduleIter<'a>, pool: &mut TermPool, - statistics: &mut Option, + stats: Option<&'a mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; @@ -291,7 +510,7 @@ impl<'c> ParallelProofChecker<'c> { self.is_holey = true; } } else { - let rule = match Self::get_rule(&step.rule, self.config.strict) { + let rule = match super::ProofChecker::get_rule(&step.rule, self.config.strict) { Some(r) => r, None if self.config.skip_unknown_rules => { self.is_holey = true; @@ -332,124 +551,12 @@ impl<'c> ParallelProofChecker<'c> { rule(rule_args)?; } + if let Some(s) = stats { + let time = time.elapsed(); + s.results + .add_step_measurement(s.file_name, &step.id, &step.rule, time); + s.polyeq_time += polyeq_time; + } Ok(()) } - - pub fn 
get_rule(rule_name: &str, strict: bool) -> Option { - use super::rules::*; - - Some(match rule_name { - "true" => tautology::r#true, - "false" => tautology::r#false, - "not_not" => tautology::not_not, - "and_pos" => tautology::and_pos, - "and_neg" => tautology::and_neg, - "or_pos" => tautology::or_pos, - "or_neg" => tautology::or_neg, - "xor_pos1" => tautology::xor_pos1, - "xor_pos2" => tautology::xor_pos2, - "xor_neg1" => tautology::xor_neg1, - "xor_neg2" => tautology::xor_neg2, - "implies_pos" => tautology::implies_pos, - "implies_neg1" => tautology::implies_neg1, - "implies_neg2" => tautology::implies_neg2, - "equiv_pos1" => tautology::equiv_pos1, - "equiv_pos2" => tautology::equiv_pos2, - "equiv_neg1" => tautology::equiv_neg1, - "equiv_neg2" => tautology::equiv_neg2, - "ite_pos1" => tautology::ite_pos1, - "ite_pos2" => tautology::ite_pos2, - "ite_neg1" => tautology::ite_neg1, - "ite_neg2" => tautology::ite_neg2, - "eq_reflexive" => reflexivity::eq_reflexive, - "eq_transitive" => transitivity::eq_transitive, - "eq_congruent" => congruence::eq_congruent, - "eq_congruent_pred" => congruence::eq_congruent_pred, - "distinct_elim" => clausification::distinct_elim, - "la_rw_eq" => linear_arithmetic::la_rw_eq, - "la_generic" => linear_arithmetic::la_generic, - "la_disequality" => linear_arithmetic::la_disequality, - "la_totality" => linear_arithmetic::la_totality, - "la_tautology" => linear_arithmetic::la_tautology, - "forall_inst" => quantifier::forall_inst, - "qnt_join" => quantifier::qnt_join, - "qnt_rm_unused" => quantifier::qnt_rm_unused, - "resolution" | "th_resolution" if strict => resolution::resolution_with_args, - "resolution" | "th_resolution" => resolution::resolution, - "refl" if strict => reflexivity::strict_refl, - "refl" => reflexivity::refl, - "trans" => transitivity::trans, - "cong" => congruence::cong, - "ho_cong" => congruence::ho_cong, - "and" => clausification::and, - "tautology" => resolution::tautology, - "not_or" => clausification::not_or, - 
"or" => clausification::or, - "not_and" => clausification::not_and, - "xor1" => clausification::xor1, - "xor2" => clausification::xor2, - "not_xor1" => clausification::not_xor1, - "not_xor2" => clausification::not_xor2, - "implies" => clausification::implies, - "not_implies1" => clausification::not_implies1, - "not_implies2" => clausification::not_implies2, - "equiv1" => tautology::equiv1, - "equiv2" => tautology::equiv2, - "not_equiv1" => tautology::not_equiv1, - "not_equiv2" => tautology::not_equiv2, - "ite1" => tautology::ite1, - "ite2" => tautology::ite2, - "not_ite1" => tautology::not_ite1, - "not_ite2" => tautology::not_ite2, - "ite_intro" => tautology::ite_intro, - "contraction" => resolution::contraction, - "connective_def" => tautology::connective_def, - "ite_simplify" => simplification::ite_simplify, - "eq_simplify" => simplification::eq_simplify, - "and_simplify" => simplification::and_simplify, - "or_simplify" => simplification::or_simplify, - "not_simplify" => simplification::not_simplify, - "implies_simplify" => simplification::implies_simplify, - "equiv_simplify" => simplification::equiv_simplify, - "bool_simplify" => simplification::bool_simplify, - "qnt_simplify" => simplification::qnt_simplify, - "div_simplify" => simplification::div_simplify, - "prod_simplify" => simplification::prod_simplify, - // Despite being separate rules in the specification, proofs generated by veriT don't - // differentiate between `unary_minus_simplify` and `minus_simplify`. To account for - // that, `simplification::minus_simplify` implements both rules in the same function. 
- "unary_minus_simplify" | "minus_simplify" => simplification::minus_simplify, - "sum_simplify" => simplification::sum_simplify, - "comp_simplify" => simplification::comp_simplify, - "nary_elim" => clausification::nary_elim, - "ac_simp" => simplification::ac_simp, - "bfun_elim" => clausification::bfun_elim, - "bind" => subproof::bind, - "qnt_cnf" => quantifier::qnt_cnf, - "subproof" => subproof::subproof, - "let" => subproof::r#let, - "onepoint" => subproof::onepoint, - "sko_ex" => subproof::sko_ex, - "sko_forall" => subproof::sko_forall, - "reordering" => extras::reordering, - "symm" => extras::symm, - "not_symm" => extras::not_symm, - "eq_symmetric" => extras::eq_symmetric, - "or_intro" => extras::or_intro, - "bind_let" => extras::bind_let, - "la_mult_pos" => extras::la_mult_pos, - "la_mult_neg" => extras::la_mult_neg, - - // Special rules that always check as valid, and are used to indicate holes in the - // proof. - "hole" => |_| Ok(()), - - // The Alethe specification does not yet describe how this more strict version of the - // resolution rule will be called. 
Until that is decided and added to the specification, - // we define a new specialized rule that calls it - "strict_resolution" => resolution::strict_resolution, - - _ => return None, - }) - } } diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index 7fd32ea7..669d7183 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -173,7 +173,6 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { strict: false, skip_unknown_rules: false, is_running_test: true, - statistics: None, lia_via_cvc5: false, }, &prelude, diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index e9cbc6b3..c80302b1 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -42,10 +42,11 @@ pub mod elaborator; pub mod parser; mod utils; -use checker::error::CheckerError; -use parser::ParserError; -use parser::Position; +use crate::benchmarking::{CollectResults, OnlineBenchmarkResults, RunMeasurement}; +use checker::{error::CheckerError, CheckerStatistics}; +use parser::{ParserError, Position}; use std::io; +use std::time::{Duration, Instant}; use thiserror::Error; pub type CarcaraResult = Result; @@ -141,7 +142,10 @@ pub fn check( num_threads: usize, ) -> Result { use crate::checker::Scheduler; + let mut run_measures: RunMeasurement = RunMeasurement::default(); + // Parsing + let total = Instant::now(); let (prelude, proof, pool) = parser::parse_instance_multithread( problem, proof, @@ -149,13 +153,50 @@ pub fn check( options.expand_lets, options.allow_int_real_subtyping, )?; + run_measures.parsing = total.elapsed(); let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) .lia_via_cvc5(options.lia_via_cvc5); - let (scheduler, schedule_context_usage) = Scheduler::new(num_threads, &proof); - checker::ParallelProofChecker::new(pool, config, &prelude).check(&proof, &scheduler) + + // Checking + let checking = Instant::now(); + let mut checker = 
checker::ParallelProofChecker::new(pool, config, &prelude); + let (scheduler, _) = Scheduler::new(num_threads, &proof); + if options.stats { + let mut checker_stats = CheckerStatistics { + file_name: "this", + elaboration_time: Duration::ZERO, + polyeq_time: Duration::ZERO, + assume_time: Duration::ZERO, + assume_core_time: Duration::ZERO, + results: OnlineBenchmarkResults::new(), + }; + let res = checker.check_with_stats(&proof, &scheduler, &mut checker_stats); + + run_measures.checking = checking.elapsed(); + run_measures.total = total.elapsed(); + + checker_stats.results.add_run_measurement( + &("this".to_string(), 0), + RunMeasurement { + parsing: run_measures.parsing, + checking: run_measures.checking, + elaboration: checker_stats.elaboration_time, + total: run_measures.total, + polyeq: checker_stats.polyeq_time, + assume: checker_stats.assume_time, + assume_core: checker_stats.assume_core_time, + }, + ); + // Print the statistics + checker_stats.results.print(false); + + res + } else { + checker.check(&proof, &scheduler) + } } pub fn check_and_elaborate( @@ -163,6 +204,10 @@ pub fn check_and_elaborate( proof: T, options: CarcaraOptions, ) -> Result<(bool, ast::Proof), Error> { + let mut run_measures: RunMeasurement = RunMeasurement::default(); + + // Parsing + let total = Instant::now(); let (prelude, proof, mut pool) = parser::parse_instance( problem, proof, @@ -170,10 +215,47 @@ pub fn check_and_elaborate( options.expand_lets, options.allow_int_real_subtyping, )?; + run_measures.parsing = total.elapsed(); let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) .lia_via_cvc5(options.lia_via_cvc5); - checker::ProofChecker::new(&mut pool, config, &prelude).check_and_elaborate(proof) + + // Checking + let checking = Instant::now(); + let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); + if options.stats { + let mut checker_stats = CheckerStatistics { + file_name: "this", + 
elaboration_time: Duration::ZERO, + polyeq_time: Duration::ZERO, + assume_time: Duration::ZERO, + assume_core_time: Duration::ZERO, + results: OnlineBenchmarkResults::new(), + }; + + let res = checker.check_and_elaborate_with_stats(proof, &mut checker_stats); + run_measures.checking = checking.elapsed(); + run_measures.total = total.elapsed(); + + checker_stats.results.add_run_measurement( + &("this".to_string(), 0), + RunMeasurement { + parsing: run_measures.parsing, + checking: run_measures.checking, + elaboration: checker_stats.elaboration_time, + total: run_measures.total, + polyeq: checker_stats.polyeq_time, + assume: checker_stats.assume_time, + assume_core: checker_stats.assume_core_time, + }, + ); + // Print the statistics + checker_stats.results.print(false); + + res + } else { + checker.check_and_elaborate(proof) + } } diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index 6eadb2ba..a7046e2c 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -20,13 +20,21 @@ struct JobDescriptor<'a> { run_index: usize, } -fn run_job( +fn run_job( results: &mut T, job: JobDescriptor, options: &CarcaraOptions, elaborate: bool, ) -> Result { let proof_file_name = job.proof_file.to_str().unwrap(); + let mut checker_stats = checker::CheckerStatistics { + file_name: proof_file_name, + elaboration_time: Duration::ZERO, + polyeq_time: Duration::ZERO, + assume_time: Duration::ZERO, + assume_core_time: Duration::ZERO, + results: std::mem::take(results), + }; let total = Instant::now(); @@ -40,11 +48,6 @@ fn run_job( )?; let parsing = parsing.elapsed(); - let mut elaboration = Duration::ZERO; - let mut polyeq = Duration::ZERO; - let mut assume = Duration::ZERO; - let mut assume_core = Duration::ZERO; - let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) @@ -55,31 +58,32 @@ fn run_job( let checking_result = if elaborate { checker - .check_and_elaborate(proof) + 
.check_and_elaborate_with_stats(proof, &mut checker_stats) .map(|(is_holey, _)| is_holey) } else { - checker.check(&proof) + checker.check_with_stats(&proof, &mut checker_stats) }; let checking = checking.elapsed(); let total = total.elapsed(); - results.add_run_measurement( + checker_stats.results.add_run_measurement( &(proof_file_name.to_string(), job.run_index), RunMeasurement { parsing, checking, - elaboration, + elaboration: checker_stats.elaboration_time, total, - polyeq, - assume, - assume_core, + polyeq: checker_stats.polyeq_time, + assume: checker_stats.assume_time, + assume_core: checker_stats.assume_core_time, }, ); + *results = checker_stats.results; checking_result } -fn worker_thread( +fn worker_thread( jobs_queue: &ArrayQueue, options: &CarcaraOptions, elaborate: bool, diff --git a/cli/src/main.rs b/cli/src/main.rs index 93618309..c9483a3b 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -4,9 +4,8 @@ mod logger; mod path_args; use carcara::{ - ast::print_proof, - benchmarking::{Metrics, OnlineBenchmarkResults}, - check, check_and_elaborate, parser, CarcaraOptions, + ast::print_proof, benchmarking::OnlineBenchmarkResults, check, check_and_elaborate, parser, + CarcaraOptions, }; use clap::{AppSettings, ArgEnum, Args, Parser, Subcommand}; use const_format::{formatcp, str_index}; @@ -424,116 +423,7 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { } fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) -> CliResult<()> { - let [parsing, checking, elaborating, accounted_for, total] = [ - results.parsing(), - results.checking(), - results.elaborating(), - results.total_accounted_for(), - results.total(), - ] - .map(|m| { - if sort_by_total { - format!("{:#}", m) - } else { - format!("{}", m) - } - }); - - println!("parsing: {}", parsing); - println!("checking: {}", checking); - if !elaborating.is_empty() { - println!("elaborating: {}", elaborating); - } - println!( - "on assume: {} ({:.02}% of checking 
time)", - results.assume_time, - 100.0 * results.assume_time.mean().as_secs_f64() / results.checking().mean().as_secs_f64(), - ); - println!("on assume (core): {}", results.assume_core_time); - println!("assume ratio: {}", results.assume_time_ratio); - println!( - "on polyeq: {} ({:.02}% of checking time)", - results.polyeq_time, - 100.0 * results.polyeq_time.mean().as_secs_f64() / results.checking().mean().as_secs_f64(), - ); - println!("polyeq ratio: {}", results.polyeq_time_ratio); - println!("total accounted for: {}", accounted_for); - println!("total: {}", total); - - let data_by_rule = results.step_time_by_rule(); - let mut data_by_rule: Vec<_> = data_by_rule.iter().collect(); - data_by_rule.sort_by_key(|(_, m)| if sort_by_total { m.total() } else { m.mean() }); - - println!("by rule:"); - for (rule, data) in data_by_rule { - print!(" {: <18}", rule); - if sort_by_total { - println!("{:#}", data) - } else { - println!("{}", data) - } - } - - println!("worst cases:"); - let worst_step = results.step_time().max(); - println!(" step: {} ({:?})", worst_step.0, worst_step.1); - - let worst_file_parsing = results.parsing().max(); - println!( - " file (parsing): {} ({:?})", - worst_file_parsing.0 .0, worst_file_parsing.1 - ); - - let worst_file_checking = results.checking().max(); - println!( - " file (checking): {} ({:?})", - worst_file_checking.0 .0, worst_file_checking.1 - ); - - let worst_file_assume = results.assume_time_ratio.max(); - println!( - " file (assume): {} ({:.04}%)", - worst_file_assume.0 .0, - worst_file_assume.1 * 100.0 - ); - - let worst_file_polyeq = results.polyeq_time_ratio.max(); - println!( - " file (polyeq): {} ({:.04}%)", - worst_file_polyeq.0 .0, - worst_file_polyeq.1 * 100.0 - ); - - let worst_file_total = results.total().max(); - println!( - " file overall: {} ({:?})", - worst_file_total.0 .0, worst_file_total.1 - ); - - let num_hard_assumes = results.num_assumes - results.num_easy_assumes; - let percent_easy = (results.num_easy_assumes 
as f64) * 100.0 / (results.num_assumes as f64); - let percent_hard = (num_hard_assumes as f64) * 100.0 / (results.num_assumes as f64); - println!(" number of assumes: {}", results.num_assumes); - println!( - " (easy): {} ({:.02}%)", - results.num_easy_assumes, percent_easy - ); - println!( - " (hard): {} ({:.02}%)", - num_hard_assumes, percent_hard - ); - - let depths = results.polyeq_depths; - if !depths.is_empty() { - println!(" max polyeq depth: {}", depths.max().1); - println!(" total polyeq depth: {}", depths.total()); - println!(" number of polyeq checks: {}", depths.count()); - println!(" mean depth: {:.4}", depths.mean()); - println!( - "standard deviation of depth: {:.4}", - depths.standard_deviation() - ); - } + results.print(sort_by_total); Ok(()) } From 7a59b76a78e8827958216ec48c685ee57b86120f Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Thu, 20 Jul 2023 15:02:00 -0300 Subject: [PATCH 16/70] remodeled pool folder and refs --- carcara/src/ast/mod.rs | 6 +- carcara/src/ast/pool.rs | 723 ------------------------------- carcara/src/ast/pool/advanced.rs | 447 +++++++++++++++++++ carcara/src/ast/pool/mod.rs | 272 ++++++++++++ carcara/src/checker/parallel.rs | 6 +- 5 files changed, 724 insertions(+), 730 deletions(-) delete mode 100644 carcara/src/ast/pool.rs create mode 100644 carcara/src/ast/pool/advanced.rs create mode 100644 carcara/src/ast/pool/mod.rs diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index c60d5f17..1a5689e1 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -7,7 +7,7 @@ mod macros; mod context; mod iter; mod polyeq; -mod pool; +pub mod pool; pub(crate) mod printer; mod rc; mod substitution; @@ -17,9 +17,7 @@ mod tests; pub use context::{Context, ContextStack}; pub use iter::ProofIter; pub use polyeq::{alpha_equiv, polyeq, tracing_polyeq}; -pub use pool::TPool; -pub use pool::TermPool; -pub use pool::{AdvancedPools, PrimitivePool}; +pub use pool::{PrimitivePool, 
TPool, TermPool}; pub use printer::print_proof; pub use rc::Rc; pub use substitution::{Substitution, SubstitutionError}; diff --git a/carcara/src/ast/pool.rs b/carcara/src/ast/pool.rs deleted file mode 100644 index 37a3ea3e..00000000 --- a/carcara/src/ast/pool.rs +++ /dev/null @@ -1,723 +0,0 @@ -//! This module implements `TermPool`, a structure that stores terms and implements hash consing. - -use super::{Rc, Term}; -use ahash::AHashSet; - -pub type TermPool = AdvancedPools::LocalPool; - -pub trait TPool { - /// Returns the term corresponding to the boolean constant `true`. - fn bool_true(&self) -> Rc; - /// Returns the term corresponding to the boolean constant `false`. - fn bool_false(&self) -> Rc; - /// Returns the term corresponding to the boolean constant determined by `value`. - fn bool_constant(&self, value: bool) -> Rc { - match value { - true => self.bool_true(), - false => self.bool_false(), - } - } - /// Takes a term and returns a possibly newly allocated `Rc` that references it. - /// - /// If the term was not originally in the term pool, it is added to it. Otherwise, this method - /// just returns an `Rc` pointing to the existing allocation. This method also computes the - /// term's sort, and adds it to the sort cache. - fn add(&mut self, term: Term) -> Rc; - /// Takes a vector of terms and calls [`TermPool::add`] on each. - fn add_all(&mut self, terms: Vec) -> Vec> { - terms.into_iter().map(|t| self.add(t)).collect() - } - /// Returns the sort of the given term. - /// - /// This method assumes that the sorts of any subterms have already been checked, and are - /// correct. If `term` is itself a sort, this simply returns that sort. - fn sort(&self, term: &Rc) -> Rc; - /// Returns an `AHashSet` containing all the free variables in the given term. - /// - /// This method uses a cache, so there is no additional cost to computing the free variables of - /// a term multiple times. 
- fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet>; -} - -pub mod PrimitivePool { - use crate::ast::Constant; - - use super::{ - super::{Rc, Sort, Term}, - TPool, - }; - use ahash::{AHashMap, AHashSet}; - - /// A structure to store and manage all allocated terms. - /// - /// You can add a `Term` to the pool using [`TermPool::add`], which will return an `Rc`. This - /// struct ensures that, if two equal terms are added to a pool, they will be in the same - /// allocation. This invariant allows terms to be safely compared and hashed by reference, instead - /// of by value (see [`Rc`]). - /// - /// This struct also provides other utility methods, like computing the sort of a term (see - /// [`TermPool::sort`]) or its free variables (see [`TermPool::free_vars`]). - pub struct TermPool { - /// A map of the terms in the pool. - pub(crate) terms: AHashMap>, - pub(crate) free_vars_cache: AHashMap, AHashSet>>, - pub(crate) sorts_cache: AHashMap, Rc>, - pub(crate) bool_true: Rc, - pub(crate) bool_false: Rc, - } - - impl Default for TermPool { - fn default() -> Self { - Self::new() - } - } - - impl TermPool { - /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` - /// and `false`, as well as the `Bool` sort. 
- pub fn new() -> Self { - let mut terms = AHashMap::new(); - let mut sorts_cache = AHashMap::new(); - let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); - - let [bool_true, bool_false] = ["true", "false"] - .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); - - sorts_cache.insert(bool_false.clone(), bool_sort.clone()); - sorts_cache.insert(bool_true.clone(), bool_sort.clone()); - sorts_cache.insert(bool_sort.clone(), bool_sort.clone()); - - Self { - terms, - free_vars_cache: AHashMap::new(), - sorts_cache, - bool_true, - bool_false, - } - } - - fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { - use std::collections::hash_map::Entry; - - match terms_map.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - vacant_entry.insert(Rc::new(term)).clone() - } - } - } - - /// Computes the sort of a term and adds it to the sort cache. 
- pub(super) fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> Rc { - use super::super::Operator; - - if self.sorts_cache.contains_key(term) { - return self.sorts_cache[term].clone(); - } - - let result: Sort = match term.as_ref() { - Term::Const(c) => match c { - Constant::Integer(_) => Sort::Int, - Constant::Real(_) => Sort::Real, - Constant::String(_) => Sort::String, - }, - Term::Var(_, sort) => sort.as_sort().unwrap().clone(), - Term::Op(op, args) => match op { - Operator::Not - | Operator::Implies - | Operator::And - | Operator::Or - | Operator::Xor - | Operator::Equals - | Operator::Distinct - | Operator::LessThan - | Operator::GreaterThan - | Operator::LessEq - | Operator::GreaterEq - | Operator::IsInt => Sort::Bool, - Operator::Ite => self.compute_sort(&args[1]).as_sort().unwrap().clone(), - Operator::Add | Operator::Sub | Operator::Mult => { - if args - .iter() - .any(|a| self.compute_sort(a).as_sort().unwrap() == &Sort::Real) - { - Sort::Real - } else { - Sort::Int - } - } - Operator::RealDiv | Operator::ToReal => Sort::Real, - Operator::IntDiv | Operator::Mod | Operator::Abs | Operator::ToInt => Sort::Int, - Operator::Select => match self.compute_sort(&args[0]).as_sort().unwrap() { - Sort::Array(_, y) => y.as_sort().unwrap().clone(), - _ => unreachable!(), - }, - Operator::Store => self.compute_sort(&args[0]).as_sort().unwrap().clone(), - }, - Term::App(f, _) => { - match self.compute_sort(f).as_sort().unwrap() { - Sort::Function(sorts) => sorts.last().unwrap().as_sort().unwrap().clone(), - _ => unreachable!(), // We assume that the function is correctly sorted - } - } - Term::Sort(sort) => sort.clone(), - Term::Quant(_, _, _) => Sort::Bool, - Term::Choice((_, sort), _) => sort.as_sort().unwrap().clone(), - Term::Let(_, inner) => self.compute_sort(inner).as_sort().unwrap().clone(), - Term::Lambda(bindings, body) => { - let mut result: Vec<_> = - bindings.iter().map(|(_name, sort)| sort.clone()).collect(); - let return_sort = 
self.compute_sort(body).as_ref().clone(); - result.push(self.add(return_sort)); - Sort::Function(result) - } - }; - let sorted_term = Self::add_term_to_map(&mut self.terms, Term::Sort(result)); - self.sorts_cache.insert(term.clone(), sorted_term); - self.sorts_cache[term].clone() - } - } - - impl TPool for TermPool { - fn bool_true(&self) -> Rc { - self.bool_true.clone() - } - - fn bool_false(&self) -> Rc { - self.bool_false.clone() - } - - fn add(&mut self, term: Term) -> Rc { - let term = Self::add_term_to_map(&mut self.terms, term); - self.compute_sort(&term); - term - } - - fn sort(&self, term: &Rc) -> Rc { - self.sorts_cache[term].clone() - } - - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. 
This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if self.free_vars_cache.contains_key(term) { - return self.free_vars_cache.get(term).unwrap().clone(); - } - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = self.free_vars(f).clone(); - for a in args { - set.extend(self.free_vars(a).iter().cloned()); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend(self.free_vars(a).iter().cloned()); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); - for bound_var in bindings { - let term = self.add(bound_var.clone().into()); - vars.remove(&term); - } - vars - } - Term::Let(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); - for (var, value) in bindings { - let sort = self.sort(value).as_ref().clone(); - let sort = self.add(sort); - let term = self.add((var.clone(), sort).into()); - vars.remove(&term); - } - vars - } - Term::Choice(bound_var, inner) => { - let mut vars = self.free_vars(inner).clone(); - let term = self.add(bound_var.clone().into()); - vars.remove(&term); - vars - } - Term::Var(..) 
=> { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set - } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - self.free_vars_cache.insert(term.clone(), set); - self.free_vars_cache.get(term).unwrap().clone() - } - } -} - -pub mod AdvancedPools { - use super::super::{Rc, Term}; - use super::{PrimitivePool, TPool}; - use ahash::AHashSet; - use std::sync::{Arc, RwLock}; - - pub struct ContextPool { - pub(crate) global_pool: Arc, - pub(crate) storage: Arc>, - } - - impl Default for ContextPool { - fn default() -> Self { - Self::new() - } - } - - impl ContextPool { - pub fn new() -> Self { - Self { - global_pool: Arc::new(PrimitivePool::TermPool::new()), - storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), - } - } - - pub fn from_global(global_pool: &Arc) -> Self { - Self { - global_pool: global_pool.clone(), - storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), - } - } - - pub fn from_previous(ctx_pool: &Self) -> Self { - Self { - global_pool: ctx_pool.global_pool.clone(), - storage: ctx_pool.storage.clone(), - } - } - - /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. - fn add_by_ref<'d, 'c: 'd>( - ctx_pool: &mut PrimitivePool::TermPool, - global_pool: &'d Arc, - term: Term, - ) -> Rc { - use std::collections::hash_map::Entry; - - // If the global pool has the term - if let Some(entry) = global_pool.terms.get(&term) { - entry.clone() - } else { - match ctx_pool.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - ctx_pool.compute_sort(&t); - t - } - } - } - } - - /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. 
- fn sort_by_ref<'d: 't, 'c: 'd, 't>( - ctx_pool: &PrimitivePool::TermPool, - global_pool: &'d Arc, - term: &'t Rc, - ) -> Rc { - if let Some(sort) = global_pool.sorts_cache.get(term) { - sort.clone() - } - // A sort inserted by context - else { - ctx_pool.sorts_cache[term].clone() - } - } - } - - impl TPool for ContextPool { - fn bool_true(&self) -> Rc { - self.global_pool.bool_true.clone() - } - - fn bool_false(&self) -> Rc { - self.global_pool.bool_false.clone() - } - - fn add(&mut self, term: Term) -> Rc { - use std::collections::hash_map::Entry; - - // If the global pool has the term - if let Some(entry) = self.global_pool.terms.get(&term) { - entry.clone() - } else { - let mut ctx_guard = self.storage.write().unwrap(); - match ctx_guard.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - ctx_guard.compute_sort(&t); - t - } - } - } - } - - fn sort(&self, term: &Rc) -> Rc { - if let Some(sort) = self.global_pool.sorts_cache.get(term) { - sort.clone() - } - // A sort inserted by context - else { - self.storage.read().unwrap().sorts_cache[term].clone() - } - } - - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - fn internal<'d: 't, 'c: 'd, 't>( - ctx_pool: &'d mut PrimitivePool::TermPool, - global_pool: &'c Arc, - term: &'t Rc, - ) -> &'t AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. 
Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if let Some(set) = global_pool.free_vars_cache.get(term) { - return set; - } - if ctx_pool.free_vars_cache.contains_key(term) { - return ctx_pool.free_vars_cache.get(term).unwrap(); - } - - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = internal(ctx_pool, global_pool, f).clone(); - for a in args { - set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = internal(ctx_pool, global_pool, inner).clone(); - for bound_var in bindings { - let term = ContextPool::add_by_ref( - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - } - vars - } - Term::Let(bindings, inner) => { - let mut vars = internal(ctx_pool, global_pool, inner).clone(); - for (var, value) in bindings { - let sort = ContextPool::sort_by_ref(ctx_pool, global_pool, value) - .as_ref() - .clone(); - let sort = ContextPool::add_by_ref(ctx_pool, global_pool, sort); - let term = ContextPool::add_by_ref( - ctx_pool, - global_pool, - (var.clone(), sort).into(), - ); - vars.remove(&term); - } - vars - } - Term::Choice(bound_var, inner) => { - let mut vars = internal(ctx_pool, global_pool, inner).clone(); - let term = ContextPool::add_by_ref( - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - vars - } - Term::Var(..) 
=> { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set - } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - ctx_pool.free_vars_cache.insert(term.clone(), set); - ctx_pool.free_vars_cache.get(term).unwrap() - } - let mut ctx_guard = self.storage.write(); - internal(ctx_guard.as_mut().unwrap(), &self.global_pool, term).clone() - } - } - - // ========================================================================= - - pub struct LocalPool { - pub(crate) ctx_pool: ContextPool, - pub(crate) storage: PrimitivePool::TermPool, - } - - impl Default for LocalPool { - fn default() -> Self { - Self::new() - } - } - - impl LocalPool { - pub fn new() -> Self { - Self { - ctx_pool: ContextPool::new(), - storage: PrimitivePool::TermPool::new(), - } - } - - /// Instantiates a new `LocalPool` from a previous `ContextPool` (makes - /// sure the context is shared between threads). - pub fn from_previous(ctx_pool: &ContextPool) -> Self { - Self { - ctx_pool: ContextPool::from_previous(ctx_pool), - storage: PrimitivePool::TermPool::new(), - } - } - - /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. - fn add_by_ref<'d, 'c: 'd>( - local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &PrimitivePool::TermPool, - global_pool: &'d Arc, - term: Term, - ) -> Rc { - use std::collections::hash_map::Entry; - - // If the global pool has the term - if let Some(entry) = global_pool.terms.get(&term) { - entry.clone() - } - // If this term was inserted by the context - else if let Some(entry) = ctx_pool.terms.get(&term) { - entry.clone() - } else { - match local_pool.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - local_pool.compute_sort(&t); - t - } - } - } - } - - /// Returns the sort of this term exactly as the sort function. 
Receive the pools references directly. - fn sort_by_ref<'d: 't, 'c: 'd, 't>( - local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &PrimitivePool::TermPool, - global_pool: &'d Arc, - term: &'t Rc, - ) -> Rc { - if let Some(sort) = global_pool.sorts_cache.get(term) { - sort.clone() - } - // A sort inserted by context - else if let Some(entry) = ctx_pool.terms.get(&term) { - entry.clone() - } else { - local_pool.sorts_cache[term].clone() - } - } - } - - impl TPool for LocalPool { - fn bool_true(&self) -> Rc { - self.ctx_pool.global_pool.bool_true.clone() - } - - fn bool_false(&self) -> Rc { - self.ctx_pool.global_pool.bool_false.clone() - } - - fn add(&mut self, term: Term) -> Rc { - use std::collections::hash_map::Entry; - - // If there is a constant pool and has the term - if let Some(entry) = self.ctx_pool.global_pool.terms.get(&term) { - entry.clone() - } - // If this term was inserted by the context - else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { - entry.clone() - } else { - match self.storage.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - self.storage.compute_sort(&t); - t - } - } - } - } - - fn sort(&self, term: &Rc) -> Rc { - if let Some(sort) = self.ctx_pool.global_pool.sorts_cache.get(term) { - sort.clone() - } - // A sort inserted by context - else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { - entry.clone() - } else { - self.storage.sorts_cache[term].clone() - } - } - - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - fn internal<'d: 't, 'c: 'd, 't>( - local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &'t PrimitivePool::TermPool, - global_pool: &'d Arc, - term: &'t Rc, - ) -> &'t AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) 
{ - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if let Some(set) = global_pool.free_vars_cache.get(term) { - return set; - } - if let Some(set) = ctx_pool.free_vars_cache.get(term) { - return set; - } - if local_pool.free_vars_cache.contains_key(term) { - return local_pool.free_vars_cache.get(term).unwrap(); - } - - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = internal(local_pool, ctx_pool, global_pool, f).clone(); - for a in args { - set.extend( - internal(local_pool, ctx_pool, global_pool, a) - .iter() - .cloned(), - ); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend( - internal(local_pool, ctx_pool, global_pool, a) - .iter() - .cloned(), - ); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); - for bound_var in bindings { - let term = LocalPool::add_by_ref( - local_pool, - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - } - vars - } - Term::Let(bindings, inner) => { - let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); - for (var, value) in bindings { - let sort = - LocalPool::sort_by_ref(local_pool, ctx_pool, global_pool, value) - .as_ref() - .clone(); - let sort = - LocalPool::add_by_ref(local_pool, 
ctx_pool, global_pool, sort); - let term = LocalPool::add_by_ref( - local_pool, - ctx_pool, - global_pool, - (var.clone(), sort).into(), - ); - vars.remove(&term); - } - vars - } - Term::Choice(bound_var, inner) => { - let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); - let term = LocalPool::add_by_ref( - local_pool, - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - vars - } - Term::Var(..) => { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set - } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - local_pool.free_vars_cache.insert(term.clone(), set); - local_pool.free_vars_cache.get(term).unwrap() - } - - internal( - &mut self.storage, - &self.ctx_pool.storage.read().unwrap(), - &self.ctx_pool.global_pool, - term, - ) - .clone() - } - } -} diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs new file mode 100644 index 00000000..2b8e7eef --- /dev/null +++ b/carcara/src/ast/pool/advanced.rs @@ -0,0 +1,447 @@ + +use super::super::{Rc, Term}; +use super::{PrimitivePool, TPool}; +use ahash::AHashSet; +use std::sync::{Arc, RwLock}; + +pub struct ContextPool { + pub(crate) global_pool: Arc, + pub(crate) storage: Arc>, +} + +impl Default for ContextPool { + fn default() -> Self { + Self::new() + } +} + +impl ContextPool { + pub fn new() -> Self { + Self { + global_pool: Arc::new(PrimitivePool::TermPool::new()), + storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), + } + } + + pub fn from_global(global_pool: &Arc) -> Self { + Self { + global_pool: global_pool.clone(), + storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), + } + } + + pub fn from_previous(ctx_pool: &Self) -> Self { + Self { + global_pool: ctx_pool.global_pool.clone(), + storage: ctx_pool.storage.clone(), + } + } + + /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. 
+ fn add_by_ref<'d, 'c: 'd>( + ctx_pool: &mut PrimitivePool::TermPool, + global_pool: &'d Arc, + term: Term, + ) -> Rc { + use std::collections::hash_map::Entry; + + // If the global pool has the term + if let Some(entry) = global_pool.terms.get(&term) { + entry.clone() + } else { + match ctx_pool.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + ctx_pool.compute_sort(&t); + t + } + } + } + } + + /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. + fn sort_by_ref<'d: 't, 'c: 'd, 't>( + ctx_pool: &PrimitivePool::TermPool, + global_pool: &'d Arc, + term: &'t Rc, + ) -> Rc { + if let Some(sort) = global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else { + ctx_pool.sorts_cache[term].clone() + } + } +} + +impl TPool for ContextPool { + fn bool_true(&self) -> Rc { + self.global_pool.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.global_pool.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + use std::collections::hash_map::Entry; + + // If the global pool has the term + if let Some(entry) = self.global_pool.terms.get(&term) { + entry.clone() + } else { + let mut ctx_guard = self.storage.write().unwrap(); + match ctx_guard.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + ctx_guard.compute_sort(&t); + t + } + } + } + } + + fn sort(&self, term: &Rc) -> Rc { + if let Some(sort) = self.global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else { + self.storage.read().unwrap().sorts_cache[term].clone() + } + } + + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + fn 
internal<'d: 't, 'c: 'd, 't>( + ctx_pool: &'d mut PrimitivePool::TermPool, + global_pool: &'c Arc, + term: &'t Rc, + ) -> &'t AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if let Some(set) = global_pool.free_vars_cache.get(term) { + return set; + } + if ctx_pool.free_vars_cache.contains_key(term) { + return ctx_pool.free_vars_cache.get(term).unwrap(); + } + + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = internal(ctx_pool, global_pool, f).clone(); + for a in args { + set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); + } + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); + } + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = internal(ctx_pool, global_pool, inner).clone(); + for bound_var in bindings { + let term = ContextPool::add_by_ref( + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + } + vars + } + Term::Let(bindings, inner) => { + let mut vars = internal(ctx_pool, global_pool, inner).clone(); + for (var, value) in bindings { + let sort = ContextPool::sort_by_ref(ctx_pool, global_pool, value) + .as_ref() + .clone(); + let 
sort = ContextPool::add_by_ref(ctx_pool, global_pool, sort); + let term = ContextPool::add_by_ref( + ctx_pool, + global_pool, + (var.clone(), sort).into(), + ); + vars.remove(&term); + } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = internal(ctx_pool, global_pool, inner).clone(); + let term = + ContextPool::add_by_ref(ctx_pool, global_pool, bound_var.clone().into()); + vars.remove(&term); + vars + } + Term::Var(..) => { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set + } + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + ctx_pool.free_vars_cache.insert(term.clone(), set); + ctx_pool.free_vars_cache.get(term).unwrap() + } + let mut ctx_guard = self.storage.write(); + internal(ctx_guard.as_mut().unwrap(), &self.global_pool, term).clone() + } +} + +// ========================================================================= + +pub struct LocalPool { + pub(crate) ctx_pool: ContextPool, + pub(crate) storage: PrimitivePool::TermPool, +} + +impl Default for LocalPool { + fn default() -> Self { + Self::new() + } +} + +impl LocalPool { + pub fn new() -> Self { + Self { + ctx_pool: ContextPool::new(), + storage: PrimitivePool::TermPool::new(), + } + } + + /// Instantiates a new `LocalPool` from a previous `ContextPool` (makes + /// sure the context is shared between threads). + pub fn from_previous(ctx_pool: &ContextPool) -> Self { + Self { + ctx_pool: ContextPool::from_previous(ctx_pool), + storage: PrimitivePool::TermPool::new(), + } + } + + /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. 
+ fn add_by_ref<'d, 'c: 'd>( + local_pool: &'d mut PrimitivePool::TermPool, + ctx_pool: &PrimitivePool::TermPool, + global_pool: &'d Arc, + term: Term, + ) -> Rc { + use std::collections::hash_map::Entry; + + // If the global pool has the term + if let Some(entry) = global_pool.terms.get(&term) { + entry.clone() + } + // If this term was inserted by the context + else if let Some(entry) = ctx_pool.terms.get(&term) { + entry.clone() + } else { + match local_pool.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + local_pool.compute_sort(&t); + t + } + } + } + } + + /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. + fn sort_by_ref<'d: 't, 'c: 'd, 't>( + local_pool: &'d mut PrimitivePool::TermPool, + ctx_pool: &PrimitivePool::TermPool, + global_pool: &'d Arc, + term: &'t Rc, + ) -> Rc { + if let Some(sort) = global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else if let Some(entry) = ctx_pool.terms.get(&term) { + entry.clone() + } else { + local_pool.sorts_cache[term].clone() + } + } +} + +impl TPool for LocalPool { + fn bool_true(&self) -> Rc { + self.ctx_pool.global_pool.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.ctx_pool.global_pool.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + use std::collections::hash_map::Entry; + + // If there is a constant pool and has the term + if let Some(entry) = self.ctx_pool.global_pool.terms.get(&term) { + entry.clone() + } + // If this term was inserted by the context + else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { + entry.clone() + } else { + match self.storage.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = 
vacant_entry.key().clone(); + let t = vacant_entry.insert(Rc::new(term)).clone(); + self.storage.compute_sort(&t); + t + } + } + } + } + + fn sort(&self, term: &Rc) -> Rc { + if let Some(sort) = self.ctx_pool.global_pool.sorts_cache.get(term) { + sort.clone() + } + // A sort inserted by context + else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { + entry.clone() + } else { + self.storage.sorts_cache[term].clone() + } + } + + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + fn internal<'d: 't, 'c: 'd, 't>( + local_pool: &'d mut PrimitivePool::TermPool, + ctx_pool: &'t PrimitivePool::TermPool, + global_pool: &'d Arc, + term: &'t Rc, + ) -> &'t AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. 
This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if let Some(set) = global_pool.free_vars_cache.get(term) { + return set; + } + if let Some(set) = ctx_pool.free_vars_cache.get(term) { + return set; + } + if local_pool.free_vars_cache.contains_key(term) { + return local_pool.free_vars_cache.get(term).unwrap(); + } + + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = internal(local_pool, ctx_pool, global_pool, f).clone(); + for a in args { + set.extend( + internal(local_pool, ctx_pool, global_pool, a) + .iter() + .cloned(), + ); + } + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend( + internal(local_pool, ctx_pool, global_pool, a) + .iter() + .cloned(), + ); + } + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); + for bound_var in bindings { + let term = LocalPool::add_by_ref( + local_pool, + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + } + vars + } + Term::Let(bindings, inner) => { + let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); + for (var, value) in bindings { + let sort = LocalPool::sort_by_ref(local_pool, ctx_pool, global_pool, value) + .as_ref() + .clone(); + let sort = LocalPool::add_by_ref(local_pool, ctx_pool, global_pool, sort); + let term = LocalPool::add_by_ref( + local_pool, + ctx_pool, + global_pool, + (var.clone(), sort).into(), + ); + vars.remove(&term); + } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); + let term = LocalPool::add_by_ref( + local_pool, + ctx_pool, + global_pool, + bound_var.clone().into(), + ); + vars.remove(&term); + vars + } + Term::Var(..) 
=> { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set + } + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + local_pool.free_vars_cache.insert(term.clone(), set); + local_pool.free_vars_cache.get(term).unwrap() + } + + internal( + &mut self.storage, + &self.ctx_pool.storage.read().unwrap(), + &self.ctx_pool.global_pool, + term, + ) + .clone() + } +} diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs new file mode 100644 index 00000000..40192578 --- /dev/null +++ b/carcara/src/ast/pool/mod.rs @@ -0,0 +1,272 @@ +//! This module implements `TermPool`, a structure that stores terms and implements hash consing. + +pub mod advanced; + +use super::{Rc, Term}; +use advanced::LocalPool; +use ahash::AHashSet; + +pub type TermPool = LocalPool; + +pub trait TPool { + /// Returns the term corresponding to the boolean constant `true`. + fn bool_true(&self) -> Rc; + /// Returns the term corresponding to the boolean constant `false`. + fn bool_false(&self) -> Rc; + /// Returns the term corresponding to the boolean constant determined by `value`. + fn bool_constant(&self, value: bool) -> Rc { + match value { + true => self.bool_true(), + false => self.bool_false(), + } + } + /// Takes a term and returns a possibly newly allocated `Rc` that references it. + /// + /// If the term was not originally in the term pool, it is added to it. Otherwise, this method + /// just returns an `Rc` pointing to the existing allocation. This method also computes the + /// term's sort, and adds it to the sort cache. + fn add(&mut self, term: Term) -> Rc; + /// Takes a vector of terms and calls [`TermPool::add`] on each. + fn add_all(&mut self, terms: Vec) -> Vec> { + terms.into_iter().map(|t| self.add(t)).collect() + } + /// Returns the sort of the given term. + /// + /// This method assumes that the sorts of any subterms have already been checked, and are + /// correct. If `term` is itself a sort, this simply returns that sort. 
+ fn sort(&self, term: &Rc) -> Rc; + /// Returns an `AHashSet` containing all the free variables in the given term. + /// + /// This method uses a cache, so there is no additional cost to computing the free variables of + /// a term multiple times. + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet>; +} + +pub mod PrimitivePool { + use crate::ast::Constant; + + use super::{ + super::{Rc, Sort, Term}, + TPool, + }; + use ahash::{AHashMap, AHashSet}; + + /// A structure to store and manage all allocated terms. + /// + /// You can add a `Term` to the pool using [`TermPool::add`], which will return an `Rc`. This + /// struct ensures that, if two equal terms are added to a pool, they will be in the same + /// allocation. This invariant allows terms to be safely compared and hashed by reference, instead + /// of by value (see [`Rc`]). + /// + /// This struct also provides other utility methods, like computing the sort of a term (see + /// [`TermPool::sort`]) or its free variables (see [`TermPool::free_vars`]). + pub struct TermPool { + /// A map of the terms in the pool. + pub(crate) terms: AHashMap>, + pub(crate) free_vars_cache: AHashMap, AHashSet>>, + pub(crate) sorts_cache: AHashMap, Rc>, + pub(crate) bool_true: Rc, + pub(crate) bool_false: Rc, + } + + impl Default for TermPool { + fn default() -> Self { + Self::new() + } + } + + impl TermPool { + /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` + /// and `false`, as well as the `Bool` sort. 
+ pub fn new() -> Self { + let mut terms = AHashMap::new(); + let mut sorts_cache = AHashMap::new(); + let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); + + let [bool_true, bool_false] = ["true", "false"] + .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); + + sorts_cache.insert(bool_false.clone(), bool_sort.clone()); + sorts_cache.insert(bool_true.clone(), bool_sort.clone()); + sorts_cache.insert(bool_sort.clone(), bool_sort.clone()); + + Self { + terms, + free_vars_cache: AHashMap::new(), + sorts_cache, + bool_true, + bool_false, + } + } + + fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { + use std::collections::hash_map::Entry; + + match terms_map.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + vacant_entry.insert(Rc::new(term)).clone() + } + } + } + + /// Computes the sort of a term and adds it to the sort cache. 
+ pub(super) fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> Rc { + use super::super::Operator; + + if self.sorts_cache.contains_key(term) { + return self.sorts_cache[term].clone(); + } + + let result: Sort = match term.as_ref() { + Term::Const(c) => match c { + Constant::Integer(_) => Sort::Int, + Constant::Real(_) => Sort::Real, + Constant::String(_) => Sort::String, + }, + Term::Var(_, sort) => sort.as_sort().unwrap().clone(), + Term::Op(op, args) => match op { + Operator::Not + | Operator::Implies + | Operator::And + | Operator::Or + | Operator::Xor + | Operator::Equals + | Operator::Distinct + | Operator::LessThan + | Operator::GreaterThan + | Operator::LessEq + | Operator::GreaterEq + | Operator::IsInt => Sort::Bool, + Operator::Ite => self.compute_sort(&args[1]).as_sort().unwrap().clone(), + Operator::Add | Operator::Sub | Operator::Mult => { + if args + .iter() + .any(|a| self.compute_sort(a).as_sort().unwrap() == &Sort::Real) + { + Sort::Real + } else { + Sort::Int + } + } + Operator::RealDiv | Operator::ToReal => Sort::Real, + Operator::IntDiv | Operator::Mod | Operator::Abs | Operator::ToInt => Sort::Int, + Operator::Select => match self.compute_sort(&args[0]).as_sort().unwrap() { + Sort::Array(_, y) => y.as_sort().unwrap().clone(), + _ => unreachable!(), + }, + Operator::Store => self.compute_sort(&args[0]).as_sort().unwrap().clone(), + }, + Term::App(f, _) => { + match self.compute_sort(f).as_sort().unwrap() { + Sort::Function(sorts) => sorts.last().unwrap().as_sort().unwrap().clone(), + _ => unreachable!(), // We assume that the function is correctly sorted + } + } + Term::Sort(sort) => sort.clone(), + Term::Quant(_, _, _) => Sort::Bool, + Term::Choice((_, sort), _) => sort.as_sort().unwrap().clone(), + Term::Let(_, inner) => self.compute_sort(inner).as_sort().unwrap().clone(), + Term::Lambda(bindings, body) => { + let mut result: Vec<_> = + bindings.iter().map(|(_name, sort)| sort.clone()).collect(); + let return_sort = 
self.compute_sort(body).as_ref().clone(); + result.push(self.add(return_sort)); + Sort::Function(result) + } + }; + let sorted_term = Self::add_term_to_map(&mut self.terms, Term::Sort(result)); + self.sorts_cache.insert(term.clone(), sorted_term); + self.sorts_cache[term].clone() + } + } + + impl TPool for TermPool { + fn bool_true(&self) -> Rc { + self.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + let term = Self::add_term_to_map(&mut self.terms, term); + self.compute_sort(&term); + term + } + + fn sort(&self, term: &Rc) -> Rc { + self.sorts_cache[term].clone() + } + + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. 
This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if self.free_vars_cache.contains_key(term) { + return self.free_vars_cache.get(term).unwrap().clone(); + } + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = self.free_vars(f).clone(); + for a in args { + set.extend(self.free_vars(a).iter().cloned()); + } + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend(self.free_vars(a).iter().cloned()); + } + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = self.free_vars(inner).clone(); + for bound_var in bindings { + let term = self.add(bound_var.clone().into()); + vars.remove(&term); + } + vars + } + Term::Let(bindings, inner) => { + let mut vars = self.free_vars(inner).clone(); + for (var, value) in bindings { + let sort = self.sort(value).as_ref().clone(); + let sort = self.add(sort); + let term = self.add((var.clone(), sort).into()); + vars.remove(&term); + } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = self.free_vars(inner).clone(); + let term = self.add(bound_var.clone().into()); + vars.remove(&term); + vars + } + Term::Var(..) 
=> { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set + } + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + self.free_vars_cache.insert(term.clone(), set); + self.free_vars_cache.get(term).unwrap().clone() + } + } +} diff --git a/carcara/src/checker/parallel.rs b/carcara/src/checker/parallel.rs index 090aced0..2a4895b1 100644 --- a/carcara/src/checker/parallel.rs +++ b/carcara/src/checker/parallel.rs @@ -5,7 +5,7 @@ use super::{lia_generic, Config}; use crate::benchmarking::{CollectResults, OnlineBenchmarkResults}; use crate::checker::CheckerStatistics; use crate::{ - ast::{AdvancedPools::LocalPool, *}, + ast::{pool::advanced::*, *}, CarcaraResult, Error, }; use ahash::AHashSet; @@ -61,7 +61,7 @@ impl<'c> ParallelProofChecker<'c> { // Used to estimulate threads to abort prematurely (only happens when a // thread already found out an invalid step) let premature_abort = Arc::new(RwLock::new(false)); - let context_pool = AdvancedPools::ContextPool::from_global(&self.pool); + let context_pool = ContextPool::from_global(&self.pool); // TODO: Add stack size flag const STACK_SIZE: usize = 128 * 1024 * 1024; // @@ -226,7 +226,7 @@ impl<'c> ParallelProofChecker<'c> { // Used to estimulate threads to abort prematurely (only happens when a // thread already found out an invalid step) let premature_abort = Arc::new(RwLock::new(false)); - let context_pool = AdvancedPools::ContextPool::from_global(&self.pool); + let context_pool = ContextPool::from_global(&self.pool); // TODO: Add stack size flag const STACK_SIZE: usize = 128 * 1024 * 1024; // From 77193f9b338866697f01f39ab90280280c6b52e2 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Thu, 20 Jul 2023 21:12:36 -0300 Subject: [PATCH 17/70] Removed and added generics to parse derivative functions and rule functions --- carcara/src/ast/context.rs | 12 +- carcara/src/ast/macros.rs | 6 +- carcara/src/ast/mod.rs | 4 +- carcara/src/ast/pool/advanced.rs | 
49 ++- carcara/src/ast/pool/mod.rs | 392 ++++++++++---------- carcara/src/ast/substitution.rs | 32 +- carcara/src/ast/tests.rs | 6 +- carcara/src/checker/lia_generic.rs | 21 +- carcara/src/checker/mod.rs | 4 +- carcara/src/checker/parallel.rs | 10 +- carcara/src/checker/rules/clausification.rs | 8 +- carcara/src/checker/rules/mod.rs | 2 +- carcara/src/checker/rules/quantifier.rs | 8 +- carcara/src/checker/rules/reflexivity.rs | 2 +- carcara/src/checker/rules/resolution.rs | 8 +- carcara/src/checker/rules/simplification.rs | 10 +- carcara/src/checker/rules/transitivity.rs | 2 +- carcara/src/elaborator/mod.rs | 8 +- carcara/src/elaborator/polyeq.rs | 14 +- carcara/src/lib.rs | 5 +- carcara/src/parser/mod.rs | 41 +- carcara/src/parser/tests.rs | 43 +-- cli/src/main.rs | 6 +- 23 files changed, 325 insertions(+), 368 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 60bd05ad..0761b785 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -36,7 +36,7 @@ impl ContextStack { pub fn push( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, assignment_args: &[(String, Rc)], variable_args: &[SortedVar], ) -> Result<(), SubstitutionError> { @@ -84,7 +84,7 @@ impl ContextStack { std::cmp::min(self.num_cumulative_calculated, self.stack.len()); } - fn catch_up_cumulative(&mut self, pool: &mut TermPool, up_to: usize) { + fn catch_up_cumulative(&mut self, pool: &mut dyn TPool, up_to: usize) { for i in self.num_cumulative_calculated..std::cmp::max(up_to + 1, self.len()) { let simultaneous = build_simultaneous_substitution(pool, &self.stack[i].mappings).map; let mut cumulative_substitution = simultaneous.clone(); @@ -109,13 +109,13 @@ impl ContextStack { } } - fn get_substitution(&mut self, pool: &mut TermPool, index: usize) -> &mut Substitution { + fn get_substitution(&mut self, pool: &mut dyn TPool, index: usize) -> &mut Substitution { assert!(index < self.len()); self.catch_up_cumulative(pool, index); 
self.stack[index].cumulative_substitution.as_mut().unwrap() } - pub fn apply_previous(&mut self, pool: &mut TermPool, term: &Rc) -> Rc { + pub fn apply_previous(&mut self, pool: &mut dyn TPool, term: &Rc) -> Rc { if self.len() < 2 { term.clone() } else { @@ -124,7 +124,7 @@ impl ContextStack { } } - pub fn apply(&mut self, pool: &mut TermPool, term: &Rc) -> Rc { + pub fn apply(&mut self, pool: &mut dyn TPool, term: &Rc) -> Rc { if self.is_empty() { term.clone() } else { @@ -135,7 +135,7 @@ impl ContextStack { } fn build_simultaneous_substitution( - pool: &mut TermPool, + pool: &mut dyn TPool, mappings: &[(Rc, Rc)], ) -> Substitution { let mut result = Substitution::empty(); diff --git a/carcara/src/ast/macros.rs b/carcara/src/ast/macros.rs index 3147da49..9001b921 100644 --- a/carcara/src/ast/macros.rs +++ b/carcara/src/ast/macros.rs @@ -249,12 +249,12 @@ macro_rules! impl_str_conversion_traits { #[cfg(test)] mod tests { - use crate::ast::*; + use crate::ast::{pool::advanced::LocalPool, *}; use crate::parser::tests::{parse_term, parse_terms}; #[test] fn test_match_term() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let [one, two, five] = [1, 2, 5].map(|n| p.add(Term::new_int(n))); let term = parse_term(&mut p, "(= (= (not false) (= true false)) (not true))"); @@ -303,7 +303,7 @@ mod tests { (declare-fun p () Bool) (declare-fun q () Bool) "; - let mut pool = TermPool::new(); + let mut pool = LocalPool::new(); let bool_sort = pool.add(Term::Sort(Sort::Bool)); let int_sort = pool.add(Term::Sort(Sort::Int)); diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 1a5689e1..4bed083c 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -17,7 +17,7 @@ mod tests; pub use context::{Context, ContextStack}; pub use iter::ProofIter; pub use polyeq::{alpha_equiv, polyeq, tracing_polyeq}; -pub use pool::{PrimitivePool, TPool, TermPool}; +pub use pool::{PrimitivePool, TPool}; pub use printer::print_proof; pub use rc::Rc; pub use 
substitution::{Substitution, SubstitutionError}; @@ -464,7 +464,7 @@ impl Term { /// Returns the sort of this term. This does not make use of a cache --- if possible, prefer to /// use `TermPool::sort`. pub fn raw_sort(&self) -> Sort { - let mut pool = PrimitivePool::TermPool::new(); + let mut pool = PrimitivePool::new(); let added = pool.add(self.clone()); pool.sort(&added).as_sort().unwrap().clone() } diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index 2b8e7eef..bf60bae5 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -1,12 +1,11 @@ - use super::super::{Rc, Term}; use super::{PrimitivePool, TPool}; use ahash::AHashSet; use std::sync::{Arc, RwLock}; pub struct ContextPool { - pub(crate) global_pool: Arc, - pub(crate) storage: Arc>, + pub(crate) global_pool: Arc, + pub(crate) storage: Arc>, } impl Default for ContextPool { @@ -18,15 +17,15 @@ impl Default for ContextPool { impl ContextPool { pub fn new() -> Self { Self { - global_pool: Arc::new(PrimitivePool::TermPool::new()), - storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), + global_pool: Arc::new(PrimitivePool::new()), + storage: Arc::new(RwLock::new(PrimitivePool::new())), } } - pub fn from_global(global_pool: &Arc) -> Self { + pub fn from_global(global_pool: &Arc) -> Self { Self { global_pool: global_pool.clone(), - storage: Arc::new(RwLock::new(PrimitivePool::TermPool::new())), + storage: Arc::new(RwLock::new(PrimitivePool::new())), } } @@ -39,8 +38,8 @@ impl ContextPool { /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. fn add_by_ref<'d, 'c: 'd>( - ctx_pool: &mut PrimitivePool::TermPool, - global_pool: &'d Arc, + ctx_pool: &mut PrimitivePool, + global_pool: &'d Arc, term: Term, ) -> Rc { use std::collections::hash_map::Entry; @@ -63,8 +62,8 @@ impl ContextPool { /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. 
fn sort_by_ref<'d: 't, 'c: 'd, 't>( - ctx_pool: &PrimitivePool::TermPool, - global_pool: &'d Arc, + ctx_pool: &PrimitivePool, + global_pool: &'d Arc, term: &'t Rc, ) -> Rc { if let Some(sort) = global_pool.sorts_cache.get(term) { @@ -118,8 +117,8 @@ impl TPool for ContextPool { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { fn internal<'d: 't, 'c: 'd, 't>( - ctx_pool: &'d mut PrimitivePool::TermPool, - global_pool: &'c Arc, + ctx_pool: &'d mut PrimitivePool, + global_pool: &'c Arc, term: &'t Rc, ) -> &'t AHashSet> { // Here, I would like to do @@ -212,7 +211,7 @@ impl TPool for ContextPool { pub struct LocalPool { pub(crate) ctx_pool: ContextPool, - pub(crate) storage: PrimitivePool::TermPool, + pub(crate) storage: PrimitivePool, } impl Default for LocalPool { @@ -225,7 +224,7 @@ impl LocalPool { pub fn new() -> Self { Self { ctx_pool: ContextPool::new(), - storage: PrimitivePool::TermPool::new(), + storage: PrimitivePool::new(), } } @@ -234,15 +233,15 @@ impl LocalPool { pub fn from_previous(ctx_pool: &ContextPool) -> Self { Self { ctx_pool: ContextPool::from_previous(ctx_pool), - storage: PrimitivePool::TermPool::new(), + storage: PrimitivePool::new(), } } /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. fn add_by_ref<'d, 'c: 'd>( - local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &PrimitivePool::TermPool, - global_pool: &'d Arc, + local_pool: &'d mut PrimitivePool, + ctx_pool: &PrimitivePool, + global_pool: &'d Arc, term: Term, ) -> Rc { use std::collections::hash_map::Entry; @@ -269,9 +268,9 @@ impl LocalPool { /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. 
fn sort_by_ref<'d: 't, 'c: 'd, 't>( - local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &PrimitivePool::TermPool, - global_pool: &'d Arc, + local_pool: &'d mut PrimitivePool, + ctx_pool: &PrimitivePool, + global_pool: &'d Arc, term: &'t Rc, ) -> Rc { if let Some(sort) = global_pool.sorts_cache.get(term) { @@ -332,9 +331,9 @@ impl TPool for LocalPool { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { fn internal<'d: 't, 'c: 'd, 't>( - local_pool: &'d mut PrimitivePool::TermPool, - ctx_pool: &'t PrimitivePool::TermPool, - global_pool: &'d Arc, + local_pool: &'d mut PrimitivePool, + ctx_pool: &'t PrimitivePool, + global_pool: &'d Arc, term: &'t Rc, ) -> &'t AHashSet> { // Here, I would like to do diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index 40192578..01c66dc7 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -2,11 +2,9 @@ pub mod advanced; -use super::{Rc, Term}; -use advanced::LocalPool; -use ahash::AHashSet; - -pub type TermPool = LocalPool; +use super::{Rc, Sort, Term}; +use crate::ast::Constant; +use ahash::{AHashMap, AHashSet}; pub trait TPool { /// Returns the term corresponding to the boolean constant `true`. @@ -42,231 +40,221 @@ pub trait TPool { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet>; } -pub mod PrimitivePool { - use crate::ast::Constant; - - use super::{ - super::{Rc, Sort, Term}, - TPool, - }; - use ahash::{AHashMap, AHashSet}; - - /// A structure to store and manage all allocated terms. - /// - /// You can add a `Term` to the pool using [`TermPool::add`], which will return an `Rc`. This - /// struct ensures that, if two equal terms are added to a pool, they will be in the same - /// allocation. This invariant allows terms to be safely compared and hashed by reference, instead - /// of by value (see [`Rc`]). 
- /// - /// This struct also provides other utility methods, like computing the sort of a term (see - /// [`TermPool::sort`]) or its free variables (see [`TermPool::free_vars`]). - pub struct TermPool { - /// A map of the terms in the pool. - pub(crate) terms: AHashMap>, - pub(crate) free_vars_cache: AHashMap, AHashSet>>, - pub(crate) sorts_cache: AHashMap, Rc>, - pub(crate) bool_true: Rc, - pub(crate) bool_false: Rc, - } +/// A structure to store and manage all allocated terms. +/// +/// You can add a `Term` to the pool using [`PrimitivePool::add`], which will return an `Rc`. This +/// struct ensures that, if two equal terms are added to a pool, they will be in the same +/// allocation. This invariant allows terms to be safely compared and hashed by reference, instead +/// of by value (see [`Rc`]). +/// +/// This struct also provides other utility methods, like computing the sort of a term (see +/// [`PrimitivePool::sort`]) or its free variables (see [`PrimitivePool::free_vars`]). +pub struct PrimitivePool { + /// A map of the terms in the pool. + pub(crate) terms: AHashMap>, + pub(crate) free_vars_cache: AHashMap, AHashSet>>, + pub(crate) sorts_cache: AHashMap, Rc>, + pub(crate) bool_true: Rc, + pub(crate) bool_false: Rc, +} - impl Default for TermPool { - fn default() -> Self { - Self::new() - } +impl Default for PrimitivePool { + fn default() -> Self { + Self::new() } +} - impl TermPool { - /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` - /// and `false`, as well as the `Bool` sort. - pub fn new() -> Self { - let mut terms = AHashMap::new(); - let mut sorts_cache = AHashMap::new(); - let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); +impl PrimitivePool { + /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` + /// and `false`, as well as the `Bool` sort. 
+ pub fn new() -> Self { + let mut terms = AHashMap::new(); + let mut sorts_cache = AHashMap::new(); + let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); - let [bool_true, bool_false] = ["true", "false"] - .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); + let [bool_true, bool_false] = ["true", "false"] + .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); - sorts_cache.insert(bool_false.clone(), bool_sort.clone()); - sorts_cache.insert(bool_true.clone(), bool_sort.clone()); - sorts_cache.insert(bool_sort.clone(), bool_sort.clone()); + sorts_cache.insert(bool_false.clone(), bool_sort.clone()); + sorts_cache.insert(bool_true.clone(), bool_sort.clone()); + sorts_cache.insert(bool_sort.clone(), bool_sort.clone()); - Self { - terms, - free_vars_cache: AHashMap::new(), - sorts_cache, - bool_true, - bool_false, - } + Self { + terms, + free_vars_cache: AHashMap::new(), + sorts_cache, + bool_true, + bool_false, } + } - fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { - use std::collections::hash_map::Entry; + fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { + use std::collections::hash_map::Entry; - match terms_map.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - vacant_entry.insert(Rc::new(term)).clone() - } + match terms_map.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + vacant_entry.insert(Rc::new(term)).clone() } } + } - /// Computes the sort of a term and adds it to the sort cache. - pub(super) fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> Rc { - use super::super::Operator; + /// Computes the sort of a term and adds it to the sort cache. 
+ pub(super) fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> Rc { + use super::Operator; - if self.sorts_cache.contains_key(term) { - return self.sorts_cache[term].clone(); - } + if self.sorts_cache.contains_key(term) { + return self.sorts_cache[term].clone(); + } - let result: Sort = match term.as_ref() { - Term::Const(c) => match c { - Constant::Integer(_) => Sort::Int, - Constant::Real(_) => Sort::Real, - Constant::String(_) => Sort::String, - }, - Term::Var(_, sort) => sort.as_sort().unwrap().clone(), - Term::Op(op, args) => match op { - Operator::Not - | Operator::Implies - | Operator::And - | Operator::Or - | Operator::Xor - | Operator::Equals - | Operator::Distinct - | Operator::LessThan - | Operator::GreaterThan - | Operator::LessEq - | Operator::GreaterEq - | Operator::IsInt => Sort::Bool, - Operator::Ite => self.compute_sort(&args[1]).as_sort().unwrap().clone(), - Operator::Add | Operator::Sub | Operator::Mult => { - if args - .iter() - .any(|a| self.compute_sort(a).as_sort().unwrap() == &Sort::Real) - { - Sort::Real - } else { - Sort::Int - } - } - Operator::RealDiv | Operator::ToReal => Sort::Real, - Operator::IntDiv | Operator::Mod | Operator::Abs | Operator::ToInt => Sort::Int, - Operator::Select => match self.compute_sort(&args[0]).as_sort().unwrap() { - Sort::Array(_, y) => y.as_sort().unwrap().clone(), - _ => unreachable!(), - }, - Operator::Store => self.compute_sort(&args[0]).as_sort().unwrap().clone(), - }, - Term::App(f, _) => { - match self.compute_sort(f).as_sort().unwrap() { - Sort::Function(sorts) => sorts.last().unwrap().as_sort().unwrap().clone(), - _ => unreachable!(), // We assume that the function is correctly sorted + let result: Sort = match term.as_ref() { + Term::Const(c) => match c { + Constant::Integer(_) => Sort::Int, + Constant::Real(_) => Sort::Real, + Constant::String(_) => Sort::String, + }, + Term::Var(_, sort) => sort.as_sort().unwrap().clone(), + Term::Op(op, args) => match op { + Operator::Not + | 
Operator::Implies + | Operator::And + | Operator::Or + | Operator::Xor + | Operator::Equals + | Operator::Distinct + | Operator::LessThan + | Operator::GreaterThan + | Operator::LessEq + | Operator::GreaterEq + | Operator::IsInt => Sort::Bool, + Operator::Ite => self.compute_sort(&args[1]).as_sort().unwrap().clone(), + Operator::Add | Operator::Sub | Operator::Mult => { + if args + .iter() + .any(|a| self.compute_sort(a).as_sort().unwrap() == &Sort::Real) + { + Sort::Real + } else { + Sort::Int } } - Term::Sort(sort) => sort.clone(), - Term::Quant(_, _, _) => Sort::Bool, - Term::Choice((_, sort), _) => sort.as_sort().unwrap().clone(), - Term::Let(_, inner) => self.compute_sort(inner).as_sort().unwrap().clone(), - Term::Lambda(bindings, body) => { - let mut result: Vec<_> = - bindings.iter().map(|(_name, sort)| sort.clone()).collect(); - let return_sort = self.compute_sort(body).as_ref().clone(); - result.push(self.add(return_sort)); - Sort::Function(result) + Operator::RealDiv | Operator::ToReal => Sort::Real, + Operator::IntDiv | Operator::Mod | Operator::Abs | Operator::ToInt => Sort::Int, + Operator::Select => match self.compute_sort(&args[0]).as_sort().unwrap() { + Sort::Array(_, y) => y.as_sort().unwrap().clone(), + _ => unreachable!(), + }, + Operator::Store => self.compute_sort(&args[0]).as_sort().unwrap().clone(), + }, + Term::App(f, _) => { + match self.compute_sort(f).as_sort().unwrap() { + Sort::Function(sorts) => sorts.last().unwrap().as_sort().unwrap().clone(), + _ => unreachable!(), // We assume that the function is correctly sorted } - }; - let sorted_term = Self::add_term_to_map(&mut self.terms, Term::Sort(result)); - self.sorts_cache.insert(term.clone(), sorted_term); - self.sorts_cache[term].clone() - } + } + Term::Sort(sort) => sort.clone(), + Term::Quant(_, _, _) => Sort::Bool, + Term::Choice((_, sort), _) => sort.as_sort().unwrap().clone(), + Term::Let(_, inner) => self.compute_sort(inner).as_sort().unwrap().clone(), + Term::Lambda(bindings, 
body) => { + let mut result: Vec<_> = + bindings.iter().map(|(_name, sort)| sort.clone()).collect(); + let return_sort = self.compute_sort(body).as_ref().clone(); + result.push(self.add(return_sort)); + Sort::Function(result) + } + }; + let sorted_term = Self::add_term_to_map(&mut self.terms, Term::Sort(result)); + self.sorts_cache.insert(term.clone(), sorted_term); + self.sorts_cache[term].clone() } +} - impl TPool for TermPool { - fn bool_true(&self) -> Rc { - self.bool_true.clone() - } +impl TPool for PrimitivePool { + fn bool_true(&self) -> Rc { + self.bool_true.clone() + } - fn bool_false(&self) -> Rc { - self.bool_false.clone() - } + fn bool_false(&self) -> Rc { + self.bool_false.clone() + } - fn add(&mut self, term: Term) -> Rc { - let term = Self::add_term_to_map(&mut self.terms, term); - self.compute_sort(&term); - term - } + fn add(&mut self, term: Term) -> Rc { + let term = Self::add_term_to_map(&mut self.terms, term); + self.compute_sort(&term); + term + } - fn sort(&self, term: &Rc) -> Rc { - self.sorts_cache[term].clone() - } + fn sort(&self, term: &Rc) -> Rc { + self.sorts_cache[term].clone() + } - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. 
This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if self.free_vars_cache.contains_key(term) { - return self.free_vars_cache.get(term).unwrap().clone(); - } - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = self.free_vars(f).clone(); - for a in args { - set.extend(self.free_vars(a).iter().cloned()); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend(self.free_vars(a).iter().cloned()); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); - for bound_var in bindings { - let term = self.add(bound_var.clone().into()); - vars.remove(&term); - } - vars + fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { + // Here, I would like to do + // ``` + // if let Some(vars) = self.free_vars_cache.get(term) { + // return vars; + // } + // ``` + // However, because of a limitation in the borrow checker, the compiler thinks that + // this immutable borrow of `cache` has to live until the end of the function, even + // though the code immediately returns. This would stop me from mutating `cache` in the + // rest of the function. Because of that, I have to check if the hash map contains + // `term` as a key, and then get the value associated with it, meaning I have to access + // the hash map twice, which is a bit slower. 
This is an example of problem case #3 + // from the non-lexical lifetimes RFC: + // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md + if self.free_vars_cache.contains_key(term) { + return self.free_vars_cache.get(term).unwrap().clone(); + } + let set = match term.as_ref() { + Term::App(f, args) => { + let mut set = self.free_vars(f).clone(); + for a in args { + set.extend(self.free_vars(a).iter().cloned()); } - Term::Let(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); - for (var, value) in bindings { - let sort = self.sort(value).as_ref().clone(); - let sort = self.add(sort); - let term = self.add((var.clone(), sort).into()); - vars.remove(&term); - } - vars + set + } + Term::Op(_, args) => { + let mut set = AHashSet::new(); + for a in args { + set.extend(self.free_vars(a).iter().cloned()); } - Term::Choice(bound_var, inner) => { - let mut vars = self.free_vars(inner).clone(); + set + } + Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { + let mut vars = self.free_vars(inner).clone(); + for bound_var in bindings { let term = self.add(bound_var.clone().into()); vars.remove(&term); - vars } - Term::Var(..) => { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set + vars + } + Term::Let(bindings, inner) => { + let mut vars = self.free_vars(inner).clone(); + for (var, value) in bindings { + let sort = self.sort(value).as_ref().clone(); + let sort = self.add(sort); + let term = self.add((var.clone(), sort).into()); + vars.remove(&term); } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - self.free_vars_cache.insert(term.clone(), set); - self.free_vars_cache.get(term).unwrap().clone() - } + vars + } + Term::Choice(bound_var, inner) => { + let mut vars = self.free_vars(inner).clone(); + let term = self.add(bound_var.clone().into()); + vars.remove(&term); + vars + } + Term::Var(..) 
=> { + let mut set = AHashSet::with_capacity(1); + set.insert(term.clone()); + set + } + Term::Const(_) | Term::Sort(_) => AHashSet::new(), + }; + self.free_vars_cache.insert(term.clone(), set); + self.free_vars_cache.get(term).unwrap().clone() } } diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index e13f404b..f29c830b 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -1,6 +1,6 @@ //! Algorithms for creating and applying capture-avoiding substitutions over terms. -use super::{BindingList, Rc, SortedVar, TPool, Term, TermPool}; +use super::{BindingList, Rc, SortedVar, TPool, Term}; use ahash::{AHashMap, AHashSet}; use thiserror::Error; @@ -56,7 +56,7 @@ impl Substitution { /// Constructs a singleton substitution mapping `x` to `t`. This returns an error if the sorts /// of the given terms are not the same, or if `x` is not a variable term. - pub fn single(pool: &mut TermPool, x: Rc, t: Rc) -> SubstitutionResult { + pub fn single(pool: &mut dyn TPool, x: Rc, t: Rc) -> SubstitutionResult { let mut this = Self::empty(); this.insert(pool, x, t)?; Ok(this) @@ -65,8 +65,8 @@ impl Substitution { /// Constructs a new substitution from an arbitrary mapping of terms to other terms. This /// returns an error if any term in the left-hand side is not a variable, or if any term is /// mapped to a term of a different sort. - pub fn new( - pool: &mut P, + pub fn new( + pool: &mut dyn TPool, map: AHashMap, Rc>, ) -> SubstitutionResult { for (k, v) in map.iter() { @@ -92,9 +92,9 @@ impl Substitution { /// Extends the substitution by adding a new mapping from `x` to `t`. This returns an error if /// the sorts of the given terms are not the same, or if `x` is not a variable term. 
- pub(crate) fn insert( + pub(crate) fn insert( &mut self, - pool: &mut P, + pool: &mut dyn TPool, x: Rc, t: Rc, ) -> SubstitutionResult<()> { @@ -125,7 +125,7 @@ impl Substitution { /// Computes which binder variables will need to be renamed, and stores the result in /// `self.should_be_renamed`. - fn compute_should_be_renamed(&mut self, pool: &mut P) { + fn compute_should_be_renamed(&mut self, pool: &mut dyn TPool) { if self.should_be_renamed.is_some() { return; } @@ -160,7 +160,7 @@ impl Substitution { } /// Applies the substitution to `term`, and returns the result as a new term. - pub fn apply(&mut self, pool: &mut P, term: &Rc) -> Rc { + pub fn apply(&mut self, pool: &mut dyn TPool, term: &Rc) -> Rc { macro_rules! apply_to_sequence { ($sequence:expr) => { $sequence @@ -215,9 +215,9 @@ impl Substitution { result } - fn can_skip_instead_of_renaming( + fn can_skip_instead_of_renaming( &self, - pool: &mut P, + pool: &mut dyn TPool, binding_list: &[SortedVar], ) -> bool { // Note: this method assumes that `binding_list` is a "sort" binding list. "Value" lists add @@ -246,9 +246,9 @@ impl Substitution { /// Applies the substitution to a binder term, renaming any bound variables as needed. This /// method uses the function `build_function` to construct the resulting binder term. If the /// binder is a `let` or `lambda` term, `is_value_list` should be true. - fn apply_to_binder) -> Term, P: TPool>( + fn apply_to_binder) -> Term>( &mut self, - pool: &mut P, + pool: &mut dyn TPool, original_term: &Rc, binding_list: &[SortedVar], inner: &Rc, @@ -290,9 +290,9 @@ impl Substitution { /// returns a clone of the binding list and an empty substitution. The name chosen when renaming /// a variable is the old name with `'` appended. If the binding list is a "value" list, like in /// a `let` or `lambda` term, `is_value_list` should be true. 
- fn rename_binding_list( + fn rename_binding_list( &mut self, - pool: &mut P, + pool: &mut dyn TPool, binding_list: &[SortedVar], is_value_list: bool, ) -> (BindingList, Self) { @@ -353,10 +353,10 @@ impl Substitution { #[cfg(test)] mod tests { use super::*; - use crate::parser::*; + use crate::{ast::pool::advanced::LocalPool, parser::*}; fn run_test(definitions: &str, original: &str, x: &str, t: &str, result: &str) { - let mut pool = TermPool::new(); + let mut pool = LocalPool::new(); let mut parser = Parser::new(&mut pool, definitions.as_bytes(), true, false, false).unwrap(); parser.parse_problem().unwrap(); diff --git a/carcara/src/ast/tests.rs b/carcara/src/ast/tests.rs index a6db2818..a2b02089 100644 --- a/carcara/src/ast/tests.rs +++ b/carcara/src/ast/tests.rs @@ -1,5 +1,5 @@ use crate::{ - ast::{TPool, TermPool}, + ast::{pool::advanced::LocalPool, TPool}, parser::tests::parse_terms, }; use ahash::AHashSet; @@ -8,7 +8,7 @@ use ahash::AHashSet; fn test_free_vars() { fn run_tests(definitions: &str, cases: &[(&str, &[&str])]) { for &(term, expected) in cases { - let mut pool = TermPool::new(); + let mut pool = LocalPool::new(); let [root] = parse_terms(&mut pool, definitions, [term]); let expected: AHashSet<_> = expected.iter().copied().collect(); let set = pool.free_vars(&root); @@ -44,7 +44,7 @@ fn test_polyeq() { } fn run_tests(definitions: &str, cases: &[(&str, &str)], test_type: TestType) { - let mut pool = TermPool::new(); + let mut pool = LocalPool::new(); for (a, b) in cases { let [a, b] = parse_terms(&mut pool, definitions, [a, b]); let mut time = std::time::Duration::ZERO; diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index 3c0b7784..889b58ae 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -24,15 +24,17 @@ fn get_problem_string(conclusion: &[Rc], prelude: &ProblemPrelude) -> Stri problem } -pub fn lia_generic( - pool: &mut TermPool, +pub fn lia_generic( + pool: &mut P, 
conclusion: &[Rc], prelude: &ProblemPrelude, elaborator: Option<&mut Elaborator>, root_id: &str, ) -> bool { + // TODO: Transform it into 2 different functions + let mut pp = PrimitivePool::new(); let problem = get_problem_string(conclusion, prelude); - let commands = match get_cvc5_proof(pool, problem) { + let commands = match get_cvc5_proof(&mut pp, problem) { Ok(c) => c, Err(e) => { log::warn!("failed to check `lia_generic` step using cvc5: {}", e); @@ -50,7 +52,7 @@ pub fn lia_generic( } fn get_cvc5_proof( - pool: &mut TermPool, + pool: &mut PrimitivePool, problem: String, ) -> Result, LiaGenericError> { let mut cvc5 = Command::new("cvc5") @@ -102,7 +104,7 @@ fn get_cvc5_proof( } fn parse_and_check_cvc5_proof( - pool: &mut TermPool, + pool: &mut PrimitivePool, problem: &[u8], proof: &[u8], ) -> CarcaraResult> { @@ -139,8 +141,9 @@ fn update_premises(commands: &mut [ProofCommand], delta: usize, root_id: &str) { } } -fn insert_missing_assumes( - pool: &mut TermPool, +// TODO: Remove generics from here and all other functions +fn insert_missing_assumes( + pool: &mut P, elaborator: &mut Elaborator, conclusion: &[Rc], proof: &[ProofCommand], @@ -180,8 +183,8 @@ fn insert_missing_assumes( (all, num_added) } -fn insert_cvc5_proof( - pool: &mut TermPool, +fn insert_cvc5_proof( + pool: &mut P, elaborator: &mut Elaborator, mut commands: Vec, conclusion: &[Rc], diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index ac026881..feb4c272 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -77,7 +77,7 @@ impl Config { } pub struct ProofChecker<'c> { - pool: &'c mut TermPool, + pool: &'c mut PrimitivePool, config: Config, prelude: &'c ProblemPrelude, context: ContextStack, @@ -87,7 +87,7 @@ pub struct ProofChecker<'c> { } impl<'c> ProofChecker<'c> { - pub fn new(pool: &'c mut TermPool, config: Config, prelude: &'c ProblemPrelude) -> Self { + pub fn new(pool: &'c mut PrimitivePool, config: Config, prelude: &'c ProblemPrelude) -> Self { 
ProofChecker { pool, config, diff --git a/carcara/src/checker/parallel.rs b/carcara/src/checker/parallel.rs index 2a4895b1..9d56f211 100644 --- a/carcara/src/checker/parallel.rs +++ b/carcara/src/checker/parallel.rs @@ -17,7 +17,7 @@ use std::{ }; pub struct ParallelProofChecker<'c> { - pool: Arc, + pool: Arc, config: Config, prelude: &'c ProblemPrelude, context: ContextStack, @@ -26,11 +26,7 @@ pub struct ParallelProofChecker<'c> { } impl<'c> ParallelProofChecker<'c> { - pub fn new( - pool: Arc, - config: Config, - prelude: &'c ProblemPrelude, - ) -> Self { + pub fn new(pool: Arc, config: Config, prelude: &'c ProblemPrelude) -> Self { ParallelProofChecker { pool, config, @@ -494,7 +490,7 @@ impl<'c> ParallelProofChecker<'c> { step: &'a ProofStep, previous_command: Option>, iter: &'a ScheduleIter<'a>, - pool: &mut TermPool, + pool: &mut LocalPool, stats: Option<&'a mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); diff --git a/carcara/src/checker/rules/clausification.rs b/carcara/src/checker/rules/clausification.rs index c675c013..f4afd066 100644 --- a/carcara/src/checker/rules/clausification.rs +++ b/carcara/src/checker/rules/clausification.rs @@ -206,7 +206,7 @@ pub fn nary_elim(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult { /// A function to expand terms that fall in the right or left associative cases. For example, /// the term `(=> p q r s)` will be expanded into the term `(=> p (=> q (=> r s)))`. - fn expand_assoc(pool: &mut TermPool, op: Operator, args: &[Rc], case: Case) -> Rc { + fn expand_assoc(pool: &mut dyn TPool, op: Operator, args: &[Rc], case: Case) -> Rc { let (head, tail) = match args { [] => unreachable!(), [t] => return t.clone(), @@ -259,7 +259,7 @@ pub fn nary_elim(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult { /// The first simplification step for `bfun_elim`, that expands quantifiers over boolean variables. 
fn bfun_elim_first_step( - pool: &mut TermPool, + pool: &mut dyn TPool, bindigns: &[SortedVar], term: &Rc, acc: &mut Vec>, @@ -283,7 +283,7 @@ fn bfun_elim_first_step( /// The second simplification step for `bfun_elim`, that expands function applications over /// non-constant boolean arguments into `ite` terms. fn bfun_elim_second_step( - pool: &mut TermPool, + pool: &mut dyn TPool, func: &Rc, args: &[Rc], processed: usize, @@ -313,7 +313,7 @@ fn bfun_elim_second_step( /// Applies the simplification steps for the `bfun_elim` rule. fn apply_bfun_elim( - pool: &mut TermPool, + pool: &mut dyn TPool, term: &Rc, cache: &mut AHashMap, Rc>, ) -> Result, SubstitutionError> { diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index 669d7183..81361f6e 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -18,7 +18,7 @@ pub struct RuleArgs<'a> { pub(super) conclusion: &'a [Rc], pub(super) premises: &'a [Premise<'a>], pub(super) args: &'a [ProofArg], - pub(super) pool: &'a mut TermPool, + pub(super) pool: &'a mut dyn TPool, pub(super) context: &'a mut ContextStack, // For rules that end a subproof, we need to pass the previous command in the subproof that it diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index dce21b84..3ae356cc 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -108,7 +108,7 @@ pub fn qnt_rm_unused(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult /// Converts a term into negation normal form, expanding all connectives. fn negation_normal_form( - pool: &mut TermPool, + pool: &mut dyn TPool, term: &Rc, polarity: bool, cache: &mut AHashMap<(Rc, bool), Rc>, @@ -217,7 +217,7 @@ fn distribute(formulas: &[CnfFormula]) -> CnfFormula { /// Prenex all universal quantifiers in a term. This doesn't prenex existential quantifiers. This /// assumes the term is in negation normal form. 
-fn prenex_forall(pool: &mut TermPool, acc: &mut C, term: &Rc) -> Rc +fn prenex_forall(pool: &mut dyn TPool, acc: &mut C, term: &Rc) -> Rc where C: Extend, { @@ -465,7 +465,7 @@ mod tests { use super::*; use crate::parser::tests::*; - fn to_cnf_term(pool: &mut TermPool, term: &Rc) -> Rc { + fn to_cnf_term(pool: &mut dyn TPool, term: &Rc) -> Rc { let nnf = negation_normal_form(pool, term, true, &mut AHashMap::new()); let mut bindings = Vec::new(); let prenexed = prenex_forall(pool, &mut bindings, &nnf); @@ -498,7 +498,7 @@ mod tests { fn run_tests(definitions: &str, cases: &[(&str, &str)]) { for &(term, expected) in cases { - let mut pool = TermPool::new(); + let mut pool = crate::ast::pool::advanced::LocalPool::new(); let [term, expected] = parse_terms(&mut pool, definitions, [term, expected]); let got = to_cnf_term(&mut pool, &term); assert_eq!(expected, got); diff --git a/carcara/src/checker/rules/reflexivity.rs b/carcara/src/checker/rules/reflexivity.rs index c5e9ad4e..aa57ccd6 100644 --- a/carcara/src/checker/rules/reflexivity.rs +++ b/carcara/src/checker/rules/reflexivity.rs @@ -75,7 +75,7 @@ pub fn strict_refl(RuleArgs { conclusion, pool, context, .. }: RuleArgs) -> Rule fn elaborate_equality( elaborator: &mut Elaborator, - pool: &mut TermPool, + pool: &mut dyn TPool, left: &Rc, right: &Rc, id: &str, diff --git a/carcara/src/checker/rules/resolution.rs b/carcara/src/checker/rules/resolution.rs index a5861ab5..f43e555e 100644 --- a/carcara/src/checker/rules/resolution.rs +++ b/carcara/src/checker/rules/resolution.rs @@ -45,7 +45,7 @@ impl<'a> ClauseCollection<'a> for AHashSet> { } /// Undoes the transformation done by `Rc::remove_all_negations`. 
-fn unremove_all_negations(pool: &mut TermPool, (n, term): ResolutionTerm) -> Rc { +fn unremove_all_negations(pool: &mut dyn TPool, (n, term): ResolutionTerm) -> Rc { let mut term = term.clone(); for _ in 0..n { term = build_term!(pool, (not { term })); @@ -92,7 +92,7 @@ struct ResolutionTrace { fn greedy_resolution( conclusion: &[Rc], premises: &[Premise], - pool: &mut TermPool, + pool: &mut dyn TPool, tracing: bool, ) -> Result { // If we are elaborating, we record which pivot was found for each binary resolution step, so we @@ -339,7 +339,7 @@ pub fn strict_resolution( fn apply_generic_resolution<'a, C: ClauseCollection<'a>>( premises: &'a [Premise], args: &'a [ProofArg], - pool: &mut TermPool, + pool: &mut dyn TPool, ) -> Result { assert_num_premises(premises, 1..)?; let num_steps = premises.len() - 1; @@ -375,7 +375,7 @@ fn apply_generic_resolution<'a, C: ClauseCollection<'a>>( } fn binary_resolution<'a, C: ClauseCollection<'a>>( - pool: &mut TermPool, + pool: &mut dyn TPool, current: &mut C, next: &'a [Rc], pivot: ResolutionTerm<'a>, diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index 3f2de8a8..d83d0180 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -40,8 +40,8 @@ macro_rules! simplify { fn generic_simplify_rule( conclusion: &[Rc], - pool: &mut TermPool, - simplify_function: fn(&Term, &mut TermPool) -> Option>, + pool: &mut dyn TPool, + simplify_function: fn(&Term, &mut dyn TPool) -> Option>, ) -> RuleResult { assert_clause_len(conclusion, 1)?; @@ -160,7 +160,7 @@ pub fn eq_simplify(args: RuleArgs) -> RuleResult { /// Used for both the `and_simplify` and `or_simplify` rules, depending on `rule_kind`. `rule_kind` /// has to be either `Operator::And` or `Operator::Or`. 
fn generic_and_or_simplify( - pool: &mut TermPool, + pool: &mut dyn TPool, conclusion: &[Rc], rule_kind: Operator, ) -> RuleResult { @@ -445,7 +445,7 @@ pub fn div_simplify(RuleArgs { conclusion, .. }: RuleArgs) -> RuleResult { /// Used for both the `sum_simplify` and `prod_simplify` rules, depending on `rule_kind`. /// `rule_kind` has to be either `Operator::Add` or `Operator::Mult`. fn generic_sum_prod_simplify_rule( - pool: &mut TermPool, + pool: &mut dyn TPool, ts: &Rc, u: &Rc, rule_kind: Operator, @@ -667,7 +667,7 @@ pub fn comp_simplify(args: RuleArgs) -> RuleResult { } fn apply_ac_simp( - pool: &mut TermPool, + pool: &mut dyn TPool, cache: &mut AHashMap, Rc>, term: &Rc, ) -> Rc { diff --git a/carcara/src/checker/rules/transitivity.rs b/carcara/src/checker/rules/transitivity.rs index 10b0ab5e..4e7c6507 100644 --- a/carcara/src/checker/rules/transitivity.rs +++ b/carcara/src/checker/rules/transitivity.rs @@ -204,7 +204,7 @@ pub fn elaborate_eq_transitive( } fn flip_eq_transitive_premises( - pool: &mut TermPool, + pool: &mut dyn TPool, elaborator: &mut Elaborator, new_eq_transitive_step: (usize, usize), new_clause: &[Rc], diff --git a/carcara/src/elaborator/mod.rs b/carcara/src/elaborator/mod.rs index d20cf7b6..4d6407fe 100644 --- a/carcara/src/elaborator/mod.rs +++ b/carcara/src/elaborator/mod.rs @@ -200,7 +200,7 @@ impl Elaborator { /// index must already be mapped to the new index space. pub fn add_symm_step( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, original_premise: (usize, usize), original_equality: (Rc, Rc), id: String, @@ -221,7 +221,7 @@ impl Elaborator { /// Adds a `refl` step that asserts that the two given terms are equal. 
pub fn add_refl_step( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, a: Rc, b: Rc, id: String, @@ -239,7 +239,7 @@ impl Elaborator { pub fn elaborate_polyeq( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, root_id: &str, a: Rc, b: Rc, @@ -250,7 +250,7 @@ impl Elaborator { pub fn elaborate_assume( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, premise: Rc, term: Rc, id: &str, diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index 2dbb60a9..95addd97 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -25,7 +25,7 @@ impl<'a> PolyeqElaborator<'a> { /// Takes two terms that are equal modulo reordering of equalities, and returns a premise that /// proves their equality. - pub fn elaborate(&mut self, pool: &mut TermPool, a: Rc, b: Rc) -> (usize, usize) { + pub fn elaborate(&mut self, pool: &mut dyn TPool, a: Rc, b: Rc) -> (usize, usize) { // TODO: Make this method return an error instead of panicking if the terms aren't equal let key = (a, b); @@ -39,7 +39,7 @@ impl<'a> PolyeqElaborator<'a> { result } - fn elaborate_impl(&mut self, pool: &mut TermPool, a: Rc, b: Rc) -> (usize, usize) { + fn elaborate_impl(&mut self, pool: &mut dyn TPool, a: Rc, b: Rc) -> (usize, usize) { if self.directly_eq(pool, &a, &b) { let id = self.inner.get_new_id(self.root_id); return self.inner.add_refl_step(pool, a, b, id); @@ -187,7 +187,7 @@ impl<'a> PolyeqElaborator<'a> { } /// Returns `true` if the terms are directly equal, modulo application of the current context. - fn directly_eq(&mut self, pool: &mut TermPool, a: &Rc, b: &Rc) -> bool { + fn directly_eq(&mut self, pool: &mut dyn TPool, a: &Rc, b: &Rc) -> bool { match &mut self.context { Some(c) => c.apply(pool, a) == *b, None => a == b, @@ -196,7 +196,7 @@ impl<'a> PolyeqElaborator<'a> { /// Returns `true` if the terms are equal modulo reordering of inequalities, and modulo /// application of the current context. 
- fn polyeq(&mut self, pool: &mut TermPool, a: &Rc, b: &Rc) -> bool { + fn polyeq(&mut self, pool: &mut dyn TPool, a: &Rc, b: &Rc) -> bool { match &mut self.context { Some(c) => Polyeq::eq(&mut self.checker, &c.apply(pool, a), b), None => Polyeq::eq(&mut self.checker, a, b), @@ -205,7 +205,7 @@ impl<'a> PolyeqElaborator<'a> { fn build_cong( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, (a, b): (&Rc, &Rc), (a_args, b_args): (&[Rc], &[Rc]), ) -> (usize, usize) { @@ -235,7 +235,7 @@ impl<'a> PolyeqElaborator<'a> { fn flip_equality( &mut self, - pool: &mut TermPool, + pool: &mut dyn TPool, (a, a_left, a_right): (Rc, Rc, Rc), (b, b_left, b_right): (Rc, Rc, Rc), ) -> (usize, usize) { @@ -339,7 +339,7 @@ impl<'a> PolyeqElaborator<'a> { /// Creates the subproof for a `bind` or `bind_let` step, used to derive the equality of /// quantifier or `let` terms. This assumes the accumulator subproof has already been opened. - fn create_bind_subproof(&mut self, pool: &mut TermPool, inner_equality: (Rc, Rc)) { + fn create_bind_subproof(&mut self, pool: &mut dyn TPool, inner_equality: (Rc, Rc)) { let (a, b) = inner_equality; let inner_eq = self.elaborate(pool, a.clone(), b.clone()); diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index c80302b1..46ad8769 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -142,11 +142,12 @@ pub fn check( num_threads: usize, ) -> Result { use crate::checker::Scheduler; + use std::sync::Arc; let mut run_measures: RunMeasurement = RunMeasurement::default(); // Parsing let total = Instant::now(); - let (prelude, proof, pool) = parser::parse_instance_multithread( + let (prelude, proof, pool) = parser::parse_instance( problem, proof, options.apply_function_defs, @@ -162,7 +163,7 @@ pub fn check( // Checking let checking = Instant::now(); - let mut checker = checker::ParallelProofChecker::new(pool, config, &prelude); + let mut checker = checker::ParallelProofChecker::new(Arc::new(pool), config, &prelude); let (scheduler, _) = 
Scheduler::new(num_threads, &proof); if options.stats { let mut checker_stats = CheckerStatistics { diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index e04b0de2..a08d2d0c 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -15,21 +15,21 @@ use crate::{ use ahash::{AHashMap, AHashSet}; use error::assert_num_args; use rug::Integer; -use std::{io::BufRead, str::FromStr, sync::Arc}; +use std::{io::BufRead, str::FromStr}; /// Parses an SMT problem instance (in the SMT-LIB format) and its associated proof (in the Alethe /// format). /// /// This returns the parsed proof, as well as the `TermPool` used in parsing. Can take any type that /// implements `BufRead`. -pub fn parse_instance( +pub fn parse_instance( problem: T, proof: T, apply_function_defs: bool, expand_lets: bool, allow_int_real_subtyping: bool, -) -> CarcaraResult<(ProblemPrelude, Proof, TermPool)> { - let mut pool = TermPool::new(); +) -> CarcaraResult<(ProblemPrelude, Proof, P)> { + let mut pool = P::default(); let mut parser = Parser::new( &mut pool, problem, @@ -45,39 +45,6 @@ pub fn parse_instance( Ok((prelude, proof, pool)) } -/// Parses an SMT problem instance (in the SMT-LIB format) and its associated proof (in the Alethe -/// format). -/// -/// This returns the parsed proof, as well as the `TermPool` used in parsing. Can take any type -/// that implements `BufRead`. -/// -/// Returns an atomic reference counter of an primitive pool. -/// TODO: Unify the two parsing methods? 
-pub fn parse_instance_multithread( - problem: T, - proof: T, - apply_function_defs: bool, - expand_lets: bool, - allow_int_real_subtyping: bool, -) -> CarcaraResult<(ProblemPrelude, Proof, Arc)> { - let mut pool = Arc::new(PrimitivePool::TermPool::new()); - let mut_pool = Arc::get_mut(&mut pool).unwrap(); - - let mut parser = Parser::new( - mut_pool, - problem, - apply_function_defs, - expand_lets, - allow_int_real_subtyping, - )?; - let (prelude, premises) = parser.parse_problem()?; - parser.reset(proof)?; - let commands = parser.parse_proof()?; - - let proof = Proof { premises, commands }; - Ok((prelude, proof, pool)) -} - /// A function definition, from a `define-fun` command. struct FunctionDef { params: Vec, diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index 0fa66ba0..dfdb7657 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -3,11 +3,12 @@ #![cfg(test)] use super::*; +use crate::ast::pool::advanced::LocalPool; const ERROR_MESSAGE: &str = "parser error during test"; -pub fn parse_terms( - pool: &mut TermPool, +pub fn parse_terms( + pool: &mut P, definitions: &str, terms: [&str; N], ) -> [Rc; N] { @@ -21,7 +22,7 @@ pub fn parse_terms( }) } -pub fn parse_term(pool: &mut TermPool, input: &str) -> Rc { +pub fn parse_term(pool: &mut P, input: &str) -> Rc { Parser::new(pool, input.as_bytes(), true, false, false) .and_then(|mut parser| parser.parse_term()) .expect(ERROR_MESSAGE) @@ -30,14 +31,14 @@ pub fn parse_term(pool: &mut TermPool, input: &str) -> Rc { /// Tries to parse a term from a `&str`, expecting it to fail. Returns the error encountered, or /// panics if no error is encountered. pub fn parse_term_err(input: &str) -> Error { - let mut pool = TermPool::new(); + let mut pool = LocalPool::new(); Parser::new(&mut pool, input.as_bytes(), true, false, false) .and_then(|mut p| p.parse_term()) .expect_err("expected error") } /// Parses a proof from a `&str`. Panics if any error is encountered. 
-pub fn parse_proof(pool: &mut TermPool, input: &str) -> Proof { +pub fn parse_proof(pool: &mut P, input: &str) -> Proof { let commands = Parser::new(pool, input.as_bytes(), true, false, false) .expect(ERROR_MESSAGE) .parse_proof() @@ -45,7 +46,7 @@ pub fn parse_proof(pool: &mut TermPool, input: &str) -> Proof { Proof { premises: AHashSet::new(), commands } } -fn run_parser_tests(pool: &mut TermPool, cases: &[(&str, Rc)]) { +fn run_parser_tests(pool: &mut P, cases: &[(&str, Rc)]) { for (case, expected) in cases { let got = parse_term(pool, case); assert_eq!(expected, &got); @@ -56,7 +57,7 @@ fn run_parser_tests(pool: &mut TermPool, cases: &[(&str, Rc)]) { fn test_hash_consing() { use ahash::AHashSet; - let mut pool = TermPool::new(); + let mut pool = LocalPool::new(); let input = "(- (- (+ 1 2) @@ -116,7 +117,7 @@ fn test_hash_consing() { #[test] fn test_constant_terms() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); assert_eq!(Term::new_int(42), *parse_term(&mut p, "42")); assert_eq!(Term::new_real((3, 2)), *parse_term(&mut p, "1.5")); assert_eq!(Term::new_string("foo"), *parse_term(&mut p, "\"foo\"")); @@ -124,7 +125,7 @@ fn test_constant_terms() { #[test] fn test_arithmetic_ops() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let [one, two, three, five, seven] = [1, 2, 3, 5, 7].map(|n| p.add(Term::new_int(n))); let cases = [ ( @@ -154,7 +155,7 @@ fn test_arithmetic_ops() { #[test] fn test_logic_ops() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let [zero, one, two, three, four] = [0, 1, 2, 3, 4].map(|n| p.add(Term::new_int(n))); let cases = [ ( @@ -236,7 +237,7 @@ fn test_logic_ops() { #[test] fn test_ite() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let [one, two, three] = [1, 2, 3].map(|n| p.add(Term::new_int(n))); let cases = [ ( @@ -273,7 +274,7 @@ fn test_ite() { #[test] fn test_quantifiers() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let bool_sort = 
p.add(Term::Sort(Sort::Bool)); let real_sort = p.add(Term::Sort(Sort::Real)); let cases = [ @@ -313,7 +314,7 @@ fn test_quantifiers() { #[test] fn test_choice_terms() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let bool_sort = p.add(Term::Sort(Sort::Bool)); let int_sort = p.add(Term::Sort(Sort::Int)); let cases = [ @@ -341,7 +342,7 @@ fn test_choice_terms() { #[test] fn test_let_terms() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let int_sort = p.add(Term::Sort(Sort::Int)); let bool_sort = p.add(Term::Sort(Sort::Bool)); let cases = [ @@ -371,7 +372,7 @@ fn test_let_terms() { #[test] fn test_lambda_terms() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let int_sort = p.add(Term::Sort(Sort::Int)); let cases = [ ("(lambda ((x Int)) x)", { @@ -403,7 +404,7 @@ fn test_lambda_terms() { #[test] fn test_annotated_terms() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let [zero, two, three] = [0, 2, 3].map(|n| p.add(Term::new_int(n))); let cases = [ ("(! 
0 :named foo)", zero.clone()), @@ -434,7 +435,7 @@ fn test_annotated_terms() { #[test] fn test_declare_fun() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); parse_terms( &mut p, @@ -456,7 +457,7 @@ fn test_declare_fun() { #[test] fn test_declare_sort() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); parse_terms( &mut p, @@ -481,7 +482,7 @@ fn test_declare_sort() { #[test] fn test_define_fun() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let [got] = parse_terms( &mut p, "(define-fun add ((a Int) (b Int)) Int (+ a b))", @@ -504,7 +505,7 @@ fn test_define_fun() { #[test] fn test_step() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let input = " (step t1 (cl (= (+ 2 3) (- 1 2))) :rule rule-name) (step t2 (cl) :rule rule-name :premises (t1)) @@ -594,7 +595,7 @@ fn test_step() { #[test] fn test_premises_in_subproofs() { - let mut p = TermPool::new(); + let mut p = LocalPool::new(); let input = " (assume h1 true) (assume h2 true) diff --git a/cli/src/main.rs b/cli/src/main.rs index c9483a3b..ef1b06a0 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -333,8 +333,9 @@ fn get_instance(options: &Input) -> CliResult<(Box, Box CliResult<()> { + use carcara::ast::{PrimitivePool, ProblemPrelude, Proof}; let (problem, proof) = get_instance(&options.input)?; - let (_, proof, _) = parser::parse_instance( + let (_, proof, _): (ProblemPrelude, Proof, PrimitivePool) = parser::parse_instance( problem, proof, options.parsing.apply_function_defs, @@ -428,8 +429,9 @@ fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) } fn slice_command(options: SliceCommandOption) -> CliResult<()> { + use carcara::ast::{PrimitivePool, ProblemPrelude, Proof}; let (problem, proof) = get_instance(&options.input)?; - let (_, proof, _) = parser::parse_instance( + let (_, proof, _): (ProblemPrelude, Proof, PrimitivePool) = parser::parse_instance( problem, proof, options.parsing.apply_function_defs, From 
69171d231a3f8d1b9eb4f03321b8d9312ee67adc Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Thu, 20 Jul 2023 21:14:59 -0300 Subject: [PATCH 18/70] Converted ref to const ref --- carcara/src/ast/pool/advanced.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index bf60bae5..41bb56f2 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -39,7 +39,7 @@ impl ContextPool { /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. fn add_by_ref<'d, 'c: 'd>( ctx_pool: &mut PrimitivePool, - global_pool: &'d Arc, + global_pool: &'d PrimitivePool, term: Term, ) -> Rc { use std::collections::hash_map::Entry; @@ -63,7 +63,7 @@ impl ContextPool { /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. fn sort_by_ref<'d: 't, 'c: 'd, 't>( ctx_pool: &PrimitivePool, - global_pool: &'d Arc, + global_pool: &'d PrimitivePool, term: &'t Rc, ) -> Rc { if let Some(sort) = global_pool.sorts_cache.get(term) { @@ -118,7 +118,7 @@ impl TPool for ContextPool { fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { fn internal<'d: 't, 'c: 'd, 't>( ctx_pool: &'d mut PrimitivePool, - global_pool: &'c Arc, + global_pool: &'c PrimitivePool, term: &'t Rc, ) -> &'t AHashSet> { // Here, I would like to do @@ -241,7 +241,7 @@ impl LocalPool { fn add_by_ref<'d, 'c: 'd>( local_pool: &'d mut PrimitivePool, ctx_pool: &PrimitivePool, - global_pool: &'d Arc, + global_pool: &'d PrimitivePool, term: Term, ) -> Rc { use std::collections::hash_map::Entry; @@ -270,7 +270,7 @@ impl LocalPool { fn sort_by_ref<'d: 't, 'c: 'd, 't>( local_pool: &'d mut PrimitivePool, ctx_pool: &PrimitivePool, - global_pool: &'d Arc, + global_pool: &'d PrimitivePool, term: &'t Rc, ) -> Rc { if let Some(sort) = global_pool.sorts_cache.get(term) { @@ -333,7 +333,7 @@ impl 
TPool for LocalPool { fn internal<'d: 't, 'c: 'd, 't>( local_pool: &'d mut PrimitivePool, ctx_pool: &'t PrimitivePool, - global_pool: &'d Arc, + global_pool: &'d PrimitivePool, term: &'t Rc, ) -> &'t AHashSet> { // Here, I would like to do From 0d865bb90e2c7747bf6df7b080b74db0fde47a98 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Thu, 20 Jul 2023 22:04:58 -0300 Subject: [PATCH 19/70] Remodelled the parallel and scheduler folders --- carcara/src/checker/mod.rs | 4 +--- carcara/src/checker/{parallel.rs => parallel/mod.rs} | 4 +++- carcara/src/checker/{ => parallel}/scheduler/iter.rs | 0 carcara/src/checker/{ => parallel}/scheduler/mod.rs | 6 ++---- carcara/src/checker/{ => parallel}/scheduler/weights.rs | 0 5 files changed, 6 insertions(+), 8 deletions(-) rename carcara/src/checker/{parallel.rs => parallel/mod.rs} (99%) rename carcara/src/checker/{ => parallel}/scheduler/iter.rs (100%) rename carcara/src/checker/{ => parallel}/scheduler/mod.rs (98%) rename carcara/src/checker/{ => parallel}/scheduler/weights.rs (100%) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index feb4c272..bd34076e 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -2,7 +2,6 @@ pub mod error; mod lia_generic; mod parallel; mod rules; -mod scheduler; use crate::{ ast::*, @@ -12,9 +11,8 @@ use crate::{ }; use ahash::AHashSet; use error::CheckerError; -pub use parallel::ParallelProofChecker; +pub use parallel::{scheduler::Scheduler, ParallelProofChecker}; use rules::{ElaborationRule, Premise, Rule, RuleArgs, RuleResult}; -pub use scheduler::Scheduler; use std::{ fmt, time::{Duration, Instant}, diff --git a/carcara/src/checker/parallel.rs b/carcara/src/checker/parallel/mod.rs similarity index 99% rename from carcara/src/checker/parallel.rs rename to carcara/src/checker/parallel/mod.rs index 9d56f211..6a486d3e 100644 --- a/carcara/src/checker/parallel.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -1,6 +1,7 
@@ +pub mod scheduler; + use super::error::CheckerError; use super::rules::{Premise, RuleArgs, RuleResult}; -use super::scheduler::{iter::ScheduleIter, Scheduler}; use super::{lia_generic, Config}; use crate::benchmarking::{CollectResults, OnlineBenchmarkResults}; use crate::checker::CheckerStatistics; @@ -9,6 +10,7 @@ use crate::{ CarcaraResult, Error, }; use ahash::AHashSet; +pub use scheduler::{iter::ScheduleIter, Scheduler}; use std::{ ops::ControlFlow, sync::{Arc, RwLock}, diff --git a/carcara/src/checker/scheduler/iter.rs b/carcara/src/checker/parallel/scheduler/iter.rs similarity index 100% rename from carcara/src/checker/scheduler/iter.rs rename to carcara/src/checker/parallel/scheduler/iter.rs diff --git a/carcara/src/checker/scheduler/mod.rs b/carcara/src/checker/parallel/scheduler/mod.rs similarity index 98% rename from carcara/src/checker/scheduler/mod.rs rename to carcara/src/checker/parallel/scheduler/mod.rs index 020475eb..7b0c0d49 100644 --- a/carcara/src/checker/scheduler/mod.rs +++ b/carcara/src/checker/parallel/scheduler/mod.rs @@ -1,15 +1,13 @@ pub(crate) mod iter; pub(crate) mod weights; -use crate::{ - ast::{Proof, ProofCommand}, - checker::scheduler::weights::get_step_weight, -}; +use crate::ast::{Proof, ProofCommand}; use iter::ScheduleIter; use std::{ cmp::Ordering, collections::{BinaryHeap, HashSet}, }; +use weights::get_step_weight; /// Struct responsible for storing a thread work schedule. 
/// diff --git a/carcara/src/checker/scheduler/weights.rs b/carcara/src/checker/parallel/scheduler/weights.rs similarity index 100% rename from carcara/src/checker/scheduler/weights.rs rename to carcara/src/checker/parallel/scheduler/weights.rs From 9e79cba8c0011aeab340739824d02c374ad290fc Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 21 Jul 2023 13:44:57 -0300 Subject: [PATCH 20/70] Added myself as contributor and removed unecessary cloning --- carcara/Cargo.toml | 2 +- carcara/src/ast/pool/mod.rs | 12 ++++++------ carcara/src/ast/substitution.rs | 4 ++-- carcara/src/checker/rules/quantifier.rs | 5 ++--- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/carcara/Cargo.toml b/carcara/Cargo.toml index 7af5dd7c..2870c99b 100644 --- a/carcara/Cargo.toml +++ b/carcara/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "carcara" version = "1.0.0" -authors = ["Bruno Andreotti "] +authors = ["Bruno Andreotti ", "Vinícius Braga Freire "] edition = "2021" rust-version = "1.67" license = "Apache-2.0" diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index 01c66dc7..4afdf90a 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -210,21 +210,21 @@ impl TPool for PrimitivePool { } let set = match term.as_ref() { Term::App(f, args) => { - let mut set = self.free_vars(f).clone(); + let mut set = self.free_vars(f); for a in args { - set.extend(self.free_vars(a).iter().cloned()); + set.extend(self.free_vars(a).into_iter()); } set } Term::Op(_, args) => { let mut set = AHashSet::new(); for a in args { - set.extend(self.free_vars(a).iter().cloned()); + set.extend(self.free_vars(a).into_iter()); } set } Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); + let mut vars = self.free_vars(inner); for bound_var in bindings { let term = self.add(bound_var.clone().into()); vars.remove(&term); @@ -232,7 +232,7 @@ impl TPool for 
PrimitivePool { vars } Term::Let(bindings, inner) => { - let mut vars = self.free_vars(inner).clone(); + let mut vars = self.free_vars(inner); for (var, value) in bindings { let sort = self.sort(value).as_ref().clone(); let sort = self.add(sort); @@ -242,7 +242,7 @@ impl TPool for PrimitivePool { vars } Term::Choice(bound_var, inner) => { - let mut vars = self.free_vars(inner).clone(); + let mut vars = self.free_vars(inner); let term = self.add(bound_var.clone().into()); vars.remove(&term); vars diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index f29c830b..a98b770a 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -112,7 +112,7 @@ impl Substitution { if let Some(should_be_renamed) = &mut self.should_be_renamed { if x != t { - should_be_renamed.extend(pool.free_vars(&t).iter().cloned()); + should_be_renamed.extend(pool.free_vars(&t).into_iter()); if x.is_var() { should_be_renamed.insert(x.clone()); } @@ -151,7 +151,7 @@ impl Substitution { if x == t { continue; // We ignore reflexive substitutions } - should_be_renamed.extend(pool.free_vars(t).iter().cloned()); + should_be_renamed.extend(pool.free_vars(t).into_iter()); if x.is_var() { should_be_renamed.insert(x.clone()); } diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index 3ae356cc..bbda58a0 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -91,8 +91,7 @@ pub fn qnt_rm_unused(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult assert_eq(phi_1, phi_2)?; // Cloning here may be unnecessary - // TODO: Remove the clone from similar situations - let free_vars = pool.free_vars(phi_1).clone(); + let free_vars = pool.free_vars(phi_1); let expected: Vec<_> = bindings_1 .iter() @@ -300,7 +299,7 @@ pub fn qnt_cnf(RuleArgs { conclusion, pool, .. 
}: RuleArgs) -> RuleResult { .ok_or_else(|| QuantifierError::ClauseDoesntAppearInCnf(phi_prime.clone()))?; // Cloning here may be unnecessary - let free_vars = pool.free_vars(selected_clause).clone(); + let free_vars = pool.free_vars(selected_clause); // While all bindings in `r_bindings` must also be in `new_bindings`, the same is not true in // the opposite direction. That is because some variables from the set may be omitted in the From 019afcb0722554fb8ce2d4dea81e18f0a0798888 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 21 Jul 2023 14:30:38 -0300 Subject: [PATCH 21/70] Duplicated --- carcara/src/checker/lia_generic.rs | 19 ++++++++++++++----- carcara/src/checker/mod.rs | 2 +- carcara/src/checker/parallel/mod.rs | 3 +-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index 889b58ae..bd4888be 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -24,17 +24,15 @@ fn get_problem_string(conclusion: &[Rc], prelude: &ProblemPrelude) -> Stri problem } -pub fn lia_generic( - pool: &mut P, +pub fn lia_generic_single_thread( + pool: &mut PrimitivePool, conclusion: &[Rc], prelude: &ProblemPrelude, elaborator: Option<&mut Elaborator>, root_id: &str, ) -> bool { - // TODO: Transform it into 2 different functions - let mut pp = PrimitivePool::new(); let problem = get_problem_string(conclusion, prelude); - let commands = match get_cvc5_proof(&mut pp, problem) { + let commands = match get_cvc5_proof(pool, problem) { Ok(c) => c, Err(e) => { log::warn!("failed to check `lia_generic` step using cvc5: {}", e); @@ -51,6 +49,17 @@ pub fn lia_generic( false } +pub fn lia_generic_multi_thread(conclusion: &[Rc], prelude: &ProblemPrelude) -> bool { + let mut pool = PrimitivePool::new(); + let problem = get_problem_string(conclusion, prelude); + if let Err(e) = get_cvc5_proof(&mut pool, problem) { + 
log::warn!("failed to check `lia_generic` step using cvc5: {}", e); + true + } else { + false + } +} + fn get_cvc5_proof( pool: &mut PrimitivePool, problem: String, diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index bd34076e..9cda8111 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -404,7 +404,7 @@ impl<'c> ProofChecker<'c> { let mut elaborated = false; if step.rule == "lia_generic" { if self.config.lia_via_cvc5 { - let is_hole = lia_generic::lia_generic( + let is_hole = lia_generic::lia_generic_single_thread( self.pool, &step.clause, &self.prelude, diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 6a486d3e..fe207301 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -500,8 +500,7 @@ impl<'c> ParallelProofChecker<'c> { if step.rule == "lia_generic" { if self.config.lia_via_cvc5 { - let is_hole = - lia_generic::lia_generic(pool, &step.clause, &self.prelude, None, &step.id); + let is_hole = lia_generic::lia_generic_multi_thread(&step.clause, &self.prelude); self.is_holey = self.is_holey || is_hole; } else { log::warn!("encountered \"lia_generic\" rule, ignoring"); From 9d8409aa171d1717449f528448bf475657ffa0cc Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 21 Jul 2023 14:46:04 -0300 Subject: [PATCH 22/70] Renamed trait --- carcara/src/ast/context.rs | 12 ++++----- carcara/src/ast/mod.rs | 2 +- carcara/src/ast/pool/advanced.rs | 6 ++--- carcara/src/ast/pool/mod.rs | 4 +-- carcara/src/ast/substitution.rs | 18 ++++++------- carcara/src/ast/tests.rs | 2 +- carcara/src/checker/lia_generic.rs | 9 +++---- carcara/src/checker/rules/clausification.rs | 13 +++++++--- carcara/src/checker/rules/mod.rs | 2 +- carcara/src/checker/rules/quantifier.rs | 6 ++--- carcara/src/checker/rules/reflexivity.rs | 2 +- carcara/src/checker/rules/resolution.rs | 8 +++--- 
carcara/src/checker/rules/simplification.rs | 10 ++++---- carcara/src/checker/rules/transitivity.rs | 2 +- carcara/src/elaborator/mod.rs | 8 +++--- carcara/src/elaborator/polyeq.rs | 28 +++++++++++++++------ carcara/src/parser/mod.rs | 4 +-- carcara/src/parser/tests.rs | 8 +++--- 18 files changed, 81 insertions(+), 63 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 0761b785..3c0c30ee 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -36,7 +36,7 @@ impl ContextStack { pub fn push( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, assignment_args: &[(String, Rc)], variable_args: &[SortedVar], ) -> Result<(), SubstitutionError> { @@ -84,7 +84,7 @@ impl ContextStack { std::cmp::min(self.num_cumulative_calculated, self.stack.len()); } - fn catch_up_cumulative(&mut self, pool: &mut dyn TPool, up_to: usize) { + fn catch_up_cumulative(&mut self, pool: &mut dyn TermPool, up_to: usize) { for i in self.num_cumulative_calculated..std::cmp::max(up_to + 1, self.len()) { let simultaneous = build_simultaneous_substitution(pool, &self.stack[i].mappings).map; let mut cumulative_substitution = simultaneous.clone(); @@ -109,13 +109,13 @@ impl ContextStack { } } - fn get_substitution(&mut self, pool: &mut dyn TPool, index: usize) -> &mut Substitution { + fn get_substitution(&mut self, pool: &mut dyn TermPool, index: usize) -> &mut Substitution { assert!(index < self.len()); self.catch_up_cumulative(pool, index); self.stack[index].cumulative_substitution.as_mut().unwrap() } - pub fn apply_previous(&mut self, pool: &mut dyn TPool, term: &Rc) -> Rc { + pub fn apply_previous(&mut self, pool: &mut dyn TermPool, term: &Rc) -> Rc { if self.len() < 2 { term.clone() } else { @@ -124,7 +124,7 @@ impl ContextStack { } } - pub fn apply(&mut self, pool: &mut dyn TPool, term: &Rc) -> Rc { + pub fn apply(&mut self, pool: &mut dyn TermPool, term: &Rc) -> Rc { if self.is_empty() { term.clone() } else { @@ -135,7 +135,7 @@ 
impl ContextStack { } fn build_simultaneous_substitution( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, mappings: &[(Rc, Rc)], ) -> Substitution { let mut result = Substitution::empty(); diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 4bed083c..0d5efeb3 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -17,7 +17,7 @@ mod tests; pub use context::{Context, ContextStack}; pub use iter::ProofIter; pub use polyeq::{alpha_equiv, polyeq, tracing_polyeq}; -pub use pool::{PrimitivePool, TPool}; +pub use pool::{PrimitivePool, TermPool}; pub use printer::print_proof; pub use rc::Rc; pub use substitution::{Substitution, SubstitutionError}; diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index 41bb56f2..65d79707 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -1,5 +1,5 @@ use super::super::{Rc, Term}; -use super::{PrimitivePool, TPool}; +use super::{PrimitivePool, TermPool}; use ahash::AHashSet; use std::sync::{Arc, RwLock}; @@ -76,7 +76,7 @@ impl ContextPool { } } -impl TPool for ContextPool { +impl TermPool for ContextPool { fn bool_true(&self) -> Rc { self.global_pool.bool_true.clone() } @@ -285,7 +285,7 @@ impl LocalPool { } } -impl TPool for LocalPool { +impl TermPool for LocalPool { fn bool_true(&self) -> Rc { self.ctx_pool.global_pool.bool_true.clone() } diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index 4afdf90a..e1149c43 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -6,7 +6,7 @@ use super::{Rc, Sort, Term}; use crate::ast::Constant; use ahash::{AHashMap, AHashSet}; -pub trait TPool { +pub trait TermPool { /// Returns the term corresponding to the boolean constant `true`. fn bool_true(&self) -> Rc; /// Returns the term corresponding to the boolean constant `false`. 
@@ -171,7 +171,7 @@ impl PrimitivePool { } } -impl TPool for PrimitivePool { +impl TermPool for PrimitivePool { fn bool_true(&self) -> Rc { self.bool_true.clone() } diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index a98b770a..13969b90 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -1,6 +1,6 @@ //! Algorithms for creating and applying capture-avoiding substitutions over terms. -use super::{BindingList, Rc, SortedVar, TPool, Term}; +use super::{BindingList, Rc, SortedVar, Term, TermPool}; use ahash::{AHashMap, AHashSet}; use thiserror::Error; @@ -56,7 +56,7 @@ impl Substitution { /// Constructs a singleton substitution mapping `x` to `t`. This returns an error if the sorts /// of the given terms are not the same, or if `x` is not a variable term. - pub fn single(pool: &mut dyn TPool, x: Rc, t: Rc) -> SubstitutionResult { + pub fn single(pool: &mut dyn TermPool, x: Rc, t: Rc) -> SubstitutionResult { let mut this = Self::empty(); this.insert(pool, x, t)?; Ok(this) @@ -66,7 +66,7 @@ impl Substitution { /// returns an error if any term in the left-hand side is not a variable, or if any term is /// mapped to a term of a different sort. pub fn new( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, map: AHashMap, Rc>, ) -> SubstitutionResult { for (k, v) in map.iter() { @@ -94,7 +94,7 @@ impl Substitution { /// the sorts of the given terms are not the same, or if `x` is not a variable term. pub(crate) fn insert( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, x: Rc, t: Rc, ) -> SubstitutionResult<()> { @@ -125,7 +125,7 @@ impl Substitution { /// Computes which binder variables will need to be renamed, and stores the result in /// `self.should_be_renamed`. 
- fn compute_should_be_renamed(&mut self, pool: &mut dyn TPool) { + fn compute_should_be_renamed(&mut self, pool: &mut dyn TermPool) { if self.should_be_renamed.is_some() { return; } @@ -160,7 +160,7 @@ impl Substitution { } /// Applies the substitution to `term`, and returns the result as a new term. - pub fn apply(&mut self, pool: &mut dyn TPool, term: &Rc) -> Rc { + pub fn apply(&mut self, pool: &mut dyn TermPool, term: &Rc) -> Rc { macro_rules! apply_to_sequence { ($sequence:expr) => { $sequence @@ -217,7 +217,7 @@ impl Substitution { fn can_skip_instead_of_renaming( &self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, binding_list: &[SortedVar], ) -> bool { // Note: this method assumes that `binding_list` is a "sort" binding list. "Value" lists add @@ -248,7 +248,7 @@ impl Substitution { /// binder is a `let` or `lambda` term, `is_value_list` should be true. fn apply_to_binder) -> Term>( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, original_term: &Rc, binding_list: &[SortedVar], inner: &Rc, @@ -292,7 +292,7 @@ impl Substitution { /// a `let` or `lambda` term, `is_value_list` should be true. 
fn rename_binding_list( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, binding_list: &[SortedVar], is_value_list: bool, ) -> (BindingList, Self) { diff --git a/carcara/src/ast/tests.rs b/carcara/src/ast/tests.rs index a2b02089..3bb393d3 100644 --- a/carcara/src/ast/tests.rs +++ b/carcara/src/ast/tests.rs @@ -1,5 +1,5 @@ use crate::{ - ast::{pool::advanced::LocalPool, TPool}, + ast::{pool::advanced::LocalPool, TermPool}, parser::tests::parse_terms, }; use ahash::AHashSet; diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index bd4888be..9ed9110b 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -150,9 +150,8 @@ fn update_premises(commands: &mut [ProofCommand], delta: usize, root_id: &str) { } } -// TODO: Remove generics from here and all other functions -fn insert_missing_assumes( - pool: &mut P, +fn insert_missing_assumes( + pool: &mut PrimitivePool, elaborator: &mut Elaborator, conclusion: &[Rc], proof: &[ProofCommand], @@ -192,8 +191,8 @@ fn insert_missing_assumes( (all, num_added) } -fn insert_cvc5_proof( - pool: &mut P, +fn insert_cvc5_proof( + pool: &mut PrimitivePool, elaborator: &mut Elaborator, mut commands: Vec, conclusion: &[Rc], diff --git a/carcara/src/checker/rules/clausification.rs b/carcara/src/checker/rules/clausification.rs index f4afd066..4a0eb499 100644 --- a/carcara/src/checker/rules/clausification.rs +++ b/carcara/src/checker/rules/clausification.rs @@ -206,7 +206,12 @@ pub fn nary_elim(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult { /// A function to expand terms that fall in the right or left associative cases. For example, /// the term `(=> p q r s)` will be expanded into the term `(=> p (=> q (=> r s)))`. 
- fn expand_assoc(pool: &mut dyn TPool, op: Operator, args: &[Rc], case: Case) -> Rc { + fn expand_assoc( + pool: &mut dyn TermPool, + op: Operator, + args: &[Rc], + case: Case, + ) -> Rc { let (head, tail) = match args { [] => unreachable!(), [t] => return t.clone(), @@ -259,7 +264,7 @@ pub fn nary_elim(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult { /// The first simplification step for `bfun_elim`, that expands quantifiers over boolean variables. fn bfun_elim_first_step( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, bindigns: &[SortedVar], term: &Rc, acc: &mut Vec>, @@ -283,7 +288,7 @@ fn bfun_elim_first_step( /// The second simplification step for `bfun_elim`, that expands function applications over /// non-constant boolean arguments into `ite` terms. fn bfun_elim_second_step( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, func: &Rc, args: &[Rc], processed: usize, @@ -313,7 +318,7 @@ fn bfun_elim_second_step( /// Applies the simplification steps for the `bfun_elim` rule. 
fn apply_bfun_elim( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, term: &Rc, cache: &mut AHashMap, Rc>, ) -> Result, SubstitutionError> { diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index 81361f6e..7c9ba2fa 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -18,7 +18,7 @@ pub struct RuleArgs<'a> { pub(super) conclusion: &'a [Rc], pub(super) premises: &'a [Premise<'a>], pub(super) args: &'a [ProofArg], - pub(super) pool: &'a mut dyn TPool, + pub(super) pool: &'a mut dyn TermPool, pub(super) context: &'a mut ContextStack, // For rules that end a subproof, we need to pass the previous command in the subproof that it diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index bbda58a0..ad3e6503 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -107,7 +107,7 @@ pub fn qnt_rm_unused(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult /// Converts a term into negation normal form, expanding all connectives. fn negation_normal_form( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, term: &Rc, polarity: bool, cache: &mut AHashMap<(Rc, bool), Rc>, @@ -216,7 +216,7 @@ fn distribute(formulas: &[CnfFormula]) -> CnfFormula { /// Prenex all universal quantifiers in a term. This doesn't prenex existential quantifiers. This /// assumes the term is in negation normal form. 
-fn prenex_forall(pool: &mut dyn TPool, acc: &mut C, term: &Rc) -> Rc +fn prenex_forall(pool: &mut dyn TermPool, acc: &mut C, term: &Rc) -> Rc where C: Extend, { @@ -464,7 +464,7 @@ mod tests { use super::*; use crate::parser::tests::*; - fn to_cnf_term(pool: &mut dyn TPool, term: &Rc) -> Rc { + fn to_cnf_term(pool: &mut dyn TermPool, term: &Rc) -> Rc { let nnf = negation_normal_form(pool, term, true, &mut AHashMap::new()); let mut bindings = Vec::new(); let prenexed = prenex_forall(pool, &mut bindings, &nnf); diff --git a/carcara/src/checker/rules/reflexivity.rs b/carcara/src/checker/rules/reflexivity.rs index aa57ccd6..531b1587 100644 --- a/carcara/src/checker/rules/reflexivity.rs +++ b/carcara/src/checker/rules/reflexivity.rs @@ -75,7 +75,7 @@ pub fn strict_refl(RuleArgs { conclusion, pool, context, .. }: RuleArgs) -> Rule fn elaborate_equality( elaborator: &mut Elaborator, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, left: &Rc, right: &Rc, id: &str, diff --git a/carcara/src/checker/rules/resolution.rs b/carcara/src/checker/rules/resolution.rs index f43e555e..f8ac3825 100644 --- a/carcara/src/checker/rules/resolution.rs +++ b/carcara/src/checker/rules/resolution.rs @@ -45,7 +45,7 @@ impl<'a> ClauseCollection<'a> for AHashSet> { } /// Undoes the transformation done by `Rc::remove_all_negations`. 
-fn unremove_all_negations(pool: &mut dyn TPool, (n, term): ResolutionTerm) -> Rc { +fn unremove_all_negations(pool: &mut dyn TermPool, (n, term): ResolutionTerm) -> Rc { let mut term = term.clone(); for _ in 0..n { term = build_term!(pool, (not { term })); @@ -92,7 +92,7 @@ struct ResolutionTrace { fn greedy_resolution( conclusion: &[Rc], premises: &[Premise], - pool: &mut dyn TPool, + pool: &mut dyn TermPool, tracing: bool, ) -> Result { // If we are elaborating, we record which pivot was found for each binary resolution step, so we @@ -339,7 +339,7 @@ pub fn strict_resolution( fn apply_generic_resolution<'a, C: ClauseCollection<'a>>( premises: &'a [Premise], args: &'a [ProofArg], - pool: &mut dyn TPool, + pool: &mut dyn TermPool, ) -> Result { assert_num_premises(premises, 1..)?; let num_steps = premises.len() - 1; @@ -375,7 +375,7 @@ fn apply_generic_resolution<'a, C: ClauseCollection<'a>>( } fn binary_resolution<'a, C: ClauseCollection<'a>>( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, current: &mut C, next: &'a [Rc], pivot: ResolutionTerm<'a>, diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index d83d0180..327d6562 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -40,8 +40,8 @@ macro_rules! simplify { fn generic_simplify_rule( conclusion: &[Rc], - pool: &mut dyn TPool, - simplify_function: fn(&Term, &mut dyn TPool) -> Option>, + pool: &mut dyn TermPool, + simplify_function: fn(&Term, &mut dyn TermPool) -> Option>, ) -> RuleResult { assert_clause_len(conclusion, 1)?; @@ -160,7 +160,7 @@ pub fn eq_simplify(args: RuleArgs) -> RuleResult { /// Used for both the `and_simplify` and `or_simplify` rules, depending on `rule_kind`. `rule_kind` /// has to be either `Operator::And` or `Operator::Or`. 
fn generic_and_or_simplify( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, conclusion: &[Rc], rule_kind: Operator, ) -> RuleResult { @@ -445,7 +445,7 @@ pub fn div_simplify(RuleArgs { conclusion, .. }: RuleArgs) -> RuleResult { /// Used for both the `sum_simplify` and `prod_simplify` rules, depending on `rule_kind`. /// `rule_kind` has to be either `Operator::Add` or `Operator::Mult`. fn generic_sum_prod_simplify_rule( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, ts: &Rc, u: &Rc, rule_kind: Operator, @@ -667,7 +667,7 @@ pub fn comp_simplify(args: RuleArgs) -> RuleResult { } fn apply_ac_simp( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, cache: &mut AHashMap, Rc>, term: &Rc, ) -> Rc { diff --git a/carcara/src/checker/rules/transitivity.rs b/carcara/src/checker/rules/transitivity.rs index 4e7c6507..70fd97d5 100644 --- a/carcara/src/checker/rules/transitivity.rs +++ b/carcara/src/checker/rules/transitivity.rs @@ -204,7 +204,7 @@ pub fn elaborate_eq_transitive( } fn flip_eq_transitive_premises( - pool: &mut dyn TPool, + pool: &mut dyn TermPool, elaborator: &mut Elaborator, new_eq_transitive_step: (usize, usize), new_clause: &[Rc], diff --git a/carcara/src/elaborator/mod.rs b/carcara/src/elaborator/mod.rs index 4d6407fe..c2ec13dd 100644 --- a/carcara/src/elaborator/mod.rs +++ b/carcara/src/elaborator/mod.rs @@ -200,7 +200,7 @@ impl Elaborator { /// index must already be mapped to the new index space. pub fn add_symm_step( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, original_premise: (usize, usize), original_equality: (Rc, Rc), id: String, @@ -221,7 +221,7 @@ impl Elaborator { /// Adds a `refl` step that asserts that the two given terms are equal. 
pub fn add_refl_step( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, a: Rc, b: Rc, id: String, @@ -239,7 +239,7 @@ impl Elaborator { pub fn elaborate_polyeq( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, root_id: &str, a: Rc, b: Rc, @@ -250,7 +250,7 @@ impl Elaborator { pub fn elaborate_assume( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, premise: Rc, term: Rc, id: &str, diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index 95addd97..49525701 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -25,7 +25,12 @@ impl<'a> PolyeqElaborator<'a> { /// Takes two terms that are equal modulo reordering of equalities, and returns a premise that /// proves their equality. - pub fn elaborate(&mut self, pool: &mut dyn TPool, a: Rc, b: Rc) -> (usize, usize) { + pub fn elaborate( + &mut self, + pool: &mut dyn TermPool, + a: Rc, + b: Rc, + ) -> (usize, usize) { // TODO: Make this method return an error instead of panicking if the terms aren't equal let key = (a, b); @@ -39,7 +44,12 @@ impl<'a> PolyeqElaborator<'a> { result } - fn elaborate_impl(&mut self, pool: &mut dyn TPool, a: Rc, b: Rc) -> (usize, usize) { + fn elaborate_impl( + &mut self, + pool: &mut dyn TermPool, + a: Rc, + b: Rc, + ) -> (usize, usize) { if self.directly_eq(pool, &a, &b) { let id = self.inner.get_new_id(self.root_id); return self.inner.add_refl_step(pool, a, b, id); @@ -187,7 +197,7 @@ impl<'a> PolyeqElaborator<'a> { } /// Returns `true` if the terms are directly equal, modulo application of the current context. 
- fn directly_eq(&mut self, pool: &mut dyn TPool, a: &Rc, b: &Rc) -> bool { + fn directly_eq(&mut self, pool: &mut dyn TermPool, a: &Rc, b: &Rc) -> bool { match &mut self.context { Some(c) => c.apply(pool, a) == *b, None => a == b, @@ -196,7 +206,7 @@ impl<'a> PolyeqElaborator<'a> { /// Returns `true` if the terms are equal modulo reordering of inequalities, and modulo /// application of the current context. - fn polyeq(&mut self, pool: &mut dyn TPool, a: &Rc, b: &Rc) -> bool { + fn polyeq(&mut self, pool: &mut dyn TermPool, a: &Rc, b: &Rc) -> bool { match &mut self.context { Some(c) => Polyeq::eq(&mut self.checker, &c.apply(pool, a), b), None => Polyeq::eq(&mut self.checker, a, b), @@ -205,7 +215,7 @@ impl<'a> PolyeqElaborator<'a> { fn build_cong( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, (a, b): (&Rc, &Rc), (a_args, b_args): (&[Rc], &[Rc]), ) -> (usize, usize) { @@ -235,7 +245,7 @@ impl<'a> PolyeqElaborator<'a> { fn flip_equality( &mut self, - pool: &mut dyn TPool, + pool: &mut dyn TermPool, (a, a_left, a_right): (Rc, Rc, Rc), (b, b_left, b_right): (Rc, Rc, Rc), ) -> (usize, usize) { @@ -339,7 +349,11 @@ impl<'a> PolyeqElaborator<'a> { /// Creates the subproof for a `bind` or `bind_let` step, used to derive the equality of /// quantifier or `let` terms. This assumes the accumulator subproof has already been opened. - fn create_bind_subproof(&mut self, pool: &mut dyn TPool, inner_equality: (Rc, Rc)) { + fn create_bind_subproof( + &mut self, + pool: &mut dyn TermPool, + inner_equality: (Rc, Rc), + ) { let (a, b) = inner_equality; let inner_eq = self.elaborate(pool, a.clone(), b.clone()); diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index a08d2d0c..c226b675 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -22,7 +22,7 @@ use std::{io::BufRead, str::FromStr}; /// /// This returns the parsed proof, as well as the `TermPool` used in parsing. Can take any type that /// implements `BufRead`. 
-pub fn parse_instance( +pub fn parse_instance( problem: T, proof: T, apply_function_defs: bool, @@ -92,7 +92,7 @@ pub struct Parser<'a, R, P> { allow_int_real_subtyping: bool, } -impl<'a, R: BufRead, P: TPool> Parser<'a, R, P> { +impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { /// Constructs a new `Parser` from a type that implements `BufRead`. /// /// This operation can fail if there is an IO or lexer error on the first token. diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index dfdb7657..cc502e4c 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -7,7 +7,7 @@ use crate::ast::pool::advanced::LocalPool; const ERROR_MESSAGE: &str = "parser error during test"; -pub fn parse_terms( +pub fn parse_terms( pool: &mut P, definitions: &str, terms: [&str; N], @@ -22,7 +22,7 @@ pub fn parse_terms( }) } -pub fn parse_term(pool: &mut P, input: &str) -> Rc { +pub fn parse_term(pool: &mut P, input: &str) -> Rc { Parser::new(pool, input.as_bytes(), true, false, false) .and_then(|mut parser| parser.parse_term()) .expect(ERROR_MESSAGE) @@ -38,7 +38,7 @@ pub fn parse_term_err(input: &str) -> Error { } /// Parses a proof from a `&str`. Panics if any error is encountered. 
-pub fn parse_proof(pool: &mut P, input: &str) -> Proof { +pub fn parse_proof(pool: &mut P, input: &str) -> Proof { let commands = Parser::new(pool, input.as_bytes(), true, false, false) .expect(ERROR_MESSAGE) .parse_proof() @@ -46,7 +46,7 @@ pub fn parse_proof(pool: &mut P, input: &str) -> Proof { Proof { premises: AHashSet::new(), commands } } -fn run_parser_tests(pool: &mut P, cases: &[(&str, Rc)]) { +fn run_parser_tests(pool: &mut P, cases: &[(&str, Rc)]) { for (case, expected) in cases { let got = parse_term(pool, case); assert_eq!(expected, &got); From 0582dd8a6f1488fb760dc6613808895637084596 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 21 Jul 2023 18:03:40 -0300 Subject: [PATCH 23/70] Added scheduling in statistics gathering --- carcara/src/benchmarking/mod.rs | 14 +++++++++++++- carcara/src/checker/parallel/scheduler/mod.rs | 2 +- carcara/src/lib.rs | 6 +++++- cli/src/benchmarking.rs | 1 + 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index 1478e46a..7575ca5f 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -49,6 +49,7 @@ pub struct RunMeasurement { pub parsing: Duration, pub checking: Duration, pub elaboration: Duration, + pub scheduling: Duration, pub total: Duration, pub polyeq: Duration, pub assume: Duration, @@ -60,6 +61,7 @@ pub struct OnlineBenchmarkResults { pub parsing: OnlineMetrics, pub checking: OnlineMetrics, pub elaborating: OnlineMetrics, + pub scheduling: OnlineMetrics, pub total_accounted_for: OnlineMetrics, pub total: OnlineMetrics, pub step_time: OnlineMetrics, @@ -105,6 +107,11 @@ impl OnlineBenchmarkResults { &self.elaborating } + /// The time per run to schedule the threads tasks. + pub fn scheduling(&self) -> &OnlineMetrics { + &self.scheduling + } + /// The combined time per run to parse, check, and elaborate all the steps in the proof. 
pub fn total_accounted_for(&self) -> &OnlineMetrics { &self.total_accounted_for @@ -132,10 +139,11 @@ impl OnlineBenchmarkResults { /// Prints the benchmark results pub fn print(&self, sort_by_total: bool) { - let [parsing, checking, elaborating, accounted_for, total] = [ + let [parsing, checking, elaborating, scheduling, accounted_for, total] = [ self.parsing(), self.checking(), self.elaborating(), + self.scheduling(), self.total_accounted_for(), self.total(), ] @@ -152,6 +160,7 @@ impl OnlineBenchmarkResults { if !elaborating.is_empty() { println!("elaborating: {}", elaborating); } + println!("scheduling: {}", scheduling); println!( "on assume: {} ({:.02}% of checking time)", self.assume_time, @@ -396,6 +405,7 @@ impl CollectResults for OnlineBenchmarkResults { parsing, checking, elaboration, + scheduling, total, polyeq, assume, @@ -405,6 +415,7 @@ impl CollectResults for OnlineBenchmarkResults { self.parsing.add_sample(id, parsing); self.checking.add_sample(id, checking); self.elaborating.add_sample(id, elaboration); + self.scheduling.add_sample(id, scheduling); self.total_accounted_for.add_sample(id, parsing + checking); self.total.add_sample(id, total); @@ -423,6 +434,7 @@ impl CollectResults for OnlineBenchmarkResults { parsing: a.parsing.combine(b.parsing), checking: a.checking.combine(b.checking), elaborating: a.elaborating.combine(b.elaborating), + scheduling: a.scheduling.combine(b.scheduling), total_accounted_for: a.total_accounted_for.combine(b.total_accounted_for), total: a.total.combine(b.total), step_time: a.step_time.combine(b.step_time), diff --git a/carcara/src/checker/parallel/scheduler/mod.rs b/carcara/src/checker/parallel/scheduler/mod.rs index 7b0c0d49..5d4b58f8 100644 --- a/carcara/src/checker/parallel/scheduler/mod.rs +++ b/carcara/src/checker/parallel/scheduler/mod.rs @@ -40,7 +40,7 @@ impl Schedule { self.steps.last() } - /// Returns an iterator over the proof commands. See [`ProofIter`]. + /// Returns an iterator over the proof commands. 
See [`ScheduleIter`]. pub fn iter<'a>(&'a self, proof: &'a [ProofCommand]) -> ScheduleIter { ScheduleIter::new(proof, &self.steps) } diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 46ad8769..deaa6f99 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -163,8 +163,10 @@ pub fn check( // Checking let checking = Instant::now(); - let mut checker = checker::ParallelProofChecker::new(Arc::new(pool), config, &prelude); let (scheduler, _) = Scheduler::new(num_threads, &proof); + run_measures.scheduling = checking.elapsed(); + let mut checker = checker::ParallelProofChecker::new(Arc::new(pool), config, &prelude); + if options.stats { let mut checker_stats = CheckerStatistics { file_name: "this", @@ -185,6 +187,7 @@ pub fn check( parsing: run_measures.parsing, checking: run_measures.checking, elaboration: checker_stats.elaboration_time, + scheduling: run_measures.scheduling, total: run_measures.total, polyeq: checker_stats.polyeq_time, assume: checker_stats.assume_time, @@ -246,6 +249,7 @@ pub fn check_and_elaborate( parsing: run_measures.parsing, checking: run_measures.checking, elaboration: checker_stats.elaboration_time, + scheduling: run_measures.scheduling, total: run_measures.total, polyeq: checker_stats.polyeq_time, assume: checker_stats.assume_time, diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index a7046e2c..c4752803 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -73,6 +73,7 @@ fn run_job( parsing, checking, elaboration: checker_stats.elaboration_time, + scheduling: Duration::ZERO, total, polyeq: checker_stats.polyeq_time, assume: checker_stats.assume_time, From 73cb6ff6b61b05acb281f22ccd808b13c1da0291 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Sat, 22 Jul 2023 21:20:36 -0300 Subject: [PATCH 24/70] Remove --- carcara/src/ast/mod.rs | 5 --- carcara/src/ast/printer.rs | 1 - carcara/src/checker/lia_generic.rs | 1 - carcara/src/checker/mod.rs | 2 - 
carcara/src/checker/parallel/mod.rs | 40 +++++++++++++------ .../src/checker/parallel/scheduler/iter.rs | 16 ++++---- .../src/checker/parallel/scheduler/weights.rs | 1 - carcara/src/elaborator/pruning.rs | 1 - 8 files changed, 37 insertions(+), 30 deletions(-) diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 0d5efeb3..5b079483 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -75,9 +75,6 @@ pub enum ProofCommand { /// A subproof. Subproof(Subproof), - - /// A subproof closing step - Closing, } impl ProofCommand { @@ -89,7 +86,6 @@ impl ProofCommand { ProofCommand::Assume { id, .. } => id, ProofCommand::Step(s) => &s.id, ProofCommand::Subproof(s) => s.commands.last().unwrap().id(), - ProofCommand::Closing => "", } } @@ -103,7 +99,6 @@ impl ProofCommand { ProofCommand::Assume { id: _, term } => std::slice::from_ref(term), ProofCommand::Step(ProofStep { clause, .. }) => clause, ProofCommand::Subproof(s) => s.commands.last().unwrap().clause(), - ProofCommand::Closing => &[], } } diff --git a/carcara/src/ast/printer.rs b/carcara/src/ast/printer.rs index 46632ea0..b1d38916 100644 --- a/carcara/src/ast/printer.rs +++ b/carcara/src/ast/printer.rs @@ -148,7 +148,6 @@ impl<'a> PrintProof for AlethePrinter<'a> { } write!(self.inner, ")")?; } - ProofCommand::Closing => {} } writeln!(self.inner)?; } diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index 9ed9110b..d91d96b7 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -145,7 +145,6 @@ fn update_premises(commands: &mut [ProofCommand], delta: usize, root_id: &str) { ProofCommand::Subproof(s) => { update_premises(&mut s.commands, delta, root_id); } - ProofCommand::Closing => {} } } } diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 9cda8111..bece1ade 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -173,7 +173,6 @@ impl<'c> ProofChecker<'c> { }); } } - 
ProofCommand::Closing => {} } } if self.config.is_running_test || self.reached_empty_clause { @@ -264,7 +263,6 @@ impl<'c> ProofChecker<'c> { }); } } - ProofCommand::Closing => {} } } if self.config.is_running_test || self.reached_empty_clause { diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index fe207301..1fe256c4 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -78,8 +78,22 @@ impl<'c> ParallelProofChecker<'c> { .stack_size(STACK_SIZE) .spawn_scoped(s, move || -> CarcaraResult<(bool, bool)> { let mut iter = schedule.iter(&proof.commands[..]); + let mut last_depth = 0; while let Some(command) = iter.next() { + // If there is any depth difference between the current and last step + while (last_depth - iter.depth() as i64 > 0) + || (last_depth - iter.depth() as i64 == 0 + && matches!(command, ProofCommand::Subproof(_))) + { + // If this is the last command of a subproof, we have to pop off the subproof + // commands of the stack. The parser already ensures that the last command + // in a subproof is always a `step` command + local_self.context.pop(); + last_depth -= 1; + } + last_depth = iter.depth() as i64; + match command { ProofCommand::Step(step) => { // If this step ends a subproof, it might need to implicitly reference the @@ -155,12 +169,6 @@ impl<'c> ParallelProofChecker<'c> { }); } } - ProofCommand::Closing => { - // If this is the last command of a subproof, we have to pop off the subproof - // commands of the stack. 
The parser already ensures that the last command - // in a subproof is always a `step` command - local_self.context.pop(); - } } // Verify if any of the other threads found an error and abort in case of positive if *should_abort.read().unwrap() { @@ -253,8 +261,22 @@ impl<'c> ParallelProofChecker<'c> { s, move || -> CarcaraResult<(bool, bool, CheckerStatistics)> { let mut iter = schedule.iter(&proof.commands[..]); + let mut last_depth = 0; while let Some(command) = iter.next() { + // If there is any depth difference between the current and last step + while (last_depth - iter.depth() as i64 > 0) + || (last_depth - iter.depth() as i64 == 0 + && matches!(command, ProofCommand::Subproof(_))) + { + // If this is the last command of a subproof, we have to pop off the subproof + // commands of the stack. The parser already ensures that the last command + // in a subproof is always a `step` command + local_self.context.pop(); + last_depth -= 1; + } + last_depth = iter.depth() as i64; + match command { ProofCommand::Step(step) => { // If this step ends a subproof, it might need to implicitly reference the @@ -344,12 +366,6 @@ impl<'c> ParallelProofChecker<'c> { }); } } - ProofCommand::Closing => { - // If this is the last command of a subproof, we have to pop off the subproof - // commands of the stack. 
The parser already ensures that the last command - // in a subproof is always a `step` command - local_self.context.pop(); - } } // Verify if any of the other threads found an error and abort in case of positive if *should_abort.read().unwrap() { diff --git a/carcara/src/checker/parallel/scheduler/iter.rs b/carcara/src/checker/parallel/scheduler/iter.rs index e5559414..48151055 100644 --- a/carcara/src/checker/parallel/scheduler/iter.rs +++ b/carcara/src/checker/parallel/scheduler/iter.rs @@ -55,15 +55,17 @@ impl<'a> Iterator for ScheduleIter<'a> { fn next(&mut self) -> Option { // If it isn't the end of the steps if self.step_id < self.steps.len() { - let cur_step = self.steps[self.step_id]; - self.step_id += 1; - // If current step is an closing subproof - if let (_, usize::MAX) = cur_step { - return Some(&ProofCommand::Closing); - } - while cur_step.0 != self.proof_stack.len() - 1 { + // If current step is an closing subproof step + while let (_, usize::MAX) = self.steps[self.step_id] { self.proof_stack.pop(); + self.step_id += 1; + // If reached the last closing step of the whole proof + if self.step_id == self.steps.len() { + return None; + } } + let cur_step = self.steps[self.step_id]; + self.step_id += 1; let top = self.proof_stack.last().unwrap(); let command = &top[cur_step.1]; diff --git a/carcara/src/checker/parallel/scheduler/weights.rs b/carcara/src/checker/parallel/scheduler/weights.rs index 61d11c99..9d2c3a5d 100644 --- a/carcara/src/checker/parallel/scheduler/weights.rs +++ b/carcara/src/checker/parallel/scheduler/weights.rs @@ -126,6 +126,5 @@ pub fn get_step_weight(step: &ProofCommand) -> u64 { _ => 0, } } - ProofCommand::Closing => 0, } } diff --git a/carcara/src/elaborator/pruning.rs b/carcara/src/elaborator/pruning.rs index 84ad03ec..0c7d81b7 100644 --- a/carcara/src/elaborator/pruning.rs +++ b/carcara/src/elaborator/pruning.rs @@ -94,7 +94,6 @@ pub fn slice_proof( }; stack.push(frame); } - ProofCommand::Closing => {} } } let mut frame = 
stack.pop().unwrap(); From 7b47556470190ad9f469bcce1fe2d4303ec935b4 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Sun, 23 Jul 2023 09:23:20 -0300 Subject: [PATCH 25/70] Resolved some TODO's tasks --- carcara/src/checker/mod.rs | 1 + carcara/src/checker/parallel/mod.rs | 18 +++++++++++------- carcara/src/lib.rs | 4 +++- cli/src/main.rs | 11 +++++++++++ 4 files changed, 26 insertions(+), 8 deletions(-) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index bece1ade..12671cd4 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -216,6 +216,7 @@ impl<'c> ProofChecker<'c> { // If this is the last command of a subproof, we have to pop the subproof // commands off of the stack. The parser already ensures that the last command // in a subproof is always a `step` command + // TODO: Use depth diff to pop context off if is_end_of_subproof { self.context.pop(); if let Some(elaborator) = &mut self.elaborator { diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 1fe256c4..1e3de6fb 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -25,10 +25,16 @@ pub struct ParallelProofChecker<'c> { context: ContextStack, reached_empty_clause: bool, is_holey: bool, + stack_size: usize, } impl<'c> ParallelProofChecker<'c> { - pub fn new(pool: Arc, config: Config, prelude: &'c ProblemPrelude) -> Self { + pub fn new( + pool: Arc, + config: Config, + prelude: &'c ProblemPrelude, + stack_size: usize, + ) -> Self { ParallelProofChecker { pool, config, @@ -36,6 +42,7 @@ impl<'c> ParallelProofChecker<'c> { context: ContextStack::new(), reached_empty_clause: false, is_holey: false, + stack_size, } } @@ -48,6 +55,7 @@ impl<'c> ParallelProofChecker<'c> { context: ContextStack::new(), reached_empty_clause: false, is_holey: false, + stack_size: self.stack_size, } } @@ -60,8 +68,6 @@ impl<'c> ParallelProofChecker<'c> { // thread already 
found out an invalid step) let premature_abort = Arc::new(RwLock::new(false)); let context_pool = ContextPool::from_global(&self.pool); - // TODO: Add stack size flag - const STACK_SIZE: usize = 128 * 1024 * 1024; // thread::scope(|s| { let threads: Vec<_> = (&scheduler.loads) @@ -75,7 +81,7 @@ impl<'c> ParallelProofChecker<'c> { thread::Builder::new() .name(format!("worker-{i}")) - .stack_size(STACK_SIZE) + .stack_size(self.stack_size) .spawn_scoped(s, move || -> CarcaraResult<(bool, bool)> { let mut iter = schedule.iter(&proof.commands[..]); let mut last_depth = 0; @@ -233,8 +239,6 @@ impl<'c> ParallelProofChecker<'c> { // thread already found out an invalid step) let premature_abort = Arc::new(RwLock::new(false)); let context_pool = ContextPool::from_global(&self.pool); - // TODO: Add stack size flag - const STACK_SIZE: usize = 128 * 1024 * 1024; // thread::scope(|s| { let threads: Vec<_> = (&scheduler.loads) @@ -256,7 +260,7 @@ impl<'c> ParallelProofChecker<'c> { thread::Builder::new() .name(format!("worker-{i}")) - .stack_size(STACK_SIZE) + .stack_size(self.stack_size) .spawn_scoped( s, move || -> CarcaraResult<(bool, bool, CheckerStatistics)> { diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index deaa6f99..0dc3f1f5 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -140,6 +140,7 @@ pub fn check( proof: T, options: CarcaraOptions, num_threads: usize, + stack_size: usize, ) -> Result { use crate::checker::Scheduler; use std::sync::Arc; @@ -165,7 +166,8 @@ pub fn check( let checking = Instant::now(); let (scheduler, _) = Scheduler::new(num_threads, &proof); run_measures.scheduling = checking.elapsed(); - let mut checker = checker::ParallelProofChecker::new(Arc::new(pool), config, &prelude); + let mut checker = + checker::ParallelProofChecker::new(Arc::new(pool), config, &prelude, stack_size); if options.stats { let mut checker_stats = CheckerStatistics { diff --git a/cli/src/main.rs b/cli/src/main.rs index ef1b06a0..d9c080bb 100644 --- 
a/cli/src/main.rs +++ b/cli/src/main.rs @@ -91,6 +91,13 @@ struct StatsOptions { stats: bool, } +#[derive(Args)] +struct StackOptions { + /// Defines the thread stack size for each check worker (does not include the main thread stack size, which should be set manually). + #[clap(long, default_value = "0")] + stack_size: usize, +} + #[derive(Args, Clone, Copy)] struct ParsingOptions { /// Expand function definitions introduced by `define-fun`s in the SMT problem. If this flag is @@ -195,6 +202,9 @@ struct CheckCommandOptions { #[clap(flatten)] stats: StatsOptions, + + #[clap(flatten)] + stack: StackOptions, } #[derive(Args)] @@ -354,6 +364,7 @@ fn check_command(options: CheckCommandOptions) -> CliResult { proof, build_carcara_options(options.parsing, options.checking, options.stats), options.num_threads, + options.stack.stack_size, ) .map_err(Into::into) } From 322ed9f9e67028c8dc151efd35ededb63e373bb1 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Mon, 24 Jul 2023 14:45:13 -0300 Subject: [PATCH 26/70] Fixed some tests issues --- carcara/src/ast/iter.rs | 2 +- carcara/src/ast/macros.rs | 12 ++++++------ carcara/src/parser/tests.rs | 1 + 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/carcara/src/ast/iter.rs b/carcara/src/ast/iter.rs index c9b16847..46e3e8ac 100644 --- a/carcara/src/ast/iter.rs +++ b/carcara/src/ast/iter.rs @@ -28,7 +28,7 @@ use super::*; /// (step t5 (cl) :rule resolution :premises (t4 h1 h2)) /// " /// .as_bytes(); -/// let (_, proof, _) = parser::parse_instance("".as_bytes(), proof, true, false, false)?; +/// let (_, proof, _) : (ast::ProblemPrelude, ast::Proof, ast::pool::PrimitivePool) = parser::parse_instance("".as_bytes(), proof, true, false, false)?; /// let ids: Vec<_> = proof.iter().map(|c| c.id()).collect(); /// assert_eq!(ids, ["h1", "h2", "t3", "t3.t1", "t3.t2", "t3", "t4", "t5"]); /// # Ok(()) diff --git a/carcara/src/ast/macros.rs b/carcara/src/ast/macros.rs index 
9001b921..61e8c2eb 100644 --- a/carcara/src/ast/macros.rs +++ b/carcara/src/ast/macros.rs @@ -22,7 +22,7 @@ /// Removing two leading negations from a term: /// ``` /// # use carcara::{ast::*, build_term, match_term}; -/// # let mut pool = TermPool::new(); +/// # let mut pool = PrimitivePool::new(); /// # let t = build_term!(pool, (not (not {pool.bool_false()}))); /// let p = match_term!((not (not p)) = t).unwrap(); /// ``` @@ -31,7 +31,7 @@ /// ``` /// # use carcara::{ast::*, match_term, parser::*}; /// # pub fn parse_term(input: &str) -> Rc { -/// # let mut pool = TermPool::new(); +/// # let mut pool = PrimitivePool::new(); /// # let mut parser = Parser::new(&mut pool, input.as_bytes(), true, false, false).unwrap(); /// # parser.parse_term().unwrap() /// # } @@ -42,7 +42,7 @@ /// Pattern matching against boolean constants: /// ``` /// # use carcara::{ast::*, build_term, match_term}; -/// # let mut pool = TermPool::new(); +/// # let mut pool = PrimitivePool::new(); /// # let t = build_term!(pool, (or {pool.bool_false()} {pool.bool_false()})); /// let (p, ()) = match_term!((or p false) = t).unwrap(); /// ``` @@ -51,7 +51,7 @@ /// ``` /// # use carcara::{ast::*, match_term, parser::*}; /// # pub fn parse_term(input: &str) -> Rc { -/// # let mut pool = TermPool::new(); +/// # let mut pool = PrimitivePool::new(); /// # let mut parser = Parser::new(&mut pool, input.as_bytes(), true, false, false).unwrap(); /// # parser.parse_term().unwrap() /// # } @@ -62,7 +62,7 @@ /// Pattern matching against a variable number of arguments: /// ``` /// # use carcara::{ast::*, build_term, match_term}; -/// # let mut pool = TermPool::new(); +/// # let mut pool = PrimitivePool::new(); /// # let t = build_term!(pool, (and {pool.bool_false()} {pool.bool_false()})); /// let args: &[Rc] = match_term!((and ...) = t).unwrap(); /// ``` @@ -175,7 +175,7 @@ macro_rules! 
match_term_err { /// Building the term `(and true (not false))`: /// ``` /// # use carcara::{ast::*, build_term, match_term}; -/// let mut pool = TermPool::new(); +/// let mut pool = PrimitivePool::new(); /// let t = build_term!(pool, (and {pool.bool_true()} (not {pool.bool_false()}))); /// assert!(match_term!((and true (not false)) = t).is_some()); /// ``` diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index cc502e4c..5fc45cfb 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -83,6 +83,7 @@ fn test_hash_consing() { "true", "false", "1", + "Int", "2", "(+ 1 2)", "(* (+ 1 2) (+ 1 2))", From 5a54bf7ea97aff8cedbfaccbffc3a9d316a3d79e Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Mon, 24 Jul 2023 15:20:07 -0300 Subject: [PATCH 27/70] Fixing linter --- carcara/src/ast/pool/advanced.rs | 4 +-- carcara/src/benchmarking/mod.rs | 4 +-- carcara/src/checker/mod.rs | 16 +++++----- carcara/src/checker/parallel/mod.rs | 30 +++++++----------- carcara/src/checker/parallel/scheduler/mod.rs | 31 ++++++++++--------- .../src/checker/parallel/scheduler/weights.rs | 6 ++-- carcara/src/lib.rs | 4 +-- 7 files changed, 44 insertions(+), 51 deletions(-) diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index 65d79707..49597e29 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -277,7 +277,7 @@ impl LocalPool { sort.clone() } // A sort inserted by context - else if let Some(entry) = ctx_pool.terms.get(&term) { + else if let Some(entry) = ctx_pool.terms.get(term) { entry.clone() } else { local_pool.sorts_cache[term].clone() @@ -322,7 +322,7 @@ impl TermPool for LocalPool { sort.clone() } // A sort inserted by context - else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { + else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(term) { entry.clone() } else { 
self.storage.sorts_cache[term].clone() diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index 7575ca5f..f17d24fb 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -185,9 +185,9 @@ impl OnlineBenchmarkResults { for (rule, data) in data_by_rule { print!(" {: <18}", rule); if sort_by_total { - println!("{:#}", data) + println!("{:#}", data); } else { - println!("{}", data) + println!("{}", data); } } diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 12671cd4..6d4a1973 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -182,10 +182,10 @@ impl<'c> ProofChecker<'c> { } } - pub fn check_with_stats<'s, 'p, 'a, CR: CollectResults + Send + Default>( + pub fn check_with_stats<'s, CR: CollectResults + Send + Default>( &'s mut self, - proof: &'p Proof, - stats: &'s mut CheckerStatistics<'a, CR>, + proof: &Proof, + stats: &'s mut CheckerStatistics, ) -> CarcaraResult { // Similarly to the parser, to avoid stack overflows in proofs with many nested subproofs, // we check the subproofs iteratively, instead of recursively @@ -287,10 +287,10 @@ impl<'c> ProofChecker<'c> { Ok((self.is_holey, proof)) } - pub fn check_and_elaborate_with_stats<'s, 'a, CR: CollectResults + Send + Default>( + pub fn check_and_elaborate_with_stats<'s, CR: CollectResults + Send + Default>( &'s mut self, mut proof: Proof, - stats: &'s mut CheckerStatistics<'a, CR>, + stats: &'s mut CheckerStatistics, ) -> CarcaraResult<(bool, Proof)> { self.elaborator = Some(Elaborator::new()); let result = self.check_with_stats(&proof, stats); @@ -307,13 +307,13 @@ impl<'c> ProofChecker<'c> { Ok((self.is_holey, proof)) } - fn check_assume<'a, CR: CollectResults + Send + Default>( + fn check_assume( &mut self, id: &str, term: &Rc, premises: &AHashSet>, iter: &ProofIter, - mut stats: Option<&'a mut CheckerStatistics>, + mut stats: Option<&mut CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -406,7 
+406,7 @@ impl<'c> ProofChecker<'c> { let is_hole = lia_generic::lia_generic_single_thread( self.pool, &step.clause, - &self.prelude, + self.prelude, self.elaborator.as_mut(), &step.id, ); diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 1e3de6fb..b435147e 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -59,11 +59,7 @@ impl<'c> ParallelProofChecker<'c> { } } - pub fn check<'s, 'p>( - &'s mut self, - proof: &'p Proof, - scheduler: &'s Scheduler, - ) -> CarcaraResult { + pub fn check<'s>(&'s mut self, proof: &Proof, scheduler: &'s Scheduler) -> CarcaraResult { // Used to estimulate threads to abort prematurely (only happens when a // thread already found out an invalid step) let premature_abort = Arc::new(RwLock::new(false)); @@ -71,7 +67,7 @@ impl<'c> ParallelProofChecker<'c> { // thread::scope(|s| { let threads: Vec<_> = (&scheduler.loads) - .into_iter() + .iter() .enumerate() .map(|(i, schedule)| { // Shares the self between threads @@ -217,9 +213,7 @@ impl<'c> ParallelProofChecker<'c> { }); // If an error happend - if let Err(x) = err { - return Err(x); - } + err?; if reached { Ok(holey) @@ -229,11 +223,11 @@ impl<'c> ParallelProofChecker<'c> { }) } - pub fn check_with_stats<'s, 'p, 'a, CR: CollectResults + Send + Default>( + pub fn check_with_stats<'s, CR: CollectResults + Send + Default>( &'s mut self, - proof: &'p Proof, + proof: &Proof, scheduler: &'s Scheduler, - stats: &'s mut CheckerStatistics<'a, CR>, + stats: &'s mut CheckerStatistics, ) -> CarcaraResult { // Used to estimulate threads to abort prematurely (only happens when a // thread already found out an invalid step) @@ -242,7 +236,7 @@ impl<'c> ParallelProofChecker<'c> { // thread::scope(|s| { let threads: Vec<_> = (&scheduler.loads) - .into_iter() + .iter() .enumerate() .map(|(i, schedule)| { let mut local_stats = CheckerStatistics { @@ -428,9 +422,7 @@ impl<'c> ParallelProofChecker<'c> { }); // If an error 
happend - if let Err(x) = err { - return Err(x); - } + err?; if reached { Ok(holey) @@ -440,13 +432,13 @@ impl<'c> ParallelProofChecker<'c> { }) } - fn check_assume<'a, CR: CollectResults + Send + Default>( + fn check_assume( &mut self, id: &str, term: &Rc, premises: &AHashSet>, iter: &ScheduleIter, - mut stats: Option<&'a mut CheckerStatistics>, + mut stats: Option<&mut CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -520,7 +512,7 @@ impl<'c> ParallelProofChecker<'c> { if step.rule == "lia_generic" { if self.config.lia_via_cvc5 { - let is_hole = lia_generic::lia_generic_multi_thread(&step.clause, &self.prelude); + let is_hole = lia_generic::lia_generic_multi_thread(&step.clause, self.prelude); self.is_holey = self.is_holey || is_hole; } else { log::warn!("encountered \"lia_generic\" rule, ignoring"); diff --git a/carcara/src/checker/parallel/scheduler/mod.rs b/carcara/src/checker/parallel/scheduler/mod.rs index 5d4b58f8..df9a61e9 100644 --- a/carcara/src/checker/parallel/scheduler/mod.rs +++ b/carcara/src/checker/parallel/scheduler/mod.rs @@ -1,5 +1,5 @@ -pub(crate) mod iter; -pub(crate) mod weights; +pub mod iter; +pub mod weights; use crate::ast::{Proof, ProofCommand}; use iter::ScheduleIter; @@ -56,12 +56,7 @@ struct AssignedLoad(u64, usize); impl Ord for AssignedLoad { fn cmp(&self, other: &Self) -> Ordering { - if self.0 > other.0 { - return Ordering::Less; - } else if self.0 < other.0 { - return Ordering::Greater; - } - return Ordering::Equal; + other.0.cmp(&self.0) } } @@ -103,6 +98,12 @@ pub struct Scheduler { pub loads: Vec, } +impl Default for Schedule { + fn default() -> Self { + Self::new() + } +} + impl Scheduler { /// Creates a thread scheduler for this proof using a specific number of /// workers. 
This scheduler is responsible for balancing the load (the @@ -116,19 +117,19 @@ impl Scheduler { let mut pq = BinaryHeap::::new(); let mut context_usage = vec![]; for i in 0..num_workers { - pq.push(AssignedLoad { 0: 0, 1: i }); + pq.push(AssignedLoad(0, i)); } loop { // Pop the finished subproofs - while stack.len() != 0 && { + while !stack.is_empty() && { let top = stack.last().unwrap(); top.id == top.cmds.len() } { for schedule_id in &stack.last().unwrap().used_by { let last = loads[*schedule_id].last().unwrap(); // If it's an useless context insertion - if last.0 <= stack.len() - 1 + if last.0 < stack.len() && matches!(stack[last.0].cmds[last.1], ProofCommand::Subproof(_)) { // Make sure this context usage count is reduced @@ -147,7 +148,7 @@ impl Scheduler { } stack.pop(); } - if stack.len() == 0 { + if stack.is_empty() { break; } // @@ -157,12 +158,12 @@ impl Scheduler { let step_weight = get_step_weight(&top.cmds[top.id]); assert!(u64::MAX - step_weight >= load, "Weight balancing overflow!"); load += step_weight; - pq.push(AssignedLoad { 0: load, 1: load_index }); + pq.push(AssignedLoad(load, load_index)); } let depth = stack.len() - 1; let (mut i, initial_layer) = (1, { - let tmp = loads[load_index].last().unwrap_or_else(|| &(0, 0)); + let tmp = loads[load_index].last().unwrap_or(&(0, 0)); if tmp.1 == usize::MAX { tmp.0 - 1 } else { @@ -173,7 +174,7 @@ impl Scheduler { // but it was not assigned to this schedule yet while initial_layer + i <= depth { let subproof_oppening = stack[initial_layer + i].pre_req.unwrap(); - let last_inserted = *loads[load_index].last().unwrap_or_else(|| &(usize::MAX, 0)); + let last_inserted = *loads[load_index].last().unwrap_or(&(usize::MAX, 0)); if last_inserted != subproof_oppening { loads[load_index].push(subproof_oppening); diff --git a/carcara/src/checker/parallel/scheduler/weights.rs b/carcara/src/checker/parallel/scheduler/weights.rs index 9d2c3a5d..fc9a6f0c 100644 --- a/carcara/src/checker/parallel/scheduler/weights.rs 
+++ b/carcara/src/checker/parallel/scheduler/weights.rs @@ -8,9 +8,9 @@ use crate::ast::ProofCommand; /// computed), it's for better of scheduler architecture that subproofs have a /// null weight. /// -/// If you're interested in these weight values, take a look at Carcara's paper -/// published at TACAS in April 2023 -/// (https://hanielbarbosa.com/papers/tacas2023.pdf) and its benchmark data. +/// If you're interested in these weight values, take a look at [Carcara's +/// paper](https://hanielbarbosa.com/papers/tacas2023.pdf) +/// published at TACAS in April 2023 and its benchmark data. /// /// The rules with null weight are rules that we had no info about the median /// performance, since the solver used in the paper dataset does not generate diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 0dc3f1f5..d0366e98 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -184,7 +184,7 @@ pub fn check( run_measures.total = total.elapsed(); checker_stats.results.add_run_measurement( - &("this".to_string(), 0), + &("this".to_owned(), 0), RunMeasurement { parsing: run_measures.parsing, checking: run_measures.checking, @@ -246,7 +246,7 @@ pub fn check_and_elaborate( run_measures.total = total.elapsed(); checker_stats.results.add_run_measurement( - &("this".to_string(), 0), + &("this".to_owned(), 0), RunMeasurement { parsing: run_measures.parsing, checking: run_measures.checking, From 9b1973d6a99df822e6dedeef919e3e500a4e54d0 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Mon, 24 Jul 2023 15:27:03 -0300 Subject: [PATCH 28/70] More linting --- carcara/src/checker/parallel/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index b435147e..15a45414 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -66,7 +66,8 @@ impl<'c> ParallelProofChecker<'c> { let context_pool = 
ContextPool::from_global(&self.pool); // thread::scope(|s| { - let threads: Vec<_> = (&scheduler.loads) + let threads: Vec<_> = scheduler + .loads .iter() .enumerate() .map(|(i, schedule)| { @@ -235,7 +236,8 @@ impl<'c> ParallelProofChecker<'c> { let context_pool = ContextPool::from_global(&self.pool); // thread::scope(|s| { - let threads: Vec<_> = (&scheduler.loads) + let threads: Vec<_> = scheduler + .loads .iter() .enumerate() .map(|(i, schedule)| { From fa27230f1b678f09df1173cab75107c378de160e Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Tue, 25 Jul 2023 18:51:53 -0300 Subject: [PATCH 29/70] Fixing some `Sort` unecessary operations --- carcara/src/ast/context.rs | 6 ++---- carcara/src/ast/pool/mod.rs | 21 +++------------------ carcara/src/ast/substitution.rs | 2 +- carcara/src/checker/rules/quantifier.rs | 2 +- carcara/src/checker/rules/subproof.rs | 2 +- carcara/src/elaborator/polyeq.rs | 5 +---- carcara/src/parser/mod.rs | 10 ++++------ 7 files changed, 13 insertions(+), 35 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 3c0c30ee..be79c6ea 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -53,8 +53,7 @@ impl ContextStack { // we use the current state of the hash map to transform `(f y)` into `(f z)`. 
The // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` for (var, value) in assignment_args.iter() { - let sort = pool.sort(value).as_ref().clone(); - let var_term = Term::new_var(var, pool.add(sort)); + let var_term = Term::new_var(var, pool.sort(value)); let var_term = pool.add(var_term); substitution.insert(pool, var_term.clone(), value.clone())?; let new_value = substitution_until_fixed_point.apply(pool, value); @@ -64,8 +63,7 @@ impl ContextStack { let mappings = assignment_args .iter() .map(|(var, value)| { - let sort = pool.sort(value).as_ref().clone(); - let var_term = (var.clone(), pool.add(sort)).into(); + let var_term = (var.clone(), pool.sort(value)).into(); (pool.add(var_term), value.clone()) }) .collect(); diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index e1149c43..19e909fd 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -191,22 +191,8 @@ impl TermPool for PrimitivePool { } fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. 
This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if self.free_vars_cache.contains_key(term) { - return self.free_vars_cache.get(term).unwrap().clone(); + if let Some(vars) = self.free_vars_cache.get(term) { + return vars.clone(); } let set = match term.as_ref() { Term::App(f, args) => { @@ -234,8 +220,7 @@ impl TermPool for PrimitivePool { Term::Let(bindings, inner) => { let mut vars = self.free_vars(inner); for (var, value) in bindings { - let sort = self.sort(value).as_ref().clone(); - let sort = self.add(sort); + let sort = self.sort(value); let term = self.add((var.clone(), sort).into()); vars.remove(&term); } diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index 13969b90..d78af0e0 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -304,7 +304,7 @@ impl Substitution { // If the binding list is a "sort" binding list, then `value` will be the variable's // sort. 
Otherwise, we need to get the sort of `value` let sort = if is_value_list { - pool.add(pool.sort(value).as_ref().clone()) + pool.sort(value) } else { value.clone() }; diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index ad3e6503..14fdc171 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -24,7 +24,7 @@ pub fn forall_inst( .iter() .map(|arg| { let (arg_name, arg_value) = arg.as_assign()?; - let arg_sort = pool.sort(arg_value).clone(); + let arg_sort = pool.sort(arg_value); rassert!( bindings.remove(&(arg_name.clone(), arg_sort.clone())), QuantifierError::NoBindingMatchesArg(arg_name.clone()) diff --git a/carcara/src/checker/rules/subproof.rs b/carcara/src/checker/rules/subproof.rs index f6925d46..a78aec53 100644 --- a/carcara/src/checker/rules/subproof.rs +++ b/carcara/src/checker/rules/subproof.rs @@ -166,7 +166,7 @@ pub fn r#let( let mut pairs: Vec<_> = let_bindings .iter() .map(|(x, t)| { - let sort = pool.add(pool.sort(t).as_ref().clone()); + let sort = pool.sort(t); let x_term = pool.add((x.clone(), sort).into()); let s = substitution .get(&x_term) diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index 49525701..c747bc00 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -142,10 +142,7 @@ impl<'a> PolyeqElaborator<'a> { let variable_args: Vec<_> = a_bindings .iter() - .map(|(name, value)| { - let sort = pool.sort(value).as_ref().clone(); - (name.clone(), pool.add(sort)) - }) + .map(|(name, value)| (name.clone(), pool.sort(value))) .collect(); self.open_subproof(); diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index c226b675..aa21a98f 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -516,7 +516,7 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { self.pool .add(Term::Lambda(BindingList(func_def.params), func_def.body)) }; - let sort = 
self.pool.add(self.pool.sort(&lambda_term).as_ref().clone()); + let sort = self.pool.sort(&lambda_term); let var = (name, sort); self.insert_sorted_var(var.clone()); let var_term = self.pool.add(var.into()); @@ -814,8 +814,7 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { self.next_token()?; let var = self.expect_symbol()?; let value = self.parse_term()?; - let sort = self.pool.sort(&value).as_ref().clone(); - let sort = self.pool.add(sort); + let sort = self.pool.sort(&value); self.insert_sorted_var((var.clone(), sort)); self.expect_token(Token::CloseParen)?; AnchorArg::Assign(var, value) @@ -1027,7 +1026,7 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { p.expect_token(Token::OpenParen)?; let name = p.expect_symbol()?; let value = p.parse_term()?; - let sort = p.pool.add(p.pool.sort(&value).as_ref().clone()); + let sort = p.pool.sort(&value); p.insert_sorted_var((name.clone(), sort)); p.expect_token(Token::CloseParen)?; Ok((name, value)) @@ -1042,8 +1041,7 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { let substitution = bindings .into_iter() .map(|(name, value)| { - let sort = self.pool.sort(&value).as_ref().clone(); - let var = Term::new_var(name, self.pool.add(sort)); + let var = Term::new_var(name, self.pool.sort(&value)); (self.pool.add(var), value) }) .collect(); From 5d355f4f821b1f8ef58d98ba0775b27fa5bf72cc Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Tue, 25 Jul 2023 22:23:31 -0300 Subject: [PATCH 30/70] Fix some issues in parser and some unecessary operations related with --- carcara/src/ast/macros.rs | 6 +- carcara/src/ast/substitution.rs | 4 +- carcara/src/ast/tests.rs | 6 +- carcara/src/checker/rules/quantifier.rs | 2 +- carcara/src/parser/mod.rs | 93 +++++++++++++++---------- carcara/src/parser/tests.rs | 62 +++++++---------- 6 files changed, 91 insertions(+), 82 deletions(-) diff --git a/carcara/src/ast/macros.rs b/carcara/src/ast/macros.rs index 61e8c2eb..ce27a98f 100644 --- 
a/carcara/src/ast/macros.rs +++ b/carcara/src/ast/macros.rs @@ -249,12 +249,12 @@ macro_rules! impl_str_conversion_traits { #[cfg(test)] mod tests { - use crate::ast::{pool::advanced::LocalPool, *}; + use crate::ast::{pool::PrimitivePool, *}; use crate::parser::tests::{parse_term, parse_terms}; #[test] fn test_match_term() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let [one, two, five] = [1, 2, 5].map(|n| p.add(Term::new_int(n))); let term = parse_term(&mut p, "(= (= (not false) (= true false)) (not true))"); @@ -303,7 +303,7 @@ mod tests { (declare-fun p () Bool) (declare-fun q () Bool) "; - let mut pool = LocalPool::new(); + let mut pool = PrimitivePool::new(); let bool_sort = pool.add(Term::Sort(Sort::Bool)); let int_sort = pool.add(Term::Sort(Sort::Int)); diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index d78af0e0..e3cfd252 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -353,10 +353,10 @@ impl Substitution { #[cfg(test)] mod tests { use super::*; - use crate::{ast::pool::advanced::LocalPool, parser::*}; + use crate::{ast::PrimitivePool, parser::*}; fn run_test(definitions: &str, original: &str, x: &str, t: &str, result: &str) { - let mut pool = LocalPool::new(); + let mut pool = PrimitivePool::new(); let mut parser = Parser::new(&mut pool, definitions.as_bytes(), true, false, false).unwrap(); parser.parse_problem().unwrap(); diff --git a/carcara/src/ast/tests.rs b/carcara/src/ast/tests.rs index 3bb393d3..f1e1c63a 100644 --- a/carcara/src/ast/tests.rs +++ b/carcara/src/ast/tests.rs @@ -1,5 +1,5 @@ use crate::{ - ast::{pool::advanced::LocalPool, TermPool}, + ast::{pool::PrimitivePool, TermPool}, parser::tests::parse_terms, }; use ahash::AHashSet; @@ -8,7 +8,7 @@ use ahash::AHashSet; fn test_free_vars() { fn run_tests(definitions: &str, cases: &[(&str, &[&str])]) { for &(term, expected) in cases { - let mut pool = LocalPool::new(); + let mut pool = 
PrimitivePool::new(); let [root] = parse_terms(&mut pool, definitions, [term]); let expected: AHashSet<_> = expected.iter().copied().collect(); let set = pool.free_vars(&root); @@ -44,7 +44,7 @@ fn test_polyeq() { } fn run_tests(definitions: &str, cases: &[(&str, &str)], test_type: TestType) { - let mut pool = LocalPool::new(); + let mut pool = PrimitivePool::new(); for (a, b) in cases { let [a, b] = parse_terms(&mut pool, definitions, [a, b]); let mut time = std::time::Duration::ZERO; diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index 14fdc171..c00339b2 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -497,7 +497,7 @@ mod tests { fn run_tests(definitions: &str, cases: &[(&str, &str)]) { for &(term, expected) in cases { - let mut pool = crate::ast::pool::advanced::LocalPool::new(); + let mut pool = crate::ast::pool::PrimitivePool::new(); let [term, expected] = parse_terms(&mut pool, definitions, [term, expected]); let got = to_cnf_term(&mut pool, &term); assert_eq!(expected, got); diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index aa21a98f..1367ed85 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -22,14 +22,14 @@ use std::{io::BufRead, str::FromStr}; /// /// This returns the parsed proof, as well as the `TermPool` used in parsing. Can take any type that /// implements `BufRead`. -pub fn parse_instance( +pub fn parse_instance( problem: T, proof: T, apply_function_defs: bool, expand_lets: bool, allow_int_real_subtyping: bool, -) -> CarcaraResult<(ProblemPrelude, Proof, P)> { - let mut pool = P::default(); +) -> CarcaraResult<(ProblemPrelude, Proof, PrimitivePool)> { + let mut pool = PrimitivePool::new(); let mut parser = Parser::new( &mut pool, problem, @@ -79,8 +79,8 @@ struct ParserState { } /// A parser for the Alethe proof format. 
-pub struct Parser<'a, R, P> { - pool: &'a mut P, +pub struct Parser<'a, R> { + pool: &'a mut PrimitivePool, lexer: Lexer, current_token: Token, current_position: Position, @@ -92,12 +92,12 @@ pub struct Parser<'a, R, P> { allow_int_real_subtyping: bool, } -impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { +impl<'a, R: BufRead> Parser<'a, R> { /// Constructs a new `Parser` from a type that implements `BufRead`. /// /// This operation can fail if there is an IO or lexer error on the first token. pub fn new( - pool: &'a mut P, + pool: &'a mut PrimitivePool, input: R, apply_function_defs: bool, expand_lets: bool, @@ -175,34 +175,38 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { /// Constructs and sort checks an operation term. fn make_op(&mut self, op: Operator, args: Vec>) -> Result, ParserError> { - let terms: Vec<_> = args.iter().map(|t| self.pool.sort(t)).collect(); - let sorts: Vec<_> = terms.iter().map(|op| op.as_sort().unwrap()).collect(); + let sorts: Vec<_> = args.iter().map(|t| self.pool.sort(t)).collect(); match op { Operator::Not => { assert_num_args(&args, 1)?; - SortError::assert_eq(&Sort::Bool, sorts[0])?; + SortError::assert_eq(&Sort::Bool, sorts[0].as_sort().unwrap())?; } Operator::Implies => { assert_num_args(&args, 2..)?; for s in sorts { - SortError::assert_eq(&Sort::Bool, s)?; + SortError::assert_eq(&Sort::Bool, s.as_sort().unwrap())?; } } Operator::Or | Operator::And | Operator::Xor => { // These operators can be called with only one argument assert_num_args(&args, 1..)?; for s in sorts { - SortError::assert_eq(&Sort::Bool, s)?; + SortError::assert_eq(&Sort::Bool, s.as_sort().unwrap())?; } } Operator::Equals | Operator::Distinct => { assert_num_args(&args, 2..)?; - SortError::assert_all_eq(&sorts)?; + SortError::assert_all_eq( + &sorts + .iter() + .map(|op| op.as_sort().unwrap()) + .collect::>(), + )?; } Operator::Ite => { assert_num_args(&args, 3)?; - SortError::assert_eq(&Sort::Bool, sorts[0])?; - SortError::assert_eq(sorts[1], 
sorts[2])?; + SortError::assert_eq(&Sort::Bool, sorts[0].as_sort().unwrap())?; + SortError::assert_eq(sorts[1].as_sort().unwrap(), sorts[2].as_sort().unwrap())?; } Operator::Add | Operator::Sub | Operator::Mult => { // The `-` operator, in particular, can be called with only one argument, in which @@ -217,17 +221,30 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { // Int/Real subtyping, all arguments must have the same sort if self.allow_int_real_subtyping { for s in sorts { - SortError::assert_one_of(&[Sort::Int, Sort::Real], s)?; + SortError::assert_one_of(&[Sort::Int, Sort::Real], s.as_sort().unwrap())?; } } else { - SortError::assert_one_of(&[Sort::Int, Sort::Real], sorts[0])?; - SortError::assert_all_eq(&sorts)?; + SortError::assert_one_of( + &[Sort::Int, Sort::Real], + sorts[0].as_sort().unwrap(), + )?; + SortError::assert_all_eq( + &sorts + .iter() + .map(|op| op.as_sort().unwrap()) + .collect::>(), + )?; } } Operator::IntDiv => { assert_num_args(&args, 2..)?; - SortError::assert_eq(&Sort::Int, sorts[0])?; - SortError::assert_all_eq(&sorts)?; + SortError::assert_eq(&Sort::Int, sorts[0].as_sort().unwrap())?; + SortError::assert_all_eq( + &sorts + .iter() + .map(|op| op.as_sort().unwrap()) + .collect::>(), + )?; } Operator::RealDiv => { assert_num_args(&args, 2..)?; @@ -236,41 +253,46 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { // allowing Int/Real subtyping, it may also receive Ints if self.allow_int_real_subtyping { for s in sorts { - SortError::assert_one_of(&[Sort::Int, Sort::Real], s)?; + SortError::assert_one_of(&[Sort::Int, Sort::Real], s.as_sort().unwrap())?; } } else { - SortError::assert_eq(&Sort::Real, sorts[0])?; - SortError::assert_all_eq(&sorts)?; + SortError::assert_eq(&Sort::Real, sorts[0].as_sort().unwrap())?; + SortError::assert_all_eq( + &sorts + .iter() + .map(|op| op.as_sort().unwrap()) + .collect::>(), + )?; } } Operator::Mod => { assert_num_args(&args, 2)?; - SortError::assert_eq(&Sort::Int, sorts[0])?; - 
SortError::assert_eq(&Sort::Int, sorts[1])?; + SortError::assert_eq(&Sort::Int, sorts[0].as_sort().unwrap())?; + SortError::assert_eq(&Sort::Int, sorts[1].as_sort().unwrap())?; } Operator::Abs => { assert_num_args(&args, 1)?; - SortError::assert_eq(&Sort::Int, sorts[0])?; + SortError::assert_eq(&Sort::Int, sorts[0].as_sort().unwrap())?; } Operator::LessThan | Operator::GreaterThan | Operator::LessEq | Operator::GreaterEq => { assert_num_args(&args, 2..)?; // All the arguments must be either Int or Real sorted, but they don't need to all // have the same sort for s in sorts { - SortError::assert_one_of(&[Sort::Int, Sort::Real], s)?; + SortError::assert_one_of(&[Sort::Int, Sort::Real], s.as_sort().unwrap())?; } } Operator::ToReal => { assert_num_args(&args, 1)?; - SortError::assert_eq(&Sort::Int, sorts[0])?; + SortError::assert_eq(&Sort::Int, sorts[0].as_sort().unwrap())?; } Operator::ToInt | Operator::IsInt => { assert_num_args(&args, 1)?; - SortError::assert_eq(&Sort::Real, sorts[0])?; + SortError::assert_eq(&Sort::Real, sorts[0].as_sort().unwrap())?; } Operator::Select => { assert_num_args(&args, 2)?; - match sorts[0] { + match sorts[0].as_sort().unwrap() { Sort::Array(_, _) => (), got => { // Instead of creating some special case for sort errors with parametric @@ -278,7 +300,7 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { // infer the `X` sort from the second operator argument. 
This may be // changed later let got = got.clone(); - let x = sorts[1].clone(); + let x = sorts[1].as_sort().unwrap().clone(); let x = self.pool.add(Term::Sort(x)); let y = self .pool @@ -293,14 +315,15 @@ impl<'a, R: BufRead, P: TermPool> Parser<'a, R, P> { } Operator::Store => { assert_num_args(&args, 3)?; - match sorts[0] { + match sorts[0].as_sort().unwrap() { Sort::Array(x, y) => { - SortError::assert_eq(x.as_sort().unwrap(), sorts[1])?; - SortError::assert_eq(y.as_sort().unwrap(), sorts[2])?; + SortError::assert_eq(x.as_sort().unwrap(), sorts[1].as_sort().unwrap())?; + SortError::assert_eq(y.as_sort().unwrap(), sorts[2].as_sort().unwrap())?; } got => { let got = got.clone(); - let [x, y] = [sorts[0], sorts[1]].map(|s| Term::Sort(s.clone())); + let [x, y] = [&sorts[0], &sorts[1]] + .map(|s| Term::Sort(s.as_sort().unwrap().clone())); return Err(SortError { expected: vec![Sort::Array(self.pool.add(x), self.pool.add(y))], got, diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index 5fc45cfb..aa1d3dcd 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -3,12 +3,12 @@ #![cfg(test)] use super::*; -use crate::ast::pool::advanced::LocalPool; +use crate::ast::pool::PrimitivePool; const ERROR_MESSAGE: &str = "parser error during test"; -pub fn parse_terms( - pool: &mut P, +pub fn parse_terms( + pool: &mut PrimitivePool, definitions: &str, terms: [&str; N], ) -> [Rc; N] { @@ -22,7 +22,7 @@ pub fn parse_terms( }) } -pub fn parse_term(pool: &mut P, input: &str) -> Rc { +pub fn parse_term(pool: &mut PrimitivePool, input: &str) -> Rc { Parser::new(pool, input.as_bytes(), true, false, false) .and_then(|mut parser| parser.parse_term()) .expect(ERROR_MESSAGE) @@ -31,14 +31,14 @@ pub fn parse_term(pool: &mut P, input: &str) -> Rc { /// Tries to parse a term from a `&str`, expecting it to fail. Returns the error encountered, or /// panics if no error is encountered. 
pub fn parse_term_err(input: &str) -> Error { - let mut pool = LocalPool::new(); + let mut pool = PrimitivePool::new(); Parser::new(&mut pool, input.as_bytes(), true, false, false) .and_then(|mut p| p.parse_term()) .expect_err("expected error") } /// Parses a proof from a `&str`. Panics if any error is encountered. -pub fn parse_proof(pool: &mut P, input: &str) -> Proof { +pub fn parse_proof(pool: &mut PrimitivePool, input: &str) -> Proof { let commands = Parser::new(pool, input.as_bytes(), true, false, false) .expect(ERROR_MESSAGE) .parse_proof() @@ -46,7 +46,7 @@ pub fn parse_proof(pool: &mut P, input: &str) -> Proof { Proof { premises: AHashSet::new(), commands } } -fn run_parser_tests(pool: &mut P, cases: &[(&str, Rc)]) { +fn run_parser_tests(pool: &mut PrimitivePool, cases: &[(&str, Rc)]) { for (case, expected) in cases { let got = parse_term(pool, case); assert_eq!(expected, &got); @@ -57,7 +57,7 @@ fn run_parser_tests(pool: &mut P, cases: &[(&str, Rc)]) { fn test_hash_consing() { use ahash::AHashSet; - let mut pool = LocalPool::new(); + let mut pool = PrimitivePool::new(); let input = "(- (- (+ 1 2) @@ -94,23 +94,9 @@ fn test_hash_consing() { .into_iter() .collect::>(); - let l = &mut pool.storage; - let g = &pool.ctx_pool.global_pool; - let c = &pool.ctx_pool.storage.read().unwrap(); - assert_eq!( - l.terms.len() + g.terms.len() + c.terms.len() - 6, - expected.len() - ); + assert_eq!(pool.terms.len(), expected.len()); - for got in l.terms.keys() { - let formatted: &str = &format!("{}", got); - assert!(expected.contains(formatted), "{}", formatted); - } - for got in g.terms.keys() { - let formatted: &str = &format!("{}", got); - assert!(expected.contains(formatted), "{}", formatted); - } - for got in c.terms.keys() { + for got in pool.terms.keys() { let formatted: &str = &format!("{}", got); assert!(expected.contains(formatted), "{}", formatted); } @@ -118,7 +104,7 @@ fn test_hash_consing() { #[test] fn test_constant_terms() { - let mut p = LocalPool::new(); 
+ let mut p = PrimitivePool::new(); assert_eq!(Term::new_int(42), *parse_term(&mut p, "42")); assert_eq!(Term::new_real((3, 2)), *parse_term(&mut p, "1.5")); assert_eq!(Term::new_string("foo"), *parse_term(&mut p, "\"foo\"")); @@ -126,7 +112,7 @@ fn test_constant_terms() { #[test] fn test_arithmetic_ops() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let [one, two, three, five, seven] = [1, 2, 3, 5, 7].map(|n| p.add(Term::new_int(n))); let cases = [ ( @@ -156,7 +142,7 @@ fn test_arithmetic_ops() { #[test] fn test_logic_ops() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let [zero, one, two, three, four] = [0, 1, 2, 3, 4].map(|n| p.add(Term::new_int(n))); let cases = [ ( @@ -238,7 +224,7 @@ fn test_logic_ops() { #[test] fn test_ite() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let [one, two, three] = [1, 2, 3].map(|n| p.add(Term::new_int(n))); let cases = [ ( @@ -275,7 +261,7 @@ fn test_ite() { #[test] fn test_quantifiers() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let bool_sort = p.add(Term::Sort(Sort::Bool)); let real_sort = p.add(Term::Sort(Sort::Real)); let cases = [ @@ -315,7 +301,7 @@ fn test_quantifiers() { #[test] fn test_choice_terms() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let bool_sort = p.add(Term::Sort(Sort::Bool)); let int_sort = p.add(Term::Sort(Sort::Int)); let cases = [ @@ -343,7 +329,7 @@ fn test_choice_terms() { #[test] fn test_let_terms() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let int_sort = p.add(Term::Sort(Sort::Int)); let bool_sort = p.add(Term::Sort(Sort::Bool)); let cases = [ @@ -373,7 +359,7 @@ fn test_let_terms() { #[test] fn test_lambda_terms() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let int_sort = p.add(Term::Sort(Sort::Int)); let cases = [ ("(lambda ((x Int)) x)", { @@ -405,7 +391,7 @@ fn test_lambda_terms() { #[test] fn test_annotated_terms() { 
- let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let [zero, two, three] = [0, 2, 3].map(|n| p.add(Term::new_int(n))); let cases = [ ("(! 0 :named foo)", zero.clone()), @@ -436,7 +422,7 @@ fn test_annotated_terms() { #[test] fn test_declare_fun() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); parse_terms( &mut p, @@ -458,7 +444,7 @@ fn test_declare_fun() { #[test] fn test_declare_sort() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); parse_terms( &mut p, @@ -483,7 +469,7 @@ fn test_declare_sort() { #[test] fn test_define_fun() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let [got] = parse_terms( &mut p, "(define-fun add ((a Int) (b Int)) Int (+ a b))", @@ -506,7 +492,7 @@ fn test_define_fun() { #[test] fn test_step() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let input = " (step t1 (cl (= (+ 2 3) (- 1 2))) :rule rule-name) (step t2 (cl) :rule rule-name :premises (t1)) @@ -596,7 +582,7 @@ fn test_step() { #[test] fn test_premises_in_subproofs() { - let mut p = LocalPool::new(); + let mut p = PrimitivePool::new(); let input = " (assume h1 true) (assume h2 true) From 2023f4e670f4b23dd71b1ae6a7527d16c9cc1e30 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 08:20:33 -0300 Subject: [PATCH 31/70] Split the check method into a single and multithread for the carcaras api --- carcara/src/lib.rs | 61 +++++++++++++++++++++++++++++++++++++++++++++- cli/src/main.rs | 23 ++++++++++------- 2 files changed, 74 insertions(+), 10 deletions(-) diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index d0366e98..e58be0cc 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -135,7 +135,66 @@ pub enum Error { DoesNotReachEmptyClause, } -pub fn check( +pub fn check(problem: T, proof: T, options: CarcaraOptions) -> Result { + let mut run_measures: RunMeasurement = RunMeasurement::default(); + + // Parsing + let 
total = Instant::now(); + let (prelude, proof, mut pool) = parser::parse_instance( + problem, + proof, + options.apply_function_defs, + options.expand_lets, + options.allow_int_real_subtyping, + )?; + run_measures.parsing = total.elapsed(); + + let config = checker::Config::new() + .strict(options.strict) + .skip_unknown_rules(options.skip_unknown_rules) + .lia_via_cvc5(options.lia_via_cvc5); + + // Checking + let checking = Instant::now(); + let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); + + if options.stats { + let mut checker_stats = CheckerStatistics { + file_name: "this", + elaboration_time: Duration::ZERO, + polyeq_time: Duration::ZERO, + assume_time: Duration::ZERO, + assume_core_time: Duration::ZERO, + results: OnlineBenchmarkResults::new(), + }; + let res = checker.check_with_stats(&proof, &mut checker_stats); + + run_measures.checking = checking.elapsed(); + run_measures.total = total.elapsed(); + + checker_stats.results.add_run_measurement( + &("this".to_owned(), 0), + RunMeasurement { + parsing: run_measures.parsing, + checking: run_measures.checking, + elaboration: checker_stats.elaboration_time, + scheduling: run_measures.scheduling, + total: run_measures.total, + polyeq: checker_stats.polyeq_time, + assume: checker_stats.assume_time, + assume_core: checker_stats.assume_core_time, + }, + ); + // Print the statistics + checker_stats.results.print(false); + + res + } else { + checker.check(&proof) + } +} + +pub fn check_parallel( problem: T, proof: T, options: CarcaraOptions, diff --git a/cli/src/main.rs b/cli/src/main.rs index d9c080bb..32fca734 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -4,8 +4,8 @@ mod logger; mod path_args; use carcara::{ - ast::print_proof, benchmarking::OnlineBenchmarkResults, check, check_and_elaborate, parser, - CarcaraOptions, + ast::print_proof, benchmarking::OnlineBenchmarkResults, check, check_and_elaborate, + check_parallel, parser, CarcaraOptions, }; use clap::{AppSettings, ArgEnum, 
Args, Parser, Subcommand}; use const_format::{formatcp, str_index}; @@ -359,13 +359,18 @@ fn parse_command(options: ParseCommandOptions) -> CliResult<()> { fn check_command(options: CheckCommandOptions) -> CliResult { let (problem, proof) = get_instance(&options.input)?; - check( - problem, - proof, - build_carcara_options(options.parsing, options.checking, options.stats), - options.num_threads, - options.stack.stack_size, - ) + let carc_options = build_carcara_options(options.parsing, options.checking, options.stats); + if options.num_threads == 1 { + check(problem, proof, carc_options) + } else { + check_parallel( + problem, + proof, + carc_options, + options.num_threads, + options.stack.stack_size, + ) + } .map_err(Into::into) } From 3ceed1b655fbe0908d0a6e4d68953f5f8d2802a4 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 09:29:16 -0300 Subject: [PATCH 32/70] Test --- carcara/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index e58be0cc..07b6af97 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -157,7 +157,6 @@ pub fn check(problem: T, proof: T, options: CarcaraOptions) -> R // Checking let checking = Instant::now(); let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); - if options.stats { let mut checker_stats = CheckerStatistics { file_name: "this", From 46934fae91e9335ecb19251eb9a9f4c20867ef88 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:22:21 -0300 Subject: [PATCH 33/70] Rebranded `free_vars` function --- carcara/src/ast/pool/advanced.rs | 297 ++----------------------------- carcara/src/ast/pool/mod.rs | 107 ++++++++--- 2 files changed, 91 insertions(+), 313 deletions(-) diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index 49597e29..d50d5b2d 100644 --- a/carcara/src/ast/pool/advanced.rs +++ 
b/carcara/src/ast/pool/advanced.rs @@ -35,45 +35,6 @@ impl ContextPool { storage: ctx_pool.storage.clone(), } } - - /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. - fn add_by_ref<'d, 'c: 'd>( - ctx_pool: &mut PrimitivePool, - global_pool: &'d PrimitivePool, - term: Term, - ) -> Rc { - use std::collections::hash_map::Entry; - - // If the global pool has the term - if let Some(entry) = global_pool.terms.get(&term) { - entry.clone() - } else { - match ctx_pool.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - ctx_pool.compute_sort(&t); - t - } - } - } - } - - /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. - fn sort_by_ref<'d: 't, 'c: 'd, 't>( - ctx_pool: &PrimitivePool, - global_pool: &'d PrimitivePool, - term: &'t Rc, - ) -> Rc { - if let Some(sort) = global_pool.sorts_cache.get(term) { - sort.clone() - } - // A sort inserted by context - else { - ctx_pool.sorts_cache[term].clone() - } - } } impl TermPool for ContextPool { @@ -115,95 +76,11 @@ impl TermPool for ContextPool { } } - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - fn internal<'d: 't, 'c: 'd, 't>( - ctx_pool: &'d mut PrimitivePool, - global_pool: &'c PrimitivePool, - term: &'t Rc, - ) -> &'t AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. 
Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if let Some(set) = global_pool.free_vars_cache.get(term) { - return set; - } - if ctx_pool.free_vars_cache.contains_key(term) { - return ctx_pool.free_vars_cache.get(term).unwrap(); - } - - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = internal(ctx_pool, global_pool, f).clone(); - for a in args { - set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend(internal(ctx_pool, global_pool, a).iter().cloned()); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = internal(ctx_pool, global_pool, inner).clone(); - for bound_var in bindings { - let term = ContextPool::add_by_ref( - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - } - vars - } - Term::Let(bindings, inner) => { - let mut vars = internal(ctx_pool, global_pool, inner).clone(); - for (var, value) in bindings { - let sort = ContextPool::sort_by_ref(ctx_pool, global_pool, value) - .as_ref() - .clone(); - let sort = ContextPool::add_by_ref(ctx_pool, global_pool, sort); - let term = ContextPool::add_by_ref( - ctx_pool, - global_pool, - (var.clone(), sort).into(), - ); - vars.remove(&term); - } - vars - } - Term::Choice(bound_var, inner) => { - let mut vars = internal(ctx_pool, global_pool, inner).clone(); - let term = - ContextPool::add_by_ref(ctx_pool, global_pool, bound_var.clone().into()); - vars.remove(&term); - vars - } - Term::Var(..) 
=> { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set - } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - ctx_pool.free_vars_cache.insert(term.clone(), set); - ctx_pool.free_vars_cache.get(term).unwrap() - } - let mut ctx_guard = self.storage.write(); - internal(ctx_guard.as_mut().unwrap(), &self.global_pool, term).clone() + fn free_vars(&mut self, term: &Rc) -> AHashSet> { + self.storage + .write() + .unwrap() + .free_vars_with_priorities(term, [&self.global_pool]) } } @@ -236,53 +113,6 @@ impl LocalPool { storage: PrimitivePool::new(), } } - - /// Takes a term and returns an `Rc` referencing it. Receive the pools references directly. - fn add_by_ref<'d, 'c: 'd>( - local_pool: &'d mut PrimitivePool, - ctx_pool: &PrimitivePool, - global_pool: &'d PrimitivePool, - term: Term, - ) -> Rc { - use std::collections::hash_map::Entry; - - // If the global pool has the term - if let Some(entry) = global_pool.terms.get(&term) { - entry.clone() - } - // If this term was inserted by the context - else if let Some(entry) = ctx_pool.terms.get(&term) { - entry.clone() - } else { - match local_pool.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - local_pool.compute_sort(&t); - t - } - } - } - } - - /// Returns the sort of this term exactly as the sort function. Receive the pools references directly. 
- fn sort_by_ref<'d: 't, 'c: 'd, 't>( - local_pool: &'d mut PrimitivePool, - ctx_pool: &PrimitivePool, - global_pool: &'d PrimitivePool, - term: &'t Rc, - ) -> Rc { - if let Some(sort) = global_pool.sorts_cache.get(term) { - sort.clone() - } - // A sort inserted by context - else if let Some(entry) = ctx_pool.terms.get(term) { - entry.clone() - } else { - local_pool.sorts_cache[term].clone() - } - } } impl TermPool for LocalPool { @@ -329,118 +159,13 @@ impl TermPool for LocalPool { } } - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - fn internal<'d: 't, 'c: 'd, 't>( - local_pool: &'d mut PrimitivePool, - ctx_pool: &'t PrimitivePool, - global_pool: &'d PrimitivePool, - term: &'t Rc, - ) -> &'t AHashSet> { - // Here, I would like to do - // ``` - // if let Some(vars) = self.free_vars_cache.get(term) { - // return vars; - // } - // ``` - // However, because of a limitation in the borrow checker, the compiler thinks that - // this immutable borrow of `cache` has to live until the end of the function, even - // though the code immediately returns. This would stop me from mutating `cache` in the - // rest of the function. Because of that, I have to check if the hash map contains - // `term` as a key, and then get the value associated with it, meaning I have to access - // the hash map twice, which is a bit slower. 
This is an example of problem case #3 - // from the non-lexical lifetimes RFC: - // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md - if let Some(set) = global_pool.free_vars_cache.get(term) { - return set; - } - if let Some(set) = ctx_pool.free_vars_cache.get(term) { - return set; - } - if local_pool.free_vars_cache.contains_key(term) { - return local_pool.free_vars_cache.get(term).unwrap(); - } - - let set = match term.as_ref() { - Term::App(f, args) => { - let mut set = internal(local_pool, ctx_pool, global_pool, f).clone(); - for a in args { - set.extend( - internal(local_pool, ctx_pool, global_pool, a) - .iter() - .cloned(), - ); - } - set - } - Term::Op(_, args) => { - let mut set = AHashSet::new(); - for a in args { - set.extend( - internal(local_pool, ctx_pool, global_pool, a) - .iter() - .cloned(), - ); - } - set - } - Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); - for bound_var in bindings { - let term = LocalPool::add_by_ref( - local_pool, - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - } - vars - } - Term::Let(bindings, inner) => { - let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); - for (var, value) in bindings { - let sort = LocalPool::sort_by_ref(local_pool, ctx_pool, global_pool, value) - .as_ref() - .clone(); - let sort = LocalPool::add_by_ref(local_pool, ctx_pool, global_pool, sort); - let term = LocalPool::add_by_ref( - local_pool, - ctx_pool, - global_pool, - (var.clone(), sort).into(), - ); - vars.remove(&term); - } - vars - } - Term::Choice(bound_var, inner) => { - let mut vars = internal(local_pool, ctx_pool, global_pool, inner).clone(); - let term = LocalPool::add_by_ref( - local_pool, - ctx_pool, - global_pool, - bound_var.clone().into(), - ); - vars.remove(&term); - vars - } - Term::Var(..) 
=> { - let mut set = AHashSet::with_capacity(1); - set.insert(term.clone()); - set - } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), - }; - local_pool.free_vars_cache.insert(term.clone(), set); - local_pool.free_vars_cache.get(term).unwrap() - } - - internal( - &mut self.storage, - &self.ctx_pool.storage.read().unwrap(), - &self.ctx_pool.global_pool, + fn free_vars(&mut self, term: &Rc) -> AHashSet> { + self.storage.free_vars_with_priorities( term, + [ + &self.ctx_pool.global_pool, + &self.ctx_pool.storage.read().unwrap(), + ], ) - .clone() } } diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index 19e909fd..1dd1e73c 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -37,7 +37,7 @@ pub trait TermPool { /// /// This method uses a cache, so there is no additional cost to computing the free variables of /// a term multiple times. - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet>; + fn free_vars(&mut self, term: &Rc) -> AHashSet>; } /// A structure to store and manage all allocated terms. 
@@ -169,66 +169,95 @@ impl PrimitivePool { self.sorts_cache.insert(term.clone(), sorted_term); self.sorts_cache[term].clone() } -} -impl TermPool for PrimitivePool { - fn bool_true(&self) -> Rc { - self.bool_true.clone() - } + fn add_with_priorities( + &mut self, + term: Term, + prior_pools: [&PrimitivePool; N], + ) -> Rc { + use std::collections::hash_map::Entry; - fn bool_false(&self) -> Rc { - self.bool_false.clone() - } + for p in prior_pools { + // If this prior pool has the term + if let Some(entry) = p.terms.get(&term) { + return entry.clone(); + } + } - fn add(&mut self, term: Term) -> Rc { - let term = Self::add_term_to_map(&mut self.terms, term); - self.compute_sort(&term); - term + match self.terms.entry(term) { + Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), + Entry::Vacant(vacant_entry) => { + let term = vacant_entry.key().clone(); + let term = vacant_entry.insert(Rc::new(term)).clone(); + self.compute_sort(&term); + term + } + } } - fn sort(&self, term: &Rc) -> Rc { + fn sort_with_priorities( + &mut self, + term: &Rc, + prior_pools: [&PrimitivePool; N], + ) -> Rc { + for p in prior_pools { + if let Some(sort) = p.sorts_cache.get(term) { + return sort.clone(); + } + } self.sorts_cache[term].clone() } - fn free_vars<'s, 't: 's>(&'s mut self, term: &'t Rc) -> AHashSet> { - if let Some(vars) = self.free_vars_cache.get(term) { - return vars.clone(); + pub fn free_vars_with_priorities( + &mut self, + term: &Rc, + prior_pools: [&PrimitivePool; N], + ) -> AHashSet> { + for p in prior_pools { + if let Some(set) = p.free_vars_cache.get(term) { + return set.clone(); + } } + + if let Some(set) = self.free_vars_cache.get(term) { + return set.clone(); + } + let set = match term.as_ref() { Term::App(f, args) => { - let mut set = self.free_vars(f); + let mut set = self.free_vars_with_priorities(f, prior_pools); for a in args { - set.extend(self.free_vars(a).into_iter()); + set.extend(self.free_vars_with_priorities(a, prior_pools).into_iter()); } 
set } Term::Op(_, args) => { let mut set = AHashSet::new(); for a in args { - set.extend(self.free_vars(a).into_iter()); + set.extend(self.free_vars_with_priorities(a, prior_pools).into_iter()); } set } Term::Quant(_, bindings, inner) | Term::Lambda(bindings, inner) => { - let mut vars = self.free_vars(inner); + let mut vars = self.free_vars_with_priorities(inner, prior_pools); for bound_var in bindings { - let term = self.add(bound_var.clone().into()); + let term = self.add_with_priorities(bound_var.clone().into(), prior_pools); vars.remove(&term); } vars } Term::Let(bindings, inner) => { - let mut vars = self.free_vars(inner); + let mut vars = self.free_vars_with_priorities(inner, prior_pools); for (var, value) in bindings { - let sort = self.sort(value); - let term = self.add((var.clone(), sort).into()); + let sort = self.sort_with_priorities(value, prior_pools); + let term = self.add_with_priorities((var.clone(), sort).into(), prior_pools); vars.remove(&term); } vars } Term::Choice(bound_var, inner) => { - let mut vars = self.free_vars(inner); - let term = self.add(bound_var.clone().into()); + let mut vars = self.free_vars_with_priorities(inner, prior_pools); + let term = self.add_with_priorities(bound_var.clone().into(), prior_pools); vars.remove(&term); vars } @@ -243,3 +272,27 @@ impl TermPool for PrimitivePool { self.free_vars_cache.get(term).unwrap().clone() } } + +impl TermPool for PrimitivePool { + fn bool_true(&self) -> Rc { + self.bool_true.clone() + } + + fn bool_false(&self) -> Rc { + self.bool_false.clone() + } + + fn add(&mut self, term: Term) -> Rc { + let term = Self::add_term_to_map(&mut self.terms, term); + self.compute_sort(&term); + term + } + + fn sort(&self, term: &Rc) -> Rc { + self.sorts_cache[term].clone() + } + + fn free_vars(&mut self, term: &Rc) -> AHashSet> { + self.free_vars_with_priorities(term, []) + } +} From 8a020306cde873625945143d9da8dad966517210 Mon Sep 17 00:00:00 2001 From: vinciusb 
<65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:08:46 -0300 Subject: [PATCH 34/70] Scheduler related fixes --- .../src/checker/parallel/scheduler/iter.rs | 42 +++++++++---------- carcara/src/checker/parallel/scheduler/mod.rs | 31 +++++++------- 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/carcara/src/checker/parallel/scheduler/iter.rs b/carcara/src/checker/parallel/scheduler/iter.rs index 48151055..18f494e9 100644 --- a/carcara/src/checker/parallel/scheduler/iter.rs +++ b/carcara/src/checker/parallel/scheduler/iter.rs @@ -53,29 +53,29 @@ impl<'a> Iterator for ScheduleIter<'a> { type Item = &'a ProofCommand; fn next(&mut self) -> Option { - // If it isn't the end of the steps - if self.step_id < self.steps.len() { - // If current step is an closing subproof step - while let (_, usize::MAX) = self.steps[self.step_id] { - self.proof_stack.pop(); - self.step_id += 1; - // If reached the last closing step of the whole proof - if self.step_id == self.steps.len() { - return None; - } - } - let cur_step = self.steps[self.step_id]; - self.step_id += 1; + // If it is the end of the steps + if self.step_id >= self.steps.len() { + return None; + } - let top = self.proof_stack.last().unwrap(); - let command = &top[cur_step.1]; - // Opens a new subproof - if let ProofCommand::Subproof(subproof) = command { - self.proof_stack.push(&subproof.commands); + // If current step is an closing subproof step + while let (_, usize::MAX) = self.steps[self.step_id] { + self.proof_stack.pop(); + self.step_id += 1; + // If reached the last closing step of the whole proof + if self.step_id == self.steps.len() { + return None; } - Some(command) - } else { - None } + let cur_step = self.steps[self.step_id]; + self.step_id += 1; + + let top = self.proof_stack.last().unwrap(); + let command = &top[cur_step.1]; + // Opens a new subproof + if let ProofCommand::Subproof(subproof) = command { + self.proof_stack.push(&subproof.commands); + } + Some(command) } } 
diff --git a/carcara/src/checker/parallel/scheduler/mod.rs b/carcara/src/checker/parallel/scheduler/mod.rs index df9a61e9..fc7d1ea6 100644 --- a/carcara/src/checker/parallel/scheduler/mod.rs +++ b/carcara/src/checker/parallel/scheduler/mod.rs @@ -15,14 +15,14 @@ use weights::get_step_weight; /// (depth, subproof index). The first element is the subproof nesting `depth` /// (in the subproof stack) and `subproof index` is the index where this step is /// located in the subproof vector. -#[derive(Clone)] +#[derive(Clone, Default)] pub struct Schedule { steps: Vec<(usize, usize)>, } impl Schedule { pub fn new() -> Self { - Schedule { steps: vec![] } + Self::default() } /// Inserts a new step into the end of the schedule steps vector @@ -98,17 +98,16 @@ pub struct Scheduler { pub loads: Vec, } -impl Default for Schedule { - fn default() -> Self { - Self::new() - } -} - impl Scheduler { /// Creates a thread scheduler for this proof using a specific number of /// workers. This scheduler is responsible for balancing the load (the - /// proof steps have different costs to be checked) aiming the minimum - /// amount of async overhead + /// proof steps have different costs to be checked) aiming for minimum + /// amount of async overhead. + /// + /// Returns a scheduler itself and context usage info (a vector holding + /// how many threads are going to use each of the contexts. This vector maps + /// the contexts based in the subproof hashing value (i.e. `subproof_id`) + /// created in the parser). 
pub fn new(num_workers: usize, proof: &Proof) -> (Self, Vec) { // Initializes the control and result variables let cmds = &proof.commands; @@ -152,24 +151,26 @@ impl Scheduler { break; } // - let AssignedLoad { 0: mut load, 1: load_index } = pq.pop().unwrap(); + let AssignedLoad(mut load, load_index) = pq.pop().unwrap(); { let top = stack.last().unwrap(); let step_weight = get_step_weight(&top.cmds[top.id]); - assert!(u64::MAX - step_weight >= load, "Weight balancing overflow!"); - load += step_weight; + load = load + .checked_add(step_weight) + .expect("Weight balancing overflow!"); pq.push(AssignedLoad(load, load_index)); } let depth = stack.len() - 1; - let (mut i, initial_layer) = (1, { + let mut i = 1; + let initial_layer = { let tmp = loads[load_index].last().unwrap_or(&(0, 0)); if tmp.1 == usize::MAX { tmp.0 - 1 } else { tmp.0 } - }); + }; // If this step needs the context of the subproof oppening step // but it was not assigned to this schedule yet while initial_layer + i <= depth { From e1ce41ea557db4883b0b9d94d72e43e80bd97914 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:14:33 -0300 Subject: [PATCH 35/70] Merge all the scheduler related files --- carcara/src/checker/parallel/mod.rs | 2 +- carcara/src/checker/parallel/scheduler.rs | 415 ++++++++++++++++++ .../src/checker/parallel/scheduler/iter.rs | 81 ---- carcara/src/checker/parallel/scheduler/mod.rs | 211 --------- .../src/checker/parallel/scheduler/weights.rs | 130 ------ 5 files changed, 416 insertions(+), 423 deletions(-) create mode 100644 carcara/src/checker/parallel/scheduler.rs delete mode 100644 carcara/src/checker/parallel/scheduler/iter.rs delete mode 100644 carcara/src/checker/parallel/scheduler/mod.rs delete mode 100644 carcara/src/checker/parallel/scheduler/weights.rs diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 15a45414..81d01bbe 100644 --- a/carcara/src/checker/parallel/mod.rs 
+++ b/carcara/src/checker/parallel/mod.rs @@ -10,7 +10,7 @@ use crate::{ CarcaraResult, Error, }; use ahash::AHashSet; -pub use scheduler::{iter::ScheduleIter, Scheduler}; +pub use scheduler::{ScheduleIter, Scheduler}; use std::{ ops::ControlFlow, sync::{Arc, RwLock}, diff --git a/carcara/src/checker/parallel/scheduler.rs b/carcara/src/checker/parallel/scheduler.rs new file mode 100644 index 00000000..b5988033 --- /dev/null +++ b/carcara/src/checker/parallel/scheduler.rs @@ -0,0 +1,415 @@ +use crate::ast::{Proof, ProofCommand}; +use std::{ + cmp::Ordering, + collections::{BinaryHeap, HashSet}, +}; + +/// Struct responsible for storing a thread work schedule. +/// +/// Here, each step from the original proof is represented as a tuple: +/// (depth, subproof index). The first element is the subproof nesting `depth` +/// (in the subproof stack) and `subproof index` is the index where this step is +/// located in the subproof vector. +#[derive(Clone, Default)] +pub struct Schedule { + steps: Vec<(usize, usize)>, +} + +impl Schedule { + pub fn new() -> Self { + Self::default() + } + + /// Inserts a new step into the end of the schedule steps vector + pub fn push(&mut self, cmd: (usize, usize)) { + self.steps.push(cmd); + } + + /// Removes the last step from the end of the steps vector + pub fn pop(&mut self) { + self.steps.pop(); + } + + /// Returns the last schedule step + pub fn last(&self) -> Option<&(usize, usize)> { + self.steps.last() + } + + /// Returns an iterator over the proof commands. See [`ScheduleIter`]. + pub fn iter<'a>(&'a self, proof: &'a [ProofCommand]) -> ScheduleIter { + ScheduleIter::new(proof, &self.steps) + } +} + +// ============================================================================= + +/// Represents the current load assigned for an specific schedule. 
+/// `0`: Current work load +/// `1`: Schedule index +#[derive(Eq)] +struct AssignedLoad(u64, usize); + +impl Ord for AssignedLoad { + fn cmp(&self, other: &Self) -> Ordering { + other.0.cmp(&self.0) + } +} + +impl PartialOrd for AssignedLoad { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for AssignedLoad { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +/// Represents a level in the proof stack. It holds the subproof itself, +/// its prerequisite step (anchor) and which schedules used any step inside +/// this layer +struct StackLevel<'a> { + id: usize, + cmds: &'a [ProofCommand], + pre_req: Option<(usize, usize)>, + used_by: HashSet, +} + +impl<'a> StackLevel<'a> { + pub fn new(id: usize, cmds: &'a [ProofCommand], pre_req: Option<(usize, usize)>) -> Self { + Self { + id, + cmds, + pre_req, + used_by: HashSet::new(), + } + } +} + +/// Struct that stores the schedules for each thread. +pub struct Scheduler { + pub loads: Vec, +} + +impl Scheduler { + /// Creates a thread scheduler for this proof using a specific number of + /// workers. This scheduler is responsible for balancing the load (the + /// proof steps have different costs to be checked) aiming for minimum + /// amount of async overhead. + /// + /// Returns a scheduler itself and context usage info (a vector holding + /// how many threads are going to use each of the contexts. This vector maps + /// the contexts based in the subproof hashing value (i.e. `subproof_id`) + /// created in the parser). 
+ pub fn new(num_workers: usize, proof: &Proof) -> (Self, Vec) { + // Initializes the control and result variables + let cmds = &proof.commands; + let mut loads = vec![Schedule::new(); num_workers]; + let mut stack = vec![StackLevel::new(0, cmds, None)]; + let mut pq = BinaryHeap::::new(); + let mut context_usage = vec![]; + for i in 0..num_workers { + pq.push(AssignedLoad(0, i)); + } + + loop { + // Pop the finished subproofs + while !stack.is_empty() && { + let top = stack.last().unwrap(); + top.id == top.cmds.len() + } { + for schedule_id in &stack.last().unwrap().used_by { + let last = loads[*schedule_id].last().unwrap(); + // If it's an useless context insertion + if last.0 < stack.len() + && matches!(stack[last.0].cmds[last.1], ProofCommand::Subproof(_)) + { + // Make sure this context usage count is reduced + let subproof_id = match &stack[last.0].cmds[last.1] { + ProofCommand::Subproof(s) => s.context_id, + _ => unreachable!(), + }; + context_usage[subproof_id] -= 1; + + loads[*schedule_id].pop(); + } + // Creates a closing step for each schedule that used this subproof + else { + loads[*schedule_id].push((stack.len() - 1, usize::MAX)); + } + } + stack.pop(); + } + if stack.is_empty() { + break; + } + // + let AssignedLoad(mut load, load_index) = pq.pop().unwrap(); + { + let top = stack.last().unwrap(); + let step_weight = get_step_weight(&top.cmds[top.id]); + load = load + .checked_add(step_weight) + .expect("Weight balancing overflow!"); + pq.push(AssignedLoad(load, load_index)); + } + + let depth = stack.len() - 1; + let mut i = 1; + let initial_layer = { + let tmp = loads[load_index].last().unwrap_or(&(0, 0)); + if tmp.1 == usize::MAX { + tmp.0 - 1 + } else { + tmp.0 + } + }; + // If this step needs the context of the subproof oppening step + // but it was not assigned to this schedule yet + while initial_layer + i <= depth { + let subproof_oppening = stack[initial_layer + i].pre_req.unwrap(); + let last_inserted = 
*loads[load_index].last().unwrap_or(&(usize::MAX, 0)); + + if last_inserted != subproof_oppening { + loads[load_index].push(subproof_oppening); + stack[subproof_oppening.0].used_by.insert(load_index); + + // Now this subproof is used by another schedule + let subproof_id = match &stack[subproof_oppening.0].cmds[subproof_oppening.1] { + ProofCommand::Subproof(s) => s.context_id, + _ => unreachable!(), + }; + context_usage[subproof_id] += 1; + } + i += 1; + } + + let top = stack.last_mut().unwrap(); + // Assign a step to some Schedule + loads[load_index].push((depth, top.id)); + top.used_by.insert(load_index); + + // Go to next step + let last_id = top.id; + top.id += 1; + if let ProofCommand::Subproof(s) = &top.cmds[last_id] { + stack.push(StackLevel::new(0, &s.commands, Some((depth, last_id)))); + stack.last_mut().unwrap().used_by.insert(load_index); + // First schedule using this subproof + context_usage.push(1); + } + } + (Scheduler { loads }, context_usage) + } +} + +/// Iterates through schedule steps +pub struct ScheduleIter<'a> { + proof_stack: Vec<&'a [ProofCommand]>, + steps: &'a Vec<(usize, usize)>, + step_id: usize, +} + +impl<'a> ScheduleIter<'a> { + pub fn new(proof_commands: &'a [ProofCommand], steps: &'a Vec<(usize, usize)>) -> Self { + Self { + proof_stack: vec![proof_commands], + steps, + step_id: 0, + } + } + + /// Returns the current nesting depth of the iterator, or more precisely, + /// the nesting depth of the last step that was returned. This depth starts + /// at zero, for steps in the root proof. + pub fn depth(&self) -> usize { + self.proof_stack.len() - 1 + } + + /// Returns `true` if the iterator is currently in a subproof, that is, if + /// its depth is greater than zero. + pub fn is_in_subproof(&self) -> bool { + self.depth() > 0 + } + + /// Returns a slice to the commands of the inner-most open subproof. 
+ pub fn current_subproof(&self) -> Option<&[ProofCommand]> { + self.is_in_subproof() + .then(|| *self.proof_stack.last().unwrap()) + } + + /// Returns `true` if the most recently returned step is the last step of + /// the current subproof. + pub fn is_end_step(&self) -> bool { + self.is_in_subproof() + && self.steps[self.step_id - 1].1 == self.proof_stack.last().unwrap().len() - 1 + } + + /// Returns the command referenced by a premise index of the form (depth, index in subproof). + /// This method may panic if the premise index does not refer to a valid command. + pub fn get_premise(&self, (depth, index): (usize, usize)) -> &ProofCommand { + &self.proof_stack[depth][index] + } +} + +impl<'a> Iterator for ScheduleIter<'a> { + type Item = &'a ProofCommand; + + fn next(&mut self) -> Option { + // If it is the end of the steps + if self.step_id >= self.steps.len() { + return None; + } + + // If current step is an closing subproof step + while let (_, usize::MAX) = self.steps[self.step_id] { + self.proof_stack.pop(); + self.step_id += 1; + // If reached the last closing step of the whole proof + if self.step_id == self.steps.len() { + return None; + } + } + let cur_step = self.steps[self.step_id]; + self.step_id += 1; + + let top = self.proof_stack.last().unwrap(); + let command = &top[cur_step.1]; + // Opens a new subproof + if let ProofCommand::Subproof(subproof) = command { + self.proof_stack.push(&subproof.commands); + } + Some(command) + } +} + +/// Function that returns a weight associated with a specific rule. These +/// weights are directly correlated to carcara (Single Thread/previous version) +/// median performance while solving each of those rules. +/// +/// Even though subproofs should have a weight (since it has a high cost to be +/// computed), it's for better of scheduler architecture that subproofs have a +/// null weight. 
+/// +/// If you're interested in these weight values, take a look at [Carcara's +/// paper](https://hanielbarbosa.com/papers/tacas2023.pdf) +/// published at TACAS in April 2023 and its benchmark data. +/// +/// The rules with null weight are rules that we had no info about the median +/// performance, since the solver used in the paper dataset does not generate +/// these rules. +pub fn get_step_weight(step: &ProofCommand) -> u64 { + match step { + ProofCommand::Assume { .. } => 230, + ProofCommand::Subproof(_) => 0, + ProofCommand::Step(s) => { + match &s.rule as &str { + "assume" => 230, + "true" => 0, //-1 + "false" => 263, + "not_not" => 574, + "and_pos" => 361, + "and_neg" => 607, + "or_pos" => 640, + "or_neg" => 460, + "xor_pos1" => 763, + "xor_pos2" => 345, + "xor_neg1" => 0, //-1 + "xor_neg2" => 0, //-1 + "implies_pos" => 394, + "implies_neg1" => 214, + "implies_neg2" => 287, + "equiv_pos1" => 763, + "equiv_pos2" => 541, + "equiv_neg1" => 434, + "equiv_neg2" => 476, + "ite_pos1" => 804, + "ite_pos2" => 344, + "ite_neg1" => 566, + "ite_neg2" => 542, + "eq_reflexive" => 451, + "eq_transitive" => 780, + "eq_congruent" => 722, + "eq_congruent_pred" => 632, + "distinct_elim" => 812, + "la_rw_eq" => 1091, + "la_generic" => 87564, + "la_disequality" => 919, + "la_totality" => 0, //-1 + "la_tautology" => 4291, + "forall_inst" => 7877, + "qnt_join" => 2347, + "qnt_rm_unused" => 3659, + "resolution" => 7491, + "th_resolution" => 2462, + "refl" => 1305, + "trans" => 575, + "cong" => 984, + "ho_cong" => 0, //-1 + "and" => 493, + "tautology" => 0, //-1 + "not_or" => 476, + "or" => 426, + "not_and" => 927, + "xor1" => 0, //-1 + "xor2" => 0, //-1 + "not_xor1" => 0, //-1 + "not_xor2" => 0, //-1 + "implies" => 788, + "not_implies1" => 402, + "not_implies2" => 484, + "equiv1" => 837, + "equiv2" => 812, + "not_equiv1" => 418, + "not_equiv2" => 451, + "ite1" => 509, + "ite2" => 493, + "not_ite1" => 722, + "not_ite2" => 476, + "ite_intro" => 3192, + "contraction" => 1731, + 
"connective_def" => 705, + "ite_simplify" => 1797, + "eq_simplify" => 845, + "and_simplify" => 1165, + "or_simplify" => 1133, + "not_simplify" => 787, + "implies_simplify" => 1231, + "equiv_simplify" => 1337, + "bool_simplify" => 1436, + "qnt_simplify" => 517, + "div_simplify" => 2117, + "prod_simplify" => 2527, + "unary_minus_simplify" => 0, //-1 + "minus_simplify" => 1059, + "sum_simplify" => 2248, + "comp_simplify" => 1781, + "nary_elim" => 0, //-1 + "ac_simp" => 9781, + "bfun_elim" => 8558, + "bind" => 5924, + "qnt_cnf" => 14244, + "subproof" => 262, + "let" => 4718, + "onepoint" => 7787, + "sko_ex" => 9321, + "sko_forall" => 12242, + "reordering" => 1452, + "symm" => 682, + "not_symm" => 0, //-1 + "eq_symmetric" => 673, + "or_intro" => 508, + "bind_let" => 2324, + "la_mult_pos" => 1446, + "la_mult_neg" => 1447, + "hole" => 185, //Debug only + "trust" => 185, //Debug only + "strict_resolution" => 1276, + + _ => 0, + } + } + } +} diff --git a/carcara/src/checker/parallel/scheduler/iter.rs b/carcara/src/checker/parallel/scheduler/iter.rs deleted file mode 100644 index 18f494e9..00000000 --- a/carcara/src/checker/parallel/scheduler/iter.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::ast::ProofCommand; - -/// Iterates through schedule steps -pub struct ScheduleIter<'a> { - proof_stack: Vec<&'a [ProofCommand]>, - steps: &'a Vec<(usize, usize)>, - step_id: usize, -} - -impl<'a> ScheduleIter<'a> { - pub fn new(proof_commands: &'a [ProofCommand], steps: &'a Vec<(usize, usize)>) -> Self { - Self { - proof_stack: vec![proof_commands], - steps, - step_id: 0, - } - } - - /// Returns the current nesting depth of the iterator, or more precisely, - /// the nesting depth of the last step that was returned. This depth starts - /// at zero, for steps in the root proof. - pub fn depth(&self) -> usize { - self.proof_stack.len() - 1 - } - - /// Returns `true` if the iterator is currently in a subproof, that is, if - /// its depth is greater than zero. 
- pub fn is_in_subproof(&self) -> bool { - self.depth() > 0 - } - - /// Returns a slice to the commands of the inner-most open subproof. - pub fn current_subproof(&self) -> Option<&[ProofCommand]> { - self.is_in_subproof() - .then(|| *self.proof_stack.last().unwrap()) - } - - /// Returns `true` if the most recently returned step is the last step of - /// the current subproof. - pub fn is_end_step(&self) -> bool { - self.is_in_subproof() - && self.steps[self.step_id - 1].1 == self.proof_stack.last().unwrap().len() - 1 - } - - /// Returns the command referenced by a premise index of the form (depth, index in subproof). - /// This method may panic if the premise index does not refer to a valid command. - pub fn get_premise(&self, (depth, index): (usize, usize)) -> &ProofCommand { - &self.proof_stack[depth][index] - } -} - -impl<'a> Iterator for ScheduleIter<'a> { - type Item = &'a ProofCommand; - - fn next(&mut self) -> Option { - // If it is the end of the steps - if self.step_id >= self.steps.len() { - return None; - } - - // If current step is an closing subproof step - while let (_, usize::MAX) = self.steps[self.step_id] { - self.proof_stack.pop(); - self.step_id += 1; - // If reached the last closing step of the whole proof - if self.step_id == self.steps.len() { - return None; - } - } - let cur_step = self.steps[self.step_id]; - self.step_id += 1; - - let top = self.proof_stack.last().unwrap(); - let command = &top[cur_step.1]; - // Opens a new subproof - if let ProofCommand::Subproof(subproof) = command { - self.proof_stack.push(&subproof.commands); - } - Some(command) - } -} diff --git a/carcara/src/checker/parallel/scheduler/mod.rs b/carcara/src/checker/parallel/scheduler/mod.rs deleted file mode 100644 index fc7d1ea6..00000000 --- a/carcara/src/checker/parallel/scheduler/mod.rs +++ /dev/null @@ -1,211 +0,0 @@ -pub mod iter; -pub mod weights; - -use crate::ast::{Proof, ProofCommand}; -use iter::ScheduleIter; -use std::{ - cmp::Ordering, - 
collections::{BinaryHeap, HashSet}, -}; -use weights::get_step_weight; - -/// Struct responsible for storing a thread work schedule. -/// -/// Here, each step from the original proof is represented as a tuple: -/// (depth, subproof index). The first element is the subproof nesting `depth` -/// (in the subproof stack) and `subproof index` is the index where this step is -/// located in the subproof vector. -#[derive(Clone, Default)] -pub struct Schedule { - steps: Vec<(usize, usize)>, -} - -impl Schedule { - pub fn new() -> Self { - Self::default() - } - - /// Inserts a new step into the end of the schedule steps vector - pub fn push(&mut self, cmd: (usize, usize)) { - self.steps.push(cmd); - } - - /// Removes the last step from the end of the steps vector - pub fn pop(&mut self) { - self.steps.pop(); - } - - /// Returns the last schedule step - pub fn last(&self) -> Option<&(usize, usize)> { - self.steps.last() - } - - /// Returns an iterator over the proof commands. See [`ScheduleIter`]. - pub fn iter<'a>(&'a self, proof: &'a [ProofCommand]) -> ScheduleIter { - ScheduleIter::new(proof, &self.steps) - } -} - -// ============================================================================= - -/// Represents the current load assigned for an specific schedule. -/// `0`: Current work load -/// `1`: Schedule index -#[derive(Eq)] -struct AssignedLoad(u64, usize); - -impl Ord for AssignedLoad { - fn cmp(&self, other: &Self) -> Ordering { - other.0.cmp(&self.0) - } -} - -impl PartialOrd for AssignedLoad { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for AssignedLoad { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} - -/// Represents a level in the proof stack. 
It holds the subproof itself, -/// its prerequisite step (anchor) and which schedules used any step inside -/// this layer -struct StackLevel<'a> { - id: usize, - cmds: &'a [ProofCommand], - pre_req: Option<(usize, usize)>, - used_by: HashSet, -} - -impl<'a> StackLevel<'a> { - pub fn new(id: usize, cmds: &'a [ProofCommand], pre_req: Option<(usize, usize)>) -> Self { - Self { - id, - cmds, - pre_req, - used_by: HashSet::new(), - } - } -} - -/// Struct that stores the schedules for each thread. -pub struct Scheduler { - pub loads: Vec, -} - -impl Scheduler { - /// Creates a thread scheduler for this proof using a specific number of - /// workers. This scheduler is responsible for balancing the load (the - /// proof steps have different costs to be checked) aiming for minimum - /// amount of async overhead. - /// - /// Returns a scheduler itself and context usage info (a vector holding - /// how many threads are going to use each of the contexts. This vector maps - /// the contexts based in the subproof hashing value (i.e. `subproof_id`) - /// created in the parser). 
- pub fn new(num_workers: usize, proof: &Proof) -> (Self, Vec) { - // Initializes the control and result variables - let cmds = &proof.commands; - let mut loads = vec![Schedule::new(); num_workers]; - let mut stack = vec![StackLevel::new(0, cmds, None)]; - let mut pq = BinaryHeap::::new(); - let mut context_usage = vec![]; - for i in 0..num_workers { - pq.push(AssignedLoad(0, i)); - } - - loop { - // Pop the finished subproofs - while !stack.is_empty() && { - let top = stack.last().unwrap(); - top.id == top.cmds.len() - } { - for schedule_id in &stack.last().unwrap().used_by { - let last = loads[*schedule_id].last().unwrap(); - // If it's an useless context insertion - if last.0 < stack.len() - && matches!(stack[last.0].cmds[last.1], ProofCommand::Subproof(_)) - { - // Make sure this context usage count is reduced - let subproof_id = match &stack[last.0].cmds[last.1] { - ProofCommand::Subproof(s) => s.context_id, - _ => unreachable!(), - }; - context_usage[subproof_id] -= 1; - - loads[*schedule_id].pop(); - } - // Creates a closing step for each schedule that used this subproof - else { - loads[*schedule_id].push((stack.len() - 1, usize::MAX)); - } - } - stack.pop(); - } - if stack.is_empty() { - break; - } - // - let AssignedLoad(mut load, load_index) = pq.pop().unwrap(); - { - let top = stack.last().unwrap(); - let step_weight = get_step_weight(&top.cmds[top.id]); - load = load - .checked_add(step_weight) - .expect("Weight balancing overflow!"); - pq.push(AssignedLoad(load, load_index)); - } - - let depth = stack.len() - 1; - let mut i = 1; - let initial_layer = { - let tmp = loads[load_index].last().unwrap_or(&(0, 0)); - if tmp.1 == usize::MAX { - tmp.0 - 1 - } else { - tmp.0 - } - }; - // If this step needs the context of the subproof oppening step - // but it was not assigned to this schedule yet - while initial_layer + i <= depth { - let subproof_oppening = stack[initial_layer + i].pre_req.unwrap(); - let last_inserted = 
*loads[load_index].last().unwrap_or(&(usize::MAX, 0)); - - if last_inserted != subproof_oppening { - loads[load_index].push(subproof_oppening); - stack[subproof_oppening.0].used_by.insert(load_index); - - // Now this subproof is used by another schedule - let subproof_id = match &stack[subproof_oppening.0].cmds[subproof_oppening.1] { - ProofCommand::Subproof(s) => s.context_id, - _ => unreachable!(), - }; - context_usage[subproof_id] += 1; - } - i += 1; - } - - let top = stack.last_mut().unwrap(); - // Assign a step to some Schedule - loads[load_index].push((depth, top.id)); - top.used_by.insert(load_index); - - // Go to next step - let last_id = top.id; - top.id += 1; - if let ProofCommand::Subproof(s) = &top.cmds[last_id] { - stack.push(StackLevel::new(0, &s.commands, Some((depth, last_id)))); - stack.last_mut().unwrap().used_by.insert(load_index); - // First schedule using this subproof - context_usage.push(1); - } - } - (Scheduler { loads }, context_usage) - } -} diff --git a/carcara/src/checker/parallel/scheduler/weights.rs b/carcara/src/checker/parallel/scheduler/weights.rs deleted file mode 100644 index fc9a6f0c..00000000 --- a/carcara/src/checker/parallel/scheduler/weights.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::ast::ProofCommand; - -/// Function that returns a weight associated with a specific rule. These -/// weights are directly correlated to carcara (Single Thread/previous version) -/// median performance while solving each of those rules. -/// -/// Even though subproofs should have a weight (since it has a high cost to be -/// computed), it's for better of scheduler architecture that subproofs have a -/// null weight. -/// -/// If you're interested in these weight values, take a look at [Carcara's -/// paper](https://hanielbarbosa.com/papers/tacas2023.pdf) -/// published at TACAS in April 2023 and its benchmark data. 
-/// -/// The rules with null weight are rules that we had no info about the median -/// performance, since the solver used in the paper dataset does not generate -/// these rules. -pub fn get_step_weight(step: &ProofCommand) -> u64 { - match step { - ProofCommand::Assume { .. } => 230, - ProofCommand::Subproof(_) => 0, - ProofCommand::Step(s) => { - match &s.rule as &str { - "assume" => 230, - "true" => 0, //-1 - "false" => 263, - "not_not" => 574, - "and_pos" => 361, - "and_neg" => 607, - "or_pos" => 640, - "or_neg" => 460, - "xor_pos1" => 763, - "xor_pos2" => 345, - "xor_neg1" => 0, //-1 - "xor_neg2" => 0, //-1 - "implies_pos" => 394, - "implies_neg1" => 214, - "implies_neg2" => 287, - "equiv_pos1" => 763, - "equiv_pos2" => 541, - "equiv_neg1" => 434, - "equiv_neg2" => 476, - "ite_pos1" => 804, - "ite_pos2" => 344, - "ite_neg1" => 566, - "ite_neg2" => 542, - "eq_reflexive" => 451, - "eq_transitive" => 780, - "eq_congruent" => 722, - "eq_congruent_pred" => 632, - "distinct_elim" => 812, - "la_rw_eq" => 1091, - "la_generic" => 87564, - "la_disequality" => 919, - "la_totality" => 0, //-1 - "la_tautology" => 4291, - "forall_inst" => 7877, - "qnt_join" => 2347, - "qnt_rm_unused" => 3659, - "resolution" => 7491, - "th_resolution" => 2462, - "refl" => 1305, - "trans" => 575, - "cong" => 984, - "ho_cong" => 0, //-1 - "and" => 493, - "tautology" => 0, //-1 - "not_or" => 476, - "or" => 426, - "not_and" => 927, - "xor1" => 0, //-1 - "xor2" => 0, //-1 - "not_xor1" => 0, //-1 - "not_xor2" => 0, //-1 - "implies" => 788, - "not_implies1" => 402, - "not_implies2" => 484, - "equiv1" => 837, - "equiv2" => 812, - "not_equiv1" => 418, - "not_equiv2" => 451, - "ite1" => 509, - "ite2" => 493, - "not_ite1" => 722, - "not_ite2" => 476, - "ite_intro" => 3192, - "contraction" => 1731, - "connective_def" => 705, - "ite_simplify" => 1797, - "eq_simplify" => 845, - "and_simplify" => 1165, - "or_simplify" => 1133, - "not_simplify" => 787, - "implies_simplify" => 1231, - "equiv_simplify" => 
1337, - "bool_simplify" => 1436, - "qnt_simplify" => 517, - "div_simplify" => 2117, - "prod_simplify" => 2527, - "unary_minus_simplify" => 0, //-1 - "minus_simplify" => 1059, - "sum_simplify" => 2248, - "comp_simplify" => 1781, - "nary_elim" => 0, //-1 - "ac_simp" => 9781, - "bfun_elim" => 8558, - "bind" => 5924, - "qnt_cnf" => 14244, - "subproof" => 262, - "let" => 4718, - "onepoint" => 7787, - "sko_ex" => 9321, - "sko_forall" => 12242, - "reordering" => 1452, - "symm" => 682, - "not_symm" => 0, //-1 - "eq_symmetric" => 673, - "or_intro" => 508, - "bind_let" => 2324, - "la_mult_pos" => 1446, - "la_mult_neg" => 1447, - "hole" => 185, //Debug only - "trust" => 185, //Debug only - "strict_resolution" => 1276, - - _ => 0, - } - } - } -} From 499d8b83abeb6ed0d2a79f1d5145fdf793414653 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Wed, 26 Jul 2023 17:05:26 -0300 Subject: [PATCH 36/70] Fix clippy version in GitHub Actions --- .github/workflows/ci.yml | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9825cfa2..1a387b77 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,34 +3,29 @@ name: CI on: [push, pull_request] jobs: - setup: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: install toolchain - run: rustup default 1.67 - - name: add components - run: rustup component add clippy && rustup component add rustfmt build: runs-on: ubuntu-latest - needs: setup steps: - uses: actions/checkout@v3 + - name: setup + run: rustup default 1.67 && rustup component add clippy - name: lint - run: cargo clippy --all-targets --all-features --tests --no-deps -- -D warnings + run: cargo clippy --version && cargo clippy --all-targets --all-features --tests --no-deps -- -D warnings - name: build - run: cargo build + run: cargo --version && cargo build test: runs-on: ubuntu-latest - needs: setup steps: - uses: actions/checkout@v3 + - name: 
setup + run: rustup default 1.67 - name: test - run: cargo test --release + run: cargo --version && cargo test --release format: runs-on: ubuntu-latest - needs: setup steps: - uses: actions/checkout@v3 + - name: setup + run: rustup default 1.67 && rustup component add rustfmt - name: check formatting - run: cargo fmt --check + run: cargo fmt --version && cargo fmt --check From e7ab869272797ef98a06bb075868227ff67ab217 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:09:20 -0300 Subject: [PATCH 37/70] Dummy commit --- carcara/src/checker/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 6d4a1973..ee9a7d23 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -277,8 +277,8 @@ impl<'c> ProofChecker<'c> { self.elaborator = Some(Elaborator::new()); let result = self.check(&proof); - // We reset `self.elaborator` before returning any errors encountered while checking so we - // don't leave the checker in an invalid state + // We reset `self.elaborator` before returning any errors encountered while checking so + // we don't leave the checker in an invalid state let mut elaborator = self.elaborator.take().unwrap(); result?; From 7bdc192e65662e1ec8e83830ff3f9691f566d303 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:18:50 -0300 Subject: [PATCH 38/70] Linting --- carcara/src/ast/pool/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index 1dd1e73c..08431aaa 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -77,7 +77,7 @@ impl PrimitivePool { sorts_cache.insert(bool_false.clone(), bool_sort.clone()); sorts_cache.insert(bool_true.clone(), bool_sort.clone()); - sorts_cache.insert(bool_sort.clone(), bool_sort.clone()); + 
sorts_cache.insert(bool_sort.clone(), bool_sort); Self { terms, From 4a010d9474ec6f504d26a7c77a3ffa3ed63ad490 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Thu, 27 Jul 2023 12:14:30 -0300 Subject: [PATCH 39/70] Fix broken `equiv_simplify` in polyequal elaboration --- carcara/src/elaborator/polyeq.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index 114fe331..bc12f7e7 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -296,11 +296,20 @@ impl<'a> PolyeqElaborator<'a> { (&[a_left, a_right], &[b_right, b_left]), ); + // It might be the case that `x'` is syntactically equal to `y'`, which would mean that we + // are adding an `equiv_simplify` step to prove a reflexivity step. This is not valid + // according to the `equiv_simplify` specification, so we must change the rule to `refl` in + // this case. + let rule = if b == flipped_b { + "refl".to_owned() + } else { + "equiv_simplify".to_owned() + }; let id = self.inner.get_new_id(self.root_id); let equiv_step = self.inner.add_new_step(ProofStep { id, clause: vec![build_term!(pool, (= {flipped_b} {b.clone()}))], - rule: "equiv_simplify".to_owned(), + rule, premises: Vec::new(), args: Vec::new(), discharge: Vec::new(), From 3ce6c6bb22f9d5dfe5eddda392a513881b6a6e59 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Thu, 27 Jul 2023 12:38:04 -0300 Subject: [PATCH 40/70] Fix bug in pruning algorithm --- carcara/src/elaborator/pruning.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/carcara/src/elaborator/pruning.rs b/carcara/src/elaborator/pruning.rs index 0c7d81b7..018f072f 100644 --- a/carcara/src/elaborator/pruning.rs +++ b/carcara/src/elaborator/pruning.rs @@ -62,8 +62,8 @@ pub fn slice_proof( match &frame.commands[current] { ProofCommand::Assume { .. 
} => (), ProofCommand::Step(s) => { - for &(_, i) in &s.premises { - frame.queue.push_back((i, current_dist + 1)); + for &(depth, i) in &s.premises { + stack[depth].queue.push_back((i, current_dist + 1)); } } ProofCommand::Subproof(s) => { From a568ab3b6fdcc32dbbe8cb926b56b55081f16d61 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 28 Jul 2023 13:45:15 -0300 Subject: [PATCH 41/70] Remodelled parallel check functions, changed some lifetime specifiers and removed some functions calls --- carcara/src/ast/pool/mod.rs | 10 +- carcara/src/benchmarking/mod.rs | 9 - carcara/src/checker/parallel/mod.rs | 396 ++++++++-------------- carcara/src/checker/parallel/scheduler.rs | 2 +- cli/src/main.rs | 12 +- 5 files changed, 158 insertions(+), 271 deletions(-) diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index 08431aaa..e2f42948 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -101,11 +101,11 @@ impl PrimitivePool { } /// Computes the sort of a term and adds it to the sort cache. 
- pub(super) fn compute_sort<'a, 'b: 'a>(&'a mut self, term: &'b Rc) -> Rc { + fn compute_sort(&mut self, term: &Rc) -> Rc { use super::Operator; - if self.sorts_cache.contains_key(term) { - return self.sorts_cache[term].clone(); + if let Some(sort) = self.sorts_cache.get(term) { + return sort.clone(); } let result: Sort = match term.as_ref() { @@ -160,8 +160,7 @@ impl PrimitivePool { Term::Lambda(bindings, body) => { let mut result: Vec<_> = bindings.iter().map(|(_name, sort)| sort.clone()).collect(); - let return_sort = self.compute_sort(body).as_ref().clone(); - result.push(self.add(return_sort)); + result.push(self.compute_sort(body)); Sort::Function(result) } }; @@ -208,6 +207,7 @@ impl PrimitivePool { self.sorts_cache[term].clone() } + // TODO: Try to workaround the lifetime specifiers and return a ref pub fn free_vars_with_priorities( &mut self, term: &Rc, diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index f17d24fb..36453b57 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -353,7 +353,6 @@ impl CsvBenchmarkResults { } pub trait CollectResults { - fn new() -> Self; fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration); fn add_assume_measurement(&mut self, file: &str, id: &str, is_easy: bool, time: Duration); fn add_polyeq_depth(&mut self, depth: usize); @@ -367,10 +366,6 @@ pub trait CollectResults { } impl CollectResults for OnlineBenchmarkResults { - fn new() -> Self { - Default::default() - } - fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration) { let file = file.to_owned(); let rule = rule.to_owned(); @@ -465,10 +460,6 @@ impl CollectResults for OnlineBenchmarkResults { } impl CollectResults for CsvBenchmarkResults { - fn new() -> Self { - Default::default() - } - fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration) { let id = StepId { file: file.into(), diff --git 
a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 81d01bbe..91aeebd9 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -10,10 +10,10 @@ use crate::{ CarcaraResult, Error, }; use ahash::AHashSet; -pub use scheduler::{ScheduleIter, Scheduler}; +pub use scheduler::{Schedule, ScheduleIter, Scheduler}; use std::{ ops::ControlFlow, - sync::{Arc, RwLock}, + sync::{atomic::AtomicBool, Arc}, thread, time::{Duration, Instant}, }; @@ -59,10 +59,10 @@ impl<'c> ParallelProofChecker<'c> { } } - pub fn check<'s>(&'s mut self, proof: &Proof, scheduler: &'s Scheduler) -> CarcaraResult { + pub fn check(&mut self, proof: &Proof, scheduler: &Scheduler) -> CarcaraResult { // Used to estimulate threads to abort prematurely (only happens when a // thread already found out an invalid step) - let premature_abort = Arc::new(RwLock::new(false)); + let premature_abort = Arc::new(AtomicBool::new(false)); let context_pool = ContextPool::from_global(&self.pool); // thread::scope(|s| { @@ -73,119 +73,20 @@ impl<'c> ParallelProofChecker<'c> { .map(|(i, schedule)| { // Shares the self between threads let mut local_self = self.share(); - let mut local_pool = LocalPool::from_previous(&context_pool); + let local_pool = LocalPool::from_previous(&context_pool); let should_abort = premature_abort.clone(); thread::Builder::new() .name(format!("worker-{i}")) .stack_size(self.stack_size) .spawn_scoped(s, move || -> CarcaraResult<(bool, bool)> { - let mut iter = schedule.iter(&proof.commands[..]); - let mut last_depth = 0; - - while let Some(command) = iter.next() { - // If there is any depth difference between the current and last step - while (last_depth - iter.depth() as i64 > 0) - || (last_depth - iter.depth() as i64 == 0 - && matches!(command, ProofCommand::Subproof(_))) - { - // If this is the last command of a subproof, we have to pop off the subproof - // commands of the stack. 
The parser already ensures that the last command - // in a subproof is always a `step` command - local_self.context.pop(); - last_depth -= 1; - } - last_depth = iter.depth() as i64; - - match command { - ProofCommand::Step(step) => { - // If this step ends a subproof, it might need to implicitly reference the - // previous command in the subproof - let previous_command = if iter.is_end_step() { - let subproof = iter.current_subproof().unwrap(); - let index = subproof.len() - 2; - subproof.get(index).map(|command| { - Premise::new((iter.depth(), index), command) - }) - } else { - None - }; - - local_self - .check_step( - step, - previous_command, - &iter, - &mut local_pool, - None::< - &mut CheckerStatistics, - >, - ) - .map_err(|e| { - // Signalize to other threads to stop the proof checking - *should_abort.write().unwrap() = true; - Error::Checker { - inner: e, - rule: step.rule.clone(), - step: step.id.clone(), - } - })?; - - if step.clause.is_empty() { - local_self.reached_empty_clause = true; - } - } - ProofCommand::Subproof(s) => { - let step_id = command.id(); - - local_self - .context - .push( - &mut local_pool, - &s.assignment_args, - &s.variable_args, - ) - .map_err(|e| { - // Signalize to other threads to stop the proof checking - *should_abort.write().unwrap() = true; - Error::Checker { - inner: e.into(), - rule: "anchor".into(), - step: step_id.to_owned(), - } - })?; - } - ProofCommand::Assume { id, term } => { - if !local_self.check_assume( - id, - term, - &proof.premises, - &iter, - None::<&mut CheckerStatistics>, - ) { - // Signalize to other threads to stop the proof checking - *should_abort.write().unwrap() = true; - return Err(Error::Checker { - inner: CheckerError::Assume(term.clone()), - rule: "assume".into(), - step: id.clone(), - }); - } - } - } - // Verify if any of the other threads found an error and abort in case of positive - if *should_abort.read().unwrap() { - break; - } - } - - // Returns Ok(reached empty clause, isHoley, current 
thread statistics) - if local_self.config.is_running_test || local_self.reached_empty_clause - { - Ok((true, local_self.is_holey)) - } else { - Ok((false, local_self.is_holey)) - } + local_self.worker_thread_check( + proof, + schedule, + local_pool, + should_abort, + None::<&mut CheckerStatistics>, + ) }) .unwrap() }) @@ -224,15 +125,15 @@ impl<'c> ParallelProofChecker<'c> { }) } - pub fn check_with_stats<'s, CR: CollectResults + Send + Default>( - &'s mut self, + pub fn check_with_stats( + &mut self, proof: &Proof, - scheduler: &'s Scheduler, - stats: &'s mut CheckerStatistics, + scheduler: &Scheduler, + stats: &mut CheckerStatistics, ) -> CarcaraResult { // Used to estimulate threads to abort prematurely (only happens when a // thread already found out an invalid step) - let premature_abort = Arc::new(RwLock::new(false)); + let premature_abort = Arc::new(AtomicBool::new(false)); let context_pool = ContextPool::from_global(&self.pool); // thread::scope(|s| { @@ -247,11 +148,11 @@ impl<'c> ParallelProofChecker<'c> { polyeq_time: Duration::ZERO, assume_time: Duration::ZERO, assume_core_time: Duration::ZERO, - results: CR::new(), + results: CR::default(), }; // Shares the proof checker between threads let mut local_self = self.share(); - let mut local_pool = LocalPool::from_previous(&context_pool); + let local_pool = LocalPool::from_previous(&context_pool); let should_abort = premature_abort.clone(); thread::Builder::new() @@ -260,127 +161,14 @@ impl<'c> ParallelProofChecker<'c> { .spawn_scoped( s, move || -> CarcaraResult<(bool, bool, CheckerStatistics)> { - let mut iter = schedule.iter(&proof.commands[..]); - let mut last_depth = 0; - - while let Some(command) = iter.next() { - // If there is any depth difference between the current and last step - while (last_depth - iter.depth() as i64 > 0) - || (last_depth - iter.depth() as i64 == 0 - && matches!(command, ProofCommand::Subproof(_))) - { - // If this is the last command of a subproof, we have to pop off the 
subproof - // commands of the stack. The parser already ensures that the last command - // in a subproof is always a `step` command - local_self.context.pop(); - last_depth -= 1; - } - last_depth = iter.depth() as i64; - - match command { - ProofCommand::Step(step) => { - // If this step ends a subproof, it might need to implicitly reference the - // previous command in the subproof - let previous_command = if iter.is_end_step() { - let subproof = iter.current_subproof().unwrap(); - let index = subproof.len() - 2; - subproof.get(index).map(|command| { - Premise::new((iter.depth(), index), command) - }) - } else { - None - }; - - local_self - .check_step( - step, - previous_command, - &iter, - &mut local_pool, - Some(&mut local_stats), - ) - .map_err(|e| { - // Signalize to other threads to stop the proof checking - *should_abort.write().unwrap() = true; - Error::Checker { - inner: e, - rule: step.rule.clone(), - step: step.id.clone(), - } - })?; - - if step.clause.is_empty() { - local_self.reached_empty_clause = true; - } - } - ProofCommand::Subproof(s) => { - let time = Instant::now(); - let step_id = command.id(); - - local_self - .context - .push( - &mut local_pool, - &s.assignment_args, - &s.variable_args, - ) - .map_err(|e| { - // Signalize to other threads to stop the proof checking - *should_abort.write().unwrap() = true; - Error::Checker { - inner: e.into(), - rule: "anchor".into(), - step: step_id.to_owned(), - } - })?; - - // Collects statistics - let rule_name = match s.commands.last() { - Some(ProofCommand::Step(step)) => { - format!("anchor({})", &step.rule) - } - _ => "anchor".to_owned(), - }; - - local_stats.results.add_step_measurement( - local_stats.file_name, - step_id, - &rule_name, - time.elapsed(), - ); - } - ProofCommand::Assume { id, term } => { - if !local_self.check_assume( - id, - term, - &proof.premises, - &iter, - Some(&mut local_stats), - ) { - // Signalize to other threads to stop the proof checking - *should_abort.write().unwrap() = 
true; - return Err(Error::Checker { - inner: CheckerError::Assume(term.clone()), - rule: "assume".into(), - step: id.clone(), - }); - } - } - } - // Verify if any of the other threads found an error and abort in case of positive - if *should_abort.read().unwrap() { - break; - } - } - - // Returns Ok(reached empty clause, isHoley, current thread statistics) - if local_self.config.is_running_test - || local_self.reached_empty_clause - { - Ok((true, local_self.is_holey, local_stats)) - } else { - Ok((false, local_self.is_holey, local_stats)) - } + let res = local_self.worker_thread_check( + proof, + schedule, + local_pool, + should_abort, + Some(&mut local_stats), + ); + res.and_then(|r| Ok((r.0, r.1, local_stats))) }, ) .unwrap() @@ -434,13 +222,127 @@ impl<'c> ParallelProofChecker<'c> { }) } - fn check_assume( + fn worker_thread_check( + &mut self, + proof: &Proof, + schedule: &Schedule, + mut pool: LocalPool, + should_abort: Arc, + mut stats: Option<&mut CheckerStatistics>, + ) -> CarcaraResult<(bool, bool)> { + use std::sync::atomic::Ordering; + + let mut iter = schedule.iter(&proof.commands[..]); + let mut last_depth = 0; + + while let Some(command) = iter.next() { + // If there is any depth difference between the current and last step + while (last_depth - iter.depth() as i64 > 0) + || (last_depth - iter.depth() as i64 == 0 + && matches!(command, ProofCommand::Subproof(_))) + { + // If this is the last command of a subproof, we have to pop off the subproof + // commands of the stack. 
The parser already ensures that the last command + // in a subproof is always a `step` command + self.context.pop(); + last_depth -= 1; + } + last_depth = iter.depth() as i64; + + match command { + ProofCommand::Step(step) => { + // If this step ends a subproof, it might need to implicitly reference the + // previous command in the subproof + let previous_command = if iter.is_end_step() { + let subproof = iter.current_subproof().unwrap(); + let index = subproof.len() - 2; + subproof + .get(index) + .map(|command| Premise::new((iter.depth(), index), command)) + } else { + None + }; + + self.check_step(step, previous_command, &iter, &mut pool, &mut stats) + .map_err(|e| { + // Signalize to other threads to stop the proof checking + should_abort.store(true, Ordering::Release); + Error::Checker { + inner: e, + rule: step.rule.clone(), + step: step.id.clone(), + } + })?; + + if step.clause.is_empty() { + self.reached_empty_clause = true; + } + } + ProofCommand::Subproof(s) => { + let time = Instant::now(); + let step_id = command.id(); + + self.context + .push(&mut pool, &s.assignment_args, &s.variable_args) + .map_err(|e| { + // Signalize to other threads to stop the proof checking + should_abort.store(true, Ordering::Release); + Error::Checker { + inner: e.into(), + rule: "anchor".into(), + step: step_id.to_owned(), + } + })?; + + if let Some(stats) = &mut stats { + // Collects statistics + let rule_name = match s.commands.last() { + Some(ProofCommand::Step(step)) => { + format!("anchor({})", &step.rule) + } + _ => "anchor".to_owned(), + }; + stats.results.add_step_measurement( + stats.file_name, + step_id, + &rule_name, + time.elapsed(), + ); + } + } + ProofCommand::Assume { id, term } => { + if !self.check_assume(id, term, &proof.premises, &iter, &mut stats) { + // Signalize to other threads to stop the proof checking + should_abort.store(true, Ordering::Release); + return Err(Error::Checker { + inner: CheckerError::Assume(term.clone()), + rule: "assume".into(), + 
step: id.clone(), + }); + } + } + } + // Verify if any of the other threads found an error and abort in case of positive + if should_abort.load(Ordering::Acquire) { + break; + } + } + + // Returns Ok(reached empty clause, isHoley, current thread statistics) + if self.config.is_running_test || self.reached_empty_clause { + Ok((true, self.is_holey)) + } else { + Ok((false, self.is_holey)) + } + } + + fn check_assume<'a, 'i, CR: CollectResults + Send + Default>( &mut self, id: &str, term: &Rc, premises: &AHashSet>, - iter: &ScheduleIter, - mut stats: Option<&mut CheckerStatistics>, + iter: &'i ScheduleIter<'i>, + mut stats: &mut Option<&'a mut CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -501,13 +403,13 @@ impl<'c> ParallelProofChecker<'c> { true } - fn check_step<'a, CR: CollectResults + Send + Default>( + fn check_step<'a, 'i, CR: CollectResults + Send + Default>( &mut self, - step: &'a ProofStep, - previous_command: Option>, - iter: &'a ScheduleIter<'a>, + step: &ProofStep, + previous_command: Option, + iter: &'i ScheduleIter<'i>, pool: &mut LocalPool, - stats: Option<&'a mut CheckerStatistics>, + stats: &mut Option<&'a mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; diff --git a/carcara/src/checker/parallel/scheduler.rs b/carcara/src/checker/parallel/scheduler.rs index b5988033..e228c46e 100644 --- a/carcara/src/checker/parallel/scheduler.rs +++ b/carcara/src/checker/parallel/scheduler.rs @@ -235,7 +235,7 @@ impl<'a> ScheduleIter<'a> { } /// Returns a slice to the commands of the inner-most open subproof. 
- pub fn current_subproof(&self) -> Option<&[ProofCommand]> { + pub fn current_subproof<'s: 'a>(&'s self) -> Option<&[ProofCommand]> { self.is_in_subproof() .then(|| *self.proof_stack.last().unwrap()) } diff --git a/cli/src/main.rs b/cli/src/main.rs index 32fca734..d9c3e38f 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -343,9 +343,8 @@ fn get_instance(options: &Input) -> CliResult<(Box, Box CliResult<()> { - use carcara::ast::{PrimitivePool, ProblemPrelude, Proof}; let (problem, proof) = get_instance(&options.input)?; - let (_, proof, _): (ProblemPrelude, Proof, PrimitivePool) = parser::parse_instance( + let (_, proof, _) = parser::parse_instance( problem, proof, options.parsing.apply_function_defs, @@ -436,18 +435,13 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { } else { println!("valid"); } - print_benchmark_results(results, options.sort_by_total) -} - -fn print_benchmark_results(results: OnlineBenchmarkResults, sort_by_total: bool) -> CliResult<()> { - results.print(sort_by_total); + results.print(options.sort_by_total); Ok(()) } fn slice_command(options: SliceCommandOption) -> CliResult<()> { - use carcara::ast::{PrimitivePool, ProblemPrelude, Proof}; let (problem, proof) = get_instance(&options.input)?; - let (_, proof, _): (ProblemPrelude, Proof, PrimitivePool) = parser::parse_instance( + let (_, proof, _) = parser::parse_instance( problem, proof, options.parsing.apply_function_defs, From bb14caa05b97f02e6899c32abf7aa45775983f18 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Fri, 28 Jul 2023 14:23:58 -0300 Subject: [PATCH 42/70] Removed duplicated code from `ProofChecker` and some useless lifetime specifiers --- carcara/src/checker/mod.rs | 136 +++++++--------------------- carcara/src/checker/parallel/mod.rs | 29 +++--- 2 files changed, 49 insertions(+), 116 deletions(-) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index ee9a7d23..cd2acf4d 100644 --- 
a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -98,94 +98,24 @@ impl<'c> ProofChecker<'c> { } pub fn check(&mut self, proof: &Proof) -> CarcaraResult { - // Similarly to the parser, to avoid stack overflows in proofs with many nested subproofs, - // we check the subproofs iteratively, instead of recursively - let mut iter = proof.iter(); - while let Some(command) = iter.next() { - match command { - ProofCommand::Step(step) => { - let is_end_of_subproof = iter.is_end_step(); - - // If this step ends a subproof, it might need to implicitly reference the - // previous command in the subproof - let previous_command = if is_end_of_subproof { - let subproof = iter.current_subproof().unwrap(); - let index = subproof.len() - 2; - subproof - .get(index) - .map(|command| Premise::new((iter.depth(), index), command)) - } else { - None - }; - self.check_step( - step, - previous_command, - &iter, - None::<&mut CheckerStatistics>, - ) - .map_err(|e| Error::Checker { - inner: e, - rule: step.rule.clone(), - step: step.id.clone(), - })?; - - // If this is the last command of a subproof, we have to pop the subproof - // commands off of the stack. 
The parser already ensures that the last command - // in a subproof is always a `step` command - if is_end_of_subproof { - self.context.pop(); - if let Some(elaborator) = &mut self.elaborator { - elaborator.close_subproof(); - } - } - - if step.clause.is_empty() { - self.reached_empty_clause = true; - } - } - ProofCommand::Subproof(s) => { - let step_id = command.id(); - - self.context - .push(self.pool, &s.assignment_args, &s.variable_args) - .map_err(|e| Error::Checker { - inner: e.into(), - rule: "anchor".into(), - step: step_id.to_owned(), - })?; - - if let Some(elaborator) = &mut self.elaborator { - elaborator.open_subproof(s.commands.len()); - } - } - ProofCommand::Assume { id, term } => { - if !self.check_assume( - id, - term, - &proof.premises, - &iter, - None::<&mut CheckerStatistics>, - ) { - return Err(Error::Checker { - inner: CheckerError::Assume(term.clone()), - rule: "assume".into(), - step: id.clone(), - }); - } - } - } - } - if self.config.is_running_test || self.reached_empty_clause { - Ok(self.is_holey) - } else { - Err(Error::DoesNotReachEmptyClause) - } + self.check_impl( + proof, + None::<&mut CheckerStatistics>, + ) } pub fn check_with_stats<'s, CR: CollectResults + Send + Default>( &'s mut self, proof: &Proof, stats: &'s mut CheckerStatistics, + ) -> CarcaraResult { + self.check_impl(proof, Some(stats)) + } + + fn check_impl( + &mut self, + proof: &Proof, + mut stats: Option<&mut CheckerStatistics>, ) -> CarcaraResult { // Similarly to the parser, to avoid stack overflows in proofs with many nested subproofs, // we check the subproofs iteratively, instead of recursively @@ -206,7 +136,7 @@ impl<'c> ProofChecker<'c> { } else { None }; - self.check_step(step, previous_command, &iter, Some(stats)) + self.check_step(step, previous_command, &iter, &mut stats) .map_err(|e| Error::Checker { inner: e, rule: step.rule.clone(), @@ -244,19 +174,21 @@ impl<'c> ProofChecker<'c> { elaborator.open_subproof(s.commands.len()); } - let rule_name = match 
s.commands.last() { - Some(ProofCommand::Step(step)) => format!("anchor({})", &step.rule), - _ => "anchor".to_owned(), - }; - stats.results.add_step_measurement( - stats.file_name, - step_id, - &rule_name, - time.elapsed(), - ); + if let Some(stats) = &mut stats { + let rule_name = match s.commands.last() { + Some(ProofCommand::Step(step)) => format!("anchor({})", &step.rule), + _ => "anchor".to_owned(), + }; + stats.results.add_step_measurement( + stats.file_name, + step_id, + &rule_name, + time.elapsed(), + ); + } } ProofCommand::Assume { id, term } => { - if !self.check_assume(id, term, &proof.premises, &iter, Some(stats)) { + if !self.check_assume(id, term, &proof.premises, &iter, &mut stats) { return Err(Error::Checker { inner: CheckerError::Assume(term.clone()), rule: "assume".into(), @@ -307,13 +239,13 @@ impl<'c> ProofChecker<'c> { Ok((self.is_holey, proof)) } - fn check_assume( + fn check_assume<'i, CR: CollectResults + Send + Default>( &mut self, id: &str, term: &Rc, premises: &AHashSet>, - iter: &ProofIter, - mut stats: Option<&mut CheckerStatistics>, + iter: &'i ProofIter<'i>, + mut stats: &mut Option<&mut CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -390,12 +322,12 @@ impl<'c> ProofChecker<'c> { true } - fn check_step<'a, CR: CollectResults + Send + Default>( + fn check_step<'a, 'i, CR: CollectResults + Send + Default>( &mut self, - step: &'a ProofStep, - previous_command: Option>, - iter: &'a ProofIter<'a>, - stats: Option<&'a mut CheckerStatistics>, + step: &ProofStep, + previous_command: Option, + iter: &'i ProofIter<'i>, + stats: &mut Option<&'a mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 91aeebd9..debce8a5 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -102,9 +102,9 @@ impl<'c> ParallelProofChecker<'c> { .map(|t| t.join().unwrap()) 
.try_for_each(|opt| { match opt { - Ok((_reached, _holey)) => { + Ok((local_reached, local_holey)) => { // Mask the result booleans - (reached, holey) = (reached | _reached, holey | _holey); + (reached, holey) = (reached | local_reached, holey | local_holey); ControlFlow::Continue(()) } Err(e) => { @@ -161,14 +161,15 @@ impl<'c> ParallelProofChecker<'c> { .spawn_scoped( s, move || -> CarcaraResult<(bool, bool, CheckerStatistics)> { - let res = local_self.worker_thread_check( - proof, - schedule, - local_pool, - should_abort, - Some(&mut local_stats), - ); - res.and_then(|r| Ok((r.0, r.1, local_stats))) + local_self + .worker_thread_check( + proof, + schedule, + local_pool, + should_abort, + Some(&mut local_stats), + ) + .map(|r| (r.0, r.1, local_stats)) }, ) .unwrap() @@ -185,7 +186,7 @@ impl<'c> ParallelProofChecker<'c> { .map(|t| t.join().unwrap()) .for_each(|opt| { match opt { - Ok((_reached, _holey, mut local_stats)) => { + Ok((local_reached, local_holey, mut local_stats)) => { // Combine the statistics // Takes the external and local benchmark results to local variables and combine them let main = std::mem::take(&mut stats.results); @@ -199,7 +200,7 @@ impl<'c> ParallelProofChecker<'c> { stats.assume_core_time += local_stats.assume_core_time; // Mask the result booleans - (reached, holey) = (reached | _reached, holey | _holey); + (reached, holey) = (reached | local_reached, holey | local_holey); } Err(e) => { // Since we want the statistics of the whole run @@ -336,13 +337,13 @@ impl<'c> ParallelProofChecker<'c> { } } - fn check_assume<'a, 'i, CR: CollectResults + Send + Default>( + fn check_assume<'i, CR: CollectResults + Send + Default>( &mut self, id: &str, term: &Rc, premises: &AHashSet>, iter: &'i ScheduleIter<'i>, - mut stats: &mut Option<&'a mut CheckerStatistics>, + mut stats: &mut Option<&mut CheckerStatistics>, ) -> bool { let time = Instant::now(); From d651c6ffeca958a007d31b10ed7248c8994ff2a5 Mon Sep 17 00:00:00 2001 From: vinciusb 
<65973642+vinciusb@users.noreply.github.com> Date: Fri, 28 Jul 2023 14:32:45 -0300 Subject: [PATCH 43/70] Lint --- carcara/src/checker/mod.rs | 4 ++-- carcara/src/checker/parallel/mod.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index cd2acf4d..f2969ea8 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -322,12 +322,12 @@ impl<'c> ProofChecker<'c> { true } - fn check_step<'a, 'i, CR: CollectResults + Send + Default>( + fn check_step<'i, CR: CollectResults + Send + Default>( &mut self, step: &ProofStep, previous_command: Option, iter: &'i ProofIter<'i>, - stats: &mut Option<&'a mut CheckerStatistics>, + stats: &mut Option<&mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index debce8a5..10540c03 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -404,13 +404,13 @@ impl<'c> ParallelProofChecker<'c> { true } - fn check_step<'a, 'i, CR: CollectResults + Send + Default>( + fn check_step<'i, CR: CollectResults + Send + Default>( &mut self, step: &ProofStep, previous_command: Option, iter: &'i ScheduleIter<'i>, pool: &mut LocalPool, - stats: &mut Option<&'a mut CheckerStatistics>, + stats: &mut Option<&mut CheckerStatistics>, ) -> RuleResult { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; From 3067e548b269b670d24b7f841d195d48f9ec0d5f Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Sun, 30 Jul 2023 12:20:20 -0300 Subject: [PATCH 44/70] Removing some useless lifetime specifiers and comments --- carcara/src/checker/mod.rs | 6 +++--- carcara/src/checker/parallel/mod.rs | 10 +++++----- carcara/src/checker/parallel/scheduler.rs | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/carcara/src/checker/mod.rs 
b/carcara/src/checker/mod.rs index f2969ea8..ef3321e9 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -104,10 +104,10 @@ impl<'c> ProofChecker<'c> { ) } - pub fn check_with_stats<'s, CR: CollectResults + Send + Default>( - &'s mut self, + pub fn check_with_stats( + &mut self, proof: &Proof, - stats: &'s mut CheckerStatistics, + stats: &mut CheckerStatistics, ) -> CarcaraResult { self.check_impl(proof, Some(stats)) } diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 10540c03..f698f785 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -329,7 +329,7 @@ impl<'c> ParallelProofChecker<'c> { } } - // Returns Ok(reached empty clause, isHoley, current thread statistics) + // Returns Ok(reached empty clause, isHoley) if self.config.is_running_test || self.reached_empty_clause { Ok((true, self.is_holey)) } else { @@ -337,12 +337,12 @@ impl<'c> ParallelProofChecker<'c> { } } - fn check_assume<'i, CR: CollectResults + Send + Default>( + fn check_assume( &mut self, id: &str, term: &Rc, premises: &AHashSet>, - iter: &'i ScheduleIter<'i>, + iter: &ScheduleIter, mut stats: &mut Option<&mut CheckerStatistics>, ) -> bool { let time = Instant::now(); @@ -404,11 +404,11 @@ impl<'c> ParallelProofChecker<'c> { true } - fn check_step<'i, CR: CollectResults + Send + Default>( + fn check_step( &mut self, step: &ProofStep, previous_command: Option, - iter: &'i ScheduleIter<'i>, + iter: &ScheduleIter, pool: &mut LocalPool, stats: &mut Option<&mut CheckerStatistics>, ) -> RuleResult { diff --git a/carcara/src/checker/parallel/scheduler.rs b/carcara/src/checker/parallel/scheduler.rs index e228c46e..b5988033 100644 --- a/carcara/src/checker/parallel/scheduler.rs +++ b/carcara/src/checker/parallel/scheduler.rs @@ -235,7 +235,7 @@ impl<'a> ScheduleIter<'a> { } /// Returns a slice to the commands of the inner-most open subproof. 
- pub fn current_subproof<'s: 'a>(&'s self) -> Option<&[ProofCommand]> { + pub fn current_subproof(&self) -> Option<&[ProofCommand]> { self.is_in_subproof() .then(|| *self.proof_stack.last().unwrap()) } From 4fc7a382071d380995670132e2576e625df51841 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Sun, 30 Jul 2023 17:09:53 -0300 Subject: [PATCH 45/70] First context commit --- carcara/src/ast/context.rs | 231 ++++++++++++++++++++------ carcara/src/checker/mod.rs | 8 +- carcara/src/checker/parallel/mod.rs | 12 +- carcara/src/checker/rules/subproof.rs | 41 +++-- carcara/src/elaborator/polyeq.rs | 4 +- carcara/src/lib.rs | 11 +- 6 files changed, 239 insertions(+), 68 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index be79c6ea..ee289f5f 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -1,5 +1,5 @@ use crate::ast::*; -use ahash::AHashSet; +use std::sync::{atomic::AtomicUsize, Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; pub struct Context { pub mappings: Vec<(Rc, Rc)>, @@ -7,9 +7,25 @@ pub struct Context { pub cumulative_substitution: Option, } +/// A tuple that will represent a single `Context` and allows a `Context` to be shared between threads. +/// +/// `0`: Number of threads that will use this context. +/// +/// `1`: Shareable and droppable slot for the context. +type ContextInfo = (AtomicUsize, RwLock>); + #[derive(Default)] +/// Struct that implements a thread-shared context stack. That way, this stack +/// tries to use an already existing global `Context` (built by another thread). +/// If it was still not built, then the current thread is going to build this +/// context so other threads can also use it. pub struct ContextStack { - stack: Vec, + /// The context vector that is shared globally between all the threads. 
+ /// The contexts storage is index based (which the index of each context is + /// defined by the anchor/subproof id obtained in the parser). + context_vec: Arc>, + /// The stack of contexts id (works just like a map to `context_vec`). + stack: Vec, num_cumulative_calculated: usize, } @@ -18,6 +34,33 @@ impl ContextStack { Default::default() } + /// Creates an empty stack from contexts usage info (a vector indicating how + /// many threads are going to use each context). + pub fn from_usage(context_usage: &Vec) -> Self { + let mut context_vec: Arc> = Arc::new(vec![]); + let ctx_ref = Arc::get_mut(&mut context_vec).unwrap(); + + for &usage in context_usage { + ctx_ref.push((AtomicUsize::new(usage), RwLock::new(None))); + } + + Self { + context_vec, + stack: vec![], + num_cumulative_calculated: 0, + } + } + + /// Creates an empty stack from a previous stack (starts with context infos + /// already instantiated). + pub fn from_previous(&self) -> Self { + Self { + context_vec: self.context_vec.clone(), + stack: vec![], + num_cumulative_calculated: 0, + } + } + pub fn len(&self) -> usize { self.stack.len() } @@ -26,69 +69,137 @@ impl ContextStack { self.len() == 0 } - pub fn last(&self) -> Option<&Context> { - self.stack.last() + pub fn last(&self) -> Option>> { + self.stack + .last() + .and_then(|id| Some(self.context_vec[*id].1.read().unwrap())) } - pub fn last_mut(&mut self) -> Option<&mut Context> { - self.stack.last_mut() + pub fn last_mut(&mut self) -> Option>> { + self.stack + .last_mut() + .and_then(|id| Some(self.context_vec[*id].1.write().unwrap())) } + // TODO: Add pre push function for single thread tasks + pub fn push( &mut self, pool: &mut dyn TermPool, assignment_args: &[(String, Rc)], variable_args: &[SortedVar], + context_id: usize, ) -> Result<(), SubstitutionError> { - // Since some rules (like `refl`) need to apply substitutions until a fixed point, we - // precompute these substitutions into a separate hash map. 
This assumes that the assignment - // arguments are in the correct order. - let mut substitution = Substitution::empty(); - let mut substitution_until_fixed_point = Substitution::empty(); - - // We build the `substitution_until_fixed_point` hash map from the bottom up, by using the - // substitutions already introduced to transform the result of a new substitution before - // inserting it into the hash map. So for instance, if the substitutions are `(:= y z)` and - // `(:= x (f y))`, we insert the first substitution, and then, when introducing the second, - // we use the current state of the hash map to transform `(f y)` into `(f z)`. The - // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` - for (var, value) in assignment_args.iter() { - let var_term = Term::new_var(var, pool.sort(value)); - let var_term = pool.add(var_term); - substitution.insert(pool, var_term.clone(), value.clone())?; - let new_value = substitution_until_fixed_point.apply(pool, value); - substitution_until_fixed_point.insert(pool, var_term, new_value)?; - } + let ctx_building_status = self.context_vec[context_id].1.try_write(); + match ctx_building_status { + // The write guard was yielded to this thread + Ok(mut ctx_write_guard) => { + match ctx_write_guard.as_mut() { + // Since the context already exists, just use it + Some(_) => { + drop(ctx_write_guard); + } + // It's the first thread trying to build this context. It will + // build this context at the context vec (accessible for all threads) + None => { + // Since some rules (like `refl`) need to apply substitutions until a fixed point, we + // precompute these substitutions into a separate hash map. This assumes that the assignment + // arguments are in the correct order. 
+ let mut substitution = Substitution::empty(); + let mut substitution_until_fixed_point = Substitution::empty(); + + // We build the `substitution_until_fixed_point` hash map from the bottom up, by using the + // substitutions already introduced to transform the result of a new substitution before + // inserting it into the hash map. So for instance, if the substitutions are `(:= y z)` and + // `(:= x (f y))`, we insert the first substitution, and then, when introducing the second, + // we use the current state of the hash map to transform `(f y)` into `(f z)`. The + // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` + for (var, value) in assignment_args.iter() { + let var_term = Term::new_var(var, pool.sort(value)); + let var_term = pool.add(var_term); + substitution.insert(pool, var_term.clone(), value.clone())?; + let new_value = substitution_until_fixed_point.apply(pool, value); + substitution_until_fixed_point.insert(pool, var_term, new_value)?; + } - let mappings = assignment_args - .iter() - .map(|(var, value)| { - let var_term = (var.clone(), pool.sort(value)).into(); - (pool.add(var_term), value.clone()) - }) - .collect(); - let bindings = variable_args.iter().cloned().collect(); - self.stack.push(Context { - mappings, - bindings, - cumulative_substitution: None, - }); + let mappings = assignment_args + .iter() + .map(|(var, value)| { + let var_term = (var.clone(), pool.sort(value)).into(); + (pool.add(var_term), value.clone()) + }) + .collect(); + let bindings = variable_args.iter().cloned().collect(); + // Finally creates the new context under this RwLock + *ctx_write_guard = Some(Context { + mappings, + bindings, + cumulative_substitution: None, + }); + } + } + } + // A thread is currently building the context + Err(_) => {} + } + // Adds this context in the stack + // Notice that even though the context is not ready for use, the write + // guard is still being held by some thread, then if this context is + // required at any moment, 
then we are assured it will wait until the + // fully context construction + self.stack.push(context_id); Ok(()) } pub fn pop(&mut self) { - self.stack.pop(); + use std::sync::atomic::Ordering; + + if let Some(id) = self.stack.pop() { + let this_context = &self.context_vec[id]; + + let mut remaining_threads = this_context.0.load(Ordering::Acquire); + remaining_threads = remaining_threads + .checked_sub(1) + .expect("A thread tried to access a context not allocated for it."); + + if remaining_threads == 0 { + // Drop this context since the last thread stopped using it + *this_context.1.write().unwrap() = None; + } + this_context.0.store(remaining_threads, Ordering::Release); + } + self.num_cumulative_calculated = std::cmp::min(self.num_cumulative_calculated, self.stack.len()); } fn catch_up_cumulative(&mut self, pool: &mut dyn TermPool, up_to: usize) { for i in self.num_cumulative_calculated..std::cmp::max(up_to + 1, self.len()) { - let simultaneous = build_simultaneous_substitution(pool, &self.stack[i].mappings).map; + // Requires read guard. Since the i-th context will be mutated far + // below this line, we first take the read guard here and then, when + // necessary, we require the write guard. This tries to avoid bigger + // overheads + let context_guard = self.context_vec[self.stack[i]].1.read().unwrap(); + let curr_context = context_guard.as_ref().unwrap(); + + let simultaneous = build_simultaneous_substitution(pool, &curr_context.mappings).map; let mut cumulative_substitution = simultaneous.clone(); if i > 0 { - if let Some(previous_context) = self.stack.get(i - 1) { + // Waits until OS allows to read this previous context. The code structure + // makes sure that this context, when released for reading, will be already + // instantiated since there are only 2 cases: + // - This thread was responsible for building this previous context. Then + // this context has already been built. + // - Another thread was assigned to build this context. 
Then, it doesn't + // matter if this other thread has already finished the process, the + // current thread will have to wait until the guard is released. + if let Some(previous_context) = self + .stack + .get(i - 1) + .and_then(|id| Some(self.context_vec[*id].1.read().unwrap())) + { + let previous_context = previous_context.as_ref().unwrap(); let previous_substitution = previous_context.cumulative_substitution.as_ref().unwrap(); @@ -101,23 +212,33 @@ impl ContextStack { } } } - self.stack[i].cumulative_substitution = + drop(context_guard); + + // Waits until the OS allows to mutate at this context + // TODO: Does it really needs to require a write guard here instead of up there + let mut context_guard = self.context_vec[self.stack[i]].1.write().unwrap(); + let mut curr_context = context_guard.as_mut().unwrap(); + curr_context.cumulative_substitution = Some(Substitution::new(pool, cumulative_substitution).unwrap()); self.num_cumulative_calculated = i + 1; } } - fn get_substitution(&mut self, pool: &mut dyn TermPool, index: usize) -> &mut Substitution { - assert!(index < self.len()); - self.catch_up_cumulative(pool, index); - self.stack[index].cumulative_substitution.as_mut().unwrap() - } - pub fn apply_previous(&mut self, pool: &mut dyn TermPool, term: &Rc) -> Rc { if self.len() < 2 { term.clone() } else { - self.get_substitution(pool, self.len() - 2) + let index = self.len() - 2; + self.catch_up_cumulative(pool, index); + self.context_vec[self.stack[index]] + .1 + .write() + .unwrap() + .as_mut() + .unwrap() + .cumulative_substitution + .as_mut() + .unwrap() .apply(pool, term) } } @@ -126,7 +247,17 @@ impl ContextStack { if self.is_empty() { term.clone() } else { - self.get_substitution(pool, self.len() - 1) + let index = self.len() - 1; + self.catch_up_cumulative(pool, index); + self.context_vec[self.stack[index]] + .1 + .write() + .unwrap() + .as_mut() + .unwrap() + .cumulative_substitution + .as_mut() + .unwrap() .apply(pool, term) } } diff --git 
a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index ef3321e9..267f570d 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -162,8 +162,14 @@ impl<'c> ProofChecker<'c> { let time = Instant::now(); let step_id = command.id(); + // TODO: Add a special function call here self.context - .push(self.pool, &s.assignment_args, &s.variable_args) + .push( + self.pool, + &s.assignment_args, + &s.variable_args, + s.context_id, + ) .map_err(|e| Error::Checker { inner: e.into(), rule: "anchor".into(), diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index f698f785..e774d3d3 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -33,13 +33,14 @@ impl<'c> ParallelProofChecker<'c> { pool: Arc, config: Config, prelude: &'c ProblemPrelude, + context_usage: &Vec, stack_size: usize, ) -> Self { ParallelProofChecker { pool, config, prelude, - context: ContextStack::new(), + context: ContextStack::from_usage(context_usage), reached_empty_clause: false, is_holey: false, stack_size, @@ -52,7 +53,7 @@ impl<'c> ParallelProofChecker<'c> { pool: self.pool.clone(), config: self.config.clone(), prelude: self.prelude, - context: ContextStack::new(), + context: ContextStack::from_previous(&self.context), reached_empty_clause: false, is_holey: false, stack_size: self.stack_size, @@ -284,7 +285,12 @@ impl<'c> ParallelProofChecker<'c> { let step_id = command.id(); self.context - .push(&mut pool, &s.assignment_args, &s.variable_args) + .push( + &mut pool.ctx_pool, + &s.assignment_args, + &s.variable_args, + s.context_id, + ) .map_err(|e| { // Signalize to other threads to stop the proof checking should_abort.store(true, Ordering::Release); diff --git a/carcara/src/checker/rules/subproof.rs b/carcara/src/checker/rules/subproof.rs index a78aec53..0e4e29d0 100644 --- a/carcara/src/checker/rules/subproof.rs +++ b/carcara/src/checker/rules/subproof.rs @@ -91,6 +91,7 @@ pub fn bind( // Since we 
are closing a subproof, we only care about the substitutions that were introduced // in it let context = context.last().unwrap(); + let context = context.as_ref().unwrap(); // The quantifier binders must be the xs and ys of the context substitution let (xs, ys): (AHashSet<_>, AHashSet<_>) = context @@ -142,8 +143,15 @@ pub fn r#let( // Since we are closing a subproof, we only care about the substitutions that were introduced // in it - let substitution: AHashMap, Rc> = - context.last().unwrap().mappings.iter().cloned().collect(); + let substitution: AHashMap, Rc> = context + .last() + .unwrap() + .as_ref() + .unwrap() + .mappings + .iter() + .cloned() + .collect(); let (let_term, u_prime) = match_term_err!((= l u) = &conclusion[0])?; let Term::Let(let_bindings, u) = let_term.as_ref() else { @@ -265,11 +273,13 @@ pub fn onepoint( } ); - let last_context = context.last_mut().unwrap(); - if let Some((var, _)) = r_bindings - .iter() - .find(|b| !last_context.bindings.contains(b)) - { + let last_context = context.last().unwrap(); + if let Some((var, _)) = { + let last_context = last_context.as_ref().unwrap(); + r_bindings + .iter() + .find(|b| !last_context.bindings.contains(b)) + } { return Err(SubproofError::BindingIsNotInContext(var.clone()).into()); } @@ -282,10 +292,13 @@ pub fn onepoint( .map(|var| pool.add(var.clone().into())) .collect(); let substitution_vars: AHashSet<_> = last_context + .as_ref() + .unwrap() .mappings .iter() .map(|(k, _)| k.clone()) .collect(); + drop(last_context); let points = extract_points(quant, left); @@ -299,7 +312,8 @@ pub fn onepoint( .map(|(x, t)| (x, context.apply(pool, &t))) .collect(); - let last_context = context.last_mut().unwrap(); + let last_context = context.last().unwrap(); + let last_context = last_context.as_ref().unwrap(); // For each substitution (:= x t) in the context, the equality (= x t) must appear in phi if let Some((k, v)) = last_context .mappings @@ -353,8 +367,15 @@ fn generic_skolemization_rule( current_phi = 
context.apply_previous(pool, &current_phi);
     }
 
-    let substitution: AHashMap<Rc<Term>, Rc<Term>> =
-        context.last().unwrap().mappings.iter().cloned().collect();
+    let substitution: AHashMap<Rc<Term>, Rc<Term>> = context
+        .last()
+        .unwrap()
+        .as_ref()
+        .unwrap()
+        .mappings
+        .iter()
+        .cloned()
+        .collect();
     for (i, x) in bindings.iter().enumerate() {
         let x_term = pool.add(Term::from(x.clone()));
         let t = substitution
diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs
index c747bc00..e4241511 100644
--- a/carcara/src/elaborator/polyeq.rs
+++ b/carcara/src/elaborator/polyeq.rs
@@ -112,7 +112,9 @@ impl<'a> PolyeqElaborator<'a> {
                     .map(|((a_var, _), b)| (a_var.clone(), pool.add(b.clone().into())))
                     .collect();
 
-                c.push(pool, &assigment_args, &variable_args).unwrap();
+                // TODO: Add new context pool when using it here and fix s.context_id bellow
+                c.push(pool, &assigment_args, &variable_args, 99999999)
+                    .unwrap();
                 (variable_args, assigment_args)
             }
         };
diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs
index 07b6af97..f8bc7304 100644
--- a/carcara/src/lib.rs
+++ b/carcara/src/lib.rs
@@ -222,10 +222,15 @@ pub fn check_parallel(
 
     // Checking
     let checking = Instant::now();
-    let (scheduler, _) = Scheduler::new(num_threads, &proof);
+    let (scheduler, schedule_context_usage) = Scheduler::new(num_threads, &proof);
     run_measures.scheduling = checking.elapsed();
-    let mut checker =
-        checker::ParallelProofChecker::new(Arc::new(pool), config, &prelude, stack_size);
+    let mut checker = checker::ParallelProofChecker::new(
+        Arc::new(pool),
+        config,
+        &prelude,
+        &schedule_context_usage,
+        stack_size,
+    );
 
     if options.stats {
         let mut checker_stats = CheckerStatistics {

From 6b4fded4e1c4c7eb456a36c447f62f136183ac71 Mon Sep 17 00:00:00 2001
From: vinciusb <65973642+vinciusb@users.noreply.github.com>
Date: Mon, 31 Jul 2023 19:25:59 -0300
Subject: [PATCH 46/70] Added single thread context stack fix

---
 carcara/src/ast/context.rs      | 13 ++++++++++++-
 carcara/src/checker/mod.rs      |  4 ++--
carcara/src/elaborator/polyeq.rs | 4 ++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index ee289f5f..4f19aeb4 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -81,7 +81,18 @@ impl ContextStack { .and_then(|id| Some(self.context_vec[*id].1.write().unwrap())) } - // TODO: Add pre push function for single thread tasks + /// A function used to force the creation of a new context at the end of the + /// `context_vec`. This function should be called before a + /// `ContextStack::push` in a single thread operation. Since a single + /// thread doesn't require a schedule balancing, then there is no info about + /// how many contexts there are in the proof (and it's not needed since we + /// can always add a new context at the end of the vector just like an usual + /// stack) + pub fn force_new_context(&mut self) -> usize { + let ctx_vec = Arc::get_mut(&mut self.context_vec).unwrap(); + ctx_vec.push((AtomicUsize::new(1), RwLock::new(None))); + ctx_vec.len() - 1 + } pub fn push( &mut self, diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 267f570d..97c6e259 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -162,13 +162,13 @@ impl<'c> ProofChecker<'c> { let time = Instant::now(); let step_id = command.id(); - // TODO: Add a special function call here + let new_context_id = self.context.force_new_context(); self.context .push( self.pool, &s.assignment_args, &s.variable_args, - s.context_id, + new_context_id, ) .map_err(|e| Error::Checker { inner: e.into(), diff --git a/carcara/src/elaborator/polyeq.rs b/carcara/src/elaborator/polyeq.rs index e4241511..74d30ed6 100644 --- a/carcara/src/elaborator/polyeq.rs +++ b/carcara/src/elaborator/polyeq.rs @@ -112,8 +112,8 @@ impl<'a> PolyeqElaborator<'a> { .map(|((a_var, _), b)| (a_var.clone(), pool.add(b.clone().into()))) .collect(); - // TODO: Add new context pool when using it here and 
fix s.context_id bellow - c.push(pool, &assigment_args, &variable_args, 99999999) + let new_context_id = c.force_new_context(); + c.push(pool, &assigment_args, &variable_args, new_context_id) .unwrap(); (variable_args, assigment_args) } From e774b11e0d73a5b22655934a8c8184dc162472d9 Mon Sep 17 00:00:00 2001 From: vinciusb <65973642+vinciusb@users.noreply.github.com> Date: Mon, 31 Jul 2023 20:11:56 -0300 Subject: [PATCH 47/70] Linting --- carcara/src/ast/context.rs | 91 +++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 51 deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 4f19aeb4..5e9663bd 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -72,13 +72,13 @@ impl ContextStack { pub fn last(&self) -> Option>> { self.stack .last() - .and_then(|id| Some(self.context_vec[*id].1.read().unwrap())) + .map(|id| self.context_vec[*id].1.read().unwrap()) } pub fn last_mut(&mut self) -> Option>> { self.stack .last_mut() - .and_then(|id| Some(self.context_vec[*id].1.write().unwrap())) + .map(|id| self.context_vec[*id].1.write().unwrap()) } /// A function used to force the creation of a new context at the end of the @@ -101,57 +101,46 @@ impl ContextStack { variable_args: &[SortedVar], context_id: usize, ) -> Result<(), SubstitutionError> { - let ctx_building_status = self.context_vec[context_id].1.try_write(); - match ctx_building_status { - // The write guard was yielded to this thread - Ok(mut ctx_write_guard) => { - match ctx_write_guard.as_mut() { - // Since the context already exists, just use it - Some(_) => { - drop(ctx_write_guard); - } - // It's the first thread trying to build this context. It will - // build this context at the context vec (accessible for all threads) - None => { - // Since some rules (like `refl`) need to apply substitutions until a fixed point, we - // precompute these substitutions into a separate hash map. 
This assumes that the assignment - // arguments are in the correct order. - let mut substitution = Substitution::empty(); - let mut substitution_until_fixed_point = Substitution::empty(); - - // We build the `substitution_until_fixed_point` hash map from the bottom up, by using the - // substitutions already introduced to transform the result of a new substitution before - // inserting it into the hash map. So for instance, if the substitutions are `(:= y z)` and - // `(:= x (f y))`, we insert the first substitution, and then, when introducing the second, - // we use the current state of the hash map to transform `(f y)` into `(f z)`. The - // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` - for (var, value) in assignment_args.iter() { - let var_term = Term::new_var(var, pool.sort(value)); - let var_term = pool.add(var_term); - substitution.insert(pool, var_term.clone(), value.clone())?; - let new_value = substitution_until_fixed_point.apply(pool, value); - substitution_until_fixed_point.insert(pool, var_term, new_value)?; - } + // The write guard was yielded to this thread + if let Ok(mut ctx_write_guard) = self.context_vec[context_id].1.try_write() { + // It's the first thread trying to build this context. It will + // build this context at the context vec (accessible for all threads) + if ctx_write_guard.is_none() { + // Since some rules (like `refl`) need to apply substitutions until a fixed point, we + // precompute these substitutions into a separate hash map. This assumes that the assignment + // arguments are in the correct order. 
+ let mut substitution = Substitution::empty(); + let mut substitution_until_fixed_point = Substitution::empty(); - let mappings = assignment_args - .iter() - .map(|(var, value)| { - let var_term = (var.clone(), pool.sort(value)).into(); - (pool.add(var_term), value.clone()) - }) - .collect(); - let bindings = variable_args.iter().cloned().collect(); - // Finally creates the new context under this RwLock - *ctx_write_guard = Some(Context { - mappings, - bindings, - cumulative_substitution: None, - }); - } + // We build the `substitution_until_fixed_point` hash map from the bottom up, by using the + // substitutions already introduced to transform the result of a new substitution before + // inserting it into the hash map. So for instance, if the substitutions are `(:= y z)` and + // `(:= x (f y))`, we insert the first substitution, and then, when introducing the second, + // we use the current state of the hash map to transform `(f y)` into `(f z)`. The + // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` + for (var, value) in assignment_args.iter() { + let var_term = Term::new_var(var, pool.sort(value)); + let var_term = pool.add(var_term); + substitution.insert(pool, var_term.clone(), value.clone())?; + let new_value = substitution_until_fixed_point.apply(pool, value); + substitution_until_fixed_point.insert(pool, var_term, new_value)?; } + + let mappings = assignment_args + .iter() + .map(|(var, value)| { + let var_term = (var.clone(), pool.sort(value)).into(); + (pool.add(var_term), value.clone()) + }) + .collect(); + let bindings = variable_args.iter().cloned().collect(); + // Finally creates the new context under this RwLock + *ctx_write_guard = Some(Context { + mappings, + bindings, + cumulative_substitution: None, + }); } - // A thread is currently building the context - Err(_) => {} } // Adds this context in the stack // Notice that even though the context is not ready for use, the write @@ -208,7 +197,7 @@ impl ContextStack { if let 
Some(previous_context) = self .stack .get(i - 1) - .and_then(|id| Some(self.context_vec[*id].1.read().unwrap())) + .map(|id| self.context_vec[*id].1.read().unwrap()) { let previous_context = previous_context.as_ref().unwrap(); let previous_substitution = From bd2d55a8e2f2238853195528691c003551b92f6a Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 8 Aug 2023 15:34:53 -0300 Subject: [PATCH 48/70] Allow user to choose solver binary for `lia_generic` --- carcara/src/checker/error.rs | 28 +++++++-------- carcara/src/checker/lia_generic.rs | 53 ++++++++++++++++------------- carcara/src/checker/mod.rs | 9 ++--- carcara/src/checker/parallel/mod.rs | 5 +-- carcara/src/checker/rules/mod.rs | 2 +- carcara/src/lib.rs | 17 ++++----- cli/src/benchmarking.rs | 2 +- cli/src/main.rs | 25 ++++++++++++-- 8 files changed, 85 insertions(+), 56 deletions(-) diff --git a/carcara/src/checker/error.rs b/carcara/src/checker/error.rs index 45996332..d791aa6c 100644 --- a/carcara/src/checker/error.rs +++ b/carcara/src/checker/error.rs @@ -248,29 +248,29 @@ pub enum LinearArithmeticError { #[derive(Debug, Error)] pub enum LiaGenericError { - #[error("failed to spawn cvc5 process")] - FailedSpawnCvc5(io::Error), + #[error("failed to spawn solver process")] + FailedSpawnSolver(io::Error), - #[error("failed to write to cvc5 stdin")] - FailedWriteToCvc5Stdin(io::Error), + #[error("failed to write to solver stdin")] + FailedWriteToSolverStdin(io::Error), - #[error("error while waiting for cvc5 to exit")] - FailedWaitForCvc5(io::Error), + #[error("error while waiting for solver to exit")] + FailedWaitForSolver(io::Error), - #[error("cvc5 gave invalid output")] - Cvc5GaveInvalidOutput, + #[error("solver gave invalid output")] + SolverGaveInvalidOutput, - #[error("cvc5 output not unsat")] - Cvc5OutputNotUnsat, + #[error("solver output not unsat")] + OutputNotUnsat, - #[error("cvc5 timed out when solving problem")] - Cvc5Timeout, + #[error("solver timed out when solving problem")] + 
SolverTimeout, #[error( - "cvc5 returned non-zero exit code: {}", + "solver returned non-zero exit code: {}", if let Some(i) = .0 { format!("{}", i) } else { "none".to_owned() } )] - Cvc5NonZeroExitCode(Option), + NonZeroExitCode(Option), #[error("error in inner proof: {0}")] InnerProofError(Box), diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index d91d96b7..cdcc1ae2 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -30,12 +30,13 @@ pub fn lia_generic_single_thread( prelude: &ProblemPrelude, elaborator: Option<&mut Elaborator>, root_id: &str, + solver: &str, ) -> bool { let problem = get_problem_string(conclusion, prelude); - let commands = match get_cvc5_proof(pool, problem) { + let commands = match get_solver_proof(pool, problem, solver) { Ok(c) => c, Err(e) => { - log::warn!("failed to check `lia_generic` step using cvc5: {}", e); + log::warn!("failed to check `lia_generic` step: {}", e); if let Some(elaborator) = elaborator { elaborator.unchanged(conclusion); } @@ -44,27 +45,32 @@ pub fn lia_generic_single_thread( }; if let Some(elaborator) = elaborator { - insert_cvc5_proof(pool, elaborator, commands, conclusion, root_id); + insert_solver_proof(pool, elaborator, commands, conclusion, root_id); } false } -pub fn lia_generic_multi_thread(conclusion: &[Rc], prelude: &ProblemPrelude) -> bool { +pub fn lia_generic_multi_thread( + conclusion: &[Rc], + prelude: &ProblemPrelude, + solver: &str, +) -> bool { let mut pool = PrimitivePool::new(); let problem = get_problem_string(conclusion, prelude); - if let Err(e) = get_cvc5_proof(&mut pool, problem) { - log::warn!("failed to check `lia_generic` step using cvc5: {}", e); + if let Err(e) = get_solver_proof(&mut pool, problem, solver) { + log::warn!("failed to check `lia_generic` step using: {}", e); true } else { false } } -fn get_cvc5_proof( +fn get_solver_proof( pool: &mut PrimitivePool, problem: String, + solver: &str, ) -> Result, 
LiaGenericError> { - let mut cvc5 = Command::new("cvc5") + let mut process = Command::new(solver) .args([ "--tlimit=10000", "--lang=smt2", @@ -76,25 +82,26 @@ fn get_cvc5_proof( .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() - .map_err(LiaGenericError::FailedSpawnCvc5)?; + .map_err(LiaGenericError::FailedSpawnSolver)?; - cvc5.stdin + process + .stdin .take() - .expect("failed to open cvc5 stdin") + .expect("failed to open solver stdin") .write_all(problem.as_bytes()) - .map_err(LiaGenericError::FailedWriteToCvc5Stdin)?; + .map_err(LiaGenericError::FailedWriteToSolverStdin)?; - let output = cvc5 + let output = process .wait_with_output() - .map_err(LiaGenericError::FailedWaitForCvc5)?; + .map_err(LiaGenericError::FailedWaitForSolver)?; if !output.status.success() { if let Ok(s) = std::str::from_utf8(&output.stderr) { - if s.contains("cvc5 interrupted by timeout.") { - return Err(LiaGenericError::Cvc5Timeout); + if s.contains("interrupted by timeout.") { + return Err(LiaGenericError::SolverTimeout); } } - return Err(LiaGenericError::Cvc5NonZeroExitCode(output.status.code())); + return Err(LiaGenericError::NonZeroExitCode(output.status.code())); } let mut proof = output.stdout.as_slice(); @@ -102,17 +109,17 @@ fn get_cvc5_proof( proof .read_line(&mut first_line) - .map_err(|_| LiaGenericError::Cvc5GaveInvalidOutput)?; + .map_err(|_| LiaGenericError::SolverGaveInvalidOutput)?; if first_line.trim_end() != "unsat" { - return Err(LiaGenericError::Cvc5OutputNotUnsat); + return Err(LiaGenericError::OutputNotUnsat); } - parse_and_check_cvc5_proof(pool, problem.as_bytes(), proof) + parse_and_check_solver_proof(pool, problem.as_bytes(), proof) .map_err(|e| LiaGenericError::InnerProofError(Box::new(e))) } -fn parse_and_check_cvc5_proof( +fn parse_and_check_solver_proof( pool: &mut PrimitivePool, problem: &[u8], proof: &[u8], @@ -190,7 +197,7 @@ fn insert_missing_assumes( (all, num_added) } -fn insert_cvc5_proof( +fn insert_solver_proof( pool: &mut PrimitivePool, 
elaborator: &mut Elaborator, mut commands: Vec, @@ -206,7 +213,7 @@ fn insert_cvc5_proof( conclusion, &commands, // This is a bit ugly, but we have to add the ".added" to avoid colliding with the first few - // steps in the cvc5 proof + // steps in the solver proof &format!("{}.added", root_id), ); diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 97c6e259..acefd30c 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -50,7 +50,7 @@ pub struct Config { strict: bool, skip_unknown_rules: bool, is_running_test: bool, - lia_via_cvc5: bool, + lia_solver: Option>, } impl Config { @@ -68,8 +68,8 @@ impl Config { self } - pub fn lia_via_cvc5(mut self, value: bool) -> Self { - self.lia_via_cvc5 = value; + pub fn lia_solver(mut self, value: impl Into>>) -> Self { + self.lia_solver = value.into(); self } } @@ -340,13 +340,14 @@ impl<'c> ProofChecker<'c> { let mut elaborated = false; if step.rule == "lia_generic" { - if self.config.lia_via_cvc5 { + if let Some(solver) = &self.config.lia_solver { let is_hole = lia_generic::lia_generic_single_thread( self.pool, &step.clause, self.prelude, self.elaborator.as_mut(), &step.id, + solver, ); self.is_holey = self.is_holey || is_hole; elaborated = self.elaborator.is_some(); diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index e774d3d3..d47024af 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -422,8 +422,9 @@ impl<'c> ParallelProofChecker<'c> { let mut polyeq_time = Duration::ZERO; if step.rule == "lia_generic" { - if self.config.lia_via_cvc5 { - let is_hole = lia_generic::lia_generic_multi_thread(&step.clause, self.prelude); + if let Some(solver) = &self.config.lia_solver { + let is_hole = + lia_generic::lia_generic_multi_thread(&step.clause, self.prelude, solver); self.is_holey = self.is_holey || is_hole; } else { log::warn!("encountered \"lia_generic\" rule, ignoring"); diff --git 
a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index 7c9ba2fa..3ed77fe4 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -173,7 +173,7 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { strict: false, skip_unknown_rules: false, is_running_test: true, - lia_via_cvc5: false, + lia_solver: None, }, &prelude, ); diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index f8bc7304..ee450569 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -72,11 +72,12 @@ pub struct CarcaraOptions { /// to a function that expects a `Real` will still be an error. pub allow_int_real_subtyping: bool, - /// Enable checking/elaboration of `lia_generic` steps using cvc5. When checking a proof, this - /// will call cvc5 to solve the linear integer arithmetic problem, check the proof, and discard - /// it. When elaborating, the proof will instead be inserted in the place of the `lia_generic` - /// step. - pub lia_via_cvc5: bool, + /// Enable checking/elaboration of `lia_generic` steps using the given solver. When checking a + /// proof, this will call the solver to solve the linear integer arithmetic problem, check the + /// proof, and discard it. When elaborating, the proof will instead be inserted in the place of + /// the `lia_generic` step. The solver should be a binary that can read SMT-LIB from stdin and + /// output an Alethe proof from stdout. + pub lia_solver: Option>, /// Enables "strict" checking of some rules. 
/// @@ -152,7 +153,7 @@ pub fn check(problem: T, proof: T, options: CarcaraOptions) -> R let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_via_cvc5(options.lia_via_cvc5); + .lia_solver(options.lia_solver); // Checking let checking = Instant::now(); @@ -218,7 +219,7 @@ pub fn check_parallel( let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_via_cvc5(options.lia_via_cvc5); + .lia_solver(options.lia_solver); // Checking let checking = Instant::now(); @@ -289,7 +290,7 @@ pub fn check_and_elaborate( let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_via_cvc5(options.lia_via_cvc5); + .lia_solver(options.lia_solver); // Checking let checking = Instant::now(); diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index c4752803..89410766 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -51,7 +51,7 @@ fn run_job( let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_via_cvc5(options.lia_via_cvc5); + .lia_solver(options.lia_solver.clone()); let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); let checking = Instant::now(); diff --git a/cli/src/main.rs b/cli/src/main.rs index d9c3e38f..6195c2f1 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -117,7 +117,7 @@ struct ParsingOptions { allow_int_real_subtyping: bool, } -#[derive(Args, Clone, Copy)] +#[derive(Args, Clone)] struct CheckingOptions { /// Enables the strict checking of certain rules. #[clap(short, long)] @@ -127,8 +127,12 @@ struct CheckingOptions { #[clap(long)] skip_unknown_rules: bool, - /// Check `lia_generic` steps by calling into cvc5. + /// Check `lia_generic` steps using the provided solver. #[clap(long)] + lia_solver: Option, + + /// Check `lia_generic` steps by calling into cvc5 (deprecated). 
+ #[clap(long, conflicts_with("lia-solver"))] lia_via_cvc5: bool, } @@ -148,15 +152,21 @@ fn build_carcara_options( CheckingOptions { strict, skip_unknown_rules, + lia_solver, lia_via_cvc5, }: CheckingOptions, StatsOptions { stats }: StatsOptions, ) -> CarcaraOptions { + // If no solver is provided by the `--lia-solver` option, *and* the `--lia-via-cvc5` option was + // passed, we default to cvc5 as a solver + let lia_solver = lia_solver + .map(Into::into) + .or_else(|| lia_via_cvc5.then(|| "cvc5".into())); CarcaraOptions { apply_function_defs, expand_lets: expand_let_bindings, allow_int_real_subtyping, - lia_via_cvc5, + lia_solver, strict, skip_unknown_rules, stats, @@ -301,6 +311,15 @@ fn main() { let colors_enabled = !cli.no_color && atty::is(atty::Stream::Stderr); logger::init(cli.log_level.into(), colors_enabled); + if let Command::Check(CheckCommandOptions { checking, .. }) + | Command::Elaborate(ElaborateCommandOptions { checking, .. }) + | Command::Bench(BenchCommandOptions { checking, .. 
}) = &cli.command + { + if checking.lia_via_cvc5 { + log::warn!("`--lia-via-cvc5` option is deprecated, please use `--lia-solver cvc5`") + } + } + let result = match cli.command { Command::Parse(options) => parse_command(options), Command::Check(options) => { From 13bc50bcca50ac6e325f76a51c7f4d20a013a2b5 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 8 Aug 2023 22:08:23 -0300 Subject: [PATCH 49/70] Allow user to control solver arguments --- carcara/src/checker/lia_generic.rs | 22 ++++++++-------------- carcara/src/checker/mod.rs | 12 ++++++------ carcara/src/checker/parallel/mod.rs | 4 ++-- carcara/src/checker/rules/mod.rs | 2 +- carcara/src/lib.rs | 29 ++++++++++++++++++++--------- cli/src/benchmarking.rs | 2 +- cli/src/main.rs | 23 ++++++++++++++++++----- 7 files changed, 56 insertions(+), 38 deletions(-) diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index cdcc1ae2..0af99983 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -1,5 +1,5 @@ use super::*; -use crate::{checker::error::LiaGenericError, parser}; +use crate::{checker::error::LiaGenericError, parser, LiaGenericOptions}; use ahash::AHashMap; use std::{ io::{BufRead, Write}, @@ -30,10 +30,10 @@ pub fn lia_generic_single_thread( prelude: &ProblemPrelude, elaborator: Option<&mut Elaborator>, root_id: &str, - solver: &str, + options: &LiaGenericOptions, ) -> bool { let problem = get_problem_string(conclusion, prelude); - let commands = match get_solver_proof(pool, problem, solver) { + let commands = match get_solver_proof(pool, problem, options) { Ok(c) => c, Err(e) => { log::warn!("failed to check `lia_generic` step: {}", e); @@ -53,11 +53,11 @@ pub fn lia_generic_single_thread( pub fn lia_generic_multi_thread( conclusion: &[Rc], prelude: &ProblemPrelude, - solver: &str, + options: &LiaGenericOptions, ) -> bool { let mut pool = PrimitivePool::new(); let problem = get_problem_string(conclusion, prelude); - if let Err(e) 
= get_solver_proof(&mut pool, problem, solver) { + if let Err(e) = get_solver_proof(&mut pool, problem, options) { log::warn!("failed to check `lia_generic` step using: {}", e); true } else { @@ -68,16 +68,10 @@ pub fn lia_generic_multi_thread( fn get_solver_proof( pool: &mut PrimitivePool, problem: String, - solver: &str, + options: &LiaGenericOptions, ) -> Result, LiaGenericError> { - let mut process = Command::new(solver) - .args([ - "--tlimit=10000", - "--lang=smt2", - "--proof-format-mode=alethe", - "--proof-granularity=theory-rewrite", - "--proof-alethe-res-pivots", - ]) + let mut process = Command::new(options.solver.as_ref()) + .args(options.arguments.iter().map(AsRef::as_ref)) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index acefd30c..9aba6f06 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -7,7 +7,7 @@ use crate::{ ast::*, benchmarking::{CollectResults, OnlineBenchmarkResults}, elaborator::Elaborator, - CarcaraResult, Error, + CarcaraResult, Error, LiaGenericOptions, }; use ahash::AHashSet; use error::CheckerError; @@ -50,7 +50,7 @@ pub struct Config { strict: bool, skip_unknown_rules: bool, is_running_test: bool, - lia_solver: Option>, + lia_options: Option, } impl Config { @@ -68,8 +68,8 @@ impl Config { self } - pub fn lia_solver(mut self, value: impl Into>>) -> Self { - self.lia_solver = value.into(); + pub fn lia_options(mut self, value: impl Into>) -> Self { + self.lia_options = value.into(); self } } @@ -340,14 +340,14 @@ impl<'c> ProofChecker<'c> { let mut elaborated = false; if step.rule == "lia_generic" { - if let Some(solver) = &self.config.lia_solver { + if let Some(options) = &self.config.lia_options { let is_hole = lia_generic::lia_generic_single_thread( self.pool, &step.clause, self.prelude, self.elaborator.as_mut(), &step.id, - solver, + options, ); self.is_holey = self.is_holey || is_hole; elaborated = 
self.elaborator.is_some(); diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index d47024af..40c1815d 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -422,9 +422,9 @@ impl<'c> ParallelProofChecker<'c> { let mut polyeq_time = Duration::ZERO; if step.rule == "lia_generic" { - if let Some(solver) = &self.config.lia_solver { + if let Some(options) = &self.config.lia_options { let is_hole = - lia_generic::lia_generic_multi_thread(&step.clause, self.prelude, solver); + lia_generic::lia_generic_multi_thread(&step.clause, self.prelude, options); self.is_holey = self.is_holey || is_hole; } else { log::warn!("encountered \"lia_generic\" rule, ignoring"); diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index 3ed77fe4..a2714c28 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -173,7 +173,7 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { strict: false, skip_unknown_rules: false, is_running_test: true, - lia_solver: None, + lia_options: None, }, &prelude, ); diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index ee450569..ae82f6f7 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -72,12 +72,11 @@ pub struct CarcaraOptions { /// to a function that expects a `Real` will still be an error. pub allow_int_real_subtyping: bool, - /// Enable checking/elaboration of `lia_generic` steps using the given solver. When checking a - /// proof, this will call the solver to solve the linear integer arithmetic problem, check the - /// proof, and discard it. When elaborating, the proof will instead be inserted in the place of - /// the `lia_generic` step. The solver should be a binary that can read SMT-LIB from stdin and - /// output an Alethe proof from stdout. - pub lia_solver: Option>, + /// If `Some`, enables the checking/elaboration of `lia_generic` steps using an external solver. 
+ /// When checking a proof, this means calling the solver to solve the linear integer arithmetic + /// problem, checking the proof, and discarding it. When elaborating, the proof will instead be + /// inserted in the place of the `lia_generic` step. See [`LiaGenericOptions`] for more details. + pub lia_options: Option, /// Enables "strict" checking of some rules. /// @@ -99,6 +98,18 @@ pub struct CarcaraOptions { pub stats: bool, } +/// The options that control how `lia_generic` steps are checked/elaborated using an external +/// solver. +#[derive(Debug, Clone)] +pub struct LiaGenericOptions { + /// The external solver path. The solver should be a binary that can read SMT-LIB from stdin and + /// output an Alethe proof to stdout. + pub solver: Box, + + /// The arguments to pass to the solver. + pub arguments: Vec>, +} + impl CarcaraOptions { /// Constructs a new `CarcaraOptions` with all options set to `false`. pub fn new() -> Self { @@ -153,7 +164,7 @@ pub fn check(problem: T, proof: T, options: CarcaraOptions) -> R let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_solver(options.lia_solver); + .lia_options(options.lia_options); // Checking let checking = Instant::now(); @@ -219,7 +230,7 @@ pub fn check_parallel( let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_solver(options.lia_solver); + .lia_options(options.lia_options); // Checking let checking = Instant::now(); @@ -290,7 +301,7 @@ pub fn check_and_elaborate( let config = checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_solver(options.lia_solver); + .lia_options(options.lia_options); // Checking let checking = Instant::now(); diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index 89410766..7ddc3a50 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -51,7 +51,7 @@ fn run_job( let config = 
checker::Config::new() .strict(options.strict) .skip_unknown_rules(options.skip_unknown_rules) - .lia_solver(options.lia_solver.clone()); + .lia_options(options.lia_options.clone()); let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); let checking = Instant::now(); diff --git a/cli/src/main.rs b/cli/src/main.rs index 6195c2f1..d2a04727 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -5,7 +5,7 @@ mod path_args; use carcara::{ ast::print_proof, benchmarking::OnlineBenchmarkResults, check, check_and_elaborate, - check_parallel, parser, CarcaraOptions, + check_parallel, parser, CarcaraOptions, LiaGenericOptions, }; use clap::{AppSettings, ArgEnum, Args, Parser, Subcommand}; use const_format::{formatcp, str_index}; @@ -131,6 +131,16 @@ struct CheckingOptions { #[clap(long)] lia_solver: Option, + /// The arguments to pass to the `lia_generic` solver. This should be a single string where + /// multiple arguments are separated by spaces. + #[clap( + long, + requires = "lia-solver", + allow_hyphen_values = true, + default_value = "--tlimit=10000 --lang=smt2 --proof-format-mode=alethe --proof-granularity=theory-rewrite --proof-alethe-res-pivots" + )] + lia_solver_args: String, + /// Check `lia_generic` steps by calling into cvc5 (deprecated). 
#[clap(long, conflicts_with("lia-solver"))] lia_via_cvc5: bool, @@ -154,19 +164,22 @@ fn build_carcara_options( skip_unknown_rules, lia_solver, lia_via_cvc5, + lia_solver_args, }: CheckingOptions, StatsOptions { stats }: StatsOptions, ) -> CarcaraOptions { // If no solver is provided by the `--lia-solver` option, *and* the `--lia-via-cvc5` option was // passed, we default to cvc5 as a solver - let lia_solver = lia_solver - .map(Into::into) - .or_else(|| lia_via_cvc5.then(|| "cvc5".into())); + let solver = lia_solver.or_else(|| lia_via_cvc5.then(|| "cvc5".into())); + let lia_options = solver.map(|solver| LiaGenericOptions { + solver: solver.into(), + arguments: lia_solver_args.split_whitespace().map(Into::into).collect(), + }); CarcaraOptions { apply_function_defs, expand_lets: expand_let_bindings, allow_int_real_subtyping, - lia_solver, + lia_options, strict, skip_unknown_rules, stats, From 64e818790023ad27181d3e09c2bdbbe35f6028b2 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Wed, 9 Aug 2023 15:42:40 -0300 Subject: [PATCH 50/70] Document new `lia_generic` arguments in README --- README.md | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3f42b040..a101efa9 100644 --- a/README.md +++ b/README.md @@ -43,12 +43,42 @@ This command will check the given proof while elaborating it, and print the elab standard output. The `--print-with-sharing` flag controls whether the elaborated proof will be printed using term sharing. -By default, elaboration of `lia_generic` steps using cvc5 is disabled. To enable it, pass the -`--lia-via-cvc5` flag. You will need to have a working binary of cvc5 in your PATH. - Many of the same flags used in the `check` command also apply to the `elaborate` command. See `carcara help elaborate` for more details. 
+### `lia_generic` steps + +By default, Carcara ignores steps of the `lia_generic` rule when checking or elaborating a proof, +instead considering them as holes. However, you can use an external solver to aid Carcara in +checking these steps, using the `--lia-solver` option. For example, running +``` +carcara check example.smt2.proof --lia-solver cvc5 +``` + +will check the proof using cvc5 (more precisely, the cvc5 binary in your `PATH`) to check any +`lia_generic` steps. This is done by converting the `lia_generic` step into an SMT-LIB problem, +giving it to the solver, and checking the Alethe proof that the solver produces. If instead of just +checking we were also elaborating the proof, this would also insert the solver proof in the place of +the `lia_generic` step. + +The value given to `--lia-solver` should be the path of the solver binary. Conceivably, any solver +can be used (SMT or otherwise) as long as it is able to read SMT-LIB from stdin, solve the linear +integer arithmetic problem, and output an Alethe proof to stdout. + +The `--lia-solver-args` option can be used to change the arguments passed to the solver binary. This +option should receive a single value, where multiple arguments are separated by spaces. For example, +if you wanted to instead check `lia_generic` steps using veriT, you might pass the following +arguments: +``` +carcara check example.smt2.proof --lia-solver veriT --lia-solver-args "--proof=- --proof-with-sharing" +``` + +The default arguments for `--lia-solver-args` are as follows (note that they assume you use cvc5 as +a solver): +``` +--tlimit=10000 --lang=smt2 --proof-format-mode=alethe --proof-granularity=theory-rewrite --proof-alethe-res-pivots +``` + ### Running benchmarks The `bench` command is used to run benchmarks. For example, the following command will run a @@ -76,7 +106,6 @@ enable multiple threads using the `-j`/`--num-threads` option. See `carcara help bench` for more options. 
- ## "Strict" checking Strict checking mode can be enabled by using the `--strict` flag when checking. Currently, this only From b548d37866f552d687fa0ed5922cf45e3438b47c Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Thu, 10 Aug 2023 20:03:04 -0300 Subject: [PATCH 51/70] Allow extra attributes in `assume` commands --- carcara/src/parser/mod.rs | 1 + carcara/src/parser/tests.rs | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index 1367ed85..a09feb79 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -699,6 +699,7 @@ impl<'a, R: BufRead> Parser<'a, R> { fn parse_assume_command(&mut self) -> CarcaraResult<(String, Rc)> { let id = self.expect_symbol()?; let term = self.parse_term_expecting_sort(&Sort::Bool)?; + self.ignore_remaining_attributes()?; self.expect_token(Token::CloseParen)?; Ok((id, term)) } diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index aa1d3dcd..a5ace572 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -490,6 +490,33 @@ fn test_define_fun() { assert_eq!(expected, got); } +#[test] +fn test_assume() { + let mut p = PrimitivePool::new(); + let input = " + (assume h1 true) + (assume h2 (or true false) :ignore \"extra\" :attributes) + "; + let proof = parse_proof(&mut p, input); + assert_eq!(proof.commands.len(), 2); + + assert_eq!( + &proof.commands[0], + &ProofCommand::Assume { + id: "h1".into(), + term: p.bool_true(), + } + ); + + assert_eq!( + &proof.commands[1], + &ProofCommand::Assume { + id: "h2".into(), + term: parse_term(&mut p, "(or true false)"), + } + ); +} + #[test] fn test_step() { let mut p = PrimitivePool::new(); From a990334e3a6faecb94a60a7e19f88ef526c3bd95 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Thu, 10 Aug 2023 20:07:53 -0300 Subject: [PATCH 52/70] Remove unneeded `mut` variable --- carcara/src/ast/context.rs | 3 +-- 1 file changed, 1 insertion(+), 2 
deletions(-) diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 5e9663bd..68b1e5cc 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -217,8 +217,7 @@ impl ContextStack { // Waits until the OS allows to mutate at this context // TODO: Does it really needs to require a write guard here instead of up there let mut context_guard = self.context_vec[self.stack[i]].1.write().unwrap(); - let mut curr_context = context_guard.as_mut().unwrap(); - curr_context.cumulative_substitution = + context_guard.as_mut().unwrap().cumulative_substitution = Some(Substitution::new(pool, cumulative_substitution).unwrap()); self.num_cumulative_calculated = i + 1; } From c2acb16e935a9a1a18a7e0dbff9af80f7bea97e1 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Thu, 10 Aug 2023 20:25:31 -0300 Subject: [PATCH 53/70] Remove `is_running_test` option form checker config Now, the rule tests handle the special behaviour by modifying the test proof directly. --- carcara/src/checker/mod.rs | 13 ++++------ carcara/src/checker/parallel/mod.rs | 11 +++----- carcara/src/checker/rules/mod.rs | 39 ++++++++++++++++++++--------- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 9aba6f06..745d1b61 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -49,7 +49,6 @@ impl fmt::Debug for CheckerStatistics<'_, C pub struct Config { strict: bool, skip_unknown_rules: bool, - is_running_test: bool, lia_options: Option, } @@ -204,7 +203,7 @@ impl<'c> ProofChecker<'c> { } } } - if self.config.is_running_test || self.reached_empty_clause { + if self.reached_empty_clause { Ok(self.is_holey) } else { Err(Error::DoesNotReachEmptyClause) @@ -255,12 +254,10 @@ impl<'c> ProofChecker<'c> { ) -> bool { let time = Instant::now(); - // Some subproofs contain `assume` commands inside them. 
These don't refer - // to the original problem premises, so we ignore the `assume` command if - // it is inside a subproof. Since the unit tests for the rules don't define the - // original problem, but sometimes use `assume` commands, we also skip the - // command if we are in a testing context. - if self.config.is_running_test || iter.is_in_subproof() { + // Some subproofs contain `assume` commands inside them. These don't refer to the original + // problem premises, but are instead local assumptions that are discharged by the subproof's + // final step, so we ignore the `assume` command if it is inside a subproof. + if iter.is_in_subproof() { if let Some(elaborator) = &mut self.elaborator { elaborator.assume(term); } diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 40c1815d..d9fc8d77 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -336,7 +336,7 @@ impl<'c> ParallelProofChecker<'c> { } // Returns Ok(reached empty clause, isHoley) - if self.config.is_running_test || self.reached_empty_clause { + if self.reached_empty_clause { Ok((true, self.is_holey)) } else { Ok((false, self.is_holey)) @@ -353,12 +353,9 @@ impl<'c> ParallelProofChecker<'c> { ) -> bool { let time = Instant::now(); - // Some subproofs contain `assume` commands inside them. These don't refer - // to the original problem premises, so we ignore the `assume` command if - // it is inside a subproof. Since the unit tests for the rules don't define the - // original problem, but sometimes use `assume` commands, we also skip the - // command if we are in a testing context. 
- if self.config.is_running_test || iter.is_in_subproof() { + // Similarly to the single-threaded checker, we ignore `assume` commands that are inside + // subproofs + if iter.is_in_subproof() { return true; } diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index a2714c28..819ad8b9 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -159,7 +159,7 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { for (i, (proof, expected)) in cases.iter().enumerate() { // This parses the definitions again for every case, which is not ideal - let (prelude, parsed, mut pool) = parse_instance( + let (prelude, mut proof, mut pool) = parse_instance( Cursor::new(definitions), Cursor::new(proof), true, @@ -167,17 +167,32 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { false, ) .unwrap_or_else(|e| panic!("parser error during test \"{}\": {}", test_name, e)); - let mut checker = ProofChecker::new( - &mut pool, - Config { - strict: false, - skip_unknown_rules: false, - is_running_test: true, - lia_options: None, - }, - &prelude, - ); - let got = checker.check(&parsed).is_ok(); + + // Since rule tests often use `assume` commands to introduce premises, we search the proof + // for all `assume`d terms and retroactively add them as the problem premises, to avoid + // checker errors on the `assume`s + proof.premises = proof + .commands + .iter() + .filter_map(|c| match c { + ProofCommand::Assume { term, .. 
} => Some(term.clone()), + _ => None, + }) + .collect(); + + // All proofs must eventually reach the empty clause, so to avoid having to add a dummy + // `(step end (cl) :rule hole)` to every rule test, we add this dummy step here + proof.commands.push(ProofCommand::Step(ProofStep { + id: "end".into(), + clause: Vec::new(), + rule: "hole".into(), + premises: Vec::new(), + args: Vec::new(), + discharge: Vec::new(), + })); + + let mut checker = ProofChecker::new(&mut pool, Config::new(), &prelude); + let got = checker.check(&proof).is_ok(); assert_eq!( *expected, got, "test case \"{}\" index {} failed", From 271152c6bd4f71e8daaa186e0c5b9df5b01b4bd3 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 11 Aug 2023 12:34:23 -0300 Subject: [PATCH 54/70] Also test parallel checker in integration tests --- carcara/tests/test_example_files.rs | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/carcara/tests/test_example_files.rs b/carcara/tests/test_example_files.rs index 4178ca9c..5dfb2366 100644 --- a/carcara/tests/test_example_files.rs +++ b/carcara/tests/test_example_files.rs @@ -4,6 +4,34 @@ use std::{ path::{Path, PathBuf}, }; +fn run_parallel_checker_test( + problem_path: &Path, + proof_path: &Path, + num_threads: usize, +) -> CarcaraResult<()> { + use checker::Config; + use std::sync::Arc; + + let (prelude, proof, pool) = parser::parse_instance( + io::BufReader::new(fs::File::open(problem_path)?), + io::BufReader::new(fs::File::open(proof_path)?), + false, + false, + false, + )?; + + let (scheduler, schedule_context_usage) = checker::Scheduler::new(num_threads, &proof); + let mut checker = checker::ParallelProofChecker::new( + Arc::new(pool), + Config::new(), + &prelude, + &schedule_context_usage, + 128 * 1024 * 1024, + ); + checker.check(&proof, &scheduler)?; + Ok(()) +} + fn run_test(problem_path: &Path, proof_path: &Path) -> CarcaraResult<()> { use checker::Config; @@ -35,6 +63,11 @@ fn run_test(problem_path: &Path, proof_path: 
&Path) -> CarcaraResult<()> { "elaboration was not idempotent!" ); + // We also test the parallel checker, with different values for the number of threads + run_parallel_checker_test(problem_path, proof_path, 1)?; + run_parallel_checker_test(problem_path, proof_path, 4)?; + run_parallel_checker_test(problem_path, proof_path, 16)?; + Ok(()) } From 4d2a059ffbe9c4ced65e372e798d6bfa266a69fb Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 11 Aug 2023 12:46:35 -0300 Subject: [PATCH 55/70] Avoid holding two copies of each term in the term pool Now, instead of storing terms with a `HashMap>`, the pool uses a hash set. --- carcara/src/ast/pool/advanced.rs | 31 +++------------ carcara/src/ast/pool/mod.rs | 42 ++++++-------------- carcara/src/ast/pool/storage.rs | 66 ++++++++++++++++++++++++++++++++ carcara/src/parser/tests.rs | 6 +-- 4 files changed, 86 insertions(+), 59 deletions(-) create mode 100644 carcara/src/ast/pool/storage.rs diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index d50d5b2d..6eb8b9cf 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -47,23 +47,14 @@ impl TermPool for ContextPool { } fn add(&mut self, term: Term) -> Rc { - use std::collections::hash_map::Entry; - // If the global pool has the term if let Some(entry) = self.global_pool.terms.get(&term) { - entry.clone() - } else { - let mut ctx_guard = self.storage.write().unwrap(); - match ctx_guard.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - ctx_guard.compute_sort(&t); - t - } - } + return entry.clone(); } + let mut ctx_guard = self.storage.write().unwrap(); + let term = ctx_guard.terms.add(term); + ctx_guard.compute_sort(&term); + term } fn sort(&self, term: &Rc) -> Rc { @@ -125,8 +116,6 @@ impl TermPool for LocalPool { } fn add(&mut self, term: Term) 
-> Rc { - use std::collections::hash_map::Entry; - // If there is a constant pool and has the term if let Some(entry) = self.ctx_pool.global_pool.terms.get(&term) { entry.clone() @@ -135,15 +124,7 @@ impl TermPool for LocalPool { else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { entry.clone() } else { - match self.storage.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let t = vacant_entry.insert(Rc::new(term)).clone(); - self.storage.compute_sort(&t); - t - } - } + self.storage.add(term) } } diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index e2f42948..d36a74af 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -1,10 +1,12 @@ //! This module implements `TermPool`, a structure that stores terms and implements hash consing. pub mod advanced; +mod storage; use super::{Rc, Sort, Term}; use crate::ast::Constant; use ahash::{AHashMap, AHashSet}; +use storage::Storage; pub trait TermPool { /// Returns the term corresponding to the boolean constant `true`. @@ -51,7 +53,7 @@ pub trait TermPool { /// [`PrimitivePool::sort`]) or its free variables (see [`PrimitivePool::free_vars`]). pub struct PrimitivePool { /// A map of the terms in the pool. - pub(crate) terms: AHashMap>, + pub(crate) terms: Storage, pub(crate) free_vars_cache: AHashMap, AHashSet>>, pub(crate) sorts_cache: AHashMap, Rc>, pub(crate) bool_true: Rc, @@ -68,12 +70,12 @@ impl PrimitivePool { /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` /// and `false`, as well as the `Bool` sort. 
pub fn new() -> Self { - let mut terms = AHashMap::new(); + let mut terms = Storage::new(); let mut sorts_cache = AHashMap::new(); - let bool_sort = Self::add_term_to_map(&mut terms, Term::Sort(Sort::Bool)); + let bool_sort = terms.add(Term::Sort(Sort::Bool)); - let [bool_true, bool_false] = ["true", "false"] - .map(|b| Self::add_term_to_map(&mut terms, Term::new_var(b, bool_sort.clone()))); + let [bool_true, bool_false] = + ["true", "false"].map(|b| terms.add(Term::new_var(b, bool_sort.clone()))); sorts_cache.insert(bool_false.clone(), bool_sort.clone()); sorts_cache.insert(bool_true.clone(), bool_sort.clone()); @@ -88,18 +90,6 @@ impl PrimitivePool { } } - fn add_term_to_map(terms_map: &mut AHashMap>, term: Term) -> Rc { - use std::collections::hash_map::Entry; - - match terms_map.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - vacant_entry.insert(Rc::new(term)).clone() - } - } - } - /// Computes the sort of a term and adds it to the sort cache. 
fn compute_sort(&mut self, term: &Rc) -> Rc { use super::Operator; @@ -164,8 +154,8 @@ impl PrimitivePool { Sort::Function(result) } }; - let sorted_term = Self::add_term_to_map(&mut self.terms, Term::Sort(result)); - self.sorts_cache.insert(term.clone(), sorted_term); + let sort = self.terms.add(Term::Sort(result)); + self.sorts_cache.insert(term.clone(), sort); self.sorts_cache[term].clone() } @@ -174,8 +164,6 @@ impl PrimitivePool { term: Term, prior_pools: [&PrimitivePool; N], ) -> Rc { - use std::collections::hash_map::Entry; - for p in prior_pools { // If this prior pool has the term if let Some(entry) = p.terms.get(&term) { @@ -183,15 +171,7 @@ impl PrimitivePool { } } - match self.terms.entry(term) { - Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), - Entry::Vacant(vacant_entry) => { - let term = vacant_entry.key().clone(); - let term = vacant_entry.insert(Rc::new(term)).clone(); - self.compute_sort(&term); - term - } - } + self.add(term) } fn sort_with_priorities( @@ -283,7 +263,7 @@ impl TermPool for PrimitivePool { } fn add(&mut self, term: Term) -> Rc { - let term = Self::add_term_to_map(&mut self.terms, term); + let term = self.terms.add(term); self.compute_sort(&term); term } diff --git a/carcara/src/ast/pool/storage.rs b/carcara/src/ast/pool/storage.rs new file mode 100644 index 00000000..fb611e51 --- /dev/null +++ b/carcara/src/ast/pool/storage.rs @@ -0,0 +1,66 @@ +//* The behaviour of the term pool could be modeled by a hash map from `Term` to `Rc`, but +//* that would require allocating two copies of each term, one in the key of the hash map, and one +//* inside the `Rc`. Instead, we store a hash set of `Rc`s, combining the key and the value +//* into a single object. We access this hash set using a `&Term`, and if the entry is present, we +//* clone it; otherwise, we allocate a new `Rc`. 
+ +use crate::ast::*; +use std::borrow::Borrow; + +/// Since `ast::Rc` intentionally implements hashing and equality by reference (instead of by +/// value), we cannot safely implement `Borrow` for `Rc`, so we cannot access a +/// `HashSet>` using a `&Term` as a key. To go around that, we use this struct that wraps +/// an `Rc` and that re-implements hashing and equality by value, meaning we can implement +/// `Borrow` for it, and use it as the contents of the hash set instead. +#[derive(Debug, Clone, Eq)] +struct ByValue(Rc); + +impl PartialEq for ByValue { + fn eq(&self, other: &Self) -> bool { + self.0.as_ref() == other.0.as_ref() + } +} + +impl Hash for ByValue { + fn hash(&self, state: &mut H) { + self.0.as_ref().hash(state); + } +} + +impl Borrow for ByValue { + fn borrow(&self) -> &Term { + self.0.as_ref() + } +} + +#[derive(Debug, Clone, Default)] +pub struct Storage(AHashSet); + +impl Storage { + pub fn new() -> Self { + Self::default() + } + + pub fn add(&mut self, term: Term) -> Rc { + // If the `hash_set_entry` feature was stable, this would be much simpler to do using + // `get_or_insert_with` (and would avoid rehashing the term) + match self.0.get(&term) { + Some(t) => t.0.clone(), + None => { + let result = Rc::new(term); + self.0.insert(ByValue(result.clone())); + result + } + } + } + + pub fn get(&self, term: &Term) -> Option<&Rc> { + self.0.get(term).map(|t| &t.0) + } + + // This method is only necessary for the hash consing tests + #[cfg(test)] + pub fn into_vec(self) -> Vec> { + self.0.into_iter().map(|ByValue(t)| t).collect() + } +} diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index a5ace572..0f493dbc 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -94,9 +94,9 @@ fn test_hash_consing() { .into_iter() .collect::>(); - assert_eq!(pool.terms.len(), expected.len()); - - for got in pool.terms.keys() { + let pool_terms = pool.terms.into_vec(); + assert_eq!(pool_terms.len(), expected.len()); + 
for got in pool_terms { let formatted: &str = &format!("{}", got); assert!(expected.contains(formatted), "{}", formatted); } From 9b762cc7a118d37d197b528517913b50fb6dd12d Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Fri, 11 Aug 2023 13:19:52 -0300 Subject: [PATCH 56/70] Rename `{ContextPool,LocalPool}::storage` for clarity --- carcara/src/ast/pool/advanced.rs | 38 ++++++++++++++++---------------- carcara/src/ast/pool/mod.rs | 18 +++++++-------- carcara/src/parser/tests.rs | 2 +- 3 files changed, 28 insertions(+), 30 deletions(-) diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index 6eb8b9cf..ae47f86f 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -5,7 +5,7 @@ use std::sync::{Arc, RwLock}; pub struct ContextPool { pub(crate) global_pool: Arc, - pub(crate) storage: Arc>, + pub(crate) inner: Arc>, } impl Default for ContextPool { @@ -18,21 +18,21 @@ impl ContextPool { pub fn new() -> Self { Self { global_pool: Arc::new(PrimitivePool::new()), - storage: Arc::new(RwLock::new(PrimitivePool::new())), + inner: Arc::new(RwLock::new(PrimitivePool::new())), } } pub fn from_global(global_pool: &Arc) -> Self { Self { global_pool: global_pool.clone(), - storage: Arc::new(RwLock::new(PrimitivePool::new())), + inner: Arc::new(RwLock::new(PrimitivePool::new())), } } pub fn from_previous(ctx_pool: &Self) -> Self { Self { global_pool: ctx_pool.global_pool.clone(), - storage: ctx_pool.storage.clone(), + inner: ctx_pool.inner.clone(), } } } @@ -48,11 +48,11 @@ impl TermPool for ContextPool { fn add(&mut self, term: Term) -> Rc { // If the global pool has the term - if let Some(entry) = self.global_pool.terms.get(&term) { + if let Some(entry) = self.global_pool.storage.get(&term) { return entry.clone(); } - let mut ctx_guard = self.storage.write().unwrap(); - let term = ctx_guard.terms.add(term); + let mut ctx_guard = self.inner.write().unwrap(); + let term = ctx_guard.storage.add(term); 
ctx_guard.compute_sort(&term); term } @@ -63,12 +63,12 @@ impl TermPool for ContextPool { } // A sort inserted by context else { - self.storage.read().unwrap().sorts_cache[term].clone() + self.inner.read().unwrap().sorts_cache[term].clone() } } fn free_vars(&mut self, term: &Rc) -> AHashSet> { - self.storage + self.inner .write() .unwrap() .free_vars_with_priorities(term, [&self.global_pool]) @@ -79,7 +79,7 @@ impl TermPool for ContextPool { pub struct LocalPool { pub(crate) ctx_pool: ContextPool, - pub(crate) storage: PrimitivePool, + pub(crate) inner: PrimitivePool, } impl Default for LocalPool { @@ -92,7 +92,7 @@ impl LocalPool { pub fn new() -> Self { Self { ctx_pool: ContextPool::new(), - storage: PrimitivePool::new(), + inner: PrimitivePool::new(), } } @@ -101,7 +101,7 @@ impl LocalPool { pub fn from_previous(ctx_pool: &ContextPool) -> Self { Self { ctx_pool: ContextPool::from_previous(ctx_pool), - storage: PrimitivePool::new(), + inner: PrimitivePool::new(), } } } @@ -117,14 +117,14 @@ impl TermPool for LocalPool { fn add(&mut self, term: Term) -> Rc { // If there is a constant pool and has the term - if let Some(entry) = self.ctx_pool.global_pool.terms.get(&term) { + if let Some(entry) = self.ctx_pool.global_pool.storage.get(&term) { entry.clone() } // If this term was inserted by the context - else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(&term) { + else if let Some(entry) = self.ctx_pool.inner.read().unwrap().storage.get(&term) { entry.clone() } else { - self.storage.add(term) + self.inner.add(term) } } @@ -133,19 +133,19 @@ impl TermPool for LocalPool { sort.clone() } // A sort inserted by context - else if let Some(entry) = self.ctx_pool.storage.read().unwrap().terms.get(term) { + else if let Some(entry) = self.ctx_pool.inner.read().unwrap().storage.get(term) { entry.clone() } else { - self.storage.sorts_cache[term].clone() + self.inner.sorts_cache[term].clone() } } fn free_vars(&mut self, term: &Rc) -> AHashSet> { - 
self.storage.free_vars_with_priorities( + self.inner.free_vars_with_priorities( term, [ &self.ctx_pool.global_pool, - &self.ctx_pool.storage.read().unwrap(), + &self.ctx_pool.inner.read().unwrap(), ], ) } diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index d36a74af..af2c294d 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -52,8 +52,7 @@ pub trait TermPool { /// This struct also provides other utility methods, like computing the sort of a term (see /// [`PrimitivePool::sort`]) or its free variables (see [`PrimitivePool::free_vars`]). pub struct PrimitivePool { - /// A map of the terms in the pool. - pub(crate) terms: Storage, + pub(crate) storage: Storage, pub(crate) free_vars_cache: AHashMap, AHashSet>>, pub(crate) sorts_cache: AHashMap, Rc>, pub(crate) bool_true: Rc, @@ -70,19 +69,19 @@ impl PrimitivePool { /// Constructs a new `TermPool`. This new pool will already contain the boolean constants `true` /// and `false`, as well as the `Bool` sort. 
pub fn new() -> Self { - let mut terms = Storage::new(); + let mut storage = Storage::new(); let mut sorts_cache = AHashMap::new(); - let bool_sort = terms.add(Term::Sort(Sort::Bool)); + let bool_sort = storage.add(Term::Sort(Sort::Bool)); let [bool_true, bool_false] = - ["true", "false"].map(|b| terms.add(Term::new_var(b, bool_sort.clone()))); + ["true", "false"].map(|b| storage.add(Term::new_var(b, bool_sort.clone()))); sorts_cache.insert(bool_false.clone(), bool_sort.clone()); sorts_cache.insert(bool_true.clone(), bool_sort.clone()); sorts_cache.insert(bool_sort.clone(), bool_sort); Self { - terms, + storage, free_vars_cache: AHashMap::new(), sorts_cache, bool_true, @@ -154,7 +153,7 @@ impl PrimitivePool { Sort::Function(result) } }; - let sort = self.terms.add(Term::Sort(result)); + let sort = self.storage.add(Term::Sort(result)); self.sorts_cache.insert(term.clone(), sort); self.sorts_cache[term].clone() } @@ -166,11 +165,10 @@ impl PrimitivePool { ) -> Rc { for p in prior_pools { // If this prior pool has the term - if let Some(entry) = p.terms.get(&term) { + if let Some(entry) = p.storage.get(&term) { return entry.clone(); } } - self.add(term) } @@ -263,7 +261,7 @@ impl TermPool for PrimitivePool { } fn add(&mut self, term: Term) -> Rc { - let term = self.terms.add(term); + let term = self.storage.add(term); self.compute_sort(&term); term } diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index 0f493dbc..c2ba06bf 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -94,7 +94,7 @@ fn test_hash_consing() { .into_iter() .collect::>(); - let pool_terms = pool.terms.into_vec(); + let pool_terms = pool.storage.into_vec(); assert_eq!(pool_terms.len(), expected.len()); for got in pool_terms { let formatted: &str = &format!("{}", got); From de1e65485f9d63704564216dbe5c6c0a1e34ed26 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 14 Aug 2023 12:15:42 -0300 Subject: [PATCH 57/70] Move parser configuration to 
separate struct --- carcara/src/ast/iter.rs | 2 +- carcara/src/ast/macros.rs | 4 +-- carcara/src/ast/substitution.rs | 3 +- carcara/src/checker/lia_generic.rs | 2 +- carcara/src/checker/rules/mod.rs | 13 +++----- carcara/src/lib.rs | 39 +++++++++++------------ carcara/src/parser/mod.rs | 49 +++++++++++++---------------- carcara/src/parser/tests.rs | 18 +++++++---- carcara/tests/test_example_files.rs | 8 ++--- cli/src/benchmarking.rs | 15 ++++----- cli/src/main.rs | 23 +++++++------- 11 files changed, 83 insertions(+), 93 deletions(-) diff --git a/carcara/src/ast/iter.rs b/carcara/src/ast/iter.rs index 46e3e8ac..e3788aab 100644 --- a/carcara/src/ast/iter.rs +++ b/carcara/src/ast/iter.rs @@ -28,7 +28,7 @@ use super::*; /// (step t5 (cl) :rule resolution :premises (t4 h1 h2)) /// " /// .as_bytes(); -/// let (_, proof, _) : (ast::ProblemPrelude, ast::Proof, ast::pool::PrimitivePool) = parser::parse_instance("".as_bytes(), proof, true, false, false)?; +/// let (_, proof, _) = parser::parse_instance("".as_bytes(), proof, parser::Config::new())?; /// let ids: Vec<_> = proof.iter().map(|c| c.id()).collect(); /// assert_eq!(ids, ["h1", "h2", "t3", "t3.t1", "t3.t2", "t3", "t4", "t5"]); /// # Ok(()) diff --git a/carcara/src/ast/macros.rs b/carcara/src/ast/macros.rs index ce27a98f..42de35cf 100644 --- a/carcara/src/ast/macros.rs +++ b/carcara/src/ast/macros.rs @@ -32,7 +32,7 @@ /// # use carcara::{ast::*, match_term, parser::*}; /// # pub fn parse_term(input: &str) -> Rc { /// # let mut pool = PrimitivePool::new(); -/// # let mut parser = Parser::new(&mut pool, input.as_bytes(), true, false, false).unwrap(); +/// # let mut parser = Parser::new(&mut pool, Config::new(), input.as_bytes()).unwrap(); /// # parser.parse_term().unwrap() /// # } /// # let t = parse_term("(and (=> false false) (> (+ 0 0) 0))"); @@ -52,7 +52,7 @@ /// # use carcara::{ast::*, match_term, parser::*}; /// # pub fn parse_term(input: &str) -> Rc { /// # let mut pool = PrimitivePool::new(); -/// # let mut 
parser = Parser::new(&mut pool, input.as_bytes(), true, false, false).unwrap(); +/// # let mut parser = Parser::new(&mut pool, Config::new(), input.as_bytes()).unwrap(); /// # parser.parse_term().unwrap() /// # } /// # let t = parse_term("(forall ((x Int) (y Int)) (> x y))"); diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index e3cfd252..ebae8b74 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -357,8 +357,7 @@ mod tests { fn run_test(definitions: &str, original: &str, x: &str, t: &str, result: &str) { let mut pool = PrimitivePool::new(); - let mut parser = - Parser::new(&mut pool, definitions.as_bytes(), true, false, false).unwrap(); + let mut parser = Parser::new(&mut pool, Config::new(), definitions.as_bytes()).unwrap(); parser.parse_problem().unwrap(); let [original, x, t, result] = [original, x, t, result].map(|s| { diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index 0af99983..a13e3cf2 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -118,7 +118,7 @@ fn parse_and_check_solver_proof( problem: &[u8], proof: &[u8], ) -> CarcaraResult> { - let mut parser = parser::Parser::new(pool, problem, true, false, true)?; + let mut parser = parser::Parser::new(pool, parser::Config::new(), problem)?; let (prelude, premises) = parser.parse_problem()?; parser.reset(proof)?; let commands = parser.parse_proof()?; diff --git a/carcara/src/checker/rules/mod.rs b/carcara/src/checker/rules/mod.rs index 819ad8b9..6ba0962f 100644 --- a/carcara/src/checker/rules/mod.rs +++ b/carcara/src/checker/rules/mod.rs @@ -151,20 +151,15 @@ fn assert_is_bool_constant(got: &Rc, expected: bool) -> RuleResult { #[cfg(test)] fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { - use crate::{ - checker::{Config, ProofChecker}, - parser::parse_instance, - }; + use crate::{checker, parser}; use std::io::Cursor; for (i, (proof, 
expected)) in cases.iter().enumerate() { // This parses the definitions again for every case, which is not ideal - let (prelude, mut proof, mut pool) = parse_instance( + let (prelude, mut proof, mut pool) = parser::parse_instance( Cursor::new(definitions), Cursor::new(proof), - true, - false, - false, + parser::Config::new(), ) .unwrap_or_else(|e| panic!("parser error during test \"{}\": {}", test_name, e)); @@ -191,7 +186,7 @@ fn run_tests(test_name: &str, definitions: &str, cases: &[(&str, bool)]) { discharge: Vec::new(), })); - let mut checker = ProofChecker::new(&mut pool, Config::new(), &prelude); + let mut checker = checker::ProofChecker::new(&mut pool, checker::Config::new(), &prelude); let got = checker.check(&proof).is_ok(); assert_eq!( *expected, got, diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index ae82f6f7..8134198a 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -152,13 +152,12 @@ pub fn check(problem: T, proof: T, options: CarcaraOptions) -> R // Parsing let total = Instant::now(); - let (prelude, proof, mut pool) = parser::parse_instance( - problem, - proof, - options.apply_function_defs, - options.expand_lets, - options.allow_int_real_subtyping, - )?; + let config = parser::Config { + apply_function_defs: options.apply_function_defs, + expand_lets: options.expand_lets, + allow_int_real_subtyping: options.allow_int_real_subtyping, + }; + let (prelude, proof, mut pool) = parser::parse_instance(problem, proof, config)?; run_measures.parsing = total.elapsed(); let config = checker::Config::new() @@ -218,13 +217,12 @@ pub fn check_parallel( // Parsing let total = Instant::now(); - let (prelude, proof, pool) = parser::parse_instance( - problem, - proof, - options.apply_function_defs, - options.expand_lets, - options.allow_int_real_subtyping, - )?; + let config = parser::Config { + apply_function_defs: options.apply_function_defs, + expand_lets: options.expand_lets, + allow_int_real_subtyping: options.allow_int_real_subtyping, + }; + 
let (prelude, proof, pool) = parser::parse_instance(problem, proof, config)?; run_measures.parsing = total.elapsed(); let config = checker::Config::new() @@ -289,13 +287,12 @@ pub fn check_and_elaborate( // Parsing let total = Instant::now(); - let (prelude, proof, mut pool) = parser::parse_instance( - problem, - proof, - options.apply_function_defs, - options.expand_lets, - options.allow_int_real_subtyping, - )?; + let config = parser::Config { + apply_function_defs: options.apply_function_defs, + expand_lets: options.expand_lets, + allow_int_real_subtyping: options.allow_int_real_subtyping, + }; + let (prelude, proof, mut pool) = parser::parse_instance(problem, proof, config)?; run_measures.parsing = total.elapsed(); let config = checker::Config::new() diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index a09feb79..240c224d 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -17,6 +17,19 @@ use error::assert_num_args; use rug::Integer; use std::{io::BufRead, str::FromStr}; +#[derive(Debug, Default, Clone, Copy)] +pub struct Config { + pub apply_function_defs: bool, + pub expand_lets: bool, + pub allow_int_real_subtyping: bool, +} + +impl Config { + pub fn new() -> Self { + Self::default() + } +} + /// Parses an SMT problem instance (in the SMT-LIB format) and its associated proof (in the Alethe /// format). 
/// @@ -25,18 +38,10 @@ use std::{io::BufRead, str::FromStr}; pub fn parse_instance( problem: T, proof: T, - apply_function_defs: bool, - expand_lets: bool, - allow_int_real_subtyping: bool, + config: Config, ) -> CarcaraResult<(ProblemPrelude, Proof, PrimitivePool)> { let mut pool = PrimitivePool::new(); - let mut parser = Parser::new( - &mut pool, - problem, - apply_function_defs, - expand_lets, - allow_int_real_subtyping, - )?; + let mut parser = Parser::new(&mut pool, config, problem)?; let (prelude, premises) = parser.parse_problem()?; parser.reset(proof)?; let commands = parser.parse_proof()?; @@ -81,28 +86,20 @@ struct ParserState { /// A parser for the Alethe proof format. pub struct Parser<'a, R> { pool: &'a mut PrimitivePool, + config: Config, lexer: Lexer, current_token: Token, current_position: Position, state: ParserState, interpret_integers_as_reals: bool, - apply_function_defs: bool, - expand_lets: bool, problem: Option<(ProblemPrelude, AHashSet>)>, - allow_int_real_subtyping: bool, } impl<'a, R: BufRead> Parser<'a, R> { /// Constructs a new `Parser` from a type that implements `BufRead`. /// /// This operation can fail if there is an IO or lexer error on the first token. 
- pub fn new( - pool: &'a mut PrimitivePool, - input: R, - apply_function_defs: bool, - expand_lets: bool, - allow_int_real_subtyping: bool, - ) -> CarcaraResult { + pub fn new(pool: &'a mut PrimitivePool, config: Config, input: R) -> CarcaraResult { let mut state = ParserState::default(); let bool_sort = pool.add(Term::Sort(Sort::Bool)); for iden in ["true", "false"] { @@ -113,15 +110,13 @@ impl<'a, R: BufRead> Parser<'a, R> { let (current_token, current_position) = lexer.next_token()?; Ok(Parser { pool, + config, lexer, current_token, current_position, state, interpret_integers_as_reals: false, - apply_function_defs, - expand_lets, problem: None, - allow_int_real_subtyping, }) } @@ -219,7 +214,7 @@ impl<'a, R: BufRead> Parser<'a, R> { // All the arguments must be either Int or Real. Also, if we are not allowing // Int/Real subtyping, all arguments must have the same sort - if self.allow_int_real_subtyping { + if self.config.allow_int_real_subtyping { for s in sorts { SortError::assert_one_of(&[Sort::Int, Sort::Real], s.as_sort().unwrap())?; } @@ -251,7 +246,7 @@ impl<'a, R: BufRead> Parser<'a, R> { // Normally, the `/` operator may only receive Real arguments, but if we are // allowing Int/Real subtyping, it may also receive Ints - if self.allow_int_real_subtyping { + if self.config.allow_int_real_subtyping { for s in sorts { SortError::assert_one_of(&[Sort::Int, Sort::Real], s.as_sort().unwrap())?; } @@ -528,7 +523,7 @@ impl<'a, R: BufRead> Parser<'a, R> { Token::ReservedWord(Reserved::DefineFun) => { let (name, func_def) = self.parse_define_fun()?; - if self.apply_function_defs { + if self.config.apply_function_defs { self.state.function_defs.insert(name, func_def); } else { // If `self.apply_function_defs` is false, we instead add the function name @@ -1061,7 +1056,7 @@ impl<'a, R: BufRead> Parser<'a, R> { self.expect_token(Token::CloseParen)?; self.state.symbol_table.pop_scope(); - if self.expand_lets { + if self.config.expand_lets { let substitution = 
bindings .into_iter() .map(|(name, value)| { diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index c2ba06bf..1cc4bac0 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -7,13 +7,19 @@ use crate::ast::pool::PrimitivePool; const ERROR_MESSAGE: &str = "parser error during test"; +const TEST_CONFIG: Config = Config { + // Some tests need function definitions to be applied + apply_function_defs: true, + expand_lets: false, + allow_int_real_subtyping: false, +}; + pub fn parse_terms( pool: &mut PrimitivePool, definitions: &str, terms: [&str; N], ) -> [Rc; N] { - let mut parser = - Parser::new(pool, definitions.as_bytes(), true, false, false).expect(ERROR_MESSAGE); + let mut parser = Parser::new(pool, TEST_CONFIG, definitions.as_bytes()).expect(ERROR_MESSAGE); parser.parse_problem().expect(ERROR_MESSAGE); terms.map(|s| { @@ -23,7 +29,7 @@ pub fn parse_terms( } pub fn parse_term(pool: &mut PrimitivePool, input: &str) -> Rc { - Parser::new(pool, input.as_bytes(), true, false, false) + Parser::new(pool, TEST_CONFIG, input.as_bytes()) .and_then(|mut parser| parser.parse_term()) .expect(ERROR_MESSAGE) } @@ -32,14 +38,14 @@ pub fn parse_term(pool: &mut PrimitivePool, input: &str) -> Rc { /// panics if no error is encountered. pub fn parse_term_err(input: &str) -> Error { let mut pool = PrimitivePool::new(); - Parser::new(&mut pool, input.as_bytes(), true, false, false) + Parser::new(&mut pool, TEST_CONFIG, input.as_bytes()) .and_then(|mut p| p.parse_term()) .expect_err("expected error") } /// Parses a proof from a `&str`. Panics if any error is encountered. 
pub fn parse_proof(pool: &mut PrimitivePool, input: &str) -> Proof { - let commands = Parser::new(pool, input.as_bytes(), true, false, false) + let commands = Parser::new(pool, TEST_CONFIG, input.as_bytes()) .expect(ERROR_MESSAGE) .parse_proof() .expect(ERROR_MESSAGE); @@ -65,7 +71,7 @@ fn test_hash_consing() { ) (* 2 2) )"; - let mut parser = Parser::new(&mut pool, input.as_bytes(), true, false, false).unwrap(); + let mut parser = Parser::new(&mut pool, Config::new(), input.as_bytes()).unwrap(); parser.parse_term().unwrap(); // We expect this input to result in 7 unique terms after parsing: diff --git a/carcara/tests/test_example_files.rs b/carcara/tests/test_example_files.rs index 5dfb2366..17385630 100644 --- a/carcara/tests/test_example_files.rs +++ b/carcara/tests/test_example_files.rs @@ -15,9 +15,7 @@ fn run_parallel_checker_test( let (prelude, proof, pool) = parser::parse_instance( io::BufReader::new(fs::File::open(problem_path)?), io::BufReader::new(fs::File::open(proof_path)?), - false, - false, - false, + parser::Config::new(), )?; let (scheduler, schedule_context_usage) = checker::Scheduler::new(num_threads, &proof); @@ -38,9 +36,7 @@ fn run_test(problem_path: &Path, proof_path: &Path) -> CarcaraResult<()> { let (prelude, proof, mut pool) = parser::parse_instance( io::BufReader::new(fs::File::open(problem_path)?), io::BufReader::new(fs::File::open(proof_path)?), - true, - false, - false, + parser::Config::new(), )?; // First, we check the proof normally diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index 7ddc3a50..72ba8518 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -1,8 +1,6 @@ use carcara::{ benchmarking::{CollectResults, CsvBenchmarkResults, RunMeasurement}, - checker, - parser::parse_instance, - CarcaraOptions, + checker, parser, CarcaraOptions, }; use crossbeam_queue::ArrayQueue; use std::{ @@ -39,12 +37,15 @@ fn run_job( let total = Instant::now(); let parsing = Instant::now(); - let (prelude, proof, mut 
pool) = parse_instance( + let config = parser::Config { + apply_function_defs: options.apply_function_defs, + expand_lets: options.expand_lets, + allow_int_real_subtyping: options.allow_int_real_subtyping, + }; + let (prelude, proof, mut pool) = parser::parse_instance( BufReader::new(File::open(job.problem_file)?), BufReader::new(File::open(job.proof_file)?), - options.apply_function_defs, - options.expand_lets, - options.allow_int_real_subtyping, + config, )?; let parsing = parsing.elapsed(); diff --git a/cli/src/main.rs b/cli/src/main.rs index d2a04727..0c3a3d0b 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -379,9 +379,11 @@ fn parse_command(options: ParseCommandOptions) -> CliResult<()> { let (_, proof, _) = parser::parse_instance( problem, proof, - options.parsing.apply_function_defs, - options.parsing.expand_let_bindings, - options.parsing.allow_int_real_subtyping, + parser::Config { + apply_function_defs: options.parsing.apply_function_defs, + expand_lets: options.parsing.expand_let_bindings, + allow_int_real_subtyping: options.parsing.allow_int_real_subtyping, + }, ) .map_err(carcara::Error::from)?; print_proof(&proof.commands, options.printing.use_sharing)?; @@ -473,14 +475,13 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { fn slice_command(options: SliceCommandOption) -> CliResult<()> { let (problem, proof) = get_instance(&options.input)?; - let (_, proof, _) = parser::parse_instance( - problem, - proof, - options.parsing.apply_function_defs, - options.parsing.expand_let_bindings, - options.parsing.allow_int_real_subtyping, - ) - .map_err(carcara::Error::from)?; + let config = parser::Config { + apply_function_defs: options.parsing.apply_function_defs, + expand_lets: options.parsing.expand_let_bindings, + allow_int_real_subtyping: options.parsing.allow_int_real_subtyping, + }; + let (_, proof, _) = + parser::parse_instance(problem, proof, config).map_err(carcara::Error::from)?; let source_index = proof .commands From 
1e21b85d0571dbd67a916712988ed8d45dac01a1 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 14 Aug 2023 15:16:40 -0300 Subject: [PATCH 58/70] Improve memory usage of CSV benchmark Previously, the time measurement for each step of each proof held a copy of the step id, the file name, and the rule name. To avoid this, we now intern every string that the `CsvBenchmarkResults` encounters, meaning we only allocate each string once. This improves memory usage drastically. --- carcara/src/benchmarking/mod.rs | 58 +++++++++++++++++++++++++-------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index 36453b57..b192ae63 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -4,11 +4,12 @@ mod tests; pub use metrics::*; -use ahash::AHashMap; -use std::{fmt, io, time::Duration}; +use ahash::{AHashMap, AHashSet}; +use std::{fmt, hash::Hash, io, sync::Arc, time::Duration}; -fn combine_map(mut a: AHashMap, b: AHashMap) -> AHashMap +fn combine_map(mut a: AHashMap, b: AHashMap) -> AHashMap where + S: Eq + Hash, V: MetricsUnit, M: Metrics + Default, { @@ -256,10 +257,26 @@ impl OnlineBenchmarkResults { } } +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct InternedStepId { + pub(crate) file: Arc, + pub(crate) step_id: Arc, + pub(crate) rule: Arc, +} + +impl fmt::Display for InternedStepId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}:{} ({})", self.file, self.step_id, self.rule) + } +} + +type InternedRunId = (Arc, usize); + #[derive(Default)] pub struct CsvBenchmarkResults { - runs: AHashMap, - step_time_by_rule: AHashMap>, + strings: AHashSet>, + runs: AHashMap, + step_time_by_rule: AHashMap, OfflineMetrics>, is_holey: bool, num_errors: usize, } @@ -277,6 +294,17 @@ impl CsvBenchmarkResults { self.num_errors } + fn intern(&mut self, s: &str) -> Arc { + match self.strings.get(s) { + Some(interned) => interned.clone(), + None => 
{ + let result: Arc = Arc::from(s); + self.strings.insert(result.clone()); + result + } + } + } + pub fn write_csv( self, runs_dest: &mut dyn io::Write, @@ -287,7 +315,7 @@ impl CsvBenchmarkResults { } fn write_runs_csv( - data: AHashMap, + data: AHashMap, dest: &mut dyn io::Write, ) -> io::Result<()> { writeln!( @@ -321,7 +349,7 @@ impl CsvBenchmarkResults { } fn write_by_rule_csv( - data: AHashMap>, + data: AHashMap, OfflineMetrics>, dest: &mut dyn io::Write, ) -> io::Result<()> { let mut data: Vec<_> = data.into_iter().collect(); @@ -461,13 +489,14 @@ impl CollectResults for OnlineBenchmarkResults { impl CollectResults for CsvBenchmarkResults { fn add_step_measurement(&mut self, file: &str, step_id: &str, rule: &str, time: Duration) { - let id = StepId { - file: file.into(), - step_id: step_id.into(), - rule: rule.into(), + let rule = self.intern(rule); + let id = InternedStepId { + file: self.intern(file), + step_id: self.intern(step_id), + rule: rule.clone(), }; self.step_time_by_rule - .entry(rule.to_owned()) + .entry(rule) .or_default() .add_sample(&id, time); } @@ -478,8 +507,9 @@ impl CollectResults for CsvBenchmarkResults { fn add_polyeq_depth(&mut self, _: usize) {} - fn add_run_measurement(&mut self, id: &RunId, measurement: RunMeasurement) { - self.runs.insert(id.clone(), measurement); + fn add_run_measurement(&mut self, (file, i): &RunId, measurement: RunMeasurement) { + let id = (self.intern(file), *i); + self.runs.insert(id, measurement); } fn register_holey(&mut self) { From 9f78dcedd07faf21b73bee7cdefd707e22848663 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Wed, 23 Aug 2023 20:49:44 -0300 Subject: [PATCH 59/70] Use `indexmap` instead of `ahash` for determinism --- Cargo.lock | 118 +++++++++--------- carcara/Cargo.toml | 2 +- carcara/src/ast/context.rs | 2 +- carcara/src/ast/mod.rs | 4 +- carcara/src/ast/pool/advanced.rs | 6 +- carcara/src/ast/pool/mod.rs | 24 ++-- carcara/src/ast/pool/storage.rs | 2 +- carcara/src/ast/printer.rs | 8 +- 
carcara/src/ast/substitution.rs | 22 ++-- carcara/src/ast/tests.rs | 6 +- carcara/src/benchmarking/mod.rs | 23 ++-- carcara/src/checker/lia_generic.rs | 4 +- carcara/src/checker/mod.rs | 4 +- carcara/src/checker/parallel/mod.rs | 4 +- carcara/src/checker/rules/clausification.rs | 6 +- carcara/src/checker/rules/extras.rs | 6 +- .../src/checker/rules/linear_arithmetic.rs | 8 +- carcara/src/checker/rules/quantifier.rs | 18 +-- carcara/src/checker/rules/resolution.rs | 22 ++-- carcara/src/checker/rules/simplification.rs | 12 +- carcara/src/checker/rules/subproof.rs | 26 ++-- carcara/src/parser/mod.rs | 14 +-- carcara/src/parser/tests.rs | 6 +- carcara/src/utils.rs | 14 +-- cli/Cargo.toml | 1 - 25 files changed, 182 insertions(+), 180 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 938f6a3a..1e409fc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,18 +2,6 @@ # It is not intended for manual editing. version = 3 -[[package]] -name = "ahash" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" -dependencies = [ - "cfg-if", - "getrandom", - "once_cell", - "version_check", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -56,7 +44,7 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" name = "carcara" version = "1.0.0" dependencies = [ - "ahash", + "indexmap 2.0.0", "log", "rand", "rug", @@ -68,7 +56,6 @@ dependencies = [ name = "carcara-cli" version = "1.0.0" dependencies = [ - "ahash", "ansi_term", "atty", "carcara", @@ -87,15 +74,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "3.2.23" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", 
"bitflags", "clap_derive", "clap_lex", - "indexmap", + "indexmap 1.9.3", "once_cell", "strsim", "termcolor", @@ -104,9 +91,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.18" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", @@ -126,18 +113,18 @@ dependencies = [ [[package]] name = "const_format" -version = "0.2.30" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7309d9b4d3d2c0641e018d449232f2e28f1b22933c137f157d3dbc14228b8c0e" +checksum = "c990efc7a285731f9a4378d81aff2f0e85a2c8781a05ef0f8baa8dac54d0ff48" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.29" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f47bf7270cf70d370f8f98c1abb6d2d4cf60a6845d30e05bfb90c6568650" +checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6" dependencies = [ "proc-macro2", "quote", @@ -156,18 +143,24 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -198,9 +191,9 @@ dependencies = [ [[package]] name = "gmp-mpfr-sys" -version = "1.5.2" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b560063e2ffa8ce9c2ef9bf487f2944a97deca5b8de0b5bcd0ae6437ef8b75f" +checksum = "19c5c67d8c29fe87e3266e691dd60948e6e4df4496c53355ef3551142945721b" dependencies = [ "libc", "windows-sys", @@ -212,6 +205,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" + [[package]] name = "heck" version = "0.4.1" @@ -234,35 +233,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", ] [[package]] name = "libc" -version = "0.2.142" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "log" -version = "0.4.17" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", 
-] +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "os_str_bytes" -version = "6.5.0" +version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" +checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" [[package]] name = "ppv-lite86" @@ -302,18 +308,18 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.26" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -350,9 +356,9 @@ dependencies = [ [[package]] name = "rug" -version = "1.19.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555e8b44763d034526db899c88cd56ccc4486cd38b444c8aa0e79d4e70ae5a34" +checksum = "240ad7cbc5fc7cea4592203f8f6100835e8ad083196491b8a9c84ce84711ff68" dependencies = [ "az", "gmp-mpfr-sys", @@ -387,9 +393,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", @@ -423,29 +429,29 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.29", ] [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-xid" diff --git a/carcara/Cargo.toml b/carcara/Cargo.toml index 2870c99b..8a225f96 100644 --- a/carcara/Cargo.toml +++ b/carcara/Cargo.toml @@ -7,7 +7,7 @@ rust-version = "1.67" license = "Apache-2.0" [dependencies] -ahash = "0.8.3" +indexmap = "2.0.0" log = "0.4.17" rug = { version = "1.19.2", features = ["integer", "rational"] } thiserror = "1.0.40" diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 68b1e5cc..323c661c 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -3,7 +3,7 @@ use std::sync::{atomic::AtomicUsize, Arc, RwLock, RwLockReadGuard, RwLockWriteGu pub struct Context { pub mappings: Vec<(Rc, Rc)>, - pub 
bindings: AHashSet, + pub bindings: IndexSet, pub cumulative_substitution: Option, } diff --git a/carcara/src/ast/mod.rs b/carcara/src/ast/mod.rs index 5b079483..51a513f7 100644 --- a/carcara/src/ast/mod.rs +++ b/carcara/src/ast/mod.rs @@ -25,7 +25,7 @@ pub use substitution::{Substitution, SubstitutionError}; pub(crate) use polyeq::{Polyeq, PolyeqComparator}; use crate::checker::error::CheckerError; -use ahash::AHashSet; +use indexmap::IndexSet; use rug::Integer; use rug::Rational; use std::{hash::Hash, ops::Deref}; @@ -51,7 +51,7 @@ pub struct Proof { /// The proof's premises. /// /// Those are the terms introduced in the original problem's `assert` commands. - pub premises: AHashSet>, + pub premises: IndexSet>, /// The proof commands. pub commands: Vec, diff --git a/carcara/src/ast/pool/advanced.rs b/carcara/src/ast/pool/advanced.rs index ae47f86f..abad5c1e 100644 --- a/carcara/src/ast/pool/advanced.rs +++ b/carcara/src/ast/pool/advanced.rs @@ -1,6 +1,6 @@ use super::super::{Rc, Term}; use super::{PrimitivePool, TermPool}; -use ahash::AHashSet; +use indexmap::IndexSet; use std::sync::{Arc, RwLock}; pub struct ContextPool { @@ -67,7 +67,7 @@ impl TermPool for ContextPool { } } - fn free_vars(&mut self, term: &Rc) -> AHashSet> { + fn free_vars(&mut self, term: &Rc) -> IndexSet> { self.inner .write() .unwrap() @@ -140,7 +140,7 @@ impl TermPool for LocalPool { } } - fn free_vars(&mut self, term: &Rc) -> AHashSet> { + fn free_vars(&mut self, term: &Rc) -> IndexSet> { self.inner.free_vars_with_priorities( term, [ diff --git a/carcara/src/ast/pool/mod.rs b/carcara/src/ast/pool/mod.rs index af2c294d..a70ede42 100644 --- a/carcara/src/ast/pool/mod.rs +++ b/carcara/src/ast/pool/mod.rs @@ -5,7 +5,7 @@ mod storage; use super::{Rc, Sort, Term}; use crate::ast::Constant; -use ahash::{AHashMap, AHashSet}; +use indexmap::{IndexMap, IndexSet}; use storage::Storage; pub trait TermPool { @@ -35,11 +35,11 @@ pub trait TermPool { /// This method assumes that the sorts of any subterms 
have already been checked, and are /// correct. If `term` is itself a sort, this simply returns that sort. fn sort(&self, term: &Rc) -> Rc; - /// Returns an `AHashSet` containing all the free variables in the given term. + /// Returns an `IndexSet` containing all the free variables in the given term. /// /// This method uses a cache, so there is no additional cost to computing the free variables of /// a term multiple times. - fn free_vars(&mut self, term: &Rc) -> AHashSet>; + fn free_vars(&mut self, term: &Rc) -> IndexSet>; } /// A structure to store and manage all allocated terms. @@ -53,8 +53,8 @@ pub trait TermPool { /// [`PrimitivePool::sort`]) or its free variables (see [`PrimitivePool::free_vars`]). pub struct PrimitivePool { pub(crate) storage: Storage, - pub(crate) free_vars_cache: AHashMap, AHashSet>>, - pub(crate) sorts_cache: AHashMap, Rc>, + pub(crate) free_vars_cache: IndexMap, IndexSet>>, + pub(crate) sorts_cache: IndexMap, Rc>, pub(crate) bool_true: Rc, pub(crate) bool_false: Rc, } @@ -70,7 +70,7 @@ impl PrimitivePool { /// and `false`, as well as the `Bool` sort. pub fn new() -> Self { let mut storage = Storage::new(); - let mut sorts_cache = AHashMap::new(); + let mut sorts_cache = IndexMap::new(); let bool_sort = storage.add(Term::Sort(Sort::Bool)); let [bool_true, bool_false] = @@ -82,7 +82,7 @@ impl PrimitivePool { Self { storage, - free_vars_cache: AHashMap::new(), + free_vars_cache: IndexMap::new(), sorts_cache, bool_true, bool_false, @@ -190,7 +190,7 @@ impl PrimitivePool { &mut self, term: &Rc, prior_pools: [&PrimitivePool; N], - ) -> AHashSet> { + ) -> IndexSet> { for p in prior_pools { if let Some(set) = p.free_vars_cache.get(term) { return set.clone(); @@ -210,7 +210,7 @@ impl PrimitivePool { set } Term::Op(_, args) => { - let mut set = AHashSet::new(); + let mut set = IndexSet::new(); for a in args { set.extend(self.free_vars_with_priorities(a, prior_pools).into_iter()); } @@ -240,11 +240,11 @@ impl PrimitivePool { vars } Term::Var(..) 
=> { - let mut set = AHashSet::with_capacity(1); + let mut set = IndexSet::with_capacity(1); set.insert(term.clone()); set } - Term::Const(_) | Term::Sort(_) => AHashSet::new(), + Term::Const(_) | Term::Sort(_) => IndexSet::new(), }; self.free_vars_cache.insert(term.clone(), set); self.free_vars_cache.get(term).unwrap().clone() @@ -270,7 +270,7 @@ impl TermPool for PrimitivePool { self.sorts_cache[term].clone() } - fn free_vars(&mut self, term: &Rc) -> AHashSet> { + fn free_vars(&mut self, term: &Rc) -> IndexSet> { self.free_vars_with_priorities(term, []) } } diff --git a/carcara/src/ast/pool/storage.rs b/carcara/src/ast/pool/storage.rs index fb611e51..1ebceaf0 100644 --- a/carcara/src/ast/pool/storage.rs +++ b/carcara/src/ast/pool/storage.rs @@ -34,7 +34,7 @@ impl Borrow for ByValue { } #[derive(Debug, Clone, Default)] -pub struct Storage(AHashSet); +pub struct Storage(IndexSet); impl Storage { pub fn new() -> Self { diff --git a/carcara/src/ast/printer.rs b/carcara/src/ast/printer.rs index b1d38916..216ff1a5 100644 --- a/carcara/src/ast/printer.rs +++ b/carcara/src/ast/printer.rs @@ -5,7 +5,7 @@ use crate::{ parser::Token, utils::{is_symbol_character, DedupIterator}, }; -use ahash::AHashMap; +use indexmap::IndexMap; use std::{borrow::Cow, fmt, io}; /// Prints a proof to the standard output. 
@@ -17,7 +17,7 @@ pub fn print_proof(commands: &[ProofCommand], use_sharing: bool) -> io::Result<( let mut stdout = io::stdout(); let mut printer = AlethePrinter { inner: &mut stdout, - term_indices: use_sharing.then(AHashMap::new), + term_indices: use_sharing.then(IndexMap::new), term_sharing_variable_prefix: "@p_", }; printer.write_proof(commands) @@ -32,7 +32,7 @@ pub fn write_lia_smt_instance( ) -> io::Result<()> { let mut printer = AlethePrinter { inner: dest, - term_indices: use_sharing.then(AHashMap::new), + term_indices: use_sharing.then(IndexMap::new), term_sharing_variable_prefix: "p_", }; printer.write_lia_smt_instance(clause) @@ -107,7 +107,7 @@ impl PrintWithSharing for Operator { struct AlethePrinter<'a> { inner: &'a mut dyn io::Write, - term_indices: Option, usize>>, + term_indices: Option, usize>>, term_sharing_variable_prefix: &'static str, } diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index ebae8b74..9d091e34 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -1,7 +1,7 @@ //! Algorithms for creating and applying capture-avoiding substitutions over terms. use super::{BindingList, Rc, SortedVar, Term, TermPool}; -use ahash::{AHashMap, AHashSet}; +use indexmap::{IndexMap, IndexSet}; use thiserror::Error; /// The error type for errors when constructing or applying substitutions. @@ -36,21 +36,21 @@ type SubstitutionResult = Result; /// actually be `(forall ((y' Int)) (= y y'))`. pub struct Substitution { /// The substitution's mappings. - pub(crate) map: AHashMap, Rc>, + pub(crate) map: IndexMap, Rc>, /// The variables that should be renamed to preserve capture-avoidance, if they are bound by a /// binder term. - should_be_renamed: Option>>, - cache: AHashMap, Rc>, + should_be_renamed: Option>>, + cache: IndexMap, Rc>, } impl Substitution { /// Constructs an empty substitution. 
pub fn empty() -> Self { Self { - map: AHashMap::new(), + map: IndexMap::new(), should_be_renamed: None, - cache: AHashMap::new(), + cache: IndexMap::new(), } } @@ -67,7 +67,7 @@ impl Substitution { /// mapped to a term of a different sort. pub fn new( pool: &mut dyn TermPool, - map: AHashMap, Rc>, + map: IndexMap, Rc>, ) -> SubstitutionResult { for (k, v) in map.iter() { if !k.is_var() { @@ -81,7 +81,7 @@ impl Substitution { Ok(Self { map, should_be_renamed: None, - cache: AHashMap::new(), + cache: IndexMap::new(), }) } @@ -146,7 +146,7 @@ impl Substitution { // // See https://en.wikipedia.org/wiki/Lambda_calculus#Capture-avoiding_substitutions for // more details. - let mut should_be_renamed = AHashSet::new(); + let mut should_be_renamed = IndexSet::new(); for (x, t) in self.map.iter() { if x == t { continue; // We ignore reflexive substitutions @@ -297,7 +297,7 @@ impl Substitution { is_value_list: bool, ) -> (BindingList, Self) { let mut new_substitution = Self::empty(); - let mut new_vars = AHashSet::new(); + let mut new_vars = IndexSet::new(); let new_binding_list = binding_list .iter() .map(|(var, value)| { @@ -365,7 +365,7 @@ mod tests { parser.parse_term().unwrap() }); - let mut map = AHashMap::new(); + let mut map = IndexMap::new(); map.insert(x, t); let got = Substitution::new(&mut pool, map) diff --git a/carcara/src/ast/tests.rs b/carcara/src/ast/tests.rs index f1e1c63a..4b034429 100644 --- a/carcara/src/ast/tests.rs +++ b/carcara/src/ast/tests.rs @@ -2,7 +2,7 @@ use crate::{ ast::{pool::PrimitivePool, TermPool}, parser::tests::parse_terms, }; -use ahash::AHashSet; +use indexmap::IndexSet; #[test] fn test_free_vars() { @@ -10,9 +10,9 @@ fn test_free_vars() { for &(term, expected) in cases { let mut pool = PrimitivePool::new(); let [root] = parse_terms(&mut pool, definitions, [term]); - let expected: AHashSet<_> = expected.iter().copied().collect(); + let expected: IndexSet<_> = expected.iter().copied().collect(); let set = pool.free_vars(&root); - let 
got: AHashSet<_> = set.iter().map(|t| t.as_var().unwrap()).collect(); + let got: IndexSet<_> = set.iter().map(|t| t.as_var().unwrap()).collect(); assert_eq!(expected, got); } diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index b192ae63..47577a92 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -4,16 +4,15 @@ mod tests; pub use metrics::*; -use ahash::{AHashMap, AHashSet}; +use indexmap::{map::Entry, IndexMap, IndexSet}; use std::{fmt, hash::Hash, io, sync::Arc, time::Duration}; -fn combine_map(mut a: AHashMap, b: AHashMap) -> AHashMap +fn combine_map(mut a: IndexMap, b: IndexMap) -> IndexMap where S: Eq + Hash, V: MetricsUnit, M: Metrics + Default, { - use std::collections::hash_map::Entry; for (k, v) in b { match a.entry(k) { Entry::Occupied(mut e) => { @@ -66,8 +65,8 @@ pub struct OnlineBenchmarkResults { pub total_accounted_for: OnlineMetrics, pub total: OnlineMetrics, pub step_time: OnlineMetrics, - pub step_time_by_file: AHashMap>, - pub step_time_by_rule: AHashMap>, + pub step_time_by_file: IndexMap>, + pub step_time_by_rule: IndexMap>, pub polyeq_time: OnlineMetrics, pub polyeq_time_ratio: OnlineMetrics, @@ -129,12 +128,12 @@ impl OnlineBenchmarkResults { } /// For each file, the time spent checking each step in the file. - pub fn step_time_by_file(&self) -> &AHashMap> { + pub fn step_time_by_file(&self) -> &IndexMap> { &self.step_time_by_file } /// For each rule, the time spent checking each step that uses that rule. 
- pub fn step_time_by_rule(&self) -> &AHashMap> { + pub fn step_time_by_rule(&self) -> &IndexMap> { &self.step_time_by_rule } @@ -274,9 +273,9 @@ type InternedRunId = (Arc, usize); #[derive(Default)] pub struct CsvBenchmarkResults { - strings: AHashSet>, - runs: AHashMap, - step_time_by_rule: AHashMap, OfflineMetrics>, + strings: IndexSet>, + runs: IndexMap, + step_time_by_rule: IndexMap, OfflineMetrics>, is_holey: bool, num_errors: usize, } @@ -315,7 +314,7 @@ impl CsvBenchmarkResults { } fn write_runs_csv( - data: AHashMap, + data: IndexMap, dest: &mut dyn io::Write, ) -> io::Result<()> { writeln!( @@ -349,7 +348,7 @@ impl CsvBenchmarkResults { } fn write_by_rule_csv( - data: AHashMap, OfflineMetrics>, + data: IndexMap, OfflineMetrics>, dest: &mut dyn io::Write, ) -> io::Result<()> { let mut data: Vec<_> = data.into_iter().collect(); diff --git a/carcara/src/checker/lia_generic.rs b/carcara/src/checker/lia_generic.rs index a13e3cf2..21233505 100644 --- a/carcara/src/checker/lia_generic.rs +++ b/carcara/src/checker/lia_generic.rs @@ -1,6 +1,6 @@ use super::*; use crate::{checker::error::LiaGenericError, parser, LiaGenericOptions}; -use ahash::AHashMap; +use indexmap::IndexMap; use std::{ io::{BufRead, Write}, process::{Command, Stdio}, @@ -157,7 +157,7 @@ fn insert_missing_assumes( proof: &[ProofCommand], root_id: &str, ) -> (Vec>, usize) { - let mut count_map: AHashMap<&Rc, usize> = AHashMap::new(); + let mut count_map: IndexMap<&Rc, usize> = IndexMap::new(); for c in conclusion { *count_map.entry(c).or_default() += 1; } diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 745d1b61..73b4e5a9 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -9,8 +9,8 @@ use crate::{ elaborator::Elaborator, CarcaraResult, Error, LiaGenericOptions, }; -use ahash::AHashSet; use error::CheckerError; +use indexmap::IndexSet; pub use parallel::{scheduler::Scheduler, ParallelProofChecker}; use rules::{ElaborationRule, Premise, Rule, 
RuleArgs, RuleResult}; use std::{ @@ -248,7 +248,7 @@ impl<'c> ProofChecker<'c> { &mut self, id: &str, term: &Rc, - premises: &AHashSet>, + premises: &IndexSet>, iter: &'i ProofIter<'i>, mut stats: &mut Option<&mut CheckerStatistics>, ) -> bool { diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index d9fc8d77..13f1b9d4 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -9,7 +9,7 @@ use crate::{ ast::{pool::advanced::*, *}, CarcaraResult, Error, }; -use ahash::AHashSet; +use indexmap::IndexSet; pub use scheduler::{Schedule, ScheduleIter, Scheduler}; use std::{ ops::ControlFlow, @@ -347,7 +347,7 @@ impl<'c> ParallelProofChecker<'c> { &mut self, id: &str, term: &Rc, - premises: &AHashSet>, + premises: &IndexSet>, iter: &ScheduleIter, mut stats: &mut Option<&mut CheckerStatistics>, ) -> bool { diff --git a/carcara/src/checker/rules/clausification.rs b/carcara/src/checker/rules/clausification.rs index 4a0eb499..2dbed31b 100644 --- a/carcara/src/checker/rules/clausification.rs +++ b/carcara/src/checker/rules/clausification.rs @@ -3,7 +3,7 @@ use super::{ assert_polyeq_expected, get_premise_term, CheckerError, EqualityError, RuleArgs, RuleResult, }; use crate::ast::*; -use ahash::AHashMap; +use indexmap::IndexMap; pub fn distinct_elim(RuleArgs { conclusion, pool, .. 
}: RuleArgs) -> RuleResult { assert_clause_len(conclusion, 1)?; @@ -320,7 +320,7 @@ fn bfun_elim_second_step( fn apply_bfun_elim( pool: &mut dyn TermPool, term: &Rc, - cache: &mut AHashMap, Rc>, + cache: &mut IndexMap, Rc>, ) -> Result, SubstitutionError> { if let Some(v) = cache.get(term) { return Ok(v.clone()); @@ -400,7 +400,7 @@ pub fn bfun_elim( let psi = get_premise_term(&premises[0])?; - let expected = apply_bfun_elim(pool, psi, &mut AHashMap::new())?; + let expected = apply_bfun_elim(pool, psi, &mut IndexMap::new())?; assert_polyeq_expected(&conclusion[0], expected, polyeq_time) } diff --git a/carcara/src/checker/rules/extras.rs b/carcara/src/checker/rules/extras.rs index e1f8f275..cbbc3132 100644 --- a/carcara/src/checker/rules/extras.rs +++ b/carcara/src/checker/rules/extras.rs @@ -5,7 +5,7 @@ use super::{ EqualityError, RuleArgs, RuleResult, }; use crate::{ast::*, checker::rules::assert_operation_len}; -use ahash::AHashSet; +use indexmap::IndexSet; pub fn reordering(RuleArgs { conclusion, premises, .. }: RuleArgs) -> RuleResult { assert_num_premises(premises, 1)?; @@ -13,8 +13,8 @@ pub fn reordering(RuleArgs { conclusion, premises, .. 
}: RuleArgs) -> RuleResult let premise = premises[0].clause; assert_clause_len(conclusion, premise.len())?; - let premise_set: AHashSet<_> = premise.iter().collect(); - let conclusion_set: AHashSet<_> = conclusion.iter().collect(); + let premise_set: IndexSet<_> = premise.iter().collect(); + let conclusion_set: IndexSet<_> = conclusion.iter().collect(); if let Some(&t) = premise_set.difference(&conclusion_set).next() { Err(CheckerError::ReorderingMissingTerm(t.clone())) } else if let Some(&t) = conclusion_set.difference(&premise_set).next() { diff --git a/carcara/src/checker/rules/linear_arithmetic.rs b/carcara/src/checker/rules/linear_arithmetic.rs index d81f2d06..7547b142 100644 --- a/carcara/src/checker/rules/linear_arithmetic.rs +++ b/carcara/src/checker/rules/linear_arithmetic.rs @@ -3,7 +3,7 @@ use crate::{ ast::*, checker::error::{CheckerError, LinearArithmeticError}, }; -use ahash::AHashMap; +use indexmap::{map::Entry, IndexMap}; use rug::{ops::NegAssign, Integer, Rational}; pub fn la_rw_eq(RuleArgs { conclusion, .. }: RuleArgs) -> RuleResult { @@ -62,11 +62,11 @@ fn negate_disequality(term: &Rc) -> Result<(Operator, LinearComb, LinearCo /// plus a constant term. This is also used to represent a disequality, in which case the left side /// is the non-constant terms and their coefficients, and the right side is the constant term. 
#[derive(Debug)] -pub struct LinearComb(pub(crate) AHashMap, Rational>, pub(crate) Rational); +pub struct LinearComb(pub(crate) IndexMap, Rational>, pub(crate) Rational); impl LinearComb { fn new() -> Self { - Self(AHashMap::new(), Rational::new()) + Self(IndexMap::new(), Rational::new()) } /// Flattens a term and adds it to the linear combination, multiplying by the coefficient @@ -125,8 +125,6 @@ impl LinearComb { } fn insert(&mut self, key: Rc, value: Rational) { - use std::collections::hash_map::Entry; - match self.0.entry(key) { Entry::Occupied(mut e) => { *e.get_mut() += value; diff --git a/carcara/src/checker/rules/quantifier.rs b/carcara/src/checker/rules/quantifier.rs index c00339b2..2cf2f780 100644 --- a/carcara/src/checker/rules/quantifier.rs +++ b/carcara/src/checker/rules/quantifier.rs @@ -3,7 +3,7 @@ use super::{ CheckerError, RuleArgs, RuleResult, }; use crate::{ast::*, checker::error::QuantifierError, utils::DedupIterator}; -use ahash::{AHashMap, AHashSet}; +use indexmap::{IndexMap, IndexSet}; pub fn forall_inst( RuleArgs { @@ -19,8 +19,8 @@ pub fn forall_inst( // Since the bindings and arguments may not be in the same order, we collect the bindings into // a hash set, and remove each binding from it as we find the associated argument - let mut bindings: AHashSet<_> = bindings.iter().cloned().collect(); - let substitution: AHashMap<_, _> = args + let mut bindings: IndexSet<_> = bindings.iter().cloned().collect(); + let substitution: IndexMap<_, _> = args .iter() .map(|arg| { let (arg_name, arg_value) = arg.as_assign()?; @@ -110,7 +110,7 @@ fn negation_normal_form( pool: &mut dyn TermPool, term: &Rc, polarity: bool, - cache: &mut AHashMap<(Rc, bool), Rc>, + cache: &mut IndexMap<(Rc, bool), Rc>, ) -> Rc { if let Some(v) = cache.get(&(term.clone(), polarity)) { return v.clone(); @@ -270,10 +270,10 @@ pub fn qnt_cnf(RuleArgs { conclusion, pool, .. 
}: RuleArgs) -> RuleResult { (l_b, phi, r_b, phi_prime) }; - let r_bindings = r_bindings.iter().cloned().collect::>(); - let mut new_bindings = l_bindings.iter().cloned().collect::>(); + let r_bindings = r_bindings.iter().cloned().collect::>(); + let mut new_bindings = l_bindings.iter().cloned().collect::>(); let clauses: Vec<_> = { - let nnf = negation_normal_form(pool, phi, true, &mut AHashMap::new()); + let nnf = negation_normal_form(pool, phi, true, &mut IndexMap::new()); let prenexed = prenex_forall(pool, &mut new_bindings, &nnf); let cnf = conjunctive_normal_form(&prenexed); cnf.into_iter() @@ -287,7 +287,7 @@ pub fn qnt_cnf(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult { // `new_bindings` contains all bindings that existed in the original term, plus all bindings // added by the prenexing step. All bindings in the right side must be in this set - if let Some((var, _)) = r_bindings.iter().find(|b| !new_bindings.contains(b)) { + if let Some((var, _)) = r_bindings.iter().find(|&b| !new_bindings.contains(b)) { return Err(CheckerError::Quant( QuantifierError::CnfNewBindingIntroduced(var.clone()), )); @@ -465,7 +465,7 @@ mod tests { use crate::parser::tests::*; fn to_cnf_term(pool: &mut dyn TermPool, term: &Rc) -> Rc { - let nnf = negation_normal_form(pool, term, true, &mut AHashMap::new()); + let nnf = negation_normal_form(pool, term, true, &mut IndexMap::new()); let mut bindings = Vec::new(); let prenexed = prenex_forall(pool, &mut bindings, &nnf); let cnf = conjunctive_normal_form(&prenexed); diff --git a/carcara/src/checker/rules/resolution.rs b/carcara/src/checker/rules/resolution.rs index f8ac3825..e343ec42 100644 --- a/carcara/src/checker/rules/resolution.rs +++ b/carcara/src/checker/rules/resolution.rs @@ -7,8 +7,8 @@ use crate::{ checker::{error::ResolutionError, Elaborator}, utils::DedupIterator, }; -use ahash::{AHashMap, AHashSet}; -use std::{collections::hash_map::Entry, iter::FromIterator}; +use indexmap::{map::Entry, IndexMap, IndexSet}; 
+use std::iter::FromIterator; type ResolutionTerm<'a> = (u32, &'a Rc); @@ -34,7 +34,7 @@ impl<'a> ClauseCollection<'a> for Vec> { } } -impl<'a> ClauseCollection<'a> for AHashSet> { +impl<'a> ClauseCollection<'a> for IndexSet> { fn insert_term(&mut self, item: ResolutionTerm<'a>) { self.insert(item); } @@ -111,21 +111,21 @@ fn greedy_resolution( // Without looking at the conclusion, it is unclear if the (not p) term should be removed by the // p term, or if the (not (not p)) should be removed by the (not (not (not p))). We can only // determine this by looking at the conclusion and using it to derive the pivots. - let conclusion: AHashSet<_> = conclusion + let conclusion: IndexSet<_> = conclusion .iter() .map(Rc::remove_all_negations) .map(|(n, t)| (n as i32, t)) .collect(); // The working clause contains the terms from the conclusion clause that we already encountered - let mut working_clause = AHashSet::new(); + let mut working_clause = IndexSet::new(); // The pivots are the encountered terms that are not present in the conclusion clause, and so // should be removed. After being used to eliminate a term, a pivot can still be used to // eliminate other terms. Because of that, we represent the pivots as a hash map to a boolean, // which represents if the pivot was already eliminated or not. At the end, this boolean should // be true for all pivots - let mut pivots = AHashMap::new(); + let mut pivots = IndexMap::new(); for premise in premises { // Only one pivot may be eliminated per clause. 
This restriction is required so logically @@ -247,7 +247,7 @@ fn greedy_resolution( } fn rup_resolution(conclusion: &[Rc], premises: &[Premise]) -> bool { - let mut clauses: Vec)>> = premises + let mut clauses: Vec)>> = premises .iter() .map(|p| { p.clause @@ -258,7 +258,7 @@ fn rup_resolution(conclusion: &[Rc], premises: &[Premise]) -> bool { .collect(); clauses.extend(conclusion.iter().map(|t| { let (p, t) = t.remove_all_negations_with_polarity(); - let mut clause = AHashSet::new(); + let mut clause = IndexSet::new(); clause.insert((!p, t)); clause })); @@ -292,9 +292,9 @@ pub fn resolution_with_args( conclusion, premises, args, pool, .. }: RuleArgs, ) -> RuleResult { - let resolution_result = apply_generic_resolution::>(premises, args, pool)?; + let resolution_result = apply_generic_resolution::>(premises, args, pool)?; - let conclusion: AHashSet<_> = conclusion.iter().map(Rc::remove_all_negations).collect(); + let conclusion: IndexSet<_> = conclusion.iter().map(Rc::remove_all_negations).collect(); if let Some(extra) = conclusion.difference(&resolution_result).next() { let extra = unremove_all_negations(pool, *extra); @@ -549,7 +549,7 @@ pub fn tautology(RuleArgs { conclusion, premises, .. 
}: RuleArgs) -> RuleResult assert_is_bool_constant(&conclusion[0], true)?; let premise = premises[0].clause; - let mut seen = AHashSet::with_capacity(premise.len()); + let mut seen = IndexSet::with_capacity(premise.len()); let with_negations_removed = premise.iter().map(Rc::remove_all_negations_with_polarity); for (polarity, term) in with_negations_removed { if seen.contains(&(!polarity, term)) { diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index 327d6562..593dc1f3 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -3,7 +3,7 @@ use super::{ RuleResult, }; use crate::{ast::*, utils::DedupIterator}; -use ahash::{AHashMap, AHashSet}; +use indexmap::{IndexMap, IndexSet}; use rug::Rational; /// A macro to define the possible transformations for a "simplify" rule. @@ -48,7 +48,7 @@ fn generic_simplify_rule( let mut simplify_until_fixed_point = |term: &Rc, goal: &Rc| -> Result, CheckerError> { let mut current = term.clone(); - let mut seen = AHashSet::new(); + let mut seen = IndexSet::new(); loop { if !seen.insert(current.clone()) { return Err(CheckerError::CycleInSimplification(current)); @@ -213,14 +213,14 @@ fn generic_and_or_simplify( // Then, we remove all duplicate terms. We do this in place to avoid another allocation. // Similarly to the step that removes the "skip term", we check if we already found the result // after this step. 
This is also necessary in some examples - let mut seen = AHashSet::with_capacity(phis.len()); + let mut seen = IndexSet::with_capacity(phis.len()); phis.retain(|t| seen.insert(t.clone())); if result_args.iter().eq(&phis) { return Ok(()); } // Finally, we check to see if the result was short-circuited - let seen: AHashSet<(bool, &Rc)> = phis + let seen: IndexSet<(bool, &Rc)> = phis .iter() .map(Rc::remove_all_negations_with_polarity) .collect(); @@ -668,7 +668,7 @@ pub fn comp_simplify(args: RuleArgs) -> RuleResult { fn apply_ac_simp( pool: &mut dyn TermPool, - cache: &mut AHashMap, Rc>, + cache: &mut IndexMap, Rc>, term: &Rc, ) -> Rc { if let Some(t) = cache.get(term) { @@ -723,7 +723,7 @@ pub fn ac_simp(RuleArgs { conclusion, pool, .. }: RuleArgs) -> RuleResult { let (original, flattened) = match_term_err!((= psi phis) = &conclusion[0])?; assert_eq( flattened, - &apply_ac_simp(pool, &mut AHashMap::new(), original), + &apply_ac_simp(pool, &mut IndexMap::new(), original), ) } diff --git a/carcara/src/checker/rules/subproof.rs b/carcara/src/checker/rules/subproof.rs index 0e4e29d0..7a347d3a 100644 --- a/carcara/src/checker/rules/subproof.rs +++ b/carcara/src/checker/rules/subproof.rs @@ -3,7 +3,7 @@ use super::{ CheckerError, EqualityError, RuleArgs, RuleResult, }; use crate::{ast::*, checker::error::SubproofError}; -use ahash::{AHashMap, AHashSet}; +use indexmap::{IndexMap, IndexSet}; pub fn subproof( RuleArgs { @@ -71,7 +71,7 @@ pub fn bind( let [l_bindings, r_bindings] = [l_bindings, r_bindings].map(|b| { b.iter() .map(|var| pool.add(var.clone().into())) - .collect::>() + .collect::>() }); // The terms in the quantifiers must be phi and phi' @@ -94,7 +94,7 @@ pub fn bind( let context = context.as_ref().unwrap(); // The quantifier binders must be the xs and ys of the context substitution - let (xs, ys): (AHashSet<_>, AHashSet<_>) = context + let (xs, ys): (IndexSet<_>, IndexSet<_>) = context .mappings .iter() // We skip terms which are not simply variables @@ 
-143,7 +143,7 @@ pub fn r#let( // Since we are closing a subproof, we only care about the substitutions that were introduced // in it - let substitution: AHashMap, Rc> = context + let substitution: IndexMap, Rc> = context .last() .unwrap() .as_ref() @@ -199,8 +199,8 @@ pub fn r#let( Ok(()) } -fn extract_points(quant: Quantifier, term: &Rc) -> AHashSet<(Rc, Rc)> { - fn find_points(acc: &mut AHashSet<(Rc, Rc)>, polarity: bool, term: &Rc) { +fn extract_points(quant: Quantifier, term: &Rc) -> IndexSet<(Rc, Rc)> { + fn find_points(acc: &mut IndexSet<(Rc, Rc)>, polarity: bool, term: &Rc) { // This does not make use of a cache, so there may be performance issues // TODO: Measure the performance of this function, and see if a cache is needed @@ -233,7 +233,7 @@ fn extract_points(quant: Quantifier, term: &Rc) -> AHashSet<(Rc, Rc< } } - let mut result = AHashSet::new(); + let mut result = IndexSet::new(); find_points(&mut result, quant == Quantifier::Exists, term); result } @@ -278,20 +278,20 @@ pub fn onepoint( let last_context = last_context.as_ref().unwrap(); r_bindings .iter() - .find(|b| !last_context.bindings.contains(b)) + .find(|&b| !last_context.bindings.contains(b)) } { return Err(SubproofError::BindingIsNotInContext(var.clone()).into()); } - let l_bindings_set: AHashSet<_> = l_bindings + let l_bindings_set: IndexSet<_> = l_bindings .iter() .map(|var| pool.add(var.clone().into())) .collect(); - let r_bindings_set: AHashSet<_> = r_bindings + let r_bindings_set: IndexSet<_> = r_bindings .iter() .map(|var| pool.add(var.clone().into())) .collect(); - let substitution_vars: AHashSet<_> = last_context + let substitution_vars: IndexSet<_> = last_context .as_ref() .unwrap() .mappings @@ -306,7 +306,7 @@ pub fn onepoint( // substitution to the points in order to replace these variables by their value. 
We also // create a duplicate of every point in the reverse order, since the order of equalities may be // flipped - let points: AHashSet<_> = points + let points: IndexSet<_> = points .into_iter() .flat_map(|(x, t)| [(x.clone(), t.clone()), (t, x)]) .map(|(x, t)| (x, context.apply(pool, &t))) @@ -367,7 +367,7 @@ fn generic_skolemization_rule( current_phi = context.apply_previous(pool, ¤t_phi); } - let substitution: AHashMap, Rc> = context + let substitution: IndexMap, Rc> = context .last() .unwrap() .as_ref() diff --git a/carcara/src/parser/mod.rs b/carcara/src/parser/mod.rs index 240c224d..d2db489d 100644 --- a/carcara/src/parser/mod.rs +++ b/carcara/src/parser/mod.rs @@ -12,8 +12,8 @@ use crate::{ utils::{HashCache, HashMapStack}, CarcaraResult, Error, }; -use ahash::{AHashMap, AHashSet}; use error::assert_num_args; +use indexmap::{IndexMap, IndexSet}; use rug::Integer; use std::{io::BufRead, str::FromStr}; @@ -78,8 +78,8 @@ enum AnchorArg { #[derive(Default)] struct ParserState { symbol_table: HashMapStack, Rc>, - function_defs: AHashMap, - sort_declarations: AHashMap, + function_defs: IndexMap, + sort_declarations: IndexMap, step_ids: HashMapStack, usize>, } @@ -92,7 +92,7 @@ pub struct Parser<'a, R> { current_position: Position, state: ParserState, interpret_integers_as_reals: bool, - problem: Option<(ProblemPrelude, AHashSet>)>, + problem: Option<(ProblemPrelude, IndexSet>)>, } impl<'a, R: BufRead> Parser<'a, R> { @@ -154,7 +154,7 @@ impl<'a, R: BufRead> Parser<'a, R> { } /// Shortcut for `self.problem.as_mut().unwrap().1` - fn premises(&mut self) -> &mut AHashSet> { + fn premises(&mut self) -> &mut IndexSet> { &mut self.problem.as_mut().unwrap().1 } @@ -489,8 +489,8 @@ impl<'a, R: BufRead> Parser<'a, R> { /// /// All other commands are ignored. This method returns a hash set containing the premises /// introduced in `assert` commands. 
- pub fn parse_problem(&mut self) -> CarcaraResult<(ProblemPrelude, AHashSet>)> { - self.problem = Some((ProblemPrelude::default(), AHashSet::new())); + pub fn parse_problem(&mut self) -> CarcaraResult<(ProblemPrelude, IndexSet>)> { + self.problem = Some((ProblemPrelude::default(), IndexSet::new())); while self.current_token != Token::Eof { self.expect_token(Token::OpenParen)?; diff --git a/carcara/src/parser/tests.rs b/carcara/src/parser/tests.rs index 1cc4bac0..5019bf2f 100644 --- a/carcara/src/parser/tests.rs +++ b/carcara/src/parser/tests.rs @@ -49,7 +49,7 @@ pub fn parse_proof(pool: &mut PrimitivePool, input: &str) -> Proof { .expect(ERROR_MESSAGE) .parse_proof() .expect(ERROR_MESSAGE); - Proof { premises: AHashSet::new(), commands } + Proof { premises: IndexSet::new(), commands } } fn run_parser_tests(pool: &mut PrimitivePool, cases: &[(&str, Rc)]) { @@ -61,7 +61,7 @@ fn run_parser_tests(pool: &mut PrimitivePool, cases: &[(&str, Rc)]) { #[test] fn test_hash_consing() { - use ahash::AHashSet; + use indexmap::IndexSet; let mut pool = PrimitivePool::new(); let input = "(- @@ -98,7 +98,7 @@ fn test_hash_consing() { "(- (- (+ 1 2) (* (+ 1 2) (+ 1 2))) (* 2 2))", ] .into_iter() - .collect::>(); + .collect::>(); let pool_terms = pool.storage.into_vec(); assert_eq!(pool_terms.len(), expected.len()); diff --git a/carcara/src/utils.rs b/carcara/src/utils.rs index 25dd7c44..dc662a7e 100644 --- a/carcara/src/utils.rs +++ b/carcara/src/utils.rs @@ -1,5 +1,5 @@ use crate::ast::{BindingList, Quantifier, Rc, Term}; -use ahash::{AHashMap, AHashSet, AHasher}; +use indexmap::{IndexMap, IndexSet}; use std::{ borrow::Borrow, fmt, @@ -25,7 +25,7 @@ pub fn is_symbol_character(ch: char) -> bool { /// An iterator that removes duplicate elements from `iter`. This will yield the elements in /// `iter` in order, skipping elements that have already been seen before. 
pub struct Dedup { - seen: AHashSet, + seen: IndexSet, iter: I, } @@ -59,7 +59,7 @@ impl> DedupIterator for I { where Self: Sized, { - Dedup { seen: AHashSet::new(), iter: self } + Dedup { seen: IndexSet::new(), iter: self } } } @@ -84,7 +84,7 @@ impl Hash for HashCache { impl HashCache { pub fn new(value: T) -> Self { - let mut hasher = AHasher::default(); + let mut hasher = std::collections::hash_map::DefaultHasher::default(); value.hash(&mut hasher); Self { hash: hasher.finish(), value } } @@ -102,16 +102,16 @@ impl AsRef for HashCache { #[derive(Debug)] pub struct HashMapStack { - scopes: Vec>, + scopes: Vec>, } impl HashMapStack { pub fn new() -> Self { - Self { scopes: vec![AHashMap::new()] } + Self { scopes: vec![IndexMap::new()] } } pub fn push_scope(&mut self) { - self.scopes.push(AHashMap::new()); + self.scopes.push(IndexMap::new()); } pub fn pop_scope(&mut self) { diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 33bfb204..4961130a 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -11,7 +11,6 @@ path = "src/main.rs" [dependencies] carcara = { path = "../carcara" } -ahash = "0.8.3" clap = { version = "3.2.23", features = ["derive"] } const_format = "0.2.30" crossbeam-queue = "0.3.8" From 001e1e3379ab458b251e72cad6df8431fc135d80 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 28 Aug 2023 13:11:04 -0300 Subject: [PATCH 60/70] Change default extension from `.proof` to `.alethe` --- cli/src/main.rs | 2 +- cli/src/path_args.rs | 7 ++++++- scripts/generate-benchmarks.sh | 2 +- scripts/solve.sh | 8 ++++---- test-generator/src/lib.rs | 4 ++-- 5 files changed, 14 insertions(+), 9 deletions(-) diff --git a/cli/src/main.rs b/cli/src/main.rs index 0c3a3d0b..671edc4b 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -277,7 +277,7 @@ struct BenchCommandOptions { dump_to_csv: bool, /// The proof files on which the benchmark will be run. If a directory is passed, the checker - /// will recursively find all '.proof' files in the directory. 
The problem files will be + /// will recursively find all proof files in the directory. The problem files will be /// inferred from the proof files. files: Vec, } diff --git a/cli/src/path_args.rs b/cli/src/path_args.rs index 3e6208b7..b9fa2f24 100644 --- a/cli/src/path_args.rs +++ b/cli/src/path_args.rs @@ -4,6 +4,7 @@ use crate::error::CliError; use std::{ffi::OsStr, fs, path::PathBuf}; const SMT_FILE_EXTENSIONS: [&str; 3] = ["smt", "smt2", "smt_in"]; +const ALETHE_FILE_EXTENSIONS: [&str; 2] = ["alethe", "proof"]; pub fn infer_problem_path(proof_path: impl Into) -> Result { fn inner(mut path: PathBuf) -> Option { @@ -22,7 +23,11 @@ fn get_instances_from_dir( ) -> Result<(), CliError> { let file_type = fs::metadata(&path)?.file_type(); if file_type.is_file() { - if path.extension() == Some(OsStr::new("proof")) { + let is_proof_file = path + .extension() + .and_then(OsStr::to_str) + .is_some_and(|ext| ALETHE_FILE_EXTENSIONS.contains(&ext)); + if is_proof_file { let problem_file = infer_problem_path(&path)?; acc.push((problem_file, path)) } diff --git a/scripts/generate-benchmarks.sh b/scripts/generate-benchmarks.sh index 2d64707f..5feed2a2 100755 --- a/scripts/generate-benchmarks.sh +++ b/scripts/generate-benchmarks.sh @@ -88,7 +88,7 @@ find $benchmark_dir -name '*.smt2' | xargs -P $num_jobs -n 1 bash -c 'scripts/so if [ -n "clean_flag" ]; then echo "cleaning up..." for f in $(find $benchmark_dir -name '*.smt2'); do - if [ ! -f $f.proof ]; then + if [ ! -f $f.alethe ]; then rm -f $f fi done diff --git a/scripts/solve.sh b/scripts/solve.sh index 74ca30e2..d71a0f12 100755 --- a/scripts/solve.sh +++ b/scripts/solve.sh @@ -1,13 +1,13 @@ #!/bin/bash timeout $timeout $VERIT $1 \ - --proof-file-from-input --proof-with-sharing \ + --proof=$1.alethe --proof-with-sharing \ --proof-prune --proof-merge &> /dev/null # If a complete proof could not be generated, we delete it -if [ -f $1.proof ]; then - if ! 
grep -q -F '(cl)' $1.proof; then - rm $1.proof +if [ -f $1.alethe ]; then + if ! grep -q -F '(cl)' $1.alethe; then + rm $1.alethe exit fi fi diff --git a/test-generator/src/lib.rs b/test-generator/src/lib.rs index 4bcf10e9..5cb22584 100644 --- a/test-generator/src/lib.rs +++ b/test-generator/src/lib.rs @@ -54,11 +54,11 @@ pub fn from_dir(args: TokenStream, input: TokenStream) -> TokenStream { for entry in walkdir::WalkDir::new(&arg) { let Ok(entry) = entry else { continue }; - if entry.file_type().is_file() && entry.path().extension() == Some(OsStr::new("proof")) { + if entry.file_type().is_file() && entry.path().extension() == Some(OsStr::new("alethe")) { let path = entry.path().to_str().unwrap(); let new_ident = { let path = path.strip_prefix(&arg).unwrap().strip_prefix('/').unwrap(); - let path = path.strip_suffix(".proof").unwrap(); + let path = path.strip_suffix(".alethe").unwrap(); let path = path.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "_"); syn::Ident::new(&format!("{}_{}", func_ident, path), func_ident.span()) }; From c3f9c5704fbaae24b7482ba70e36a536a87cdc48 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 28 Aug 2023 13:53:44 -0300 Subject: [PATCH 61/70] Upgrade to Rust version 1.72 --- .github/workflows/ci.yml | 6 +++--- Cargo.lock | 1 - Cargo.toml | 1 + README.md | 2 +- carcara/Cargo.toml | 2 +- carcara/src/ast/context.rs | 4 ++-- carcara/src/ast/substitution.rs | 6 +++--- carcara/src/checker/error.rs | 2 +- carcara/src/checker/rules/linear_arithmetic.rs | 2 +- carcara/src/checker/rules/simplification.rs | 4 ++-- carcara/src/checker/rules/transitivity.rs | 2 +- carcara/src/elaborator/pruning.rs | 4 ++-- carcara/src/lib.rs | 1 + cli/Cargo.toml | 3 +-- cli/src/main.rs | 4 ++-- test-generator/Cargo.toml | 2 +- 16 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1a387b77..61ac7736 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,7 +8,7 
@@ jobs: steps: - uses: actions/checkout@v3 - name: setup - run: rustup default 1.67 && rustup component add clippy + run: rustup default 1.72 && rustup component add clippy - name: lint run: cargo clippy --version && cargo clippy --all-targets --all-features --tests --no-deps -- -D warnings - name: build @@ -18,7 +18,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: setup - run: rustup default 1.67 + run: rustup default 1.72 - name: test run: cargo --version && cargo test --release format: @@ -26,6 +26,6 @@ jobs: steps: - uses: actions/checkout@v3 - name: setup - run: rustup default 1.67 && rustup component add rustfmt + run: rustup default 1.72 && rustup component add rustfmt - name: check formatting run: cargo fmt --version && cargo fmt --check diff --git a/Cargo.lock b/Cargo.lock index 1e409fc2..f1a145a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -57,7 +57,6 @@ name = "carcara-cli" version = "1.0.0" dependencies = [ "ansi_term", - "atty", "carcara", "clap", "const_format", diff --git a/Cargo.toml b/Cargo.toml index 1024221f..867dbc0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = ["carcara", "cli", "test-generator"] +resolver = "2" [profile.release] debug = 1 diff --git a/README.md b/README.md index a101efa9..09eda0b1 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Carcara is a proof checker and elaborator for SMT proofs in the [Alethe format]( ## Building -To build Carcara, you will need Rust and Cargo 1.67 or newer. Build the project with `cargo build`. +To build Carcara, you will need Rust and Cargo 1.72 or newer. Build the project with `cargo build`. When running on large proofs, we recommend compiling with optimizations enabled: `cargo build --release`. 
diff --git a/carcara/Cargo.toml b/carcara/Cargo.toml index 8a225f96..90116f7a 100644 --- a/carcara/Cargo.toml +++ b/carcara/Cargo.toml @@ -3,7 +3,7 @@ name = "carcara" version = "1.0.0" authors = ["Bruno Andreotti ", "Vinícius Braga Freire "] edition = "2021" -rust-version = "1.67" +rust-version = "1.72" license = "Apache-2.0" [dependencies] diff --git a/carcara/src/ast/context.rs b/carcara/src/ast/context.rs index 323c661c..f967a923 100644 --- a/carcara/src/ast/context.rs +++ b/carcara/src/ast/context.rs @@ -118,7 +118,7 @@ impl ContextStack { // `(:= x (f y))`, we insert the first substitution, and then, when introducing the second, // we use the current state of the hash map to transform `(f y)` into `(f z)`. The // resulting hash map will then contain `(:= y z)` and `(:= x (f z))` - for (var, value) in assignment_args.iter() { + for (var, value) in assignment_args { let var_term = Term::new_var(var, pool.sort(value)); let var_term = pool.add(var_term); substitution.insert(pool, var_term.clone(), value.clone())?; @@ -203,7 +203,7 @@ impl ContextStack { let previous_substitution = previous_context.cumulative_substitution.as_ref().unwrap(); - for (k, v) in previous_substitution.map.iter() { + for (k, v) in &previous_substitution.map { let value = match simultaneous.get(v) { Some(new_value) => new_value, None => v, diff --git a/carcara/src/ast/substitution.rs b/carcara/src/ast/substitution.rs index 9d091e34..1d7fdb04 100644 --- a/carcara/src/ast/substitution.rs +++ b/carcara/src/ast/substitution.rs @@ -69,7 +69,7 @@ impl Substitution { pool: &mut dyn TermPool, map: IndexMap, Rc>, ) -> SubstitutionResult { - for (k, v) in map.iter() { + for (k, v) in &map { if !k.is_var() { return Err(SubstitutionError::NotAVariable(k.clone())); } @@ -112,7 +112,7 @@ impl Substitution { if let Some(should_be_renamed) = &mut self.should_be_renamed { if x != t { - should_be_renamed.extend(pool.free_vars(&t).into_iter()); + should_be_renamed.extend(pool.free_vars(&t)); if x.is_var() { 
should_be_renamed.insert(x.clone()); } @@ -147,7 +147,7 @@ impl Substitution { // See https://en.wikipedia.org/wiki/Lambda_calculus#Capture-avoiding_substitutions for // more details. let mut should_be_renamed = IndexSet::new(); - for (x, t) in self.map.iter() { + for (x, t) in &self.map { if x == t { continue; // We ignore reflexive substitutions } diff --git a/carcara/src/checker/error.rs b/carcara/src/checker/error.rs index d791aa6c..e92c8d15 100644 --- a/carcara/src/checker/error.rs +++ b/carcara/src/checker/error.rs @@ -330,7 +330,7 @@ impl<'a> fmt::Display for DisplayLinearComb<'a> { 1 => write_var(f, vars.iter().next().unwrap()), _ => { write!(f, "(+")?; - for var in vars.iter() { + for var in vars { write!(f, " ")?; write_var(f, var)?; } diff --git a/carcara/src/checker/rules/linear_arithmetic.rs b/carcara/src/checker/rules/linear_arithmetic.rs index 7547b142..4bd81066 100644 --- a/carcara/src/checker/rules/linear_arithmetic.rs +++ b/carcara/src/checker/rules/linear_arithmetic.rs @@ -183,7 +183,7 @@ impl LinearComb { } let mut result = self.1.numer().clone(); - for (_, coeff) in self.0.iter() { + for (_, coeff) in &self.0 { if result == 1 { return Integer::from(1); } diff --git a/carcara/src/checker/rules/simplification.rs b/carcara/src/checker/rules/simplification.rs index 593dc1f3..e06f2567 100644 --- a/carcara/src/checker/rules/simplification.rs +++ b/carcara/src/checker/rules/simplification.rs @@ -430,7 +430,7 @@ pub fn div_simplify(RuleArgs { conclusion, .. }: RuleArgs) -> RuleResult { CheckerError::ExpectedNumber(Rational::new(), right.clone()) ); Ok(()) - } else if t_2.as_number().map_or(false, |n| n == 1) { + } else if t_2.as_number().is_some_and(|n| n == 1) { assert_eq(right, t_1) } else { let expected = t_1.as_signed_number_err()? / t_2.as_signed_number_err()?; @@ -557,7 +557,7 @@ pub fn minus_simplify(RuleArgs { conclusion, .. 
}: RuleArgs) -> RuleResult { // the `minus_simplify` and the `unary_minus_simplify` rules fn try_unary_minus_simplify(t: &Rc, u: &Rc) -> bool { // First case of `unary_minus_simplify` - if match_term!((-(-t)) = t).map_or(false, |t| t == u) { + if match_term!((-(-t)) = t) == Some(u) { return true; } diff --git a/carcara/src/checker/rules/transitivity.rs b/carcara/src/checker/rules/transitivity.rs index 70fd97d5..17dc4d9b 100644 --- a/carcara/src/checker/rules/transitivity.rs +++ b/carcara/src/checker/rules/transitivity.rs @@ -180,7 +180,7 @@ pub fn elaborate_eq_transitive( if !not_needed.is_empty() { let mut clause = latest_clause; - clause.extend(not_needed.into_iter()); + clause.extend(not_needed); let or_intro_step = ProofStep { id: elaborator.get_new_id(&command_id), clause, diff --git a/carcara/src/elaborator/pruning.rs b/carcara/src/elaborator/pruning.rs index 018f072f..5c5de3c2 100644 --- a/carcara/src/elaborator/pruning.rs +++ b/carcara/src/elaborator/pruning.rs @@ -55,7 +55,7 @@ pub fn slice_proof( frame.distance_to_source[current] = std::cmp::min(frame.distance_to_source[current], current_dist); - if max_distance.map_or(false, |max| current_dist > max) { + if max_distance.is_some_and(|max| current_dist > max) { continue; } @@ -108,7 +108,7 @@ pub fn slice_proof( if frame.distance_to_source[i] == usize::MAX { result_diff.push((i, CommandDiff::Delete)); num_pruned += 1; - } else if max_distance.map_or(false, |max| frame.distance_to_source[i] == max + 1) { + } else if max_distance.is_some_and(|max| frame.distance_to_source[i] == max + 1) { let new_command = ProofCommand::Step(ProofStep { id: frame.commands[i].id().to_owned(), clause: frame.commands[i].clause().to_vec(), diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 8134198a..8341d33a 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -26,6 +26,7 @@ #![warn(clippy::multiple_crate_versions)] #![warn(clippy::redundant_closure_for_method_calls)] #![warn(clippy::redundant_pub_crate)] 
+#![warn(clippy::redundant_type_annotations)] #![warn(clippy::semicolon_if_nothing_returned)] #![warn(clippy::str_to_string)] #![warn(clippy::string_to_string)] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 4961130a..6676349e 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -2,7 +2,7 @@ name = "carcara-cli" version = "1.0.0" edition = "2021" -rust-version = "1.67" +rust-version = "1.72" license = "Apache-2.0" [[bin]] @@ -17,4 +17,3 @@ crossbeam-queue = "0.3.8" log = { version = "0.4.17", features = ["std"] } ansi_term = "0.12" git-version = "0.3.5" -atty = "0.2.14" diff --git a/cli/src/main.rs b/cli/src/main.rs index 671edc4b..109fc5b0 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -14,7 +14,7 @@ use git_version::git_version; use path_args::{get_instances_from_paths, infer_problem_path}; use std::{ fs::File, - io::{self, BufRead}, + io::{self, BufRead, IsTerminal}, path::Path, }; @@ -321,7 +321,7 @@ impl From for log::LevelFilter { fn main() { let cli = Cli::parse(); - let colors_enabled = !cli.no_color && atty::is(atty::Stream::Stderr); + let colors_enabled = !cli.no_color && std::io::stderr().is_terminal(); logger::init(cli.log_level.into(), colors_enabled); if let Command::Check(CheckCommandOptions { checking, .. 
}) diff --git a/test-generator/Cargo.toml b/test-generator/Cargo.toml index 99c009d7..af88c421 100644 --- a/test-generator/Cargo.toml +++ b/test-generator/Cargo.toml @@ -2,7 +2,7 @@ name = "test-generator" version = "0.1.0" edition = "2021" -rust-version = "1.67" +rust-version = "1.72" license = "Apache-2.0" [lib] From 52a56312d71a185fceb352708f769ccf1dac4672 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 28 Aug 2023 14:41:43 -0300 Subject: [PATCH 62/70] Improve documentation for `ast::Rc` --- carcara/src/ast/rc.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/carcara/src/ast/rc.rs b/carcara/src/ast/rc.rs index c9a119e9..27274e3d 100644 --- a/carcara/src/ast/rc.rs +++ b/carcara/src/ast/rc.rs @@ -2,12 +2,32 @@ use std::{fmt, hash::Hash, ops::Deref, sync}; -/// An `Rc` where equality and hashing are done by reference, instead of by value. +/// A wrapper for `std::rc::Rc` where equality and hashing are done by reference, instead of by +/// value. /// /// This means that two `Rc`s will not be considered equal and won't have the same hash value unless /// they point to the same allocation. This has the advantage that equality and hashing can be done /// in constant time, even for recursive structures. /// +/// The Carcara parser makes use of hash consing, meaning that each term is only allocated once, +/// even if it appears multiple times in the proof. This means that if we want to compare two terms +/// for equality, we only need to compare them by reference, since if they are equal they will point +/// to the same allocation. However, `std::rc::Rc` implements `PartialEq` by comparing the inner +/// values for equality. If we simply used this implementation, each equality comparison would need +/// to traverse the terms recursively, which would be prohibitively expensive. 
Instead, this wrapper +/// overrides the `PartialEq` implementation to compare the pointers directly, allowing for constant +/// time equality comparisons. +/// +/// Similarly, when inserting terms in a hash map or set, we can also just hash the pointers +/// instead of recursively hashing the inner value (as `std::rc::Rc`'s `Hash` implementation does). +/// Therefore, this wrapper also overrides the implementation of the `Hash` trait. +/// +/// Note: when using this struct, it's important to avoid constructing terms with `Rc::new` and +/// instead prefer to construct them by adding them to a `TermPool`. This is because `Rc::new` will +/// create a brand new allocation for that term, instead of reusing the existing allocation if that +/// term was already added to the pool. Two identical terms created independently with `Rc::new` +/// will not compare as equal. +/// /// # Examples /// /// ``` From d360b3b81608e37fb865409eccd4c5e8fe4c58e1 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 28 Aug 2023 14:51:59 -0300 Subject: [PATCH 63/70] Use `.alethe` extension in README --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 09eda0b1..6b599b51 100644 --- a/README.md +++ b/README.md @@ -17,12 +17,12 @@ the project with all optimizations enabled, and install the CLI binary in `$HOME To check a proof file, use the `check` command, passing both the proof file and the original SMT-LIB problem file. ``` -carcara check example.smt2.proof example.smt2 +carcara check example.smt2.alethe example.smt2 ``` -If the problem file name is exactly the proof file name minus `.proof`, you can omit it: +If the problem file name is exactly the proof file name minus `.alethe`, you can omit it: ``` -carcara check example.smt2.proof +carcara check example.smt2.alethe ``` By default, Carcara will return a checking error when encountering a rule it does not recognize. 
If @@ -37,7 +37,7 @@ See `carcara help check` for more options. You can elaborate a proof file using the `elaborate` command. ``` -carcara elaborate example.smt2.proof example.smt2 +carcara elaborate example.smt2.alethe example.smt2 ``` This command will check the given proof while elaborating it, and print the elaborated proof to standard output. The `--print-with-sharing` flag controls whether the elaborated proof will be @@ -52,7 +52,7 @@ By default, Carcara ignores steps of the `lia_generic` rule when checking or ela instead considering them as holes. However, you can use an external solver to aid Carcara in checking these steps, using the `--lia-solver` option. For example, running ``` -carcara check example.smt2.proof --lia-solver cvc5 +carcara check example.smt2.alethe --lia-solver cvc5 ``` will check the proof using cvc5 (more precisely, the cvc5 binary in your `PATH`) to check any @@ -70,7 +70,7 @@ option should receive a single value, where multiple arguments are separated by if you wanted to instead check `lia_generic` steps using veriT, you might pass the following arguments: ``` -carcara check example.smt2.proof --lia-solver veriT --lia-solver-args "--proof=- --proof-with-sharing" +carcara check example.smt2.alethe --lia-solver veriT --lia-solver-args "--proof=- --proof-with-sharing" ``` The default arguments for `--lia-solver-args` are as follows (note that they assume you use cvc5 as @@ -85,13 +85,13 @@ The `bench` command is used to run benchmarks. For example, the following comman benchmark on three proof files. ``` -carcara bench a.smt2.proof b.smt2.proof c.smt2.proof +carcara bench a.smt2.alethe b.smt2.alethe c.smt2.alethe ``` The command takes as arguments any number of proof files or directories. If a directory is passed, -the benchmark will be run on all `.proof` files in that directory. This command assumes that the +the benchmark will be run on all `.alethe` files in that directory. 
This command assumes that the problem file associated with each proof is in the same directory as the proof, and that they follow -the pattern `.smt2`/`.smt2.proof`. +the pattern `.smt2`/`.smt2.alethe`. The benchmark will parse and check each file, and record performance data. If you pass the `--elaborate` flag, the proofs will also be elaborated (though the resulting elaborated proof is From f3931c498b75e8926c0b3b9648c6bf46c064c752 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 28 Aug 2023 15:48:44 -0300 Subject: [PATCH 64/70] Check that local assumptions are discharged --- carcara/src/checker/mod.rs | 20 +++++++++++++++++++- carcara/src/checker/parallel/mod.rs | 16 ++++++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 73b4e5a9..3f8d9f8c 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -145,7 +145,6 @@ impl<'c> ProofChecker<'c> { // If this is the last command of a subproof, we have to pop the subproof // commands off of the stack. 
The parser already ensures that the last command // in a subproof is always a `step` command - // TODO: Use depth diff to pop context off if is_end_of_subproof { self.context.pop(); if let Some(elaborator) = &mut self.elaborator { @@ -410,6 +409,11 @@ impl<'c> ProofChecker<'c> { } } + if iter.is_end_step() { + let subproof = iter.current_subproof().unwrap(); + Self::check_discharge(subproof, iter.depth(), &step.discharge)?; + } + if let Some(s) = stats { let time = time.elapsed(); @@ -423,6 +427,20 @@ impl<'c> ProofChecker<'c> { Ok(()) } + fn check_discharge( + subproof: &[ProofCommand], + depth: usize, + discharge: &[(usize, usize)], + ) -> RuleResult { + let discharge: IndexSet<_> = discharge.iter().collect(); + subproof + .iter() + .enumerate() + .all(|(i, command)| !command.is_assume() || discharge.contains(&(depth, i))) + .then_some(()) + .ok_or_else(|| CheckerError::Unspecified) // TODO: add custom error + } + pub fn get_rule(rule_name: &str, strict: bool) -> Option { use rules::*; diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 13f1b9d4..945ba801 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -1,8 +1,11 @@ pub mod scheduler; -use super::error::CheckerError; -use super::rules::{Premise, RuleArgs, RuleResult}; -use super::{lia_generic, Config}; +use super::{ + error::CheckerError, + lia_generic, + rules::{Premise, RuleArgs, RuleResult}, + Config, ProofChecker, +}; use crate::benchmarking::{CollectResults, OnlineBenchmarkResults}; use crate::checker::CheckerStatistics; use crate::{ @@ -428,7 +431,7 @@ impl<'c> ParallelProofChecker<'c> { self.is_holey = true; } } else { - let rule = match super::ProofChecker::get_rule(&step.rule, self.config.strict) { + let rule = match ProofChecker::get_rule(&step.rule, self.config.strict) { Some(r) => r, None if self.config.skip_unknown_rules => { self.is_holey = true; @@ -469,6 +472,11 @@ impl<'c> ParallelProofChecker<'c> { 
rule(rule_args)?; } + if iter.is_end_step() { + let subproof = iter.current_subproof().unwrap(); + ProofChecker::check_discharge(subproof, iter.depth(), &step.discharge)?; + } + if let Some(s) = stats { let time = time.elapsed(); s.results From 9382cadd762b2b2eb5b624130b79f3c806b02e70 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Mon, 28 Aug 2023 16:15:17 -0300 Subject: [PATCH 65/70] Add specific error types for discharge errors --- carcara/src/checker/error.rs | 6 ++++++ carcara/src/checker/mod.rs | 19 ++++++++++++++----- carcara/src/checker/parallel/mod.rs | 6 +++++- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/carcara/src/checker/error.rs b/carcara/src/checker/error.rs index e92c8d15..0cb7a9b6 100644 --- a/carcara/src/checker/error.rs +++ b/carcara/src/checker/error.rs @@ -282,6 +282,12 @@ pub enum SubproofError { #[error("discharge must be 'assume' command: '{0}'")] DischargeMustBeAssume(String), + #[error("local assumption '{0}' was not discharged")] + LocalAssumeNotDischarged(String), + + #[error("only the `subproof` rule may discharge local assumptions")] + DischargeInWrongRule, + #[error("binding '{0}' appears as free variable in phi")] BindBindingIsFreeVarInPhi(String), diff --git a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 3f8d9f8c..4fb7fea8 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -9,7 +9,7 @@ use crate::{ elaborator::Elaborator, CarcaraResult, Error, LiaGenericOptions, }; -use error::CheckerError; +use error::{CheckerError, SubproofError}; use indexmap::IndexSet; pub use parallel::{scheduler::Scheduler, ParallelProofChecker}; use rules::{ElaborationRule, Premise, Rule, RuleArgs, RuleResult}; @@ -334,6 +334,10 @@ impl<'c> ProofChecker<'c> { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; + if !step.discharge.is_empty() && step.rule != "subproof" { + return Err(CheckerError::Subproof(SubproofError::DischargeInWrongRule)); + } + let mut elaborated = false; if 
step.rule == "lia_generic" { if let Some(options) = &self.config.lia_options { @@ -433,12 +437,17 @@ impl<'c> ProofChecker<'c> { discharge: &[(usize, usize)], ) -> RuleResult { let discharge: IndexSet<_> = discharge.iter().collect(); - subproof + if let Some((_, not_discharged)) = subproof .iter() .enumerate() - .all(|(i, command)| !command.is_assume() || discharge.contains(&(depth, i))) - .then_some(()) - .ok_or_else(|| CheckerError::Unspecified) // TODO: add custom error + .find(|&(i, command)| command.is_assume() && !discharge.contains(&(depth, i))) + { + Err(CheckerError::Subproof( + SubproofError::LocalAssumeNotDischarged(not_discharged.id().to_owned()), + )) + } else { + Ok(()) + } } pub fn get_rule(rule_name: &str, strict: bool) -> Option { diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 945ba801..322e2595 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -1,7 +1,7 @@ pub mod scheduler; use super::{ - error::CheckerError, + error::{CheckerError, SubproofError}, lia_generic, rules::{Premise, RuleArgs, RuleResult}, Config, ProofChecker, @@ -421,6 +421,10 @@ impl<'c> ParallelProofChecker<'c> { let time = Instant::now(); let mut polyeq_time = Duration::ZERO; + if !step.discharge.is_empty() && step.rule != "subproof" { + return Err(CheckerError::Subproof(SubproofError::DischargeInWrongRule)); + } + if step.rule == "lia_generic" { if let Some(options) = &self.config.lia_options { let is_hole = From 365b6c5b14bdd623873012bcc9f60b5905ac1b4e Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 29 Aug 2023 12:42:16 -0300 Subject: [PATCH 66/70] Rename `skip-unknown-rules` to `ignore-unknown-rules` --- carcara/src/checker/mod.rs | 8 ++++---- carcara/src/checker/parallel/mod.rs | 2 +- carcara/src/lib.rs | 10 +++++----- cli/src/benchmarking.rs | 2 +- cli/src/main.rs | 21 +++++++++++++++++---- 5 files changed, 28 insertions(+), 15 deletions(-) diff --git 
a/carcara/src/checker/mod.rs b/carcara/src/checker/mod.rs index 4fb7fea8..4f1b6aac 100644 --- a/carcara/src/checker/mod.rs +++ b/carcara/src/checker/mod.rs @@ -48,7 +48,7 @@ impl fmt::Debug for CheckerStatistics<'_, C #[derive(Debug, Default, Clone)] pub struct Config { strict: bool, - skip_unknown_rules: bool, + ignore_unknown_rules: bool, lia_options: Option, } @@ -62,8 +62,8 @@ impl Config { self } - pub fn skip_unknown_rules(mut self, value: bool) -> Self { - self.skip_unknown_rules = value; + pub fn ignore_unknown_rules(mut self, value: bool) -> Self { + self.ignore_unknown_rules = value; self } @@ -361,7 +361,7 @@ impl<'c> ProofChecker<'c> { } else { let rule = match Self::get_rule(&step.rule, self.config.strict) { Some(r) => r, - None if self.config.skip_unknown_rules => { + None if self.config.ignore_unknown_rules => { self.is_holey = true; if let Some(elaborator) = &mut self.elaborator { elaborator.unchanged(&step.clause); diff --git a/carcara/src/checker/parallel/mod.rs b/carcara/src/checker/parallel/mod.rs index 322e2595..c5eac4c6 100644 --- a/carcara/src/checker/parallel/mod.rs +++ b/carcara/src/checker/parallel/mod.rs @@ -437,7 +437,7 @@ impl<'c> ParallelProofChecker<'c> { } else { let rule = match ProofChecker::get_rule(&step.rule, self.config.strict) { Some(r) => r, - None if self.config.skip_unknown_rules => { + None if self.config.ignore_unknown_rules => { self.is_holey = true; return Ok(()); } diff --git a/carcara/src/lib.rs b/carcara/src/lib.rs index 8341d33a..ae70f6b0 100644 --- a/carcara/src/lib.rs +++ b/carcara/src/lib.rs @@ -90,9 +90,9 @@ pub struct CarcaraOptions { /// benefit). pub strict: bool, - /// If `true`, Carcara will skip any rules that it does not recognize, and will consider them as + /// If `true`, Carcara will skip any steps with rules that it does not recognize, and will consider them as /// holes. Normally, using an unknown rule is considered an error. 
- pub skip_unknown_rules: bool, + pub ignore_unknown_rules: bool, /// If `true`, Carcará will log the check and elaboration statistics of any /// `check` or `check_and_elaborate` run. If `false` no statistics are logged. @@ -163,7 +163,7 @@ pub fn check(problem: T, proof: T, options: CarcaraOptions) -> R let config = checker::Config::new() .strict(options.strict) - .skip_unknown_rules(options.skip_unknown_rules) + .ignore_unknown_rules(options.ignore_unknown_rules) .lia_options(options.lia_options); // Checking @@ -228,7 +228,7 @@ pub fn check_parallel( let config = checker::Config::new() .strict(options.strict) - .skip_unknown_rules(options.skip_unknown_rules) + .ignore_unknown_rules(options.ignore_unknown_rules) .lia_options(options.lia_options); // Checking @@ -298,7 +298,7 @@ pub fn check_and_elaborate( let config = checker::Config::new() .strict(options.strict) - .skip_unknown_rules(options.skip_unknown_rules) + .ignore_unknown_rules(options.ignore_unknown_rules) .lia_options(options.lia_options); // Checking diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index 72ba8518..175457f6 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -51,7 +51,7 @@ fn run_job( let config = checker::Config::new() .strict(options.strict) - .skip_unknown_rules(options.skip_unknown_rules) + .ignore_unknown_rules(options.ignore_unknown_rules) .lia_options(options.lia_options.clone()); let mut checker = checker::ProofChecker::new(&mut pool, config, &prelude); diff --git a/cli/src/main.rs b/cli/src/main.rs index 109fc5b0..c7f60eb7 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -123,8 +123,12 @@ struct CheckingOptions { #[clap(short, long)] strict: bool, - /// Skips rules that are not known by the checker. - #[clap(long)] + /// Allow steps with rules that are not known by the checker, and consider them as holes. 
+ #[clap(short, long)] + ignore_unknown_rules: bool, + + // Note: the `--skip-unknown-rules` flag has been deprecated in favor of `--ignore-unknown-rules` + #[clap(long, conflicts_with("ignore-unknown-rules"), hide = true)] skip_unknown_rules: bool, /// Check `lia_generic` steps using the provided solver. @@ -161,6 +165,7 @@ fn build_carcara_options( }: ParsingOptions, CheckingOptions { strict, + ignore_unknown_rules, skip_unknown_rules, lia_solver, lia_via_cvc5, @@ -181,7 +186,7 @@ fn build_carcara_options( allow_int_real_subtyping, lia_options, strict, - skip_unknown_rules, + ignore_unknown_rules: ignore_unknown_rules || skip_unknown_rules, stats, } } @@ -328,8 +333,16 @@ fn main() { | Command::Elaborate(ElaborateCommandOptions { checking, .. }) | Command::Bench(BenchCommandOptions { checking, .. }) = &cli.command { + if checking.skip_unknown_rules { + log::warn!( + "the `--skip-unknown-rules` option is deprecated, please use \ + `--ignore-unknown-rules` instead" + ) + } if checking.lia_via_cvc5 { - log::warn!("`--lia-via-cvc5` option is deprecated, please use `--lia-solver cvc5`") + log::warn!( + "the `--lia-via-cvc5` option is deprecated, please use `--lia-solver cvc5` instead" + ) } } From 3a9ca4a405062cd9ee6507dd5032837de2945b71 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 29 Aug 2023 12:45:57 -0300 Subject: [PATCH 67/70] Rename `bench` command's `--num-threads` option to `--num-jobs` This is to avoid confusion with the `--num-threads` option of the `check` command, that controls the number of threads to use in the parallel proof checker. 
--- cli/src/benchmarking.rs | 8 ++++---- cli/src/main.rs | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cli/src/benchmarking.rs b/cli/src/benchmarking.rs index 175457f6..82dbdf14 100644 --- a/cli/src/benchmarking.rs +++ b/cli/src/benchmarking.rs @@ -109,7 +109,7 @@ fn worker_thread( pub fn run_benchmark( instances: &[(PathBuf, PathBuf)], num_runs: usize, - num_threads: usize, + num_jobs: usize, options: &CarcaraOptions, elaborate: bool, ) -> T { @@ -133,7 +133,7 @@ pub fn run_benchmark( // We of course need to `collect` here to ensure we spawn all threads before starting to // `join` them #[allow(clippy::needless_collect)] - let workers: Vec<_> = (0..num_threads) + let workers: Vec<_> = (0..num_jobs) .map(|_| { thread::Builder::new() .stack_size(STACK_SIZE) @@ -153,14 +153,14 @@ pub fn run_benchmark( pub fn run_csv_benchmark( instances: &[(PathBuf, PathBuf)], num_runs: usize, - num_threads: usize, + num_jobs: usize, options: &CarcaraOptions, elaborate: bool, runs_dest: &mut dyn io::Write, by_rule_dest: &mut dyn io::Write, ) -> io::Result<()> { let result: CsvBenchmarkResults = - run_benchmark(instances, num_runs, num_threads, options, elaborate); + run_benchmark(instances, num_runs, num_jobs, options, elaborate); println!( "{} errors encountered during benchmark", result.num_errors() diff --git a/cli/src/main.rs b/cli/src/main.rs index c7f60eb7..2710b6fb 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -269,9 +269,9 @@ struct BenchCommandOptions { #[clap(short, long, default_value_t = 1)] num_runs: usize, - /// Number of threads to use when running the benchmark. + /// Number of jobs to run simultaneously when running the benchmark. #[clap(short = 'j', long, default_value_t = 1)] - num_threads: usize, + num_jobs: usize, /// Show benchmark results sorted by total time taken, instead of by average time taken. 
#[clap(short = 't', long)] @@ -454,7 +454,7 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { benchmarking::run_csv_benchmark( &instances, options.num_runs, - options.num_threads, + options.num_jobs, &carc_options, options.elaborate, &mut File::create("runs.csv")?, @@ -466,7 +466,7 @@ fn bench_command(options: BenchCommandOptions) -> CliResult<()> { let results: OnlineBenchmarkResults = benchmarking::run_benchmark( &instances, options.num_runs, - options.num_threads, + options.num_jobs, &carc_options, options.elaborate, ); From c5f0e4240a9fd920642302479231870267da47dd Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 29 Aug 2023 13:50:36 -0300 Subject: [PATCH 68/70] Fix benchmark results not respecting `-t` flag --- carcara/src/benchmarking/mod.rs | 42 +++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/carcara/src/benchmarking/mod.rs b/carcara/src/benchmarking/mod.rs index 47577a92..dbcd88eb 100644 --- a/carcara/src/benchmarking/mod.rs +++ b/carcara/src/benchmarking/mod.rs @@ -139,21 +139,25 @@ impl OnlineBenchmarkResults { /// Prints the benchmark results pub fn print(&self, sort_by_total: bool) { - let [parsing, checking, elaborating, scheduling, accounted_for, total] = [ - self.parsing(), - self.checking(), - self.elaborating(), - self.scheduling(), - self.total_accounted_for(), - self.total(), - ] - .map(|m| { - if sort_by_total { - format!("{:#}", m) - } else { - format!("{}", m) - } - }); + let [parsing, checking, elaborating, scheduling, accounted_for, total, assume_time, assume_core_time, polyeq_time] = + [ + self.parsing(), + self.checking(), + self.elaborating(), + self.scheduling(), + self.total_accounted_for(), + self.total(), + &self.assume_time, + &self.assume_core_time, + &self.polyeq_time, + ] + .map(|m| { + if sort_by_total { + format!("{:#}", m) + } else { + format!("{}", m) + } + }); println!("parsing: {}", parsing); println!("checking: {}", checking); @@ -161,19 +165,21 @@ 
impl OnlineBenchmarkResults { println!("elaborating: {}", elaborating); } println!("scheduling: {}", scheduling); + println!( "on assume: {} ({:.02}% of checking time)", - self.assume_time, + assume_time, 100.0 * self.assume_time.mean().as_secs_f64() / self.checking().mean().as_secs_f64(), ); - println!("on assume (core): {}", self.assume_core_time); + println!("on assume (core): {}", assume_core_time); println!("assume ratio: {}", self.assume_time_ratio); println!( "on polyeq: {} ({:.02}% of checking time)", - self.polyeq_time, + polyeq_time, 100.0 * self.polyeq_time.mean().as_secs_f64() / self.checking().mean().as_secs_f64(), ); println!("polyeq ratio: {}", self.polyeq_time_ratio); + println!("total accounted for: {}", accounted_for); println!("total: {}", total); From b6729cf74e57ab0002fcdcdf49047d52a6b24593 Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 29 Aug 2023 14:07:53 -0300 Subject: [PATCH 69/70] Upgrade dependencies --- Cargo.lock | 4 ++-- carcara/Cargo.toml | 6 +++--- cli/Cargo.toml | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1a145a1..6d04342c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -355,9 +355,9 @@ dependencies = [ [[package]] name = "rug" -version = "1.20.1" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240ad7cbc5fc7cea4592203f8f6100835e8ad083196491b8a9c84ce84711ff68" +checksum = "8882d6fd62b334b72dcf5c79f7e6b529d6790322de14bb49339415266131b031" dependencies = [ "az", "gmp-mpfr-sys", diff --git a/carcara/Cargo.toml b/carcara/Cargo.toml index 90116f7a..c14d4d8f 100644 --- a/carcara/Cargo.toml +++ b/carcara/Cargo.toml @@ -8,9 +8,9 @@ license = "Apache-2.0" [dependencies] indexmap = "2.0.0" -log = "0.4.17" -rug = { version = "1.19.2", features = ["integer", "rational"] } -thiserror = "1.0.40" +log = "0.4.20" +rug = { version = "1.21.0", features = ["integer", "rational"] } +thiserror = "1.0.47" [dev-dependencies] test-generator = 
{ path = "../test-generator" } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 6676349e..6a5e026c 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -11,9 +11,9 @@ path = "src/main.rs" [dependencies] carcara = { path = "../carcara" } -clap = { version = "3.2.23", features = ["derive"] } -const_format = "0.2.30" +clap = { version = "3.2.25", features = ["derive"] } +const_format = "0.2.31" crossbeam-queue = "0.3.8" -log = { version = "0.4.17", features = ["std"] } +log = { version = "0.4.20", features = ["std"] } ansi_term = "0.12" git-version = "0.3.5" From 13bf580783a6bdbafdc45114f2d91e190f91d8bb Mon Sep 17 00:00:00 2001 From: Bruno Andreotti Date: Tue, 29 Aug 2023 14:10:00 -0300 Subject: [PATCH 70/70] Bump version to 1.1.0 --- Cargo.lock | 4 ++-- carcara/Cargo.toml | 2 +- cli/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d04342c..cf629d78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,7 +42,7 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "carcara" -version = "1.0.0" +version = "1.1.0" dependencies = [ "indexmap 2.0.0", "log", @@ -54,7 +54,7 @@ dependencies = [ [[package]] name = "carcara-cli" -version = "1.0.0" +version = "1.1.0" dependencies = [ "ansi_term", "carcara", diff --git a/carcara/Cargo.toml b/carcara/Cargo.toml index c14d4d8f..8d84176a 100644 --- a/carcara/Cargo.toml +++ b/carcara/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "carcara" -version = "1.0.0" +version = "1.1.0" authors = ["Bruno Andreotti ", "Vinícius Braga Freire "] edition = "2021" rust-version = "1.72" diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 6a5e026c..27f22106 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "carcara-cli" -version = "1.0.0" +version = "1.1.0" edition = "2021" rust-version = "1.72" license = "Apache-2.0"