diff --git a/bin/egg-sc-tptp b/bin/egg-sc-tptp new file mode 100755 index 000000000..315ff9433 Binary files /dev/null and b/bin/egg-sc-tptp differ diff --git a/bin/egg-sc-tptp.exe b/bin/egg-sc-tptp.exe new file mode 100644 index 000000000..250581394 Binary files /dev/null and b/bin/egg-sc-tptp.exe differ diff --git a/bin/goeland_linux_release b/bin/goeland_linux_release new file mode 100755 index 000000000..3af331044 Binary files /dev/null and b/bin/goeland_linux_release differ diff --git a/bin/p9.jar b/bin/p9.jar new file mode 100755 index 000000000..28a5afa19 Binary files /dev/null and b/bin/p9.jar differ diff --git a/bin/sctptpUtils.jar b/bin/sctptpUtils.jar new file mode 100755 index 000000000..043aba6cc Binary files /dev/null and b/bin/sctptpUtils.jar differ diff --git a/build.sbt b/build.sbt index 4a3136c13..52c5f4465 100644 --- a/build.sbt +++ b/build.sbt @@ -16,7 +16,7 @@ ThisBuild / semanticdbEnabled := true ThisBuild / semanticdbVersion := scalafixSemanticdb.revision val scala2 = "2.13.8" -val scala3 = "3.5.1" +val scala3 = "3.5.2" val commonSettings = Seq( crossScalaVersions := Seq(scala3), @@ -32,7 +32,8 @@ val commonSettings3 = commonSettings ++ Seq( scalacOptions ++= Seq( "-language:implicitConversions", //"-rewrite", "-source", "3.4-migration", - "-Wconf:msg=.*will never be selected.*:silent" + "-Wconf:msg=.*will never be selected.*:silent", + "-language:experimental.modularity" ), javaOptions += "-Xmx10G", @@ -47,7 +48,7 @@ def withTests(project: Project): ClasspathDependency = def githubProject(repo: String, commitHash: String) = RootProject(uri(s"$repo#$commitHash")) -lazy val customTstpParser = githubProject("https://github.com/SimonGuilloud/scala-tptp-parser.git", "eae9c1b7a9546f74779d77ff50fa6e8a1654cfa0") +lazy val customTstpParser = githubProject("https://github.com/SC-TPTP/scala-tptp-parser.git", "851338c4175036279279835d9f58895aed2f37ba") lazy val root = Project( id = "lisa", @@ -69,12 +70,19 @@ lazy val kernel = Project( ) lazy val sets = Project( - id = "lisa-sets", - base = file("lisa-sets") + id = "lisa-sets2", + base = file("lisa-sets2") ) .settings(commonSettings3) .dependsOn(kernel, withTests(utils)) - +/* +lazy val sets2 = Project( + id = "lisa-sets2", + base = file("lisa-sets2") +) + .settings(commonSettings3) + .dependsOn(kernel, withTests(utils)) +*/ lazy val utils = Project( id = "lisa-utils", base = file("lisa-utils") diff --git a/lisa-examples/goeland/Example.buveurs2.p b/lisa-examples/goeland/Example.buveurs2.p new file mode 100644 index 000000000..290451925 --- /dev/null +++ b/lisa-examples/goeland/Example.buveurs2.p @@ -0,0 +1,21 @@ +%-------------------------------------------------------------------------- +% File : Example.buveurs2 : TPTP v8.0.0. +% Domain : None +% Problem : question0 +% Version : None +% English : + +% Refs : https://github.com/epfl-lara/lisa +% : lisa.utils.tptp.ProofParser +% Source : [Lisa, Example.buveurs2] +% Names : + +% Status : Unknown +% Rating : ? +% Syntax : ? +% SPC : FOF_UNK_RFO_SEQ + +% Comments : This problem, was printed from a statement in a proof of a theorem by the Lisa theorem prover for submission to proof-producing ATPs. +%-------------------------------------------------------------------------- +fof(c1, conjecture, ($true => (? [Xx]: ((sP(Xx) => (! [Xy]: (sP(Xy)))))))). 
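The conjecture exported above is the classical drinker's paradox, ∃x. (P(x) ⇒ ∀y. P(y)), printed by `lisa.utils.tptp.ProofParser` for the Goeland prover. As a minimal sketch of the LISA-side statement that produces this goal, mirroring the `GoelandExample` added to `lisa-examples/src/main/scala/Example.scala` further down in this diff (the DSL names `variable`, `predicate`, `Theorem`, `exists`, `forall`, `==>` and the `Goeland` tactic are taken from that file; the definition of `y` and the surrounding `lisa.Main` setup are assumptions):

```scala
// Sketch only — mirrors the GoelandExample added in Example.scala below.
object DrinkersParadox extends lisa.Main:
  val x = variable[Term]
  val y = variable[Term]   // assumed to be declared like x; not shown in the hunk above
  val P = predicate[1]

  // ∃x. (P(x) ⇒ ∀y. P(y)) — the statement printed to goeland/Example.buveurs2.p above.
  val buveurs = Theorem(exists(x, P(x) ==> forall(y, P(y)))):
    // Goeland discharges the goal; its SC-TPTP output corresponds to
    // goeland/Example.buveurs2_sol.p in this diff.
    have(thesis) by Goeland
```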
+ diff --git a/lisa-examples/goeland/Example.buveurs2_sol.p b/lisa-examples/goeland/Example.buveurs2_sol.p new file mode 100644 index 000000000..fca50d003 --- /dev/null +++ b/lisa-examples/goeland/Example.buveurs2_sol.p @@ -0,0 +1,29 @@ + +% SZS output start Proof for Example.buveurs2.p + + +fof(c_Example_buveurs2_p, conjecture, ($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))). + +fof(f9, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))), $true, ~((? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))), ~((sP(XX4_8) => (! [Xy6] : (sP(Xy6))))), sP(XX4_8), ~((! [Xy6] : (sP(Xy6)))), ~(sP(Sko_0)), ~((sP(Sko_0) => (! [Xy6] : (sP(Xy6))))), sP(Sko_0)] --> [], inference(leftHyp, param(8, 6), [])). + +fof(f8, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))), $true, ~((? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))), ~((sP(XX4_8) => (! [Xy6] : (sP(Xy6))))), sP(XX4_8), ~((! [Xy6] : (sP(Xy6)))), ~(sP(Sko_0)), ~((sP(Sko_0) => (! [Xy6] : (sP(Xy6)))))] --> [], inference(leftNotImp, param(7), [f9])). + +fof(f7, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))), $true, ~((? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))), ~((sP(XX4_8) => (! [Xy6] : (sP(Xy6))))), sP(XX4_8), ~((! [Xy6] : (sP(Xy6)))), ~(sP(Sko_0))] --> [], inference(leftNotEx, param(2, $fot(Sko_0)), [f8])). + +fof(f6, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))), $true, ~((? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))), ~((sP(XX4_8) => (! [Xy6] : (sP(Xy6))))), sP(XX4_8), ~((! [Xy6] : (sP(Xy6))))] --> [], inference(leftNotForall, param(5, $fot(Sko_0)), [f7])). + +fof(f5, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))), $true, ~((? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))), ~((sP(XX4_8) => (! [Xy6] : (sP(Xy6)))))] --> [], inference(leftNotImp, param(3), [f6])). + +fof(f4, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))), $true, ~((? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))] --> [], inference(leftNotEx, param(2, $fot(XX4_8)), [f5])). + +fof(f3, plain, [~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))))] --> [], inference(leftNotImp, param(0), [f4])). + +fof(f2, plain, [($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))] --> [($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))], inference(hyp, param(0, 0), [])). + +fof(f1, plain, [] --> [($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))), ~(($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6))))))))], inference(rightNot, param(1), [f2])). + +fof(f0, plain, [] --> [($true => (? [Xx4] : ((sP(Xx4) => (! [Xy6] : (sP(Xy6)))))))], inference(cut, param(1, 0), [f1, f3])). 
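Each `fof` step above is a sequent-calculus inference in SC-TPTP form, `fof(name, plain, Γ --> Δ, inference(rule, param(...), [premises]))`, and the steps form a proof tree rooted at `f0`. The sketch below is a hypothetical data model for reading such steps, for illustration only; the actual parser is `lisa.utils.tptp.ProofParser` (referenced in the problem header), and none of the names below are its real API.

```scala
// Hypothetical shape of one SC-TPTP proof step, for illustration only.
final case class ScTptpStep(
    name: String,          // step label, e.g. "f0"
    left: Seq[String],     // antecedent formulas (left of -->)
    right: Seq[String],    // succedent formulas (right of -->)
    rule: String,          // inference rule, e.g. "cut", "rightNot", "leftNotEx"
    params: Seq[String],   // rule parameters: formula indices or $fot(...) terms
    premises: Seq[String]  // names of premise steps, e.g. Seq("f1", "f3")
)

// The proof above, read from its root:
//   f0 (cut) ─┬─ f1 (rightNot) ── f2 (hyp)
//             └─ f3 (leftNotImp) ── f4 ── f5 ── f6 ── f7 ── f8 ── f9 (leftHyp)
```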
+ + + +% SZS output end Proof for Example.buveurs2.p diff --git a/lisa-examples/src/main/scala/Example.scala b/lisa-examples/src/main/scala/Example.scala index 494964d9c..5f8676c86 100644 --- a/lisa-examples/src/main/scala/Example.scala +++ b/lisa-examples/src/main/scala/Example.scala @@ -138,6 +138,16 @@ object GoelandExample extends lisa.Main: val P = predicate[1] val f = function[1] - val buveurs2 = Theorem(exists(x, P(x) ==> forall(y, P(y)))) { - have(thesis) by Goeland // ("goeland/Example.buveurs2_sol") - } + val x = variable[Term] + val f = variable[Term >>: Term] + + val buveurs = Theorem(exists(x, P(x) ==> forall(y, P(y)))): + have(thesis) by Goeland + + + val rule8 = Axiom(forall(x, x === f(f(f(f(f(f(f(f(x))))))))) ) + val rule5 = Axiom(forall(x, x === f(f(f(f(f(x)))))) ) + + val saturation = Theorem(∅ === f(∅)): + have(thesis) by Egg.from(rule8, rule5) + diff --git a/lisa-examples/src/main/scala/Lattices.scala b/lisa-examples/src/main/scala/Lattices.scala index cbc4c28ed..bc8d1b79e 100644 --- a/lisa-examples/src/main/scala/Lattices.scala +++ b/lisa-examples/src/main/scala/Lattices.scala @@ -69,8 +69,8 @@ object Lattices extends lisa.Main { // Can we automate this? // Yes, we can! - import lisa.prooflib.ProofTacticLib.ProofTactic - import lisa.prooflib.Library + import lisa.utils.prooflib.ProofTacticLib.ProofTactic + import lisa.utils.prooflib.Library object Whitman extends ProofTactic { def solve(using lib: library.type, proof: lib.Proof)(goal: Sequent): proof.ProofTacticJudgement = { diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/CommonDefinitions.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/CommonDefinitions.scala deleted file mode 100644 index 6f65ffdfa..000000000 --- a/lisa-kernel/src/main/scala/lisa/kernel/fol/CommonDefinitions.scala +++ /dev/null @@ -1,57 +0,0 @@ -package lisa.kernel.fol - -/** - * Definitions that are common to terms and formulas. - */ -private[fol] trait CommonDefinitions { - val MaxArity: Int = 1000000 - - /** - * A labelled node for tree-like structures. - */ - trait Label { - val arity: Int - val id: Identifier - } - - sealed case class Identifier(val name: String, val no: Int) { - require(no >= 0, "Variable index must be positive") - require(Identifier.isValidIdentifier(name), "Variable name " + name + "is not valid.") - override def toString: String = if (no == 0) name else name + Identifier.counterSeparator + no - } - object Identifier { - def unapply(i: Identifier): Option[(String, Int)] = Some((i.name, i.no)) - def apply(name: String): Identifier = new Identifier(name, 0) - def apply(name: String, no: Int): Identifier = new Identifier(name, no) - - val counterSeparator: Char = '_' - val delimiter: Char = '`' - val forbiddenChars: Set[Char] = ("()[]{}?,;" + delimiter + counterSeparator).toSet - def isValidIdentifier(s: String): Boolean = s.forall(c => !forbiddenChars.contains(c) && !c.isWhitespace) - } - - /** - * return am identifier that is different from a set of give identifier. - * @param taken ids which are not available - * @param base prefix of the new id - * @return a fresh id. 
- */ - private[kernel] def freshId(taken: Iterable[Identifier], base: Identifier): Identifier = { - new Identifier( - base.name, - (taken.collect({ case Identifier(base.name, no) => - no - }) ++ Iterable(base.no)).max + 1 - ) - } - - /** - * A label for constant (non-schematic) symbols in formula and terms - */ - trait ConstantLabel extends Label - - /** - * A schematic label in a formula or a term can be substituted by any formula or term of the adequate kind. - */ - trait SchematicLabel extends Label -} diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/EquivalenceChecker.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/EquivalenceChecker.scala deleted file mode 100644 index d323f6219..000000000 --- a/lisa-kernel/src/main/scala/lisa/kernel/fol/EquivalenceChecker.scala +++ /dev/null @@ -1,810 +0,0 @@ -package lisa.kernel.fol - -import scala.collection.mutable -import scala.math.Numeric.IntIsIntegral - -/** - * An EquivalenceChecker is an object that allows to detect some notion of equivalence between formulas - * and between terms. - * This allows the proof checker and writer to avoid having to deal with a class of "easy" equivalence. - * For example, by considering "x ∨ y" as being the same formula as "y ∨ x", we can avoid frustrating errors. - * For soundness, this relation should always be a subrelation of the usual FOL implication. - * The implementation checks for Orthocomplemented Bismeilatices equivalence, plus symetry and reflexivity - * of equality and alpha-equivalence. - * See https://github.com/epfl-lara/OCBSL for more informations - */ -private[fol] trait EquivalenceChecker extends FormulaDefinitions { - - def reducedForm(formula: Formula): Formula = { - val p = simplify(formula) - val nf = computeNormalForm(p) - val fln = fromLocallyNameless(nf, Map.empty, 0) - val res = toFormulaAIG(fln) - res - } - - def reducedNNFForm(formula: Formula): Formula = { - val p = simplify(formula) - val nf = computeNormalForm(p) - val fln = fromLocallyNameless(nf, Map.empty, 0) - val res = toFormulaNNF(fln) - res - } - def reduceSet(s: Set[Formula]): Set[Formula] = { - var res: List[Formula] = Nil - s.map(reducedForm) - .foreach({ f => - if (!res.exists(isSame(f, _))) res = f :: res - }) - res.toSet - } - - def isSameTerm(term1: Term, term2: Term): Boolean = term1.label == term2.label && term1.args.lazyZip(term2.args).forall(isSameTerm) - def isSame(formula1: Formula, formula2: Formula): Boolean = { - val nf1 = computeNormalForm(simplify(formula1)) - val nf2 = computeNormalForm(simplify(formula2)) - latticesEQ(nf1, nf2) - } - - /** - * returns true if the first argument implies the second by the laws of ortholattices. 
- */ - def isImplying(formula1: Formula, formula2: Formula): Boolean = { - val nf1 = computeNormalForm(simplify(formula1)) - val nf2 = computeNormalForm(simplify(formula2)) - latticesLEQ(nf1, nf2) - } - - def isSubset(s1: Set[Formula], s2: Set[Formula]): Boolean = { - s1.forall(f1 => s2.exists(g1 => isSame(f1, g1))) - } - def isSameSet(s1: Set[Formula], s2: Set[Formula]): Boolean = - isSubset(s1, s2) && isSubset(s2, s1) - - def isSameSetL(s1: Set[Formula], s2: Set[Formula]): Boolean = - isSame(ConnectorFormula(And, s1.toSeq), ConnectorFormula(And, s2.toSeq)) - - def isSameSetR(s1: Set[Formula], s2: Set[Formula]): Boolean = - isSame(ConnectorFormula(Or, s2.toSeq), ConnectorFormula(Or, s1.toSeq)) - - def contains(s: Set[Formula], f: Formula): Boolean = { - s.exists(g => isSame(f, g)) - } - - private var totPolarFormula = 0 - sealed abstract class SimpleFormula { - val uniqueKey: Int = totPolarFormula - totPolarFormula += 1 - val size: Int - var inverse: Option[SimpleFormula] = None - private[EquivalenceChecker] var normalForm: Option[NormalFormula] = None - def getNormalForm = normalForm - } - case class SimplePredicate(id: AtomicLabel, args: Seq[Term], polarity: Boolean) extends SimpleFormula { - override def toString: String = s"SimplePredicate($id, $args, $polarity)" - val size = 1 - } - case class SimpleSchemConnector(id: SchematicConnectorLabel, args: Seq[SimpleFormula], polarity: Boolean) extends SimpleFormula { - val size = 1 - } - case class SimpleAnd(children: Seq[SimpleFormula], polarity: Boolean) extends SimpleFormula { - val size: Int = (children map (_.size)).foldLeft(1) { case (a, b) => a + b } - } - case class SimpleForall(x: Identifier, inner: SimpleFormula, polarity: Boolean) extends SimpleFormula { - val size: Int = 1 + inner.size - } - case class SimpleLiteral(polarity: Boolean) extends SimpleFormula { - val size = 1 - normalForm = Some(NormalLiteral(polarity)) - } - - private var totNormalFormula = 0 - sealed abstract class NormalFormula { - val uniqueKey: Int = totNormalFormula - totNormalFormula += 1 - var formulaP: Option[Formula] = None - var formulaN: Option[Formula] = None - var formulaAIG: Option[Formula] = None - var inverse: Option[NormalFormula] = None - - private val lessThanBitSet: mutable.Set[Long] = new mutable.HashSet() - setLessThanCache(this, true) - - def lessThanCached(other: NormalFormula): Option[Boolean] = { - val otherIx = 2 * other.uniqueKey - if (lessThanBitSet.contains(otherIx)) Some(lessThanBitSet.contains(otherIx + 1)) - else None - } - - def setLessThanCache(other: NormalFormula, value: Boolean): Unit = { - val otherIx = 2 * other.uniqueKey - lessThanBitSet.contains(otherIx) - if (value) lessThanBitSet.update(otherIx + 1, true) - } - - def recoverFormula: Formula = toFormulaAIG(this) - } - sealed abstract class NonTraversable extends NormalFormula - case class NormalPredicate(id: AtomicLabel, args: Seq[Term], polarity: Boolean) extends NonTraversable { - override def toString: String = s"NormalPredicate($id, $args, $polarity)" - } - case class NormalSchemConnector(id: SchematicConnectorLabel, args: Seq[NormalFormula], polarity: Boolean) extends NonTraversable - case class NormalAnd(args: Seq[NormalFormula], polarity: Boolean) extends NormalFormula - case class NormalForall(x: Identifier, inner: NormalFormula, polarity: Boolean) extends NonTraversable - case class NormalLiteral(polarity: Boolean) extends NormalFormula - - /** - * Puts back in regular formula syntax, in an AIG (without disjunctions) format - */ - def toFormulaAIG(f: 
NormalFormula): Formula = - if (f.formulaAIG.isDefined) f.formulaAIG.get - else { - val r: Formula = f match { - case NormalPredicate(id, args, polarity) => - if (polarity) AtomicFormula(id, args) else ConnectorFormula(Neg, Seq(AtomicFormula(id, args))) - case NormalSchemConnector(id, args, polarity) => - val f = ConnectorFormula(id, args.map(toFormulaAIG)) - if (polarity) f else ConnectorFormula(Neg, Seq(f)) - case NormalAnd(args, polarity) => - val f = ConnectorFormula(And, args.map(toFormulaAIG)) - if (polarity) f else ConnectorFormula(Neg, Seq(f)) - case NormalForall(x, inner, polarity) => - val f = BinderFormula(Forall, VariableLabel(x), toFormulaAIG(inner)) - if (polarity) f else ConnectorFormula(Neg, Seq(f)) - case NormalLiteral(polarity) => if (polarity) AtomicFormula(top, Seq()) else AtomicFormula(bot, Seq()) - } - f.formulaAIG = Some(r) - r - } - - /** - * Puts back in regular formula syntax, and performs negation normal form to produce shorter version. - */ - def toFormulaNNF(f: NormalFormula, positive: Boolean = true): Formula = { - if (positive){ - if (f.formulaP.isDefined) return f.formulaP.get - if (f.inverse.isDefined && f.inverse.get.formulaN.isDefined) return f.inverse.get.formulaN.get - } - else if (!positive) { - if (f.formulaN.isDefined) return f.formulaN.get - if (f.inverse.isDefined && f.inverse.get.formulaP.isDefined) return f.inverse.get.formulaP.get - } - val r = f match{ - case NormalPredicate(id, args, polarity) => - if (positive==polarity) AtomicFormula(id, args) else ConnectorFormula(Neg, Seq(AtomicFormula(id, args))) - case NormalSchemConnector(id, args, polarity) => - val f = ConnectorFormula(id, args.map(toFormulaNNF(_, true))) - if (positive==polarity) f else ConnectorFormula(Neg, Seq(f)) - case NormalAnd(args, polarity) => - if (positive==polarity) - ConnectorFormula(And, args.map(c => toFormulaNNF(c, true))) - else - ConnectorFormula(Or, args.map(c => toFormulaNNF(c, false))) - case NormalForall(x, inner, polarity) => - if (positive==polarity) - BinderFormula(Forall, VariableLabel(x), toFormulaNNF(inner, true)) - else - BinderFormula(Exists, VariableLabel(x), toFormulaNNF(inner, false)) - case NormalLiteral(polarity) => if (polarity) AtomicFormula(top, Seq()) else AtomicFormula(bot, Seq()) - } - if (positive) f.formulaP = Some(r) - else f.formulaN = Some(r) - r - } - - /** - * Inverse a formula in Polar normal form. Corresponds semantically to taking the negation of the formula. - */ - def getInversePolar(f: SimpleFormula): SimpleFormula = { - f.inverse match { - case Some(value) => value - case None => - val second = f match { - case f: SimplePredicate => f.copy(polarity = !f.polarity) - case f: SimpleSchemConnector => f.copy(polarity = !f.polarity) - case f: SimpleAnd => f.copy(polarity = !f.polarity) - case f: SimpleForall => f.copy(polarity = !f.polarity) - case f: SimpleLiteral => f.copy(polarity = !f.polarity) - } - f.inverse = Some(second) - second.inverse = Some(f) - second - } - } - - /** - * Inverse a formula in Polar normal form. Corresponds semantically to taking the negation of the formula. 
- */ - def getInverse(f: NormalFormula): NormalFormula = { - f.inverse match { - case Some(value) => value - case None => - val second: NormalFormula = f match { - case f: NormalPredicate => f.copy(polarity = !f.polarity) - case f: NormalSchemConnector => f.copy(polarity = !f.polarity) - case f: NormalAnd => f.copy(polarity = !f.polarity) - case f: NormalForall => f.copy(polarity = !f.polarity) - case f: NormalLiteral => f.copy(polarity = !f.polarity) - } - f.inverse = Some(second) - second.inverse = Some(f) - second - } - } - - /** - * Put a formula in Polar form, which means desugared. In this normal form, the only (non-schematic) symbol is - * conjunction, the only binder is universal, and negations are flattened - * @param f The formula that has to be transformed - * @param polarity If the formula is in a positive or negative context. It is usually true. - * @return The corresponding PolarFormula - */ - def polarize(f: Formula, polarity: Boolean): SimpleFormula = { - if (polarity & f.polarFormula.isDefined) { - f.polarFormula.get - } else if (!polarity & f.polarFormula.isDefined) { - getInversePolar(f.polarFormula.get) - } else { - val r = f match { - case AtomicFormula(label, args) => - if (label == top) SimpleLiteral(polarity) - else if (label == bot) SimpleLiteral(!polarity) - else SimplePredicate(label, args, polarity) - case ConnectorFormula(label, args) => - label match { - case cl: ConstantConnectorLabel => - cl match { - case Neg => polarize(args.head, !polarity) - case Implies => SimpleAnd(Seq(polarize(args(0), true), polarize(args(1), false)), !polarity) - case Iff => - val l1 = polarize(args(0), true) - val r1 = polarize(args(1), true) - SimpleAnd( - Seq( - SimpleAnd(Seq(l1, getInversePolar(r1)), false), - SimpleAnd(Seq(getInversePolar(l1), r1), false) - ), - polarity - ) - case And => - SimpleAnd(args.map(polarize(_, true)), polarity) - case Or => SimpleAnd(args.map(polarize(_, false)), !polarity) - } - case scl: SchematicConnectorLabel => - SimpleSchemConnector(scl, args.map(polarize(_, true)), polarity) - } - case BinderFormula(label, bound, inner) => - label match { - case Forall => SimpleForall(bound.id, polarize(inner, true), polarity) - case Exists => SimpleForall(bound.id, polarize(inner, false), !polarity) - case ExistsOne => - val y = VariableLabel(freshId(inner.freeVariables.map(_.id), bound.id)) - val c = AtomicFormula(equality, Seq(VariableTerm(bound), VariableTerm(y))) - val newInner = polarize(ConnectorFormula(Iff, Seq(c, inner)), true) - SimpleForall(y.id, SimpleForall(bound.id, newInner, false), !polarity) - } - } - if (polarity) f.polarFormula = Some(r) - else f.polarFormula = Some(getInversePolar(r)) - r - } - } - - def toLocallyNameless(t: Term, subst: Map[Identifier, Int], i: Int): Term = { - t match { - case Term(label: VariableLabel, _) => - if (subst.contains(label.id)) VariableTerm(VariableLabel(Identifier("x", i - subst(label.id)))) - else VariableTerm(VariableLabel(Identifier("$" + label.id.name, label.id.no))) - case Term(label, args) => Term(label, args.map(c => toLocallyNameless(c, subst, i))) - } - } - - def toLocallyNameless(phi: SimpleFormula, subst: Map[Identifier, Int], i: Int): SimpleFormula = { - phi match { - case SimplePredicate(id, args, polarity) => SimplePredicate(id, args.map(c => toLocallyNameless(c, subst, i)), polarity) - case SimpleSchemConnector(id, args, polarity) => SimpleSchemConnector(id, args.map(f => toLocallyNameless(f, subst, i)), polarity) - case SimpleAnd(children, polarity) => 
SimpleAnd(children.map(toLocallyNameless(_, subst, i)), polarity) - case SimpleForall(x, inner, polarity) => SimpleForall(x, toLocallyNameless(inner, subst + (x -> i), i + 1), polarity) - case SimpleLiteral(polarity) => phi - } - } - - def fromLocallyNameless(t: Term, subst: Map[Int, Identifier], i: Int): Term = { - - t match { - case Term(label: VariableLabel, _) => - if ((label.id.name == "x") && subst.contains(i - label.id.no)) VariableTerm(VariableLabel(subst(i - label.id.no))) - else if (label.id.name.head == '$') VariableTerm(VariableLabel(Identifier(label.id.name.tail, label.id.no))) - else { - throw new Exception("This case should be unreachable, error") - } - case Term(label, args) => Term(label, args.map(c => fromLocallyNameless(c, subst, i))) - } - } - - def fromLocallyNameless(phi: NormalFormula, subst: Map[Int, Identifier], i: Int): NormalFormula = { - phi match { - case NormalPredicate(id, args, polarity) => NormalPredicate(id, args.map(c => fromLocallyNameless(c, subst, i)), polarity) - case NormalSchemConnector(id, args, polarity) => NormalSchemConnector(id, args.map(f => fromLocallyNameless(f, subst, i)), polarity) - case NormalAnd(children, polarity) => NormalAnd(children.map(fromLocallyNameless(_, subst, i)), polarity) - case NormalForall(x, inner, polarity) => NormalForall(x, fromLocallyNameless(inner, subst + (i -> x), i + 1), polarity) - case NormalLiteral(_) => phi - } - - } - - def simplify(f: Formula): SimpleFormula = toLocallyNameless(polarize(f, polarity = true), Map.empty, 0) - - def computeNormalForm(formula: SimpleFormula): NormalFormula = { - formula.normalForm match { - case Some(value) => - value - case None => - val r = formula match { - case SimplePredicate(id, args, true) => - if (id == equality) { - if (args(0) == args(1)) - NormalLiteral(true) - else - NormalPredicate(id, args.sortBy(_.hashCode()), true) - } else NormalPredicate(id, args, true) - case SimplePredicate(id, args, false) => - getInverse(computeNormalForm(getInversePolar(formula))) - case SimpleSchemConnector(id, args, true) => - NormalSchemConnector(id, args.map(computeNormalForm), true) - case SimpleSchemConnector(id, args, false) => - getInverse(computeNormalForm(getInversePolar(formula))) - case SimpleAnd(children, polarity) => - val newChildren = children map computeNormalForm - val simp = reduce(newChildren, polarity) - simp match { - case conj: NormalAnd if checkForContradiction(conj) => - NormalLiteral(!polarity) - case _ => - simp - } - case SimpleForall(x, inner, polarity) => NormalForall(x, computeNormalForm(inner), polarity) - case SimpleLiteral(polarity) => NormalLiteral(polarity) - } - formula.normalForm = Some(r) - r - - } - } - def reduceList(children: Seq[NormalFormula], polarity: Boolean): List[NormalFormula] = { - val nonSimplified = NormalAnd(children, polarity) - var remaining: Seq[NormalFormula] = Nil - def treatChild(i: NormalFormula): Seq[NormalFormula] = { - val r: Seq[NormalFormula] = i match { - case NormalAnd(ch, true) => ch - case NormalAnd(ch, false) => - if (polarity) { - val trCh = ch map getInverse - trCh.find(f => { - latticesLEQ(nonSimplified, f) - }) match { - case Some(value) => - treatChild(value) - case None => List(i) - } - } else { - val trCh = ch - trCh.find(f => { - latticesLEQ(f, nonSimplified) - }) match { - case Some(value) => - treatChild(getInverse(value)) - case None => List(i) - } - } - case _ => List(i) - } - r - } - children.foreach(i => { - val r = treatChild(i) - remaining = r ++ remaining - }) - - var accepted: List[NormalFormula] = Nil 
- while (remaining.nonEmpty) { - val current = remaining.head - remaining = remaining.tail - if (!latticesLEQ(NormalAnd(remaining ++ accepted, true), current)) { - accepted = current :: accepted - } - } - accepted - } - - def reduce(children: Seq[NormalFormula], polarity: Boolean): NormalFormula = { - val accepted: List[NormalFormula] = reduceList(children, polarity) - val r = { - if (accepted.isEmpty) NormalLiteral(polarity) - else if (accepted.size == 1) - if (polarity) accepted.head else getInverse(accepted.head) - else NormalAnd(accepted, polarity) - } - r - } - - def latticesEQ(formula1: NormalFormula, formula2: NormalFormula): Boolean = latticesLEQ(formula1, formula2) && latticesLEQ(formula2, formula1) - - def latticesLEQ(formula1: NormalFormula, formula2: NormalFormula): Boolean = { - if (formula1.uniqueKey == formula2.uniqueKey) true - else - formula1.lessThanCached(formula2) match { - case Some(value) => value - case None => - val r = (formula1, formula2) match { - case (NormalLiteral(b1), NormalLiteral(b2)) => !b1 || b2 - case (NormalLiteral(b), _) => !b - case (_, NormalLiteral(b)) => b - - case (NormalPredicate(id1, args1, polarity1), NormalPredicate(id2, args2, polarity2)) => - id1 == id2 && polarity1 == polarity2 && - (args1 == args2 || (id1 == equality && args1(0) == args2(1) && args1(1) == args2(0))) - case (NormalSchemConnector(id1, args1, polarity1), NormalSchemConnector(id2, args2, polarity2)) => - id1 == id2 && polarity1 == polarity2 && args1.zip(args2).forall(f => latticesEQ(f._1, f._2)) - case (NormalForall(x1, inner1, polarity1), NormalForall(x2, inner2, polarity2)) => - polarity1 == polarity2 && (if (polarity1) latticesLEQ(inner1, inner2) else latticesLEQ(inner2, inner1)) - case (_: NonTraversable, _: NonTraversable) => false - - case (_, NormalAnd(children, true)) => - children.forall(c => latticesLEQ(formula1, c)) - case (NormalAnd(children, false), _) => - children.forall(c => latticesLEQ(getInverse(c), formula2)) - case (NormalAnd(children1, true), NormalAnd(children2, false)) => - children1.exists(c => latticesLEQ(c, formula2)) || children2.exists(c => latticesLEQ(formula1, getInverse(c))) - - case (nt: NonTraversable, NormalAnd(children, false)) => - children.exists(c => latticesLEQ(nt, getInverse(c))) - case (NormalAnd(children, true), nt: NonTraversable) => - children.exists(c => latticesLEQ(c, nt)) - - } - formula1.setLessThanCache(formula2, r) - r - } - } - - def checkForContradiction(f: NormalAnd): Boolean = { - f match { - case NormalAnd(children, false) => - children.exists(cc => latticesLEQ(cc, f)) - case NormalAnd(children, true) => - val shadowChildren = children map getInverse - shadowChildren.exists(sc => latticesLEQ(f, sc)) - } - } - - /* - - /** - * A class that encapsulate "runtime" information of the equivalence checker. It performs memoization for efficiency. - */ - class LocalEquivalenceChecker2 { - - private val unsugaredVersion = scala.collection.mutable.HashMap[Formula, PolarFormula]() - // transform a LISA formula into a simpler, desugarised version with less symbols. Conjunction, implication, iff, existsOne are treated as alliases and translated. 
- def removeSugar1(phi: Formula): PolarFormula = { - phi match { - case AtomicFormula(label, args) => - if (label == top) PolarLiteral(true) - else if (label == bot) PolarLiteral(false) - else PolarPredicate(label, args.toList) - case ConnectorFormula(label, args) => - label match { - case Neg => SNeg(removeSugar1(args(0))) - case Implies => - val l = removeSugar1(args(0)) - val r = removeSugar1(args(1)) - PolarAnd(List(SNeg(l), r)) - case Iff => - val l = removeSugar1(args(0)) - val r = removeSugar1(args(1)) - val c1 = SNeg(PolarAnd(List(SNeg(l), r))) - val c2 = SNeg(PolarAnd(List(SNeg(r), l))) - SNeg(PolarAnd(List(c1, c2))) - case And => - SNeg(SOr(args.map(c => SNeg(removeSugar1(c))).toList)) - case Or => PolarAnd((args map removeSugar1).toList) - case _ => PolarSchemConnector(label, args.toList.map(removeSugar1)) - } - case BinderFormula(label, bound, inner) => - label match { - case Forall => PolarForall(bound.id, removeSugar1(inner)) - case Exists => SExists(bound.id, removeSugar1(inner)) - case ExistsOne => - val y = VariableLabel(freshId(inner.freeVariables.map(_.id), bound.id)) - val c1 = PolarPredicate(equality, List(VariableTerm(bound), VariableTerm(y))) - val c2 = removeSugar1(inner) - val c1_c2 = PolarAnd(List(SNeg(c1), c2)) - val c2_c1 = PolarAnd(List(SNeg(c2), c1)) - SExists(y.id, PolarForall(bound.id, SNeg(PolarAnd(List(SNeg(c1_c2), SNeg(c2_c1)))))) - } - } - } - - def toLocallyNameless(t: Term, subst: Map[Identifier, Int], i: Int): Term = { - t match { - case Term(label: VariableLabel, _) => - if (subst.contains(label.id)) VariableTerm(VariableLabel(Identifier("x", i - subst(label.id)))) - else VariableTerm(VariableLabel(Identifier("$" + label.id.name, label.id.no))) - case Term(label, args) => Term(label, args.map(c => toLocallyNameless(c, subst, i))) - } - } - - def toLocallyNameless(phi: PolarFormula, subst: Map[Identifier, Int], i: Int): PolarFormula = { - phi match { - case PolarPredicate(id, args) => PolarPredicate(id, args.map(c => toLocallyNameless(c, subst, i))) - case PolarSchemConnector(id, args) => PolarSchemConnector(id, args.map(f => toLocallyNameless(f, subst, i))) - case SNeg(child) => SNeg(toLocallyNameless(child, subst, i)) - case PolarAnd(children) => PolarAnd(children.map(toLocallyNameless(_, subst, i))) - case PolarForall(x, inner) => PolarForall(Identifier(""), toLocallyNameless(inner, subst + (x -> i), i + 1)) - case SExists(x, inner) => SExists(Identifier(""), toLocallyNameless(inner, subst + (x -> i), i + 1)) - case PolarLiteral(b) => phi - } - } - - def removeSugar(phi: Formula): PolarFormula = { - unsugaredVersion.getOrElseUpdate(phi, toLocallyNameless(removeSugar1(phi), Map.empty, 0)) - } - - private val codesSig: mutable.HashMap[(String, Seq[Int]), Int] = mutable.HashMap() - codesSig.update(("zero", Nil), 0) - codesSig.update(("one", Nil), 1) - - val codesTerms: mutable.HashMap[Term, Int] = mutable.HashMap() - val codesSigTerms: mutable.HashMap[(TermLabel, Seq[Int]), Int] = mutable.HashMap() - - def codesOfTerm(t: Term): Int = codesTerms.getOrElseUpdate( - t, - t match { - case Term(label: VariableLabel, _) => - codesSigTerms.getOrElseUpdate((label, Nil), codesSigTerms.size) - case Term(label, args) => - val c = args map codesOfTerm - codesSigTerms.getOrElseUpdate((label, c), codesSigTerms.size) - } - ) - - def checkForContradiction(children: List[(NormalFormula, Int)]): Boolean = { - val (negatives_temp, positives_temp) = children.foldLeft[(List[NormalFormula], List[NormalFormula])]((Nil, Nil))((acc, ch) => - acc match { - case (negatives, 
positives) => - ch._1 match { - case NNeg(child, c) => (child :: negatives, positives) - case _ => (negatives, ch._1 :: positives) - } - } - ) - val (negatives, positives) = (negatives_temp.sortBy(_.code), positives_temp.reverse) - var i, j = 0 - while (i < positives.size && j < negatives.size) { // checks if there is a positive and negative nodes with same code. - val (c1, c2) = (positives(i).code, negatives(j).code) - if (c1 < c2) i += 1 - else if (c1 == c2) return true - else j += 1 - } - var k = 0 - val children_codes = children.map(c => c._2).toSet // check if there is a negated disjunction whose children all share a code with an uncle - while (k < negatives.size) { - negatives(k) match { - case NOr(gdChildren, c) => - if (gdChildren.forall(sf => children_codes.contains(sf.code))) return true - case _ => () - } - k += 1 - } - false - } - - def updateCodesSig(sig: (String, Seq[Int])): Int = { - if (!codesSig.contains(sig)) codesSig.update(sig, codesSig.size) - codesSig(sig) - } - - def OCBSLCode(phi: PolarFormula): Int = { - if (phi.normalForm.nonEmpty) return phi.normalForm.get.code - val L = pDisj(phi, Nil) - val L2 = L zip (L map (_.code)) - val L3 = L2.sortBy(_._2).distinctBy(_._2).filterNot(_._2 == 0) // actually efficient has set based implementation already - if (L3.isEmpty) { - phi.normalForm = Some(NLiteral(false)) - } else if (L3.length == 1) { - phi.normalForm = Some(L3.head._1) - } else if (L3.exists(_._2 == 1) || checkForContradiction(L3)) { - phi.normalForm = Some(NLiteral(true)) - } else { - phi.normalForm = Some(NOr(L3.map(_._1), updateCodesSig(("or", L3.map(_._2))))) - } - phi.normalForm.get.code - } - - def pDisj(phi: PolarFormula, acc: List[NormalFormula]): List[NormalFormula] = { - if (phi.normalForm.nonEmpty) return pDisjNormal(phi.normalForm.get, acc) - val r: List[NormalFormula] = phi match { - case PolarPredicate(label, args) => - val lab = label match { - case _: ConstantAtomicLabel => "cp_" + label.id + "_" + label.arity - case _: SchematicAtomicLabel => "sp_" + label.id + "_" + label.arity - } - if (label == top) { - phi.normalForm = Some(NLiteral(true)) - } else if (label == bot) { - phi.normalForm = Some(NLiteral(false)) - } else if (label == equality) { - if (codesOfTerm(args(0)) == codesOfTerm(args(1))) - phi.normalForm = Some(NLiteral(true)) - else - phi.normalForm = Some(NormalPredicate(label, args, updateCodesSig((lab, (args map codesOfTerm).sorted)))) - } else { - phi.normalForm = Some(NormalPredicate(label, args, updateCodesSig((lab, args map codesOfTerm)))) - } - phi.normalForm.get :: acc - case PolarSchemConnector(label, args) => - val lab = label match { - case _: ConstantConnectorLabel => "cc_" + label.id + "_" + label.arity - case _: SchematicConnectorLabel => "sc_" + label.id + "_" + label.arity - } - phi.normalForm = Some(NormalConnector(label, args.map(_.normalForm.get), updateCodesSig((lab, args map OCBSLCode)))) - phi.normalForm.get :: acc - case SNeg(child) => pNeg(child, phi, acc) - case PolarAnd(children) => children.foldLeft(acc)((p, a) => pDisj(a, p)) - case PolarForall(x, inner) => - val r = OCBSLCode(inner) - phi.normalForm = Some(NForall(x, inner.normalForm.get, updateCodesSig(("forall", List(r))))) - phi.normalForm.get :: acc - case SExists(x, inner) => - val r = OCBSLCode(inner) - phi.normalForm = Some(NExists(x, inner.normalForm.get, updateCodesSig(("exists", List(r))))) - phi.normalForm.get :: acc - case PolarLiteral(true) => - phi.normalForm = Some(NLiteral(true)) - phi.normalForm.get :: acc - case PolarLiteral(false) => - 
phi.normalForm = Some(NLiteral(false)) - phi.normalForm.get :: acc - } - r - } - - def pNeg(phi: PolarFormula, parent: PolarFormula, acc: List[NormalFormula]): List[NormalFormula] = { - if (phi.normalForm.nonEmpty) return pNegNormal(phi.normalForm.get, parent, acc) - val r: List[NormalFormula] = phi match { - case PolarPredicate(label, args) => - val lab = label match { - case _: ConstantAtomicLabel => "cp_" + label.id + "_" + label.arity - case _: SchematicAtomicLabel => "sp_" + label.id + "_" + label.arity - } - if (label == top) { - phi.normalForm = Some(NLiteral(true)) - parent.normalForm = Some(NLiteral(false)) - } else if (label == bot) { - phi.normalForm = Some(NLiteral(false)) - parent.normalForm = Some(NLiteral(true)) - } else if (label == equality) { - if (codesOfTerm(args(0)) == codesOfTerm(args(1))) { - phi.normalForm = Some(NLiteral(true)) - parent.normalForm = Some(NLiteral(false)) - } else { - phi.normalForm = Some(NormalPredicate(label, args, updateCodesSig((lab, (args map codesOfTerm).sorted)))) - parent.normalForm = Some(NNeg(phi.normalForm.get, updateCodesSig(("neg", List(phi.normalForm.get.code))))) - } - } else { - phi.normalForm = Some(NormalPredicate(label, args, updateCodesSig((lab, args map codesOfTerm)))) - parent.normalForm = Some(NNeg(phi.normalForm.get, updateCodesSig(("neg", List(phi.normalForm.get.code))))) - // phi.normalForm = Some(NormalPredicate(id, args, updateCodesSig((lab, args map codesOfTerm)))) - } - parent.normalForm.get :: acc - case PolarSchemConnector(label, args) => - val lab = label match { - case _: ConstantConnectorLabel => "cc_" + label.id + "_" + label.arity - case _: SchematicConnectorLabel => "sc_" + label.id + "_" + label.arity - } - phi.normalForm = Some(NormalConnector(label, args.map(_.normalForm.get), updateCodesSig((lab, args map OCBSLCode)))) - parent.normalForm = Some(NNeg(phi.normalForm.get, updateCodesSig(("neg", List(phi.normalForm.get.code))))) - parent.normalForm.get :: acc - case SNeg(child) => pDisj(child, acc) - case PolarForall(x, inner) => - val r = OCBSLCode(inner) - phi.normalForm = Some(NForall(x, inner.normalForm.get, updateCodesSig(("forall", List(r))))) - parent.normalForm = Some(NNeg(phi.normalForm.get, updateCodesSig(("neg", List(phi.normalForm.get.code))))) - parent.normalForm.get :: acc - case SExists(x, inner) => - val r = OCBSLCode(inner) - phi.normalForm = Some(NExists(x, inner.normalForm.get, updateCodesSig(("exists", List(r))))) - parent.normalForm = Some(NNeg(phi.normalForm.get, updateCodesSig(("neg", List(phi.normalForm.get.code))))) - parent.normalForm.get :: acc - case PolarLiteral(true) => - parent.normalForm = Some(NLiteral(false)) - parent.normalForm.get :: acc - case PolarLiteral(false) => - parent.normalForm = Some(NLiteral(true)) - parent.normalForm.get :: acc - case PolarAnd(children) => - if (children.isEmpty) { - parent.normalForm = Some(NLiteral(true)) - parent.normalForm.get :: acc - } else { - val T = children.sortBy(_.size) - val r1 = T.tail.foldLeft(List[NormalFormula]())((p, a) => pDisj(a, p)) - val r2 = r1 zip (r1 map (_.code)) - val r3 = r2.sortBy(_._2).distinctBy(_._2).filterNot(_._2 == 0) - if (r3.isEmpty) pNeg(T.head, parent, acc) - else { - val s1 = pDisj(T.head, r1) - val s2 = s1 zip (s1 map (_.code)) - val s3 = s2.sortBy(_._2).distinctBy(_._2).filterNot(_._2 == 0) - if (s3.exists(_._2 == 1) || checkForContradiction(s3)) { - phi.normalForm = Some(NLiteral(true)) - parent.normalForm = Some(NLiteral(false)) - parent.normalForm.get :: acc - } else if (s3.length == 1) { - 
pNegNormal(s3.head._1, parent, acc) - } else { - phi.normalForm = Some(NOr(s3.map(_._1), updateCodesSig(("or", s3.map(_._2))))) - parent.normalForm = Some(NNeg(phi.normalForm.get, updateCodesSig(("neg", List(phi.normalForm.get.code))))) - parent.normalForm.get :: acc - } - } - } - } - r - } - def pDisjNormal(f: NormalFormula, acc: List[NormalFormula]): List[NormalFormula] = f match { - case NOr(children, c) => children ++ acc - case p @ _ => p :: acc - } - def pNegNormal(f: NormalFormula, parent: PolarFormula, acc: List[NormalFormula]): List[NormalFormula] = f match { - case NNeg(child, c) => - pDisjNormal(child, acc) - case _ => - parent.normalForm = Some(NNeg(f, updateCodesSig(("neg", List(f.code))))) - parent.normalForm.get :: acc - } - - def check(formula1: Formula, formula2: Formula): Boolean = { - getCode(formula1) == getCode(formula2) - } - def getCode(formula: Formula): Int = OCBSLCode(removeSugar(formula)) - - def isSame(term1: Term, term2: Term): Boolean = codesOfTerm(term1) == codesOfTerm(term2) - - def isSame(formula1: Formula, formula2: Formula): Boolean = { - this.check(formula1, formula2) - } - - def isSameSet(s1: Set[Formula], s2: Set[Formula]): Boolean = { - s1.map(this.getCode).toList.sorted == s2.map(this.getCode).toList.sorted - } - - def isSubset(s1: Set[Formula], s2: Set[Formula]): Boolean = { - val codesSet1 = s1.map(this.getCode) - val codesSet2 = s2.map(this.getCode) - codesSet1.subsetOf(codesSet2) - } - - def contains(s: Set[Formula], f: Formula): Boolean = { - val codesSet = s.map(this.getCode) - val codesFormula = this.getCode(f) - codesSet.contains(codesFormula) - } - def normalForm(phi: Formula): NormalFormula = { - getCode(phi) - removeSugar(phi).normalForm.get - } - - } - def isSame(term1: Term, term2: Term): Boolean = (new LocalEquivalenceChecker2).isSame(term1, term2) - - def isSame(formula1: Formula, formula2: Formula): Boolean = (new LocalEquivalenceChecker2).isSame(formula1, formula2) - - def isSameSet(s1: Set[Formula], s2: Set[Formula]): Boolean = (new LocalEquivalenceChecker2).isSameSet(s1, s2) - - def isSubset(s1: Set[Formula], s2: Set[Formula]): Boolean = (new LocalEquivalenceChecker2).isSubset(s1, s2) - - def contains(s: Set[Formula], f: Formula): Boolean = (new LocalEquivalenceChecker2).contains(s, f) - */ -} diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/FOL.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/FOL.scala index 24f895f36..f0616e3c1 100644 --- a/lisa-kernel/src/main/scala/lisa/kernel/fol/FOL.scala +++ b/lisa-kernel/src/main/scala/lisa/kernel/fol/FOL.scala @@ -1,10 +1,11 @@ package lisa.kernel.fol +import lisa.kernel.fol.OLEquivalenceChecker /** * The concrete implementation of first order logic. * All its content can be imported using a single statement: - *
- * import lisa.fol.FOL._ - *+ * {{{ + * import lisa.utils.fol.FOL._ + * }}} */ -object FOL extends FormulaDefinitions with EquivalenceChecker with Substitutions {} +object FOL extends OLEquivalenceChecker {} \ No newline at end of file diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/FormulaDefinitions.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/FormulaDefinitions.scala deleted file mode 100644 index 192460cef..000000000 --- a/lisa-kernel/src/main/scala/lisa/kernel/fol/FormulaDefinitions.scala +++ /dev/null @@ -1,138 +0,0 @@ -package lisa.kernel.fol - -/** - * Definitions of formulas; analogous to [[TermDefinitions]]. - * Depends on [[FormulaLabelDefinitions]] and [[TermDefinitions]]. - */ -private[fol] trait FormulaDefinitions extends FormulaLabelDefinitions with TermDefinitions { - - type SimpleFormula - def reducedForm(formula: Formula): Formula - def reduceSet(s: Set[Formula]): Set[Formula] - def isSameTerm(term1: Term, term2: Term): Boolean - def isSame(formula1: Formula, formula2: Formula): Boolean - def isImplying(formula1: Formula, formula2: Formula): Boolean - def isSameSet(s1: Set[Formula], s2: Set[Formula]): Boolean - def isSubset(s1: Set[Formula], s2: Set[Formula]): Boolean - def contains(s: Set[Formula], f: Formula): Boolean - - /** - * The parent class of formulas. - * A formula is a tree whose nodes are either terms or labeled by predicates or logical connectors. - */ - sealed trait Formula extends TreeWithLabel[FormulaLabel] { - val uniqueNumber: Long = Formula.getNewId - private[fol] var polarFormula: Option[SimpleFormula] = None - val arity: Int = label.arity - - override def constantTermLabels: Set[ConstantFunctionLabel] - override def schematicTermLabels: Set[SchematicTermLabel] - override def freeSchematicTermLabels: Set[SchematicTermLabel] - override def freeVariables: Set[VariableLabel] - - /** - * @return The list of constant predicate symbols in the formula. - */ - def constantAtomicLabels: Set[ConstantAtomicLabel] - - /** - * @return The list of schematic predicate symbols in the formula, including variable formulas . - */ - def schematicAtomicLabels: Set[SchematicAtomicLabel] - - /** - * @return The list of schematic connector symbols in the formula. - */ - def schematicConnectorLabels: Set[SchematicConnectorLabel] - - /** - * @return The list of schematic connector, predicate and formula variable symbols in the formula. - */ - def schematicFormulaLabels: Set[SchematicFormulaLabel] = - (schematicAtomicLabels.toSet: Set[SchematicFormulaLabel]) union (schematicConnectorLabels.toSet: Set[SchematicFormulaLabel]) - - /** - * @return The list of free formula variable symbols in the formula - */ - def freeVariableFormulaLabels: Set[VariableFormulaLabel] - - } - private object Formula { - var totalNumberOfFormulas: Long = 0 - def getNewId: Long = { - totalNumberOfFormulas += 1 - totalNumberOfFormulas - } - } - - /** - * The formula counterpart of [[AtomicLabel]]. 
- */ - sealed case class AtomicFormula(label: AtomicLabel, args: Seq[Term]) extends Formula { - require(label.arity == args.size) - override def constantTermLabels: Set[ConstantFunctionLabel] = - args.foldLeft(Set.empty[ConstantFunctionLabel])((prev, next) => prev union next.constantTermLabels) - override def schematicTermLabels: Set[SchematicTermLabel] = - args.foldLeft(Set.empty[SchematicTermLabel])((prev, next) => prev union next.schematicTermLabels) - override def freeSchematicTermLabels: Set[SchematicTermLabel] = - args.foldLeft(Set.empty[SchematicTermLabel])((prev, next) => prev union next.freeSchematicTermLabels) - override def freeVariables: Set[VariableLabel] = - args.foldLeft(Set.empty[VariableLabel])((prev, next) => prev union next.freeVariables) - override def constantAtomicLabels: Set[ConstantAtomicLabel] = label match { - case l: ConstantAtomicLabel => Set(l) - case _ => Set() - } - override def schematicAtomicLabels: Set[SchematicAtomicLabel] = label match { - case l: SchematicAtomicLabel => Set(l) - case _ => Set() - } - override def schematicConnectorLabels: Set[SchematicConnectorLabel] = Set() - - override def freeVariableFormulaLabels: Set[VariableFormulaLabel] = label match { - case l: VariableFormulaLabel => Set(l) - case _ => Set() - } - } - - /** - * The formula counterpart of [[ConnectorLabel]]. - */ - sealed case class ConnectorFormula(label: ConnectorLabel, args: Seq[Formula]) extends Formula { - require(label.arity == args.size || label.arity == -1) - require(label.arity != 0) - override def constantTermLabels: Set[ConstantFunctionLabel] = - args.foldLeft(Set.empty[ConstantFunctionLabel])((prev, next) => prev union next.constantTermLabels) - override def schematicTermLabels: Set[SchematicTermLabel] = - args.foldLeft(Set.empty[SchematicTermLabel])((prev, next) => prev union next.schematicTermLabels) - override def freeSchematicTermLabels: Set[SchematicTermLabel] = - args.foldLeft(Set.empty[SchematicTermLabel])((prev, next) => prev union next.freeSchematicTermLabels) - override def freeVariables: Set[VariableLabel] = - args.foldLeft(Set.empty[VariableLabel])((prev, next) => prev union next.freeVariables) - override def constantAtomicLabels: Set[ConstantAtomicLabel] = - args.foldLeft(Set.empty[ConstantAtomicLabel])((prev, next) => prev union next.constantAtomicLabels) - override def schematicAtomicLabels: Set[SchematicAtomicLabel] = - args.foldLeft(Set.empty[SchematicAtomicLabel])((prev, next) => prev union next.schematicAtomicLabels) - override def schematicConnectorLabels: Set[SchematicConnectorLabel] = label match { - case l: ConstantConnectorLabel => - args.foldLeft(Set.empty[SchematicConnectorLabel])((prev, next) => prev union next.schematicConnectorLabels) - case l: SchematicConnectorLabel => - args.foldLeft(Set(l))((prev, next) => prev union next.schematicConnectorLabels) - } - override def freeVariableFormulaLabels: Set[VariableFormulaLabel] = - args.foldLeft(Set.empty[VariableFormulaLabel])((prev, next) => prev union next.freeVariableFormulaLabels) - } - - /** - * The formula counterpart of [[BinderLabel]]. 
- */ - sealed case class BinderFormula(label: BinderLabel, bound: VariableLabel, inner: Formula) extends Formula { - override def constantTermLabels: Set[ConstantFunctionLabel] = inner.constantTermLabels - override def schematicTermLabels: Set[SchematicTermLabel] = inner.schematicTermLabels - override def freeSchematicTermLabels: Set[SchematicTermLabel] = inner.freeSchematicTermLabels - bound - override def freeVariables: Set[VariableLabel] = inner.freeVariables - bound - override def constantAtomicLabels: Set[ConstantAtomicLabel] = inner.constantAtomicLabels - override def schematicAtomicLabels: Set[SchematicAtomicLabel] = inner.schematicAtomicLabels - override def schematicConnectorLabels: Set[SchematicConnectorLabel] = inner.schematicConnectorLabels - override def freeVariableFormulaLabels: Set[VariableFormulaLabel] = inner.freeVariableFormulaLabels - } -} diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/FormulaLabelDefinitions.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/FormulaLabelDefinitions.scala deleted file mode 100644 index 61e750774..000000000 --- a/lisa-kernel/src/main/scala/lisa/kernel/fol/FormulaLabelDefinitions.scala +++ /dev/null @@ -1,112 +0,0 @@ -package lisa.kernel.fol - -/** - * Definitions of formula labels. Analogous to [[TermLabelDefinitions]]. - */ -private[fol] trait FormulaLabelDefinitions extends CommonDefinitions { - - /** - * The parent class of formula labels. - * These are labels that can be applied to nodes that form the tree of a formula. - * In logical terms, those labels are FOL symbols or predicate symbols, including equality. - */ - sealed abstract class FormulaLabel extends Label - - /** - * The label for a predicate, namely a function taking a fixed number of terms and returning a formula. - * In logical terms it is a predicate symbol. - */ - sealed trait AtomicLabel extends FormulaLabel { - require(arity < MaxArity && arity >= 0) - } - - /** - * The label for a connector, namely a function taking a fixed number of formulas and returning another formula. - */ - sealed trait ConnectorLabel extends FormulaLabel { - require(arity < MaxArity && arity >= -1) - } - - /** - * A standard predicate symbol. Typical example are equality (=) and membership (∈) - */ - sealed case class ConstantAtomicLabel(id: Identifier, arity: Int) extends AtomicLabel with ConstantLabel - - /** - * The equality symbol (=) for first order logic. - * It is represented as any other predicate symbol but has unique semantic and deduction rules. - */ - val equality: ConstantAtomicLabel = ConstantAtomicLabel(Identifier("="), 2) - val top: ConstantAtomicLabel = ConstantAtomicLabel(Identifier("⊤"), 0) - val bot: ConstantAtomicLabel = ConstantAtomicLabel(Identifier("⊥"), 0) - - /** - * The label for a connector, namely a function taking a fixed number of formulas and returning another formula. - */ - sealed abstract class ConstantConnectorLabel(val id: Identifier, val arity: Int) extends ConnectorLabel with ConstantLabel - case object Neg extends ConstantConnectorLabel(Identifier("¬"), 1) - - case object Implies extends ConstantConnectorLabel(Identifier("⇒"), 2) - - case object Iff extends ConstantConnectorLabel(Identifier("⇔"), 2) - - case object And extends ConstantConnectorLabel(Identifier("∧"), -1) - - case object Or extends ConstantConnectorLabel(Identifier("∨"), -1) - - /** - * A schematic symbol that can be instantiated with some formula. - * We distinguish arity-0 schematic formula labels, arity->1 schematic predicates and arity->1 schematic connectors. 
- */ - sealed trait SchematicFormulaLabel extends FormulaLabel with SchematicLabel - - /** - * A schematic symbol whose arguments are any number of Terms. This means the symbol is either a variable formula or a predicate schema - */ - sealed trait SchematicAtomicLabel extends SchematicFormulaLabel with AtomicLabel - - /** - * A predicate symbol of arity 0 that can be instantiated with any formula. - */ - sealed case class VariableFormulaLabel(id: Identifier) extends SchematicAtomicLabel { - val arity = 0 - } - - /** - * A predicate symbol of non-zero arity that can be instantiated with any functional formula taking term arguments. - */ - sealed case class SchematicPredicateLabel(id: Identifier, arity: Int) extends SchematicAtomicLabel - - /** - * A predicate symbol of non-zero arity that can be instantiated with any functional formula taking formula arguments. - */ - sealed case class SchematicConnectorLabel(id: Identifier, arity: Int) extends SchematicFormulaLabel with ConnectorLabel - - /** - * The label for a binder, namely an object with a body that has the ability to bind variables in it. - */ - sealed abstract class BinderLabel(val id: Identifier) extends FormulaLabel { - val arity = 1 - } - - /** - * The symbol of the universal quantifier ∀ - */ - case object Forall extends BinderLabel(Identifier("∀")) - - /** - * The symbol of the existential quantifier ∃ - */ - case object Exists extends BinderLabel(Identifier("∃")) - - /** - * The symbol of the quantifier for existence and unicity ∃! - */ - case object ExistsOne extends BinderLabel(Identifier("∃!")) - - /** - * A function returning true if and only if the two symbols are considered "the same", i.e. same category, same arity and same id. - */ - def isSame(l: FormulaLabel, r: FormulaLabel): Boolean = l == r - -} diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/OLEquivalenceChecker.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/OLEquivalenceChecker.scala new file mode 100644 index 000000000..dc4032f51 --- /dev/null +++ b/lisa-kernel/src/main/scala/lisa/kernel/fol/OLEquivalenceChecker.scala @@ -0,0 +1,659 @@ +package lisa.kernel.fol + +import scala.collection.mutable +import lisa.kernel.fol.Syntax + +private[fol] trait OLEquivalenceChecker extends Syntax { + + + /** Returns the reduced form of the given expression in AIG representation. + * + * Obtain the normal form of type [[SimpleExpression]] using [[simplify]] and [[computeNormalForm]]. + * Then recover an [[Expression]] using [[fromLocallyNameless]] and [[toExpressionAIG]]. + */ + def reducedForm(expr: Expression): Expression = { + val bnf = expr.betaNormalForm + val p = simplify(bnf) + val nf = computeNormalForm(p) + val fln = fromLocallyNameless(nf, Map.empty, 0) + val res = toExpressionAIG(fln) + res + } + + /** Returns the reduced form of the given expression in NNF representation. + * + * Obtain the normal form of type [[SimpleExpression]] using [[simplify]] and [[computeNormalForm]]. + * Then recover an [[Expression]] using [[fromLocallyNameless]] and [[toExpressionNNF]]. + */ + def reducedNNFForm(expr: Expression): Expression = { + val bnf = expr.betaNormalForm + val p = simplify(expr) + val nf = computeNormalForm(p) + val fln = fromLocallyNameless(nf, Map.empty, 0) + val res = toExpressionNNF(fln, true) + res + } + + /** Maps a set of expressions to their reduced forms using [[reducedForm]], then eliminates equivalent expressions. 
+ * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[contains]] + */ + def reduceSet(s: Set[Expression]): Set[Expression] = { + var res: List[Expression] = Nil + s.map(reducedForm) + .foreach({ f => + if (!res.exists(isSame(f, _))) res = f :: res + }) + res.toSet + } + + @deprecated("Use isSame instead", "0.8") + def isSameTerm(term1: Expression, term2: Expression): Boolean = isSame(term1, term2) + + /** Returns true if the two expressions are equivalent by the rules of the OL equivalence checker. + * + * The expressions are simplified and reduced to their orthologic normal form, and then compared. + * This takes into account all the laws of ortholattices, symmetry and reflexivity of equality, alpha-beta-eta-equivalence, and unfolds ⇒, ⇔, ∃ using other connectives. + */ + def isSame(e1: Expression, e2: Expression): Boolean = { + val nf1 = computeNormalForm(simplify(e1.betaNormalForm)) + val nf2 = computeNormalForm(simplify(e2.betaNormalForm)) + latticesEQ(nf1, nf2) + + } + + /** + * Returns true if the first expression implies the second by the rules of the OL equivalence checker. + * + * The two arguments must be expressions of type [[Prop]]. + * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[contains]] + */ + def isImplying(e1: Expression, e2: Expression): Boolean = { + require(e1.sort == Prop && e2.sort == Prop) + val nf1 = computeNormalForm(simplify(e1.betaNormalForm)) + val nf2 = computeNormalForm(simplify(e2.betaNormalForm)) + latticesLEQ(nf1, nf2) + } + + /** Returns true if all the expressions in `s1` are equivalent to some expression in `s2`. + * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[contains]] + */ + def isSubset(s1: Set[Expression], s2: Set[Expression]): Boolean = { + s1.forall(e1 => s2.exists(e2 => isSame(e1, e2))) + } + + /** Returns true if all the expressions in `s1` are equivalent to some expression in `s2` and vice versa. + * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[contains]] + */ + def isSameSet(s1: Set[Expression], s2: Set[Expression]): Boolean = + isSubset(s1, s2) && isSubset(s2, s1) + + + /** Returns true if the conjunction of all elements of `s1` is equivalent to the conjunction of all elements of `s2`. + * + * Useful to compare left-hand sides of sequents. + * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[contains]] + */ + def isSameSetL(s1: Set[Expression], s2: Set[Expression]): Boolean = + isSame(s1.reduceLeft(and(_)(_)), s2.reduceLeft(and(_)(_))) + + /** Returns true if the disjunction of all elements of `s1` is equivalent to the disjunction of all elements of `s2`. + * + * Useful to compare right-hand sides of sequents. + * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[contains]] + */ + def isSameSetR(s1: Set[Expression], s2: Set[Expression]): Boolean = + isSame(s1.reduceLeft(or(_)(_)), s2.reduceLeft(or(_)(_))) + + /** Returns true if the set `s` contains an expression equivalent to `f`. + * + * @see [[isSame]] + * @see [[isSubset]] + * @see [[isSameSetL]] + * @see [[isSameSetR]] + * @see [[isSameSet]] + */ + def contains(s: Set[Expression], f: Expression): Boolean = { + s.exists(g => isSame(f, g)) + } + + + /** A counter for [[SimpleExpression]] instances. 
Used for efficient reference equality.*/ + private var totSimpleExpr = 0 + + /** Represents [[Expression]]s in a polar normalized form where + * - ⇒, ⇔, ∃, ∨ are unfolded using other connectives: ¬, ∧, ∀ + * - consecutive conjunctions are flattened + * - double negations are eliminated + */ + sealed abstract class SimpleExpression { + /** The sort of the expression. */ + val sort: Sort + /** True if the expression contains formulas. */ + val containsFormulas : Boolean + /** A unique key for the expression, used for efficient reference equality checking. */ + val uniqueKey = totSimpleExpr + totSimpleExpr += 1 + /** The number of subterms which are actual concrete formulas. */ + //val size : Int + private[OLEquivalenceChecker] var inverse : Option[SimpleExpression] = None + def getInverse = inverse + private[OLEquivalenceChecker] var NNF_pos: Option[Expression] = None + def getNNF_pos = NNF_pos + private[OLEquivalenceChecker] var NNF_neg: Option[Expression] = None + def getNNF_neg = NNF_neg + private[OLEquivalenceChecker] var formulaAIG: Option[Expression] = None + def getFormulaAIG = formulaAIG + private[OLEquivalenceChecker] var normalForm: Option[SimpleExpression] = None + def getNormalForm = normalForm + private[OLEquivalenceChecker] var namelessForm: Option[SimpleExpression] = None + def getNamelessForm = normalForm + + /** + * Caching for the lessThan relation. + * + * Using a mutable BitSet was the most efficient. + * @see [[lessThanCached]] + */ + private val lessThanBitSet: mutable.Set[Long] = new mutable.HashSet() + setLessThanCache(this, true) + + /** + * Checks if `this` is less than `that` in the cache of `this`. + * + * The cache is organized as pairs of bits: + * For an expression `that` with id `i`, if `this.lessThanBitSet` contains `2*i`, then `this.lessThanBitSet(2*i + 1)` is true iff `this` is less than `that`. + */ + def lessThanCached(that: SimpleExpression): Option[Boolean] = { + val thatIx = 2 * that.uniqueKey + if (lessThanBitSet.contains(thatIx)) Some(lessThanBitSet.contains(thatIx + 1)) + else None + } + + /** + * Sets the cache for the lessThan relation between `this` and `that` to `value`. + * @see [[lessThanCached]] + */ + def setLessThanCache(that: SimpleExpression, value: Boolean): Unit = { + val thatIx = 2 * that.uniqueKey + if (value) lessThanBitSet.update(thatIx + 1, true) + } + } + + /** Polar version of [[variable]] variable. */ + case class SimpleVariable(id: Identifier, sort:Sort, polarity: Boolean) extends SimpleExpression { + val containsFormulas: Boolean = sort == Prop + } + + /** Polar version of [[Variable]] for a bound variable in locally nameless representation. */ + case class SimpleBoundVariable(no: Int, sort: Sort, polarity: Boolean) extends SimpleExpression { + val containsFormulas: Boolean = sort == Prop + } + + /** Polar version of [[Constant]] for a constant. */ + case class SimpleConstant(id: Identifier, sort: Sort, polarity: Boolean) extends SimpleExpression { + val containsFormulas: Boolean = sort == Prop + } + + /** Polar version of [[Application]] for an application of a function to an argument. */ + case class SimpleApplication(f: SimpleExpression, arg: SimpleExpression, polarity: Boolean) extends SimpleExpression { + private val legalapp = legalApplication(f.sort, arg.sort) // Optimize after debugging + val sort = legalapp.get + val containsFormulas: Boolean = sort == Prop || f.containsFormulas || arg.containsFormulas + } + + /** Polar version of [[Lambda]] for a lambda abstraction. 
*/ + case class SimpleLambda(v: Variable, body: SimpleExpression) extends SimpleExpression { + val containsFormulas: Boolean = body.containsFormulas + val sort = (v.sort -> body.sort) + } + + /** Polar version of [[And]]```(_)(_)...```. */ + case class SimpleAnd(children: Seq[SimpleExpression], polarity: Boolean) extends SimpleExpression{ + val containsFormulas: Boolean = true + val sort = Prop + } + + /** Polar version of [[Forall]]```Lambda(_, _)``` for a universal quantification. */ + case class SimpleForall(id: Identifier, body: SimpleExpression, polarity: Boolean) extends SimpleExpression { + val containsFormulas: Boolean = true + val sort = Prop + } + + /** Polar version of [[top]] and [[bot]]. */ + case class SimpleLiteral(polarity: Boolean) extends SimpleExpression { + val containsFormulas: Boolean = true + val sort = Prop + } + + /** Polar version of [[Equality]]```(_)(_)``` for an equality. */ + case class SimpleEquality(left: SimpleExpression, right: SimpleExpression, polarity: Boolean) extends SimpleExpression { + val containsFormulas: Boolean = true + val sort = Prop + } + + + /** Returns the negation of `e` in polar form. Use caching. */ + def getInversePolar(e: SimpleExpression): SimpleExpression = e.inverse match { + case Some(inverse) => inverse + case None => + val inverse = e match { + case e: SimpleAnd => e.copy(polarity = !e.polarity) + case e: SimpleForall => e.copy(polarity = !e.polarity) + case e: SimpleLiteral => e.copy(polarity = !e.polarity) + case e: SimpleEquality => e.copy(polarity = !e.polarity) + case e: SimpleVariable if e.sort == Prop => e.copy(polarity = !e.polarity) + case e: SimpleBoundVariable if e.sort == Prop => e.copy(polarity = !e.polarity) + case e: SimpleConstant if e.sort == Prop => e.copy(polarity = !e.polarity) + case e: SimpleApplication if e.sort == Prop => e.copy(polarity = !e.polarity) + case _ => throw new Exception("Cannot invert expression that is not a formula") + } + e.inverse = Some(inverse) + inverse + } + + /** Converts back a [[SimpleExpression]] to an [[Expression]] in AIG representation. */ + def toExpressionAIG(e:SimpleExpression): Expression = + if (e.formulaAIG.isDefined) e.formulaAIG.get + else { + val r: Expression = e match { + case SimpleAnd(children, polarity) => + val f = children.map(toExpressionAIG).reduceLeft(and(_)(_)) + if (polarity) f else neg(f) + case SimpleForall(x, body, polarity) => + val f = forall(Lambda(Variable(x, Ind), toExpressionAIG(body))) + if (polarity) f else neg(f) + case SimpleEquality(left, right, polarity) => + val f = equality(toExpressionAIG(left))(toExpressionAIG(right)) + if (polarity) f else neg(f) + case SimpleLiteral(polarity) => if (polarity) top else bot + case SimpleVariable(id, sort, polarity) => if (polarity) Variable(id, sort) else neg(Variable(id, sort)) + case SimpleBoundVariable(no, sort, polarity) => throw new Exception("This case should be unreachable. Can't call toFormulaAIG on a bound variable") + case SimpleConstant(id, sort, polarity) => if (polarity) Constant(id, sort) else neg(Constant(id, sort)) + case SimpleApplication(f, arg, polarity) => + val g = Application(toExpressionAIG(f), toExpressionAIG(arg)) + if (polarity) + g else + neg(g) + case SimpleLambda(v, body) => Lambda(v, toExpressionAIG(body)) + } + e.formulaAIG = Some(r) + r + } + + /** Converts a [[SimpleExpression]] to an [[Expression]] in NNF representation. 
*/ + def toExpressionNNF(e: SimpleExpression, positive: Boolean): Expression = { + if (positive){ + if (e.NNF_pos.isDefined) return e.NNF_pos.get + if (e.inverse.isDefined && e.inverse.get.NNF_neg.isDefined) return e.inverse.get.NNF_neg.get + } + else if (!positive) { + if (e.NNF_neg.isDefined) return e.NNF_neg.get + if (e.inverse.isDefined && e.inverse.get.NNF_pos.isDefined) return e.inverse.get.NNF_pos.get + } + val r = e match { + case SimpleAnd(children, polarity) => + if (positive == polarity) + children.map(toExpressionNNF(_, true)).reduceLeft(and(_)(_)) + else + children.map(toExpressionNNF(_, false)).reduceLeft(or(_)(_)) + case SimpleForall(x, body, polarity) => + if (positive == polarity) + forall(Lambda(Variable(x, Ind), toExpressionNNF(body, true))) //rebuilding variable not ideal + else + exists(Lambda(Variable(x, Ind), toExpressionNNF(body, false))) + case SimpleEquality(left, right, polarity) => + if (positive == polarity) + equality(toExpressionNNF(left, true))(toExpressionNNF(right, true)) + else + neg(equality(toExpressionNNF(left, true))(toExpressionNNF(right, true))) + case SimpleLiteral(polarity) => + if (positive == polarity) top + else bot + case SimpleVariable(id, sort, polarity) => + if (polarity == positive) Variable(id, sort) + else neg(Variable(id, sort)) + case SimpleBoundVariable(no, sort, polarity) => throw new Exception("This case should be unreachable. Can't call toExpressionNNF on a bound variable") + case SimpleConstant(id, sort, polarity) => + if (polarity == positive) Constant(id, sort) + else neg(Constant(id, sort)) + case SimpleApplication(f, arg, polarity) => + if (polarity == positive) + Application(toExpressionNNF(f, true), toExpressionNNF(arg, true)) + else + neg(Application(toExpressionNNF(f, true), toExpressionNNF(arg, true))) + case SimpleLambda(v, body) => Lambda(v, toExpressionNNF(body, true)) + } + if (positive) e.NNF_pos = Some(r) + else e.NNF_neg = Some(r) + r + } + + + + /** Converts an [[Expression]] to a [[SimpleExpression]], where + * - ⇒, ⇔, ∃, ∨ are unfolded using other connectives: ¬, ∧, ∀ + * - consecutive conjunctions are flattened + * - double negations are eliminated + */ + def polarize(e: Expression, polarity:Boolean): SimpleExpression = { + if (polarity & e.polarExpr.isDefined) { + e.polarExpr.get + } else if (!polarity & e.polarExpr.isDefined) { + getInversePolar(e.polarExpr.get) + } else { + val r = e match { + case neg(arg) => + polarize(arg, !polarity) + case implies(arg1, arg2) => + SimpleAnd(Seq(polarize(arg1, true), polarize(arg2, false)), !polarity) + case iff(arg1, arg2) => + val l1 = polarize(arg1, true) + val r1 = polarize(arg2, true) + SimpleAnd( + Seq( + SimpleAnd(Seq(l1, getInversePolar(r1)), false), + SimpleAnd(Seq(getInversePolar(l1), r1), false) + ), polarity) + case and(arg1, arg2) => + SimpleAnd(Seq(polarize(arg1, true), polarize(arg2, true)), polarity) + case or(arg1, arg2) => + SimpleAnd(Seq(polarize(arg1, false), polarize(arg2, false)), !polarity) + case forall(Lambda(v, body)) => + SimpleForall(v.id, polarize(body, true), polarity) + case forall(p) => + val fresh = freshId(p.freeVariables.map(_.id), Identifier("x", 0)) + val newInner = polarize(Application(p, Variable(fresh, Ind)), true) + SimpleForall(fresh, newInner, polarity) + case exists(Lambda(v, body)) => + SimpleForall(v.id, polarize(body, false), !polarity) + case exists(p) => + val fresh = freshId(p.freeVariables.map(_.id), Identifier("x", 0)) + val newInner = polarize(Application(p, Variable(fresh, Ind)), false) + SimpleForall(fresh, newInner, 
!polarity) + case equality(arg1, arg2) => + SimpleEquality(polarize(arg1, true), polarize(arg2, true), polarity) + case Application(f, arg) => + SimpleApplication(polarize(f, true), polarize(arg, true), polarity) + case Lambda(v, body) => SimpleLambda(v, polarize(body, true)) + case `top` => SimpleLiteral(polarity) + case `bot` => SimpleLiteral(!polarity) + case Constant(id, sort) => SimpleConstant(id, sort, polarity) + case Variable(id, sort) => SimpleVariable(id, sort, polarity) + } + if (polarity) e.polarExpr = Some(r) + else e.polarExpr = Some(getInversePolar(r)) + r + } + } + + /** + * Replaces all [[SimpleVariable]]s with [[SimpleBoundVariable]]s in `e` using localy nameless (de Bruijn) representation. + * @see [[fromLocallyNameless]] + */ + def toLocallyNameless(e: SimpleExpression): SimpleExpression = + e.namelessForm match { + case None => + val r = e match { + case SimpleAnd(children, polarity) => SimpleAnd(children.map(toLocallyNameless), polarity) + case SimpleForall(x, inner, polarity) => SimpleForall(x, toLocallyNameless2(inner, Map((x, Ind) -> 0), 1), polarity) + case e: SimpleLiteral => e + case SimpleEquality(left, right, polarity) => SimpleEquality(toLocallyNameless(left), toLocallyNameless(right), polarity) + case v: SimpleVariable => v + case s: SimpleBoundVariable => throw new Exception("This case should be unreachable. Can't call toLocallyNameless on a bound variable") + case e: SimpleConstant => e + case SimpleApplication(arg1, arg2, polarity) => SimpleApplication(toLocallyNameless(arg1), toLocallyNameless(arg2), polarity) + case SimpleLambda(x, inner) => SimpleLambda(x, toLocallyNameless2(inner, Map((x.id, Ind) -> 0), 1)) + } + toLocallyNameless2(e, Map.empty, 0) + e.namelessForm = Some(r) + r + case Some(value) => value + } + + /** Replaces all [[SimpleVariable]]s with [[SimpleBoundVariable]]s in `e` using localy nameless (de Bruijn) representation. */ + def toLocallyNameless2(e: SimpleExpression, subst: Map[(Identifier, Sort), Int], i: Int): SimpleExpression = e match { + case SimpleAnd(children, polarity) => SimpleAnd(children.map(toLocallyNameless2(_, subst, i)), polarity) + case SimpleForall(x, inner, polarity) => SimpleForall(x, toLocallyNameless2(inner, subst + ((x, Ind) -> i), i + 1), polarity) + case e: SimpleLiteral => e + case SimpleEquality(left, right, polarity) => SimpleEquality(toLocallyNameless2(left, subst, i), toLocallyNameless2(right, subst, i), polarity) + case v: SimpleVariable => + if (subst.contains((v.id, v.sort))) SimpleBoundVariable(i - subst((v.id, v.sort)), v.sort, v.polarity) + else v + case s: SimpleBoundVariable => throw new Exception("This case should be unreachable. Can't call toLocallyNameless on a bound variable") + case e: SimpleConstant => e + case SimpleApplication(arg1, arg2, polarity) => SimpleApplication(toLocallyNameless2(arg1, subst, i), toLocallyNameless2(arg2, subst, i), polarity) + case SimpleLambda(x, inner) => SimpleLambda(x, toLocallyNameless2(inner, subst + ((x.id, x.sort) -> i), i + 1)) + } + + /** Replaces all [[SimpleBoundVariable]]s with [[SimpleVariable]]s in `e`, reverting localy nameless representation. 
+ * @see [[toLocallyNameless]] + */ + def fromLocallyNameless(e: SimpleExpression, subst: Map[Int, (Identifier, Sort)], i: Int): SimpleExpression = e match { + case SimpleAnd(children, polarity) => SimpleAnd(children.map(fromLocallyNameless(_, subst, i)), polarity) + case SimpleForall(x, inner, polarity) => SimpleForall(x, fromLocallyNameless(inner, subst + (i -> (x, Ind)), i + 1), polarity) + case e: SimpleLiteral => e + case SimpleEquality(left, right, polarity) => SimpleEquality(fromLocallyNameless(left, subst, i), fromLocallyNameless(right, subst, i), polarity) + case SimpleBoundVariable(no, sort, polarity) => + val dist = i - no + if (subst.contains(dist)) {val (id, sort) = subst(dist); SimpleVariable(id, sort, polarity)} + else throw new Exception("This case should be unreachable, error") + case v: SimpleVariable => v + case e: SimpleConstant => e + case SimpleApplication(arg1, arg2, polarity) => SimpleApplication(fromLocallyNameless(arg1, subst, i), fromLocallyNameless(arg2, subst, i), polarity) + case SimpleLambda(x, inner) => SimpleLambda(x, fromLocallyNameless(inner, subst + (i -> (x.id, x.sort)), i + 1)) + } + + /** Simplifies an [[Expression]] to a [[SimpleExpression]] using [[polarize]] and [[toLocallyNameless]]. */ + def simplify(e: Expression): SimpleExpression = toLocallyNameless(polarize(e, true)) + + + ////////////////////// + //// OL Algorithm //// + ////////////////////// + + /** Computes the OL normal form of `e` modulo Orthologic. Uses caching. */ + def computeNormalForm(e: SimpleExpression): SimpleExpression = { + e.normalForm match { + case Some(value) => + value + case None => + val r: SimpleExpression = e match { + case SimpleAnd(children, polarity) => + val newChildren = children map computeNormalForm + val simp = reduce(newChildren, polarity) + simp match { + case conj: SimpleAnd if checkForContradiction(conj) => SimpleLiteral(!polarity) + case _ => simp + } + + case SimpleApplication(f, arg, true) => SimpleApplication(computeNormalForm(f), computeNormalForm(arg), true) + + case SimpleBoundVariable(no, sort, true) => e + + case SimpleVariable(id, sort, true) => e + + case SimpleConstant(id, sort, true) => e + + case SimpleEquality(left, right, true) => + val l = computeNormalForm(left) + val r = computeNormalForm(right) + if (l == r) SimpleLiteral(true) + else if (l.uniqueKey >= r.uniqueKey) SimpleEquality(l, r, true) + else SimpleEquality(r, l, true) + + case SimpleForall(id, body, true) => + val inner = computeNormalForm(body) + if (inner == SimpleLiteral(true)) SimpleLiteral(true) + else if (inner == SimpleLiteral(false)) SimpleLiteral(false) + else SimpleForall(id, inner, true) + + case SimpleLambda(v, body) => SimpleLambda(v, computeNormalForm(body)) + + case SimpleLiteral(polarity) => e + + case _ => getInversePolar(computeNormalForm(getInversePolar(e))) + + } + e.normalForm = Some(r) + r + } + } + + /** Returns true if the children of `f` contains a direct contradiction. 
*/ + def checkForContradiction(f: SimpleAnd): Boolean = { + f match { + case SimpleAnd(children, false) => + children.exists(cc => latticesLEQ(cc, f)) + case SimpleAnd(children, true) => + val shadowChildren = children map getInversePolar + shadowChildren.exists(sc => latticesLEQ(f, sc)) + } + } + + /** Reduces a conjunction to an antichain */ + def reduceList(children: Seq[SimpleExpression], polarity: Boolean): List[SimpleExpression] = { + val nonSimplified = SimpleAnd(children, polarity) + var remaining : Seq[SimpleExpression] = Nil + def treatChild(i: SimpleExpression): Seq[SimpleExpression] = { + val r: Seq[SimpleExpression] = i match { + case SimpleAnd(ch, true) => ch + case SimpleAnd(ch, false) => + if (polarity) { + val trCh = ch map getInversePolar + trCh.find(f => latticesLEQ(nonSimplified, f)) match { + case Some(value) => treatChild(value) + case None => List(i) + } + } else { + val trCH = ch + trCH.find(f => latticesLEQ(f, nonSimplified)) match { + case Some(value) => treatChild(getInversePolar(value)) + case None => List(i) + } + } + case _ => List(i) + } + r + } + children.foreach(i => { + val r = treatChild(i) + remaining = r ++ remaining + }) + + var accepted: List[SimpleExpression] = Nil + while (remaining.nonEmpty) { + val current = remaining.head + remaining = remaining.tail + if (!latticesLEQ(SimpleAnd(remaining ++ accepted, true), current)) { + accepted = current :: accepted + } + } + accepted + } + + + /** Reduces a conjunction to a simplified form using [[reduceList]] */ + def reduce(children: Seq[SimpleExpression], polarity: Boolean): SimpleExpression = { + val accepted: List[SimpleExpression] = reduceList(children, polarity) + if (accepted.isEmpty) SimpleLiteral(polarity) + else if (accepted.size == 1) + if (polarity) accepted.head + else getInversePolar(accepted.head) + else SimpleAnd(accepted, polarity) + } + + /** Checks if `e1` is less than `e2` by the laws of OL */ + def latticesLEQ(e1: SimpleExpression, e2: SimpleExpression): Boolean = { + require(e1.sort == Prop && e2.sort == Prop) + if (e1.uniqueKey == e2.uniqueKey) true + else + e1.lessThanCached(e2) match { + case Some(value) => value + case None => + val r = (e1, e2) match { + case (SimpleLiteral(false), _) => true + + case (_, SimpleLiteral(true)) => true + + case (SimpleEquality(l1, r1, pol1), SimpleEquality(l2, r2, pol2)) => + pol1 == pol2 && ((latticesEQ(l1, l2) && latticesEQ(r1, r2)) || (latticesEQ(l1, r2) && latticesEQ(r1, l2))) + + case (SimpleForall(x1, body1, polarity1), SimpleForall(x2, body2, polarity2)) => + polarity1 == polarity2 && (if (polarity1) latticesLEQ(body1, body2) else latticesLEQ(body2, body1)) + + // Usual lattice conjunction/disjunction cases + case (_, SimpleAnd(children, true)) => + children.forall(c => latticesLEQ(e1, c)) + case (SimpleAnd(children, false), _) => + children.forall(c => latticesLEQ(getInversePolar(c), e2)) + case (SimpleAnd(children1, true), SimpleAnd(children2, false)) => + children1.exists(c => latticesLEQ(c, e2)) || children2.exists(c => latticesLEQ(e1, getInversePolar(c))) + case (_, SimpleAnd(children, false)) => + children.exists(c => latticesLEQ(e1, getInversePolar(c))) + case (SimpleAnd(children, true), _) => + children.exists(c => latticesLEQ(c, e2)) + + + case (s1: SimpleBoundVariable, s2: SimpleBoundVariable) => s1 == s2 + + case (s1: SimpleVariable, s2: SimpleVariable) => s1 == s2 + + case (s1: SimpleConstant, s2: SimpleConstant) => s1 == s2 + + case (SimpleApplication(f1, arg1, polarity1), SimpleApplication(f2, arg2, polarity2)) => + polarity1 == 
polarity2 && latticesEQ(f1, f2) && latticesEQ(arg1, arg2) + + case (_, _) => false + } + e1.setLessThanCache(e2, r) + r + } + + + } + + /** Checks if `e1` is equivalent to `e2` by the laws of OL */ + def latticesEQ(e1: SimpleExpression, e2: SimpleExpression): Boolean = + if (e1.uniqueKey == e2.uniqueKey) true + else if (e1.sort == Prop) latticesLEQ(e1, e2) && latticesLEQ(e2, e1) + else (e1, e2) match { + case (s1: SimpleBoundVariable, s2: SimpleBoundVariable) => s1 == s2 + case (s1: SimpleVariable, s2: SimpleVariable) => s1 == s2 + case (s1: SimpleConstant, s2: SimpleConstant) => s1 == s2 + case (SimpleApplication(f1, arg1, polarity1), SimpleApplication(f2, arg2, polarity2)) => + polarity1 == polarity2 && latticesEQ(f1, f2) && latticesEQ(arg1, arg2) + case (SimpleLambda(x1, body1), SimpleLambda(x2, body2)) => + latticesEQ(body1, body2) + case (_, _) => false + } +} diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/Substitutions.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/Substitutions.scala deleted file mode 100644 index 51bfb4654..000000000 --- a/lisa-kernel/src/main/scala/lisa/kernel/fol/Substitutions.scala +++ /dev/null @@ -1,244 +0,0 @@ -package lisa.kernel.fol - -trait Substitutions extends FormulaDefinitions { - - /** - * A lambda term to express a "term with holes". Main use is to be substituted in place of a function schema or variable. - * Also used for some deduction rules. - * Morally equivalent to a 2-tuples containing the same informations. - * @param vars The names of the "holes" in the term, necessarily of arity 0. The bound variables of the functional term. - * @param body The term represented by the object, up to instantiation of the bound schematic variables in args. - */ - case class LambdaTermTerm(vars: Seq[VariableLabel], body: Term) { - def apply(args: Seq[Term]): Term = substituteVariablesInTerm(body, (vars zip args).toMap) - } - - /** - * A lambda formula to express a "formula with term holes". Main use is to be substituted in place of a predicate schema. - * Also used for some deduction rules. - * Morally equivalent to a 2-tuples containing the same informations. - * @param vars The names of the "holes" in a formula, necessarily of arity 0. The bound variables of the functional formula. - * @param body The formula represented by the object, up to instantiation of the bound schematic variables in args. - */ - case class LambdaTermFormula(vars: Seq[VariableLabel], body: Formula) { - def apply(args: Seq[Term]): Formula = { - substituteVariablesInFormula(body, (vars zip args).toMap) - } - } - - /** - * A lambda formula to express a "formula with formula holes". Main use is to be substituted in place of a connector schema. - * Also used for some deduction rules. - * Morally equivalent to a 2-tuples containing the same informations. - * @param vars The names of the "holes" in a formula, necessarily of arity 0. - * @param body The formula represented by the object, up to instantiation of the bound schematic variables in args. - */ - case class LambdaFormulaFormula(vars: Seq[VariableFormulaLabel], body: Formula) { - def apply(args: Seq[Formula]): Formula = { - substituteFormulaVariables(body, (vars zip args).toMap) - // instantiatePredicateSchemas(body, (vars zip (args map (LambdaTermFormula(Nil, _)))).toMap) - } - } - - ////////////////////////// - // **--- ON TERMS ---** // - ////////////////////////// - - /** - * Performs simultaneous substitution of multiple variables by multiple terms in a term. 
- * @param t The base term - * @param m A map from variables to terms. - * @return t[m] - */ - def substituteVariablesInTerm(t: Term, m: Map[VariableLabel, Term]): Term = t match { - case Term(label: VariableLabel, _) => m.getOrElse(label, t) - case Term(label, args) => Term(label, args.map(substituteVariablesInTerm(_, m))) - } - - /** - * Performs simultaneous substitution of schematic function symbol by "functional" terms, or terms with holes. - * If the arity of one of the function symbol to substitute doesn't match the corresponding number of arguments, it will produce an error. - * @param t The base term - * @param m The map from schematic function symbols to lambda expressions Term(s) -> Term [[LambdaTermTerm]]. - * @return t[m] - */ - def instantiateTermSchemasInTerm(t: Term, m: Map[SchematicTermLabel, LambdaTermTerm]): Term = { - require(m.forall { case (symbol, LambdaTermTerm(arguments, body)) => arguments.length == symbol.arity }) - t match { - case Term(label: VariableLabel, _) => m.get(label).map(_.apply(Nil)).getOrElse(t) - case Term(label, args) => - val newArgs = args.map(instantiateTermSchemasInTerm(_, m)) - label match { - case label: ConstantFunctionLabel => Term(label, newArgs) - case label: SchematicTermLabel => - m.get(label).map(_(newArgs)).getOrElse(Term(label, newArgs)) - } - } - } - - ///////////////////////////// - // **--- ON FORMULAS ---** // - ///////////////////////////// - - /** - * Performs simultaneous substitution of multiple variables by multiple terms in a formula. - * - * @param phi The base formula - * @param m A map from variables to terms - * @return t[m] - */ - def substituteVariablesInFormula(phi: Formula, m: Map[VariableLabel, Term], takenIds: Seq[Identifier] = Seq[Identifier]()): Formula = phi match { - case AtomicFormula(label, args) => AtomicFormula(label, args.map(substituteVariablesInTerm(_, m))) - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(substituteVariablesInFormula(_, m))) - case BinderFormula(label, bound, inner) => - val newSubst = m - bound - val newTaken = takenIds :+ bound.id - val fv = m.values.flatMap(_.freeVariables).toSet - if (fv.contains(bound)) { - val newBoundVariable = VariableLabel(freshId(fv.map(_.name) ++ m.keys.map(_.id) ++ newTaken, bound.name)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> VariableTerm(newBoundVariable)), newTaken) - BinderFormula(label, newBoundVariable, substituteVariablesInFormula(newInner, newSubst, newTaken)) - } else BinderFormula(label, bound, substituteVariablesInFormula(inner, newSubst, newTaken)) - } - - /** - * Performs simultaneous substitution of multiple formula variables by multiple formula terms in a formula. 
- * - * @param phi The base formula - * @param m A map from variables to terms - * @return t[m] - */ - def substituteFormulaVariables(phi: Formula, m: Map[VariableFormulaLabel, Formula], takenIds: Seq[Identifier] = Seq[Identifier]()): Formula = phi match { - case AtomicFormula(label: VariableFormulaLabel, _) => m.getOrElse(label, phi) - case _: AtomicFormula => phi - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(substituteFormulaVariables(_, m, takenIds))) - case BinderFormula(label, bound, inner) => - val fv = m.values.flatMap(_.freeVariables).toSet - val newTaken = takenIds :+ bound.id - if (fv.contains(bound)) { - val newBoundVariable = VariableLabel(freshId(fv.map(_.name) ++ newTaken, bound.name)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> VariableTerm(newBoundVariable)), newTaken) - BinderFormula(label, newBoundVariable, substituteFormulaVariables(newInner, m, newTaken)) - } else BinderFormula(label, bound, substituteFormulaVariables(inner, m, newTaken)) - } - - /** - * Performs simultaneous substitution of schematic function symbol by "functional" terms, or terms with holes. - * If the arity of one of the predicate symbol to substitute doesn't match the corresponding number of arguments, it will produce an error. - * @param phi The base formula - * @param m The map from schematic function symbols to lambda expressions Term(s) -> Term [[LambdaTermTerm]]. - * @return phi[m] - */ - def instantiateTermSchemas(phi: Formula, m: Map[SchematicTermLabel, LambdaTermTerm]): Formula = { - require(m.forall { case (symbol, LambdaTermTerm(arguments, body)) => arguments.length == symbol.arity }) - phi match { - case AtomicFormula(label, args) => AtomicFormula(label, args.map(a => instantiateTermSchemasInTerm(a, m))) - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(instantiateTermSchemas(_, m))) - case BinderFormula(label, bound, inner) => - val newSubst = m - bound - val fv: Set[VariableLabel] = newSubst.flatMap { case (symbol, LambdaTermTerm(arguments, body)) => body.freeVariables }.toSet ++ inner.freeVariables - if (fv.contains(bound)) { - val newBoundVariable = VariableLabel(freshId(fv.map(_.name) ++ m.keys.map(_.id), bound.name)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> VariableTerm(newBoundVariable))) - BinderFormula(label, newBoundVariable, instantiateTermSchemas(newInner, newSubst)) - } else BinderFormula(label, bound, instantiateTermSchemas(inner, newSubst)) - } - } - - /** - * Instantiate a schematic predicate symbol in a formula, using higher-order instantiation. - * If the arity of one of the connector symbol to substitute doesn't match the corresponding number of arguments, it will produce an error. - * @param phi The base formula - * @param m The map from schematic predicate symbols to lambda expressions Term(s) -> Formula [[LambdaTermFormula]]. 
- * @return phi[m] - */ - def instantiatePredicateSchemas(phi: Formula, m: Map[SchematicAtomicLabel, LambdaTermFormula]): Formula = { - require(m.forall { case (symbol, LambdaTermFormula(arguments, body)) => arguments.length == symbol.arity }) - phi match { - case AtomicFormula(label, args) => - label match { - case label: SchematicAtomicLabel if m.contains(label) => m(label)(args) - case _ => phi - } - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(instantiatePredicateSchemas(_, m))) - case BinderFormula(label, bound, inner) => - val fv: Set[VariableLabel] = (m.flatMap { case (symbol, LambdaTermFormula(arguments, body)) => body.freeVariables }).toSet ++ inner.freeVariables - if (fv.contains(bound)) { - val newBoundVariable = VariableLabel(freshId(fv.map(_.name), bound.name)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> VariableTerm(newBoundVariable))) - BinderFormula(label, newBoundVariable, instantiatePredicateSchemas(newInner, m)) - } else BinderFormula(label, bound, instantiatePredicateSchemas(inner, m)) - } - } - - /** - * Instantiate a schematic connector symbol in a formula, using higher-order instantiation. - * - * @param phi The base formula - * @param m The map from schematic function symbols to lambda expressions Formula(s) -> Formula [[LambdaFormulaFormula]]. - * @return phi[m] - */ - def instantiateConnectorSchemas(phi: Formula, m: Map[SchematicConnectorLabel, LambdaFormulaFormula]): Formula = { - require(m.forall { case (symbol, LambdaFormulaFormula(arguments, body)) => arguments.length == symbol.arity }) - phi match { - case _: AtomicFormula => phi - case ConnectorFormula(label, args) => - val newArgs = args.map(instantiateConnectorSchemas(_, m)) - label match { - case label: SchematicConnectorLabel if m.contains(label) => m(label)(newArgs) - case _ => ConnectorFormula(label, newArgs) - } - case BinderFormula(label, bound, inner) => - val fv: Set[VariableLabel] = (m.flatMap { case (symbol, LambdaFormulaFormula(arguments, body)) => body.freeVariables }).toSet ++ inner.freeVariables - if (fv.contains(bound)) { - val newBoundVariable = VariableLabel(freshId(fv.map(_.name), bound.name)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> VariableTerm(newBoundVariable))) - BinderFormula(label, newBoundVariable, instantiateConnectorSchemas(newInner, m)) - } else BinderFormula(label, bound, instantiateConnectorSchemas(inner, m)) - } - } - - /** - * Instantiate a schematic connector symbol in a formula, using higher-order instantiation. - * - * @param phi The base formula - * @param m The map from schematic function symbols to lambda expressions Formula(s) -> Formula [[LambdaFormulaFormula]]. 
- * @return phi[m] - */ - def instantiateSchemas( - phi: Formula, - mCon: Map[SchematicConnectorLabel, LambdaFormulaFormula], - mPred: Map[SchematicAtomicLabel, LambdaTermFormula], - mTerm: Map[SchematicTermLabel, LambdaTermTerm] - ): Formula = { - require(mCon.forall { case (symbol, LambdaFormulaFormula(arguments, body)) => arguments.length == symbol.arity }) - require(mPred.forall { case (symbol, LambdaTermFormula(arguments, body)) => arguments.length == symbol.arity }) - require(mTerm.forall { case (symbol, LambdaTermTerm(arguments, body)) => arguments.length == symbol.arity }) - phi match { - case AtomicFormula(label, args) => - val newArgs = args.map(a => instantiateTermSchemasInTerm(a, mTerm)) - label match { - case label: SchematicAtomicLabel if mPred.contains(label) => mPred(label)(newArgs) - case _ => AtomicFormula(label, newArgs) - } - case ConnectorFormula(label, args) => - val newArgs = args.map(a => instantiateSchemas(a, mCon, mPred, mTerm)) - label match { - case label: SchematicConnectorLabel if mCon.contains(label) => mCon(label)(newArgs) - case _ => ConnectorFormula(label, newArgs) - } - case BinderFormula(label, bound, inner) => - val newmTerm = mTerm - bound - val fv: Set[VariableLabel] = - (mCon.flatMap { case (symbol, LambdaFormulaFormula(arguments, body)) => body.freeVariables }).toSet ++ - (mPred.flatMap { case (symbol, LambdaTermFormula(arguments, body)) => body.freeVariables }).toSet ++ - (mTerm.flatMap { case (symbol, LambdaTermTerm(arguments, body)) => body.freeVariables }).toSet ++ inner.freeVariables - if (fv.contains(bound)) { - val newBoundVariable = VariableLabel(freshId(fv.map(_.name) ++ mTerm.keys.map(_.id), bound.name)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> VariableTerm(newBoundVariable))) - BinderFormula(label, newBoundVariable, instantiateSchemas(newInner, mCon, mPred, newmTerm)) - } else BinderFormula(label, bound, instantiateSchemas(inner, mCon, mPred, newmTerm)) - } - } - -} diff --git a/lisa-kernel/src/main/scala/lisa/kernel/fol/Syntax.scala b/lisa-kernel/src/main/scala/lisa/kernel/fol/Syntax.scala new file mode 100644 index 000000000..992132694 --- /dev/null +++ b/lisa-kernel/src/main/scala/lisa/kernel/fol/Syntax.scala @@ -0,0 +1,403 @@ +package lisa.kernel.fol + +/** Defines the syntax of statements Lisa's kernel + * + * This syntax is a (conservative) extension of first-order logic with higher order expressions. + * An expression in Lisa is a term of the simply typed lambda calculus with base types (called [[Sort]]) [[Ind]] and [[Prop]]. + * + */ +private[fol] trait Syntax { + + /** An abstract type, later instantiated in [[OLEquivalenceChecker]] to be the type of expressions in normal form modulo OL. + */ + type SimpleExpression + + /** An identifier for a variable or constant symbol. + * + * An identifier must not contain one of the following characters: {{{()[]{}?,;_`}}} and must not contain whitespace. + * + * Idiomatic representation is as follows: + * - Identifier("x", 0) ~ "x" + * - Identifier("x", 1) ~ "x_1" + * - Identifier("myvariable", 227) ~ "myvariable_227" + * + * @param name The name of the identifier + * @param no The index of the identifier. Used to easily compute fresh names. 
+ */ + sealed case class Identifier(val name: String, val no: Int) { + require(no >= 0, "Variable index must be positive") + require(Identifier.isValidIdentifier(name), "Variable name " + name + "is not valid.") + override def toString: String = if (no == 0) name else name + Identifier.counterSeparator + no + } + + /** Factory for [[Identifier]] instances. + * + */ + object Identifier { + /** Extractor for identifiers. */ + def unapply(i: Identifier): Option[(String, Int)] = Some((i.name, i.no)) + + /** Creates a new identifier with the given name and index = 0. */ + def apply(name: String): Identifier = new Identifier(name, 0) + + /** Creates a new identifier with the given name and index. */ + def apply(name: String, no: Int): Identifier = new Identifier(name, no) + + val counterSeparator: Char = '_' + val delimiter: Char = '`' + val forbiddenChars: Set[Char] = ("()[]{}?,;" + delimiter + counterSeparator).toSet + + /** Checks if a string is a valid identifier. */ + def isValidIdentifier(s: String): Boolean = s.forall(c => !forbiddenChars.contains(c) && !c.isWhitespace) + } + + /** Creates a fresh identifier based on a base identifier and a set of taken identifiers. + * Find the largest index in the set of taken identifiers and increment it by one. + */ + private[kernel] def freshId(taken: Iterable[Identifier], base: Identifier): Identifier = { + new Identifier( + base.name, + (Iterable(base.no) ++ taken.collect({ case Identifier(base.name, no) => + no + })).max + 1 + ) + } + + + + + /** + * A `Sort` is a base type in the simply typed lambda calculus of Lisa expressions. + * + * There are two sorts: `Ind` and `Prop`. + */ + sealed trait Sort { + /** shortcut for `Assow(this, to)` */ + def ->(to: Sort): Arrow = Arrow(this, to) + + /** @return true if the sort is of the form `Ind -> ... -> Ind -> Ind` */ + val isFunctional: Boolean + + /** @return true if the sort is of the form `Ind -> ... -> Ind -> Prop` */ + val isPredicate: Boolean + + /** @return the number of arguments of the type. + * + * For example, `Ind` has depth 0, `Prop -> Ind` has depth 1, `Ind -> (Prop -> Ind) -> Ind` has depth 2, etc. + */ + val depth: Int + } + + /** The sort of terms in the simply typed lambda calculus. + * Expressions of this type correspond to terms of first-order logic. + * Semantically they are interpreted as elements of the universe, i.e. sets in ZFC. + */ + case object Ind extends Sort { + val isFunctional = true + val isPredicate = false + val depth = 0 + } + + /** The sort of formulas in the simply typed lambda calculus. + * Expressions of this type correspond to formulas of first-order logic. + */ + case object Prop extends Sort { + val isFunctional = false + val isPredicate = true + val depth = 0 + } + + /** An arrow sort, representing a function type in the simply typed lambda calculus. + * The arrow sort is of the form `from -> to`, where `from` and `to` are sorts. + * + * Expressions of type `Ind -> ... -> Ind -> Ind` correspond to function symbols of first-order logic. + * Expressions of type `Ind -> ... -> Ind -> Prop` correspond to predicate symbols of first-order logic. + * Expressions of type `(Ind -> Prop) -> Prop` correspond to quantifiers (∀ and ∃) in first-order logic. + */ + sealed case class Arrow(from: Sort, to: Sort) extends Sort { + val isFunctional = from == Ind && to.isFunctional + val isPredicate = from == Ind && to.isPredicate + val depth = 1+to.depth + } + + /** If `typ1` is of the form `typ2 -> to`, returns `Some(to)`, otherwise returns `None`. 
+ * + * This means, it check that an application of a term of type `typ1` to a term of type `typ2` is legal, and if so returns the resulting type. + */ + def legalApplication(typ1: Sort, typ2: Sort): Option[Sort] = { + typ1 match { + case Arrow(`typ2`, to) => Some(to) + case _ => None + } + } + + /** Store global counters used for distinguishing expressions. + * Useful for efficient reference-based equality checking. + */ + private object ExpressionCounters { + var totalNumberOfExpressions: Long = 0 + def getNewId: Long = { + totalNumberOfExpressions += 1 + totalNumberOfExpressions + } + } + + + /** Expressions are lambda-terms in the simply typed lambda calculus with base types `Ind` and `Prop`. + * + * Expressions are the core part of Lisa's kernel and correspond to standard terms and formulas in first-order logic. + * They are built from constants, variables, applications and abstractions though the following grammar: + * V ::= x | y | z | ... + * C ::= a | b | c | f | g | h | ... + * E ::= V | C | E E | λV.E + * + * Expressions must be well-typed, i.e. the types of the argument in an application must match the type of the function. + */ + sealed trait Expression { + /** Cached normal form of the expression by [[OLEquivalenceChecker]]. */ + private[fol] var polarExpr: Option[SimpleExpression] = None + /** Cached normal form of the expression by [[OLEquivalenceChecker]]. */ + def getPolarExpr : Option[SimpleExpression] = polarExpr + /** Sort of the expression. */ + val sort: Sort + /** Unique number of the expression assigned by [[ExpressionCounters]]. Used for efficient reference equality. */ + val uniqueNumber: Long = ExpressionCounters.getNewId + /** True if the expression contains subexpressions of type `Prop`. */ + val containsFormulas : Boolean + /** Creates an application of this expression to an argument. */ + def apply(arg: Expression): Application = Application(this, arg) + /** Extractor for the arguments of the expression. + * Usage: + * {{{ + * Application(Application(f, x), y) match + * case f(x, y) => println("This case is matched") + * }}} + */ + def unapplySeq(arg: Expression): Option[Seq[Expression]] = arg match { + case Application(f, arg) if f == this => Some(arg :: Nil) + case Application(f, arg) => unapplySeq(f).map(fargs => fargs :+ arg) + case _ => None + } + + /** The beta-normal form of the expression and if it is in beta-normal form. */ + val (betaNormalForm: Expression, isBetaNormal: Boolean) = this match { + case Application(f, arg) => { + val f1 = f.betaNormalForm + val a2 = arg.betaNormalForm + f1 match { + case Lambda(v, body) => { + (substituteVariables(body, Map(v -> a2)).betaNormalForm, false) + } + case _ if f.isBetaNormal && arg.isBetaNormal => (this, true) + case _ => (Application(f1, a2), false) + } + } + case Lambda(v, Application(f, arg)) if v == arg && !f.freeVariables.contains(v) => (f.betaNormalForm, false) + case Lambda(v, inner) if inner.isBetaNormal => (this, true) + case Lambda(v, inner) => (Lambda(v, inner.betaNormalForm), false) + case _ => (this, true) + } + + /** + * @return The list of free variables in the expression. + */ + def freeVariables: Set[Variable] + + /** + * @return The list of constant symbols in the expression. + */ + def constants: Set[Constant] + + /** + * @return The list of all variables in the expression. + */ + def allVariables: Set[Variable] + } + + /** + * A variable symbol, which is a special case of [[Expression]]. + * + * Logically, variables can be bound by lambda abstractions (and quantifiers) or free. 
+ * Free variables in theorems can be instantiated by valules of the same sort. + */ + case class Variable(id: Identifier, sort:Sort) extends Expression { + val containsFormulas = sort == Prop + def freeVariables: Set[Variable] = Set(this) + def constants: Set[Constant] = Set() + def allVariables: Set[Variable] = Set(this) + } + + /** + * A constant symbol, which is a special case of [[Expression]]. + * + * Constants generalize function and predicate symbols of any arity in strict first-order logic. + */ + case class Constant(id: Identifier, sort: Sort) extends Expression { + val containsFormulas = sort == Prop + def freeVariables: Set[Variable] = Set() + def constants: Set[Constant] = Set(this) + def allVariables: Set[Variable] = Set() + } + + /** + * An application of an expression to an argument, which is a special case of [[Expression]]. + * `f.sort` must be of the form `arg.sort -> _`. + */ + case class Application(f: Expression, arg: Expression) extends Expression { + private val legalapp = legalApplication(f.sort, arg.sort) + require(legalapp.isDefined, s"Application of $f to $arg is not legal") + val sort = legalapp.get + val containsFormulas = sort == Prop || f.containsFormulas || arg.containsFormulas + def freeVariables: Set[Variable] = f.freeVariables union arg.freeVariables + def constants: Set[Constant] = f.constants union arg.constants + def allVariables: Set[Variable] = f.allVariables union arg.allVariables + } + + /** + * A lambda abstraction, which is a special case of [[Expression]]. + * + * In `Lambda(v, body)`, `v` is a called "bound" in `body`. + * Every expression that bind a variable uses a Lambda abstraction. + * + * Example: {{{Application(∀, Lambda(x, Application(P, x)))}}} + * corresponds to the formula in strict first-order logic ∀x.P(x). + */ + case class Lambda(v: Variable, body: Expression) extends Expression { + val containsFormulas = body.containsFormulas + val sort = (v.sort -> body.sort) + + def freeVariables: Set[Variable] = body.freeVariables - v + def constants: Set[Constant] = body.constants + def allVariables: Set[Variable] = body.allVariables + } + + + /** The constant symbol for the equality predicate. + * + * Type: `Ind -> (Ind -> Prop)`. + * + * Symbol: `=` + */ + val equality = Constant(Identifier("="), Ind -> (Ind -> Prop)) + + /** The constant symbol for the true formula.
ith step of the proof.
+ * Returns the `i`th step of the proof.
* @param i the index
- * @return a step
*/
def apply(i: Int): SCProofStep = {
if (i >= 0)
@@ -31,7 +30,7 @@ case class SCProof(steps: IndexedSeq[SCProofStep], imports: IndexedSeq[Sequent]
* If the index is negative, return the (-i-1)
th imported sequent.
*
* @param i The reference number of a sequent in the proof
- * @return A sequent, either imported or reached during the proof.
+ * @return A sequent, either imported or proved during the proof.
*/
def getSequent(i: Int): Sequent = {
if (i >= 0)
@@ -94,4 +93,4 @@ object SCProof {
SCProof(steps.toIndexedSeq)
}
-}
+}
\ No newline at end of file
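For reference, the indexing convention documented in `apply` and `getSequent` above (non-negative indices refer to proof steps, negative indices to imports) can be illustrated with a small self-contained sketch; `MiniSequent`, `MiniStep` and `MiniProof` below are simplified stand-ins, not the kernel's actual classes.

// Minimal sketch of SCProof-style premise indexing, with stand-in types.
final case class MiniSequent(left: Set[String], right: Set[String])
final case class MiniStep(bot: MiniSequent) // `bot` plays the role of a step's conclusion
final case class MiniProof(steps: IndexedSeq[MiniStep], imports: IndexedSeq[MiniSequent]) {
  // i >= 0 refers to the conclusion of the i-th step;
  // i < 0 refers to the (-i-1)-th imported sequent: -1 is imports(0), -2 is imports(1), ...
  def getSequent(i: Int): MiniSequent =
    if (i >= 0) steps(i).bot else imports(-i - 1)
}

val ax = MiniSequent(Set.empty, Set("p"))
val proof = MiniProof(IndexedSeq(MiniStep(ax)), IndexedSeq(ax))
assert(proof.getSequent(-1) == proof.getSequent(0)) // import 0 and step 0 coincide here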
diff --git a/lisa-kernel/src/main/scala/lisa/kernel/proof/SCProofChecker.scala b/lisa-kernel/src/main/scala/lisa/kernel/proof/SCProofChecker.scala
index bf7c71147..9fc443204 100644
--- a/lisa-kernel/src/main/scala/lisa/kernel/proof/SCProofChecker.scala
+++ b/lisa-kernel/src/main/scala/lisa/kernel/proof/SCProofChecker.scala
@@ -4,6 +4,7 @@ import lisa.kernel.fol.FOL._
import lisa.kernel.proof.SCProofCheckerJudgement._
import lisa.kernel.proof.SequentCalculus._
+
object SCProofChecker {
/**
@@ -42,7 +43,7 @@ object SCProofChecker {
* Γ |- Γ
*/
case RestateTrue(s) =>
- val truth = Sequent(Set(), Set(AtomicFormula(top, Nil)))
+ val truth = Sequent(Set(), Set(top))
if (isSameSequent(s, truth)) SCValidProof(SCProof(step)) else SCInvalidProof(SCProof(step), Nil, s"The desired conclusion is not a trivial tautology")
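Since RestateTrue only requires the conclusion to be OL-equivalent to `|- ⊤`, any sequent whose right-hand side normalizes to ⊤ should be accepted. A hedged sketch, assuming the same imports as this file (lisa.kernel.fol.FOL._ and lisa.kernel.proof.SequentCalculus._):

// p ∨ ¬p equals ⊤ in any ortholattice (complementation), so the sequent
// `|- p ∨ ¬p` is expected to pass RestateTrue just like `|- ⊤`.
val p = Constant(Identifier("p"), Prop)
val excludedMiddle = Sequent(Set(), Set(or(p)(neg(p))))
val trivialTruth = Sequent(Set(), Set(top))
// isSameSequent(excludedMiddle, trivialTruth) should hold under the OL equivalence checker.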
/*
*
@@ -50,17 +51,22 @@ object SCProofChecker {
* Γ, φ |- φ, Δ
*/
case Hypothesis(Sequent(left, right), phi) =>
- if (contains(left, phi))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (contains(left, phi))
if (contains(right, phi)) SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, s"Right-hand side does not contain formula φ")
else SCInvalidProof(SCProof(step), Nil, s"Left-hand side does not contain formula φ")
+
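The new sort guards above (`phi.sort != Prop`) reject steps whose φ is a term rather than a formula. A small sketch of the sort discipline, using only constructors introduced by this patch in Syntax.scala (assuming they are in scope as in this file):

val x = Variable(Identifier("x"), Ind) // a term variable
val f = Constant(Identifier("f"), Ind -> Ind) // a unary function symbol
val p = Constant(Identifier("p"), Ind -> Prop) // a unary predicate symbol

val term: Expression = f(x) // sort Ind: rejected as φ ("φ must be a formula, but it is a Ind")
val formula: Expression = p(x) // sort Prop: a legal φ
// Hypothesis(Sequent(Set(formula), Set(formula)), formula) is then a well-formed step.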
/*
* Γ |- Δ, φ φ, Σ |- Π
* ------------------------
* Γ, Σ |- Δ, Π
*/
case Cut(b, t1, t2, phi) =>
- if (isSameSet(b.left + phi, ref(t1).left union ref(t2).left) && (!contains(ref(t1).left, phi) || contains(b.left, phi)))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (isSameSet(b.left + phi, ref(t1).left union ref(t2).left) && (!contains(ref(t1).left, phi) || contains(b.left, phi)))
if (isSameSet(b.right + phi, ref(t2).right union ref(t1).right) && (!contains(ref(t2).right, phi) || contains(b.right, phi)))
if (contains(ref(t2).left, phi))
if (contains(ref(t1).right, phi))
@@ -77,8 +83,12 @@ object SCProofChecker {
* Γ, φ∧ψ |- Δ Γ, φ∧ψ |- Δ
*/
case LeftAnd(b, t1, phi, psi) =>
- if (isSameSet(ref(t1).right, b.right)) {
- val phiAndPsi = ConnectorFormula(And, Seq(phi, psi))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (psi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "ψ must be a formula, but it is a " + psi.sort)
+ else if (isSameSet(ref(t1).right, b.right)) {
+ val phiAndPsi = and(phi)(psi)
if (
isSameSet(b.left + phi, ref(t1).left + phiAndPsi) ||
isSameSet(b.left + psi, ref(t1).left + phiAndPsi) ||
@@ -93,13 +103,15 @@ object SCProofChecker {
* Γ, Σ, φ∨ψ |- Δ, Π
*/
case LeftOr(b, t, disjuncts) =>
- if (isSameSet(b.right, t.map(ref(_).right).fold(Set.empty)(_ union _))) {
- val phiOrPsi = ConnectorFormula(Or, disjuncts)
+ if (disjuncts.exists(phi => phi.sort != Prop)){
+ val culprit = disjuncts.find(phi => phi.sort != Prop).get
+ SCInvalidProof(SCProof(step), Nil, "all φs must be formulas, but " + culprit + " is a " + culprit.sort)
+ } else if (isSameSet(b.right, t.map(ref(_).right).fold(Set.empty)(_ union _))) {
+ val phiOrPsi = disjuncts.reduceLeft(or(_)(_))
if (
t.zip(disjuncts).forall { case (s, phi) => isSubset(ref(s).left, b.left + phi) } &&
isSubset(b.left, t.map(ref(_).left).fold(Set.empty)(_ union _) + phiOrPsi)
)
-
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion + disjuncts is not the same as the union of the left-hand sides of the premises + φ∨ψ.")
} else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion is not the union of the right-hand sides of the premises.")
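Since n-ary disjunctions are now built by folding the curried binary `or`, the φ∨ψ compared above is left-associated. A short illustration, assuming the FOL constructors of this patch are in scope; the `cunjuncts.reduce(and(_)(_))` used for RightAnd further below behaves analogously:

val a = Constant(Identifier("a"), Prop)
val b = Constant(Identifier("b"), Prop)
val c = Constant(Identifier("c"), Prop)

val disjuncts: Seq[Expression] = Seq(a, b, c)
val folded = disjuncts.reduceLeft(or(_)(_)) // or(or(a)(b))(c), i.e. (a ∨ b) ∨ c
// The OL checker identifies the different associations:
// isSame(folded, or(a)(or(b)(c))) is expected to hold.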
@@ -109,30 +121,42 @@ object SCProofChecker {
* Γ, Σ, φ⇒ψ |- Δ, Π
*/
case LeftImplies(b, t1, t2, phi, psi) =>
- val phiImpPsi = ConnectorFormula(Implies, Seq(phi, psi))
- if (isSameSet(b.right + phi, ref(t1).right union ref(t2).right))
- if (isSameSet(b.left + psi, ref(t1).left union ref(t2).left + phiImpPsi))
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion + ψ must be identical to union of left-hand sides of premisces + φ⇒ψ.")
- else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion + φ must be identical to union of right-hand sides of premisces.")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (psi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "ψ must be a formula, but it is a " + psi.sort)
+ else {
+ val phiImpPsi = implies(phi)(psi)
+ if (isSameSet(b.right + phi, ref(t1).right union ref(t2).right))
+ if (isSameSet(b.left + psi, ref(t1).left union ref(t2).left + phiImpPsi))
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion + ψ must be identical to union of left-hand sides of premises + φ⇒ψ.")
+ else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion + φ must be identical to union of right-hand sides of premises.")
+ }
/*
* Γ, φ⇒ψ |- Δ Γ, φ⇒ψ, ψ⇒φ |- Δ
* -------------- or ---------------
* Γ, φ⇔ψ |- Δ Γ, φ⇔ψ |- Δ
*/
case LeftIff(b, t1, phi, psi) =>
- val phiImpPsi = ConnectorFormula(Implies, Seq(phi, psi))
- val psiImpPhi = ConnectorFormula(Implies, Seq(psi, phi))
- val phiIffPsi = ConnectorFormula(Iff, Seq(phi, psi))
- if (isSameSet(ref(t1).right, b.right))
- if (
- isSameSet(b.left + phiImpPsi, ref(t1).left + phiIffPsi) ||
- isSameSet(b.left + psiImpPhi, ref(t1).left + phiIffPsi) ||
- isSameSet(b.left + phiImpPsi + psiImpPhi, ref(t1).left + phiIffPsi)
- )
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + φ⇔ψ must be same as left-hand side of premise + either φ⇒ψ, ψ⇒φ or both.")
- else SCInvalidProof(SCProof(step), Nil, "Right-hand sides of premise and conclusion must be the same.")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (psi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "ψ must be a formula, but it is a " + psi.sort)
+ else {
+ val phiImpPsi = implies(phi)(psi)
+ val psiImpPhi = implies(psi)(phi)
+ val phiIffPsi = iff(phi)(psi)
+ if (isSameSet(ref(t1).right, b.right))
+ if (
+ isSameSet(b.left + phiImpPsi, ref(t1).left + phiIffPsi) ||
+ isSameSet(b.left + psiImpPhi, ref(t1).left + phiIffPsi) ||
+ isSameSet(b.left + phiImpPsi + psiImpPhi, ref(t1).left + phiIffPsi)
+ )
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + φ⇔ψ must be same as left-hand side of premise + either φ⇒ψ, ψ⇒φ or both.")
+ else SCInvalidProof(SCProof(step), Nil, "Right-hand sides of premise and conclusion must be the same.")
+ }
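For concreteness, a minimal instance of the shapes accepted by LeftIff above; the premise may carry φ⇒ψ, ψ⇒φ, or both, in place of the φ⇔ψ of the conclusion (a sketch, assuming the FOL and sequent constructors of this patch are in scope):

val phi = Constant(Identifier("phi"), Prop)
val psi = Constant(Identifier("psi"), Prop)

val premise = Sequent(Set(implies(phi)(psi)), Set(psi)) // φ⇒ψ |- ψ
val conclusion = Sequent(Set(iff(phi)(psi)), Set(psi)) // φ⇔ψ |- ψ
// LeftIff(conclusion, i, phi, psi), with i the index of the premise, is the corresponding step;
// a premise Sequent(Set(implies(phi)(psi), implies(psi)(phi)), Set(psi)) would be accepted as well.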
/*
* Γ |- φ, Δ
@@ -140,12 +164,16 @@ object SCProofChecker {
* Γ, ¬φ |- Δ
*/
case LeftNot(b, t1, phi) =>
- val nPhi = ConnectorFormula(Neg, Seq(phi))
- if (isSameSet(b.left, ref(t1).left + nPhi))
- if (isSameSet(b.right + phi, ref(t1).right))
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + φ must be the same as right-hand side of premise")
- else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion must be the same as left-hand side of premise + ¬φ")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else {
+ val nPhi = neg(phi)
+ if (isSameSet(b.left, ref(t1).left + nPhi))
+ if (isSameSet(b.right + phi, ref(t1).right))
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + φ must be the same as right-hand side of premise")
+ else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion must be the same as left-hand side of premise + ¬φ")
+ }
/*
* Γ, φ[t/x] |- Δ
@@ -153,8 +181,14 @@ object SCProofChecker {
* Γ, ∀x. φ |- Δ
*/
case LeftForall(b, t1, phi, x, t) =>
- if (isSameSet(b.right, ref(t1).right))
- if (isSameSet(b.left + substituteVariablesInFormula(phi, Map(x -> t)), ref(t1).left + BinderFormula(Forall, x, phi)))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (x.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "x must be a term variable, but it is a " + x.sort)
+ else if (t.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "t must be a term, but it is a " + t.sort)
+ else if (isSameSet(b.right, ref(t1).right))
+ if (isSameSet(b.left + substituteVariables(phi, Map(x -> t)), ref(t1).left + forall(Lambda(x, phi))))
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + φ[t/x] must be the same as left-hand side of premise + ∀x. φ")
else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion must be the same as right-hand side of premise")
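The LeftForall condition above compares the conclusion with the premise after substituting the witness term into φ. A concrete instance (a sketch; `substituteVariables` is the kernel substitution used in the check, and the remaining constructors come from this patch):

val x = Variable(Identifier("x"), Ind)
val c = Constant(Identifier("c"), Ind)
val p = Constant(Identifier("p"), Ind -> Prop)
val phi = p(x)

// Premise: Γ, φ[t/x] |- Δ, here p(c) |- p(c)
// Conclusion: Γ, ∀x. φ |- Δ, here ∀x. p(x) |- p(c)
val instantiated = substituteVariables(phi, Map(x -> c)) // p(c)
val premise = Sequent(Set(instantiated), Set(p(c)))
val conclusion = Sequent(Set(forall(Lambda(x, phi))), Set(p(c)))
// The corresponding step is LeftForall(conclusion, i, phi, x, c), with i the index of the premise.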
@@ -165,28 +199,18 @@ object SCProofChecker {
* Γ, ∃x. φ|- Δ
*/
case LeftExists(b, t1, phi, x) =>
- if (isSameSet(b.right, ref(t1).right))
- if (isSameSet(b.left + phi, ref(t1).left + BinderFormula(Exists, x, phi)))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (x.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "x must be a term variable, but it is a " + x.sort)
+ else if (isSameSet(b.right, ref(t1).right))
+ if (isSameSet(b.left + phi, ref(t1).left + exists(Lambda(x, phi))))
if ((b.left union b.right).forall(f => !f.freeVariables.contains(x)))
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, "The variable x must not be free in the resulting sequent.")
else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + φ must be the same as left-hand side of premise + ∃x. φ")
else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion must be the same as right-hand side of premise")
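The freshness condition above is the usual eigenvariable restriction: x must not occur free anywhere else in the conclusion. A sketch of a violating instance (assuming the constructors of this patch are in scope):

val x = Variable(Identifier("x"), Ind)
val p = Constant(Identifier("p"), Ind -> Prop)
val q = Constant(Identifier("q"), Ind -> Prop)

// ∃x. p(x), q(x) |- : the x in q(x) is free in the conclusion, so the step is rejected
// with "The variable x must not be free in the resulting sequent."
val badConclusion = Sequent(Set(exists(Lambda(x, p(x))), q(x)), Set())
// The bound occurrence inside ∃x. p(x) is fine; the free occurrence in q(x) is the problem.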
- /*
- * Γ, ∃y.∀x. (x=y) ⇔ φ |- Δ
- * ---------------------------- if y is not free in φ
- * Γ, ∃!x. φ |- Δ
- */
- case LeftExistsOne(b, t1, phi, x) =>
- val y = VariableLabel(freshId(phi.freeVariables.map(_.id), x.id))
- val temp = BinderFormula(Exists, y, BinderFormula(Forall, x, ConnectorFormula(Iff, List(AtomicFormula(equality, List(VariableTerm(x), VariableTerm(y))), phi))))
- if (isSameSet(b.right, ref(t1).right))
- if (isSameSet(b.left + temp, ref(t1).left + BinderFormula(ExistsOne, x, phi)))
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + ∃y.∀x. (x=y) ⇔ φ must be the same as left-hand side of premise + ∃!x. φ")
- else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion must be the same as right-hand side of premise")
-
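With the dedicated LeftExistsOne rule removed, unique existence is no longer primitive; the deleted code above spells out the intended reading of ∃!x. φ as ∃y.∀x.((x=y) ⇔ φ). A sketch of that encoding as a derived constructor, assuming the Syntax constructors of this patch (note that freshId is private[kernel], so code outside the kernel would have to supply its own fresh variable):

// Derived ∃!x. φ := ∃y. ∀x. ((x = y) ⇔ φ), with y fresh in φ.
def existsOne(x: Variable, phi: Expression): Expression = {
  val y = Variable(freshId(phi.freeVariables.map(_.id), x.id), Ind)
  exists(Lambda(y, forall(Lambda(x, iff(equality(x)(y))(phi)))))
}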
// Right rules
/*
* Γ |- φ, Δ Σ |- ψ, Π
@@ -194,82 +218,113 @@ object SCProofChecker {
* Γ, Σ |- φ∧ψ, Π, Δ
*/
case RightAnd(b, t, cunjuncts) =>
- val phiAndPsi = ConnectorFormula(And, cunjuncts)
- if (isSameSet(b.left, t.map(ref(_).left).fold(Set.empty)(_ union _)))
- if (
- t.zip(cunjuncts).forall { case (s, phi) => isSubset(ref(s).right, b.right + phi) } &&
- isSubset(b.right, t.map(ref(_).right).fold(Set.empty)(_ union _) + phiAndPsi)
- //isSameSet(cunjuncts.foldLeft(b.right)(_ + _), t.map(ref(_).right).fold(Set.empty)(_ union _) + phiAndPsi)
- )
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion + φ + ψ is not the same as the union of the right-hand sides of the premises φ∧ψ.")
- else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion is not the union of the left-hand sides of the premises.")
+ if (cunjuncts.exists(phi => phi.sort != Prop)){
+ val culprit = cunjuncts.find(phi => phi.sort != Prop).get
+ SCInvalidProof(SCProof(step), Nil, "all φs must be formulas, but " + culprit + " is a " + culprit.sort)
+ } else {
+ val phiAndPsi = cunjuncts.reduce(and(_)(_))
+ if (isSameSet(b.left, t.map(ref(_).left).fold(Set.empty)(_ union _)))
+ if (
+ t.zip(cunjuncts).forall { case (s, phi) => isSubset(ref(s).right, b.right + phi) } &&
+ isSubset(b.right, t.map(ref(_).right).fold(Set.empty)(_ union _) + phiAndPsi)
+ //isSameSet(cunjuncts.foldLeft(b.right)(_ + _), t.map(ref(_).right).fold(Set.empty)(_ union _) + phiAndPsi)
+ )
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion + φ + ψ is not the same as the union of the right-hand sides of the premises φ∧ψ.")
+ else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion is not the union of the left-hand sides of the premises.")
+ }
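The conjunction above is assembled with cunjuncts.reduce(and(_)(_)), that is, by left-nested curried application, and the phi.sort != Prop guards protect exactly the applications this builds. A minimal standalone model of that encoding follows; the Srt, Ex, Cst, Ap and andC names are illustrative stand-ins, not the kernel's Expression classes.

// Minimal sketch of a sorted, curried expression encoding (illustrative stand-ins only).
sealed trait Srt
case object Ind extends Srt
case object Prop extends Srt
final case class Arrow(from: Srt, to: Srt) extends Srt

sealed trait Ex { def sort: Srt }
final case class Cst(name: String, sort: Srt) extends Ex
final case class Ap(f: Ex, a: Ex) extends Ex:
  // the sort of an application is computed from the sort of the function part
  val sort: Srt = f.sort match
    case Arrow(dom, cod) if dom == a.sort => cod
    case _ => throw IllegalArgumentException(s"ill-sorted application: $f applied to $a")

// A binary connective is a curried constant of sort Prop -> Prop -> Prop,
// so and(p)(q) is the nested application Ap(Ap(andC, p), q).
val andC = Cst("and", Arrow(Prop, Arrow(Prop, Prop)))
def and(p: Ex)(q: Ex): Ex = Ap(Ap(andC, p), q)

@main def reduceDemo(): Unit =
  val conjuncts = List(Cst("p", Prop), Cst("q", Prop), Cst("r", Prop))
  // reduce left-nests the conjunction: ((p ∧ q) ∧ r); every intermediate node has sort Prop.
  println(conjuncts.reduce(and(_)(_)))

The same curried style recurs throughout the new checker, e.g. or(phi)(psi), implies(phi)(psi) and iff(phi)(psi) in the rules below.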
/*
* Γ |- φ, Δ Γ |- φ, ψ, Δ
* -------------- or ---------------
* Γ |- φ∨ψ, Δ Γ |- φ∨ψ, Δ
*/
case RightOr(b, t1, phi, psi) =>
- val phiOrPsi = ConnectorFormula(Or, Seq(phi, psi))
- if (isSameSet(ref(t1).left, b.left))
- if (
- isSameSet(b.right + phi, ref(t1).right + phiOrPsi) ||
- isSameSet(b.right + psi, ref(t1).right + phiOrPsi) ||
- isSameSet(b.right + phi + psi, ref(t1).right + phiOrPsi)
- )
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + φ∧ψ must be same as right-hand side of premise + either φ, ψ or both.")
- else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of the premise and the conclusion must be the same.")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (psi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "ψ must be a formula, but it is a " + phi.sort)
+ else {
+ val phiOrPsi = or(phi)(psi)
+ if (isSameSet(ref(t1).left, b.left))
+ if (
+ isSameSet(b.right + phi, ref(t1).right + phiOrPsi) ||
+ isSameSet(b.right + psi, ref(t1).right + phiOrPsi) ||
+ isSameSet(b.right + phi + psi, ref(t1).right + phiOrPsi)
+ )
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + φ∧ψ must be same as right-hand side of premise + either φ, ψ or both.")
+ else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of the premise and the conclusion must be the same.")
+ }
/*
* Γ, φ |- ψ, Δ
* --------------
* Γ |- φ⇒ψ, Δ
*/
case RightImplies(b, t1, phi, psi) =>
- val phiImpPsi = ConnectorFormula(Implies, Seq(phi, psi))
- if (isSameSet(ref(t1).left, b.left + phi))
- if (isSameSet(b.right + psi, ref(t1).right + phiImpPsi))
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + ψ must be same as right-hand side of premise + φ⇒ψ.")
- else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + psi must be same as left-hand side of premise.")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (psi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "ψ must be a formula, but it is a " + phi.sort)
+ else {
+ val phiImpPsi = implies(phi)(psi)
+ if (isSameSet(ref(t1).left, b.left + phi))
+ if (isSameSet(b.right + psi, ref(t1).right + phiImpPsi))
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + ψ must be same as right-hand side of premise + φ⇒ψ.")
+ else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + psi must be same as left-hand side of premise.")
+ }
/*
* Γ |- φ⇒ψ, Δ Σ |- ψ⇒φ, Π
* ----------------------------
* Γ, Σ |- φ⇔ψ, Π, Δ
*/
case RightIff(b, t1, t2, phi, psi) =>
- val phiImpPsi = ConnectorFormula(Implies, Seq(phi, psi))
- val psiImpPhi = ConnectorFormula(Implies, Seq(psi, phi))
- val phiIffPsi = ConnectorFormula(Iff, Seq(phi, psi))
- if (isSameSet(b.left, ref(t1).left union ref(t2).left))
- if (
- isSubset(ref(t1).right, b.right + phiImpPsi) &&
- isSubset(ref(t2).right, b.right + psiImpPhi) &&
- isSubset(b.right, ref(t1).right union ref(t2).right + phiIffPsi)
- )
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion + a⇒ψ + ψ⇒φ is not the same as the union of the right-hand sides of the premises φ⇔b.")
- else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion is not the union of the left-hand sides of the premises.")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (psi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "ψ must be a formula, but it is a " + phi.sort)
+ else {
+ val phiImpPsi = implies(phi)(psi)
+ val psiImpPhi = implies(psi)(phi)
+ val phiIffPsi = iff(phi)(psi)
+ if (isSameSet(b.left, ref(t1).left union ref(t2).left))
+ if (
+ isSubset(ref(t1).right, b.right + phiImpPsi) &&
+ isSubset(ref(t2).right, b.right + psiImpPhi) &&
+ isSubset(b.right, ref(t1).right union ref(t2).right + phiIffPsi)
+ )
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, s"Right-hand side of conclusion + a⇒ψ + ψ⇒φ is not the same as the union of the right-hand sides of the premises φ⇔b.")
+ else SCInvalidProof(SCProof(step), Nil, s"Left-hand side of conclusion is not the union of the left-hand sides of the premises.")
+ }
/*
* Γ, φ |- Δ
* --------------
* Γ |- ¬φ, Δ
*/
case RightNot(b, t1, phi) =>
- val nPhi = ConnectorFormula(Neg, Seq(phi))
- if (isSameSet(b.right, ref(t1).right + nPhi))
- if (isSameSet(b.left + phi, ref(t1).left))
- SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + φ must be the same as left-hand side of premise")
- else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion must be the same as right-hand side of premise + ¬φ")
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else {
+ val nPhi = neg(phi)
+ if (isSameSet(b.right, ref(t1).right + nPhi))
+ if (isSameSet(b.left + phi, ref(t1).left))
+ SCValidProof(SCProof(step))
+ else SCInvalidProof(SCProof(step), Nil, "Left-hand side of conclusion + φ must be the same as left-hand side of premise")
+ else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion must be the same as right-hand side of premise + ¬φ")
+ }
/*
* Γ |- φ, Δ
* ------------------- if x is not free in the resulting sequent
* Γ |- ∀x. φ, Δ
*/
case RightForall(b, t1, phi, x) =>
- if (isSameSet(b.left, ref(t1).left))
- if (isSameSet(b.right + phi, ref(t1).right + BinderFormula(Forall, x, phi)))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (x.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "x must be a term variable, but it is a " + x.sort)
+ else if (isSameSet(b.left, ref(t1).left))
+ if (isSameSet(b.right + phi, ref(t1).right + forall(Lambda(x, phi))))
if ((b.left union b.right).forall(f => !f.freeVariables.contains(x)))
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, "The variable x must not be free in the resulting sequent.")
@@ -281,27 +336,39 @@ object SCProofChecker {
* Γ |- ∃x. φ, Δ
*/
case RightExists(b, t1, phi, x, t) =>
- if (isSameSet(b.left, ref(t1).left))
- if (isSameSet(b.right + substituteVariablesInFormula(phi, Map(x -> t)), ref(t1).right + BinderFormula(Exists, x, phi)))
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (x.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "x must be a term variable, but it is a " + x.sort)
+ else if (t.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "t must be a term , but it is a " + t.sort)
+ else if (isSameSet(b.left, ref(t1).left))
+ if (isSameSet(b.right + substituteVariables(phi, Map(x -> t)), ref(t1).right + exists(Lambda(x, phi))))
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, "Right-hand side of the conclusion + φ[t/x] must be the same as right-hand side of the premise + ∃x. φ")
else SCInvalidProof(SCProof(step), Nil, "Left-hand sides or conclusion and premise must be the same.")
/**
*
- * Γ |- ∃y.∀x. (x=y) ⇔ φ, Δ
- * ---------------------------- if y is not free in φ
- * Γ|- ∃!x. φ, Δ
+ * Γ |- φ[t/x], Δ
+ * --------------------------
+ * Γ|- φ[(εx. φ)/x], Δ
**/
- case RightExistsOne(b, t1, phi, x) =>
- val y = VariableLabel(freshId(phi.freeVariables.map(_.id), x.id))
- val temp = BinderFormula(Exists, y, BinderFormula(Forall, x, ConnectorFormula(Iff, List(AtomicFormula(equality, List(VariableTerm(x), VariableTerm(y))), phi))))
- if (isSameSet(b.left, ref(t1).left))
- if (isSameSet(b.right + temp, ref(t1).right + BinderFormula(ExistsOne, x, phi)))
+ case RightEpsilon(b, t1, phi, x, t) =>
+ if (phi.sort != Prop)
+ SCInvalidProof(SCProof(step), Nil, "φ must be a formula, but it is a " + phi.sort)
+ else if (x.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "x must be a term variable, but it is a " + x.sort)
+ else if (t.sort != Ind)
+ SCInvalidProof(SCProof(step), Nil, "t must be a term, but it is a " + t.sort)
+ else if (isSameSet(b.left, ref(t1).left)) {
+ val expected_top = substituteVariables(phi, Map(x -> t))
+ val expected_bot = substituteVariables(phi, Map(x -> epsilon(Lambda(x, phi))))
+ if (isSameSet(b.right + expected_top, ref(t1).right + expected_bot))
SCValidProof(SCProof(step))
- else SCInvalidProof(SCProof(step), Nil, "Right-hand side of conclusion + ∃y.∀x. (x=y) ⇔ φ must be the same as right-hand side of premise + ∃!x. φ")
- else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of conclusion and premise must be the same")
+ else SCInvalidProof(SCProof(step), Nil, "Right-hand side of the conclusion + φ[t/x] must be the same as right-hand side of the premise + φ[(εx. φ)/x]")
+ } else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of conclusion and premise must be the same.")

// Structural rules
/*
@@ -314,6 +381,18 @@ object SCProofChecker {
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, "Conclusion cannot be trivially derived from premise.")

+ /**
+ *
+ * Γ, φ[(λy. e)t/x] |- Δ
+ * ---------------------------
+ * Γ, φ[e[t/y]/x] |- Δ
+ *
+ */
+ case Beta(b, t1) =>
+ if (isSame(sequentToFormula(b).betaNormalForm, sequentToFormula(ref(t1)).betaNormalForm)) {
+ SCValidProof(SCProof(step))
+ } else SCInvalidProof(SCProof(step), Nil, "The conclusion is not beta-OL-equivalent to the premise.")
+
// Equality Rules
/*
* Γ, s=s |- Δ
@@ -322,8 +401,8 @@ object SCProofChecker {
*/
case LeftRefl(b, t1, phi) =>
phi match {
- case AtomicFormula(`equality`, Seq(left, right)) =>
- if (isSameTerm(left, right))
+ case equality(left, right) =>
+ if (isSame(left, right))
if (isSameSet(b.right, ref(t1).right))
if (isSameSet(b.left + phi, ref(t1).left))
SCValidProof(SCProof(step))
@@ -340,8 +419,8 @@ object SCProofChecker {
*/
case RightRefl(b, phi) =>
phi match {
- case AtomicFormula(`equality`, Seq(left, right)) =>
- if (isSameTerm(left, right))
+ case equality(left, right) =>
+ if (isSame(left, right))
if (contains(b.right, phi))
SCValidProof(SCProof(step))
else SCInvalidProof(SCProof(step), Nil, s"Right-Hand side of conclusion does not contain φ")
@@ -349,26 +428,31 @@ object SCProofChecker {
case _ => SCInvalidProof(SCProof(step), Nil, s"φ is not an equality.")
}

- /*
- * Γ, φ(s_) |- Δ
- * ---------------------
- * Γ, (s=t)_, φ(t_)|- Δ
+ /**
+ *
+ * Γ, φ(s_) |- Δ + * ----------------------------------------------------- + * Γ, (∀x,...,z. (s x ... z)=(t x ... z))_, φ(t_) |- Δ + **/ case LeftSubstEq(b, t1, equals, lambdaPhi) => val (s_es, t_es) = equals.unzip val (phi_args, phi_body) = lambdaPhi if (phi_args.size != s_es.size) // Not strictly necessary, but it's a good sanity check. To reactivate when tactics have been modified. SCInvalidProof(SCProof(step), Nil, "The number of arguments of φ must be the same as the number of equalities.") - else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) + else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.sort != arg.sort || t.sort != arg.sort || !(arg.sort.isFunctional || arg.sort.isPredicate) }) SCInvalidProof(SCProof(step), Nil, "The arities of symbols in φ must be the same as the arities of equalities.") else { - val phi_s_for_f = instantiateTermSchemas(phi_body, (phi_args zip s_es).toMap) - val phi_t_for_f = instantiateTermSchemas(phi_body, (phi_args zip t_es).toMap) + val phi_s_for_f = substituteVariables(phi_body, (phi_args zip s_es).toMap) + val phi_t_for_f = substituteVariables(phi_body, (phi_args zip t_es).toMap) val sEqT_es = equals map { case (s, t) => - assert(s.vars.size == t.vars.size) - val base = AtomicFormula(equality, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(VariableTerm)))) - (s.vars).foldLeft(base: Formula) { case (acc, s_arg) => BinderFormula(Forall, s_arg, acc) } + val no = ((s.freeVariables ++ t.freeVariables).view.map(_.id.no) ++ Seq(-1)).max+1 + val vars = (no until no+s.sort.depth).map(i => Variable(Identifier("x", i), Ind)) + val inner1 = vars.foldLeft(s)(_(_)) + val inner2 = vars.foldLeft(t)(_(_)) + val base = if (inner1.sort == Prop) iff(inner1)(inner2) else equality(inner1)(inner2) + vars.foldRight(base : Expression) { case (s_arg, acc) => forall(Lambda(s_arg, acc)) } } if (isSameSet(b.right, ref(t1).right)) @@ -387,117 +471,128 @@ object SCProofChecker { } /* - * Γ |- φ(s_), Δ - * --------------------- - * Γ, (s=t)_ |- φ(t_), Δ + * Γ |- φ(s), Δ Σ |- s=t, Π + * --------------------------------- + * Γ, Σ |- φ(t), Δ, Π */ case RightSubstEq(b, t1, equals, lambdaPhi) => val (s_es, t_es) = equals.unzip val (phi_args, phi_body) = lambdaPhi - if (phi_args.size != equals.size) // Not strictly necessary, but it's a good sanity check. To reactivate when tactics have been modified. + if (phi_args.size != s_es.size) // Not strictly necessary, but it's a good sanity check. To reactivate when tactics have been modified. 
SCInvalidProof(SCProof(step), Nil, "The number of arguments of φ must be the same as the number of equalities.") - else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) + else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.sort != arg.sort || t.sort != arg.sort }) SCInvalidProof(SCProof(step), Nil, "The arities of symbols in φ must be the same as the arities of equalities.") else { - val phi_s_for_f = instantiateTermSchemas(phi_body, (phi_args zip s_es).toMap) - val phi_t_for_f = instantiateTermSchemas(phi_body, (phi_args zip t_es).toMap) + val phi_s_for_f = substituteVariables(phi_body, (phi_args zip s_es).toMap) + val phi_t_for_f = substituteVariables(phi_body, (phi_args zip t_es).toMap) val sEqT_es = equals map { case (s, t) => - assert(s.vars.size == t.vars.size) - val base = AtomicFormula(equality, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(VariableTerm)))) - (s.vars).foldLeft(base: Formula) { case (acc, s_arg) => BinderFormula(Forall, s_arg, acc) } + val no = ((s.freeVariables ++ t.freeVariables).view.map(_.id.no) ++ Seq(0)).max+1 + val vars = (no until no+s.sort.depth).map(i => Variable(Identifier("x", i), Ind)) + val inner1 = vars.foldLeft(s)(_(_)) + val inner2 = vars.foldLeft(t)(_(_)) + val base = if (inner1.sort == Prop) iff(inner1)(inner2) else equality(inner1)(inner2) + vars.foldRight(base : Expression) { case (s_arg, acc) => forall(Lambda(s_arg, acc)) } } - - if (isSameSet(ref(t1).left ++ sEqT_es, b.left)) + if (isSameSet(b.left, ref(t1).left ++ sEqT_es)) if ( - isSameSet(b.right + phi_s_for_f, ref(t1).right + phi_t_for_f) || - isSameSet(b.right + phi_t_for_f, ref(t1).right + phi_s_for_f) - ) + isSameSet(b.right + phi_t_for_f, ref(t1).right + phi_s_for_f) || + isSameSet(b.right + phi_s_for_f, ref(t1).right + phi_t_for_f) + ) { SCValidProof(SCProof(step)) - else + } + else { + SCInvalidProof( SCProof(step), Nil, "Right-hand side of the premise and the conclusion should be the same with each containing one of φ(s_) φ(t_), but it isn't the case." - ) + )} else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of the premise + (s=t)_ must be the same as left-hand side of the premise.") } + +/* /* - * Γ, φ(ψ_) |- Δ + * Γ |- φ[ψ/?p], Δ * --------------------- - * Γ, ψ⇔τ, φ(τ) |- Δ + * Γ, ψ⇔τ |- φ[τ/?p], Δ */ - case LeftSubstIff(b, t1, equals, lambdaPhi) => - val (phi_s, tau_s) = equals.unzip - val (phi_args, phi_body) = lambdaPhi - if (phi_args.size != phi_s.size) // Not strictly necessary, but it's a good sanity check. To reactivate when tactics have been modified. - SCInvalidProof(SCProof(step), Nil, "The number of arguments of φ must be the same as the number of equalities.") - else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) - SCInvalidProof(SCProof(step), Nil, "The arities of symbols in φ must be the same as the arities of equalities.") + case RightSubstIff(b, t1, t2, psi, tau, vars, lambdaPhi) => + val (phi_arg, phi_body) = lambdaPhi + if (psi.sort != phi_arg.sort || tau.sort != phi_arg.sort) + SCInvalidProof(SCProof(step), Nil, "The types of the variable of φ must be the same as the types of ψ and τ.") + else if (!psi.sort.isPredicate) + SCInvalidProof(SCProof(step), Nil, "Can only substitute predicate-like terms (with type Ind -> ... 
-> Ind -> Prop)") else { - val phi_psi_for_q = instantiatePredicateSchemas(phi_body, (phi_args zip phi_s).toMap) - val phi_tau_for_q = instantiatePredicateSchemas(phi_body, (phi_args zip tau_s).toMap) - val psiIffTau = equals map { - case (s, t) => - assert(s.vars.size == t.vars.size) - val base = ConnectorFormula(Iff, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(VariableTerm)))) - (s.vars).foldLeft(base: Formula) { case (acc, s_arg) => BinderFormula(Forall, s_arg, acc) } - } + val phi_s_for_f = substituteVariables(phi_body, Map(phi_arg -> psi)) + val phi_t_for_f = substituteVariables(phi_body, Map(phi_arg -> tau)) - if (isSameSet(b.right, ref(t1).right)) - if ( - isSameSet(b.left + phi_tau_for_q, ref(t1).left ++ psiIffTau + phi_psi_for_q) || - isSameSet(b.left + phi_psi_for_q, ref(t1).left ++ psiIffTau + phi_tau_for_q) - ) - SCValidProof(SCProof(step)) - else - SCInvalidProof( - SCProof(step), - Nil, - "Left-hand sides of the conclusion + φ(ψ_) must be the same as left-hand side of the premise + (ψ⇔τ)_ + φ(τ_) (or with ψ and τ swapped)." - ) + val inner1 = vars.foldLeft(psi)(_(_)) + val inner2 = vars.foldLeft(tau)(_(_)) + val sEqt = iff(inner1)(inner2) + val varss = vars.toSet + + if ( + isSubset(ref(t1).right, b.right + phi_s_for_f) && + isSubset(ref(t2).right, b.right + sEqt) && + isSubset(b.right, ref(t1).right union ref(t2).right + phi_t_for_f) + ) { + if (isSameSet(b.left, ref(t1).left union ref(t2).left)) { + if ( + ref(t2).left.exists(f => f.freeVariables.intersect(varss).nonEmpty) || + ref(t2).right.exists(f => !isSame(f, sEqt) && f.freeVariables.intersect(varss).nonEmpty) + ) { + SCInvalidProof(SCProof(step), Nil, "The variable x1...xn must not be free in the second premise other than as parameters of the equality.") + } else SCValidProof(SCProof(step)) + } + else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of the conclusion + φ(s_) must be the same as left-hand side of the premise + (s=t)_ + φ(t_).") + } else SCInvalidProof(SCProof(step), Nil, "Right-hand sides of the premise and the conclusion aren't the same.") } - /* - * Γ |- φ[ψ/?p], Δ - * --------------------- - * Γ, ψ⇔τ |- φ[τ/?p], Δ + /* + * Γ, φ(ψ) |- Δ Σ |- a⇔b, Π + * -------------------------------- + * Γ, Σ φ(b) |- Δ, Π */ - case RightSubstIff(b, t1, equals, lambdaPhi) => - val (psi_s, tau_s) = equals.unzip - val (phi_args, phi_body) = lambdaPhi - if (phi_args.size != psi_s.size) - SCInvalidProof(SCProof(step), Nil, "The number of arguments of φ must be the same as the number of equalities.") - else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) - SCInvalidProof(SCProof(step), Nil, "The arities of symbols in φ must be the same as the arities of equalities.") - else { - val phi_psi_for_q = instantiatePredicateSchemas(phi_body, (phi_args zip psi_s).toMap) - val phi_tau_for_q = instantiatePredicateSchemas(phi_body, (phi_args zip tau_s).toMap) - val psiIffTau = equals map { - case (s, t) => - assert(s.vars.size == t.vars.size) - val base = ConnectorFormula(Iff, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(VariableTerm)))) - (s.vars).foldLeft(base: Formula) { case (acc, s_arg) => BinderFormula(Forall, s_arg, acc) } - } + case LeftSubstIff(b, t1, t2, psi, tau, vars, lambdaPhi) => + val (phi_arg, phi_body) = lambdaPhi + if (psi.sort != phi_arg.sort || tau.sort != phi_arg.sort) + SCInvalidProof(SCProof(step), Nil, "The types of the variable of φ must be the same as the types of ψ and τ.") + else /*if (!psi.sort.isPredicate) + 
SCInvalidProof(SCProof(step), Nil, "Can only substitute predicate-like terms (with type Ind -> ... -> Ind -> Prop)") + else */{ + val phi_s_for_f = substituteVariables(phi_body, Map(phi_arg -> psi)) + val phi_t_for_f = substituteVariables(phi_body, Map(phi_arg -> tau)) + + val inner1 = vars.foldLeft(psi)(_(_)) + val inner2 = vars.foldLeft(tau)(_(_)) + val sEqt = iff(inner1)(inner2) + val varss = vars.toSet - if (isSameSet(ref(t1).left ++ psiIffTau, b.left)) + if ( + isSubset(ref(t1).right, b.right) && + isSubset(ref(t2).right, b.right + sEqt) && + isSubset(b.right, ref(t1).right union ref(t2).right) + ) { if ( - isSameSet(b.right + phi_tau_for_q, ref(t1).right + phi_psi_for_q) || - isSameSet(b.right + phi_psi_for_q, ref(t1).right + phi_tau_for_q) - ) - SCValidProof(SCProof(step)) - else - SCInvalidProof( - SCProof(step), - Nil, - "Right-hand side of the premise and the conclusion should be the same with each containing one of φ[τ/?q] and φ[ψ/?q], but it isn't the case." - ) - else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of the premise + ψ⇔τ must be the same as left-hand side of the premise.") + isSubset(ref(t1).left, b.left + phi_s_for_f) && + isSubset(ref(t2).left, b.left) && + isSubset(b.left, ref(t1).left union ref(t2).left + phi_t_for_f) + ) { + if ( + ref(t2).left.exists(f => f.freeVariables.intersect(varss).nonEmpty) || + ref(t2).right.exists(f => !isSame(f, sEqt) && f.freeVariables.intersect(varss).nonEmpty) + ) { + SCInvalidProof(SCProof(step), Nil, "The variable x1...xn must not be free in the second premise other than as parameters of the equality.") + } else SCValidProof(SCProof(step)) + } + else SCInvalidProof(SCProof(step), Nil, "Left-hand sides of the conclusion + φ(s_) must be the same as left-hand side of the premise + (s=t)_ + φ(t_).") + } + else SCInvalidProof(SCProof(step), Nil, "Right-hand sides of the premise and the conclusion aren't the same.") } - - +*/ /** *
@@ -506,9 +601,9 @@ object SCProofChecker { * Γ[ψ/?p] |- Δ[ψ/?p] **/ - case InstSchema(bot, t1, mCon, mPred, mTerm) => + case InstSchema(bot, t1, subst) => val expected = - (ref(t1).left.map(phi => instantiateSchemas(phi, mCon, mPred, mTerm)), ref(t1).right.map(phi => instantiateSchemas(phi, mCon, mPred, mTerm))) + (ref(t1).left.map(phi => substituteVariables(phi, subst)), ref(t1).right.map(phi => substituteVariables(phi, subst))) if (isSameSet(bot.left, expected._1)) if (isSameSet(bot.right, expected._2)) SCValidProof(SCProof(step)) @@ -564,4 +659,4 @@ object SCProofChecker { else possibleError.get } -} +} \ No newline at end of file diff --git a/lisa-kernel/src/main/scala/lisa/kernel/proof/SequentCalculus.scala b/lisa-kernel/src/main/scala/lisa/kernel/proof/SequentCalculus.scala index b84d84b44..254b8d9bf 100644 --- a/lisa-kernel/src/main/scala/lisa/kernel/proof/SequentCalculus.scala +++ b/lisa-kernel/src/main/scala/lisa/kernel/proof/SequentCalculus.scala @@ -21,12 +21,27 @@ object SequentCalculus { * @param left the left side of the sequent * @param right the right side of the sequent */ - case class Sequent(left: Set[Formula], right: Set[Formula]) + case class Sequent(left: Set[Expression], right: Set[Expression]){ + require(left.forall(_.sort == Prop) && right.forall(_.sort == Prop), "Sequent can only contain formulas") + } /** * Simple method that transforms a sequent to a logically equivalent formula. */ - def sequentToFormula(s: Sequent): Formula = ConnectorFormula(Implies, List(ConnectorFormula(And, s.left.toSeq), ConnectorFormula(Or, s.right.toSeq))) + def sequentToFormula(s: Sequent): Expression = { + val left = { + if (s.left.isEmpty) top + else if (s.left.size == 1) s.left.head + else s.left.reduce(and(_)(_)) + } + val right ={ + if (s.right.isEmpty) bot + else if (s.right.size == 1) s.right.head + else s.right.reduce(or(_)(_)) + } + if (s.left.isEmpty) right + else implies(left)(right) + } /** * Checks whether two sequents are equivalent, with respect to [[isSameTerm]]. @@ -61,265 +76,261 @@ object SequentCalculus { } /** - *
+ * {{{ * Γ |- Δ * ------------ * Γ |- Δ (OL rewrite) - *+ * }}} */ case class Restate(bot: Sequent, t1: Int) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * * ------------ * Γ |- Γ (OL tautology) - *+ * }}} */ case class RestateTrue(bot: Sequent) extends SCProofStep { val premises = Seq() } /** - *
+ * {{{ * * -------------- * Γ, φ |- φ, Δ - *+ * }}} */ - case class Hypothesis(bot: Sequent, phi: Formula) extends SCProofStep { val premises = Seq() } + case class Hypothesis(bot: Sequent, phi: Expression) extends SCProofStep { val premises = Seq() } /** - *
+ * {{{ * Γ |- Δ, φ φ, Σ |- Π * ------------------------ * Γ, Σ |-Δ, Π - *+ * }}} */ - case class Cut(bot: Sequent, t1: Int, t2: Int, phi: Formula) extends SCProofStep { val premises = Seq(t1, t2) } + case class Cut(bot: Sequent, t1: Int, t2: Int, phi: Expression) extends SCProofStep { val premises = Seq(t1, t2) } // Left rules /** - *
+ * {{{ * Γ, φ |- Δ Γ, φ, ψ |- Δ * -------------- or -------------- * Γ, φ∧ψ |- Δ Γ, φ∧ψ |- Δ - *+ * }}} */ - case class LeftAnd(bot: Sequent, t1: Int, phi: Formula, psi: Formula) extends SCProofStep { val premises = Seq(t1) } + case class LeftAnd(bot: Sequent, t1: Int, phi: Expression, psi: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ, φ |- Δ Σ, ψ |- Π ... * -------------------------------- * Γ, Σ, φ∨ψ∨... |- Δ, Π - *+ * }}} */ - case class LeftOr(bot: Sequent, t: Seq[Int], disjuncts: Seq[Formula]) extends SCProofStep { val premises = t } + case class LeftOr(bot: Sequent, t: Seq[Int], disjuncts: Seq[Expression]) extends SCProofStep { val premises = t } /** - *
+ * {{{ * Γ |- φ, Δ Σ, ψ |- Π * ------------------------ * Γ, Σ, φ⇒ψ |- Δ, Π - *+ * }}} */ - case class LeftImplies(bot: Sequent, t1: Int, t2: Int, phi: Formula, psi: Formula) extends SCProofStep { val premises = Seq(t1, t2) } + case class LeftImplies(bot: Sequent, t1: Int, t2: Int, phi: Expression, psi: Expression) extends SCProofStep { val premises = Seq(t1, t2) } /** - *
+ * {{{ * Γ, φ⇒ψ |- Δ Γ, φ⇒ψ, ψ⇒φ |- Δ * -------------- or -------------------- * Γ, φ⇔ψ |- Δ Γ, φ⇔ψ |- Δ - *+ * }}} */ - case class LeftIff(bot: Sequent, t1: Int, phi: Formula, psi: Formula) extends SCProofStep { val premises = Seq(t1) } + case class LeftIff(bot: Sequent, t1: Int, phi: Expression, psi: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ |- φ, Δ * -------------- * Γ, ¬φ |- Δ - *+ * }}} */ - case class LeftNot(bot: Sequent, t1: Int, phi: Formula) extends SCProofStep { val premises = Seq(t1) } + case class LeftNot(bot: Sequent, t1: Int, phi: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ, φ[t/x] |- Δ * ------------------- * Γ, ∀ φ |- Δ * - *+ * }}} */ - case class LeftForall(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel, t: Term) extends SCProofStep { val premises = Seq(t1) } + case class LeftForall(bot: Sequent, t1: Int, phi: Expression, x: Variable, t: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ, φ |- Δ * ------------------- if x is not free in the resulting sequent * Γ, ∃x φ|- Δ * - *- */ - case class LeftExists(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel) extends SCProofStep { val premises = Seq(t1) } - - /** - *
- * Γ, ∃y.∀x. (x=y) ⇔ φ |- Δ - * ---------------------------- if y is not free in φ - * Γ, ∃!x. φ |- Δ - *+ * }}} */ - case class LeftExistsOne(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel) extends SCProofStep { val premises = Seq(t1) } + case class LeftExists(bot: Sequent, t1: Int, phi: Expression, x: Variable) extends SCProofStep { val premises = Seq(t1) } // Right rules /** - *
+ * {{{ * Γ |- φ, Δ Σ |- ψ, Π ... * ------------------------------------ * Γ, Σ |- φ∧ψ∧..., Π, Δ - *+ * }}} */ - case class RightAnd(bot: Sequent, t: Seq[Int], cunjuncts: Seq[Formula]) extends SCProofStep { val premises = t } + case class RightAnd(bot: Sequent, t: Seq[Int], cunjuncts: Seq[Expression]) extends SCProofStep { val premises = t } /** - *
+ * {{{ * Γ |- φ, Δ Γ |- φ, ψ, Δ * -------------- or --------------- * Γ |- φ∨ψ, Δ Γ |- φ∨ψ, Δ - *+ * }}} */ - case class RightOr(bot: Sequent, t1: Int, phi: Formula, psi: Formula) extends SCProofStep { val premises = Seq(t1) } + case class RightOr(bot: Sequent, t1: Int, phi: Expression, psi: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ, φ |- ψ, Δ * -------------- * Γ |- φ⇒ψ, Δ - *+ * }}} */ - case class RightImplies(bot: Sequent, t1: Int, phi: Formula, psi: Formula) extends SCProofStep { val premises = Seq(t1) } + case class RightImplies(bot: Sequent, t1: Int, phi: Expression, psi: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ |- a⇒ψ, Δ Σ |- ψ⇒φ, Π * ---------------------------- * Γ, Σ |- φ⇔ψ, Π, Δ - *+ * }}} */ - case class RightIff(bot: Sequent, t1: Int, t2: Int, phi: Formula, psi: Formula) extends SCProofStep { val premises = Seq(t1, t2) } + case class RightIff(bot: Sequent, t1: Int, t2: Int, phi: Expression, psi: Expression) extends SCProofStep { val premises = Seq(t1, t2) } /** - *
+ * {{{ * Γ, φ |- Δ * -------------- * Γ |- ¬φ, Δ - *+ * }}} */ - case class RightNot(bot: Sequent, t1: Int, phi: Formula) extends SCProofStep { val premises = Seq(t1) } + case class RightNot(bot: Sequent, t1: Int, phi: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ |- φ, Δ * ------------------- if x is not free in the resulting sequent * Γ |- ∀x. φ, Δ - *+ * }}} */ - case class RightForall(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel) extends SCProofStep { val premises = Seq(t1) } + case class RightForall(bot: Sequent, t1: Int, phi: Expression, x: Variable) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * Γ |- φ[t/x], Δ * ------------------- * Γ |- ∃x. φ, Δ * * (ln-x stands for locally nameless x) - *+ * }}} */ - case class RightExists(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel, t: Term) extends SCProofStep { val premises = Seq(t1) } + case class RightExists(bot: Sequent, t1: Int, phi: Expression, x: Variable, t: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
- * Γ |- ∃y.∀x. (x=y) ⇔ φ, Δ - * ---------------------------- if y is not free in φ - * Γ|- ∃!x. φ, Δ - *+ * {{{ + * Γ |- φ[t/x], Δ + * -------------------------- if y is not free in φ + * Γ|- φ[(εx. φ)/x], Δ + * }}} */ - case class RightExistsOne(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel) extends SCProofStep { val premises = Seq(t1) } + case class RightEpsilon(bot: Sequent, t1: Int, phi: Expression, x: Variable, t: Expression) extends SCProofStep { val premises = Seq(t1) } // Structural rule /** - *
+ * {{{ * Γ |- Δ * -------------- * Γ, Σ |- Δ, Π - *+ * }}} */ case class Weakening(bot: Sequent, t1: Int) extends SCProofStep { val premises = Seq(t1) } + + /** + * {{{ + * Γ |- φ[(λy. e)t/x], Δ + * --------------------------- + * Γ |- φ[e[t/y]/x], Δ + * }}} + */ + @deprecated + case class Beta(bot: Sequent, t1: Int) extends SCProofStep { val premises = Seq(t1) } + + + // Equality Rules /** - *
+ * {{{ * Γ, s=s |- Δ * -------------- * Γ |- Δ - *+ * }}} */ - case class LeftRefl(bot: Sequent, t1: Int, fa: Formula) extends SCProofStep { val premises = Seq(t1) } + case class LeftRefl(bot: Sequent, t1: Int, fa: Expression) extends SCProofStep { val premises = Seq(t1) } /** - *
+ * {{{ * * -------------- * |- s=s - *+ * }}} */ - case class RightRefl(bot: Sequent, fa: Formula) extends SCProofStep { val premises = Seq() } + case class RightRefl(bot: Sequent, fa: Expression) extends SCProofStep { val premises = Seq() } /** - *
- * Γ, φ(s1,...,sn) |- Δ - * --------------------- - * Γ, s1=t1, ..., sn=tn, φ(t1,...tn) |- Δ - *+ * {{{ + * Γ, φ(s) |- Δ + * ----------------------------------------------------- + * Γ, ∀x,...,z. (s x ... z)=(t x ... z), φ(t) |- Δ + * }}} + * equals elements must have type ... -> ... -> Ind */ - case class LeftSubstEq(bot: Sequent, t1: Int, equals: List[(LambdaTermTerm, LambdaTermTerm)], lambdaPhi: (Seq[SchematicTermLabel], Formula)) extends SCProofStep { val premises = Seq(t1) } + case class LeftSubstEq(bot: Sequent, t1: Int, equals: Seq[(Expression, Expression)], lambdaPhi: (Seq[Variable], Expression)) extends SCProofStep { val premises = Seq(t1) } /** - *
- * Γ |- φ(s1,...,sn), Δ - * --------------------- - * Γ, s1=t1, ..., sn=tn |- φ(t1,...tn), Δ - *+ * {{{ + * Γ |- φ(s), Δ + * ------------------------------------------------------ + * Γ, ∀x,...,z. (s x ... z)=(t x ... z) |- φ(t), Δ + * }}} */ - case class RightSubstEq(bot: Sequent, t1: Int, equals: List[(LambdaTermTerm, LambdaTermTerm)], lambdaPhi: (Seq[SchematicTermLabel], Formula)) extends SCProofStep { val premises = Seq(t1) } + case class RightSubstEq(bot: Sequent, t1: Int, equals: Seq[(Expression, Expression)], lambdaPhi: (Seq[Variable], Expression)) extends SCProofStep { val premises = Seq(t1) } - /** - *
- * Γ, φ(a1,...an) |- Δ - * --------------------- - * Γ, a1⇔b1, ..., an⇔bn, φ(b1,...bn) |- Δ - *- */ - case class LeftSubstIff(bot: Sequent, t1: Int, equals: List[(LambdaTermFormula, LambdaTermFormula)], lambdaPhi: (Seq[SchematicAtomicLabel], Formula)) extends SCProofStep { val premises = Seq(t1) } + object LeftSubstIff { + def apply(bot: Sequent, t1: Int, equals: Seq[(Expression, Expression)], lambdaPhi: (Seq[Variable], Expression)): LeftSubstEq = { + new LeftSubstEq(bot, t1, equals, lambdaPhi) + } + } - /** - *
- * Γ |- φ(a1,...an), Δ - * --------------------- - * Γ, a1⇔b1, ..., an⇔bn |- φ(b1,...bn), Δ - *- */ - - case class RightSubstIff(bot: Sequent, t1: Int, equals: List[(LambdaTermFormula, LambdaTermFormula)], lambdaPhi: (Seq[SchematicAtomicLabel], Formula)) extends SCProofStep { val premises = Seq(t1) } + object RightSubstIff { + def apply(bot: Sequent, t1: Int, equals: Seq[(Expression, Expression)], lambdaPhi: (Seq[Variable], Expression)): RightSubstEq = { + new RightSubstEq(bot, t1, equals, lambdaPhi) + } + } // Rule for schemas case class InstSchema( bot: Sequent, t1: Int, - mCon: Map[SchematicConnectorLabel, LambdaFormulaFormula], - mPred: Map[SchematicAtomicLabel, LambdaTermFormula], - mTerm: Map[SchematicTermLabel, LambdaTermTerm] + subst: Map[Variable, Expression] ) extends SCProofStep { val premises = Seq(t1) } // Proof Organisation rules @@ -337,12 +348,12 @@ object SequentCalculus { } /** - *
+ * {{{ * * -------------- * Γ |- Δ - *+ * }}} */ case class Sorry(bot: Sequent) extends SCProofStep { val premises = Seq() } -} +} \ No newline at end of file diff --git a/lisa-sets/src/main/scala/lisa/Main.scala b/lisa-sets/src/main/scala/lisa/Main.scala index c96ac042b..28c59a912 100644 --- a/lisa-sets/src/main/scala/lisa/Main.scala +++ b/lisa-sets/src/main/scala/lisa/Main.scala @@ -1,17 +1,17 @@ package lisa import lisa.SetTheoryLibrary -import lisa.prooflib.BasicMain +import lisa.utils.prooflib.BasicMain /** * The parent trait of all theory files containing mathematical development */ trait Main extends BasicMain { - export lisa.fol.FOL.{*, given} + export lisa.utils.fol.FOL.{*, given} export SetTheoryLibrary.{given, _} - export lisa.prooflib.BasicStepTactic.* - export lisa.prooflib.SimpleDeducedSteps.* + export lisa.utils.prooflib.BasicStepTactic.* + export lisa.utils.prooflib.SimpleDeducedSteps.* export lisa.automation.Tautology export lisa.automation.Substitution @@ -25,7 +25,7 @@ trait Main extends BasicMain { knownDefs.update(powerSet, Some(powerAxiom)) knownDefs.update(subset, Some(subsetAxiom)) - extension (symbol: ConstantLabel[?]) { + extension (symbol: Constant[?]) { def definition: JUSTIFICATION = { getDefinition(symbol).get } diff --git a/lisa-sets/src/main/scala/lisa/SetTheoryLibrary.scala b/lisa-sets/src/main/scala/lisa/SetTheoryLibrary.scala index addae4c89..2685c4eb3 100644 --- a/lisa-sets/src/main/scala/lisa/SetTheoryLibrary.scala +++ b/lisa-sets/src/main/scala/lisa/SetTheoryLibrary.scala @@ -1,13 +1,13 @@ package lisa -import lisa.fol.FOL.{_, given} +import lisa.utils.fol.FOL.{_, given} import lisa.kernel.proof.RunningTheory -import lisa.prooflib.Library +import lisa.utils.prooflib.Library /** * Specific implementation of [[utilities.Library]] for Set Theory, with a RunningTheory that is supposed to be used by the standard library. */ -object SetTheoryLibrary extends lisa.prooflib.Library { +object SetTheoryLibrary extends lisa.utils.prooflib.Library { val theory = new RunningTheory() @@ -15,17 +15,17 @@ object SetTheoryLibrary extends lisa.prooflib.Library { /** * The symbol for the set membership predicate. */ - final val in = ConstantPredicateLabel("elem", 2) + final val in = constant[Term >>: Term >>: Formula]("elem") /** * The symbol for the subset predicate. */ - final val subset = ConstantPredicateLabel("subsetOf", 2) + final val subset = constant[Term >>: Term >>: Formula]("subsetOf") /** * The symbol for the equicardinality predicate. Needed for Tarski's axiom. */ - final val sim = ConstantPredicateLabel("sameCardinality", 2) // Equicardinality + final val sim = constant[Term >>: Term >>: Formula]("sameCardinality") // Equicardinality /** * Set Theory basic predicates */ @@ -36,33 +36,34 @@ object SetTheoryLibrary extends lisa.prooflib.Library { /** * The symbol for the empty set constant. */ - final val emptySet = Constant("emptySet") + final val emptySet = constant[Term ]("emptySet") /** * The symbol for the unordered pair function. */ - final val unorderedPair = ConstantFunctionLabel("unorderedPair", 2) + final val unorderedPair = constant[Term >>: Term >>: Term]("unorderedPair") /** * The symbol for the powerset function. */ - final val powerSet = ConstantFunctionLabel("powerSet", 1) + final val powerSet = constant[Term >>: Term]("powerSet") /** * The symbol for the set union function. */ - final val union = ConstantFunctionLabel("union", 1) + final val union = constant[Term >>: Term]("union") /** * The symbol for the universe function. 
Defined in TG set theory. */ - final val universe = ConstantFunctionLabel("universe", 1) + final val universe = constant[Term >>: Term]("universe") /** * Set Theory basic functions. */ final val functions = Set(unorderedPair, powerSet, union, universe) + /** * The kernel theory loaded with Set Theory symbols and axioms. */ @@ -73,13 +74,13 @@ object SetTheoryLibrary extends lisa.prooflib.Library { functions.foreach(s => addSymbol(s)) addSymbol(emptySet) - private val x = variable - private val y = variable - private val z = variable - final val φ = predicate[1] - private val A = variable - private val B = variable - private val P = predicate[2] + private val x = variable[Term] + private val y = variable[Term] + private val z = variable[Term] + final val φ = variable[Term >>: Formula] + private val A = variable[Term] + private val B = variable[Term] + private val P = variable[Term >>: Term >>: Formula] //////////// // Axioms // @@ -94,7 +95,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * * `() |- (x = y) ⇔ ∀ z. z ∈ x ⇔ z ∈ y` */ - final val extensionalityAxiom: this.AXIOM = Axiom(forall(z, in(z, x) <=> in(z, y)) <=> (x === y)) + final val extensionalityAxiom: this.AXIOM = Axiom(forall(z, (z ∈ x) <=> (z ∈ y)) <=> (x === y)) /** * Pairing Axiom --- For any sets `x` and `y`, there is a set that contains @@ -106,7 +107,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * This axiom defines [[unorderedPair]] as the function symbol representing * this set. */ - final val pairAxiom: AXIOM = Axiom(in(z, unorderedPair(x, y)) <=> (x === z) \/ (y === z)) + final val pairAxiom: AXIOM = Axiom(z ∈ unorderedPair(x, y) <=> (x === z) \/ (y === z)) /** * Comprehension/Separation Schema --- For a formula `ϕ(_, _)` and a set `z`, @@ -119,7 +120,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * This schema represents an infinite collection of axioms, one for each * formula `ϕ(x, z)`. */ - final val comprehensionSchema: AXIOM = Axiom(exists(y, forall(x, in(x, y) <=> (in(x, z) /\ φ(x))))) + final val comprehensionSchema: AXIOM = Axiom(exists(y, forall(x, (x ∈ y) <=> ((x ∈ z) /\ φ(x))))) /** * Empty Set Axiom --- From the Comprehension Schema follows the existence of @@ -131,7 +132,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * * `() |- !(x ∈ ∅)` */ - final val emptySetAxiom: AXIOM = Axiom(!in(x, emptySet)) + final val emptySetAxiom: AXIOM = Axiom(!(x ∈ emptySet)) /** * Union Axiom --- For any set `x`, there exists a set `union(x)` which is the @@ -144,7 +145,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * * This axiom defines [[union]] as the function symbol representing this set. */ - final val unionAxiom: AXIOM = Axiom(in(z, union(x)) <=> exists(y, in(y, x) /\ in(z, y))) + final val unionAxiom: AXIOM = Axiom(z ∈ union(x) <=> exists(y, (y ∈ x) /\ (z ∈ y))) /** * Subset Axiom --- For sets `x` and `y`, `x` is a subset of `y` iff every @@ -154,7 +155,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * * This axiom defines the [[subset]] symbol as this predicate. */ - final val subsetAxiom: AXIOM = Axiom(subset(x, y) <=> forall(z, in(z, x) ==> in(z, y))) + final val subsetAxiom: AXIOM = Axiom((x ⊆ y) <=> forall(z, (z ∈ x) ==> (z ∈ y))) /** * Power Set Axiom --- For a set `x`, there exists a power set of `x`, denoted @@ -165,7 +166,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * This axiom defines [[powerSet]] as the function symbol representing this * set. 
*/ - final val powerAxiom: AXIOM = Axiom(in(x, powerSet(y)) <=> subset(x, y)) + final val powerAxiom: AXIOM = Axiom(x ∈ powerSet(y) <=> x ⊆ y) /** * Infinity Axiom --- There exists an infinite set. @@ -180,7 +181,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * * `() |- ∃ x. inductive(x)` */ - final val infinityAxiom: AXIOM = Axiom(exists(x, in(emptySet, x) /\ forall(y, in(y, x) ==> in(union(unorderedPair(y, unorderedPair(y, y))), x)))) + final val infinityAxiom: AXIOM = Axiom(exists(x, emptySet ∈ x /\ forall(y, (y ∈ x) ==> union(unorderedPair(y, unorderedPair(y, y))) ∈ x ))) /** * Foundation/Regularity Axiom --- Every non-empty set `x` has an `∈`-minimal @@ -189,7 +190,7 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * * `() |- (x != ∅) ==> ∃ y ∈ x. ∀ z. z ∈ x ⇒ ! z ∈ y` */ - final val foundationAxiom: AXIOM = Axiom(!(x === emptySet) ==> exists(y, in(y, x) /\ forall(z, in(z, x) ==> !in(z, y)))) + final val foundationAxiom: AXIOM = Axiom(!(x === emptySet) ==> exists(y, (y ∈ x) /\ forall(z, (z ∈ x) ==> !(z ∈ y)))) // ZF ///////// @@ -201,18 +202,18 @@ object SetTheoryLibrary extends lisa.prooflib.Library { * satisfy `P` for each `a ∈ x`. */ final val replacementSchema: AXIOM = Axiom( - forall(x, in(x, A) ==> ∀(y, ∀(z, (P(x, y) /\ P(x, z)) ==> (y === z)))) ==> - exists(B, forall(y, in(y, B) <=> exists(x, in(x, A) /\ P(x, y)))) + forall(x, (x ∈ A) ==> ∀(y, ∀(z, (P(x)(y) /\ P(x)(z)) ==> (y === z)))) ==> + exists(B, forall(y, (y ∈ B) <=> exists(x, (x ∈ A) /\ P(x)(y)))) ) final val tarskiAxiom: AXIOM = Axiom( forall( x, - in(x, universe(x)) /\ + (x ∈ universe(x)) /\ forall( y, - in(y, universe(x)) ==> (in(powerSet(y), universe(x)) /\ subset(powerSet(y), universe(x))) /\ - forall(z, subset(z, universe(x)) ==> (sim(y, universe(x)) /\ in(y, universe(x)))) + (y ∈ universe(x)) ==> ((powerSet(y) ∈ universe(x)) /\ (powerSet(y) ⊆ universe(x))) /\ + forall(z, (z ⊆ universe(x)) ==> (sim(y)(universe(x)) /\ (y ∈ universe(x)))) ) ) ) @@ -245,12 +246,12 @@ object SetTheoryLibrary extends lisa.prooflib.Library { val ∅ = emptySet val ∈ = in - extension (thi: Term) { - def ∈(that: Term): Formula = in(thi, that) - def ⊆(that: Term): Formula = subset(thi, that) + extension (l: Term) + def ∈(r: Term): Formula = in(l)(r) + def ⊆(r: Term): Formula = subset(l)(r) + def =/=(r: Term): Formula = !(l === r) - def =/=(that: Term): Formula = !(thi === that) - } + def unorderedPair(x: Term, y: Term): Term = App(App(unorderedPair, x), y) } diff --git a/lisa-sets/src/main/scala/lisa/automation/Apply.scala b/lisa-sets/src/main/scala/lisa/automation/Apply.scala index eb3896284..0f11127a9 100644 --- a/lisa-sets/src/main/scala/lisa/automation/Apply.scala +++ b/lisa-sets/src/main/scala/lisa/automation/Apply.scala @@ -1,10 +1,10 @@ package lisa.automation -import lisa.fol.FOL.* -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.ProofTacticLib.* -import lisa.prooflib.SimpleDeducedSteps.* -import lisa.prooflib.* +import lisa.utils.fol.FOL.* +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* import lisa.utils.unification.UnificationUtils.* import scala.util.boundary diff --git a/lisa-sets/src/main/scala/lisa/automation/CommonTactics.scala b/lisa-sets/src/main/scala/lisa/automation/CommonTactics.scala index 2d2b8219d..ac035a24f 100644 --- a/lisa-sets/src/main/scala/lisa/automation/CommonTactics.scala +++ b/lisa-sets/src/main/scala/lisa/automation/CommonTactics.scala @@ -1,15 +1,15 @@ 
package lisa.automation.kernel import lisa.automation.Tautology -import lisa.fol.FOLHelpers.* -import lisa.fol.FOL as F -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.ProofTacticLib.{_, given} -import lisa.prooflib.SimpleDeducedSteps.* -import lisa.prooflib.* +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.{_, given} +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* import lisa.utils.K object CommonTactics { + /* /** *
@@ -185,7 +185,7 @@ object CommonTactics { TacticSubproof { lib.have(F.∀(y, (y === fxs) <=> P)) by Tautology.from(uniqueness, definition.of(subst*)) lib.thenHave((y === fxs) <=> P) by InstantiateForall(y) - lib.thenHave((fxs === fxs) <=> P.substitute(y := fxs)) by InstFunSchema(Map(y -> fxs)) + lib.thenHave((fxs === fxs) <=> P.substitute(y := fxs)) by InstSchema(Map(y -> fxs)) lib.thenHave(P.substitute(y := fxs)) by Restate } @@ -248,7 +248,7 @@ object CommonTactics { TacticSubproof { lib.have(F.∀(y, (y === fxs) <=> P)) by Tautology.from(uniqueness, definition.of(subst*)) lib.thenHave((y === fxs) <=> P) by InstantiateForall(y) - lib.thenHave((fxs === fxs) <=> P.substitute(y := fxs)) by InstFunSchema(Map(y -> fxs)) + lib.thenHave((fxs === fxs) <=> P.substitute(y := fxs)) by InstSchema(Map(y -> fxs)) lib.thenHave(P.substitute(y := fxs)) by Restate lib.thenHave(phi ==> Q(fxs)) by Tautology lib.thenHave(phi |- Q(fxs)) by Restate @@ -258,5 +258,5 @@ object CommonTactics { } } } - +*/ } diff --git a/lisa-sets/src/main/scala/lisa/automation/Congruence.scala b/lisa-sets/src/main/scala/lisa/automation/Congruence.scala index 8c60de410..1b4a39bf7 100644 --- a/lisa-sets/src/main/scala/lisa/automation/Congruence.scala +++ b/lisa-sets/src/main/scala/lisa/automation/Congruence.scala @@ -1,11 +1,11 @@ package lisa.automation -import lisa.fol.FOL.{*, given} -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.ProofTacticLib.* -import lisa.prooflib.SimpleDeducedSteps.* -import lisa.prooflib.* -import lisa.utils.parsing.UnreachableException -import leo.datastructures.TPTP.CNF.AtomicFormula +import lisa.utils.fol.FOL.{*, given} +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* +import lisa.utils.K +import leo.datastructures.TPTP.AnnotatedFormula.FormulaType /** * This tactic tries to prove a sequent by congruence. 
@@ -27,79 +27,79 @@ import leo.datastructures.TPTP.CNF.AtomicFormula * */ object Congruence extends ProofTactic with ProofSequentTactic { - def apply(using lib: Library, proof: lib.Proof)(bot: Sequent): proof.ProofTacticJudgement = TacticSubproof { - import lib.* + def apply(using lib: Library, proof: lib.Proof)(bot: Sequent): proof.ProofTacticJudgement = TacticSubproof { + import lib.* - val egraph = new EGraphTerms() - egraph.addAll(bot.left) - egraph.addAll(bot.right) + val egraph = new EGraphExpr() + egraph.addAll(bot.left) + egraph.addAll(bot.right) - bot.left.foreach{ - case (left === right) => egraph.merge(left, right) - case (left <=> right) => egraph.merge(left, right) - case _ => () + bot.left.foreach{ + case (left === right) => egraph.merge(left, right) + case (left <=> right) => egraph.merge(left, right) + case _ => () + } + + if isSameSequent(bot, ⊤) then + have(bot) by Restate + else if bot.left.exists { lf => + bot.right.exists { rf => + if egraph.idEq(lf, rf) then + val base = have(bot.left |- (bot.right + lf) ) by Restate + val eq = have(egraph.proveExpr(lf, rf, bot)) + val a = variable[Formula] + have((bot.left + (lf <=> rf)) |- (bot.right) ) by RightSubstIff.withParameters(Seq((lf, rf)), (Seq(a), a))(base) + have(bot) by Cut(eq, lastStep) + true + else false + } || + bot.left.exists{ + case rf2 @ neg(rf) if egraph.idEq(lf, rf)=> + val base = have((bot.left + !lf) |- bot.right ) by Restate + val eq = have(egraph.proveExpr(lf, rf, bot)) + val a = variable[Formula] + have((bot.left + (lf <=> rf)) |- (bot.right) ) by LeftSubstIff.withParameters(Seq((lf, rf)), (Seq(a), !a))(base) + have(bot) by Cut(eq, lastStep) + true + case _ => false + } || { + lf match + case !(a === b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case !(a <=> b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case _ => false } - if isSameSequent(bot, ⊤) then - have(bot) by Restate - else if bot.left.exists { lf => - bot.right.exists { rf => - if egraph.idEq(lf, rf) then - val base = have(bot.left |- (bot.right + lf) ) by Restate - val eq = have(egraph.proveFormula(lf, rf, bot)) - val a = formulaVariable - have((bot.left + (lf <=> rf)) |- (bot.right) ) by RightSubstIff.withParametersSimple(List((lf, rf)), lambda(a, a))(base) - have(bot) by Cut(eq, lastStep) - true - else false - } || - bot.left.exists{ - case rf2 @ Neg(rf) if egraph.idEq(lf, rf)=> - val base = have((bot.left + !lf) |- bot.right ) by Restate - val eq = have(egraph.proveFormula(lf, rf, bot)) - val a = formulaVariable - have((bot.left + (lf <=> rf)) |- (bot.right) ) by LeftSubstIff.withParametersSimple(List((lf, rf)), lambda(a, !a))(base) - have(bot) by Cut(eq, lastStep) - true - case _ => false - } || { - lf match - case !(a === b) if egraph.idEq(a, b) => - have(egraph.proveTerm(a, b, bot)) - true - case !(a <=> b) if egraph.idEq(a, b) => - have(egraph.proveFormula(a, b, bot)) - true - case _ => false - } + } then () + else if bot.right.exists { rf => + bot.right.exists{ + case lf2 @ neg(lf) if egraph.idEq(lf, rf)=> + val base = have((bot.left) |- (bot.right + !rf) ) by Restate + val eq = have(egraph.proveExpr(lf, rf, bot)) + val a = variable[Formula] + have((bot.left + (lf <=> rf)) |- (bot.right) ) by RightSubstIff.withParameters(Seq((lf, rf)), (Seq(a), !a))(base) + have(bot) by Cut(eq, lastStep) + true + case _ => false + } || { + rf match + case (a === b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case (a <=> b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, 
bot)) + true + case _ => false + } + } then () + else + return proof.InvalidProofTactic(s"No congruence found to show sequent\n $bot") + } - } then () - else if bot.right.exists { rf => - bot.right.exists{ - case lf2 @ Neg(lf) if egraph.idEq(lf, rf)=> - val base = have((bot.left) |- (bot.right + !rf) ) by Restate - val eq = have(egraph.proveFormula(lf, rf, bot)) - val a = formulaVariable - have((bot.left + (lf <=> rf)) |- (bot.right) ) by RightSubstIff.withParametersSimple(List((lf, rf)), lambda(a, !a))(base) - have(bot) by Cut(eq, lastStep) - true - case _ => false - } || { - rf match - case (a === b) if egraph.idEq(a, b) => - have(egraph.proveTerm(a, b, bot)) - true - case (a <=> b) if egraph.idEq(a, b) => - have(egraph.proveFormula(a, b, bot)) - true - case _ => false - } - } then () - else - return proof.InvalidProofTactic(s"No congruence found to show sequent\n $bot") - } - } @@ -114,8 +114,8 @@ class UnionFind[T] { var unionCounter = 0 /** - * add a new element to the union-find. - */ + * add a new element to the union-find. + */ def add(x: T): Unit = { parent(x) = x realParent(x) = (x, ((x, x), true, 0)) @@ -123,10 +123,10 @@ class UnionFind[T] { } /** - * - * @param x the element whose parent we want to find - * @return the root of x - */ + * + * @param x the element whose parent we want to find + * @return the root of x + */ def find(x: T): T = { if parent(x) == x then x @@ -142,8 +142,8 @@ class UnionFind[T] { } /** - * Merges the classes of x and y - */ + * Merges the classes of x and y + */ def union(x: T, y: T): Unit = { unionCounter += 1 val xRoot = find(x) @@ -185,9 +185,9 @@ class UnionFind[T] { } /** - * Returns a path from x to y made of pairs of elements (u, v) - * such that union(u, v) was called. - */ + * Returns a path from x to y made of pairs of elements (u, v) + * such that union(u, v) was called. 
+ */ def explain(x:T, y:T): Option[List[(T, T)]]= { if (x == y) then return Some(List()) @@ -220,291 +220,198 @@ class UnionFind[T] { /** - * Returns the set of all roots of all classes - */ + * Returns the set of all roots of all classes + */ def getClasses: Set[T] = parent.keys.map(find).toSet /** - * Add all elements in the collection to the union-find - */ + * Add all elements in the collection to the union-find + */ def addAll(xs: Iterable[T]): Unit = xs.foreach(add) } -/////////////////////////////// -///////// E-graph ///////////// -/////////////////////////////// + /////////////////////////////// + ///////// E-graph ///////////// + /////////////////////////////// import scala.collection.mutable -class EGraphTerms() { +class EGraphExpr() { - val termParentsT = mutable.Map[Term, mutable.Set[AppliedFunctional]]() - val termParentsF = mutable.Map[Term, mutable.Set[AppliedPredicate]]() - val termUF = new UnionFind[Term]() + val parents = mutable.Map[Expr[?], mutable.Set[Expr[?]]]() + val UF = new UnionFind[Expr[?]]() - val formulaParents = mutable.Map[Formula, mutable.Set[AppliedConnector]]() - val formulaUF = new UnionFind[Formula]() - def find(id: Term): Term = termUF.find(id) - def find(id: Formula): Formula = formulaUF.find(id) + def find[T](id: Expr[T]): Expr[T] = UF.find(id).asInstanceOf[Expr[T]] - trait TermStep - case class TermExternal(between: (Term, Term)) extends TermStep - case class TermCongruence(between: (Term, Term)) extends TermStep + trait Step + case class ExternalStep(between: (Expr[?], Expr[?])) extends Step + case class CongruenceStep(between: (Expr[?], Expr[?])) extends Step - trait FormulaStep - case class FormulaExternal(between: (Formula, Formula)) extends FormulaStep - case class FormulaCongruence(between: (Formula, Formula)) extends FormulaStep - val termProofMap = mutable.Map[(Term, Term), TermStep]() - val formulaProofMap = mutable.Map[(Formula, Formula), FormulaStep]() + val proofMap = mutable.Map[(Expr[?], Expr[?]), Step]() - def explain(id1: Term, id2: Term): Option[List[TermStep]] = { - val steps = termUF.explain(id1, id2) - steps.map(_.foldLeft((id1, List[TermStep]())) { + def explain(id1: Expr[?], id2: Expr[?]): Option[List[Step]] = { + val steps = UF.explain(id1, id2) + steps.map(_.foldLeft((id1, List[Step]())) { case ((prev, acc), step) => - termProofMap(step) match - case s @ TermExternal((l, r)) => + proofMap(step) match + case s @ ExternalStep((l, r)) => if l == prev then (r, s :: acc) else if r == prev then - (l, TermExternal(r, l) :: acc) + (l, ExternalStep(r, l) :: acc) else throw new Exception("Invalid proof recovered: It is not a chain") - case s @ TermCongruence((l, r)) => + case s @ CongruenceStep((l, r)) => if l == prev then (r, s :: acc) else if r == prev then - (l, TermCongruence(r, l) :: acc) + (l, CongruenceStep(r, l) :: acc) else throw new Exception("Invalid proof recovered: It is not a chain") }._2.reverse) } - def explain(id1: Formula, id2: Formula): Option[List[FormulaStep]] = { - val steps = formulaUF.explain(id1, id2) - steps.map(_.foldLeft((id1, List[FormulaStep]())) { - case ((prev, acc), step) => - formulaProofMap(step) match - case s @ FormulaExternal((l, r)) => - if l == prev then - (r, s :: acc) - else if r == prev then - (l, FormulaExternal(r, l) :: acc) - else throw new Exception("Invalid proof recovered: It is not a chain") - case s @ FormulaCongruence((l, r)) => - if l == prev then - (r, s :: acc) - else if r == prev then - (l, FormulaCongruence(r, l) :: acc) - else throw new Exception("Invalid proof recovered: It is 
not a chain") - }._2.reverse) - } - - - def makeSingletonEClass(node:Term): Term = { - termUF.add(node) - termParentsT(node) = mutable.Set() - termParentsF(node) = mutable.Set() - node - } - def makeSingletonEClass(node:Formula): Formula = { - formulaUF.add(node) - formulaParents(node) = mutable.Set() + def makeSingletonEClass(node:Expr[?]): Expr[?] = { + UF.add(node) + parents(node) = mutable.Set() node } - def idEq(id1: Term, id2: Term): Boolean = find(id1) == find(id2) - def idEq(id1: Formula, id2: Formula): Boolean = find(id1) == find(id2) + def idEq(id1: Expr[?], id2: Expr[?]): Boolean = find(id1) == find(id2) - - def canonicalize(node: Term): Term = node match - case AppliedFunctional(label, args) => - AppliedFunctional(label, args.map(t => find(t))) + def canonicalize(node: Expr[?]) : Expr[?] = node match + case App(f, a) => App.unsafe(canonicalize(f), find(a)) case _ => node - - - def canonicalize(node: Formula): Formula = { - node match - case AppliedPredicate(label, args) => AppliedPredicate(label, args.map(find)) - case AppliedConnector(label, args) => AppliedConnector(label, args.map(find)) - case node => node - } - def add(node: Term): Term = - if termUF.parent.contains(node) then return node - makeSingletonEClass(node) - codes(node) = codes.size - node match - case node @ AppliedFunctional(_, args) => - args.foreach(child => - add(child) - termParentsT(find(child)).add(node) - ) - case _ => () - termSigs(canSig(node)) = node - node - - def add(node: Formula): Formula = - if formulaUF.parent.contains(node) then return node - makeSingletonEClass(node) - node match - case node @ AppliedPredicate(_, args) => - args.foreach(child => - add(child) - termParentsF(find(child)).add(node) - ) - node - case node @ AppliedConnector(_, args) => - args.foreach(child => - add(child) - formulaParents(find(child)).add(node) - ) - node - case _ => node + + + def add(node: Expr[?]): Expr[?] 
= + if codes.contains(node) then node + else + codes(node) = codes.size + if node.sort == K.Term || node.sort == K.Formula then + makeSingletonEClass(node) + node match + case Multiapp(f, args) => + args.foreach(child => + add(child) + parents(find(child)).add(node) + ) + mapSigs(canSig(node)) = node + node def addAll(nodes: Iterable[Term|Formula]): Unit = nodes.foreach{ - case node: Term => add(node) - case node: Formula => add(node) + case node: Term if node.sort == K.Term => add(node) + case node: Formula if node.sort == K.Formula => add(node) } - - def merge(id1: Term, id2: Term): Unit = { - mergeWithStep(id1, id2, TermExternal((id1, id2))) - } - def merge(id1: Formula, id2: Formula): Unit = { - mergeWithStep(id1, id2, FormulaExternal((id1, id2))) + def merge[S](id1: Expr[S], id2: Expr[S]): Unit = { + mergeWithStep(id1, id2, ExternalStep((id1, id2))) } - type Sig = (TermLabel[?]|Term, List[Int]) - val termSigs = mutable.Map[Sig, Term]() - val codes = mutable.Map[Term, Int]() + def mergeUnsafe(id1: Expr[?], id2: Expr[?]): Unit = { + mergeWithStep(id1, id2, ExternalStep((id1, id2))) + } + + type Sig = (Expr[?], List[Int]) + val mapSigs = mutable.Map[Sig, Expr[?]]() + val codes = mutable.Map[Expr[?], Int]() - def canSig(node: Term): Sig = node match - case AppliedFunctional(label, args) => + def canSig(node: Expr[?]): Sig = node match + case Multiapp(label, args) => (label, args.map(a => codes(find(a))).toList) case _ => (node, List()) - protected def mergeWithStep(id1: Term, id2: Term, step: TermStep): Unit = { + protected def mergeWithStep(id1: Expr[?], id2: Expr[?], step: Step): Unit = { + if id1.sort != id2.sort then throw new IllegalArgumentException("Cannot merge nodes of different sorts") if find(id1) == find(id2) then () else - termProofMap((id1, id2)) = step - val parentsT1 = termParentsT(find(id1)) - val parentsF1 = termParentsF(find(id1)) - - val parentsT2 = termParentsT(find(id2)) - val parentsF2 = termParentsF(find(id2)) - val preSigs : Map[Term, Sig] = parentsT1.map(t => (t, canSig(t))).toMap - codes(find(id2)) = codes(find(id1)) //assume parents(find(id1)) >= parents(find(id2)) - termUF.union(id1, id2) - val newId = find(id1) - - val formulaSeen = mutable.Map[Formula, AppliedPredicate]() - var formWorklist = List[(Formula, Formula, FormulaStep)]() - var termWorklist = List[(Term, Term, TermStep)]() - - parentsT2.foreach { - case pTerm: AppliedFunctional => - val canonicalPTerm = canSig(pTerm) - if termSigs.contains(canonicalPTerm) then - val qTerm = termSigs(canonicalPTerm) - termWorklist = (pTerm, qTerm, TermCongruence((pTerm, qTerm))) :: termWorklist - else - termSigs(canonicalPTerm) = pTerm - } - (parentsF2 ++ parentsF1).foreach { - case pFormula: AppliedPredicate => - val canonicalPFormula = canonicalize(pFormula) - if formulaSeen.contains(canonicalPFormula) then - val qFormula = formulaSeen(canonicalPFormula) - formWorklist = (pFormula, qFormula, FormulaCongruence((pFormula, qFormula))) :: formWorklist - else - formulaSeen(canonicalPFormula) = pFormula - } - termParentsT(newId) = termParentsT(id1) - termParentsT(newId).addAll(termParentsT(id2)) - termParentsF(newId) = formulaSeen.values.to(mutable.Set) - formWorklist.foreach { case (l, r, step) => mergeWithStep(l, r, step) } - termWorklist.foreach { case (l, r, step) => mergeWithStep(l, r, step) } - } + proofMap((id1, id2)) = step + val parents1 = parents(find(id1)) + val parents2 = parents(find(id2)) - protected def mergeWithStep(id1: Formula, id2: Formula, step: FormulaStep): Unit = - if find(id1) == find(id2) then 
() - else - formulaProofMap((id1, id2)) = step - val newparents = formulaParents(find(id1)) ++ formulaParents(find(id2)) - formulaUF.union(id1, id2) - val newId = find(id1) - - val formulaSeen = mutable.Map[Formula, AppliedConnector]() - var formWorklist = List[(Formula, Formula, FormulaStep)]() - - newparents.foreach { - case pFormula: AppliedConnector => - val canonicalPFormula = canonicalize(pFormula) - if formulaSeen.contains(canonicalPFormula) then - val qFormula = formulaSeen(canonicalPFormula) - formWorklist = (pFormula, qFormula, FormulaCongruence((pFormula, qFormula))) :: formWorklist - //mergeWithStep(pFormula, qFormula, FormulaCongruence((pFormula, qFormula))) - else - formulaSeen(canonicalPFormula) = pFormula - } - formulaParents(newId) = formulaSeen.values.to(mutable.Set) - formWorklist.foreach { case (l, r, step) => mergeWithStep(l, r, step) } + if find(id1) == find(id2) then return () - def proveTerm(using lib: Library, proof: lib.Proof)(id1: Term, id2:Term, base: Sequent): proof.ProofTacticJudgement = + proofMap((id1, id2)) = step + val (small, big ) = if parents(find(id1)).size < parents(find(id2)).size then + (id1, id2) else (id2, id1) + codes(find(small)) = codes(find(big)) + UF.union(id1, id2) + val newId = find(id1) + var worklist = List[(Expr[?], Expr[?], Step)]() + + parents(small).foreach { pExpr => + val canonicalPExpr = canSig(pExpr) + if mapSigs.contains(canonicalPExpr) then + val qExpr = mapSigs(canonicalPExpr) + + worklist = (pExpr, qExpr, CongruenceStep((pExpr, qExpr))) :: worklist + else + mapSigs(canonicalPExpr) = pExpr + } + parents(newId) = parents(big) + parents(newId).addAll(parents(small)) + worklist.foreach { case (l, r, step) => mergeWithStep(l, r, step) } + } + + + def proveExpr[S](using lib: Library, proof: lib.Proof)(id1: Expr[S], id2:Expr[S], base: Sequent): proof.ProofTacticJudgement = TacticSubproof { proveInnerTerm(id1, id2, base) } - def proveInnerTerm(using lib: Library, proof: lib.Proof)(id1: Term, id2:Term, base: Sequent): Unit = { + + + def proveInnerTerm(using lib: Library, proof: lib.Proof)(id1: Expr[?], id2:Expr[?], base: Sequent): Unit = { import lib.* val steps = explain(id1, id2) steps match { case None => throw new Exception("No proof found in the egraph") case Some(steps) => - if steps.isEmpty then have(base.left |- (base.right + (id1 === id2))) by Restate + if steps.isEmpty then have(base.left |- (base.right + (makeEq(id1, id2)))) by Restate steps.foreach { - case TermExternal((l, r)) => - val goalSequent = base.left |- (base.right + (id1 === r)) + case ExternalStep((l, r)) => + val goalSequent = base.left |- (base.right + (makeEq(id1, r))) if l == id1 then have(goalSequent) by Restate else - val x = freshVariable(id1) - have(goalSequent) by RightSubstEq.withParametersSimple(List((l, r)), lambda(x, id1 === x))(lastStep) - case TermCongruence((l, r)) => + val x = variable[Term](freshId(Seq(id1))) + have(goalSequent) by RightSubstEq.withParameters(List((l, r)), (Seq(x), makeEq(id1, x)))(lastStep) + case CongruenceStep((l, r)) => val prev = if id1 != l then lastStep else null - val leqr = have(base.left |- (base.right + (l === r))) subproof { sp ?=> + val leqr = have(base.left |- (base.right + (makeEq(l, r)))) subproof { sp ?=> (l, r) match - case (AppliedFunctional(labell, argsl), AppliedFunctional(labelr, argsr)) if labell == labelr && argsl.size == argsr.size => - var freshn = freshId((l.freeVariables ++ r.freeVariables).map(_.id), "n").no + case (Multiapp(labell, argsl), Multiapp(labelr, argsr)) if labell == labelr && argsl.size 
== argsr.size => + var freshn = freshId((l.freeVars ++ r.freeVars).map(_.id), "n").no val ziped = (argsl zip argsr) - var zip = List[(Term, Term)]() - var children = List[Term]() - var vars = List[Variable]() + var zip = List[(Expr[?], Expr[?])]() + var children = List[Expr[?]]() + var vars = List[Variable[?]]() var steps = List[(Formula, sp.ProofStep)]() ziped.reverse.foreach { (al, ar) => if al == ar then children = al :: children else { - val x = Variable(Identifier("n", freshn)) + val x = variable(Identifier("n", freshn), al.sort) freshn = freshn + 1 children = x :: children vars = x :: vars - steps = (al === ar, have(proveTerm(al, ar, base))) :: steps + steps = (makeEq(al, ar), have(proveExpr(al, ar.asInstanceOf, base))) :: steps zip = (al, ar) :: zip } } - have(base.left |- (base.right + (l === l))) by Restate - val eqs = zip.map((l, r) => l === r) - val goal = have((base.left ++ eqs) |- (base.right + (l === r))).by.bot - have((base.left ++ eqs) |- (base.right + (l === r))) by RightSubstEq.withParametersSimple(zip, lambda(vars, l === labelr.applyUnsafe(children)))(lastStep) + have(base.left |- (base.right + makeEq(l, l))) by Restate + val eqs = zip.map((l, r) => makeEq(l, r)) + val goal = have((base.left ++ eqs) |- (base.right + makeEq(l, r))).by.bot + have((base.left ++ eqs) |- (base.right + makeEq(l, r))) by RightSubstEq.withParameters(zip, (vars, makeEq(l, Multiapp.unsafe(labelr, children))))(lastStep) steps.foreach { s => have( if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 @@ -513,19 +420,21 @@ class EGraphTerms() { case _ => println(s"l: $l") println(s"r: $r") - throw UnreachableException + throw Exception("Unreachable") } if id1 != l then - val goalSequent = base.left |- (base.right + (id1 === r)) - val x = freshVariable(id1) - have(goalSequent +<< (l === r)) by RightSubstEq.withParametersSimple(List((l, r)), lambda(x, id1 === x))(prev) + val goalSequent = base.left |- (base.right + (makeEq(id1, r))) + val x = variable(freshId(Seq(id1)), id1.sort) + have(goalSequent +<< makeEq(l, r)) by RightSubstEq.withParameters(List((l, r)), (Seq(x), makeEq(id1, x)))(prev) have(goalSequent) by Cut(leqr, lastStep) } } } - def proveFormula(using lib: Library, proof: lib.Proof)(id1: Formula, id2:Formula, base: Sequent): proof.ProofTacticJudgement = + /* + + def proveExpr(using lib: Library, proof: lib.Proof)(id1: Formula, id2:Formula, base: Sequent): proof.ProofTacticJudgement = TacticSubproof { proveInnerFormula(id1, id2, base) } def proveInnerFormula(using lib: Library, proof: lib.Proof)(id1: Formula, id2:Formula, base: Sequent): Unit = { @@ -536,14 +445,14 @@ class EGraphTerms() { case Some(steps) => if steps.isEmpty then have(base.left |- (base.right + (id1 <=> id2))) by Restate steps.foreach { - case FormulaExternal((l, r)) => + case ExternalStep((l, r)) => val goalSequent = base.left |- (base.right + (id1 <=> r)) if l == id1 then have(goalSequent) by Restate else val x = freshVariableFormula(id1) - have(goalSequent) by RightSubstIff.withParametersSimple(List((l, r)), lambda(x, id1 <=> x))(lastStep) - case FormulaCongruence((l, r)) => + have(goalSequent) by RightSubstIff.withParameters(List((l, r)), lambda(x, id1 <=> x))(lastStep) + case CongruenceStep((l, r)) => val prev = if id1 != l then lastStep else null val leqr = have(base.left |- (base.right + (l <=> r))) subproof { sp ?=> (l, r) match @@ -561,14 +470,14 @@ class EGraphTerms() { freshn = freshn + 1 children = x :: children vars = x :: vars - steps = (al <=> ar, have(proveFormula(al, ar, base))) :: 
steps + steps = (al <=> ar, have(proveExpr(al, ar, base))) :: steps zip = (al, ar) :: zip } } have(base.left |- (base.right + (l <=> l))) by Restate val eqs = zip.map((l, r) => l <=> r) val goal = have((base.left ++ eqs) |- (base.right + (l <=> r))).by.bot - have((base.left ++ eqs) |- (base.right + (l <=> r))) by RightSubstIff.withParametersSimple(zip, lambda(vars, l <=> labelr.applyUnsafe(children)))(lastStep) + have((base.left ++ eqs) |- (base.right + (l <=> r))) by RightSubstIff.withParameters(zip, lambda(vars, l <=> labelr.applyUnsafe(children)))(lastStep) steps.foreach { s => have( if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 @@ -596,7 +505,7 @@ class EGraphTerms() { have(base.left |- (base.right + (l <=> l))) by Restate val eqs = zip.map((l, r) => l === r) val goal = have((base.left ++ eqs) |- (base.right + (l <=> r))).by.bot - have((base.left ++ eqs) |- (base.right + (l <=> r))) by RightSubstEq.withParametersSimple(zip, lambda(vars, l <=> labelr.applyUnsafe(children)))(lastStep) + have((base.left ++ eqs) |- (base.right + (l <=> r))) by RightSubstEq.withParameters(zip, lambda(vars, l <=> labelr.applyUnsafe(children)))(lastStep) steps.foreach { s => have( if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 @@ -611,12 +520,13 @@ class EGraphTerms() { if id1 != l then val goalSequent = base.left |- (base.right + (id1 <=> r)) val x = freshVariableFormula(id1) - have(goalSequent +<< (l <=> r)) by RightSubstIff.withParametersSimple(List((l, r)), lambda(x, id1 <=> x))(prev) + have(goalSequent +<< (l <=> r)) by RightSubstIff.withParameters(List((l, r)), lambda(x, id1 <=> x))(prev) have(goalSequent) by Cut(leqr, lastStep) } } } + */ } \ No newline at end of file diff --git a/lisa-sets/src/main/scala/lisa/automation/CongruenceSimp.scala b/lisa-sets/src/main/scala/lisa/automation/CongruenceSimp.scala new file mode 100644 index 000000000..4c6dadd3e --- /dev/null +++ b/lisa-sets/src/main/scala/lisa/automation/CongruenceSimp.scala @@ -0,0 +1,172 @@ +package lisa.automation +import lisa.utils.fol.FOL.{*, given} +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* + + + + +/////////////////////////////// +///////// E-graph ///////////// +/////////////////////////////// + +import scala.collection.mutable + + + + + +class EGraphExprSimp() { + /* + + val termParents = mutable.Map[Term, mutable.Set[AppliedFunctional]]() + val termUF = new UnionFind[Term]() + val termProofMap = mutable.Map[(Term, Term), Boolean]() + + + + def find(id: Term): Term = termUF.find(id) + + def add(node: Term): Term = + termUF.add(node) + termParents(node) = mutable.Set() + node match + case node @ AppliedFunctional(_, args) => + + args.foreach(child => + add(child) + termParents(child).add(node) + ) + node + case _ => node + + + def merge(id1: Term, id2: Term): Unit = { + mergeWithStep(id1, id2, true) + } + + type Sig = (TermLabel[?]|Term, List[Int]) + val termSigs = mutable.Map[Sig, Term]() + val codes = mutable.Map[Term, Int]() + + protected def mergeWithStep(id1: Term, id2: Term, isExternal: Boolean): Unit = { + if find(id1) == find(id2) then return () + termProofMap((id1, id2)) = isExternal + val (small, big ) = if termParents(find(id1)).size < termParents(find(id2)).size then + (id1, id2) else (id2, id1) + codes(find(small)) = codes(find(big)) + termUF.union(id1, id2) + val newId = find(id1) + var worklist = List[(Term, Term, Boolean)]() + + 
termParents(small).foreach { pTerm => + val canonicalPTerm = canonicalize(pTerm) + if termSigs.contains(canonicalPTerm) then + val qTerm = termSigs(canonicalPTerm) + mergeWithStep(pTerm, qTerm, false) + else + termSigs(canonicalPTerm) = pTerm + } + termParents(newId) = termParents(big) + termParents(newId).addAll(termParents(small)) + } + + def canonicalize(node: Term): Sig = node match + case AppliedFunctional(label, args) => + (label, args.map(a => codes(find(a))).toList) + case _ => (node, List()) + + + + + // Explain + + + + + def explain(id1: Term, id2: Term): Option[List[(Term, Term, Boolean)]] = { + val steps = termUF.explain(id1, id2) + steps.map(_.map { a => (a._1, a._2, termProofMap(a)) + + }) + } + + + + + + + + + + + + // Proofs Lisa + + def proveTerm(using lib: Library, proof: lib.Proof)(id1: Term, id2:Term, base: Sequent): proof.ProofTacticJudgement = + TacticSubproof { proveInnerTerm(id1, id2, base) } + + def proveInnerTerm(using lib: Library, proof: lib.Proof)(id1: Term, id2:Term, base: Sequent): Unit = { + import lib.* + val steps = explain(id1, id2) + steps match { + case None => throw new Exception("No proof found in the egraph") + case Some(steps) => // External + have(base.left |- (base.right + (id1 === id2))) by Restate + var current = id1 + steps.foreach { + case (l, r, true) => + current = if current == l then r else l + val goalSequent = base.left |- (base.right + (id1 === r)) + val x = freshVariable(id1) + //thenHave(id1 === current) by Transitivity(l === r) + have(goalSequent) by RightSubstEq.withParametersSimple(List((l, r)), lambda(x, id1 === x))(lastStep) + case (l, r, false) => // Congruence + val prev = lastStep + val leqr = have(base.left |- (base.right + (l === r))) subproof { sp ?=> + (l, r) match + case (AppliedFunctional(labell, argsl), AppliedFunctional(labelr, argsr)) if labell == labelr && argsl.size == argsr.size => + var freshn = freshId((l.freeVariables ++ r.freeVariables).map(_.id), "n").no + val ziped = (argsl zip argsr) + var zip = List[(Term, Term)]() + var children = List[Term]() + var vars = List[Variable]() + var steps = List[(Formula, sp.ProofStep)]() + ziped.reverse.foreach { (al, ar) => + if al == ar then children = al :: children + else { + val x = Variable(Identifier("n", freshn)) + freshn = freshn + 1 + children = x :: children + vars = x :: vars + steps = (al === ar, have(proveTerm(al, ar, base))) :: steps + zip = (al, ar) :: zip + } + } + have(base.left |- (base.right + (l === l))) by Restate + val eqs = zip.map((l, r) => l === r) + val goal = have((base.left ++ eqs) |- (base.right + (l === r))).by.bot + have((base.left ++ eqs) |- (base.right + (l === r))) by RightSubstEq.withParametersSimple(zip, lambda(vars, l === labelr.applyUnsafe(children)))(lastStep) + steps.foreach { s => + have( + if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 + ) by Cut(s._2, lastStep) + } + case _ => + println(s"l: $l") + println(s"r: $r") + throw UnreachableException + + } + val goalSequent = base.left |- (base.right + (id1 === r)) + val x = freshVariable(id1) + have(goalSequent +<< (l === r)) by RightSubstEq.withParametersSimple(List((l, r)), lambda(x, id1 === x))(prev) + have(goalSequent) by Cut(leqr, lastStep) + } + } + } +*/ + +} \ No newline at end of file diff --git a/lisa-sets/src/main/scala/lisa/automation/Substitution.scala b/lisa-sets/src/main/scala/lisa/automation/Substitution.scala index 87eab43cd..ece64ad2a 100644 --- a/lisa-sets/src/main/scala/lisa/automation/Substitution.scala +++ 
b/lisa-sets/src/main/scala/lisa/automation/Substitution.scala @@ -1,641 +1,467 @@ package lisa.automation -import lisa.fol.FOL as F + +import lisa.utils.fol.FOL as F import lisa.kernel.proof.RunningTheory import lisa.kernel.proof.SCProof import lisa.kernel.proof.SequentCalculus -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.ProofTacticLib.{_, given} -import lisa.prooflib.* -import lisa.utils.FOLPrinter +import lisa.utils.prooflib.BasicStepTactic +import lisa.utils.prooflib.SimpleDeducedSteps +import lisa.utils.prooflib.ProofTacticLib.{*, given} +import lisa.utils.prooflib.* import lisa.utils.K import lisa.utils.UserLisaException -import lisa.utils.parsing.FOLPrinter -import lisa.utils.unification.UnificationUtils -import lisa.utils.unification.UnificationUtils.getContextFormulaSet +import lisa.utils.unification.UnificationUtils.* +import lisa.utils.collection.Extensions.* import scala.annotation.nowarn import scala.collection.mutable.{Map as MMap} import F.{*, given} -import F.|- - -object Substitution { - def validRule(using lib: lisa.prooflib.Library, proof: lib.Proof)(r: (proof.Fact | F.Formula | lib.JUSTIFICATION)): Boolean = - r match { - case F.equality(_, _) => true - case F.Iff(_, _) => true - case _: Formula => false - case j: lib.JUSTIFICATION => j.statement.right.size == 1 && validRule(j.statement.right.head) - case f: proof.Fact @unchecked => proof.sequentOfFact(f).right.size == 1 && validRule(proof.sequentOfFact(f).right.head) - // case j: RunningTheory#Justification => - // proof.sequentOfFact(j.asInstanceOf[lib.theory.Justification]).right.size == 1 && validRule(proof.sequentOfFact(j.asInstanceOf[lib.theory.Justification]).right.head) - } - - object ApplyRules extends ProofTactic { - - def apply(using lib: lisa.prooflib.Library, proof: lib.Proof)(substitutions: (proof.Fact | F.Formula | lib.JUSTIFICATION)*)( - premise: proof.Fact - )(bot: F.Sequent): proof.ProofTacticJudgement = { - // figure out instantiations for rules - // takes a premise - val premiseSequent: F.Sequent = proof.getSequent(premise) - - // make sure substitutions are all valid - val violatingSubstitutions = substitutions.collect { - case f : proof.Fact @unchecked if !validRule(f) => proof.sequentOfFact(f) - case j: lib.JUSTIFICATION if !validRule(j) => j.statement - } - - val violatingFormulas = substitutions.collect { - case f: F.Formula if !validRule(f) => f - } - - if (!violatingSubstitutions.isEmpty) - // return error - proof.InvalidProofTactic("Substitution rules must have a single equality or equivalence on the right-hand side. Violating sequents passed:\n" + violatingSubstitutions.zipWithIndex.map { - (s, i) => - s"${i + 1}. ${s.toString}" - }) - else if (!violatingFormulas.isEmpty) - proof.InvalidProofTactic("Substitution rules must be equalities or equivalences. Violating formulas passed:\n" + violatingFormulas.zipWithIndex.map { (s, i) => - s"${i + 1}. 
${s.toString}" - }) - else { - // proceed as usual - - // maintain a list of where subtitutions come from - val sourceOf: MMap[(F.Formula, F.Formula) | (F.Term, F.Term), proof.Fact] = MMap() - val takenTermVars: Set[lisa.fol.FOL.Variable] = - premiseSequent.left.flatMap(_.freeVariables).toSet union substitutions.collect { case f: F.Formula => f.freeVariables.toSet }.foldLeft(Set.empty)(_.union(_)) - val takenFormulaVars: Set[lisa.fol.FOL.VariableFormula] = premiseSequent.left.flatMap(_.freeVariableFormulas).toSet union substitutions - .collect { case f: F.Formula => f.freeVariableFormulas.toSet } - .foldLeft(Set.empty)(_.union(_)) // TODO: should this just be the LHS of the premise sequent instead? - - var freeEqualitiesPre = List[(F.Term, F.Term)]() - var confinedEqualitiesPre = List[(F.Term, F.Term)]() - var freeIffsPre = List[(F.Formula, F.Formula)]() - var confinedIffsPre = List[(F.Formula, F.Formula)]() - - def updateSource(t: (F.Formula, F.Formula) | (F.Term, F.Term), f: proof.Fact) = { - sourceOf.update(t, f) - sourceOf.update(t.swap.asInstanceOf[(F.Formula, F.Formula) | (F.Term, F.Term)], f) - } - - // collect substitutions into the right buckets - substitutions.foreach { - case f: F.Formula => - f match { - case F.AppliedPredicate(F.equality, Seq(l, r)) => - confinedEqualitiesPre = (l, r) :: confinedEqualitiesPre - case F.AppliedConnector(F.Iff, Seq(l, r)) => - confinedIffsPre = (l, r) :: confinedIffsPre - case _ => () - } - case j: lib.JUSTIFICATION => - j.statement.right.head match { - case F.AppliedPredicate(F.equality, Seq(l, r)) => - updateSource((l, r), j) - freeEqualitiesPre = (l, r) :: freeEqualitiesPre - case F.AppliedConnector(F.Iff, Seq(l, r)) => - updateSource((l, r), j) - freeIffsPre = (l, r) :: freeIffsPre - case _ => () - } - case f: proof.Fact @unchecked => - proof.sequentOfFact(f).right.head match { - case F.AppliedPredicate(F.equality, Seq(l, r)) => - updateSource((l, r), f) - confinedEqualitiesPre = (l, r) :: confinedEqualitiesPre - case F.AppliedConnector(F.Iff, Seq(l, r)) => - updateSource((l, r), f) - confinedIffsPre = (l, r) :: confinedIffsPre - case _ => () - } - } - - // get the original and swapped versions - val freeEqualities: List[(F.Term, F.Term)] = freeEqualitiesPre ++ freeEqualitiesPre.map(_.swap) - val confinedEqualities: List[(F.Term, F.Term)] = confinedEqualitiesPre ++ confinedEqualitiesPre.map(_.swap) - val freeIffs: List[(F.Formula, F.Formula)] = freeIffsPre ++ freeIffsPre.map(_.swap) - val confinedIffs: List[(F.Formula, F.Formula)] = confinedIffsPre ++ confinedIffsPre.map(_.swap) - - val filteredPrem: Seq[F.Formula] = (premiseSequent.left filter { - case F.AppliedPredicate(F.equality, Seq(l, r)) if freeEqualities.contains((l, r)) || confinedEqualities.contains((l, r)) => false - case F.AppliedConnector(F.Iff, Seq(l, r)) if freeIffs.contains((l, r)) || confinedIffs.contains((l, r)) => false - case _ => true - }).toSeq - - val filteredBot: Seq[F.Formula] = (bot.left filter { - case F.AppliedPredicate(F.equality, Seq(l, r)) if freeEqualities.contains((l, r)) || confinedEqualities.contains((l, r)) => false - case F.AppliedConnector(F.Iff, Seq(l, r)) if freeIffs.contains((l, r)) || confinedIffs.contains((l, r)) => false - case _ => true - }).toSeq - - // construct the right instantiations - lazy val leftContextsOpt: Option[Seq[UnificationUtils.FormulaRewriteLambda]] = getContextFormulaSet( - filteredPrem, - filteredBot, - freeEqualities, - freeIffs, - confinedEqualities, - takenTermVars, - confinedIffs, - takenFormulaVars - ) - - lazy val 
rightContextsOpt: Option[Seq[UnificationUtils.FormulaRewriteLambda]] = getContextFormulaSet( - premiseSequent.right.toSeq, - bot.right.toSeq, - freeEqualities, - freeIffs, - confinedEqualities, - takenTermVars, - confinedIffs, - takenFormulaVars - ) - - lazy val rightPairs = premiseSequent.right zip premiseSequent.right.map(x => - bot.right.find(y => - UnificationUtils - .getContextFormula( - x, - y, - freeEqualities, - freeIffs, - confinedEqualities, - takenTermVars, - confinedIffs, - takenFormulaVars - ) - .isDefined - ) - ) - - lazy val leftPairs = filteredPrem zip filteredPrem.map(x => - filteredBot.find(y => - UnificationUtils - .getContextFormula( - x, - y, - freeEqualities, - freeIffs, - confinedEqualities, - takenTermVars, - confinedIffs, - takenFormulaVars - ) - .isDefined - ) - ) - - lazy val violatingFormulaLeft = leftPairs.find(_._2.isEmpty) - lazy val violatingFormulaRight = rightPairs.find(_._2.isEmpty) - - if (leftContextsOpt.isEmpty) - proof.InvalidProofTactic(s"Could not rewrite LHS of premise into conclusion with given substitutions.\nViolating Formula: ${violatingFormulaLeft.get}") - else if (rightContextsOpt.isEmpty) - proof.InvalidProofTactic(s"Could not rewrite RHS of premise into conclusion with given substitutions.\nViolating Formula: ${violatingFormulaRight.get}") - else { - // actually construct proof - TacticSubproof { - - def eq(rule: (Term, Term)) = AppliedPredicate(equality, Seq(rule._1, rule._2)) - def iff(rule: (Formula, Formula)) = AppliedConnector(Iff, Seq(rule._1, rule._2)) - - def eqSource(rule: (Term, Term)) = lib.have(eq(rule) |- eq(rule)) by SimpleDeducedSteps.Restate - def iffSource(rule: (Formula, Formula)) = lib.have(iff(rule) |- iff(rule)) by SimpleDeducedSteps.Restate - val leftContexts: Seq[UnificationUtils.FormulaRewriteLambda] = leftContextsOpt.get // remove the options - val rightContexts: Seq[UnificationUtils.FormulaRewriteLambda] = rightContextsOpt.get // remove the options - - val leftBody = AppliedConnector(And, leftContexts.map(f => f.body)) - - val defaultLeft = UnificationUtils.FormulaRewriteLambda(body = leftBody) - - val leftContextReduced = leftContexts.foldLeft(defaultLeft) { (f, s) => - UnificationUtils.FormulaRewriteLambda( - termRules = f.termRules ++ s.termRules, - formulaRules = f.formulaRules ++ s.formulaRules, - leftBody - ) - } - - val rightBody = AppliedConnector(Or, rightContexts.map(f => f.body)) - - val defaultRight = UnificationUtils.FormulaRewriteLambda(body = rightBody) - - val rightContextReduced = rightContexts.foldLeft(defaultRight) { (f, s) => - UnificationUtils.FormulaRewriteLambda( - termRules = f.termRules ++ s.termRules, - formulaRules = f.formulaRules ++ s.formulaRules, - rightBody - ) - } - - // find the justifications for each rule, or generate them, as required - val leftDischarges = - leftContextReduced.termRules.map { case (_, (rule, subst)) => - sourceOf.get(rule) match { - case Some(f: proof.Fact) => - f.of(subst.toSeq.map((l, r) => (l := r))*) - // case Some(j: lib.theory.Justification) => - // j.of(subst.toSeq.map((l, r) => (l, lambda(Seq(), r))): _*) - case _ => - eqSource(rule).of() - } - } ++ - leftContextReduced.formulaRules.map { case (_, (rule, subst)) => - sourceOf.get(rule) match { - case Some(f: proof.Fact) => - f.of(subst._1.toSeq.map((l, r) => (l := r)) ++ subst._2.toSeq.map((l, r) => (l := r))*) - // case Some(j: lib.theory.Justification) => - // j.of(subst._1.toSeq.map((l, r) => (l, lambda(Seq[Variable](), r))) ++ subst._2.toSeq.map((l, r) => (l, lambda(Seq[Variable](), r))): _*) - case 
_ => - iffSource(rule).of() - } - } - val rightDischarges = - rightContextReduced.termRules.map { case (_, (rule, subst)) => - sourceOf.get(rule) match { - case Some(f: proof.Fact) => - f.of(subst.toSeq.map((l, r) => (l := r))*) - // case Some(j: lib.theory.Justification) => - // j.of(subst.toSeq.map((l, r) => (l, lambda(Seq(), r))): _*) - case None => - eqSource(rule).of() - } - } ++ - rightContextReduced.formulaRules.map { case (_, (rule, subst)) => - sourceOf.get(rule) match { - case Some(f: proof.Fact) => - f.of(subst._1.toSeq.map((l, r) => (l := r)) ++ subst._2.toSeq.map((l, r) => (l := r))*) - // case Some(j: lib.theory.Justification) => - // j.of(subst._1.toSeq.map((l, r) => (l, lambda(Seq[Variable](), r))) ++ subst._2.toSeq.map((l, r) => (l, lambda(Seq[Variable](), r))): _*) - case None => - iffSource(rule).of() - } - } - - val discharges = leftDischarges ++ rightDischarges - // ------------------- - // LEFT SUBSTITUTIONS - // ------------------- - val nextSequent = { - // we have a lambda like λx. Λp. body - // where the p are formula variables, and the x are term variables - val ctx = leftContextReduced - - val termVars = ctx.termRules.map(_._1) - - val termInputs = ctx.termRules.map { case (_, (rule: (Term, Term), subst: UnificationUtils.TermSubstitution)) => - ( - rule._1.substituteUnsafe2(subst), - rule._2.substituteUnsafe2(subst) - ) - } - - lazy val (termInputsL, termInputsR) = (termInputs.map(_._1), termInputs.map(_._2)) - - val formulaVars = ctx.formulaRules.map(_._1) - - val formulaInputs = ctx.formulaRules.map { case (_, (rule, subst)) => - ( - rule._1.substituteUnsafe2(subst._2).substituteUnsafe2(subst._1), - rule._2.substituteUnsafe2(subst._2).substituteUnsafe2(subst._1) - ) - } - val (formulaInputsL, formulaInputsR) = (formulaInputs.map(_._1), formulaInputs.map(_._2)) - - // get premise into the right form - val prem = AppliedConnector(And, filteredPrem.toSeq) |- AppliedConnector(Or, premiseSequent.right.toSeq) - val eqs = termInputs.map(eq(_)) - val iffs = formulaInputs.map(iff(_)) - val premiseWithSubst = prem ++<< (eqs |- ()) ++<< (iffs |- ()) - lib.have(premiseWithSubst) by BasicStepTactic.Weakening(premise) - - // left === - val eqSubst = Map((termVars zip termInputsR)*) - val formSubstL = Map((formulaVars zip formulaInputsL)*) - val lhsAfterEq = ctx.body.substituteUnsafe2(eqSubst).substituteUnsafe2(formSubstL) - val sequentAfterEqPre = lhsAfterEq |- premiseWithSubst.right - val sequentAfterEq = sequentAfterEqPre ++<< (eqs |- ()) ++<< (iffs |- ()) - - // this uses the "lambda" (λx. Λp. body) (p = left formulas) - lib.thenHave(sequentAfterEq) by BasicStepTactic.LeftSubstEq.withParametersSimple(termInputs.toList, lambda(termVars, ctx.body.substituteUnsafe2(formSubstL))) - - // left <=> - val formSubstR = Map((formulaVars zip formulaInputsR)*) - val lhsAfterIff = ctx.body.substituteUnsafe2(eqSubst).substituteUnsafe2(formSubstR) - val sequentAfterIffPre = lhsAfterIff |- sequentAfterEq.right - val sequentAfterIff = sequentAfterIffPre ++<< (eqs |- ()) ++<< (iffs |- ()) - - // this uses the "lambda" (λx. Λp. body) (x = right terms) - lib.thenHave(sequentAfterIff) by BasicStepTactic.LeftSubstIff.withParametersSimple(formulaInputs.toList, lambda(formulaVars, ctx.body.substituteUnsafe2(eqSubst))) - sequentAfterIff - } - - // ------------------- - // RIGHT SUBSTITUTIONS - // ------------------- - val finalSequent = { - // we have a lambda like λx. Λp. 
body - // where the p are formula variables, and the x are term variables - val ctx = rightContextReduced - - val termVars = ctx.termRules.map(_._1) - - val termInputs = ctx.termRules.map { case (_, (rule, subst)) => - ( - rule._1.substituteUnsafe2(subst), - rule._2.substituteUnsafe2(subst) - ) - } - - lazy val (termInputsL, termInputsR) = (termInputs.map(_._1), termInputs.map(_._2)) - - val formulaVars = ctx.formulaRules.map(_._1) - - val formulaInputs = ctx.formulaRules.map { case (_, (rule, subst)) => - ( - rule._1.substituteUnsafe2(subst._2).substituteUnsafe2(subst._1), - rule._2.substituteUnsafe2(subst._2).substituteUnsafe2(subst._1) - ) - } - val (formulaInputsL, formulaInputsR) = (formulaInputs.map(_._1), formulaInputs.map(_._2)) - - // get premise into the right form - val prem = nextSequent - val eqs = termInputs.map(eq(_)) - val iffs = formulaInputs.map(iff(_)) - val premiseWithSubst = prem ++<< (eqs |- ()) ++<< (iffs |- ()) - lib.thenHave(premiseWithSubst) by BasicStepTactic.Weakening - - // right === - val eqSubst = Map((termVars zip termInputsR)*) - val formSubstL = Map((formulaVars zip formulaInputsL)*) - val rhsAfterEq = ctx.body.substituteUnsafe2(eqSubst).substituteUnsafe2(formSubstL) - val sequentAfterEqPre = premiseWithSubst.left |- rhsAfterEq - val sequentAfterEq = sequentAfterEqPre ++<< (eqs |- ()) ++<< (iffs |- ()) - - // this uses the "lambda" (λx. Λp. body) (p = left formulas) - lib.thenHave(sequentAfterEq) by BasicStepTactic.RightSubstEq.withParametersSimple(termInputs.toList, lambda(termVars, ctx.body.substituteUnsafe2(formSubstL))) - - // right <=> - val formSubstR = Map((formulaVars zip formulaInputsR)*) - val rhsAfterIff = ctx.body.substituteUnsafe2(eqSubst).substituteUnsafe2(formSubstR) - val sequentAfterIffPre = sequentAfterEq.left |- rhsAfterIff - val sequentAfterIff = sequentAfterIffPre ++<< (eqs |- ()) ++<< (iffs |- ()) - - // this uses the "lambda" (λx. Λp. body) (x = right terms) - lib.thenHave(sequentAfterIff) by BasicStepTactic.RightSubstIff.withParametersSimple(formulaInputs.toList, lambda(formulaVars, ctx.body.substituteUnsafe2(eqSubst))) - - } - // discharge any assumptions - - // custom discharge - // invariant: all facts are known to have only one formula in their RHS - discharges.foreach { f => - lib.thenHave(lib.lastStep.bot +<< f.result.right.head) by BasicStepTactic.Weakening // in case of double discharges, add the formula back in - lib.have(lib.lastStep.bot - f.result.right.head ++ (f.result.left |- ())) by BasicStepTactic.Cut.withParameters(f.result.right.head)(f, lib.lastStep) - } - - // finally, make sure our substitutions and discharges led us to the required conclusion - lib.thenHave(bot) by BasicStepTactic.Weakening - } - } - } - } - } - object applySubst extends ProofTactic { - - private def condflat[T](s: Seq[(T, Boolean)]): (Seq[T], Boolean) = (s.map(_._1), s.exists(_._2)) - - private def findSubterm2(t: Term, subs: Seq[(Variable, Term)]): (Term, Boolean) = { - val eq = subs.find(s => isSameTerm(t, s._2)) - if (eq.nonEmpty) (eq.get._1, true) - else { - val induct = condflat(t.args.map(te => findSubterm2(te, subs))) - if (!induct._2) (t, false) + +object Substitution: + + /** + * Extracts a raw substitution into a `RewriteRule`. 
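+   * An equality fact `l === r` is read as a `TermRewriteRule(l, r)`, and an
+   * equivalence `l <=> r` as a `FormulaRewriteRule(l, r)`; inputs are expected
+   * to have already passed `validSubstitutionRule` below.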
+ */ + def extractRule + (using lib: Library, proof: lib.Proof) + (rule: proof.Fact | F.Formula): RewriteRule = + rule match + case f: Formula => f match + case l === r => TermRewriteRule(l, r) + case l <=> r => FormulaRewriteRule(l, r) + case f: proof.Fact => extractRule(proof.getSequent(f).right.head) + + /** + * Partitions raw substitution rules into free and confined rules, also + * creating a source map, mapping each rule to the `Fact` it was derived from, + * for proof construction. + */ + def partition + (using lib: Library, proof: lib.Proof) + (substitutions: Seq[proof.Fact | F.Formula]): (Map[RewriteRule, proof.Fact], RewriteContext) = + substitutions.foldLeft((Map.empty, RewriteContext.empty)): + case ((source, ctx), rule) => + val erule = extractRule(rule) + val (l, r) = (erule.l, erule.r) + rule match + case f: Formula => + (source + (erule -> erule.source), ctx.withConfinedRule(l, r).withBound(f.freeVars)) + case j: lib.JUSTIFICATION => + (source + (erule -> j), ctx.withFreeRule(l, r)) + case f: proof.Fact => + (source + (erule -> f), ctx.withConfinedRule(l, r)) + + /** + * Checks if a raw substitution input can be used as a rewrite rule (is === or + * <=>, basically). + */ + def validSubstitutionRule + (using lib: lisa.utils.prooflib.Library, proof: lib.Proof) + (rule: (proof.Fact | F.Formula)): Boolean = + rule match + // as formula + case f: Formula => f match + case _ === _ => true + case _ <=> _ => true + case _ => false + // as a justification + case just: proof.Fact => + val sequent = proof.getSequent(just) + sequent.right.size == 1 && validSubstitutionRule(sequent.right.head) + + object Apply extends ProofTactic: + def apply + (using lib: Library, proof: lib.Proof) + (substitutions: (proof.Fact | F.Formula)*) + (premiseStep: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + + // are all substitution rules actually valid? + // if not, exit early + + val violatingFacts = substitutions.collect: + case f: proof.Fact if !validSubstitutionRule(f) => proof.getSequent(f) + + val violatingFormulas = substitutions.collect: + case f: F.Formula if !validSubstitutionRule(f) => f + + if violatingFacts.nonEmpty then + val msgBase = "Substitution rules must have a single equality or equivalence on the right-hand side. Violating sequents passed:\n" + val msgList = violatingFacts.zipWithIndex.map: + case (f, i) => s"\t${i + 1}. $f" + + proof.InvalidProofTactic(msgBase + msgList.mkString("\n")) + else if violatingFormulas.nonEmpty then + val msgBase = "Substitution rules must be equalities or equivalences. Violating formulas passed:\n" + val msgList = violatingFormulas.zipWithIndex.map: + case (f, i) => s"\t${i + 1}. 
$f" + + proof.InvalidProofTactic(msgBase + msgList.mkString("\n")) else - (t.label.applySeq(induct._1), true) - - } - - } - private def findSubterm2(f: Formula, subs: Seq[(Variable, Term)]): (Formula, Boolean) = { - f match { - case f: VariableFormula => (f, false) - case f: ConstantFormula => (f, false) - case AppliedPredicate(label, args) => - val induct = condflat(args.map(findSubterm2(_, subs))) - if (!induct._2) (f, false) - else (AppliedPredicate(label, induct._1), true) - case AppliedConnector(label, args) => - val induct = condflat(args.map(findSubterm2(_, subs))) - if (!induct._2) (f, false) - else (AppliedConnector(label, induct._1), true) - case BinderFormula(label, bound, inner) => - val fv_in_f = subs.flatMap(e => e._2.freeVariables + e._1) - if (!fv_in_f.contains(bound)) { - val induct = findSubterm2(inner, subs) - if (!induct._2) (f, false) - else (BinderFormula(label, bound, induct._1), true) - } else { - val newv = Variable(freshId((f.freeVariables ++ fv_in_f).map(_.id), bound.id)) - val newInner = inner.substitute(bound := newv) - val induct = findSubterm2(newInner, subs) - if (!induct._2) (f, false) - else (BinderFormula(label, newv, induct._1), true) - } - } - } - - private def findSubformula2(f: Formula, subs: Seq[(VariableFormula, Formula)]): (Formula, Boolean) = { - val eq = subs.find(s => isSame(f, s._2)) - if (eq.nonEmpty) (eq.get._1, true) - else - f match { - case f: AtomicFormula => (f, false) - case AppliedConnector(label, args) => - val induct = condflat(args.map(findSubformula2(_, subs))) - if (!induct._2) (f, false) - else (AppliedConnector(label, induct._1), true) - case BinderFormula(label, bound, inner) => - val fv_in_f = subs.flatMap(_._2.freeVariables) - if (!fv_in_f.contains(bound)) { - val induct = findSubformula2(inner, subs) - if (!induct._2) (f, false) - else (BinderFormula(label, bound, induct._1), true) - } else { - val newv = Variable(freshId((f.freeVariables ++ fv_in_f).map(_.id), bound.id)) - val newInner = inner.substitute(bound := newv) - val induct = findSubformula2(newInner, subs) - if (!induct._2) (f, false) - else (BinderFormula(label, newv, induct._1), true) - } - } - } - - def findSubterm(t: Term, subs: Seq[(Variable, Term)]): Option[LambdaExpression[Term, Term, ?]] = { - val vars = subs.map(_._1) - val r = findSubterm2(t, subs) - if (r._2) Some(LambdaExpression(vars, r._1, vars.size)) - else None - } - - def findSubterm(f: Formula, subs: Seq[(Variable, Term)]): Option[LambdaExpression[Term, Formula, ?]] = { - val vars = subs.map(_._1) - val r = findSubterm2(f, subs) - if (r._2) Some(LambdaExpression(vars, r._1, vars.size)) - else None - } - - def findSubformula(f: Formula, subs: Seq[(VariableFormula, Formula)]): Option[LambdaExpression[Formula, Formula, ?]] = { - val vars = subs.map(_._1) - val r = findSubformula2(f, subs) - if (r._2) Some(LambdaExpression(vars, r._1, vars.size)) - else None - } - - def applyLeftRight(using lib: lisa.prooflib.Library, proof: lib.Proof)( - phi: Formula - )(premise: proof.Fact)(rightLeft: Boolean = false, toLeft: Boolean = true, toRight: Boolean = true): proof.ProofTacticJudgement = { - import lisa.utils.K - val originSequent = proof.getSequent(premise) - val leftOrigin = AppliedConnector(And, originSequent.left.toSeq) - val rightOrigin = AppliedConnector(Or, originSequent.right.toSeq) - - if (!toLeft && !toRight) return proof.InvalidProofTactic("applyLeftRight called with no substitution selected (toLeft or toRight).") - - phi match { - case AppliedPredicate(label, args) if label == equality => - val 
left = args(0) - val right = args(1) - val fv_in_phi = (originSequent.left ++ originSequent.right).flatMap(_.allSchematicLabels).map(_.id) - val v = Variable(nFreshId(fv_in_phi, 1).head) - lazy val isolatedLeft = originSequent.left.filterNot(f => isSame(f, phi)).map(f => (f, findSubterm(f, IndexedSeq(v -> left)))) - lazy val isolatedRight = originSequent.right.map(f => (f, findSubterm(f, IndexedSeq(v -> left)))) - if ((!toLeft || isolatedLeft.forall(_._2.isEmpty)) && (!toRight || isolatedRight.forall(_._2.isEmpty))) - if (rightLeft) - return proof.InvalidProofTactic(s"There is no instance of ${right} to replace.") - else - applyLeftRight(equality(right, left))(premise)(true, toLeft, toRight) match { - case proof.InvalidProofTactic(m) => return proof.InvalidProofTactic(s"There is no instance of ${left} to replace.") - case v: proof.ValidProofTactic => return v - } - - val leftForm = AppliedConnector(And, isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) - val rightForm = AppliedConnector(Or, isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) - val newleft = if (toLeft) isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.left - val newright = if (toRight) isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.right - val result1: Sequent = (AppliedConnector(And, newleft.toSeq), phi) |- rightOrigin - val result2: Sequent = result1.left |- AppliedConnector(Or, newright.toSeq) - var scproof: Seq[K.SCProofStep] = Seq(K.Restate((leftOrigin |- rightOrigin).underlying, -1)) - if (toLeft) - scproof = scproof :+ K.LeftSubstEq( - result1.underlying, - scproof.length - 1, - List(K.LambdaTermTerm(Seq(), left.underlying) -> (K.LambdaTermTerm(Seq(), right.underlying))), - (Seq(v.underlyingLabel), leftForm.underlying) - ) - if (toRight) - scproof = scproof :+ K.RightSubstEq( - result2.underlying, - scproof.length - 1, - List(K.LambdaTermTerm(Seq(), left.underlying) -> (K.LambdaTermTerm(Seq(), right.underlying))), - (Seq(v.underlyingLabel), rightForm.underlying) - ) - val bot = newleft + phi |- newright - scproof = scproof :+ K.Restate(bot.underlying, scproof.length - 1) - - proof.ValidProofTactic( - bot, - scproof, - Seq(premise) - ) - - case AppliedConnector(label, args) if label == Iff => - val left = args(0) - val right = args(1) - val fv_in_phi = (originSequent.left ++ originSequent.right).flatMap(_.allSchematicLabels).map(_.id) - val H = VariableFormula(nFreshId(fv_in_phi, 1).head) - lazy val isolatedLeft = originSequent.left.filterNot(f => isSame(f, phi)).map(f => (f, findSubformula(f, IndexedSeq(H -> left)))) - lazy val isolatedRight = originSequent.right.map(f => (f, findSubformula(f, IndexedSeq(H -> left)))) - if ((!toLeft || isolatedLeft.forall(_._2.isEmpty)) && (!toRight || isolatedRight.forall(_._2.isEmpty))) - if (rightLeft) - return proof.InvalidProofTactic(s"There is no instance of ${right} to replace.") - else - applyLeftRight(Iff(right, left))(premise)(true, toLeft, toRight) match { - case proof.InvalidProofTactic(m) => return proof.InvalidProofTactic(s"There is no instance of ${left} to replace.") - case v: proof.ValidProofTactic => return v - } - - val leftForm = AppliedConnector(And, isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) - val rightForm = AppliedConnector(Or, isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) - val newleft = if (toLeft) isolatedLeft.map((f, ltf) => if 
(ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.left - val newright = if (toRight) isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.right - val result1: Sequent = (AppliedConnector(And, newleft.toSeq), phi) |- rightOrigin - val result2: Sequent = result1.left |- AppliedConnector(Or, newright.toSeq) - - var scproof: Seq[K.SCProofStep] = Seq(K.Restate((leftOrigin |- rightOrigin).underlying, -1)) - if (toLeft) - scproof = scproof :+ K.LeftSubstIff( - result1.underlying, - scproof.length - 1, - List(K.LambdaTermFormula(Seq(), left.underlying) -> (K.LambdaTermFormula(Seq(), right.underlying))), - (Seq(H.underlyingLabel), leftForm.underlying) - ) - if (toRight) - scproof = scproof :+ K.RightSubstIff( - result2.underlying, - scproof.length - 1, - List(K.LambdaTermFormula(Seq(), left.underlying) -> (K.LambdaTermFormula(Seq(), right.underlying))), - (Seq(H.underlyingLabel), rightForm.underlying) - ) - - val bot = newleft + phi |- newright - scproof = scproof :+ K.Restate(bot.underlying, scproof.length - 1) - - proof.ValidProofTactic( - bot, - scproof, - Seq(premise) - ) - case _ => proof.InvalidProofTactic(s"Formula in applySingleSimp need to be of the form a=b or q<=>p and not ${phi}") - } - } - - @nowarn("msg=.*the type test for proof.Fact cannot be checked at runtime*") - def apply(using - lib: lisa.prooflib.Library, - proof: lib.Proof, - line: sourcecode.Line, - file: sourcecode.File - )(f: proof.Fact | Formula, rightLeft: Boolean = false, toLeft: Boolean = true, toRight: Boolean = true)( - premise: proof.Fact - ): proof.ProofTacticJudgement = { - f match { - case phi: Formula => applyLeftRight(phi)(premise)(rightLeft, toLeft, toRight) - case f: proof.Fact => - val seq = proof.getSequent(f) - val phi = seq.right.head - val sp = TacticSubproof { - val x = applyLeftRight(phi)(premise)(rightLeft, toLeft, toRight) - proof.library.have(x) - proof.library.andThen(SimpleDeducedSteps.Discharge(f)) - } - - BasicStepTactic.unwrapTactic(sp)("Subproof substitution fail.") - } - - } - - def toLeft(using lib: lisa.prooflib.Library, proof: lib.Proof, line: sourcecode.Line, file: sourcecode.File)(f: proof.Fact | Formula, rightLeft: Boolean = false)( - premise: proof.Fact - ): proof.ProofTacticJudgement = apply(f, rightLeft, toLeft = true, toRight = false)(premise) - - def toRight(using lib: lisa.prooflib.Library, proof: lib.Proof, line: sourcecode.Line, file: sourcecode.File)(f: proof.Fact | Formula, rightLeft: Boolean = false)( - premise: proof.Fact - ): proof.ProofTacticJudgement = apply(f, rightLeft, toLeft = false, toRight = true)(premise) - - } -} + // continue, we have a list of rules to work with + + // rewrite base + val premise = proof.getSequent(premiseStep) + // the target is bot + + // metadata: + // maintain a list of where substitutions come from + // and categorize them for the rewrite context + val (sourceMap, prectx) = partition(substitutions) + val ctx = prectx.withBound(premise.left.flatMap(_.freeVars)) + + // TODO: CHECK is this really necessary? + // remove from the premise equalities we are rewriting with, as these + // terms themselves are not targets for the rewriting + // val filteredPrem = ??? + + // check whether this rewrite is even possible. + // if it is, get the context (term with holes) corresponding to the + // single-step simultaneous rewrite + + // for each formula in the premise left (resp. right), there must be a + // corresponding formula in the conclusion left (resp. 
right) that it + // can be rewritten into. + + // discover a (possibly non-injective non-surjective) mapping from one + // formula set to another where a formula maps to another by the + // rewrites above + inline def collectRewritingPairs + (base: Set[Formula], target: Set[Formula]): Option[Seq[RewriteResult]] = + base.iterator.map: formula => + target.collectFirstDefined: target => + rewrite(using ctx)(formula, target) + .toOptionSeq + + // collect the set of formulas in `base` that rewrite to *no* formula + // in `target`. Guaranteed to be non-empty if + // `collectRewritingPairs(base, target)` is None. + inline def collectViolatingPairs + (base: Set[Formula], target: Set[Formula]): Set[Formula] = + base.filter: formula => + target.forall: target => + rewrite(using ctx)(formula, target).isEmpty + + + val leftSubsts = collectRewritingPairs(premise.left, bot.left) + val rightSubsts = collectRewritingPairs(premise.right, bot.right) + + if leftSubsts.isEmpty then + // error, find formulas that failed to rewrite + val msgBase = "Could not rewrite LHS of premise into conclusion with given substitutions.\nViolating Formulas:" + val msgList = + collectViolatingPairs(premise.left, bot.left) + .zipWithIndex + .map: + case (formula, i) => s"\t${i + 1}. $formula" + + proof.InvalidProofTactic(msgBase + msgList.mkString("\n")) + else if rightSubsts.isEmpty then + // error, find formulas that failed to rewrite + val msgBase = "Could not rewrite RHS of premise into conclusion with given substitutions.\nViolating Formulas:" + val msgList = + collectViolatingPairs(premise.right, bot.right) + .zipWithIndex + .map: + case (formula, i) => s"\t${i + 1}. $formula" + + proof.InvalidProofTactic(msgBase + msgList.mkString("\n")) + else + // rewriting is possible, construct the proof + + import lib.{have, thenHave, lastStep} + import BasicStepTactic.{TacticSubproof, Weakening, Cut, LeftSubstEq, RightSubstEq} + import SimpleDeducedSteps.Restate + + TacticSubproof: + val leftRewrites = leftSubsts.get + val rightRewrites = rightSubsts.get + val leftRules = leftRewrites.head.rules + val rightRules = rightRewrites.head.rules + + // instantiated discharges + + val leftDischarges = leftRules.map(r => r -> sourceMap(r)) + val rightDischarges = rightRules.map(r => r -> sourceMap(r)) + + val discharges = leftDischarges ++ rightDischarges + + // start proof + have(andAll(premise.left) |- premise.right) by Restate.from(premiseStep) + + // left rewrites + val leftFormulas = leftRules.map(_.toFormula) + val preLeft = leftRewrites.map(_.toLeft) + val postLeft = leftRewrites.map(_.toRight) + val leftVars = leftRewrites.head.lambda._1 + val leftLambda = andAll(leftRewrites.map(_.lambda._2)) + thenHave(andAll(preLeft) |- premise.right) by Restate + thenHave(andAll(preLeft) +: leftFormulas |- premise.right) by Weakening + thenHave(andAll(postLeft) +: leftFormulas |- premise.right) by LeftSubstEq.withParameters(leftRules.map(r => r.l -> r.r), leftVars -> leftLambda) + + val rpremise = lastStep.bot + + // right rewrites + val rightFormulas = rightRules.map(_.toFormula) + val preRight = rightRewrites.map(_.toLeft).toSet + val postRight = rightRewrites.map(_.toRight).toSet + val rightVars = rightRewrites.head.lambda._1 + val rightLambda = orAll(rightRewrites.map(_.lambda._2)) + thenHave(rpremise.left |- orAll(preRight)) by Restate + thenHave(rpremise.left ++ rightFormulas |- orAll(preRight)) by Weakening + thenHave(rpremise.left ++ rightFormulas |- orAll(postRight)) by RightSubstEq.withParameters(rightRules.map(r => r.l -> 
r.r), rightVars -> rightLambda) + + // rewrite to destruct sequent + thenHave(postLeft ++ leftFormulas ++ rightFormulas |- postRight) by Restate + + val dpremise = lastStep.bot + + // discharge assumptions + discharges.foldLeft(dpremise): + case (premise, (rule, source)) => + val sseq = proof.getSequent(source) + val form = rule.toFormula + val nextSequent = premise.left - form ++ sseq.left |- premise.right ++ sseq.right - form + have(nextSequent) by Cut.withParameters(form)(source, lastStep) + nextSequent + + // restate to the result + thenHave(bot) by Weakening + + end Apply + + // object applySubst extends ProofTactic { + + // private def condflat[T](s: Seq[(T, Boolean)]): (Seq[T], Boolean) = (s.map(_._1), s.exists(_._2)) + + // private def findSubterm2(t: Term, subs: Seq[(Variable, Term)]): (Term, Boolean) = { + // val eq = subs.find(s => isSameTerm(t, s._2)) + // if (eq.nonEmpty) (eq.get._1, true) + // else { + // val induct = condflat(t.args.map(te => findSubterm2(te, subs))) + // if (!induct._2) (t, false) + // else + // (t.label.applySeq(induct._1), true) + + // } + + // } + // private def findSubterm2(f: Formula, subs: Seq[(Variable, Term)]): (Formula, Boolean) = { + // f match { + // case f: VariableFormula => (f, false) + // case f: ConstantFormula => (f, false) + // case AppliedPredicate(label, args) => + // val induct = condflat(args.map(findSubterm2(_, subs))) + // if (!induct._2) (f, false) + // else (AppliedPredicate(label, induct._1), true) + // case AppliedConnector(label, args) => + // val induct = condflat(args.map(findSubterm2(_, subs))) + // if (!induct._2) (f, false) + // else (AppliedConnector(label, induct._1), true) + // case BinderFormula(label, bound, inner) => + // val fv_in_f = subs.flatMap(e => e._2.freeVariables + e._1) + // if (!fv_in_f.contains(bound)) { + // val induct = findSubterm2(inner, subs) + // if (!induct._2) (f, false) + // else (BinderFormula(label, bound, induct._1), true) + // } else { + // val newv = Variable(freshId((f.freeVariables ++ fv_in_f).map(_.id), bound.id)) + // val newInner = inner.substitute(bound := newv) + // val induct = findSubterm2(newInner, subs) + // if (!induct._2) (f, false) + // else (BinderFormula(label, newv, induct._1), true) + // } + // } + // } + + // private def findSubformula2(f: Formula, subs: Seq[(VariableFormula, Formula)]): (Formula, Boolean) = { + // val eq = subs.find(s => isSame(f, s._2)) + // if (eq.nonEmpty) (eq.get._1, true) + // else + // f match { + // case f: AtomicFormula => (f, false) + // case AppliedConnector(label, args) => + // val induct = condflat(args.map(findSubformula2(_, subs))) + // if (!induct._2) (f, false) + // else (AppliedConnector(label, induct._1), true) + // case BinderFormula(label, bound, inner) => + // val fv_in_f = subs.flatMap(_._2.freeVariables) + // if (!fv_in_f.contains(bound)) { + // val induct = findSubformula2(inner, subs) + // if (!induct._2) (f, false) + // else (BinderFormula(label, bound, induct._1), true) + // } else { + // val newv = Variable(freshId((f.freeVariables ++ fv_in_f).map(_.id), bound.id)) + // val newInner = inner.substitute(bound := newv) + // val induct = findSubformula2(newInner, subs) + // if (!induct._2) (f, false) + // else (BinderFormula(label, newv, induct._1), true) + // } + // } + // } + + // def findSubterm(t: Term, subs: Seq[(Variable, Term)]): Option[LambdaExpression[Term, Term, ?]] = { + // val vars = subs.map(_._1) + // val r = findSubterm2(t, subs) + // if (r._2) Some(LambdaExpression(vars, r._1, vars.size)) + // else None + // } + + // 
def findSubterm(f: Formula, subs: Seq[(Variable, Term)]): Option[LambdaExpression[Term, Formula, ?]] = { + // val vars = subs.map(_._1) + // val r = findSubterm2(f, subs) + // if (r._2) Some(LambdaExpression(vars, r._1, vars.size)) + // else None + // } + + // def findSubformula(f: Formula, subs: Seq[(VariableFormula, Formula)]): Option[LambdaExpression[Formula, Formula, ?]] = { + // val vars = subs.map(_._1) + // val r = findSubformula2(f, subs) + // if (r._2) Some(LambdaExpression(vars, r._1, vars.size)) + // else None + // } + + // def applyLeftRight(using lib: lisa.utils.prooflib.Library, proof: lib.Proof)( + // phi: Formula + // )(premise: proof.Fact)(rightLeft: Boolean = false, toLeft: Boolean = true, toRight: Boolean = true): proof.ProofTacticJudgement = { + // import lisa.utils.K + // val originSequent = proof.getSequent(premise) + // val leftOrigin = AppliedConnector(And, originSequent.left.toSeq) + // val rightOrigin = AppliedConnector(Or, originSequent.right.toSeq) + + // if (!toLeft && !toRight) return proof.InvalidProofTactic("applyLeftRight called with no substitution selected (toLeft or toRight).") + + // phi match { + // case AppliedPredicate(label, args) if label == equality => + // val left = args(0) + // val right = args(1) + // val fv_in_phi = (originSequent.left ++ originSequent.right).flatMap(_.allSchematicLabels).map(_.id) + // val v = Variable(nFreshId(fv_in_phi, 1).head) + // lazy val isolatedLeft = originSequent.left.filterNot(f => isSame(f, phi)).map(f => (f, findSubterm(f, IndexedSeq(v -> left)))) + // lazy val isolatedRight = originSequent.right.map(f => (f, findSubterm(f, IndexedSeq(v -> left)))) + // if ((!toLeft || isolatedLeft.forall(_._2.isEmpty)) && (!toRight || isolatedRight.forall(_._2.isEmpty))) + // if (rightLeft) + // return proof.InvalidProofTactic(s"There is no instance of ${right} to replace.") + // else + // applyLeftRight(equality(right, left))(premise)(true, toLeft, toRight) match { + // case proof.InvalidProofTactic(m) => return proof.InvalidProofTactic(s"There is no instance of ${left} to replace.") + // case v: proof.ValidProofTactic => return v + // } + + // val leftForm = AppliedConnector(And, isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) + // val rightForm = AppliedConnector(Or, isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) + // val newleft = if (toLeft) isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.left + // val newright = if (toRight) isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.right + // val result1: Sequent = (AppliedConnector(And, newleft.toSeq), phi) |- rightOrigin + // val result2: Sequent = result1.left |- AppliedConnector(Or, newright.toSeq) + // var scproof: Seq[K.SCProofStep] = Seq(K.Restate((leftOrigin |- rightOrigin).underlying, -1)) + // if (toLeft) + // scproof = scproof :+ K.LeftSubstEq( + // result1.underlying, + // scproof.length - 1, + // List(K.LambdaTermTerm(Seq(), left.underlying) -> (K.LambdaTermTerm(Seq(), right.underlying))), + // (Seq(v.underlyingLabel), leftForm.underlying) + // ) + // if (toRight) + // scproof = scproof :+ K.RightSubstEq( + // result2.underlying, + // scproof.length - 1, + // List(K.LambdaTermTerm(Seq(), left.underlying) -> (K.LambdaTermTerm(Seq(), right.underlying))), + // (Seq(v.underlyingLabel), rightForm.underlying) + // ) + // val bot = newleft + phi |- newright + // scproof = scproof :+ K.Restate(bot.underlying, 
scproof.length - 1) + + // proof.ValidProofTactic( + // bot, + // scproof, + // Seq(premise) + // ) + + // case AppliedConnector(label, args) if label == Iff => + // val left = args(0) + // val right = args(1) + // val fv_in_phi = (originSequent.left ++ originSequent.right).flatMap(_.allSchematicLabels).map(_.id) + // val H = VariableFormula(nFreshId(fv_in_phi, 1).head) + // lazy val isolatedLeft = originSequent.left.filterNot(f => isSame(f, phi)).map(f => (f, findSubformula(f, IndexedSeq(H -> left)))) + // lazy val isolatedRight = originSequent.right.map(f => (f, findSubformula(f, IndexedSeq(H -> left)))) + // if ((!toLeft || isolatedLeft.forall(_._2.isEmpty)) && (!toRight || isolatedRight.forall(_._2.isEmpty))) + // if (rightLeft) + // return proof.InvalidProofTactic(s"There is no instance of ${right} to replace.") + // else + // applyLeftRight(Iff(right, left))(premise)(true, toLeft, toRight) match { + // case proof.InvalidProofTactic(m) => return proof.InvalidProofTactic(s"There is no instance of ${left} to replace.") + // case v: proof.ValidProofTactic => return v + // } + + // val leftForm = AppliedConnector(And, isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) + // val rightForm = AppliedConnector(Or, isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.body).toSeq) + // val newleft = if (toLeft) isolatedLeft.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.left + // val newright = if (toRight) isolatedRight.map((f, ltf) => if (ltf.isEmpty) f else ltf.get.applyUnsafe(Seq(right))) else originSequent.right + // val result1: Sequent = (AppliedConnector(And, newleft.toSeq), phi) |- rightOrigin + // val result2: Sequent = result1.left |- AppliedConnector(Or, newright.toSeq) + + // var scproof: Seq[K.SCProofStep] = Seq(K.Restate((leftOrigin |- rightOrigin).underlying, -1)) + // if (toLeft) + // scproof = scproof :+ K.LeftSubstIff( + // result1.underlying, + // scproof.length - 1, + // List(K.LambdaTermFormula(Seq(), left.underlying) -> (K.LambdaTermFormula(Seq(), right.underlying))), + // (Seq(H.underlyingLabel), leftForm.underlying) + // ) + // if (toRight) + // scproof = scproof :+ K.RightSubstIff( + // result2.underlying, + // scproof.length - 1, + // List(K.LambdaTermFormula(Seq(), left.underlying) -> (K.LambdaTermFormula(Seq(), right.underlying))), + // (Seq(H.underlyingLabel), rightForm.underlying) + // ) + + // val bot = newleft + phi |- newright + // scproof = scproof :+ K.Restate(bot.underlying, scproof.length - 1) + + // proof.ValidProofTactic( + // bot, + // scproof, + // Seq(premise) + // ) + // case _ => proof.InvalidProofTactic(s"Formula in applySingleSimp need to be of the form a=b or q<=>p and not ${phi}") + // } + // } + + // @nowarn("msg=.*the type test for proof.Fact cannot be checked at runtime*") + // def apply(using + // lib: lisa.utils.prooflib.Library, + // proof: lib.Proof, + // line: sourcecode.Line, + // file: sourcecode.File + // )(f: proof.Fact | Formula, rightLeft: Boolean = false, toLeft: Boolean = true, toRight: Boolean = true)( + // premise: proof.Fact + // ): proof.ProofTacticJudgement = { + // f match { + // case phi: Formula => applyLeftRight(phi)(premise)(rightLeft, toLeft, toRight) + // case f: proof.Fact => + // val seq = proof.getSequent(f) + // val phi = seq.right.head + // val sp = TacticSubproof { + // val x = applyLeftRight(phi)(premise)(rightLeft, toLeft, toRight) + // proof.library.have(x) + // proof.library.andThen(SimpleDeducedSteps.Discharge(f)) + // } + + // 
BasicStepTactic.unwrapTactic(sp)("Subproof substitution fail.") + // } + + // } + + // def toLeft(using lib: lisa.utils.prooflib.Library, proof: lib.Proof, line: sourcecode.Line, file: sourcecode.File)(f: proof.Fact | Formula, rightLeft: Boolean = false)( + // premise: proof.Fact + // ): proof.ProofTacticJudgement = apply(f, rightLeft, toLeft = true, toRight = false)(premise) + + // def toRight(using lib: lisa.utils.prooflib.Library, proof: lib.Proof, line: sourcecode.Line, file: sourcecode.File)(f: proof.Fact | Formula, rightLeft: Boolean = false)( + // premise: proof.Fact + // ): proof.ProofTacticJudgement = apply(f, rightLeft, toLeft = false, toRight = true)(premise) + + // } + +end Substitution diff --git a/lisa-sets/src/main/scala/lisa/automation/Tableau.scala b/lisa-sets/src/main/scala/lisa/automation/Tableau.scala index de6f6bcbd..1df0fbf3b 100644 --- a/lisa-sets/src/main/scala/lisa/automation/Tableau.scala +++ b/lisa-sets/src/main/scala/lisa/automation/Tableau.scala @@ -1,14 +1,10 @@ package lisa.automation -import lisa.fol.FOL as F -import lisa.prooflib.Library -import lisa.prooflib.OutputManager.* -import lisa.prooflib.ProofTacticLib.* +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager.* +import lisa.utils.prooflib.ProofTacticLib.* import lisa.utils.K import lisa.utils.K.{_, given} -import lisa.utils.parsing.FOLPrinter.prettyFormula -import lisa.utils.parsing.FOLPrinter.prettySCProof -import lisa.utils.parsing.FOLPrinter.prettySequent -import lisa.utils.parsing.FOLPrinter.prettyTerm import scala.collection.immutable.HashMap import scala.collection.immutable.HashSet @@ -47,7 +43,7 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent def from(using lib: Library, proof: lib.Proof)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { val botK = bot.underlying - val premsFormulas: Seq[((proof.Fact, Formula), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex + val premsFormulas: Seq[((proof.Fact, Expression), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex val initProof = premsFormulas.map(s => Restate(() |- s._1._2, -(1 + s._2))).toList val sqToProve = botK ++<< (premsFormulas.map(s => s._1._2).toSet |- ()) @@ -69,13 +65,13 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent def solve(sequent: K.Sequent): Option[SCProof] = { - val f = K.ConnectorFormula(K.And, (sequent.left.toSeq ++ sequent.right.map(f => K.ConnectorFormula(K.Neg, List(f))))) - val taken = f.schematicTermLabels + val f = K.multiand(sequent.left.toSeq ++ sequent.right.map(f => K.neg)) + val taken = f.allVariables val nextIdNow = if taken.isEmpty then 0 else taken.maxBy(_.id.no).id.no + 1 - val (fnamed, nextId, _) = makeVariableNamesUnique(f, nextIdNow, f.freeVariables) + val (fnamed, nextId) = makeVariableNamesUnique(f, nextIdNow, f.freeVariables) val nf = reducedNNFForm(fnamed) - val uv = VariableLabel(Identifier("§", nextId)) + val uv = Variable(Identifier("§", nextId), Term) val proof = decide(Branch.empty(nextId + 1, uv).prepended(nf)) proof match case None => None @@ -101,53 +97,50 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent * maxIndex stores an index that is used to generate fresh variable names. 
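 *
 * As a quick illustration (informal notation, example symbols only): prepending the formulas
 * P(c), ¬Q(c), R ∧ S, T ∨ U, ∃x. V(x) and ∀y. W(y) to an empty branch should leave
 * alpha = [R ∧ S], beta = [T ∨ U], delta = [∃x. V(x)], gamma = [∀y. W(y)] and
 * atoms = ([P(c)], [¬Q(c)]).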
*/ case class Branch( - alpha: List[ConnectorFormula], // label = And - beta: List[ConnectorFormula], // label = Or - delta: List[BinderFormula], // Exists(...)) - gamma: List[BinderFormula], // Forall(...) - atoms: (List[AtomicFormula], List[AtomicFormula]), // split into positive and negatives! - unifiable: Map[VariableLabel, (BinderFormula, Int)], // map between metavariables and the original formula they came from, with the penalty associated to the complexity of the formula. - numberInstantiated: Map[VariableLabel, Int], // map between variables and the number of times they have been instantiated - - skolemized: Set[VariableLabel], // set of variables that have been skolemized - triedInstantiation: Map[VariableLabel, Set[Term]], // map between metavariables and the term they were already instantiated with + alpha: List[Expression], // label = And + beta: List[Expression], // label = Or + delta: List[Expression], // Exists(...)) + gamma: List[Expression], // Forall(...) + atoms: (List[Expression], List[Expression]), // split into positives and negatives! + unifiable: Map[Variable, (Expression, Int)], // map between metavariables and the original formula they came from, with the penalty associated with the complexity of the formula. + numberInstantiated: Map[Variable, Int], // map between variables and the number of times they have been instantiated + + skolemized: Set[Variable], // set of variables that have been skolemized + triedInstantiation: Map[Variable, Set[Expression]], // map between metavariables and the terms they were already instantiated with maxIndex: Int, // the maximum index used for skolemization and metavariables - varsOrder: Map[VariableLabel, Int], // the order in which variables were instantiated. In particular, if the branch contained the formula ∀x. ∀y. ... then x > y. - unusedVar: VariableLabel // a variable the is neither free nor bound in the original formula. + varsOrder: Map[Variable, Int], // the order in which variables were instantiated. In particular, if the branch contained the formula ∀x. ∀y. ... then x > y. + unusedVar: Variable // a variable that is neither free nor bound in the original formula. ) { - def pop(f: Formula): Branch = f match - case f @ ConnectorFormula(Or, args) => + def pop(f: Expression): Branch = f match + case f @ Or(l, r) => if (beta.nonEmpty && beta.head.uniqueNumber == f.uniqueNumber) copy(beta = beta.tail) else throw Exception("First formula of beta is not f") - case f @ BinderFormula(Exists, x, inner) => + case f @ Exists(x, inner) => if (delta.nonEmpty && delta.head.uniqueNumber == f.uniqueNumber) copy(delta = delta.tail) else throw Exception("First formula of delta is not f") - case f @ BinderFormula(Forall, x, inner) => + case f @ Forall(x, inner) => if (gamma.nonEmpty && gamma.head.uniqueNumber == f.uniqueNumber) copy(gamma = gamma.tail) else throw Exception("First formula of gamma is not f") - case ConnectorFormula(And, args) => + case And(left, right) => if (alpha.nonEmpty && alpha.head.uniqueNumber == f.uniqueNumber) copy(alpha = alpha.tail) else throw Exception("First formula of alpha is not f") - case f @ AtomicFormula(id, args) => - throw Exception("Should not pop Atoms") - case f @ ConnectorFormula(Neg, List(AtomicFormula(id, args))) => - throw Exception("Should not pop Atoms") - case _ => ??? 
- - def prepended(f: Formula): Branch = f match - case f @ ConnectorFormula(And, args) => this.copy(alpha = f :: alpha) - case f @ ConnectorFormula(Or, args) => this.copy(beta = f :: beta) - case f @ BinderFormula(Exists, x, inner) => this.copy(delta = f :: delta) - case f @ BinderFormula(Forall, x, inner) => this.copy(gamma = f :: gamma) - case f @ AtomicFormula(id, args) => - this.copy(atoms = (f :: atoms._1, atoms._2)) - case ConnectorFormula(Neg, List(f @ AtomicFormula(id, args))) => + case _ => + throw Exception("Should not pop Atoms: " + f.repr) + + def prepended(f: Expression): Branch = f match + case And(left, right) => this.copy(alpha = f :: alpha) + case Or(left, right) => this.copy(beta = f :: beta) + case Exists(x, inner) => this.copy(delta = f :: delta) + case Forall(x, inner) => this.copy(gamma = f :: gamma) + case Neg(_) => this.copy(atoms = (atoms._1, f :: atoms._2)) + case _ => + this.copy(atoms = (f :: atoms._1, atoms._2)) case _ => ??? - def prependedAll(l: Seq[Formula]): Branch = l.foldLeft(this)((a, b) => a.prepended(b)) + def prependedAll(l: Seq[Expression]): Branch = l.foldLeft(this)((a, b) => a.prepended(b)) def asSequent: Sequent = (beta ++ delta ++ gamma ++ atoms._1 ++ atoms._2.map(a => !a)).toSet |- Set() // inefficient, not used import Branch.* override def toString(): String = - val pretUnif = unifiable.map((x, f) => x.id + " -> " + prettyFormula(f._1) + " : " + f._2).mkString("Unif(", ", ", ")") + val pretUnif = unifiable.map((x, f) => x.id + " -> " + f._1.repr + " : " + f._2).mkString("Unif(", ", ", ")") // val pretTried = triedInstantiation.map((x, t) => x.id + " -> " + prettyTerm(t, true)).mkString("Tried(", ", ", ")") (s"Branch(" + s"${RED(prettyIte(alpha, "alpha"))}, " + @@ -159,80 +152,63 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent } object Branch { - def empty = Branch(Nil, Nil, Nil, Nil, (Nil, Nil), Map.empty, Map.empty, Set.empty, Map.empty, 1, Map.empty, VariableLabel(Identifier("§uv", 0))) - def empty(n: Int, uv: VariableLabel) = Branch(Nil, Nil, Nil, Nil, (Nil, Nil), Map.empty, Map.empty, Set.empty, Map.empty, n, Map.empty, uv) - def prettyIte(l: Iterable[Formula], head: String): String = l match + def empty = Branch(Nil, Nil, Nil, Nil, (Nil, Nil), Map.empty, Map.empty, Set.empty, Map.empty, 1, Map.empty, Variable(Identifier("§uv", 0), Term)) + def empty(n: Int, uv: Variable) = Branch(Nil, Nil, Nil, Nil, (Nil, Nil), Map.empty, Map.empty, Set.empty, Map.empty, n, Map.empty, uv) + def prettyIte(l: Iterable[Expression], head: String): String = l match case Nil => "Nil" - case _ => l.map(prettyFormula(_, true)).mkString(head + "(", ", ", ")") + case _ => l.map(_.repr).mkString(head + "(", ", ", ")") } - def makeVariableNamesUnique(f: Formula, nextId: Int, seen: Set[VariableLabel]): (Formula, Int, Set[VariableLabel]) = f match - case ConnectorFormula(label, args) => - val (nArgs, nnId, nSeen) = args.foldLeft((List(): Seq[Formula], nextId, seen))((prev, next) => - val (l, n, s) = prev - val (nf, nn, ns) = makeVariableNamesUnique(next, n, s) - (l :+ nf, nn, ns) - ) - (ConnectorFormula(label, nArgs), nnId, nSeen) - case pf: AtomicFormula => (pf, nextId, seen) - case BinderFormula(label, x, inner) => - if (seen.contains(x)) - val (nInner, nnId, nSeen) = makeVariableNamesUnique(inner, nextId + 1, seen) - val newX = VariableLabel(Identifier(x.id, nextId)) - (BinderFormula(label, newX, substituteVariablesInFormula(nInner, Map(x -> newX), Seq())), nnId, nSeen) - else - val (nInner, nnId, nSeen) = 
makeVariableNamesUnique(inner, nextId, seen + x) - (BinderFormula(label, x, nInner), nnId, nSeen) - - type Substitution = Map[VariableLabel, Term] + def makeVariableNamesUnique(f: Expression, nextId: Int, seen2: Set[Variable]): (Expression, Int) = { + var nextId2: Int = nextId + var seen = seen2 + def recurse(f: Expression): Expression = f match + case Application(f, a) => + Application(recurse(f), recurse(a)) + case Lambda(v, body) => + if seen.contains(v) then + val newV = Variable(Identifier(v.id, nextId2), Term) + nextId2 += 1 + Lambda(newV, substituteVariables(recurse(body), Map(v -> newV))) + else + seen += v + Lambda(v, recurse(body)) + case _ => f + (recurse(f), nextId2) + } + type Substitution = Map[Variable, Expression] val Substitution = HashMap - def prettySubst(s: Substitution): String = s.map((x, t) => x.id + " -> " + prettyTerm(t, true)).mkString("Subst(", ", ", ")") + def prettySubst(s: Substitution): String = s.map((x, t) => x.id + " -> " + t.repr).mkString("Subst(", ", ", ")") /** * Detect if two terms can be unified, and if so, return a substitution that unifies them. */ - def unify(t1: Term, t2: Term, current: Substitution, br: Branch): Option[Substitution] = (t1, t2) match - case (VariableTerm(x), VariableTerm(y)) if (br.unifiable.contains(x) || x.id.no > br.maxIndex) && (br.unifiable.contains(y) || y.id.no > br.maxIndex) => + def unify(t1: Expression, t2: Expression, current: Substitution, br: Branch): Option[Substitution] = (t1, t2) match + case (x: Variable, y: Variable) if (br.unifiable.contains(x) || x.id.no > br.maxIndex) && (br.unifiable.contains(y) || y.id.no > br.maxIndex) => if x == y then Some(current) else if current.contains(x) then unify(current(x), t2, current, br) else if current.contains(y) then unify(t1, current(y), current, br) else Some(current + (x -> y)) - case (VariableTerm(x), t2: Term) if br.unifiable.contains(x) || x.id.no > br.maxIndex => - val newt2 = substituteVariablesInTerm(t2, current) + case (x: Variable, t2: Expression) if br.unifiable.contains(x) || x.id.no > br.maxIndex => + val newt2 = substituteVariables(t2, current) if newt2.freeVariables.contains(x) then None else if (current.contains(x)) unify(current(x), newt2, current, br) else Some(current + (x -> newt2)) - case (t1: Term, VariableTerm(y)) if br.unifiable.contains(y) || y.id.no > br.maxIndex => - val newt1 = substituteVariablesInTerm(t1, current) + case (t1: Expression, y: Variable) if br.unifiable.contains(y) || y.id.no > br.maxIndex => + val newt1 = substituteVariables(t1, current) if newt1.freeVariables.contains(y) then None else if (current.contains(y)) unify(newt1, current(y), current, br) else Some(current + (y -> newt1)) - case (Term(label1, args1), Term(label2, args2)) => - if label1 == label2 && args1.size == args2.size then - args1 - .zip(args2) - .foldLeft(Some(current): Option[Substitution])((prev, next) => - prev match - case None => None - case Some(s) => unify(next._1, next._2, s, br) - ) - else None + case (Application(f1, a1), Application(f2, a2)) => + unify(f1, f2, current, br).flatMap(s => unify(a1, a2, s, br)) /** * Detect if two atoms can be unified, and if so, return a substitution that unifies them. 
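 *
 * For example (informal notation, with X and Y standing for metavariables registered in
 * branch.unifiable): unifying the atoms P(X, f(a)) and P(g(b), Y) should produce the
 * substitution {X -> g(b), Y -> f(a)}, while P(X) and P(f(X)) should not unify, since the
 * occurs check on freeVariables rejects binding X to a term that contains X.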
*/ - def unifyPred(pos: AtomicFormula, neg: AtomicFormula, br: Branch): Option[Substitution] = { - (pos, neg) match - case (AtomicFormula(id1, args1), AtomicFormula(id2, args2)) if (id1 == id2 && args1.size == args2.size) => - args1 - .zip(args2) - .foldLeft(Some(Substitution.empty): Option[Substitution])((prev, next) => - prev match - case None => None - case Some(s) => unify(next._1, next._2, s, br) - ) - case _ => None + def unifyPred(pos: Expression, neg: Expression, br: Branch): Option[Substitution] = { + assert(pos.sort == Formula && neg.sort == Formula) + unify(pos, neg, Substitution.empty, br) } @@ -242,20 +218,18 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent * The substitution cannot do substitutions that were already done in branch.triedInstantiation. * When multiple substitutions are possible, the one with the smallest size is returned. (Maybe there is a better heuristic, like distance from the root?) */ - def close(branch: Branch): Option[(Substitution, Set[Formula])] = { + def close(branch: Branch): Option[(Substitution, Set[Expression])] = { val newMap = branch.atoms._1 .flatMap(pred => pred.freeVariables.filter(v => branch.unifiable.contains(v))) - .map(v => v -> VariableLabel(Identifier(v.id.name, v.id.no + branch.maxIndex + 1))) + .map(v => v -> Variable(Identifier(v.id.name, v.id.no + branch.maxIndex + 1), Term)) .toMap - val newMapTerm = newMap.map((k, v) => k -> VariableTerm(v)) val inverseNewMap = newMap.map((k, v) => v -> k).toMap - val inverseNewMapTerm = inverseNewMap.map((k, v) => k -> VariableTerm(v)) - val pos = branch.atoms._1.map(pred => substituteVariablesInFormula(pred, newMapTerm, Seq())).asInstanceOf[List[AtomicFormula]].iterator - var substitutions: List[(Substitution, Set[Formula])] = Nil + val pos = branch.atoms._1.map(pred => substituteVariables(pred, newMap)).iterator + var substitutions: List[(Substitution, Set[Expression])] = Nil while (pos.hasNext) { val p = pos.next() - if (p.label == bot) return Some((Substitution.empty, Set(bot))) + if (p == bot) return Some((Substitution.empty, Set(bot))) val neg = branch.atoms._2.iterator while (neg.hasNext) { val n = neg.next() @@ -270,12 +244,12 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent ( sub.flatMap((v, t) => if v.id.no > branch.maxIndex then - if t == inverseNewMapTerm(v) then None - else Some(inverseNewMap(v) -> substituteVariablesInTerm(t, inverseNewMapTerm.map((v, t) => v -> substituteVariablesInTerm(t, sub)))) + if t == inverseNewMap(v) then None + else Some(inverseNewMap(v) -> substituteVariables(t, inverseNewMap.map((v, t) => v -> substituteVariables(t, sub)))) else if newMap.contains(v) && t == newMap(v) then None - else Some(v -> substituteVariablesInTerm(t, inverseNewMapTerm)) + else Some(v -> substituteVariables(t, inverseNewMap)) ), - set.map(f => substituteVariablesInFormula(f, inverseNewMapTerm, Seq())) + set.map(f => substituteVariables(f, inverseNewMap)) ) ) @@ -290,7 +264,7 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent } - def bestSubst(substs: List[(Substitution, Set[Formula])], branch: Branch): Option[(Substitution, Set[Formula])] = { + def bestSubst(substs: List[(Substitution, Set[Expression])], branch: Branch): Option[(Substitution, Set[Expression])] = { if substs.isEmpty then return None val minSize = substs.minBy(_._1.size) val smallSubst = substs.filter(_._1.size == minSize._1.size) @@ -299,22 +273,22 @@ object Tableau extends ProofTactic with ProofSequentTactic with 
ProofFactSequent val best = smallSubst.minBy(s => substitutionScore(s._1, branch)) Some(best) } - def formulaPenalty(f: Formula, branch: Branch): Int = f match - case ConnectorFormula(And, args) => 10 + args.map(formulaPenalty(_, branch)).sum - case ConnectorFormula(Or, args) => 40 + args.map(formulaPenalty(_, branch)).sum - case BinderFormula(Exists, x, inner) => 30 + formulaPenalty(inner, branch) - case BinderFormula(Forall, x, inner) => 200 + formulaPenalty(inner, branch) - case AtomicFormula(id, args) => 0 - case ConnectorFormula(Neg, List(AtomicFormula(id, args))) => 0 - case _ => ??? + def formulaPenalty(f: Expression, branch: Branch): Int = f match + case And(left, right) => 10 + formulaPenalty(left, branch) + formulaPenalty(right, branch) + case Or(left, right) => 40 + formulaPenalty(left, branch) + formulaPenalty(right, branch) + case Exists(x, inner) => 30 + formulaPenalty(inner, branch) + case Forall(x, inner) => 200 + formulaPenalty(inner, branch) + case _ => 0 def substitutionScore(subst: Substitution, branch: Branch): Int = { - def pairPenalty(v: VariableLabel, t: Term) = { + def pairPenalty(v: Variable, t: Expression) = { val variablePenalty = branch.unifiable(v)._2 + branch.numberInstantiated(v) * 20 - def termPenalty(t: Term): Int = t match - case VariableTerm(x) => if branch.unifiable.contains(x) then branch.unifiable(x)._2 * 1 else 0 - case Term(label, args) => 100 + args.map(termPenalty).sum - variablePenalty + termPenalty(t) + def termPenalty(t: Expression): Int = t match + case x: Variable => if branch.unifiable.contains(x) then branch.unifiable(x)._2 * 1 else 0 + case c: Constant => 40 + case Application(f, a) => 100 + termPenalty(f) + termPenalty(a) + case Lambda(v, inner) => 100 + termPenalty(inner) + 1*variablePenalty + 1*termPenalty(t) } subst.map((v, t) => pairPenalty(v, t)).sum } @@ -325,7 +299,9 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent */ def alpha(branch: Branch): Branch = { val f = branch.alpha.head - branch.copy(alpha = branch.alpha.tail).prependedAll(f.args) + f match + case And(l, r) => branch.copy(alpha = branch.alpha.tail).prepended(l).prepended(r) + case _ => throw Exception("Error: First formula of alpha is not an And") } /** @@ -333,13 +309,13 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent * Add the exploded formula to the used list, if one beta formula is found * The beta list of the branch must not be empty */ - def beta(branch: Branch): List[(Branch, Formula)] = { + def beta(branch: Branch): List[(Branch, Expression)] = { val f = branch.beta.head val b1 = branch.copy(beta = branch.beta.tail) - val resList = f.args.toList.map(disjunct => { - ((b1.prepended(disjunct), disjunct)) - }) - resList + f match + case Or(l, r) => + List((b1.prepended(l), l), (b1.prepended(r), r)) + case _ => throw Exception("Error: First formula of beta is not an Or") } /** @@ -347,13 +323,16 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent * Add the unquantified formula to the branch * Since the bound variable is not marked as suitable for instantiation, it behaves as a constant symbol (skolem) */ - def delta(branch: Branch): (Branch, VariableLabel, Formula) = { + def delta(branch: Branch): (Branch, Variable, Expression) = { val f = branch.delta.head - if branch.skolemized.contains(branch.delta.head.bound) then - val newX = VariableLabel(Identifier(f.bound.id.name, branch.maxIndex)) - val newInner = substituteVariablesInFormula(f.inner, Map(f.bound -> newX), 
Seq()) - (branch.copy(delta = branch.delta.tail, maxIndex = branch.maxIndex + 1).prepended(newInner), newX, newInner) - else (branch.copy(delta = branch.delta.tail, skolemized = branch.skolemized + f.bound).prepended(f.inner), f.bound, f.inner) + f match + case Exists(v, body) => + if branch.skolemized.contains(v) then + val newV = Variable(Identifier(v.id.name, branch.maxIndex), Term) + val newInner = substituteVariables(body, Map(v -> newV)) + (branch.copy(delta = branch.delta.tail, maxIndex = branch.maxIndex + 1).prepended(newInner), newV, newInner) + else (branch.copy(delta = branch.delta.tail, skolemized = branch.skolemized + v).prepended(body), v, body) + case _ => throw Exception("Error: First formula of delta is not an Exists") } /** @@ -361,36 +340,43 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent * Add the unquantified formula to the branch and mark the bound variable as suitable for unification * This step will most of the time be cancelled when building the proof, unless any arbitrary instantiation is sufficient to get a proof. */ - def gamma(branch: Branch): (Branch, VariableLabel, Formula) = { + def gamma(branch: Branch): (Branch, Variable, Expression) = { val f = branch.gamma.head - val (ni, nb) = branch.unifiable.get(f.bound) match - case None => - (f.inner, f.bound) - case Some(value) => - val newBound = VariableLabel(Identifier(f.bound.id.name, branch.maxIndex)) - val newInner = substituteVariablesInFormula(f.inner, Map(f.bound -> newBound), Seq()) - (newInner, newBound) - val b1 = branch.copy( - gamma = branch.gamma.tail, - unifiable = branch.unifiable + (nb -> (f, formulaPenalty(f.inner, branch))), - numberInstantiated = branch.numberInstantiated + (nb -> (branch.numberInstantiated.getOrElse(f.bound, 0))), - maxIndex = branch.maxIndex + 1, - varsOrder = branch.varsOrder + (nb -> branch.varsOrder.size) - ) - (b1.prepended(ni), nb, ni) + f match + case Forall(v, body) => + val (ni, nb) = branch.unifiable.get(v) match + case None => + (body, v) + case Some(value) => + val newBound = Variable(Identifier(v.id.name, branch.maxIndex), Term) + val newInner = substituteVariables(body, Map(v -> newBound)) + (newInner, newBound) + val b1 = branch.copy( + gamma = branch.gamma.tail, + unifiable = branch.unifiable + (nb -> (f, formulaPenalty(body, branch))), + numberInstantiated = branch.numberInstantiated + (nb -> (branch.numberInstantiated.getOrElse(v, 0))), + maxIndex = branch.maxIndex + 1, + varsOrder = branch.varsOrder + (nb -> branch.varsOrder.size) + ) + (b1.prepended(ni), nb, ni) + case _ => throw Exception("Error: First formula of gamma is not a Forall") + + } /** * When a closing unification has been found, apply it to the branch - * This does not backtracking: The metavariable remains available if it needs further instantiation. + * This does not do backtracking: The metavariable remains available if it needs further instantiation. 
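 *
 * For instance (informal notation): if the metavariable X was introduced for the gamma
 * formula ∀y. P(y) and the closing unification maps X to f(a), then P(f(a)) is prepended to
 * the branch, f(a) is recorded in triedInstantiation(X), and X itself remains available for
 * a later, different instantiation.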
*/ - def applyInst(branch: Branch, x: VariableLabel, t: Term): (Branch, Formula) = { + def applyInst(branch: Branch, x: Variable, t: Expression): (Branch, Expression) = { val f = branch.unifiable(x)._1 val newTried = branch.triedInstantiation.get(x) match case None => branch.triedInstantiation + (x -> Set(t)) case Some(s) => branch.triedInstantiation + (x -> (s + t)) - val inst = instantiate(f.inner, f.bound, t) + val inst = f match + case Forall(v, body) => instantiate(body, v, t) + case _ => throw Exception("Error: Formula in unifiable is not a Forall") val r = branch .prepended(inst) .copy( @@ -414,10 +400,14 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent else if (branch.alpha.nonEmpty) // If branch contains an Alpha formula (LeftAnd) val rec = alpha(branch) decide(rec).map((proof, step) => - if branch.alpha.head.args.exists(proof.head.bot.left.contains) then - val sequent = proof.head.bot.copy(left = (proof.head.bot.left -- branch.alpha.head.args) + branch.alpha.head) - (Weakening(sequent, proof.size - 1) :: proof, step + 1) - else (proof, step) + branch.alpha.head match + case Application(Application(and, left), right) => + + if proof.head.bot.left.contains(left) || proof.head.bot.left.contains(right) then + val sequent = proof.head.bot.copy(left = (proof.head.bot.left - left - right) + branch.alpha.head) + (Weakening(sequent, proof.size - 1) :: proof, step + 1) + else (proof, step) + case _ => throw Exception("Error: First formula of alpha is not an And") ) else if (branch.delta.nonEmpty) // If branch contains a Delta formula (LeftExists) val rec = delta(branch) @@ -452,17 +442,22 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent proof.map(proo => if needed == true then val sequent = ((proo.reverse.zip(list).flatMap((proof, bf) => proof.bot.left - bf._2).toSet + branch.beta.head) |- ()) - (LeftOr(sequent, treversed.reverse, branch.beta.head.args) :: proo, treversed.size) + branch.beta.head match + case Or(left, right) => + (LeftOr(sequent, treversed.reverse, Seq(left, right)) :: proo, treversed.size) + case _ => throw Exception("Error: First formula of beta is not an Or") else (proo, proo.size - 1) ) else if (branch.gamma.nonEmpty) // If branch contains a Gamma formula (LeftForall) val rec = gamma(branch) val upperProof = decide(rec._1) - // LeftForall(bot: Sequent, t1: Int, phi: Formula, x: VariableLabel, t: Term) + // LeftForall(bot: Sequent, t1: Int, phi: Expression, x: Variable, t: Expression) upperProof.map((proof, step) => if proof.head.bot.left.contains(rec._3) then val sequent = (proof.head.bot -<< rec._3) +<< branch.gamma.head - (LeftForall(sequent, step, branch.gamma.head.inner, branch.gamma.head.bound, rec._2()) :: proof, step + 1) + branch.gamma.head match + case Forall(v, body) => + (LeftForall(sequent, step, body, v, rec._2()) :: proof, step + 1) else (proof, step) ) else if (closeSubst.nonEmpty && closeSubst.get._1.nonEmpty) // If branch can be closed with Instantiation (LeftForall) @@ -472,20 +467,23 @@ object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequent upperProof.map((proof, step) => if proof.head.bot.left.contains(instantiated) then val sequent = (proof.head.bot -<< instantiated) +<< branch.unifiable(x)._1 - (LeftForall(sequent, step, branch.unifiable(x)._1.inner, branch.unifiable(x)._1.bound, t) :: proof, step + 1) + branch.unifiable(x)._1 match + case Forall(v, body) => + (LeftForall(sequent, step, body, v, t) :: proof, step + 1) else (proof, step) ) else None // 
End of decide } - def containsAlpha(set: Set[Formula], f: Formula) = f match { - case ConnectorFormula(And, args) => args.exists(set.contains) + def containsAlpha(set: Set[Expression], f: Expression): Boolean = f match { + case And(left, right) => containsAlpha(set, left) || containsAlpha(set, right) case _ => set.contains(f) } - def instantiate(f: Formula, x: VariableLabel, t: Term): Formula = f match - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(instantiate(_, x, t))) - case AtomicFormula(id, args) => AtomicFormula(id, args.map(substituteVariablesInTerm(_, Substitution(x -> t)))) - case BinderFormula(label, y, inner) => if (x == y) f else BinderFormula(label, y, instantiate(inner, x, t)) + def instantiate(f: Expression, x: Variable, t: Expression): Expression = f match + case v: Variable => if v == x then t else v + case c: Constant => c + case Application(f, a) => Application(instantiate(f, x, t), instantiate(a, x, t)) + case Lambda(v, inner) => if (v == x) f else Lambda(v, instantiate(inner, x, t)) } diff --git a/lisa-sets/src/main/scala/lisa/automation/Tautology.scala b/lisa-sets/src/main/scala/lisa/automation/Tautology.scala index d1367feeb..1f3cb9dae 100644 --- a/lisa-sets/src/main/scala/lisa/automation/Tautology.scala +++ b/lisa-sets/src/main/scala/lisa/automation/Tautology.scala @@ -1,9 +1,9 @@ package lisa.automation import lisa.automation.Substitution -import lisa.fol.FOL as F -import lisa.prooflib.Library -import lisa.prooflib.ProofTacticLib.* +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.ProofTacticLib.* import lisa.utils.K.{_, given} /** @@ -39,7 +39,7 @@ object Tautology extends ProofTactic with ProofSequentTactic with ProofFactSeque def from(using lib: Library, proof: lib.Proof)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { val botK = bot.underlying - val premsFormulas: Seq[((proof.Fact, Formula), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex + val premsFormulas: Seq[((proof.Fact, Expression), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex val initProof = premsFormulas.map(s => Restate(() |- s._1._2, -(1 + s._2))).toList val sqToProve = botK ++<< (premsFormulas.map(s => s._1._2).toSet |- ()) @@ -67,7 +67,7 @@ object Tautology extends ProofTactic with ProofSequentTactic with ProofFactSeque */ def solveSequent(s: Sequent): Either[SCProof, (String, Sequent)] = { val augSeq = augmentSequent(s) - val MaRvIn = VariableFormulaLabel(freshId(augSeq.formula.schematicFormulaLabels.map(_.id), "MaRvIn")) // arbitrary name that is unlikely to already exist in the formula + val MaRvIn = Variable(freshId(augSeq.formula.freeVariables.map(_.id), "MaRvIn"), Formula) // arbitrary name that is unlikely to already exist in the formula try { val steps = solveAugSequent(augSeq, 0)(using MaRvIn) @@ -78,7 +78,7 @@ object Tautology extends ProofTactic with ProofSequentTactic with ProofFactSeque ( "The statement may be incorrect or not provable within propositional logic.\n" + "The proof search failed because it needed the truth of the following sequent:\n" + - s"${lisa.utils.FOLPrinter.prettySequent(e.unsolvable)}", + s"${e.unsolvable.repr}", e.unsolvable ) ) @@ -88,36 +88,31 @@ object Tautology extends ProofTactic with ProofSequentTactic with ProofFactSeque // From there, private code. 
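  // The private machinery below follows a "reduce, pick an atom, split on that atom" scheme:
  // the goal is reduced with the OL normal form, a best atom is chosen, and the goal is proved
  // once with the atom replaced by ⊤ and once with ⊥, the two cases being recombined by a Cut.
  // A minimal, self-contained sketch of that branching idea on a toy propositional datatype
  // (hypothetical names, not the Lisa kernel API) is given here for orientation only:
  sealed trait PForm
  case object PTop extends PForm
  case object PBot extends PForm
  final case class PAtom(name: String) extends PForm
  final case class PNot(f: PForm) extends PForm
  final case class PAnd(l: PForm, r: PForm) extends PForm

  // replace every occurrence of the atom `a` by `by`
  def psubst(f: PForm, a: String, by: PForm): PForm = f match
    case PAtom(`a`) => by
    case PNot(g) => PNot(psubst(g, a, by))
    case PAnd(l, r) => PAnd(psubst(l, a, by), psubst(r, a, by))
    case other => other

  // a very small reducer, standing in for the OL-based reducedForm used in this file
  def psimp(f: PForm): PForm = f match
    case PNot(g) =>
      psimp(g) match
        case PTop => PBot
        case PBot => PTop
        case h => PNot(h)
    case PAnd(l, r) =>
      (psimp(l), psimp(r)) match
        case (PBot, _) | (_, PBot) => PBot
        case (PTop, h) => h
        case (h, PTop) => h
        case (h1, h2) => PAnd(h1, h2)
    case other => other

  def pFirstAtom(f: PForm): Option[String] = f match
    case PAtom(n) => Some(n)
    case PNot(g) => pFirstAtom(g)
    case PAnd(l, r) => pFirstAtom(l).orElse(pFirstAtom(r))
    case _ => None

  // validity by excluded middle on one atom at a time, mirroring what solveAugSequent does on sequents
  def pValid(f: PForm): Boolean = psimp(f) match
    case PTop => true
    case PBot => false
    case g =>
      pFirstAtom(g) match
        case Some(a) => pValid(psubst(g, a, PTop)) && pValid(psubst(g, a, PBot))
        case None => false // unreachable: an atom-free reduced formula is ⊤ or ⊥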
// Augmented Sequent - private case class AugSequent(decisions: (List[Formula], List[Formula]), formula: Formula) + private case class AugSequent(decisions: (List[Expression], List[Expression]), formula: Expression) // Transform a sequent into a format more adequate for solving private def augmentSequent(s: Sequent): AugSequent = { val f = reducedForm(sequentToFormula(s)) - val atoms: scala.collection.mutable.Map[Formula, Int] = scala.collection.mutable.Map.empty + val atoms: scala.collection.mutable.Map[Expression, Int] = scala.collection.mutable.Map.empty AugSequent((Nil, Nil), f) } - def reduceSequent(s: Sequent): Formula = { + def reduceSequent(s: Sequent): Expression = { val p = simplify(sequentToFormula(s)) val nf = computeNormalForm(p) val fln = fromLocallyNameless(nf, Map.empty, 0) - val res = toFormulaAIG(fln) + val res = toExpressionAIG(fln) res } // Find all "atoms" of the formula. // We mean atom in the propositional logic sense, so any formula starting with a predicate symbol, a binder or a schematic connector is an atom here. - def findBestAtom(f: Formula): Option[Formula] = { - val atoms: scala.collection.mutable.Map[Formula, Int] = scala.collection.mutable.Map.empty - def findAtoms2(f: Formula, add: Formula => Unit): Unit = f match { - case AtomicFormula(label, _) if label != top && label != bot => add(f) - case AtomicFormula(_, _) => () - case ConnectorFormula(label, args) => - label match { - case label: ConstantConnectorLabel => args.foreach(c => findAtoms2(c, add)) - case SchematicConnectorLabel(id, arity) => add(f) - } - case BinderFormula(label, bound, inner) => add(f) + def findBestAtom(f: Expression): Option[Expression] = { + val atoms: scala.collection.mutable.Map[Expression, Int] = scala.collection.mutable.Map.empty + def findAtoms2(f: Expression, add: Expression => Unit): Unit = f match { + case And(f1, f2) => findAtoms2(f1, add); findAtoms2(f2, add) + case Neg(f1) => findAtoms2(f1, add) + case _ if f != top && f != bot => add(f) } findAtoms2(f, a => atoms.update(a, { val g = atoms.get(a); if (g.isEmpty) 1 else g.get + 1 })) if (atoms.isEmpty) None else Some(atoms.toList.maxBy(_._2)._1) @@ -128,129 +123,80 @@ object Tautology extends ProofTactic with ProofSequentTactic with ProofFactSeque // Given a sequent, return a proof of that sequent if on exists that only uses propositional logic rules and reflexivity of equality. // Alternates between reducing the formulas using the OL algorithm for propositional logic and branching on an atom using excluded middle. // An atom is a subformula of the input that is either a predicate, a binder or a schematic connector, i.e. a subformula that has not meaning in propositional logic. - private def solveAugSequent(s: AugSequent, offset: Int)(using MaRvIn: VariableFormulaLabel): List[SCProofStep] = { + private def solveAugSequent(s: AugSequent, offset: Int)(using MaRvIn: Variable): List[SCProofStep] = { val bestAtom = findBestAtom(s.formula) val redF = reducedForm(s.formula) if (redF == top()) { - List(RestateTrue(s.decisions._1 ++ s.decisions._2.map((f: Formula) => Neg(f)) |- s.formula)) + List(RestateTrue(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- s.formula)) } else if (bestAtom.isEmpty) { assert(redF == bot()) // sanity check; If the formula has no atom left in it and is reduced, it should be either ⊤ or ⊥. 
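      // No atom is left to branch on and the reduction is ⊥: the accumulated decision literals
      // form a sequent that is not provable propositionally; it is reported through
      // NoProofFoundException and surfaces in the error message built by solveSequent.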
val res = s.decisions._1 |- redF :: s.decisions._2 // the branch that can't be closed throw new NoProofFoundException(res) } else { val atom = bestAtom.get - val optLambda = findSubformula(redF, Seq((MaRvIn, atom))) + val optLambda = findSubformula(redF, MaRvIn, atom) if (optLambda.isEmpty) return solveAugSequent(AugSequent(s.decisions, redF), offset) val lambdaF = optLambda.get - val seq1 = AugSequent((atom :: s.decisions._1, s.decisions._2), lambdaF(Seq(top()))) + val seq1 = AugSequent((atom :: s.decisions._1, s.decisions._2), substituteVariables(lambdaF, Map(MaRvIn -> top))) val proof1 = solveAugSequent(seq1, offset) val subst1 = RightSubstIff( - atom :: s.decisions._1 ++ s.decisions._2.map((f: Formula) => Neg(f)) |- redF, + atom :: s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- redF, offset + proof1.length - 1, - List((LambdaTermFormula(Seq(), atom), LambdaTermFormula(Seq(), top()))), - (lambdaF.vars, lambdaF.body) + Seq((atom, top)), + (Seq(MaRvIn), lambdaF) ) - val seq2 = AugSequent((s.decisions._1, atom :: s.decisions._2), lambdaF(Seq(bot()))) + val negatom = neg(atom) + val seq2 = AugSequent((s.decisions._1, atom :: s.decisions._2), substituteVariables(lambdaF, Map(MaRvIn -> bot))) val proof2 = solveAugSequent(seq2, offset + proof1.length + 1) val subst2 = RightSubstIff( - Neg(atom) :: s.decisions._1 ++ s.decisions._2.map((f: Formula) => Neg(f)) |- redF, - offset + proof1.length + proof2.length - 1 + 1, - List((LambdaTermFormula(Seq(), atom), LambdaTermFormula(Seq(), bot()))), - (lambdaF.vars, lambdaF.body) + negatom :: s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- redF, + offset + proof1.length + proof2.length + 1 - 1, + Seq((atom, bot)), + (Seq(MaRvIn), lambdaF) ) - val red2 = Restate(s.decisions._1 ++ s.decisions._2.map((f: Formula) => Neg(f)) |- (redF, atom), offset + proof1.length + proof2.length + 2 - 1) - val cutStep = Cut(s.decisions._1 ++ s.decisions._2.map((f: Formula) => Neg(f)) |- redF, offset + proof1.length + proof2.length + 3 - 1, offset + proof1.length + 1 - 1, atom) - val redStep = Restate(s.decisions._1 ++ s.decisions._2.map((f: Formula) => Neg(f)) |- s.formula, offset + proof1.length + proof2.length + 4 - 1) + val red2 = Restate(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- (redF, atom), offset + proof1.length + proof2.length + 2 - 1) + val cutStep = Cut(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- redF, offset + proof1.length + proof2.length + 3 - 1, offset + proof1.length + 1 - 1, atom) + val redStep = Restate(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- s.formula, offset + proof1.length + proof2.length + 4 - 1) redStep :: cutStep :: red2 :: subst2 :: proof2 ++ (subst1 :: proof1) } } - private def condflat[T](s: Seq[(T, Boolean)]): (Seq[T], Boolean) = (s.map(_._1), s.exists(_._2)) - private def findSubterm2(t: Term, subs: Seq[(VariableLabel, Term)]): (Term, Boolean) = { - val eq = subs.find(s => isSameTerm(t, s._2)) - if (eq.nonEmpty) (eq.get._1(), true) - else { - val induct = condflat(t.args.map(te => findSubterm2(te, subs))) - if (!induct._2) (t, false) - else (Term(t.label, induct._1), true) - } - } - private def findSubterm2(f: Formula, subs: Seq[(VariableLabel, Term)]): (Formula, Boolean) = { - f match { - case AtomicFormula(label, args) => - val induct = condflat(args.map(findSubterm2(_, subs))) - if (!induct._2) (f, false) - else (AtomicFormula(label, induct._1), true) - case ConnectorFormula(label, args) => - val induct = 
condflat(args.map(findSubterm2(_, subs))) - if (!induct._2) (f, false) - else (ConnectorFormula(label, induct._1), true) - case BinderFormula(label, bound, inner) => - val fv_in_f = subs.flatMap(e => e._2.freeVariables + e._1) - if (!fv_in_f.contains(bound)) { - val induct = findSubterm2(inner, subs) - if (!induct._2) (f, false) - else (BinderFormula(label, bound, induct._1), true) - } else { - val newv = VariableLabel(freshId((f.freeVariables ++ fv_in_f).map(_.id), bound.id)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> newv()), Seq.empty) - val induct = findSubterm2(newInner, subs) - if (!induct._2) (f, false) - else (BinderFormula(label, newv, induct._1), true) - } - } - } + private def condflat[T](s: Seq[(T, Boolean)]): (Seq[T], Boolean) = (s.map(_._1), s.exists(_._2)) - private def findSubformula2(f: Formula, subs: Seq[(VariableFormulaLabel, Formula)]): (Formula, Boolean) = { - val eq = subs.find(s => isSame(f, s._2)) - if (eq.nonEmpty) (eq.get._1(), true) + private def findSubformula2(f: Expression, x: Variable, e: Expression, fv: Set[Variable]): (Expression, Boolean) = { + if (isSame(f, e)) (x, true) else f match { - case AtomicFormula(label, args) => - (f, false) - case ConnectorFormula(label, args) => - val induct = condflat(args.map(findSubformula2(_, subs))) - if (!induct._2) (f, false) - else (ConnectorFormula(label, induct._1), true) - case BinderFormula(label, bound, inner) => - val fv_in_f = subs.flatMap(_._2.freeVariables) - if (!fv_in_f.contains(bound)) { - val induct = findSubformula2(inner, subs) + case Application(func, arg) => + val rf = findSubformula2(func, x, e, fv) + val ra = findSubformula2(arg, x, e, fv) + if (rf._2 || ra._2) (Application(rf._1, ra._1), true) + else (f, false) + case Lambda(v, inner) => + if (!fv.contains(v)) { + val induct = findSubformula2(inner, x, e, fv) if (!induct._2) (f, false) - else (BinderFormula(label, bound, induct._1), true) + else (Lambda(v, induct._1), true) } else { - val newv = VariableLabel(freshId((f.freeVariables ++ fv_in_f).map(_.id), bound.id)) - val newInner = substituteVariablesInFormula(inner, Map(bound -> newv()), Seq.empty) - val induct = findSubformula2(newInner, subs) + val newv = Variable(freshId((f.freeVariables ++ fv).map(_.id), v.id), v.sort) + val newInner = substituteVariables(inner, Map(v -> newv)) + val induct = findSubformula2(newInner, x, e, fv + newv) if (!induct._2) (f, false) - else (BinderFormula(label, newv, induct._1), true) + else (Lambda(newv, induct._1), true) } + case _ => (f, false) } } - def findSubterm(t: Term, subs: Seq[(VariableLabel, Term)]): Option[LambdaTermTerm] = { - val vars = subs.map(_._1) - val r = findSubterm2(t, subs) - if (r._2) Some(LambdaTermTerm(vars, r._1)) - else None - } - - def findSubterm(f: Formula, subs: Seq[(VariableLabel, Term)]): Option[LambdaTermFormula] = { - val vars = subs.map(_._1) - val r = findSubterm2(f, subs) - if (r._2) Some(LambdaTermFormula(vars, r._1)) - else None - } - def findSubformula(f: Formula, subs: Seq[(VariableFormulaLabel, Formula)]): Option[LambdaFormulaFormula] = { - val vars = subs.map(_._1) - val r = findSubformula2(f, subs) - if (r._2) Some(LambdaFormulaFormula(vars, r._1)) + def findSubformula(f: Expression, x: Variable, e: Expression): Option[Expression] = { + val r = findSubformula2(f, x, e, e.freeVariables) + if (r._2) Some(r._1) else None } diff --git a/lisa-sets/src/main/scala/lisa/automation/atp/Goeland.scala b/lisa-sets/src/main/scala/lisa/automation/atp/Goeland.scala index e8d50cf0b..2204e42d0 100644 --- 
a/lisa-sets/src/main/scala/lisa/automation/atp/Goeland.scala +++ b/lisa-sets/src/main/scala/lisa/automation/atp/Goeland.scala @@ -1,8 +1,8 @@ package lisa.automation.atp -import lisa.fol.FOL as F -import lisa.prooflib.Library -import lisa.prooflib.OutputManager -import lisa.prooflib.ProofTacticLib.* +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager +import lisa.utils.prooflib.ProofTacticLib.* import lisa.utils.K import lisa.utils.tptp.* @@ -81,10 +81,10 @@ object Goeland extends ProofTactic with ProofSequentTactic { val directory = File(foldername) if (directory != null) && !directory.exists() then directory.mkdirs() - val freevars = (sequent.left.flatMap(_.freeVariables) ++ sequent.right.flatMap(_.freeVariables) ).toSet.map(x => x -> K.Term(K.VariableLabel(K.Identifier("X"+x.id.name, x.id.no)), Seq())).toMap + val freevars = (sequent.left.flatMap(_.freeVariables) ++ sequent.right.flatMap(_.freeVariables) ).toSet.map(x => x -> K.Variable(K.Identifier("X"+x.id.name, x.id.no), x.sort) ).toMap val backMap = freevars.map{ - case (x: K.VariableLabel, K.Term(xx: K.VariableLabel, _)) => xx -> K.LambdaTermTerm(Seq(), K.Term(x, Seq())) + case (x: K.Variable, xx: K.Variable) => xx -> x case _ => throw new Exception("This should not happen") } val r = problemToFile(foldername, filename, "question"+i, axioms, sequent, source) diff --git a/lisa-sets/src/main/scala/lisa/automation/settheory/SetTheoryTactics.scala b/lisa-sets/src/main/scala/lisa/automation/settheory/SetTheoryTactics.scala index 651a3f7a1..fce9ad24b 100644 --- a/lisa-sets/src/main/scala/lisa/automation/settheory/SetTheoryTactics.scala +++ b/lisa-sets/src/main/scala/lisa/automation/settheory/SetTheoryTactics.scala @@ -2,15 +2,15 @@ package lisa.automation.settheory import lisa.SetTheoryLibrary.{_, given} import lisa.automation.Tautology -import lisa.fol.FOL.{_, given} +import lisa.utils.fol.FOL.{_, given} import lisa.kernel.proof.SequentCalculus as SCunique import lisa.maths.Quantifiers import lisa.maths.settheory.SetTheory -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.Library -import lisa.prooflib.ProofTacticLib.{_, given} -import lisa.prooflib.SimpleDeducedSteps.Restate -import lisa.prooflib.* +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.ProofTacticLib.{_, given} +import lisa.utils.prooflib.SimpleDeducedSteps.Restate +import lisa.utils.prooflib.* import lisa.utils.Printer import lisa.utils.unification.UnificationUtils.FormulaSubstitution import lisa.utils.unification.UnificationUtils.TermSubstitution diff --git a/lisa-sets/src/main/scala/lisa/maths/Quantifiers.scala b/lisa-sets/src/main/scala/lisa/maths/Quantifiers.scala index 1aeaf55ac..bad4ab927 100644 --- a/lisa-sets/src/main/scala/lisa/maths/Quantifiers.scala +++ b/lisa-sets/src/main/scala/lisa/maths/Quantifiers.scala @@ -5,13 +5,13 @@ package lisa.maths */ object Quantifiers extends lisa.Main { - private val x = variable - private val y = variable - private val z = variable - private val a = variable - private val p = formulaVariable - private val P = predicate[1] - private val Q = predicate[1] + private val x = variable[Term] + private val y = variable[Term] + private val z = variable[Term] + private val a = variable[Term] + private val p = variable[Formula] + private val P = variable[Term >>: Formula] + private val Q = variable[Term >>: Formula] /** * Theorem --- A formula is equivalent to itself universally quantified if @@ -42,7 +42,7 @@ 
object Quantifiers extends lisa.Main { ) { have((x === y) <=> P(y) |- (x === y) <=> P(y)) by Hypothesis thenHave(∀(y, (x === y) <=> P(y)) |- (x === y) <=> P(y)) by LeftForall - thenHave(∀(y, (x === y) <=> P(y)) |- P(x)) by InstFunSchema(Map(y -> x)) + thenHave(∀(y, (x === y) <=> P(y)) |- P(x)) by InstSchema(y := x) thenHave(∀(y, (x === y) <=> P(y)) |- ∃(x, P(x))) by RightExists thenHave(∃(x, ∀(y, (x === y) <=> P(y))) |- ∃(x, P(x))) by LeftExists thenHave(thesis) by Restate @@ -55,7 +55,7 @@ object Quantifiers extends lisa.Main { (x === y) /\ (y === z) |- (x === z) ) { have((x === y) |- (x === y)) by Hypothesis - thenHave(((x === y), (y === z)) |- (x === z)) by RightSubstEq.withParametersSimple(List((y, z)), lambda(y, x === y)) + thenHave(((x === y), (y === z)) |- (x === z)) by RightSubstEq.withParametersSimple(List((y, z)), (Seq(y), x === y)) thenHave(thesis) by Restate } @@ -95,7 +95,7 @@ object Quantifiers extends lisa.Main { ) { have(exists(x, P(x) /\ (y === x)) |- P(y)) subproof { have(P(x) |- P(x)) by Hypothesis - thenHave((P(x), y === x) |- P(y)) by RightSubstEq.withParametersSimple(List((y, x)), lambda(y, P(y))) + thenHave((P(x), y === x) |- P(y)) by RightSubstEq.withParametersSimple(List((y, x)), (Seq(y), P(y))) thenHave(P(x) /\ (y === x) |- P(y)) by Restate thenHave(thesis) by LeftExists } @@ -104,7 +104,7 @@ object Quantifiers extends lisa.Main { have(P(y) |- exists(x, P(x) /\ (y === x))) subproof { have(P(x) /\ (y === x) |- P(x) /\ (y === x)) by Hypothesis thenHave(P(x) /\ (y === x) |- exists(x, P(x) /\ (y === x))) by RightExists - thenHave(P(y) /\ (y === y) |- exists(x, P(x) /\ (y === x))) by InstFunSchema(Map(x -> y)) + thenHave(P(y) /\ (y === y) |- exists(x, P(x) /\ (y === x))) by InstSchema(x := y) thenHave(thesis) by Restate } val backward = thenHave(P(y) ==> exists(x, P(x) /\ (y === x))) by Restate @@ -186,8 +186,8 @@ object Quantifiers extends lisa.Main { val fy = thenHave(forall(z, P(z) <=> Q(z)) |- forall(y, ((y === z) <=> P(y)) <=> ((y === z) <=> Q(y)))) by RightForall have(forall(y, P(y) <=> Q(y)) |- (forall(y, P(y)) <=> forall(y, Q(y)))) by Restate.from(universalEquivalenceDistribution) - val univy = thenHave(forall(y, ((y === z) <=> P(y)) <=> ((y === z) <=> Q(y))) |- (forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by InstPredSchema( - Map((P -> lambda(y, (y === z) <=> P(y))), (Q -> lambda(y, (y === z) <=> Q(y)))) + val univy = thenHave(forall(y, ((y === z) <=> P(y)) <=> ((y === z) <=> Q(y))) |- (forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by InstSchema( + P := lambda(y, (y === z) <=> P(y)), Q := lambda(y, (y === z) <=> Q(y)) ) have(forall(z, P(z) <=> Q(z)) |- (forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by Cut(fy, univy) @@ -195,7 +195,7 @@ object Quantifiers extends lisa.Main { thenHave(forall(z, P(z) <=> Q(z)) |- forall(z, forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by RightForall have(forall(z, P(z) <=> Q(z)) |- exists(z, forall(y, ((y === z) <=> P(y)))) <=> exists(z, forall(y, ((y === z) <=> Q(y))))) by Cut( lastStep, - existentialEquivalenceDistribution of (P -> lambda(z, forall(y, (y === z) <=> P(y))), Q -> lambda(z, forall(y, (y === z) <=> Q(y)))) + existentialEquivalenceDistribution of (P := lambda(z, forall(y, (y === z) <=> P(y))), Q := lambda(z, forall(y, (y === z) <=> Q(y)))) ) thenHave(thesis) by Restate diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/Comprehensions.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/Comprehensions.scala 
index 6330e4caa..64fd6a7ab 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/Comprehensions.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/Comprehensions.scala @@ -3,9 +3,9 @@ package lisa.maths.settheory import lisa.SetTheoryLibrary import lisa.SetTheoryLibrary.* import lisa.maths.settheory.functions.functional -import lisa.prooflib.BasicStepTactic.RightForall -import lisa.prooflib.BasicStepTactic.TacticSubproof -import lisa.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.BasicStepTactic.RightForall +import lisa.utils.prooflib.BasicStepTactic.TacticSubproof +import lisa.utils.prooflib.SimpleDeducedSteps.* import lisa.utils.KernelHelpers.++<< import lisa.utils.KernelHelpers.+<< import lisa.utils.KernelHelpers.-<< @@ -16,7 +16,7 @@ import lisa.utils.{_, given} // See also https://github.com/lampepfl/dotty/issues/18569 object Comprehensions { - import lisa.fol.FOL.{*, given} + import lisa.utils.fol.FOL.{*, given} import lisa.maths.settheory.SetTheory2.{primReplacement, replacement, functionalIsFunctional, onePointRule} import lisa.automation.Tautology import lisa.automation.Substitution diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/InductiveSets.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/InductiveSets.scala index cbc135a14..768e9bd85 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/InductiveSets.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/InductiveSets.scala @@ -38,7 +38,7 @@ object InductiveSets extends lisa.Main { ∃(z, ∀(t, in(t, z) <=> ∀(y, inductive(y) ==> in(t, y)))) ) { val inductExt = - have(∃(x, inductive(x)) |- ∃(z, ∀(t, in(t, z) <=> ∀(y, inductive(y) ==> in(t, y))))) by InstPredSchema(Map(P -> lambda(x, inductive(x))))(intersectionOfPredicateClassExists) + have(∃(x, inductive(x)) |- ∃(z, ∀(t, in(t, z) <=> ∀(y, inductive(y) ==> in(t, y))))) by InstSchema(Map(P -> lambda(x, inductive(x))))(intersectionOfPredicateClassExists) have(∃(z, ∀(t, in(t, z) <=> ∀(y, inductive(y) ==> in(t, y))))) by Cut(inductiveSetExists, inductExt) } @@ -51,7 +51,7 @@ object InductiveSets extends lisa.Main { val prop = ∀(y, inductive(y) ==> in(t, y)) val fprop = ∀(t, in(t, z) <=> prop) - val existsRhs = have(∃(z, fprop) |- ∃!(z, fprop)) by InstPredSchema(Map(schemPred -> (t, prop)))(uniqueByExtension) + val existsRhs = have(∃(z, fprop) |- ∃!(z, fprop)) by InstSchema(Map(schemPred -> (t, prop)))(uniqueByExtension) val existsLhs = have(∃(z, fprop)) by Restate.from(inductiveIntersectionExistence) have(∃!(z, fprop)) by Cut(existsLhs, existsRhs) @@ -117,11 +117,11 @@ object InductiveSets extends lisa.Main { (∀(t, in(t, z) <=> (∀(x, inductive(x) ==> in(t, x)))), inductive(z) <=> (in(∅, z) /\ ∀(y, in(y, z) ==> in(successor(y), z)))) |- inductive(z) ) by RightSubstIff.withParametersSimple(List((inductive(z), in(∅, z) /\ ∀(y, in(y, z) ==> in(successor(y), z)))), lambda(form, form)) - val inductDef = have(inductive(z) <=> (in(∅, z) /\ ∀(y, in(y, z) ==> in(successor(y), z)))) by InstFunSchema(Map(x -> z))(inductive.definition) + val inductDef = have(inductive(z) <=> (in(∅, z) /\ ∀(y, in(y, z) ==> in(successor(y), z)))) by InstSchema(Map(x -> z))(inductive.definition) have((∀(t, in(t, z) <=> (∀(x, inductive(x) ==> in(t, x))))) |- inductive(z)) by Cut(inductDef, inductIff) val inductExpansion = - thenHave((forall(t, in(t, naturalsInductive) <=> (forall(x, inductive(x) ==> in(t, x))))) |- inductive(naturalsInductive)) by InstFunSchema(Map(z -> naturalsInductive)) + thenHave((forall(t, in(t, naturalsInductive) <=> (forall(x, inductive(x) ==> 
in(t, x))))) |- inductive(naturalsInductive)) by InstSchema(Map(z -> naturalsInductive)) have((naturalsInductive === naturalsInductive) <=> forall(t, in(t, naturalsInductive) <=> (forall(x, inductive(x) ==> in(t, x))))) by InstantiateForall(naturalsInductive)( naturalsInductive.definition diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/SetTheory.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/SetTheory.scala index 3439bad44..4908bc36a 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/SetTheory.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/SetTheory.scala @@ -83,7 +83,7 @@ object SetTheory extends lisa.Main { * where `P(t)` does not contain `z` as a free variable. * * @example {{{ - * have(∃(z, ∀(t, in(t, z) ⇔ myProperty(t))) ⊢ ∃!(z, ∀(t, in(t, z) ⇔ myProperty(t)))) by InstPredSchema(ScalaMap(schemPred -> (t, myProperty(t))))` + * have(∃(z, ∀(t, in(t, z) ⇔ myProperty(t))) ⊢ ∃!(z, ∀(t, in(t, z) ⇔ myProperty(t)))) by InstSchema(ScalaMap(schemPred -> (t, myProperty(t))))` * }}} * * Instantiation will fail if `myProperty(t)` contains `z` as a free variable. @@ -103,12 +103,12 @@ object SetTheory extends lisa.Main { // backward direction have(fprop(z) |- fprop(z)) by Hypothesis val instLhs = thenHave(fprop(z) |- prop(z)) by InstantiateForall(t) - val instRhs = thenHave(fprop(a) |- prop(a)) by InstFunSchema(ScalaMap(z -> a)) + val instRhs = thenHave(fprop(a) |- prop(a)) by InstSchema(ScalaMap(z -> a)) have((fprop(z), fprop(a)) |- prop(z) /\ prop(a)) by RightAnd(instLhs, instRhs) thenHave(fprop(z) /\ fprop(a) |- in(t, a) <=> in(t, z)) by Tautology val extLhs = thenHave(fprop(z) /\ fprop(a) |- ∀(t, in(t, a) <=> in(t, z))) by RightForall - val extRhs = have(∀(t, in(t, a) <=> in(t, z)) <=> (a === z)) by InstFunSchema(ScalaMap(x -> a, y -> z))(extensionalityAxiom) + val extRhs = have(∀(t, in(t, a) <=> in(t, z)) <=> (a === z)) by InstSchema(ScalaMap(x -> a, y -> z))(extensionalityAxiom) have(fprop(z) /\ fprop(a) |- (∀(t, in(t, a) <=> in(t, z)) <=> (a === z)) /\ ∀(t, in(t, a) <=> in(t, z))) by RightAnd(extLhs, extRhs) thenHave(fprop(z) /\ fprop(a) |- (a === z)) by Tautology @@ -195,7 +195,7 @@ object SetTheory extends lisa.Main { thenHave((in(z, x), (a === x)) |- in(z, a) /\ ((a === x) \/ (a === y))) by Tautology andThen(Substitution.applySubst(upairax, false)) thenHave((in(z, x), (a === x)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by RightExists - thenHave((in(z, x), (x === x)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by InstFunSchema(ScalaMap(a -> x)) + thenHave((in(z, x), (x === x)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by InstSchema(ScalaMap(a -> x)) val tax = thenHave((in(z, x)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by Restate have((in(z, y), (a === y)) |- in(z, y)) by Hypothesis @@ -203,7 +203,7 @@ object SetTheory extends lisa.Main { thenHave((in(z, y), (a === y)) |- in(z, a) /\ ((a === x) \/ (a === y))) by Tautology andThen(Substitution.applySubst(upairax, false)) thenHave((in(z, y), (a === y)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by RightExists - thenHave((in(z, y), (y === y)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by InstFunSchema(ScalaMap(a -> y)) + thenHave((in(z, y), (y === y)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by InstSchema(ScalaMap(a -> y)) val tay = thenHave((in(z, y)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by Restate have((in(z, x) \/ in(z, y)) |- ∃(a, in(z, a) /\ in(a, unorderedPair(x, y)))) by LeftOr(tax, tay) @@ -252,7 +252,7 @@ object SetTheory extends lisa.Main 
{ ) { val form = formulaVariable - have(∀(x, (x === successor(y)) <=> (x === union(unorderedPair(y, unorderedPair(y, y)))))) by InstFunSchema(ScalaMap(x -> y))(successor.definition) + have(∀(x, (x === successor(y)) <=> (x === union(unorderedPair(y, unorderedPair(y, y)))))) by InstSchema(ScalaMap(x -> y))(successor.definition) thenHave(((successor(y) === successor(y)) <=> (successor(y) === union(unorderedPair(y, unorderedPair(y, y)))))) by InstantiateForall(successor(y)) val succDef = thenHave((successor(y) === union(unorderedPair(y, unorderedPair(y, y))))) by Restate val inductDef = have(inductive(x) <=> in(∅, x) /\ ∀(y, in(y, x) ==> in(successor(y), x))) by Restate.from(inductive.definition) @@ -343,7 +343,7 @@ object SetTheory extends lisa.Main { val setWithNoElementsIsEmpty = Theorem( ∀(y, !in(y, x)) |- (x === ∅) ) { - have(!in(y, ∅)) by InstFunSchema(ScalaMap(x -> y))(emptySetAxiom) + have(!in(y, ∅)) by InstSchema(ScalaMap(x -> y))(emptySetAxiom) thenHave(() |- (!in(y, ∅), in(y, x))) by Weakening val lhs = thenHave(in(y, ∅) ==> in(y, x)) by Restate @@ -355,7 +355,7 @@ object SetTheory extends lisa.Main { thenHave(∀(y, !in(y, x)) |- in(y, x) <=> in(y, ∅)) by LeftForall val exLhs = thenHave(∀(y, !in(y, x)) |- ∀(y, in(y, x) <=> in(y, ∅))) by RightForall - have(∀(z, in(z, x) <=> in(z, ∅)) <=> (x === ∅)) by InstFunSchema(ScalaMap(x -> x, y -> ∅))(extensionalityAxiom) + have(∀(z, in(z, x) <=> in(z, ∅)) <=> (x === ∅)) by InstSchema(ScalaMap(x -> x, y -> ∅))(extensionalityAxiom) val exRhs = thenHave(∀(y, in(y, x) <=> in(y, ∅)) <=> (x === ∅)) by Restate have(∀(y, !in(y, x)) |- (∀(y, in(y, x) <=> in(y, ∅)) <=> (x === ∅)) /\ ∀(y, in(y, x) <=> in(y, ∅))) by RightAnd(exLhs, exRhs) @@ -435,7 +435,7 @@ object SetTheory extends lisa.Main { ) { // specialization of the pair axiom to a singleton - have(in(y, unorderedPair(x, x)) <=> (x === y) \/ (x === y)) by InstFunSchema(ScalaMap(x -> x, y -> x, z -> y))(pairAxiom) + have(in(y, unorderedPair(x, x)) <=> (x === y) \/ (x === y)) by InstSchema(ScalaMap(x -> x, y -> x, z -> y))(pairAxiom) thenHave(in(y, singleton(x)) <=> (x === y)) by Restate } @@ -501,13 +501,13 @@ object SetTheory extends lisa.Main { val secondElemInPair = Theorem( in(y, unorderedPair(x, y)) ) { - val lhs = have(in(z, unorderedPair(x, y)) <=> ((z === x) \/ (z === y))) by InstFunSchema(ScalaMap(x -> x, y -> y, z -> z))(pairAxiom) + val lhs = have(in(z, unorderedPair(x, y)) <=> ((z === x) \/ (z === y))) by InstSchema(ScalaMap(x -> x, y -> y, z -> z))(pairAxiom) have((z === y) |- (z === y)) by Hypothesis val rhs = thenHave((z === y) |- (z === x) \/ (z === y)) by Restate val factset = have((z === y) |- (in(z, unorderedPair(x, y)) <=> ((z === x) \/ (z === y))) /\ ((z === x) \/ (z === y))) by RightAnd(lhs, rhs) thenHave((z === y) |- in(z, unorderedPair(x, y))) by Tautology - thenHave((y === y) |- in(y, unorderedPair(x, y))) by InstFunSchema(ScalaMap(z -> y)) + thenHave((y === y) |- in(y, unorderedPair(x, y))) by InstSchema(ScalaMap(z -> y)) thenHave(in(y, unorderedPair(x, y))) by LeftRefl } @@ -583,7 +583,7 @@ object SetTheory extends lisa.Main { ) val lhs = thenHave(Set((a === c) /\ (b === d)) |- unorderedPair(a, b) === unorderedPair(c, d)) by Restate - have(unorderedPair(a, b) === unorderedPair(b, a)) by InstFunSchema(ScalaMap(x -> a, y -> b))(unorderedPairSymmetry) + have(unorderedPair(a, b) === unorderedPair(b, a)) by InstSchema(ScalaMap(x -> a, y -> b))(unorderedPairSymmetry) thenHave((a === d, b === c) |- (unorderedPair(a, b) === unorderedPair(c, d))) by 
RightSubstEq.withParametersSimple( List((a, d), (b, c)), lambda(Seq(x, y), unorderedPair(a, b) === unorderedPair(y, x)) @@ -604,13 +604,13 @@ object SetTheory extends lisa.Main { val singletonNonEmpty = Theorem( !(singleton(x) === ∅) ) { - val reflLhs = have(in(x, singleton(x)) <=> (x === x)) by InstFunSchema(ScalaMap(y -> x))(singletonHasNoExtraElements) + val reflLhs = have(in(x, singleton(x)) <=> (x === x)) by InstSchema(ScalaMap(y -> x))(singletonHasNoExtraElements) val reflRhs = have((x === x)) by RightRefl have((x === x) /\ (in(x, singleton(x)) <=> (x === x))) by RightAnd(reflLhs, reflRhs) val lhs = thenHave(in(x, singleton(x))) by Tautology - val rhs = have(in(x, singleton(x)) |- !(singleton(x) === ∅)) by InstFunSchema(ScalaMap(y -> x, x -> singleton(x)))(setWithElementNonEmpty) + val rhs = have(in(x, singleton(x)) |- !(singleton(x) === ∅)) by InstSchema(ScalaMap(y -> x, x -> singleton(x)))(setWithElementNonEmpty) have(!(singleton(x) === ∅)) by Cut(lhs, rhs) } @@ -625,18 +625,18 @@ object SetTheory extends lisa.Main { ) { // forward direction // {x} === {y} |- x === y - have(∀(z, in(z, singleton(x)) <=> in(z, singleton(y))) <=> (singleton(x) === singleton(y))) by InstFunSchema(ScalaMap(x -> singleton(x), y -> singleton(y)))(extensionalityAxiom) + have(∀(z, in(z, singleton(x)) <=> in(z, singleton(y))) <=> (singleton(x) === singleton(y))) by InstSchema(ScalaMap(x -> singleton(x), y -> singleton(y)))(extensionalityAxiom) thenHave((singleton(x) === singleton(y)) |- ∀(z, in(z, singleton(x)) <=> in(z, singleton(y)))) by Tautology val singiff = thenHave((singleton(x) === singleton(y)) |- in(z, singleton(x)) <=> in(z, singleton(y))) by InstantiateForall(z) - val singX = have(in(z, singleton(x)) <=> (z === x)) by InstFunSchema(ScalaMap(y -> z))(singletonHasNoExtraElements) + val singX = have(in(z, singleton(x)) <=> (z === x)) by InstSchema(ScalaMap(y -> z))(singletonHasNoExtraElements) have((singleton(x) === singleton(y)) |- (in(z, singleton(x)) <=> in(z, singleton(y))) /\ (in(z, singleton(x)) <=> (z === x))) by RightAnd(singiff, singX) val yToX = thenHave((singleton(x) === singleton(y)) |- (in(z, singleton(y)) <=> (z === x))) by Tautology - val singY = have(in(z, singleton(y)) <=> (z === y)) by InstFunSchema(ScalaMap(x -> y))(singX) + val singY = have(in(z, singleton(y)) <=> (z === y)) by InstSchema(ScalaMap(x -> y))(singX) have((singleton(x) === singleton(y)) |- (in(z, singleton(y)) <=> (z === x)) /\ (in(z, singleton(y)) <=> (z === y))) by RightAnd(yToX, singY) thenHave((singleton(x) === singleton(y)) |- ((z === x) <=> (z === y))) by Tautology - thenHave((singleton(x) === singleton(y)) |- ((x === x) <=> (x === y))) by InstFunSchema(ScalaMap(z -> x)) + thenHave((singleton(x) === singleton(y)) |- ((x === x) <=> (x === y))) by InstSchema(ScalaMap(z -> x)) thenHave((singleton(x) === singleton(y)) |- (x === y)) by Restate val fwd = thenHave((singleton(x) === singleton(y)) ==> (x === y)) by Tautology @@ -786,7 +786,7 @@ object SetTheory extends lisa.Main { ) { val X = singleton(x) - have(!(X === ∅) ==> ∃(y, in(y, X) /\ ∀(z, in(z, X) ==> !in(z, y)))) by InstFunSchema(ScalaMap(x -> X))(foundationAxiom) + have(!(X === ∅) ==> ∃(y, in(y, X) /\ ∀(z, in(z, X) ==> !in(z, y)))) by InstSchema(ScalaMap(x -> X))(foundationAxiom) val lhs = thenHave(!(X === ∅) |- ∃(y, in(y, X) /\ ∀(z, in(z, X) ==> !in(z, y)))) by Restate have(in(y, X) |- in(y, X) <=> (x === y)) by Weakening(singletonHasNoExtraElements) @@ -794,7 +794,7 @@ object SetTheory extends lisa.Main { have((in(x, X), (in(z, X) ==> !in(z, x)), in(y, X)) 
|- in(z, X) ==> !in(z, x)) by Hypothesis thenHave((in(x, X), ∀(z, in(z, X) ==> !in(z, x)), in(y, X)) |- in(z, X) ==> !in(z, x)) by LeftForall - thenHave((in(x, X), ∀(z, in(z, X) ==> !in(z, x)), in(x, X)) |- in(x, X) ==> !in(x, x)) by InstFunSchema(ScalaMap(z -> x, y -> x)) + thenHave((in(x, X), ∀(z, in(z, X) ==> !in(z, x)), in(x, X)) |- in(x, X) ==> !in(x, x)) by InstSchema(ScalaMap(z -> x, y -> x)) val coreRhs = thenHave(in(x, X) /\ ∀(z, in(z, X) ==> !in(z, x)) |- !in(x, x)) by Restate // now we need to show that the assumption is indeed true @@ -843,12 +843,12 @@ object SetTheory extends lisa.Main { ) { val lhs = have(subset(powerSet(x), x) |- subset(powerSet(x), x)) by Hypothesis - val rhs = have(in(powerSet(x), powerSet(x)) <=> subset(powerSet(x), x)) by InstFunSchema(ScalaMap(x -> powerSet(x), y -> x))(powerAxiom) + val rhs = have(in(powerSet(x), powerSet(x)) <=> subset(powerSet(x), x)) by InstSchema(ScalaMap(x -> powerSet(x), y -> x))(powerAxiom) have(subset(powerSet(x), x) |- subset(powerSet(x), x) /\ (in(powerSet(x), powerSet(x)) <=> subset(powerSet(x), x))) by RightAnd(lhs, rhs) val contraLhs = thenHave(subset(powerSet(x), x) |- in(powerSet(x), powerSet(x))) by Tautology - val contraRhs = have(!in(powerSet(x), powerSet(x))) by InstFunSchema(ScalaMap(x -> powerSet(x)))(selfNonInclusion) + val contraRhs = have(!in(powerSet(x), powerSet(x))) by InstSchema(ScalaMap(x -> powerSet(x)))(selfNonInclusion) have(subset(powerSet(x), x) |- !in(powerSet(x), powerSet(x)) /\ in(powerSet(x), powerSet(x))) by RightAnd(contraLhs, contraRhs) thenHave(subset(powerSet(x), x) |- ()) by Restate @@ -1042,9 +1042,9 @@ object SetTheory extends lisa.Main { val intersectionOfPredicateClassExists = Theorem( ∃(x, P(x)) |- ∃(z, ∀(t, in(t, z) <=> ∀(y, P(y) ==> in(t, y)))) ) { - have(∃(z, ∀(t, in(t, z) <=> (in(t, x) /\ φ(t))))) by InstFunSchema(ScalaMap(z -> x))(comprehensionSchema) + have(∃(z, ∀(t, in(t, z) <=> (in(t, x) /\ φ(t))))) by InstSchema(ScalaMap(z -> x))(comprehensionSchema) - val conjunction = thenHave(∃(z, ∀(t, in(t, z) <=> (in(t, x) /\ ∀(y, P(y) ==> in(t, y)))))) by InstPredSchema(ScalaMap(φ -> lambda(t, ∀(y, P(y) ==> in(t, y))))) + val conjunction = thenHave(∃(z, ∀(t, in(t, z) <=> (in(t, x) /\ ∀(y, P(y) ==> in(t, y)))))) by InstSchema(ScalaMap(φ -> lambda(t, ∀(y, P(y) ==> in(t, y))))) have(∀(y, P(y) ==> in(t, y)) |- ∀(y, P(y) ==> in(t, y))) by Hypothesis thenHave(∀(y, P(y) ==> in(t, y)) /\ P(x) |- ∀(y, P(y) ==> in(t, y))) by Weakening @@ -1224,7 +1224,7 @@ object SetTheory extends lisa.Main { thenHave((union(pair(x, y)) === unaryIntersection(pair(x, y))) |- in(z, union(pair(x, y))) <=> in(z, unaryIntersection(pair(x, y)))) by InstantiateForall(z) have((union(pair(x, y)) === unaryIntersection(pair(x, y))) |- (((z === x) \/ (z === y)) <=> (z === x))) by Tautology.from(lastStep, unionPair, pairUnaryIntersection) - thenHave((union(pair(x, y)) === unaryIntersection(pair(x, y))) |- (((y === x) \/ (y === y)) <=> (y === x))) by InstFunSchema(ScalaMap(z -> y)) + thenHave((union(pair(x, y)) === unaryIntersection(pair(x, y))) |- (((y === x) \/ (y === y)) <=> (y === x))) by InstSchema(ScalaMap(z -> y)) thenHave(thesis) by Restate } diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/functions/Functionals.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/functions/Functionals.scala index 71efaef16..b69346534 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/functions/Functionals.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/functions/Functionals.scala @@ -294,7 +294,7 @@ object 
Functionals extends lisa.Main { } // class SetOfFunctions(val x: Term, val y: Term) extends AppliedFunctional(setOfFunctions, Seq(x, y)) with LisaObject[SetOfFunctions] { - // override def substituteUnsafe(map: Map[lisa.fol.FOL.SchematicLabel[?], lisa.fol.FOL.LisaObject[?]]): SetOfFunctions = SetOfFunctions(x.substituteUnsafe(map), y.substituteUnsafe(map)) + // override def substituteUnsafe(map: Map[lisa.utils.fol.FOL.SchematicLabel[?], lisa.utils.fol.FOL.LisaObject[?]]): SetOfFunctions = SetOfFunctions(x.substituteUnsafe(map), y.substituteUnsafe(map)) // override def toString(): String = x.toStringSeparated() + " |=> " + y.toStringSeparated() // override def toStringSeparated(): String = toString() diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/types/TypeSystem.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/types/TypeSystem.scala index 0fbc4b1c1..6c489ba51 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/types/TypeSystem.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/types/TypeSystem.scala @@ -1,15 +1,15 @@ package lisa.maths.settheory.types -import lisa.prooflib.ProofTacticLib.* -import lisa.fol.FOL +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.fol.FOL import lisa.automation.Tautology -import lisa.fol.FOL.{*, given} -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.SimpleDeducedSteps.* +import lisa.utils.fol.FOL.{*, given} +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.SimpleDeducedSteps.* import lisa.SetTheoryLibrary.{given, *} import lisa.SetTheoryLibrary import lisa.kernel.proof.SequentCalculus.SCProofStep -import lisa.prooflib.OutputManager +import lisa.utils.prooflib.OutputManager import lisa.maths.settheory.SetTheory.singleton import lisa.maths.settheory.functions.{functional, app, functionFromApplication, |=>} @@ -228,7 +228,7 @@ object TypeSystem { val formula = TypeAssignment(this, typ) assert(justif.statement.left.isEmpty && (justif.statement.right.head == formula)) - override def substituteUnsafe(map: Map[lisa.fol.FOL.SchematicLabel[?], lisa.fol.FOL.LisaObject[?]]): TypedConstant[A] = this + override def substituteUnsafe(map: Map[lisa.utils.fol.FOL.SchematicLabel[?], lisa.utils.fol.FOL.LisaObject[?]]): TypedConstant[A] = this } // Function Labels @@ -244,7 +244,7 @@ object TypeSystem { val justif: JUSTIFICATION ) extends ConstantFunctionLabel[N](id, arity) with LisaObject[TypedConstantFunctional[N]] { - override def substituteUnsafe(map: Map[lisa.fol.FOL.SchematicLabel[?], lisa.fol.FOL.LisaObject[?]]): TypedConstantFunctional[N] = this + override def substituteUnsafe(map: Map[lisa.utils.fol.FOL.SchematicLabel[?], lisa.utils.fol.FOL.LisaObject[?]]): TypedConstantFunctional[N] = this } @@ -253,7 +253,7 @@ object TypeSystem { class AppliedFunction(val func: Term, val arg: Term) extends AppliedFunctional(app, Seq(func, arg)) with LisaObject[AppliedFunction] { - override def substituteUnsafe(map: Map[lisa.fol.FOL.SchematicLabel[?], lisa.fol.FOL.LisaObject[?]]): AppliedFunction = AppliedFunction(func.substituteUnsafe(map), arg.substituteUnsafe(map)) + override def substituteUnsafe(map: Map[lisa.utils.fol.FOL.SchematicLabel[?], lisa.utils.fol.FOL.LisaObject[?]]): AppliedFunction = AppliedFunction(func.substituteUnsafe(map), arg.substituteUnsafe(map)) override def toString(): String = func match @@ -296,7 +296,7 @@ object TypeSystem { val intName = "definition_" + fullName val out = Variable(freshId(expression.allSchematicLabels.map(_.id), "y")) val defThm = THM(ExistsOne(out, out === 
expression), intName, line, file, InternalStatement)({ - have(lisa.prooflib.SimpleDeducedSteps.simpleFunctionDefinition(lambda(Seq[Variable](), expression), out)) + have(lisa.utils.prooflib.SimpleDeducedSteps.simpleFunctionDefinition(lambda(Seq[Variable](), expression), out)) }) new TypedSimpleConstantDefinition(fullName, line, file)(expression, out, defThm, typ) } @@ -334,7 +334,7 @@ object TypeSystem { class TypingException(val msg: String) extends Exception(msg) - def prove(using proof: SetTheoryLibrary.Proof)(bot:lisa.fol.FOL.Sequent): proof.ProofTacticJudgement = + def prove(using proof: SetTheoryLibrary.Proof)(bot:lisa.utils.fol.FOL.Sequent): proof.ProofTacticJudgement = val context = bot.left var success: proof.ProofTacticJudgement = null var typingError: proof.ProofTacticJudgement = null diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Helpers.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Helpers.scala index dfc601d29..5efd29f3f 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Helpers.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Helpers.scala @@ -13,11 +13,11 @@ package lisa.maths.settheory.types.adt * * given a proof of the sequents without quantification. */ -object QuantifiersIntro extends lisa.prooflib.ProofTacticLib.ProofTactic { +object QuantifiersIntro extends lisa.utils.prooflib.ProofTacticLib.ProofTactic { - import lisa.prooflib.SimpleDeducedSteps.Restate - import lisa.prooflib.BasicStepTactic.* - import lisa.fol.FOL.* + import lisa.utils.prooflib.SimpleDeducedSteps.Restate + import lisa.utils.prooflib.BasicStepTactic.* + import lisa.utils.fol.FOL.* /** * Executes the tactic on a specific goal. @@ -28,7 +28,7 @@ object QuantifiersIntro extends lisa.prooflib.ProofTacticLib.ProofTactic { * @param fact the proof of the sequent without quantification * @param bot the statement to prove */ - def apply(using lib: lisa.prooflib.Library, proof: lib.Proof)(vars: Seq[Variable])(fact: proof.Fact)(bot: Sequent): proof.ProofTacticJudgement = + def apply(using lib: lisa.utils.prooflib.Library, proof: lib.Proof)(vars: Seq[Variable])(fact: proof.Fact)(bot: Sequent): proof.ProofTacticJudgement = TacticSubproof { sp ?=> if vars.isEmpty then lib.have(bot) by Restate.from(fact) @@ -84,7 +84,7 @@ object QuantifiersIntro extends lisa.prooflib.ProofTacticLib.ProofTactic { */ private [adt] object Helpers { - import lisa.fol.FOL.{*, given} + import lisa.utils.fol.FOL.{*, given} /** * Benchmarks a block of code. 
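The renames above follow one mechanical pattern that recurs through the rest of this diff: the `lisa.fol` and `lisa.prooflib` packages move under `lisa.utils`, the separate InstFunSchema / InstPredSchema steps are now invoked as the single InstSchema step, and (in the tests below) EGraphTerms becomes EGraphExpr. The following sketch is not part of the patch; it is a hypothetical downstream file, with an invented object name, that only restates those renames for readers updating their own proofs.

package lisa.maths.examples

// New import paths (the commented lines show the pre-migration equivalents).
import lisa.utils.fol.FOL.{*, given}            // was: import lisa.fol.FOL.{*, given}
import lisa.utils.prooflib.BasicStepTactic.*    // was: import lisa.prooflib.BasicStepTactic.*
import lisa.utils.prooflib.SimpleDeducedSteps.* // was: import lisa.prooflib.SimpleDeducedSteps.*

object MigrationNotes {
  // Schema instantiation is now a single tactic:
  //   thenHave(fprop(a) |- prop(a)) by InstSchema(ScalaMap(z -> a))   // was: InstFunSchema(...) / InstPredSchema(...)
  // and the congruence-closure class used in the tests is renamed:
  //   val egraph = new EGraphExpr()                                   // was: new EGraphTerms()
}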
diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Tactics.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Tactics.scala index 7a05b0b49..0b354d897 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Tactics.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Tactics.scala @@ -30,7 +30,7 @@ import Helpers.* * @param expectedVar the variable on which the induction is performed * @param expectedADT the algebraic data type on which the induction is performed */ -class Induction[M <: Arity](expectedVar: Option[Variable], expectedADT: Option[ADT[M]]) extends lisa.prooflib.ProofTacticLib.ProofTactic { +class Induction[M <: Arity](expectedVar: Option[Variable], expectedADT: Option[ADT[M]]) extends lisa.utils.prooflib.ProofTacticLib.ProofTactic { /** * Given a proof of the claim for each case (possibly using the induction hypothesis), diff --git a/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Untyped.scala b/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Untyped.scala index 70087f4c2..4855fbf15 100644 --- a/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Untyped.scala +++ b/lisa-sets/src/main/scala/lisa/maths/settheory/types/adt/Untyped.scala @@ -21,7 +21,7 @@ import ADTThm.{N, pair, pairExtensionality} import lisa.maths.settheory.functions.|=> import lisa.maths.settheory.types.TypeSystem.{ :: } import lisa.maths.Quantifiers.{universalEquivalenceDistribution} -import lisa.fol.FOL.Variable +import lisa.utils.fol.FOL.Variable /** * Helpers for constructors diff --git a/lisa-sets/src/test/scala/lisa/automation/CongruenceTest.scala b/lisa-sets/src/test/scala/lisa/automation/CongruenceTest.scala index 34bc77eee..73a4d9a04 100644 --- a/lisa-sets/src/test/scala/lisa/automation/CongruenceTest.scala +++ b/lisa-sets/src/test/scala/lisa/automation/CongruenceTest.scala @@ -1,5 +1,5 @@ package lisa.automation -import lisa.fol.FOL.{*, given} +import lisa.utils.fol.FOL.{*, given} import lisa.automation.Congruence.* import lisa.automation.Congruence import org.scalatest.funsuite.AnyFunSuite @@ -66,7 +66,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("3 terms no congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(a) egraph.add(b) egraph.add(c) @@ -77,7 +77,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("8 terms no congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(a) egraph.add(b) egraph.add(c) @@ -98,7 +98,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("15 terms no congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(a) egraph.add(b) egraph.add(c) @@ -133,7 +133,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("15 terms no congruence egraph test with redundant merges") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(a) egraph.add(b) egraph.add(c) @@ -176,7 +176,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("4 terms withcongruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(F(a)) egraph.add(F(b)) egraph.merge(a, b) @@ -195,7 +195,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("divide-mult-shift in terms by 2 egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(one) egraph.add(two) egraph.add(a) @@ 
-236,7 +236,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("long chain of terms congruence eGraph") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(x) val fx = egraph.add(F(x)) val ffx = egraph.add(F(fx)) @@ -259,7 +259,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("3 formulas no congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(af) egraph.add(bf) egraph.add(cf) @@ -270,7 +270,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("8 formulas no congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(af) egraph.add(bf) egraph.add(cf) @@ -291,7 +291,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("15 formulas no congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(af) egraph.add(bf) egraph.add(cf) @@ -326,7 +326,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("15 formulas no congruence egraph test with redundant merges") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(af) egraph.add(bf) egraph.add(cf) @@ -369,7 +369,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("4 formulas withcongruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(Ff(af)) egraph.add(Ff(bf)) egraph.merge(af, bf) @@ -386,7 +386,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("divide-mult-shift in formulas by 2 egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(onef) egraph.add(twof) egraph.add(af) @@ -429,7 +429,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("long chain of formulas congruence eGraph") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(xf) val fx = egraph.add(Ff(xf)) val ffx = egraph.add(Ff(fx)) @@ -456,7 +456,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { ////////////////////////////////////// test("2 terms 6 predicates with congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(Ff(Ff(Fp(a)))) egraph.add(Ff(Ff(Fp(b)))) egraph.merge(a, b) @@ -481,7 +481,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("6 terms 6 predicates with congruence egraph test") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(Ff(Ff(Fp(F(F(a)))))) egraph.add(Ff(Ff(Fp(F(F(b)))))) egraph.merge(a, b) @@ -499,7 +499,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("15 terms no congruence with redundant merges test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(a) egraph.add(b) egraph.add(c) @@ -551,7 +551,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("4 elements with congruence test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(F(a)) egraph.add(F(b)) egraph.merge(a, b) @@ -562,7 +562,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("divide-mult-shift by 2 in terms egraph test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(one) egraph.add(two) egraph.add(a) @@ -603,7 +603,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("long chain of termscongruence eGraph with proofs") { - 
val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(x) val fx = egraph.add(F(x)) val ffx = egraph.add(F(fx)) @@ -635,7 +635,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("15 formulas no congruence proofs with redundant merges test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(af) egraph.add(bf) egraph.add(cf) @@ -688,7 +688,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("4 formulas with congruence test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(Ff(af)) egraph.add(Ff(bf)) egraph.merge(af, bf) @@ -698,7 +698,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("divide-mult-shift by 2 in formulas egraph test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(onef) egraph.add(twof) egraph.add(af) @@ -739,7 +739,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("long chain of formulas congruence eGraph with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(xf) val fx = egraph.add(Ff(xf)) val ffx = egraph.add(Ff(fx)) @@ -768,7 +768,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { test("2 terms 6 predicates with congruence egraph test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(Ff(Ff(Fp(a)))) egraph.add(Ff(Ff(Fp(b)))) egraph.merge(a, b) @@ -788,7 +788,7 @@ class CongruenceTest extends AnyFunSuite with lisa.TestMain { } test("6 terms 6 predicates with congruence egraph test with proofs") { - val egraph = new EGraphTerms() + val egraph = new EGraphExpr() egraph.add(Ff(Ff(Fp(F(F(a)))))) egraph.add(Ff(Ff(Fp(F(F(b)))))) egraph.merge(a, b) diff --git a/lisa-sets/src/test/scala/lisa/automation/TableauTest.scala b/lisa-sets/src/test/scala/lisa/automation/TableauTest.scala index 69a8d2507..32cf90af8 100644 --- a/lisa-sets/src/test/scala/lisa/automation/TableauTest.scala +++ b/lisa-sets/src/test/scala/lisa/automation/TableauTest.scala @@ -3,8 +3,8 @@ package lisa.test.automation import lisa.SetTheoryLibrary.{_, given} import lisa.automation.Substitution.* import lisa.automation.Tableau.* -import lisa.fol.FOL.* -import lisa.prooflib.Exports.* +import lisa.utils.fol.FOL.* +import lisa.utils.prooflib.Exports.* import lisa.utils.K.SCProofChecker.checkSCProof import lisa.utils.parsing.FOLPrinter.prettyFormula import lisa.utils.parsing.FOLPrinter.prettySCProof diff --git a/lisa-sets/src/test/scala/lisa/examples/peano_example/Peano.scala b/lisa-sets/src/test/scala/lisa/examples/peano_example/Peano.scala index 2f01882f7..fe3596514 100644 --- a/lisa-sets/src/test/scala/lisa/examples/peano_example/Peano.scala +++ b/lisa-sets/src/test/scala/lisa/examples/peano_example/Peano.scala @@ -4,8 +4,8 @@ import lisa.kernel.fol.FOL.* import lisa.kernel.proof.RunningTheory import lisa.kernel.proof.SCProof import lisa.kernel.proof.SequentCalculus.* -import lisa.prooflib.Library -import lisa.prooflib.OutputManager +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager import lisa.utils.KernelHelpers.{_, given} import lisa.utils.Printer @@ -123,7 +123,7 @@ object Peano { /* x ) - val inductionInstance: SCProofStep = SC.InstPredSchema( + val inductionInstance: SCProofStep = SC.InstSchema( () |- ((plus(zero, zero) === plus(zero, zero)) /\ forall(x, (plus(x, zero) === plus(zero, x)) ==> (plus(s(x), zero) === plus(zero, s(x))))) ==> forall( x, plus(x, zero) 
=== plus(zero, x) @@ -238,7 +238,7 @@ object Peano { /* val inductionInstance = { val inductionOnY0 = SC.Rewrite(() |- (sPhi(zero) /\ forall(y, sPhi(y) ==> sPhi(s(y)))) ==> forall(y, sPhi(y)), -1) - val inductionInstance1 = SC.InstPredSchema( + val inductionInstance1 = SC.InstSchema( () |- ((plus(s(x), zero) === plus(x, s(zero))) /\ forall(y, (plus(x, s(y)) === plus(s(x), y)) ==> (plus(x, s(s(y))) === plus(s(x), s(y))))) ==> @@ -320,7 +320,7 @@ object Peano { /* val inductionInstance = { val inductionOnY0 = SC.Rewrite(() |- (sPhi(zero) /\ forall(y, sPhi(y) ==> sPhi(s(y)))) ==> forall(y, sPhi(y)), -1) - val inductionInstance1 = SC.InstPredSchema( + val inductionInstance1 = SC.InstSchema( () |- ((plus(x, zero) === plus(zero, x)) /\ forall(y, (plus(x, y) === plus(y, x)) ==> (plus(x, s(y)) === plus(s(y), x)))) ==> diff --git a/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmetics.scala b/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmetics.scala index e422b6f15..3277db3c6 100644 --- a/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmetics.scala +++ b/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmetics.scala @@ -4,8 +4,8 @@ import lisa.kernel.fol.FOL.* import lisa.kernel.proof.RunningTheory import lisa.utils.KernelHelpers.{_, given} -object PeanoArithmetics extends lisa.prooflib.Library { - export lisa.fol.FOL.{*, given} +object PeanoArithmetics extends lisa.utils.prooflib.Library { + export lisa.utils.fol.FOL.{*, given} final val (x, y, z) = (variable, variable, variable) diff --git a/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmeticsLibrary.scala b/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmeticsLibrary.scala index 3f9d423fe..cd615708f 100644 --- a/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmeticsLibrary.scala +++ b/lisa-sets/src/test/scala/lisa/examples/peano_example/PeanoArithmeticsLibrary.scala @@ -2,6 +2,6 @@ package lisa.examples.peano_example import lisa.examples.peano_example -trait PeanoArithmeticsLibrary extends lisa.prooflib.BasicMain { +trait PeanoArithmeticsLibrary extends lisa.utils.prooflib.BasicMain { export PeanoArithmetics.* } diff --git a/lisa-sets/src/test/scala/lisa/utilities/TestMain.scala b/lisa-sets/src/test/scala/lisa/utilities/TestMain.scala index 6d93e5d05..e93e5d78f 100644 --- a/lisa-sets/src/test/scala/lisa/utilities/TestMain.scala +++ b/lisa-sets/src/test/scala/lisa/utilities/TestMain.scala @@ -1,6 +1,6 @@ package lisa -import lisa.prooflib.* +import lisa.utils.prooflib.* trait TestMain extends lisa.Main { diff --git a/lisa-sets2/src/main/scala/lisa/Main.scala b/lisa-sets2/src/main/scala/lisa/Main.scala new file mode 100644 index 000000000..ce40dc238 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/Main.scala @@ -0,0 +1,38 @@ +package lisa + +import lisa.SetTheoryLibrary +import lisa.utils.prooflib.BasicMain + +/** + * The parent trait of all theory files containing mathematical development + */ +trait Main extends BasicMain { + + export lisa.utils.fol.FOL.{*, given} + export SetTheoryLibrary.{given, _} + export lisa.utils.prooflib.BasicStepTactic.* + export lisa.utils.prooflib.SimpleDeducedSteps.* + + export lisa.automation.Tautology + // export lisa.automation.Substitution + export lisa.automation.Tableau + export lisa.automation.Congruence + // export lisa.automation.Apply + // export lisa.automation.Exact + + knownDefs.update(emptySet, Some(emptySetAxiom)) + knownDefs.update(unorderedPair, Some(pairAxiom)) + knownDefs.update(union, 
Some(unionAxiom)) + knownDefs.update(powerSet, Some(powerAxiom)) + knownDefs.update(subset, Some(subsetAxiom)) + + extension (symbol: Constant[?]) { + def definition: JUSTIFICATION = { + getDefinition(symbol).get + } + def shortDefinition: JUSTIFICATION = { + getShortDefinition(symbol).get + } + } + +} diff --git a/lisa-sets2/src/main/scala/lisa/SetTheoryLibrary.scala b/lisa-sets2/src/main/scala/lisa/SetTheoryLibrary.scala new file mode 100644 index 000000000..a0f8b6577 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/SetTheoryLibrary.scala @@ -0,0 +1,255 @@ +package lisa + +import lisa.utils.fol.FOL.{_, given} +import lisa.kernel.proof.RunningTheory +import lisa.utils.prooflib.Library + +/** + * Specific implementation of [[utilities.Library]] for Set Theory, with a RunningTheory that is supposed to be used by the standard library. + */ +object SetTheoryLibrary extends lisa.utils.prooflib.Library { + + val theory = new RunningTheory() + + // Predicates + /** + * The symbol for the set membership predicate. + */ + final val in = constant[Ind >>: Ind >>: Prop]("elem") + + /** + * The symbol for the subset predicate. + */ + final val subset = constant[Ind >>: Ind >>: Prop]("subsetOf") + + /** + * The symbol for the equicardinality predicate. Needed for Tarski's axiom. + */ + final val sim = constant[Ind >>: Ind >>: Prop]("sameCardinality") // Equicardinality + /** + * Set Theory basic predicates + */ + final val predicates = Set(in, subset, sim) + // val choice + + // Functions + /** + * The symbol for the empty set constant. + */ + final val emptySet = constant[Ind]("emptySet") + + /** + * The symbol for the unordered pair function. + */ + final val unorderedPair = constant[Ind >>: Ind >>: Ind]("unorderedPair") + + /** + * The symbol for the powerset function. + */ + final val powerSet = constant[Ind >>: Ind]("powerSet") + + /** + * The symbol for the set union function. + */ + final val union = constant[Ind >>: Ind]("union") + + /** + * The symbol for the universe function. Defined in TG set theory. + */ + final val universe = constant[Ind >>: Ind]("universe") + + /** + * Set Theory basic functions. + */ + final val functions = Set(unorderedPair, powerSet, union, universe) + + /** + * The kernel theory loaded with Set Theory symbols and axioms. + */ + // val runningSetTheory: RunningTheory = new RunningTheory() + // given RunningTheory = runningSetTheory + + predicates.foreach(s => addSymbol(s)) + functions.foreach(s => addSymbol(s)) + addSymbol(emptySet) + + private val x = variable[Ind] + private val y = variable[Ind] + private val z = variable[Ind] + final val φ = variable[Ind >>: Prop] + private val A = variable[Ind] + private val B = variable[Ind] + private val P = variable[Ind >>: Ind >>: Prop] + + //////////// + // Axioms // + //////////// + + // Z + //////// + + /** + * Extensionality Axiom --- Two sets are equal iff they have the same + * elements. + * + * `() |- (x = y) ⇔ ∀ z. z ∈ x ⇔ z ∈ y` + */ + final val extensionalityAxiom: this.AXIOM = Axiom(forall(z, (z ∈ x) <=> (z ∈ y)) <=> (x === y)) + + /** + * Pairing Axiom --- For any sets `x` and `y`, there is a set that contains + * exactly `x` and `y`. This set is denoted mathematically as `{x, y}` and + * here as `unorderedPair(x, y)`. + * + * `() |- z ∈ {x, y} ⇔ (z === x ∨ z === y)` + * + * This axiom defines [[unorderedPair]] as the function symbol representing + * this set. 
+ */ + final val pairAxiom: AXIOM = Axiom(z ∈ unorderedPair(x, y) <=> (x === z) \/ (y === z)) + + /** + * Comprehension/Separation Schema --- For a formula `ϕ(_, _)` and a set `z`, + * there exists a set `y` which contains only the elements `x` of `z` that + * satisfy `ϕ(x, z)`. This is represented mathematically as `y = {x ∈ z | ϕ(x, + * z)}`. + * + * `() |- ∃ y. ∀ x. x ∈ y ⇔ (x ∈ z ∧ ϕ(x, z))` + * + * This schema represents an infinite collection of axioms, one for each + * formula `ϕ(x, z)`. + */ + final val comprehensionSchema: AXIOM = Axiom(exists(y, forall(x, (x ∈ y) <=> ((x ∈ z) /\ φ(x))))) + + /** + * Empty Set Axiom --- From the Comprehension Schema follows the existence of + * a set containing no elements, the empty set. + * + * `∅ = {x ∈ X | x != x}`. + * + * This axiom defines [[emptySet]] as the constant symbol representing this set. + * + * `() |- !(x ∈ ∅)` + */ + final val emptySetAxiom: AXIOM = Axiom(!(x ∈ emptySet)) + + /** + * Union Axiom --- For any set `x`, there exists a set `union(x)` which is the + * union of its elements. For every element of `union(x)`, there is an element + * `y` of `x` which contains it. + * + * `() |- z ∈ union(x) ⇔ ∃ y. y ∈ x ∧ z ∈ y` + * + * Mathematically, we write `union(x)` as `∪ x`. + * + * This axiom defines [[union]] as the function symbol representing this set. + */ + final val unionAxiom: AXIOM = Axiom(z ∈ union(x) <=> exists(y, (y ∈ x) /\ (z ∈ y))) + + /** + * Subset Axiom --- For sets `x` and `y`, `x` is a subset of `y` iff every + * element of `x` is in `y`. Denoted `x ⊆ y`. + * + * `() |- x ⊆ y ⇔ (z ∈ x ⇒ z ∈ y)` + * + * This axiom defines the [[subset]] symbol as this predicate. + */ + final val subsetAxiom: AXIOM = Axiom((x ⊆ y) <=> forall(z, (z ∈ x) ==> (z ∈ y))) + + /** + * Power Set Axiom --- For a set `x`, there exists a power set of `x`, denoted + * `PP(x)` or `power(x)` which contains every subset of x. + * + * `() |- z ∈ power(x) ⇔ z ⊆ x` + * + * This axiom defines [[powerSet]] as the function symbol representing this + * set. + */ + final val powerAxiom: AXIOM = Axiom(x ∈ powerSet(y) <=> x ⊆ y) + + /** + * Infinity Axiom --- There exists an infinite set. + * + * The definition requires a notion of finiteness, which generally corresponds + * to natural numbers. Since the naturals have not yet been defined, their + * definition and structure is imitated in the definition of an inductive set. + * + * `inductive(x) ⇔ (∅ ∈ x ∧ ∀ y. y ∈ x ⇒ y ∪ {y} ∈ x)` + * + * This axiom postulates that there exists an inductive set. + * + * `() |- ∃ x. inductive(x)` + */ + final val infinityAxiom: AXIOM = Axiom(exists(x, emptySet ∈ x /\ forall(y, (y ∈ x) ==> union(unorderedPair(y, unorderedPair(y, y))) ∈ x))) + + /** + * Foundation/Regularity Axiom --- Every non-empty set `x` has an `∈`-minimal + * element. Equivalently, the relation `∈` on any family of sets is + * well-founded. + * + * `() |- (x != ∅) ==> ∃ y ∈ x. ∀ z. z ∈ x ⇒ ! z ∈ y` + */ + final val foundationAxiom: AXIOM = Axiom(!(x === emptySet) ==> exists(y, (y ∈ x) /\ forall(z, (z ∈ x) ==> !(z ∈ y)))) + + // ZF + ///////// + + /** + * Replacement Schema --- If a predicate `P` is 'functional' over `x`, i.e., + * given `a ∈ x`, there is a unique `b` such that `P(x, a, b)`, then the + * 'image' of `x` in P exists and is a set. It contains exactly the `b`'s that + * satisfy `P` for each `a ∈ x`. 
+ */ + final val replacementSchema: AXIOM = Axiom( + forall(x, (x ∈ A) ==> ∀(y, ∀(z, (P(x)(y) /\ P(x)(z)) ==> (y === z)))) ==> + exists(B, forall(y, (y ∈ B) <=> exists(x, (x ∈ A) /\ P(x)(y)))) + ) + + final val tarskiAxiom: AXIOM = Axiom( + forall( + x, + (x ∈ universe(x)) /\ + forall( + y, + (y ∈ universe(x)) ==> ((powerSet(y) ∈ universe(x)) /\ (powerSet(y) ⊆ universe(x))) /\ + forall(z, (z ⊆ universe(x)) ==> (sim(y)(universe(x)) /\ (y ∈ universe(x)))) + ) + ) + ) + + /** + * The set of all axioms of Tarski-Grothedick (TG) set theory. + * + * @return + */ + def axioms: Set[(String, AXIOM)] = Set( + ("EmptySet", emptySetAxiom), + ("extensionalityAxiom", extensionalityAxiom), + ("pairAxiom", pairAxiom), + ("unionAxiom", unionAxiom), + ("subsetAxiom", subsetAxiom), + ("powerAxiom", powerAxiom), + ("foundationAxiom", foundationAxiom), + ("infinityAxiom", infinityAxiom), + ("comprehensionSchema", comprehensionSchema), + ("replacementSchema", replacementSchema), + ("TarskiAxiom", tarskiAxiom) + ) + + ///////////// + // Aliases // + ///////////// + + // Unicode symbols + + val ∅ = emptySet + val ∈ = in + + extension (l: Expr[Ind]) + def ∈(r: Expr[Ind]): Expr[Prop] = in(l)(r) + def ⊆(r: Expr[Ind]): Expr[Prop] = subset(l)(r) + def =/=(r: Expr[Ind]): Expr[Prop] = !(l === r) + + def unorderedPair(x: Expr[Ind], y: Expr[Ind]): Expr[Ind] = App(App(unorderedPair, x), y) + +} diff --git a/lisa-sets2/src/main/scala/lisa/Tests.scala b/lisa-sets2/src/main/scala/lisa/Tests.scala new file mode 100644 index 000000000..10074797a --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/Tests.scala @@ -0,0 +1,65 @@ + +package lisa.maths +import lisa.automation.atp.* +import lisa.utils.KernelHelpers.checkProof +import lisa.tptp.* + + +object Tests extends lisa.Main { + draft() + + val x = variable[Ind] + val y = variable[Ind] + val z = variable[Ind] + val P = variable[Ind >>: Prop] + val d = variable[Ind >>: Prop] + val Q = variable[Ind >>: Prop] + val f = variable[Ind >>: Ind] + + val t0 = constant[Ind] + val t1 = constant[Ind] + val t2 = constant[Ind] + val t3 = constant[Ind] + val a = constant[Ind] + val mult = variable[Ind >>: Ind >>: Ind] + val div = variable[Ind >>: Ind >>: Ind] + addSymbol(t0) + addSymbol(t1) + addSymbol(t2) + addSymbol(t3) + addSymbol(a) + + extension (t: Expr[Ind]) { + def / (y: Expr[Ind]): Expr[Ind] = div(t)(y) + def * (y: Expr[Ind]): Expr[Ind] = mult(t)(y) + } + + + def _div(x: Expr[Ind], y: Expr[Ind]): Expr[Ind] = div(x)(y) + def _mult(x: Expr[Ind], y: Expr[Ind]): Expr[Ind] = mult(x)(y) + + + val divide_mult_shift = Theorem(( + ∀(x, x/t1 === x), + ∀(x, ∀(y, x/y === t1/(y/x))), + ∀(x, ∀(y, (x/y)*y === x)), + ) |- ((t2/t3)*(t3/t2))/t1 === t1): + have(thesis) by Egg + + + val saturation = Theorem( + (∀(x, x === f(f(f(x)))), ∀(x, ∀(y, x === f(f(x))))) |- ∅ === f(∅)): + have(thesis) by Egg + + val drinkers2 = Theorem(∃(x, ∀(y, d(x) ==> d(y)))): + have(thesis) by Goeland + + val example = Theorem( (∀(x, P(x)) \/ ∀(y, Q(y))) ==> (P(∅) \/ Q(∅)) ): + have(thesis) by Prover9 + + val example2 = Theorem(∃(x, ∀(y, d(x) ==> d(y)))): + have(thesis) by Prover9 + + + +} diff --git a/lisa-sets2/src/main/scala/lisa/automation/CommonTactics.scala b/lisa-sets2/src/main/scala/lisa/automation/CommonTactics.scala new file mode 100644 index 000000000..6e925d9cf --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/CommonTactics.scala @@ -0,0 +1,262 @@ +package lisa.automation.kernel + +import lisa.automation.Tautology +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.BasicStepTactic.* +import 
lisa.utils.prooflib.ProofTacticLib.{_, given} +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* +import lisa.utils.K + +object CommonTactics { + /* + + /** + **/ object Cut extends ProofTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop])(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val leftSequent = proof.getSequent(prem1).underlying lazy val rightSequent = proof.getSequent(prem2).underlying val botK = bot.underlying @@ -105,12 +104,12 @@ object BasicStepTactic { * */ object LeftAnd extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, psi: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], psi: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying val botK = bot.underlying val phiK = phi.underlying val psiK = psi.underlying - lazy val phiAndPsi = K.ConnectorFormula(K.And, Seq(phiK, psiK)) + lazy val phiAndPsi = phiK /\ psiK if (!K.isSameSet(botK.right, premiseSequent.right)) proof.InvalidProofTactic("Right-hand side of the conclusion is not the same as the right-hand side of the premise.") @@ -130,7 +129,7 @@ object BasicStepTactic { if (!pivot.isEmpty && pivot.tail.isEmpty) pivot.head match { - case F.AppliedConnector(F.And, Seq(phi, psi)) => + case F.App(F.App(F.and, phi), psi) => if (premiseSequent.left.contains(phi)) LeftAnd.withParameters(phi, psi)(premise)(bot) else @@ -154,11 +153,11 @@ object BasicStepTactic { * */ object LeftOr extends ProofTactic { - def withParameters(using lib: Library, proof: lib.Proof)(disjuncts: F.Formula*)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(disjuncts: F.Expr[F.Prop]*)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequents = premises.map(proof.getSequent(_).underlying) val botK = bot.underlying val disjunctsK = disjuncts.map(_.underlying) - lazy val disjunction = K.ConnectorFormula(K.Or, disjunctsK) + lazy val disjunction = K.multior(disjunctsK) if (premises.length == 0) proof.InvalidProofTactic(s"Premises expected, ${premises.length} received.") @@ -202,13 +201,13 @@ object BasicStepTactic { * */ object LeftImplies extends ProofTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, psi: F.Formula)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], psi: F.Expr[F.Prop])(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val leftSequent = proof.getSequent(prem1).underlying lazy val rightSequent = proof.getSequent(prem2).underlying val botK = bot.underlying val phiK = phi.underlying val psiK = psi.underlying - lazy val implication = K.ConnectorFormula(K.Implies, Seq(phiK, psiK)) + lazy val implication = (phiK ==> psiK) if (!K.isSameSet(botK.right + phiK, leftSequent.right union rightSequent.right)) proof.InvalidProofTactic("Right-hand side of conclusion + φ is not the union of right-hand sides of premises.") 
@@ -248,14 +247,14 @@ object BasicStepTactic { * */ object LeftIff extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, psi: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], psi: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying val botK = bot.underlying val phiK = phi.underlying val psiK = psi.underlying - lazy val implication = K.ConnectorFormula(K.Iff, Seq(phiK, psiK)) - lazy val impLeft = K.ConnectorFormula(K.Implies, Seq(phiK, psiK)) - lazy val impRight = K.ConnectorFormula(K.Implies, Seq(psiK, phiK)) + lazy val implication = phiK <=> psiK + lazy val impLeft = phiK ==> psiK + lazy val impRight = psiK ==> phiK if (!K.isSameSet(botK.right, premiseSequent.right)) proof.InvalidProofTactic("Right-hand side of premise is not the same as right-hand side of conclusion.") @@ -280,7 +279,7 @@ object BasicStepTactic { proof.InvalidProofTactic("Right-hand side of conclusion is not a superset of the premises.") else pivot.head match { - case F.AppliedConnector(F.Implies, Seq(phi, psi)) => LeftIff.withParameters(phi, psi)(premise)(bot) + case F.App(F.App(F.implies, phi), psi) => LeftIff.withParameters(phi, psi)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer a pivot implication from premise.") } } @@ -294,11 +293,11 @@ object BasicStepTactic { * */ object LeftNot extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying val botK = bot.underlying val phiK = phi.underlying - lazy val negation = K.ConnectorFormula(K.Neg, Seq(phiK)) + lazy val negation = !phiK if (!K.isSameSet(botK.right + phiK, premiseSequent.right)) proof.InvalidProofTactic("Right-hand side of conclusion + φ is not the same as right-hand side of premise.") @@ -333,17 +332,14 @@ object BasicStepTactic { * */ object LeftForall extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, x: F.Variable, t: F.Term | K.Term)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable[F.Ind], t: F.Expr[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val xK = x.underlyingLabel - lazy val tK = t match { - case t: F.Term => t.underlying - case t: K.Term => t - } + lazy val xK = x.underlying + lazy val tK = t.underlying lazy val phiK = phi.underlying lazy val botK = bot.underlying - lazy val quantified = K.BinderFormula(K.Forall, xK, phiK) - lazy val instantiated = K.substituteVariablesInFormula(phiK, Map(xK -> tK), Seq()) + lazy val quantified = K.forall(xK, phiK) + lazy val instantiated = K.substituteVariables(phiK, Map(xK -> tK)) if (!K.isSameSet(botK.right, premiseSequent.right)) proof.InvalidProofTactic("Right-hand side of conclusion is not the same as right-hand side of premise") @@ -353,7 +349,7 @@ object BasicStepTactic { 
proof.ValidProofTactic(bot, Seq(K.LeftForall(botK, -1, phiK, xK, tK)), Seq(premise)) } - def withParameters(using lib: Library, proof: lib.Proof)(t: F.Term)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(t: F.Expr[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise) lazy val pivot = bot.left.diff(premiseSequent.left) lazy val instantiatedPivot = premiseSequent.left // .diff(botK.left) @@ -361,7 +357,7 @@ object BasicStepTactic { if (!pivot.isEmpty) if (pivot.tail.isEmpty) pivot.head match { - case F.BinderFormula(F.Forall, x, phi) => LeftForall.withParameters(phi, x, t)(premise)(bot) + case F.forall(x, phi) => LeftForall.withParameters(phi, x, t)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer a universally quantified pivot from premise and conclusion.") } else @@ -374,16 +370,16 @@ object BasicStepTactic { else if (instantiatedPivot.tail.isEmpty) { // go through conclusion to find a matching quantified formula - val in: F.Formula = instantiatedPivot.head - val quantifiedPhi: Option[F.Formula] = bot.left.find(f => + val in: F.Expr[F.Prop] = instantiatedPivot.head + val quantifiedPhi: Option[F.Expr[F.Prop]] = bot.left.find(f => f match { - case g @ F.BinderFormula(F.Forall, _, _) => F.isSame(F.instantiateBinder(g, t), in) + case g @ F.forall(v, e) => F.isSame(e.substitute(v := t), in) case _ => false } ) quantifiedPhi match { - case Some(F.BinderFormula(F.Forall, x, phi)) => LeftForall.withParameters(phi, x, t)(premise)(bot) + case Some(F.forall(x, phi)) => LeftForall.withParameters(phi, x, t)(premise)(bot) case _ => proof.InvalidProofTactic("Could not match discovered quantified pivot with premise.") } } else proof.InvalidProofTactic("Left-hand side of conclusion + φ[t/x] is not the same as left-hand side of premise + ∀x. φ.") @@ -403,17 +399,19 @@ object BasicStepTactic { else if (instantiatedPivot.tail.isEmpty) { // go through conclusion to find a matching quantified formula - val in: F.Formula = instantiatedPivot.head - val quantifiedPhi: Option[F.Formula] = pivot.find(f => + val in: F.Expr[F.Prop] = instantiatedPivot.head + val quantifiedPhi: Option[(F.Expr[F.Prop], Substitution)] = pivot.collectFirstDefined(f => f match { - case g @ F.BinderFormula(F.Forall, x, phi) => UnificationUtils.matchFormula(in, phi, takenTermVariables = (phi.freeVariables - x)).isDefined - case _ => false + case g @ F.forall(x, phi) => + val ctx = RewriteContext.withBound(phi.freeVars - x) + matchExpr(using ctx)(phi, in).map(f -> _) + case _ => None } ) quantifiedPhi match { - case Some(F.BinderFormula(F.Forall, x, phi)) => - LeftForall.withParameters(phi, x, UnificationUtils.matchFormula(in, phi, takenTermVariables = (phi.freeVariables - x)).get._2.getOrElse(x, x))(premise)(bot) + case Some((F.forall(x, phi), subst)) => + LeftForall.withParameters(phi, x, subst(x).getOrElse(x))(premise)(bot) case _ => proof.InvalidProofTactic("Could not match discovered quantified pivot with premise.") } } else proof.InvalidProofTactic("Left-hand side of conclusion + φ[t/x] is not the same as left-hand side of premise + ∀x. 
φ.") @@ -429,12 +427,12 @@ object BasicStepTactic { * */ object LeftExists extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, x: F.Variable)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val xK = x.underlyingLabel + lazy val xK = x.underlying lazy val phiK = phi.underlying lazy val botK = bot.underlying - lazy val quantified = K.BinderFormula(K.Exists, xK, phiK) + lazy val quantified = K.exists(xK, phiK) if ((botK.left union botK.right).exists(_.freeVariables.contains(xK))) proof.InvalidProofTactic("The variable x must not be free in the resulting sequent.") @@ -459,22 +457,22 @@ object BasicStepTactic { else proof.InvalidProofTactic("Could not infer a pivot from premise and conclusion.") else if (instantiatedPivot.tail.isEmpty) { - val in: F.Formula = instantiatedPivot.head - val quantifiedPhi: Option[F.Formula] = bot.left.find(f => + val in: F.Expr[F.Prop] = instantiatedPivot.head + val quantifiedPhi: Option[F.Expr[F.Prop]] = bot.left.find(f => f match { - case F.BinderFormula(F.Exists, _, g) => F.isSame(g, in) + case F.exists(_, g) => F.isSame(g, in) case _ => false } ) quantifiedPhi match { - case Some(F.BinderFormula(F.Exists, x, phi)) => LeftExists.withParameters(phi, x)(premise)(bot) + case Some(F.exists(x, phi)) => LeftExists.withParameters(phi, x)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer an existensially quantified pivot from premise and conclusion.") } } else proof.InvalidProofTactic("Ambigous application of LeftExists, multiple pivots corresponding to the unquantified formula found.") else if (pivot.tail.isEmpty) pivot.head match { - case F.BinderFormula(F.Exists, x, phi) => LeftExists.withParameters(phi, x)(premise)(bot) + case F.exists(x, phi) => LeftExists.withParameters(phi, x)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer an existentially quantified pivot from premise and conclusion.") } else @@ -482,6 +480,7 @@ object BasicStepTactic { } } + /* /** *+ * Γ |- ∃x. φ, Δ Γ', φ, φ[y/x] |- x = y, Δ' + * ------------------------------------------- + * Γ, Γ' |- ∃!x. φ, Δ, Δ' + *+ * + * This tactic separates the existence and the uniqueness proofs, which are often easier to prove independently, at + * the expense of brevity. + * + * @see [[RightExistsOne]]. + */ + object ExistenceAndUniqueness extends ProofTactic { + def withParameters(using lib: Library, proof: lib.Proof, om: OutputManager)(phi: F.Prop, x: F.Variable, y: F.Variable)(existence: proof.Fact, uniqueness: proof.Fact)( + bot: F.Sequent + ): proof.ProofTacticJudgement = { + val existenceSeq = proof.getSequent(existence) + val uniquenessSeq = proof.getSequent(uniqueness) + + lazy val substPhi = phi.substitute(x := y) + lazy val existenceFormula = F.∃(x, phi) + lazy val uniqueExistenceFormula = F.∃!(x, phi) + + // Checking that all formulas are present + if (x == y) { + proof.InvalidProofTactic("x and y can not be equal.") + } else if (!F.contains(existenceSeq.right, existenceFormula)) { + proof.InvalidProofTactic(s"Existence sequent conclusion does not contain ∃x. 
φ.") + } else if (!F.contains(uniquenessSeq.left, phi)) { + proof.InvalidProofTactic("Uniqueness sequent premises do not contain φ.") + } else if (!F.contains(uniquenessSeq.left, substPhi)) { + proof.InvalidProofTactic(s"Uniqueness sequent premises do not contain φ[y/x].") + } else if (!F.contains(uniquenessSeq.right, x === y) && !F.contains(uniquenessSeq.right, y === x)) { + proof.InvalidProofTactic(s"Uniqueness sequent conclusion does not contain x = y") + } else if (!F.contains(bot.right, uniqueExistenceFormula)) { + proof.InvalidProofTactic(s"Bottom sequent conclusion does not contain ∃!x. φ") + } + + // Checking pivots + else if (!F.isSameSet(existenceSeq.left ++ uniquenessSeq.left, bot.left + phi + substPhi)) { + proof.InvalidProofTactic("Could not infer correct left pivots.") + } else if (!F.isSameSet(existenceSeq.right ++ uniquenessSeq.right + uniqueExistenceFormula, bot.right + existenceFormula + (x === y))) { + proof.InvalidProofTactic("Could not infer correct right pivots.") + } else { + val gammaPrime = uniquenessSeq.left.filter(f => !F.isSame(f, phi) && !F.isSame(f, substPhi)) + + TacticSubproof { + // There's got to be a better way of importing have/thenHave/assume methods + // but I did not find one + + val forward = lib.have(phi |- ((x === y) ==> substPhi)) subproof { + lib.assume(phi) + lib.thenHave((x === y) |- substPhi) by RightSubstEq.withParametersSimple(List((x, y)), F.lambda(x, phi)) + lib.thenHave((x === y) ==> substPhi) by Restate + } + + for (f <- gammaPrime) { + lib.assume(f) + } + + val backward = lib.have(phi |- (substPhi ==> (x === y))) by Restate.from(uniqueness) + + lib.have(phi |- ((x === y) <=> substPhi)) by RightIff(forward, backward) + lib.thenHave(phi |- F.∀(y, (x === y) <=> substPhi)) by RightForall + lib.thenHave(phi |- F.∃(x, F.∀(y, (x === y) <=> substPhi))) by RightExists + lib.thenHave(F.∃(x, phi) |- F.∃(x, F.∀(y, (x === y) <=> substPhi))) by LeftExists + lib.thenHave(F.∃(x, phi) |- F.∃!(x, phi)) by RightExistsOne + + lib.have(bot) by Cut(existence, lib.lastStep) + } + } + } + + def apply(using lib: Library, proof: lib.Proof, om: OutputManager)(phi: F.Prop)(existence: proof.Fact, uniqueness: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + val existenceSeq = proof.getSequent(existence) + val uniquenessSeq = proof.getSequent(uniqueness) + + // Try to infer x from the premises + // Specifically, find variables in the correct quantifiers, common to all three sequents + val existsVars: Set[F.Variable] = existenceSeq.right.collect { + case F.BinderFormula(F.Exists, x, f) if F.isSame(f, phi) => x + } + if (existsVars.isEmpty) { + return proof.InvalidProofTactic("Missing existential quantifier in the existence sequent.") + } + + val commonVars = bot.right.collect { + case F.BinderFormula(F.ExistsOne, x, f) if F.isSame(f, phi) && existsVars.contains(x) => x + } + if (commonVars.size != 1) { + return proof.InvalidProofTactic("Could not infer correct variable x in quantifiers.") + } + + val x = commonVars.head + + // Infer y from the equalities in the uniqueness sequent + uniquenessSeq.right.collectFirst { + case F.AppliedPredicate(F.`equality`, Seq(`x`, (y: F.Variable))) if x != y && F.contains(uniquenessSeq.left, phi.substitute(x := y)) => + y + + case F.AppliedPredicate(F.`equality`, List(F.AppliedFunctional(y: F.Variable, _), F.AppliedFunctional(`x`, _))) if x != y && F.contains(uniquenessSeq.left, phi.substitute(x := y)) => + y + } match { + case Some(y) => ExistenceAndUniqueness.withParameters(phi, x, y)(existence, uniqueness)(bot) + case 
None => proof.InvalidProofTactic("Could not infer correct variable y in uniqueness sequent.") + } + } + } + + /** + *+ * + * ------------- if f(xs) = The(y, P(y)) is a function definition + * |- P(f(xs)) + *+ * Here `xs` is an arbitrary list of parameters. + * + * If `f(xs) = The(y, (φ ==> Q(y)) /\ (!φ ==> (y === t)))` is a conditional function definition, then: + *+ * + * -------------- + * φ |- Q(f(xs)) + *+ */ + object Definition extends ProofTactic { + def apply(using lib: Library, proof: lib.Proof)(f: F.ConstantFunctionLabel[?], uniqueness: proof.Fact)(xs: F.Ind*)(bot: F.Sequent): proof.ProofTacticJudgement = { + val expr = lib.getDefinition(f) match { + case Some(value: lib.FunctionDefinition[?]) => value + case _ => return proof.InvalidProofTactic("Could not get definition of function.") + } + val method: (F.ConstantFunctionLabel[?], proof.Fact) => Seq[F.Ind] => F.Sequent => proof.ProofTacticJudgement = + expr.f.substituteUnsafe(expr.vars.zip(xs).toMap) match { + case F.AppliedConnector( + F.And, + Seq( + F.AppliedConnector(F.Implies, Seq(a, _)), + F.AppliedConnector(F.Implies, Seq(b, _)) + ) + ) if F.isSame(F.Neg(a), b) => + conditional(using lib, proof) + + case _ => unconditional(using lib, proof) + } + method(f, uniqueness)(xs)(bot) + } + + /** + *+ * + * ------------- if f(xs) = The(y, P(y)) is a function definition + * |- P(f(xs)) + *+ */ + def unconditional(using lib: Library, proof: lib.Proof)(f: F.ConstantFunctionLabel[?], uniqueness: proof.Fact)(xs: F.Ind*)(bot: F.Sequent): proof.ProofTacticJudgement = { + lib.getDefinition(f) match { + case Some(definition: lib.FunctionDefinition[?]) => + if (bot.right.size != 1) { + return proof.InvalidProofTactic("Right-hand side of bottom sequent should contain only 1 formula.") + } + val y = definition.out + val vars = definition.vars + val fxs = f.applyUnsafe(xs) + + // Instantiate terms in the definition + val subst = vars.zip(xs).map(tup => tup._1 := tup._2) + val P = definition.f.substitute(subst*) + val expected = P.substitute(y := fxs) + if (!F.isSame(expected, bot.right.head)) { + return proof.InvalidProofTactic("Right-hand side of bottom sequent should be of the form P(f(xs)).") + } + + TacticSubproof { + lib.have(F.∀(y, (y === fxs) <=> P)) by Tautology.from(uniqueness, definition.of(subst*)) + lib.thenHave((y === fxs) <=> P) by InstantiateForall(y) + lib.thenHave((fxs === fxs) <=> P.substitute(y := fxs)) by InstSchema(Map(y -> fxs)) + lib.thenHave(P.substitute(y := fxs)) by Restate + } + + case _ => proof.InvalidProofTactic("Could not get definition of function.") + } + } + + /** + *+ * + * -------------- if f(xs) = The(y, (φ ==> Q(y)) /\ (!φ ==> R(y))) + * φ |- Q(f(xs)) + *+ */ + def conditional(using lib: Library, proof: lib.Proof)(f: F.ConstantFunctionLabel[?], uniqueness: proof.Fact)(xs: F.Ind*)(bot: F.Sequent): proof.ProofTacticJudgement = { + lib.getDefinition(f) match { + case Some(definition: lib.FunctionDefinition[?]) => + if (bot.right.size != 1) { + return proof.InvalidProofTactic("Right-hand side of bottom sequent should contain exactly 1 formula.") + } else if (bot.left.isEmpty) { + return proof.InvalidProofTactic("Left-hand side of bottom sequent should not be empty.") + } + val y = definition.out + val vars = definition.vars + + // Extract variable labels to instantiate them later in the proof + // val F.LambdaTermFormula(vars, _) = expr + // val instantiations: Seq[(F.SchematicTermLabel, F.LambdaTermTerm)] = vars.zip(xs.map(x => F.LambdaTermTerm(Seq(), x))) + + val subst = vars.zip(xs).map(tup => tup._1 := 
tup._2) + val P = definition.f.substitute(subst*) + // Instantiate terms in the definition + // val P = F.LambdaTermFormula(Seq(y), expr(xs)) + + // Unfold the conditional definition to find Q + val phi = F.And(bot.left.toSeq) + val Q: F.LambdaExpression[F.Ind, F.Prop, 1] = P.body match { + case F.AppliedConnector( + F.And, + Seq( + F.AppliedConnector(F.Implies, Seq(a, f)), + F.AppliedConnector(F.Implies, Seq(b, g)) + ) + ) if F.isSame(F.Neg(a), b) => + if (F.isSame(a, phi)) F.lambda(y, f) + else if (F.isSame(b, phi)) F.lambda(y, g) + else return proof.InvalidProofTactic("Condition of definition is not satisfied.") + + case _ => + return proof.InvalidProofTactic("Definition is not conditional.") + } + + val fxs = f.applyUnsafe(xs) + + val expected = P.substitute(y := fxs) + if (!F.isSame(expected, bot.right.head)) { + return proof.InvalidProofTactic("Right-hand side of bottom sequent should be of the form Q(fxs).") + } + + TacticSubproof { + lib.have(F.∀(y, (y === fxs) <=> P)) by Tautology.from(uniqueness, definition.of(subst*)) + lib.thenHave((y === fxs) <=> P) by InstantiateForall(y) + lib.thenHave((fxs === fxs) <=> P.substitute(y := fxs)) by InstSchema(Map(y -> fxs)) + lib.thenHave(P.substitute(y := fxs)) by Restate + lib.thenHave(phi ==> Q(fxs)) by Tautology + lib.thenHave(phi |- Q(fxs)) by Restate + } + + case _ => proof.InvalidProofTactic("Could not get definition of function.") + } + } + } + */ +} diff --git a/lisa-sets2/src/main/scala/lisa/automation/Congruence.scala b/lisa-sets2/src/main/scala/lisa/automation/Congruence.scala new file mode 100644 index 000000000..fc1ab25ae --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/Congruence.scala @@ -0,0 +1,499 @@ +package lisa.automation +import lisa.utils.fol.FOL.{*, given} +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* +import lisa.utils.K +import leo.datastructures.TPTP.AnnotatedFormula.FormulaType + +/** + * This tactic tries to prove a sequent by congruence. + * Consider the congruence closure of all terms and formulas in the sequent, with respect to all === and <=> on the left of the sequent. + * The sequent is provable by congruence if one of the following conditions is met: + * - The right side contains an equality s === t or equivalence a <=> b provable in the congruence closure. + * - The left side contains a negated equality !(s === t) or equivalence !(a <=> b) provable in the congruence closure. + * - There is a formula a on the left and b on the right such that a and b are congruent. + * - There are two formulas a and !b on the left such that a and b are congruent. + * - There are two formulas a and !b on the right such that a and b are congruent. + * - The sequent is OL-valid without equality reasoning. + * Note that complete congruence closure modulo OL is an open problem. + * + * The tactic uses an egraph data structure to compute the congruence closure. + * The egraph itself relies on two underlying union-find data structures, one for terms and one for formulas. + * The union-finds are equipped with an `explain` method that produces a path between any two elements in the same equivalence class. + * Each edge of the path can come from an external equality, or be the consequence of congruence. + * The tactic uses this path to produce the needed proofs.
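+ *
+ * As a rough sketch of how the underlying structure is used (the expressions `a`, `b`, `c`, and `fa`, `fb`
+ * standing for f(a), f(b) are hypothetical, for illustration only):
+ * {{{
+ * val eg = new EGraphExpr()
+ * List(a, b, c, fa, fb).foreach(eg.add)
+ * eg.merge(a, b)    // external equality a === b
+ * eg.merge(fa, c)   // external equality f(a) === c
+ * eg.idEq(fb, c)    // true: f(b) ~ f(a) ~ c by congruence
+ * eg.explain(fb, c) // a chain of external and congruence steps justifying the equality
+ * }}}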
+ */ +object Congruence extends ProofTactic with ProofSequentTactic { + + def apply(using lib: Library, proof: lib.Proof)(bot: Sequent): proof.ProofTacticJudgement = TacticSubproof { + + import lib.* + + val egraph = new EGraphExpr() + egraph.addAll(bot.left) + egraph.addAll(bot.right) + + bot.left.foreach { + case === #@ l #@ r => egraph.merge(l, r) + case <=> #@ l #@ r => egraph.merge(l, r) + case _ => () + } + + if isSameSequent(bot, ⊤) then have(bot) by Restate + else if bot.left.exists { lf => + bot.right.exists { rf => + if egraph.idEq(lf, rf) then + val base = have(bot.left |- (bot.right + lf)) by Restate + val eq = have(egraph.proveExpr(lf, rf, bot)) + val a = variable[Prop] + have((bot.left + (lf <=> rf)) |- (bot.right)) by RightSubstEq.withParameters(Seq((lf, rf)), (Seq(a), a))(base) + have(bot) by Cut(eq, lastStep) + true + else false + } || + bot.left.exists { + case rf2 @ neg #@ rf if egraph.idEq(lf, rf) => + val base = have((bot.left + !lf) |- bot.right) by Restate + val eq = have(egraph.proveExpr(lf, rf.asInstanceOf, bot)) + val a = variable[Prop] + have((bot.left + makeEq(lf, rf)) |- (bot.right)) by LeftSubstEq.withParameters(Seq((lf, rf)), (Seq(a), !a))(base) + have(bot) by Cut(eq, lastStep) + true + case _ => false + } || { + lf match + case neg #@ (=== #@ a #@ b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case neg #@ (<=> #@ a #@ b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case _ => false + } + + } + then () + else if bot.right.exists { rf => + bot.right.exists { + case lf2 @ neg #@ (lf) if egraph.idEq(lf, rf) => + val base = have((bot.left) |- (bot.right + !rf)) by Restate + val eq = have(egraph.proveExpr[Prop](lf.asInstanceOf, rf, bot)) + val a = variable[Prop] + have((bot.left + makeEq(lf, rf)) |- (bot.right)) by RightSubstEq.withParameters(Seq((lf, rf)), (Seq(a), !a))(base) + have(bot) by Cut(eq, lastStep) + true + case _ => false + } || { + rf match + case (=== #@ a #@ b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case (<=> #@ a #@ b) if egraph.idEq(a, b) => + have(egraph.proveExpr(a, b, bot)) + true + case _ => false + } + } + then () + else return proof.InvalidProofTactic(s"No congruence found to show sequent\n $bot") + + } + +} + +class UnionFind[T] { + // parent of each element, leading to its root. Uses path compression + val parent = scala.collection.mutable.Map[T, T]() + // original parent of each element, leading to its root. Does not use path compression. Used for explain. + val realParent = scala.collection.mutable.Map[T, (T, ((T, T), Boolean, Int))]() + // keep track of the rank (i.e. number of elements bellow it) of each element. Necessary to optimize union. + val rank = scala.collection.mutable.Map[T, Int]() + // tracks order of ancientness of unions. + var unionCounter = 0 + + /** + * add a new element to the union-find. 
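+ *
+ * For illustration, a minimal self-contained use of the structure as a whole:
+ * {{{
+ * val uf = new UnionFind[String]()
+ * uf.addAll(List("a", "b", "c"))
+ * uf.union("a", "b")
+ * uf.union("b", "c")
+ * uf.find("a") == uf.find("c") // true: the three elements now share one root
+ * uf.explain("a", "c")         // Some(List(("a", "b"), ("b", "c"))): the unions linking them
+ * }}}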
+ */ + def add(x: T): Unit = { + parent(x) = x + realParent(x) = (x, ((x, x), true, 0)) + rank(x) = 0 + } + + /** + * @param x the element whose parent we want to find + * @return the root of x + */ + def find(x: T): T = { + if parent(x) == x then x + else + var root = x + while parent(root) != root do root = parent(root) + var y = x + while parent(y) != root do + parent(y) = root + y = parent(y) + root + } + + /** + * Merges the classes of x and y + */ + def union(x: T, y: T): Unit = { + unionCounter += 1 + val xRoot = find(x) + val yRoot = find(y) + if (xRoot == yRoot) return + if (rank(xRoot) < rank(yRoot)) { + parent(xRoot) = yRoot + realParent(xRoot) = (yRoot, ((x, y), true, unionCounter)) + } else if (rank(xRoot) > rank(yRoot)) { + parent(yRoot) = xRoot + realParent(yRoot) = (xRoot, ((x, y), false, unionCounter)) + } else { + parent(yRoot) = xRoot + realParent(yRoot) = (xRoot, ((x, y), false, unionCounter)) + rank(xRoot) = rank(xRoot) + 1 + } + } + + private def getPathToRoot(x: T): List[T] = { + if x == find(x) then List(x) + else + val next = realParent(x) + x :: getPathToRoot(next._1) + + } + + private def getExplanationFromTo(x: T, c: T): List[(T, ((T, T), Boolean, Int))] = { + if x == c then List() + else + val next = realParent(x) + next :: getExplanationFromTo(next._1, c) + } + + private def lowestCommonAncestor(x: T, y: T): Option[T] = { + val pathX = getPathToRoot(x) + val pathY = getPathToRoot(y) + pathX.find(pathY.contains) + } + + /** + * Returns a path from x to y made of pairs of elements (u, v) + * such that union(u, v) was called. + */ + def explain(x: T, y: T): Option[List[(T, T)]] = { + + if (x == y) then return Some(List()) + val lca = lowestCommonAncestor(x, y) + lca match + case None => None + case Some(lca) => + var max: ((T, T), Boolean, Int) = ((x, x), true, 0) + var itX = x + while itX != lca do + val (next, ((u1, u2), b, c)) = realParent(itX) + if c > max._3 then max = ((u1, u2), b, c) + itX = next + + var itY = y + while itY != lca do + val (next, ((u1, u2), b, c)) = realParent(itY) + if c > max._3 then max = ((u1, u2), !b, c) + itY = next + + val u1 = max._1._1 + val u2 = max._1._2 + if max._2 then Some(explain(x, u1).get ++ List((u1, u2)) ++ explain(u2, y).get) + else Some(explain(x, u2).get ++ List((u1, u2)) ++ explain(u1, y).get) + } + + /** + * Returns the set of all roots of all classes + */ + def getClasses: Set[T] = parent.keys.map(find).toSet + + /** + * Add all elements in the collection to the union-find + */ + def addAll(xs: Iterable[T]): Unit = xs.foreach(add) + +} + +/////////////////////////////// +///////// E-graph ///////////// +/////////////////////////////// + +import scala.collection.mutable + +class EGraphExpr() { + + val parents = mutable.Map[Expr[?], mutable.Set[Expr[?]]]() + val UF = new UnionFind[Expr[?]]() + + def find[T](id: Expr[T]): Expr[T] = UF.find(id).asInstanceOf[Expr[T]] + + trait Step + case class ExternalStep(between: (Expr[?], Expr[?])) extends Step + case class CongruenceStep(between: (Expr[?], Expr[?])) extends Step + + val proofMap = mutable.Map[(Expr[?], Expr[?]), Step]() + + def explain(id1: Expr[?], id2: Expr[?]): Option[List[Step]] = { + val steps = UF.explain(id1, id2) + steps.map(_.foldLeft((id1, List[Step]())) { case ((prev, acc), step) => + proofMap(step) match + case s @ ExternalStep((l, r)) => + if l == prev then (r, s :: acc) + else if r == prev then (l, ExternalStep(r, l) :: acc) + else throw new Exception("Invalid proof recovered: It is not a chain") + case s @ CongruenceStep((l, r)) => + if l == prev 
then (r, s :: acc) + else if r == prev then (l, CongruenceStep(r, l) :: acc) + else throw new Exception("Invalid proof recovered: It is not a chain") + + }._2.reverse) + } + + def makeSingletonEClass(node: Expr[?]): Expr[?] = { + UF.add(node) + parents(node) = mutable.Set() + node + } + + def idEq(id1: Expr[?], id2: Expr[?]): Boolean = find(id1) == find(id2) + + def canonicalize(node: Expr[?]): Expr[?] = node match + case App(f, a) => App.unsafe(canonicalize(f), find(a)) + case _ => node + + def add(node: Expr[?]): Expr[?] = + if codes.contains(node) then node + else codes(node) = codes.size + if node.sort == K.Ind || node.sort == K.Prop then + makeSingletonEClass(node) + node match + case Multiapp(f, args) => + args.foreach(child => + add(child) + parents(find(child)).add(node) + ) + mapSigs(canSig(node)) = node + node + + def addAll(nodes: Iterable[Expr[Ind] | Expr[Prop]]): Unit = + nodes.foreach { e => + (e: @unchecked) match + case node: Expr[Ind] if node.sort == K.Ind => add(node) + case node: Expr[Prop] if node.sort == K.Prop => add(node) + case _ => () + } + + def merge[S](id1: Expr[S], id2: Expr[S]): Unit = { + mergeWithStep(id1, id2, ExternalStep((id1, id2))) + } + + def mergeUnsafe(id1: Expr[?], id2: Expr[?]): Unit = { + mergeWithStep(id1, id2, ExternalStep((id1, id2))) + } + + type Sig = (Expr[?], List[Int]) + val mapSigs = mutable.Map[Sig, Expr[?]]() + val codes = mutable.Map[Expr[?], Int]() + + def canSig(node: Expr[?]): Sig = node match + case Multiapp(label, args) => + (label, args.map(a => codes(find(a))).toList) + + protected def mergeWithStep(id1: Expr[?], id2: Expr[?], step: Step): Unit = { + if id1.sort != id2.sort then throw new IllegalArgumentException("Cannot merge nodes of different sorts") + if find(id1) == find(id2) then () + else + proofMap((id1, id2)) = step + val parents1 = parents(find(id1)) + val parents2 = parents(find(id2)) + + if find(id1) == find(id2) then return () + + proofMap((id1, id2)) = step + val (small, big) = if parents(find(id1)).size < parents(find(id2)).size then (id1, id2) else (id2, id1) + codes(find(small)) = codes(find(big)) + UF.union(id1, id2) + val newId = find(id1) + var worklist = List[(Expr[?], Expr[?], Step)]() + + parents(small).foreach { pExpr => + val canonicalPExpr = canSig(pExpr) + if mapSigs.contains(canonicalPExpr) then + val qExpr = mapSigs(canonicalPExpr) + + worklist = (pExpr, qExpr, CongruenceStep((pExpr, qExpr))) :: worklist + else mapSigs(canonicalPExpr) = pExpr + } + parents(newId) = parents(big) + parents(newId).addAll(parents(small)) + worklist.foreach { case (l, r, step) => mergeWithStep(l, r, step) } + } + + def proveExpr[S](using lib: Library, proof: lib.Proof)(id1: Expr[S], id2: Expr[S], base: Sequent): proof.ProofTacticJudgement = + TacticSubproof { proveInnerTerm(id1, id2, base) } + + def proveInnerTerm(using lib: Library, proof: lib.Proof)(id1: Expr[?], id2: Expr[?], base: Sequent): Unit = { + import lib.* + val steps = explain(id1, id2) + steps match { + case None => throw new Exception("No proof found in the egraph") + case Some(steps) => + if steps.isEmpty then have(base.left |- (base.right + (makeEq(id1, id2)))) by Restate + steps.foreach { + case ExternalStep((l, r)) => + val goalSequent = base.left |- (base.right + (makeEq(id1, r))) + if l == id1 then have(goalSequent) by Restate + else + val x = variable[Ind](freshId(Seq(id1))) + have(goalSequent) by RightSubstEq.withParameters(List((l, r)), (Seq(x), makeEq(id1, x)))(lastStep) + case CongruenceStep((l, r)) => + val prev = if id1 != l then lastStep else 
null + val leqr = have(base.left |- (base.right + (makeEq(l, r)))) subproof { sp ?=> + (l, r) match + case (Multiapp(labell, argsl), Multiapp(labelr, argsr)) if labell == labelr && argsl.size == argsr.size => + var freshn = freshId((l.freeVars ++ r.freeVars).map(_.id), "n").no + val ziped = (argsl zip argsr) + var zip = List[(Expr[?], Expr[?])]() + var children = List[Expr[?]]() + var vars = List[Variable[?]]() + var steps = List[(Expr[Prop], sp.ProofStep)]() + ziped.reverse.foreach { (al, ar) => + if al == ar then children = al :: children + else { + val x = variable(Identifier("n", freshn), al.sort) + freshn = freshn + 1 + children = x :: children + vars = x :: vars + steps = (makeEq(al, ar), have(proveExpr(al, ar.asInstanceOf, base))) :: steps + zip = (al, ar) :: zip + } + } + have(base.left |- (base.right + makeEq(l, l))) by Restate + val eqs = zip.map((l, r) => makeEq(l, r)) + val goal = have((base.left ++ eqs) |- (base.right + makeEq(l, r))).by.bot + have((base.left ++ eqs) |- (base.right + makeEq(l, r))) by RightSubstEq.withParameters(zip, (vars, makeEq(l, Multiapp.unsafe(labelr, children))))(lastStep) + steps.foreach { s => + have( + if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 + ) by Cut(s._2, lastStep) + } + case _ => + println(s"l: $l") + println(s"r: $r") + throw Exception("Unreachable") + + } + if id1 != l then + val goalSequent = base.left |- (base.right + (makeEq(id1, r))) + val x = variable(freshId(Seq(id1)), id1.sort) + have(goalSequent +<< makeEq(l, r)) by RightSubstEq.withParameters(List((l, r)), (Seq(x), makeEq(id1, x)))(prev) + have(goalSequent) by Cut(leqr, lastStep) + } + } + } + + /* + + def proveExpr(using lib: Library, proof: lib.Proof)(id1: Prop, id2:Prop, base: Sequent): proof.ProofTacticJudgement = + TacticSubproof { proveInnerFormula(id1, id2, base) } + + def proveInnerFormula(using lib: Library, proof: lib.Proof)(id1: Prop, id2:Prop, base: Sequent): Unit = { + import lib.* + val steps = explain(id1, id2) + steps match { + case None => throw new Exception("No proof found in the egraph") + case Some(steps) => + if steps.isEmpty then have(base.left |- (base.right + (id1 <=> id2))) by Restate + steps.foreach { + case ExternalStep((l, r)) => + val goalSequent = base.left |- (base.right + (id1 <=> r)) + if l == id1 then + have(goalSequent) by Restate + else + val x = freshVariableFormula(id1) + have(goalSequent) by RightSubstEq.withParameters(List((l, r)), lambda(x, id1 <=> x))(lastStep) + case CongruenceStep((l, r)) => + val prev = if id1 != l then lastStep else null + val leqr = have(base.left |- (base.right + (l <=> r))) subproof { sp ?=> + (l, r) match + case (AppliedConnector(labell, argsl), AppliedConnector(labelr, argsr)) if labell == labelr && argsl.size == argsr.size => + var freshn = freshId((l.freeVariableFormulas ++ r.freeVariableFormulas).map(_.id), "n").no + val ziped = (argsl zip argsr) + var zip = List[(Prop, Prop)]() + var children = List[Prop]() + var vars = List[VariableFormula]() + var steps = List[(Prop, sp.ProofStep)]() + ziped.reverse.foreach { (al, ar) => + if al == ar then children = al :: children + else { + val x = VariableFormula(Identifier("n", freshn)) + freshn = freshn + 1 + children = x :: children + vars = x :: vars + steps = (al <=> ar, have(proveExpr(al, ar, base))) :: steps + zip = (al, ar) :: zip + } + } + have(base.left |- (base.right + (l <=> l))) by Restate + val eqs = zip.map((l, r) => l <=> r) + val goal = have((base.left ++ eqs) |- (base.right + (l <=> r))).by.bot + have((base.left ++ eqs) 
|- (base.right + (l <=> r))) by RightSubstEq.withParameters(zip, lambda(vars, l <=> labelr.applyUnsafe(children)))(lastStep) + steps.foreach { s => + have( + if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 + ) by Cut(s._2, lastStep) + } + + case (AppliedPredicate(labell, argsl), AppliedPredicate(labelr, argsr)) if labell == labelr && argsl.size == argsr.size => + var freshn = freshId((l.freeVariableFormulas ++ r.freeVariableFormulas).map(_.id), "n").no + val ziped = (argsl zip argsr) + var zip = List[(Ind, Ind)]() + var children = List[Ind]() + var vars = List[Variable]() + var steps = List[(Prop, sp.ProofStep)]() + ziped.reverse.foreach { (al, ar) => + if al == ar then children = al :: children + else { + val x = Variable(Identifier("n", freshn)) + freshn = freshn + 1 + children = x :: children + vars = x :: vars + steps = (al === ar, have(proveTerm(al, ar, base))) :: steps + zip = (al, ar) :: zip + } + } + have(base.left |- (base.right + (l <=> l))) by Restate + val eqs = zip.map((l, r) => l === r) + val goal = have((base.left ++ eqs) |- (base.right + (l <=> r))).by.bot + have((base.left ++ eqs) |- (base.right + (l <=> r))) by RightSubstEq.withParameters(zip, lambda(vars, l <=> labelr.applyUnsafe(children)))(lastStep) + steps.foreach { s => + have( + if s._2.bot.left.contains(s._1) then lastStep.bot else lastStep.bot -<< s._1 + ) by Cut(s._2, lastStep) + } + case _ => + println(s"l: $l") + println(s"r: $r") + throw UnreachableException + + } + if id1 != l then + val goalSequent = base.left |- (base.right + (id1 <=> r)) + val x = freshVariableFormula(id1) + have(goalSequent +<< (l <=> r)) by RightSubstEq.withParameters(List((l, r)), lambda(x, id1 <=> x))(prev) + have(goalSequent) by Cut(leqr, lastStep) + + } + } + } + */ + +} diff --git a/lisa-sets2/src/main/scala/lisa/automation/Substitution.scala b/lisa-sets2/src/main/scala/lisa/automation/Substitution.scala new file mode 100644 index 000000000..86d5d8f3e --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/Substitution.scala @@ -0,0 +1,231 @@ +package lisa.automation + +import lisa.utils.fol.FOL as F +import lisa.kernel.proof.RunningTheory +import lisa.kernel.proof.SCProof +import lisa.kernel.proof.SequentCalculus +import lisa.utils.prooflib.BasicStepTactic +import lisa.utils.prooflib.SimpleDeducedSteps +import lisa.utils.prooflib.ProofTacticLib.{*, given} +import lisa.utils.prooflib.* +import lisa.utils.K +import lisa.utils.UserLisaException +import lisa.utils.unification.UnificationUtils.* +import lisa.utils.collection.Extensions.* + +import scala.annotation.nowarn +import scala.collection.mutable.{Map as MMap} + +import F.{*, given} +import lisa.utils.collection.VecSet +import lisa.utils.Printing.printList + +object Substitution: + + /** + * Extracts a raw substitution into a `RewriteRule`. + */ + def extractRule(using lib: Library, proof: lib.Proof)(rule: proof.Fact | Expr[Prop]): RewriteRule = + rule match + case f: Expr[Prop] @unchecked => + (f: @unchecked) match + case === #@ (l: Expr[Ind]) #@ (r: Expr[Ind]) => TermRewriteRule(l, r) + case <=> #@ (l: Expr[Prop]) #@ (r: Expr[Prop]) => FormulaRewriteRule(l, r) + case f: proof.Fact @unchecked => extractRule(proof.getSequent(f).right.head) + + /** + * Partitions raw substitution rules into free and confined rules, also + * creating a source map, mapping each rule to the `Fact` it was derived from, + * for proof construction. 
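+ *
+ * {{{
+ * // Sketch (hypothetical inputs): a plain formula x === y or an in-proof fact proving it is
+ * // registered as a confined rule, while a global justification becomes a free rule; in every
+ * // case both the rule and its swapped version are mapped back to their source for later discharge.
+ * }}}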
+ */ + def partition(using lib: Library, proof: lib.Proof)(substitutions: Seq[proof.Fact | Expr[Prop]]): (Map[RewriteRule, proof.Fact], RewriteContext) = + substitutions.foldLeft((Map.empty, RewriteContext.empty)): + case ((source, ctx), rule) => + val erule = extractRule(rule) + rule match + case f: Expr[Prop] @unchecked => + (source + (erule -> erule.source) + (erule.swap -> erule.source), ctx.withConfinedRule(erule).withConfinedRule(erule.swap)) + case j: lib.JUSTIFICATION => + (source + (erule -> j) + (erule.swap -> j), ctx.withFreeRule(erule).withFreeRule(erule.swap)) + case f: proof.Fact @unchecked => + (source + (erule -> f) + (erule.swap -> f), ctx.withConfinedRule(erule).withConfinedRule(erule.swap)) + + /** + * Checks if a raw substitution input can be used as a rewrite rule (is === or + * <=>, basically). + */ + def validSubstitutionRule(using lib: lisa.utils.prooflib.Library, proof: lib.Proof)(rule: (proof.Fact | Expr[Prop])): Boolean = + rule match + // as formula + case f: Expr[Prop] @unchecked => + f match + case === #@ l #@ r => true + case <=> #@ l #@ r => true + case _ => false + // as a justification + case just: proof.Fact @unchecked => + val sequent = proof.getSequent(just) + sequent.right.size == 1 && validSubstitutionRule(sequent.right.head) + + object Apply extends ProofTactic: + def apply(using lib: Library, proof: lib.Proof)(substitutions: (proof.Fact | Expr[Prop])*)(premiseStep: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + + // are all substitution rules actually valid? + // if not, exit early + + val violatingFacts = substitutions.collect: + case f: proof.Fact @unchecked if !validSubstitutionRule(f) => proof.getSequent(f) + + val violatingFormulas = substitutions.collect: + case f: Expr[Prop] @unchecked if !validSubstitutionRule(f) => f + + if violatingFacts.nonEmpty then + val msgBase = "Substitution rules must have a single equality or equivalence on the right-hand side. Violating sequents passed:\n" + val msgList = violatingFacts.zipWithIndex.map: + case (f, i) => s"\t${i + 1}. $f" + + proof.InvalidProofTactic(msgBase + msgList.mkString("\n")) + else if violatingFormulas.nonEmpty then + val msgBase = "Substitution rules must be equalities or equivalences. Violating formulas passed:\n" + val msgList = violatingFacts.zipWithIndex.map: + case (f, i) => s"\t${i + 1}. $f" + + proof.InvalidProofTactic(msgBase + msgList.mkString("\n")) + else + // continue, we have a list of rules to work with + + // rewrite base + val premise = proof.getSequent(premiseStep) + // the target is bot + + // metadata: + // maintain a list of where substitutions come from + // and categorize them for the rewrite context + val (sourceMap, prectx) = partition(substitutions) + val ctx = prectx.withBound(premise.left.flatMap(_.freeVars)) + + // TODO: CHECK is this really necessary? + // remove from the premise equalities we are rewriting with, as these + // terms themselves are not targets for the rewriting + // val filteredPrem = ??? + + // check whether this rewrite is even possible. + // if it is, get the context (term with holes) corresponding to the + // single-step simultaneous rewrite + + // for each formula in the premise left (resp. right), there must be a + // corresponding formula in the conclusion left (resp. right) that it + // can be rewritten into. 
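+ // For instance (hypothetical formulas and rule): rewriting with a <=> b requires that a premise
+ // formula p(x) \/ a on the left has some counterpart p(x) \/ b among the conclusion's left
+ // formulas; formulas without such a counterpart are reported as violating below.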
+ + // discover a (possibly non-injective non-surjective) mapping from one + // formula set to another where a formula maps to another by the + // rewrites above + inline def collectRewritingPairs(base: Set[Expr[Prop]], target: Set[Expr[Prop]]): Option[Seq[FormulaRewriteResult]] = + base.iterator + .map: formula => + target.collectFirstDefined: target => + rewrite(using ctx)(formula, target) + .toOptionSeq + + // collect the set of formulas in `base` that rewrite to *no* formula + // in `target`. Guaranteed to be non-empty if + // `collectRewritingPairs(base, target)` is None. + inline def collectViolatingPairs(base: Set[Expr[Prop]], target: Set[Expr[Prop]]): Set[Expr[Prop]] = + base.filter: formula => + target.forall: target => + rewrite(using ctx)(formula, target).isEmpty + + inline def ruleList: String = + val printed = substitutions.map: + case f: Expr[?] => f + case f: proof.Fact @unchecked => proof.getSequent(f) + printList(printed) + + inline def ruleMsg = s"\nSubstitution rules given:\n$ruleList" + + val leftSubsts = collectRewritingPairs(premise.left, bot.left) + val rightSubsts = collectRewritingPairs(premise.right, bot.right) + + if leftSubsts.isEmpty then + // error, find formulas that failed to rewrite + val msgBase = "Could not rewrite LHS of premise into conclusion with given substitutions.\nViolating Formulas:\n" + val formulaList = printList(collectViolatingPairs(premise.left, bot.left)) + + proof.InvalidProofTactic(msgBase + formulaList + ruleMsg) + else if rightSubsts.isEmpty then + // error, find formulas that failed to rewrite + val msgBase = "Could not rewrite RHS of premise into conclusion with given substitutions.\nViolating Formulas:\n" + val formulaList = printList(collectViolatingPairs(premise.right, bot.right)) + + proof.InvalidProofTactic(msgBase + formulaList + ruleMsg) + else + // rewriting is possible, construct the proof + + import lib.{have, thenHave, lastStep} + import BasicStepTactic.{TacticSubproof, Weakening, Cut, LeftSubstEq, RightSubstEq} + import SimpleDeducedSteps.Restate + + TacticSubproof: + val leftRewrites = leftSubsts.get + val rightRewrites = rightSubsts.get + val leftRules = leftRewrites.to(VecSet).flatMap(_.rules) + val rightRules = rightRewrites.to(VecSet).flatMap(_.rules) + + // instantiated discharges + + val leftDischarges = leftRules.map(r => r -> proof.InstantiatedFact(sourceMap(r.rule), r.subst.asSubstPair)) + val rightDischarges = rightRules.map(r => r -> proof.InstantiatedFact(sourceMap(r.rule), r.subst.asSubstPair)) + + val discharges = leftDischarges ++ rightDischarges + + // start proof + have(andAll(premise.left) |- premise.right) by Restate.from(premiseStep) + + // left rewrites + val leftFormulas = leftRules.map(_.toFormula) + val preLeft = leftRewrites.map(_.toLeft) + val postLeft = leftRewrites.map(_.toRight) + val leftVars = leftRewrites.head.vars + val leftLambda = andAll(leftRewrites.map(_.lambda)) + thenHave(andAll(preLeft) |- premise.right) by Restate + thenHave(leftFormulas + andAll(preLeft) |- premise.right) by Weakening + thenHave(leftFormulas + andAll(postLeft) |- premise.right) by LeftSubstEq.withParameters(leftRules.map(r => r.l -> r.r).toSeq, leftVars -> leftLambda) + + val rpremise = lastStep.bot + + // right rewrites + val rightFormulas = rightRules.map(_.toFormula) + val preRight = rightRewrites.map(_.toLeft).toSet + val postRight = rightRewrites.map(_.toRight).toSet + val rightVars = rightRewrites.head.vars + val rightLambda = orAll(rightRewrites.map(_.lambda)) + thenHave(rpremise.left |- orAll(preRight)) by 
Restate + thenHave(rightFormulas ++ rpremise.left |- orAll(preRight)) by Weakening + thenHave(rightFormulas ++ rpremise.left |- orAll(postRight)) by RightSubstEq.withParameters(rightRules.map(r => r.l -> r.r).toSeq, rightVars -> rightLambda) + + // rewrite to destruct sequent + thenHave(postLeft ++ leftFormulas ++ rightFormulas |- postRight) by Restate + + val postRewriteSequent = lastStep.bot + + // discharge assumptions + discharges.foldLeft(postRewriteSequent): + case (premise, (rule, source)) => + val sseq = proof.getSequent(source) + val form = rule.toFormula + val nextSequent = premise.left.filterNot(isSame(_, form)) ++ sseq.left |- premise.right ++ sseq.right.filterNot(isSame(_, form)) + have(nextSequent) by Cut.withParameters(form)(source, lastStep) + nextSequent + + // restate to the result + thenHave(bot) by Weakening + + end Apply + + object Unfold extends ProofTactic: + def apply(using lib: Library, proof: lib.Proof)(definition: lib.theory.Definition)(premise: proof.Fact): proof.ProofTacticJudgement = + ??? + + end Unfold + +end Substitution diff --git a/lisa-sets2/src/main/scala/lisa/automation/Tableau.scala b/lisa-sets2/src/main/scala/lisa/automation/Tableau.scala new file mode 100644 index 000000000..48726e2d5 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/Tableau.scala @@ -0,0 +1,508 @@ +package lisa.automation +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager.* +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.K +import lisa.utils.K.{_, given} + +import scala.collection.immutable.HashMap +import scala.collection.immutable.HashSet + +/** + * Now need to deal with variables unifying with terms containing themselves + * optimize list size computation + * Then, optimize unification check by not checking all pairs all the time + * Then, shortcut branches by checking if they are OL-true or OL-false + * + * Next test: No quantifiers but actual terms with variables + */ +object Tableau extends ProofTactic with ProofSequentTactic with ProofFactSequentTactic { + + var debug = true + def pr(s: Object) = if debug then println(s) + + def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = { + solve(bot) match { + case Some(value) => proof.ValidProofTactic(bot, value.steps, Seq()) + case None => proof.InvalidProofTactic("Could not prove the statement.") + } + } + + /** + * Given a targeted conclusion sequent, try to prove it using laws of propositional logic and reflexivity and symmetry of equality. + * Uses the given already proven facts as assumptions to reach the desired goal. + * + * @param proof The ongoing proof object in which the step happens. + * @param premise A previously proven step necessary to reach the conclusion. + * @param bot The desired conclusion.
+ */ + def apply(using lib: Library, proof: lib.Proof)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + from(using lib, proof)(Seq(premise)*)(bot) + + def from(using lib: Library, proof: lib.Proof)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { + val botK = bot.underlying + val premsFormulas: Seq[((proof.Fact, Expression), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex + val initProof = premsFormulas.map(s => Restate(() |- s._1._2, -(1 + s._2))).toList + val sqToProve = botK ++<< (premsFormulas.map(s => s._1._2).toSet |- ()) + + solve(sqToProve) match { + case Some(value) => + val subpr = SCSubproof(value) + val stepsList = premsFormulas.foldLeft[List[SCProofStep]](List(subpr))((prev: List[SCProofStep], cur) => { + val ((prem, form), position) = cur + Cut(prev.head.bot -<< form, position, initProof.length + prev.length - 1, form) :: prev + }) + val steps = (initProof ++ stepsList.reverse).toIndexedSeq + proof.ValidProofTactic(bot, steps, premises) + case None => + proof.InvalidProofTactic("Could not prove the statement.") + } + } + + /* + def from(premises: Seq[K.Sequent], bot: K.Sequent): Option[SCProof] = { + val botK = bot.underlying + val premsFormulas: Seq[((proof.Fact, Expression), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex + val initProof = premsFormulas.map(s => Restate(() |- s._1._2, -(1 + s._2))).toList + val sqToProve = botK ++<< (premsFormulas.map(s => s._1._2).toSet |- ()) + + solve(sqToProve) match { + case Some(value) => + val subpr = SCSubproof(value) + val stepsList = premsFormulas.foldLeft[List[SCProofStep]](List(subpr))((prev: List[SCProofStep], cur) => { + val ((prem, form), position) = cur + Cut(prev.head.bot -<< form, position, initProof.length + prev.length - 1, form) :: prev + }) + val steps = (initProof ++ stepsList.reverse).toIndexedSeq + proof.ValidProofTactic(bot, steps, premises) + case None => + proof.InvalidProofTactic("Could not prove the statement.") + } + }*/ + + inline def solve(sequent: F.Sequent): Option[SCProof] = solve(sequent.underlying) + + def solve(sequent: K.Sequent): Option[SCProof] = { + val f = K.multiand(sequent.left.toSeq ++ sequent.right.map(f => K.neg(f))) + val taken = f.allVariables + val nextIdNow = if taken.isEmpty then 0 else taken.maxBy(_.id.no).id.no + 1 + val (fnamed, nextId) = makeVariableNamesUnique(f, nextIdNow, f.freeVariables) + val nf = reducedNNFForm(fnamed) + val uv = Variable(Identifier("§", nextId), Ind) + val proof = decide(Branch.empty(nextId + 1, uv).prepended(nf)) + proof match + case None => None + case Some((p, _)) => Some(SCProof((Restate(sequent, p.length) :: Weakening(nf |- (), p.length - 1) :: p).reverse.toIndexedSeq, IndexedSeq.empty)) + + } + + /** + * A branch represent a sequent (whose right hand side is empty) that is being proved. + * It is assumed that the sequent is in negation normal form, negations are only applied to atoms. + * Formulas are sorted according to their shape : + * Conjunctions are in alpha + * Disjunctions are in beta + * Existential quantifiers are in delta + * Universal quantifiers are in gamma + * Atoms are in atoms (split into positive and negative) + * At each step of the procedure, a formula is deconstructed in accordance with the rules of the tableau calculus. + * Then that formula is removed from the branch as it is no longer needed. 
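+ *
+ * For example (hypothetical formulas), `prepended` files a new formula as follows:
+ * {{{
+ * a /\ b       // goes to alpha
+ * a \/ b       // goes to beta
+ * ∃x. p(x)     // goes to delta
+ * ∀x. p(x)     // goes to gamma
+ * p(c), ¬p(c)  // go to atoms._1 and atoms._2 respectively
+ * }}}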
+ * Variables coming from universal quantifiers are marked as suitable for unification in unifiable + * Instantiations that have been done already are stored in triedInstantiation, to avoid infinite loops. + * When a quantifier Q1 is below a universal quantifier Q2, Q2 can be instantiated multiple times. + * Then, Q1 may also need to be instantiated multiple versions, requiring fresh variable names. + * maxIndex stores an index that is used to generate fresh variable names. + */ + case class Branch( + alpha: List[Expression], // label = And + beta: List[Expression], // label = Or + delta: List[Expression], // Exists(...)) + gamma: List[Expression], // Forall(...) + atoms: (List[Expression], List[Expression]), // split into positive and negatives! + unifiable: Map[Variable, (Expression, Int)], // map between metavariables and the original formula they came from, with the penalty associated to the complexity of the formula. + numberInstantiated: Map[Variable, Int], // map between variables and the number of times they have been instantiated + + skolemized: Set[Variable], // set of variables that have been skolemized + triedInstantiation: Map[Variable, Set[Expression]], // map between metavariables and the term they were already instantiated with + maxIndex: Int, // the maximum index used for skolemization and metavariables + varsOrder: Map[Variable, Int], // the order in which variables were instantiated. In particular, if the branch contained the formula ∀x. ∀y. ... then x > y. + unusedVar: Variable // a variable the is neither free nor bound in the original formula. + ) { + def pop(f: Expression): Branch = f match + case f @ Or(l, r) => + if (beta.nonEmpty && beta.head.uniqueNumber == f.uniqueNumber) copy(beta = beta.tail) else throw Exception("First formula of beta is not f") + case f @ Exists(x, inner) => + if (delta.nonEmpty && delta.head.uniqueNumber == f.uniqueNumber) copy(delta = delta.tail) else throw Exception("First formula of delta is not f") + case f @ Forall(x, inner) => + if (gamma.nonEmpty && gamma.head.uniqueNumber == f.uniqueNumber) copy(gamma = gamma.tail) else throw Exception("First formula of gamma is not f") + case And(left, right) => + if (alpha.nonEmpty && alpha.head.uniqueNumber == f.uniqueNumber) copy(alpha = alpha.tail) else throw Exception("First formula of alpha is not f") + case _ => + throw Exception("Should not pop Atoms: " + f.repr) + + def prepended(f: Expression): Branch = f match + case And(left, right) => this.copy(alpha = f :: alpha) + case Or(left, right) => this.copy(beta = f :: beta) + case Exists(x, inner) => this.copy(delta = f :: delta) + case Forall(x, inner) => this.copy(gamma = f :: gamma) + case Neg(f) => + this.copy(atoms = (atoms._1, f :: atoms._2)) + case _ => + this.copy(atoms = (f :: atoms._1, atoms._2)) + + def prependedAll(l: Seq[Expression]): Branch = l.foldLeft(this)((a, b) => a.prepended(b)) + + def asSequent: Sequent = (beta ++ delta ++ gamma ++ atoms._1 ++ atoms._2.map(a => !a)).toSet |- Set() // inefficient, not used + + import Branch.* + override def toString(): String = + val pretUnif = unifiable.map((x, f) => x.id + " -> " + f._1.repr + " : " + f._2).mkString("Unif(", ", ", ")") + // val pretTried = triedInstantiation.map((x, t) => x.id + " -> " + prettyTerm(t, true)).mkString("Tried(", ", ", ")") + (s"Branch(" + + s"${RED(prettyIte(alpha, "alpha"))}, " + + s"${GREEN(prettyIte(beta, "beta"))}, " + + s"${BLUE(prettyIte(delta, "delta"))}, " + + s"${YELLOW(prettyIte(gamma, "gamma"))}, " + + s"${MAGENTA(prettyIte(atoms._1, "+"))}, 
${CYAN(prettyIte(atoms._2, "-"))}, " + + s"$pretUnif, _, _)").split("'").mkString("").split("_").mkString("") + + } + object Branch { + def empty = Branch(Nil, Nil, Nil, Nil, (Nil, Nil), Map.empty, Map.empty, Set.empty, Map.empty, 1, Map.empty, Variable(Identifier("§uv", 0), Ind)) + def empty(n: Int, uv: Variable) = Branch(Nil, Nil, Nil, Nil, (Nil, Nil), Map.empty, Map.empty, Set.empty, Map.empty, n, Map.empty, uv) + def prettyIte(l: Iterable[Expression], head: String): String = l match + case Nil => "Nil" + case _ => l.map(_.repr).mkString(head + "(", ", ", ")") + + } + + def makeVariableNamesUnique(f: Expression, nextId: Int, seen2: Set[Variable]): (Expression, Int) = { + var nextId2: Int = nextId + var seen = seen2 + def recurse(f: Expression): Expression = f match + case Application(f, a) => + Application(recurse(f), recurse(a)) + case Lambda(v, body) => + if seen.contains(v) then + val newV = Variable(Identifier(v.id, nextId2), Ind) + nextId2 += 1 + Lambda(newV, substituteVariables(recurse(body), Map(v -> newV))) + else + seen += v + Lambda(v, recurse(body)) + case _ => f + (recurse(f), nextId2) + } + type Substitution = Map[Variable, Expression] + val Substitution = HashMap + def prettySubst(s: Substitution): String = s.map((x, t) => x.id + " -> " + t.repr).mkString("Subst(", ", ", ")") + + /** + * Detect if two terms can be unified, and if so, return a substitution that unifies them. + */ + def unify(t1: Expression, t2: Expression, current: Substitution, br: Branch): Option[Substitution] = (t1, t2) match + case (x: Variable, y: Variable) if (br.unifiable.contains(x) || x.id.no > br.maxIndex) && (br.unifiable.contains(y) || y.id.no > br.maxIndex) => + if x == y then Some(current) + else if current.contains(x) then unify(current(x), t2, current, br) + else if current.contains(y) then unify(t1, current(y), current, br) + else Some(current + (x -> y)) + case (x: Variable, t2: Expression) if br.unifiable.contains(x) || x.id.no > br.maxIndex => + val newt2 = substituteVariables(t2, current) + if newt2.freeVariables.contains(x) then None + else if (current.contains(x)) unify(current(x), newt2, current, br) + else Some(current + (x -> newt2)) + case (t1: Expression, y: Variable) if br.unifiable.contains(y) || y.id.no > br.maxIndex => + val newt1 = substituteVariables(t1, current) + if newt1.freeVariables.contains(y) then None + else if (current.contains(y)) unify(newt1, current(y), current, br) + else Some(current + (y -> newt1)) + case (Application(f1, a1), Application(f2, a2)) => + unify(f1, f2, current, br).flatMap(s => unify(a1, a2, s, br)) + case _ => if t1 == t2 then Some(current) else None + + /** + * Detect if two atoms can be unified, and if so, return a substitution that unifies them. + */ + def unifyPred(pos: Expression, neg: Expression, br: Branch): Option[Substitution] = { + assert(pos.sort == Prop && neg.sort == Prop) + unify(pos, neg, Substitution.empty, br) + + } + + /** + * Detect if a branch can be closed, and if so, return a list of substitutions that closes it along with the formulas used to close it + * If it can't be closed, returns None + * The substitution cannot do substitutions that were already done in branch.triedInstantiation. + * When multiple substitutions are possible, the one with the smallest size is returned. (Maybe there is a better heuristic, like distance from the root?) 
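+ *
+ * {{{
+ * // Hypothetical example: if the branch has the positive atom p(X) with X marked unifiable and
+ * // the negative atom p(c), close(branch) returns the substitution mapping X to c together with
+ * // the pair of atoms used to close the branch.
+ * }}}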
+ */ + def close(branch: Branch): Option[(Substitution, Set[Expression])] = { + val newMap = branch.atoms._1 + .flatMap(pred => pred.freeVariables.filter(v => branch.unifiable.contains(v))) + .map(v => v -> Variable(Identifier(v.id.name, v.id.no + branch.maxIndex + 1), Ind)) + .toMap + val inverseNewMap = newMap.map((k, v) => v -> k).toMap + val pos = branch.atoms._1.map(pred => substituteVariables(pred, newMap)).iterator + var substitutions: List[(Substitution, Set[Expression])] = Nil + + while (pos.hasNext) { + val p = pos.next() + if (p == bot) return Some((Substitution.empty, Set(bot))) + val neg = branch.atoms._2.iterator + while (neg.hasNext) { + val n = neg.next() + unifyPred(p, n, branch) match + case None => () + case Some(unif) => + substitutions = (unif, Set(p, !n)) :: substitutions + } + } + + val cr1 = substitutions.map((sub, set) => + ( + sub.flatMap((v, t) => + if v.id.no > branch.maxIndex then + if t == inverseNewMap(v) then None + else Some(inverseNewMap(v) -> substituteVariables(t, inverseNewMap.map((v, t) => v -> substituteVariables(t, sub)))) + else if newMap.contains(v) && t == newMap(v) then None + else Some(v -> substituteVariables(t, inverseNewMap)) + ), + set.map(f => substituteVariables(f, inverseNewMap)) + ) + ) + + val cr = cr1.filterNot(s => + s._1.exists((x, t) => + val v = branch.triedInstantiation.contains(x) && branch.triedInstantiation(x).contains(t) + v + ) + ) + + bestSubst(cr, branch) + + } + + def bestSubst(substs: List[(Substitution, Set[Expression])], branch: Branch): Option[(Substitution, Set[Expression])] = { + if substs.isEmpty then return None + val minSize = substs.minBy(_._1.size) + val smallSubst = substs.filter(_._1.size == minSize._1.size) + // Up to this, it is necessary for completeness. From this, it is heuristic. 
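+ // As a rough illustration of the scoring used below (hypothetical formulas):
+ // formulaPenalty(Or(And(a, b), c)) = 40 + (10 + 0 + 0) + 0 = 50, so substitutions whose
+ // metavariables come from structurally larger origin formulas are penalized more heavily.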
+ + val best = smallSubst.minBy(s => substitutionScore(s._1, branch)) + Some(best) + } + def formulaPenalty(f: Expression, branch: Branch): Int = f match + case And(left, right) => 10 + formulaPenalty(left, branch) + formulaPenalty(right, branch) + case Or(left, right) => 40 + formulaPenalty(left, branch) + formulaPenalty(right, branch) + case Exists(x, inner) => 30 + formulaPenalty(inner, branch) + case Forall(x, inner) => 200 + formulaPenalty(inner, branch) + case _ => 0 + + def substitutionScore(subst: Substitution, branch: Branch): Int = { + def pairPenalty(v: Variable, t: Expression) = { + val variablePenalty = branch.unifiable(v)._2 + branch.numberInstantiated(v) * 20 + def termPenalty(t: Expression): Int = t match + case x: Variable => if branch.unifiable.contains(x) then branch.unifiable(x)._2 * 1 else 0 + case c: Constant => 40 + case Application(f, a) => 100 + termPenalty(f) + termPenalty(a) + case Lambda(v, inner) => 100 + termPenalty(inner) + 1 * variablePenalty + 1 * termPenalty(t) + } + subst.map((v, t) => pairPenalty(v, t)).sum + } + + /** + * Explodes one And formula + * The alpha list of the branch must not be empty + */ + def alpha(branch: Branch): Branch = { + val f = branch.alpha.head + f match + case And(l, r) => branch.copy(alpha = branch.alpha.tail).prepended(l).prepended(r) + case _ => throw Exception("Error: First formula of alpha is not an And") + } + + /** + * Explodes one Or formula, and alpha-simplifies it + * Add the exploded formula to the used list, if one beta formula is found + * The beta list of the branch must not be empty + */ + def beta(branch: Branch): List[(Branch, Expression)] = { + val f = branch.beta.head + val b1 = branch.copy(beta = branch.beta.tail) + f match + case Or(l, r) => + List((b1.prepended(l), l), (b1.prepended(r), r)) + case _ => throw Exception("Error: First formula of beta is not an Or") + } + + /** + * Explodes one Exists formula + * Add the unquantified formula to the branch + * Since the bound variable is not marked as suitable for instantiation, it behaves as a constant symbol (skolem) + */ + def delta(branch: Branch): (Branch, Variable, Expression) = { + val f = branch.delta.head + f match + case Exists(v, body) => + if branch.skolemized.contains(v) then + val newV = Variable(Identifier(v.id.name, branch.maxIndex), Ind) + val newInner = substituteVariables(body, Map(v -> newV)) + (branch.copy(delta = branch.delta.tail, maxIndex = branch.maxIndex + 1).prepended(newInner), newV, newInner) + else (branch.copy(delta = branch.delta.tail, skolemized = branch.skolemized + v).prepended(body), v, body) + case _ => throw Exception("Error: First formula of delta is not an Exists") + } + + /** + * Explodes one Forall formula + * Add the unquantified formula to the branch and mark the bound variable as suitable for unification + * This step will most of the time be cancelled when building the proof, unless any arbitrary instantiation is sufficient to get a proof. 
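+ *
+ * {{{
+ * // Hypothetical example: popping ∀x. p(x) from gamma prepends p(x) (or p(x') with x' fresh if x
+ * // was already instantiated) and records the variable in `unifiable` so close() may instantiate it.
+ * }}}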
+ */ + def gamma(branch: Branch): (Branch, Variable, Expression) = { + val f = branch.gamma.head + f match + case Forall(v, body) => + val (ni, nb) = branch.unifiable.get(v) match + case None => + (body, v) + case Some(value) => + val newBound = Variable(Identifier(v.id.name, branch.maxIndex), Ind) + val newInner = substituteVariables(body, Map(v -> newBound)) + (newInner, newBound) + val b1 = branch.copy( + gamma = branch.gamma.tail, + unifiable = branch.unifiable + (nb -> (f, formulaPenalty(body, branch))), + numberInstantiated = branch.numberInstantiated + (nb -> (branch.numberInstantiated.getOrElse(v, 0))), + maxIndex = branch.maxIndex + 1, + varsOrder = branch.varsOrder + (nb -> branch.varsOrder.size) + ) + (b1.prepended(ni), nb, ni) + case _ => throw Exception("Error: First formula of gamma is not a Forall") + + } + + /** + * When a closing unification has been found, apply it to the branch + * This does not do backtracking: The metavariable remains available if it needs further instantiation. + */ + def applyInst(branch: Branch, x: Variable, t: Expression): (Branch, Expression) = { + val f = branch.unifiable(x)._1 + val newTried = branch.triedInstantiation.get(x) match + case None => branch.triedInstantiation + (x -> Set(t)) + case Some(s) => branch.triedInstantiation + (x -> (s + t)) + + val inst = f match + case Forall(v, body) => instantiate(body, v, t) + case _ => throw Exception("Error: Prop in unifiable is not a Forall") + val r = branch + .prepended(inst) + .copy( + triedInstantiation = newTried, + numberInstantiated = branch.numberInstantiated + (x -> (branch.numberInstantiated(x) + 1)) + ) + (r, inst) + } + + /** + * Decide if a branch can be closed, and if not, explode it. + * Main routine of the decision procedure. If it succeeds, return a proof of the branch. + * Note that the proof actually proves a subset of a branch when possible, to cut short on unneeded steps and formulas. + * The return integer is the size of the proof: Used to avoid computing the size every time in linear time. 
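+ *
+ * The rules are tried in the order implemented below: close without instantiation, then alpha,
+ * then delta, then beta, then gamma, and finally close with instantiation; if none applies, the
+ * branch is not provable and None is returned.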
+ */ + def decide(branch: Branch): Option[(List[SCProofStep], Int)] = { + + val closeSubst = close(branch) + if (closeSubst.nonEmpty && closeSubst.get._1.isEmpty) // If branch can be closed without Instantiation (Hyp) + Some((List(RestateTrue(Sequent(closeSubst.get._2, Set()))), 0)) + else if (branch.alpha.nonEmpty) // If branch contains an Alpha formula (LeftAnd) + val rec = alpha(branch) + decide(rec).map((proof, step) => + branch.alpha.head match + case Application(Application(and, left), right) => + if proof.head.bot.left.contains(left) || proof.head.bot.left.contains(right) then + val sequent = proof.head.bot.copy(left = (proof.head.bot.left - left - right) + branch.alpha.head) + (Weakening(sequent, proof.size - 1) :: proof, step + 1) + else (proof, step) + case _ => throw Exception("Error: First formula of alpha is not an And") + ) + else if (branch.delta.nonEmpty) // If branch contains a Delta formula (LeftExists) + val rec = delta(branch) + val upperProof = decide(rec._1) + upperProof.map((proof, step) => + if proof.head.bot.left.contains(rec._3) then + val sequent = (proof.head.bot -<< rec._3) +<< branch.delta.head + (LeftExists(sequent, step, rec._3, rec._2) :: proof, step + 1) + else (proof, step) + ) + else if (branch.beta.nonEmpty) // If branch contains a Beta formula (LeftOr) + val list = beta(branch) + val (proof, treversed, needed) = list.foldLeft((Some(Nil): Option[List[SCProofStep]], Nil: List[Int], true: Boolean))((prev, next) => + prev match + case (None, _, _) => prev // proof failed + case (_, _, false) => + prev // proof succeded early + case (Some(prevProof), t, true) => + val res = decide(next._1) + res match + case None => (None, t, true) + case Some((nextProof, step)) => + if nextProof.head.bot.left.contains(next._2) then // If the disjunct was used, encapsulate the subbranch in a Subproof + val subproofDisj = + if nextProof.size == 1 then nextProof.head + else SCSubproof(SCProof(nextProof.toIndexedSeq.reverse, IndexedSeq.empty), IndexedSeq.empty) + (Some(subproofDisj :: prevProof), prevProof.size :: t, true) + else + // If the disjunct was not used, then the subbranch is a proof of the whole statement and the split is not necessary. 
(res.map(_._1), List(nextProof.size - 1), false) + ) + proof.map(proo => + if needed == true then + val sequent = ((proo.reverse.zip(list).flatMap((proof, bf) => proof.bot.left - bf._2).toSet + branch.beta.head) |- ()) + branch.beta.head match + case Or(left, right) => + (LeftOr(sequent, treversed.reverse, Seq(left, right)) :: proo, treversed.size) + case _ => throw Exception("Error: First formula of beta is not an Or") + else (proo, proo.size - 1) + ) + else if (branch.gamma.nonEmpty) // If branch contains a Gamma formula (LeftForall) + val rec = gamma(branch) + val upperProof = decide(rec._1) + // LeftForall(bot: Sequent, t1: Int, phi: Expression, x: Variable, t: Expression) + upperProof.map((proof, step) => + if proof.head.bot.left.contains(rec._3) then + val sequent = (proof.head.bot -<< rec._3) +<< branch.gamma.head + branch.gamma.head match + case Forall(v, body) => + (LeftForall(sequent, step, body, v, rec._2()) :: proof, step + 1) + case _ => throw Exception("Error: First formula of gamma is not a Forall") + else (proof, step) + ) + else if (closeSubst.nonEmpty && closeSubst.get._1.nonEmpty) // If branch can be closed with Instantiation (LeftForall) + val (x, t) = closeSubst.get._1.minBy((x, t) => branch.varsOrder(x)) + val (recBranch, instantiated) = applyInst(branch, x, t) + val upperProof = decide(recBranch) + upperProof.map((proof, step) => + if proof.head.bot.left.contains(instantiated) then + val sequent = (proof.head.bot -<< instantiated) +<< branch.unifiable(x)._1 + branch.unifiable(x)._1 match + case Forall(v, body) => + (LeftForall(sequent, step, body, v, t) :: proof, step + 1) + case _ => throw Exception("Error: Prop in unifiable is not a Forall") + else (proof, step) + ) + else None + // End of decide + } + + def containsAlpha(set: Set[Expression], f: Expression): Boolean = f match { + case And(left, right) => containsAlpha(set, left) || containsAlpha(set, right) + case _ => set.contains(f) + } + + def instantiate(f: Expression, x: Variable, t: Expression): Expression = f match + case v: Variable => if v == x then t else v + case c: Constant => c + case Application(f, a) => Application(instantiate(f, x, t), instantiate(a, x, t)) + case Lambda(v, inner) => if (v == x) f else Lambda(v, instantiate(inner, x, t)) +} diff --git a/lisa-sets2/src/main/scala/lisa/automation/Tautology.scala b/lisa-sets2/src/main/scala/lisa/automation/Tautology.scala new file mode 100644 index 000000000..589d06d46 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/Tautology.scala @@ -0,0 +1,202 @@ +package lisa.automation + +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.K.{_, given} + +/** + * A tactic object dedicated to solving any propositionally provable sequent (possibly in exponential time). Can be used with arbitrarily many premises. + * Leverages the OL algorithm for propositional logic. + */ +object Tautology extends ProofTactic with ProofSequentTactic with ProofFactSequentTactic { + + /** + * Given a targeted conclusion sequent, try to prove it using laws of propositional logic and reflexivity and symmetry of equality. + * + * @param proof The ongoing proof object in which the step happens. + * @param bot The desired conclusion.
+ */ + def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = { + val botK = bot.underlying + solveSequent(botK) match { + case Left(value) => proof.ValidProofTactic(bot, value.steps, Seq()) + case Right((msg, seq)) => proof.InvalidProofTactic(msg) + } + } + + /** + * Given a targeted conclusion sequent, try to prove it using laws of propositional logic and reflexivity and symmetry of equality. + * Uses the given already proven facts as assumptions to reach the desired goal. + * + * @param proof The ongoing proof object in which the step happens. + * @param premise A previously proven step necessary to reach the conclusion. + * @param bot The desired conclusion. + */ + def apply(using lib: Library, proof: lib.Proof)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + from(using lib, proof)(Seq(premise)*)(bot) + + def from(using lib: Library, proof: lib.Proof)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { + val botK = bot.underlying + val premsFormulas: Seq[((proof.Fact, Expression), Int)] = premises.map(p => (p, sequentToFormula(proof.getSequent(p).underlying))).zipWithIndex + val initProof = premsFormulas.map(s => Restate(() |- s._1._2, -(1 + s._2))).toList + val sqToProve = botK ++<< (premsFormulas.map(s => s._1._2).toSet |- ()) + + solveSequent(sqToProve) match { + case Left(value) => + val subpr = SCSubproof(value) + val stepsList = premsFormulas.foldLeft[List[SCProofStep]](List(subpr))((prev: List[SCProofStep], cur) => { + val ((prem, form), position) = cur + if prev.head.bot.left.contains(form) then Cut(prev.head.bot -<< form, position, initProof.length + prev.length - 1, form) :: prev + else prev + }) + val steps = (initProof ++ stepsList.reverse).toIndexedSeq + proof.ValidProofTactic(bot, steps, premises) + case Right((msg, seq)) => + proof.InvalidProofTactic(msg) + } + } + + /** + * This function returns a proof of the given sequent if such a proof exists using only the rules of propositional logic and reflexivity and symmetry of equality. + * Be aware that the runtime and size of the proof may be exponential in the number of atoms (i.e. number of non-propositional subformulas of the input). + * The strategy consists in leveraging OL formula reduction by alternating between branching on an atom and reducing the formula. + * @param s A sequent that should be a propositional logic tautology. It can contain binders and schematic connector symbols, but they will be treated as atoms. + * @return A proof of the given sequent, if it exists + */ + def solveSequent(s: Sequent): Either[SCProof, (String, Sequent)] = { + val augSeq = augmentSequent(s) + val MaRvIn = Variable(freshId(augSeq.formula.freeVariables.map(_.id), "MaRvIn"), Prop) // arbitrary name that is unlikely to already exist in the formula + + try { + val steps = solveAugSequent(augSeq, 0)(using MaRvIn) + Left(SCProof((Restate(s, steps.length - 1) :: steps).reverse.toIndexedSeq)) + } catch + case e: NoProofFoundException => + Right( + ( + "The statement may be incorrect or not provable within propositional logic.\n" + + "The proof search failed because it needed the truth of the following sequent:\n" + + s"${e.unsolvable.repr}", + e.unsolvable + ) + ) + + } + + // From there, private code. 
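+
+ // Rough sketch of the branching step performed by solveAugSequent below (hypothetical atom a of
+ // formula F): prove F with a replaced by ⊤ under the assumption a, prove F with a replaced by ⊥
+ // under the assumption ¬a, then recombine the two subproofs with a Cut on a; OL reduction keeps
+ // the intermediate formulas small between branching steps.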
+ + // Augmented Sequent + private case class AugSequent(decisions: (List[Expression], List[Expression]), formula: Expression) + + // Transform a sequent into a format more adequate for solving + private def augmentSequent(s: Sequent): AugSequent = { + val f = reducedForm(sequentToFormula(s)) + val atoms: scala.collection.mutable.Map[Expression, Int] = scala.collection.mutable.Map.empty + AugSequent((Nil, Nil), f) + } + + def reduceSequent(s: Sequent): Expression = { + val p = simplify(sequentToFormula(s)) + val nf = computeNormalForm(p) + val fln = fromLocallyNameless(nf, Map.empty, 0) + val res = toExpressionAIG(fln) + res + } + + // Find all "atoms" of the formula. + // We mean atom in the propositional logic sense, so any formula starting with a predicate symbol, a binder or a schematic connector is an atom here. + def findBestAtom(f: Expression): Option[Expression] = { + val atoms: scala.collection.mutable.Map[Expression, Int] = scala.collection.mutable.Map.empty + def findAtoms2(fi: Expression, add: Expression => Unit): Unit = fi match { + case And(f1, f2) => findAtoms2(f1, add); findAtoms2(f2, add) + case Neg(f1) => findAtoms2(f1, add) + case _ if fi != top && fi != bot => add(fi) + case _ => () + } + findAtoms2(f, a => atoms.update(a, { val g = atoms.get(a); if (g.isEmpty) 1 else g.get + 1 })) + if (atoms.isEmpty) None else Some(atoms.toList.maxBy(_._2)._1) + } + + private class NoProofFoundException(val unsolvable: Sequent) extends Exception + + // Given a sequent, return a proof of that sequent, if one exists, that only uses propositional logic rules and reflexivity of equality. + // Alternates between reducing the formulas using the OL algorithm for propositional logic and branching on an atom using excluded middle. + // An atom is a subformula of the input that is either a predicate, a binder or a schematic connector, i.e. a subformula that has no meaning in propositional logic. + private def solveAugSequent(s: AugSequent, offset: Int)(using MaRvIn: Variable): List[SCProofStep] = { + val redF = reducedForm(s.formula) + if (redF == top()) { + List(RestateTrue(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- s.formula)) + } else + val bestAtom = findBestAtom(redF) + if (bestAtom.isEmpty) { + assert(redF == bot()) // sanity check: if the formula has no atom left in it and is reduced, it should be either ⊤ or ⊥.
+ val res = s.decisions._1 |- redF :: s.decisions._2 // the branch that can't be closed + throw new NoProofFoundException(res) + } else { + val atom = bestAtom.get + val optLambda = findSubformula(redF, MaRvIn, atom) + if (optLambda.isEmpty) return solveAugSequent(AugSequent(s.decisions, redF), offset) + val lambdaF = optLambda.get + + val seq1 = AugSequent((atom :: s.decisions._1, s.decisions._2), substituteVariables(lambdaF, Map(MaRvIn -> top))) + val proof1 = solveAugSequent(seq1, offset) + val subst1 = RightSubstIff( + atom :: s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- redF, + offset + proof1.length - 1, + Seq((atom, top)), + (Seq(MaRvIn), lambdaF) + ) + val negatom = neg(atom) + val seq2 = AugSequent((negatom :: s.decisions._1, s.decisions._2), substituteVariables(lambdaF, Map(MaRvIn -> bot))) + val proof2 = solveAugSequent(seq2, offset + proof1.length + 1) + val subst2 = RightSubstIff( + negatom :: s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- redF, + offset + proof1.length + proof2.length + 1 - 1, + Seq((atom, bot)), + (Seq(MaRvIn), lambdaF) + ) + val red2 = Restate(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- (redF, atom), offset + proof1.length + proof2.length + 2 - 1) + val cutStep = Cut(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- redF, offset + proof1.length + proof2.length + 3 - 1, offset + proof1.length + 1 - 1, atom) + val redStep = Restate(s.decisions._1 ++ s.decisions._2.map((f: Expression) => neg(f)) |- s.formula, offset + proof1.length + proof2.length + 4 - 1) + redStep :: cutStep :: red2 :: subst2 :: proof2 ++ (subst1 :: proof1) + + } + } + + private def condflat[T](s: Seq[(T, Boolean)]): (Seq[T], Boolean) = (s.map(_._1), s.exists(_._2)) + + private def findSubformula2(outer: Expression, x: Variable, e: Expression, fv: Set[Variable]): (Expression, Boolean) = { + if (isSame(outer, e)) (x, true) + else + val res = outer match { + case Application(f, arg) => + val rf = findSubformula2(f, x, e, fv) + val ra = findSubformula2(arg, x, e, fv) + if (rf._2 || ra._2) (Application(rf._1, ra._1), true) + else (outer, false) + case Lambda(v, inner) => + if (!fv.contains(v)) { + val induct = findSubformula2(inner, x, e, fv) + if (!induct._2) (outer, false) + else (Lambda(v, induct._1), true) + } else { + val newv = Variable(freshId((outer.freeVariables ++ fv).map(_.id), v.id), v.sort) + val newInner = substituteVariables(inner, Map(v -> newv)) + val induct = findSubformula2(newInner, x, e, fv + newv) + if (!induct._2) (outer, false) + else (Lambda(newv, induct._1), true) + } + case _ => (outer, false) + } + // assert(res._1.sort == f.sort, s"Sort mismatch in findSubformula2. 
${res._1.repr} : ${res._1.sort} != ${f.repr} : ${f.sort}") + res + } + + def findSubformula(f: Expression, x: Variable, e: Expression): Option[Expression] = { + val r = findSubformula2(f, x, e, e.freeVariables) + if (r._2) Some(r._1) + else None + } + +} diff --git a/lisa-sets2/src/main/scala/lisa/automation/atp/Egg.scala b/lisa-sets2/src/main/scala/lisa/automation/atp/Egg.scala new file mode 100644 index 000000000..0b929f4f5 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/atp/Egg.scala @@ -0,0 +1,132 @@ +package lisa.automation.atp +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.K + +import java.io.* +import scala.io.Source +import scala.util.Failure +import scala.util.Success +import scala.util.Try + +import sys.process._ +import lisa.tptp.ProofParser.* +import lisa.tptp.KernelParser.* + +/** + * Goéland is an automated theorem prover. This tactic calls the Goéland prover to solve the current sequent. + * Goéland is only available on Linux yet, but proofs generated by Goéland should be kept in the library for future use. + * To ensure that proofs are published and can be replayed in any system, proofs from an ATPcan only be generated in draft mode. + * When in non-draft mode, the proof file should be given as an argument to the tactic (the exact file is provided by Lisa upon run without draft mode). + */ +object Egg extends ProofTactic with ProofSequentTactic { + private var i : Int = 0 + + var timeLimit = 5 + + val eggExec_linux = "../bin/egg-sc-tptp" + val eggExec_windows = "..\\bin\\egg-sc-tptp.exe" + + class OsNotSupportedException(msg: String) extends Exception(msg) + + val foldername = "egg/" + + /** + * Fetch a proof of a sequent that was previously proven by Goéland. + * The file must be in SC-TPTP format. + */ + def apply(using lib: Library, proof: lib.Proof)(file:String)(bot: F.Sequent): proof.ProofTacticJudgement = { + val outputname = proof.owningTheorem.fullName+"_sol" + try { + val scproof = reconstructProof(new File(foldername+outputname+".p"))(using mapAtom, mapTerm, mapVariable) + proof.ValidProofTactic(bot, scproof.steps, Seq()) + } catch { + case e: FileNotFoundException => + throw FileNotFoundException("The file "+foldername+outputname+".p was not found. To produce a proof, use `by Egg`. ") + case e => throw e + } + } + + + /** + * Solve a sequent using the Goéland automated theorem prover. + * At the moment, this option is only available on Linux system. + * The proof is generated and saved in a file in the `Egg` folder. + */ + def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = { + from(using lib, proof)()(bot) + } + + def from(using lib: Library, proof: lib.Proof)(premises: proof.Fact*)(bot: F.Sequent) = { + val axioms = premises.map(proof.getSequent) + solve(axioms, bot, proof.owningTheorem.fullName, lib.isDraft) match { + case Success(value) => proof.ValidProofTactic(bot, value.steps, premises) + case Failure(e) => e match + case e: FileNotFoundException => throw new Exception("For compatibility reasons, external provers can't be called in non-draft mode" + + " unless all proofs have already been generated and be available in static files. 
You can enable draft mode by adding `draft()` at the top of your working file.") + case e: OsNotSupportedException => throw e + case e => + throw e + } + } + + inline def solve(axioms: Seq[F.Sequent], sequent: F.Sequent, source: String, generateProofs : Boolean): Try[K.SCProof] = + solveK(axioms.map(_.underlying), sequent.underlying, source, generateProofs) + + + /** + * Solve a sequent using the Goéland automated theorem prover, and return the kernel proof. + * At the moment, this option is only available on Linux systems. + */ + def solveK(using line: sourcecode.Line, file: sourcecode.File)(axioms: Seq[K.Sequent], sequent: K.Sequent, source:String, generateProofs : Boolean): Try[K.SCProof] = { + val filename = source + val outputname = source+"_sol" + val directory = File(foldername) + if (directory != null) && !directory.exists() then directory.mkdirs() + + val freevars = (sequent.left.flatMap(_.freeVariables) ++ sequent.right.flatMap(_.freeVariables) ).toSet.map(x => x -> K.Variable(K.Identifier("X"+x.id.name, x.id.no), x.sort) ).toMap + + val backMap = freevars.map{ + case (x: K.Variable, xx: K.Variable) => xx -> x + } + val r = problemToFile(foldername, filename, "question"+i, axioms, sequent, source) + i += 1 + if generateProofs then + val OS = System.getProperty("os.name") + if OS.contains("nix") || OS.contains("nux") || OS.contains("aix") then + val ret = s"chmod u+x \"$eggExec_linux\"".! + val cmd = (s"$eggExec_linux $foldername$filename.p $foldername$outputname.p --level1") // TODO + val res = try { + cmd.!! + } catch { + case e: Exception => + throw e + } + val proof = reconstructProof(new File(foldername+outputname+".p"))(using mapAtom, mapTerm, mapVariable) + Success(proof) + else if OS.contains("win") || OS.contains("Win") then + val cmd = (s"$eggExec_windows $foldername$filename.p $foldername$outputname.p --level1") // TODO + val res = try { + cmd.!! + } catch { + case e: Exception => + throw e + } + val proof = reconstructProof(new File(foldername+outputname+".p"))(using mapAtom, mapTerm, mapVariable) + Success(proof) + else + Failure(OsNotSupportedException("The Egg automated theorem prover is only supported on Linux for now.")) + else + if File(foldername+outputname+".p").exists() then + val proof = reconstructProof(new File(foldername+outputname+".p"))(using mapAtom, mapTerm, mapVariable) + println(OutputManager.WARNING(s"WARNING: in ${file.value}:$line, For compatibility reasons, replace `by Egg` with `by Egg(\"$foldername$outputname\")`.")) + Success(proof) + + else Failure(Exception("For compatibility reasons, external provers can't be called in non-draft mode. You can enable draft mode by adding `draft()` at the top of your working file.")) + + + } + +} \ No newline at end of file diff --git a/lisa-sets2/src/main/scala/lisa/automation/atp/Goeland.scala b/lisa-sets2/src/main/scala/lisa/automation/atp/Goeland.scala new file mode 100644 index 000000000..2d2a9746c --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/atp/Goeland.scala @@ -0,0 +1,119 @@ +package lisa.automation.atp +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.K +import lisa.utils.K.|- + +import java.io.* +import scala.io.Source +import scala.util.Failure +import scala.util.Success +import scala.util.Try + +import sys.process._ +import lisa.tptp.ProofParser.* +import lisa.tptp.KernelParser.* + +/** + * Goéland is an automated theorem prover. 
This tactic calls the Goéland prover to solve the current sequent. + * Goéland is currently only available on Linux, but proofs generated by Goéland should be kept in the library for future use. + * To ensure that proofs are published and can be replayed in any system, proofs from an ATP can only be generated in draft mode. + * When in non-draft mode, the proof file should be given as an argument to the tactic (the exact file is provided by Lisa upon run without draft mode). + */ +object Goeland extends ProofTactic with ProofSequentTactic { + private var i: Int = 0 + + val goelandExec = "../bin/goeland_linux_release" + + class OsNotSupportedException(msg: String) extends Exception(msg) + + val foldername = "goeland/" + + /** + * Fetch a proof of a sequent that was previously proven by Goéland. + * The file must be in SC-TPTP format. + */ + def apply(using lib: Library, proof: lib.Proof)(file: String)(bot: F.Sequent): proof.ProofTacticJudgement = { + val outputname = proof.owningTheorem.fullName + "_sol" + try { + val scproof = reconstructProof(new File(foldername + outputname + ".p"))(using mapAtom, mapTerm, mapVariable) + proof.ValidProofTactic(bot, scproof.steps, Seq()) + } catch { + case e: FileNotFoundException => + throw FileNotFoundException("The file " + foldername + outputname + ".p was not found. To produce a proof, use `by Goeland`. ") + case e => throw e + } + } + + /** + * Solve a sequent using the Goéland automated theorem prover. + * At the moment, this option is only available on Linux systems. + * The proof is generated and saved in a file in the `Goeland` folder. + */ + def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = { + + solve(Seq(), bot, proof.owningTheorem.fullName, lib.isDraft) match { + case Success(value) => proof.ValidProofTactic(bot, value.steps, Seq()) + case Failure(e) => + e match + case e: FileNotFoundException => + throw new Exception( + "For compatibility reasons, external provers can't be called in non-draft mode" + + " unless all proofs have already been generated and are available in static files. You can enable draft mode by adding `draft()` at the top of your working file." + ) + case e: OsNotSupportedException => throw e + case e => + throw e + } + } + + inline def solve(axioms: Seq[F.Sequent], sequent: F.Sequent, source: String, generateProofs: Boolean): Try[K.SCProof] = + solveK(axioms.map(_.underlying), sequent.underlying, source, generateProofs) + + /** + * Solve a sequent using the Goéland automated theorem prover, and return the kernel proof. + * At the moment, this option is only available on Linux systems.
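+ * + * Example (sketch; `MyTheorem` is an illustrative source name): `solveK(Nil, seq, "MyTheorem", generateProofs = true)` writes `goeland/MyTheorem.p`, invokes the Goéland binary and parses the resulting `goeland/MyTheorem_sol.p` back into a kernel `SCProof`; with `generateProofs = false` it only attempts to reload an existing `goeland/MyTheorem_sol.p`.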
+ */ + def solveK(using line: sourcecode.Line, file: sourcecode.File)(axioms: Seq[K.Sequent], sequent: K.Sequent, source: String, generateProofs: Boolean): Try[K.SCProof] = { + val filename = source + val outputname = source + "_sol" + val directory = File(foldername) + if (directory != null) && !directory.exists() then directory.mkdirs() + + val freevars = (sequent.left.flatMap(_.freeVariables) ++ sequent.right.flatMap(_.freeVariables)).toSet.map(x => x -> K.Variable(K.Identifier("X" + x.id.name, x.id.no), x.sort)).toMap + + val backMap = freevars.map { + case (x: K.Variable, xx: K.Variable) => xx -> x + case null => throw new Exception("This should not happen") + } + val seq2 = () |- K.sequentToFormula(sequent) + val r = problemToFile(foldername, filename, "question" + i, axioms, seq2, source) + i += 1 + + if generateProofs then + val OS = System.getProperty("os.name") + if OS.contains("nix") || OS.contains("nux") || OS.contains("aix") then + val ret = s"chmod u+x \"$goelandExec\"".! + val cmd = (s"$goelandExec -otptp -wlogs -no_id -quoted_pred -proof_file=$foldername$outputname $foldername$filename.p") + val res = + try { + cmd.!! + } catch { + case e: Exception => + throw e + } + val proof = reconstructProof(new File(foldername + outputname + ".p"))(using mapAtom, mapTerm, mapVariable) + Success(proof) + else if OS.contains("win") then Failure(OsNotSupportedException("The Goeland automated theorem prover is not yet supported on Windows.")) + else Failure(OsNotSupportedException("The Goeland automated theorem prover is only supported on Linux for now.")) + else if File(foldername + outputname + ".p").exists() then + val proof = reconstructProof(new File(foldername + outputname + ".p"))(using mapAtom, mapTerm, mapVariable) + println(OutputManager.WARNING(s"WARNING: in ${file.value}:$line, For compatibility reasons, replace `by Goeland` with `by Goeland(\"$foldername$outputname\")`.")) + Success(proof) + else Failure(Exception("For compatibility reasons, external provers can't be called in non-draft mode. You can enable draft mode by adding `draft()` at the top of your working file.")) + + } + +} diff --git a/lisa-sets2/src/main/scala/lisa/automation/atp/Prover9.scala b/lisa-sets2/src/main/scala/lisa/automation/atp/Prover9.scala new file mode 100644 index 000000000..5c4bd8615 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/automation/atp/Prover9.scala @@ -0,0 +1,117 @@ +package lisa.automation.atp +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.OutputManager +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.K + +import java.io.* +import scala.io.Source +import scala.util.Failure +import scala.util.Success +import scala.util.Try + +import sys.process._ +import lisa.tptp.ProofParser.* +import lisa.tptp.KernelParser.* + +/** + * Goéland is an automated theorem prover. This tactic calls the Goéland prover to solve the current sequent. + * Goéland is only available on Linux yet, but proofs generated by Goéland should be kept in the library for future use. + * To ensure that proofs are published and can be replayed in any system, proofs from an ATPcan only be generated in draft mode. + * When in non-draft mode, the proof file should be given as an argument to the tactic (the exact file is provided by Lisa upon run without draft mode). 
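+ * + * Usage sketch for this tactic (`MyTheorem` is an illustrative name): in draft mode, `have(thesis) by Prover9` produces `prover9/MyTheorem.p` and `prover9/MyTheorem_sol.p` through the bundled `sctptpUtils.jar`; outside draft mode, the cached proof is replayed with `have(thesis) by Prover9("prover9/MyTheorem_sol")`.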
+ */ +object Prover9 extends ProofTactic with ProofSequentTactic { + private var i: Int = 0 + + val sctptpExec = "../bin/sctptpUtils.jar" + + class OsNotSupportedException(msg: String) extends Exception(msg) + + val foldername = "prover9/" + + /** + * Fetch a proof of a sequent that was previously proven by Goéland. + * The file must be in SC-TPTP format. + */ + def apply(using lib: Library, proof: lib.Proof)(file: String)(bot: F.Sequent): proof.ProofTacticJudgement = { + val outputname = proof.owningTheorem.fullName + "_sol" + try { + val scproof = reconstructProof(new File(foldername + outputname + ".p"))(using mapAtom, mapTerm, mapVariable) + proof.ValidProofTactic(bot, scproof.steps, Seq()) + } catch { + case e: FileNotFoundException => + throw FileNotFoundException("The file " + foldername + outputname + ".p was not found. To produce a proof, use `by Prover9`. ") + case e => throw e + } + } + + /** + * Solve a sequent using the Goéland automated theorem prover. + * At the moment, this option is only available on Linux system. + * The proof is generated and saved in a file in the `Prover9` folder. + */ + def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = { + + solve(Seq(), bot, proof.owningTheorem.fullName, lib.isDraft) match { + case Success(value) => proof.ValidProofTactic(bot, value.steps, Seq()) + case Failure(e) => + e match + case e: FileNotFoundException => + throw new Exception( + "For compatibility reasons, external provers can't be called in non-draft mode" + + " unless all proofs have already been generated and be available in static files. You can enable draft mode by adding `draft()` at the top of your working file." + ) + case e: OsNotSupportedException => throw e + case e => + throw e + } + } + + inline def solve(axioms: Seq[F.Sequent], sequent: F.Sequent, source: String, generateProofs: Boolean): Try[K.SCProof] = + solveK(axioms.map(_.underlying), sequent.underlying, source, generateProofs) + + /** + * Solve a sequent using the Goéland automated theorem prover, and return the kernel proof. + * At the moment, this option is only available on Linux systems. + */ + def solveK(using line: sourcecode.Line, file: sourcecode.File)(axioms: Seq[K.Sequent], sequent: K.Sequent, source: String, generateProofs: Boolean): Try[K.SCProof] = { + val filename = source + val outputname = source + "_sol" + val directory = File(foldername) + if (directory != null) && !directory.exists() then directory.mkdirs() + + val freevars = (sequent.left.flatMap(_.freeVariables) ++ sequent.right.flatMap(_.freeVariables)).toSet.map(x => x -> K.Variable(K.Identifier("X" + x.id.name, x.id.no), x.sort)).toMap + + val backMap = freevars.map { + case (x: K.Variable, xx: K.Variable) => xx -> x + case null => throw new Exception("This should not happen") + } + val r = problemToFile(foldername, filename, "question" + i, axioms, sequent, source) + i += 1 + + if generateProofs then + val OS = System.getProperty("os.name") + if OS.contains("nix") || OS.contains("nux") || OS.contains("aix") then + val ret = s"chmod u+x \"$sctptpExec\"".! + val cmd = (s"java -jar $sctptpExec p9 --input $foldername$filename.p --output $foldername$outputname.p") + val res = + try { + cmd.!! 
+ } catch { + case e: Exception => + throw e + } + val proof = reconstructProof(new File(foldername + outputname + ".p"))(using mapAtom, mapTerm, mapVariable) + Success(proof) + else if OS.contains("win") then Failure(OsNotSupportedException("The Prover9 automated theorem prover is not yet supported on Windows.")) + else Failure(OsNotSupportedException("The Prover9 automated theorem prover is only supported on Linux for now.")) + else if File(foldername + outputname + ".p").exists() then + val proof = reconstructProof(new File(foldername + outputname + ".p"))(using mapAtom, mapTerm, mapVariable) + println(OutputManager.WARNING(s"WARNING: in ${file.value}:$line, For compatibility reasons, replace `by Prover9` with `by Prover9(\"$foldername$outputname\")`.")) + Success(proof) + else Failure(Exception("For compatibility reasons, external provers can't be called in non-draft mode. You can enable draft mode by adding `draft()` at the top of your working file.")) + + } + +} diff --git a/lisa-sets2/src/main/scala/lisa/maths/Quantifiers.scala b/lisa-sets2/src/main/scala/lisa/maths/Quantifiers.scala new file mode 100644 index 000000000..dc3d5adb8 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/Quantifiers.scala @@ -0,0 +1,363 @@ +package lisa.maths + +import lisa.utils.Serialization.sorry +import lisa.utils.prooflib.BasicStepTactic.Sorry +import lisa.utils.K.repr +import lisa.automation.atp.Goeland +import lisa.utils.prooflib.Library +import lisa.utils.Printing +import lisa.utils.prooflib.ProofTacticLib.ProofFactSequentTactic +import lisa.automation.Substitution + +/** + * Implements theorems about first-order logic. + */ +object Quantifiers extends lisa.Main { + + private val X = variable[Prop] + private val Y = variable[Prop] + private val Z = variable[Prop] + private val x = variable[Ind] + private val y = variable[Ind] + private val z = variable[Ind] + private val a = variable[Ind] + private val p = variable[Prop] + private val P = variable[Ind >>: Prop] + private val Q = variable[Ind >>: Prop] + private val Phi = variable[Prop >>: Prop] + + /** + * Theorem --- A formula is equivalent to itself universally quantified if + * the bound variable is not free in it. + */ + val closedFormulaUniversal1 = Theorem( + () |- ∀(x, p) ==> p + ) { + have(thesis) by Tableau + } + + /** + * Theorem --- A formula is equivalent to itself universally quantified if + * the bound variable is not free in it. + */ + val closedFormulaUniversal = Theorem( + () |- ∀(x, p) <=> p + ) { + have(thesis) by Tableau + } + draft() + + /** + * Theorem --- A formula is equivalent to itself existentially quantified if + * the bound variable is not free in it. + */ + val closedFormulaExistential = Theorem( + () |- ∃(x, p) <=> p + ) { + have(thesis) by Tableau + } + + val ∃! = DEF(lambda(P, ∃(x, forall(y, P(y) <=> (x === y))))).asBinder[Ind, Prop, Prop] + val existsOne = ∃! + println(∃!.definition) + + /** + * Theorem --- If there ∃ a *unique* element satisfying a predicate, + * then we can say there *∃* an element satisfying it as well. 
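+ * For instance, from `∃!(x, x === a)`, stating that exactly one element equals `a`, one obtains `∃(x, x === a)`.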
+ */ + val existsOneImpliesExists = Theorem( + ∃!(x, P(x)) |- ∃(x, P(x)) + ) { + have((x === y) <=> P(y) |- (x === y) <=> P(y)) by Hypothesis + thenHave(∀(y, (x === y) <=> P(y)) |- (x === y) <=> P(y)) by LeftForall + thenHave(∀(y, (x === y) <=> P(y)) |- P(x)) by InstSchema(y := x) + thenHave(∀(y, (x === y) <=> P(y)) |- ∃(x, P(x))) by RightExists + thenHave(∃(x, ∀(y, (x === y) <=> P(y))) |- ∃(x, P(x))) by LeftExists + thenHave((∃(x, ∀(y, (x === y) <=> P(y))) <=> ∃!(P), ∃!(P)) |- ∃(x, P(x))) by + LeftSubstEq.withParameters(List((∃!(P), ∃(x, ∀(y, (x === y) <=> P(y))))), (Seq(X), X)) + have(thesis) by Tautology.from(lastStep, existsOne.definition) + } + + /** + * Theorem --- Equality relation is transitive. + */ + val equalityTransitivity = Theorem( + (x === y) /\ (y === z) |- (x === z) + ) { + have((x === y) |- (x === y)) by Hypothesis + thenHave(((x === y), (y === z)) |- (x === z)) by RightSubstEq.withParameters(List((y, z)), (Seq(y), x === y)) + thenHave(thesis) by Restate + } + + /** + * Theorem --- Conjunction and universal quantification commute. + */ + val universalConjunctionCommutation = Theorem( + () |- forall(x, P(x) /\ Q(x)) <=> forall(x, P(x)) /\ forall(x, Q(x)) + ) { + have(thesis) by Tableau + } + + /** + * Theorem -- Existential quantification distributes conjunction. + */ + val existentialConjunctionDistribution = Theorem( + ∃(x, P(x) /\ Q(x)) |- ∃(x, P(x)) /\ ∃(x, Q(x)) + ) { + have(thesis) by Tableau + } + + /** + * Theorem -- Existential quantification fully distributes when the conjunction involves one closed formula. + */ + val existentialConjunctionWithClosedFormula = Theorem( + ∃(x, P(x) /\ p) <=> (∃(x, P(x)) /\ p) + ) { + have(thesis) by Tableau + } + + /** + * Theorem -- If there is an equality on the existential quantifier's bound variable inside its body, then we can reduce + * the existential quantifier to the satisfaction of the remaining body. + */ + val equalityInExistentialQuantifier = Theorem( + ∃(x, P(x) /\ (y === x)) <=> P(y) + ) { + have(∃(x, P(x) /\ (y === x)) |- P(y)) subproof { + have(P(x) |- P(x)) by Hypothesis + thenHave((P(x), y === x) |- P(y)) by RightSubstEq.withParameters(List((y, x)), (Seq(y), P(y))) + thenHave(P(x) /\ (y === x) |- P(y)) by Restate + thenHave(thesis) by LeftExists + } + val forward = thenHave(∃(x, P(x) /\ (y === x)) ==> P(y)) by Restate + + have(P(y) |- ∃(x, P(x) /\ (y === x))) subproof { + have(P(x) /\ (y === x) |- P(x) /\ (y === x)) by Hypothesis + thenHave(P(x) /\ (y === x) |- ∃(x, P(x) /\ (y === x))) by RightExists + thenHave(P(y) /\ (y === y) |- ∃(x, P(x) /\ (y === x))) by InstSchema(x := y) + thenHave(thesis) by Restate + } + val backward = thenHave(P(y) ==> ∃(x, P(x) /\ (y === x))) by Restate + + have(thesis) by RightIff(forward, backward) + } + + /** + * Theorem --- Disjunction and existential quantification commute. + */ + val existentialDisjunctionCommutation = Theorem( + () |- ∃(x, P(x) \/ Q(x)) <=> ∃(x, P(x)) \/ ∃(x, Q(x)) + ) { + have(thesis) by Tableau + } + + /** + * Theorem --- Universal quantification distributes over equivalence + */ + val universalEquivalenceDistribution = Theorem( + forall(z, P(z) <=> Q(z)) |- (forall(z, P(z)) <=> forall(z, Q(z))) + ) { + have(thesis) by Tableau + } + + /** + * Theorem --- Universal quantification of equivalence implies equivalence + * of existential quantification. 
+ */ + val existentialEquivalenceDistribution = Theorem( + forall(z, P(z) <=> Q(z)) |- (∃(z, P(z)) <=> ∃(z, Q(z))) + ) { + have(thesis) by Tableau + + } + + /** + * Theorem --- Universal quantification distributes over implication + */ + val universalImplicationDistribution = Theorem( + forall(z, P(z) ==> Q(z)) |- (forall(z, P(z)) ==> forall(z, Q(z))) + ) { + have(thesis) by Tableau + } + + /** + * Theorem --- Universal quantification of implication implies implication + * of existential quantification. + */ + val existentialImplicationDistribution = Theorem( + forall(z, P(z) ==> Q(z)) |- (∃(z, P(z)) ==> ∃(z, Q(z))) + ) { + have(thesis) by Tableau + } + + /** + * Existential substitutes for ε + */ + val existsEpsilon = Theorem( + ∃(x, P(x)) |- P(ε(x, P(x))) + ) { + have(P(x) |- P(x)) by Restate + thenHave(P(x) |- P(ε(x, P(x)))) by RightEpsilon.withParameters(P(x), x, x) + thenHave(∃(x, P(x)) |- P(ε(x, P(x)))) by LeftExists + val i1 = thenHave(∃(x, P(x)) ==> P(ε(x, P(x)))) by Restate.from + + have(P(ε(x, P(x))) |- P(ε(x, P(x)))) by Restate + thenHave(P(ε(x, P(x))) |- ∃(x, P(x))) by RightExists + val i2 = thenHave(P(ε(x, P(x))) ==> ∃(x, P(x))) by Restate.from + + val iffstep = have(∃(x, P(x)) <=> P(ε(x, P(x)))) by RightIff(i1, i2) + + } + + val Psi = variable[((Ind >>: Prop) >>: Prop) >>: Prop] + val existsEpsilon2 = Theorem( + Psi(∃) <=> Psi(lambda(P, P(ε(P)))) + ) { + sorry + } + + /* + /** + * Theorem --- Universal quantification of equivalence implies equivalence + * of unique existential quantification. + */ + val uniqueExistentialEquivalenceDistribution = Theorem( + forall(z, P(z) <=> Q(z)) |- (existsOne(z, P(z)) <=> existsOne(z, Q(z))) + ) { + val yz = have(forall(z, P(z) <=> Q(z)) |- ((y === z) <=> P(y)) <=> ((y === z) <=> Q(y))) subproof { + have(forall(z, P(z) <=> Q(z)) |- forall(z, P(z) <=> Q(z))) by Hypothesis + val quant = thenHave(forall(z, P(z) <=> Q(z)) |- P(y) <=> Q(y)) by InstantiateForall(y) + + val lhs = have((forall(z, P(z) <=> Q(z)), ((y === z) <=> P(y))) |- ((y === z) <=> Q(y))) subproof { + have((P(y) <=> Q(y), ((y === z) <=> P(y))) |- ((y === z) <=> Q(y))) by Tautology + have(thesis) by Tautology.from(lastStep, quant) + } + val rhs = have((forall(z, P(z) <=> Q(z)), ((y === z) <=> Q(y))) |- ((y === z) <=> P(y))) subproof { + have((P(y) <=> Q(y), ((y === z) <=> Q(y))) |- ((y === z) <=> P(y))) by Tautology + have(thesis) by Tautology.from(lastStep, quant) + } + + have(thesis) by Tautology.from(lhs, rhs) + } + + val fy = thenHave(forall(z, P(z) <=> Q(z)) |- forall(y, ((y === z) <=> P(y)) <=> ((y === z) <=> Q(y)))) by RightForall + + have(forall(y, P(y) <=> Q(y)) |- (forall(y, P(y)) <=> forall(y, Q(y)))) by Restate.from(universalEquivalenceDistribution) + val univy = thenHave(forall(y, ((y === z) <=> P(y)) <=> ((y === z) <=> Q(y))) |- (forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by InstSchema( + P := lambda(y, (y === z) <=> P(y)), Q := lambda(y, (y === z) <=> Q(y)) + ) + + have(forall(z, P(z) <=> Q(z)) |- (forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by Cut(fy, univy) + + thenHave(forall(z, P(z) <=> Q(z)) |- forall(z, forall(y, ((y === z) <=> P(y))) <=> forall(y, ((y === z) <=> Q(y))))) by RightForall + have(forall(z, P(z) <=> Q(z)) |- ∃(z, forall(y, ((y === z) <=> P(y)))) <=> ∃(z, forall(y, ((y === z) <=> Q(y))))) by Cut( + lastStep, + existentialEquivalenceDistribution of (P := lambda(z, forall(y, (y === z) <=> P(y))), Q := lambda(z, forall(y, (y === z) <=> Q(y)))) + ) + thenHave(thesis) by Restate + } + + /** + * 
Theorem --- if atleast two distinct elements exist, then there is no unique + * existence + */ + val atleastTwoExist = Theorem( + (∃(x, P(x)) /\ !existsOne(x, P(x))) <=> ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y))) + ) { + val fwd = have((∃(x, P(x)) /\ !existsOne(x, P(x))) ==> ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y)))) subproof { + have((P(x), ((x === y) /\ !P(y))) |- P(x) /\ !P(y)) by Restate + have((P(x), ((x === y) /\ !P(y))) |- P(y) /\ !P(y)) by Sorry //Substitution.ApplyRules(x === y) // contradiction + val xy = thenHave((P(x), ((x === y) /\ !P(y))) |- ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y)))) by Weakening + + have((P(x), (!(x === y) /\ P(y))) |- (!(x === y) /\ P(y) /\ P(x))) by Restate + thenHave((P(x), (!(x === y) /\ P(y))) |- ∃(y, !(x === y) /\ P(y) /\ P(x))) by RightExists + val nxy = thenHave((P(x), (!(x === y) /\ P(y))) |- ∃(x, ∃(y, !(x === y) /\ P(y) /\ P(x)))) by RightExists + + have((P(x), (!(x === y) /\ P(y)) \/ ((x === y) /\ !P(y))) |- ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y)))) by Tautology.from(xy, nxy) + thenHave((P(x), ∃(y, (!(x === y) /\ P(y)) \/ ((x === y) /\ !P(y)))) |- ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y)))) by LeftExists + thenHave((P(x), forall(x, ∃(y, (!(x === y) /\ P(y)) \/ ((x === y) /\ !P(y))))) |- ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y)))) by LeftForall + thenHave((∃(x, P(x)), forall(x, ∃(y, (!(x === y) /\ P(y)) \/ ((x === y) /\ !P(y))))) |- ∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y)))) by LeftExists + + thenHave(thesis) by Restate + } + + val bwd = have(∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y))) ==> (∃(x, P(x)) /\ !existsOne(x, P(x)))) subproof { + have((P(x), P(y), !(x === y)) |- P(x)) by Restate + val ex = thenHave((P(x), P(y), !(x === y)) |- ∃(x, P(x))) by RightExists + + have((P(x), P(y), !(x === y)) |- P(y) /\ !(y === x)) by Restate + have((P(x), P(y), !(x === y), (x === z)) |- P(y) /\ !(y === z)) by Sorry //Substitution.ApplyRules(x === z) + thenHave((P(x), P(y), !(x === y), (x === z)) |- (P(y) /\ !(y === z)) \/ (!P(y) /\ (y === z))) by Weakening + val xz = thenHave((P(x), P(y), !(x === y), (x === z)) |- ∃(y, (P(y) /\ !(y === z)) \/ (!P(y) /\ (y === z)))) by RightExists + + have((P(x), P(y), !(x === y), !(x === z)) |- (P(x) /\ !(x === z)) \/ (!P(x) /\ (x === z))) by Restate + val nxz = thenHave((P(x), P(y), !(x === y), !(x === z)) |- ∃(x, (P(x) /\ !(x === z)) \/ (!P(x) /\ (x === z)))) by RightExists + + have((P(x), P(y), !(x === y)) |- ∃(x, (P(x) /\ !(x === z)) \/ (!P(x) /\ (x === z)))) by Tautology.from(xz, nxz) + thenHave((P(x), P(y), !(x === y)) |- forall(z, ∃(x, (P(x) /\ !(x === z)) \/ (!P(x) /\ (x === z))))) by RightForall + val uex = thenHave(P(x) /\ P(y) /\ !(x === y) |- !existsOne(z, P(z))) by Restate + + have(P(x) /\ P(y) /\ !(x === y) |- ∃(x, P(x)) /\ !existsOne(z, P(z))) by Tautology.from(ex, uex) + thenHave(∃(y, P(x) /\ P(y) /\ !(x === y)) |- ∃(x, P(x)) /\ !existsOne(z, P(z))) by LeftExists + thenHave(∃(x, ∃(y, P(x) /\ P(y) /\ !(x === y))) |- ∃(x, P(x)) /\ !existsOne(z, P(z))) by LeftExists + + thenHave(thesis) by Restate + } + + have(thesis) by Tautology.from(fwd, bwd) + } + + */ + + /** + * Quantify all variables in a formula on the right side of the premise sequent. + * + *+ * Γ ⊢ φ, Δ + * -------------------------- x, y, ..., z do not appear in Γ + * Γ ⊢ ∀x.∀y. ... ∀z. 
φ, Δ + *+ */ + object quantifyAll extends ProofFactSequentTactic: + def apply(using lib: Library, proof: lib.Proof)(premiseStep: proof.Fact)(conclusion: Sequent) = + def isQuantifiedOf(target: Expr[Prop], pivot: Expr[Prop], vars: List[Variable[Ind]] = Nil): Option[List[Variable[Ind]]] = + target match + case ∀(x, inner) => + val next = x :: vars + if isSame(inner, pivot) then Some(next) else isQuantifiedOf(inner, pivot, next) + case _ => None + val premise = proof.getSequent(premiseStep) + val difference = premise.right -- conclusion.right + + if difference.isEmpty then Restate(using lib, proof)(premiseStep)(conclusion) + else if difference.size > 1 then proof.InvalidProofTactic(s"There must be only one formula to quantify over between the premise and the conclusion. Found: \n${Printing.printList(difference)}") + else + val rdifference = conclusion.right -- premise.right + if rdifference.size != 1 then proof.InvalidProofTactic(s"There must be only one formula to quantify over between the premise and the conclusion. Found: \n${Printing.printList(rdifference)}") + else + val pivot = difference.head + val target = rdifference.head + val varsOption = isQuantifiedOf(target, pivot) + + if varsOption.isEmpty then proof.InvalidProofTactic("Could not find a formula to quantify over in the conclusion.") + else + val vars = varsOption.get + val conflicts = vars.toSet `intersect` premise.left.flatMap(_.freeVars) + + if conflicts.nonEmpty then proof.InvalidProofTactic(s"Variable(s) ${conflicts.mkString(", ")} to be quantified appear in the LHS of the conclusion.") + else + // safe, proceed + TacticSubproof: + val vars = varsOption.get + lib.have(premise) by Restate.from(premiseStep) + + val base = premise ->> pivot + + vars.foldLeft(pivot): (pivot, v) => + val quant = ∀(v, pivot) + lib.thenHave(base +>> quant) by RightForall.withParameters(pivot, v) + quant + + lib.thenHave(conclusion) by Restate + +} diff --git a/lisa-sets2/src/main/scala/lisa/maths/algebra/Group_Theory.scala b/lisa-sets2/src/main/scala/lisa/maths/algebra/Group_Theory.scala new file mode 100644 index 000000000..138924274 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/algebra/Group_Theory.scala @@ -0,0 +1,1953 @@ +package lisa.maths.algebra + +/* +import lisa.automation.kernel.CommonTactics.Definition +import lisa.automation.kernel.CommonTactics.ExistenceAndUniqueness +import lisa.automation.Tautology +import lisa.automation.settheory.SetTheoryTactics.UniqueComprehension +import lisa.kernel.proof.SequentCalculus.Hypothesis +import lisa.SetTheoryLibrary +import lisa.kernel.fol.FOL.VariableLabel +import lisa.maths.settheory.functions.Functionals.* +import lisa.maths.settheory.SetTheory.* +import lisa.maths.settheory.* +import lisa.maths.settheory.functions.FunctionProperties.bijective +import lisa.automation.settheory.SetTheoryTactics.TheConditional + + +/** + * Group theory, developed following Chapter 2 of S. Lang "Undergraduate Algebra". + * + * Book : [[https://link.springer.com/book/10.1007/978-1-4684-9234-7]] + */ +object GroupTheory extends lisa.Main { + // Groups + private val G, H = variable[Ind] + + // Group laws + private val * = variable[Ind] + + // Group elements + private val a, b, c, d = variable[Ind] + private val x, y, z = variable[Ind] + private val t, u, v, w = variable[Ind] + + // Identity elements + private val e, f = variable[Ind] + + // Predicates + private val P, Q = variable[Ind >>: Prop] + + // + // 0. 
Notation + // + + /** + * Defines the element that is uniquely given by the uniqueness theorem, or falls back to the error element if the + * assumptions of the theorem are not satisfied. + * + * This is useful in defining specific elements in groups, where their uniqueness (and existence) strongly rely + * on the assumption of the group structure. + */ + + // def TheConditional(u: Variable, f: Formula)(just: JUSTIFICATION, defaultValue: Term = ∅): The = { + // //The(u, f)(just) + // val seq = just.proposition + + // if (seq.left.isEmpty) { + // The(u, f)(just) + // } else { + // val prem = if (seq.left.size == 1) seq.left.head else And(seq.left.toSeq: _*) + // val completeDef = (prem ==> f) /\ (!prem ==> (u === defaultValue)) + // val substF = substituteVariables(completeDef, Map[VariableLabel, Term](u -> defaultValue), Seq()) + // val substDef = substituteVariables(completeDef, Map[VariableLabel, Term](u -> v), Seq()) + + // val completeUniquenessTheorem = Lemma( + // ∃!(u, completeDef) + // ) { + // val case1 = have(prem |- ∃!(u, completeDef)) subproof { + // // We prove the equivalence f <=> completeDef so that we can substitute it in the uniqueness quantifier + // val equiv = have(prem |- ∀(u, f <=> completeDef)) subproof { + // have(f |- f) by Hypothesis + // thenHave((prem, f) |- f) by Weakening + // val left = thenHave(f |- (prem ==> f)) by Restate + + // have(prem |- prem) by Hypothesis + // thenHave((prem, !prem) |- ()) by LeftNot + // thenHave((prem, !prem) |- (u === defaultValue)) by Weakening + // val right = thenHave(prem |- (!prem ==> (u === defaultValue))) by Restate + + // have((prem, f) |- completeDef) by RightAnd(left, right) + // val forward = thenHave(prem |- f ==> completeDef) by Restate + + // have(completeDef |- completeDef) by Hypothesis + // thenHave((prem, completeDef) |- completeDef) by Weakening + // thenHave((prem, completeDef) |- f) by Tautology + // val backward = thenHave(prem |- completeDef ==> f) by Restate + + // have(prem |- f <=> completeDef) by RightIff(forward, backward) + // thenHave(thesis) by RightForall + // } + + // val substitution = have((∃!(u, f), ∀(u, f <=> completeDef)) |- ∃!(u, completeDef)) by Restate.from( + // substitutionInUniquenessQuantifier of (P -> lambda(u, f), Q -> lambda(u, completeDef)) + // ) + + // val implication = have((prem, ∃!(u, f)) |- ∃!(u, completeDef)) by Cut(equiv, substitution) + // val uniqueness = have(prem |- ∃!(u, f)) by Restate.from(just) + // have(prem |- ∃!(u, completeDef)) by Cut(uniqueness, implication) + // } + + // val case2 = have(!prem |- ∃!(u, completeDef)) subproof { + // val existence = have(!prem |- ∃(u, completeDef)) subproof { + // have(!prem |- !prem) by Hypothesis + // thenHave((prem, !prem) |- ()) by LeftNot + // thenHave((prem, !prem) |- substF) by Weakening + // val left = thenHave(!prem |- (prem ==> substF)) by Restate + + // have(defaultValue === defaultValue) by RightRefl + // thenHave(!prem |- defaultValue === defaultValue) by Weakening + // val right = thenHave(!prem ==> (defaultValue === defaultValue)) by Restate + + // have(!prem |- (prem ==> substF) /\ (!prem ==> (defaultValue === defaultValue))) by RightAnd(left, right) + // thenHave(thesis) by RightExists.withParameters(defaultValue) + // } + + // val uniqueness = have((!prem, completeDef, substDef) |- (u === v)) subproof { + // assume(!prem) + // assume(completeDef) + // assume(substDef) + + // val eq1 = have(u === defaultValue) by Tautology + // val eq2 = have(defaultValue === v) by Tautology + // val p = have((u === defaultValue) 
/\ (defaultValue === v)) by RightAnd(eq1, eq2) + + // val transitivity = equalityTransitivity of (x -> u, y -> defaultValue, z -> v) + // have(thesis) by Cut(p, transitivity) + // } + + // have(thesis) by ExistenceAndUniqueness(completeDef)(existence, uniqueness) + // } + + // have(thesis) by Cases(case1, case2) + // } + + // The(u, completeDef)(completeUniquenessTheorem) + // } + // } + + // + // 1. Basic definitions and results + // + + /** + * Binary operation --- `*` is a binary operation on `G` if it associates to each pair of elements of `G` + * a unique element in `G`. In other words, `*` is a function `G × G -> G`. + */ + val binaryOperation = DEF(G, *) --> functionFrom(*, cartesianProduct(G, G), G) + + + /** + * Short-hand alias for `x * y`. + */ + inline def op(x: Term, * : Term, y: Term) = app(*, pair(x, y)) + + + /** + * Associativity --- `*` is associative (in `G`) if `(x * y) * z = x * (y * z)` for all `x, y, z` in `G`. + */ + val associativityAxiom = DEF(G, *) --> + ∀(x, x ∈ G ==> ∀(y, y ∈ G ==> ∀(z, z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z)))))) + + /** + * Neutral element --- We say that an element `e` in `G` is neutral if `e * x = x * e = x` for all `x` in `G`. + */ + val isNeutral = DEF(e, G, *) --> (e ∈ G /\ ∀(x, (x ∈ G) ==> ((op(e, *, x) === x) /\ (op(x, *, e) === x)))) + + /** + * Identity existence --- There exists a neutral element `e` in `G`. + */ + val identityExistence = DEF(G, *) --> ∃(e, isNeutral(e, G, *)) + + /** + * Inverse element --- `y` is called an inverse of `x` if `x * y = y * x = e`. + */ + val isInverse = DEF(y, x, G, *) --> (y ∈ G) /\ isNeutral(op(x, *, y), G, *) /\ isNeutral(op(y, *, x), G, *) + + /** + * Inverse existence --- For all `x` in G, there exists an element `y` in G such that `x * y = y * x = e`. + */ + val inverseExistence = DEF(G, *) --> ∀(x, (x ∈ G) ==> ∃(y, isInverse(y, x, G, *))) + + /** + * Group --- A group (G, *) is a set along with a law of composition `*`, satisfying [[associativityAxiom]], [[identityExistence]] + * and [[inverseExistence]]. + */ + val group = DEF(G, *) --> binaryOperation(G, *) /\ associativityAxiom(G, *) /\ identityExistence(G, *) /\ inverseExistence(G, *) + + /** + * Commutativity --- `*` is said to be commutative on `G` if `x * y = y * x` for all `x, y ∈ G`. + */ + val commutativityAxiom = DEF(G, *) --> ∀(x, x ∈ G ==> ∀(y, y ∈ G ==> (op(x, *, y) === op(y, *, x)))) + + /** + * Abelian group --- A group is said to beabelian (or commutative) if every element commutes, + * i.e. it satisfies [[commutativityAxiom]]. + */ + val abelianGroup = DEF(G, *) --> group(G, *) /\ commutativityAxiom(G, *) + + /** + * Alias for abelian group. + */ + val commutativeGroup = abelianGroup + + /** + * Lemma --- For elements `x, y, z` in a group `(G, *)`, we have `(xy)z = x(yz)`. + * + * Practical reformulation of the [[associativityAxiom]]. 
+ */ + val associativity = Lemma( + (group(G, *), x ∈ G, y ∈ G, z ∈ G) |- op(op(x, *, y), *, z) === op(x, *, op(y, *, z)) + ) { + assume(group(G, *)) + + have(∀(x, x ∈ G ==> ∀(y, y ∈ G ==> ∀(z, z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z))))))) by Tautology.from( + group.definition, + associativityAxiom.definition + ) + thenHave(x ∈ G ==> ∀(y, y ∈ G ==> ∀(z, z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z)))))) by InstantiateForall(x) + thenHave(x ∈ G |- ∀(y, y ∈ G ==> ∀(z, z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z)))))) by Restate + thenHave(x ∈ G |- y ∈ G ==> ∀(z, z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z))))) by InstantiateForall(y) + thenHave((x ∈ G, y ∈ G) |- ∀(z, z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z))))) by Restate + thenHave((x ∈ G, y ∈ G) |- z ∈ G ==> (op(op(x, *, y), *, z) === op(x, *, op(y, *, z)))) by InstantiateForall(z) + thenHave((x ∈ G, y ∈ G, z ∈ G) |- (op(op(x, *, y), *, z) === op(x, *, op(y, *, z)))) by Restate + } + + /** + * Lemma --- For elements `x, y` in an abelian group `(G, *)`, we have `xy = yx`. + * + * Practical reformulation of [[commutativityAxiom]]. + */ + val commutativity = Lemma( + (abelianGroup(G, *), x ∈ G, y ∈ G) |- op(x, *, y) === op(y, *, x) + ) { + assume(abelianGroup(G, *)) + + have(∀(x, x ∈ G ==> ∀(y, y ∈ G ==> (op(x, *, y) === op(y, *, x))))) by Tautology.from( + abelianGroup.definition, + commutativityAxiom.definition + ) + thenHave(x ∈ G ==> ∀(y, y ∈ G ==> (op(x, *, y) === op(y, *, x)))) by InstantiateForall(x) + thenHave(x ∈ G |- ∀(y, y ∈ G ==> (op(x, *, y) === op(y, *, x)))) by Restate + thenHave(x ∈ G |- (y ∈ G ==> (op(x, *, y) === op(y, *, x)))) by InstantiateForall(y) + thenHave((x ∈ G, y ∈ G) |- ((op(x, *, y) === op(y, *, x)))) by Restate + } + + /** + * Group operation is functional -- The group operation `*` is functional. + */ + val groupOperationIsFunctional = Lemma( + group(G, *) |- functional(*) + ) { + have(thesis) by Tautology.from( + group.definition, + binaryOperation.definition, + functionFromImpliesFunctional of (f -> *, x -> cartesianProduct(G, G), y -> G) + ) + } + + /** + * Group operation domain -- The domain of a group law is the cartesian product of the group `G` with itself. + * + * Follows directly from the definition of `binaryRelation`. + */ + val groupOperationDomain = Lemma( + group(G, *) |- relationDomain(*) === cartesianProduct(G, G) + ) { + have(thesis) by Tautology.from( + group.definition, + binaryOperation.definition, + functionFromImpliesDomainEq of (f -> *, x -> cartesianProduct(G, G), y -> G) + ) + } + + /** + * Lemma --- If `x` and `y` are two elements of the group, the pair `(x, y)` is in the relation domain of `*. + */ + val groupPairInOperationDomain = Lemma( + (group(G, *), x ∈ G, y ∈ G) |- pair(x, y) ∈ relationDomain(*) + ) { + sorry + // assume(group(G, *)) + // assume(x ∈ G) + // assume(y ∈ G) + + // have(x ∈ G /\ y ∈ G) by Tautology + // have(pair(x, y) ∈ cartesianProduct(G, G)) by Tautology.from( + // lastStep, + // pairInCartesianProduct of (a -> x, b -> y, x -> G, y -> G) + // ) + // thenHave((relationDomain(*) === cartesianProduct(G, G)) |- pair(x, y) ∈ relationDomain(*)) by RightSubstEq( + // List((relationDomain(*), cartesianProduct(G, G))), + // lambda(z, pair(x, y) ∈ z) + // ) + + // have(thesis) by Cut(groupOperationDomain, lastStep) + } + + /** + * Lemma --- If `x, y ∈ G`, then `x * y ∈ G`. 
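+ * + * This closure property is what justifies writing nested products such as `op(op(x, *, y), *, z)`, as used throughout the lemmas below.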
+ */ + val groupIsClosedByProduct = Lemma( + (group(G, *), x ∈ G, y ∈ G) |- op(x, *, y) ∈ G + ) { + sorry + // have(∀(t, (t ∈ relationRange(*)) <=> ∃(a, pair(a, t) ∈ *))) by Definition(relationRange, relationRangeUniqueness)(*) + // val relationRangeDef = thenHave((op(x, *, y) ∈ relationRange(*)) <=> ∃(a, pair(a, op(x, *, y)) ∈ *)) by InstantiateForall(op(x, *, y)) + + // val appDef = have( + // (functional(*), pair(x, y) ∈ relationDomain(*)) |- pair(pair(x, y), op(x, *, y)) ∈ * + // ) by Definition(app, functionApplicationUniqueness)(*, pair(x, y)) + + // assume(group(G, *)) + // assume(x ∈ G) + // assume(y ∈ G) + + // // Show that x * y is in relation range + // have(pair(pair(x, y), op(x, *, y)) ∈ *) by Tautology.from( + // appDef, + // groupOperationIsFunctional, + // groupPairInOperationDomain + // ) + // thenHave(∃(a, pair(a, op(x, *, y)) ∈ *)) by RightExists + + // val productInRelationRange = have(op(x, *, y) ∈ relationRange(*)) by Tautology.from(lastStep, relationRangeDef) + + // // Conclude by [[functionImpliesRangeSubsetOfCodomain]] + // have(∀(t, t ∈ relationRange(*) ==> t ∈ G)) by Tautology.from( + // group.definition, + // binaryOperation.definition, + // functionImpliesRangeSubsetOfCodomain of (f -> *, x -> cartesianProduct(G, G), y -> G), + // subset.definition of (x -> relationRange(*), y -> G) + // ) + // thenHave(op(x, *, y) ∈ relationRange(*) ==> op(x, *, y) ∈ G) by InstantiateForall(op(x, *, y)) + // thenHave(op(x, *, y) ∈ relationRange(*) |- op(x, *, y) ∈ G) by Restate + + // have(thesis) by Cut(productInRelationRange, lastStep) + } + + /** + * Identity uniqueness --- In a group (G, *), an identity element is unique, i.e. if both `e * x = x * e = x` and + * `f * x = x * f = x` for all `x`, then `e = f`. + * + * This justifies calling `e` the identity element. + */ + val identityUniqueness = Theorem( + group(G, *) |- ∃!(e, isNeutral(e, G, *)) + ) { + sorry + // val existence = have(group(G, *) |- ∃(e, isNeutral(e, G, *))) by Tautology.from(group.definition, identityExistence.definition) + + // // We prove that if e and f are neutral elements then ef = f = e, where the first equality comes from e's left neutrality, + // // and the second equality from f's right neutrality + // val uniqueness = have((isNeutral(e, G, *), isNeutral(f, G, *)) |- (e === f)) subproof { + // // We prove that neutral elements are elements of G, such that * can be applied. + // val membership = have(isNeutral(e, G, *) |- e ∈ G) by Tautology.from(isNeutral.definition) + + // assume(isNeutral(e, G, *)) + // assume(isNeutral(f, G, *)) + + // // 1. ef = f + // have(∀(x, x ∈ G ==> ((op(e, *, x) === x) /\ (op(x, *, e) === x)))) by Tautology.from(isNeutral.definition) + // thenHave(f ∈ G ==> ((op(e, *, f) === f) /\ (op(f, *, e) === f))) by InstantiateForall(f) + // val neutrality = thenHave(f ∈ G |- ((op(e, *, f) === f) /\ (op(f, *, e) === f))) by Restate + + // have((op(e, *, f) === f) /\ (op(f, *, e) === f)) by Cut(membership of (e -> f), neutrality) + // val firstEq = thenHave(op(e, *, f) === f) by Tautology + + // // 2. ef = e + // have((op(f, *, e) === e) /\ (op(e, *, f) === e)) by Cut(membership of (e -> e), neutrality of (e -> f, f -> e)) + // val secondEq = thenHave(e === op(e, *, f)) by Tautology + + // // 3. Conclude by transitivity + // have(e === f) by Equalities(firstEq, secondEq) + // } + + // have(group(G, *) |- ∃!(e, isNeutral(e, G, *))) by ExistenceAndUniqueness(isNeutral(e, G, *))(existence, uniqueness) + } + + /** + * Defines the identity element of `(G, *)`. 
+ */ + val identity = DEF(G, *) --> TheConditional(e, isNeutral(e, G, *))(identityUniqueness) + + /** + * Lemma --- The identity element is neutral by definition. + */ + private val identityIsNeutral = Lemma( + group(G, *) |- isNeutral(identity(G, *), G, *) + ) { + sorry + //have(thesis) by Definition(identity, identityUniqueness)(G, *) + } + + /** + * Lemma --- For any element `x` in a group `(G, *)`, we have `x * e = e * x = x`. + * + * Practical reformulation of [[identityIsNeutral]]. + */ + val identityNeutrality = Lemma( + (group(G, *), x ∈ G) |- (op(identity(G, *), *, x) === x) /\ (op(x, *, identity(G, *)) === x) + ) { + have(group(G, *) |- ∀(x, (x ∈ G) ==> ((op(identity(G, *), *, x) === x) /\ (op(x, *, identity(G, *)) === x)))) by Tautology.from( + identityIsNeutral, + isNeutral.definition of (e -> identity(G, *)) + ) + thenHave(group(G, *) |- (x ∈ G) ==> ((op(identity(G, *), *, x) === x) /\ (op(x, *, identity(G, *)) === x))) by InstantiateForall(x) + thenHave(thesis) by Restate + } + + /** + * Theorem --- The identity element belongs to the group. + * + * This simple theorem has unexpected consequences, such as [[groupNonEmpty]]. + */ + val identityInGroup = Theorem( + group(G, *) |- identity(G, *) ∈ G + ) { + have(thesis) by Tautology.from( + identityIsNeutral, + isNeutral.definition of (e -> identity(G, *)) + ) + } + + /** + * Theorem --- A group is non-empty. + * + * Direct corollary of [[identityInGroup]]. + */ + val groupNonEmpty = Theorem( + group(G, *) |- (G =/= ∅) + ) { + have(thesis) by Cut(identityInGroup, setWithElementNonEmpty of (x -> G, y -> identity(G, *))) + } + + /** + * Theorem --- The inverse of an element `x` (i.e. `y` such that `x * y = y * x = e`) in `G` is unique. + */ + val inverseUniqueness = Theorem( + (group(G, *), x ∈ G) |- ∃!(y, isInverse(y, x, G, *)) + ) { + sorry + // have(group(G, *) |- ∀(x, x ∈ G ==> ∃(y, isInverse(y, x, G, *)))) by Tautology.from(group.definition, inverseExistence.definition) + // thenHave(group(G, *) |- (x ∈ G ==> ∃(y, isInverse(y, x, G, *)))) by InstantiateForall(x) + // val existence = thenHave((group(G, *), x ∈ G) |- ∃(y, isInverse(y, x, G, *))) by Restate + + // // Assume y and z are inverses of x. + // // We prove the following chain of equalities: + // // z === (yx)z === y(xz) === y + // // where equalities come from + // // 1. Left neutrality of yx + // // 2. Associativity + // // 3. Right neutrality of xz + // val uniqueness = have((group(G, *), x ∈ G, isInverse(y, x, G, *), isInverse(z, x, G, *)) |- (y === z)) subproof { + // val inverseMembership = have(isInverse(y, x, G, *) |- y ∈ G) by Tautology.from(isInverse.definition) + + // // 1. (yx)z = z + // val leftNeutrality = have((group(G, *), x ∈ G, isInverse(y, x, G, *), z ∈ G) |- (op(op(y, *, x), *, z) === z)) subproof { + // assume(group(G, *)) + // assume(x ∈ G) + // assume(isInverse(y, x, G, *)) + // assume(z ∈ G) + + // have(∀(u, u ∈ G ==> ((op(op(y, *, x), *, u) === u) /\ (op(u, *, op(y, *, x)) === u)))) by Tautology.from(isInverse.definition, isNeutral.definition of (e -> op(y, *, x))) + // thenHave(z ∈ G ==> ((op(op(y, *, x), *, z) === z) /\ (op(z, *, op(y, *, x)) === z))) by InstantiateForall(z) + // thenHave(op(op(y, *, x), *, z) === z) by Tautology + // } + // val firstEq = have((group(G, *), x ∈ G, isInverse(y, x, G, *), isInverse(z, x, G, *)) |- op(op(y, *, x), *, z) === z) by Cut(inverseMembership of (y -> z), leftNeutrality) + + // // 2. 
(yx)z = y(xz) + // val associativityCut = have((group(G, *), x ∈ G /\ y ∈ G /\ z ∈ G) |- (op(op(y, *, x), *, z) === op(y, *, op(x, *, z)))) by Restate.from( + // associativity of (x -> y, y -> x) + // ) + // val memberships = have((x ∈ G, isInverse(y, x, G, *), isInverse(z, x, G, *)) |- x ∈ G /\ y ∈ G /\ z ∈ G) by Tautology.from(inverseMembership of (y -> y), inverseMembership of (y -> z)) + // val secondEq = have((group(G, *), x ∈ G, isInverse(y, x, G, *), isInverse(z, x, G, *)) |- op(op(y, *, x), *, z) === op(y, *, op(x, *, z))) by Cut(memberships, associativityCut) + + // // 3. y(xz) = y + // val rightNeutrality = have((group(G, *), x ∈ G, y ∈ G, isInverse(z, x, G, *)) |- (op(y, *, op(x, *, z)) === y)) subproof { + // assume(group(G, *)) + // assume(x ∈ G) + // assume(y ∈ G) + // assume(isInverse(z, x, G, *)) + + // have(∀(u, u ∈ G ==> ((op(op(x, *, z), *, u) === u) /\ (op(u, *, op(x, *, z)) === u)))) by Tautology.from(isInverse.definition of (y -> z), isNeutral.definition of (e -> op(x, *, z))) + // thenHave(y ∈ G ==> ((op(op(x, *, z), *, y) === y) /\ (op(y, *, op(x, *, z)) === y))) by InstantiateForall(y) + // thenHave(op(y, *, op(x, *, z)) === y) by Tautology + // } + // val thirdEq = have((group(G, *), x ∈ G, isInverse(y, x, G, *), isInverse(z, x, G, *)) |- op(y, *, op(x, *, z)) === y) by Cut(inverseMembership of (y -> y), rightNeutrality) + + // // 4. z = y + // have((group(G, *), x ∈ G, isInverse(y, x, G, *), isInverse(z, x, G, *)) |- z === y) by Equalities(firstEq, secondEq, thirdEq) + // } + + // have(thesis) by ExistenceAndUniqueness(isInverse(y, x, G, *))(existence, uniqueness) + } + + /** + * Defines the inverse of an element `x` in a group `(G, *)`. + */ + val inverse = DEF(x, G, *) --> TheConditional(y, isInverse(y, x, G, *))(inverseUniqueness) + + /** + * Lemma --- The inverse of `x` is an inverse of `x` (by definition). + */ + val inverseIsInverse = Lemma( + (group(G, *), x ∈ G) |- isInverse(inverse(x, G, *), x, G, *) + ) { + sorry + //have(thesis) by Definition(inverse, inverseUniqueness)(x, G, *) + } + + /** + * Lemma --- The inverse element `y` of `x` is in `G`. + */ + val inverseInGroup = Lemma( + (group(G, *), x ∈ G) |- inverse(x, G, *) ∈ G + ) { + have(thesis) by Tautology.from( + inverseIsInverse, + isInverse.definition of (y -> inverse(x, G, *)) + ) + } + + /** + * Theorem --- For any element `x`, we have `x * inverse(x) = inverse(x) * x = e`. 
+ */ + val inverseCancellation = Theorem( + (group(G, *), x ∈ G) |- (op(x, *, inverse(x, G, *)) === identity(G, *)) /\ (op(inverse(x, G, *), *, x) === identity(G, *)) + ) { + assume(group(G, *)) + + have(∀(y, (y === identity(G, *)) <=> isNeutral(y, G, *))) by Tautology.from( + identity.definition, + identityUniqueness + ) + val idCharacterization = thenHave((y === identity(G, *)) <=> isNeutral(y, G, *)) by InstantiateForall(y) + + assume(x ∈ G) + val inverseDef = have((inverse(x, G, *) ∈ G) /\ isNeutral(op(x, *, inverse(x, G, *)), G, *) /\ isNeutral(op(inverse(x, G, *), *, x), G, *)) by Tautology.from( + inverseIsInverse, + isInverse.definition of (y -> inverse(x, G, *)) + ) + + val left = have(op(x, *, inverse(x, G, *)) === identity(G, *)) by Tautology.from( + inverseDef, + idCharacterization of (y -> op(x, *, inverse(x, G, *))) + ) + val right = have(op(inverse(x, G, *), *, x) === identity(G, *)) by Tautology.from( + inverseDef, + idCharacterization of (y -> op(inverse(x, G, *), *, x)) + ) + + have(thesis) by RightAnd(left, right) + } + + /** + * Theorem --- `y` is the inverse of `x` iff `x` is the inverse of `y` + */ + val inverseSymmetry = Theorem( + (group(G, *), x ∈ G, y ∈ G) |- (y === inverse(x, G, *)) <=> (x === inverse(y, G, *)) + ) { + assume(group(G, *)) + + val inverseCharacterization = have(x ∈ G |- ((y === inverse(x, G, *)) <=> isInverse(y, x, G, *))) subproof { + have(x ∈ G |- ∀(y, (y === inverse(x, G, *)) <=> isInverse(y, x, G, *))) by Tautology.from(inverseUniqueness, inverse.definition) + thenHave(thesis) by InstantiateForall(y) + } + + val forward = have(x ∈ G |- isInverse(y, x, G, *) ==> isInverse(x, y, G, *)) subproof { + assume(x ∈ G) + have(isInverse(y, x, G, *) |- y ∈ G /\ isNeutral(op(x, *, y), G, *) /\ isNeutral(op(y, *, x), G, *)) by Tautology.from(isInverse.definition) + thenHave(isInverse(y, x, G, *) |- isNeutral(op(y, *, x), G, *) /\ isNeutral(op(x, *, y), G, *)) by Tautology + thenHave(isInverse(y, x, G, *) |- x ∈ G /\ isNeutral(op(y, *, x), G, *) /\ isNeutral(op(x, *, y), G, *)) by Tautology + + have(isInverse(y, x, G, *) |- isInverse(x, y, G, *)) by Tautology.from(lastStep, isInverse.definition of (y -> x, x -> y)) + thenHave(thesis) by Restate + } + + val backward = forward of (x -> y, y -> x) + + have((x ∈ G, y ∈ G) |- isInverse(y, x, G, *) <=> isInverse(x, y, G, *)) by RightIff(forward, backward) + + have(thesis) by Tautology.from( + inverseCharacterization, + lastStep, + inverseCharacterization of (x -> y, y -> x) + ) + } + + /** + * Involution of the inverse -- For all `x`, `inverse(inverse(x)) = x`. + * + * Direct corollary of [[inverseSymmetry]]. + */ + val inverseIsInvolutive = Theorem( + (group(G, *), x ∈ G) |- (inverse(inverse(x, G, *), G, *) === x) + ) { + have(thesis) by Tautology.from( + inverseSymmetry of (y -> inverse(x, G, *)), + inverseInGroup + ) + } + + /** + * Theorem --- In a group `(G, *)`, we have `xy = xz ==> y = z`. + */ + val leftCancellation = Theorem( + (group(G, *), x ∈ G, y ∈ G, z ∈ G) |- (op(x, *, y) === op(x, *, z)) ==> (y === z) + ) { + sorry + // val i = inverse(x, G, *) + + // // 1. 
Prove that i * (xy) = y and i * (xz) = z + // val cancellation = have((group(G, *), x ∈ G, y ∈ G) |- op(i, *, op(x, *, y)) === y) subproof { + // // (ix)y = i(xy) + // val eq1 = have((group(G, *), x ∈ G, y ∈ G) |- op(op(i, *, x), *, y) === op(i, *, op(x, *, y))) by Cut( + // inverseInGroup, + // associativity of (x -> i, y -> x, z -> y) + // ) + + // // (ix)y = y + // have((group(G, *), x ∈ G) |- ∀(y, (y ∈ G) ==> ((op(op(i, *, x), *, y) === y) /\ (op(y, *, op(i, *, x)) === y)))) by Tautology.from( + // inverseIsInverse, + // isInverse.definition of (y -> i), + // isNeutral.definition of (e -> op(i, *, x)) + // ) + // thenHave((group(G, *), x ∈ G) |- (y ∈ G) ==> ((op(op(i, *, x), *, y) === y) /\ (op(y, *, op(i, *, x)) === y))) by InstantiateForall(y) + // val eq2 = thenHave((group(G, *), x ∈ G, y ∈ G) |- op(op(i, *, x), *, y) === y) by Tautology + + // // i(xy) = y + // have(thesis) by Equalities(eq1, eq2) + // } + + // // 2. By substitution, xy = xz implies i(xy) = i(xz) + // have(op(i, *, op(x, *, y)) === op(i, *, op(x, *, y))) by RightRefl + // val substitution = thenHave(op(x, *, y) === op(x, *, z) |- op(i, *, op(x, *, y)) === op(i, *, op(x, *, z))) by RightSubstEq( + // List((op(x, *, y), op(x, *, z))), + // lambda(z, op(i, *, op(x, *, y)) === op(i, *, z)) + // ) + + // // 3. Conclude that xy = xz ==> y === z + // have((group(G, *), x ∈ G, y ∈ G, z ∈ G, op(x, *, y) === op(x, *, z)) |- y === z) by Equalities(cancellation, cancellation of (y -> z), substitution) + // thenHave(thesis) by Restate + } + + /** + * Theorem --- In a group `(G, *)`, we have `yx = zx ==> y = z`. + * + * Analogous to [[leftCancellation]]. + */ + val rightCancellation = Theorem( + (group(G, *), x ∈ G, y ∈ G, z ∈ G) |- (op(y, *, x) === op(z, *, x)) ==> (y === z) + ) { + sorry + // val i = inverse(x, G, *) + + // // 1. Prove that (yx)i = y and (zx)i = z + // val cancellation = have((group(G, *), x ∈ G, y ∈ G) |- op(op(y, *, x), *, i) === y) subproof { + // // (yx)i = y(xi) + // val eq1 = have((group(G, *), x ∈ G, y ∈ G) |- op(op(y, *, x), *, i) === op(y, *, op(x, *, i))) by Cut( + // inverseInGroup, + // associativity of (x -> y, y -> x, z -> i) + // ) + + // // y(xi) = y + // have((group(G, *), x ∈ G) |- ∀(y, (y ∈ G) ==> ((op(op(x, *, i), *, y) === y) /\ (op(y, *, op(x, *, i)) === y)))) by Tautology.from( + // inverseIsInverse, + // isInverse.definition of (y -> i), + // isNeutral.definition of (e -> op(x, *, i)) + // ) + // thenHave((group(G, *), x ∈ G) |- (y ∈ G) ==> ((op(op(x, *, i), *, y) === y) /\ (op(y, *, op(x, *, i)) === y))) by InstantiateForall(y) + // val eq2 = thenHave((group(G, *), x ∈ G, y ∈ G) |- op(y, *, op(x, *, i)) === y) by Tautology + + // // (yx)i = y + // have(thesis) by Equalities(eq1, eq2) + // } + + // // 2. By substitution, yx = zx implies (yx)i = (zx)i + // have(op(op(y, *, x), *, i) === op(op(y, *, x), *, i)) by RightRefl + // val substitution = thenHave(op(y, *, x) === op(z, *, x) |- op(op(y, *, x), *, i) === op(op(z, *, x), *, i)) by RightSubstEq( + // List((op(y, *, x), op(z, *, x))), + // lambda(z, op(op(y, *, x), *, i) === op(z, *, i)) + // ) + + // // 3. Conclude that yx = zx ==> y === z + // have((group(G, *), x ∈ G, y ∈ G, z ∈ G, op(y, *, x) === op(z, *, x)) |- y === z) by Equalities(cancellation, cancellation of (y -> z), substitution) + // thenHave(thesis) by Restate + } + + /** + * Theorem --- An element `x` of a group `(G, *)` is idempotent if and only if `x` is the identity element.
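+   * Proof sketch: if `x * x = x`, then `x * x = e * x` by [[identityNeutrality]] and [[rightCancellation]] gives `x = e`; the converse follows directly from the neutrality of `e`.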
+ */ + val identityIdempotence = Theorem( + (group(G, *), x ∈ G) |- (op(x, *, x) === x) <=> (x === identity(G, *)) + ) { + sorry + // assume(group(G, *)) + // assume(x ∈ G) + + // val neutralityEquality = have(op(identity(G, *), *, x) === x) by Tautology.from(identityNeutrality) + + // // Forward direction, using the equality x * x = x = e * x + // // and concluding by right cancellation + // have(op(x, *, x) === x |- x === identity(G, *)) subproof { + // have(op(x, *, x) === x |- op(x, *, x) === x) by Hypothesis + // have(op(x, *, x) === x |- op(x, *, x) === op(identity(G, *), *, x)) by Equalities(lastStep, neutralityEquality) + // have((op(x, *, x) === x, identity(G, *) ∈ G) |- x === identity(G, *)) by Tautology.from( + // lastStep, + // rightCancellation of (x -> x, y -> x, z -> identity(G, *)) + // ) + // have(thesis) by Cut(identityInGroup, lastStep) + // } + // val forward = thenHave((op(x, *, x) === x) ==> (x === identity(G, *))) by Restate + + // have(x === identity(G, *) |- op(x, *, x) === x) by RightSubstEq( + // List((x, identity(G, *))), + // lambda(z, op(z, *, x) === x) + // )(neutralityEquality) + // val backward = thenHave((x === identity(G, *)) ==> (op(x, *, x) === x)) by Restate + + // have(thesis) by RightIff(forward, backward) + } + + /** + * Theorem --- If `x * y = e` then `y = inverse(x)`. + * + * This also implies that `x = inverse(y)` by [[inverseSymmetry]]. + */ + val inverseTest = Theorem( + (group(G, *), x ∈ G, y ∈ G) |- (op(x, *, y) === identity(G, *)) ==> (y === inverse(x, G, *)) + ) { + sorry + // assume(group(G, *)) + // assume(x ∈ G) + // assume(y ∈ G) + + // val e = identity(G, *) + + // // 1. e = x * inverse(x) + // val eq1 = have(op(x, *, inverse(x, G, *)) === e) by Tautology.from( + // identityInGroup, + // inverseCancellation + // ) + + // // 2. x * y = x * inverse(x) + // have((op(x, *, y) === e) |- (op(x, *, y) === e)) by Hypothesis + // val eq2 = have((op(x, *, y) === e) |- op(x, *, y) === op(x, *, inverse(x, G, *))) by Equalities(eq1, lastStep) + + // // Conclude by left cancellation + // have((op(x, *, y) === e, inverse(x, G, *) ∈ G) |- (y === inverse(x, G, *))) by Tautology.from( + // lastStep, + // leftCancellation of (z -> inverse(x, G, *)) + // ) + // have((op(x, *, y) === e) |- (y === inverse(x, G, *))) by Cut(inverseInGroup, lastStep) + // thenHave(thesis) by Restate + } + + /** + * Theorem --- The inverse of the identity element is itself. 
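+   * Proof sketch: `e * e = e` by [[identityNeutrality]], so `e` is an inverse of itself, and [[inverseUniqueness]] allows to conclude.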
+ */ + val inverseOfIdentityIsIdentity = Theorem( + group(G, *) |- inverse(identity(G, *), G, *) === identity(G, *) + ) { + sorry + // assume(group(G, *)) + + // val e = identity(G, *) + + // have(x ∈ G |- ∀(y, (y === inverse(x, G, *)) <=> isInverse(y, x, G, *))) by Tautology.from( + // inverseUniqueness, + // inverse.definition + // ) + // thenHave(x ∈ G |- (e === inverse(x, G, *)) <=> isInverse(e, x, G, *)) by InstantiateForall(e) + // val characterization = have((e === inverse(e, G, *)) <=> isInverse(e, e, G, *)) by Cut(identityInGroup, lastStep of (x -> e)) + + // // Prove that e is an inverse of e + // val satisfaction = have(isInverse(e, e, G, *)) subproof { + // val neutrality = have(op(e, *, e) === e) by Cut(identityInGroup, identityNeutrality of (x -> e)) + + // have((op(e, *, e) === e) |- isNeutral(op(e, *, e), G, *)) by RightSubstEq( + // List((op(e, *, e), e)), + // lambda(z, isNeutral(z, G, *)) + // )(identityIsNeutral) + + // have(isNeutral(op(e, *, e), G, *)) by Cut(neutrality, lastStep) + + // have(e ∈ G /\ isNeutral(op(e, *, e), G, *) /\ isNeutral(op(e, *, e), G, *)) by RightAnd(identityInGroup, lastStep, lastStep) + // have(thesis) by Tautology.from(lastStep, isInverse.definition of (x -> e, y -> e)) + // } + + // have(thesis) by Tautology.from(characterization, satisfaction) + } + + // TODO Direct product group + // TODO Permutation group + + // + // 2. Subgroups + // + + // By convention, this will always refer to the restricted operation. + private val ★ = restrictedFunction(*, cartesianProduct(H, H)) + + /** + * Subgroup --- `H` is a subgroup of `(G, *)` if `H` is a subset of `G`, such that the restriction of `*` to `H` is + * a group law for `H`, i.e. `(H, *_H)` is a group. + * + * We denote `H <= G` for `H` a subgroup of `G`. + */ + val subgroup = DEF(H, G, *) --> group(G, *) /\ subset(H, G) /\ group(H, restrictedFunction(*, cartesianProduct(H, H))) + + /** + * Lemma --- A group is a subgroup of itself, i.e. the subgroup relationship is reflexive. + */ + val groupIsSubgroupOfItself = Theorem( + group(G, *) |- subgroup(G, G, *) + ) { + sorry + // val condition1 = have(group(G, *) |- group(G, *)) by Hypothesis + // val condition2 = have(subset(G, G)) by Restate.from(subsetReflexivity of (x -> G)) + + // // For condition 3, we need to substitute everything using the 3 following equalities: + // // 1. restrictedFunction(*, relationDomain(*)) === * (restrictedFunctionCancellation) + // // 2. relationDomain(*) === cartesianProduct(G, G) (groupOperationDomain) + // // 3. restrictedFunction(*, cartesianProduct(G, G)) === * (derived from 1. and 2.) 
+ + // val substitution = have((group(G, *), restrictedFunction(*, cartesianProduct(G, G)) === *) |- group(G, restrictedFunction(*, cartesianProduct(G, G)))) by RightSubstEq( + // List((restrictedFunction(*, cartesianProduct(G, G)), *)), + // lambda(z, group(G, z)) + // )(condition1) + + // val eq3 = have(group(G, *) |- restrictedFunction(*, cartesianProduct(G, G)) === *) subproof { + // assume(group(G, *)) + // val eq1 = have(restrictedFunction(*, relationDomain(*)) === *) by Cut( + // groupOperationIsFunctional, + // restrictedFunctionCancellation of (f -> *) + // ) + // thenHave((relationDomain(*) === cartesianProduct(G, G)) |- restrictedFunction(*, cartesianProduct(G, G)) === *) by RightSubstEq( + // List((relationDomain(*), cartesianProduct(G, G))), + // lambda(z, restrictedFunction(*, z) === *) + // ) + + // have(thesis) by Cut(groupOperationDomain, lastStep) + // } + + // val condition3 = have(group(G, *) |- group(G, restrictedFunction(*, cartesianProduct(G, G)))) by Cut(eq3, substitution) + + // have(group(G, *) |- group(G, *) /\ subset(G, G) /\ group(G, restrictedFunction(*, cartesianProduct(G, G)))) by RightAnd(condition1, condition2, condition3) + // have(thesis) by Tautology.from(lastStep, subgroup.definition of (G -> G, H -> G)) + } + + /** + * Proper subgroup --- `H` is a proper subgroup of `(G, *)` if `H` is a subgroup of `G` and `H != G`. + */ + val properSubgroup = DEF(H, G, *) --> subgroup(H, G, *) /\ (H =/= G) + + /** + * Lemma --- If `x` and `y` are two elements of the subgroup `H` of `(G, *)`, the pair belongs to the relation domain + * of the parent group's operation `*`. + * + * Analogous to [[groupPairInOperationDomain]], except that the considered relation is different. + */ + val subgroupPairInParentOperationDomain = Lemma( + (subgroup(H, G, *), x ∈ H, y ∈ H) |- pair(x, y) ∈ relationDomain(*) + ) { + assume(subgroup(H, G, *)) + assume(x ∈ H) + assume(y ∈ H) + + have(subset(H, G)) by Tautology.from(subgroup.definition) + have(∀(x, x ∈ H ==> x ∈ G)) by Tautology.from(lastStep, subset.definition of (x -> H, y -> G)) + val subsetDef = thenHave(x ∈ H ==> x ∈ G) by InstantiateForall(x) + + val left = have(x ∈ G) by Tautology.from(subsetDef) + val right = have(y ∈ G) by Tautology.from(subsetDef of (x -> y)) + + have(group(G, *)) by Tautology.from(subgroup.definition) + + have(thesis) by Tautology.from(lastStep, left, right, groupPairInOperationDomain) + } + + /** + * Theorem --- The subgroup operation is exactly the same as in the above group, i.e. if `(G, *)` is a group, `H` a + * subgroup of `G`, then for elements `x, y ∈ H` we have `x ★ y = x * y`, where `★ = *_H`. + */ + val subgroupOperation = Theorem( + (subgroup(H, G, *), x ∈ H, y ∈ H) |- (op(x, ★, y) === op(x, *, y)) + ) { + sorry + // assume(subgroup(H, G, *)) + // val groupG = have(group(G, *)) by Tautology.from(subgroup.definition) + // val groupH = have(group(H, ★)) by Tautology.from(subgroup.definition) + + // have((x ∈ H, y ∈ H) |- pair(x, y) ∈ relationDomain(★)) by Tautology.from( + // groupH, + // groupPairInOperationDomain of (G -> H, * -> ★) + // ) + // have((functional(*), x ∈ H, y ∈ H) |- op(x, ★, y) === op(x, *, y)) by Cut( + // lastStep, + // restrictedFunctionApplication of (f -> *, d -> cartesianProduct(H, H), x -> pair(x, y)) + // ) + // have(thesis) by Tautology.from( + // lastStep, + // groupOperationIsFunctional, + // groupG + // ) + } + + /** + * Lemma --- If `H` is a subgroup of `G`, then `e_H ∈ G`. 
+ */ + val subgroupIdentityInParent = Lemma( + subgroup(H, G, *) |- identity(H, ★) ∈ G + ) { + val identityInH = have(subgroup(H, G, *) |- identity(H, ★) ∈ H) by Tautology.from( + subgroup.definition, + identityInGroup of (G -> H, * -> ★) + ) + + have(subgroup(H, G, *) |- ∀(x, x ∈ H ==> x ∈ G)) by Tautology.from( + subgroup.definition, + subset.definition of (x -> H, y -> G) + ) + thenHave(subgroup(H, G, *) |- identity(H, ★) ∈ H ==> identity(H, ★) ∈ G) by InstantiateForall(identity(H, ★)) + thenHave((subgroup(H, G, *), identity(H, ★) ∈ H) |- identity(H, ★) ∈ G) by Restate + + have(thesis) by Cut(identityInH, lastStep) + } + + /** + * Identity in subgroup --- The identity element `e_H` of a subgroup `H` of `G` is exactly the identity element `e_G` of + * the parent group `(G, *)`. + */ + val subgroupIdentity = Theorem( + subgroup(H, G, *) |- identity(H, ★) === identity(G, *) + ) { + sorry + // val e_G = identity(G, *) + // val e_H = identity(H, ★) + + // val groupG = have(subgroup(H, G, *) |- group(G, *)) by Tautology.from(subgroup.definition) + // val groupH = have(subgroup(H, G, *) |- group(H, ★)) by Tautology.from(subgroup.definition) + + // val subgroupIdentityInH = have(subgroup(H, G, *) |- identity(H, ★) ∈ H) by Tautology.from( + // subgroup.definition, + // identityInGroup of (G -> H, * -> ★) + // ) + + // // 1. e_H ★ e_H = e_H + // val eq1 = have(subgroup(H, G, *) |- op(e_H, ★, e_H) === e_H) subproof { + // have(group(H, ★) |- (op(e_H, ★, e_H) === e_H)) by Cut( + // identityInGroup of (G -> H, * -> ★), + // identityNeutrality of (G -> H, * -> ★, x -> e_H) + // ) + + // have(thesis) by Cut(groupH, lastStep) + // } + + // // 2. e_H * e_H = e_H + // have(subgroup(H, G, *) |- op(e_H, ★, e_H) === op(e_H, *, e_H)) by Cut( + // subgroupIdentityInH, + // subgroupOperation of (x -> e_H, y -> e_H) + // ) + // val eq2 = have(subgroup(H, G, *) |- op(e_H, *, e_H) === e_H) by Equalities(eq1, lastStep) + + // // 3. e_G * e_H = e_H + // val eq3 = have(subgroup(H, G, *) |- op(e_G, *, e_H) === e_H) subproof { + // have((group(G, *), e_H ∈ G) |- op(e_G, *, e_H) === e_H) by Tautology.from(identityNeutrality of (x -> e_H)) + // have((subgroup(H, G, *), e_H ∈ G) |- op(e_G, *, e_H) === e_H) by Cut(groupG, lastStep) + // have(thesis) by Cut(subgroupIdentityInParent, lastStep) + // } + + // // Conclude by right cancellation + // val eq4 = have(subgroup(H, G, *) |- op(e_H, *, e_H) === op(e_G, *, e_H)) by Equalities(eq2, eq3) + // have((group(G, *), e_H ∈ G, e_G ∈ G, op(e_H, *, e_H) === op(e_G, *, e_H)) |- e_H === e_G) by Restate.from( + // rightCancellation of (x -> e_H, y -> e_H, z -> e_G) + // ) + // have((subgroup(H, G, *), e_H ∈ G, e_G ∈ G, op(e_H, *, e_H) === op(e_G, *, e_H)) |- e_H === e_G) by Cut(groupG, lastStep) + // have((subgroup(H, G, *), e_H ∈ G, e_G ∈ G) |- e_H === e_G) by Cut(eq4, lastStep) + + // val finalStep = have((subgroup(H, G, *), e_G ∈ G) |- e_H === e_G) by Cut(subgroupIdentityInParent, lastStep) + + // have(subgroup(H, G, *) |- e_G ∈ G) by Cut(groupG, identityInGroup) + // have(thesis) by Cut(lastStep, finalStep) + } + + /** + * Theorem --- If `H` is a subgroup of `G`, then the inverse is the same as in the parent group. 
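+   * Proof sketch: `x * inverse(x, H, ★) = x ★ inverse(x, H, ★) = e_H = e_G = x * inverse(x, G, *)` by [[subgroupOperation]], [[subgroupIdentity]] and [[inverseCancellation]]; conclude by [[leftCancellation]] in `(G, *)`.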
+ */ + val subgroupInverse = Theorem( + (subgroup(H, G, *), x ∈ H) |- inverse(x, H, ★) === inverse(x, G, *) + ) { + sorry + // assume(subgroup(H, G, *)) + // assume(x ∈ H) + + // have(∀(x, (x ∈ H) ==> (x ∈ G))) by Tautology.from( + // subgroup.definition, + // subset.definition of (x -> H, y -> G) + // ) + // val subsetDef = thenHave((x ∈ H) ==> (x ∈ G)) by InstantiateForall(x) + // val xInG = thenHave(x ∈ G) by Tautology + + // val groupG = have(group(G, *)) by Tautology.from(subgroup.definition) + // val groupH = have(group(H, ★)) by Tautology.from(subgroup.definition) + + // val eG = identity(G, *) + // val eH = identity(H, ★) + + // val inverseHInH = have(inverse(x, H, ★) ∈ H) by Cut(groupH, inverseInGroup of (G -> H, * -> ★)) + // val inverseHInG = have(inverse(x, H, ★) ∈ G) by Tautology.from(inverseHInH, subsetDef of (x -> inverse(x, H, ★))) + + // // 1. x * inverse(x, H, ★) = e_H + // have((inverse(x, H, ★) ∈ H) |- (op(x, ★, inverse(x, H, ★)) === eH)) by Tautology.from( + // groupH, + // inverseCancellation of (G -> H, * -> ★) + // ) + // have((inverse(x, H, ★) ∈ H) |- (op(x, *, inverse(x, H, ★)) === eH)) by Equalities( + // lastStep, + // subgroupOperation of (y -> inverse(x, H, ★)) + // ) + + // val eq1 = have(op(x, *, inverse(x, H, ★)) === eH) by Cut(inverseHInH, lastStep) + + // // 2. e_H = e_G + // val eq2 = have(eH === eG) by Tautology.from(subgroupIdentity) + + // // 3. x * inverse(x, G, *) = e_G + // val eq3 = have(op(x, *, inverse(x, G, *)) === eG) by Tautology.from( + // groupG, + // xInG, + // inverseInGroup, + // inverseCancellation + // ) + + // // 4. x * inverse(x, H, ★) === x * inverse(x, G, *) + // have(op(x, *, inverse(x, H, ★)) === op(x, *, inverse(x, G, *))) by Equalities(eq1, eq2, eq3) + + // // Conclude by left cancellation + // have(thesis) by Tautology.from( + // lastStep, + // groupG, + // xInG, + // inverseHInG, + // inverseInGroup, + // leftCancellation of (y -> inverse(x, H, ★), z -> inverse(x, G, *)) + // ) + } + + // + // 2.1 Main subgroup test + // + // We define several useful lemmas to attack this easy, but long theorem to formalize + // + + private val nonEmpty = H =/= ∅ + private val closedByProducts = ∀(x, ∀(y, (x ∈ H /\ y ∈ H) ==> (op(x, *, y) ∈ H))) + private val closedByInverses = ∀(x, x ∈ H ==> (inverse(x, G, *) ∈ H)) + private val subgroupConditions = nonEmpty /\ closedByProducts /\ closedByInverses + + /** + * Lemma --- Reformulation of the subset definition. + */ + private val subgroupConditionsSubset = Lemma( + (subset(H, G), x ∈ H) |- x ∈ G + ) { + assume(subset(H, G)) + have(∀(x, x ∈ H ==> x ∈ G)) by Tautology.from(subset.definition of (x -> H, y -> G)) + thenHave(x ∈ H ==> x ∈ G) by InstantiateForall(x) + thenHave(x ∈ H |- x ∈ G) by Restate + } + + /** + * Lemma --- The subgroup conditions imply that `relationDomain(★) === cartesianProduct(H, H)`. 
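+   * Indeed, `relationDomain(★) = (H × H) ∩ relationDomain(*) = (H × H) ∩ (G × G) = H × H`, the last step using `H ⊆ G`.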
+ */ + private val subgroupConditionsDomain = Lemma( + (group(G, *), subset(H, G), subgroupConditions) |- relationDomain(★) === cartesianProduct(H, H) + ) { + sorry + // val H2 = cartesianProduct(H, H) + // val G2 = cartesianProduct(G, G) + + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + + // have(relationDomain(★) === (H2 ∩ relationDomain(*))) by Tautology.from(restrictedFunctionDomain of (f -> *, x -> H2)) + // thenHave((relationDomain(*) === G2) |- relationDomain(★) === (H2 ∩ G2)) by RightSubstEq( + // List((relationDomain(*), G2)), + // lambda(z, relationDomain(★) === (H2 ∩ z)) + // ) + // val eq1 = have(relationDomain(★) === (H2 ∩ G2)) by Cut(groupOperationDomain, lastStep) + + // // Prove that (H2 ∩ G2) = H2 + // have(subset(H2, G2)) by Tautology.from(subsetsCartesianProduct of (a -> H, b -> G, c -> H, d -> G)) + // val eq2 = have((H2 ∩ G2) === H2) by Cut( + // lastStep, + // setIntersectionSubset of (x -> H2, y -> G2) + // ) + + // have(thesis) by Equalities(eq1, eq2) + } + + /** + * Lemma --- The subgroup conditions imply that `(x, y)` is in the relation domain of `★`. + * + * Analogous to [[groupPairInOperationDomain]]. + */ + private val subgroupConditionsPairInDomain = Lemma( + (group(G, *), subset(H, G), subgroupConditions, x ∈ H, y ∈ H) |- pair(x, y) ∈ relationDomain(★) + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + // assume(x ∈ H) + // assume(y ∈ H) + + // have(pair(x, y) ∈ cartesianProduct(H, H)) by Tautology.from( + // pairInCartesianProduct of (a -> x, b -> y, x -> H, y -> H) + // ) + // thenHave((relationDomain(★) === cartesianProduct(H, H)) |- pair(x, y) ∈ relationDomain(★)) by RightSubstEq( + // List((relationDomain(★), cartesianProduct(H, H))), + // lambda(z, pair(x, y) ∈ z) + // ) + + // have(thesis) by Cut(subgroupConditionsDomain, lastStep) + } + + /** + * Lemma --- The subgroup conditions imply that `x ★ y = x * y`. + * + * Analogous to [[subgroupOperation]]. + */ + private val subgroupConditionsOperation = Lemma( + (group(G, *), subset(H, G), subgroupConditions, x ∈ H, y ∈ H) |- op(x, ★, y) === op(x, *, y) + ) { + sorry + // have(thesis) by Tautology.from( + // subgroupConditionsPairInDomain, + // groupOperationIsFunctional, + // restrictedFunctionIsFunctionalOver of (f -> *, x -> cartesianProduct(H, H)), + // restrictedFunctionApplication of (f -> *, d -> cartesianProduct(H, H), x -> pair(x, y)) + // ) + } + + /** + * Lemma --- The subgroup conditions imply that `x ★ y ∈ H`. + */ + private val subgroupConditionsProductClosure = Lemma( + (group(G, *), subset(H, G), subgroupConditions, x ∈ H, y ∈ H) |- op(x, ★, y) ∈ H + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + + // have(closedByProducts) by Tautology + // thenHave(∀(y, (x ∈ H /\ y ∈ H) ==> (op(x, *, y) ∈ H))) by InstantiateForall(x) + // thenHave((x ∈ H /\ y ∈ H) ==> (op(x, *, y) ∈ H)) by InstantiateForall(y) + // thenHave((x ∈ H, y ∈ H) |- (op(x, *, y) ∈ H)) by Restate + // thenHave((x ∈ H, y ∈ H, op(x, ★, y) === op(x, *, y)) |- (op(x, ★, y) ∈ H)) by RightSubstEq( + // List((op(x, ★, y), op(x, *, y))), + // lambda(z, z ∈ H) + // ) + + // have(thesis) by Cut(subgroupConditionsOperation, lastStep) + } + + /** + * Lemma --- The subgroup conditions imply that `★` is a binary relation on `H`. 
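+   * More precisely, `★` is shown to be a function from `H × H` to `H`, closure of the codomain being provided by [[closedByProducts]].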
+ */ + private val subgroupConditionsBinaryRelation = Lemma( + (group(G, *), subset(H, G), subgroupConditions) |- binaryOperation(H, ★) + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + + // val H2 = cartesianProduct(H, H) + // val r = variable[Ind] + + // have(∀(t, (t ∈ setOfFunctions(H2, H)) <=> (t ∈ powerSet(cartesianProduct(H2, H)) /\ functionalOver(t, H2)))) by Definition(setOfFunctions, setOfFunctionsUniqueness)(H2, H) + // val setOfFunDef = thenHave((★ ∈ setOfFunctions(H2, H)) <=> (★ ∈ powerSet(cartesianProduct(H2, H)) /\ functionalOver(★, H2))) by InstantiateForall(★) + + // val fun = have(functional(★)) by Tautology.from( + // groupOperationIsFunctional, + // restrictedFunctionIsFunctionalOver of (f -> *, x -> H2) + // ) + // have(functional(★) /\ (relationDomain(★) === H2)) by RightAnd(fun, subgroupConditionsDomain) + // val funOver = have(functionalOver(★, H2)) by Tautology.from(lastStep, functionalOver.definition of (f -> ★, x -> H2)) + + // have(subset(★, cartesianProduct(relationDomain(★), relationRange(★)))) by Tautology.from( + // fun, + // functional.definition of (f -> ★), + // relation.definition of (r -> ★), + // relationImpliesRelationBetweenDomainAndRange of (r -> ★), + // relationBetween.definition of (r -> ★, a -> relationDomain(★), b -> relationRange(★)) + // ) + // thenHave((relationDomain(★) === H2) |- subset(★, cartesianProduct(H2, relationRange(★)))) by RightSubstEq( + // List((relationDomain(★), H2)), + // lambda(z, subset(★, cartesianProduct(z, relationRange(★)))) + // ) + + // val subsetDomRange = have(subset(★, cartesianProduct(H2, relationRange(★)))) by Cut( + // subgroupConditionsDomain, + // lastStep + // ) + + // // Prove that ★ is a subset of H2 x H + // val left = have(subset(H2, H2)) by Tautology.from(subsetReflexivity of (x -> H2)) + // val right = have(subset(relationRange(★), H)) subproof { + // // Use pullback to characterize t + // val pullback = have(t ∈ relationRange(★) |- ∃(x, (x ∈ relationDomain(★)) /\ (app(★, x) === t))) by Tautology.from( + // groupOperationIsFunctional, + // restrictedFunctionIsFunctional of (f -> *, x -> H2), + // inRangeImpliesPullbackExists of (f -> ★, z -> t) + // ) + + // have((x ∈ relationDomain(★)) <=> (x ∈ relationDomain(★))) by Restate + // thenHave((relationDomain(★) === H2) |- (x ∈ relationDomain(★)) <=> (x ∈ H2)) by RightSubstEq( + // List((relationDomain(★), H2)), + // lambda(z, (x ∈ relationDomain(★)) <=> (x ∈ z)) + // ) + // val equiv1 = have((x ∈ relationDomain(★)) <=> (x ∈ H2)) by Cut(subgroupConditionsDomain, lastStep) + // val equiv2 = have((x ∈ H2) <=> ∃(a, ∃(b, (x === pair(a, b)) /\ in(a, H) /\ in(b, H)))) by Tautology.from( + // elemOfCartesianProduct of (t -> x, x -> H, y -> H) + // ) + + // // Use closure by products to show that app(★, x) ∈ H + // have(closedByProducts) by Tautology + // thenHave(∀(y, (a ∈ H /\ y ∈ H) ==> (op(a, *, y) ∈ H))) by InstantiateForall(a) + // thenHave((a ∈ H /\ b ∈ H) ==> (op(a, *, b) ∈ H)) by InstantiateForall(b) + // thenHave((a ∈ H, b ∈ H) |- (op(a, *, b) ∈ H)) by Restate + // thenHave((a ∈ H, b ∈ H, op(a, ★, b) === op(a, *, b)) |- (op(a, ★, b) ∈ H)) by RightSubstEq( + // List((op(a, ★, b), op(a, *, b))), + // lambda(z, z ∈ H) + // ) + + // have((a ∈ H, b ∈ H) |- (op(a, ★, b) ∈ H)) by Cut( + // subgroupConditionsOperation of (x -> a, y -> b), + // lastStep + // ) + // thenHave((x === pair(a, b), a ∈ H, b ∈ H) |- (app(★, x) ∈ H)) by RightSubstEq( + // List((x, pair(a, b))), + // lambda(z, app(★, z) ∈ H) + // ) + // 
thenHave(((x === pair(a, b)) /\ a ∈ H /\ b ∈ H) |- (app(★, x) ∈ H)) by Restate + // thenHave(∃(b, (x === pair(a, b)) /\ a ∈ H /\ b ∈ H) |- (app(★, x) ∈ H)) by LeftExists + // thenHave(∃(a, ∃(b, (x === pair(a, b)) /\ a ∈ H /\ b ∈ H)) |- (app(★, x) ∈ H)) by LeftExists + + // have((x ∈ relationDomain(★)) |- (app(★, x) ∈ H)) by Tautology.from(lastStep, equiv1, equiv2) + // thenHave((x ∈ relationDomain(★), app(★, x) === t) |- (t ∈ H)) by RightSubstEq( + // List((app(★, x), t)), + // lambda(z, z ∈ H) + // ) + // thenHave((x ∈ relationDomain(★) /\ (app(★, x) === t)) |- (t ∈ H)) by Restate + // thenHave(∃(x, x ∈ relationDomain(★) /\ (app(★, x) === t)) |- (t ∈ H)) by LeftExists + + // have(t ∈ relationRange(★) |- t ∈ H) by Cut(pullback, lastStep) + // thenHave(t ∈ relationRange(★) ==> t ∈ H) by Restate + // thenHave(∀(t, t ∈ relationRange(★) ==> t ∈ H)) by RightForall + + // have(thesis) by Tautology.from(lastStep, subset.definition of (x -> relationRange(★), y -> H)) + // } + + // have(subset(cartesianProduct(H2, relationRange(★)), cartesianProduct(H2, H))) by Tautology.from( + // left, + // right, + // subsetsCartesianProduct of (a -> H2, b -> H2, c -> relationRange(★), d -> H) + // ) + // have(subset(★, cartesianProduct(H2, H))) by Tautology.from( + // lastStep, + // subsetDomRange, + // subsetTransitivity of (a -> ★, b -> cartesianProduct(H2, relationRange(★)), c -> cartesianProduct(H2, H)) + // ) + // have(★ ∈ powerSet(cartesianProduct(H2, H))) by Tautology.from( + // lastStep, + // powerSet.definition of (x -> ★, y -> cartesianProduct(H2, H)) + // ) + + // have(★ ∈ powerSet(cartesianProduct(H2, H)) /\ functionalOver(★, H2)) by RightAnd(lastStep, funOver) + + // have(thesis) by Tautology.from( + // lastStep, + // setOfFunDef, + // functionFrom.definition of (f -> ★, x -> H2, y -> H), + // binaryOperation.definition of (G -> H, * -> ★) + // ) + } + + /** + * Lemma --- The subgroup conditions imply associativity on `H`. + * + * This directly follows from associativity on `G` and [[subgroupConditionsOperation]]. 
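+   * Concretely, for `x, y, z ∈ H`: `(x ★ y) ★ z = (x * y) * z = x * (y * z) = x ★ (y ★ z)`.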
+ */ + private val subgroupConditionsAssociativity = Lemma( + (group(G, *), subset(H, G), subgroupConditions) |- associativityAxiom(H, ★) + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + + // have((x ∈ H, y ∈ H, z ∈ H) |- op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z))) subproof { + // assume(x ∈ H) + // assume(y ∈ H) + // assume(z ∈ H) + + // have(op(op(x, *, y), *, z) === op(x, *, op(y, *, z))) by Tautology.from( + // associativity, + // subgroupConditionsSubset, + // subgroupConditionsSubset of (x -> y), + // subgroupConditionsSubset of (x -> z) + // ) + // thenHave((op(x, ★, y) === op(x, *, y), op(y, ★, z) === op(y, *, z)) |- (op(op(x, ★, y), *, z) === op(x, *, op(y, ★, z)))) by RightSubstEq( + // List((op(x, ★, y), op(x, *, y)), (op(y, ★, z), op(y, *, z))), + // lambda(Seq(a, b), op(a, *, z) === op(x, *, b)) + // ) + + // have(op(op(x, ★, y), *, z) === op(x, *, op(y, ★, z))) by Tautology.from( + // lastStep, + // subgroupConditionsOperation, + // subgroupConditionsOperation of (x -> y, y -> z) + // ) + // thenHave((op(op(x, ★, y), ★, z) === op(op(x, ★, y), *, z), op(x, ★, op(y, ★, z)) === op(x, *, op(y, ★, z))) |- (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z)))) by RightSubstEq( + // List((op(op(x, ★, y), ★, z), op(op(x, ★, y), *, z)), (op(x, ★, op(y, ★, z)), op(x, *, op(y, ★, z)))), + // lambda(Seq(a, b), a === b) + // ) + + // have(op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z))) by Tautology.from( + // lastStep, + // subgroupConditionsOperation of (x -> op(x, ★, y), y -> z), + // subgroupConditionsOperation of (x -> x, y -> op(y, ★, z)), + // subgroupConditionsProductClosure, + // subgroupConditionsProductClosure of (x -> y, y -> z) + // ) + // } + + // Reconstruct the axiom in its closed form + // thenHave((x ∈ H, y ∈ H) |- (z ∈ H) ==> (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z)))) by Restate + // thenHave((x ∈ H, y ∈ H) |- ∀(z, (z ∈ H) ==> (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z))))) by RightForall + // thenHave((x ∈ H) |- (y ∈ H) ==> ∀(z, (z ∈ H) ==> (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z))))) by Restate + // thenHave((x ∈ H) |- ∀(y, (y ∈ H) ==> ∀(z, (z ∈ H) ==> (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z)))))) by RightForall + // thenHave((x ∈ H) ==> ∀(y, (y ∈ H) ==> ∀(z, (z ∈ H) ==> (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z)))))) by Restate + // thenHave(∀(x, (x ∈ H) ==> ∀(y, (y ∈ H) ==> ∀(z, (z ∈ H) ==> (op(op(x, ★, y), ★, z) === op(x, ★, op(y, ★, z))))))) by RightForall + + // have(thesis) by Tautology.from(lastStep, associativityAxiom.definition of (G -> H, * -> ★)) + } + + /** + * Lemma --- The subgroup conditions imply the existence of an identity element on `H`. + * + * We show in particular that identity(G, *) is neutral on `H`. + */ + private val subgroupConditionsIdentityExistence = Lemma( + (group(G, *), subset(H, G), subgroupConditions) |- identityExistence(H, ★) + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + + // // We show that for an element x ∈ H: + // // 1. inverse(x) ∈ H [[closedByInverses]] + // // 2. x * inverse(x) ∈ H [[closedByProducts]] + // // 3. x * inverse(x) = identity(G, *) [[inverseCancellation]] + // // 4. identity(G, *) ∈ H Substitution of 3. in 2. + // // 5. isNeutral(identity(G, *), H, ★) [[identityNeutrality]] + // // 6. identityExistence(H, ★) [[identityExistence]] + // // We finally conclude by [[nonEmpty]]. + + // // 1. 
inverse(x) ∈ H + // have(closedByInverses) by Tautology + // thenHave(((x ∈ H) ==> (inverse(x, G, *) ∈ H))) by InstantiateForall(x) + // val step1 = thenHave((x ∈ H) |- (inverse(x, G, *) ∈ H)) by Restate + + // // 2. x * inverse(x) ∈ H + // have(closedByProducts) by Tautology + // thenHave(∀(y, (x ∈ H /\ y ∈ H) ==> (op(x, *, y) ∈ H))) by InstantiateForall(x) + // thenHave((x ∈ H /\ inverse(x, G, *) ∈ H) ==> (op(x, *, inverse(x, G, *)) ∈ H)) by InstantiateForall(inverse(x, G, *)) + // thenHave((x ∈ H, inverse(x, G, *) ∈ H) |- (op(x, *, inverse(x, G, *)) ∈ H)) by Restate + + // val step2 = have((x ∈ H) |- (op(x, *, inverse(x, G, *)) ∈ H)) by Cut(step1, lastStep) + + // // 3. x * inverse(x) = identity(G, *) + // val step3 = have((x ∈ H) |- op(x, *, inverse(x, G, *)) === identity(G, *)) by Tautology.from( + // subgroupConditionsSubset, + // inverseCancellation + // ) + + // // 4. identity(G, *) ∈ H + // have((x ∈ H, op(x, *, inverse(x, G, *)) === identity(G, *)) |- (identity(G, *) ∈ H)) by RightSubstEq( + // List((op(x, *, inverse(x, G, *)), identity(G, *))), + // lambda(z, z ∈ H) + // )(step2) + // val step4 = have((x ∈ H) |- (identity(G, *) ∈ H)) by Cut(step3, lastStep) + + // // 5. isNeutral(identity(G, *), H, ★) + // have((x ∈ H) |- (op(identity(G, *), *, x) === x) /\ (op(x, *, identity(G, *)) === x)) by Tautology.from( + // subgroupConditionsSubset, + // identityNeutrality + // ) + // thenHave( + // (x ∈ H, op(identity(G, *), ★, x) === op(identity(G, *), *, x), op(x, ★, identity(G, *)) === op(x, *, identity(G, *))) |- (op(identity(G, *), ★, x) === x) /\ (op(x, ★, identity(G, *)) === x) + // ) by RightSubstEq( + // List((op(identity(G, *), ★, x), op(identity(G, *), *, x)), (op(x, ★, identity(G, *)), op(x, *, identity(G, *)))), + // lambda(Seq(a, b), (a === x) /\ (b === x)) + // ) + + // have(x ∈ H |- (op(identity(G, *), ★, x) === x) /\ (op(x, ★, identity(G, *)) === x)) by Tautology.from( + // lastStep, + // step4, + // subgroupConditionsOperation of (x -> identity(G, *), y -> x), + // subgroupConditionsOperation of (x -> x, y -> identity(G, *)) + // ) + + // thenHave((x ∈ H) ==> (op(identity(G, *), ★, x) === x) /\ (op(x, ★, identity(G, *)) === x)) by Restate + // thenHave(∀(x, (x ∈ H) ==> (op(identity(G, *), ★, x) === x) /\ (op(x, ★, identity(G, *)) === x))) by RightForall + // val step5 = have((x ∈ H) |- isNeutral(identity(G, *), H, ★)) by Tautology.from( + // lastStep, + // step4, + // isNeutral.definition of (e -> identity(G, *), G -> H, * -> ★) + // ) + + // // 6. identityExistence(H, ★) + // thenHave((x ∈ H) |- ∃(e, isNeutral(e, H, ★))) by RightExists + // val step6 = have((x ∈ H) |- identityExistence(H, ★)) by Tautology.from(lastStep, identityExistence.definition of (G -> H, * -> ★)) + + // // Conclude by [[nonEmpty]] + // thenHave(∃(x, x ∈ H) |- identityExistence(H, ★)) by LeftExists + + // have(thesis) by Tautology.from(lastStep, nonEmptySetHasElement of (x -> H)) + } + + /** + * Lemma --- The subgroup conditions imply that for all elements `x` in `H`, there exists an inverse in `H`. 
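+   * The witness for `x ∈ H` is `inverse(x, G, *)`: it lies in `H` by [[closedByInverses]], and `x ★ inverse(x, G, *)` and `inverse(x, G, *) ★ x` are neutral in `H` because they are neutral in `G`.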
+ */ + private val subgroupConditionsInverseExistence = Lemma( + (group(G, *), subset(H, G), subgroupConditions) |- inverseExistence(H, ★) + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + // assume(subgroupConditions) + + // val i = inverse(x, G, *) + + // have(closedByInverses) by Tautology + // thenHave(x ∈ H ==> i ∈ H) by InstantiateForall(x) + // val inverseInH = thenHave(x ∈ H |- i ∈ H) by Restate + + // Show that a neutral element of G is also neutral in H + // val neutralityInheritance = have((e ∈ H, isNeutral(e, G, *)) |- isNeutral(e, H, ★)) subproof { + // assume(isNeutral(e, G, *)) + // have(∀(x, (x ∈ G) ==> ((op(e, *, x) === x) /\ (op(x, *, e) === x)))) by Tautology.from(isNeutral.definition) + // thenHave((x ∈ G) ==> ((op(e, *, x) === x) /\ (op(x, *, e) === x))) by InstantiateForall(x) + // thenHave(x ∈ G |- (op(e, *, x) === x) /\ (op(x, *, e) === x)) by Restate + + // have(x ∈ H |- (op(e, *, x) === x) /\ (op(x, *, e) === x)) by Cut(subgroupConditionsSubset, lastStep) + // thenHave((x ∈ H, op(e, ★, x) === op(e, *, x), op(x, ★, e) === op(x, *, e)) |- (op(e, ★, x) === x) /\ (op(x, ★, e) === x)) by RightSubstEq( + // List((op(e, ★, x), op(e, *, x)), (op(x, ★, e), op(x, *, e))), + // lambda(Seq(a, b), (a === x) /\ (b === x)) + // ) + + // have((x ∈ H, e ∈ H) |- (op(e, ★, x) === x) /\ (op(x, ★, e) === x)) by Tautology.from( + // lastStep, + // subgroupConditionsOperation of (x -> e, y -> x), + // subgroupConditionsOperation of (x -> x, y -> e) + // ) + // thenHave(e ∈ H |- (x ∈ H) ==> (op(e, ★, x) === x) /\ (op(x, ★, e) === x)) by Restate + // thenHave(e ∈ H |- ∀(x, (x ∈ H) ==> (op(e, ★, x) === x) /\ (op(x, ★, e) === x))) by RightForall + + // have(e ∈ H |- isNeutral(e, H, ★)) by Tautology.from(lastStep, isNeutral.definition of (G -> H, * -> ★)) + // } + + // Show that i is neutral in H + // have(x ∈ H |- isNeutral(op(x, *, i), G, *) /\ isNeutral(op(i, *, x), G, *)) by Tautology.from( + // subgroupConditionsSubset, + // inverseIsInverse, + // isInverse.definition of (y -> inverse(x, G, *)) + // ) + // thenHave((x ∈ H, op(x, ★, i) === op(x, *, i), op(i, ★, x) === op(i, *, x)) |- isNeutral(op(x, ★, i), G, *) /\ isNeutral(op(i, ★, x), G, *)) by RightSubstEq( + // List((op(x, ★, i), op(x, *, i)), (op(i, ★, x), op(i, *, x))), + // lambda(Seq(a, b), isNeutral(a, G, *) /\ isNeutral(b, G, *)) + // ) + + // have((x ∈ H, i ∈ H) |- isNeutral(op(x, ★, i), G, *) /\ isNeutral(op(i, ★, x), G, *)) by Tautology.from( + // lastStep, + // subgroupConditionsOperation of (x -> x, y -> i), + // subgroupConditionsOperation of (x -> i, y -> x) + // ) + + // have((x ∈ H, i ∈ H) |- isNeutral(op(x, ★, i), H, ★) /\ isNeutral(op(i, ★, x), H, ★)) by Tautology.from( + // lastStep, + // neutralityInheritance of (e -> op(x, ★, i)), + // neutralityInheritance of (e -> op(i, ★, x)), + // subgroupConditionsProductClosure of (x -> x, y -> i), + // subgroupConditionsProductClosure of (x -> i, y -> x) + // ) + + // have(x ∈ H |- (i ∈ H) /\ isNeutral(op(x, ★, i), H, ★) /\ isNeutral(op(i, ★, x), H, ★)) by Tautology.from(inverseInH, lastStep) + // have(x ∈ H |- isInverse(i, x, H, ★)) by Tautology.from(lastStep, isInverse.definition of (y -> i, G -> H, * -> ★)) + // thenHave(x ∈ H |- ∃(y, isInverse(y, x, H, ★))) by RightExists + // thenHave(x ∈ H ==> ∃(y, isInverse(y, x, H, ★))) by Restate + // thenHave(∀(x, x ∈ H ==> ∃(y, isInverse(y, x, H, ★)))) by RightForall + + // have(thesis) by Tautology.from(lastStep, inverseExistence.definition of (G -> H, * -> ★)) + } + + /** + * Theorem (Main subgroup test) --- A 
subset `H ⊆ G` of a group `(G, *)` is a subgroup if and only if: + * 1. `H` is non-empty, + * 2. `H` is closed by products, and + * 3. `H` is closed by inversion. + * + * It is often easier to prove the 3 conditions independently than using the definition directly. + * + * Note that in the case where H is finite, conditions 1 and 2 are sufficient. + */ + val subgroupTest = Theorem( + (group(G, *), subset(H, G)) |- (subgroup(H, G, *) <=> subgroupConditions) + ) { + sorry + // assume(group(G, *)) + // assume(subset(H, G)) + + // // The forward direction follow directly: + // // 1. nonEmpty --> [[groupNonEmpty]] + // // 2. closedByProducts --> [[subgroupOperation]] and [[groupIsClosedByProduct]] + // // 3. closedByInverses --> [[subgroupInverse]] and [[inverseInGroup]] + // have(subgroup(H, G, *) |- subgroupConditions) subproof { + // assume(subgroup(H, G, *)) + // val groupH = have(group(H, ★)) by Tautology.from(subgroup.definition) + + // val condition1 = have(nonEmpty) by Cut(groupH, groupNonEmpty of (G -> H, * -> ★)) + + // have((x ∈ H, y ∈ H) |- op(x, ★, y) ∈ H) by Cut(groupH, groupIsClosedByProduct of (G -> H, * -> ★)) + // thenHave((x ∈ H, y ∈ H, op(x, ★, y) === op(x, *, y)) |- op(x, *, y) ∈ H) by RightSubstEq( + // List((op(x, ★, y), op(x, *, y))), + // lambda(z, z ∈ H) + // ) + + // have((x ∈ H, y ∈ H) |- op(x, *, y) ∈ H) by Cut(subgroupOperation, lastStep) + // thenHave((x ∈ H /\ y ∈ H) ==> (op(x, *, y) ∈ H)) by Restate + // thenHave(∀(y, (x ∈ H /\ y ∈ H) ==> (op(x, *, y) ∈ H))) by RightForall + // val condition2 = thenHave(closedByProducts) by RightForall + + // have((x ∈ H) |- (inverse(x, H, ★) ∈ H)) by Cut(groupH, inverseInGroup of (G -> H, * -> ★)) + // thenHave((x ∈ H, inverse(x, H, ★) === inverse(x, G, *)) |- inverse(x, G, *) ∈ H) by RightSubstEq( + // List((inverse(x, H, ★), inverse(x, G, *))), + // lambda(z, z ∈ H) + // ) + + // have((x ∈ H) |- (inverse(x, G, *) ∈ H)) by Cut(subgroupInverse, lastStep) + // thenHave((x ∈ H) ==> (inverse(x, G, *) ∈ H)) by Restate + // val condition3 = thenHave(closedByInverses) by RightForall + + // have(subgroupConditions) by RightAnd(condition1, condition2, condition3) + // } + // val forward = thenHave(subgroup(H, G, *) ==> subgroupConditions) by Restate + + // // For the backward direction, we must prove that the conditions make (H, ★) satisfy the axioms of a group: + // // 1. Closure by products (i.e. ★'s codomain is H): [[closedByProducts]] + // // 2. Associativity: follows from G's associativity + // // 3. Identity existence: follows from [[nonEmpty]], [[closedByProducts]] and [[closedByInverses]] + // // 4. Inverse existence: [[closedByInverse]] + // // + // // This direction is quite painful to prove. Each step is presented in its own lemma for easier legibility. 
+ // have(subgroupConditions |- subgroup(H, G, *)) subproof { + // assume(subgroupConditions) + // have(binaryOperation(H, ★) /\ associativityAxiom(H, ★) /\ identityExistence(H, ★) /\ inverseExistence(H, ★)) by RightAnd( + // subgroupConditionsBinaryRelation, + // subgroupConditionsAssociativity, + // subgroupConditionsIdentityExistence, + // subgroupConditionsInverseExistence + // ) + // have(group(H, ★)) by Tautology.from(lastStep, group.definition of (G -> H, * -> ★)) + // thenHave(group(G, *) /\ subset(H, G) /\ group(H, ★)) by Tautology + // have(thesis) by Tautology.from(lastStep, subgroup.definition) + // } + // val backward = thenHave(subgroupConditions ==> subgroup(H, G, *)) by Restate + + // have(thesis) by RightIff(forward, backward) + } + + // TODO Trivial subgroup + + // + // 3. Homomorphisms + // + + // Extra group composition law + val -* = variable[Ind] + + /** + * Definition --- A group homomorphism is a mapping `f: G -> H` between structures `G` and `H` equipped with binary operations `*` and `**` respectively, + * such that for all `x, y ∈ G`, we have `f(x * y) = f(x) ** f(y)`. + * + * In the following, "homomorphism" always stands for "group homomorphism", i.e. `(G, *)` and `(H, **)` are groups. + */ + val homomorphism = DEF(f, G, *, H, -*) --> group(G, *) /\ group(H, -*) /\ functionFrom(f, G, H) /\ ∀(x, x ∈ G ==> ∀(y, y ∈ G ==> (app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y))))) + + /** + * Lemma --- Practical reformulation of the homomorphism definition. + */ + val homomorphismApplication = Lemma( + (homomorphism(f, G, *, H, -*), x ∈ G, y ∈ G) |- app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y)) + ) { + assume(homomorphism(f, G, *, H, -*)) + have(∀(x, x ∈ G ==> ∀(y, y ∈ G ==> (app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y)))))) by Tautology.from(homomorphism.definition) + thenHave(x ∈ G ==> ∀(y, y ∈ G ==> (app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y))))) by InstantiateForall(x) + thenHave((x ∈ G) |- ∀(y, y ∈ G ==> (app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y))))) by Restate + thenHave((x ∈ G) |- y ∈ G ==> (app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y)))) by InstantiateForall(y) + thenHave(thesis) by Restate + } + + /** + * Lemma --- If `f` is a homomorphism, then `f(x) ∈ H` for all `x ∈ G`. + */ + private val homomorphismAppInH = Lemma( + (homomorphism(f, G, *, H, -*), x ∈ G) |- app(f, x) ∈ H + ) { + sorry + // have(homomorphism(f, G, *, H, -*) |- functionFrom(f, G, H)) by Tautology.from(homomorphism.definition) + // have(thesis) by Cut( + // lastStep, + // functionAppInCodomain of (VariableLabel("t") -> x, VariableLabel("x") -> G, y -> H) + // ) + } + + /** + * Theorem --- If `f` is a group homomorphism between `G` and `H`, then `f(e_G) = e_H`. + */ + val homomorphismMapsIdentityToIdentity = Theorem( + homomorphism(f, G, *, H, -*) |- app(f, identity(G, *)) === identity(H, -*) + ) { + sorry + // val e = identity(G, *) + + // val groupG = have(homomorphism(f, G, *, H, -*) |- group(G, *)) by Tautology.from(homomorphism.definition) + // val groupH = have(homomorphism(f, G, *, H, -*) |- group(H, -*)) by Tautology.from(homomorphism.definition) + + // val identityInG = have(homomorphism(f, G, *, H, -*) |- e ∈ G) by Cut(groupG, identityInGroup) + // val appInH = have(homomorphism(f, G, *, H, -*) |- app(f, e) ∈ H) by Cut(identityInG, homomorphismAppInH of (x -> e)) + + // // 0.
e * e = e (to apply substitution) + // have(group(G, *) |- op(e, *, e) === e) by Cut( + // identityInGroup, + // identityIdempotence of (x -> e) + // ) + // val eq0 = have(homomorphism(f, G, *, H, -*) |- op(e, *, e) === e) by Cut(groupG, lastStep) + + // // 1. f(e * e) = f(e) + // have(app(f, e) === app(f, e)) by RightRefl + // thenHave(op(e, *, e) === e |- app(f, op(e, *, e)) === app(f, e)) by RightSubstEq( + // List((op(e, *, e), e)), + // lambda(z, app(f, z) === app(f, e)) + // ) + // val eq1 = have(homomorphism(f, G, *, H, -*) |- app(f, op(e, *, e)) === app(f, e)) by Cut(eq0, lastStep) + + // // 2. f(e * e) = f(e) ** f(e) + // val eq2 = have(homomorphism(f, G, *, H, -*) |- app(f, op(e, *, e)) === op(app(f, e), -*, app(f, e))) by Cut( + // identityInG, + // homomorphismApplication of (x -> e, y -> e) + // ) + + // // 3. f(e) ** f(e) = f(e) + // val eq3 = have(homomorphism(f, G, *, H, -*) |- op(app(f, e), -*, app(f, e)) === app(f, e)) by Equalities(eq1, eq2) + + // // Conclude by idempotence + // have((homomorphism(f, G, *, H, -*), app(f, e) ∈ H) |- (op(app(f, e), -*, app(f, e)) === app(f, e)) <=> (app(f, e) === identity(H, -*))) by Cut( + // groupH, + // identityIdempotence of (x -> app(f, e), G -> H, * -> -*) + // ) + // have(homomorphism(f, G, *, H, -*) |- (op(app(f, e), -*, app(f, e)) === app(f, e)) <=> (app(f, e) === identity(H, -*))) by Cut( + // appInH, + // lastStep + // ) + + // have(thesis) by Tautology.from(lastStep, eq3) + } + + /** + * Theorem --- If `f: G -> H` is a group homomorphism, then `f(inverse(x, G, *)) = inverse(f(x), H, **)`. + */ + val homomorphismMapsInverseToInverse = Theorem( + (homomorphism(f, G, *, H, -*), x ∈ G) |- app(f, inverse(x, G, *)) === inverse(app(f, x), H, -*) + ) { + sorry + // assume(homomorphism(f, G, *, H, -*)) + // assume(x ∈ G) + + // val groupG = have(group(G, *)) by Tautology.from(homomorphism.definition) + // val groupH = have(group(H, -*)) by Tautology.from(homomorphism.definition) + + // val eG = identity(G, *) + // val eH = identity(H, -*) + // val i = inverse(x, G, *) + // val iInG = have(i ∈ G) by Cut(groupG, inverseInGroup) + + // // 1. f(x * inverse(x)) = f(x) f(inverse(x)) + // val eq1 = have(app(f, op(x, *, i)) === op(app(f, x), -*, app(f, i))) by Cut( + // iInG, + // homomorphismApplication of (y -> i) + // ) + + // // 2. f(x * inverse(x)) = f(e) + // val cancellation = have(op(x, *, i) === eG) by Tautology.from( + // groupG, + // inverseCancellation + // ) + + // have(app(f, op(x, *, i)) === app(f, op(x, *, i))) by RightRefl + // thenHave((op(x, *, i) === eG) |- (app(f, op(x, *, i)) === app(f, eG))) by RightSubstEq( + // List((op(x, *, i), eG)), + // lambda(z, app(f, op(x, *, i)) === app(f, z)) + // ) + + // val eq2 = have(app(f, op(x, *, i)) === app(f, eG)) by Cut(cancellation, lastStep) + + // // 3. f(e) = e' + // val eq3 = have(app(f, eG) === eH) by Tautology.from(homomorphismMapsIdentityToIdentity) + + // // 4. 
f(x)f(inverse(x)) = e' + // val eq4 = have(op(app(f, x), -*, app(f, i)) === eH) by Equalities(eq1, eq2, eq3) + + // // Conclude + // val conclusion = have((app(f, i) ∈ H) |- (app(f, i) === inverse(app(f, x), H, -*))) by Tautology.from( + // groupH, + // inverseTest of (G -> H, * -> -*, x -> app(f, x), y -> app(f, i)), + // eq4, + // homomorphismAppInH + // ) + // have(app(f, i) ∈ H) by Cut(iInG, homomorphismAppInH of (x -> i)) + + // have(thesis) by Cut(lastStep, conclusion) + } + + // TODO Homomorphism composition once we have function composition + + /** + * Kernel uniqueness --- The kernel of a homomorphism is well-defined. + */ + val kernelUniqueness = Theorem( + homomorphism(f, G, *, H, -*) |- ∃!(z, ∀(t, (t ∈ z) <=> (t ∈ G /\ (app(f, t) === identity(H, -*))))) + ) { + sorry + // // We apply the comprehension axiom here. + // // It might seem odd that the homomorphism assumption is not needed for the set to be defined, + // // but remember that [[app]] and [[identity]] default to the empty set when the assumptions are not met. + // // We add the assumption of `f` being a homomorphism to discard any value when the assumptions do not hold. + // have(∃!(z, ∀(t, (t ∈ z) <=> (t ∈ G /\ (app(f, t) === identity(H, -*)))))) by UniqueComprehension( + // G, + // lambda(Seq(t, G), app(f, t) === identity(H, -*)) + // ) + // thenHave(thesis) by Weakening + } + + /** + * Kernel --- The kernel of a homomorphism `f: G -> H` is the set of elements `t ∈ G` such that `f(t) = e_H`. + */ + val kernel = DEF(f, G, *, H, -*) --> TheConditional(z, ∀(t, (t ∈ z) <=> (t ∈ G /\ (app(f, t) === identity(H, -*)))))(kernelUniqueness) + + // Shortcut alias + private val Ker = kernel(f, G, *, H, -*) + + /** + * Lemma --- Reformulation of the kernel definition. + */ + private val kernelDef = Lemma( + homomorphism(f, G, *, H, -*) |- (x ∈ Ker) <=> (x ∈ G /\ (app(f, x) === identity(H, -*))) + ) { + sorry + // assume(homomorphism(f, G, *, H, -*)) + // have(∀(t, (t ∈ Ker) <=> (t ∈ G /\ (app(f, t) === identity(H, -*))))) by Definition(kernel, kernelUniqueness)(f, G, *, H, -*) + // thenHave(thesis) by InstantiateForall(x) + } + + /** + * Lemma --- The kernel is closed by products, i.e. if `x, y ∈ K`, then `x * y ∈ K`. + */ + val kernelIsClosedByProducts = Lemma( + (homomorphism(f, G, *, H, -*), x ∈ Ker, y ∈ Ker) |- op(x, *, y) ∈ Ker + ) { + sorry + // assume(homomorphism(f, G, *, H, -*)) + // assume(x ∈ Ker) + // assume(y ∈ Ker) + + // val elemInG = have(x ∈ G) by Tautology.from(kernelDef) + + // val groupG = have(group(G, *)) by Tautology.from(homomorphism.definition) + // val groupH = have(group(H, -*)) by Tautology.from(homomorphism.definition) + + // val e = identity(H, -*) + // val eInH = have(e ∈ H) by Cut(groupH, identityInGroup of (G -> H, * -> -*)) + + // // 1. f(x) ** f(y) = f(x * y) + // val eq1 = have(app(f, op(x, *, y)) === op(app(f, x), -*, app(f, y))) by Tautology.from( + // homomorphismApplication, + // elemInG, + // elemInG of (x -> y) + // ) + + // // 2. 
f(x) ** f(y) = e ** e + // val appValue = have(app(f, x) === e) by Tautology.from(kernelDef) + // have(op(app(f, x), -*, app(f, y)) === op(app(f, x), -*, app(f, y))) by RightRefl + // thenHave((app(f, x) === e, app(f, y) === e) |- op(app(f, x), -*, app(f, y)) === op(e, -*, e)) by RightSubstEq( + // List((app(f, x), e), (app(f, y), e)), + // lambda(Seq(a, b), op(app(f, x), -*, app(f, y)) === op(a, -*, b)) + // ) + + // val eq2 = have(op(app(f, x), -*, app(f, y)) === op(e, -*, e)) by Tautology.from( + // lastStep, + // appValue, + // appValue of (x -> y) + // ) + + // // 3. e ** e = e + // val eq3 = have(op(e, -*, e) === e) by Tautology.from( + // identityNeutrality of (G -> H, * -> -*, x -> e), + // groupH, + // eInH + // ) + + // // 4. f(x * y) = e + // val eq4 = have(app(f, op(x, *, y)) === e) by Equalities(eq1, eq2, eq3) + + // // Conclude that x * y ∈ K + // have(op(x, *, y) ∈ G) by Tautology.from( + // groupG, + // elemInG, + // elemInG of (x -> y), + // groupIsClosedByProduct + // ) + + // have(op(x, *, y) ∈ G /\ (app(f, op(x, *, y)) === e)) by RightAnd(lastStep, eq4) + // have(thesis) by Tautology.from(lastStep, kernelDef of (x -> op(x, *, y))) + } + + /** + * Lemma --- The kernel is closed by inversion, i.e. if `x ∈ K` then `inverse(x, G, *) ∈ K`. + */ + val kernelIsClosedByInversion = Lemma( + (homomorphism(f, G, *, H, -*), x ∈ Ker) |- inverse(x, G, *) ∈ Ker + ) { + sorry + // assume(homomorphism(f, G, *, H, -*)) + // assume(x ∈ Ker) + + // val groupG = have(group(G, *)) by Tautology.from(homomorphism.definition) + // val groupH = have(group(H, -*)) by Tautology.from(homomorphism.definition) + // val elemInG = have(x ∈ G) by Tautology.from(kernelDef) + + // val e = identity(H, -*) + // val appValue = have(app(f, x) === e) by Tautology.from(kernelDef) + + // // 1. f(inverse(x)) = inverse(f(x)) = inverse(e) + // have(app(f, inverse(x, G, *)) === inverse(app(f, x), H, -*)) by Tautology.from( + // homomorphismMapsInverseToInverse, + // elemInG + // ) + // thenHave((app(f, x) === e) |- (app(f, inverse(x, G, *)) === inverse(e, H, -*))) by RightSubstEq( + // List((app(f, x), e)), + // lambda(z, app(f, inverse(x, G, *)) === inverse(z, H, -*)) + // ) + + // val eq1 = have(app(f, inverse(x, G, *)) === inverse(e, H, -*)) by Cut(appValue, lastStep) + + // // 2. inverse(e) = e + // val eq2 = have(inverse(e, H, -*) === e) by Cut(groupH, inverseOfIdentityIsIdentity of (G -> H, * -> -*)) + + // // 3. Conclude + // val eq3 = have(app(f, inverse(x, G, *)) === e) by Equalities(eq1, eq2) + // have(inverse(x, G, *) ∈ G) by Tautology.from( + // groupG, + // elemInG, + // inverseInGroup + // ) + + // have((inverse(x, G, *) ∈ G) /\ (app(f, inverse(x, G, *)) === e)) by RightAnd(lastStep, eq3) + + // have(thesis) by Tautology.from(lastStep, kernelDef of (x -> inverse(x, G, *))) + } + + /** + * Theorem --- The kernel of a homomorphism `f: G -> H` is a subgroup of `G`. + */ + val kernelIsSubgroup = Theorem( + homomorphism(f, G, *, H, -*) |- subgroup(kernel(f, G, *, H, -*), G, *) + ) { + assume(homomorphism(f, G, *, H, -*)) + val groupG = have(group(G, *)) by Tautology.from(homomorphism.definition) + + // We show that the kernel satisfies all requirements of [[subgroupTest]] + have((x ∈ Ker) ==> (x ∈ G)) by Tautology.from(kernelDef) + thenHave(∀(x, x ∈ Ker ==> x ∈ G)) by RightForall + val kernelIsSubset = have(subset(Ker, G)) by Tautology.from(lastStep, subsetAxiom of (x -> Ker, y -> G)) + + // 1. 
kernel != ∅ + have(identity(G, *) ∈ G) by Cut(groupG, identityInGroup) + have(identity(G, *) ∈ G /\ (app(f, identity(G, *)) === identity(H, -*))) by RightAnd( + lastStep, + homomorphismMapsIdentityToIdentity + ) + have(identity(G, *) ∈ Ker) by Tautology.from( + lastStep, + kernelDef of (x -> identity(G, *)) + ) + val condition1 = have(Ker =/= ∅) by Cut(lastStep, setWithElementNonEmpty of (y -> identity(G, *), x -> Ker)) + + // 2. The kernel is closed by products + have((x ∈ Ker /\ y ∈ Ker) ==> op(x, *, y) ∈ Ker) by Restate.from(kernelIsClosedByProducts) + thenHave(∀(y, (x ∈ Ker /\ y ∈ Ker) ==> op(x, *, y) ∈ Ker)) by RightForall + val condition2 = thenHave(∀(x, ∀(y, (x ∈ Ker /\ y ∈ Ker) ==> op(x, *, y) ∈ Ker))) by RightForall + + // 3. The kernel is closed by inversion + have((x ∈ Ker) ==> (inverse(x, G, *) ∈ Ker)) by Restate.from(kernelIsClosedByInversion) + val condition3 = thenHave(∀(x, (x ∈ Ker) ==> (inverse(x, G, *) ∈ Ker))) by RightForall + + // Conclude + have((Ker =/= ∅) /\ ∀(x, ∀(y, (x ∈ Ker /\ y ∈ Ker) ==> op(x, *, y) ∈ Ker)) /\ ∀(x, (x ∈ Ker) ==> (inverse(x, G, *) ∈ Ker))) by RightAnd( + condition1, + condition2, + condition3 + ) + + have(subgroup(Ker, G, *)) by Tautology.from( + lastStep, + subgroupTest of (H -> Ker), + groupG, + kernelIsSubset + ) + } + + // TODO Kernel injectivity + // TODO Image is subgroup + + /** + * Isomorphism --- An isomorphism `f: G -> H` is a bijective homomorphism. + * + * In some sense, isomorphic groups are equivalent, up to relabelling their elements. + */ + // val isomorphism = DEF(f, G, *, H, -*) --> homomorphism(f, G, *, H, -*) /\ bijective(f, G, H) + + // /** + // * Automorphism --- An automorphism is an isomorphism from a group to itself. + // */ + // val automorphism = DEF(f, G, *) --> isomorphism(f, G, *, G, *) +} + +*/ \ No newline at end of file diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/Comprehension.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/Comprehension.scala new file mode 100644 index 000000000..b0d2392ea --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/Comprehension.scala @@ -0,0 +1,33 @@ +package lisa.maths.settheory + +import lisa.automation.Substitution + +object Comprehension extends lisa.Main: + + val x = variable[Ind] + val y = variable[Ind] + val z = variable[Ind] + val t = variable[Ind] + val s = variable[Ind] + + val filter = DEF(lambda(t, lambda(φ, ε(s, ∀(x, (x ∈ s) <=> (x ∈ t /\ φ(x))))))) + + println(filter.definition.statement) + + private val comprehension: filter.type = filter + + extension (t: Expr[Ind]) + def filter(predicate: Expr[Ind >>: Prop]): Expr[Ind] = + comprehension(t)(predicate) + + val existence = Theorem(∃(s, ∀(x, (x ∈ s) <=> (x ∈ t /\ φ(x))))): + have(thesis) by Restate.from(comprehensionSchema of (z := t)) + + val definition: THM = Theorem(∀(x, x ∈ s.filter(φ) <=> (x ∈ s /\ φ(x)))): + have(∀(x, x ∈ y <=> (x ∈ s /\ φ(x))) |- ∀(x, x ∈ y <=> (x ∈ s /\ φ(x)))) by Hypothesis + thenHave(∀(x, x ∈ y <=> (x ∈ s /\ φ(x))) |- ∀(x, x ∈ ε(t, ∀(x, x ∈ t <=> (x ∈ s /\ φ(x)))) <=> (x ∈ s /\ φ(x)))) by RightEpsilon + thenHave(∀(x, x ∈ y <=> (x ∈ s /\ φ(x))) |- ∀(x, x ∈ s.filter(φ) <=> (x ∈ s /\ φ(x)))) by Substitution.Apply(filter.definition of (t := s)) + thenHave(∃(y, ∀(x, x ∈ y <=> (x ∈ s /\ φ(x)))) |- ∀(x, x ∈ s.filter(φ) <=> (x ∈ s /\ φ(x)))) by LeftExists + have(thesis) by Cut.withParameters(∃(t, ∀(x, (x ∈ t) <=> (x ∈ s /\ φ(x)))))(existence of (t := s), lastStep) + +end Comprehension diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/Equality.scala 
b/lisa-sets2/src/main/scala/lisa/maths/settheory/Equality.scala new file mode 100644 index 000000000..fdbc5804b --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/Equality.scala @@ -0,0 +1,13 @@ +package lisa.maths.settheory + +object Equality extends lisa.Main: + + val x = variable[Ind] + val y = variable[Ind] + val z = variable[Ind] + + val transitivity = Theorem((x === y, y === z) |- (x === z)): + have((x === y, y === z) |- (x === y)) by Hypothesis + thenHave(thesis) by RightSubstEq.withParameters(Seq(y -> z), Seq(y) -> (x === y)) + +end Equality diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/Extensionality.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/Extensionality.scala new file mode 100644 index 000000000..31921eefd --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/Extensionality.scala @@ -0,0 +1,49 @@ +package lisa.maths.settheory + +import lisa.utils.prooflib.ProofTacticLib.ProofTactic +import lisa.utils.prooflib.Library +import lisa.SetTheoryLibrary +import lisa.utils.prooflib.ProofTacticLib.ProofFactSequentTactic + +object Extensionality extends lisa.Main: + + private val s = variable[Ind] + private val x = variable[Ind] + private val y = variable[Ind] + private val z = variable[Ind] + private val P = variable[Ind >>: Prop] + private val Q = variable[Ind >>: Ind >>: Prop] + + val implied = Theorem(forall(z, z ∈ x <=> z ∈ y) |- (x === y)): + have(thesis) by Weakening(extensionalityAxiom) + + /** + * Given that z ∈ x <=> z ∈ y, prove that x = y if z is free. + * + * Γ ⊢ z ∈ x <=> z ∈ y, Δ + * ------------------------ z not in Γ + * Γ ⊢ x === y, Δ + */ + def tactic(using proof: Proof)(premiseStep: proof.Fact)(conclusion: Sequent) = + val premise = proof.getSequent(premiseStep) + val boundVars = premise.left.flatMap(_.freeVars) + inline def valid(z1: Variable[Ind], z2: Variable[Ind], x: Expr[Ind], y: Expr[Ind]) = + z1 == z2 && !boundVars.contains(z1) && conclusion.right.exists(isSame(_, x === y)) + val pivot: Option[(Variable[Ind], Expr[Ind], Expr[Ind])] = premise.right.collectFirst: + case (<=> #@ (∈ #@ (z1: Variable[Ind]) #@ (x: Expr[Ind])) #@ (∈ #@ (z2: Variable[Ind]) #@ (y: Expr[Ind]))) if valid(z1, z2, x, y) => (z1, x, y) + + pivot match + case None => + proof.InvalidProofTactic("Could not find a formula of the form z ∈ x <=> z ∈ y in the RHS of the premise.") + case Some((z, xe, ye)) => + TacticSubproof: + val pivot = z ∈ xe <=> z ∈ ye + val qpivot = forall(z, pivot) + val eq = xe === ye + val baseSequent = premise ->> pivot + val implication = proof.InstantiatedFact(implied, Seq(x := xe, y := ye)) + + have(baseSequent +>> qpivot) by RightForall.withParameters(pivot, z)(premiseStep) + have(baseSequent +>> eq) by Cut.withParameters(qpivot)(lastStep, implication) + +end Extensionality diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/Pair.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/Pair.scala new file mode 100644 index 000000000..39ad4f53c --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/Pair.scala @@ -0,0 +1,37 @@ +package lisa.maths.settheory + +import lisa.maths.settheory.UnorderedPair.* +import lisa.maths.settheory.Singleton.* + +object Pair extends lisa.Main: + + private val s = variable[Ind] + private val x = variable[Ind] + private val y = variable[Ind] + private val z = variable[Ind] + private val p = variable[Ind] + private val P = variable[Ind >>: Prop] + private val Q = variable[Ind >>: Ind >>: Prop] + + /** + * An ordered pair. 
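+ * Encoded Kuratowski-style as `~x <> (x <> y)`, i.e. the unordered pair of the singleton `{x}` and the unordered pair `{x, y}`.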
+ */ + val pair = DEF(lambda(x, lambda(y, ~x <> (x <> y)))) + + extension (t: Expr[Ind]) infix def ::(s: Expr[Ind]) = pair(t)(s) + + // val firstMemberExists = Theorem(exists(x, exists(y, p === x :: y)) ==> exists(y, p === x :: y)): + // have(p === x :: y |- p === x :: y) by Restate + // thenHave(p === x :: y |- exists(y, p === x :: y)) by Restate + + // // first of a pair + // val first = DEF( lambda(p, ε(x, exists(x, exists(y, p === x :: y)) ==> exists(y, p === x :: y))) ) + + // // second of a pair + // val second = DEF( lambda(p, ε(y, exists(x, exists(y, p === x :: y)) ==> exists(x, p === x :: y))) ) + + // val firstExists = Theorem(exists(x, exists(x, exists(y, p === x :: y)) ==> exists(y, p === x :: y))): + // have(p === x :: y |- p === x :: y) by Restate + // thenHave(p === x :: y |- exists(y, p === x :: y)) by RightExists + +end Pair diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/Replacement.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/Replacement.scala new file mode 100644 index 000000000..e6172fbb1 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/Replacement.scala @@ -0,0 +1,70 @@ +package lisa.maths.settheory + +import lisa.automation.Substitution +import lisa.maths.Quantifiers + +object Replacement extends lisa.Main: + + val x = variable[Ind] + val y = variable[Ind] + val z = variable[Ind] + val t = variable[Ind] + val s = variable[Ind] + val A = variable[Ind] + val B = variable[Ind] + val f = variable[Ind >>: Ind] + val P = variable[Ind >>: Ind >>: Prop] + + val map = DEF(lambda(t, lambda(f, ε(s, ∀(y, (y ∈ s) <=> ∃(x, x ∈ t /\ (y === f(x)))))))) + + private val replacement: map.type = map + + extension (t: Expr[Ind]) + def map(function: Expr[Ind >>: Ind]): Expr[Ind] = + replacement(t)(function) + + /** + * The existence of the image of a set under a function. Or, the functional + * form of the replacement schema. + */ + val existence = Theorem(∃(s, ∀(y, (y ∈ s) <=> ∃(x, x ∈ t /\ (y === f(x)))))): + val inst = replacementSchema of (A := t, P := lambda(x, lambda(y, y === f(x)))) + val conditional = have(∀(x, x ∈ t ==> ∀(y, ∀(z, ((y === f(x)) /\ (z === f(x))) ==> (y === z)))) |- ∃(s, ∀(y, (y ∈ s) <=> ∃(x, x ∈ t /\ (y === f(x)))))) by Weakening(inst) + + val eqTautology = + have(((y === f(x)) /\ (z === f(x))) ==> (y === z)) by Weakening(Equality.transitivity of (x := y, y := f(x), z := z)) + thenHave(∀(y, ∀(z, ((y === f(x)) /\ (z === f(x))) ==> (y === z)))) by Quantifiers.quantifyAll + + thenHave(x ∈ t ==> ∀(y, ∀(z, ((y === f(x)) /\ (z === f(x))) ==> (y === z)))) by Weakening + thenHave(∀(x, x ∈ t ==> ∀(y, ∀(z, ((y === f(x)) /\ (z === f(x))) ==> (y === z))))) by RightForall + + have(thesis) by Cut(lastStep, conditional) + + /** + * The extensional definition of a [[map]]ped set. 
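+ * Membership in `s.map(f)` holds exactly of the values `f(x)` for `x ∈ s`; the set itself is obtained from [[existence]] via the epsilon operator in [[map]].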
+ * + * `∀(x, x ∈ s.map(f) <=> ∃(y, y ∈ s /\ x === f(y)))` + */ + val definition: THM = Theorem(∀(y, y ∈ s.map(f) <=> ∃(x, x ∈ s /\ (y === f(x))))): + have(∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x)))) |- ∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x))))) by Hypothesis + thenHave(∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x)))) |- ∀(y, y ∈ ε(t, ∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x))))) <=> ∃(x, x ∈ s /\ (y === f(x))))) by RightEpsilon.withParameters(∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x)))), t, t) + thenHave(∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x)))) |- ∀(y, y ∈ s.map(f) <=> ∃(x, x ∈ s /\ (y === f(x))))) by Substitution.Apply(map.definition of (t := s)) + thenHave(∃(t, ∀(y, y ∈ t <=> ∃(x, x ∈ s /\ (y === f(x))))) |- ∀(y, y ∈ s.map(f) <=> ∃(x, x ∈ s /\ (y === f(x))))) by LeftExists + have(thesis) by Cut(existence of (t := s), lastStep) + + /** + * The replacement property of a [[map]]ped set. + * + * `x ∈ s ==> f(x) ∈ s.map(f)` + */ + val unfolding = Theorem(x ∈ s ==> f(x) ∈ s.map(f)): + have(x ∈ s |- x ∈ s /\ (f(x) === f(x))) by Restate + val cond = thenHave(x ∈ s |- ∃(y, y ∈ s /\ (f(x) === f(y)))) by RightExists + + val inst = + have(f(x) ∈ s.map(f) <=> ∃(y, y ∈ s /\ (f(x) === f(y)))) by InstantiateForall(f(x))(definition) + thenHave(∃(y, y ∈ s /\ (f(x) === f(y))) |- f(x) ∈ s.map(f)) by Weakening + + have(x ∈ s |- f(x) ∈ s.map(f)) by Cut(cond, inst) + +end Replacement diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/SetTheory2.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/SetTheory2.scala new file mode 100644 index 000000000..b6beca98a --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/SetTheory2.scala @@ -0,0 +1,109 @@ +package lisa.maths.settheory + +/** + * Revamp of the set theory library from scratch, since most of the current one is severely outdated. + * We can make a better presentation and organisation of theorems, better automation, uniform comments/latex tags, etc. 
+ */ +object SetTheory2 extends lisa.Main { + // import lisa.maths.settheory.SetTheory.* + // import Comprehensions.* + + /* + private val s = variable + private val x = variable + private val x_1 = variable + private val y = variable + private val z = variable + private val f = function[1] + private val t = variable + private val A = variable + private val B = variable + private val C = variable + private val P = predicate[2] + private val Q = predicate[1] + private val Filter = predicate[1] + private val Map = function[1] + + val primReplacement = Theorem( + ∀(x, in(x, A) ==> ∀(y, ∀(z, (P(x, y) /\ P(x, z)) ==> (y === z)))) |- + ∃(B, forall(y, in(y, B) <=> ∃(x, in(x, A) /\ P(x, y)))) + ) { + have(thesis) by Restate.from(replacementSchema of (A := A, x := x, P := P)) + } + + val manyForall = Lemma( + ∀(x, in(x, A) ==> ∀(y, ∀(z, (P(x, y) /\ P(x, z)) ==> (y === z)))).substitute(P := lambda((A, B), P(A, B) /\ ∀(C, P(A, C) ==> (B === C)))) <=> top + ) { + have(thesis) by Tableau + } + + val functionalIsFunctional = Theorem( + ∀(x, in(x, A) ==> ∀(y, ∀(z, (P(x, y) /\ P(x, z)) ==> (y === z)))).substitute(P := lambda((A, B), Filter(A) /\ (B === Map(A)))) <=> top + ) { + + have(y === Map(x) |- (y === Map(x))) by Restate + thenHave((y === Map(x), z === Map(x)) |- y === z) by Substitution.ApplyRules(Map(x) === z) + thenHave(in(x, A) |- ((Filter(x) /\ (y === Map(x)) /\ (z === Map(x))) ==> (y === z))) by Weakening + thenHave(in(x, A) |- ∀(z, ((Filter(x) /\ (y === Map(x)) /\ (z === Map(x))) ==> (y === z)))) by RightForall + thenHave(in(x, A) |- ∀(y, ∀(z, ((Filter(x) /\ (y === Map(x)) /\ (z === Map(x))) ==> (y === z))))) by RightForall + thenHave(in(x, A) ==> ∀(y, ∀(z, ((Filter(x) /\ (y === Map(x)) /\ (z === Map(x))) ==> (y === z))))) by Restate + thenHave(∀(x, in(x, A) ==> ∀(y, ∀(z, ((Filter(x) /\ (y === Map(x)) /\ (z === Map(x))) ==> (y === z)))))) by RightForall + thenHave(thesis) by Restate + + } + + /** + * Theorem --- the refined replacement axiom. Easier to use as a rule than primReplacement. 
*/ + val replacement = Theorem( + ∃(B, ∀(y, in(y, B) <=> ∃(x, in(x, A) /\ P(x, y) /\ ∀(z, P(x, z) ==> (z === y))))) + ) { + have(thesis) by Tautology.from(manyForall, primReplacement of (P := lambda((A, B), P(A, B) /\ ∀(C, P(A, C) ==> (B === C))))) + } + + val onePointRule = Theorem( + ∃(x, (x === y) /\ Q(x)) <=> Q(y) + ) { + val s1 = have(∃(x, (x === y) /\ Q(x)) ==> Q(y)) subproof { + assume(∃(x, (x === y) /\ Q(x))) + val ex = witness(lastStep) + val s1 = have(Q(ex)) by Tautology.from(ex.definition) + val s2 = have(ex === y) by Tautology.from(ex.definition) + have(Q(y)) by Substitution.ApplyRules(s2)(s1) + } + val s2 = have(Q(y) ==> ∃(x, (x === y) /\ Q(x))) subproof { + assume(Q(y)) + thenHave((y === y) /\ Q(y)) by Restate + thenHave(∃(x, (x === y) /\ Q(x))) by RightExists + thenHave(thesis) by Restate.from + } + have(thesis) by Tautology.from(s1, s2) + } + + /** + * Theorem - `∃(x_1, in(x_1, singleton(∅)) /\ (x === f(x_1))) <=> (x === f(∅))` + */ + val singletonMap = Lemma( + ∃(x_1, in(x_1, singleton(∅)) /\ (x === f(x_1))) <=> (x === f(∅)) + ) { + val s1 = have(∃(x_1, in(x_1, singleton(∅)) /\ (x === f(x_1))) ==> (x === f(∅))) subproof { + have(x === f(∅) |- x === f(∅)) by Restate + thenHave((x_1 === ∅, x === f(x_1)) |- x === f(∅)) by Substitution.ApplyRules(x_1 === ∅) + thenHave((x_1 === ∅) /\ (x === f(x_1)) |- x === f(∅)) by Restate + thenHave((in(x_1, singleton(∅))) /\ ((x === f(x_1))) |- x === f(∅)) by Substitution.ApplyRules(singletonHasNoExtraElements of (y := x_1, x := ∅)) + thenHave(∃(x_1, in(x_1, singleton(∅)) /\ ((x === f(x_1)))) |- x === f(∅)) by LeftExists + + } + + val s2 = have((x === f(∅)) ==> ∃(x_1, in(x_1, singleton(∅)) /\ (x === f(x_1)))) subproof { + have(x === f(∅) |- (∅ === ∅) /\ (x === f(∅))) by Restate + thenHave(x === f(∅) |- in(∅, singleton(∅)) /\ (x === f(∅))) by Substitution.ApplyRules(singletonHasNoExtraElements of (y := x_1, x := ∅)) + thenHave(x === f(∅) |- ∃(x_1, in(x_1, singleton(∅)) /\ (x === f(x_1)))) by RightExists + thenHave(thesis) by Restate.from + + } + + have(thesis) by Tautology.from(s1, s2) + } + */ +} diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/Singleton.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/Singleton.scala new file mode 100644 index 000000000..433926df6 --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/Singleton.scala @@ -0,0 +1,30 @@ +package lisa.maths.settheory + +import lisa.maths.settheory.UnorderedPair.* +import lisa.automation.Substitution + +object Singleton extends lisa.Main: + + private val s = variable[Ind] + private val x = variable[Ind] + private val y = variable[Ind] + private val z = variable[Ind] + private val P = variable[Ind >>: Prop] + private val Q = variable[Ind >>: Ind >>: Prop] + + val singleton = DEF(lambda(x, x <> x)) + + extension (t: Expr[Ind]) + /** + * Prefix notation for the singleton set of the receiver `t` + */ + def unary_~ = singleton(t) + + + println(singleton.definition.statement) + + val membership = Theorem(x ∈ ~x): + have(x ∈ (x <> x)) by Restate.from(UnorderedPair.firstMember of (y := x)) + thenHave(thesis) by Substitution.Apply(singleton.definition) + +end Singleton diff --git a/lisa-sets2/src/main/scala/lisa/maths/settheory/UnorderedPair.scala b/lisa-sets2/src/main/scala/lisa/maths/settheory/UnorderedPair.scala new file mode 100644 index 000000000..13ce0450c --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/maths/settheory/UnorderedPair.scala @@ -0,0 +1,41 @@ +package lisa.maths.settheory + +object UnorderedPair extends lisa.Main: + + private val s = variable[Ind] + private
val x = variable[Ind] + private val y = variable[Ind] + private val z = variable[Ind] + private val P = variable[Ind >>: Prop] + private val Q = variable[Ind >>: Ind >>: Prop] + + /** + * Unordered pair of sets + */ + val upair = unorderedPair + + /** + * Unordered pair of sets + */ + val <> = upair + + extension (t: Expr[Ind]) + /** + * Infix notation for an unordered pair. + */ + infix def <>(s: Expr[Ind]) = upair(t)(s) + + val firstMember = Theorem(x ∈ (x <> y)): + have(thesis) by Tautology.from(<>.definition of (z := x)) + + val secondMember = Theorem(y ∈ (x <> y)): + have(thesis) by Tautology.from(<>.definition of (z := y)) + + // val symmetry = Theorem( (x <> y) === (y <> x) ): + // val fwd = have(z ∈ (x <> y) <=> ((z === x) \/ (z === y))) by Restate.from(<>.definition) + // val bwd = have(z ∈ (y <> x) <=> ((z === y) \/ (z === x))) by Restate.from(<>.definition of (x := y, y := x)) + + // have(z ∈ (x <> y) <=> z ∈ (y <> x)) by Tautology.from(fwd, bwd) + // thenHave(thesis) by Extensionality.tactic + +end UnorderedPair diff --git a/lisa-utils/src/main/scala/lisa/utils/tptp/Example.scala b/lisa-sets2/src/main/scala/lisa/tptp/Example.scala similarity index 70% rename from lisa-utils/src/main/scala/lisa/utils/tptp/Example.scala rename to lisa-sets2/src/main/scala/lisa/tptp/Example.scala index 4c834ae95..e39cb9392 100644 --- a/lisa-utils/src/main/scala/lisa/utils/tptp/Example.scala +++ b/lisa-sets2/src/main/scala/lisa/tptp/Example.scala @@ -1,17 +1,15 @@ -package lisa.utils.tptp +package lisa.tptp -import lisa.utils.parsing.FOLParser.* -import lisa.utils.tptp.KernelParser.annotatedStatementToKernel -import lisa.utils.tptp.KernelParser.parseToKernel -import lisa.utils.tptp.KernelParser.problemToSequent -import lisa.utils.tptp.ProblemGatherer.getPRPproblems +import lisa.tptp.KernelParser.annotatedStatementToKernel +import lisa.tptp.KernelParser.parseToKernel +import lisa.tptp.KernelParser.problemToSequent +import lisa.tptp.ProblemGatherer.getPRPproblems +import lisa.utils.K.{repr, given} -import KernelParser.{mapAtom, mapTerm, mapVariable} +import KernelParser.{strictMapAtom, strictMapTerm, strictMapVariable, emptyctx} object Example { - val prettyFormula = lisa.utils.parsing.FOLParser.printFormula - val prettySequent = lisa.utils.parsing.FOLParser.printSequent def tptpExample(): Unit = { val axioms = List( "( ~ ( ? 
[X] : ( big_s(X) & big_q(X) ) ) )", @@ -29,11 +27,11 @@ object Example { ) println("\n---Individual Fetched Formulas---") - axioms.foreach(a => println(prettyFormula(parseToKernel(a)(using mapAtom, mapTerm, mapVariable)))) - println(prettyFormula(parseToKernel(conjecture)(using mapAtom, mapTerm, mapVariable))) + axioms.foreach(a => println(parseToKernel(a)(using emptyctx, (strictMapAtom, strictMapTerm, strictMapVariable)).repr)) + println(parseToKernel(conjecture)(using emptyctx, (strictMapAtom, strictMapTerm, strictMapVariable)).repr) println("\n---Annotated Formulas---") - anStatements.map(annotatedStatementToKernel(_)(using mapAtom, mapTerm, mapVariable)).foreach(f => printAnnotatedStatement(f)) + anStatements.map(annotatedStatementToKernel(_)(using emptyctx, (strictMapAtom, strictMapTerm, strictMapVariable))).foreach(f => printAnnotatedStatement(f)) println("\n---Problems---") @@ -48,7 +46,7 @@ object Example { val seq = problemToSequent(probs.head) printProblem(probs.head) println("\n---Sequent---") - println(prettySequent(seq)) + println(seq.repr) } } catch { case error: NullPointerException => println("You can download the tptp library at http://www.tptp.org/ and put it in main/resources") @@ -59,8 +57,8 @@ object Example { // Utility def printAnnotatedStatement(a: AnnotatedStatement): Unit = { val prettyStatement = a match { - case f: AnnotatedFormula => prettyFormula(f.formula) - case s: AnnotatedSequent => prettySequent(s.sequent) + case f: AnnotatedFormula => f.formula.repr + case s: AnnotatedSequent => s.sequent.repr } if (a.role == "axiom") println("Given " + a.name + ": " + prettyStatement) else if (a.role == "conjecture") println("Prove " + a.name + ": " + prettyStatement) diff --git a/lisa-utils/src/main/scala/lisa/utils/tptp/KernelParser.scala b/lisa-sets2/src/main/scala/lisa/tptp/KernelParser.scala similarity index 64% rename from lisa-utils/src/main/scala/lisa/utils/tptp/KernelParser.scala rename to lisa-sets2/src/main/scala/lisa/tptp/KernelParser.scala index 1911e4791..aad2ea3a3 100644 --- a/lisa-utils/src/main/scala/lisa/utils/tptp/KernelParser.scala +++ b/lisa-sets2/src/main/scala/lisa/tptp/KernelParser.scala @@ -1,4 +1,4 @@ -package lisa.utils.tptp +package lisa.tptp import leo.datastructures.TPTP import leo.datastructures.TPTP.CNF @@ -8,7 +8,7 @@ import lisa.utils.K import lisa.utils.KernelHelpers.* import lisa.utils.KernelHelpers.given_Conversion_Identifier_String import lisa.utils.KernelHelpers.given_Conversion_String_Identifier -import lisa.utils.tptp.* +import lisa.tptp.* import java.io.File import scala.util.matching.Regex @@ -16,6 +16,8 @@ import scala.util.matching.Regex import Parser.TPTPParseException object KernelParser { + type DefContext = (String => Option[K.Expression]) + val emptyctx: DefContext = _ => None private case class ProblemMetadata(file: String, domain: String, problem: String, status: String, spc: Seq[String]) @@ -23,29 +25,34 @@ object KernelParser { * @param formula A formula in the tptp language * @return the corresponding LISA formula */ - def parseToKernel(formula: String)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.Formula = convertToKernel( + def parseToKernel(formula: String)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.Expression = convertToKernel( Parser.fof(formula) - ) + )(using defctx, (strictMapAtom, strictMapTerm, strictMapVariable)) /** * @param formula a tptp 
formula in leo parser * @return the same formula in LISA */ - def convertToKernel(formula: FOF.Formula)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.Formula = { + def convertToKernel(formula: FOF.Formula)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.Expression = { + val (mapAtom, mapTerm, mapVariable) = maps formula match { case FOF.AtomicFormula(f, args) => - if f == "$true" then K.top() - else if f == "$false" then K.bot() - else K.AtomicFormula(mapAtom(f, args.size), args map convertTermToKernel) + if f == "$true" then K.top + else if f == "$false" then K.bot + else if (f.head == '$' && args.size == 0 && defctx(f.tail).isDefined) then defctx(f.tail).get + else args.foldLeft(mapAtom(f, args.size): K.Expression)((acc, arg) => acc(convertTermToKernel(arg))) + // else throw new Exception("Unknown atomic formula kind: " + kind +" in " + f) case FOF.QuantifiedFormula(quantifier, variableList, body) => quantifier match { - case FOF.! => variableList.foldRight(convertToKernel(body))((s, f) => K.Forall(mapVariable(s), f)) - case FOF.? => variableList.foldRight(convertToKernel(body))((s, f) => K.Exists(mapVariable(s), f)) + case FOF.! => + variableList.foldRight(convertToKernel(body))((s, f) => K.forall(mapVariable(s), f)) + case FOF.? => variableList.foldRight(convertToKernel(body))((s, f) => K.exists(mapVariable(s), f)) + case FOF.Epsilon => ??? } case FOF.UnaryFormula(connective, body) => connective match { - case FOF.~ => K.Neg(convertToKernel(body)) + case FOF.~ => K.neg(convertToKernel(body)) } case FOF.BinaryFormula(connective, left, right) => connective match { @@ -58,24 +65,22 @@ object KernelParser { case FOF.| => convertToKernel(left) \/ convertToKernel(right) case FOF.& => convertToKernel(left) /\ convertToKernel(right) } - case FOF.Equality(left, right) => K.equality(convertTermToKernel(left), convertTermToKernel(right)) - case FOF.Inequality(left, right) => !K.equality(convertTermToKernel(left), convertTermToKernel(right)) + case FOF.Equality(left, right) => K.equality(convertTermToKernel(left))(convertTermToKernel(right)) + case FOF.Inequality(left, right) => !K.equality(convertTermToKernel(left))(convertTermToKernel(right)) } } - def convertToKernel(sequent: FOF.Sequent)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.Sequent = { + def convertToKernel(sequent: FOF.Sequent)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.Sequent = { K.Sequent(sequent.lhs.map(convertToKernel).toSet, sequent.rhs.map(convertToKernel).toSet) } - def convertToKernel(formula: CNF.Formula)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.Formula = { - - K.ConnectorFormula( - K.Or, + def convertToKernel(formula: CNF.Formula)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.Expression = { + K.multior( formula.map { - case CNF.PositiveAtomic(formula) => K.AtomicFormula(mapAtom(formula.f, formula.args.size), formula.args.map(convertTermToKernel).toList) - case CNF.NegativeAtomic(formula) => !K.AtomicFormula(mapAtom(formula.f, formula.args.size), formula.args.map(convertTermToKernel).toList) - case CNF.Equality(left, right) => 
K.equality(convertTermToKernel(left), convertTermToKernel(right)) - case CNF.Inequality(left, right) => !K.equality(convertTermToKernel(left), convertTermToKernel(right)) + case CNF.PositiveAtomic(formula) => multiapply(strictMapAtom(formula.f, formula.args.size))(formula.args.map(convertTermToKernel).toList) + case CNF.NegativeAtomic(formula) => !multiapply(strictMapAtom(formula.f, formula.args.size))(formula.args.map(convertTermToKernel).toList) + case CNF.Equality(left, right) => K.equality(convertTermToKernel(left))(convertTermToKernel(right)) + case CNF.Inequality(left, right) => !K.equality(convertTermToKernel(left))(convertTermToKernel(right)) } ) } @@ -84,34 +89,38 @@ object KernelParser { * @param term a tptp term in leo parser * @return the same term in LISA */ - def convertTermToKernel(term: CNF.Term)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.Term = term match { - case CNF.AtomicTerm(f, args) => K.Term(mapTerm(f, args.size), args map convertTermToKernel) - case CNF.Variable(name) => K.VariableTerm(mapVariable(name)) - case CNF.DistinctObject(name) => ??? - } + def convertTermToKernel(term: CNF.Term)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.Expression = + val (mapAtom, mapTerm, mapVariable) = maps + term match { + case CNF.AtomicTerm(f, args) => K.multiapply(mapTerm(f, args.size))(args map convertTermToKernel) + case CNF.Variable(name) => mapVariable(name) + case CNF.DistinctObject(name) => ??? + } /** * @param term a tptp term in leo parser * @return the same term in LISA */ - def convertTermToKernel(term: FOF.Term)(using mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.Term = term match { - - case FOF.AtomicTerm(f, args) => - K.Term(mapTerm(f, args.size), args map convertTermToKernel) - case FOF.Variable(name) => K.VariableTerm(mapVariable(name)) - case FOF.DistinctObject(name) => ??? - case FOF.NumberTerm(value) => ??? - + def convertTermToKernel(term: FOF.Term)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.Expression = + val (mapAtom, mapTerm, mapVariable) = maps + term match { + case FOF.AtomicTerm(f, args) => + if (f.head == '$' && args.size == 0 && defctx(f.tail).isDefined) then defctx(f.tail).get + else K.multiapply(mapTerm(f, args.size))(args map convertTermToKernel) + case FOF.Variable(name) => mapVariable(name) + case FOF.DistinctObject(name) => ??? + case FOF.NumberTerm(value) => ??? + case FOF.QuantifiedTerm(quantifier, Seq(x), body) => K.epsilon(mapVariable(x), convertToKernel(body)) } /** * @param formula an annotated tptp statement * @return the corresponding LISA formula augmented with name and role. 
*/ - def annotatedStatementToKernel(formula: String)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): AnnotatedStatement = { + def annotatedStatementToKernel(formula: String)(using defctx: DefContext, maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): AnnotatedStatement = { val i = Parser.annotatedFOF(formula) i match - case TPTP.FOFAnnotated(name, role, formula, annotations) => + case TPTP.FOFAnnotated(name, role, formula, annotations, origin) => formula match { case FOF.Logical(formula) => AnnotatedFormula(role, name, convertToKernel(formula), annotations) case FOF.Sequent(antecedent, succedent) => @@ -120,26 +129,24 @@ object KernelParser { } - private def problemToKernel(problemFile: File, md: ProblemMetadata)(using - mapAtom: (String, Int) => K.AtomicLabel, - mapTerm: (String, Int) => K.TermLabel, - mapVariable: String => K.VariableLabel - ): Problem = { + private def problemToKernel(problemFile: File, md: ProblemMetadata)(using maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): Problem = { + val (mapAtom, mapTerm, mapVariable) = maps val file = io.Source.fromFile(problemFile) val pattern = "SPC\\s*:\\s*[A-z]{3}(_[A-z]{3})*".r val g = file.getLines() + given emptyctx: DefContext = _ => None def search(): String = pattern.findFirstIn(g.next()).getOrElse(search()) val i = Parser.problem(file) val sq = i.formulas map { - case TPTP.FOFAnnotated(name, role, formula, annotations) => + case TPTP.FOFAnnotated(name, role, formula, annotations, origin) => formula match { case FOF.Logical(formula) => AnnotatedFormula(role, name, convertToKernel(formula), annotations) case FOF.Sequent(antecedent, succedent) => AnnotatedSequent(role, name, K.Sequent(antecedent.map(convertToKernel).toSet, succedent.map(convertToKernel).toSet), annotations) } - case TPTP.CNFAnnotated(name, role, formula, annotations) => + case TPTP.CNFAnnotated(name, role, formula, annotations, origin) => formula match { case CNF.Logical(formula) => AnnotatedFormula(role, name, convertToKernel(formula), annotations) } @@ -152,7 +159,7 @@ object KernelParser { * @param problemFile a file containning a tptp problem * @return a Problem object containing the data of the tptp problem in LISA representation */ - def problemToKernel(problemFile: File)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Problem = { + def problemToKernel(problemFile: File)(using maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): Problem = { problemToKernel(problemFile, getProblemInfos(problemFile)) } @@ -160,7 +167,7 @@ object KernelParser { * @param problemFile a path to a file containing a tptp problem * @return a Problem object containing the data of the tptp problem in LISA representation */ - def problemToKernel(problemFile: String)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Problem = { + def problemToKernel(problemFile: String)(using maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): Problem = { problemToKernel(File(problemFile)) } @@ -188,9 +195,20 @@ object KernelParser { if last.nonEmpty && last.forall(_.isDigit) && last.head != '0' then lead.mkString("$u") + "_" + last else pieces.mkString("$u") - val mapAtom: ((String, Int) => K.AtomicLabel) = (f, n) 
=> K.ConstantAtomicLabel(sanitize(f), n) - val mapTerm: ((String, Int) => K.TermLabel) = (f, n) => K.ConstantFunctionLabel(sanitize(f), n) - val mapVariable: (String => K.VariableLabel) = f => K.VariableLabel(sanitize(f)) + val strictMapAtom: ((String, Int) => K.Expression) = (f, n) => + val kind = f.head + val id = f.tail + if f(0).isUpper then + K.Variable(sanitize(f), K.predicateType(n)) + else K.Constant(sanitize(f), K.predicateType(n)) + val strictMapTerm: ((String, Int) => K.Expression) = (f, n) => + val kind = f.head + val id = f.tail + if f(0).isUpper then + K.Variable(sanitize(f), K.functionType(n)) + else K.Constant(sanitize(f), K.functionType(n)) + val strictMapVariable: (String => K.Variable) = f => + K.Variable(sanitize(f), K.Ind) /** * Given a folder containing folders containing problem (typical organisation of TPTP library) and a list of spc, @@ -226,7 +244,7 @@ object KernelParser { val r = probfiles.foldRight(List.empty[Problem])((p, current) => { val md = getProblemInfos(p) - if (md.spc.exists(spc.contains)) problemToKernel(p, md)(using mapAtom, mapTerm, mapVariable) :: current + if (md.spc.exists(spc.contains)) problemToKernel(p, md)(using (strictMapAtom, strictMapTerm, strictMapVariable)) :: current else current }) r diff --git a/lisa-utils/src/main/scala/lisa/utils/tptp/ProblemGatherer.scala b/lisa-sets2/src/main/scala/lisa/tptp/ProblemGatherer.scala similarity index 85% rename from lisa-utils/src/main/scala/lisa/utils/tptp/ProblemGatherer.scala rename to lisa-sets2/src/main/scala/lisa/tptp/ProblemGatherer.scala index b635c5a72..fdb4bb392 100644 --- a/lisa-utils/src/main/scala/lisa/utils/tptp/ProblemGatherer.scala +++ b/lisa-sets2/src/main/scala/lisa/tptp/ProblemGatherer.scala @@ -1,6 +1,6 @@ -package lisa.utils.tptp +package lisa.tptp -import lisa.utils.tptp.KernelParser.* +import KernelParser.* object ProblemGatherer { diff --git a/lisa-sets2/src/main/scala/lisa/tptp/ProofParser.scala b/lisa-sets2/src/main/scala/lisa/tptp/ProofParser.scala new file mode 100644 index 000000000..fe61ee35c --- /dev/null +++ b/lisa-sets2/src/main/scala/lisa/tptp/ProofParser.scala @@ -0,0 +1,1100 @@ +package lisa.tptp + +import leo.datastructures.TPTP.AnnotatedFormula +import leo.datastructures.TPTP.FOF +import leo.datastructures.TPTP.FOFAnnotated +import leo.datastructures.TPTP.FOTAnnotated +import leo.modules.input.{TPTPParser => Parser} +import lisa.utils.K +import K.{repr, -<<, +<<, ->>, +>>, |-} + +import java.io.File + +import Parser.TPTPParseException +import KernelParser.* +import K.{given} +import lisa.automation.Tautology +import lisa.automation.Tableau +import lisa.automation.Tableau.instantiate + +object ProofParser { + val TPTPversion = "TPTP v8.0.0" + val rand = scala.util.Random() + + type MapTriplet = ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable) + + val mapAtom: ((String, Int) => K.Expression) = (f, n) => + val kind = f.head + val id = f.tail + if kind == 's' then + K.Variable(sanitize(id), K.predicateType(n)) + else if kind == 'c' then K.Constant(sanitize(id), K.predicateType(n)) + else if f(0).isUpper then + K.Variable(sanitize(f), K.predicateType(n)) + else throw new Exception(s"Unknown kind of atomic label: $f") + val mapTerm: ((String, Int) => K.Expression) = (f, n) => + val kind = f.head + val id = f.tail + if kind == 's' then K.Variable(sanitize(id), K.functionType(n)) + else if kind == 'c' then K.Constant(sanitize(id), K.functionType(n)) + else if f(0).isUpper then + K.Variable(sanitize(f), K.functionType(n)) + else throw 
new Exception(s"Unknown kind of term label: $f") + val mapVariable: (String => K.Variable) = f => K.Variable(sanitize(f), K.Ind) + //if f.head == 'X' then K.Variable(sanitize(f.tail), K.Ind) + //else K.Variable(sanitize(f), K.Ind) + + + given maps: MapTriplet = (mapAtom, mapTerm, mapVariable) + + def problemToFile(fileDirectory: String, fileName: String, name: String, axioms: Seq[K.Sequent], conjecture: K.Sequent, source: String): File = { + // case class Problem(file: String, domain: String, name: String, status: String, spc: Seq[String], formulas: Seq[AnnotatedStatement]) + val number = rand.nextInt(1000) + val file = new File(fileDirectory + fileName + ".p") + // val fileName = originFile.split("/").last + val header = + s"""%-------------------------------------------------------------------------- +% File : $fileName : $TPTPversion. +% Domain : None +% Problem : ${name} +% Version : None +% English : + +% Refs : https://github.com/epfl-lara/lisa +% : lisa.utils.tptp.ProofParser +% Source : [Lisa, $source] +% Names : + +% Status : Unknown +% Rating : ? +% Syntax : ? +% SPC : FOF_UNK_RFO_SEQ + +% Comments : This problem, was printed from a statement in a proof of a theorem by the Lisa theorem prover for submission to proof-producing ATPs. +%-------------------------------------------------------------------------- +""" + val writer = new java.io.PrintWriter(file) + writer.write(header) + var counter = 0 + def nextc = { counter += 1; counter } + axioms.foreach(s => writer.write(sequentToFOFAnnotated(s, "a" + nextc, "axiom").pretty + "\n")) + writer.write(sequentToFOFAnnotated(conjecture, "c" + nextc, "conjecture").pretty + "\n\n") + writer.close() + file + } + + def sequentToFOFAnnotated(sequent: K.Sequent, name: String, role: String): FOFAnnotated = { + val annotations = None + if sequent.left.isEmpty && sequent.right.size == 1 then + val formula = sequent.right.head + return FOFAnnotated(name, role, formulaToFOFStatement(formula), annotations) + else + val seq = FOF.Sequent(sequent.left.map(formulaToFOFFormula(_, Set())).toSeq, sequent.right.map(formulaToFOFFormula(_, Set())).toSeq) + FOFAnnotated(name, role, seq, annotations) + } + + def isLowerWord(s: String): Boolean = s.head.isLower && s.tail.forall(_.isLetterOrDigit) + inline def quoted(s: String): String = if isLowerWord(s) then s else s"'$s'" + + def termToFOFTerm(term: K.Expression, bound: Set[K.Identifier]): FOF.Term = { + term match { + case K.Variable(id, K.Ind) => + if bound.contains(id) then FOF.Variable("X" + id) + else FOF.Variable(quoted("s" + id)) + case K.Constant(id, K.Ind) => FOF.AtomicTerm(quoted("c" + id), Seq()) + case K.Multiapp(K.Constant(id, typ), args) => + FOF.AtomicTerm(quoted("c" + id), args.map(termToFOFTerm(_, bound))) + case K.Multiapp(K.Variable(id, typ), args) => + FOF.AtomicTerm(quoted("s" + id), args.map(termToFOFTerm(_, bound))) + case K.Epsilon(v, f) => throw new Exception("Epsilon terms are not supported") + case _ => throw new Exception("The expression is not purely first order") + } + } + def formulaToFOFFormula(formula: K.Expression, bound: Set[K.Identifier]): FOF.Formula = { + formula match + case K.equality(left, right) => + FOF.Equality(termToFOFTerm(left, bound), termToFOFTerm(right, bound)) + case K.top => FOF.AtomicFormula("$true", Seq()) + case K.bot => FOF.AtomicFormula("$false", Seq()) + case K.neg(f) => FOF.UnaryFormula(FOF.~, formulaToFOFFormula(f, bound)) + case K.and(f1, f2) => FOF.BinaryFormula(FOF.&, formulaToFOFFormula(f1, bound), formulaToFOFFormula(f2, bound)) + case K.or(f1, 
f2) => FOF.BinaryFormula(FOF.|, formulaToFOFFormula(f1, bound), formulaToFOFFormula(f2, bound)) + case K.implies(f1, f2) => FOF.BinaryFormula(FOF.Impl, formulaToFOFFormula(f1, bound), formulaToFOFFormula(f2, bound)) + case K.iff(f1, f2) => FOF.BinaryFormula(FOF.<=>, formulaToFOFFormula(f1, bound), formulaToFOFFormula(f2, bound)) + case K.forall(K.Lambda(v, f)) => FOF.QuantifiedFormula(FOF.!, Seq("X" + v.id), formulaToFOFFormula(f, bound + v.id)) + case K.exists(K.Lambda(v, f)) => FOF.QuantifiedFormula(FOF.?, Seq("X" + v.id), formulaToFOFFormula(f, bound + v.id)) + case K.forall(p) => + val x = K.freshId(p.freeVariables.map(_.id), "x") + FOF.QuantifiedFormula(FOF.!, Seq("X" + x), formulaToFOFFormula(K.Application(p, K.Variable(x, K.Ind)), bound + x)) + case K.exists(p) => + val x = K.freshId(p.freeVariables.map(_.id), "x") + FOF.QuantifiedFormula(FOF.?, Seq("X" + x), formulaToFOFFormula(K.Application(p, K.Variable(x, K.Ind)), bound + x)) + case K.Multiapp(K.Constant(id, typ), args) => + FOF.AtomicFormula(quoted("c" + id), args.map(termToFOFTerm(_, bound))) + case K.Multiapp(K.Variable(id, typ), args) => + FOF.AtomicFormula(quoted("s" + id), args.map(termToFOFTerm(_, bound))) + case _ => throw new Exception("The expression is not purely first order: " + formula) + + } + + def formulaToFOFStatement(formula: K.Expression): FOF.Statement = { + FOF.Logical(formulaToFOFFormula(formula, Set())) + } + + def reconstructProof(file: File)(using maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): K.SCProof = { + val problem = Parser.problem(io.Source.fromFile(file)) + val nameMap = scala.collection.mutable.Map[String, (Int, FOF.Sequent)]() + var prems = List[K.Sequent]() + var steps = List[K.SCProofStep]() + var numberSteps = 0 + var contextExpr = scala.collection.mutable.Map[String, K.Expression]() + given defcontext: DefContext = contextExpr.get + problem.formulas.foreach { + case ft: FOTAnnotated => + if ft.role == "let" then + val term = ft.formula + val defedcst = ft.name + contextExpr(defedcst) = convertTermToKernel(term)(using defcontext) + case fa: FOFAnnotated => + if fa.role == "conjecture" then () + else if fa.role == "let" then + val formula = fa.formula match { + case FOF.Logical(formula) => formula + case s: FOF.Sequent => throw new Exception("Sequent in let statement is incorrect") + } + val defedcst = fa.name + + contextExpr(defedcst) = convertToKernel(formula)(using defcontext) + else + val fofsequent = fa.formula match { + case FOF.Logical(formula) => FOF.Sequent(Seq(), Seq(formula)) + case s: FOF.Sequent => s + } + if fa.role == "axiom" then + val sequent = K.Sequent(fofsequent.lhs.map(convertToKernel).toSet, fofsequent.rhs.map(convertToKernel).toSet) + nameMap(fa.name) = (-prems.size - 1, fofsequent) + prems = sequent :: prems + else + annotatedStatementToProofStep(fa, e => nameMap(e)._1, e => nameMap(e)._2, defcontext) match { + case Some((step, name)) => + nameMap(name) = (numberSteps, fofsequent) + numberSteps += 1 + steps = step :: steps + case None => throw new Exception(s"Proof step could not be reconstructed from ${fa.pretty}") + } + case _ => throw new Exception("Only FOF statements are supported") + } + K.SCProof(steps.reverse.toIndexedSeq, prems.reverse.toIndexedSeq) + } + + def annotatedStatementToProofStep(ann: FOFAnnotated, numbermap: String => Int, sequentmap: String => FOF.Sequent, defctx: DefContext) + (using maps: ((String, Int) => K.Expression, (String, Int) => K.Expression, String => K.Variable)): Option[(K.SCProofStep, 
String)] = { + given (String => Int) = numbermap + given (String => FOF.Sequent) = sequentmap + given defcontext: DefContext = defctx + val r = ann match { + case Inference.LeftFalse(step, name) => Some((step, name)) + case Inference.Hyp(step, name) => Some((step, name)) + case Inference.LeftWeaken(step, name) => Some((step, name)) + case Inference.RightWeaken(step, name) => Some((step, name)) + case Inference.Cut(step, name) => Some((step, name)) + case Inference.LeftHyp(step, name) => Some((step, name)) + case Inference.LeftNNot(step, name) => Some((step, name)) + case Inference.LeftAnd(step, name) => Some((step, name)) + case Inference.LeftNOr(step, name) => Some((step, name)) + case Inference.LeftNImp(step, name) => Some((step, name)) + case Inference.LeftNAnd(step, name) => Some((step, name)) + case Inference.LeftOr(step, name) => Some((step, name)) + case Inference.LeftImplies(step, name) => Some((step, name)) + case Inference.LeftIff(step, name) => Some((step, name)) + case Inference.LeftNot(step, name) => Some((step, name)) + case Inference.LeftImp2(step, name) => Some((step, name)) + case Inference.LeftNAll(step, name) => Some((step, name)) + case Inference.LeftExists(step, name) => Some((step, name)) + case Inference.LeftForall(step, name) => Some((step, name)) + case Inference.LeftNEx(step, name) => Some((step, name)) + case Inference.RightNot(step, name) => Some((step, name)) + case Inference.RightImplies(step, name) => Some((step, name)) + case Inference.RightIff(step, name) => Some((step, name)) + case Inference.RightOr(step, name) => Some((step, name)) + case Inference.RightAnd(step, name) => Some((step, name)) + case Inference.RightForall(step, name) => Some((step, name)) + case Inference.RightExists(step, name) => Some((step, name)) + case Inference.RightRefl(step, name) => Some((step, name)) + case Inference.RightSubst(step, name) => Some((step, name)) + case Inference.LeftSubst(step, name) => Some((step, name)) + case Inference.RightSubstIff(step, name) => Some((step, name)) + case Inference.LeftSubstIff(step, name) => Some((step, name)) + case Inference.InstFun(step, name) => Some((step, name)) + case Inference.InstPred(step, name) => Some((step, name)) + case Inference.InstMult(step, name) => Some((step, name)) + case Inference.ElimIffRefl(step, name) => Some((step, name)) + case Inference.ElimEqRefl(step, name) => Some((step, name)) + case Inference.RightNnf(step, name) => Some((step, name)) + case Inference.Clausify(step, name) => Some((step, name)) + case Inference.RightPrenex(step, name) => Some((step, name)) + case Inference.InstForall(step, name) => Some((step, name)) + case Inference.Res(step, name) => Some((step, name)) + case Inference.ExistsIffEpsilon(step, name) => Some((step, name)) + case Inference.RightSubstFun(step, name) => Some((step, name)) + case Inference.RightSubstPred(step, name) => Some((step, name)) + + case _ => None + } + r + } + + object Inference { + import leo.datastructures.TPTP.{Annotations, GeneralTerm, MetaFunctionData, NumberData, Integer, FOF, GeneralFormulaData, FOTData, FOFData} + import K.apply + + object Tuple { + def unapply(ann_seq: GeneralTerm): Option[Seq[GeneralTerm]] = + ann_seq match { + case GeneralTerm(List(MetaFunctionData("tuple3", tup)), None) => Some(tup) + case _ => None + } + } + + object Number { + def unapply(ann_seq: GeneralTerm): Option[BigInt] = + ann_seq match { + case GeneralTerm(List(NumberData(Integer(n))), None) => Some(n) + case _ => None + } + } + object Ind { + def unapply(ann_seq: GeneralTerm)(using 
defctx: DefContext, maps: MapTriplet): Option[K.Expression] = + ann_seq match { + case GeneralTerm(List(GeneralFormulaData(FOTData(term))), None) => Some(convertTermToKernel(term)) + case _ => None + } + } + object Prop { + def unapply(ann_seq: GeneralTerm)(using defctx: DefContext, maps: MapTriplet): Option[K.Expression] = + ann_seq match { + case GeneralTerm(List(GeneralFormulaData(FOFData(FOF.Logical(formula)))), None) => Some(convertToKernel(formula)) + case _ => None + } + } + object String { + def unapply(ann_seq: GeneralTerm): Option[String] = + ann_seq match { + case GeneralTerm(List(MetaFunctionData(string, List())), None) => + if string.head == '\'' then Some(string.tail.init) + else Some(string) + case _ => None + } + } + object StrOrNum { + def unapply(ann_seq: GeneralTerm): Option[String] = + ann_seq match { + case String(s) => Some(s) + case Number(n) => Some(n.toString) + case _ => None + } + } + object Sequence { + def unapply(ann_seq: GeneralTerm): Option[Seq[GeneralTerm]] = + ann_seq match { + case GeneralTerm(List(), Some(terms)) => Some(terms) + case _ => None + } + } + def unapply(ann_seq: Annotations): Option[(String, Seq[GeneralTerm], Seq[String])] = + ann_seq match { + case Some( + ( + GeneralTerm( + List( + MetaFunctionData( + "inference", + List( + GeneralTerm(List(MetaFunctionData(stepName, List())), None), // stepnames + GeneralTerm(List(), Some(parameters)), // params + GeneralTerm(List(), Some(numberTerms)) + ) // numbers + ) + ), + None + ), + None + ) + ) => + Some( + ( + stepName, + parameters, + numberTerms.map { + case StrOrNum(n) => n.toString + case String(n) => n + case _ => throw new Exception(s"Expected a list of number as last parameter of inference, but got $numberTerms") + } + ) + ) + case _ => None + } + + object LeftFalse { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftFalse", Seq(_), Seq()), origin) => + Some((K.RestateTrue(convertToKernel(sequent)), name)) + case _ => None + } + } + + object Hyp { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("hyp", Seq(_, StrOrNum(n)), Seq()), origin) => + Some((K.RestateTrue(convertToKernel(sequent)), name)) + case _ => None + } + } + + object Cut { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("cut", Seq(_, StrOrNum(n)), Seq(t1, t2)), origin) => + val formula1 = sequentmap(t1).rhs(n.toInt) + Some((K.Cut(convertToKernel(sequent), numbermap(t1), numbermap(t2), convertToKernel(formula1)), name)) + case _ => + None + } + } + + object LeftWeaken { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftWeaken", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + 
case _ => None + } + } + + object RightWeaken { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightWeaken", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + case _ => None + } + } + + object LeftAnd { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftAnd", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + case _ => None + } + } + + object LeftOr { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftOr", Seq(_, StrOrNum(n)), Seq(t1, t2)), origin) => + val f = sequent.lhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Or(x, y) => (x, y) + case _ => throw new Exception(s"$name: Expected a disjunction, but got $f") + } + Some((K.LeftOr(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(a, b))), name) + case _ => None + } + } + + object LeftImplies { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftImplies", Seq(_, StrOrNum(n)), Seq(t1, t2)), origin) => + val f = sequent.lhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Implies(x, y) => (x, y) + case _ => throw new Exception(s"$name: Expected an implication, but got $f") + } + Some((K.LeftImplies(convertToKernel(sequent), numbermap(t1), numbermap(t2), a, b), name)) + case _ => None + } + } + + object LeftIff { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftIff", Seq(_, StrOrNum(n)), Seq(t1)), _) => + Some(K.Restate(convertToKernel(sequent), numbermap(t1)), name) + case _ => None + } + } + + object LeftNot { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNot", Seq(_, StrOrNum(n)), Seq(t1)), _) => + Some(K.Restate(convertToKernel(sequent), numbermap(t1)), name) + case _ => None + } + } + + object LeftExists { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftExists", Seq(_, StrOrNum(n), String(xl)), Seq(t1)), origin) => // x has to be a GeneralTerm representinf a variable, i.e. 
$fot(x) + val f = sequent.lhs(n.toInt) + val x = K.Variable(sanitize(xl), K.Ind) + val (y: K.Variable, phi: K.Expression) = convertToKernel(f) match { + case K.Exists(x, phi) => (x, phi) + case _ => throw new Exception(s"$name: Expected an existential quantification, but got $f") + } + if x == y then Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), phi, x), name)) + else Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), K.substituteVariables(phi, Map(y -> x)), x), name)) + case _ => None + } + } + + object LeftForall { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftForall", Seq(_, StrOrNum(n), Ind(t)), Seq(t1)), origin) => + val f = sequent.lhs(n.toInt) + val (x, phi) = convertToKernel(f) match { + case K.Forall(x, phi) => (x, phi) + case _ => throw new Exception(s"$name: Expected a universal quantification, but got $f") + } + + Some((K.LeftForall(convertToKernel(sequent), numbermap(t1), phi, x, t), name)) + case _ => None + } + } + + object RightAnd { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightAnd", Seq(_, StrOrNum(n)), Seq(t1, t2)), _) => + val f = sequent.rhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.And(x, y) => (x, y) + case _ => throw new Exception(s"$name: Expected a conjunction, but got $f") + } + Some((K.RightAnd(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(a, b)), name)) + case _ => None + } + } + + object RightOr { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightOr", Seq(_, StrOrNum(n)), Seq(t1)), _) => + val f = sequent.rhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Or(x, y) => (x, y) + case _ => throw new Exception(s"$name: Expected a disjunction, but got $f") + } + Some((K.RightOr(convertToKernel(sequent), numbermap(t1), a, b), name)) + case _ => None + } + } + + object RightImplies { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightImplies", Seq(_, StrOrNum(n)), Seq(t1)), _) => + val f = sequent.rhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Implies(x, y) => (x, y) + case _ => throw new Exception(s"$name: Expected an implication, but got $f") + } + Some((K.RightImplies(convertToKernel(sequent), numbermap(t1), a, b), name)) + case _ => None + } + } + + object RightIff { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightIff", Seq(_, StrOrNum(n)), Seq(t1, t2)), _) => + val f = sequent.rhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Iff(x, y) 
=> (x, y) + case _ => throw new Exception(s"$name: Expected a biimplication, but got $f") + } + Some((K.RightIff(convertToKernel(sequent), numbermap(t1), numbermap(t2), a, b), name)) + case _ => None + } + } + + + object RightNot { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightNot", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + case _ => None + } + } + + object RightExists { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightExists", Seq(_, StrOrNum(n), Ind(t)), Seq(t1)), origin) => + val f = sequent.rhs(n.toInt) + val (x, phi) = convertToKernel(f) match { + case K.Exists(x, phi) => (x, phi) + case _ => throw new Exception(s"$name: Expected an existential quantification, but got $f") + } + Some((K.RightExists(convertToKernel(sequent), numbermap(t1), phi, x, t), name)) + case _ => None + } + } + + object RightForall { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightForall", Seq(_, StrOrNum(n), String(xl)), Seq(t1)), origin) => // x has to be a GeneralTerm representing a variable, i.e. $fot(x) + val f = sequent.rhs(n.toInt) + val x = K.Variable(sanitize(xl), K.Ind) + val (y: K.Variable, phi: K.Expression) = convertToKernel(f) match { + case K.Forall(x, phi) => (x, phi) + case _ => throw new Exception(s"$name: Expected a universal quantification, but got $f") + } + if x == y then Some((K.RightForall(convertToKernel(sequent), numbermap(t1), phi, x), name)) + else Some((K.RightForall(convertToKernel(sequent), numbermap(t1), K.substituteVariables(phi, Map(y -> x)), x), name)) + case _ => None + } + } + + object RightRefl { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightRefl", Seq(_, StrOrNum(n)), Seq()), origin) => + val left = sequent.lhs.map(convertToKernel) + val right = sequent.rhs.map(convertToKernel) + val formula = right(n.toInt) + formula match + case K.equality(s, t) if K.isSame(s, t) => Some((K.RightRefl(K.Sequent(left.toSet, right.toSet), formula), name)) + case _ => throw new Exception(s"$name: Expected an equality, but got $formula") + case _ => None + } + } + + object RightSubst { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightSubst", Seq(_, StrOrNum(n), StrOrNum(_), Prop(fl), String(xl)), Seq(t1)), origin) => + val f = sequent.lhs(n.toInt) + val x = K.Variable(sanitize(xl), K.Ind) + val (s, t) = convertToKernel(f) match { + case K.equality(s, t) => (s, t) + case _ => throw new
Exception(s"$name: Expected an equality, but got $f") + } + Some((K.RightSubstEq(convertToKernel(sequent), numbermap(t1), Seq((s, t)), (Seq(x), fl)), name)) + case _ => None + } + } + + object LeftSubst { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftSubst", Seq(_, StrOrNum(n), StrOrNum(_), Prop(fl), String(xl)), Seq(t1)), origin) => + val f = sequent.lhs(n.toInt) + val x = K.Variable(sanitize(xl), K.Ind) + val (s, t) = convertToKernel(f) match { + case K.equality(s, t) => (s, t) + case _ => throw new Exception(s"$name: Expected an equality, but got $f") + } + Some((K.LeftSubstEq(convertToKernel(sequent), numbermap(t1), Seq((s, t)), (Seq(x), fl)), name)) + case _ => None + } + } + + object LeftSubstIff { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftSubstIff", Seq(_, StrOrNum(n), StrOrNum(_), Prop(fl), String(xl)), Seq(t1)), origin) => + val f = sequent.lhs(n.toInt) + val x = K.Variable(sanitize(xl), K.Prop) + val (s, t) = convertToKernel(f) match { + case K.iff(s, t) => (s, t) + case _ => throw new Exception(s"$name: Expected an iff, but got $f") + } + Some((K.LeftSubstEq(convertToKernel(sequent), numbermap(t1), Seq((s, t)), (Seq(x), fl)), name)) + case _ => None + } + } + + object RightSubstIff { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightSubstIff", Seq(_, StrOrNum(n), StrOrNum(_), Prop(fl), String(al)), Seq(t1)), origin) => + val f = sequent.lhs(n.toInt) + val a = K.Variable(al, K.Prop) + val (s, t) = convertToKernel(f) match { + case K.iff(s, t) => (s, t) + case _ => throw new Exception(s"$name: Expected an iff, but got $f") + } + Some((K.RightSubstEq(convertToKernel(sequent), numbermap(t1), Seq((s, t)), (Seq(a), fl)), name)) + case _ => None + } + } + + + + object LeftHyp { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftHyp", Seq(_, StrOrNum(n)), Seq()), origin) => + val left = sequent.lhs.map(convertToKernel) + val formula = left(n.toInt) + Some((K.RestateTrue(convertToKernel(sequent)), name)) + case _ => + None + } + } + + object LeftNOr { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotOr", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + case _ => None + } + } + + object LeftNNot { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent +
)(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotNot", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + case _ => None + } + } + object LeftNImp { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotImplies", Seq(_, StrOrNum(n)), Seq(t1)), origin) => + Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) + case _ => None + } + } + + object LeftNAnd { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotAnd", Seq(_, StrOrNum(n)), Seq(t1, t2)), origin) => + val f = sequent.lhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Neg(K.And(x, y)) => (x, y) + case _ => throw new Exception(s"$name: Expected a negated conjunction, but got $f") + } + Some((K.LeftOr(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(K.neg(a), K.neg(b))), name)) + case _ => None + } + } + object LeftImp2 { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftImp2", Seq(_, StrOrNum(n)), Seq(t1, t2)), origin) => + val f = sequent.lhs(n.toInt) + val (a, b) = convertToKernel(f) match { + case K.Implies(x, y) => (x, y) + case _ => throw new Exception(s"$name: Expected an implication, but got $f") + } + Some((K.LeftOr(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(K.neg(a), b)), name)) + case _ => None + } + } + + object LeftNAll { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotAll", Seq(_, StrOrNum(n), String(xl)), Seq(t1)), origin) => // x has to be a GeneralTerm representing a variable, i.e.
$fot(x) + val f = sequent.lhs(n.toInt) + val x = K.Variable(sanitize(xl), K.Ind) + val (y: K.Variable, phi: K.Expression) = convertToKernel(f) match { + case K.Neg(K.forall(K.Lambda(x, phi))) => (x, phi) + case _ => throw new Exception(s"$name: Expected a negated universal quantification, but got $f") + } + if x == y then Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), K.neg(phi), x), name)) + else + Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), K.substituteVariables(K.neg(phi), Map(y -> x)), x), name)) + case _ => None + } + } + object LeftNEx { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotEx", Seq(_, StrOrNum(n), Ind(t)), Seq(t1)), origin) => + val f = sequent.lhs(n.toInt) + val (x, phi) = convertToKernel(f) match { + case K.Neg(K.Exists(x, phi)) => (x, phi) + case _ => throw new Exception(s"$name: Expected a negated existential quantification, but got $f") + } + Some((K.LeftForall(convertToKernel(sequent), numbermap(t1), K.neg(phi), x, t), name)) + case _ => None + } + } + + object InstFun { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("instFun", Seq(_, String(sfl), Ind(t), Sequence(varsl)), Seq(t1)), origin) => + val vars = varsl.map { + case String(xl) => K.Variable(sanitize(xl), K.Ind) + case _ => throw new Exception(s"$name: Expected a list of strings, but got $varsl") + } + val sf = K.Variable(sfl, K.functionType(vars.size)) + val seq = convertToKernel(sequent) + val prem = convertToKernel(sequentmap(t1)) + Some((K.InstSchema(seq, numbermap(t1), Map(sf -> K.lambda(vars, t))), name)) + case _ => None + } + } + + object InstPred { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("instPred", Seq(_, String(sfl), Prop(phi), Sequence(varsl)), Seq(t1)), origin) => + val vars = varsl.map { + case String(xl) => K.Variable(sanitize(xl), K.Ind) + case _ => throw new Exception(s"$name: Expected a list of strings, but got $varsl") + } + val sp = K.Variable(sfl, K.predicateType(vars.size)) + val seq = convertToKernel(sequent) + Some((K.InstSchema(seq, numbermap(t1), Map(sp -> K.lambda(vars, phi))), name)) + case _ => None + } + } + + object InstMult { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("instMult", Seq(_, Sequence(instantiations)), Seq(t1)), origin) => + val map = instantiations.map { + case Tuple(Seq(String(sfl), expr, Sequence(varsl))) => + val vars = varsl.map { + case String(xl) => K.Variable(sanitize(xl), K.Ind) + case _ => throw new Exception(s"$name: Expected a list of strings, but got $varsl") + } + expr match + case Ind(t) => + val sf = K.Variable(sfl, K.functionType(vars.size)) + sf -> K.lambda(vars, t) + case Prop(phi) => + val sp = K.Variable(sfl,
K.predicateType(vars.size)) + sp -> K.lambda(vars, phi) + }.toMap + val seq = convertToKernel(sequent) + Some((K.InstSchema(seq, numbermap(t1), map), name)) + case _ => None + } + } + object Clausify { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("clausify", Seq(_, StrOrNum(n)), Seq()), origin) => + val seq = convertToKernel(sequent) + Tautology.solveSequent(seq) match + case Left(proof) => Some((K.SCSubproof(proof), name)) + case Right(msg, seq) => throw new Exception(s"Failed to justify clausify inference for sequent step ${name} with sequent ${seq.repr}: $msg") + case _ => None + } + } + + object ElimIffRefl { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("elimIffRefl", Seq(_, StrOrNum(_)), Seq(t1)), origin) => + val seq = convertToKernel(sequent) + Some((K.Weakening(seq, numbermap(t1)), name)) + case _ => None + } + } + + object ElimEqRefl { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("elimEqRefl", Seq(_, StrOrNum(_)), Seq(t1)), origin) => + val seq = convertToKernel(sequent) + Some((K.Weakening(seq, numbermap(t1)), name)) + case _ => None + } + } + + object RightNnf { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightNnf", Seq(_, StrOrNum(_), StrOrNum(_)), Seq(t1)), origin) => + val seq = convertToKernel(sequent) + Some((K.Weakening(seq, numbermap(t1)), name)) + case _ => None + } + } + + object RightPrenex { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightPrenex", Seq(_, StrOrNum(n), StrOrNum(m)), Seq(t1)), origin) => + val seq = convertToKernel(sequent) + val prem = sequentmap(t1) + val formula1 = convertToKernel(prem.rhs(n.toInt)) + val formula2 = convertToKernel(sequent.rhs(m.toInt)) + Tableau.solve(formula1 |- formula2) match + case Some(proof) => + val steps = proof.steps + val last = K.Cut(seq, -1, steps.size-1, formula1) + Some((K.SCSubproof(K.SCProof(steps :+ last, IndexedSeq(convertToKernel(prem))), IndexedSeq(numbermap(t1))), name)) + case None => throw new Exception(s"Failed to justify prenex inference for sequent ${seq.repr}") + case _ => None + } + } + + object InstForall { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("instForall", Seq(_, StrOrNum(n), Ind(t)), Seq(t1)), origin) => + val seq = convertToKernel(sequent) + val prem = 
sequentmap(t1) + val formula = convertToKernel(prem.rhs(n.toInt)) + formula match { + case r @ K.Application(K.forall, K.Lambda(x, f)) => + val newf = K.substituteVariables(f, Map(x -> t)) + val s0 = K.Hypothesis((newf |- newf), newf) + val s1 = K.LeftForall((r |- newf), 0, f, x, t) + val s2 = K.Cut(seq +>> newf, -1, 1, r) + Some((K.SCSubproof(K.SCProof(IndexedSeq(s0, s1, s2), IndexedSeq(convertToKernel(sequentmap(t1)))), Seq(numbermap(t1))), name)) + case _ => throw new IllegalArgumentException(s"InstForall: Expected a universal quantification, but got ${formula.repr}") + } + case _ => None + } + } + + object Res { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("res", Seq(_, StrOrNum(n)), Seq(t1, t2)), origin) => + val seq = convertToKernel(sequent) + val seqt1 = convertToKernel(sequentmap(t1)) + val seqt2 = convertToKernel(sequentmap(t2)) + val formula1 = convertToKernel(sequentmap(t1).rhs(n.toInt)) + val seqint = seqt2 +<< formula1 ->> K.!(formula1) + val subproof = K.SCProof(IndexedSeq(K.Restate(seqint, -2), K.Cut(seq, -1, 0, formula1)), IndexedSeq(seqt1, seqt2)) + val step = K.SCSubproof(subproof, IndexedSeq(numbermap(t1), numbermap(t2))) + Some((step, name)) + case _ => None + } + } + + object ExistsIffEpsilon { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("existsIffEpsilon", Seq(_, StrOrNum(n)), Seq()), origin) => + val f = convertToKernel(sequent.rhs(n.toInt)) + def extractForall(f: K.Expression): (List[K.Variable], K.Expression, K.Expression) = f match + case K.Forall(x, phi) => val (xs, psi, psi1) = extractForall(phi); (x :: xs, psi, psi1) + case K.Iff(phi, psi) => (Nil, phi, psi) + case _ => throw new Exception(s"Expected an iff, but got $f") + val (bounds, exi, epsi) = extractForall(f) + val (x, inner) = K.Exists.unapply(exi).get + val epsiterm = K.epsilon(K.lambda(x, inner)) + val epsiform = K.substituteVariables(inner, Map(x -> epsiterm)) + val s0 = K.RestateTrue(inner |- inner) + val s1 = K.RightEpsilon(inner |- epsiform, 0, inner, x, x) + val s2 = K.LeftExists(exi |- epsiform, 1, inner, x) + val s3 = K.Restate(() |- (K.==>(exi, epsiform)), 2) + val s4 = K.RestateTrue(epsiform |- epsiform) + val s5 = K.RightExists(epsiform |- exi, 4, inner, x, epsiterm) + val s6 = K.Restate(() |- (K.==>(epsiform, exi)), 5) + val endIff = K.<=>(exi, epsiform) + val s7 = K.RightIff(() |- endIff, 3, 6, exi, epsiform) + var t1 = 6 + var steps = List(s7, s6, s5, s4, s3, s2, s1, s0) + bounds.foldLeft(endIff)((acc, x) => + val newf = K.forall(x, acc) + t1 += 1 + steps = K.RightForall(() |- newf, t1, acc, x) :: steps + newf + ) + val subproof = K.SCProof(steps.reverse.toIndexedSeq, IndexedSeq()) + val step = K.SCSubproof(subproof, IndexedSeq()) + Some((step, name)) + + + case _ => None + } + } + + object RightSubstFun { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightSubstFun", Seq(_, StrOrNum(n), StrOrNum(_), String(xl), Prop(fl) 
), Seq(t1)), origin) => + val f = convertToKernel(sequent.lhs(n.toInt)) + def extractForall(f: K.Expression): (List[K.Variable], K.Expression, K.Expression) = f match + case K.Forall(x, phi) => val (xs, psi, psi1) = extractForall(phi); (x :: xs, psi, psi1) + case K.Equality(phi, psi) => (Nil, phi, psi) + case _ => throw new Exception(s"Expected an equality, but got $f") + val (bounds, left, right) = extractForall(f) + val hole = K.Variable(sanitize(xl), K.functionType(bounds.size)) + val x = K.Variable(sanitize(xl), K.Ind) + val equals = Seq((K.lambda(bounds, left), K.lambda(bounds, right))) + Some((K.RightSubstEq(convertToKernel(sequent), numbermap(t1), equals, (Seq(hole), fl)), name)) + case _ => None + } + } + + object RightSubstPred { + def unapply(ann_seq: FOFAnnotated)(using defctx: DefContext, + numbermap: String => Int, + sequentmap: String => FOF.Sequent + )(using maps: MapTriplet): Option[(K.SCProofStep, String)] = + ann_seq match { + case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightSubstPred", Seq(_, StrOrNum(n), StrOrNum(_), String(xl), Prop(fl)), Seq(t1)), origin) => + val f = convertToKernel(sequent.lhs(n.toInt)) + def extractForall(f: K.Expression): (List[K.Variable], K.Expression, K.Expression) = f match + case K.Forall(x, phi) => val (xs, psi, psi1) = extractForall(phi); (x :: xs, psi, psi1) + case K.Iff(phi, psi) => (Nil, phi, psi) + case _ => throw new Exception(s"Expected an iff, but got $f") + val (bounds, left, right) = extractForall(f) + val hole = K.Variable(sanitize(xl), K.predicateType(bounds.size)) + val x = K.Variable(sanitize(xl), K.Ind) + val equals = Seq((K.lambda(bounds, left), K.lambda(bounds, right))) + Some((K.RightSubstEq(convertToKernel(sequent), numbermap(t1), equals, (Seq(hole), fl)), name)) + case _ => None + } + } + + + } +} diff --git a/lisa-utils/src/main/scala/lisa/utils/tptp/package.scala b/lisa-sets2/src/main/scala/lisa/tptp/package.scala similarity index 91% rename from lisa-utils/src/main/scala/lisa/utils/tptp/package.scala rename to lisa-sets2/src/main/scala/lisa/tptp/package.scala index 56192deef..18d6eb96a 100644 --- a/lisa-utils/src/main/scala/lisa/utils/tptp/package.scala +++ b/lisa-sets2/src/main/scala/lisa/tptp/package.scala @@ -1,4 +1,4 @@ -package lisa.utils.tptp +package lisa.tptp import leo.datastructures.TPTP import lisa.utils.K @@ -19,7 +19,7 @@ sealed trait AnnotatedStatement { } } -case class AnnotatedFormula(role: String, name: String, formula: K.Formula, annotations: TPTP.Annotations) extends AnnotatedStatement +case class AnnotatedFormula(role: String, name: String, formula: K.Expression, annotations: TPTP.Annotations) extends AnnotatedStatement case class AnnotatedSequent(role: String, name: String, sequent: K.Sequent, annotations: TPTP.Annotations) extends AnnotatedStatement diff --git a/lisa-sets2/src/test/resources/goeland_test_1.p b/lisa-sets2/src/test/resources/goeland_test_1.p new file mode 100644 index 000000000..df6eddef1 --- /dev/null +++ b/lisa-sets2/src/test/resources/goeland_test_1.p @@ -0,0 +1,19 @@ +fof(c_simple_p, conjecture, ((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))). + +fof(f7, plain, [~(((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))), (! [X5] : (p_2(X5))), ~((p_2(a_3) & p_2(b_4))), ~(p_2(a_3)), p_2(a_3)] --> [], inference(leftHyp, [status(thm), 3], [])). + +fof(f5, plain, [~(((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))), (! [X5] : (p_2(X5))), ~((p_2(a_3) & p_2(b_4))), ~(p_2(a_3))] --> [], inference(leftForall, [status(thm), 1, $fot(a_3)], [f7])). + +fof(f8, plain, [~(((! 
[X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))), (! [X5] : (p_2(X5))), ~((p_2(a_3) & p_2(b_4))), ~(p_2(b_4)), p_2(b_4)] --> [], inference(leftHyp, [status(thm), 3], [])). + +fof(f6, plain, [~(((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))), (! [X5] : (p_2(X5))), ~((p_2(a_3) & p_2(b_4))), ~(p_2(b_4))] --> [], inference(leftForall, [status(thm), 1, $fot(b_4)], [f8])). + +fof(f4, plain, [~(((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))), (! [X5] : (p_2(X5))), ~((p_2(a_3) & p_2(b_4)))] --> [], inference(leftNotAnd, [status(thm), 2], [f5, f6])). + +fof(f3, plain, [~(((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4))))] --> [], inference(leftNotImplies, [status(thm), 0], [f4])). + +fof(f2, plain, [((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))] --> [((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))], inference(hyp, [status(thm), 0], [])). + +fof(f1, plain, [] --> [((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4))), ~(((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4))))], inference(rightNot, [status(thm), 1], [f2])). + +fof(f0, plain, [] --> [((! [X5] : (p_2(X5))) => (p_2(a_3) & p_2(b_4)))], inference(cut, [status(thm), 1], [f1, f3])). \ No newline at end of file diff --git a/lisa-sets2/src/test/resources/level2_steps/instMult.p b/lisa-sets2/src/test/resources/level2_steps/instMult.p new file mode 100644 index 000000000..c08a33d7e --- /dev/null +++ b/lisa-sets2/src/test/resources/level2_steps/instMult.p @@ -0,0 +1,31 @@ +fof(a1, axiom, [q(X)] --> []). +fof(f1, plain, [q(b)] --> [], inference(instMult, [status(thm), [tuple3('X', $fot(b), [])]], [a1])). + +fof(a2, axiom, [q(g(X, f(X)))] --> []). +fof(f2, plain, [q(g(f(c), f(f(c))))] --> [], inference(instMult, [status(thm), [tuple3('X', $fot(f(c)), [])]], [a2])). + +fof(a3, axiom, [q(X), q(g(X, f(X)))] --> [q(g(f(X, Y)))]). +fof(f3, plain, [q(f(b)), q(g(f(b), f(f(b))))] --> [q(g(f(f(b), Y)))], inference(instMult, [status(thm), [tuple3('X', $fot(f(b)), [])]], [a3])). + + +fof(a4, axiom, [![X] : q(g(X, f(Y)))] --> [q(g(f(X, Y)))]). +fof(f4, plain, [![Z] : q(g(Z, f(f(X))))] --> [q(g(f(X, f(X))))], inference(instMult, [status(thm), [tuple3('Y', $fot(f(X)), [])]], [a4])). + +fof(a5, axiom, [![X] : q(g(X, f(Y)))] --> [q(g(f(X, Y)))]). +fof(f5, plain, [![Y] : q(g(Y, f(f(X))))] --> [q(g(f(X, f(X))))], inference(instMult, [status(thm), [tuple3('Y', $fot(f(X)), [])]], [a5])). + + +fof(a6, axiom, [~(A | ~A)] --> []). +fof(f6, plain, [~(~p(b) | ~(~p(b)) )] --> [], inference(instMult, [status(thm), [tuple3('A', $fof(~p(b)), [])]], [a6])). + +fof(a7, axiom, [A, (A & ~p(b))] --> [(~A) => B]). +fof(f7, plain, [~p(B), (~p(B) & ~p(b))] --> [(~~p(B)) => B], inference(instMult, [status(thm), [tuple3('A', $fof(~p(B)), [])]], [a7])). + +fof(a8, axiom, [![X] : P(b, X)] --> []). +fof(f8, plain, [![A] : p(A, Z)] --> [], inference(instMult, [status(thm), [tuple3('P', $fof(p(Y, Z)), ['X', 'Y'])]], [a8])). + + +fof(a9, axiom, [![X] : P(Y, X)] --> []). +fof(f9, plain, [![A] : p(A, f(X))] --> [], inference(instMult, [status(thm), [tuple3('P', $fof(p(Y, X)), ['X', 'Y']), tuple3('Y', $fot(f(X)), [])]], [a9])). + + diff --git a/lisa-sets2/src/test/resources/p9_test_1.p b/lisa-sets2/src/test/resources/p9_test_1.p new file mode 100644 index 000000000..babe13ef3 --- /dev/null +++ b/lisa-sets2/src/test/resources/p9_test_1.p @@ -0,0 +1,36 @@ +fof(test_minimaliste, conjecture, [] --> [(! [X]: p(X) => (p(a) & p(b)))]). +fof(phi, let, ~(! [X]: p(X) => (p(a) & p(b)))). +fof(negated_conjecture, assumption, [~(! [X]: p(X) => (p(a) & p(b)))] --> [~(! 
[X]: p(X) => (p(a) & p(b)))], inference(hyp, [status(thm), 0], [])). +fof(nnf_step, plain, [$phi] --> [(! [X]: p(X) & (~p(a) | ~p(b)))], inference(rightNnf, [status(thm), 0, 0], [negated_conjecture])). +fof(prenex_step, plain, [$phi] --> [! [X]: (p(X) & (~p(a) | ~p(b)))], inference(rightPrenex, [status(thm), 0, 0], [nnf_step])). +fof(i0, plain, [$phi] --> [(p(V0) & (~p(a) | ~p(b)))], inference(instForall, [status(thm), 0, $fot(V0)], [prenex_step])). +fof(tsStep0, let, (Ts3 <=> (~p(a) | ~p(b)))). +fof(tsStep1, let, ! [V0]: (Ts1(V0) <=> (p(V0) & Ts3))). +fof(tsStepExpl1, plain, [$phi,$tsStep0] --> [(p(V0) & Ts3)], inference(rightSubstIff, [status(thm), 1, $fof((p(V0) & A)), 'A'], [i0])). +fof(tsStepExpl2, plain, [$phi,$tsStep0,(Ts1(V0) <=> (p(V0) & Ts3))] --> [Ts1(V0)], inference(rightSubstIff, [status(thm), 2, $fof(A), 'A'], [tsStepExpl1])). +fof(4, plain, [$phi,$tsStep0,$tsStep1] --> [Ts1(V0)], inference(leftForall, [status(thm), 2, $fot(V0)], [tsStepExpl2])). +fof(a3, plain, [$phi,$tsStep0,(Ts1(V0) <=> (p(V0) & Ts3))] --> [~Ts1(V0),p(V0)], inference(clausify, [status(thm), 2], [])). +fof(3, plain, [$phi,$tsStep0,$tsStep1] --> [~Ts1(V0),p(V0)], inference(leftForall, [status(thm), 2, $fot(V0)], [a3])). +fof(a6, plain, [$phi,$tsStep0,(Ts1(V0) <=> (p(V0) & Ts3))] --> [~Ts1(V0),Ts3], inference(clausify, [status(thm), 2], [])). +fof(6, plain, [$phi,$tsStep0,$tsStep1] --> [~Ts1(V0),Ts3], inference(leftForall, [status(thm), 2, $fot(V0)], [a6])). +fof(7, plain, [$phi,$tsStep0,$tsStep1] --> [~Ts3,~p(a),~p(b)], inference(clausify, [status(thm), 1], [])). +fof(15, plain, [$phi,$tsStep0,$tsStep1] --> [~Ts1(V100),p(V100)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), [])]], [3])). +fof(16, plain, [$phi,$tsStep0,$tsStep1] --> [Ts1(V100)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), [])]], [4])). +fof(17, plain, [$phi,$tsStep0,$tsStep1] --> [p(V100)], inference(res, [status(thm), 0], [16, 15])). +fof(10, plain, [$phi,$tsStep0,$tsStep1] --> [p(V0)], inference(instMult, [status(thm), [tuple3('V100', $fot(V0), [])]], [17])). +fof(18, plain, [$phi,$tsStep0,$tsStep1] --> [~Ts1(V100),Ts3], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), [])]], [6])). +fof(19, plain, [$phi,$tsStep0,$tsStep1] --> [Ts1(V100)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), [])]], [4])). +fof(11, plain, [$phi,$tsStep0,$tsStep1] --> [Ts3], inference(res, [status(thm), 0], [19, 18])). +fof(a012, plain, [$phi,$tsStep0,$tsStep1] --> [~p(a),~p(b)], inference(res, [status(thm), 0], [11, 7])). +fof(20, plain, [$phi,$tsStep0,$tsStep1] --> [p(a)], inference(instMult, [status(thm), [tuple3('V0', $fot(a), [])]], [10])). +fof(a112, plain, [$phi,$tsStep0,$tsStep1] --> [~p(b)], inference(res, [status(thm), 0], [20, a012])). +fof(21, plain, [$phi,$tsStep0,$tsStep1] --> [p(b)], inference(instMult, [status(thm), [tuple3('V0', $fot(b), [])]], [10])). +fof(12, plain, [$phi,$tsStep0,$tsStep1] --> [], inference(res, [status(thm), 0], [21, a112])). +fof(psi, let, (! [X]: p(X) => (p(a) & p(b)))). +fof(addPsi0, assumption, [$psi] --> [$psi], inference(hyp, [status(thm), 0], [])). +fof(addPsi1, plain, [] --> [$psi,$phi], inference(rightNot, [status(thm), 0], [addPsi0])). +fof(addPsi2, plain, [$tsStep0,$tsStep1] --> [$psi], inference(cut, [status(thm), 1], [addPsi1, 12])). +fof(removeTseitin0, plain, [$tsStep0,! [V0]: ((p(V0) & Ts3) <=> (p(V0) & Ts3))] --> [$psi], inference(instPred, [status(thm), 'Ts1', $fof((p(V0) & Ts3)), ['V0']], [addPsi2])). 
+fof(removeTseitin1, plain, [$tsStep0] --> [$psi], inference(elimIffRefl, [status(thm), 1], [removeTseitin0])). +fof(removeTseitin2, plain, [((~p(a) | ~p(b)) <=> (~p(a) | ~p(b)))] --> [$psi], inference(instPred, [status(thm), 'Ts3', $fof((~p(a) | ~p(b))), []], [removeTseitin1])). +fof(removeTseitin3, plain, [] --> [$psi], inference(elimIffRefl, [status(thm), 0], [removeTseitin2])). \ No newline at end of file diff --git a/lisa-sets2/src/test/resources/p9_test_2.p b/lisa-sets2/src/test/resources/p9_test_2.p new file mode 100644 index 000000000..3d6f69333 --- /dev/null +++ b/lisa-sets2/src/test/resources/p9_test_2.p @@ -0,0 +1,55 @@ +fof(c1, conjecture, [] --> [((! [X]: p(X) | ! [Y]: q(Y)) => (p(c) | q(c)))]). +fof(phi, let, ~((! [X]: p(X) | ! [Y]: q(Y)) => (p(c) | q(c)))). +fof(negated_conjecture, assumption, [~((! [X]: p(X) | ! [Y]: q(Y)) => (p(c) | q(c)))] --> [~((! [X]: p(X) | ! [Y]: q(Y)) => (p(c) | q(c)))], inference(hyp, [status(thm), 0], [])). +fof(nnf_step, plain, [$phi] --> [((! [X]: p(X) | ! [Y]: q(Y)) & (~p(c) & ~q(c)))], inference(rightNnf, [status(thm), 0, 0], [negated_conjecture])). +fof(prenex_step, plain, [$phi] --> [! [X]: ! [Y]: ((p(X) | q(Y)) & (~p(c) & ~q(c)))], inference(rightPrenex, [status(thm), 0, 0], [nnf_step])). +fof(i0, plain, [$phi] --> [! [Y]: ((p(V0) | q(Y)) & (~p(c) & ~q(c)))], inference(instForall, [status(thm), 0, $fot(V0)], [prenex_step])). +fof(i1, plain, [$phi] --> [((p(V0) | q(V1)) & (~p(c) & ~q(c)))], inference(instForall, [status(thm), 0, $fot(V1)], [i0])). +fof(tsStep0, let, (Ts5 <=> (~p(c) & ~q(c)))). +fof(tsStep1, let, ! [V1]: ! [V0]: (Ts2(V0,V1) <=> (p(V0) | q(V1)))). +fof(tsStep2, let, ! [V1]: ! [V0]: (Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))). +fof(tsStepExpl0, plain, [$phi,$tsStep0] --> [((p(V0) | q(V1)) & Ts5)], inference(rightSubstIff, [status(thm), 1, 1, $fof(((p(V0) | q(V1)) & A)), 'A'], [i1])). +fof(tsStepExpl1, plain, [$phi,$tsStep0,(Ts2(V0,V1) <=> (p(V0) | q(V1)))] --> [(Ts2(V0,V1) & Ts5)], inference(rightSubstIff, [status(thm), 2, 1, $fof((A & Ts5)), 'A'], [tsStepExpl0])). +fof(tsStepExpl2, plain, [$phi,$tsStep0,! [V0]: (Ts2(V0,V1) <=> (p(V0) | q(V1)))] --> [(Ts2(V0,V1) & Ts5)], inference(leftForall, [status(thm), 2, $fot(V0)], [tsStepExpl1])). +fof(tsStepExpl3, plain, [$phi,$tsStep0,$tsStep1] --> [(Ts2(V0,V1) & Ts5)], inference(leftForall, [status(thm), 2, $fot(V1)], [tsStepExpl2])). +fof(tsStepExpl4, plain, [$phi,$tsStep0,$tsStep1,(Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))] --> [Ts3(V0,V1)], inference(rightSubstIff, [status(thm), 3, 1, $fof(A), 'A'], [tsStepExpl3])). +fof(tsStepExpl5, plain, [$phi,$tsStep0,$tsStep1,! [V0]: (Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))] --> [Ts3(V0,V1)], inference(leftForall, [status(thm), 3, $fot(V0)], [tsStepExpl4])). +fof(3, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts3(V0,V1)], inference(leftForall, [status(thm), 3, $fot(V1)], [tsStepExpl5])). +fof(a2s2, plain, [$phi,$tsStep0,$tsStep1,(Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))] --> [~Ts3(V0,V1),Ts2(V0,V1)], inference(clausify, [status(thm), 3], [])). +fof(a2s1, plain, [$phi,$tsStep0,$tsStep1,! [V0]: (Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))] --> [~Ts3(V0,V1),Ts2(V0,V1)], inference(leftForall, [status(thm), 3, $fot(V0)], [a2s2])). +fof(2, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts3(V0,V1),Ts2(V0,V1)], inference(leftForall, [status(thm), 3, $fot(V1)], [a2s1])). +fof(a5s2, plain, [$phi,$tsStep0,$tsStep1,(Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))] --> [~Ts3(V0,V1),Ts5], inference(clausify, [status(thm), 3], [])). +fof(a5s1, plain, [$phi,$tsStep0,$tsStep1,! 
[V0]: (Ts3(V0,V1) <=> (Ts2(V0,V1) & Ts5))] --> [~Ts3(V0,V1),Ts5], inference(leftForall, [status(thm), 3, $fot(V0)], [a5s2])). +fof(5, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts3(V0,V1),Ts5], inference(leftForall, [status(thm), 3, $fot(V1)], [a5s1])). +fof(a7s2, plain, [$phi,$tsStep0,$tsStep1,(Ts2(V0,V1) <=> (p(V0) | q(V1)))] --> [~Ts2(V0,V1),p(V0),q(V1)], inference(clausify, [status(thm), 3], [])). +fof(a7s1, plain, [$phi,$tsStep0,$tsStep1,! [V0]: (Ts2(V0,V1) <=> (p(V0) | q(V1)))] --> [~Ts2(V0,V1),p(V0),q(V1)], inference(leftForall, [status(thm), 3, $fot(V0)], [a7s2])). +fof(7, plain, [$phi,$tsStep0,! [V1]: ! [V0]: (Ts2(V0,V1) <=> (p(V0) | q(V1))),$tsStep2] --> [~Ts2(V0,V1),p(V0),q(V1)], inference(leftForall, [status(thm), 2, $fot(V1)], [a7s1])). +fof(19, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts3(V100,V101),Ts2(V100,V101)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), []), tuple3('V1', $fot(V101), [])]], [2])). +fof(20, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts3(V100,V101)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), []), tuple3('V1', $fot(V101), [])]], [3])). +fof(21, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts2(V100,V101)], inference(res, [status(thm), 0], [20, 19])). +fof(9, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts2(V0,V1)], inference(instMult, [status(thm), [tuple3('V100', $fot(V0), []), tuple3('V101', $fot(V1), [])]], [21])). +fof(10, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts5,~p(c)], inference(clausify, [status(thm), 1], [])). +fof(22, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts2(V100,V101)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), []), tuple3('V1', $fot(V101), [])]], [9])). +fof(23, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts2(V100,V101),p(V100),q(V101)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), []), tuple3('V1', $fot(V101), [])]], [7])). +fof(24, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [p(V100),q(V101)], inference(res, [status(thm), 0], [22, 23])). +fof(12, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [p(V0),q(V1)], inference(instMult, [status(thm), [tuple3('V100', $fot(V0), []), tuple3('V101', $fot(V1), [])]], [24])). +fof(25, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [p(c),q(V1)], inference(instMult, [status(thm), [tuple3('V0', $fot(c), [])]], [12])). +fof(26, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [q(V1),~Ts5], inference(res, [status(thm), 0], [25, 10])). +fof(13, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [q(V0),~Ts5], inference(instMult, [status(thm), [tuple3('V1', $fot(V0), [])]], [26])). +fof(14, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts5,~q(c)], inference(clausify, [status(thm), 1], [])). +fof(27, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts3(V100,V101),Ts5], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), []), tuple3('V1', $fot(V101), [])]], [5])). +fof(28, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts3(V100,V101)], inference(instMult, [status(thm), [tuple3('V0', $fot(V100), []), tuple3('V1', $fot(V101), [])]], [3])). +fof(15, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [Ts5], inference(res, [status(thm), 0], [28, 27])). +fof(29, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [q(c),~Ts5], inference(instMult, [status(thm), [tuple3('V0', $fot(c), [])]], [13])). +fof(16, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [~Ts5,~Ts5], inference(res, [status(thm), 0], [29, 14])). +fof(17, plain, [$phi,$tsStep0,$tsStep1,$tsStep2] --> [], inference(res, [status(thm), 0], [15, 16])). +fof(psi, let, ((! [X]: p(X) | ! 
[Y]: q(Y)) => (p(c) | q(c)))). +fof(addPsi0, assumption, [$psi] --> [$psi], inference(hyp, [status(thm), 0], [])). +fof(addPsi1, plain, [] --> [$psi,$phi], inference(rightNot, [status(thm), 0], [addPsi0])). +fof(addPsi2, plain, [$tsStep0,$tsStep1,$tsStep2] --> [$psi], inference(cut, [status(thm), 1], [addPsi1, 17])). +fof(removeTseitin0, plain, [$tsStep0,$tsStep1,! [V1]: ! [V0]: ((Ts2(V0,V1) & Ts5) <=> (Ts2(V0,V1) & Ts5))] --> [$psi], inference(instPred, [status(thm), 'Ts3', $fof((Ts2(V0,V1) & Ts5)), ['V0','V1']], [addPsi2])). +fof(removeTseitin1, plain, [$tsStep0,$tsStep1] --> [$psi], inference(elimIffRefl, [status(thm), 2], [removeTseitin0])). +fof(removeTseitin2, plain, [$tsStep0,! [V1]: ! [V0]: ((p(V0) | q(V1)) <=> (p(V0) | q(V1)))] --> [$psi], inference(instPred, [status(thm), 'Ts2', $fof((p(V0) | q(V1))), ['V0','V1']], [removeTseitin1])). +fof(removeTseitin3, plain, [$tsStep0] --> [$psi], inference(elimIffRefl, [status(thm), 1], [removeTseitin2])). +fof(removeTseitin4, plain, [((~p(c) & ~q(c)) <=> (~p(c) & ~q(c)))] --> [$psi], inference(instPred, [status(thm), 'Ts5', $fof((~p(c) & ~q(c))), []], [removeTseitin3])). +fof(removeTseitin5, plain, [] --> [$psi], inference(elimIffRefl, [status(thm), 0], [removeTseitin4])). \ No newline at end of file diff --git a/lisa-sets2/src/test/resources/steps_tests/cut.p b/lisa-sets2/src/test/resources/steps_tests/cut.p new file mode 100644 index 000000000..efc73f636 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/cut.p @@ -0,0 +1,21 @@ +fof(a1_1, axiom, [p] --> [q]). +fof(a1_2, axiom, [q] --> [r]). +fof(f1, plain, [p] --> [r], inference(cut, [status(thm), 0, 0], [a1_1, a1_2])). + +fof(a2_1, axiom, [p, q] --> [r, s]). +fof(a2_2, axiom, [s, t] --> [u, v]). +fof(f2, plain, [p, q, t] --> [r, u, v], inference(cut, [status(thm), 1, 0], [a2_1, a2_2])). + +fof(a3_1, axiom, [p(X) & Q(X, Y), r(Y)] --> [s(X, c)]). +fof(a3_2, axiom, [s(X, c)] --> [r(t)]). +fof(f3, plain, [p(X) & Q(X, Y), r(Y)] --> [r(t)], inference(cut, [status(thm), 0, 0], [a3_1, a3_2])). + + +fof(a4_1, axiom, [![X, Y] : (p(X) & Q(X, Y)), r(Y)] --> [![X] : s(X, c)]). +fof(a4_2, axiom, [![X] : s(X, c)] --> [![Y] : r(f(t))]). +fof(f4, plain, [![X, Y] : (p(X) & Q(X, Y)), r(Y)] --> [![Y] : r(f(t))], inference(cut, [status(thm), 0, 0], [a4_1, a4_2])). + + +%fof(a4_1, axiom, [![X, Y] : (p(X) & Q(#[X] : p(X), Y)), r(Y)] --> [![X] : s(X, c)]). +%fof(a4_2, axiom, [![X] : s(X, c)] --> [![Y] : r(f(t))]). +%fof(f4, plain, [![X, Y] : (p(X) & Q(#[X] : p(X), Y)), r(Y)] --> [![Y] : r(f(t))], inference(cut, [status(thm), 0, 0], [a4_1, a4_2])). \ No newline at end of file diff --git a/lisa-sets2/src/test/resources/steps_tests/hyp.p b/lisa-sets2/src/test/resources/steps_tests/hyp.p new file mode 100644 index 000000000..c16c1dbd8 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/hyp.p @@ -0,0 +1,13 @@ +fof(f1, plain, [p] --> [p], inference(hyp, [status(thm), 0], [])). +fof(f3, plain, [p, q] --> [p], inference(hyp, [status(thm), 0], [])). +fof(f4, plain, [q] --> [p, q], inference(hyp, [status(thm), 0], [])). +fof(f5, plain, [p, q] --> [r, q], inference(hyp, [status(thm), 1], [])). +fof(f6, plain, [p & q] --> [p & q], inference(hyp, [status(thm), 0], [])). +fof(f7, plain, [p | q] --> [p | q], inference(hyp, [status(thm), 0], [])). +fof(f8, plain, [p => q] --> [p => q], inference(hyp, [status(thm), 0], [])). +fof(f9, plain, [p(X, c) => q(X)] --> [p(X, c) => q(X)], inference(hyp, [status(thm), 0], [])). 
+fof(f10, plain, [![X] :( p(X, c) => q(X))] --> [![X] : (p(X, c) => q(X))], inference(hyp, [status(thm), 0], [])). +fof(f11, plain, [![X] : (p(X, c) => q(X))] --> [![Y] : (p(Y, c) => q(Y))], inference(hyp, [status(thm), 0], [])). +fof(f12, plain, [?[Y] : ![X] : (p(X, c) => q(X))] --> [?[X] : ![Y] : (p(Y, c) => q(Y))], inference(hyp, [status(thm), 0], [])). +fof(f13, plain, [?[Y] : ![X] : (p(X, c) => q(Y))] --> [?[X] : ![Y] : (p(Y, c) => q(X))], inference(hyp, [status(thm), 0], [])). + diff --git a/lisa-sets2/src/test/resources/steps_tests/instFun.p b/lisa-sets2/src/test/resources/steps_tests/instFun.p new file mode 100644 index 000000000..648758a14 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/instFun.p @@ -0,0 +1,30 @@ +fof(a1, axiom, [q(X)] --> []). +fof(f1, plain, [q(b)] --> [], inference(instFun, [status(thm), 'X', $fot(b), []], [a1])). + +fof(a2, axiom, [q(g(X, f(X)))] --> []). +fof(f2, plain, [q(g(f(c), f(f(c))))] --> [], inference(instFun, [status(thm), 'X', $fot(f(c)), []], [a2])). + +fof(a3, axiom, [q(X), q(g(X, f(X)))] --> [q(g(f(X, Y)))]). +fof(f3, plain, [q(f(b)), q(g(f(b), f(f(b))))] --> [q(g(f(f(b), Y)))], inference(instFun, [status(thm), 'X', $fot(f(b)), []], [a3])). + + +fof(a4, axiom, [![X] : q(g(X, f(Y)))] --> [q(g(f(X, Y)))]). +fof(f4, plain, [![Z] : q(g(Z, f(f(X))))] --> [q(g(f(X, f(X))))], inference(instFun, [status(thm), 'Y', $fot(f(X)), []], [a4])). + +fof(a5, axiom, [![X] : q(g(X, f(Y)))] --> [q(g(f(X, Y)))]). +fof(f5, plain, [![Y] : q(g(Y, f(f(X))))] --> [q(g(f(X, f(X))))], inference(instFun, [status(thm), 'Y', $fot(f(X)), []], [a5])). + +fof(a6, axiom, [q(F(c))] --> []). +fof(f6, plain, [q(g(c, c))] --> [], inference(instFun, [status(thm), 'F', $fot(g(X, X)), ['X']], [a6])). + +fof(a7, axiom, [q(G(X, f(c)))] --> []). +fof(f7, plain, [q(g(F(f(c)), F(F(X))))] --> [], inference(instFun, [status(thm), 'G', $fot(g(F(Y), F(F(X)))), ['X', 'Y']], [a7])). + +fof(a8, axiom, [![X] : q(G(X, f(c)))] --> []). +fof(f8, plain, [![X] : q(g(F(f(c)), F(F(X))))] --> [], inference(instFun, [status(thm), 'G', $fot(g(F(Y), F(F(X)))), ['X', 'Y']], [a8])). + + +fof(a9, axiom, [![X] : q(G(X, f(c)))] --> [?[Y] : q(G(b, f(Y)))]). +fof(f9, plain, [![X] : q(g(F(f(c)), F(F(X))))] --> [?[Y] : q(g(F(f(Y)), F(F(b))))], inference(instFun, [status(thm), 'G', $fot(g(F(Y), F(F(X)))), ['X', 'Y']], [a9])). + + diff --git a/lisa-sets2/src/test/resources/steps_tests/instPred.p b/lisa-sets2/src/test/resources/steps_tests/instPred.p new file mode 100644 index 000000000..3078860df --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/instPred.p @@ -0,0 +1,30 @@ +fof(a1, axiom, [A] --> []). +fof(f1, plain, [p(b, c) ] --> [], inference(instPred, [status(thm), 'A', $fof(p(b, c)), []], [a1])). + +fof(a2, axiom, [~(A | ~A)] --> []). +fof(f2, plain, [~(~p(b) | ~(~p(b)) )] --> [], inference(instPred, [status(thm), 'A', $fof(~p(b)), []], [a2])). + +fof(a3, axiom, [A, (A & ~p(b))] --> [(~A) => B]). +fof(f3, plain, [~p(B), (~p(B) & ~p(b))] --> [(~~p(B)) => B], inference(instPred, [status(thm), 'A', $fof(~p(B)), []], [a3])). + +fof(a4, axiom, [![X] : P(b, X)] --> []). +fof(f4, plain, [![A] : p(A, Z)] --> [], inference(instPred, [status(thm), 'P', $fof(p(Y, Z)), ['X', 'Y']], [a4])). + +fof(a5, axiom, [![X] : P(X, f(Y))] --> []). +fof(f5, plain, [![X] : P(f(Y), f(f(X)))] --> [], inference(instPred, [status(thm), 'P', $fof(P(Y, f(f(X)))), ['X', 'Y']], [a5])). + +fof(a6, axiom, [P(a, b) & ~P(c, d)] --> []). 
+fof(f6, plain, [P(X, f(a)) & ~P(X, f(c))] --> [], inference(instPred, [status(thm), 'P', $fof(P(X, f(Y))), ['Y', 'Z']], [a6])). + +fof(a7, axiom, [![X] : P(X, c)] --> []). +fof(f7, plain, [![X] :( q(X) | ![Y] : p(Y, c))] --> [], + inference(instPred, [status(thm), 'P', $fof(q(X) | ![X] : p(X, Y)), ['X', 'Y']], [a7])). + +fof(a8, axiom, [![X] : P(X, f(c))] --> [?[Y] : P(b, f(Y))]). +fof(f8, plain, [![X] : (q(X) | ![Y] : ~q(Y, f(f(c))))] --> [?[Z] : (q(b) | ![X] : ~(q(X, f(f(Z)))))], + inference(instPred, [status(thm), 'P', $fof(q(X) | ![X] : ~(q(X, f(Y)))), ['X', 'Y']], [a8])). + + +%fof(a8, axiom, [![X] : P(X, #[Y] : (Q(X) & P(X, Y)))] --> []). +%fof(f8, plain, [![X] :( q(X) | ![Z] : p(Z, #[Y] : (Q(X) & P(X, Y))))] --> [], +% inference(instPred, [status(thm), 'P', $fof(q(X) | ![X] : p(X, Y)), ['X', 'Y']], [a8])). \ No newline at end of file diff --git a/lisa-sets2/src/test/resources/steps_tests/leftAnd.p b/lisa-sets2/src/test/resources/steps_tests/leftAnd.p new file mode 100644 index 000000000..7134aa2c2 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftAnd.p @@ -0,0 +1,21 @@ +fof(a1, axiom, [p, q] --> [r]). +fof(f1, plain, [p & q] --> [r], inference(leftAnd, [status(thm), 0], [a1])). + +fof(a2, axiom, [a, p, q] --> [r]). +fof(f2, plain, [a, p & q] --> [r], inference(leftAnd, [status(thm), 1], [a2])). + +fof(a3, axiom, [P(X), Q(X)] --> [R]). +fof(f3, plain, [P(X) & Q(X)] --> [R], inference(leftAnd, [status(thm), 0], [a3])). + +fof(a4, axiom, [s, p, t, q] --> [u]). +fof(f4, plain, [s, p & q, t] --> [u], inference(leftAnd, [status(thm), 1], [a4])). + +fof(a5, axiom, [p(X), q(Y)] --> [r(X, Y)]). +fof(f5, plain, [p(X) & q(Y)] --> [r(X, Y)], inference(leftAnd, [status(thm), 0], [a5])). + + +fof(a6, axiom, [![X]: (p(X) & q(X)), (r(X) | s(X))] --> [(t(X) & u(X)), v(X)]). +fof(f6, plain, [![X]: (p(X) & q(X)) & (r(X) | s(X)), (r(X) | s(X))] --> [(t(X) & u(X)), v(X)], inference(leftAnd, [status(thm), 0], [a6])). + +fof(a7, axiom, [q, ![X]: (p(X) & q(X)), s(Y)] --> []). +fof(f7, plain, [![X]: (p(X) & q(X)), q & s(Y)] --> [], inference(leftAnd, [status(thm), 1], [a7])). diff --git a/lisa-sets2/src/test/resources/steps_tests/leftEpsilon.p b/lisa-sets2/src/test/resources/steps_tests/leftEpsilon.p new file mode 100644 index 000000000..19b85a068 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftEpsilon.p @@ -0,0 +1,22 @@ +fof(a1, axiom, [p, q] --> [r]). +fof(f1, plain, [p, q] --> [r], inference(leftExists, [status(thm), 0, 'X'], [a1])). + +fof(a2, axiom, [a, p(X)] --> [r]). +fof(f2, plain, [a, p(#[X] : Q(X))] --> [r], inference(leftExists, [status(thm), 1, 'X'], [a2])). + +fof(a3, axiom, [P(Z), Q(Y)] --> [R]). +fof(f3, plain, [P(Z), Q(#[X] : Q(X))] --> [R], inference(leftExists, [status(thm), 1, 'Y'], [a3])). + + +fof(a4, axiom, [p(X), q(Z) | q(Y)] --> [r(X, Y)]). +fof(f4, plain, [p(X), (q(#[X] : (q(X) | q(Y))) | q(Y))] --> [r(X, Y)], inference(leftExists, [status(thm), 1, 'Z'], [a4])). + + +fof(a6, axiom, [?[X]: (p(X) & q(Z)), (r(X) | s(Y))] --> [(t(X) & u(X)), v(X)]). +fof(f6, plain, [?[X]: (p(# [Y] : ? [X]: (p(X) & q(Y))) & q(Y)), (r(X) | s(Y))] --> [(t(X) & u(X)), v(X)], inference(leftExists, [status(thm), 0, 'Z'], [a6])). + +fof(a7, axiom, [q, ?[X]: (p(X) & q(Z)), s(Y)] --> []). +fof(f7, plain, [q, ?[X, Y]: (p(Y) & q(X)), s(Y)] --> [], inference(leftExists, [status(thm), 1, 'Z'], [a7])). + +fof(a8, axiom, [q, ?[X]: (p(X) & r(Z, Z)), s(Y)] --> []). +fof(f8, plain, [q, ?[X, Y]: (p(Y) & r(X, X)), s(Y)] --> [], inference(leftExists, [status(thm), 1, 'Z'], [a8])). 
diff --git a/lisa-sets2/src/test/resources/steps_tests/leftExists.p b/lisa-sets2/src/test/resources/steps_tests/leftExists.p new file mode 100644 index 000000000..87958f555 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftExists.p @@ -0,0 +1,22 @@ +fof(a1, axiom, [p, q] --> [r]). +fof(f1, plain, [?[X] : p, q] --> [r], inference(leftExists, [status(thm), 0, 'X'], [a1])). + +fof(a2, axiom, [a, p(X)] --> [r]). +fof(f2, plain, [a, ?[X] : p(X)] --> [r], inference(leftExists, [status(thm), 1, 'X'], [a2])). + +fof(a3, axiom, [P(Z), Q(Y)] --> [R]). +fof(f3, plain, [P(Z), ?[X] : Q(X)] --> [R], inference(leftExists, [status(thm), 1, 'Y'], [a3])). + + +fof(a4, axiom, [p(X), q(Z) | q(Y)] --> [r(X, Y)]). +fof(f4, plain, [p(X), ?[X] : (q(X) | q(Y))] --> [r(X, Y)], inference(leftExists, [status(thm), 1, 'Z'], [a4])). + + +fof(a6, axiom, [?[X]: (p(X) & q(Z)), (r(X) | s(Y))] --> [(t(X) & u(X)), v(X)]). +fof(f6, plain, [?[Y, X]: (p(X) & q(Y)), (r(X) | s(Y))] --> [(t(X) & u(X)), v(X)], inference(leftExists, [status(thm), 0, 'Z'], [a6])). + +fof(a7, axiom, [q, ?[X]: (p(X) & q(Z)), s(Y)] --> []). +fof(f7, plain, [q, ?[X, Y]: (p(Y) & q(X)), s(Y)] --> [], inference(leftExists, [status(thm), 1, 'Z'], [a7])). + +fof(a8, axiom, [q, ?[X]: (p(X) & r(Z, Z)), s(Y)] --> []). +fof(f8, plain, [q, ?[X, Y]: (p(Y) & r(X, X)), s(Y)] --> [], inference(leftExists, [status(thm), 1, 'Z'], [a8])). diff --git a/lisa-sets2/src/test/resources/steps_tests/leftForall.p b/lisa-sets2/src/test/resources/steps_tests/leftForall.p new file mode 100644 index 000000000..60c1eec1c --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftForall.p @@ -0,0 +1,22 @@ +fof(a1, axiom, [p, q] --> [r]). +fof(f1, plain, [![X] : p, q] --> [r], inference(leftForall, [status(thm), 0, $fot(X)], [a1])). + +fof(a2, axiom, [a, p(X)] --> [r]). +fof(f2, plain, [a, ![X] : p(X)] --> [r], inference(leftForall, [status(thm), 1, $fot(X)], [a2])). + +fof(a3, axiom, [P(X), Q(f(c))] --> [R]). +fof(f3, plain, [P(X), ![X] : Q(X)] --> [R], inference(leftForall, [status(thm), 1, $fot(f(c))], [a3])). + + +fof(a4, axiom, [p(X), q(g(Y, c)) | q(Y)] --> [r(X, Y)]). +fof(f4, plain, [p(X), ![X] : (q(X) | q(Y))] --> [r(X, Y)], inference(leftForall, [status(thm), 1, $fot(g(Y, c))], [a4])). + + +fof(a6, axiom, [![X]: (p(X) & q(f(c))), (r(X) | s(Y))] --> [(t(X) & u(X)), v(X)]). +fof(f6, plain, [![Y, X]: (p(X) & q(Y)), (r(X) | s(Y))] --> [(t(X) & u(X)), v(X)], inference(leftForall, [status(thm), 0, $fot(f(c))], [a6])). + +fof(a7, axiom, [q, ![X]: (p(X) & q(f(c))), s(Y)] --> []). +fof(f7, plain, [q, ![X, Y]: (p(Y) & q(X)), s(Y)] --> [], inference(leftForall, [status(thm), 1, $fot(f(c))], [a7])). + +fof(a7, axiom, [q, ![X]: (p(X) & r(f(c), f(c))), s(Y)] --> []). +fof(f7, plain, [q, ![X, Y]: (p(Y) & r(X, X)), s(Y)] --> [], inference(leftForall, [status(thm), 1, $fot(f(c))], [a7])). diff --git a/lisa-sets2/src/test/resources/steps_tests/leftIff.p b/lisa-sets2/src/test/resources/steps_tests/leftIff.p new file mode 100644 index 000000000..957d5f396 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftIff.p @@ -0,0 +1,21 @@ +fof(a1, axiom, [p=>q, q=>p] --> [r]). +fof(f1, plain, [p<=>q] --> [r], inference(leftIff, [status(thm), 0], [a1])). + +fof(a2, axiom, [a, p=>q, q=>p] --> [r]). +fof(f2, plain, [a, p <=> q] --> [r], inference(leftIff, [status(thm), 1], [a2])). + +fof(a3, axiom, [P(X)=>Q(X), Q(X)=>P(X)] --> [R]). +fof(f3, plain, [P(X) <=> Q(X)] --> [R], inference(leftIff, [status(thm), 0], [a3])). + +fof(a4, axiom, [s, p=>q, t, q=>p] --> [u]). 
+fof(f4, plain, [s, p <=> q, t] --> [u], inference(leftIff, [status(thm), 1], [a4])). + +fof(a5, axiom, [p(X)=>q(Y), q(Y)=>p(X)] --> [r(X, Y)]). +fof(f5, plain, [p(X) <=> q(Y)] --> [r(X, Y)], inference(leftIff, [status(thm), 0], [a5])). + + +fof(a6, axiom, [![X]: (p(X)) => (r(X) | s(X)), (r(X) | s(X)) => ![X]: (p(X))] --> [(t(X) & u(X)), v(X)]). +fof(f6, plain, [![X]: (p(X)) <=> (r(X) | s(X)), (r(X) | s(X)) => ![X]: (p(X))] --> [(t(X) & u(X)), v(X)], inference(leftIff, [status(thm), 0], [a6])). + +fof(a7, axiom, [q=>s(Y), ![X]: (p(X) & q(X)), s(Y)=>q] --> []). +fof(f7, plain, [![X]: (p(X) & q(X)), q <=> s(Y)] --> [], inference(leftIff, [status(thm), 1], [a7])). diff --git a/lisa-sets2/src/test/resources/steps_tests/leftImplies.p b/lisa-sets2/src/test/resources/steps_tests/leftImplies.p new file mode 100644 index 000000000..5ad6306f1 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftImplies.p @@ -0,0 +1,21 @@ +fof(a1_1, axiom, [] --> [p, q]). +fof(a1_2, axiom, [q] --> [r]). +fof(f1, plain, [p => q] --> [r, q], inference(leftImplies, [status(thm), 0], [a1_1, a1_2])). + +fof(a2_1, axiom, [p] --> [q]). +fof(a2_2, axiom, [s, t] --> []). +fof(f2, plain, [p, q => s, t] --> [], inference(leftImplies, [status(thm), 1], [a2_1, a2_2])). + +fof(a3_1, axiom, [q] --> [p]). +fof(a3_2, axiom, [s, t] --> []). +fof(f3, plain, [p => t , q, s] --> [], inference(leftImplies, [status(thm), 0], [a3_1, a3_2])). + +fof(a4_1, axiom, [P(X)] --> [R & r(f(g(X, Y))), Q(X)]). +fof(a4_2, axiom, [R & Q(X)] --> [R]). +fof(f4, plain, [P(X), Q(X) => (R & Q(X))] --> [R & r(f(g(X, Y))), R], inference(leftImplies, [status(thm), 1], [a4_1, a4_2])). + +fof(a5_1, axiom, [A] --> [(t(X) & u(Z)), v(f(c)), ![X]: (p(X) & q(X))]). +fof(a5_2, axiom, [(r(X) | s(X))] --> [(t(X) & u(Z))]). +fof(f5, plain, [A, ![X]: (p(X) & q(X)) => (r(X) | s(X))] --> [(t(X) & u(Z)), v(f(c))], inference(leftImplies, [status(thm), 1], [a5_1, a5_2])). + + diff --git a/lisa-sets2/src/test/resources/steps_tests/leftNot.p b/lisa-sets2/src/test/resources/steps_tests/leftNot.p new file mode 100644 index 000000000..ec3f1536d --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftNot.p @@ -0,0 +1,20 @@ +fof(a1, axiom, [] --> [p]). +fof(f1, plain, [~p] --> [], inference(leftNot, [status(thm), 0], [a1])). + +fof(a2, axiom, [q] --> [r]). +fof(f2, plain, [~r, q] --> [], inference(leftNot, [status(thm), 0], [a2])). + +fof(a3, axiom, [p, q] --> [r]). +fof(f3, plain, [~r, p, q] --> [], inference(leftNot, [status(thm), 0], [a3])). + +fof(a4, axiom, [p(X)] --> [q(X)]). +fof(f4, plain, [~q(X), p(X)] --> [], inference(leftNot, [status(thm), 0], [a4])). + +fof(a5, axiom, [p(X, Y)] --> [![Y] : q(Y)]). +fof(f5, plain, [~![Y] : q(Y), p(X, Y)] --> [], inference(leftNot, [status(thm), 0], [a5])). + +fof(a6, axiom, [p(X) & q(Y)] --> [r(Z), ![X] : p(X) & q(Y)]). +fof(f6, plain, [p(X) & q(Y), ~r(Z)] --> [![X] : p(X) & q(Y)], inference(leftNot, [status(thm), 1], [a6])). + +fof(a7, axiom, [] --> [r(c), ![X] : p(X)]). +fof(f7, plain, [~![Y] : p(Y)] --> [r(c)], inference(leftNot, [status(thm), 0], [a7])). \ No newline at end of file diff --git a/lisa-sets2/src/test/resources/steps_tests/leftOr.p b/lisa-sets2/src/test/resources/steps_tests/leftOr.p new file mode 100644 index 000000000..b2b47e45b --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftOr.p @@ -0,0 +1,21 @@ +fof(a1_1, axiom, [p] --> [q]). +fof(a1_2, axiom, [q] --> [r]). +fof(f1, plain, [p | q] --> [r, q], inference(leftOr, [status(thm), 0], [a1_1, a1_2])). 
+ +fof(a2_1, axiom, [p, q] --> []). +fof(a2_2, axiom, [s, t] --> []). +fof(f2, plain, [p, q | s, t] --> [], inference(leftOr, [status(thm), 1], [a2_1, a2_2])). + +fof(a3_1, axiom, [p, q] --> []). +fof(a3_2, axiom, [s, t] --> []). +fof(f3, plain, [p | t , q, s] --> [], inference(leftOr, [status(thm), 0], [a3_1, a3_2])). + +fof(a4_1, axiom, [P(X), Q(X)] --> [R & r(f(g(X, Y)))]). +fof(a4_2, axiom, [R & Q(X)] --> [R]). +fof(f4, plain, [P(X), Q(X) | (R & Q(X))] --> [R & r(f(g(X, Y))), R], inference(leftOr, [status(thm), 1], [a4_1, a4_2])). + +fof(a5_1, axiom, [![X]: (p(X) & q(X)), A] --> [(t(X) & u(Z)), v(f(c))]). +fof(a5_2, axiom, [(r(X) | s(X))] --> [(t(X) & u(Z))]). +fof(f5, plain, [A, ![X]: (p(X) & q(X)) | (r(X) | s(X))] --> [(t(X) & u(Z)), v(f(c))], inference(leftOr, [status(thm), 1], [a5_1, a5_2])). + + diff --git a/lisa-sets2/src/test/resources/steps_tests/leftSubst.p b/lisa-sets2/src/test/resources/steps_tests/leftSubst.p new file mode 100644 index 000000000..a50f614e4 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftSubst.p @@ -0,0 +1,15 @@ +fof(a1, axiom, [q(a)] --> []). +fof(f1, plain, [a = b, q(b)] --> [], inference(leftSubst, [status(thm), 0, $fof(q(X)), 'X'], [a1])). + +fof(a2, axiom, [q(g(a, f(c)))] --> []). +fof(f2, plain, [a = b, q(g(b, f(c)))] --> [], inference(leftSubst, [status(thm), 0, $fof(q(g(X, f(c)))), 'X'], [a2])). + +fof(a3, axiom, [q(g(a, f(a)))] --> []). +fof(f3, plain, [a = b, q(g(b, f(a)))] --> [], inference(leftSubst, [status(thm), 0, $fof(q(g(X, f(a)))), 'X'], [a3])). + +fof(a3, axiom, [q(g(a, f(a)))] --> []). +fof(f3, plain, [q(g(b, f(b))), a = b] --> [], inference(leftSubst, [status(thm), 1, $fof(q(g(X, f(X)))), 'X'], [a3])). + +fof(a4, axiom, [![X] : q(g(X, f(c)))] --> []). +fof(f4, plain, [f(c) = g(c, c), ![X] : q(g(X, g(c, c)))] --> [], inference(leftSubst, [status(thm), 0, $fof(![Z] : q(g(Z, X))), 'X'], [a4])). + diff --git a/lisa-sets2/src/test/resources/steps_tests/leftSubstIff.p b/lisa-sets2/src/test/resources/steps_tests/leftSubstIff.p new file mode 100644 index 000000000..c38dcc394 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftSubstIff.p @@ -0,0 +1,15 @@ +fof(a1, axiom, [q(s)] --> []). +fof(f1, plain, [q(s) <=> q(t), q(t)] --> [], inference(leftSubstIff, [status(thm), 0, $fof(K), 'K'], [a1])). + +fof(a2, axiom, [a & p(s, t)] --> []). +fof(f2, plain, [p(s, t) <=> q(t), a & q(t)] --> [], inference(leftSubstIff, [status(thm), 0, $fof(a & K), 'K'], [a2])). + +fof(a3, axiom, [q(s) & (q(t) | q(s))] --> []). +fof(f3, plain, [q(s) <=> q(t), q(t) & (q(t) | q(s))] --> [], inference(leftSubstIff, [status(thm), 0, $fof(K & (q(t) | q(s))), 'K'], [a3])). + +fof(a3, axiom, [~(q(s) & q(t)) => q(s)] --> []). +fof(f3, plain, [~(~(a & q(t)) & q(t)) => ~(a & q(t)), q(s) <=> ~(a & q(t))] --> [], inference(leftSubstIff, [status(thm), 1, $fof(~(K & q(t)) => K), 'K'], [a3])). + +fof(a4, axiom, [![X] : ~(q(s) & q(X))] --> []). +fof(f4, plain, [q(s) <=> q(X), ![Y] : ~(q(X) & q(Y))] --> [], inference(leftSubstIff, [status(thm), 0, $fof(![X] : ~(K & q(X))), 'K'], [a4])). + diff --git a/lisa-sets2/src/test/resources/steps_tests/leftWeaken.p b/lisa-sets2/src/test/resources/steps_tests/leftWeaken.p new file mode 100644 index 000000000..591ed0a36 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/leftWeaken.p @@ -0,0 +1,12 @@ + +fof(a1, axiom, [] --> [p]). +fof(f1, plain, [q] --> [p], inference(leftWeaken, [status(thm), 0], [a1])). +fof(f2, plain, [q, P(X)] --> [p], inference(leftWeaken, [status(thm), 1], [f1])). 
+fof(f3, plain, [q, P(X), Q(X, c)] --> [p], inference(leftWeaken, [status(thm), 2], [f2])). +fof(f4, plain, [q, ![X] : (P(X) | Q(a, X)), P(X), Q(X, c)] --> [p], inference(leftWeaken, [status(thm), 1], [f3])). +fof(f5, plain, [q, ![X] : (P(X) | Q(a, X)), P(X), Q(X, c)] --> [p], inference(leftWeaken, [status(thm), 2], [f4])). +fof(f6, plain, [q, ![Y] : (P(Y) | Q(a, Y)), P(X), Q(X, c), p1 => p2] --> [p], inference(leftWeaken, [status(thm), 4], [f5])). + + + + diff --git a/lisa-sets2/src/test/resources/steps_tests/rightAnd.p b/lisa-sets2/src/test/resources/steps_tests/rightAnd.p new file mode 100644 index 000000000..e4b6db5ac --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightAnd.p @@ -0,0 +1,21 @@ +fof(a1_1, axiom, [q] --> [p]). +fof(a1_2, axiom, [r] --> [q]). +fof(f1, plain, [r, q] --> [p & q], inference(rightAnd, [status(thm), 0], [a1_1, a1_2])). + +fof(a2_1, axiom, [] --> [p, q]). +fof(a2_2, axiom, [] --> [s, t]). +fof(f2, plain, [] --> [p, q & s, t], inference(rightAnd, [status(thm), 1], [a2_1, a2_2])). + +fof(a3_1, axiom, [] --> [p, q]). +fof(a3_2, axiom, [] --> [s, t]). +fof(f3, plain, [] --> [p & t , q, s], inference(rightAnd, [status(thm), 0], [a3_1, a3_2])). + +fof(a4_1, axiom, [R & r(f(g(X, Y)))] --> [P(X), Q(X)]). +fof(a4_2, axiom, [R] --> [R & Q(X)]). +fof(f4, plain, [R & r(f(g(X, Y))), R] --> [P(X), Q(X) & (R & Q(X))], inference(rightAnd, [status(thm), 1], [a4_1, a4_2])). + +fof(a5_1, axiom, [(t(X) & u(Z)), v(f(c))] --> [![X]: (p(X) & q(X)), A]). +fof(a5_2, axiom, [(t(X) & u(Z))] --> [(r(X) | s(X))]). +fof(f5, plain, [(t(X) & u(Z)), v(f(c))] --> [A, ![X]: (p(X) & q(X)) & (r(X) | s(X))], inference(rightAnd, [status(thm), 1], [a5_1, a5_2])). + + diff --git a/lisa-sets2/src/test/resources/steps_tests/rightEpsilon.p b/lisa-sets2/src/test/resources/steps_tests/rightEpsilon.p new file mode 100644 index 000000000..e69de29bb diff --git a/lisa-sets2/src/test/resources/steps_tests/rightExists.p b/lisa-sets2/src/test/resources/steps_tests/rightExists.p new file mode 100644 index 000000000..3b763e240 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightExists.p @@ -0,0 +1,23 @@ +fof(a1, axiom, [r] --> [p, q]). +fof(f1, plain, [r] --> [?[X] : p, q], inference(rightExists, [status(thm), 0, $fot(X)], [a1])). + +fof(a2, axiom, [r] --> [a, p(X)]). +fof(f2, plain, [r] --> [a, ?[X] : p(X)], inference(rightExists, [status(thm), 1, $fot(X)], [a2])). + +fof(a3, axiom, [R] --> [P(X), Q(f(c))]). +fof(f3, plain, [R] --> [P(X), ?[X] : Q(X)], inference(rightExists, [status(thm), 1, $fot(f(c))], [a3])). + + +fof(a4, axiom, [r(X, Y)] --> [p(X), q(g(Y, c)) | q(Y)]). +fof(f4, plain, [r(X, Y)] --> [p(X), ?[X] : (q(X) | q(Y))], inference(rightExists, [status(thm), 1, $fot(g(Y, c))], [a4])). + + +fof(a6, axiom, [(t(X) & u(X)), v(X)] --> [?[X]: (p(X) & q(f(c))), (r(X) | s(Y))]). +fof(f6, plain, [(t(X) & u(X)), v(X)] --> [?[Y, X]: (p(X) & q(Y)), (r(X) | s(Y))], inference(rightExists, [status(thm), 0, $fot(f(c))], [a6])). + +fof(a7, axiom, [] --> [q, ?[X]: (p(X) & q(f(c))), s(Y)]). +fof(f7, plain, [] --> [q, ?[X, Y]: (p(Y) & q(X)), s(Y)], inference(rightExists, [status(thm), 1, $fot(f(c))], [a7])). + + +fof(a7, axiom, [] --> [q, ?[X]: (p(X) & r(f(c), f(c))), s(Y)]). +fof(f7, plain, [] --> [q, ?[X, Y]: (p(Y) & r(X, X)), s(Y)], inference(rightExists, [status(thm), 1, $fot(f(c))], [a7])). 
\ No newline at end of file diff --git a/lisa-sets2/src/test/resources/steps_tests/rightForall.p b/lisa-sets2/src/test/resources/steps_tests/rightForall.p new file mode 100644 index 000000000..82ba2a416 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightForall.p @@ -0,0 +1,22 @@ +fof(a1, axiom, [r] --> [p, q]). +fof(f1, plain, [r] --> [![X] : p, q], inference(rightForall, [status(thm), 0, 'X'], [a1])). + +fof(a2, axiom, [r] --> [a, p(X)]). +fof(f2, plain, [r] --> [a, ![X] : p(X)], inference(rightForall, [status(thm), 1, 'X'], [a2])). + +fof(a3, axiom, [R] --> [P(X), Q(Y)]). +fof(f3, plain, [R] --> [P(X), ![X] : Q(X)], inference(rightForall, [status(thm), 1, 'Y'], [a3])). + + +fof(a4, axiom, [r(X, Y)] --> [p(X), q(Z) | q(Y)]). +fof(f4, plain, [r(X, Y)] --> [p(X), ![X] : (q(X) | q(Y))], inference(rightForall, [status(thm), 1, 'Z'], [a4])). + + +fof(a6, axiom, [(t(X) & u(X)), v(X)] --> [![X]: (p(X) & q(Z)), (r(X) | s(Y))]). +fof(f6, plain, [(t(X) & u(X)), v(X)] --> [![Y, X]: (p(X) & q(Y)), (r(X) | s(Y))], inference(rightForall, [status(thm), 0, 'Z'], [a6])). + +fof(a7, axiom, [] --> [q, ![X]: (p(X) & q(Z)), s(Y)]). +fof(f7, plain, [] --> [q, ![X, Y]: (p(Y) & q(X)), s(Y)], inference(rightForall, [status(thm), 1, 'Z'], [a7])). + +fof(a8, axiom, [] --> [q, ![X]: (p(X) & r(Z, Z)), s(Y)]). +fof(f8, plain, [] --> [q, ![X, Y]: (p(Y) & r(X, X)), s(Y)], inference(rightForall, [status(thm), 1, 'Z'], [a8])). diff --git a/lisa-sets2/src/test/resources/steps_tests/rightIff.p b/lisa-sets2/src/test/resources/steps_tests/rightIff.p new file mode 100644 index 000000000..3c1075704 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightIff.p @@ -0,0 +1,21 @@ +fof(a1_1, axiom, [q] --> [p=>q]). +fof(a1_2, axiom, [r] --> [q=>p]). +fof(f1, plain, [r, q] --> [p <=> q], inference(rightIff, [status(thm), 0], [a1_1, a1_2])). + +fof(a2_1, axiom, [] --> [p, q=>s]). +fof(a2_2, axiom, [] --> [s=>q, t]). +fof(f2, plain, [] --> [p, q <=> s, t], inference(rightIff, [status(thm), 1], [a2_1, a2_2])). + +fof(a3_1, axiom, [] --> [p=>t, q]). +fof(a3_2, axiom, [] --> [s, t=>p]). +fof(f3, plain, [] --> [p <=> t , q, s], inference(rightIff, [status(thm), 0], [a3_1, a3_2])). + +fof(a4_1, axiom, [R & r(f(g(X, Y)))] --> [P(X), Q(X)=>(R & Q(X))]). +fof(a4_2, axiom, [R] --> [(R & Q(X)) => Q(X)]). +fof(f4, plain, [R & r(f(g(X, Y))), R] --> [P(X), Q(X) <=> (R & Q(X))], inference(rightIff, [status(thm), 1], [a4_1, a4_2])). + +fof(a5_1, axiom, [(t(X) & u(Z)), v(f(c))] --> [![X]: (p(X) & q(X))=>(r(X) | s(X)), A]). +fof(a5_2, axiom, [(t(X) & u(Z))] --> [(r(X) | s(X)) => ![X]: (p(X) & q(X))]). +fof(f5, plain, [(t(X) & u(Z)), v(f(c))] --> [A, ![X]: (p(X) & q(X)) <=> (r(X) | s(X))], inference(rightIff, [status(thm), 1], [a5_1, a5_2])). + + diff --git a/lisa-sets2/src/test/resources/steps_tests/rightImplies.p b/lisa-sets2/src/test/resources/steps_tests/rightImplies.p new file mode 100644 index 000000000..9c0679f01 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightImplies.p @@ -0,0 +1,21 @@ +fof(a1, axiom, [r, p] --> [q]). +fof(f1, plain, [r] --> [p => q], inference(rightImplies, [status(thm), 0], [a1])). + +fof(a2, axiom, [p, r] --> [a, q]). +fof(f2, plain, [r] --> [a, p => q], inference(rightImplies, [status(thm), 1], [a2])). + +fof(a3, axiom, [P(X), R] --> [Q(X)]). +fof(f3, plain, [R] --> [P(X) => Q(X)], inference(rightImplies, [status(thm), 0], [a3])). + +fof(a4, axiom, [p, u] --> [s, t, q]). +fof(f4, plain, [u] --> [s, p => q, t], inference(rightImplies, [status(thm), 1], [a4])). 
+ +fof(a5, axiom, [r(X, Y), p(X)] --> [q(Y)]). +fof(f5, plain, [r(X, Y)] --> [p(X) => q(Y)], inference(rightImplies, [status(thm), 0], [a5])). + + +fof(a6, axiom, [![X]: (p(X) | q(X)), (t(X) | u(X)), v(X)] --> [(r(X) & s(X))]). +fof(f6, plain, [(t(X) | u(X)), v(X)] --> [![X]: (p(X) | q(X)) => (r(X) & s(X)), (r(X) & s(X))], inference(rightImplies, [status(thm), 0], [a6])). + +fof(a7, axiom, [q] --> [![X]: (p(X) | q(X)), s(Y)]). +fof(f7, plain, [] --> [![X]: (p(X) | q(X)), q => s(Y)], inference(rightImplies, [status(thm), 1], [a7])). diff --git a/lisa-sets2/src/test/resources/steps_tests/rightNot.p b/lisa-sets2/src/test/resources/steps_tests/rightNot.p new file mode 100644 index 000000000..6c9354d16 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightNot.p @@ -0,0 +1,20 @@ +fof(a1, axiom, [p] --> []). +fof(f1, plain, [] --> [~p], inference(rightNot, [status(thm), 0], [a1])). + +fof(a2, axiom, [r] --> [q]). +fof(f2, plain, [] --> [~r, q], inference(rightNot, [status(thm), 0], [a2])). + +fof(a3, axiom, [r, p] --> [q]). +fof(f3, plain, [p] --> [~r, q], inference(rightNot, [status(thm), 0], [a3])). + +fof(a4, axiom, [q(X)] --> [p(X)]). +fof(f4, plain, [] --> [p(X), ~q(X)], inference(rightNot, [status(thm), 1], [a4])). + +fof(a5, axiom, [![Y] : q(Y)] --> [p(X, Y)]). +fof(f5, plain, [] --> [~![Y] : q(Y), p(X, Y)], inference(rightNot, [status(thm), 0], [a5])). + +fof(a6, axiom, [r(Z), ![X] : (p(X) & q(Y))] --> [p(X) & q(Y)]). +fof(f6, plain, [r(Z)] --> [p(X) & q(Y), ~![X] :( p(X) & q(Y))], inference(rightNot, [status(thm), 1], [a6])). + +fof(a7, axiom, [r(c), ![X] : p(X)] --> []). +fof(f7, plain, [r(c)] --> [~![Y] : p(Y)], inference(rightNot, [status(thm), 0], [a7])). diff --git a/lisa-sets2/src/test/resources/steps_tests/rightOr.p b/lisa-sets2/src/test/resources/steps_tests/rightOr.p new file mode 100644 index 000000000..66dd70411 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightOr.p @@ -0,0 +1,21 @@ +fof(a1, axiom, [r] --> [p, q]). +fof(f1, plain, [r] --> [p | q], inference(rightOr, [status(thm), 0], [a1])). + +fof(a2, axiom, [r] --> [a, p, q]). +fof(f2, plain, [r] --> [a, p | q], inference(rightOr, [status(thm), 1], [a2])). + +fof(a3, axiom, [R] --> [P(X), Q(X)]). +fof(f3, plain, [R] --> [P(X) | Q(X)], inference(rightOr, [status(thm), 0], [a3])). + +fof(a4, axiom, [u] --> [s, p, t, q]). +fof(f4, plain, [u] --> [s, p | q, t], inference(rightOr, [status(thm), 1], [a4])). + +fof(a5, axiom, [r(X, Y)] --> [p(X), q(Y)]). +fof(f5, plain, [r(X, Y)] --> [p(X) | q(Y)], inference(rightOr, [status(thm), 0], [a5])). + + +fof(a6, axiom, [(t(X) | u(X)), v(X)] --> [![X]: (p(X) | q(X)), (r(X) & s(X))]). +fof(f6, plain, [(t(X) | u(X)), v(X)] --> [![X]: (p(X) | q(X)) | (r(X) & s(X)), (r(X) & s(X))], inference(rightOr, [status(thm), 0], [a6])). + +fof(a7, axiom, [] --> [q, ![X]: (p(X) | q(X)), s(Y)]). +fof(f7, plain, [] --> [![X]: (p(X) | q(X)), q | s(Y)], inference(rightOr, [status(thm), 1], [a7])). diff --git a/lisa-sets2/src/test/resources/steps_tests/rightRefl.p b/lisa-sets2/src/test/resources/steps_tests/rightRefl.p new file mode 100644 index 000000000..ff7ad121a --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightRefl.p @@ -0,0 +1,3 @@ +fof(f1, plain, [r(c, d), p(X), a & b] --> [X = X], inference(rightRefl, [status(thm), 0], [])). + +fof(f2, plain, [r(c, d), p(X), a & b] --> [ p(X), a & b, g(X, c) = g(X, c)], inference(rightRefl, [status(thm), 2], [])). 
\ No newline at end of file diff --git a/lisa-sets2/src/test/resources/steps_tests/rightSubst.p b/lisa-sets2/src/test/resources/steps_tests/rightSubst.p new file mode 100644 index 000000000..7642c33b8 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightSubst.p @@ -0,0 +1,15 @@ +fof(a1, axiom, [] --> [q(a)]). +fof(f1, plain, [a = b] --> [q(b)], inference(rightSubst, [status(thm), 0, $fof(q(X)), 'X'], [a1])). + +fof(a2, axiom, [] --> [q(g(a, f(c)))]). +fof(f2, plain, [a = b] --> [q(g(b, f(c)))], inference(rightSubst, [status(thm), 0, $fof(q(g(X, f(c)))), 'X'], [a2])). + +fof(a3, axiom, [] --> [q(g(a, f(a)))]). +fof(f3, plain, [a = b] --> [q(g(b, f(a)))], inference(rightSubst, [status(thm), 0, $fof(q(g(X, f(a)))), 'X'], [a3])). + +fof(a3, axiom, [] --> [q(g(a, f(a)))]). +fof(f3, plain, [a = b] --> [q(g(b, f(b)))], inference(rightSubst, [status(thm), 0, $fof(q(g(X, f(X)))), 'X'], [a3])). + +fof(a4, axiom, [] --> [![X] : q(g(X, f(c)))]). +fof(f4, plain, [f(c) = g(c, c)] --> [![X] : q(g(X, g(c, c)))], inference(rightSubst, [status(thm), 0, $fof(![Z] : q(g(Z, X))), 'X'], [a4])). + diff --git a/lisa-sets2/src/test/resources/steps_tests/rightSubstIff.p b/lisa-sets2/src/test/resources/steps_tests/rightSubstIff.p new file mode 100644 index 000000000..c349afc68 --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightSubstIff.p @@ -0,0 +1,15 @@ +fof(a1, axiom, [] --> [q(s)]). +fof(f1, plain, [q(s) <=> q(t)] --> [q(t)], inference(rightSubstIff, [status(thm), 0, $fof(K), 'K'], [a1])). + +fof(a2, axiom, [] --> [a & p(s, t)]). +fof(f2, plain, [p(s, t) <=> q(t)] --> [a & q(t)], inference(rightSubstIff, [status(thm), 0, $fof(a & K), 'K'], [a2])). + +fof(a3, axiom, [] --> [q(s) & (q(t) | q(s))]). +fof(f3, plain, [q(s) <=> q(t)] --> [ q(t) & (q(t) | q(s))], inference(rightSubstIff, [status(thm), 0, $fof(K & (q(t) | q(s))), 'K'], [a3])). + +fof(a3, axiom, [] --> [~(q(s) & q(t)) => q(s)]). +fof(f3, plain, [q(s) <=> ~(a & q(t))] --> [~(~(a & q(t)) & q(t)) => ~(a & q(t))], inference(rightSubstIff, [status(thm), 0, $fof(~(K & q(t)) => K), 'K'], [a3])). + +fof(a4, axiom, [] --> [![X] : ~(q(s) & q(X))]). +fof(f4, plain, [q(s) <=> q(X)] --> [![Y] : ~(q(X) & q(Y))], inference(rightSubstIff, [status(thm), 0, $fof(![X] : ~(K & q(X))), 'K'], [a4])). + diff --git a/lisa-sets2/src/test/resources/steps_tests/rightWeaken.p b/lisa-sets2/src/test/resources/steps_tests/rightWeaken.p new file mode 100644 index 000000000..9006afa9c --- /dev/null +++ b/lisa-sets2/src/test/resources/steps_tests/rightWeaken.p @@ -0,0 +1,12 @@ + +fof(a1, axiom, [p] --> []). +fof(f1, plain, [p] --> [q], inference(rightWeaken, [status(thm), 0], [a1])). +fof(f2, plain, [p] --> [q, P(X)], inference(rightWeaken, [status(thm), 1], [f1])). +fof(f3, plain, [p] --> [q, P(X), Q(X, c)], inference(rightWeaken, [status(thm), 2], [f2])). +fof(f4, plain, [p] --> [q, ![X] : (P(X) | Q(a, X)), P(X), Q(X, c)], inference(rightWeaken, [status(thm), 1], [f3])). +fof(f5, plain, [p] --> [q, ![X] : (P(X) | Q(a, X)), P(X), Q(X, c)], inference(rightWeaken, [status(thm), 2], [f4])). +fof(f6, plain, [p] --> [q, ![Y] : (P(Y) | Q(a, Y)), P(X), Q(X, c), p1 => p2], inference(rightWeaken, [status(thm), 4], [f5])). 
+ + + + diff --git a/lisa-sets2/src/test/scala/lisa/tptp/ATPProofs.scala b/lisa-sets2/src/test/scala/lisa/tptp/ATPProofs.scala new file mode 100644 index 000000000..e57c7d492 --- /dev/null +++ b/lisa-sets2/src/test/scala/lisa/tptp/ATPProofs.scala @@ -0,0 +1,53 @@ +package lisa.tptp + +import org.scalatest.compatible.Assertion +import org.scalatest.funsuite.AnyFunSuite +import scala.io.Source +import java.io.File +import ProofParser.* +import KernelParser.* +import lisa.utils.K +import lisa.utils.K.{>>:, repr, lambda, given} +import K.SCProofChecker + + +import leo.modules.input.TPTPParser +import lisa.kernel.proof.SCProofCheckerJudgement.SCInvalidProof +import lisa.tptp.{ProofParser, KernelParser} + +class ATPProofs extends AnyFunSuite { + + + private val sources = getClass.getResource("/").getPath + println(s"Sources: $sources") + + + private val problems = Seq[(String, String)]( + //"p9_test_1.p" -> "prover9 test 1", + //"p9_test_2.p" -> "prover9 test 2", + "p9_test_3.p" -> "prover9 test 3", + //"goeland_test_1.p" -> "goeland test 1", + //"egg_test_1.p" -> "egg test 1", + + ) + + + for (p <- problems) { + test(p._2) { + println("###################################") + print(s"Parsing ${p._1} ...") + try { + val res = reconstructProof(File(s"$sources/${p._1}"))(using lisa.tptp.KernelParser.strictMapAtom, lisa.tptp.KernelParser.strictMapTerm, lisa.tptp.KernelParser.strictMapVariable) + val judgement = SCProofChecker.checkSCProof(res) + assert(judgement.isValid, K.prettySCProof(judgement)) + + println(s"Parsed ${p._1}") + } catch { + case e: TPTPParser.TPTPParseException => + println(s"Parse error at line ${e.line}:${e.offset}: ${e.getMessage}") + fail() + } + } + } + +} diff --git a/lisa-sets2/src/test/scala/lisa/tptp/LVL1Test.scala b/lisa-sets2/src/test/scala/lisa/tptp/LVL1Test.scala new file mode 100644 index 000000000..0199f19aa --- /dev/null +++ b/lisa-sets2/src/test/scala/lisa/tptp/LVL1Test.scala @@ -0,0 +1,72 @@ +package lisa.tptp + +import org.scalatest.compatible.Assertion +import org.scalatest.funsuite.AnyFunSuite +import scala.io.Source +import java.io.File +import ProofParser.* +import KernelParser.* +import lisa.utils.K +import lisa.utils.K.{>>:, repr, lambda, given} +import K.SCProofChecker + + +import leo.modules.input.TPTPParser +import lisa.kernel.proof.SCProofCheckerJudgement.SCInvalidProof +import lisa.tptp.{ProofParser, KernelParser} + +class LVL1Test extends AnyFunSuite { + + + private val sources = getClass.getResource("/steps_tests").getPath + println(s"Sources: $sources") + + + private val problems = Seq[(String, String)]( + "cut.p" -> "cut rule tests", + "hyp.p" -> "hyp rule tests", + "instFun.p" -> "instFun rule tests", + "instPred.p" -> "instPred rule tests", + "leftForall.p" -> "leftForall rule tests", + "leftAnd.p" -> "leftAnd rule tests", + "leftExists.p" -> "leftExists rule tests", + "leftIff.p" -> "leftIff rule tests", + "leftImplies.p" -> "leftImplies rule tests", + "leftNot.p" -> "leftNot rule tests", + "leftOr.p" -> "leftOr rule tests", + "leftSubst.p" -> "leftSubst rule tests", + "leftSubstIff.p" -> "leftSubstIff rule tests", + "leftWeaken.p" -> "leftWeaken rule tests", + "rightForall.p" -> "rightForall rule tests", + "rightAnd.p" -> "rightAnd rule tests", + "rightExists.p" -> "rightExists rule tests", + "rightIff.p" -> "rightIff rule tests", + "rightImplies.p" -> "rightImplies rule tests", + "rightNot.p" -> "rightNot rule tests", + "rightOr.p" -> "rightOr rule tests", + "rightRefl.p" -> "rightRefl rule tests", + "rightSubst.p" -> "rightSubst rule tests", 
+ "rightSubstIff.p" -> "rightSubstIff rule tests", + "rightWeaken.p" -> "RightWeaken rule tests", + ) + + + for (p <- problems) { + test(p._2) { + println("###################################") + print(s"Parsing ${p._1} ...") + try { + val res = reconstructProof(File(s"$sources/${p._1}"))(using lisa.tptp.KernelParser.strictMapAtom, lisa.tptp.KernelParser.strictMapTerm, lisa.tptp.KernelParser.strictMapVariable) + val judgement = SCProofChecker.checkSCProof(res) + assert(judgement.isValid, K.prettySCProof(judgement)) + + println(s"Parsed ${p._1}") + } catch { + case e: TPTPParser.TPTPParseException => + println(s"Parse error at line ${e.line}:${e.offset}: ${e.getMessage}") + fail() + } + } + } + +} diff --git a/lisa-sets2/src/test/scala/lisa/tptp/LVL2Test.scala b/lisa-sets2/src/test/scala/lisa/tptp/LVL2Test.scala new file mode 100644 index 000000000..418f76281 --- /dev/null +++ b/lisa-sets2/src/test/scala/lisa/tptp/LVL2Test.scala @@ -0,0 +1,49 @@ +package lisa.tptp + +import org.scalatest.compatible.Assertion +import org.scalatest.funsuite.AnyFunSuite +import scala.io.Source +import java.io.File +import ProofParser.* +import KernelParser.* +import lisa.utils.K +import lisa.utils.K.{>>:, repr, lambda, given} +import K.SCProofChecker + + +import leo.modules.input.TPTPParser +import lisa.kernel.proof.SCProofCheckerJudgement.SCInvalidProof +import lisa.tptp.{ProofParser, KernelParser} + +class LVL2Test extends AnyFunSuite { + + + private val sources = getClass.getResource("/level2_steps").getPath + println(s"Sources: $sources") + + + private val problems = Seq[(String, String)]( + "instMult.p" -> "instMult rule tests", + + ) + + + for (p <- problems) { + test(p._2) { + println("###################################") + print(s"Parsing ${p._1} ...") + try { + val res = reconstructProof(File(s"$sources/${p._1}"))(using lisa.tptp.KernelParser.strictMapAtom, lisa.tptp.KernelParser.strictMapTerm, lisa.tptp.KernelParser.strictMapVariable) + val judgement = SCProofChecker.checkSCProof(res) + assert(judgement.isValid, K.prettySCProof(judgement)) + + println(s"Parsed ${p._1}") + } catch { + case e: TPTPParser.TPTPParseException => + println(s"Parse error at line ${e.line}:${e.offset}: ${e.getMessage}") + fail() + } + } + } + +} diff --git a/lisa-utils/src/main/scala/lisa/fol/Common.scala b/lisa-utils/src/main/scala/lisa/fol/Common.scala deleted file mode 100644 index ab09773d5..000000000 --- a/lisa-utils/src/main/scala/lisa/fol/Common.scala +++ /dev/null @@ -1,929 +0,0 @@ -package lisa.fol - -import lisa.utils.K -import lisa.utils.UserLisaException - -import scala.annotation.nowarn -import scala.annotation.showAsInfix -import scala.annotation.targetName -import scala.compiletime.ops.int.- - -import K.given_Conversion_Identifier_String - -trait Common { - - //////////////////////////////////////////////// - ////////////// Base Definitions ////////////// - //////////////////////////////////////////////// - - export K.Identifier - type Arity = Int & Singleton - - /** - * Type of sequences of length N - */ - opaque type **[+T, N <: Arity] >: Seq[T] = Seq[T] - object ** { - def apply[T, N <: Arity](args: T*): T ** N = args.toSeq - def unapplySeq[T, N <: Arity](arg: T ** N): Option[Seq[T]] = Some(arg) - } - - extension [T, N <: Arity](self: T ** N) { - def toSeq: Seq[T] = self - - } - - trait WithArity[N <: Arity] { - val arity: N - } - - class BadArityException(msg: String)(using line: sourcecode.Line, file: sourcecode.File) extends UserLisaException(msg) { - def showError: String = msg - } - - def 
isLegalApplication(withArity: WithArity[?], args: Seq[?]): Boolean = - withArity.arity == -1 || withArity.arity == args.size - - /** - * A container for valid substitutions. For example, if X is a schematic variable and t a term, SubstPair(X, t) is valid. - * If a is a formula, SubstPair(X, a) is not valid - * If P is a schematic predicate of arity N, and L a Lambda of type Term**N |-> Formula, SubstPair(P, L) is valid. - * Etc. SubstPair can be constructed with X := t. - * - * @param _1 The schematic label to substitute - * @param _2 The value to replace it with - */ - class SubstPair private (val _1: SchematicLabel[?], val _2: LisaObject[?]) { - // def toTuple = (_1, _2) - } - object SubstPair { - def apply[S <: LisaObject[S]](_1: SchematicLabel[S], _2: S) = new SubstPair(_1, _2) - } - - given trsubst[S <: LisaObject[S]]: Conversion[(SchematicLabel[S], S), SubstPair] = s => SubstPair(s._1, s._2) - - /** - * A LisaObject is the type for formulas, terms, lambdas. A child of LisaObject is supposed to be parametrized by itself. - * It key property is to define substitution and computation of free scematic symbols. - * The type T denotes the type that the object is guaranteed to keep after a substitution. - * For example, Term <: LisaObject[Term], because a term with some substitution is still a term. - * Similarly, Variable <: LisaObject[Term] because a variable is a term and still is after any substitution. - * However, Variable <: LisaObject[Variable] does not hold because a variable after a substitution is not necessarily a variable anymore. - */ - trait LisaObject[+T <: LisaObject[T]] { - this: T => - - def lift: T & this.type = this - - /** - * Substitution in the LisaObject of schematics symbols by values. It is not guaranteed by the type system that types of schematics and values match, and the substitution can fail if that is the case. - * This is the substitution function that should be implemented. - */ - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): T - def substituteUnsafe2[A <: SchematicLabel[?], B <: LisaObject[B]](map: Map[A, B]): T = substituteUnsafe(map.asInstanceOf) - - /** - * Substitution in the LisaObject of schematics by values, with guaranteed correspondance between the types of schematics and values. - * This is the substitution that should be used when writing proofs. - */ - def substitute(pairs: SubstPair*): T = { - substituteUnsafe(Map(pairs.map(s => (s._1, (s._2: LisaObject[?])))*)) - } - def substituteOne[S <: LisaObject[S]](v: SchematicLabel[S], arg: S): T = substituteUnsafe(Map(v -> arg)) - - /** - * Compute the free schematic symbols in the expression. - */ - def freeSchematicLabels: Set[SchematicLabel[?]] - def freeVariables: Set[Variable] = freeSchematicLabels.collect { case v: Variable => v } - def freeVariableFormulas: Set[VariableFormula] = freeSchematicLabels.collect { case v: VariableFormula => v } - - /** - * Compute the free and non-free schematic symbols in the expression. - */ - def allSchematicLabels: Set[SchematicLabel[?]] - } - - /** - * Base types for LisaObjects: Terms and Formulas. - */ - sealed trait TermOrFormula extends LisaObject[TermOrFormula] {} - - /** - * Constructor types for LISA Objects: Functions into Terms and Formulas. - */ - @showAsInfix - infix trait |->[-I, +O <: LisaObject[O]] extends LisaObject[I |-> O] { - def applyUnsafe(arg: I): O - - } - - /** - * A label is a [[LisaObject]] which is just a name. In general, constant symbols and schematic symbols. 
- */ - trait Label[-A <: LisaObject[A]] { - this: A & LisaObject[A] => - def liftLabel: LisaObject[?] = this - def id: Identifier - - /** - * Renames the symbol. - */ - def rename(newid: Identifier): Label[A] - - /** - * Renames the symbol with an identifier that is fresh for the given list. - */ - def freshRename(taken: Iterable[Identifier]): Label[A] - - } - - /** - * Schematic labels can be substituted by expressions (LisaObject) of the corresponding type - */ - sealed trait SchematicLabel[-A <: LisaObject[A]] extends Label[A] { - this: A & LisaObject[A] => - - /** - * The schematic label can be substituted by anything of an equivalent type. See [[SubstPair]], [[LisaObject]]. - */ - // type SubstitutionType <: A - def rename(newid: Identifier): SchematicLabel[A] - def freshRename(taken: Iterable[Identifier]): SchematicLabel[A] - - /** - * Helper to build a [[SubstPair]] - */ - def :=(replacement: A) = SubstPair(this, replacement) - - } - - /** - * ConstantLabel represent constants in the theory and can't be freely substituted. - */ - sealed trait ConstantLabel[-A <: LisaObject[A]] extends Label[A] { - this: A & LisaObject[A] => - def rename(newid: Identifier): ConstantLabel[A] - def freshRename(taken: Iterable[Identifier]): ConstantLabel[A] - } - - class TypeError extends Error - - /** - * Can be thrown during an unsafe substitution when the type of a schematic symbol and its substituted value don't match. - */ - class SubstitutionException extends Exception - - /** - * Indicates LisaObjects corresponding directly to a Kernel member - */ - trait Absolute - - //////////////////////////////////// - ////////////// Term ////////////// - //////////////////////////////////// - - /** - * The type of terms, corresponding to [[K.Term]]. It can be either of a [[Variable]], a [[Constant]] - * a [[ConstantFunctionLabel]] or a [[SchematicFunctionLabel]]. - */ - sealed trait Term extends TermOrFormula with LisaObject[Term] { - val underlying: K.Term - val label: TermLabel[?] - val args: Seq[Term] - def toStringSeparated(): String = toString() - } - - /** - * A TermLabel is a [[LisaObject]] of type ((Term ** N) |-> Term), that is represented by a functional label. - * It can be either a [[SchematicFunctionLabel]] or a [[ConstantFunctionLabel]]. It corresponds to [[K.TermLabel]] - */ - sealed trait TermLabel[A <: (Term | (Seq[Term] |-> Term)) & LisaObject[A]] extends Label[A] with Absolute { - this: A & LisaObject[A] => - val arity: Arity - def id: Identifier - val underlyingLabel: K.TermLabel - def applySeq(args: Seq[Term]): Term = this match - case l: Variable => l.applyUnsafe(args) - case l: Constant => l.applyUnsafe(args) - case l: FunctionLabel[?] => l.applyUnsafe(args) - def rename(newid: Identifier): TermLabel[A] - def freshRename(taken: Iterable[Identifier]): TermLabel[A] - def mkString(args: Seq[Term]): String - def mkStringSeparated(args: Seq[Term]): String = mkString(args) - } - - /** - * A constant [[TermLabel]], which can be either a [[Constant]] symbol or a [[ConstantFunctionSymbol]]. 
Corresponds to a [[K.ConstantFunctionLabel]] - */ - sealed trait ConstantTermLabel[A <: (Term | (Seq[Term] |-> Term)) & LisaObject[A]] extends TermLabel[A] with ConstantLabel[A] { - this: A & LisaObject[A] => - val underlyingLabel: K.ConstantFunctionLabel - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): ConstantTermLabel[A] - override def rename(newid: Identifier): ConstantTermLabel[A] - def freshRename(taken: Iterable[Identifier]): ConstantTermLabel[A] - } - object ConstantTermLabel { - - /** - * Construct a ConstantTermLabel according to arity: - * A [[Constant]] for arity 0, a [[ConstantFunctionLabel]] otherwise. - * @param id The identifier of the new symbol - * @param arity The arity of the new symbol - * @return The new symbol - */ - def apply[N <: Arity](id: Identifier, arity: N): ConstantTermLabelOfArity[N] = arity match { - case a: 0 => Constant(id) - case n: N => ConstantFunctionLabel[N](id, arity) - } - } - - /** - * Types of constant term labels: [[Constant]] for if N = 0, [[ConstantFunctionLabel]] otherwise. - */ - type ConstantTermLabelOfArity[N <: Arity] <: ConstantTermLabel[?] = N match - case 0 => Constant - case N => ConstantFunctionLabel[N] - - /** - * A schematic [[TermLabel]], which can be either a [[Variable]] symbol or a [[SchematicFunctionSymbol]]. Corresponds to a [[K.SchematicFunctionLabel]] - */ - sealed trait SchematicTermLabel[A <: (Term | (Seq[Term] |-> Term)) & LisaObject[A]] extends TermLabel[A] with SchematicLabel[A] { - this: A & LisaObject[A] => - val underlyingLabel: K.SchematicTermLabel - override def rename(newid: Identifier): SchematicTermLabel[A] - def freshRename(taken: Iterable[Identifier]): SchematicTermLabel[A] - } - object SchematicTermLabel { // Companion - /** - * Construct a SchematicTermLabel according to arity: - * A [[Variable]] for arity 0, a [[SchematicFunctionLabel]] otherwise. - * @param id The identifier of the new symbol - * @param arity The arity of the new symbol - * @return The new symbol - */ - def apply[N <: Arity](id: Identifier, arity: N): SchematicFunctionLabelOfArity[N] = arity match { - case a: 0 => new Variable(id) - case n: N => new SchematicFunctionLabel[N](id, arity) - } - } - type SchematicFunctionLabelOfArity[N <: Arity] <: SchematicTermLabel[?] = N match - case 0 => Variable - case N => SchematicFunctionLabel[N] - - /** - * Can be either a [[ConstantFunctionSymbol]] symbol or a [[SchematicFunctionSymbol]]. Corresponds to a [[K.TermLabel]] - */ - sealed trait FunctionLabel[N <: Arity] extends TermLabel[(Term ** N) |-> Term] with ((Term ** N) |-> Term) { - val underlyingLabel: K.TermLabel - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): (Term ** N) |-> Term - def applyUnsafe(args: (Term ** N)): Term = AppliedFunctional(this, args.toSeq) - override def rename(newid: Identifier): FunctionLabel[N] - def freshRename(taken: Iterable[Identifier]): FunctionLabel[N] - } - - /** - * A Variable, corresponding to [[K.VariableLabel]], is a schematic symbol for terms. - * It counts both as the label and as the term itself. 
- */ - class Variable(val id: Identifier) extends SchematicTermLabel[Term] with Term with Absolute { - val arity: 0 = 0 - val label: Variable = this - val args: Seq[Nothing] = Seq.empty - val underlyingLabel: K.VariableLabel = K.VariableLabel(id) - val underlying = K.VariableTerm(underlyingLabel) - def applyUnsafe(args: Term ** 0) = this - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): Term = { - map.get(this) match { - case Some(subst) => - subst match { - case s: Term => s - case _ => throw SubstitutionException() - } - case None => this - } - } - def freeSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def allSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def rename(newid: Identifier): Variable = Variable(newid) - def freshRename(taken: Iterable[Identifier]): Variable = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = if (args.size == 0) toString() else toString() + "(" + "illegal_arguments: " + args.mkString(", ") + ")" - - def canEqual(that: Any): Boolean = - that.isInstanceOf[Variable] - - // Intentionally avoiding the call to super.equals because no ancestor has overridden equals (see note 7 below) - override def equals(that: Any): Boolean = - that match { - case other: Variable => - ((this eq other) // optional, but highly recommended sans very specific knowledge about this exact class implementation - || (other.canEqual(this) // optional only if this class is marked final - && (hashCode == other.hashCode) // optional, exceptionally execution efficient if hashCode is cached, at an obvious space inefficiency tradeoff - && ((id == other.id)))) - case _ => - false - } - - // Intentionally avoiding the call to super.hashCode because no ancestor has overridden hashCode (see note 7 below) - override def hashCode(): Int = - id.## - } - object Variable { - def unapply(variable: Variable): Option[Identifier] = Some(variable.id) - } - - /** - * A Constant, corresponding to [[K.ConstantLabel]], is a label for terms. - * It counts both as the label and as the term itself. 
- */ - class Constant(val id: Identifier) extends Term with Absolute with ConstantTermLabel[Constant] with LisaObject[Constant] { - val arity: 0 = 0 - val label: Constant = this - val args: Seq[Nothing] = Seq.empty - val underlyingLabel: K.ConstantFunctionLabel = K.ConstantFunctionLabel(id, 0) - val underlying = K.Term(underlyingLabel, Seq.empty) - def applyUnsafe(args: Term ** 0) = this - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): Constant = this - def freeSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def allSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def rename(newid: Identifier): Constant = Constant(newid) - def freshRename(taken: Iterable[Identifier]): Constant = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = if (args.size == 0) toString() else toString() + "(" + "illegal_arguments: " + args.mkString(", ") + ")" - - def canEqual(that: Any): Boolean = - that.isInstanceOf[Constant] - - // Intentionally avoiding the call to super.equals because no ancestor has overridden equals (see note 7 below) - override def equals(that: Any): Boolean = - that match { - case other: Constant => - ((this eq other) // optional, but highly recommended sans very specific knowledge about this exact class implementation - || (other.canEqual(this) // optional only if this class is marked final - && (hashCode == other.hashCode) // optional, exceptionally execution efficient if hashCode is cached, at an obvious space inefficiency tradeoff - && ((id == other.id)))) - case _ => - false - } - - // Intentionally avoiding the call to super.hashCode because no ancestor has overridden hashCode (see note 7 below) - override def hashCode(): Int = - id.## - } - object Constant { - def unapply(constant: Constant): Option[Identifier] = Some(constant.id) - } - - /** - * A schematic functional label (corresponding to [[K.SchematicFunctionLabel]]) is a functional label and also a schematic label. 
- * It can be substituted by any expression of type (Term ** N) |-> Term - */ - class SchematicFunctionLabel[N <: Arity](val id: Identifier, val arity: N) extends SchematicTermLabel[(Term ** N) |-> Term] with FunctionLabel[N] { - val underlyingLabel: K.SchematicTermLabel = K.SchematicFunctionLabel(id, arity) - - def unapplySeq(t: AppliedFunctional): Seq[Term] = t match { - case AppliedFunctional(label, args) if (label == this) => args - case _ => Seq.empty - } - @nowarn("msg=the type test for.*cannot be checked at runtime because its type arguments") - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): ((Term ** N) |-> Term) = { - map.get(this) match { - case Some(subst) => - subst match { - case s: ((Term ** N) |-> Term) => s - case _ => throw SubstitutionException() - } - case None => this - } - } - def freeSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def allSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def rename(newid: Identifier): SchematicFunctionLabel[N] = SchematicFunctionLabel(newid, arity) - def freshRename(taken: Iterable[Identifier]): SchematicFunctionLabel[N] = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = toString() + "(" + args.mkString(", ") + ")" - override def mkStringSeparated(args: Seq[Term]): String = mkString(args) - - def canEqual(that: Any): Boolean = - that.isInstanceOf[SchematicFunctionLabel[?]] - - override def equals(that: Any): Boolean = - that match { - case other: SchematicFunctionLabel[_] => - ((this eq other) // optional, but highly recommended sans very specific knowledge about this exact class implementation - || (other.canEqual(this) // optional only if this class is marked final - && (hashCode == other.hashCode) // optional, exceptionally execution efficient if hashCode is cached, at an obvious space inefficiency tradeoff - && ((id == other.id) - && (arity == other.arity)))) - case _ => - false - } - - // Intentionally avoiding the call to super.hashCode because no ancestor has overridden hashCode (see note 7 below) - override def hashCode(): Int = - 31 * ( - id.## - ) + arity.## - } - object SchematicFunctionLabel { - def unapply[N <: Arity](sfl: SchematicFunctionLabel[N]): Option[(Identifier, N)] = Some((sfl.id, sfl.arity)) - } - - /** - * A constant functional label of arity N. 
- */ - class ConstantFunctionLabel[N <: Arity](val id: Identifier, val arity: N) extends ConstantTermLabel[((Term ** N) |-> Term)] with FunctionLabel[N] { - val underlyingLabel: K.ConstantFunctionLabel = K.ConstantFunctionLabel(id, arity) - private var infix: Boolean = false - def unapplySeq(t: AppliedFunctional): Seq[Term] = t match { - case AppliedFunctional(label, args) if (label == this) => args - case _ => Seq.empty - } - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): ConstantFunctionLabel[N] = this - def freeSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def allSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def rename(newid: Identifier): ConstantFunctionLabel[N] = ConstantFunctionLabel(newid, arity) - def freshRename(taken: Iterable[Identifier]): ConstantFunctionLabel[N] = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = - if (infix & args.size == 2) (args(0).toStringSeparated() + " " + toString() + " " + args(1).toStringSeparated()) else toString() + "(" + args.mkString(", ") + ")" - override def mkStringSeparated(args: Seq[Term]): String = if (infix) "(" + mkString(args) + ")" else mkString(args) - - def canEqual(that: Any): Boolean = - that.isInstanceOf[SchematicFunctionLabel[?]] - - // Intentionally avoiding the call to super.equals because no ancestor has overridden equals (see note 7 below) - override def equals(that: Any): Boolean = - that match { - case other: ConstantFunctionLabel[_] => - ((this eq other) // optional, but highly recommended sans very specific knowledge about this exact class implementation - || (other.canEqual(this) // optional only if this class is marked final - && (hashCode == other.hashCode) // optional, exceptionally execution efficient if hashCode is cached, at an obvious space inefficiency tradeoff - && ((id == other.id) - && (arity == other.arity)))) - case _ => - false - } - - // Intentionally avoiding the call to super.hashCode because no ancestor has overridden hashCode (see note 7 below) - override def hashCode(): Int = - 31 * ( - id.## - ) + arity.## - } - object ConstantFunctionLabel { - def infix[N <: Arity](id: Identifier, arity: N): ConstantFunctionLabel[N] = - val x = ConstantFunctionLabel[N](id, arity) - x.infix = true - x - def unapply[N <: Arity](cfl: ConstantFunctionLabel[N]): Option[(Identifier, N)] = Some((cfl.id, cfl.arity)) - } - - /** - * A term made from a functional label of arity N and N arguments - */ - class AppliedFunctional(val label: FunctionLabel[?], val args: Seq[Term]) extends Term with Absolute { - override val underlying = K.Term(label.underlyingLabel, args.map(_.underlying)) - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): Term = - label.substituteUnsafe(map).applyUnsafe(args.map[Term]((x: Term) => x.substituteUnsafe(map))) - - def freeSchematicLabels: Set[SchematicLabel[?]] = label.freeSchematicLabels ++ args.flatMap(_.freeSchematicLabels) - def allSchematicLabels: Set[SchematicLabel[?]] = label.allSchematicLabels ++ args.flatMap(_.allSchematicLabels) - override def toString: String = label.mkString(args) - override def toStringSeparated(): String = label.mkStringSeparated(args) - - def canEqual(that: Any): Boolean = - that.isInstanceOf[AppliedFunctional] - - // Intentionally avoiding the call to super.equals because no ancestor has overridden equals (see note 7 below) - override def equals(that: Any): Boolean = - that match { - case other: AppliedFunctional => - ((this eq other) // optional, but highly 
recommended sans very specific knowledge about this exact class implementation - || (other.canEqual(this) // optional only if this class is marked final - && (hashCode == other.hashCode) // optional, exceptionally execution efficient if hashCode is cached, at an obvious space inefficiency tradeoff - && ((label == other.label) - && (args == other.args)))) - case _ => - false - } - - // Intentionally avoiding the call to super.hashCode because no ancestor has overridden hashCode (see note 7 below) - override def hashCode(): Int = - 31 * ( - label.## - ) + args.## - } - object AppliedFunctional { - def unapply(af: AppliedFunctional): Option[(FunctionLabel[?], Seq[Term])] = Some((af.label, af.args)) - } - - ////////////////////////////////////// - ////////////// Formulas ////////////// - ////////////////////////////////////// - - /** - * The type of formulas, corresponding to [[K.Formula]] - */ - sealed trait Formula extends TermOrFormula with LisaObject[Formula] { - val underlying: K.Formula - def toStringSeparated() = toString() - } - - ///////////////////// - // Atomic Formulas // - ///////////////////// - - sealed trait AtomicFormula extends Formula { - val label: AtomicLabel[?] - val args: Seq[Term] - } - - /** - * A AtomicLabel is a [[LisaObject]] of type ((Term ** N) |-> Formula), that is represented by a predicate label. - * It can be either a [[SchematicPredicateLabel]] or a [[ConstantPredicateLabel]]. - */ - sealed trait AtomicLabel[A <: (Formula | (Seq[Term] |-> Formula)) & LisaObject[A]] extends Label[A] with Absolute { - this: A & LisaObject[A] => - val arity: Arity - def id: Identifier - val underlyingLabel: K.AtomicLabel - def applySeq(args: Seq[Term]): Formula = this match - case l: VariableFormula => l.applyUnsafe(args) - case l: ConstantFormula => l.applyUnsafe(args) - case l: PredicateLabel[?] => l.applyUnsafe(args) - - def rename(newid: Identifier): AtomicLabel[A] - def freshRename(taken: Iterable[Identifier]): AtomicLabel[A] - def mkString(args: Seq[Term]): String - def mkStringSeparated(args: Seq[Term]): String = mkString(args) - } - - /** - * A constant [[AtomicLabel]], which can be either a [[ConstantFormula]] symbol or a [[ConstantPredicateSymbol]]. Corresponds to a [[K.ConstantAtomicLabel]] - */ - sealed trait ConstantAtomicLabel[A <: (Formula | (Seq[Term] |-> Formula)) & LisaObject[A]] extends AtomicLabel[A] with ConstantLabel[A] { - this: A & LisaObject[A] => - val underlyingLabel: K.ConstantAtomicLabel - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): ConstantAtomicLabel[A] - override def rename(newid: Identifier): ConstantAtomicLabel[A] - def freshRename(taken: Iterable[Identifier]): ConstantAtomicLabel[A] - } - object ConstantAtomicLabel { - - /** - * Construct a ConstantTermLabel according to arity: - * A [[Constant]] for arity 0, a [[ConstantFunctionLabel]] otherwise. - * @param id The identifier of the new symbol - * @param arity The arity of the new symbol - * @return The new symbol - */ - def apply[N <: Arity](id: Identifier, arity: N): ConstantAtomicLabelOfArity[N] = arity match { - case a: 0 => ConstantFormula(id) - case n: N => ConstantPredicateLabel[N](id, arity) - } - } - - /** - * Types of constant atomic labels: [[ConstantFormula]] for if N = 0, [[ConstantPredicateLabel]] otherwise. - */ - type ConstantAtomicLabelOfArity[N <: Arity] <: ConstantAtomicLabel[?] 
= N match { - case 0 => ConstantFormula - case N => ConstantPredicateLabel[N] - } - - /** - * A schematic [[AtomicLabel]], which can be either a [[VariableFormula]] symbol or a [[SchematicPredicateLabel]]. Corresponds to a [[K.SchematicAtomicLabel]] - */ - sealed trait SchematicAtomicLabel[A <: (Formula | (Seq[Term] |-> Formula)) & LisaObject[A]] extends AtomicLabel[A] with SchematicLabel[A] { - this: A & LisaObject[A] => - val underlyingLabel: K.SchematicAtomicLabel - override def rename(newid: Identifier): SchematicAtomicLabel[A] - def freshRename(taken: Iterable[Identifier]): SchematicAtomicLabel[A] - - } - object SchematicAtomicLabel { // Companion - /** - * Construct a SchematicTermLabel according to arity: - * A [[Variable]] for arity 0, a [[SchematicFunctionLabel]] otherwise. - * @param id The identifier of the new symbol - * @param arity The arity of the new symbol - * @return The new symbol - */ - def apply[N <: Arity](id: Identifier, arity: N): SchematicAtomicLabelOfArity[N] = arity match { - case a: 0 => new VariableFormula(id) - case n: N => new SchematicPredicateLabel[N](id, arity) - } - } - - type SchematicAtomicLabelOfArity[N <: Arity] <: SchematicAtomicLabel[?] = N match { - case 0 => VariableFormula - case N => SchematicPredicateLabel[N] - } - - /** - * Can be either a [[ConstantFunctionSymbol]] symbol or a [[SchematicFunctionSymbol]]. Corresponds to a [[K.TermLabel]] - */ - sealed trait PredicateLabel[N <: Arity] extends AtomicLabel[(Term ** N) |-> Formula] with ((Term ** N) |-> Formula) with Absolute { - val underlyingLabel: K.AtomicLabel - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): (Term ** N) |-> Formula - def applyUnsafe(args: (Term ** N)): Formula = AppliedPredicate(this, args.toSeq) - override def rename(newid: Identifier): PredicateLabel[N] - def freshRename(taken: Iterable[Identifier]): PredicateLabel[N] - } - - /** - * A Variable for formulas, corresponding to [[K.VariableFormulaLabel]], is a schematic symbol for formulas. - * It counts both as the label and as the term itself. - */ - case class VariableFormula(id: Identifier) extends SchematicAtomicLabel[Formula] with AtomicFormula with Absolute { - override val arity: 0 = 0 - val label: VariableFormula = this - val args: Seq[Nothing] = Seq.empty - val underlyingLabel: K.VariableFormulaLabel = K.VariableFormulaLabel(id) - val underlying = K.AtomicFormula(underlyingLabel, Seq.empty) - def applyUnsafe(args: Term ** 0): Formula = this - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): Formula = { - map.get(this) match { - case Some(subst) => - subst match { - case s: Formula => s - case _ => throw SubstitutionException() - } - case None => this - } - } - def freeSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def allSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def rename(newid: Identifier): VariableFormula = VariableFormula(newid) - def freshRename(taken: Iterable[Identifier]): VariableFormula = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = if (args.size == 0) toString() else toString() + "(" + "illegal_arguments: " + args.mkString(", ") + ")" - } - - /** - * A Constant formula, corresponding to [[K.ConstantFormulaLabel]]. - * It counts both as the label and as the formula itself. Usually either True or False. 
- */ - case class ConstantFormula(id: Identifier) extends ConstantAtomicLabel[Formula] with AtomicFormula with Absolute with ConstantLabel[Formula] { - override val arity: 0 = 0 - val label: ConstantFormula = this - val args: Seq[Nothing] = Seq.empty - val underlyingLabel: K.ConstantAtomicLabel = K.ConstantAtomicLabel(id, 0) - val underlying = K.AtomicFormula(underlyingLabel, Seq.empty) - def applyUnsafe(args: Term ** 0): Formula = this - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): ConstantFormula = this - def freeSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def allSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def rename(newid: Identifier): ConstantFormula = ConstantFormula(newid) - def freshRename(taken: Iterable[Identifier]): ConstantFormula = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = if (args.size == 0) toString() else toString() + "(" + "illegal_arguments: " + args.mkString(", ") + ")" - } - - /** - * A schematic predicate label (corresponding to [[K.SchematicPredicateLabel]]) is a [[AtomicLabel]] and also a [[SchematicLabel]]. - * It can be substituted by any expression of type (Term ** N) |-> Formula - */ - case class SchematicPredicateLabel[N <: Arity](id: Identifier, arity: N) extends SchematicAtomicLabel[(Term ** N) |-> Formula] with PredicateLabel[N] { - val underlyingLabel: K.SchematicPredicateLabel = K.SchematicPredicateLabel(id, arity) - def unapplySeq(t: AppliedFunctional): Seq[Term] = t match { - case AppliedFunctional(label, args) if (label == this) => args - case _ => Seq.empty - } - @nowarn("msg=the type test for.*cannot be checked at runtime because its type arguments") - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): |->[Term ** N, Formula] = { - map.get(this) match { - case Some(subst) => - subst match { - case s: |->[Term ** N, Formula] => s - case _ => throw SubstitutionException() - } - case None => this - } - } - def freeSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def allSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def rename(newid: Identifier): SchematicPredicateLabel[N] = SchematicPredicateLabel(newid, arity) - def freshRename(taken: Iterable[Identifier]): SchematicPredicateLabel[N] = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = toString() + "(" + args.mkString(", ") + ")" - override def mkStringSeparated(args: Seq[Term]): String = mkString(args) - } - - /** - * A constant predicate label corresponding to [[K.ConstantAtomicLabel]] of arity >= 1. 
- */ - case class ConstantPredicateLabel[N <: Arity](id: Identifier, arity: N) extends ConstantAtomicLabel[Term ** N |-> Formula] with PredicateLabel[N] { - val underlyingLabel: K.ConstantAtomicLabel = K.ConstantAtomicLabel(id, arity) - private var infix = false - def unapplySeq(f: AppliedPredicate): Seq[Term] = f match { - case AppliedPredicate(label, args) if (label == this) => args - case _ => Seq.empty - } - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): ConstantPredicateLabel[N] = this - def freeSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def allSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def rename(newid: Identifier): ConstantPredicateLabel[N] = ConstantPredicateLabel(newid, arity) - def freshRename(taken: Iterable[Identifier]): ConstantPredicateLabel[N] = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Term]): String = if (infix) (args(0).toStringSeparated() + " " + toString() + " " + args(1).toStringSeparated()) else toString() + "(" + args.mkString(", ") + ")" - override def mkStringSeparated(args: Seq[Term]): String = if (infix) "(" + mkString(args) + ")" else mkString(args) - } - object ConstantPredicateLabel { - def infix[N <: Arity](id: Identifier, arity: N): ConstantPredicateLabel[N] = - val x = new ConstantPredicateLabel[N](id, arity) - x.infix = true - x - } - - /** - * A formula made from a predicate label of arity N and N arguments - */ - case class AppliedPredicate(label: PredicateLabel[?], args: Seq[Term]) extends AtomicFormula with Absolute { - override val underlying = K.AtomicFormula(label.underlyingLabel, args.map(_.underlying)) - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): Formula = - label.substituteUnsafe(map).applyUnsafe(args.map[Term]((x: Term) => x.substituteUnsafe(map))) - - def freeSchematicLabels: Set[SchematicLabel[?]] = label.freeSchematicLabels ++ args.toSeq.flatMap(_.freeSchematicLabels) - def allSchematicLabels: Set[SchematicLabel[?]] = label.allSchematicLabels ++ args.toSeq.flatMap(_.allSchematicLabels) - - override def toString: String = label.mkString(args) - override def toStringSeparated(): String = label.mkStringSeparated(args) - } - - //////////////// - // Connectors // - //////////////// - - /** - * A ConnectorLabel is a [[LisaObject]] of type ((Formula ** N) |-> Formula), that is represented by a connector label in the kernel. - * It can be either a [[SchematicConnectorLabel]] or a [[ConstantConnectorLabel]]. - */ - sealed trait ConnectorLabel extends (Seq[Formula] |-> Formula) with Label[(Seq[Formula] |-> Formula)] with Absolute { - val arity: Arity - def id: Identifier - val underlyingLabel: K.ConnectorLabel - def applySeq(args: Seq[Formula]): Formula = applyUnsafe(args) - def rename(newid: Identifier): ConnectorLabel - def freshRename(taken: Iterable[Identifier]): ConnectorLabel - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): |->[Seq[Formula], Formula] - def mkString(args: Seq[Formula]): String - def mkStringSeparated(args: Seq[Formula]): String - - } - - /** - * A schematic predicate label (corresponding to [[K.SchematicPredicateLabel]]) is a [[ConnectorLabel]] and also a [[SchematicLabel]]. 
- * It can be substituted by any expression of type (Formula ** N) |-> Formula - */ - case class SchematicConnectorLabel[N <: Arity](id: Identifier, arity: N) extends ConnectorLabel with SchematicLabel[Formula ** N |-> Formula] with ((Formula ** N) |-> Formula) { - val underlyingLabel: K.SchematicConnectorLabel = K.SchematicConnectorLabel(id, arity) - def unapplySeq(f: AppliedPredicate): Seq[Term] = f match { - case AppliedPredicate(label, args) if (label == this) => args - case _ => Seq.empty - } - @nowarn("msg=the type test for.*cannot be checked at runtime because its type arguments") - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): |->[Formula ** N, Formula] = { - map.get(this) match { - case Some(subst) => - subst match { - case s: |->[Formula ** N, Formula] => s - case _ => throw SubstitutionException() - } - case None => this - } - } - // def apply(args: Seq[Formula]): Formula = apply(args) - def applyUnsafe(args: Formula ** N): Formula = AppliedConnector(this, args.toSeq) - - def freeSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def allSchematicLabels: Set[SchematicLabel[?]] = Set(this) - def rename(newid: Identifier): SchematicConnectorLabel[N] = SchematicConnectorLabel(newid, arity) - def freshRename(taken: Iterable[Identifier]): SchematicConnectorLabel[N] = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Formula]): String = toString() + "(" + args.mkString(", ") + ")" - def mkStringSeparated(args: Seq[Formula]): String = mkString(args) - - } - - /** - * A constant connector label is a logical operator such as /\, \/, !, ==>, <=>. - * It corresponds to a [[K.ConstantConnectorLabel]]. - */ - trait ConstantConnectorLabel[N <: Arity] extends ConnectorLabel with ConstantLabel[Formula ** N |-> Formula] with ((Formula ** N) |-> Formula) { - val underlyingLabel: K.ConstantConnectorLabel - def id: Identifier = underlyingLabel.id - def unapplySeq(f: AppliedConnector): Seq[Formula] = f match { - case AppliedConnector(label, args) if (label == this) => args - case _ => Seq.empty - } - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): this.type = this - def applyUnsafe(args: Formula ** N): Formula = AppliedConnector(this, args.toSeq) - def freeSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def allSchematicLabels: Set[SchematicLabel[?]] = Set.empty - def rename(newid: Identifier): ConstantConnectorLabel[N] = throw new Error("Can't rename a constant connector label") - def freshRename(taken: Iterable[Identifier]): ConstantConnectorLabel[N] = rename(K.freshId(taken, id)) - override def toString(): String = id - def mkString(args: Seq[Formula]): String = if (args.length == 2) (args(0).toString() + " " + toString() + " " + args(1).toString()) else toString() + "(" + args.mkString(", ") + ")" - override def mkStringSeparated(args: Seq[Formula]): String = if (args.length == 2) "(" + mkString(args) + ")" else mkString(args) - - } - - /** - * A formula made from a connector label of arity N and N arguments - */ - case class AppliedConnector(label: ConnectorLabel, args: Seq[Formula]) extends Formula with Absolute { - override val underlying = K.ConnectorFormula(label.underlyingLabel, args.map(_.underlying)) - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): Formula = - label.applyUnsafe(args.map[Formula]((x: Formula) => x.substituteUnsafe(map))) - def freeSchematicLabels: Set[SchematicLabel[?]] = label.freeSchematicLabels ++ args.flatMap(_.freeSchematicLabels) - def allSchematicLabels: 
Set[SchematicLabel[?]] = label.allSchematicLabels ++ args.flatMap(_.allSchematicLabels) - // override def substituteUnsafe(v: Variable, subs: Term) = AppliedPredicateFormula[N](f, args.map(_.substituteUnsafe(v, subs))) - - override def toString: String = label.mkString(args) - override def toStringSeparated(): String = label.mkString(args) - } - - ///////////// - // Binders // - ///////////// - - /** - * A binder for variables, for example \exists, \forall and \exists! but possibly others. - */ - trait BinderLabel extends |->[(Variable, Formula), Formula] with Absolute { - def id: Identifier - } - - /** - * A binder label that exactly correspond to a kernel binder, i.e. \exists, \forall and \exists! - */ - trait BaseBinderLabel extends BinderLabel with ((Variable, Formula) |-> BinderFormula) with Absolute { - val underlyingLabel: K.BinderLabel - - def applyUnsafe(arg: (Variable, Formula)): BinderFormula = BinderFormula(this, arg._1, arg._2) - def apply(v: Variable, f: Formula): BinderFormula = applyUnsafe((v, f)) - def unapply(b: BinderFormula): Option[(Variable, Formula)] = b match { - case BinderFormula(label, v, f) if (label == this) => Some((v, f)) - case _ => None - } - inline def freeSchematicLabels: Set[SchematicLabel[?]] = Set.empty - inline def allSchematicLabels: Set[SchematicLabel[?]] = Set.empty - inline def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): this.type = this - override def toString() = id - - } - - /** - * A quantified formula made of a [[BaseBinderLabel]] and an underlying formula, in a namefull representation. - */ - case class BinderFormula(f: BaseBinderLabel, bound: Variable, body: Formula) extends Absolute with Formula with LisaObject[BinderFormula] { - override val underlying = K.BinderFormula(f.underlyingLabel, bound.underlyingLabel, body.underlying) - - def allSchematicLabels: Set[Common.this.SchematicLabel[?]] = body.allSchematicLabels + bound - def freeSchematicLabels: Set[Common.this.SchematicLabel[?]] = body.freeSchematicLabels - bound - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): BinderFormula = { - val newSubst = map - bound - if (map.values.flatMap(_.freeSchematicLabels).toSet.contains(bound)) { - val taken: Set[SchematicLabel[?]] = body.allSchematicLabels ++ map.keys - val newBound: Variable = bound.rename(lisa.utils.KernelHelpers.freshId(taken.map(_.id), bound.id)) - val newBody = body.substituteOne(bound, newBound.lift) - BinderFormula(f, newBound, newBody.substituteUnsafe(newSubst)) - } else { - BinderFormula(f, bound, body.substituteUnsafe(newSubst)) - } - } - // override def toString():String = f.toString()+bound.toString()+". 
"+body.toString() - override def toString(): String = f.toString() + "(" + bound.toString() + ", " + body.toString() + ")" - - } - def instantiateBinder(f: BinderFormula, t: Term): Formula = f.body.substituteUnsafe(Map(f.bound -> t)) - - // Application methods for |-> - - extension [S, T <: LisaObject[T]](t: (S ** -1) |-> T) { - def apply(s: Seq[S]): T = t.applyUnsafe(s) - } - extension [S, T <: LisaObject[T], N <: Arity](t: (S ** N) |-> T) { - def applySeq(s: Seq[S]): T = t.applyUnsafe(s) - } - - extension [S, T <: LisaObject[T]](t: (S ** 1) |-> T) { - def apply(s1: S): T = t.applyUnsafe(Seq(s1)) - } - extension [S, T <: LisaObject[T]](t: (S ** 2) |-> T) { - def apply(s1: S, s2: S): T = t.applyUnsafe(Seq(s1, s2)) - } - extension [S <: LisaObject[S], T <: LisaObject[T]](t: (S ** 3) |-> T) { - def apply(s1: S, s2: S, s3: S): T = t.applyUnsafe(Seq(s1, s2, s3)) - } - extension [S <: LisaObject[S], T <: LisaObject[T]](t: (S ** 4) |-> T) { - def apply(s1: S, s2: S, s3: S, s4: S): T = t.applyUnsafe(Seq(s1, s2, s3, s4)) - } - extension [S <: LisaObject[S], T <: LisaObject[T]](t: (S ** 5) |-> T) { - def apply(s1: S, s2: S, s3: S, s4: S, s5: S): T = t.applyUnsafe(Seq(s1, s2, s3, s4, s5)) - } - -} diff --git a/lisa-utils/src/main/scala/lisa/fol/FOL.scala b/lisa-utils/src/main/scala/lisa/fol/FOL.scala deleted file mode 100644 index d22d98c5e..000000000 --- a/lisa-utils/src/main/scala/lisa/fol/FOL.scala +++ /dev/null @@ -1,6 +0,0 @@ -package lisa.fol - -object FOL extends Common with Sequents with Lambdas with Predef { - export FOLHelpers.{*, given} - -} diff --git a/lisa-utils/src/main/scala/lisa/fol/FOLHelpers.scala b/lisa-utils/src/main/scala/lisa/fol/FOLHelpers.scala deleted file mode 100644 index d9232d3ba..000000000 --- a/lisa-utils/src/main/scala/lisa/fol/FOLHelpers.scala +++ /dev/null @@ -1,138 +0,0 @@ -package lisa.fol - -import lisa.fol.FOL.* -import lisa.kernel.fol.FOL.Identifier -import lisa.utils.FOLParser -import lisa.utils.K -import lisa.utils.LisaException - -/** - * A helper file that provides various syntactic sugars for LISA's FOL and proofs. Best imported through utilities.Helpers - * Usage: - *- * import utilities.Helpers.* - *- * or - *- * extends utilities.KernelHelpers.* - *- */ -object FOLHelpers { - export lisa.utils.KernelHelpers.{freshId, nFreshId, given_Conversion_String_Identifier, given_Conversion_Identifier_String, given_Conversion_Boolean_List_String_Option} - - ///////////////// - // FOL helpers // - ///////////////// - - /* Conversions */ - // Conversions to lambdaExpression's - given [T <: LisaObject[T], R <: LisaObject[R]]: Conversion[R, LambdaExpression[T, R, 0]] = LambdaExpression[T, R, 0](Seq(), _, 0) - given [T <: LisaObject[T], R <: LisaObject[R]]: Conversion[(SchematicLabel[T], R), LambdaExpression[T, R, 1]] = a => LambdaExpression(Seq(a._1), a._2, 1) - given [T <: LisaObject[T], R <: LisaObject[R], N <: Arity]: Conversion[(SchematicLabel[T] ** N, R), LambdaExpression[T, R, N]] = a => { - val s = a._1.toSeq - LambdaExpression(s, a._2, s.length.asInstanceOf) - } - - given [T <: LisaObject[T]]: Conversion[T, T ** 1] = **.apply[T, 1](_) - - given Conversion[Int, Arity] = _.asInstanceOf - - /* - extension [I, O <: LisaObject[O]] (e: (I ** 0) |-> O) { - def apply() = e.apply(EmptyTuple) - } - */ - - // helpers to create new schematic symbols, fetching the scala name of the variable. 
- def variable(using name: sourcecode.Name): Variable = Variable(name.value) - def function[N <: Arity: ValueOf](using name: sourcecode.Name): SchematicFunctionLabel[N] = SchematicFunctionLabel[N](name.value, valueOf[N]) - def formulaVariable(using name: sourcecode.Name): VariableFormula = VariableFormula(name.value) - def predicate[N <: Arity: ValueOf](using name: sourcecode.Name): SchematicPredicateLabel[N] = SchematicPredicateLabel[N](name.value, valueOf[N]) - def connector[N <: Arity: ValueOf](using name: sourcecode.Name): SchematicConnectorLabel[N] = SchematicConnectorLabel[N](name.value, valueOf[N]) - - def freshVariable(using name: sourcecode.Name)(elems: LisaObject[?]*): Variable = Variable(freshId(elems.flatMap(_.freeVariables).map(_.id), name.value)) - def freshVariableFormula(using name: sourcecode.Name)(elems: LisaObject[?]*): VariableFormula = VariableFormula(freshId(elems.flatMap(_.freeVariables).map(_.id), name.value)) - - //////////////////////////////////////// - // Kernel to Front transformers // - //////////////////////////////////////// - - // TermLabel - def asFrontLabel(tl: K.TermLabel): TermLabel[?] = tl match - case tl: K.ConstantFunctionLabel => asFrontLabel(tl) - case tl: K.SchematicTermLabel => asFrontLabel(tl) - def asFrontLabel[N <: Arity](cfl: K.ConstantFunctionLabel): ConstantTermLabelOfArity[N] = cfl.arity.asInstanceOf[N] match - case n: 0 => Constant(cfl.id) - case n: N => ConstantFunctionLabel[N](cfl.id, n) - def asFrontLabel(stl: K.SchematicTermLabel): SchematicTermLabel[?] = stl match - case v: K.VariableLabel => asFrontLabel(v) - case v: K.SchematicFunctionLabel => asFrontLabel(v) - def asFrontLabel[N <: Arity](sfl: K.SchematicFunctionLabel): SchematicFunctionLabel[N] = - SchematicFunctionLabel(sfl.id, sfl.arity.asInstanceOf) - def asFrontLabel(v: K.VariableLabel): Variable = Variable(v.id) - - // Term - def asFront(t: K.Term): Term = asFrontLabel(t.label).applySeq(t.args.map(asFront)) - - // FormulaLabel - def asFrontLabel(fl: K.FormulaLabel): AtomicLabel[?] | ConnectorLabel | BinderLabel = fl match - case fl: K.ConnectorLabel => asFrontLabel(fl) - case fl: K.AtomicLabel => asFrontLabel(fl) - case fl: K.BinderLabel => asFrontLabel(fl) - def asFrontLabel(pl: K.AtomicLabel): AtomicLabel[?] = pl match - case pl: K.ConstantAtomicLabel => asFrontLabel(pl) - case pl: K.SchematicAtomicLabel => asFrontLabel(pl) - def asFrontLabel(cl: K.ConnectorLabel): ConnectorLabel = cl match - case cl: K.ConstantConnectorLabel => asFrontLabel(cl) - case cl: K.SchematicConnectorLabel => asFrontLabel(cl) - def asFrontLabel[N <: Arity](cpl: K.ConstantAtomicLabel): ConstantAtomicLabelOfArity[N] = cpl.arity.asInstanceOf[N] match - case n: 0 => ConstantFormula(cpl.id) - case n: N => ConstantPredicateLabel(cpl.id, cpl.arity.asInstanceOf) - def asFrontLabel(sfl: K.SchematicFormulaLabel): SchematicAtomicLabel[?] | SchematicConnectorLabel[?] = - sfl match - case v: K.VariableFormulaLabel => asFrontLabel(v) - case v: K.SchematicPredicateLabel => asFrontLabel(v) - case v: K.SchematicConnectorLabel => asFrontLabel(v) - def asFrontLabel(svop: K.SchematicAtomicLabel): SchematicAtomicLabel[?] 
= svop match - case v: K.VariableFormulaLabel => asFrontLabel(v) - case v: K.SchematicPredicateLabel => asFrontLabel(v) - def asFrontLabel(v: K.VariableFormulaLabel): VariableFormula = VariableFormula(v.id) - def asFrontLabel[N <: Arity](spl: K.SchematicPredicateLabel): SchematicPredicateLabel[N] = - SchematicPredicateLabel(spl.id, spl.arity.asInstanceOf) - def asFrontLabel[N <: Arity](scl: K.SchematicConnectorLabel): SchematicConnectorLabel[N] = - SchematicConnectorLabel(scl.id, scl.arity.asInstanceOf) - def asFrontLabel(cpl: K.ConstantConnectorLabel): ConnectorLabel = cpl match - case K.Neg => Neg - case K.Implies => Implies - case K.Iff => Iff - case K.And => And - case K.Or => Or - def asFrontLabel(bl: K.BinderLabel): BaseBinderLabel = bl match { - case K.Forall => Forall - case K.Exists => Exists - case K.ExistsOne => ExistsOne - } - - // Formula - def asFront(f: K.Formula): Formula = f match - case f: K.AtomicFormula => asFront(f) - case f: K.ConnectorFormula => asFront(f) - case f: K.BinderFormula => asFront(f) - def asFront(pf: K.AtomicFormula): Formula = - asFrontLabel(pf.label).applySeq(pf.args.map(asFront)) - def asFront(cf: K.ConnectorFormula): Formula = - asFrontLabel(cf.label).applyUnsafe(cf.args.map(asFront)) - def asFront(bf: K.BinderFormula): BinderFormula = - asFrontLabel(bf.label).apply(asFrontLabel(bf.bound), asFront(bf.inner)) - - // Sequents - def asFront(s: K.Sequent): Sequent = Sequent(s.left.map(asFront), s.right.map(asFront)) - - // Lambdas - def asFrontLambda(l: K.LambdaTermTerm): LambdaExpression[Term, Term, ?] = LambdaExpression(l.vars.map(asFrontLabel), asFront(l.body), l.vars.size) - def asFrontLambda(l: K.LambdaTermFormula): LambdaExpression[Term, Formula, ?] = LambdaExpression(l.vars.map(asFrontLabel), asFront(l.body), l.vars.size) - def asFrontLambda(l: K.LambdaFormulaFormula): LambdaExpression[Formula, Formula, ?] = LambdaExpression(l.vars.map(asFrontLabel), asFront(l.body), l.vars.size) - - def freshVariable[A <: LisaObject[A]](obj: A, name: Identifier): Variable = Variable(freshId(obj.allSchematicLabels.map(_.id), name)) - def freshVariable[A <: LisaObject[A]](objs: Iterable[A], name: Identifier): Variable = Variable(freshId(objs.flatMap(_.allSchematicLabels).map(_.id), name)) -} diff --git a/lisa-utils/src/main/scala/lisa/fol/Lambdas.scala b/lisa-utils/src/main/scala/lisa/fol/Lambdas.scala deleted file mode 100644 index 45d1f8689..000000000 --- a/lisa-utils/src/main/scala/lisa/fol/Lambdas.scala +++ /dev/null @@ -1,100 +0,0 @@ -package lisa.fol -import lisa.kernel.fol.FOL.Identifier -import lisa.utils.K - -import scala.reflect.ClassTag - -import FOLHelpers.freshId -trait Lambdas extends Common { - - /** - * Denotes a lambda expression, i.e. an expression with "holes". - * N is the number of arguments (-1 for arbitrary or unknown). - * T is the type of input of the lambda. - * R is the return type. - * For example, LambdaExpression[Term, Formula, 2] denotes an expression of type (Term**2 |-> Formula), - * i.e. an expression that can be substituted in place of a 2-variable predicate - * - * @param bounds The bound variable encoding the parameter of the lambda - * @param body The body of the lambda - * @param arity The number of parameters. 
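Both `BinderFormula.substituteUnsafe` above and `LambdaExpression.substituteUnsafe` below rename the bound symbol to a fresh one whenever it occurs free in the substituted values. A self-contained sketch of that capture-avoiding strategy on a hypothetical mini term language (not LISA's actual types):

object CaptureAvoidingSketch:
  enum Tm:
    case V(name: String)
    case App(f: Tm, a: Tm)
    case Lam(bound: String, body: Tm)
  import Tm.*

  def free(t: Tm): Set[String] = t match
    case V(n)       => Set(n)
    case App(f, a)  => free(f) ++ free(a)
    case Lam(b, bd) => free(bd) - b

  def freshName(base: String, taken: Set[String]): String =
    Iterator.from(0).map(i => s"${base}_$i").find(n => !taken(n)).get

  // Substitute x := v in t, renaming binders that would capture a free name of v.
  def subst(t: Tm, x: String, v: Tm): Tm = t match
    case V(n)                => if n == x then v else t
    case App(f, a)           => App(subst(f, x, v), subst(a, x, v))
    case Lam(b, _) if b == x => t                      // x is shadowed: stop here
    case Lam(b, bd) if free(v)(b) =>                   // would capture: rename b first
      val b2 = freshName(b, free(v) ++ free(bd) + x)
      Lam(b2, subst(subst(bd, b, V(b2)), x, v))
    case Lam(b, bd)          => Lam(b, subst(bd, x, v))

  @main def captureDemo(): Unit =
    // y := x under λx must rename the binder instead of capturing x
    println(subst(Lam("x", App(V("x"), V("y"))), "y", V("x"))) // Lam(x_0,App(V(x_0),V(x)))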
- */ - case class LambdaExpression[T <: LisaObject[T], R <: LisaObject[R], N <: Arity](bounds: Seq[SchematicLabel[T]], body: R, arity: N) extends |->[T ** N, R] { - assert(arity == bounds.length) - private val seqBounds = bounds.toSeq - - def applyUnsafe(args: T ** N): R = body.substituteUnsafe((bounds zip args.toSeq).toMap) - def appUnsafe(args: Seq[T]): R = body.substituteUnsafe((bounds zip args.toSeq).toMap) - - /** - * Substitute schematic symbols by values of corresponding type in the body of expressions. The variables of the expression are bound: This implies that - * 1. They are not substituted in the body even if they are in the substitution map, and - * 2. The bounds of the expression are renamed before substitution if they appear in the substitution. - * - * @param map - * @return - */ - def substituteUnsafe(map: Map[SchematicLabel[?], LisaObject[?]]): LambdaExpression[T, R, N] = { - val newSubst = map -- seqBounds - val conflict = map.values.flatMap(_.freeSchematicLabels).toSet.intersect(bounds.toSet.asInstanceOf) - if (conflict.nonEmpty) { - val taken = (map.values.flatMap(_.allSchematicLabels).map(_.id) ++ map.keys.map(_.id)).toList - val newBounds = seqBounds.scanLeft[List[Identifier]](taken)((list, v: SchematicLabel[T]) => freshId(list, v.id) :: list).map(_.head).zip(seqBounds).map(v => v._2.rename(v._1)) - val newBody = body.substituteUnsafe(seqBounds.zip(newBounds.map(_.liftLabel)).toMap) - LambdaExpression(newBounds, newBody.substituteUnsafe(newSubst), arity) - } else { - LambdaExpression(bounds, body.substituteUnsafe(newSubst), arity) - } - } - - def freeSchematicLabels: Set[SchematicLabel[?]] = body.freeSchematicLabels -- seqBounds - def allSchematicLabels: Set[SchematicLabel[?]] = body.freeSchematicLabels - - } - - /** - * Construct a Lambda expression with a single variable - */ - def lambda[T <: LisaObject[T], R <: LisaObject[R]](bound: SchematicLabel[T], body: R): LambdaExpression[T, R, 1] = LambdaExpression[T, R, 1](Seq(bound), body, 1) - - /** - * Construct a Lambda expression with multiple variables - */ - def lambda[T <: LisaObject[T], R <: LisaObject[R], N <: Arity, Tu <: Tuple](bounds: Tu, body: R)(using Tuple.Union[Tu] <:< SchematicLabel[T], Tuple.Size[Tu] =:= N): LambdaExpression[T, R, N] = { - val boundsSeq = bounds.toList - LambdaExpression[T, R, N](boundsSeq.asInstanceOf, body, boundsSeq.length.asInstanceOf) - } - def lambda[T <: LisaObject[T], R <: LisaObject[R]](bounds: Seq[SchematicLabel[T]], body: R): LambdaExpression[T, R, ?] 
= { - val boundsSeq = bounds - LambdaExpression(boundsSeq, body, boundsSeq.length.asInstanceOf) - } - - type LambdaTT[N <: Arity] = LambdaExpression[Term, Term, N] - type LambdaTF[N <: Arity] = LambdaExpression[Term, Formula, N] - type LambdaFF[N <: Arity] = LambdaExpression[Formula, Formula, N] - - /** - * Recovers the underlying [[K.LambdaTermTerm]] - */ - extension [N <: Arity](ltt: LambdaExpression[Term, Term, N]) { - def underlyingLTT: K.LambdaTermTerm = - K.LambdaTermTerm(ltt.bounds.map(b => b.asInstanceOf[Variable].underlyingLabel), ltt.body.underlying) - } - - /** - * Recovers the underlying [[K.LambdaTermFormula]] - */ - extension [N <: Arity](ltf: LambdaExpression[Term, Formula, N]) { - def underlyingLTF: K.LambdaTermFormula = - K.LambdaTermFormula(ltf.bounds.map(b => b.asInstanceOf[Variable].underlyingLabel), ltf.body.underlying) - } - - /** - * Recovers the underlying [[K.LambdaFormulaFormula]] - */ - extension [N <: Arity](lff: LambdaExpression[Formula, Formula, N]) { - def underlyingLFF: K.LambdaFormulaFormula = - K.LambdaFormulaFormula(lff.bounds.map((b: SchematicLabel[Formula]) => b.asInstanceOf[VariableFormula].underlyingLabel), lff.body.underlying) - } - -} diff --git a/lisa-utils/src/main/scala/lisa/fol/Predef.scala b/lisa-utils/src/main/scala/lisa/fol/Predef.scala deleted file mode 100644 index 5d53b6a92..000000000 --- a/lisa-utils/src/main/scala/lisa/fol/Predef.scala +++ /dev/null @@ -1,78 +0,0 @@ -package lisa.fol - -import lisa.utils.K - -trait Predef extends Common { - - val equality: ConstantPredicateLabel[2] = ConstantPredicateLabel.infix(K.Identifier("="), 2) - val === = equality - val = = equality - - extension (t: Term) { - infix def ===(u: Term): Formula = equality(t, u) - infix def =(u: Term): Formula = equality(t, u) - } - - val top: ConstantFormula = ConstantFormula(K.Identifier("⊤")) - val ⊤ : top.type = top - val True: top.type = top - - val bot: ConstantFormula = ConstantFormula(K.Identifier("⊥")) - val ⊥ : bot.type = bot - val False: bot.type = bot - - case object Neg extends ConstantConnectorLabel[1] { val underlyingLabel = K.Neg; val arity = 1 } - val neg = Neg - val ¬ = Neg - val ! = Neg - - case object And extends ConstantConnectorLabel[-1] { val underlyingLabel = K.And; val arity = -1 } - val and: And.type = And - val /\ : And.type = And - val ∧ : And.type = And - - case object Or extends ConstantConnectorLabel[-1] { val underlyingLabel = K.Or; val arity = -1 } - val or: Or.type = Or - val \/ : Or.type = Or - val ∨ : Or.type = Or - - case object Implies extends ConstantConnectorLabel[2] { val underlyingLabel = K.Implies; val arity = 2 } - val implies: Implies.type = Implies - val ==> : Implies.type = Implies - - case object Iff extends ConstantConnectorLabel[2] { val underlyingLabel = K.Iff; val arity = 2 } - val iff: Iff.type = Iff - val <=> : Iff.type = Iff - - case object Forall extends BaseBinderLabel { - val id = K.Identifier("∀") - val underlyingLabel: K.Forall.type = K.Forall - } - val forall: Forall.type = Forall - val ∀ : Forall.type = forall - - case object Exists extends BaseBinderLabel { - val id = K.Identifier("∃") - val underlyingLabel: K.Exists.type = K.Exists - } - val exists: Exists.type = Exists - val ∃ : Exists.type = exists - - case object ExistsOne extends BaseBinderLabel { - val id = K.Identifier("∃!") - val underlyingLabel: K.ExistsOne.type = K.ExistsOne - } - val existsOne: ExistsOne.type = ExistsOne - val ∃! : ExistsOne.type = existsOne - - extension (f: Formula) { - def unary_! 
= Neg(f) - infix inline def ==>(g: Formula): Formula = Implies(f, g) - infix inline def <=>(g: Formula): Formula = Iff(f, g) - infix inline def /\(g: Formula): Formula = And(List(f, g)) - infix inline def ∧(g: Formula): Formula = And(List(f, g)) - infix inline def \/(g: Formula): Formula = Or(List(f, g)) - infix inline def ∨(g: Formula): Formula = Or(List(f, g)) - } - -} diff --git a/lisa-utils/src/main/scala/lisa/fol/Sequents.scala b/lisa-utils/src/main/scala/lisa/fol/Sequents.scala deleted file mode 100644 index 7c8b4f355..000000000 --- a/lisa-utils/src/main/scala/lisa/fol/Sequents.scala +++ /dev/null @@ -1,249 +0,0 @@ -package lisa.fol - -//import lisa.kernel.proof.SequentCalculus.Sequent - -import lisa.fol.FOLHelpers.* -import lisa.prooflib.BasicStepTactic -import lisa.prooflib.Library -import lisa.prooflib.ProofTacticLib.ProofTactic -import lisa.utils.K - -import scala.annotation.showAsInfix - -trait Sequents extends Common with lisa.fol.Lambdas with Predef { - object SequentInstantiationRule extends ProofTactic - given ProofTactic = SequentInstantiationRule - - case class Sequent(left: Set[Formula], right: Set[Formula]) extends LisaObject[Sequent] with Absolute { - def underlying: lisa.kernel.proof.SequentCalculus.Sequent = K.Sequent(left.map(_.underlying), right.map(_.underlying)) - - def allSchematicLabels: Set[SchematicLabel[?]] = left.flatMap(_.allSchematicLabels) ++ right.flatMap(_.allSchematicLabels) - def freeSchematicLabels: Set[SchematicLabel[?]] = left.flatMap(_.freeSchematicLabels) ++ right.flatMap(_.freeSchematicLabels) - def substituteUnsafe(map: Map[SchematicLabel[?], ? <: LisaObject[?]]): Sequent = Sequent(left.map(_.substituteUnsafe(map)), right.map(_.substituteUnsafe(map))) - - /*Ok for now but what when we have more*/ - /** - * Substitute schematic symbols inside this, and produces a kernel proof. - * Namely, if "that" is the result of the substitution, the proof should conclude with "that.underlying", - * using the assumption "this.underlying" at step index -1. - * - * @param map - * @return - */ - def instantiateWithProof(map: Map[SchematicLabel[?], ? <: LisaObject[?]], index: Int): (Sequent, Seq[K.SCProofStep]) = { - - val mTerm: Map[SchematicFunctionLabel[?] | Variable, LambdaExpression[Term, Term, ?]] = - map.collect[SchematicFunctionLabel[?] | Variable, LambdaExpression[Term, Term, ?]](p => - p._1 match { - case sl: Variable => (sl, LambdaExpression[Term, Term, 0](Seq(), p._2.asInstanceOf[Term], 0)) - case sl: SchematicFunctionLabel[?] => - p._2 match { - case l: LambdaExpression[Term, Term, ?] @unchecked if (l.bounds.isEmpty || l.bounds.head.isInstanceOf[Variable]) & l.body.isInstanceOf[Term] => - (sl, l) - case s: TermLabel[?] => - val vars = nFreshId(Seq(s.id), s.arity).map(id => Variable(id)) - (sl, LambdaExpression(vars, s.applySeq(vars), s.arity)) - } - } - ) - val mPred: Map[SchematicPredicateLabel[?] | VariableFormula, LambdaExpression[Term, Formula, ?]] = - map.collect[SchematicPredicateLabel[?] | VariableFormula, LambdaExpression[Term, Formula, ?]](p => - p._1 match { - case sl: VariableFormula => (sl, LambdaExpression[Term, Formula, 0](Seq(), p._2.asInstanceOf[Formula], 0)) - case sl: SchematicPredicateLabel[?] => - p._2 match { - case l: LambdaExpression[Term, Formula, ?] @unchecked if (l.bounds.isEmpty || l.bounds.head.isInstanceOf[Variable]) & l.body.isInstanceOf[Formula] => (sl, l) - case s: AtomicLabel[?] 
=> - val vars = nFreshId(Seq(s.id), s.arity).map(id => Variable(id)) - (sl, LambdaExpression(vars, s.applySeq(vars), s.arity)) - } - } - ) - val mConn = map.collect[SchematicConnectorLabel[?], LambdaExpression[Formula, Formula, ?]](p => - p._1 match { - case sl: SchematicConnectorLabel[?] => - p._2 match { - case l: LambdaExpression[Formula, Formula, ?] @unchecked if (l.bounds.isEmpty || l.bounds.head.isInstanceOf[VariableFormula]) & l.body.isInstanceOf[Formula] => (sl, l) - case s: ConnectorLabel => - val vars = nFreshId(Seq(s.id), s.arity).map(VariableFormula.apply) - (sl, LambdaExpression(vars, s.applyUnsafe(vars), s.arity)) - } - } - ) - (substituteUnsafe(map), instantiateWithProofLikeKernel(mConn, mPred, mTerm, index)) - - } - - def instantiateForallWithProof(args: Seq[Term], index: Int): (Sequent, Seq[K.SCProofStep]) = { - if this.right.size != 1 then throw new IllegalArgumentException("Right side of sequent must be a single universally quantified formula") - this.right.head match { - case r @ Forall(x, f) => - val t = args.head - val newf = f.substitute(x := t) - val s0 = K.Hypothesis((newf |- newf).underlying, newf.underlying) - val s1 = K.LeftForall((r |- newf).underlying, index + 1, f.underlying, x.underlyingLabel, t.underlying) - val s2 = K.Cut((this.left |- newf).underlying, index, index + 2, r.underlying) - if args.tail.isEmpty then (this.left |- newf, Seq(s0, s1, s2)) - else - (this.left |- newf).instantiateForallWithProof(args.tail, index + 3) match { - case (s, p) => (s, Seq(s0, s1, s2) ++ p) - } - - case _ => throw new IllegalArgumentException("Right side of sequent must be a single universally quantified formula") - } - - } - - /** - * Given 3 substitution maps like the kernel accepts, i.e. Substitution of Predicate Connector and Term schemas, do the substitution - * and produce the (one-step) kernel proof that the result is provable from the original sequent - * - * @param mCon The substitution of connector schemas - * @param mPred The substitution of predicate schemas - * @param mTerm The substitution of function schemas - * @return - */ - def instantiateWithProofLikeKernel( - mCon: Map[SchematicConnectorLabel[?], LambdaExpression[Formula, Formula, ?]], - mPred: Map[SchematicPredicateLabel[?] | VariableFormula, LambdaExpression[Term, Formula, ?]], - mTerm: Map[SchematicFunctionLabel[?] | Variable, LambdaExpression[Term, Term, ?]], - index: Int - ): Seq[K.SCProofStep] = { - val premiseSequent = this.underlying - val mConK = mCon.map((sl, le) => (sl.underlyingLabel, underlyingLFF(le))) - val mPredK = mPred.map((sl, le) => - sl match { - case v: VariableFormula => (v.underlyingLabel, underlyingLTF(le)) - case spl: SchematicPredicateLabel[?] => (spl.underlyingLabel, underlyingLTF(le)) - } - ) - val mTermK = mTerm.map((sl, le) => - sl match { - case v: Variable => (v.underlyingLabel, underlyingLTT(le)) - case sfl: SchematicFunctionLabel[?] 
=> (sfl.underlyingLabel, underlyingLTT(le)) - } - ) - val botK = lisa.utils.KernelHelpers.instantiateSchemaInSequent(premiseSequent, mConK, mPredK, mTermK) - val smap = Map[SchematicLabel[?], LisaObject[?]]() ++ mCon ++ mPred ++ mTerm - Seq(K.InstSchema(botK, index, mConK, mPredK, mTermK)) - } - - infix def +<<(f: Formula): Sequent = this.copy(left = this.left + f) - infix def -<<(f: Formula): Sequent = this.copy(left = this.left - f) - infix def +>>(f: Formula): Sequent = this.copy(right = this.right + f) - infix def ->>(f: Formula): Sequent = this.copy(right = this.right - f) - infix def ++<<(s1: Sequent): Sequent = this.copy(left = this.left ++ s1.left) - infix def --<<(s1: Sequent): Sequent = this.copy(left = this.left -- s1.left) - infix def ++>>(s1: Sequent): Sequent = this.copy(right = this.right ++ s1.right) - infix def -->>(s1: Sequent): Sequent = this.copy(right = this.right -- s1.right) - infix def ++(s1: Sequent): Sequent = this.copy(left = this.left ++ s1.left, right = this.right ++ s1.right) - infix def --(s1: Sequent): Sequent = this.copy(left = this.left -- s1.left, right = this.right -- s1.right) - - infix def removeLeft(f: Formula): Sequent = this.copy(left = this.left.filterNot(isSame(_, f))) - infix def removeRight(f: Formula): Sequent = this.copy(right = this.right.filterNot(isSame(_, f))) - infix def removeAllLeft(s1: Sequent): Sequent = this.copy(left = this.left.filterNot(e1 => s1.left.exists(e2 => isSame(e1, e2)))) - infix def removeAllLeft(s1: Set[Formula]): Sequent = this.copy(left = this.left.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) - infix def removeAllRight(s1: Sequent): Sequent = this.copy(right = this.right.filterNot(e1 => s1.right.exists(e2 => isSame(e1, e2)))) - infix def removeAllRight(s1: Set[Formula]): Sequent = this.copy(right = this.right.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) - infix def removeAll(s1: Sequent): Sequent = - this.copy(left = this.left.filterNot(e1 => s1.left.exists(e2 => isSame(e1, e2))), right = this.right.filterNot(e1 => s1.right.exists(e2 => isSame(e1, e2)))) - - infix def addLeftIfNotExists(f: Formula): Sequent = if (this.left.exists(isSame(_, f))) this else (this +<< f) - infix def addRightIfNotExists(f: Formula): Sequent = if (this.right.exists(isSame(_, f))) this else (this +>> f) - infix def addAllLeftIfNotExists(s1: Sequent): Sequent = this ++<< s1.copy(left = s1.left.filterNot(e1 => this.left.exists(isSame(_, e1)))) - infix def addAllRightIfNotExists(s1: Sequent): Sequent = this ++>> s1.copy(right = s1.right.filterNot(e1 => this.right.exists(isSame(_, e1)))) - infix def addAllIfNotExists(s1: Sequent): Sequent = - this ++ s1.copy(left = s1.left.filterNot(e1 => this.left.exists(isSame(_, e1))), right = s1.right.filterNot(e1 => this.right.exists(isSame(_, e1)))) - - // OL shorthands - infix def +(f: Formula): Sequent = this addLeftIfNotExists f - infix def -(f: Formula): Sequent = this removeLeft f - infix def +>?(f: Formula): Sequent = this addRightIfNotExists f - infix def ->?(f: Formula): Sequent = this removeRight f - infix def ++(s1: Sequent): Sequent = this addAllLeftIfNotExists s1 - infix def --(s1: Sequent): Sequent = this removeAllLeft s1 - infix def ++>?(s1: Sequent): Sequent = this addAllRightIfNotExists s1 - infix def -->?(s1: Sequent): Sequent = this removeAllRight s1 - infix def --?(s1: Sequent): Sequent = this removeAll s1 - infix def ++?(s1: Sequent): Sequent = this addAllIfNotExists s1 - - override def toString = - (if left.size == 0 then "" else if left.size == 1 then left.head.toString else 
"( " + left.mkString(", ") + " )") + - " ⊢ " + - (if right.size == 0 then "" else if right.size == 1 then right.head.toString else "( " + right.mkString(", ") + " )") - - } - - val emptySeq: Sequent = Sequent(Set.empty, Set.empty) - - given Conversion[Formula, Sequent] = f => Sequent(Set.empty, Set(f)) - - def isSame(formula1: Formula, formula2: Formula): Boolean = { - K.isSame(formula1.underlying, formula2.underlying) - } - - def isSameTerm(term1: Term, term2: Term): Boolean = { - K.isSameTerm(term1.underlying, term2.underlying) - } - - def isSameSequent(sequent1: Sequent, sequent2: Sequent): Boolean = { - K.isSameSequent(sequent1.underlying, sequent2.underlying) - } - - /** - * returns true if the first argument implies the second by the laws of ortholattices. - */ - def isImplying(formula1: Formula, formula2: Formula): Boolean = { - K.isImplying(formula1.underlying, formula2.underlying) - } - def isImplyingSequent(sequent1: Sequent, sequent2: Sequent): Boolean = { - K.isImplyingSequent(sequent1.underlying, sequent2.underlying) - } - - def isSubset(s1: Set[Formula], s2: Set[Formula]): Boolean = { - K.isSubset(s1.map(_.underlying), s2.map(_.underlying)) - } - def isSameSet(s1: Set[Formula], s2: Set[Formula]): Boolean = - K.isSameSet(s1.map(_.underlying), s2.map(_.underlying)) - - def contains(s: Set[Formula], f: Formula): Boolean = { - K.contains(s.map(_.underlying), f.underlying) - } - - /** - * Represents a converter of some object into a set. - * @tparam S The type of elements in that set - * @tparam T The type to convert from - */ - trait FormulaSetConverter[T] { - def apply(t: T): Set[Formula] - } - - given FormulaSetConverter[Unit] with { - override def apply(u: Unit): Set[Formula] = Set.empty - } - - given FormulaSetConverter[EmptyTuple] with { - override def apply(t: EmptyTuple): Set[Formula] = Set.empty - } - - given [H <: Formula, T <: Tuple](using c: FormulaSetConverter[T]): FormulaSetConverter[H *: T] with { - override def apply(t: H *: T): Set[Formula] = c.apply(t.tail) + t.head - } - - given formula_to_set[T <: Formula]: FormulaSetConverter[T] with { - override def apply(f: T): Set[Formula] = Set(f) - } - - given iterable_to_set[T <: Formula, I <: Iterable[T]]: FormulaSetConverter[I] with { - override def apply(s: I): Set[Formula] = s.toSet - } - - private def any2set[A, T <: A](any: T)(using c: FormulaSetConverter[T]): Set[Formula] = c.apply(any) - - extension [A, T1 <: A](left: T1)(using FormulaSetConverter[T1]) { - infix def |-[B, T2 <: B](right: T2)(using FormulaSetConverter[T2]): Sequent = Sequent(any2set(left), any2set(right)) - infix def ⊢[B, T2 <: B](right: T2)(using FormulaSetConverter[T2]): Sequent = Sequent(any2set(left), any2set(right)) - } - -} diff --git a/lisa-utils/src/main/scala/lisa/prooflib/Exports.scala b/lisa-utils/src/main/scala/lisa/prooflib/Exports.scala deleted file mode 100644 index 836d1cac5..000000000 --- a/lisa-utils/src/main/scala/lisa/prooflib/Exports.scala +++ /dev/null @@ -1,6 +0,0 @@ -package lisa.prooflib - -object Exports { - export BasicStepTactic.* - export lisa.prooflib.SimpleDeducedSteps.* -} diff --git a/lisa-utils/src/main/scala/lisa/prooflib/ProofsHelpers.scala b/lisa-utils/src/main/scala/lisa/prooflib/ProofsHelpers.scala deleted file mode 100644 index f6f030f8e..000000000 --- a/lisa-utils/src/main/scala/lisa/prooflib/ProofsHelpers.scala +++ /dev/null @@ -1,441 +0,0 @@ -package lisa.prooflib - -import lisa.kernel.proof.SCProofChecker.checkSCProof -import lisa.prooflib.BasicStepTactic.Rewrite -import lisa.prooflib.BasicStepTactic.* 
-import lisa.prooflib.ProofTacticLib.* -import lisa.prooflib.SimpleDeducedSteps.* -import lisa.prooflib.* -import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.LisaException -import lisa.utils.UserLisaException -import lisa.utils.parsing.FOLPrinter -import lisa.utils.{_, given} - -import scala.annotation.targetName - -trait ProofsHelpers { - library: Library & WithTheorems => - - import lisa.fol.FOL.{given, *} - - class HaveSequent(val bot: Sequent) { - - inline infix def by(using proof: library.Proof, line: sourcecode.Line, file: sourcecode.File): By { val _proof: proof.type } = By(proof, line, file).asInstanceOf - - class By(val _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File) { - - val bot = HaveSequent.this.bot ++ (F.iterable_to_set(_proof.getAssumptions) |- ()) - inline infix def apply(tactic: Sequent => _proof.ProofTacticJudgement): _proof.ProofStep & _proof.Fact = { - tactic(bot).validate(line, file) - } - inline infix def apply(tactic: ProofSequentTactic): _proof.ProofStep = { - tactic(using library, _proof)(bot).validate(line, file) - } - } - - infix def subproof(using proof: Library#Proof, line: sourcecode.Line, file: sourcecode.File)(computeProof: proof.InnerProof ?=> Unit): proof.ProofStep = { - val botWithAssumptions = HaveSequent.this.bot ++ (proof.getAssumptions |- ()) - val iProof: proof.InnerProof = new proof.InnerProof(Some(botWithAssumptions)) - computeProof(using iProof) - (new BasicStepTactic.SUBPROOF(using proof)(Some(botWithAssumptions))(iProof)).judgement.validate(line, file).asInstanceOf[proof.ProofStep] - } - - } - - class AndThenSequent private[ProofsHelpers] (val bot: Sequent) { - - inline infix def by(using proof: library.Proof, line: sourcecode.Line, file: sourcecode.File): By { val _proof: proof.type } = - By(proof, line, file).asInstanceOf[By { val _proof: proof.type }] - - class By(val _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File) { - private val bot = AndThenSequent.this.bot ++ (_proof.getAssumptions |- ()) - inline infix def apply(tactic: _proof.Fact => Sequent => _proof.ProofTacticJudgement): _proof.ProofStep = { - tactic(_proof.mostRecentStep)(bot).validate(line, file) - } - - inline infix def apply(tactic: ProofFactSequentTactic): _proof.ProofStep = { - tactic(using library, _proof)(_proof.mostRecentStep)(bot).validate(line, file) - } - - } - } - - /** - * Claim the given Sequent as a ProofTactic, which may require a justification by a proof tactic and premises. - */ - def have(using proof: library.Proof)(res: Sequent): HaveSequent = HaveSequent(res) - - def have(using line: sourcecode.Line, file: sourcecode.File)(using proof: library.Proof)(v: proof.Fact | proof.ProofTacticJudgement) = v match { - case judg: proof.ProofTacticJudgement => judg.validate(line, file) - case fact: proof.Fact @unchecked => HaveSequent(proof.sequentOfFact(fact)).by(using proof, line, file)(Rewrite(using library, proof)(fact)) - } - - /** - * Claim the given Sequent as a ProofTactic directly following the previously proven tactic, - * which may require a justification by a proof tactic. 
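The `HaveSequent`/`By` machinery above is what makes `have(statement) by SomeTactic` read as a sentence: `have` returns a builder and `by` feeds the claimed goal to a tactic. A stripped-down, hypothetical sketch of that builder shape (no validation or proof bookkeeping, not LISA's real classes):

object HaveBySketch:
  final case class Sequent(text: String)
  final case class ProofStep(conclusion: Sequent, rule: String)

  trait Tactic:
    def apply(goal: Sequent): ProofStep

  class HaveSequent(goal: Sequent):
    // `by` hands the claimed goal to the tactic, which produces the step.
    infix def by(tactic: Tactic): ProofStep = tactic(goal)

  def have(goal: Sequent): HaveSequent = HaveSequent(goal)

  @main def haveByDemo(): Unit =
    object Restate extends Tactic:
      def apply(goal: Sequent): ProofStep = ProofStep(goal, "Restate")
    println(have(Sequent("⊢ ⊤")) by Restate) // ProofStep(Sequent(⊢ ⊤),Restate)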
- */ - def thenHave(using proof: library.Proof)(res: Sequent): AndThenSequent = AndThenSequent(res) - - infix def andThen(using proof: library.Proof, line: sourcecode.Line, file: sourcecode.File): AndThen { val _proof: proof.type } = AndThen(proof, line, file).asInstanceOf - - class AndThen private[ProofsHelpers] (val _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File) { - inline infix def apply(tactic: _proof.Fact => _proof.ProofTacticJudgement): _proof.ProofStep = { - tactic(_proof.mostRecentStep).validate(line, file) - } - inline infix def apply(tactic: ProofFactTactic): _proof.ProofStep = { - tactic(using library, _proof)(_proof.mostRecentStep).validate(line, file) - } - } - - /* - /** - * Assume the given formula in all future left hand-side of claimed sequents. - */ - def assume(using proof: library.Proof)(f: Formula): proof.ProofStep = { - proof.addAssumption(f) - have(() |- f) by BasicStepTactic.Hypothesis - } - */ - /** - * Assume the given formulas in all future left hand-side of claimed sequents. - */ - def assume(using proof: library.Proof)(fs: Formula*): proof.ProofStep = { - fs.foreach(f => proof.addAssumption(f)) - have(() |- fs.toSet) by BasicStepTactic.Hypothesis - } - - def thesis(using proof: library.Proof): Sequent = proof.possibleGoal.get - def goal(using proof: library.Proof): Sequent = proof.possibleGoal.get - - def lastStep(using proof: library.Proof): proof.ProofStep = proof.mostRecentStep - - def sorry(using proof: library.Proof): proof.ProofStep = have(thesis) by Sorry - - def showCurrentProof(using om: OutputManager, _proof: library.Proof)(): Unit = { - om.output("Current proof of " + _proof.owningTheorem.prettyGoal + ": ") - om.output( - lisa.utils.parsing.ProofPrinter.prettyProof(_proof, 2) - ) - } - - extension (using proof: library.Proof)(fact: proof.Fact) { - infix def of(insts: (F.SubstPair | F.Term)*): proof.InstantiatedFact = { - proof.InstantiatedFact(fact, insts) - } - def statement: F.Sequent = proof.sequentOfFact(fact) - } - - def currentProof(using p: library.Proof): Library#Proof = p - - //////////////////////////////////////// - // DSL for definitions and theorems // - //////////////////////////////////////// - - class UserInvalidDefinitionException(val symbol: String, errorMessage: String)(using line: sourcecode.Line, file: sourcecode.File) extends UserLisaException(errorMessage) { // TODO refine - val showError: String = { - val source = scala.io.Source.fromFile(file.value) - val textline = source.getLines().drop(line.value - 1).next().dropWhile(c => c.isWhitespace) - source.close() - s" Definition of $symbol at.(${file.value.split("/").last.split("\\\\").last}:${line.value}) is invalid:\n" + - " " + Console.RED + textline + Console.RESET + "\n\n" + - " " + errorMessage - } - } - - class The(val out: Variable, val f: Formula)( - val just: JUSTIFICATION - ) - class definitionWithVars[N <: Arity](val args: Seq[Variable]) { - - // inline infix def -->(using om: OutputManager, name: sourcecode.FullName, line: sourcecode.Line, file: sourcecode.File)(t: Term) = simpleDefinition(lambda(args, t, args.length)) - // inline infix def -->(using om: OutputManager, name: sourcecode.FullName, line: sourcecode.Line, file: sourcecode.File)(f: Formula) = predicateDefinition(lambda(args, f, args.length)) - - inline infix def -->(using om: OutputManager, name: sourcecode.FullName, line: sourcecode.Line, file: sourcecode.File)(t: The): ConstantTermLabelOfArity[N] = - FunctionDefinition[N](name.value, line.value, file.value)(args, t.out, t.f, 
t.just).label - - inline infix def -->(using om: OutputManager, name: sourcecode.FullName, line: sourcecode.Line, file: sourcecode.File)(term: Term): ConstantTermLabelOfArity[N] = - SimpleFunctionDefinition[N](name.value, line.value, file.value)(lambda(args, term).asInstanceOf).label - - inline infix def -->(using om: OutputManager, name: sourcecode.FullName, line: sourcecode.Line, file: sourcecode.File)(formula: Formula): ConstantAtomicLabelOfArity[N] = - PredicateDefinition[N](name.value, line.value, file.value)(lambda(args, formula).asInstanceOf).label - - } - - def DEF(): definitionWithVars[0] = new definitionWithVars[0](Nil) - def DEF(a: Variable): definitionWithVars[1] = new definitionWithVars[1](Seq(a)) - def DEF(a: Variable, b: Variable): definitionWithVars[2] = new definitionWithVars[2](Seq(a, b)) - def DEF(a: Variable, b: Variable, c: Variable): definitionWithVars[3] = new definitionWithVars[3](Seq(a, b, c)) - def DEF(a: Variable, b: Variable, c: Variable, d: Variable): definitionWithVars[4] = new definitionWithVars[4](Seq(a, b, c, d)) - def DEF(a: Variable, b: Variable, c: Variable, d: Variable, e: Variable): definitionWithVars[5] = new definitionWithVars[5](Seq(a, b, c, d, e)) - def DEF(a: Variable, b: Variable, c: Variable, d: Variable, e: Variable, f: Variable): definitionWithVars[6] = new definitionWithVars[6](Seq(a, b, c, d, e, f)) - def DEF(a: Variable, b: Variable, c: Variable, d: Variable, e: Variable, f: Variable, g: Variable): definitionWithVars[7] = new definitionWithVars[7](Seq(a, b, c, d, e, f, g)) - - // def DEF: definitionWithVars[0] = new definitionWithVars[0](EmptyTuple) //todo conversion to empty tuple gets bad type - - // Definition helpers, not part of the DSL - - /** - * Allows to make definitions "by unique existance" of a function symbol - */ - class FunctionDefinition[N <: F.Arity](using om: OutputManager)(val fullName: String, line: Int, file: String)( - val vars: Seq[F.Variable], - val out: F.Variable, - val f: F.Formula, - j: JUSTIFICATION - ) extends DEFINITION(line, file) { - def funWithArgs = label.applySeq(vars) - override def repr: String = - s" ${if (withSorry) " Sorry" else ""} Definition of function symbol ${funWithArgs} := the ${out} such that ${(out === funWithArgs) <=> f})\n" - - // val expr = LambdaExpression[Term, Formula, N](vars, f, valueOf[N]) - - lazy val label: ConstantTermLabelOfArity[N] = (if (vars.length == 0) F.Constant(name) else F.ConstantFunctionLabel[N](name, vars.length.asInstanceOf)).asInstanceOf - - val innerJustification: theory.FunctionDefinition = { - val conclusion: F.Sequent = j.statement - val pr: SCProof = SCProof(IndexedSeq(SC.Restate(conclusion.underlying, -1)), IndexedSeq(conclusion.underlying)) - if (!(conclusion.left.isEmpty && (conclusion.right.size == 1))) { - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"The given justification is not valid for a definition" + - s"The justification should be of the form ${(() |- F.BinderFormula(F.ExistsOne, out, F.VariableFormula("phi")))}" + - s"instead of the given ${conclusion.underlying}" - ) - ) - } - if (!(f.freeSchematicLabels.subsetOf(vars.toSet + out))) { - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"The definition is not allowed to contain schematic symbols or free variables." + - s"The symbols {${(f.freeSchematicLabels -- vars.toSet - out).mkString(", ")}} are free in the expression ${f.toString}." 
- ) - ) - } - val proven = conclusion.right.head match { - case F.BinderFormula(F.ExistsOne, bound, inner) => inner - case F.BinderFormula(F.Exists, x, F.BinderFormula(F.Forall, y, F.AppliedConnector(F.Iff, Seq(l, r)))) if F.isSame(l, x === y) => r - case F.BinderFormula(F.Exists, x, F.BinderFormula(F.Forall, y, F.AppliedConnector(F.Iff, Seq(l, r)))) if F.isSame(r, x === y) => l - case _ => - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"The given justification is not valid for a definition" + - s"The justification should be of the form ${(() |- F.BinderFormula(F.ExistsOne, out, F.VariableFormula("phi")))}" + - s"instead of the given ${conclusion.underlying}" - ) - ) - } - val underf = f.underlying - val undervars = vars.map(_.underlyingLabel) - val ulabel = K.ConstantFunctionLabel(name, vars.size) - val judgement = theory.makeFunctionDefinition(pr, Seq(j.innerJustification), ulabel, out.underlyingLabel, K.LambdaTermFormula(undervars, underf), proven.underlying) - judgement match { - case K.ValidJustification(just) => - just - case wrongJudgement: K.InvalidJustification[?] => - if (!theory.belongsToTheory(underf)) { - import K.findUndefinedSymbols - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"All symbols in the definition must belong to the theory. The symbols ${theory.findUndefinedSymbols(underf)} are unknown and you need to define them first." - ) - ) - } - if (!theory.isAvailable(ulabel)) { - om.lisaThrow(UserInvalidDefinitionException(name, s"The symbol ${name} has already been defined and can't be redefined.")) - } - if (!(underf.freeSchematicTermLabels.subsetOf(undervars.toSet + out.underlyingLabel) && underf.schematicFormulaLabels.isEmpty)) { - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"The definition is not allowed to contain schematic symbols or free variables." + - s"Kernel returned error: The symbols {${(underf.freeSchematicTermLabels -- undervars.toSet - out.underlyingLabel ++ underf.freeSchematicTermLabels) - .mkString(", ")}} are free in the expression ${underf.toString}." - ) - ) - } - om.lisaThrow( - LisaException.InvalidKernelJustificationComputation( - "The final proof was rejected by LISA's logical kernel. 
This may be due to a faulty proof computation or an error in LISA.", - wrongJudgement, - None - ) - ) - } - } - - // val label: ConstantTermLabel[N] - val statement: F.Sequent = - () |- F.Forall( - out, - Iff( - equality(label.applySeq(vars), out), - f - ) - ) - - library.last = Some(this) - - } - - /** - * Allows to make definitions "by equality" of a function symbol - */ - class SimpleFunctionDefinition[N <: F.Arity](using om: OutputManager)(fullName: String, line: Int, file: String)( - val lambda: LambdaExpression[Term, Term, N], - out: F.Variable, - j: JUSTIFICATION - ) extends FunctionDefinition[N](fullName, line, file)(lambda.bounds.asInstanceOf, out, out === lambda.body, j) { - - private val term = label.applySeq(lambda.bounds.asInstanceOf) - private val simpleProp = lambda.body === term - val simplePropName = "simpleDef_" + fullName - val simpleDef = THM(simpleProp, simplePropName, line, file, InternalStatement)({ - have(thesis) by Restate.from(this of term) - }) - shortDefs.update(label, Some(simpleDef)) - - } - - object SimpleFunctionDefinition { - def apply[N <: F.Arity](using om: OutputManager)(fullName: String, line: Int, file: String)(lambda: LambdaExpression[Term, Term, N]): SimpleFunctionDefinition[N] = { - val intName = "definition_" + fullName - val out = Variable(freshId(lambda.allSchematicLabels.map(_.id), "y")) - val defThm = THM(F.ExistsOne(out, out === lambda.body), intName, line, file, InternalStatement)({ - have(SimpleDeducedSteps.simpleFunctionDefinition(lambda, out)) - }) - new SimpleFunctionDefinition[N](fullName, line, file)(lambda, out, defThm) - } - } - - class PredicateDefinition[N <: F.Arity](using om: OutputManager)(val fullName: String, line: Int, file: String)(val lambda: LambdaExpression[Term, Formula, N]) extends DEFINITION(line, file) { - - lazy val vars: Seq[F.Variable] = lambda.bounds.asInstanceOf - val arity = lambda.arity - - lazy val label: ConstantAtomicLabelOfArity[N] = { - ( - if (vars.length == 0) F.ConstantFormula(name) - else F.ConstantPredicateLabel[N](name, vars.length.asInstanceOf[N]) - ).asInstanceOf[ConstantAtomicLabelOfArity[N]] - } - - val innerJustification: theory.PredicateDefinition = { - import lisa.utils.K.{predicateDefinition, findUndefinedSymbols} - val underlambda = lambda.underlyingLTF - val ulabel = K.ConstantFunctionLabel(name, vars.size) - val undervars = vars.map(_.asInstanceOf[F.Variable].underlyingLabel) - val judgement = theory.predicateDefinition(name, lambda.underlyingLTF) - judgement match { - case K.ValidJustification(just) => - just - case wrongJudgement: K.InvalidJustification[?] => - if (!theory.belongsToTheory(underlambda.body)) { - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"All symbols in the definition must belong to the theory. The symbols ${theory.findUndefinedSymbols(underlambda.body)} are unknown and you need to define them first." - ) - ) - } - if (!theory.isAvailable(ulabel)) { - om.lisaThrow(UserInvalidDefinitionException(name, s"The symbol ${name} has already been defined and can't be redefined.")) - } - if (!(underlambda.body.freeSchematicTermLabels.subsetOf(undervars.toSet) && underlambda.body.schematicFormulaLabels.isEmpty)) { - om.lisaThrow( - UserInvalidDefinitionException( - name, - s"The definition is not allowed to contain schematic symbols or free variables." 
+ - s"Kernel returned error: The symbols {${(underlambda.body.freeSchematicTermLabels -- undervars.toSet ++ underlambda.body.freeSchematicTermLabels) - .mkString(", ")}} are free in the expression ${underlambda.body.toString}." - ) - ) - } - om.lisaThrow( - LisaException.InvalidKernelJustificationComputation( - "The final proof was rejected by LISA's logical kernel. This may be due to a faulty proof computation or an error in LISA.", - wrongJudgement, - None - ) - ) - } - } - - val statement: F.Sequent = () |- Iff(label.applySeq(vars), lambda.body) - library.last = Some(this) - } - - ///////////////////////// - // Local Definitions // - ///////////////////////// - - import lisa.utils.parsing.FOLPrinter.prettySCProof - import lisa.utils.KernelHelpers.apply - - /** - * A term with a definition, local to a proof. - * - * @param proof - * @param id - */ - abstract class LocalyDefinedVariable(val proof: library.Proof, id: Identifier) extends Variable(id) { - - val definition: proof.Fact - lazy val definingFormula = proof.sequentOfFact(definition).right.head - - // proof.addDefinition(this, defin(this), fact) - // val definition: proof.Fact = proof.getDefinition(this) - } - - /** - * A witness for a statement of the form ∃(x, P(x)) is a (fresh) variable y such that P(y) holds. This is a local definition, typically used with `val y = witness(fact)` - * where `fact` is a fact of the form `∃(x, P(x))`. The property P(y) can then be used with y.elim - */ - def witness(using _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File, name: sourcecode.Name)(fact: _proof.Fact): LocalyDefinedVariable { val proof: _proof.type } = { - - val (els, eli) = _proof.sequentAndIntOfFact(fact) - els.right.head match - case Exists(x, inner) => - val id = freshId((els.allSchematicLabels ++ _proof.lockedSymbols ++ _proof.possibleGoal.toSet.flatMap(_.allSchematicLabels)).map(_.id), name.value) - val c: LocalyDefinedVariable = new LocalyDefinedVariable(_proof, id) { val definition = assume(using proof)(inner.substitute(x := this)) } - val defin = c.definingFormula - val definU = defin.underlying - val exDefinU = K.Exists(c.underlyingLabel, definU) - - if els.right.size != 1 || !K.isSame(els.right.head.underlying, exDefinU) then - throw new UserInvalidDefinitionException(c.id, "Eliminator fact for " + c + " in a definition should have a single formula, equivalent to " + exDefinU + ", on the right side.") - - _proof.addElimination( - defin, - (i, sequent) => - val resSequent = (sequent.underlying -<< definU) - List( - SC.LeftExists(resSequent +<< exDefinU, i, definU, c.underlyingLabel), - SC.Cut(resSequent ++<< els.underlying, eli, i + 1, exDefinU) - ) - ) - - c.asInstanceOf[LocalyDefinedVariable { val proof: _proof.type }] - - case _ => throw new Exception("Pick is used to obtain a witness of an existential statement.") - - } - - /** - * Check correctness of the proof, using LISA's logical kernel, to the current point. - */ - def sanityProofCheck(using p: Proof)(message: String): Unit = { - val csc = p.toSCProof - if checkSCProof(csc).isValid then - println("Proof is valid. 
" + message) - Thread.sleep(100) - else - checkProof(csc) - throw Exception("Proof is not valid: " + message) - } - -} diff --git a/lisa-utils/src/main/scala/lisa/utils/K.scala b/lisa-utils/src/main/scala/lisa/utils/K.scala index 2269cfed3..1980bc0f4 100644 --- a/lisa-utils/src/main/scala/lisa/utils/K.scala +++ b/lisa-utils/src/main/scala/lisa/utils/K.scala @@ -11,6 +11,5 @@ object K { export lisa.kernel.proof.RunningTheoryJudgement as Judgement export lisa.kernel.proof.RunningTheoryJudgement.* export lisa.utils.KernelHelpers.{*, given} - export lisa.utils.parsing.FOLPrinter.* } diff --git a/lisa-utils/src/main/scala/lisa/utils/KernelHelpers.scala b/lisa-utils/src/main/scala/lisa/utils/KernelHelpers.scala index 70d404521..5cf8ce443 100644 --- a/lisa-utils/src/main/scala/lisa/utils/KernelHelpers.scala +++ b/lisa-utils/src/main/scala/lisa/utils/KernelHelpers.scala @@ -3,110 +3,221 @@ package lisa.utils import lisa.kernel.fol.FOL.* import lisa.kernel.proof.RunningTheoryJudgement.InvalidJustification import lisa.kernel.proof.SCProofCheckerJudgement.SCInvalidProof +import lisa.kernel.proof.SCProofCheckerJudgement.SCValidProof import lisa.kernel.proof.SequentCalculus.* import lisa.kernel.proof.* -import lisa.utils.FOLParser import scala.annotation.targetName - +import lisa.utils.unification.UnificationUtils.matchExpr /** * A helper file that provides various syntactic sugars for LISA's FOL and proofs at the Kernel level. */ object KernelHelpers { + def predicateType(arity: Int) = Range(0, arity).foldLeft(Prop: Sort)((acc, _) => Ind -> acc) + def functionType(arity: Int) = Range(0, arity).foldLeft(Ind: Sort)((acc, _) => Ind -> acc) + ///////////////// // FOL helpers // ///////////////// /* Prefix syntax */ + extension (s: Sort) { + def >>:(t: Sort) : Sort = Arrow(s, t) + } + + val Equality = equality val === = equality - val ⊤ : Formula = top() - val ⊥ : Formula = bot() - val True: Formula = top() - val False: Formula = bot() + val ⊤ : Expression = top + val ⊥ : Expression = bot + val True: Expression = top + val False: Expression = bot - val neg = Neg val ¬ = neg val ! = neg - val and = And - val /\ = And - val or = Or - val \/ = Or - val implies = Implies - val ==> = Implies - val iff = Iff - val <=> = Iff - val forall = Forall + val /\ = and + val \/ = or + val ==> = implies + val <=> = iff val ∀ = forall - val exists = Exists val ∃ = exists - val existsOne = ExistsOne - val ∃! 
= existsOne - - extension [L <: TermLabel](label: L) { - def apply(args: Term*): Term = Term(label, args) - @targetName("applySeq") - def apply(args: Seq[Term]): Term = Term(label, args) - def unapply(f: Formula): Option[Seq[Formula]] = f match { - case ConnectorFormula(`label`, args) => Some(args) + val ε = epsilon + + + + // UnapplyMethods + + object And : + def unapply(e: Expression): Option[(Expression, Expression)] = e match + case Application(Application(`and`, l), r) => Some((l, r)) case _ => None - } - } - extension [L <: AtomicLabel](label: L) { - def apply(args: Term*): Formula = AtomicFormula(label, args) - @targetName("applySeq") - def apply(args: Seq[Term]): Formula = AtomicFormula(label, args) - def unapply(f: Formula): Option[Seq[Term]] = f match { - case AtomicFormula(`label`, args) => Some(args) + object Or : + def unapply(e: Expression): Option[(Expression, Expression)] = e match + case Application(Application(`or`, l), r) => Some((l, r)) case _ => None - } - } - extension [L <: ConnectorLabel](label: L) { - def apply(args: Formula*): Formula = ConnectorFormula(label, args) - @targetName("applySeq") - def apply(args: Seq[Formula]): Formula = ConnectorFormula(label, args) - def unapply(f: Formula): Option[Seq[Formula]] = f match { - case ConnectorFormula(`label`, args) => Some(args) + object Neg : + def unapply(e: Expression): Option[Expression] = e match + case Application(`neg`, a) => Some(a) + case _ => None + + object Implies : + def unapply(e: Expression): Option[(Expression, Expression)] = e match + case Application(Application(`implies`, l), r) => Some((l, r)) case _ => None - } - } - extension [L <: BinderLabel](label: L) { - def apply(bound: VariableLabel, inner: Formula): Formula = BinderFormula(label, bound, inner) - def unapply(f: Formula): Option[(VariableLabel, Formula)] = f match { - case BinderFormula(`label`, x, inner) => Some((x, inner)) + object Iff : + def unapply(e: Expression): Option[(Expression, Expression)] = e match + case Application(Application(`iff`, l), r) => Some((l, r)) + case _ => None + + object Forall : + def unapply(e: Expression): Option[(Variable, Expression)] = e match + case Application(`forall`, Lambda(x, inner)) => Some((x, inner)) + case _ => None + + object Exists : + def unapply(e: Expression): Option[(Variable, Expression)] = e match + case Application(`exists`, Lambda(x, inner)) => Some((x, inner)) + case _ => None + + object Epsilon : + def unapply(e: Expression): Option[(Variable, Expression)] = e match + case Application(`epsilon`, Lambda(x, inner)) => Some((x, inner)) case _ => None - } - } + + object Multiand : + def unapply(e: Expression): Option[Seq[Expression]] = e match + case Application(Application(`and`, l), r) => Some(l +: unapply(r).getOrElse(Seq(r))) + case _ => None + + object Multior : + def unapply(e: Expression): Option[Seq[Expression]] = e match + case Application(Application(`or`, l), r) => Some(l +: unapply(r).getOrElse(Seq(r))) + case _ => None + + object Multiapp : + def unapply(e: Expression): Option[(Expression, Seq[Expression])] = + def inner(e: Expression): Option[List[Expression]] = e match + case Application(f, arg) => inner(f) map (l => arg :: l) + case _ => Some(List(e)) + val r = inner(e) + r match + case Some(l) if l.size > 1 => + val rev = l.reverse + Some(rev.head, rev.tail) + case _ => None + + + + + def multiand(args: Seq[Expression]): Expression = args.reduceLeft(and(_)(_)) + def multior(args: Seq[Expression]): Expression = args.reduceLeft(or(_)(_)) + def multiapply(f: Expression)(args: 
Seq[Expression]): Expression = args.foldLeft(f)(_(_)) /* Infix syntax */ - extension (f: Formula) { - def unary_! = ConnectorFormula(Neg, Seq(f)) - infix inline def ==>(g: Formula): Formula = ConnectorFormula(Implies, Seq(f, g)) - infix inline def <=>(g: Formula): Formula = ConnectorFormula(Iff, Seq(f, g)) - infix inline def /\(g: Formula): Formula = ConnectorFormula(And, Seq(f, g)) - infix inline def \/(g: Formula): Formula = ConnectorFormula(Or, Seq(f, g)) + + extension (e: exists.type) + @targetName("existsApply") + def apply(v: Variable, inner: Expression): Expression = exists(lambda(v, inner)) + + extension (e: forall.type) + @targetName("forallApply") + def apply(v: Variable, inner: Expression): Expression = forall(lambda(v, inner)) + + extension (e: epsilon.type) + @targetName("epsilonApply") + def apply(v: Variable, inner: Expression): Expression = epsilon(lambda(v, inner)) + + + extension (f: Expression) { + def apply(args: Expression*): Expression = multiapply(f)(args) + def unary_! = neg(f) + infix inline def ==>(g: Expression): Expression = implies(f)(g) + infix inline def <=>(g: Expression): Expression = iff(f)(g) + infix inline def /\(g: Expression): Expression = and(f)(g) + infix inline def \/(g: Expression): Expression = or(f)(g) + infix def ===(g: Expression): Expression = equality(f)(g) + infix def =(g: Expression): Expression = equality(f)(g) + + def maxVarId(): Int = f match { + case Variable(id, _) => id.no+1 + case Constant(_, _) => 0 + case Application(f, arg) => f.maxVarId() max arg.maxVarId() + case Lambda(v, inner) => v.id.no max inner.maxVarId() + } + + def leadingVars(): List[Variable] = + def recurse(e:Expression) : List[Variable] = e match { + case Lambda(v, inner) => v :: recurse(inner) + case _ => Nil + } + recurse(f).reverse + + def repr: String = f match + case equality(a, b) => s"${a.repr} === ${b.repr}" + case neg(a) => s"!${a.repr}" + case and(a, b) => s"(${a.repr} /\\ ${b.repr})" + case or(a, b) => s"(${a.repr} \\/ ${b.repr})" + case implies(a, b) => s"(${a.repr} ==> ${b.repr})" + case iff(a, b) => s"(${a.repr} <=> ${b.repr})" + case forall(v, inner) => s"(forall(${v.repr}, ${inner.repr})" + case exists(v, inner) => s"(exists(${v.repr}, ${inner.repr})" + case epsilon(v, inner) => s"(epsilon(${v.repr}, ${inner.repr})" + + case Application(f, arg) => s"${f.repr}(${arg.repr})" + case Constant(id, sort) => id.toString + case Lambda(v, body) => s"lambda(${v.repr}, ${body.repr})" + case Variable(id, sort) => id.toString + + def fullRepr: String = f match + case Application(f, arg) => s"${f.fullRepr}(${arg.fullRepr})" + case Constant(id, sort) => s"cst(${id},${sort})" + case Lambda(v, body) => s"λ${v.fullRepr}.${body.fullRepr}" + case Variable(id, sort) => s"v(${id},${sort})" } - extension (t: Term) { - infix def ===(u: Term): Formula = AtomicFormula(equality, Seq(t, u)) - infix def =(u: Term): Formula = AtomicFormula(equality, Seq(t, u)) + extension (se: SimpleExpression) { + def repr: String = se match + case SimpleAnd(children, polarity) => + val pol = if polarity then "" else "!" + s"${pol}and(${children.map(_.repr).mkString(", ")})" + case SimpleForall(x, inner, polarity) => + val pol = if polarity then "" else "!" + s"${pol}∀$x.${inner.repr}" + case SimpleLiteral(polarity) => + val pol = if polarity then "" else "!" + s"${pol}lit" + case SimpleEquality(left, right, polarity) => + val pol = if polarity then "" else "!" + s"${pol}(${left.repr} === ${right.repr})" + case SimpleVariable(id, sort, polarity) => + val pol = if polarity then "" else "!" 
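With the curried `Expression` encoding introduced here, an n-ary application is a chain of binary `Application` nodes, and the `Multiapp` extractor above peels that chain back into a head and an argument list. A self-contained sketch of the same idea on a hypothetical two-constructor AST:

object MultiappSketch:
  enum Expr:
    case Cst(name: String)
    case App(f: Expr, arg: Expr)
  import Expr.*

  object Multiapp:
    // App(App(f, a), b) unwinds to Some((f, List(a, b))).
    def unapply(e: Expr): Option[(Expr, List[Expr])] =
      def peel(e: Expr, acc: List[Expr]): (Expr, List[Expr]) = e match
        case App(f, a) => peel(f, a :: acc)
        case head      => (head, acc)
      e match
        case App(_, _) => Some(peel(e, Nil))
        case _         => None // a bare head is not treated as an application

  @main def multiappDemo(): Unit =
    val conj = App(App(Cst("and"), Cst("p")), Cst("q"))
    conj match
      case Multiapp(head, args) => println(s"$head applied to $args") // Cst(and) applied to List(Cst(p), Cst(q))
      case _                    => println("not an application")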
+ s"${pol}${id}" + case SimpleBoundVariable(no, sort, polarity) => + val pol = if polarity then "" else "!" + s"${pol}bv$no" + case SimpleConstant(id, sort, polarity) => + val pol = if polarity then "" else "!" + s"${pol}${id}" + case SimpleApplication(arg1, arg2, polarity) => + val pol = if polarity then "" else "!" + s"${pol}(${arg1.repr}(${arg2.repr}))" + case SimpleLambda(x, inner) => + s"λ${x.repr}.${inner.repr}" + + } /* Conversions */ - given Conversion[TermLabel, Term] = Term(_, Seq()) - given Conversion[Term, TermLabel] = _.label - given Conversion[AtomicLabel, AtomicFormula] = AtomicFormula(_, Seq()) - given Conversion[AtomicFormula, AtomicLabel] = _.label - - given Conversion[VariableFormulaLabel, AtomicFormula] = AtomicFormula(_, Seq()) + /* given Conversion[(Boolean, List[Int], String), Option[(List[Int], String)]] = tr => if (tr._1) None else Some(tr._2, tr._3) - given Conversion[Formula, Sequent] = () |- _ +*/ + given Conversion[Expression, Sequent] = () |- _ /* Sequents */ @@ -114,10 +225,10 @@ object KernelHelpers { extension (s: Sequent) { // non OL-based / naive Sequent manipulation - infix def +<<(f: Formula): Sequent = s.copy(left = s.left + f) - infix def -<<(f: Formula): Sequent = s.copy(left = s.left - f) - infix def +>>(f: Formula): Sequent = s.copy(right = s.right + f) - infix def ->>(f: Formula): Sequent = s.copy(right = s.right - f) + infix def +<<(f: Expression): Sequent = s.copy(left = s.left + f) + infix def -<<(f: Expression): Sequent = s.copy(left = s.left - f) + infix def +>>(f: Expression): Sequent = s.copy(right = s.right + f) + infix def ->>(f: Expression): Sequent = s.copy(right = s.right - f) infix def ++<<(s1: Sequent): Sequent = s.copy(left = s.left ++ s1.left) infix def --<<(s1: Sequent): Sequent = s.copy(left = s.left -- s1.left) infix def ++>>(s1: Sequent): Sequent = s.copy(right = s.right ++ s1.right) @@ -126,68 +237,67 @@ object KernelHelpers { infix def --(s1: Sequent): Sequent = s.copy(left = s.left -- s1.left, right = s.right -- s1.right) // OL-based Sequent manipulation - infix def removeLeft(f: Formula): Sequent = s.copy(left = s.left.filterNot(isSame(_, f))) - infix def removeRight(f: Formula): Sequent = s.copy(right = s.right.filterNot(isSame(_, f))) + infix def removeLeft(f: Expression): Sequent = s.copy(left = s.left.filterNot(isSame(_, f))) + infix def removeRight(f: Expression): Sequent = s.copy(right = s.right.filterNot(isSame(_, f))) infix def removeAllLeft(s1: Sequent): Sequent = s.copy(left = s.left.filterNot(e1 => s1.left.exists(e2 => isSame(e1, e2)))) - infix def removeAllLeft(s1: Set[Formula]): Sequent = s.copy(left = s.left.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) + infix def removeAllLeft(s1: Set[Expression]): Sequent = s.copy(left = s.left.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) infix def removeAllRight(s1: Sequent): Sequent = s.copy(right = s.right.filterNot(e1 => s1.right.exists(e2 => isSame(e1, e2)))) - infix def removeAllRight(s1: Set[Formula]): Sequent = s.copy(right = s.right.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) + infix def removeAllRight(s1: Set[Expression]): Sequent = s.copy(right = s.right.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) infix def removeAll(s1: Sequent): Sequent = s.copy(left = s.left.filterNot(e1 => s1.left.exists(e2 => isSame(e1, e2))), right = s.right.filterNot(e1 => s1.right.exists(e2 => isSame(e1, e2)))) - infix def addLeftIfNotExists(f: Formula): Sequent = if (s.left.exists(isSame(_, f))) s else (s +<< f) - infix def addRightIfNotExists(f: Formula): Sequent = 
if (s.right.exists(isSame(_, f))) s else (s +>> f) + infix def addLeftIfNotExists(f: Expression): Sequent = if (s.left.exists(isSame(_, f))) s else (s +<< f) + infix def addRightIfNotExists(f: Expression): Sequent = if (s.right.exists(isSame(_, f))) s else (s +>> f) infix def addAllLeftIfNotExists(s1: Sequent): Sequent = s ++<< s1.copy(left = s1.left.filterNot(e1 => s.left.exists(isSame(_, e1)))) infix def addAllRightIfNotExists(s1: Sequent): Sequent = s ++>> s1.copy(right = s1.right.filterNot(e1 => s.right.exists(isSame(_, e1)))) infix def addAllIfNotExists(s1: Sequent): Sequent = s ++ s1.copy(left = s1.left.filterNot(e1 => s.left.exists(isSame(_, e1))), right = s1.right.filterNot(e1 => s.right.exists(isSame(_, e1)))) // OL shorthands - infix def +(f: Formula): Sequent = s addLeftIfNotExists f - infix def -(f: Formula): Sequent = s removeLeft f - infix def +>?(f: Formula): Sequent = s addRightIfNotExists f - infix def ->?(f: Formula): Sequent = s removeRight f + infix def +(f: Expression): Sequent = s addLeftIfNotExists f + infix def -(f: Expression): Sequent = s removeLeft f + infix def +>?(f: Expression): Sequent = s addRightIfNotExists f + infix def ->?(f: Expression): Sequent = s removeRight f infix def ++(s1: Sequent): Sequent = s addAllLeftIfNotExists s1 infix def --(s1: Sequent): Sequent = s removeAllLeft s1 infix def ++>?(s1: Sequent): Sequent = s addAllRightIfNotExists s1 infix def -->?(s1: Sequent): Sequent = s removeAllRight s1 infix def --?(s1: Sequent): Sequent = s removeAll s1 infix def ++?(s1: Sequent): Sequent = s addAllIfNotExists s1 + + def repr: String = s"${s.left.map(_.repr).mkString(", ")} |- ${s.right.map(_.repr).mkString(", ")}" + + def fullRepr: String = s"${s.left.map(_.fullRepr).mkString(", ")} |- ${s.right.map(_.fullRepr).mkString(", ")}" } - // TODO: Should make less generic /** * Represents a converter of some object into a set. 
* @tparam S The type of elements in that set * @tparam T The type to convert from */ protected trait FormulaSetConverter[T] { - def apply(t: T): Set[Formula] + def apply(t: T): Set[Expression] } given FormulaSetConverter[Unit] with { - override def apply(u: Unit): Set[Formula] = Set.empty + override def apply(u: Unit): Set[Expression] = Set.empty } given FormulaSetConverter[EmptyTuple] with { - override def apply(t: EmptyTuple): Set[Formula] = Set.empty - } - - given [H <: Formula, T <: Tuple](using c: FormulaSetConverter[T]): FormulaSetConverter[H *: T] with { - override def apply(t: H *: T): Set[Formula] = c.apply(t.tail) + t.head + override def apply(t: EmptyTuple): Set[Expression] = Set.empty } - given formula_to_set[T <: Formula]: FormulaSetConverter[T] with { - override def apply(f: T): Set[Formula] = Set(f) + given [H <: Expression, T <: Tuple](using c: FormulaSetConverter[T]): FormulaSetConverter[H *: T] with { + override def apply(t: H *: T): Set[Expression] = c.apply(t.tail) + t.head } - given [T <: Formula, I <: Iterable[T]]: FormulaSetConverter[I] with { - override def apply(s: I): Set[Formula] = s.toSet + given formula_to_set[T <: Expression]: FormulaSetConverter[T] with { + override def apply(f: T): Set[Expression] = Set(f) } - given FormulaSetConverter[VariableFormulaLabel] with { - override def apply(s: VariableFormulaLabel): Set[Formula] = Set(s()) + given [T <: Expression, I <: Iterable[T]]: FormulaSetConverter[I] with { + override def apply(s: I): Set[Expression] = s.toSet } - private def any2set[A, T <: A](any: T)(using c: FormulaSetConverter[T]): Set[Formula] = c.apply(any) + private def any2set[A, T <: A](any: T)(using c: FormulaSetConverter[T]): Set[Expression] = c.apply(any) extension [A, T1 <: A](left: T1)(using FormulaSetConverter[T1]) { infix def |-[B, T2 <: B](right: T2)(using FormulaSetConverter[T2]): Sequent = Sequent(any2set(left), any2set(right)) @@ -196,21 +306,8 @@ object KernelHelpers { // Instatiation functions for formulas lifted to sequents. 
- def instantiatePredicateSchemaInSequent(s: Sequent, m: Map[SchematicAtomicLabel, LambdaTermFormula]): Sequent = { - s.left.map(phi => instantiatePredicateSchemas(phi, m)) |- s.right.map(phi => instantiatePredicateSchemas(phi, m)) - } - - def instantiateFunctionSchemaInSequent(s: Sequent, m: Map[SchematicTermLabel, LambdaTermTerm]): Sequent = { - s.left.map(phi => instantiateTermSchemas(phi, m)) |- s.right.map(phi => instantiateTermSchemas(phi, m)) - } - - def instantiateSchemaInSequent( - s: Sequent, - mCon: Map[SchematicConnectorLabel, LambdaFormulaFormula], - mPred: Map[SchematicAtomicLabel, LambdaTermFormula], - mTerm: Map[SchematicTermLabel, LambdaTermTerm] - ): Sequent = { - s.left.map(phi => instantiateSchemas(phi, mCon, mPred, mTerm)) |- s.right.map(phi => instantiateSchemas(phi, mCon, mPred, mTerm)) + def substituteVariablesInSequent(s: Sequent, m: Map[Variable, Expression]): Sequent = { + s.left.map(phi => substituteVariables(phi, m)) |- s.right.map(phi => substituteVariables(phi, m)) } ////////////////////// @@ -240,50 +337,58 @@ object KernelHelpers { def followPath(path: Seq[Int]): SCProofStep = SCSubproof(p, p.imports.indices).followPath(path) } - // Conversions from String to datatypes - // given Conversion[String, Sequent] = FOLParser.parseSequent(_) - // given Conversion[String, Formula] = FOLParser.parseFormula(_) - // given Conversion[String, Term] = FOLParser.parseTerm(_) - // given Conversion[String, VariableLabel] = s => VariableLabel(if (s.head == '?') s.tail else s) - +/* // Conversion from pairs (e.g. x -> f(x)) to lambdas - given Conversion[Term, LambdaTermTerm] = LambdaTermTerm(Seq(), _) - given Conversion[VariableLabel, LambdaTermTerm] = a => LambdaTermTerm(Seq(), a: Term) - given Conversion[(VariableLabel, Term), LambdaTermTerm] = a => LambdaTermTerm(Seq(a._1), a._2) - given Conversion[(Seq[VariableLabel], Term), LambdaTermTerm] = a => LambdaTermTerm(a._1, a._2) + given Conversion[Expression, LambdaTermTerm] = LambdaTermTerm(Seq(), _) + given Conversion[VariableLabel, LambdaTermTerm] = a => LambdaTermTerm(Seq(), a: Expression) + given Conversion[(VariableLabel, Expression), LambdaTermTerm] = a => LambdaTermTerm(Seq(a._1), a._2) + given Conversion[(Seq[VariableLabel], Expression), LambdaTermTerm] = a => LambdaTermTerm(a._1, a._2) - given Conversion[Formula, LambdaTermFormula] = LambdaTermFormula(Seq(), _) - given Conversion[(VariableLabel, Formula), LambdaTermFormula] = a => LambdaTermFormula(Seq(a._1), a._2) - given Conversion[(Seq[VariableLabel], Formula), LambdaTermFormula] = a => LambdaTermFormula(a._1, a._2) + given Conversion[Expression, LambdaTermFormula] = LambdaTermFormula(Seq(), _) + given Conversion[(VariableLabel, Expression), LambdaTermFormula] = a => LambdaTermFormula(Seq(a._1), a._2) + given Conversion[(Seq[VariableLabel], Expression), LambdaTermFormula] = a => LambdaTermFormula(a._1, a._2) - given Conversion[Formula, LambdaFormulaFormula] = LambdaFormulaFormula(Seq(), _) - given Conversion[(VariableFormulaLabel, Formula), LambdaFormulaFormula] = a => LambdaFormulaFormula(Seq(a._1), a._2) - given Conversion[(Seq[VariableFormulaLabel], Formula), LambdaFormulaFormula] = a => LambdaFormulaFormula(a._1, a._2) + given Conversion[Expression, LambdaFormulaFormula] = LambdaFormulaFormula(Seq(), _) + given Conversion[(VariableFormulaLabel, Expression), LambdaFormulaFormula] = a => LambdaFormulaFormula(Seq(a._1), a._2) + given Conversion[(Seq[VariableFormulaLabel], Expression), LambdaFormulaFormula] = a => LambdaFormulaFormula(a._1, a._2) + def // Shortcut 
for LambdaTermTerm, LambdaTermFormula and LambdaFormulaFormula construction - def lambda(x: VariableLabel, t: Term): LambdaTermTerm = LambdaTermTerm(Seq(x), t) - def lambda(xs: Seq[VariableLabel], t: Term): LambdaTermTerm = LambdaTermTerm(xs, t) + def lambda(x: VariableLabel, t: Expression): LambdaTermTerm = LambdaTermTerm(Seq(x), t) + def lambda(xs: Seq[VariableLabel], t: Expression): LambdaTermTerm = LambdaTermTerm(xs, t) def lambda(x: VariableLabel, l: LambdaTermTerm): LambdaTermTerm = LambdaTermTerm(Seq(x) ++ l.vars, l.body) def lambda(xs: Seq[VariableLabel], l: LambdaTermTerm): LambdaTermTerm = LambdaTermTerm(xs ++ l.vars, l.body) - def lambda(x: VariableLabel, phi: Formula): LambdaTermFormula = LambdaTermFormula(Seq(x), phi) - def lambda(xs: Seq[VariableLabel], phi: Formula): LambdaTermFormula = LambdaTermFormula(xs, phi) + def lambda(x: VariableLabel, phi: Expression): LambdaTermFormula = LambdaTermFormula(Seq(x), phi) + def lambda(xs: Seq[VariableLabel], phi: Expression): LambdaTermFormula = LambdaTermFormula(xs, phi) def lambda(x: VariableLabel, l: LambdaTermFormula): LambdaTermFormula = LambdaTermFormula(Seq(x) ++ l.vars, l.body) def lambda(xs: Seq[VariableLabel], l: LambdaTermFormula): LambdaTermFormula = LambdaTermFormula(xs ++ l.vars, l.body) - def lambda(X: VariableFormulaLabel, phi: Formula): LambdaFormulaFormula = LambdaFormulaFormula(Seq(X), phi) - def lambda(Xs: Seq[VariableFormulaLabel], phi: Formula): LambdaFormulaFormula = LambdaFormulaFormula(Xs, phi) + def lambda(X: VariableFormulaLabel, phi: Expression): LambdaFormulaFormula = LambdaFormulaFormula(Seq(X), phi) + def lambda(Xs: Seq[VariableFormulaLabel], phi: Expression): LambdaFormulaFormula = LambdaFormulaFormula(Xs, phi) def lambda(X: VariableFormulaLabel, l: LambdaFormulaFormula): LambdaFormulaFormula = LambdaFormulaFormula(Seq(X) ++ l.vars, l.body) def lambda(Xs: Seq[VariableFormulaLabel], l: LambdaFormulaFormula): LambdaFormulaFormula = LambdaFormulaFormula(Xs ++ l.vars, l.body) - - def instantiateBinder(f: BinderFormula, t: Term): Formula = substituteVariablesInFormula(f.inner, Map(f.bound -> t)) +*/ + def lambda(x: Variable, t: Expression): Lambda = Lambda(x, t) + def lambda(xs: Seq[Variable], t: Expression): Expression = xs.foldRight(t)((x, t) => Lambda(x, t)) + def reduceLambda(f: Lambda, t: Expression): Expression = substituteVariables(f.body, Map(f.v -> t)) // declare symbols easily: "val x = variable;" - def variable(using name: sourcecode.Name): VariableLabel = VariableLabel(name.value) - def function(arity: Integer)(using name: sourcecode.Name): SchematicFunctionLabel = SchematicFunctionLabel(name.value, arity) - def formulaVariable(using name: sourcecode.Name): VariableFormulaLabel = VariableFormulaLabel(name.value) - def predicate(arity: Integer)(using name: sourcecode.Name): SchematicPredicateLabel = SchematicPredicateLabel(name.value, arity) - def connector(arity: Integer)(using name: sourcecode.Name): SchematicConnectorLabel = SchematicConnectorLabel(name.value, arity) + def HOvariable(using name: sourcecode.Name)(sort: Sort): Variable = Variable(name.value, sort) + def variable(using name: sourcecode.Name): Variable = Variable(name.value, Ind) + def function(arity: Integer)(using name: sourcecode.Name): Variable = Variable(name.value, Range(0, arity).foldLeft(Ind: Sort)((acc, _)=> Ind -> acc)) + def formulaVariable(using name: sourcecode.Name): Variable = Variable(name.value, Prop) + def predicate(arity: Integer)(using name: sourcecode.Name): Variable = Variable(name.value, Range(0, 
arity).foldLeft(Prop: Sort)((acc, _)=> Ind -> acc)) + def connector(arity: Integer)(using name: sourcecode.Name): Variable = Variable(name.value, Range(0, arity).foldLeft(Prop: Sort)((acc, _)=> Prop -> acc)) + def cst(using name: sourcecode.Name)(sort: Sort): Constant = Constant(name.value, sort) + + def HOvariable(sort: Sort)(id: Identifier): Variable = Variable(id, sort) + def variable(id: Identifier): Variable = Variable(id, Ind) + def function(arity: Integer)(id: Identifier): Variable = Variable(id, Range(0, arity).foldLeft(Ind: Sort)((acc, _)=> Ind -> acc)) + def formulaVariable(id: Identifier): Variable = Variable(id, Prop) + def predicate(arity: Integer)(id: Identifier): Variable = Variable(id, Range(0, arity).foldLeft(Prop: Sort)((acc, _)=> Ind -> acc)) + def connector(arity: Integer)(id: Identifier): Variable = Variable(id, Range(0, arity).foldLeft(Prop: Sort)((acc, _)=> Prop -> acc)) + def cst(id: Identifier, sort:Sort): Constant = Constant(id, sort) // Conversions from String to Identifier class InvalidIdentifierException(identifier: String, errorMessage: String) extends LisaException(errorMessage) { @@ -336,9 +441,9 @@ object KernelHelpers { ///////////////////////////// extension (theory: RunningTheory) { - def makeAxiom(using name: sourcecode.Name)(formula: Formula): theory.Axiom = theory.addAxiom(name.value, formula) match { + def makeAxiom(using name: sourcecode.Name)(formula: Expression): theory.Axiom = theory.addAxiom(name.value, formula) match { case Some(value) => value - case None => throw new LisaException.InvalidKernelAxiomException("Axiom contains undefined symbols", name.value, formula, theory) + case None => throw new Exception("Axiom contains undefined symbols " + name.value + formula + theory) } /** @@ -348,32 +453,22 @@ object KernelHelpers { def theorem(name: String, statement: Sequent, proof: SCProof, justifications: Seq[theory.Justification]): RunningTheoryJudgement[theory.Theorem] = { if (statement == proof.conclusion) theory.makeTheorem(name, statement, proof, justifications) else if (isSameSequent(statement, proof.conclusion)) theory.makeTheorem(name, statement, proof.appended(Restate(statement, proof.length - 1)), justifications) - else InvalidJustification(s"The proof proves \n ${FOLPrinter.prettySequent(proof.conclusion)}\ninstead of claimed \n ${FOLPrinter.prettySequent(statement)}", None) - } - - /** - * Make a function definition in the theory, but only ask for the identifier of the new symbol; Arity is inferred - * of the theorem to have more explicit writing and for sanity check. See [[lisa.kernel.proof.RunningTheory.makeFunctionDefinition]] - */ - def functionDefinition( - symbol: String, - expression: LambdaTermFormula, - out: VariableLabel, - proof: SCProof, - proven: Formula, - justifications: Seq[theory.Justification] - ): RunningTheoryJudgement[theory.FunctionDefinition] = { - val label = ConstantFunctionLabel(symbol, expression.vars.size) - theory.makeFunctionDefinition(proof, justifications, label, out, expression, proven) + else InvalidJustification(s"The proof proves \n ${proof.conclusion.repr}\ninstead of claimed \n ${statement.repr}", None) } /** * Make a predicate definition in the theory, but only ask for the identifier of the new symbol; Arity is inferred * of the theorem to have more explicit writing and for sanity check. 
See also [[lisa.kernel.proof.RunningTheory.makePredicateDefinition]] */ - def predicateDefinition(symbol: String, expression: LambdaTermFormula): RunningTheoryJudgement[theory.PredicateDefinition] = { - val label = ConstantAtomicLabel(symbol, expression.vars.size) - theory.makePredicateDefinition(label, expression) + def definition(symbol: String, expression: Expression): RunningTheoryJudgement[theory.Definition] = { + val label = Constant(symbol, expression.sort) + val vars = expression.leadingVars() + if (vars.length == expression.sort.depth) then + theory.makeDefinition(label, expression, vars) + else + var maxid = expression.maxVarId()-1 + val newvars = flatTypeParameters(expression.sort).drop(vars.length).map(t => {maxid+=1;Variable(Identifier("x", maxid), t)}) + theory.makeDefinition(label, expression, vars ++ newvars) } /** @@ -388,29 +483,11 @@ object KernelHelpers { * @param phi The formula to check * @return The List of undefined symols */ - def findUndefinedSymbols(phi: Formula): Set[ConstantLabel] = phi match { - case AtomicFormula(label, args) => - label match { - case l: ConstantAtomicLabel => ((if (theory.isSymbol(l)) Nil else List(l)) ++ args.flatMap(findUndefinedSymbols)).toSet - case _ => args.flatMap(findUndefinedSymbols).toSet - } - case ConnectorFormula(label, args) => args.flatMap(findUndefinedSymbols).toSet - case BinderFormula(label, bound, inner) => findUndefinedSymbols(inner) - } - - /** - * Verify if a given term belongs to the language of the theory. - * - * @param t The term to check - * @return The List of undefined symols - */ - def findUndefinedSymbols(t: Term): Set[ConstantLabel] = t match { - case Term(label, args) => - label match { - case l: ConstantFunctionLabel => ((if (theory.isSymbol(l)) Nil else List(l)) ++ args.flatMap(findUndefinedSymbols)).toSet - case _: SchematicTermLabel => args.flatMap(findUndefinedSymbols).toSet - } - + def findUndefinedSymbols(phi: Expression): Set[Constant] = phi match { + case Variable(id, sort) => Set.empty + case cst: Constant => if (theory.isSymbol(cst)) Set.empty else Set(cst) + case Lambda(v, inner) => findUndefinedSymbols(inner) + case Application(f, arg) => findUndefinedSymbols(f) ++ findUndefinedSymbols(arg) } /** @@ -419,23 +496,18 @@ object KernelHelpers { * @param s The sequent to check * @return The List of undefined symols */ - def findUndefinedSymbols(s: Sequent): Set[ConstantLabel] = + def findUndefinedSymbols(s: Sequent): Set[Constant] = s.left.flatMap(findUndefinedSymbols) ++ s.right.flatMap(findUndefinedSymbols) } extension (just: RunningTheory#Justification) { def repr: String = just match { - case thm: RunningTheory#Theorem => s" Theorem ${thm.name} := ${FOLPrinter.prettySequent(thm.proposition)}${if (thm.withSorry) " (!! Relies on Sorry)" else ""}\n" - case axiom: RunningTheory#Axiom => s" Axiom ${axiom.name} := ${FOLPrinter.prettyFormula(axiom.ax)}\n" + case thm: RunningTheory#Theorem => s" Theorem ${thm.name} := ${thm.proposition.repr}${if (thm.withSorry) " (!! 
Relies on Sorry)" else ""}\n" + case axiom: RunningTheory#Axiom => s" Axiom ${axiom.name} := ${axiom.ax.repr}\n" case d: RunningTheory#Definition => - d match { - case pd: RunningTheory#PredicateDefinition => - s" Definition of predicate symbol ${pd.label.id} := ${FOLPrinter.prettyFormula(pd.label(pd.expression.vars.map(VariableTerm.apply)*) <=> pd.expression.body)}\n" - case fd: RunningTheory#FunctionDefinition => - s" Definition of function symbol ${FOLPrinter.prettyTerm(fd.label(fd.expression.vars.map(VariableTerm.apply)*))} := the ${fd.out.id} such that ${FOLPrinter - .prettyFormula((fd.out === fd.label(fd.expression.vars.map(VariableTerm.apply)*)) <=> fd.expression.body)})${if (fd.withSorry) " (!! Relies on Sorry)" else ""}\n" - } + s" Definition of symbol ${d.cst.id} : ${d.cst.sort} := ${d.expression}\n" + } } @@ -451,22 +523,164 @@ object KernelHelpers { just.repr case InvalidJustification(message, error) => s"$message\n${error match { - case Some(judgement) => FOLPrinter.prettySCProof(judgement) + case Some(judgement) => prettySCProof(judgement) case None => "" }}" } } } + + extension (judg: SCProofCheckerJudgement) { + def repr: String = prettySCProof(judg) + } + + /** * output a readable representation of a proof. */ def checkProof(proof: SCProof, output: String => Unit = println): Unit = { val judgement = SCProofChecker.checkSCProof(proof) + if judgement.isValid then + output("Proof is valid") + else + output("Proof is invalid") val pl = proof.totalLength if pl > 100 then output("...") output(s"Proof is too long to be displayed [$pl steps]") - else output(FOLPrinter.prettySCProof(judgement)) + else output(prettySCProof(judgement)) } + + private def spaceSeparator(compact: Boolean): String = if (compact) "" else " " + + private def commaSeparator(compact: Boolean, symbol: String = ","): String = s"$symbol${spaceSeparator(compact)}" + + /** + * Returns a string representation of this proof. + * + * @param proof the proof + * @param judgement optionally provide a proof checking judgement that will mark a particular step in the proof + * (`->`) as an error. 
The proof is considered to be valid by default + * @return a string where each indented line corresponds to a step in the proof + */ + def prettySCProof(judgement: SCProofCheckerJudgement, forceDisplaySubproofs: Boolean = false): String = { + val proof = judgement.proof + def computeMaxNumberingLengths(proof: SCProof, level: Int, result: IndexedSeq[Int]): IndexedSeq[Int] = { + val resultWithCurrent = result.updated( + level, + (Seq((proof.steps.size - 1).toString.length, result(level)) ++ (if (proof.imports.nonEmpty) Seq((-proof.imports.size).toString.length) else Seq.empty)).max + ) + proof.steps.collect { case sp: SCSubproof => sp }.foldLeft(resultWithCurrent)((acc, sp) => computeMaxNumberingLengths(sp.sp, level + 1, if (acc.size <= level + 1) acc :+ 0 else acc)) + } + + val maxNumberingLengths = computeMaxNumberingLengths(proof, 0, IndexedSeq(0)) // The maximum value for each number column + val maxLevel = maxNumberingLengths.size - 1 + + def leftPadSpaces(v: Any, n: Int): String = { + val s = String.valueOf(v) + if (s.length < n) (" " * (n - s.length)) + s else s + } + + def rightPadSpaces(v: Any, n: Int): String = { + val s = String.valueOf(v) + if (s.length < n) s + (" " * (n - s.length)) else s + } + + def prettySCProofRecursive(proof: SCProof, level: Int, tree: IndexedSeq[Int], topMostIndices: IndexedSeq[Int]): Seq[(Boolean, String, String, String)] = { + val printedImports = proof.imports.zipWithIndex.reverse.flatMap { case (imp, i) => + val currentTree = tree :+ (-i - 1) + val showErrorForLine = judgement match { + case SCValidProof(_, _) => false + case SCInvalidProof(proof, position, _) => currentTree.startsWith(position) && currentTree.drop(position.size).forall(_ == 0) + } + val prefix = (Seq.fill(level - topMostIndices.size)(None) ++ Seq.fill(topMostIndices.size)(None) :+ Some(-i - 1)) ++ Seq.fill(maxLevel - level)(None) + val prefixString = prefix.map(_.map(_.toString).getOrElse("")).zipWithIndex.map { case (v, i1) => leftPadSpaces(v, maxNumberingLengths(i1)) }.mkString(" ") + + def pretty(stepName: String, topSteps: Int*): (Boolean, String, String, String) = + ( + showErrorForLine, + prefixString, + Seq(stepName, topSteps.mkString(commaSeparator(compact = false))).filter(_.nonEmpty).mkString(" "), + imp.repr + ) + + Seq(pretty("Import", 0)) + } + printedImports ++ proof.steps.zipWithIndex.flatMap { case (step, i) => + val currentTree = tree :+ i + val showErrorForLine = judgement match { + case SCValidProof(_, _) => false + case SCInvalidProof(proof, position, _) => + currentTree.startsWith(position) && currentTree.drop(position.size).forall(_ == 0) + } + val prefix = (Seq.fill(level - topMostIndices.size)(None) ++ Seq.fill(topMostIndices.size)(None) :+ Some(i)) ++ Seq.fill(maxLevel - level)(None) + val prefixString = prefix.map(_.map(_.toString).getOrElse("")).zipWithIndex.map { case (v, i1) => leftPadSpaces(v, maxNumberingLengths(i1)) }.mkString(" ") + + def pretty(stepName: String, topSteps: Int*): (Boolean, String, String, String) = + ( + showErrorForLine, + prefixString, + Seq(stepName, topSteps.mkString(commaSeparator(compact = false))).filter(_.nonEmpty).mkString(" "), + step.bot.repr + ) + + step match { + case sp @ SCSubproof(_, _) => + pretty("Subproof", sp.premises*) +: prettySCProofRecursive(sp.sp, level + 1, currentTree, (if (i == 0) topMostIndices else IndexedSeq.empty) :+ i) + case other => + val line = other match { + case Restate(_, t1) => pretty("Rewrite", t1) + case RestateTrue(_) => pretty("RewriteTrue") + case Hypothesis(_, _) => pretty("Hypo.") + case 
Cut(_, t1, t2, _) => pretty("Cut", t1, t2) + case LeftAnd(_, t1, _, _) => pretty("Left ∧", t1) + case LeftNot(_, t1, _) => pretty("Left ¬", t1) + case RightOr(_, t1, _, _) => pretty("Right ∨", t1) + case RightNot(_, t1, _) => pretty("Right ¬", t1) + case LeftExists(_, t1, _, _) => pretty("Left ∃", t1) + case LeftForall(_, t1, _, _, _) => pretty("Left ∀", t1) + case LeftOr(_, l, _) => pretty("Left ∨", l*) + case RightExists(_, t1, _, _, _) => pretty("Right ∃", t1) + case RightForall(_, t1, _, _) => pretty("Right ∀", t1) + case RightEpsilon(_, t1, _, _, _) => pretty("Right ε", t1) + case RightAnd(_, l, _) => pretty("Right ∧", l*) + case RightIff(_, t1, t2, _, _) => pretty("Right ⇔", t1, t2) + case RightImplies(_, t1, _, _) => pretty("Right ⇒", t1) + case LeftImplies(_, t1, t2, _, _) => pretty("Left ⇒", t1, t2) + case LeftIff(_, t1, _, _) => pretty("Left ⇔", t1) + case Weakening(_, t1) => pretty("Weakening", t1) + case Beta(_, t1) => pretty("Beta", t1) + case LeftRefl(_, t1, _) => pretty("L. Refl", t1) + case RightRefl(_, _) => pretty("R. Refl") + case LeftSubstEq(_, t1, _, _) => pretty("L. SubstEq", t1) + case RightSubstEq(_, t1, _, _) => pretty("R. SubstEq", t1) + case InstSchema(_, t1, _) => pretty("Schema Instantiation", t1) + case Sorry(_) => pretty("Sorry") + case SCSubproof(_, _) => throw new Exception("Should not happen") + } + Seq(line) + } + } + } + + val marker = "->" + + val lines = prettySCProofRecursive(proof, 0, IndexedSeq.empty, IndexedSeq.empty) + val maxStepNameLength = lines.map { case (_, _, stepName, _) => stepName.length }.maxOption.getOrElse(0) + lines + .map { case (isMarked, indices, stepName, sequent) => + val suffix = Seq(indices, rightPadSpaces(stepName, maxStepNameLength), sequent) + val full = if (!judgement.isValid) (if (isMarked) marker else leftPadSpaces("", marker.length)) +: suffix else suffix + full.mkString(" ") + } + .mkString("\n") + (judgement match { + case SCValidProof(_, _) => "" + case SCInvalidProof(proof, path, message) => s"\nProof checker has reported an error at line ${path.mkString(".")}: $message" + }) + } + + def prettySCProof(proof: SCProof): String = prettySCProof(SCValidProof(proof), false) + + } diff --git a/lisa-utils/src/main/scala/lisa/utils/LisaException.scala b/lisa-utils/src/main/scala/lisa/utils/LisaException.scala index 6d141d390..eefc60193 100644 --- a/lisa-utils/src/main/scala/lisa/utils/LisaException.scala +++ b/lisa-utils/src/main/scala/lisa/utils/LisaException.scala @@ -1,13 +1,14 @@ package lisa.utils -import lisa.fol.FOL as F +import lisa.utils.fol.FOL as F import lisa.kernel.fol.FOL import lisa.kernel.proof.RunningTheoryJudgement import lisa.kernel.proof.RunningTheoryJudgement.InvalidJustification import lisa.kernel.proof.SCProof -import lisa.prooflib.Library -import lisa.prooflib.ProofTacticLib.ProofTactic +import lisa.utils.prooflib.Library +// import lisa.utils.prooflib.ProofTacticLib.ProofTactic import lisa.utils.KernelHelpers.repr +import lisa.utils.KernelHelpers.prettySCProof abstract class LisaException(errorMessage: String)(using val line: sourcecode.Line, val file: sourcecode.File) extends Exception(errorMessage) { def showError: String @@ -15,8 +16,10 @@ abstract class LisaException(errorMessage: String)(using val line: sourcecode.Li import lisa.utils.KernelHelpers.{_, given} + import java.io.File object LisaException { + case class InvalidKernelJustificationComputation(errorMessage: String, underlying: RunningTheoryJudgement.InvalidJustification[?], proof: Option[Library#Proof])(using sourcecode.Line, 
sourcecode.File @@ -24,12 +27,12 @@ object LisaException { def showError: String = "Construction of proof succeeded, but the resulting proof or definition has been reported to be faulty. This may be due to an internal bug.\n" + "The resulting faulty event is:\n" + s"${underlying.message}\n${underlying.error match { - case Some(judgement) => FOLPrinter.prettySCProof(judgement) + case Some(judgement) => prettySCProof(judgement) case None => "" }}" } - class InvalidKernelAxiomException(errorMessage: String, name: String, formula: lisa.kernel.fol.FOL.Formula, theory: lisa.kernel.proof.RunningTheory)(using sourcecode.Line, sourcecode.File) + class InvalidKernelAxiomException(errorMessage: String, name: String, formula: lisa.kernel.fol.FOL.Expression, theory: lisa.kernel.proof.RunningTheory)(using sourcecode.Line, sourcecode.File) extends LisaException(errorMessage) { def showError: String = s"The desired axiom \"$name\" contains symbols that are not part of the theory.\n" + s"The symbols {${theory.findUndefinedSymbols(formula)}} are undefined." @@ -37,18 +40,21 @@ object LisaException { } + + /** * Error made by the user, should be "explained" */ abstract class UserLisaException(var errorMessage: String)(using line: sourcecode.Line, file: sourcecode.File) extends LisaException(errorMessage) { def fixTrace(): Unit = () } + object UserLisaException { class InvalidProofFromFileException(errorMessage: String, file: String)(using sourcecode.Line, sourcecode.File) extends UserLisaException(errorMessage) { def showError: String = errorMessage } - class InvalidAxiomException(errorMessage: String, name: String, formula: lisa.fol.FOL.Formula, library: lisa.prooflib.Library)(using sourcecode.Line, sourcecode.File) + class InvalidAxiomException(errorMessage: String, name: String, formula: lisa.utils.fol.FOL.Expr[lisa.utils.fol.FOL.Prop], library: lisa.utils.prooflib.Library)(using sourcecode.Line, sourcecode.File) extends UserLisaException(errorMessage) { def showError: String = s"The desired axiom \"$name\" contains symbols that are not part of the theory.\n" + s"The symbols {${library.theory.findUndefinedSymbols(formula.underlying)}} are undefined."
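The symbol helpers above (variable, function(arity), formulaVariable, predicate(arity), connector(arity)) replace arity-carrying labels with ordinary Variables whose sort encodes the arity: an n-ary function symbol gets the curried sort Ind -> ... -> Ind -> Ind, an n-ary predicate Ind -> ... -> Ind -> Prop, and an n-ary connector Prop -> ... -> Prop -> Prop. A minimal self-contained sketch of that encoding, using a local stand-in for the kernel's Ind, Prop and Arrow rather than the kernel API (functionSort, predicateSort and sortEncodingDemo are illustrative names):

// Local model of the kernel's sorts, for illustration only.
sealed trait Sort
case object Ind extends Sort
case object Prop extends Sort
final case class Arrow(from: Sort, to: Sort) extends Sort

// Same fold as in `function(arity)` / `predicate(arity)` above: arity-many Ind
// arguments, ending in Ind for function symbols and Prop for predicate symbols.
def functionSort(arity: Int): Sort = (0 until arity).foldLeft(Ind: Sort)((acc, _) => Arrow(Ind, acc))
def predicateSort(arity: Int): Sort = (0 until arity).foldLeft(Prop: Sort)((acc, _) => Arrow(Ind, acc))

@main def sortEncodingDemo(): Unit =
  assert(functionSort(2) == Arrow(Ind, Arrow(Ind, Ind))) // binary function: Ind -> Ind -> Ind
  assert(predicateSort(1) == Arrow(Ind, Prop))           // unary predicate: Ind -> Prop

In the kernel the expression `Ind -> acc` presumably builds the same Arrow, so variable, function(arity) and predicate(arity) all return plain Variables that differ only in the sort they carry; findUndefinedSymbols and the exceptions above can then treat every symbol uniformly as an Expression.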
@@ -58,7 +64,7 @@ object UserLisaException { def showError: String = "" } - class UndefinedSymbolException(errorMessage: String, symbol: F.ConstantLabel[?], library: lisa.prooflib.Library)(using sourcecode.Line, sourcecode.File) extends UserLisaException(errorMessage) { + class UndefinedSymbolException(errorMessage: String, symbol: F.Constant[?], library: lisa.utils.prooflib.Library)(using sourcecode.Line, sourcecode.File) extends UserLisaException(errorMessage) { def showError: String = s"The desired symbol \"$symbol\" is unknown and has not been defined.\n" } diff --git a/lisa-utils/src/main/scala/lisa/utils/Printing.scala b/lisa-utils/src/main/scala/lisa/utils/Printing.scala new file mode 100644 index 000000000..78b3dd71a --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/Printing.scala @@ -0,0 +1,6 @@ +package lisa.utils + +object Printing: + def printList[T](seq: Iterable[T], prefix: String = "\n\t"): String = + prefix + (seq lazyZip (0 until seq.size)).map((x, i) => s"$i: $x").mkString(prefix) + diff --git a/lisa-utils/src/main/scala/lisa/utils/ProofsShrink.scala b/lisa-utils/src/main/scala/lisa/utils/ProofsShrink.scala index 5f620aa87..4d0d3b94a 100644 --- a/lisa-utils/src/main/scala/lisa/utils/ProofsShrink.scala +++ b/lisa-utils/src/main/scala/lisa/utils/ProofsShrink.scala @@ -10,7 +10,7 @@ import lisa.kernel.proof.SequentCalculus.* * If the provided proofs are valid, then the resulting proofs will also be valid. */ object ProofsShrink { - +/* /** * Computes the size of a proof. Size corresponds to the number of proof steps. * Subproofs are count as one plus the size of their body. @@ -300,4 +300,5 @@ object ProofsShrink { } def minimizeProofOnce(proof: SCProof): SCProof = deadStepsElimination(factorProof(simplifyProof(flattenProof(proof)))) + */ } diff --git a/lisa-utils/src/main/scala/lisa/utils/Serialization.scala b/lisa-utils/src/main/scala/lisa/utils/Serialization.scala index 3b934506a..197eb17d1 100644 --- a/lisa-utils/src/main/scala/lisa/utils/Serialization.scala +++ b/lisa-utils/src/main/scala/lisa/utils/Serialization.scala @@ -21,7 +21,6 @@ object Serialization { inline def leftNot: Byte = 8 inline def leftForall: Byte = 9 inline def leftExists: Byte = 10 - inline def leftExistsOne: Byte = 11 inline def rightAnd: Byte = 12 inline def rightOr: Byte = 13 inline def rightImplies: Byte = 14 @@ -29,55 +28,41 @@ object Serialization { inline def rightNot: Byte = 16 inline def rightForall: Byte = 17 inline def rightExists: Byte = 18 - inline def rightExistsOne: Byte = 19 + inline def rightEpsilon: Byte = 19 inline def weakening: Byte = 20 - inline def leftRefl: Byte = 21 - inline def rightRefl: Byte = 22 - inline def leftSubstEq: Byte = 23 - inline def rightSubstEq: Byte = 24 - inline def leftSubstIff: Byte = 25 - inline def rightSubstIff: Byte = 26 - inline def instSchema: Byte = 27 - inline def scSubproof: Byte = 28 - inline def sorry: Byte = 29 + inline def beta: Byte = 21 + inline def leftRefl: Byte = 22 + inline def rightRefl: Byte = 23 + inline def leftSubstEq: Byte = 24 + inline def rightSubstEq: Byte = 25 + inline def instSchema: Byte = 26 + inline def scSubproof: Byte = 27 + inline def sorry: Byte = 28 type Line = Int - // Injectively represent a TermLabel as a string - def termLabelToString(label: TermLabel): String = - label match - case l: ConstantFunctionLabel => "cfl_" + l.id.name + "_" + l.id.no + "_" + l.arity - case l: SchematicFunctionLabel => "sfl_" + l.id.name + "_" + l.id.no + "_" + l.arity - case l: VariableLabel => "vl_" + l.id.name + "_" + l.id.no - 
- // Injectively represent a FormulaLabel as a string. - def formulaLabelToString(label: FormulaLabel): String = - label match - case l: ConstantAtomicLabel => "cpl_" + l.id.name + "_" + l.id.no + "_" + l.arity - case l: SchematicPredicateLabel => "spl_" + l.id.name + "_" + l.id.no + "_" + l.arity - case l: ConstantConnectorLabel => "ccl_" + l.id.name + "_" + l.id.no + "_" + l.arity - case l: SchematicConnectorLabel => "scl_" + l.id.name + "_" + l.id.no + "_" + l.arity - case l: VariableFormulaLabel => "vfl_" + l.id.name + "_" + l.id.no - case l: BinderLabel => "bl_" + l.id.name + "_" + l.id.no - - // write a term label to an OutputStream - def termLabelToDOS(label: TermLabel, dos: DataOutputStream): Unit = - label match - case l: ConstantFunctionLabel => - dos.writeByte(0) - dos.writeUTF(l.id.name) - dos.writeInt(l.id.no) - dos.writeInt(l.arity) - case l: SchematicFunctionLabel => - dos.writeByte(1) - dos.writeUTF(l.id.name) - dos.writeInt(l.id.no) - dos.writeInt(l.arity) - case l: VariableLabel => - dos.writeByte(2) - dos.writeUTF(l.id.name) - dos.writeInt(l.id.no) - // write a formula label to an OutputStream + def typeToString(t: Sort): String = + t match + case Ind => "T" + case Prop => "F" + case Arrow(from, to) => s">${typeToString(from)}${typeToString(to)}" + + def constantToString(c: Constant): String = "cst_" + c.id.name + "_" + c.id.no + "_" + typeToString(c.sort) + def variableToString(v: Variable): String = "var_" + v.id.name + "_" + v.id.no + "_" + typeToString(v.sort) + + def constantToDos(c: Constant, dos: DataOutputStream): Unit = + dos.writeByte(0) + dos.writeUTF(c.id.name) + dos.writeInt(c.id.no) + dos.writeUTF(typeToString(c.sort)) + + def variableToDOS(v: Variable, dos: DataOutputStream): Unit = + dos.writeByte(1) + dos.writeUTF(v.id.name) + dos.writeInt(v.id.no) + dos.writeUTF(typeToString(v.sort)) + + /* def formulaLabelToDOS(label: FormulaLabel, dos: DataOutputStream): Unit = label match case l: ConstantAtomicLabel => @@ -105,77 +90,56 @@ object Serialization { case l: BinderLabel => dos.writeByte(8) dos.writeUTF(l.id.name) + */ /** * Main function that, when given a proof, will serialize it to a file. It will also serialize all the formulas appearing in it to another file. */ def proofsToDataStream(treesDOS: DataOutputStream, proofDOS: DataOutputStream, theorems: Seq[(String, SCProof, List[String])]): Unit = { - val termMap = MutMap[Long, Line]() - val formulaMap = MutMap[Long, Line]() + val exprMap = MutMap[Long, Line]() var line = -1 - // Compute the line of a term. If it is not in the map, add it to the map and write it to the tree file - def lineOfTerm(term: Term): Line = - termMap.get(term.uniqueNumber) match - case Some(line) => line - case None => - val na = term.args.map(t => lineOfTerm(t)) - termLabelToDOS(term.label, treesDOS) - na.foreach(t => treesDOS.writeInt(t)) - line = line + 1 - termMap(term.uniqueNumber) = line - line - // Compute the line of a formula. 
If it is not in the map, add it to the map and write it to the tree file - def lineOfFormula(formula: Formula): Line = - formulaMap.get(formula.uniqueNumber) match + def lineOfExpr(e: Expression): Line = + exprMap.get(e.uniqueNumber) match case Some(line) => line case None => - val nextLine = formula match - case AtomicFormula(label, args) => - val na = args.map(t => lineOfTerm(t)) - formulaLabelToDOS(label, treesDOS) - na.foreach(t => treesDOS.writeInt(t)) - case ConnectorFormula(label, args) => - val na = args.map(t => lineOfFormula(t)) - formulaLabelToDOS(label, treesDOS) - treesDOS.writeShort(na.size) - na.foreach(t => treesDOS.writeInt(t)) - case BinderFormula(label, bound, inner) => - val ni = lineOfFormula(inner) - formulaLabelToDOS(label, treesDOS) - termLabelToDOS(bound, treesDOS) + val nextLine = e match + case v: Variable => + treesDOS.writeByte(0) + treesDOS.writeUTF(v.id.name) + treesDOS.writeInt(v.id.no) + treesDOS.writeUTF(typeToString(v.sort)) + case c: Constant => + treesDOS.writeByte(1) + treesDOS.writeUTF(c.id.name) + treesDOS.writeInt(c.id.no) + treesDOS.writeUTF(typeToString(c.sort)) + case Lambda(v, inner) => + val vi = lineOfExpr(v) + val ni = lineOfExpr(inner) + treesDOS.writeByte(2) + treesDOS.writeInt(vi) + treesDOS.writeInt(ni) + case Application(f, arg) => + val a1 = lineOfExpr(f) + val a2 = lineOfExpr(arg) + treesDOS.writeByte(3) + treesDOS.writeInt(a1) + treesDOS.writeInt(a2) line = line + 1 - formulaMap(formula.uniqueNumber) = line + exprMap(e.uniqueNumber) = line line // Write a sequent to the proof file. def sequentToProofDOS(sequent: Sequent): Unit = proofDOS.writeShort(sequent.left.size) - sequent.left.foreach(f => proofDOS.writeInt(lineOfFormula(f))) + sequent.left.foreach(f => proofDOS.writeInt(lineOfExpr(f))) proofDOS.writeShort(sequent.right.size) - sequent.right.foreach(f => proofDOS.writeInt(lineOfFormula(f))) - - def lttToProofDOS(ltt: LambdaTermTerm): Unit = - val body = lineOfTerm(ltt.body) - proofDOS.writeShort(ltt.vars.size) - ltt.vars.foreach(v => termLabelToDOS(v, proofDOS)) - proofDOS.writeInt(body) - - def ltfToProofDOS(ltf: LambdaTermFormula): Unit = - val body = lineOfFormula(ltf.body) - proofDOS.writeShort(ltf.vars.size) - ltf.vars.foreach(v => termLabelToDOS(v, proofDOS)) - proofDOS.writeInt(body) - - def lffToProofDOS(lff: LambdaFormulaFormula): Unit = - val body = lineOfFormula(lff.body) - proofDOS.writeShort(lff.vars.size) - lff.vars.foreach(v => formulaLabelToDOS(v, proofDOS)) - proofDOS.writeInt(body) + sequent.right.foreach(f => proofDOS.writeInt(lineOfExpr(f))) + /** * Write a proof step to the proof file.
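lineOfExpr above pools the expression DAG: each distinct subexpression, keyed by its uniqueNumber, is written to the tree stream exactly once, children before parents, and every later occurrence is referred to by its line index. A hedged, self-contained sketch of the same idea on a toy expression type (Expr, ExprPool, lineOf and poolDemo are illustrative names; the pool collects strings in memory instead of writing to a DataOutputStream, and structural equality stands in for uniqueNumber):

import scala.collection.mutable

// Toy stand-in for the kernel's Expression.
enum Expr:
  case Cst(name: String)
  case App(fun: Expr, arg: Expr)

final class ExprPool:
  private val index = mutable.Map.empty[Expr, Int] // the kernel keys on uniqueNumber
  val records = mutable.ArrayBuffer.empty[String]  // stands in for the trees stream

  // Children are emitted before their parent, so every reference points to an
  // earlier record, which is what the reader relies on when rebuilding the map.
  def lineOf(e: Expr): Int = index.get(e) match
    case Some(line) => line
    case None =>
      val record = e match
        case Expr.Cst(name) => s"cst $name"
        case Expr.App(fun, arg) =>
          val fi = lineOf(fun)
          val ai = lineOf(arg)
          s"app $fi $ai"
      records += record
      val line = records.size - 1
      index(e) = line
      line

@main def poolDemo(): Unit =
  val pool = ExprPool()
  val fx = Expr.App(Expr.Cst("f"), Expr.Cst("x"))
  pool.lineOf(Expr.App(fx, fx)) // the shared subterm f(x) is stored only once
  pool.records.zipWithIndex.foreach((r, i) => println(s"$i: $r"))
  // prints: 0: cst f / 1: cst x / 2: app 0 1 / 3: app 2 2

The same ordering discipline is why the writer computes the lines of subexpressions before emitting the record of the enclosing Lambda or Application.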
@@ -196,192 +160,157 @@ object Serialization { case Hypothesis(bot, phi) => proofDOS.writeByte(hypothesis) sequentToProofDOS(bot) - proofDOS.writeInt(lineOfFormula(phi)) + proofDOS.writeInt(lineOfExpr(phi)) case Cut(bot, t1, t2, phi) => proofDOS.writeByte(cut) sequentToProofDOS(bot) proofDOS.writeInt(t1) proofDOS.writeInt(t2) - proofDOS.writeInt(lineOfFormula(phi)) + proofDOS.writeInt(lineOfExpr(phi)) case LeftAnd(bot, t1, phi, psi) => proofDOS.writeByte(leftAnd) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - proofDOS.writeInt(lineOfFormula(psi)) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(psi)) case LeftOr(bot, t, disjuncts) => proofDOS.writeByte(leftOr) sequentToProofDOS(bot) proofDOS.writeShort(t.size) t.foreach(proofDOS.writeInt) proofDOS.writeShort(disjuncts.size) - disjuncts.foreach(f => proofDOS.writeInt(lineOfFormula(f))) + disjuncts.foreach(f => proofDOS.writeInt(lineOfExpr(f))) case LeftImplies(bot, t1, t2, phi, psi) => proofDOS.writeByte(leftImplies) sequentToProofDOS(bot) proofDOS.writeInt(t1) proofDOS.writeInt(t2) - proofDOS.writeInt(lineOfFormula(phi)) - proofDOS.writeInt(lineOfFormula(psi)) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(psi)) case LeftIff(bot, t1, phi, psi) => proofDOS.writeByte(leftIff) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - proofDOS.writeInt(lineOfFormula(psi)) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(psi)) case LeftNot(bot, t1, phi) => proofDOS.writeByte(leftNot) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) + proofDOS.writeInt(lineOfExpr(phi)) case LeftForall(bot, t1, phi, x, t) => proofDOS.writeByte(leftForall) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - termLabelToDOS(x, proofDOS) - proofDOS.writeInt(lineOfTerm(t)) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(x)) + proofDOS.writeInt(lineOfExpr(t)) case LeftExists(bot, t1, phi, x) => proofDOS.writeByte(leftExists) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - termLabelToDOS(x, proofDOS) - case LeftExistsOne(bot, t1, phi, x) => - proofDOS.writeByte(leftExistsOne) - sequentToProofDOS(bot) - proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - termLabelToDOS(x, proofDOS) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(x)) case RightAnd(bot, t, conjuncts) => proofDOS.writeByte(rightAnd) sequentToProofDOS(bot) proofDOS.writeShort(t.size) t.foreach(proofDOS.writeInt) proofDOS.writeShort(conjuncts.size) - conjuncts.foreach(f => proofDOS.writeInt(lineOfFormula(f))) + conjuncts.foreach(f => proofDOS.writeInt(lineOfExpr(f))) case RightOr(bot, t1, phi, psi) => proofDOS.writeByte(rightOr) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - proofDOS.writeInt(lineOfFormula(psi)) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(psi)) case RightImplies(bot, t1, phi, psi) => proofDOS.writeByte(rightImplies) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - proofDOS.writeInt(lineOfFormula(psi)) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(psi)) case RightIff(bot, t1, t2, phi, psi) => proofDOS.writeByte(rightIff) sequentToProofDOS(bot) proofDOS.writeInt(t1) proofDOS.writeInt(t2) - proofDOS.writeInt(lineOfFormula(phi)) - proofDOS.writeInt(lineOfFormula(psi)) + 
proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(psi)) case RightNot(bot, t1, phi) => proofDOS.writeByte(rightNot) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) + proofDOS.writeInt(lineOfExpr(phi)) case RightForall(bot, t1, phi, x) => proofDOS.writeByte(rightForall) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - termLabelToDOS(x, proofDOS) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(x)) case RightExists(bot, t1, phi, x, t) => proofDOS.writeByte(rightExists) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - termLabelToDOS(x, proofDOS) - proofDOS.writeInt(lineOfTerm(t)) - case RightExistsOne(bot, t1, phi, x) => - proofDOS.writeByte(rightExistsOne) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(x)) + proofDOS.writeInt(lineOfExpr(t)) + case RightEpsilon(bot, t1, phi, x, t) => + proofDOS.writeByte(rightEpsilon) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(phi)) - termLabelToDOS(x, proofDOS) + proofDOS.writeInt(lineOfExpr(phi)) + proofDOS.writeInt(lineOfExpr(x)) + proofDOS.writeInt(lineOfExpr(t)) case Weakening(bot, t1) => proofDOS.writeByte(weakening) sequentToProofDOS(bot) proofDOS.writeInt(t1) + case Beta(bot, t1) => + proofDOS.writeByte(beta) + sequentToProofDOS(bot) + proofDOS.writeInt(t1) case LeftRefl(bot, t1, fa) => proofDOS.writeByte(leftRefl) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeInt(lineOfFormula(fa)) + proofDOS.writeInt(lineOfExpr(fa)) case RightRefl(bot, fa) => proofDOS.writeByte(rightRefl) sequentToProofDOS(bot) - proofDOS.writeInt(lineOfFormula(fa)) + proofDOS.writeInt(lineOfExpr(fa)) case LeftSubstEq(bot, t1, equals, lambdaPhi) => proofDOS.writeByte(leftSubstEq) sequentToProofDOS(bot) proofDOS.writeInt(t1) proofDOS.writeShort(equals.size) equals.foreach(ltts => - lttToProofDOS(ltts._1) - lttToProofDOS(ltts._2) + proofDOS.writeInt(lineOfExpr(ltts._1)) + proofDOS.writeInt(lineOfExpr(ltts._2)) ) proofDOS.writeShort(lambdaPhi._1.size) - lambdaPhi._1.foreach(stl => termLabelToDOS(stl, proofDOS)) - proofDOS.writeInt(lineOfFormula(lambdaPhi._2)) + lambdaPhi._1.foreach(stl => proofDOS.writeInt(lineOfExpr(stl))) + proofDOS.writeInt(lineOfExpr(lambdaPhi._2)) case RightSubstEq(bot, t1, equals, lambdaPhi) => - proofDOS.writeByte(rightSubstEq) - sequentToProofDOS(bot) - proofDOS.writeInt(t1) - proofDOS.writeShort(equals.size) - equals.foreach(ltts => - lttToProofDOS(ltts._1) - lttToProofDOS(ltts._2) - ) - proofDOS.writeShort(lambdaPhi._1.size) - lambdaPhi._1.foreach(stl => termLabelToDOS(stl, proofDOS)) - proofDOS.writeInt(lineOfFormula(lambdaPhi._2)) - case LeftSubstIff(bot, t1, equals, lambdaPhi) => - proofDOS.writeByte(leftSubstIff) - sequentToProofDOS(bot) - proofDOS.writeInt(t1) - proofDOS.writeShort(equals.size) - equals.foreach(ltts => - ltfToProofDOS(ltts._1) - ltfToProofDOS(ltts._2) - ) - proofDOS.writeShort(lambdaPhi._1.size) - lambdaPhi._1.foreach(stl => formulaLabelToDOS(stl, proofDOS)) - proofDOS.writeInt(lineOfFormula(lambdaPhi._2)) - case RightSubstIff(bot, t1, equals, lambdaPhi) => - proofDOS.writeByte(rightSubstIff) + proofDOS.writeByte(rightSubstEq) sequentToProofDOS(bot) proofDOS.writeInt(t1) proofDOS.writeShort(equals.size) equals.foreach(ltts => - ltfToProofDOS(ltts._1) - ltfToProofDOS(ltts._2) + proofDOS.writeInt(lineOfExpr(ltts._1)) + proofDOS.writeInt(lineOfExpr(ltts._2)) ) proofDOS.writeShort(lambdaPhi._1.size) - lambdaPhi._1.foreach(stl
=> formulaLabelToDOS(stl, proofDOS)) - proofDOS.writeInt(lineOfFormula(lambdaPhi._2)) - case InstSchema(bot, t1, mCon, mPred, mTerm) => + lambdaPhi._1.foreach(stl => proofDOS.writeInt(lineOfExpr(stl))) + proofDOS.writeInt(lineOfExpr(lambdaPhi._2)) + case InstSchema(bot, t1, m) => proofDOS.writeByte(instSchema) sequentToProofDOS(bot) proofDOS.writeInt(t1) - proofDOS.writeShort(mCon.size) - mCon.foreach(t => - formulaLabelToDOS(t._1, proofDOS) - lffToProofDOS(t._2) - ) - proofDOS.writeShort(mPred.size) - mPred.foreach(t => - formulaLabelToDOS(t._1, proofDOS) - ltfToProofDOS(t._2) - ) - proofDOS.writeShort(mTerm.size) - mTerm.foreach(t => - termLabelToDOS(t._1, proofDOS) - lttToProofDOS(t._2) + proofDOS.writeShort(m.size) + m.foreach(t => + proofDOS.writeInt(lineOfExpr(t._1)) + proofDOS.writeInt(lineOfExpr(t._2)) ) case SCSubproof(sp, premises) => throw new Exception("Cannot support subproofs, flatten the proof first.") case Sorry(bot) => @@ -403,6 +332,15 @@ object Serialization { } + def typeFromString(s: String): (Sort, String) = + if s(0) == 'T' then (Ind, s.drop(1)) + else if s(0) == 'F' then (Prop, s.drop(1)) + else if s(0) == '>' then + val (from, reminder) = typeFromString(s.drop(1)) + val (to, r) = typeFromString(reminder) + (Arrow(from, to), r) + else throw new Exception("Unknown type: " + s) + /** * This functions reverses the effect of proofToDataStream * @@ -410,110 +348,44 @@ object Serialization { */ def proofsFromDataStream(treesDIS: DataInputStream, proofDIS: DataInputStream): Seq[(String, SCProof, List[String])] = { - val termMap = MutMap[Line, Term]() - val formulaMap = MutMap[Line, Formula]() - - // Read a label from the tree file, reversing the effect of termLabelToDOS and formulaLabelToDOS. - def labelFromInputStream(dis: DataInputStream): Label = { - val labelType = dis.readByte() - labelType match - case 0 => - val name = dis.readUTF() - val no = dis.readInt() - val arity = dis.readInt() - ConstantFunctionLabel(Identifier(name, no), arity) - case 1 => - val name = dis.readUTF() - val no = dis.readInt() - val arity = dis.readInt() - SchematicFunctionLabel(Identifier(name, no), arity) - case 2 => - val name = dis.readUTF() - val no = dis.readInt() - VariableLabel(Identifier(name, no)) - case 3 => - val name = dis.readUTF() - val no = dis.readInt() - val arity = dis.readInt() - ConstantAtomicLabel(Identifier(name, no), arity) - case 4 => - val name = dis.readUTF() - val no = dis.readInt() - val arity = dis.readInt() - SchematicPredicateLabel(Identifier(name, no), arity) - case 5 => - val name = dis.readUTF() - name match - case And.id.name => And - case Or.id.name => Or - case Implies.id.name => Implies - case Iff.id.name => Iff - case Neg.id.name => Neg - case 6 => - val name = dis.readUTF() - val no = dis.readInt() - val arity = dis.readInt() - SchematicConnectorLabel(Identifier(name, no), arity) - case 7 => - val name = dis.readUTF() - val no = dis.readInt() - VariableFormulaLabel(Identifier(name, no)) - case 8 => - dis.readUTF() match - case Forall.id.name => Forall - case Exists.id.name => Exists - case ExistsOne.id.name => ExistsOne - - } + val exprMap = MutMap[Line, Expression]() // Read and reconstruct all the terms and formulas in the tree file. Fill the table with it. 
var lineNo = -1 try { while true do lineNo = lineNo + 1 - val label = labelFromInputStream(treesDIS) - label match - case l: TermLabel => - val args = (1 to l.arity).map(_ => termMap(treesDIS.readInt())).toSeq - termMap(lineNo) = Term(l, args) - case l: FormulaLabel => - val formula = label match - case l: AtomicLabel => - val args = (1 to l.arity).map(_ => termMap(treesDIS.readInt())).toSeq - AtomicFormula(l, args) - case l: ConnectorLabel => - val ar = treesDIS.readShort() - val args = (1 to ar).map(_ => formulaMap(treesDIS.readInt())).toSeq - ConnectorFormula(l, args) - case l: BinderLabel => - BinderFormula(l, labelFromInputStream(treesDIS).asInstanceOf[VariableLabel], formulaMap(treesDIS.readInt())) - formulaMap(lineNo) = formula + exprMap(lineNo) = treesDIS.readByte() match + case 0 => + val name = treesDIS.readUTF() + val no = treesDIS.readInt() + val sort = treesDIS.readUTF() + Variable(Identifier(name, no), typeFromString(sort)._1) + case 1 => + val name = treesDIS.readUTF() + val no = treesDIS.readInt() + val sort = treesDIS.readUTF() + Constant(Identifier(name, no), typeFromString(sort)._1) + case 2 => + val v = exprMap(treesDIS.readInt()) + val body = exprMap(treesDIS.readInt()) + Lambda(v.asInstanceOf[Variable], body) + case 3 => + val f = exprMap(treesDIS.readInt()) + val arg = exprMap(treesDIS.readInt()) + Application(f, arg) } catch case _: EOFException => () // Terms and Formulas finished, deal with the proof now. - def lttFromProofDIS(): LambdaTermTerm = - val vars = (1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[VariableLabel]).toSeq - val body = termMap(proofDIS.readInt()) - LambdaTermTerm(vars, body) - - def ltfFromProofDIS(): LambdaTermFormula = - val vars = (1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[VariableLabel]).toSeq - val body = formulaMap(proofDIS.readInt()) - LambdaTermFormula(vars, body) - - def lffFromProofDIS(): LambdaFormulaFormula = - val vars = (1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[VariableFormulaLabel]).toSeq - val body = formulaMap(proofDIS.readInt()) - LambdaFormulaFormula(vars, body) def sequentFromProofDIS(): Sequent = val leftSize = proofDIS.readShort() - val left = (1 to leftSize).map(_ => formulaMap(proofDIS.readInt())).toSet + val left = (1 to leftSize).map(_ => exprMap(proofDIS.readInt())).toSet val rightSize = proofDIS.readShort() - val right = (1 to rightSize).map(_ => formulaMap(proofDIS.readInt())).toSet + val right = (1 to rightSize).map(_ => exprMap(proofDIS.readInt())).toSet Sequent(left, right) // Read a proof step from the proof file.
Inverse of proofStepToProofDOS @@ -521,86 +393,80 @@ object Serialization { val psType = proofDIS.readByte() if (psType == restate) Restate(sequentFromProofDIS(), proofDIS.readInt()) else if (psType == restateTrue) RestateTrue(sequentFromProofDIS()) - else if (psType == hypothesis) Hypothesis(sequentFromProofDIS(), formulaMap(proofDIS.readInt())) - else if (psType == cut) Cut(sequentFromProofDIS(), proofDIS.readInt(), proofDIS.readInt(), formulaMap(proofDIS.readInt())) - else if (psType == leftAnd) LeftAnd(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), formulaMap(proofDIS.readInt())) + else if (psType == hypothesis) Hypothesis(sequentFromProofDIS(), exprMap(proofDIS.readInt())) + else if (psType == cut) Cut(sequentFromProofDIS(), proofDIS.readInt(), proofDIS.readInt(), exprMap(proofDIS.readInt())) + else if (psType == leftAnd) LeftAnd(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt())) else if (psType == leftOr) LeftOr( sequentFromProofDIS(), (1 to proofDIS.readShort()).map(_ => proofDIS.readInt()).toSeq, - (1 to proofDIS.readShort()).map(_ => formulaMap(proofDIS.readInt())).toSeq + (1 to proofDIS.readShort()).map(_ => exprMap(proofDIS.readInt())).toSeq ) - else if (psType == leftImplies) LeftImplies(sequentFromProofDIS(), proofDIS.readInt(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), formulaMap(proofDIS.readInt())) - else if (psType == leftIff) LeftIff(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), formulaMap(proofDIS.readInt())) - else if (psType == leftNot) LeftNot(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt())) + else if (psType == leftImplies) LeftImplies(sequentFromProofDIS(), proofDIS.readInt(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt())) + else if (psType == leftIff) LeftIff(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt())) + else if (psType == leftNot) LeftNot(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt())) else if (psType == leftForall) LeftForall( sequentFromProofDIS(), proofDIS.readInt(), - formulaMap(proofDIS.readInt()), - labelFromInputStream(proofDIS).asInstanceOf[VariableLabel], - termMap(proofDIS.readInt()) + exprMap(proofDIS.readInt()), + exprMap(proofDIS.readInt()).asInstanceOf[Variable], + exprMap(proofDIS.readInt()) ) - else if (psType == leftExists) LeftExists(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), labelFromInputStream(proofDIS).asInstanceOf[VariableLabel]) - else if (psType == leftExistsOne) LeftExistsOne(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), labelFromInputStream(proofDIS).asInstanceOf[VariableLabel]) + else if (psType == leftExists) LeftExists(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt()).asInstanceOf[Variable]) else if (psType == rightAnd) RightAnd( sequentFromProofDIS(), (1 to proofDIS.readShort()).map(_ => proofDIS.readInt()).toSeq, - (1 to proofDIS.readShort()).map(_ => formulaMap(proofDIS.readInt())).toSeq + (1 to proofDIS.readShort()).map(_ => exprMap(proofDIS.readInt())).toSeq ) - else if (psType == rightOr) RightOr(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), formulaMap(proofDIS.readInt())) - else if (psType == rightImplies) RightImplies(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), formulaMap(proofDIS.readInt())) - else if 
(psType == rightIff) RightIff(sequentFromProofDIS(), proofDIS.readInt(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), formulaMap(proofDIS.readInt())) - else if (psType == rightNot) RightNot(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt())) - else if (psType == rightForall) RightForall(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), labelFromInputStream(proofDIS).asInstanceOf[VariableLabel]) + else if (psType == rightOr) RightOr(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt())) + else if (psType == rightImplies) RightImplies(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt())) + else if (psType == rightIff) RightIff(sequentFromProofDIS(), proofDIS.readInt(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt())) + else if (psType == rightNot) RightNot(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt())) + else if (psType == rightForall) RightForall(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt()).asInstanceOf[Variable]) else if (psType == rightExists) RightExists( sequentFromProofDIS(), proofDIS.readInt(), - formulaMap(proofDIS.readInt()), - labelFromInputStream(proofDIS).asInstanceOf[VariableLabel], - termMap(proofDIS.readInt()) + exprMap(proofDIS.readInt()), + exprMap(proofDIS.readInt()).asInstanceOf[Variable], + exprMap(proofDIS.readInt()) + ) + else if (psType == rightEpsilon) + RightEpsilon( + sequentFromProofDIS(), + proofDIS.readInt(), + exprMap(proofDIS.readInt()), + exprMap(proofDIS.readInt()).asInstanceOf[Variable], + exprMap(proofDIS.readInt()) ) - else if (psType == rightExistsOne) RightExistsOne(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt()), labelFromInputStream(proofDIS).asInstanceOf[VariableLabel]) else if (psType == weakening) Weakening(sequentFromProofDIS(), proofDIS.readInt()) - else if (psType == leftRefl) LeftRefl(sequentFromProofDIS(), proofDIS.readInt(), formulaMap(proofDIS.readInt())) - else if (psType == rightRefl) RightRefl(sequentFromProofDIS(), formulaMap(proofDIS.readInt())) + else if (psType == beta) + Beta(sequentFromProofDIS(), + proofDIS.readInt() + ) + else if (psType == leftRefl) LeftRefl(sequentFromProofDIS(), proofDIS.readInt(), exprMap(proofDIS.readInt())) + else if (psType == rightRefl) RightRefl(sequentFromProofDIS(), exprMap(proofDIS.readInt())) else if (psType == leftSubstEq) LeftSubstEq( sequentFromProofDIS(), proofDIS.readInt(), - (1 to proofDIS.readShort()).map(_ => (lttFromProofDIS(), lttFromProofDIS())).toList, - ((1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[SchematicTermLabel]).toList, formulaMap(proofDIS.readInt())) + (1 to proofDIS.readShort()).map(_ => (exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt()))).toList, + ((1 to proofDIS.readShort()).map(_ => exprMap(proofDIS.readInt()).asInstanceOf[Variable]).toList, exprMap(proofDIS.readInt())) ) else if (psType == rightSubstEq) RightSubstEq( sequentFromProofDIS(), proofDIS.readInt(), - (1 to proofDIS.readShort()).map(_ => (lttFromProofDIS(), lttFromProofDIS())).toList, - ((1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[SchematicTermLabel]).toList, formulaMap(proofDIS.readInt())) - ) - else if (psType == leftSubstIff) - LeftSubstIff( - sequentFromProofDIS(), - proofDIS.readInt(), - (1 to proofDIS.readShort()).map(_ => (ltfFromProofDIS(), 
ltfFromProofDIS())).toList, - ((1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[SchematicAtomicLabel]).toList, formulaMap(proofDIS.readInt())) - ) - else if (psType == rightSubstIff) - RightSubstIff( - sequentFromProofDIS(), - proofDIS.readInt(), - (1 to proofDIS.readShort()).map(_ => (ltfFromProofDIS(), ltfFromProofDIS())).toList, - ((1 to proofDIS.readShort()).map(_ => labelFromInputStream(proofDIS).asInstanceOf[SchematicAtomicLabel]).toList, formulaMap(proofDIS.readInt())) + (1 to proofDIS.readShort()).map(_ => (exprMap(proofDIS.readInt()), exprMap(proofDIS.readInt()))).toList, + ((1 to proofDIS.readShort()).map(_ => exprMap(proofDIS.readInt()).asInstanceOf[Variable]).toList, exprMap(proofDIS.readInt())) ) else if (psType == instSchema) InstSchema( sequentFromProofDIS(), proofDIS.readInt(), - (1 to proofDIS.readShort()).map(_ => (labelFromInputStream(proofDIS).asInstanceOf[SchematicConnectorLabel], lffFromProofDIS())).toMap, - (1 to proofDIS.readShort()).map(_ => (labelFromInputStream(proofDIS).asInstanceOf[SchematicAtomicLabel], ltfFromProofDIS())).toMap, - (1 to proofDIS.readShort()).map(_ => (labelFromInputStream(proofDIS).asInstanceOf[SchematicTermLabel], lttFromProofDIS())).toMap + (1 to proofDIS.readShort()).map(_ => exprMap(proofDIS.readInt()).asInstanceOf[Variable] -> exprMap(proofDIS.readInt())).toMap ) else if (psType == sorry) Sorry(sequentFromProofDIS()) else throw new Exception("Unknown proof step type: " + psType) @@ -636,10 +502,12 @@ object Serialization { val justNames = justs.map { case (obj, theory.Axiom(name, ax)) => "a" + obj + "$" + name case (obj, theory.Theorem(name, proposition, withSorry)) => "t" + obj + "$" + name - case (obj, theory.FunctionDefinition(label, out, expression, withSorry)) => "f" + obj + "$" + label.id.name + "_" + label.id.no + "_" + label.arity - case (obj, theory.PredicateDefinition(label, expression)) => "p" + obj + "$" + label.id.name + "_" + label.id.no + "_" + label.arity + case (obj, theory.Definition(label, expression, vars)) => + "d" + obj + "$" + label.id.name + "_" + label.id.no + "_" + typeToString(label.sort) //+ "__" + + //vars.size + vars.map(v => v.id.name + "_" + v.id.no + "_" + typeToString(v.sort)).mkString("__") } - (name, minimizeProofOnce(proof), justNames) + //(name, minimizeProofOnce(proof), justNames) + (name, proof, justNames) ) ) } @@ -663,12 +531,10 @@ object Serialization { case 'a' => theory.getAxiom(name).get case 't' => theory.getTheorem(name).get - case 'f' => - name.split("_") match - case Array(name, no, arity) => theory.getDefinition(ConstantFunctionLabel(Identifier(name, no.toInt), arity.toInt)).get - case 'p' => - name.split("_") match - case Array(name, no, arity) => theory.getDefinition(ConstantAtomicLabel(Identifier(name, no.toInt), arity.toInt)).get + case 'd' => + val Array(id, no, sort) = name.split("_") + val cst = Constant(Identifier(id, no.toInt), typeFromString(sort)._1) + theory.getDefinition(cst).get } if debug then // To avoid conflicts where a theorem already exists, for example in test suits. 
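For reference, the new reader above assumes a simple per-step layout: a one-byte step tag, the conclusion sequent, and then the step's arguments, with expressions encoded as integer indices into the shared expression table (`exprMap`) and variable-length lists prefixed by a short count. A rough sketch of the corresponding writer logic for two cases — the helper names `sequentToProofDOS` and `exprIndex` are placeholders for whatever `proofStepToProofDOS` actually uses, which is not shown in this diff:

    case Hypothesis(bot, phi) =>
      proofDOS.writeByte(hypothesis)        // step tag
      sequentToProofDOS(bot)                // conclusion sequent
      proofDOS.writeInt(exprIndex(phi))     // expression as an index into the shared table
    case LeftOr(bot, t, disjuncts) =>
      proofDOS.writeByte(leftOr)
      sequentToProofDOS(bot)
      proofDOS.writeShort(t.size); t.foreach(proofDOS.writeInt)
      proofDOS.writeShort(disjuncts.size); disjuncts.foreach(d => proofDOS.writeInt(exprIndex(d)))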
diff --git a/lisa-utils/src/main/scala/lisa/utils/collection/Extensions.scala b/lisa-utils/src/main/scala/lisa/utils/collection/Extensions.scala new file mode 100644 index 000000000..e0b38ee97 --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/collection/Extensions.scala @@ -0,0 +1,36 @@ +package lisa.utils.collection + +import scala.collection.immutable.SeqOps +import scala.collection.immutable.VectorBuilder + +object Extensions: + + extension [A](seq: IterableOnce[A]) + /** + * Iterable.collectFirst, but for a function returning an Option. Evaluates + * the function only once per argument. Returns when the first non-`None` + * value is found. + * + * @param f the function to evaluate + */ + def collectFirstDefined[B](f: A => Option[B]): Option[B] = + var res: Option[B] = None + val iter = seq.iterator + while (res.isEmpty && iter.hasNext) res = f(iter.next()) + res + + /** + * Convert an iterable of options to an option of a sequence. + * `Some(Seq(...))` if every value in the iterable is defined, and `None` + * otherwise. + * + * Attempts to do so lazily (if your iterable is lazy). + */ + def toOptionSeq[B](using ev: A <:< Option[B]): Option[Seq[B]] = + val state = true + var res = Option(Vector.newBuilder[B]) + val iter = seq.iterator + while (res.nonEmpty && iter.hasNext) + val next = iter.next() + if next.isDefined then res = res.map(_ += next.get) else res = None + res.map(_.result()) diff --git a/lisa-utils/src/main/scala/lisa/utils/collection/VecSet.scala b/lisa-utils/src/main/scala/lisa/utils/collection/VecSet.scala new file mode 100644 index 000000000..4e74b7a06 --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/collection/VecSet.scala @@ -0,0 +1,76 @@ +package lisa.utils.collection + +import scala.collection.mutable +import scala.collection.IterableFactory +import scala.collection.IterableFactoryDefaults +import scala.collection.immutable.HashSet +import scala.collection.immutable.SetOps + +object VecSet extends IterableFactory[VecSet]: + def empty[A]: VecSet[A] = + new VecSet(Vector.empty, Set.empty) + def from[A](source: IterableOnce[A]): VecSet[A] = + val vec = Vector.from(source) + new VecSet(vec, vec.to(Set)) + def newBuilder[A]: mutable.ReusableBuilder[A, VecSet[A]] = new VecSetBuilder() + + private sealed class VecSetBuilder[A] () extends mutable.ReusableBuilder[A, VecSet[A]]: + protected val vecBuilder: mutable.ReusableBuilder[A, Vector[A]] = Vector.newBuilder[A] + protected val setBuilder: mutable.ReusableBuilder[A, Set[A]] = HashSet.newBuilder[A] + protected var currentSize = 0 + + def addOne(elem: A): this.type = + vecBuilder.addOne(elem) + setBuilder.addOne(elem) + currentSize += 1 + this + + override def clear(): Unit = + vecBuilder.clear() + setBuilder.clear() + currentSize = 0 + + override def result(): VecSet[A] = + new VecSet(vecBuilder.result(), setBuilder.result()) + +sealed class VecSet[A] private (protected val evec: Vector[A], protected val eset: Set[A]) + extends Set[A] + with SetOps[A, VecSet, VecSet[A]] + with IterableFactoryDefaults[A, VecSet]: + // invariants: + // require( evec.toSet == eset ) + // require( evec.distinct == evec ) + + def iterator: Iterator[A] = evec.iterator + + def length: Int = evec.length + + override def iterableFactory: IterableFactory[VecSet] = VecSet + + override def contains(elem: A): Boolean = eset.contains(elem) + + override def excl(elem: A): VecSet[A] = + eset(elem) match + case false => this + case true => + // specialized version of Vector.diff + // without the added dramatic flair + val builder = 
Vector.newBuilder[A] + val iter = evec.iterator + + while (iter.hasNext) do + val next = iter.next + if next == elem then + // found the element to remove, rush through the remaining + builder.addAll(iter) + else builder.addOne(next) + + new VecSet(builder.result(), eset.excl(elem)) + + override def incl(elem: A): VecSet[A] = + eset(elem) match + case false => new VecSet(evec :+ elem, eset + elem) + case true => this + + override def toSeq: Seq[A] = evec + override def toVector: Vector[A] = evec diff --git a/lisa-utils/src/main/scala/lisa/utils/fol/ExprOps.scala b/lisa-utils/src/main/scala/lisa/utils/fol/ExprOps.scala new file mode 100644 index 000000000..3910b33ef --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/fol/ExprOps.scala @@ -0,0 +1,58 @@ +package lisa.utils.fol + +import lisa.utils.fol.Syntax + +/** + * Functional Tree-like operations for expressions. + */ +trait ExprOps extends Syntax { + extension[A] (e: Expr[A]) + /** + * Apply a function to the expression tree. The function is applied to a + * node before traversing its (now new) children. + * + * @param f the function to apply + * @return the transformed expression + */ + def preMap(f: Expr[?] => Expr[?]): Expr[?] = ??? + + /** + * Apply a function to the expression tree. The function is applied to a + * node after traversing its children. + * + * @param f the function to apply + * @return the transformed expression + */ + def postMap(f: Expr[?] => Expr[?]): Expr[?] = ??? + + /** + * All subexpressions of this expression, including itself, in depth-first + * order. + */ + def subexpressions: Iterator[Expr[?]] = + e match + case Variable(id) => Iterator(e) + case Constant(id) => Iterator(e) + case App(f, arg) => Iterator(e) ++ f.subexpressions ++ arg.subexpressions + case Abs(v, body) => Iterator(e) ++ body.subexpressions + + /** + * Collect all sub-expressions which satisfy a given predicate + * + * @param p the predicate + * @return the (depth-first) sequence of sub-expressions satisfying `p` + */ + def filter(p: Expr[?] => Boolean): Seq[Expr[?]] = + e.subexpressions.filter(p).toVector + + /** + * Collect all sub-expressions to which a given partial function applies, + * after applying the function. + * + * @param f the partial function + * @return the (depth-first) sequence of sub-expressions to which `f` + * applies + */ + def collect[B](f: PartialFunction[Expr[?], Expr[B]]): Seq[Expr[B]] = + e.subexpressions.collect(f).toVector +} diff --git a/lisa-utils/src/main/scala/lisa/utils/fol/FOL.scala b/lisa-utils/src/main/scala/lisa/utils/fol/FOL.scala new file mode 100644 index 000000000..356a724b8 --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/fol/FOL.scala @@ -0,0 +1,6 @@ +package lisa.utils.fol + +object FOL extends Sequents { + //export lisa.utils.K + export lisa.utils.K.Identifier +} diff --git a/lisa-utils/src/main/scala/lisa/utils/fol/Predef.scala b/lisa-utils/src/main/scala/lisa/utils/fol/Predef.scala new file mode 100644 index 000000000..1729cd023 --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/fol/Predef.scala @@ -0,0 +1,230 @@ +package lisa.utils.fol + +import lisa.utils.K +import lisa.utils.fol.ExprOps +import K.given + +trait Predef extends ExprOps { + + export K.{given_Conversion_String_Identifier, given_Conversion_Identifier_String} + /** Creates a variable with the given identifier and sort. */ + def variable[S](using IsSort[S])(id: K.Identifier): Variable[S] = new Variable(id) + /** Creates a constant with the given identifier and sort. 
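+ * For example (illustrative, using an explicit identifier): {{{ val c = constant[Ind](K.Identifier("c", 0)) }}}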
*/ + def constant[S](using IsSort[S])(id: K.Identifier): Constant[S] = new Constant(id) + /** Creates a binder with the given identifier and sorts. */ + def binder[S1, S2, S3](using IsSort[S1], IsSort[S2], IsSort[S3]) + (id: K.Identifier): Binder[S1, S2, S3] = new Binder(id) + + /** Creates a variable with name equal to the scala identifier. Usage: + * {{{val x = variable[Ind]}}} + */ + def variable[S](using name: sourcecode.Name, is: IsSort[S]): Variable[S] = new Variable(name.value) + /** Creates a constant with name equal to the scala identifier. Usage: + * {{{val c = constant[Ind]}}} + */ + def constant[S](using name: sourcecode.Name, is: IsSort[S]): Constant[S] = new Constant(name.value) + /** Creates a binder with name equal to the scala identifier. Usage: + * {{{val B = binder[Ind, Prop, Prop]}}} + */ + def binder[S1, S2, S3](using name: sourcecode.Name) + (using IsSort[S1], IsSort[S2], IsSort[S3]): Binder[S1, S2, S3] = new Binder(name.value) + + /** Creates a variable with a runtime sort. */ + def variable(id: K.Identifier, s: K.Sort): Variable[?] = Variable.unsafe(id, s) + /** Creates a constant with a runtime sort. */ + def constant(id: K.Identifier, s: K.Sort): Constant[?] = Constant.unsafe(id, s) + + /** Creates a variable with name equal to the scala identifier and a runtime sort. Usage: + * {{{val x = variable(K.Ind))}}} + */ + def variable(using name: sourcecode.Name)(s: K.Sort): Variable[?] = Variable.unsafe(name.value, s) + /** Creates a constant with name equal to the scala identifier and a runtime sort. Usage: + * {{{val c = constant(K.Ind))}}} + */ + def constant(using name: sourcecode.Name)(s: K.Sort): Constant[?] = Constant.unsafe(name.value, s) + + + val equality = constant[Ind >>: Ind >>: Prop]("=") + val === = equality + val = = equality + + extension (t: Expr[Ind]) { + infix def ===(u: Expr[Ind]): Expr[Prop] = equality(t)(u) + infix def =(u: Expr[Ind]): Expr[Prop] = equality(t)(u) + } + + val top = constant[Prop]("⊤") + val ⊤ : top.type = top + val True: top.type = top + + val bot = constant[Prop]("⊥") + val ⊥ : bot.type = bot + val False: bot.type = bot + + val neg = constant[Prop >>: Prop]("¬") + val ¬ : neg.type = neg + val ! : neg.type = neg + + val and = constant[Prop >>: Prop >>: Prop]("∧").printInfix() + val /\ : and.type = and + val ∧ : and.type = and + + val or = constant[Prop >>: Prop >>: Prop]("∨").printInfix() + val \/ : or.type = or + val ∨ : or.type = or + + val implies = constant[Prop >>: Prop >>: Prop]("⇒").printInfix() + val ==> : implies.type = implies + + val iff = constant[Prop >>: Prop >>: Prop]("⇔").printInfix() + val <=> : iff.type = iff + val ⇔ : iff.type = iff + + val forall = binder[Ind, Prop, Prop]("∀") + val ∀ : forall.type = forall + + val exists = binder[Ind, Prop, Prop]("∃") + val ∃ : exists.type = exists + + val epsilon = binder[Ind, Prop, Ind]("ε") + val ε : epsilon.type = epsilon + + extension (f: Expr[Prop]) { + def unary_! = neg(f) + infix inline def ==>(g: Expr[Prop]): Expr[Prop] = implies(f)(g) + infix inline def <=>(g: Expr[Prop]): Expr[Prop] = iff(f)(g) + infix inline def /\(g: Expr[Prop]): Expr[Prop] = and(f)(g) + infix inline def ∧(g: Expr[Prop]): Expr[Prop] = and(f)(g) + infix inline def \/(g: Expr[Prop]): Expr[Prop] = or(f)(g) + infix inline def ∨(g: Expr[Prop]): Expr[Prop] = or(f)(g) + } + + /** Conjunction of all the formulas in the iterable. 
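+ * For example, `andAll(Seq(a, b, c))` would build `(a /\ b) /\ c`, for some formulas `a`, `b`, `c`.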
Must be non-empty */ + def andAll(forms: IterableOnce[Expr[Prop]]): Expr[Prop] = + forms.iterator.reduce(_ /\ _) + + /** Conjunction of all the formulas in the iterable, or True if the iterable is empty. */ + def andAllOrTrue(forms: IterableOnce[Expr[Prop]]): Expr[Prop] = + forms.iterator.reduceOption(_ /\ _).getOrElse(top) + + /** Disjunction of all the formulas in the iterable. Must be non-empty */ + def orAll(forms: IterableOnce[Expr[Prop]]): Expr[Prop] = + forms.iterator.reduce(_ \/ _) + + /** Disjunction of all the formulas in the iterable, or False if the iterable is empty. */ + def orAllOrFalse(forms: IterableOnce[Expr[Prop]]): Expr[Prop] = + forms.iterator.reduceOption(_ \/ _).getOrElse(bot) + + /** Maps a kernel expression to a corresponding front-end expression. */ + def asFrontExpression(e: K.Expression): Expr[?] = e match + case c: K.Constant => asFrontConstant(c) + case v: K.Variable => asFrontVariable(v) + case a: K.Application => asFrontApplication(a) + case l: K.Lambda => asFrontLambda(l) + + /** Maps a kernel constant to a corresponding front-end constant. */ + def asFrontConstant(c: K.Constant): Constant[?] = + new Constant[Ind](c.id)(using unsafeSortEvidence(c.sort)) + + /** Maps a kernel variable to a corresponding front-end variable. */ + def asFrontVariable(v: K.Variable): Variable[?] = + new Variable[Ind](v.id)(using unsafeSortEvidence(v.sort)) + + /** Maps a kernel application to a corresponding front-end application. */ + def asFrontApplication(a: K.Application): App[?, ?] = + new App(asFrontExpression(a.f).asInstanceOf, asFrontExpression(a.arg)) + + /** Maps a kernel lambda to a corresponding front-end lambda. */ + def asFrontLambda(l: K.Lambda): Abs[?, ?] = + new Abs(asFrontVariable(l.v).asInstanceOf, asFrontExpression(l.body)) + + /** Computes the greatest identifier in a sequence of expressions. */ + def greatestId(exprs: Seq[K.Expression | Expr[?] | K.Identifier ]): Int = + exprs.view.flatMap({ + case e: K.Expression => e.freeVariables.map(_.id) + case e: Expr[?] => e.freeVars.map(_.id) + case id: K.Identifier => Seq(id) + }).map(_.no).max + + /** Returns a fresh identifier based on the greatest identifier in a sequence of expressions. */ + def freshId(exprs: Iterable[K.Expression | Expr[?] | K.Identifier ], base: String = "x"): K.Identifier = { + val i = exprs.view.flatMap({ + case e: K.Expression => e.freeVariables.map(_.id) + case e: Expr[?] => e.freeVars.map(_.id) + case id: K.Identifier => Seq(id) + }).filter(_.name == base).map(_.no).max + K.Identifier(base, i + 1) + } + + /** Returns `n`` fresh identifiers based on the greatest identifier in a sequence of expressions. */ + def nFreshIds(n: Int, exprs: Seq[K.Expression | Expr[?] | K.Identifier ], base: String = "x"): Seq[K.Identifier] = { + val i = exprs.view.flatMap({ + case e: K.Expression => e.freeVariables.map(_.id) + case e: Expr[?] => e.freeVars.map(_.id) + case id: K.Identifier => Seq(id) + }).filter(_.name == base).map(_.no).max + (i + 1 to i + n).map(K.Identifier(base, _)) + } + + + /** Extractor object for functional expressions and types. 
*/ + object Functional : + /** Usage: + * {{{ + * (e: Expr[Ind >>: Ind >>: Ind]) match + * case Functional(l) => ...// l = Seq(K.Ind, K.Ind) + * }}} + */ + def unapply(e: Expr[?]): Option[Seq[K.Sort]] = + if e.sort.isFunctional then Some(K.flatTypeParameters(e.sort)) else None + + /** Usage: + * {{{ + * (K.Ind -> K.Ind -> K.Ind) match + * case Functional(l) => ...// l = Seq(K.Ind, K.Ind) + * }}} + */ + def unapply(s: K.Sort): Option[Seq[K.Sort]] = + if s.isFunctional then Some(K.flatTypeParameters(s)) else None + + /** Extractor object for predicate expressions and types. */ + object Predicate: + /** Usage: + * {{{ + * (e: Expr[Ind >>: Ind >>: Prop]) match + * case Predicate(l) => ...// l = Seq(K.Ind, K.Ind) + * }}} + */ + def unapply(e: Expr[?]): Option[Seq[K.Sort]] = + if e.sort.isPredicate then Some(K.flatTypeParameters(e.sort)) else None + + /** Usage: + * {{{ + * (K.Ind -> K.Ind -> Prop) match + * case Predicate(l) => ...// l = Seq(K.Ind, K.Ind) + * }}} + */ + def unapply(s: K.Sort): Option[Seq[K.Sort]] = + if s.isPredicate then Some(K.flatTypeParameters(s)) else None + + + /** Creates pseudo-equality between two expressions, depending on their types, and based on extensionality. For example: + * {{{ + * makeEq(s, t) // s === t + * makeEq(ϕ, ψ) // ϕ <=> ψ + * makeEq(f, g) // ∀(x, f(x) === g(x)) + * makeEq(P, Q) // ∀(x, ∀(y, P(x)(y) <=> Q(x)(y))) + * }}} + */ + def makeEq(s: Expr[?], t: Expr[?]): Expr[Prop] = + if s.sort != t.sort || !(s.sort.isFunctional || s.sort.isPredicate) then throw new IllegalArgumentException("Can only make equality between predicate and functional expressions") + val no = ((s.freeVars ++ t.freeVars).view.map(_.id.no) ++ Seq(-1)).max+1 + val vars = (no until no+s.sort.depth).map(i => variable[Ind](K.Identifier("x", i))) + val inner1 = vars.foldLeft(s)(_ #@ _) + val inner2 = vars.foldLeft(t)(_ #@ _) + val base = if (inner1.sort == K.Prop) iff #@ inner1 #@ inner2 else equality #@ inner1 #@ inner2 + vars.foldRight(base : Expr[Prop]) { case (s_arg, acc) => forall(s_arg, acc) } + + + +} \ No newline at end of file diff --git a/lisa-utils/src/main/scala/lisa/utils/fol/Sequents.scala b/lisa-utils/src/main/scala/lisa/utils/fol/Sequents.scala new file mode 100644 index 000000000..63e375e3f --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/fol/Sequents.scala @@ -0,0 +1,253 @@ +package lisa.utils.fol + +import lisa.utils.prooflib.BasicStepTactic +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.ProofTacticLib.ProofTactic + +import lisa.utils.K + +import scala.annotation.showAsInfix + +trait Sequents extends Predef { + + /** Tactic witness for instantiating free variables in a sequent. */ + object SequentInstantiationRule extends ProofTactic + given ProofTactic = SequentInstantiationRule + + /** Represents a sequent in the sequent calculus. + * + * The left side contains the assumptions, and the right side the (alternative) conclusions. + * Corresponds to [[K.Sequent]] in the kernel. + * + * @param left Formulas on the left side of the sequent + * @param right Formulas on the right side of the sequent + */ + case class Sequent(left: Set[Expr[Prop]], right: Set[Expr[Prop]]) extends LisaObject{ + /** The underlying kernel sequent. 
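+ * For example, for formulas `a` and `b`, `Sequent(Set(a), Set(b)).underlying` would be the kernel sequent `a ⊢ b`.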
*/ + def underlying: lisa.kernel.proof.SequentCalculus.Sequent = K.Sequent(left.map(_.underlying), right.map(_.underlying)) + + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): Sequent = Sequent(left.map(_.substituteUnsafe(m)), right.map(_.substituteUnsafe(m))) + override def substituteWithCheck(m: Map[Variable[?], Expr[?]]): Sequent = + super.substituteWithCheck(m).asInstanceOf[Sequent] + override def substitute(pairs: SubstPair*): Sequent = + super.substitute(pairs*).asInstanceOf[Sequent] + + def freeVars: Set[Variable[?]] = left.flatMap(_.freeVars) ++ right.flatMap(_.freeVars) + def freeTermVars: Set[Variable[Ind]] = left.flatMap(_.freeTermVars) ++ right.flatMap(_.freeTermVars) + def constants: Set[Constant[?]] = left.flatMap(_.constants) ++ right.flatMap(_.constants) + + + + + + /** Instantiate schematic symbols inside this, and produces a kernel proof. + * Namely, if "that" is the result of the substitution, the proof should conclude with "that.underlying", + * using the assumption "this.underlying" at step index -1. + * + * @param map The substitution map + * @return The sequent after substitution and the proof of the substitution + */ + def instantiateWithProof(map: Map[Variable[?], Expr[?]], index: Int): (Sequent, Seq[K.SCProofStep]) = { + (substituteUnsafe(map), instantiateWithProofLikeKernel(map, index)) + + } + + /** Instantiate the quantified variables in the conclusion of the sequent with the given terms. + * The sequent must have a single universally quantified formula on the right side. + */ + def instantiateForallWithProof(args: Seq[Expr[Ind]], index: Int): (Sequent, Seq[K.SCProofStep]) = { + if this.right.size != 1 then throw new IllegalArgumentException("Right side of sequent must be a single universally quantified formula") + this.right.head match { + case r @ App(forall, Abs(x: Variable[Ind], f: Expr[Prop])) => + val t = args.head + val newf = f.substitute(x := t) + val s0 = K.Hypothesis((newf |- newf).underlying, newf.underlying) + val s1 = K.LeftForall((r |- newf).underlying, index + 1, f.underlying, x.underlying, t.underlying) + val s2 = K.Cut((this.left |- newf).underlying, index, index + 2, r.underlying) + if args.tail.isEmpty then (this.left |- newf, Seq(s0, s1, s2)) + else + (this.left |- newf).instantiateForallWithProof(args.tail, index + 3) match { + case (s, p) => (s, Seq(s0, s1, s2) ++ p) + } + + case _ => throw new IllegalArgumentException("Right side of sequent must be a single universally quantified formula") + } + + } + + /** + * Given 3 substitution maps like the kernel accepts, i.e. Substitution of Predicate Connector and Ind schemas, do the substitution + * and produce the (one-step) kernel proof that the result is provable from the original sequent + * + * @param mCon The substitution of connector schemas + * @param mPred The substitution of predicate schemas + * @param mTerm The substitution of function schemas + * @return + */ + def instantiateWithProofLikeKernel( + map: Map[Variable[?], Expr[?]], + index: Int + ): Seq[K.SCProofStep] = { + val premiseSequent = this.underlying + val mapK = map.map((v, e) => (v.underlying, e.underlying)) + val botK = lisa.utils.KernelHelpers.substituteVariablesInSequent(premiseSequent, mapK) + Seq(K.InstSchema(botK, index, mapK)) + } + + + /** Add a formula to the left side of the sequent. */ + infix def +<<(f: Expr[Prop]): Sequent = this.copy(left = this.left + f) + /** Remove a formula from the left side of the sequent. 
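+ * Removal is by plain set difference, e.g. `(a |- b) -<< a` would give `() |- b`; see `removeLeft` below for OL-aware removal.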
*/ + infix def -<<(f: Expr[Prop]): Sequent = this.copy(left = this.left - f) + /** Add a formula to the right side of the sequent. */ + infix def +>>(f: Expr[Prop]): Sequent = this.copy(right = this.right + f) + /** Remove a formula from the right side of the sequent. */ + infix def ->>(f: Expr[Prop]): Sequent = this.copy(right = this.right - f) + /** Add all formulas from the left side of the given sequent to the left side of this sequent. */ + infix def ++<<(s1: Sequent): Sequent = this.copy(left = this.left ++ s1.left) + /** Remove all formulas from the left side of the given sequent from the left side of this sequent. */ + infix def --<<(s1: Sequent): Sequent = this.copy(left = this.left -- s1.left) + /** Add all formulas from the right side of the given sequent to the right side of this sequent. */ + infix def ++>>(s1: Sequent): Sequent = this.copy(right = this.right ++ s1.right) + /** Remove all formulas from the right side of the given sequent from the right side of this sequent. */ + infix def -->>(s1: Sequent): Sequent = this.copy(right = this.right -- s1.right) + /** Add all formulas on the left (and right) of the given sequent to the left (and right) of this sequent. */ + infix def ++(s1: Sequent): Sequent = this.copy(left = this.left ++ s1.left, right = this.right ++ s1.right) + /** Remove all formulas on the left (and right) of the given sequent from the left (and right) of this sequent. */ + infix def --(s1: Sequent): Sequent = this.copy(left = this.left -- s1.left, right = this.right -- s1.right) + + /** Remove all formulas OL-same to the given formula from the left side of the sequent. */ + infix def removeLeft(f: Expr[Prop]): Sequent = this.copy(left = this.left.filterNot(isSame(_, f))) + /** Remove all formulas OL-same to the given formula from the right side of the sequent. */ + infix def removeRight(f: Expr[Prop]): Sequent = this.copy(right = this.right.filterNot(isSame(_, f))) + /** Remove all formulas OL-same to one of the formulas on the left side of the given sequent from the left side of the sequent. */ + infix def removeAllLeft(s1: Sequent): Sequent = this.copy(left = this.left.filterNot(e1 => s1.left.exists(e2 => isSame(e1, e2)))) + /** Remove all formulas OL-same to one of the given formulas from the left side of the sequent. */ + infix def removeAllLeft(s1: Set[Expr[Prop]]): Sequent = this.copy(left = this.left.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) + /** Remove all formulas OL-same to one of the formulas on the right side of the given sequent from the right side of the sequent. */ + infix def removeAllRight(s1: Sequent): Sequent = this.copy(right = this.right.filterNot(e1 => s1.right.exists(e2 => isSame(e1, e2)))) + /** Remove all formulas OL-same to one of the given formulas from the right side of the sequent. */ + infix def removeAllRight(s1: Set[Expr[Prop]]): Sequent = this.copy(right = this.right.filterNot(e1 => s1.exists(e2 => isSame(e1, e2)))) + /** Remove all formulas OL-same to one of the formulas on the left (and right) side of the given sequent from the left (and right) side of the sequent. */ + infix def removeAll(s1: Sequent): Sequent = + this.copy(left = this.left.filterNot(e1 => s1.left.exists(e2 => isSame(e1, e2))), right = this.right.filterNot(e1 => s1.right.exists(e2 => isSame(e1, e2)))) + + /** Add a formula to the left side of the sequent if there is not already a formula OL-same to it. 
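+ * For example, adding `¬¬a` to a left side that already contains `a` would leave the sequent unchanged, since the two formulas are OL-same.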
*/ + infix def addLeftIfNotExists(f: Expr[Prop]): Sequent = if (this.left.exists(isSame(_, f))) this else (this +<< f) + /** Add a formula to the right side of the sequent if there is not already a formula OL-same to it. */ + infix def addRightIfNotExists(f: Expr[Prop]): Sequent = if (this.right.exists(isSame(_, f))) this else (this +>> f) + /** Add all formulas from the left side of the given sequent to the left side of this sequent if there is not already a formula OL-same to it. */ + infix def addAllLeftIfNotExists(s1: Sequent): Sequent = this ++<< s1.copy(left = s1.left.filterNot(e1 => this.left.exists(isSame(_, e1)))) + /** Add all formulas from the right side of the given sequent to the right side of this sequent if there is not already a formula OL-same to it. */ + infix def addAllRightIfNotExists(s1: Sequent): Sequent = this ++>> s1.copy(right = s1.right.filterNot(e1 => this.right.exists(isSame(_, e1)))) + /** Add all formulas on the left (and right) of the given sequent to the left (and right) of this sequent if there is not already a formula OL-same to it. */ + infix def addAllIfNotExists(s1: Sequent): Sequent = + this ++ s1.copy(left = s1.left.filterNot(e1 => this.left.exists(isSame(_, e1))), right = s1.right.filterNot(e1 => this.right.exists(isSame(_, e1)))) + + // OL shorthands + /** Add a formula to the left side of the sequent if there is not already a formula OL-same to it. */ + infix def +(f: Expr[Prop]): Sequent = this addLeftIfNotExists f + /** Remove a formula from the left side of the sequent if there is a formula OL-same to it. */ + infix def -(f: Expr[Prop]): Sequent = this removeLeft f + /** Add a formula to the right side of the sequent if there is not already a formula OL-same to it. */ + infix def +>?(f: Expr[Prop]): Sequent = this addRightIfNotExists f + /** Remove a formula from the right side of the sequent if there is a formula OL-same to it. */ + infix def ->?(f: Expr[Prop]): Sequent = this removeRight f + /** Add all formulas from the left side of the given sequent to the left side of this sequent if there is not already a formula OL-same to it. */ + infix def ++(s1: Sequent): Sequent = this addAllLeftIfNotExists s1 + /** Remove all formulas from the left side of the given sequent from the left side of this sequent. */ + infix def --(s1: Sequent): Sequent = this removeAllLeft s1 + /** Add all formulas from the right side of the given sequent to the right side of this sequent if there is not already a formula OL-same to it. */ + infix def ++>?(s1: Sequent): Sequent = this addAllRightIfNotExists s1 + /** Remove all formulas from the right side of the given sequent from the right side of this sequent. */ + infix def -->?(s1: Sequent): Sequent = this removeAllRight s1 + /** Add all formulas on the left (and right) of the given sequent to the left (and right) of this sequent if there is not already a formula OL-same to it. */ + infix def --?(s1: Sequent): Sequent = this removeAll s1 + /** Add all formulas on the left (and right) of the given sequent to the left (and right) of this sequent if there is not already a formula OL-same to it. */ + infix def ++?(s1: Sequent): Sequent = this addAllIfNotExists s1 + + override def toString = + (if left.size == 0 then "" else if left.size == 1 then left.head.toString else "( " + left.mkString(", ") + " )") + + " ⊢ " + + (if right.size == 0 then "" else if right.size == 1 then right.head.toString else "( " + right.mkString(", ") + " )") + + } + + /** A sequent with empty left and right sides. Logically false. 
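+ * Equivalent to `() |- ()`.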
*/ + val emptySeq: Sequent = Sequent(Set.empty, Set.empty) + + given Conversion[Expr[Prop], Sequent] = f => Sequent(Set.empty, Set(f)) + + /** Returns true if the two expressions are OL-same. */ + def isSame(e1: Expr[?], e2: Expr[?]): Boolean = { + e1.sort == e2.sort && K.isSame(e1.underlying, e2.underlying) + } + + /** Returns true if the two sequents are OL-same. */ + def isSameSequent(sequent1: Sequent, sequent2: Sequent): Boolean = { + K.isSameSequent(sequent1.underlying, sequent2.underlying) + } + + /** Returns true if the first expression OL-implies the second expression. */ + def isImplying[S: Sort](e1: Expr[Prop], e2: Expr[Prop]): Boolean = { + K.isImplying(e1.underlying, e2.underlying) + } + + /** Returns true if the first sequent OL-implies the second sequent. */ + def isImplyingSequent(sequent1: Sequent, sequent2: Sequent): Boolean = { + K.isImplyingSequent(sequent1.underlying, sequent2.underlying) + } + + /** Returns true if for all formulas on `s1`, there is a formula OL-same to it in `s2`. */ + def isSubset[A, B](s1: Set[Expr[A]], s2: Set[Expr[B]]): Boolean = { + K.isSubset(s1.map(_.underlying), s2.map(_.underlying)) + } + + /** Returns true if the two sets are OL-same. */ + def isSameSet[A, B](s1: Set[Expr[A]], s2: Set[Expr[B]]): Boolean = + K.isSameSet(s1.map(_.underlying), s2.map(_.underlying)) + + /** Returns true if `s` contains a formula OL-same to `f`. */ + def contains[A, B](s: Set[Expr[A]], f: Expr[B]): Boolean = { + K.contains(s.map(_.underlying), f.underlying) + } + + /** + * Represents a converter of some object into a set. + * @tparam S The type of elements in that set + * @tparam T The type to convert from + */ + trait FormulaSetConverter[T] { + def apply(t: T): Set[Expr[Prop]] + } + + given FormulaSetConverter[Unit] with { + override def apply(u: Unit): Set[Expr[Prop]] = Set.empty + } + + given FormulaSetConverter[EmptyTuple] with { + override def apply(t: EmptyTuple): Set[Expr[Prop]] = Set.empty + } + + given [H <: Expr[Prop], T <: Tuple](using c: FormulaSetConverter[T]): FormulaSetConverter[H *: T] with { + override def apply(t: H *: T): Set[Expr[Prop]] = c.apply(t.tail) + t.head + } + + given formula_to_set[T <: Expr[Prop]]: FormulaSetConverter[T] with { + override def apply(f: T): Set[Expr[Prop]] = Set(f) + } + + given iterable_to_set[T <: Expr[Prop], I <: Iterable[T]]: FormulaSetConverter[I] with { + override def apply(s: I): Set[Expr[Prop]] = s.toSet + } + + private def any2set[A, T <: A](any: T)(using c: FormulaSetConverter[T]): Set[Expr[Prop]] = c.apply(any) + + extension [A, T1 <: A](left: T1)(using FormulaSetConverter[T1]) { + /** Infix shorthand for constructing a [[Sequent]]. */ + infix def |-[B, T2 <: B](right: T2)(using FormulaSetConverter[T2]): Sequent = Sequent(any2set(left), any2set(right)) + /** Infix shorthand for constructing a [[Sequent]]. 
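+ * For example: {{{ (a, b) |- c // Sequent(Set(a, b), Set(c)) }}} + * for formulas `a`, `b` and `c`.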
*/ + infix def ⊢[B, T2 <: B](right: T2)(using FormulaSetConverter[T2]): Sequent = Sequent(any2set(left), any2set(right)) + } + +} diff --git a/lisa-utils/src/main/scala/lisa/utils/fol/Syntax.scala b/lisa-utils/src/main/scala/lisa/utils/fol/Syntax.scala new file mode 100644 index 000000000..c114dfa9c --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/fol/Syntax.scala @@ -0,0 +1,496 @@ +package lisa.utils.fol + +import lisa.utils.K +import lisa.utils.K.Identifier +import lisa.utils.K.given_Conversion_String_Identifier + + +import scala.annotation.nowarn +import scala.annotation.showAsInfix +import scala.annotation.targetName +import scala.util.Sorting +import lisa.utils.KernelHelpers.freshId + +trait Syntax { + + type IsSort[T] = Sort{type Self = T} + + @showAsInfix + infix type >>:[I, O] = Arrow[I, O] + + /** + * A trait representing a sort. + * + * Sorts are used to classify expressions and are either [[Ind]], [[Prop]], or _ [[Arrow]] _. + */ + trait Sort { + type Self + val underlying: K.Sort + } + + + /** + * The sort of individuals, i.e. sets, numbers, etc. Corresponds to [[K.Ind]] + */ + sealed trait Ind + /** The sort of propositions. Corresponds to [[K.Prop]] */ + sealed trait Prop + /** The sort of expressions which take arguments. Corresponds to [[K.Arrow]]. Can be used as `A >>: B` */ + sealed trait Arrow[A: Sort, B: Sort] + + /** Typeclass asserting that [[Ind]] is a sort */ + given given_TermType: IsSort[Ind] with + val underlying = K.Ind + /** Typeclass asserting that [[Prop]] is a sort */ + given given_FormulaType: IsSort[Prop] with + val underlying = K.Prop + /** Typeclass asserting that [[Arrow]][_, _] is a sort */ + given given_ArrowType[A : Sort as ta, B : Sort as tb]: (IsSort[Arrow[A, B]]) with + val underlying = K.Arrow(ta.underlying, tb.underlying) + + /** + * A pair of a variable and an expression of matching Sort. Used for substitution. + */ + sealed trait SubstPair extends Product { + type S + val _1: Variable[S] + val _2: Expr[S] + } + + /** Concrete implementation of the [[SubstPair]] trait. Done this way to have a type member instead of a type parameter.*/ + private case class ConcreteSubstPair[S1] (_1: Variable[S1], _2: Expr[S1]) extends SubstPair {type S = S1} + + /** Factory object for [[SubstPair]] */ + object SubstPair { + /** Creates a new well-sorted substitution pair. */ + def apply[S1 : Sort](_1: Variable[S1], _2: Expr[S1]): SubstPair {type S = S1} = new ConcreteSubstPair[S1](_1, _2) + def unapply[S1](s: SubstPair{type S = S1}): SubstPair{type S = S1} = s + } + + /** + * Used to cast expressions to a specific sort, without checking. + * Useful when the type is not known at compile time. + */ + def unsafeSortEvidence[S](sort: K.Sort) : IsSort[S] = new Sort { type Self = S; val underlying = sort } + + /** Converts a (Variable, Expr) pair to a SubstPair */ + given [T: Sort]: Conversion[(Variable[T], Expr[T]), SubstPair{type S = T}] = s => SubstPair(s._1, s._2) + + + /** A match type that extracts the arguments of `S1` such that `Target` is the return sort. + * + * For example, + * + * `ArgsTo[Arrow[A, B], B]` will be `Expr[A] *: EmptyTuple`. + * + * `ArgsTo[Arrow[A, Arrow[B, C]], C]` will be `Expr[A] *: Expr[B] *: EmptyTuple`. + * + */ + type ArgsTo[S1, Target] <: NonEmptyTuple = S1 match { + case Arrow[a, Target] => Tuple1[Expr[a]] + case Arrow[a, b] => Expr[a] *: ArgsTo[b, Target] + } + + + /** + * An object that supports substitution of variables with expressions. + * Typically, [[Expr]] and [[Sequent]] are LisaObjects. 
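+ * For example, `e.substitute(x := t)` replaces the free occurrences of the variable `x` in `e` by the expression `t` (of the same sort), renaming bound variables where needed.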
+ */ + trait LisaObject { + /** Performs simultaneous substitution of variables with expressions. + * + * If the substitution is not well-sorted, an exception may be thrown when constructing the result. + * + * @param m The map of substitutions + * @return The result of the substitution + * + * @see [[substituteWithCheck]] + * @see [[substitute]] + */ + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): LisaObject + + /** Performs simultaneous substitution of variables with expressions. First checks that the sorts of the substitution map. + * + * @param m The map of substitutions + * @return The result of the substitution + * @throws IllegalArgumentException if the substitution is not well-sorted + */ + def substituteWithCheck(m: Map[Variable[?], Expr[?]]): LisaObject = { + if m.forall((k, v) => k.sort == v.sort) then + substituteUnsafe(m) + else + val culprit = m.find((k, v) => k.sort != v.sort).get + throw new IllegalArgumentException("Sort mismatch in substitution: " + culprit._1 + " -> " + culprit._2) + } + + /** Performs simultaneous substitution of variables with expressions. + * + * @param pairs The pairs of substitutions + * @return The result of the substitution + */ + def substitute(pairs: SubstPair*): LisaObject = + substituteWithCheck(pairs.view.map(s => (s._1, s._2)).toMap) + + /** Returns the set of free variables in the expression */ + def freeVars: Set[Variable[?]] + /** Returns the set of free variables of sort [[Ind]] in the expression */ + def freeTermVars: Set[Variable[Ind]] + /** Returns the set of constants in the expression */ + def constants: Set[Constant[?]] + } + + /** + * A Lisa expression. + * + * Expressions are elements of the simply typed lambda calculus with base types (called [[Sort]]) [[Ind]] and [[Prop]]. + * + * @tparam S The sort of the expression, usually [[Ind]], [[Prop]], or an [[Arrow]]. + */ + sealed trait Expr[S] extends LisaObject { + /** The sort of the expression */ + val sort: K.Sort + private val arity = K.flatTypeParameters(sort).size + + /** The underlying kernel expression */ + def underlying: K.Expression + + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): Expr[S] + override def substituteWithCheck(m: Map[Variable[?], Expr[?]]): Expr[S] = + super.substituteWithCheck(m).asInstanceOf[Expr[S]] + override def substitute(pairs: SubstPair*): Expr[S] = + super.substitute(pairs*).asInstanceOf[Expr[S]] + + /** Extracts the applied arguments of the expression. + * + * For example, let `f :: A >>: B >>: C`. then + * {{{ + * f(a)(b)(c) match { + * case f(a, b, c) => ... + * }}} + * + */ + def unapplySeq[Target](e: Expr[Target]): Option[ArgsTo[S, Target]] = + def inner[Target](e: Expr[Target]): Option[ArgsTo[S, Target]] = e match + case App(f2, arg) if this == f2 => Some((arg *: EmptyTuple).asInstanceOf[ArgsTo[S, Target]]) + case App(f2, arg) => inner(f2).map(value => (arg *: value).asInstanceOf[ArgsTo[S, Target]]) + case _ => None + inner[Target](e) + + @targetName("unapplySeq2") + def unapplySeq(e: Expr[?]): Option[Seq[Expr[?]]] = Multiapp(this).unapply(e) + + /** Default String representation of the expression, with potential arguments. */ + final def defaultMkString(args: Seq[Expr[?]]): String = s"$this(${args.map(a => s"${a}").mkString(", ")})" + /** Default String representation of the expression, with potential arguments, encapsulated by parenthesis if necessary. */ + final def defaultMkStringSeparated(args: Seq[Expr[?]]): String = s"(${defaultMkString(args)})" + /** String representation of the expression, with potential arguments. 
Can be updated.*/ + var mkString: Seq[Expr[?]] => String = defaultMkString + /** String representation of the expression, with potential arguments, encapsulated by parenthesis if necessary. Can be updated.*/ + var mkStringSeparated: Seq[Expr[?]] => String = defaultMkStringSeparated + + + /** Construct an unsafe application. If the sorts don't match, will throw an exception. + * @param arg The argument to apply to `this`. + * @throws IllegalArgumentException if the sorts don't match. + */ + def #@(arg: Expr[?]): RetExpr[S] = + App.unsafe(this, arg).asInstanceOf + + /** Iteratively construct an unsafe application. If the sorts don't match, will throw an exception. + * @param args The arguments to apply to `this`. + * @throws IllegalArgumentException if the sorts don't match. + */ + def #@@(args: Seq[Expr[?]]): Expr[?] = + Multiapp.unsafe(this, args) + + /** A structural representation of the expression for debugging purposes. + * + * Equal to the default representation of case classes. + */ + def structuralToString: String = this match + case Variable(id) => s"Variable($id, $sort)" + case Constant(id) => s"Constant($id, $sort)" + case App(f, arg) => s"App(${f.structuralToString}, ${arg.structuralToString})" + case Abs(v, body) => s"Abs(${v.structuralToString}, ${body.structuralToString})" + } + + /** (Inline) extractor object for unsafe applications */ + object #@ { + /** Extracts the function and argument of an application. + * Example: + * {{{ + * singleton(∅) match + * case f #@ x => ... + * }}} + */ + def unapply[S, T](e: Expr[T]): Option[(Expr[Arrow[S, T]], Expr[S])] = (e: @unchecked) match + case App[S, T](f, arg) => Some((f, arg)) + case _ => None + } + + /** Well-sorted application constructor. Used when sorts are known at compile time. */ + extension [S, T](f: Expr[Arrow[S, T]]) + def apply(using IsSort[S], IsSort[T])(arg: Expr[S]): Expr[T] = App(f, arg) + + + /** match type computing the return sort of an arrow sort. */ + type RetExpr[T] <: Expr[?] = T match { + case Arrow[a, b] => Expr[b] + } + + /** Utility class for expressions taking multiple arguments. */ + class Multiapp(f: Expr[?]): + /** Extractor for expressions with multiple arguments. */ + def unapply (e: Expr[?]): Option[Seq[Expr[?]]] = + def inner(e: Expr[?]): Option[List[Expr[?]]] = e match + case App(f2, arg) if f == f2 => Some(List(arg)) + case App(f2, arg) => inner(f2).map(arg :: _) + case _ => None + inner(e).map(_.reverse) + + /** Utility object for expressions taking multiple arguments. */ + object Multiapp: + def unsafe(f: Expr[?], args: Seq[Expr[?]]): Expr[?] = + args.foldLeft(f)((f, arg) => App.unsafe(f, arg)) + def unapply(e: Expr[?]): Some[(Expr[?], Seq[Expr[?]])] = Some(unfoldAllApp(e)) + + /** Computes the list of consecutively applied arguments of an expression. + * Example: + * {{{ + * unfoldAllApp(Abs(x, f(x))(a)(b)(c)) == (Abs(x, f(x)), List(a, b, c)) + * + * @param e + * @return + */ + def unfoldAllApp(e:Expr[?]): (Expr[?], List[Expr[?]]) = + def rec(e: Expr[?]): (Expr[?], List[Expr[?]]) = e match + case App(f, arg) => + val (f1, args) = unfoldAllApp(f) + (f1, arg :: args ) + case _ => (e, Nil) + val (f, args) = rec(e) + (f, args.reverse) + + + + + /** + * A variable of a given sort. + * + * Variables are leaf [[Expr]]essions that can be bound and instantiated. + * + * Corresponds to [[K.Variable]]. + * + * @tparam S The sort of the variable. 
+ * + */ + case class Variable[S : Sort as sortEv](id: K.Identifier) extends Expr[S] { + /** The runtime sort of the variable */ + val sort: K.Sort = sortEv.underlying + /** The underlying kernel variable */ + val underlying: K.Variable = K.Variable(id, sort) + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): Expr[S] = m.getOrElse(this, this).asInstanceOf[Expr[S]] + override def substituteWithCheck(m: Map[Variable[?], Expr[?]]): Expr[S] = + super.substituteWithCheck(m).asInstanceOf[Expr[S]] + override def substitute(pairs: SubstPair*): Expr[S] = + super.substitute(pairs*).asInstanceOf[Expr[S]] + def freeVars: Set[Variable[?]] = Set(this) + def freeTermVars: Set[Variable[Ind]] = if sort == K.Ind then Set(this.asInstanceOf) else Set.empty + def constants: Set[Constant[?]] = Set.empty + /** Returns a variable of the same type with the given identifier */ + def rename(newId: K.Identifier): Variable[S] = Variable(newId) + /** Returns a variable with a fresh identifier, with respect to the given expressions */ + def freshRename(existing: Iterable[Expr[?]]): Variable[S] = { + val newId = K.freshId(existing.flatMap(_.freeVars.map(_.id)), id) + Variable(newId) + } + override def toString(): String = id.toString + /** Constructs a [[SubstPair]]. Used for substitutions and instantiations. */ + def :=(replacement: Expr[S]) = SubstPair(this, replacement) + } + + /** Factory object for [[Variable]]. + */ + object Variable { + def unsafe(id: String, sort: K.Sort): Variable[?] = Variable(id)(using unsafeSortEvidence(sort)) + /** Constructs a variable whose sort is only known at runtime. */ + def unsafe(id: Identifier, sort: K.Sort): Variable[?] = Variable(id)(using unsafeSortEvidence(sort)) + } + + + /** + * A constant of a given sort. + * + * Constants are leaf [[Expr]]essions that cannot be bound or instantiated. They are usually user-defined, or fixed by a theory. + * + */ + case class Constant[S : Sort as sortEv](id: K.Identifier) extends Expr[S] { + val sort: K.Sort = sortEv.underlying + private var infix: Boolean = false + /** Set the variable to be printed infix. + * + * Does not affect the input syntax. To allow infix input syntax, define an `extension`. + */ + def printInfix(): Constant[S] = + infix = true + this + val underlying: K.Constant = K.Constant(id, sort) + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): Constant[S] = this + override def substituteWithCheck(m: Map[Variable[?], Expr[?]]): Expr[S] = + super.substituteWithCheck(m).asInstanceOf[Constant[S]] + override def substitute(pairs: SubstPair*): Constant[S] = + super.substitute(pairs*).asInstanceOf[Constant[S]] + def freeVars: Set[Variable[?]] = Set.empty + def freeTermVars: Set[Variable[Ind]] = Set.empty + def constants: Set[Constant[?]] = Set(this) + /** Returns a constant with the given identifier */ + def rename(newId: K.Identifier): Constant[S] = Constant(newId) + // Special handling for inxfix constants + override def toString(): String = id.toString + mkString = (args: Seq[Expr[?]]) => + if infix && args.size == 2 then + s"${args(0)} $this ${args(1)}" + else if infix & args.size > 2 then + s"(${args(0)} $this ${args(1)})${args.drop(2).map(_.mkStringSeparated).mkString})" + else + defaultMkString(args) + mkStringSeparated = (args: Seq[Expr[?]]) => + if infix && args.size == 2 then + s"${args(0)} $this ${args(1)}" + else if infix & args.size > 2 then + s"(${args(0)} $this ${args(1)})${args.drop(2).map(_.mkStringSeparated).mkString})" + else + defaultMkStringSeparated(args) + + /** Returns the constant as a Binder. 
*/ + def asBinder[T1: Sort, T2: Sort, T3: Sort](using S =:= Arrow[Arrow[T1, T2], T3]): Binder[T1, T2, T3] & Constant[Arrow[Arrow[T1, T2], T3]] = + new Binder[T1, T2, T3](id) + } + + /** Factory object for [[Constant]] with sort unknown at compile time.*/ + object Constant { + /** Constructs a constant with the given identifier and sort. */ + def unsafe(id: String, sort: K.Sort): Constant[?] = Constant(id)(using unsafeSortEvidence(sort)) + } + + /** A special kind of constant of type `(A >>: B) >>: C`. + * + * The binder "binds" a variable of type `A` in an expression of type `B` to produce an expression of type `C`. + * + * Examples: ∀ :: (Ind >>: Prop) >>: Prop, ∃ :: (Ind >>: Prop) >>: Prop, ϵ :: (Ind >>: Prop) >>: Ind + * + * @tparam S The sort of the variable + * @tparam T The sort of the body + * @tparam T3 The sort of the result + */ + class Binder[S: Sort, T: Sort, T3: Sort](id: K.Identifier) extends Constant[Arrow[Arrow[S, T], T3]](id) { + /** Binds `v` in `e`. */ + def apply(v1: Variable[S], e: Expr[T]): App[Arrow[S, T], T3] = App(this, Abs(v1, e)) + @targetName("unapplyBinder") + /** Extract the variable and body of the binder. */ + def unapply(e: Expr[?]): Option[(Variable[S], Expr[T])] = e match { + case App(f:Expr[Arrow[Arrow[S, T], T3]], Abs(v, e)) if f == this => Some((v, e)) + case _ => None + } + mkString = (args: Seq[Expr[?]]) => + if args.size == 0 then toString + else args(0) match { + case Abs(v, e) => s"$id($v, $e)${args.drop(1).map(_.mkStringSeparated).mkString}" + case _ => defaultMkString(args) + } + mkStringSeparated = (args: Seq[Expr[?]]) => + args match { + case Seq(Abs(v, e)) => s"($id($v, $e))" + case _ => defaultMkStringSeparated(args) + } + } + + + /** + * An application of a functional expression to an argument. + * + * The sorts must match: `f.sort` must be of the form `A >>: B` and `arg.sort` must be `A`. + */ + case class App[S, T](f: Expr[Arrow[S, T]], arg: Expr[S]) extends Expr[T] { + val sort: K.Sort = f.sort match + case K.Arrow(from, to) if from == arg.sort => to + case _ => throw new IllegalArgumentException("Sort mismatch. f: " + f.sort + ", arg: " + arg.sort) + + val underlying: K.Application = K.Application(f.underlying, arg.underlying) + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): App[S, T] = App[S, T](f.substituteUnsafe(m), arg.substituteUnsafe(m)) + override def substituteWithCheck(m: Map[Variable[?], Expr[?]]): App[S, T] = + super.substituteWithCheck(m).asInstanceOf[App[S, T]] + override def substitute(pairs: SubstPair*): App[S, T] = + super.substitute(pairs*).asInstanceOf[App[S, T]] + def freeVars: Set[Variable[?]] = f.freeVars ++ arg.freeVars + def freeTermVars: Set[Variable[Ind]] = f.freeTermVars ++ arg.freeTermVars + def constants: Set[Constant[?]] = f.constants ++ arg.constants + override def toString(): String = + val (f, args) = unfoldAllApp(this) + f.mkString(args) + } + + /** Factory object for [[App]] when the sorts are unknown at compile time. */ + object App { + /** + * Constructs an application of `f` to `arg`. + * + * @throws IllegalArgumentException if the sorts don't match. + */ + def unsafe(f: Expr[?], arg: Expr[?]): Expr[?] = + val rsort = K.legalApplication(f.sort, arg.sort) + rsort match + case Some(to) => + App(f.asInstanceOf, arg) + case None => throw new IllegalArgumentException(s"Cannot apply $f of sort ${f.sort} to $arg of sort ${arg.sort}") + } + + + /** A lambda abstraction of a variable over an expression. + * + * An abstraction of a variable of sort `A` over a body of sort `B` has sort `A >>: B`. 
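+ * For example, `Abs(x, ϕ)` with `x: Variable[Ind]` and `ϕ: Expr[Prop]` is an `Expr[Ind >>: Prop]`.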
+ */ + case class Abs[S, T](v: Variable[S], body: Expr[T]) extends Expr[Arrow[S, T]] { + val sort: K.Sort = K.Arrow(v.sort, body.sort) + val underlying: K.Lambda = K.Lambda(v.underlying, body.underlying) + def substituteUnsafe(m: Map[Variable[?], Expr[?]]): Abs[S, T] = + lazy val frees = m.values.flatMap(_.freeVars).toSet + if m.keySet.contains(v) || frees.contains(v) then + // rename + val v1: Variable[S] = Variable.unsafe(freshId(frees.map(_.id), v.id), v.sort).asInstanceOf + new Abs(v1, body.substituteUnsafe(Map(v -> v1))).substituteUnsafe(m) + else + new Abs(v, body.substituteUnsafe(m)) + override def substituteWithCheck(m: Map[Variable[?], Expr[?]]): Abs[S, T] = + super.substituteWithCheck(m).asInstanceOf[Abs[S, T]] + override def substitute(pairs: SubstPair*): Abs[S, T] = + super.substitute(pairs*).asInstanceOf[Abs[S, T]] + def freeVars: Set[Variable[?]] = body.freeVars - v + def freeTermVars: Set[Variable[Ind]] = body.freeTermVars.filterNot(_ == v) + def constants: Set[Constant[?]] = body.constants + override def toString(): String = s"Abs($v, $body)" + } + + /** Factory object for [[Abs]] when the sorts are unknown at compile time. */ + object Abs: + /** + * Constructs a lambda abstraction of `v` over `body`. Always succeeds. + */ + def unsafe(v: Variable[?], body: Expr[?]): Expr[?] = + new Abs(v.asInstanceOf, body.asInstanceOf) + + def apply[S1, S2](v: Variable[S1], body: Expr[S2]): Abs[S1, S2] = new Abs(v, body) + + def apply(xs: Seq[Variable[?]], t: Expr[?]): Expr[?] = xs.foldRight(t)((x, t) => new Abs(x, t)) + + /** Alias for [[Abs]] */ + val lambda = Abs + + + + +} + + + + diff --git a/lisa-utils/src/main/scala/lisa/utils/memoization/Memoized.scala b/lisa-utils/src/main/scala/lisa/utils/memoization/Memoized.scala index 43afb5527..b010cabed 100644 --- a/lisa-utils/src/main/scala/lisa/utils/memoization/Memoized.scala +++ b/lisa-utils/src/main/scala/lisa/utils/memoization/Memoized.scala @@ -8,9 +8,9 @@ case class MemoizationStats(hits: Int, miss: Int, faulted: Int): case object InfiniteRecursionDetectedException extends Exception class Memoized[From, To](fun: From => To) extends Function[From, To]: - private val visited = scala.collection.mutable.HashSet.empty[From] - private val memory = scala.collection.mutable.HashMap.empty[From, To] - private var stats = MemoizationStats(0, 0, 0) + protected val visited = scala.collection.mutable.HashSet.empty[From] + protected val memory = scala.collection.mutable.HashMap.empty[From, To] + protected var stats = MemoizationStats(0, 0, 0) protected def handleFault(): To = throw InfiniteRecursionDetectedException diff --git a/lisa-utils/src/main/scala/lisa/utils/package.scala b/lisa-utils/src/main/scala/lisa/utils/package.scala deleted file mode 100644 index 0dd12e8fc..000000000 --- a/lisa-utils/src/main/scala/lisa/utils/package.scala +++ /dev/null @@ -1,4 +0,0 @@ -package lisa.utils - -export lisa.utils.parsing.{FOLParser, FOLPrinter, Parser, Printer, ProofPrinter} -//export lisa.utils.KernelHelpers.{*, given} diff --git a/lisa-utils/src/main/scala/lisa/utils/parsing/Parser.scala b/lisa-utils/src/main/scala/lisa/utils/parsing/Parser.scala deleted file mode 100644 index 806caf3da..000000000 --- a/lisa-utils/src/main/scala/lisa/utils/parsing/Parser.scala +++ /dev/null @@ -1,689 +0,0 @@ -package lisa.utils.parsing - -import lisa.kernel.fol.FOL -import lisa.kernel.fol.FOL.* -import lisa.kernel.fol.FOL.equality -import lisa.kernel.proof.SequentCalculus.* -import lisa.utils.KernelHelpers.False -import 
lisa.utils.KernelHelpers.given_Conversion_Identifier_String -import lisa.utils.KernelHelpers.given_Conversion_String_Identifier -import lisa.utils.parsing.ParsingUtils -import scallion.* -import scallion.util.Unfolds.unfoldRight -import silex.* - -import scala.collection.mutable - -val FOLParser = Parser(SynonymInfo.empty, "=" :: Nil, Nil) - -enum Associativity { - case Left, Right, None -} - -//TODO: Deal with errors in parsing. -class ParsingException(parsedString: String, errorMessage: String) extends lisa.utils.LisaException(errorMessage) { - def showError: String = "" -} - -abstract class ParserException(msg: String) extends Exception(msg) - -class UnexpectedInputException(input: String, position: (Int, Int), expected: String) - extends ParserException( - s""" - |$input - |${" " * position._1 + "^" * (position._2 - position._1)} - |Unexpected input: expected $expected - |""".stripMargin - ) - -object UnexpectedEndOfInputException extends Exception("Unexpected end of input") - -object UnreachableException extends ParserException("Internal error: expected unreachable") - -class PrintFailedException(inp: Sequent | Formula | Term) extends ParserException(s"Printing of $inp failed unexpectedly") - -/** - * @param synonymToCanonical information about synonyms that correspond to the same FunctionLabel / AtomicLabel. - * Can be constructed with [[lisa.utils.SynonymInfoBuilder]] - * @param infixPredicates list of infix predicates' names - * @param infixFunctions list of infix functions and their associativity in the decreasing order of priority - */ -class Parser( - synonymToCanonical: SynonymInfo, - infixPredicates: List[String], - infixFunctions: List[(String, Associativity)] -) { - private val infixPredicateSet = infixPredicates.toSet - private val infixFunctionSet = infixFunctions.map(_._1).toSet - private val infixSet = infixPredicateSet ++ infixFunctionSet - - /** - * Parses a sequent from a string. A sequent consists of the left and right side, separated by `⊢` or `|-`. - * Left and right sides consist of formulas, separated by `;`. - * - * @see Parser#parseFormula - * @param s string representation of the sequent - * @return parsed sequent on success, throws an exception when unexpected input or end of input. - */ - def parseSequent(s: String): Sequent = - try { - extractParseResult(s, SequentParser.parseTermulaSequent(SequentLexer(s.iterator))).toSequent - } catch { - case e: ExpectedFormulaGotTerm => throw new UnexpectedInputException(s, e.range, "formula") - case e: ExpectedTermGotFormula => throw new UnexpectedInputException(s, e.range, "term") - } - - /** - * Parses a formula from a string. A formula can be: - *- a bound formula: `∀'x. f`, `∃'x. f`, `∃!'x. f`. A binder binds the entire formula until the end of the scope (a closing parenthesis or the end of string). - *
- two formulas, connected by `⇔` or `⇒`. Iff / implies bind less tightly than and / or.
-   *
- a conjunction or disjunction of an arbitrary number of formulas. `∧` binds tighter than `∨`.
-   *
- negated formula. - *
- schematic connector formula: `?c(f1, f2, f3)`. - *
- equality of two formulas: `f1 = f2`. - *
- a constant `p(a)` or schematic `'p(a)` predicate application to an arbitrary number of term arguments.
-   *
- boolean constant: `⊤` or `⊥`. - * - * @param s string representation of the formula - * @return parsed formula on success, throws an exception when unexpected input or end of input. - */ - def parseFormula(s: String): Formula = - try { - extractParseResult(s, SequentParser.parseTermula(SequentLexer(s.iterator))).toFormula - } catch { - case e: ExpectedFormulaGotTerm => throw new UnexpectedInputException(s, e.range, "formula") - case e: ExpectedTermGotFormula => throw new UnexpectedInputException(s, e.range, "term") - } - - /** - * Parses a term from a string. A term is a constant `c`, a schematic variable `'x` or an application of a constant `f(a)` - * or a schematic `'f(a)` function to other terms. - * - * @param s string representation of the term - * @return parsed term on success, throws an exception when unexpected input or end of input. - */ - def parseTerm(s: String): Term = - try { - extractParseResult(s, SequentParser.parseTermula(SequentLexer(s.iterator))).toTerm - } catch { - case e: ExpectedFormulaGotTerm => throw new UnexpectedInputException(s, e.range, "formula") - case e: ExpectedTermGotFormula => throw new UnexpectedInputException(s, e.range, "term") - } - - private def extractParseResult[T](input: String, r: SequentParser.ParseResult[T]): T = r match { - case SequentParser.Parsed(value, _) => value - case SequentParser.UnexpectedToken(token, rest) => throw UnexpectedInputException(input, token.range, "one of " + rest.first.mkString(", ")) - case SequentParser.UnexpectedEnd(_) => throw UnexpectedEndOfInputException - } - - /** - * Returns a string representation of the sequent. Empty set of formulas on the left side is not printed. - * Empty set of formulas on the right side is represented as `⊥` (false). - * - * @param s sequent to print - * @return string representation of the sequent, or the smallest component thereof, on which printing failed - */ - def printSequent(s: Sequent): String = SequentParser - .printTermulaSequent(s.toTermulaSequent) - .getOrElse({ - // attempt to print individual formulas. 
It might throw a more detailed PrintFailedException - s.left.foreach(printFormula) - s.right.foreach(printFormula) - throw PrintFailedException(s) - }) - - /** - * @param f formula to print - * @return string representation of the formula, or the smallest component thereof, on which printing failed - */ - def printFormula(f: Formula): String = SequentParser - .printTermula(f.toTermula) - .getOrElse({ - f match { - case AtomicFormula(_, args) => args.foreach(printTerm) - case ConnectorFormula(_, args) => args.foreach(printFormula) - case BinderFormula(_, _, inner) => printFormula(inner) - } - throw PrintFailedException(f) - }) - - /** - * @param t term to print - * @return string representation of the term, or the smallest component thereof, on which printing failed - */ - def printTerm(t: Term): String = SequentParser - .printTermula(t.toTermula) - .getOrElse({ - t match { - case Term(_, args) => args.foreach(printTerm) - case VariableTerm(_) => () - } - throw PrintFailedException(t) - }) - - private val UNKNOWN_RANGE = (-1, -1) - - private[Parser] object SequentLexer extends Lexers with CharLexers { - sealed abstract class FormulaToken(stringRepr: String) { - def printString: String = stringRepr - val range: (Int, Int) - } - - case class ForallToken(range: (Int, Int)) extends FormulaToken(Forall.id) - - case class ExistsOneToken(range: (Int, Int)) extends FormulaToken(ExistsOne.id) - - case class ExistsToken(range: (Int, Int)) extends FormulaToken(Exists.id) - - case class DotToken(range: (Int, Int)) extends FormulaToken(".") - - case class AndToken(range: (Int, Int), prefix: Boolean) extends FormulaToken(And.id) - - case class OrToken(range: (Int, Int), prefix: Boolean) extends FormulaToken(Or.id) - - case class ImpliesToken(range: (Int, Int)) extends FormulaToken(Implies.id) - - case class IffToken(range: (Int, Int)) extends FormulaToken(Iff.id) - - case class NegationToken(range: (Int, Int)) extends FormulaToken(Neg.id) - - case class TrueToken(range: (Int, Int)) extends FormulaToken("⊤") - - case class FalseToken(range: (Int, Int)) extends FormulaToken("⊥") - - // Constant functions and predicates - case class ConstantToken(id: String, range: (Int, Int)) extends FormulaToken(id) - - // Variables, schematic functions and predicates - case class SchematicToken(id: String, range: (Int, Int)) extends FormulaToken(schematicSymbol + id) - - // This token is not required for parsing, but is needed to print spaces around infix operations - case class InfixToken(id: String, range: (Int, Int)) extends FormulaToken(id) - - // Schematic connector (prefix notation is expected) - case class SchematicConnectorToken(id: String, range: (Int, Int)) extends FormulaToken(schematicConnectorSymbol + id) - - case class ParenthesisToken(isOpen: Boolean, range: (Int, Int)) extends FormulaToken(if (isOpen) "(" else ")") - - case class CommaToken(range: (Int, Int)) extends FormulaToken(",") - - case class SemicolonToken(range: (Int, Int)) extends FormulaToken(";") - - case class SequentToken(range: (Int, Int)) extends FormulaToken("⊢") - - case class SpaceToken(range: (Int, Int)) extends FormulaToken(" ") - - case class UnknownToken(str: String, range: (Int, Int)) extends FormulaToken(str) - - type Token = FormulaToken - type Position = Int - - val escapeChar = '`' - val pathSeparator = '$' - private val schematicSymbol = "'" - private val schematicConnectorSymbol = "?" 
- - private val letter = elem(_.isLetter) - private val variableLike = letter ~ many(elem(c => c.isLetterOrDigit || c == '_')) - private val number = many1(elem(_.isDigit)) - private val escaped = elem(escapeChar) ~ many1(elem(_ != escapeChar)) ~ elem(escapeChar) - private val arbitrarySymbol = elem(!_.isWhitespace) - private val symbolSequence = many1(oneOf("*/+-^:<>#%&@")) - private val path = many1(many1(letter) ~ elem(pathSeparator)) - - private val lexer = Lexer( - elem('∀') |> { (_, r) => ForallToken(r) }, - word("∃!") |> { (_, r) => ExistsOneToken(r) }, - elem('∃') |> { (_, r) => ExistsToken(r) }, - elem('.') |> { (_, r) => DotToken(r) }, - elem('∧') | word("/\\") |> { (_, r) => AndToken(r, false) }, - elem('∨') | word("\\/") |> { (_, r) => OrToken(r, false) }, - word(Implies.id.name) | word("=>") | word("==>") | elem('⇒') |> { (_, r) => ImpliesToken(r) }, - word(Iff.id.name) | word("<=>") | word("<==>") | elem('⟷') | elem('⇔') |> { (_, r) => IffToken(r) }, - elem('⊤') | elem('T') | word("True") | word("true") |> { (_, r) => TrueToken(r) }, - elem('⊥') | elem('F') | word("False") | word("false") |> { (_, r) => FalseToken(r) }, - elem('¬') | elem('!') |> { (_, r) => NegationToken(r) }, - elem('(') |> { (_, r) => ParenthesisToken(true, r) }, - elem(')') |> { (_, r) => ParenthesisToken(false, r) }, - elem(',') |> { (_, r) => CommaToken(r) }, - elem(';') |> { (_, r) => SemicolonToken(r) }, - elem('⊢') | word("|-") |> { (_, r) => SequentToken(r) }, - many1(whiteSpace) |> { (_, r) => SpaceToken(r) }, - word(schematicSymbol) ~ variableLike |> { (cs, r) => - // drop the ' - SchematicToken(cs.drop(1).mkString, r) - }, - word(schematicConnectorSymbol) ~ variableLike |> { (cs, r) => - SchematicConnectorToken(cs.drop(1).mkString, r) - }, - // Currently the path is merged into the id on the lexer level. When qualified ids are supported, this should be - // lifted into the parser. 
- opt(path) ~ (variableLike | number | arbitrarySymbol | symbolSequence | escaped) |> { (cs, r) => ConstantToken(cs.filter(_ != escapeChar).mkString, r) } - ) onError { (cs, r) => - UnknownToken(cs.mkString, r) - } - - def apply(it: Iterator[Char]): Iterator[Token] = { - val source = Source.fromIterator(it, IndexPositioner) - lexer - .spawn(source) - .map({ - case ConstantToken(id, r) if infixSet.contains(id) => InfixToken(id, r) - case t => t - }) - .filter(!_.isInstanceOf[SpaceToken]) - } - - def unapply(tokens: Iterator[Token]): String = { - val space = " " - tokens - .foldLeft(Nil: List[String]) { - // Sequent token is the only separator that can have an empty left side; in this case, omit the space before - case (Nil, s: SequentToken) => s.printString :: space :: Nil - case (l, t) => - t match { - // do not require spaces - - case _: ForallToken | _: ExistsToken | _: ExistsOneToken | _: NegationToken | _: ConstantToken | _: SchematicToken | _: SchematicConnectorToken | _: TrueToken | _: FalseToken | - _: ParenthesisToken | _: SpaceToken | AndToken(_, true) | OrToken(_, true) => - l :+ t.printString - // space after: separators - case _: DotToken | _: CommaToken | _: SemicolonToken => l :+ t.printString :+ space - // space before and after: infix, connectors, sequent symbol - - case _: InfixToken | _: AndToken | _: OrToken | _: ImpliesToken | _: IffToken | _: SequentToken => l :+ space :+ t.printString :+ space - case _: UnknownToken => throw UnreachableException - } - } - .mkString - } - } - - case class RangedLabel(folLabel: FOL.FormulaLabel, range: (Int, Int)) - - abstract class TermulaConversionError(range: (Int, Int)) extends Exception - case class ExpectedTermGotFormula(range: (Int, Int)) extends TermulaConversionError(range) - case class ExpectedFormulaGotTerm(range: (Int, Int)) extends TermulaConversionError(range) - - /** - * Termula represents parser-level union of terms and formulas. - * After parsing, the termula can be converted either to a term or to a formula: - *
- to be converted to a term, the termula must consist only of function applications;
-   *
- to be converted to a formula, `args` of the termula are interpreted as formulas until a predicate application is observed; - * `args` of the predicate application are terms. - * - *
Convention: since the difference between `TermLabel`s and `AtomicLabel`s is purely semantic and Termula needs - * FormulaLabels (because of connector and binder labels), all TermLabels are translated to the corresponding - * PredicateLabels (see [[toTermula]]). - * - * @param label `AtomicLabel` for predicates and functions, `ConnectorLabel` or `BinderLabel` - * @param args Predicate / function arguments for `AtomicLabel`, connected formulas for `ConnectorLabel`, - * `Seq(VariableFormulaLabel(bound), inner)` for `BinderLabel` - */ - case class Termula(label: RangedLabel, args: Seq[Termula], range: (Int, Int)) { - def toTerm: Term = label.folLabel match { - case _: BinderLabel | _: ConnectorLabel => throw ExpectedTermGotFormula(range) - case t: ConstantAtomicLabel => Term(ConstantFunctionLabel(t.id, t.arity), args.map(_.toTerm)) - case t: SchematicPredicateLabel => Term(SchematicFunctionLabel(t.id, t.arity), args.map(_.toTerm)) - case v: VariableFormulaLabel => Term(VariableLabel(v.id), Seq()) - } - - def toFormula: Formula = label.folLabel match { - case p: AtomicLabel => AtomicFormula(p, args.map(_.toTerm)) - case c: ConnectorLabel => ConnectorFormula(c, args.map(_.toFormula)) - case b: BinderLabel => - args match { - case Seq(Termula(RangedLabel(v: VariableFormulaLabel, _), Seq(), _), f) => BinderFormula(b, VariableLabel(v.id), f.toFormula) - // wrong binder format. Termulas can only be constructed within parser and they are expected to always be constructed according - // to the format above - case _ => throw UnreachableException - } - } - } - - extension (f: Formula) { - - def toTermula: Termula = { - given Conversion[FOL.FormulaLabel, RangedLabel] with { - def apply(f: FOL.FormulaLabel): RangedLabel = RangedLabel(f, UNKNOWN_RANGE) - } - - f match { - case AtomicFormula(label, args) => Termula(label, args.map(_.toTermula), UNKNOWN_RANGE) - // case ConnectorFormula(And | Or, Seq(singleArg)) => singleArg.toTermula - case ConnectorFormula(label, args) => Termula(label, args.map(_.toTermula), UNKNOWN_RANGE) - case BinderFormula(label, bound, inner) => Termula(label, Seq(Termula(VariableFormulaLabel(bound.id), Seq(), UNKNOWN_RANGE), inner.toTermula), UNKNOWN_RANGE) - } - } - } - - extension (t: Term) { - def toTermula: Termula = { - given Conversion[FOL.FormulaLabel, RangedLabel] with { - def apply(f: FOL.FormulaLabel): RangedLabel = RangedLabel(f, UNKNOWN_RANGE) - } - val newLabel = t.label match { - case ConstantFunctionLabel(id, arity) => ConstantAtomicLabel(id, arity) - case SchematicFunctionLabel(id, arity) => SchematicPredicateLabel(id, arity) - case VariableLabel(id) => VariableFormulaLabel(id) - } - Termula(newLabel, t.args.map(_.toTermula), UNKNOWN_RANGE) - } - } - - case class TermulaSequent(left: Set[Termula], right: Set[Termula]) { - def toSequent: Sequent = Sequent(left.map(_.toFormula), right.map(_.toFormula)) - } - - extension (s: Sequent) { - def toTermulaSequent: TermulaSequent = TermulaSequent(s.left.map(_.toTermula), s.right.map(_.toTermula)) - } - - private[Parser] object SequentParser extends Parsers with ParsingUtils { - sealed abstract class TokenKind(debugString: String) { - override def toString: Mark = debugString - } - // and, or are both (left) associative and bind tighter than implies, iff - case object AndKind extends TokenKind(And.id) - case object OrKind extends TokenKind(Or.id) - // implies, iff are both non-associative and are of equal priority, lower than and, or - case object TopLevelConnectorKind extends TokenKind(s"${Implies.id} or ${Iff.id}") - case 
object NegationKind extends TokenKind(Neg.id) - case object BooleanConstantKind extends TokenKind("boolean constant") - case object IdKind extends TokenKind("constant or schematic id") - case class InfixKind(id: String) extends TokenKind(s"infix operation $id") - case object CommaKind extends TokenKind(",") - case class ParenthesisKind(isOpen: Boolean) extends TokenKind(if (isOpen) "(" else ")") - case object BinderKind extends TokenKind("binder") - case object DotKind extends TokenKind(".") - case object SemicolonKind extends TokenKind(";") - case object SequentSymbolKind extends TokenKind("⊢") - case object OtherKind extends TokenKind("
") - - import SequentLexer._ - type Token = FormulaToken - type Kind = TokenKind - - // can safely skip tokens which were mapped to unit with elem(kind).unit(token) - import SafeImplicits._ - - def getKind(t: Token): Kind = t match { - case _: AndToken => AndKind - case _: OrToken => OrKind - case _: IffToken | _: ImpliesToken => TopLevelConnectorKind - case _: NegationToken => NegationKind - case _: TrueToken | _: FalseToken => BooleanConstantKind - case InfixToken(id, _) => InfixKind(id) - case _: ConstantToken | _: SchematicToken | _: SchematicConnectorToken => IdKind - case _: CommaToken => CommaKind - case ParenthesisToken(isOpen, _) => ParenthesisKind(isOpen) - case _: ExistsToken | _: ExistsOneToken | _: ForallToken => BinderKind - case _: DotToken => DotKind - case _: SemicolonToken => SemicolonKind - case _: SequentToken => SequentSymbolKind - case _: SpaceToken | _: UnknownToken => OtherKind - } - - /////////////////////// SEPARATORS //////////////////////////////// - def parens(isOpen: Boolean): Syntax[Unit] = - elem(ParenthesisKind(isOpen)).unit(ParenthesisToken(isOpen, UNKNOWN_RANGE)) - - val open: Syntax[Unit] = parens(true) - - val closed: Syntax[Unit] = parens(false) - - val comma: Syntax[Unit] = elem(CommaKind).unit(CommaToken(UNKNOWN_RANGE)) - - val dot: Syntax[Unit] = elem(DotKind).unit(DotToken(UNKNOWN_RANGE)) - - val semicolon: Syntax[Unit] = elem(SemicolonKind).unit(SemicolonToken(UNKNOWN_RANGE)) - - val sequentSymbol: Syntax[Unit] = elem(SequentSymbolKind).unit(SequentToken(UNKNOWN_RANGE)) - /////////////////////////////////////////////////////////////////// - - //////////////////////// LABELS /////////////////////////////////// - val toplevelConnector: Syntax[RangedLabel] = accept(TopLevelConnectorKind)( - { - case ImpliesToken(r) => RangedLabel(Implies, r) - case IffToken(r) => RangedLabel(Iff, r) - }, - { - case RangedLabel(Implies, r) => Seq(ImpliesToken(r)) - case RangedLabel(Iff, r) => Seq(IffToken(r)) - case _ => throw UnreachableException - } - ) - - val negation: Syntax[RangedLabel] = accept(NegationKind)( - { case NegationToken(r) => RangedLabel(Neg, r) }, - { - case RangedLabel(Neg, r) => Seq(NegationToken(r)) - case _ => throw UnreachableException - } - ) - - val INFIX_ARITY = 2 - /////////////////////////////////////////////////////////////////// - - //////////////////////// TERMULAS ///////////////////////////////// - // can appear without parentheses - lazy val simpleTermula: Syntax[Termula] = prefixApplication | negated | bool - lazy val subtermula: Syntax[Termula] = simpleTermula | open.skip ~ termula ~ closed.skip - - val bool: Syntax[Termula] = accept(BooleanConstantKind)( - { - case TrueToken(r) => Termula(RangedLabel(top, r), Seq(), r) - case FalseToken(r) => Termula(RangedLabel(bot, r), Seq(), r) - }, - { - case Termula(RangedLabel(And, _), Seq(), r) => Seq(TrueToken(r)) - case Termula(RangedLabel(Or, _), Seq(), r) => Seq(FalseToken(r)) - case _ => throw UnreachableException - } - ) - - case class RangedTermulaSeq(ts: Seq[Termula], range: (Int, Int)) - lazy val args: Syntax[RangedTermulaSeq] = recursive( - (elem(ParenthesisKind(true)) ~ repsep(termula, comma) ~ elem(ParenthesisKind(false))).map( - { case start ~ ts ~ end => - RangedTermulaSeq(ts, (start.range._1, end.range._2)) - }, - { case RangedTermulaSeq(ts, (start, end)) => - Seq(ParenthesisToken(true, (start, start)) ~ ts ~ ParenthesisToken(false, (end, end))) - } - ) - ) - - def reconstructPrefixApplication(t: Termula): Token ~ Option[RangedTermulaSeq] = { - val argsRange = 
(t.label.range._2 + 1, t.range._2) - t.label.folLabel match { - case VariableFormulaLabel(id) => SchematicToken(id, t.label.range) ~ None - case SchematicPredicateLabel(id, _) => SchematicToken(id, t.range) ~ Some(RangedTermulaSeq(t.args, argsRange)) - case ConstantAtomicLabel(id, arity) => - ConstantToken(synonymToCanonical.getPrintName(id), t.label.range) ~ - (if (arity == 0) None else Some(RangedTermulaSeq(t.args, argsRange))) - case _ => throw UnreachableException - } - } - - // schematic connector, function, or predicate; constant function or predicate - val prefixApplication: Syntax[Termula] = ((elem(IdKind) | elem(OrKind) | elem(AndKind)) ~ opt(args)).map( - { case p ~ optArgs => - val args: Seq[Termula] = optArgs.map(_.ts).getOrElse(Seq()) - val l = p match { - - case ConstantToken(id, _) => ConstantAtomicLabel(synonymToCanonical.getInternalName(id), args.size) - case SchematicToken(id, _) => - if (args.isEmpty) VariableFormulaLabel(id) else SchematicPredicateLabel(id, args.size) - case SchematicConnectorToken(id, _) => SchematicConnectorLabel(id, args.size) - case AndToken(_, _) => And - case OrToken(_, _) => Or - case _ => throw UnreachableException - } - Termula(RangedLabel(l, p.range), args, (p.range._1, optArgs.map(_.range._2).getOrElse(p.range._2))) - }, - { - case t @ Termula(RangedLabel(_: AtomicLabel, _), _, _) => Seq(reconstructPrefixApplication(t)) - case t @ Termula(RangedLabel(SchematicConnectorLabel(id, _), r), args, _) => - val argsRange = (t.label.range._2 + 1, t.range._2) - Seq(SchematicConnectorToken(id, r) ~ Some(RangedTermulaSeq(args, argsRange))) - case t @ Termula(RangedLabel(And, r), args, _) => - val argsRange = (t.label.range._2 + 1, t.range._2) - Seq(AndToken(r, true) ~ Some(RangedTermulaSeq(args, argsRange))) - case t @ Termula(RangedLabel(Or, r), args, _) => - val argsRange = (t.label.range._2 + 1, t.range._2) - Seq(OrToken(r, true) ~ Some(RangedTermulaSeq(args, argsRange))) - - case _ => throw UnreachableException - } - ) - - val infixFunctionLabels: List[PrecedenceLevel[RangedLabel]] = infixFunctions.map({ case (l, assoc) => - elem(InfixKind(l)).map[RangedLabel]( - { - case InfixToken(`l`, range) => RangedLabel(ConstantAtomicLabel(synonymToCanonical.getInternalName(l), INFIX_ARITY), range) - case _ => throw UnreachableException - }, - { - case RangedLabel(ConstantAtomicLabel(id, INFIX_ARITY), range) => Seq(InfixToken(synonymToCanonical.getPrintName(id), range)) - case _ => throw UnreachableException - } - ) has assoc - }) - - val infixPredicateLabels: List[PrecedenceLevel[RangedLabel]] = infixPredicates.map(l => - elem(InfixKind(l)).map[RangedLabel]( - { - case InfixToken(`l`, range) => RangedLabel(ConstantAtomicLabel(synonymToCanonical.getInternalName(l), INFIX_ARITY), range) - case _ => throw UnreachableException - }, - { - case RangedLabel(ConstantAtomicLabel(id, INFIX_ARITY), range) => Seq(InfixToken(synonymToCanonical.getPrintName(id), range)) - case _ => throw UnreachableException - } - ) has Associativity.None - ) - - val negated: Syntax[Termula] = recursive { - (negation ~ subtermula).map( - { case n ~ f => - Termula(n, Seq(f), (n.range._1, f.range._2)) - }, - { - case Termula(l @ RangedLabel(Neg, _), Seq(f), _) => Seq(l ~ f) - case _ => throw UnreachableException - } - ) - } - - val and: Syntax[RangedLabel] = elem(AndKind).map[RangedLabel]( - { - - case AndToken(r, _) => RangedLabel(And, r) - case _ => throw UnreachableException - }, - { - case RangedLabel(And, r) => Seq(AndToken(r, false)) - case _ => throw UnreachableException - } - ) - - 
val or: Syntax[RangedLabel] = elem(OrKind).map[RangedLabel]( - { - case OrToken(r, _) => RangedLabel(Or, r) - case _ => throw UnreachableException - }, - { - case RangedLabel(Or, r) => Seq(OrToken(r, false)) - case _ => throw UnreachableException - } - ) - - // 'and' has higher priority than 'or' - val connectorTermula: Syntax[Termula] = infixOperators[RangedLabel, Termula](subtermula)( - infixFunctionLabels ++ - infixPredicateLabels ++ - ((and has Associativity.Left) :: - (or has Associativity.Left) :: - (toplevelConnector has Associativity.None) :: Nil)* - )( - (l, conn, r) => Termula(conn, Seq(l, r), (l.range._1, r.range._2)), - { - case Termula(pred @ RangedLabel(ConstantAtomicLabel(_, INFIX_ARITY), _), Seq(l, r), _) => (l, pred, r) - case Termula(conn @ RangedLabel(_: ConnectorLabel, _), Seq(l, r), _) => - (l, conn, r) - case Termula(conn @ RangedLabel(_: ConnectorLabel, _), l +: rest, _) if rest.nonEmpty => - val last = rest.last - val leftSide = rest.dropRight(1) - // parser only knows about connector formulas of two arguments, so we unfold the formula of many arguments to - // multiple nested formulas of two arguments - (leftSide.foldLeft(l)((f1, f2) => Termula(conn, Seq(f1, f2), UNKNOWN_RANGE)), conn, last) - } - ) - - val binderLabel: Syntax[RangedLabel] = accept(BinderKind)( - { - case ExistsToken(r) => RangedLabel(Exists, r) - case ExistsOneToken(r) => RangedLabel(ExistsOne, r) - case ForallToken(r) => RangedLabel(Forall, r) - }, - { - case RangedLabel(Exists, r) => Seq(ExistsToken(r)) - case RangedLabel(ExistsOne, r) => Seq(ExistsOneToken(r)) - case RangedLabel(Forall, r) => Seq(ForallToken(r)) - case _ => throw UnreachableException - } - ) - - val boundVariable: Syntax[RangedLabel] = accept(IdKind)( - t => { - val (id, r) = t match { - case ConstantToken(id, r) => (id, r) - case SchematicToken(id, r) => (id, r) - case _ => throw UnreachableException - } - RangedLabel(VariableFormulaLabel(id), r) - }, - { - case RangedLabel(VariableFormulaLabel(id), r) => Seq(SchematicToken(id, r)) - case _ => throw UnreachableException - } - ) - - val binder: Syntax[RangedLabel ~ RangedLabel] = binderLabel ~ boundVariable ~ dot.skip - - lazy val termula: Syntax[Termula] = recursive { - prefixes(binder, connectorTermula)( - { case (label ~ variable, f) => Termula(label, Seq(Termula(variable, Seq(), variable.range), f), (label.range._1, f.range._2)) }, - { case Termula(label @ RangedLabel(_: BinderLabel, _), Seq(Termula(variable @ RangedLabel(_: VariableFormulaLabel, _), Seq(), _), f), _) => - (label ~ variable, f) - } - ) - } - /////////////////////////////////////////////////////////////////// - - val sequent: Syntax[TermulaSequent] = (repsep(termula, semicolon) ~ opt(sequentSymbol.skip ~ repsep(termula, semicolon))).map[TermulaSequent]( - { - case left ~ Some(right) => TermulaSequent(left.toSet, right.toSet) - case right ~ None => TermulaSequent(Set(), right.toSet) - }, - { - case TermulaSequent(Seq(), right) => Seq(right.toSeq ~ None) - case TermulaSequent(left, Seq()) => Seq(left.toSeq ~ Some(Seq(False.toTermula))) - case TermulaSequent(left, right) => Seq(left.toSeq ~ Some(right.toSeq)) - } - ) - - val parser: Parser[TermulaSequent] = Parser(sequent) - val printer: PrettyPrinter[TermulaSequent] = PrettyPrinter(sequent) - - val termulaParser: SequentParser.Parser[Termula] = Parser(termula) - val termulaPrinter: SequentParser.PrettyPrinter[Termula] = PrettyPrinter(termula) - - def parseTermulaSequent(it: Iterator[Token]): ParseResult[TermulaSequent] = parser(it) - def printTermulaSequent(s: 
TermulaSequent): Option[String] = printer(s).map(SequentLexer.unapply) - - def parseTermula(it: Iterator[Token]): ParseResult[Termula] = termulaParser(it) - def printTermula(t: Termula): Option[String] = termulaPrinter(t).map(SequentLexer.unapply) - } -} diff --git a/lisa-utils/src/main/scala/lisa/utils/parsing/ParsingUtils.scala b/lisa-utils/src/main/scala/lisa/utils/parsing/ParsingUtils.scala deleted file mode 100644 index 858f6abff..000000000 --- a/lisa-utils/src/main/scala/lisa/utils/parsing/ParsingUtils.scala +++ /dev/null @@ -1,41 +0,0 @@ -package lisa.utils.parsing - -import lisa.utils.parsing.ParserException -import scallion.* -import scallion.util.Unfolds.unfoldLeft - -trait ParsingUtils extends Operators { self: Parsers => - case class PrecedenceLevel[Op](operator: Syntax[Op], associativity: lisa.utils.parsing.Associativity) - - implicit class PrecedenceLevelDecorator[Op](operator: Syntax[Op]) { - - /** - * Indicates the associativity of the operator. - */ - infix def has(associativity: lisa.utils.parsing.Associativity): PrecedenceLevel[Op] = PrecedenceLevel(operator, associativity) - } - - def singleInfix[Op, A](elem: Syntax[A], op: Syntax[Op])(function: (A, Op, A) => A, inverse: PartialFunction[A, (A, Op, A)] = PartialFunction.empty): Syntax[A] = - (elem ~ opt(op ~ elem)).map( - { - case first ~ None => first - case first ~ Some(op ~ second) => function(first, op, second) - }, - v => - inverse.lift(v) match { - // see the usage of singleInfix in infixOperators: the inverse function is the same for all ops, so it might - // or might not be correct to unwrap the current element with the inverse function. Hence, provide both options. - case Some(l, op, r) => Seq(l ~ Some(op ~ r), v ~ None) - case None => Seq(v ~ None) - } - ) - - def infixOperators[Op, A](elem: Syntax[A])(levels: PrecedenceLevel[Op]*)(function: (A, Op, A) => A, inverse: PartialFunction[A, (A, Op, A)] = PartialFunction.empty): Syntax[A] = - levels.foldLeft(elem) { case (currSyntax, PrecedenceLevel(op, assoc)) => - assoc match { - case Associativity.Left => infixLeft(currSyntax, op)(function, inverse) - case Associativity.Right => infixRight(currSyntax, op)(function, inverse) - case Associativity.None => singleInfix(currSyntax, op)(function, inverse) - } - } -} diff --git a/lisa-utils/src/main/scala/lisa/utils/parsing/Printer.scala b/lisa-utils/src/main/scala/lisa/utils/parsing/Printer.scala deleted file mode 100644 index 6b33007d7..000000000 --- a/lisa-utils/src/main/scala/lisa/utils/parsing/Printer.scala +++ /dev/null @@ -1,185 +0,0 @@ -package lisa.utils.parsing - -import lisa.kernel.fol.FOL.* -import lisa.kernel.proof.SCProof -import lisa.kernel.proof.SCProofCheckerJudgement -import lisa.kernel.proof.SCProofCheckerJudgement.* -import lisa.kernel.proof.SequentCalculus.* - -val FOLPrinter = Printer(FOLParser) - -/** - * A set of methods to pretty-print kernel trees. - */ -class Printer(parser: Parser) { - - private def spaceSeparator(compact: Boolean): String = if (compact) "" else " " - - private def commaSeparator(compact: Boolean, symbol: String = ","): String = s"$symbol${spaceSeparator(compact)}" - - /** - * Returns a string representation of this formula. See also [[prettyTerm]]. - * Example output: - * - * ∀x, y. (∀z. 
(z ∈ x) ⇔ (z ∈ y)) ⇔ (x = y) - *- * - * @param formula the formula - * @param compact whether spaces should be omitted between tokens - * @return the string representation of this formula - */ - def prettyFormula(formula: Formula, compact: Boolean = false): String = parser.printFormula(formula) - - /** - * Returns a string representation of this term. See also [[prettyFormula]]. - * Example output: - *- * f({w, (x, y)}, z) - *- * - * @param term the term - * @param compact whether spaces should be omitted between tokens - * @return the string representation of this term - */ - def prettyTerm(term: Term, compact: Boolean = false): String = parser.printTerm(term) - - /** - * Returns a string representation of this sequent. - * Example output: - *- * ⊢ ∀x, y. (∀z. (z ∈ x) ⇔ (z ∈ y)) ⇔ (x = y) - *- * - * @param sequent the sequent - * @param compact whether spaces should be omitted between tokens - * @return the string representation of this sequent - */ - def prettySequent(sequent: Sequent, compact: Boolean = false): String = parser.printSequent(sequent) - - /** - * Returns a string representation of this proof. - * - * @param proof the proof - * @param judgement optionally provide a proof checking judgement that will mark a particular step in the proof - * (`->`) as an error. The proof is considered to be valid by default - * @return a string where each indented line corresponds to a step in the proof - */ - def prettySCProof(judgement: SCProofCheckerJudgement, forceDisplaySubproofs: Boolean = false): String = { - val proof = judgement.proof - def computeMaxNumberingLengths(proof: SCProof, level: Int, result: IndexedSeq[Int]): IndexedSeq[Int] = { - val resultWithCurrent = result.updated( - level, - (Seq((proof.steps.size - 1).toString.length, result(level)) ++ (if (proof.imports.nonEmpty) Seq((-proof.imports.size).toString.length) else Seq.empty)).max - ) - proof.steps.collect { case sp: SCSubproof => sp }.foldLeft(resultWithCurrent)((acc, sp) => computeMaxNumberingLengths(sp.sp, level + 1, if (acc.size <= level + 1) acc :+ 0 else acc)) - } - - val maxNumberingLengths = computeMaxNumberingLengths(proof, 0, IndexedSeq(0)) // The maximum value for each number column - val maxLevel = maxNumberingLengths.size - 1 - - def leftPadSpaces(v: Any, n: Int): String = { - val s = String.valueOf(v) - if (s.length < n) (" " * (n - s.length)) + s else s - } - - def rightPadSpaces(v: Any, n: Int): String = { - val s = String.valueOf(v) - if (s.length < n) s + (" " * (n - s.length)) else s - } - - def prettySCProofRecursive(proof: SCProof, level: Int, tree: IndexedSeq[Int], topMostIndices: IndexedSeq[Int]): Seq[(Boolean, String, String, String)] = { - val printedImports = proof.imports.zipWithIndex.reverse.flatMap { case (imp, i) => - val currentTree = tree :+ (-i - 1) - val showErrorForLine = judgement match { - case SCValidProof(_, _) => false - case SCInvalidProof(proof, position, _) => currentTree.startsWith(position) && currentTree.drop(position.size).forall(_ == 0) - } - val prefix = (Seq.fill(level - topMostIndices.size)(None) ++ Seq.fill(topMostIndices.size)(None) :+ Some(-i - 1)) ++ Seq.fill(maxLevel - level)(None) - val prefixString = prefix.map(_.map(_.toString).getOrElse("")).zipWithIndex.map { case (v, i1) => leftPadSpaces(v, maxNumberingLengths(i1)) }.mkString(" ") - - def pretty(stepName: String, topSteps: Int*): (Boolean, String, String, String) = - ( - showErrorForLine, - prefixString, - Seq(stepName, topSteps.mkString(commaSeparator(compact = false))).filter(_.nonEmpty).mkString(" "), - 
prettySequent(imp) - ) - - Seq(pretty("Import", 0)) - } - printedImports ++ proof.steps.zipWithIndex.flatMap { case (step, i) => - val currentTree = tree :+ i - val showErrorForLine = judgement match { - case SCValidProof(_, _) => false - case SCInvalidProof(proof, position, _) => currentTree.startsWith(position) && currentTree.drop(position.size).forall(_ == 0) - } - val prefix = (Seq.fill(level - topMostIndices.size)(None) ++ Seq.fill(topMostIndices.size)(None) :+ Some(i)) ++ Seq.fill(maxLevel - level)(None) - val prefixString = prefix.map(_.map(_.toString).getOrElse("")).zipWithIndex.map { case (v, i1) => leftPadSpaces(v, maxNumberingLengths(i1)) }.mkString(" ") - - def pretty(stepName: String, topSteps: Int*): (Boolean, String, String, String) = - ( - showErrorForLine, - prefixString, - Seq(stepName, topSteps.mkString(commaSeparator(compact = false))).filter(_.nonEmpty).mkString(" "), - prettySequent(step.bot) - ) - - step match { - case sp @ SCSubproof(_, _) => - pretty("Subproof", sp.premises*) +: prettySCProofRecursive(sp.sp, level + 1, currentTree, (if (i == 0) topMostIndices else IndexedSeq.empty) :+ i) - case other => - val line = other match { - case Restate(_, t1) => pretty("Rewrite", t1) - case RestateTrue(_) => pretty("RewriteTrue") - case Hypothesis(_, _) => pretty("Hypo.") - case Cut(_, t1, t2, _) => pretty("Cut", t1, t2) - case LeftAnd(_, t1, _, _) => pretty("Left ∧", t1) - case LeftNot(_, t1, _) => pretty("Left ¬", t1) - case RightOr(_, t1, _, _) => pretty("Right ∨", t1) - case RightNot(_, t1, _) => pretty("Right ¬", t1) - case LeftExists(_, t1, _, _) => pretty("Left ∃", t1) - case LeftForall(_, t1, _, _, _) => pretty("Left ∀", t1) - case LeftExistsOne(_, t1, _, _) => pretty("Left ∃!", t1) - case LeftOr(_, l, _) => pretty("Left ∨", l*) - case RightExists(_, t1, _, _, _) => pretty("Right ∃", t1) - case RightForall(_, t1, _, _) => pretty("Right ∀", t1) - case RightExistsOne(_, t1, _, _) => pretty("Right ∃!", t1) - case RightAnd(_, l, _) => pretty("Right ∧", l*) - case RightIff(_, t1, t2, _, _) => pretty("Right ⇔", t1, t2) - case RightImplies(_, t1, _, _) => pretty("Right ⇒", t1) - case Weakening(_, t1) => pretty("Weakening", t1) - case LeftImplies(_, t1, t2, _, _) => pretty("Left ⇒", t1, t2) - case LeftIff(_, t1, _, _) => pretty("Left ⇔", t1) - case LeftRefl(_, t1, _) => pretty("L. Refl", t1) - case RightRefl(_, _) => pretty("R. Refl") - case LeftSubstEq(_, t1, _, _) => pretty("L. SubstEq", t1) - case RightSubstEq(_, t1, _, _) => pretty("R. SubstEq", t1) - case LeftSubstIff(_, t1, _, _) => pretty("L. SubstIff", t1) - case RightSubstIff(_, t1, _, _) => pretty("R. 
SubstIff", t1) - case InstSchema(_, t1, _, _, _) => pretty("Schema Instantiation", t1) - case Sorry(_) => pretty("Sorry") - case SCSubproof(_, _) => throw UnreachableException - } - Seq(line) - } - } - } - - val marker = "->" - - val lines = prettySCProofRecursive(proof, 0, IndexedSeq.empty, IndexedSeq.empty) - val maxStepNameLength = lines.map { case (_, _, stepName, _) => stepName.length }.maxOption.getOrElse(0) - lines - .map { case (isMarked, indices, stepName, sequent) => - val suffix = Seq(indices, rightPadSpaces(stepName, maxStepNameLength), sequent) - val full = if (!judgement.isValid) (if (isMarked) marker else leftPadSpaces("", marker.length)) +: suffix else suffix - full.mkString(" ") - } - .mkString("\n") + (judgement match { - case SCValidProof(_, _) => "" - case SCInvalidProof(proof, path, message) => s"\nProof checker has reported an error at line ${path.mkString(".")}: $message" - }) - } - - def prettySCProof(proof: SCProof): String = prettySCProof(SCValidProof(proof), false) -} diff --git a/lisa-utils/src/main/scala/lisa/utils/parsing/SynonymInfo.scala b/lisa-utils/src/main/scala/lisa/utils/parsing/SynonymInfo.scala deleted file mode 100644 index 2ab844296..000000000 --- a/lisa-utils/src/main/scala/lisa/utils/parsing/SynonymInfo.scala +++ /dev/null @@ -1,34 +0,0 @@ -package lisa.utils.parsing - -import scala.collection.mutable - -case class CanonicalId(internal: String, print: String) - -case class SynonymInfo(private val synonymToCanonical: Map[String, CanonicalId]) { - - /** - * @return the preferred way to output this id, if available, otherwise the id itself. - */ - def getPrintName(id: String): String = synonymToCanonical.get(id).map(_.print).getOrElse(id) - - /** - * @return the synonym of `id` that is used to construct the corresponding `ConstantAtomicLabel` or - * `ConstantFunctionLabel`. If not available, `id` has no known synonyms, so return `id` itself. 
- */ - def getInternalName(id: String): String = synonymToCanonical.get(id).map(_.internal).getOrElse(id) -} - -object SynonymInfo { - val empty: SynonymInfo = SynonymInfo(Map.empty[String, CanonicalId]) -} - -class SynonymInfoBuilder { - private val mapping: mutable.Map[String, CanonicalId] = mutable.Map() - - def addSynonyms(internal: String, print: String, equivalentLabels: List[String] = Nil): SynonymInfoBuilder = { - (print :: internal :: equivalentLabels).foreach(mapping(_) = CanonicalId(internal, print)) - this - } - - def build: SynonymInfo = SynonymInfo(mapping.toMap) -} diff --git a/lisa-utils/src/main/scala/lisa/prooflib/BasicMain.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/BasicMain.scala similarity index 95% rename from lisa-utils/src/main/scala/lisa/prooflib/BasicMain.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/BasicMain.scala index 748f58d51..ad2b70bd5 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/BasicMain.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/BasicMain.scala @@ -1,4 +1,4 @@ -package lisa.prooflib +package lisa.utils.prooflib import lisa.utils.Serialization.* diff --git a/lisa-utils/src/main/scala/lisa/prooflib/BasicStepTactic.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/BasicStepTactic.scala similarity index 68% rename from lisa-utils/src/main/scala/lisa/prooflib/BasicStepTactic.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/BasicStepTactic.scala index 899c51da7..5cdec00d4 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/BasicStepTactic.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/BasicStepTactic.scala @@ -1,13 +1,12 @@ -package lisa.prooflib -import lisa.fol.FOLHelpers.* -import lisa.fol.FOL as F -import lisa.prooflib.ProofTacticLib.{_, given} -import lisa.prooflib.* +package lisa.utils.prooflib +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.ProofTacticLib.{_, given} +import lisa.utils.prooflib.* import lisa.utils.K import lisa.utils.KernelHelpers.{|- => `K|-`, _} -import lisa.utils.UserLisaException -import lisa.utils.parsing.FOLPrinter -import lisa.utils.unification.UnificationUtils +//import lisa.utils.UserLisaException +import lisa.utils.unification.UnificationUtils.* +import lisa.utils.collection.Extensions.* object BasicStepTactic { @@ -43,7 +42,7 @@ object BasicStepTactic { object RewriteTrue extends ProofTactic with ProofSequentTactic { def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = { val botK = bot.underlying - if (!K.isSameSequent(botK, () `K|-` K.AtomicFormula(K.top, Nil))) + if (!K.isSameSequent(botK, () `K|-` K.top)) proof.InvalidProofTactic("The desired conclusion is not a trivial tautology.") else proof.ValidProofTactic(bot, Seq(K.RestateTrue(botK)), Seq()) @@ -58,7 +57,7 @@ object BasicStepTactic { *
* Γ, ∃y.∀x. (x=y) ⇔ φ |- Δ @@ -490,22 +489,14 @@ object BasicStepTactic { **/ object LeftExistsOne extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, x: F.Variable)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable[F.T])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val xK = x.underlyingLabel + lazy val xK = x.underlying lazy val phiK = phi.underlying lazy val botK = bot.underlying - lazy val y = K.VariableLabel(lisa.utils.KernelHelpers.freshId(phiK.freeVariables.map(_.id), x.id)) - lazy val instantiated = K.BinderFormula( - K.Exists, - y, - K.BinderFormula( - K.Forall, - xK, - K.ConnectorFormula(K.Iff, List(K.AtomicFormula(K.equality, List(K.VariableTerm(xK), K.VariableTerm(y))), phiK)) - ) - ) - lazy val quantified = K.BinderFormula(K.ExistsOne, xK, phiK) + lazy val y = K.Variable(lisa.utils.KernelHelpers.freshId(phiK.freeVariables.map(_.id), x.id), K.Ind) + lazy val instantiated = K.exists(y, K.forall(xK, (xK === y) <=> phiK )) + lazy val quantified = K.ExistsOne(xK, phiK) if (!K.isSameSet(botK.right, premiseSequent.right)) proof.InvalidProofTactic("Right-hand side of conclusion is not the same as right-hand side of premise.") @@ -529,7 +520,7 @@ object BasicStepTactic { else if (instantiatedPivot.tail.isEmpty) { instantiatedPivot.head match { // ∃_. ∀x. _ ⇔ φ == extract ==> x, phi - case F.BinderFormula(F.Exists, _, F.BinderFormula(F.Forall, x, F.AppliedConnector(F.Iff, Seq(_, phi)))) => LeftExistsOne.withParameters(phi, x)(premise)(bot) + case F.exists(_, F.forall(x, F.AppliedConnector(F.Iff, Seq(_, phi)))) => LeftExistsOne.withParameters(phi, x)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer an existentially quantified pivot from premise and conclusion.") } } else @@ -544,6 +535,8 @@ object BasicStepTactic { } } + */ + // Right rules /** *
@@ -553,11 +546,11 @@ object BasicStepTactic { **/ object RightAnd extends ProofTactic { - def withParameters(using lib: Library, proof: lib.Proof)(conjuncts: F.Formula*)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(conjuncts: F.Expr[F.Prop]*)(premises: proof.Fact*)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequents = premises.map(proof.getSequent(_).underlying) lazy val botK = bot.underlying lazy val conjunctsK = conjuncts.map(_.underlying) - lazy val conjunction = K.ConnectorFormula(K.And, conjunctsK) + lazy val conjunction = K.multiand(conjunctsK) if (premises.length == 0) proof.InvalidProofTactic(s"Premises expected, ${premises.length} received.") @@ -601,12 +594,12 @@ object BasicStepTactic { * */ object RightOr extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, psi: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], psi: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying lazy val phiK = phi.underlying lazy val psiK = psi.underlying lazy val botK = bot.underlying - lazy val phiAndPsi = K.ConnectorFormula(K.Or, Seq(phiK, psiK)) + lazy val phiAndPsi = phiK \/ psiK if (!K.isSameSet(botK.left, premiseSequent.left)) proof.InvalidProofTactic("Left-hand side of the premise is not the same as the left-hand side of the conclusion.") @@ -625,7 +618,7 @@ object BasicStepTactic { if (!pivot.isEmpty && pivot.tail.isEmpty) pivot.head match { - case F.AppliedConnector(F.Or, Seq(phi, psi)) => + case F.App(F.App(F.or, phi), psi) => if (premiseSequent.left.contains(phi)) RightOr.withParameters(phi, psi)(premise)(bot) else @@ -649,12 +642,12 @@ object BasicStepTactic { * */ object RightImplies extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, psi: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], psi: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying lazy val phiK = phi.underlying lazy val psiK = psi.underlying lazy val botK = bot.underlying - lazy val implication = K.ConnectorFormula(K.Implies, Seq(phiK, psiK)) + lazy val implication = phiK ==> psiK if (!K.isSameSet(botK.left + phiK, premiseSequent.left)) proof.InvalidProofTactic("Left-hand side of conclusion + φ is not the same as left-hand side of premise.") @@ -687,31 +680,31 @@ object BasicStepTactic { * */ object RightIff extends ProofTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, psi: F.Formula)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], psi: F.Expr[F.Prop])(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val leftSequent = proof.getSequent(prem1).underlying lazy val rightSequent = proof.getSequent(prem2).underlying lazy val phiK = phi.underlying lazy val psiK = psi.underlying lazy val botK = bot.underlying - lazy val implication = K.ConnectorFormula(K.Iff, Seq(phiK, psiK)) - lazy val 
impLeft = K.ConnectorFormula(K.Implies, Seq(phiK, psiK)) - lazy val impRight = K.ConnectorFormula(K.Implies, Seq(psiK, phiK)) + lazy val implication = phiK <=> psiK + lazy val impLeft = phiK ==> psiK + lazy val impRight = psiK ==> phiK if (!K.isSameSet(botK.left, leftSequent.left union rightSequent.left)) proof.InvalidProofTactic("Left-hand side of conclusion is not the union of the left-hand sides of the premises.") else if (!K.isSubset(leftSequent.right, botK.right + impLeft)) proof.InvalidProofTactic( - "Conclusion is missing the following formulas from the left premise: " + (leftSequent.right -- botK.right).map(f => s"[${FOLPrinter.prettyFormula(f)}]").reduce(_ ++ ", " ++ _) + "Conclusion is missing the following formulas from the left premise: " + (leftSequent.right -- botK.right).map(f => s"[${f.repr}]").reduce(_ ++ ", " ++ _) ) else if (!K.isSubset(rightSequent.right, botK.right + impRight)) proof.InvalidProofTactic( - "Conclusion is missing the following formulas from the right premise: " + (rightSequent.right -- botK.right).map(f => s"[${FOLPrinter.prettyFormula(f)}]").reduce(_ ++ ", " ++ _) + "Conclusion is missing the following formulas from the right premise: " + (rightSequent.right -- botK.right).map(f => s"[${f.repr}]").reduce(_ ++ ", " ++ _) ) else if (!K.isSubset(botK.right, leftSequent.right union rightSequent.right + implication)) proof.InvalidProofTactic( "Conclusion has extraneous formulas apart from premises and implication: " ++ (botK.right .removedAll(leftSequent.right union rightSequent.right + implication)) - .map(f => s"[${FOLPrinter.prettyFormula(f)}]") + .map(f => s"[${f.repr}]") .reduce(_ ++ ", " ++ _) ) else @@ -729,7 +722,7 @@ object BasicStepTactic { proof.InvalidProofTactic("Left-hand side of conclusion is not a superset of the premises.") else if (pivot.tail.isEmpty) pivot.head match { - case F.AppliedConnector(F.Implies, Seq(phi, psi)) => RightIff.withParameters(phi, psi)(prem1, prem2)(bot) + case F.App(F.App(F.implies, phi), psi) => RightIff.withParameters(phi, psi)(prem1, prem2)(bot) case _ => proof.InvalidProofTactic("Could not infer an implication as pivot from premise and conclusion.") } else @@ -745,11 +738,11 @@ object BasicStepTactic { * */ object RightNot extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying lazy val phiK = phi.underlying lazy val botK = bot.underlying - lazy val negation = K.ConnectorFormula(K.Neg, Seq(phiK)) + lazy val negation = !phiK if (!K.isSameSet(botK.left + phiK, premiseSequent.left)) proof.InvalidProofTactic("Left-hand side of conclusion + φ is not the same as left-hand side of premise.") @@ -784,12 +777,12 @@ object BasicStepTactic { * */ object RightForall extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, x: F.Variable)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val xK = x.underlyingLabel + lazy val xK = x.underlying lazy val phiK = 
phi.underlying lazy val botK = bot.underlying - lazy val quantified = K.BinderFormula(K.Forall, xK, phiK) + lazy val quantified = K.forall(xK, phiK) if ((botK.left union botK.right).exists(_.freeVariables.contains(xK))) proof.InvalidProofTactic("The variable x is free in the resulting sequent.") @@ -813,22 +806,22 @@ object BasicStepTactic { else proof.InvalidProofTactic("Could not infer a pivot from the premise and conclusion.") else if (instantiatedPivot.tail.isEmpty) { - val in: F.Formula = instantiatedPivot.head - val quantifiedPhi: Option[F.Formula] = bot.right.find(f => + val in: F.Expr[F.Prop] = instantiatedPivot.head + val quantifiedPhi: Option[F.Expr[F.Prop]] = bot.right.find(f => f match { - case F.BinderFormula(F.Forall, _, g) => F.isSame(g, in) + case F.forall(_, g) => F.isSame(g, in) case _ => false } ) quantifiedPhi match { - case Some(F.BinderFormula(F.Forall, x, phi)) => RightForall.withParameters(phi, x)(premise)(bot) + case Some(F.forall(x, phi)) => RightForall.withParameters(phi, x)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer a universally quantified pivot from premise and conclusion.") } } else proof.InvalidProofTactic("Right-hand side of conclusion + φ is not the same as right-hand side of premise + ∃x. φ.") else if (pivot.tail.isEmpty) pivot.head match { - case F.BinderFormula(F.Forall, x, phi) => RightForall.withParameters(phi, x)(premise)(bot) + case F.forall(x, phi) => RightForall.withParameters(phi, x)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer a universally quantified pivot from premise and conclusion.") } else @@ -846,17 +839,14 @@ object BasicStepTactic { * */ object RightExists extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, x: F.Variable, t: F.Term | K.Term)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable[F.Ind], t: F.Expr[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val xK = x.underlyingLabel - lazy val tK = t match { - case t: F.Term => t.underlying - case t: K.Term => t - } + lazy val xK = x.underlying + lazy val tK = t.underlying lazy val phiK = phi.underlying lazy val botK = bot.underlying - lazy val quantified = K.BinderFormula(K.Exists, xK, phiK) - lazy val instantiated = K.substituteVariablesInFormula(phiK, Map(xK -> tK), Seq()) + lazy val quantified = K.exists(xK, phiK) + lazy val instantiated = K.substituteVariables(phiK, Map(xK -> tK)) if (!K.isSameSet(botK.left, premiseSequent.left)) proof.InvalidProofTactic("Left-hand side of conclusion is not the same as left-hand side of premise") @@ -866,7 +856,7 @@ object BasicStepTactic { proof.ValidProofTactic(bot, Seq(K.RightExists(botK, -1, phiK, xK, tK)), Seq(premise)) } - def withParameters(using lib: Library, proof: lib.Proof)(t: F.Term)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(t: F.Expr[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise) lazy val pivot = bot.right.diff(premiseSequent.right) lazy val instantiatedPivot = premiseSequent.right.diff(bot.right) @@ -874,7 +864,7 @@ object BasicStepTactic { if (!pivot.isEmpty) if (pivot.tail.isEmpty) pivot.head match { - case F.BinderFormula(F.Exists, x, phi) => 
RightExists.withParameters(phi, x, t)(premise)(bot) + case F.exists(x, phi) => RightExists.withParameters(phi, x, t)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer an existentially quantified pivot from premise and conclusion.") } else @@ -887,16 +877,16 @@ object BasicStepTactic { else if (instantiatedPivot.tail.isEmpty) { // go through conclusion to find a matching quantified formula - val in: F.Formula = instantiatedPivot.head - val quantifiedPhi: Option[F.Formula] = bot.right.find(f => + val in: F.Expr[F.Prop] = instantiatedPivot.head + val quantifiedPhi: Option[F.Expr[F.Prop]] = bot.right.find(f => f match { - case g @ F.BinderFormula(F.Exists, _, _) => F.isSame(F.instantiateBinder(g, t), in) + case g @ F.exists(v, e) => F.isSame(e.substitute(v := t), in) case _ => false } ) quantifiedPhi match { - case Some(F.BinderFormula(F.Exists, x, phi)) => RightExists.withParameters(phi, x, t)(premise)(bot) + case Some(F.exists(x, phi)) => RightExists.withParameters(phi, x, t)(premise)(bot) case _ => proof.InvalidProofTactic("Could not match discovered quantified pivot with premise.") } } else proof.InvalidProofTactic("Right-hand side of conclusion + φ[t/x] is not the same as right-hand side of premise + ∃x. φ.") @@ -916,25 +906,27 @@ object BasicStepTactic { else if (instantiatedPivot.tail.isEmpty) { // go through conclusion to find a matching quantified formula - val in: F.Formula = instantiatedPivot.head + val in: F.Expr[F.Prop] = instantiatedPivot.head - val quantifiedPhi: Option[F.Formula] = pivot.find(f => + val quantifiedPhi: Option[(F.Expr[F.Prop], Substitution)] = pivot.collectFirstDefined(f => f match { - case g @ F.BinderFormula(F.Exists, x, phi) => - UnificationUtils.matchFormula(in, phi, takenTermVariables = (phi.freeVariables - x)).isDefined - case _ => false + case g @ F.exists(x, phi) => + val ctx = RewriteContext.withBound(phi.freeVars - x) + matchExpr(using ctx)(phi, in).map(f -> _) + case _ => None } ) quantifiedPhi match { - case Some(F.BinderFormula(F.Exists, x, phi)) => - RightExists.withParameters(phi, x, UnificationUtils.matchFormula(in, phi, takenTermVariables = (phi.freeVariables - x)).get._2.getOrElse(x, x))(premise)(bot) + case Some((F.exists(x, phi), subst)) => + RightExists.withParameters(phi, x, subst(x).getOrElse(x))(premise)(bot) case _ => proof.InvalidProofTactic("Could not match discovered quantified pivot with premise.") } } else proof.InvalidProofTactic("Right-hand side of conclusion + φ[t/x] is not the same as right-hand side of premise + ∃x. φ.") } } + /* /** *
* Γ |- ∃y.∀x. (x=y) ⇔ φ, Δ @@ -943,19 +935,19 @@ object BasicStepTactic { **/ object RightExistsOne extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Formula, x: F.Variable)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val xK = x.underlyingLabel + lazy val xK = x.underlying lazy val phiK = phi.underlying lazy val botK = bot.underlying - lazy val y = K.VariableLabel(lisa.utils.KernelHelpers.freshId(phiK.freeVariables.map(_.id), x.id)) + lazy val y = K.Variable(lisa.utils.KernelHelpers.freshId(phiK.freeVariables.map(_.id), x.id)) lazy val instantiated = K.BinderFormula( K.Exists, y, K.BinderFormula( K.Forall, xK, - K.ConnectorFormula(K.Iff, List(K.AtomicFormula(K.equality, List(K.VariableTerm(xK), K.VariableTerm(y))), phiK)) + K.ConnectorFormula(K.Iff, Seq(K.AtomicFormula(K.equality, Seq(K.VariableTerm(xK), K.VariableTerm(y))), phiK)) ) ) lazy val quantified = K.BinderFormula(K.ExistsOne, xK, phiK) @@ -982,7 +974,7 @@ object BasicStepTactic { else if (instantiatedPivot.tail.isEmpty) { instantiatedPivot.head match { // ∃_. ∀x. _ ⇔ φ == extract ==> x, phi - case F.BinderFormula(F.Exists, _, F.BinderFormula(F.Forall, x, F.AppliedConnector(F.Iff, Seq(_, phi)))) => + case F.exists(_, F.forall(x, F.AppliedConnector(F.Iff, Seq(_, phi)))) => RightExistsOne.withParameters(phi, x)(premise)(bot) case _ => proof.InvalidProofTactic("Could not infer an existentially quantified pivot from premise and conclusion.") } @@ -998,6 +990,89 @@ object BasicStepTactic { } } + */ + + /** + *
+ * Γ |- φ[t/x], Δ + * -------------------------- + * Γ|- φ[(εx. φ)/x], Δ + *+ * + * Note that if Δ contains φ[(εx. φ)/x] as well, the parameter inference will + * fail. In that case, use [[RightEpsilon.withParameters]] instead. + */ + object RightEpsilon extends ProofTactic with ProofFactSequentTactic { + def collectEpsilons(in: F.Expr[?]): Set[F.Expr[F.Ind]] = + in.collect { case e @ F.ε(_, _) => e.asInstanceOf[F.Expr[F.Ind]] }.toSet + + def withParameters(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], x: F.Variable[F.Ind], t: F.Expr[F.Ind])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + lazy val premiseSequent = proof.getSequent(premise).underlying + lazy val xK = x.underlying + lazy val tK = t.underlying + lazy val phiK = phi.underlying + lazy val botK = bot.underlying + lazy val epsilonTerm = K.epsilon(xK, phiK) + lazy val instantiated = K.substituteVariables(phiK, Map(xK -> tK)) + lazy val bound = K.substituteVariables(phiK, Map(xK -> epsilonTerm)) + + if (!K.isSameSet(botK.left, premiseSequent.left)) + proof.InvalidProofTactic("Left-hand side of conclusion is not the same as left-hand side of premise.") + else if (!K.isSameSet(botK.right + instantiated, premiseSequent.right + bound)) + proof.InvalidProofTactic("Right-hand side of conclusion + φ[t/x] is not the same as right-hand side of premise + φ[(εx. φ)/x].") + else + proof.ValidProofTactic(bot, Seq(K.RightEpsilon(botK, -1, phiK, xK, tK)), Seq(premise)) + } + def apply(using lib: Library, proof: lib.Proof)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + val premiseSequent = proof.getSequent(premise) + val pivotSet = premiseSequent.right -- bot.right + val targetSet = bot.right -- premiseSequent.right + + inline def theFailure = + proof.InvalidProofTactic("Could not infer an epsilon pivot from premise and conclusion.") + + if pivotSet.size == 0 || targetSet.size == 0 then + theFailure + else if pivotSet.size == 1 && targetSet.size == 1 then + val pivot = pivotSet.head + val target = targetSet.head + + // the new binding is one of these + val epsilons = collectEpsilons(target) + + val newBindingOption = epsilons.collectFirstDefined: + case eps @ F.ε(x, phi) => + val substituted = phi.substitute(x := eps) + if F.isSame(substituted, target) then Some(eps) else None + case _ => None + + newBindingOption match + case Some(F.ε(x, phi)) => + // match pivot with phi to discover t + val pivotMatch = matchExpr(using RewriteContext.withBound(phi.freeVars - x))(phi, pivot) + pivotMatch match + case Some(subst) if subst.contains(x) => + val t = subst(x).get + RightEpsilon.withParameters(phi, x, t)(premise)(bot) + case _ => theFailure + case _ => theFailure + else + theFailure + } + + object Beta extends ProofTactic with ProofFactSequentTactic { + + def apply(using lib: Library, proof: lib.Proof)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + val botK = bot.underlying + val red1 = K.sequentToFormula(botK).betaNormalForm + val red2 = K.sequentToFormula(proof.getSequent(premise).underlying).betaNormalForm + if (!K.isSame(red1,red2)) + proof.InvalidProofTactic("The conclusion is not beta-OL-equivalent to the premise.") + else + proof.ValidProofTactic(bot, Seq(K.Beta(botK, -1)), Seq(premise)) + } + } + // Structural rules /** *
@@ -1026,7 +1101,7 @@ object BasicStepTactic { **/ object LeftRefl extends ProofTactic with ProofFactSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(fa: F.Formula)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(fa: F.Expr[F.Prop])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying lazy val faK = fa.underlying lazy val botK = bot.underlying @@ -1037,8 +1112,8 @@ object BasicStepTactic { proof.InvalidProofTactic("Right-hand side of the premise is not the same as the right-hand side of the conclusion.") else faK match { - case K.AtomicFormula(K.equality, Seq(left, right)) => - if (K.isSameTerm(left, right)) + case K.Application(K.Application(K.equality, left), right) => + if (K.isSame(left, right)) proof.ValidProofTactic(bot, Seq(K.LeftRefl(botK, -1, faK)), Seq(premise)) else proof.InvalidProofTactic("φ is not an instance of reflexivity.") @@ -1065,15 +1140,15 @@ object BasicStepTactic { * */ object RightRefl extends ProofTactic with ProofSequentTactic { - def withParameters(using lib: Library, proof: lib.Proof)(fa: F.Formula)(bot: F.Sequent): proof.ProofTacticJudgement = { + def withParameters(using lib: Library, proof: lib.Proof)(fa: F.Expr[F.Prop])(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val faK = fa.underlying lazy val botK = bot.underlying if (!botK.right.exists(_ == faK)) proof.InvalidProofTactic("Right-hand side of conclusion does not contain φ.") else faK match { - case K.AtomicFormula(K.equality, Seq(left, right)) => - if (K.isSameTerm(left, right)) + case K.Application(K.Application(K.equality, left), right) => + if (K.isSame(left, right)) proof.ValidProofTactic(bot, Seq(K.RightRefl(botK, faK)), Seq()) else proof.InvalidProofTactic("φ is not an instance of reflexivity.") @@ -1085,10 +1160,10 @@ object BasicStepTactic { if (bot.right.isEmpty) proof.InvalidProofTactic("Right-hand side of conclusion does not contain an instance of reflexivity.") else { // go through conclusion to see if you can find an reflexive formula - val pivot: Option[F.Formula] = bot.right.find(f => - val Eq = F.equality // (F.equality: (F.|->[F.**[F.Term, 2], F.Formula])) + val pivot: Option[F.Expr[F.Prop]] = bot.right.find(f => + val Eq = F.equality // (F.equality: (F.|->[F.**[F.Expr[F.Ind], 2], F.Expr[F.Prop]])) f match { - case F.AppliedPredicate(e, Seq(l, r)) => + case F.App(F.App(e, l), r) => (F.equality) == (e) && l == r // termequality case _ => false } @@ -1106,212 +1181,123 @@ object BasicStepTactic { /** *
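LeftRefl and RightRefl above now destructure equality as a curried application instead of an AtomicFormula. A minimal sketch of that kernel encoding, using only constructors visible in this patch (K.Variable, K.Identifier, K.Ind, K.Application, K.equality, K.isSame); the value names are hypothetical.

import lisa.utils.K

// s === t is now the curried application  Application(Application(equality, s), t).
def isReflexiveInstance(f: K.Expression): Boolean = f match
  case K.Application(K.Application(K.equality, l), r) => K.isSame(l, r)
  case _ => false

val x = K.Variable(K.Identifier("x", 0), K.Ind)
val xEqX = K.equality(x)(x) // builder form used elsewhere in the patch; same shape as the pattern above
// isReflexiveInstance(xEqX) evaluates to true, which is exactly what the two tactics check.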
- * Γ, φ(s1,...,sn) |- Δ - * ---------------------------------------- - * Γ, s1=t1, ..., sn=tn, φ(t1,...tn) |- Δ + * Γ, φ(s) |- Δ Σ |- s=t, Π + * -------------------------------- + * Γ, Σ φ(t) |- Δ, Π **/ object LeftSubstEq extends ProofTactic { - + @deprecated("Use withParameters instead", "0.9") def withParametersSimple(using lib: Library, proof: lib.Proof)( - equals: List[(F.Term, F.Term)], - lambdaPhi: F.LambdaExpression[F.Term, F.Formula, ?] + equals: Seq[(F.Expr[F.Ind], F.Expr[F.Ind])], + lambdaPhi: (Seq[F.Variable[?]], F.Expr[F.Prop]) )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - withParameters(equals.map { case (a, b) => (F.lambda(Seq(), a), F.lambda(Seq(), b)) }, (lambdaPhi.bounds.asInstanceOf[Seq[F.SchematicTermLabel[?]]], lambdaPhi.body))(premise)(bot) + withParameters(equals, lambdaPhi)(premise)(bot) } def withParameters(using lib: Library, proof: lib.Proof)( - equals: List[(F.LambdaExpression[F.Term, F.Term, ?], F.LambdaExpression[F.Term, F.Term, ?])], - lambdaPhi: (Seq[F.SchematicTermLabel[?]], F.Formula) + equals: Seq[(F.Expr[?], F.Expr[?])], + lambdaPhi: (Seq[F.Variable[?]], F.Expr[F.Prop]) )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying lazy val botK = bot.underlying - lazy val equalsK = equals.map(p => (p._1.underlyingLTT, p._2.underlyingLTT)) - lazy val lambdaPhiK = (lambdaPhi._1.map(_.underlyingLabel), lambdaPhi._2.underlying) + lazy val equalsK = equals.map(p => (p._1.underlying, p._2.underlying)) + lazy val lambdaPhiK = (lambdaPhi._1.map(_.underlying), lambdaPhi._2.underlying) val (s_es, t_es) = equalsK.unzip val (phi_args, phi_body) = lambdaPhiK - if (phi_args.size != s_es.size) - return proof.InvalidProofTactic("The number of arguments of φ must be the same as the number of equalities.") - else if (equalsK.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) - return proof.InvalidProofTactic("The arities of symbols in φ must be the same as the arities of equalities.") - - val phi_s = K.instantiateTermSchemas(phi_body, (phi_args zip s_es).toMap) - val phi_t = K.instantiateTermSchemas(phi_body, (phi_args zip t_es).toMap) - val sEqT_es = equalsK map { case (s, t) => - assert(s.vars.size == t.vars.size) - val base = K.AtomicFormula(K.equality, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(K.VariableTerm)))) - (s.vars).foldLeft(base: K.Formula) { case (acc, s_arg) => K.BinderFormula(K.Forall, s_arg, acc) } - } + if (phi_args.size != s_es.size) // Not strictly necessary, but it's a good sanity check. To reactivate when tactics have been modified. 
+ proof.InvalidProofTactic("The number of arguments of φ must be the same as the number of equalities.") + else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.sort != arg.sort || t.sort != arg.sort }) + proof.InvalidProofTactic("The arities of symbols in φ must be the same as the arities of equalities.") + else { + val phi_s_for_f = K.substituteVariables(phi_body, (phi_args zip s_es).toMap) + val phi_t_for_f = K.substituteVariables(phi_body, (phi_args zip t_es).toMap) + val sEqT_es = equalsK map { + case (s, t) => + val no = (s.freeVariables ++ t.freeVariables).view.map(_.id.no).max+1 + val vars = (no until no+s.sort.depth).map(i => K.Variable(K.Identifier("x", i), K.Ind)) + val inner1 = vars.foldLeft(s)(_(_)) + val inner2 = vars.foldLeft(t)(_(_)) + val base = if (inner1.sort == K.Prop) K.iff(inner1)(inner2) else K.equality(inner1)(inner2) + vars.foldLeft(base : K.Expression) { case (acc, s_arg) => K.forall(s_arg, acc) } + } - if (!K.isSameSet(botK.right, premiseSequent.right)) - proof.InvalidProofTactic("Right-hand side of the premise is not the same as the right-hand side of the conclusion.") - else if ( - !K.isSameSet(botK.left + phi_s, premiseSequent.left ++ sEqT_es + phi_t) && - !K.isSameSet(botK.left + phi_t, premiseSequent.left ++ sEqT_es + phi_s) - ) - proof.InvalidProofTactic("Left-hand side of the conclusion + φ(s_) is not the same as left-hand side of the premise + (s=t)_ + φ(t_) (or with s_ and t_ swapped).") - else - proof.ValidProofTactic(bot, Seq(K.LeftSubstEq(botK, -1, equalsK, lambdaPhiK)), Seq(premise)) + if (K.isSameSet(botK.right, premiseSequent.right)) then + if ( + K.isSameSet(botK.left + phi_t_for_f, premiseSequent.left ++ sEqT_es + phi_s_for_f) || + K.isSameSet(botK.left + phi_s_for_f, premiseSequent.left ++ sEqT_es + phi_t_for_f) + ) + proof.ValidProofTactic(bot, Seq(K.LeftSubstEq(botK, -1, equalsK, lambdaPhiK)), Seq(premise)) + else + proof.InvalidProofTactic("Left-hand sides of the conclusion + φ(s_) must be the same as left-hand side of the premise + (s=t)_ + φ(t_) (or with s_ and t_ swapped).") + else proof.InvalidProofTactic("Right-hand sides of the premise and the conclusion aren't the same.") + } } } + /** *
- * Γ |- φ(s1,...,sn), Δ - * ---------------------------------------- - * Γ, s1=t1, ..., sn=tn |- φ(t1,...tn), Δ + * Γ |- φ(s), Δ Σ |- s=t, Π + * --------------------------------- + * Γ, Σ |- φ(t), Δ, Π **/ object RightSubstEq extends ProofTactic { + @deprecated("Use withParameters instead", "0.9") def withParametersSimple(using lib: Library, proof: lib.Proof)( - equals: List[(F.Term, F.Term)], - lambdaPhi: F.LambdaExpression[F.Term, F.Formula, ?] + equals: Seq[(F.Expr[F.Ind], F.Expr[F.Ind])], + lambdaPhi: (Seq[F.Variable[?]], F.Expr[F.Prop]) )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - withParameters(equals.map { case (a, b) => (F.lambda(Seq(), a), F.lambda(Seq(), b)) }, (lambdaPhi.bounds.asInstanceOf[Seq[F.SchematicTermLabel[?]]], lambdaPhi.body))(premise)(bot) + withParameters(equals, lambdaPhi)(premise)(bot) } def withParameters(using lib: Library, proof: lib.Proof)( - equals: List[(F.LambdaExpression[F.Term, F.Term, ?], F.LambdaExpression[F.Term, F.Term, ?])], - lambdaPhi: (Seq[F.SchematicTermLabel[?]], F.Formula) + equals: Seq[(F.Expr[?], F.Expr[?])], + lambdaPhi: (Seq[F.Variable[?]], F.Expr[F.Prop]) )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { lazy val premiseSequent = proof.getSequent(premise).underlying lazy val botK = bot.underlying - lazy val equalsK = equals.map(p => (p._1.underlyingLTT, p._2.underlyingLTT)) - lazy val lambdaPhiK = (lambdaPhi._1.map(_.underlyingLabel), lambdaPhi._2.underlying) + lazy val equalsK = equals.map(p => (p._1.underlying, p._2.underlying)) + lazy val lambdaPhiK = (lambdaPhi._1.map(_.underlying), lambdaPhi._2.underlying) val (s_es, t_es) = equalsK.unzip val (phi_args, phi_body) = lambdaPhiK - if (phi_args.size != s_es.size) - return proof.InvalidProofTactic("The number of arguments of φ must be the same as the number of equalities.") - else if (equalsK.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) - return proof.InvalidProofTactic("The arities of symbols in φ must be the same as the arities of equalities.") - - val phi_s = K.instantiateTermSchemas(phi_body, (phi_args zip s_es).toMap) - val phi_t = K.instantiateTermSchemas(phi_body, (phi_args zip t_es).toMap) - val sEqT_es = equalsK map { case (s, t) => - assert(s.vars.size == t.vars.size) - val base = K.AtomicFormula(K.equality, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(K.VariableTerm)))) - (s.vars).foldLeft(base: K.Formula) { case (acc, s_arg) => K.BinderFormula(K.Forall, s_arg, acc) } - } - - if (!K.isSameSet(botK.left, premiseSequent.left ++ sEqT_es)) - proof.InvalidProofTactic("Left-hand side of the conclusion is not the same as the left-hand side of the premise + (s=t)_.") - else if ( - !K.isSameSet(botK.right + phi_s, premiseSequent.right + phi_t) && - !K.isSameSet(botK.right + phi_t, premiseSequent.right + phi_s) - ) - proof.InvalidProofTactic("Right-hand side of the conclusion + φ(s_) is not the same as right-hand side of the premise + φ(t_) (or with s_ and t_ swapped).") - else - proof.ValidProofTactic(bot, Seq(K.RightSubstEq(botK, -1, equalsK, lambdaPhiK)), Seq(premise)) - - } - - } - - /** - *
- * Γ, φ(a1,...an) |- Δ - * ---------------------------------------- - * Γ, a1⇔b1, ..., an⇔bn, φ(b1,...bn) |- Δ - *- */ - object LeftSubstIff extends ProofTactic { - def withParametersSimple(using lib: Library, proof: lib.Proof)( - equals: List[(F.Formula, F.Formula)], - lambdaPhi: F.LambdaExpression[F.Formula, F.Formula, ?] - )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - withParameters(equals.map { case (a, b) => (F.lambda(Seq(), a), F.lambda(Seq(), b)) }, (lambdaPhi.bounds.asInstanceOf[Seq[F.SchematicAtomicLabel[?]]], lambdaPhi.body))(premise)(bot) - } - - def withParameters(using lib: Library, proof: lib.Proof)( - equals: List[(F.LambdaExpression[F.Term, F.Formula, ?], F.LambdaExpression[F.Term, F.Formula, ?])], - lambdaPhi: (Seq[F.SchematicAtomicLabel[?]], F.Formula) - )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - - lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val botK = bot.underlying - lazy val equalsK = equals.map(p => (p._1.underlyingLTF, p._2.underlyingLTF)) - lazy val lambdaPhiK = (lambdaPhi._1.map(_.underlyingLabel), lambdaPhi._2.underlying) + if (phi_args.size != s_es.size) // Not strictly necessary, but it's a good sanity check. To reactivate when tactics have been modified. + proof.InvalidProofTactic("The number of arguments of φ must be the same as the number of equalities.") + else if (equals.zip(phi_args).exists { case ((s, t), arg) => s.sort != arg.sort || t.sort != arg.sort }) + proof.InvalidProofTactic("The arities of symbols in φ must be the same as the arities of equalities.") + else { + val phi_s_for_f = K.substituteVariables(phi_body, (phi_args zip s_es).toMap) + val phi_t_for_f = K.substituteVariables(phi_body, (phi_args zip t_es).toMap) + val sEqT_es = equalsK map { + case (s, t) => + val no = (s.freeVariables ++ t.freeVariables).view.map(_.id.no).max+1 + val vars = (no until no+s.sort.depth).map(i => K.Variable(K.Identifier("x", i), K.Ind)) + val inner1 = vars.foldLeft(s)(_(_)) + val inner2 = vars.foldLeft(t)(_(_)) + val base = if (inner1.sort == K.Prop) K.iff(inner1)(inner2) else K.equality(inner1)(inner2) + vars.foldLeft(base : K.Expression) { case (acc, s_arg) => K.forall(s_arg, acc) } + } - val (psi_s, tau_s) = equalsK.unzip - val (phi_args, phi_body) = lambdaPhiK - if (phi_args.size != psi_s.size) - return proof.InvalidProofTactic("The number of arguments of φ must be the same as the number of equalities.") - else if (equalsK.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) - return proof.InvalidProofTactic("The arities of symbols in φ must be the same as the arities of equalities.") - - val phi_psi = K.instantiatePredicateSchemas(phi_body, (phi_args zip psi_s).toMap) - val phi_tau = K.instantiatePredicateSchemas(phi_body, (phi_args zip tau_s).toMap) - val psiIffTau = equalsK map { case (s, t) => - assert(s.vars.size == t.vars.size) - val base = K.ConnectorFormula(K.Iff, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(K.VariableTerm)))) - (s.vars).foldLeft(base: K.Formula) { case (acc, s_arg) => K.BinderFormula(K.Forall, s_arg, acc) } + if (K.isSameSet(botK.left, premiseSequent.left ++ sEqT_es)) + if ( + K.isSameSet(botK.right + phi_t_for_f, premiseSequent.right + phi_s_for_f) || + K.isSameSet(botK.right + phi_s_for_f, premiseSequent.right + phi_t_for_f) + ) + proof.ValidProofTactic(bot, Seq(K.RightSubstEq(botK, -1, equalsK, lambdaPhiK)), Seq(premise)) + else + proof.InvalidProofTactic("Right-hand side of the premise 
and the conclusion should be the same with each containing one of φ(s_) φ(t_), but it isn't the case.") + else proof.InvalidProofTactic("Left-hand sides of the premise + (s=t)_ must be the same as left-hand side of the premise.") } - - if (!K.isSameSet(botK.right, premiseSequent.right)) - proof.InvalidProofTactic("Right-hand side of the premise is not the same as the right-hand side of the conclusion.") - else if ( - !K.isSameSet(botK.left + phi_psi, premiseSequent.left ++ psiIffTau + phi_tau) && - !K.isSameSet(botK.left + phi_tau, premiseSequent.left ++ psiIffTau + phi_psi) - ) - proof.InvalidProofTactic("Left-hand side of the conclusion + φ(ψ_) is not the same as left-hand side of the premise + (ψ ⇔ τ)_ + φ(τ_) (or with ψ_ and τ_ swapped).") - else - proof.ValidProofTactic(bot, Seq(K.LeftSubstIff(botK, -1, equalsK, lambdaPhiK)), Seq(premise)) - } - } - - /** - *
- * Γ |- φ(a1,...an), Δ - * ---------------------------------------- - * Γ, a1⇔b1, ..., an⇔bn |- φ(b1,...bn), Δ - *- */ - object RightSubstIff extends ProofTactic { - def withParametersSimple(using lib: Library, proof: lib.Proof)( - equals: List[(F.Formula, F.Formula)], - lambdaPhi: F.LambdaExpression[F.Formula, F.Formula, ?] - )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - withParameters(equals.map { case (a, b) => (F.lambda(Seq(), a), F.lambda(Seq(), b)) }, (lambdaPhi.bounds.asInstanceOf[Seq[F.SchematicAtomicLabel[?]]], lambdaPhi.body))(premise)(bot) - } - - def withParameters(using lib: Library, proof: lib.Proof)( - equals: List[(F.LambdaExpression[F.Term, F.Formula, ?], F.LambdaExpression[F.Term, F.Formula, ?])], - lambdaPhi: (Seq[F.SchematicAtomicLabel[?]], F.Formula) - )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - - lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val botK = bot.underlying - lazy val equalsK = equals.map(p => (p._1.underlyingLTF, p._2.underlyingLTF)) - lazy val lambdaPhiK = (lambdaPhi._1.map(_.underlyingLabel), lambdaPhi._2.underlying) - - val (psi_s, tau_s) = equalsK.unzip - val (phi_args, phi_body) = lambdaPhiK - if (phi_args.size != psi_s.size) - return proof.InvalidProofTactic("The number of arguments of φ must be the same as the number of equalities.") - else if (equalsK.zip(phi_args).exists { case ((s, t), arg) => s.vars.size != arg.arity || t.vars.size != arg.arity }) - return proof.InvalidProofTactic("The arities of symbols in φ must be the same as the arities of equalities.") - - val phi_psi = K.instantiatePredicateSchemas(phi_body, (phi_args zip psi_s).toMap) - val phi_tau = K.instantiatePredicateSchemas(phi_body, (phi_args zip tau_s).toMap) - val psiIffTau = equalsK map { case (s, t) => - assert(s.vars.size == t.vars.size) - val base = K.ConnectorFormula(K.Iff, Seq(s.body, if (s.vars == t.vars) t.body else t(s.vars.map(K.VariableTerm)))) - (s.vars).foldLeft(base: K.Formula) { case (acc, s_arg) => K.BinderFormula(K.Forall, s_arg, acc) } - } - - if (!K.isSameSet(botK.left, premiseSequent.left ++ psiIffTau)) { - proof.InvalidProofTactic("Left-hand side of the conclusion is not the same as the left-hand side of the premise + (ψ ⇔ τ)_.") - } else if ( - !K.isSameSet(botK.right + phi_psi, premiseSequent.right + phi_tau) && - !K.isSameSet(botK.right + phi_tau, premiseSequent.right + phi_psi) - ) - proof.InvalidProofTactic("Right-hand side of the conclusion + φ(ψ_) is not the same as right-hand side of the premise + φ(τ_) (or with ψ_ and τ_ swapped).") - else - proof.ValidProofTactic(bot, Seq(K.RightSubstIff(botK, -1, equalsK, lambdaPhiK)), Seq(premise)) } } + + @deprecated("Use LeftSubstEq instead", "0.9") + val LeftSubstIff = LeftSubstEq + @deprecated("Use RightSubstEq instead", "0.9") + val RightSubstIff = RightSubstEq /** *
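Because the "equalities" may now be Prop-sorted (in which case sEqT_es above produces an iff rather than an equality), LeftSubstIff and RightSubstIff can become the deprecated aliases declared above. A sketch of RightSubstEq used for an iff substitution; a, b, c and p are assumed Prop-sorted declarations.

// Hypothetical: a, b, c, p are Prop-sorted variables declared with the usual helpers.
val iffSubstSketch = Theorem((a /\ c, a <=> b) |- (b /\ c)) {
  have((a /\ c) |- (a /\ c)) by Hypothesis
  // The pair (a, b) is Prop-sorted, so the generated side hypothesis is a <=> b;
  // φ is ([p], p /\ c), read at a in the premise and at b in the conclusion.
  thenHave((a /\ c, a <=> b) |- (b /\ c)) by RightSubstEq.withParameters(Seq((a, b)), (Seq(p), p /\ c))
}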
@@ -1320,87 +1306,20 @@ object BasicStepTactic { * Γ[r(a)/?f] |- Δ[r(a)/?f] **/ - object InstFunSchema extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)( - insts: Map[F.SchematicFunctionLabel[?] | F.Variable, F.LambdaExpression[F.Term, F.Term, ?]] - )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val botK = bot.underlying - val instsK = insts.map((sl, le) => - sl match { - case v: F.Variable => (v.underlyingLabel, F.underlyingLTT(le)) - case sfl: F.SchematicFunctionLabel[?] => (sfl.underlyingLabel, F.underlyingLTT(le)) - } - ) + object InstSchema extends ProofTactic { + def unsafe(using lib: Library, proof: lib.Proof)(map: Map[F.Variable[?], F.Expr[?]])(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + val premiseSequent = proof.getSequent(premise).underlying + val mapK = map.map((v, e) => (v.underlying, e.underlying)) + val botK = K.substituteVariablesInSequent(premiseSequent, mapK) + val res = proof.getSequent(premise).substituteUnsafe(map) + proof.ValidProofTactic(res, Seq(K.InstSchema(botK, -1, mapK)), Seq(premise)) - if (!K.isSameSet(botK.left, premiseSequent.left.map(K.instantiateTermSchemas(_, instsK)))) - proof.InvalidProofTactic("Left-hand side of premise instantiated with the map 'insts' is not the same as left-hand side of conclusion.") - else if (!K.isSameSet(botK.right, premiseSequent.right.map(K.instantiateTermSchemas(_, instsK)))) - proof.InvalidProofTactic("Right-hand side of premise instantiated with the map 'insts' is not the same as right-hand side of conclusion.") - else - proof.ValidProofTactic(bot, Seq(K.InstSchema(botK, -1, Map.empty, Map.empty, instsK)), Seq(premise)) - } - } + def apply(using lib: Library, proof: lib.Proof)(subst: F.SubstPair*)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = + val map = subst.map(p => (p._1, p._2)).toMap + unsafe(using lib, proof)(map)(premise)(bot) - /** - *
- * Γ |- Δ - * -------------------------- - * Γ[ψ(a)/?p] |- Δ[ψ(a)/?p] - *- */ - object InstPredSchema extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)( - insts: Map[F.SchematicPredicateLabel[?] | F.VariableFormula, F.LambdaExpression[F.Term, F.Formula, ?]] - )(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { - lazy val premiseSequent = proof.getSequent(premise).underlying - lazy val botK = bot.underlying - val instsK = insts.map((sl, le) => - sl match { - case v: F.VariableFormula => (v.underlyingLabel, F.underlyingLTF(le)) - case sfl: F.SchematicPredicateLabel[?] => (sfl.underlyingLabel, F.underlyingLTF(le)) - } - ) + - if (!K.isSameSet(botK.left, premiseSequent.left.map(K.instantiatePredicateSchemas(_, instsK)))) - proof.InvalidProofTactic("Left-hand side of premise instantiated with the map 'insts' is not the same as left-hand side of conclusion.") - else if (!K.isSameSet(botK.right, premiseSequent.right.map(K.instantiatePredicateSchemas(_, instsK)))) - proof.InvalidProofTactic("Right-hand side of premise instantiated with the map 'insts' is not the same as right-hand side of conclusion.") - else - proof.ValidProofTactic(bot, Seq(K.InstSchema(botK, -1, Map.empty, instsK, Map.empty)), Seq(premise)) - } - } - - object InstSchema extends ProofTactic { - def apply(using - lib: Library, - proof: lib.Proof - )( - mCon: Map[F.SchematicConnectorLabel[?], F.LambdaExpression[F.Formula, F.Formula, ?]], - mPred: Map[F.SchematicPredicateLabel[?] | F.VariableFormula, F.LambdaExpression[F.Term, F.Formula, ?]], - mTerm: Map[F.SchematicFunctionLabel[?] | F.Variable, F.LambdaExpression[F.Term, F.Term, ?]] - )( - premise: proof.Fact - ): proof.ProofTacticJudgement = { - val premiseSequent = proof.getSequent(premise).underlying - val mConK = mCon.map((sl, le) => (sl.underlyingLabel, F.underlyingLFF(le))) - val mPredK = mPred.map((sl, le) => - sl match { - case v: F.VariableFormula => (v.underlyingLabel, F.underlyingLTF(le)) - case spl: F.SchematicPredicateLabel[?] => (spl.underlyingLabel, F.underlyingLTF(le)) - } - ) - val mTermK = mTerm.map((sl, le) => - sl match { - case v: F.Variable => (v.underlyingLabel, F.underlyingLTT(le)) - case sfl: F.SchematicFunctionLabel[?] 
=> (sfl.underlyingLabel, F.underlyingLTT(le)) - } - ) - val botK = instantiateSchemaInSequent(premiseSequent, mConK, mPredK, mTermK) - val smap = Map[F.SchematicLabel[?], F.LisaObject[?]]() ++ mCon ++ mPred ++ mTerm - val res = proof.getSequent(premise).substituteUnsafe(smap) - proof.ValidProofTactic(res, Seq(K.InstSchema(botK, -1, mConK, mPredK, mTermK)), Seq(premise)) - } } object Subproof extends ProofTactic { def apply(using proof: Library#Proof)(statement: Option[F.Sequent])(iProof: proof.InnerProof) = { @@ -1414,7 +1333,7 @@ object BasicStepTactic { proof.ValidProofTactic(iProof.mostRecentStep.bot, scproof.steps, premises) else if (!K.isSameSequent(botK.get, scproof.conclusion)) proof.InvalidProofTactic( - s"The subproof does not prove the desired conclusion.\n\tExpected: ${FOLPrinter.prettySequent(botK.get)}\n\tObtained: ${FOLPrinter.prettySequent(scproof.conclusion)}" + s"The subproof does not prove the desired conclusion.\n\tExpected: ${botK.get.repr}\n\tObtained: ${scproof.conclusion.repr}" ) else proof.ValidProofTactic(bot.get, scproof.steps :+ K.Restate(botK.get, scproof.length - 1), premises) @@ -1422,7 +1341,6 @@ object BasicStepTactic { judgement } } - class SUBPROOF(using val proof: Library#Proof)(statement: Option[F.Sequent])(val iProof: proof.InnerProof) extends ProofTactic { val bot: Option[F.Sequent] = statement val botK: Option[K.Sequent] = statement map (_.underlying) @@ -1435,7 +1353,7 @@ object BasicStepTactic { if (botK.isEmpty) proof.ValidProofTactic(iProof.mostRecentStep.bot, scproof.steps, premises) else if (!K.isSameSequent(botK.get, scproof.conclusion)) - proof.InvalidProofTactic(s"The subproof does not prove the desired conclusion.\n\tExpected: ${FOLPrinter.prettySequent(botK.get)}\n\tObtained: ${FOLPrinter.prettySequent(scproof.conclusion)}") + proof.InvalidProofTactic(s"The subproof does not prove the desired conclusion.\n\tExpected: ${botK.get.repr}\n\tObtained: ${scproof.conclusion.repr}") else proof.ValidProofTactic(bot.get, scproof.steps :+ K.Restate(botK.get, scproof.length - 1), premises) } @@ -1454,4 +1372,5 @@ object BasicStepTactic { } } + } diff --git a/lisa-utils/src/main/scala/lisa/utils/prooflib/Exports.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/Exports.scala new file mode 100644 index 000000000..235f4c312 --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/Exports.scala @@ -0,0 +1,6 @@ +package lisa.utils.prooflib + +object Exports { + export BasicStepTactic.* + export lisa.utils.prooflib.SimpleDeducedSteps.* +} diff --git a/lisa-utils/src/main/scala/lisa/prooflib/Library.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/Library.scala similarity index 51% rename from lisa-utils/src/main/scala/lisa/prooflib/Library.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/Library.scala index 9d64e2784..4234e54cd 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/Library.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/Library.scala @@ -1,10 +1,10 @@ -package lisa.prooflib +package lisa.utils.prooflib import lisa.kernel.proof.RunningTheory import lisa.kernel.proof.SCProofChecker import lisa.kernel.proof.SCProofCheckerJudgement import lisa.kernel.proof.SequentCalculus -import lisa.prooflib.ProofTacticLib.ProofTactic +//import lisa.utils.prooflib.ProofTacticLib.ProofTactic import lisa.utils.KernelHelpers.{_, given} import lisa.utils.{_, given} @@ -15,7 +15,7 @@ import scala.collection.mutable.Stack as stack * to write and use Theorems and Definitions. 
* @param theory The inner RunningTheory */ -abstract class Library extends lisa.prooflib.WithTheorems with lisa.prooflib.ProofsHelpers { +abstract class Library extends lisa.utils.prooflib.WithTheorems with lisa.utils.prooflib.ProofsHelpers { val theory: RunningTheory given library: this.type = this @@ -25,7 +25,7 @@ abstract class Library extends lisa.prooflib.WithTheorems with lisa.prooflib.Pro val K = lisa.utils.K val SC: SequentCalculus.type = K.SC - private[prooflib] val F = lisa.fol.FOL + private[prooflib] val F = lisa.utils.fol.FOL import F.{given} var last: Option[JUSTIFICATION] = None @@ -42,23 +42,18 @@ abstract class Library extends lisa.prooflib.WithTheorems with lisa.prooflib.Pro else _draft = Some(file) def isDraft = _draft.nonEmpty - val knownDefs: scala.collection.mutable.Map[F.ConstantLabel[?], Option[JUSTIFICATION]] = scala.collection.mutable.Map.empty - val shortDefs: scala.collection.mutable.Map[F.ConstantLabel[?], Option[JUSTIFICATION]] = scala.collection.mutable.Map.empty + val knownDefs: scala.collection.mutable.Map[F.Constant[?], Option[JUSTIFICATION]] = scala.collection.mutable.Map.empty + val shortDefs: scala.collection.mutable.Map[F.Constant[?], Option[JUSTIFICATION]] = scala.collection.mutable.Map.empty - def addSymbol(s: F.ConstantFunctionLabel[?] | F.ConstantPredicateLabel[?] | F.Constant): Unit = { - s match { - case s: F.ConstantFunctionLabel[?] => theory.addSymbol(s.underlyingLabel) - case s: F.ConstantPredicateLabel[?] => theory.addSymbol(s.underlyingLabel) - case s: F.Constant => theory.addSymbol(s.underlyingLabel) - } + def addSymbol(s: F.Constant[?]): Unit = + theory.addSymbol(s.underlying) knownDefs.update(s, None) - } - def getDefinition(label: F.ConstantLabel[?]): Option[JUSTIFICATION] = knownDefs.get(label) match { + def getDefinition(label: F.Constant[?]): Option[JUSTIFICATION] = knownDefs.get(label) match { case None => throw new UserLisaException.UndefinedSymbolException("Unknown symbol", label, this) case Some(value) => value } - def getShortDefinition(label: F.ConstantLabel[?]): Option[JUSTIFICATION] = shortDefs.get(label) match { + def getShortDefinition(label: F.Constant[?]): Option[JUSTIFICATION] = shortDefs.get(label) match { case None => throw new UserLisaException.UndefinedSymbolException("Unknown symbol", label, this) case Some(value) => value } @@ -71,38 +66,12 @@ abstract class Library extends lisa.prooflib.WithTheorems with lisa.prooflib.Pro // DEFINITION Syntax - /** - * Allows to create a definition by shortcut of a function symbol: - */ - def makeSimpleFunctionDefinition(symbol: String, expression: K.LambdaTermTerm): K.Judgement[theory.FunctionDefinition] = { - import K.* - val LambdaTermTerm(vars, body) = expression - - val out: VariableLabel = VariableLabel(freshId((vars.map(_.id) ++ body.schematicTermLabels.map(_.id)).toSet, "y")) - val proof: SCProof = simpleFunctionDefinition(expression, out) - theory.functionDefinition(symbol, LambdaTermFormula(vars, out === body), out, proof, out === body, Nil) - } - /** * Allows to create a definition by shortcut of a predicate symbol: */ - def makeSimplePredicateDefinition(symbol: String, expression: K.LambdaTermFormula): K.Judgement[theory.PredicateDefinition] = - theory.predicateDefinition(symbol, expression) - - private def simpleFunctionDefinition(expression: K.LambdaTermTerm, out: K.VariableLabel): K.SCProof = { - import K.{*, given} - val x = out - val LambdaTermTerm(vars, body) = expression - val xeb = x === body - val y = VariableLabel(freshId(body.freeVariables.map(_.id) ++ 
vars.map(_.id) + out.id, "y")) - val s0 = SC.RightRefl(() |- body === body, body === body) - val s1 = SC.Restate(() |- (xeb) <=> (xeb), 0) - val s2 = SC.RightForall(() |- forall(x, (xeb) <=> (xeb)), 1, (xeb) <=> (xeb), x) - val s3 = SC.RightExists(() |- exists(y, forall(x, (x === y) <=> (xeb))), 2, forall(x, (x === y) <=> (xeb)), y, body) - val s4 = SC.Restate(() |- existsOne(x, xeb), 3) - val v = Vector(s0, s1, s2, s3, s4) - K.SCProof(v) - } + def makeSimpleDefinition(symbol: String, expression: K.Expression): K.Judgement[theory.Definition] = + theory.definition(symbol, expression) + /** * Prints a short representation of the given theorem or definition diff --git a/lisa-utils/src/main/scala/lisa/prooflib/OutputManager.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/OutputManager.scala similarity index 92% rename from lisa-utils/src/main/scala/lisa/prooflib/OutputManager.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/OutputManager.scala index ce3f84f8b..eafb97a78 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/OutputManager.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/OutputManager.scala @@ -1,4 +1,4 @@ -package lisa.prooflib +package lisa.utils.prooflib import lisa.utils.KernelHelpers.{_, given} import lisa.utils.{_, given} @@ -25,7 +25,7 @@ abstract class OutputManager { case e: LisaException.InvalidKernelJustificationComputation => e.proof match { - case Some(value) => output(lisa.utils.ProofPrinter.prettyProof(value)) + case Some(value) => output(lisa.utils.prooflib.ProofPrinter.prettyProof(value)) case None => () } output(e.underlying.repr) diff --git a/lisa-utils/src/main/scala/lisa/utils/parsing/ProofPrinter.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofPrinter.scala similarity index 95% rename from lisa-utils/src/main/scala/lisa/utils/parsing/ProofPrinter.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/ProofPrinter.scala index ea2fe2ffd..a1daed3b9 100644 --- a/lisa-utils/src/main/scala/lisa/utils/parsing/ProofPrinter.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofPrinter.scala @@ -1,12 +1,11 @@ -package lisa.utils.parsing +package lisa.utils.prooflib import lisa.kernel.proof.SCProofCheckerJudgement -import lisa.prooflib.BasicStepTactic.SUBPROOF -import lisa.prooflib.Library -import lisa.prooflib.* +import lisa.utils.prooflib.BasicStepTactic.SUBPROOF +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.* import lisa.utils.* -//temporary - get merged wit regular printer in time object ProofPrinter { private def spaceSeparator(compact: Boolean): String = if (compact) "" else " " @@ -126,6 +125,4 @@ object ProofPrinter { def prettySimpleProof(proof: Library#Proof, error: Option[(IndexedSeq[Int], String)]): String = prettyProofLines(proof, error).mkString("\n") def prettySimpleProof(proof: Library#Proof, indent: Int, error: Option[(IndexedSeq[Int], String)]): String = prettyProofLines(proof, None).mkString("\n" + " " * indent) - // def prettyProof(judgement: InvalidProofTactic): String = prettyProof(judgement.tactic.proof) - } diff --git a/lisa-utils/src/main/scala/lisa/prooflib/ProofTacticLib.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofTacticLib.scala similarity index 91% rename from lisa-utils/src/main/scala/lisa/prooflib/ProofTacticLib.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/ProofTacticLib.scala index fb108de47..cdbfa25bf 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/ProofTacticLib.scala +++ 
b/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofTacticLib.scala @@ -1,14 +1,15 @@ -package lisa.prooflib +package lisa.utils.prooflib -import lisa.fol.FOL as F -import lisa.prooflib.* +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.* import lisa.utils.K -import lisa.utils.Printer import lisa.utils.UserLisaException +import lisa.utils.prooflib.ProofPrinter object ProofTacticLib { type Arity = Int & Singleton + /** * A ProofTactic is an object that relies on a step of premises and which can be translated into pure Sequent Calculus. */ @@ -46,7 +47,7 @@ object ProofTacticLib { val textline = source.getLines().drop(line.value - 1).next().dropWhile(c => c.isWhitespace) source.close() Console.RED + proof.owningTheorem.prettyGoal + Console.RESET + "\n" + - lisa.utils.ProofPrinter.prettyProof(proof, 2) + "\n" + + ProofPrinter.prettyProof(proof, 2) + "\n" + " " * (1 + proof.depth) + Console.RED + textline + Console.RESET + "\n\n" + s" Proof tactic ${tactic.name} used in.(${file.value.split("/").last.split("\\\\").last}:${line.value}) did not succeed:\n" + " " + errorMessage @@ -60,7 +61,7 @@ object ProofTacticLib { extends lisa.utils.LisaException(errorMessage) { def showError: String = "A proof tactic used in another proof tactic returned an unexpected error. This may indicate an implementation error in either of the two tactics.\n" + "Status of the proof at time of the error is:" + - lisa.utils.ProofPrinter.prettyProof(failure.proof) + ProofPrinter.prettyProof(failure.proof) } } diff --git a/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofsHelpers.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofsHelpers.scala new file mode 100644 index 000000000..6140b87ab --- /dev/null +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/ProofsHelpers.scala @@ -0,0 +1,356 @@ +package lisa.utils.prooflib + +import lisa.kernel.proof.SCProofChecker.checkSCProof +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.* +import lisa.utils.prooflib.SimpleDeducedSteps.* +import lisa.utils.prooflib.* +import lisa.utils.KernelHelpers.{_, given} +import lisa.utils.K.Identifier +import lisa.utils.LisaException +import lisa.utils.UserLisaException +import lisa.utils.{_, given} + +import scala.annotation.targetName + +trait ProofsHelpers { + library: Library & WithTheorems => + + import lisa.utils.fol.FOL.{given, *} + + class HaveSequent(val bot: Sequent) { + + inline infix def by(using proof: library.Proof, line: sourcecode.Line, file: sourcecode.File): By { val _proof: proof.type } = By(proof, line, file).asInstanceOf + + class By(val _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File) { + + val bot = HaveSequent.this.bot ++ (F.iterable_to_set(_proof.getAssumptions) |- ()) + inline infix def apply(tactic: Sequent => _proof.ProofTacticJudgement): _proof.ProofStep & _proof.Fact = { + tactic(bot).validate(line, file) + } + inline infix def apply(tactic: ProofSequentTactic): _proof.ProofStep = { + tactic(using library, _proof)(bot).validate(line, file) + } + } + + infix def subproof(using proof: Library#Proof, line: sourcecode.Line, file: sourcecode.File)(computeProof: proof.InnerProof ?=> Unit): proof.ProofStep = { + val botWithAssumptions = HaveSequent.this.bot ++ (proof.getAssumptions |- ()) + val iProof: proof.InnerProof = new proof.InnerProof(Some(botWithAssumptions)) + computeProof(using iProof) + (new BasicStepTactic.SUBPROOF(using proof)(Some(botWithAssumptions))(iProof)).judgement.validate(line, file).asInstanceOf[proof.ProofStep] + } + + 
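A sketch of the sequent-claiming DSL defined here: have(s) by tactic validates a tactic against s plus the current assumptions, thenHave feeds the previous step in as the premise, and subproof opens an inner proof that must conclude s. The variables a and b and the Weakening tactic are assumed from the standard LISA toolbox.

// Hypothetical usage; a and b are Prop-sorted variables declared with the usual helpers.
val dslSketch = Theorem((a, b) |- a) {
  have((a, b) |- a) subproof {
    have(a |- a) by Hypothesis         // HaveSequent.by with a ProofSequentTactic
    thenHave((a, b) |- a) by Weakening // AndThenSequent.by, premise = previous step
  }
}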
} + + class AndThenSequent private[ProofsHelpers] (val bot: Sequent) { + + inline infix def by(using proof: library.Proof, line: sourcecode.Line, file: sourcecode.File): By { val _proof: proof.type } = + By(proof, line, file).asInstanceOf[By { val _proof: proof.type }] + + class By(val _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File) { + private val bot = AndThenSequent.this.bot ++ (_proof.getAssumptions |- ()) + inline infix def apply(tactic: _proof.Fact => Sequent => _proof.ProofTacticJudgement): _proof.ProofStep = { + tactic(_proof.mostRecentStep)(bot).validate(line, file) + } + + inline infix def apply(tactic: ProofFactSequentTactic): _proof.ProofStep = { + tactic(using library, _proof)(_proof.mostRecentStep)(bot).validate(line, file) + } + + } + } + + /** + * Claim the given Sequent as a ProofTactic, which may require a justification by a proof tactic and premises. + */ + def have(using proof: library.Proof)(res: Sequent): HaveSequent = HaveSequent(res) + + def have(using line: sourcecode.Line, file: sourcecode.File)(using proof: library.Proof)(v: proof.Fact | proof.ProofTacticJudgement) = v match { + case judg: proof.ProofTacticJudgement => judg.validate(line, file) + case fact: proof.Fact @unchecked => HaveSequent(proof.sequentOfFact(fact)).by(using proof, line, file)(Rewrite(using library, proof)(fact)) + } + + /** + * Claim the given Sequent as a ProofTactic directly following the previously proven tactic, + * which may require a justification by a proof tactic. + */ + def thenHave(using proof: library.Proof)(res: Sequent): AndThenSequent = AndThenSequent(res) + + infix def andThen(using proof: library.Proof, line: sourcecode.Line, file: sourcecode.File): AndThen { val _proof: proof.type } = AndThen(proof, line, file).asInstanceOf + + class AndThen private[ProofsHelpers] (val _proof: library.Proof, line: sourcecode.Line, file: sourcecode.File) { + inline infix def apply(tactic: _proof.Fact => _proof.ProofTacticJudgement): _proof.ProofStep = { + tactic(_proof.mostRecentStep).validate(line, file) + } + inline infix def apply(tactic: ProofFactTactic): _proof.ProofStep = { + tactic(using library, _proof)(_proof.mostRecentStep).validate(line, file) + } + } + + + /* + /** + * Assume the given formula in all future left hand-side of claimed sequents. + */ + def assume(using proof: library.Proof)(f: Expr[Prop]): proof.ProofStep = { + proof.addAssumption(f) + have(() |- f) by BasicStepTactic.Hypothesis + } + */ + /** + * Assume the given formulas in all future left hand-side of claimed sequents. 
+ */ + def assume(using proof: library.Proof)(fs: Expr[Prop]*): proof.ProofStep = { + fs.foreach(f => proof.addAssumption(f)) + have(() |- fs.toSet) by BasicStepTactic.Hypothesis + } + + def thesis(using proof: library.Proof): Sequent = proof.possibleGoal.get + def goal(using proof: library.Proof): Sequent = proof.possibleGoal.get + + def lastStep(using proof: library.Proof): proof.ProofStep = proof.mostRecentStep + + def sorry(using proof: library.Proof): proof.ProofStep = have(thesis) by Sorry + + def showCurrentProof(using om: OutputManager, _proof: library.Proof)(): Unit = { + om.output("Current proof of " + _proof.owningTheorem.prettyGoal + ": ") + om.output( + ProofPrinter.prettyProof(_proof, 2) + ) + } + + extension (using proof: library.Proof)(fact: proof.Fact) { + infix def of(insts: (F.SubstPair | F.Expr[F.Ind])*): proof.InstantiatedFact = { + proof.InstantiatedFact(fact, insts) + } + def statement: F.Sequent = proof.sequentOfFact(fact) + } + + def currentProof(using p: library.Proof): Library#Proof = p + + + //////////////////////////////////////// + // DSL for definitions and theorems // + //////////////////////////////////////// + + class UserInvalidDefinitionException(val symbol: String, errorMessage: String)(using line: sourcecode.Line, file: sourcecode.File) extends UserLisaException(errorMessage) { // TODO refine + val showError: String = { + val source = scala.io.Source.fromFile(file.value) + val textline = source.getLines().drop(line.value - 1).next().dropWhile(c => c.isWhitespace) + source.close() + s" Definition of $symbol at.(${file.value.split("/").last.split("\\\\").last}:${line.value}) is invalid:\n" + + " " + Console.RED + textline + Console.RESET + "\n\n" + + " " + errorMessage + } + } + + + def leadingVarsAndBody(e: Expr[?]): (Seq[Variable[?]], Expr[?]) = + def inner(e: Expr[?]): (Seq[Variable[?]], Expr[?]) = e match + case Abs(v, body) => + val (vars, bodyR) = inner(body) + (v +: vars, bodyR) + case _ => (Seq(), e) + val r = inner(e) + (r._1, r._2) + + def DEF[S: Sort](using name: sourcecode.FullName)(using om: OutputManager, line: sourcecode.Line, file: sourcecode.File) + (e: Expr[S]): Constant[S] = + val (vars, body) = leadingVarsAndBody(e) + if vars.size == e.sort.depth then + DirectDefinition[S](name.value, line.value, file.value)(e, vars).cst + else + val maxV: Int = vars.maxBy(_.id.no).id.no + val maxB: Int = body.freeVars.maxBy(_.id.no).id.no + var no = List(maxV, maxB).max + val newvars = K.flatTypeParameters(body.sort).map(i =>{no+=1; Variable.unsafe(K.Identifier("x", no), i)}) + val totvars = vars ++ newvars + DirectDefinition[S](name.value, line.value, file.value)(e, totvars)(using F.unsafeSortEvidence(e.sort)).cst + + def EpsilonDEF[S: Sort](using om: OutputManager, name: sourcecode.FullName, line: sourcecode.Line, file: sourcecode.File) + (e: Expr[S], j: JUSTIFICATION): Constant[S] = + val (vars, body) = leadingVarsAndBody(e) + if vars.size == e.sort.depth then + body match + case epsilon(x, inner) => + EpsilonDefinition[S](name.value, line.value, file.value)(e, vars, j).cst + case _ => om.lisaThrow(UserInvalidDefinitionException(name.value, "The given expression is not an epsilon term.")) + else om.lisaThrow(UserInvalidDefinitionException(name.value, "The given expression is not an epsilon term.")) + + + + class DirectDefinition[S : Sort](using om: OutputManager)(val fullName: String, line: Int, file: String)(val expr: Expr[S], val vars: Seq[Variable[?]]) extends DEFINITION(line, file) { + + val arity = vars.size + + lazy val cst: Constant[S] = 
F.Constant(name) + + + val appliedCst: Expr[?] = cst#@@(vars) + + + val innerJustification: theory.Definition = { + import lisa.utils.K.{findUndefinedSymbols} + val uexpr = expr.underlying + val ucst = K.Constant(name, cst.sort) + val uvars = vars.map(_.underlying) + val judgement = theory.makeDefinition(ucst, uexpr, uvars) + judgement match { + case K.ValidJustification(just) => + just + case wrongJudgement: K.InvalidJustification[?] => + if (!theory.belongsToTheory(uexpr)) { + om.lisaThrow( + UserInvalidDefinitionException( + name, + s"All symbols in the definition must belong to the theory. The symbols ${theory.findUndefinedSymbols(uexpr)} are unknown and you need to define them first." + ) + ) + } + if !theory.isAvailable(ucst) then + om.lisaThrow(UserInvalidDefinitionException(name, s"The symbol ${name} has already been defined and can't be redefined.")) + if !uexpr.freeVariables.nonEmpty then + om.lisaThrow( + UserInvalidDefinitionException( + name, + s"The definition is not allowed to contain schematic symbols or free variables. " + + s"The variables {${(uexpr.freeVariables).mkString(", ")}} are free in the expression ${uexpr}." + ) + ) + if !theory.isAvailable(ucst) then + om.lisaThrow(UserInvalidDefinitionException(name, s"The symbol ${name} has already been defined and can't be redefined.")) + om.lisaThrow( + LisaException.InvalidKernelJustificationComputation( + "The final proof was rejected by LISA's logical kernel. This may be due to a faulty proof computation or an error in LISA.", + wrongJudgement, + None + ) + ) + } + } + val right = expr#@@(vars) + val statement = + if appliedCst.sort == K.Ind then + () |- (equality #@ appliedCst #@ right).asInstanceOf[Expr[Prop]] + else + () |- (iff #@ appliedCst #@ right).asInstanceOf[Expr[Prop]] + library.last = Some(this) + } + + def dropAllLambdas(s: Expr[?]): Expr[?] = s match { + case Abs(v, body) => dropAllLambdas(body) + case _ => s + } + + + /** + * For a list of sequence of variables x, y, z, creates the term with lambdas: + * λx.(λy.(λz. base)) + */ + def abstractVars(v: Seq[Variable[?]], body: Expr[?]): Expr[?] = + def inner(v: Seq[Variable[?]], body: Expr[?]) = v match + case Seq() => body + case x +: xs => Abs.unsafe(x, abstractVars(xs, body)) + inner(v.reverse, body) + + /** + * For a list of sequence of variables x, y, z, creates the term with lambdas: + * λx.(λy.(λz. base)) + */ + def applyVars(v: Seq[Variable[?]], body: Expr[?]): Expr[?] = v match + case Seq() => body + case x +: xs => applyVars(xs, body#@(x)) + + /** + * For a list of sequence of variables x, y, z, creates the term with lambdas: + * ((((λx.(λy.(λz. base))) x) y) z) + */ + def betaExpand(base: Expr[?], vars: Seq[Variable[?]]): Expr[?] = + applyVars(vars, abstractVars(vars.reverse, base)) + + + + /** + * Allows to make definitions "by unique existance" of a symbol. 
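An illustrative sketch of the DEF entry point defined above. Everything except DEF itself and the rough shape of the resulting defining fact is an assumption: the declaration helpers, the arrow-sort notation and whether lambdas are written with Abs or a dedicated lambda helper may differ in the final front end.

// Hypothetical: x is an Ind-sorted variable and f a constant of sort Ind >>: Ind.
val twice = DEF(Abs(x, f(f(x))))
// leadingVarsAndBody splits the argument into (Seq(x), f(f(x))); since the single leading
// lambda matches the sort depth, DirectDefinition introduces the constant and registers,
// via theory.makeDefinition, a defining statement equating twice(x) with the body at x,
// roughly  () |- twice(x) === f(f(x)).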
May need debugging + */ + class EpsilonDefinition[S: Sort](using om: OutputManager)(fullName: String, line: Int, file: String)( + expr: Expr[S], + vars: Seq[Variable[?]], + val j: JUSTIFICATION + ) extends DirectDefinition(fullName, line, file)(expr, vars) { + + val body: Expr[Ind] = dropAllLambdas(expr).asInstanceOf + override val appliedCst : Expr[Ind] = (cst#@@(vars)).asInstanceOf + val (epsilonVar, inner) = body match + case epsilon(x, inner) => (x, inner) + case _ => om.lisaThrow(UserInvalidDefinitionException(name, "The given expression is not an epsilon term.")) + + private val propCst = inner.substitute(epsilonVar := appliedCst) + private val propEpsilon = inner.substitute(epsilonVar := body) + val definingProp = Theorem(propCst) { + val fresh = freshId(vars, "x") + have(this) + + def loop(expr: Expr[?], leading: List[Variable[?]]) : Unit = + expr match { + case App(lam @ Abs(vAbs, body1: Expr[Ind]), _) => + val freshX = Variable.unsafe(fresh, body1.sort) + val right: Expr[Ind] = applyVars(leading.reverse, freshX).asInstanceOf[Expr[Ind]] + var instRight: Expr[Ind] = applyVars(leading.reverse, body1).asInstanceOf[Expr[Ind]] + thenHave(appliedCst === instRight) by Beta + case App(f, a: Variable[?]) => loop(expr, a :: leading) + case _ => throw new Exception("Unreachable") + } + while lastStep.bot.right.head match {case App(epsilon, _) => false; case _ => true} do + loop(lastStep.bot.right.head, List()) + val eqStep = lastStep // appliedCst === body + // j is exists(x, prop(x)) + val existsStep = ??? // have(propEpsilon) by // prop(body) + val s3 = have(appliedCst === body |- propCst) by RightSubstEq.withParameters(List((appliedCst, body)), (List(epsilonVar), inner))(lastStep) + val s4 = have(propCst) by Cut(s3, j) + ??? + } + + override def repr: String = + s" ${if (withSorry) " Sorry" else ""} Definition of symbol ${appliedCst} such that ${definingProp.statement})\n" + + } + + + + ///////////////////////// + // Local Definitions // + ///////////////////////// + + import lisa.utils.K.prettySCProof + import lisa.utils.KernelHelpers.apply + + /** + * A term with a definition, local to a proof. + * + * @param proof + * @param id + */ + abstract class LocalyDefinedVariable[S:Sort](val proof: library.Proof, id: Identifier) extends Variable(id) { + + val definition: proof.Fact + lazy val definingFormula = proof.sequentOfFact(definition).right.head + + // proof.addDefinition(this, defin(this), fact) + // val definition: proof.Fact = proof.getDefinition(this) + } + + /** + * Check correctness of the proof, using LISA's logical kernel, to the current point. + */ + def sanityProofCheck(using p: Proof)(message: String): Unit = { + val csc = p.toSCProof + if checkSCProof(csc).isValid then + println("Proof is valid. 
" + message) + Thread.sleep(100) + else + checkProof(csc) + throw Exception("Proof is not valid: " + message) + } + +} diff --git a/lisa-utils/src/main/scala/lisa/prooflib/SimpleDeducedSteps.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/SimpleDeducedSteps.scala similarity index 79% rename from lisa-utils/src/main/scala/lisa/prooflib/SimpleDeducedSteps.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/SimpleDeducedSteps.scala index d9a2cd984..7c1c02e71 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/SimpleDeducedSteps.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/SimpleDeducedSteps.scala @@ -1,37 +1,14 @@ -package lisa.prooflib - -import lisa.fol.FOLHelpers.* -import lisa.fol.FOL as F -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.ProofTacticLib.{_, given} -import lisa.prooflib.* -import lisa.utils.FOLParser +package lisa.utils.prooflib + +import lisa.utils.fol.FOL as F +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.ProofTacticLib.{_, given} +import lisa.utils.prooflib.* import lisa.utils.K import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.Printer object SimpleDeducedSteps { - object simpleFunctionDefinition extends ProofTactic { - - def apply[N <: Arity](using lib: Library, proof: lib.Proof)(expression: F.LambdaExpression[F.Term, F.Term, N], out: F.Variable) = { - val scp = { - import K.{*, given} - val x = out.underlyingLabel - val LambdaTermTerm(vars, body) = expression.underlyingLTT - val xeb = x === body - val y = VariableLabel(freshId(body.freeVariables.map(_.id) ++ vars.map(_.id) + out.id, "y")) - val s0 = K.RightRefl(() |- body === body, body === body) - val s1 = K.Restate(() |- (xeb) <=> (xeb), 0) - val s2 = K.RightForall(() |- forall(x, (xeb) <=> (xeb)), 1, (xeb) <=> (xeb), x) - val s3 = K.RightExists(() |- exists(y, forall(x, (x === y) <=> (xeb))), 2, forall(x, (x === y) <=> (xeb)), y, body) - val s4 = K.Restate(() |- existsOne(x, xeb), 3) - Vector(s0, s1, s2, s3, s4) - } - proof.ValidProofTactic(F.Sequent(Set(), Set(F.ExistsOne(out, out === expression.body))), scp, Seq()) - } - - } object Restate extends ProofTactic with ProofSequentTactic with ProofFactSequentTactic { def apply(using lib: Library, proof: lib.Proof)(bot: F.Sequent): proof.ProofTacticJudgement = @@ -87,7 +64,7 @@ object SimpleDeducedSteps { * Returns a subproof containing the instantiation steps */ object InstantiateForall extends ProofTactic with ProofSequentTactic { - def apply(using lib: Library, proof: lib.Proof)(phi: F.Formula, t: F.Term*)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(phi: F.Expr[F.Prop], t: F.Expr[F.Ind]*)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val botK = bot.underlying val phiK = phi.underlying val tK = t map (_.underlying) @@ -106,14 +83,14 @@ object SimpleDeducedSteps { // by construction the premise is well-formed // verify the formula structure and instantiate f match { - case psi @ K.BinderFormula(K.Forall, x, _) => - val tempVar = K.VariableLabel(K.freshId(psi.freeVariables.map(_.id), x.id)) + case psi @ K.Forall(x, inner) => + val tempVar = K.Variable(K.freshId(psi.freeVariables.map(_.id), x.id), K.Ind) // instantiate the formula with input - val in = instantiateBinder(psi, t) + val in = K.substituteVariables(inner, Map(x -> t)) val con = p.conclusion ->> f +>> in // construct proof val p0 = K.Hypothesis(in |- in, in) - val p1 = K.LeftForall(f |- in, 0, instantiateBinder(psi, tempVar), 
tempVar, t) + val p1 = K.LeftForall(f |- in, 0, K.substituteVariables(inner, Map(x -> tempVar)) , tempVar, t) val p2 = K.Cut(con, -1, 1, f) /** @@ -144,13 +121,13 @@ object SimpleDeducedSteps { if (K.isImplyingSequent(res._1.conclusion, botK)) proof.ValidProofTactic(bot, Seq(K.SCSubproof(res._1.withNewSteps(IndexedSeq(K.Weakening(botK, res._1.length - 1))), Seq(-1))), Seq(premise)) else - proof.InvalidProofTactic(s"InstantiateForall proved \n\t${FOLParser.printSequent(res._1.conclusion)}\ninstead of input sequent\n\t${FOLParser.printSequent(botK)}") + proof.InvalidProofTactic(s"InstantiateForall proved \n\t${res._1.conclusion.repr}\ninstead of input sequent\n\t${botK.repr}") } } } } - def apply(using lib: Library, proof: lib.Proof)(t: F.Term*)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(t: F.Expr[F.Ind]*)(premise: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val prem = proof.getSequent(premise) if (prem.right.tail.isEmpty) { // well formed @@ -173,6 +150,10 @@ object SimpleDeducedSteps { } } + + + + /* /** * Performs a cut when the formula to be used as pivot for the cut is @@ -189,7 +170,7 @@ object SimpleDeducedSteps { * */ object PartialCut extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)(phi: K.Formula, conjunction: K.Formula)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(phi: K.Prop, conjunction: K.Prop)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val leftSequent = proof.getSequent(prem1) val rightSequent = proof.getSequent(prem2) @@ -198,14 +179,14 @@ object SimpleDeducedSteps { if (rightSequent.left.contains(phi)) { // check conjunction matches with phi conjunction match { - case K.ConnectorFormula(K.And, s: Seq[K.Formula]) => { + case K.ConnectorFormula(K.And, s: Seq[K.Prop]) => { if (s.contains(phi)) { // construct proof - val psi: Seq[K.Formula] = s.filterNot(_ == phi) - val newConclusions: Set[K.Formula] = rightSequent.right.map((f: K.Formula) => K.ConnectorFormula(K.And, f +: psi)) + val psi: Seq[K.Prop] = s.filterNot(_ == phi) + val newConclusions: Set[K.Prop] = rightSequent.right.map((f: K.Prop) => K.ConnectorFormula(K.And, f +: psi)) - val Sigma: Set[K.Formula] = rightSequent.left - phi + val Sigma: Set[K.Prop] = rightSequent.left - phi val p0 = K.Weakening(rightSequent ++<< (psi |- ()), -2) val p1 = K.RestateTrue(psi |- psi) @@ -257,7 +238,7 @@ object SimpleDeducedSteps { } object destructRightAnd extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)(a: K.Formula, b: K.Formula)(prem: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(a: K.Prop, b: K.Prop)(prem: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val conc = proof.getSequent(prem) val p0 = K.Hypothesis(emptySeq +<< a +>> a, a) val p1 = K.LeftAnd(emptySeq +<< (a /\ b) +>> a, 0, a, b) @@ -266,7 +247,7 @@ object SimpleDeducedSteps { } } object destructRightOr extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)(a: K.Formula, b: K.Formula)(prem: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(a: K.Prop, b: K.Prop)(prem: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val conc = proof.getSequent(prem) val mat = conc.right.find(f => K.isSame(f, a \/ b)) if (mat.nonEmpty) { @@ -285,18 +266,18 @@ 
object SimpleDeducedSteps { } object GeneralizeToForall extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)(phi: K.Formula, t: K.VariableLabel*)(prem: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(phi: K.Prop, t: K.VariableLabel*)(prem: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val sequent = proof.getSequent(prem) if (sequent.right.contains(phi)) { val emptyProof = SCProof(IndexedSeq(), IndexedSeq(sequent)) val j = proof.ValidProofTactic(IndexedSeq(K.Restate(sequent, proof.length - 1)), Seq[proof.Fact]()) - val res = t.foldRight(emptyProof: SCProof, phi: K.Formula, j: proof.ProofTacticJudgement) { case (x1, (p1: SCProof, phi1, j1)) => + val res = t.foldRight(emptyProof: SCProof, phi: K.Prop, j: proof.ProofTacticJudgement) { case (x1, (p1: SCProof, phi1, j1)) => j1 match { case proof.InvalidProofTactic(_) => (p1, phi1, j1) case proof.ValidProofTactic(_, _) => { if (!p1.conclusion.right.contains(phi1)) - (p1, phi1, proof.InvalidProofTactic("Formula is not present in the lass sequent")) + (p1, phi1, proof.InvalidProofTactic("Prop is not present in the lass sequent")) val proofStep = K.RightForall(p1.conclusion ->> phi1 +>> forall(x1, phi1), p1.length - 1, phi1, x1) ( @@ -329,7 +310,7 @@ object SimpleDeducedSteps { } object ByCase extends ProofTactic { - def apply(using lib: Library, proof: lib.Proof)(phi: K.Formula)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { + def apply(using lib: Library, proof: lib.Proof)(phi: K.Prop)(prem1: proof.Fact, prem2: proof.Fact)(bot: F.Sequent): proof.ProofTacticJudgement = { val nphi = !phi val pa = proof.getSequent(prem1) diff --git a/lisa-utils/src/main/scala/lisa/prooflib/WithTheorems.scala b/lisa-utils/src/main/scala/lisa/utils/prooflib/WithTheorems.scala similarity index 91% rename from lisa-utils/src/main/scala/lisa/prooflib/WithTheorems.scala rename to lisa-utils/src/main/scala/lisa/utils/prooflib/WithTheorems.scala index 628ef7ffa..3d2f915c8 100644 --- a/lisa-utils/src/main/scala/lisa/prooflib/WithTheorems.scala +++ b/lisa-utils/src/main/scala/lisa/utils/prooflib/WithTheorems.scala @@ -1,15 +1,13 @@ -package lisa.prooflib +package lisa.utils.prooflib import lisa.kernel.proof.RunningTheory -import lisa.prooflib.ProofTacticLib.ProofTactic -import lisa.prooflib.ProofTacticLib.UnimplementedProof -import lisa.prooflib.* +import lisa.utils.prooflib.ProofTacticLib.ProofTactic +import lisa.utils.prooflib.ProofTacticLib.UnimplementedProof +import lisa.utils.prooflib.* import lisa.utils.KernelHelpers.{_, given} import lisa.utils.LisaException import lisa.utils.UserLisaException import lisa.utils.UserLisaException.* -import lisa.utils.parsing.FOLPrinter.prettySCProof -import lisa.utils.parsing.UnreachableException import scala.annotation.nowarn import scala.collection.mutable.Buffer as mBuf @@ -26,7 +24,8 @@ trait WithTheorems { * * @param assump list of starting assumptions, usually propagated from outer proofs. 
*/ - sealed abstract class Proof(assump: List[F.Formula]) { + sealed abstract class Proof(assump: List[F.Expr[F.Prop]]) { + val possibleGoal: Option[F.Sequent] type SelfType = this.type type OutsideFact >: JUSTIFICATION @@ -40,13 +39,13 @@ trait WithTheorems { */ case class InstantiatedFact( fact: Fact, - insts: Seq[F.SubstPair | F.Term] + insts: Seq[F.SubstPair | F.Expr[F.Ind]] ) { val baseFormula: F.Sequent = sequentOfFact(fact) val (result, proof) = { - val (terms, substPairs) = insts.partitionMap { - case t: F.Term => Left(t) - case sp: F.SubstPair => Right(sp) + val (terms, substPairs) = insts.partitionMap {e => + if e.isInstanceOf[F.Expr[?]] then Left(e.asInstanceOf[F.Expr[F.Ind]]) + else Right(e.asInstanceOf[F.SubstPair]) } val (s1, p1) = if substPairs.isEmpty then (baseFormula, Seq()) else baseFormula.instantiateWithProof(substPairs.map(sp => (sp._1, sp._2)).toMap, -1) @@ -61,8 +60,8 @@ trait WithTheorems { private var steps: List[ProofStep] = Nil private var imports: List[(OutsideFact, F.Sequent)] = Nil private var instantiatedFacts: List[(InstantiatedFact, Int)] = Nil - private var assumptions: List[F.Formula] = assump - private var eliminations: List[(F.Formula, (Int, F.Sequent) => List[K.SCProofStep])] = Nil + private var assumptions: List[F.Expr[F.Prop]] = assump + private var eliminations: List[(F.Expr[F.Prop], (Int, F.Sequent) => List[K.SCProofStep])] = Nil def cleanAssumptions: Unit = assumptions = Nil @@ -127,11 +126,11 @@ trait WithTheorems { * * @param f */ - def addAssumption(f: F.Formula): Unit = { + def addAssumption(f: F.Expr[F.Prop]): Unit = { if (!assumptions.contains(f)) assumptions = f :: assumptions } - def addElimination(f: F.Formula, elim: (Int, F.Sequent) => List[K.SCProofStep]): Unit = { + def addElimination(f: F.Expr[F.Prop], elim: (Int, F.Sequent) => List[K.SCProofStep]): Unit = { eliminations = (f, elim) :: eliminations } @@ -147,17 +146,6 @@ trait WithTheorems { ) ) } - /* - def addDefinition(v: LocalyDefinedVariable, defin: F.Formula): Unit = { - if localdefs.contains(v) then - throw new UserInvalidDefinitionException("v", "Variable already defined with" + v.definition + " in current proof") - else { - localdefs(v) = defin - addAssumption(defin) - } - } - def getDefinition(v: LocalyDefinedVariable): Fact = localdefs(v)._2 - */ // Getters @@ -176,7 +164,7 @@ trait WithTheorems { /** * @return The list of formulas that are assumed for the reminder of the proof. */ - def getAssumptions: List[F.Formula] = assumptions + def getAssumptions: List[F.Expr[F.Prop]] = assumptions /** * Produce the low level [[K.SCProof]] corresponding to the proof. Automatically eliminates any formula in the discharges that is still left of the sequent. @@ -275,7 +263,7 @@ trait WithTheorems { /** * The set of symbols that can't be instantiated because they are free in an assumption. */ - def lockedSymbols: Set[F.SchematicLabel[?]] = assumptions.toSet.flatMap(f => f.freeSchematicLabels.toSet) + def lockedSymbols: Set[F.Variable[?]] = assumptions.toSet.flatMap(f => f.freeVars.toSet) /** * Used to "lift" the type of a justification when the compiler can't infer it. 
@@ -326,7 +314,7 @@ trait WithTheorems { this match { case vpt: ValidProofTactic => newProofStep(vpt) case ipt: InvalidProofTactic => - val e = lisa.prooflib.ProofTacticLib.UnapplicableProofTactic(ipt.tactic, ipt.proof, ipt.message)(using line, file) + val e = lisa.utils.prooflib.ProofTacticLib.UnapplicableProofTactic(ipt.tactic, ipt.proof, ipt.message)(using line, file) e.setStackTrace(ipt.stack) throw e } @@ -336,7 +324,7 @@ trait WithTheorems { /** * A Kernel Sequent Calculus proof step that has been correctly produced. */ - case class ValidProofTactic(bot: lisa.fol.FOL.Sequent, scps: Seq[K.SCProofStep], imports: Seq[Fact])(using val tactic: ProofTactic) extends ProofTacticJudgement {} + case class ValidProofTactic(bot: lisa.utils.fol.FOL.Sequent, scps: Seq[K.SCProofStep], imports: Seq[Fact])(using val tactic: ProofTactic) extends ProofTacticJudgement {} /** * A proof step which led to an error when computing the corresponding K.Sequent Calculus proof step. @@ -345,12 +333,14 @@ trait WithTheorems { private val nstack = Throwable() val stack: Array[StackTraceElement] = nstack.getStackTrace.drop(2) } + } /** * Top-level instance of [[Proof]] directly proving a theorem */ sealed class BaseProof(val owningTheorem: THMFromProof) extends Proof(Nil) { + val goal: F.Sequent = owningTheorem.goal val possibleGoal: Option[F.Sequent] = Some(goal) type OutsideFact = JUSTIFICATION @@ -359,6 +349,7 @@ trait WithTheorems { override def sequentOfOutsideFact(j: JUSTIFICATION): F.Sequent = j.statement def justifications: List[JUSTIFICATION] = getImports.map(_._1) + } /** @@ -401,8 +392,7 @@ trait WithTheorems { */ def withSorry: Boolean = innerJustification match { case thm: theory.Theorem => thm.withSorry - case fd: theory.FunctionDefinition => fd.withSorry - case pd: theory.PredicateDefinition => false + case d: theory.Definition => false case ax: theory.Axiom => false } } @@ -410,7 +400,7 @@ trait WithTheorems { /** * A Justification, corresponding to [[K.Axiom]] */ - class AXIOM(innerAxiom: theory.Axiom, val axiom: F.Formula, val fullName: String) extends JUSTIFICATION { + class AXIOM(innerAxiom: theory.Axiom, val axiom: F.Expr[F.Prop], val fullName: String) extends JUSTIFICATION { def innerJustification: theory.Axiom = innerAxiom val statement: F.Sequent = F.Sequent(Set(), Set(axiom)) if (statement.underlying != theory.sequentFromJustification(innerAxiom)) { @@ -426,7 +416,7 @@ trait WithTheorems { * @param axiom The axiomatized formula. * @return */ - def Axiom(using fullName: sourcecode.FullName)(axiom: F.Formula): AXIOM = { + def Axiom(using fullName: sourcecode.FullName)(axiom: F.Expr[F.Prop]): AXIOM = { val ax: Option[theory.Axiom] = theory.addAxiom(fullName.value, axiom.underlying) ax match { case None => throw new InvalidAxiomException("Not all symbols belong to the theory", fullName.value, axiom, library) @@ -441,8 +431,8 @@ trait WithTheorems { val fullName: String def repr: String = innerJustification.repr - def label: F.ConstantLabel[?] - knownDefs.update(label, Some(this)) + def cst: F.Constant[?] 
+ knownDefs.update(cst, Some(this)) } @@ -467,7 +457,7 @@ trait WithTheorems { /** * A pretty representation of the goal of the theorem */ - def prettyGoal: String = lisa.utils.FOLPrinter.prettySequent(statement.underlying) + def prettyGoal: String = statement.underlying.repr } object THM { @@ -596,7 +586,7 @@ trait WithTheorems { } if (proof.length == 0) - om.lisaThrow(new UnimplementedProof(this)) + then om.lisaThrow(new UnimplementedProof(this)) val scp = proof.toSCProof val justifs = proof.getImports.map(e => (e._1.owner, e._1.innerJustification)) @@ -612,6 +602,7 @@ trait WithTheorems { ) ) } + } } diff --git a/lisa-utils/src/main/scala/lisa/utils/tptp/ProofParser.scala b/lisa-utils/src/main/scala/lisa/utils/tptp/ProofParser.scala deleted file mode 100644 index 162313ebb..000000000 --- a/lisa-utils/src/main/scala/lisa/utils/tptp/ProofParser.scala +++ /dev/null @@ -1,506 +0,0 @@ -package lisa.utils.tptp - -import leo.datastructures.TPTP.AnnotatedFormula -import leo.datastructures.TPTP.FOF -import leo.datastructures.TPTP.FOFAnnotated -import leo.modules.input.{TPTPParser => Parser} -import lisa.utils.K - -import java.io.File - -import Parser.TPTPParseException -import KernelParser.* -import K.{given} - -object ProofParser { - val TPTPversion = "TPTP v8.0.0" - val rand = scala.util.Random() - - given mapAtom: ((String, Int) => K.AtomicLabel) = (f, n) => - val kind = f.head - val id = f.tail - if kind == 's' then - if n == 0 then K.VariableFormulaLabel(sanitize(id)) - else K.SchematicPredicateLabel(sanitize(id), n) - else if kind == 'c' then K.ConstantAtomicLabel(sanitize(id), n) - else throw new Exception(s"Unknown kind of atomic label: $f") - given mapTerm: ((String, Int) => K.TermLabel) = (f, n) => - val kind = f.head - val id = f.tail - if kind == 's' then K.SchematicFunctionLabel(sanitize(id), n) - else if kind == 'c' then K.ConstantFunctionLabel(sanitize(id), n) - else if n == 0 then K.VariableLabel(sanitize(f)) - else K.SchematicFunctionLabel(sanitize(f), n) - given mapVariable: (String => K.VariableLabel) = f => - if f.head == 'X' then K.VariableLabel(f.tail) - else K.VariableLabel(f) - - def problemToFile(fileDirectory: String, fileName: String, name: String, axioms: Seq[K.Sequent], conjecture: K.Sequent, source: String): File = { - // case class Problem(file: String, domain: String, name: String, status: String, spc: Seq[String], formulas: Seq[AnnotatedStatement]) - val number = rand.nextInt(1000) - val file = new File(fileDirectory + fileName + ".p") - // val fileName = originFile.split("/").last - val header = - s"""%-------------------------------------------------------------------------- -% File : $fileName : $TPTPversion. -% Domain : None -% Problem : ${name} -% Version : None -% English : - -% Refs : https://github.com/epfl-lara/lisa -% : lisa.utils.tptp.ProofParser -% Source : [Lisa, $source] -% Names : - -% Status : Unknown -% Rating : ? -% Syntax : ? -% SPC : FOF_UNK_RFO_SEQ - -% Comments : This problem, was printed from a statement in a proof of a theorem by the Lisa theorem prover for submission to proof-producing ATPs. 
-%-------------------------------------------------------------------------- -""" - val writer = new java.io.PrintWriter(file) - writer.write(header) - var counter = 0 - def nextc = { counter += 1; counter } - axioms.foreach(s => writer.write(sequentToFOFAnnotated(s, "a" + nextc, "axiom").pretty + "\n")) - writer.write(sequentToFOFAnnotated(conjecture, "c" + nextc, "conjecture").pretty + "\n\n") - writer.close() - file - } - - def sequentToFOFAnnotated(sequent: K.Sequent, name: String, role: String): FOFAnnotated = { - val annotations = None - val formula = K.sequentToFormula(sequent) - FOFAnnotated(name, role, formulaToFOFStatement(formula), annotations) - } - - def isLowerWord(s: String): Boolean = s.head.isLower && s.tail.forall(_.isLetterOrDigit) - inline def quoted(s: String): String = if isLowerWord(s) then s else s"'$s'" - - def termToFOFTerm(term: K.Term): FOF.Term = { - val K.Term(label, args) = term - label match - case K.ConstantFunctionLabel(id, arity) => - FOF.AtomicTerm(quoted("c" + id), args.map(termToFOFTerm)) - case K.SchematicFunctionLabel(id, arity) => - FOF.AtomicTerm(quoted("s" + id), args.map(termToFOFTerm)) - case K.VariableLabel(id) => FOF.Variable("X" + id) - } - def formulaToFOFFormula(formula: K.Formula): FOF.Formula = { - formula match - case K.AtomicFormula(label, args) => - label match - case K.equality => FOF.Equality(termToFOFTerm(args(0)), termToFOFTerm(args(1))) - case K.top => FOF.AtomicFormula("$true", Seq()) - case K.bot => FOF.AtomicFormula("$false", Seq()) - case K.ConstantAtomicLabel(id, arity) => FOF.AtomicFormula(quoted("c" + id), args.map(termToFOFTerm)) - case K.SchematicPredicateLabel(id, arity) => FOF.AtomicFormula(quoted("s" + id), args.map(termToFOFTerm)) - case K.VariableFormulaLabel(id) => FOF.AtomicFormula(quoted("s" + id), Seq()) - case K.ConnectorFormula(label, args) => - label match - case K.Neg => FOF.UnaryFormula(FOF.~, formulaToFOFFormula(args.head)) - case K.Implies => FOF.BinaryFormula(FOF.Impl, formulaToFOFFormula(args(0)), formulaToFOFFormula(args(1))) - case K.Iff => FOF.BinaryFormula(FOF.<=>, formulaToFOFFormula(args(0)), formulaToFOFFormula(args(1))) - case K.And => - if args.size == 0 then FOF.AtomicFormula("$true", Seq()) - else if args.size == 1 then formulaToFOFFormula(args(0)) - else FOF.BinaryFormula(FOF.&, formulaToFOFFormula(args(0)), formulaToFOFFormula(args(1))) - case K.Or => - if args.size == 0 then FOF.AtomicFormula("$false", Seq()) - else if args.size == 1 then formulaToFOFFormula(args(0)) - else FOF.BinaryFormula(FOF.|, formulaToFOFFormula(args(0)), formulaToFOFFormula(args(1))) - case scl: K.SchematicConnectorLabel => throw new Exception(s"Schematic connectors are unsupported") - case K.BinderFormula(label, bound, inner) => - label match - case K.Forall => FOF.QuantifiedFormula(FOF.!, Seq("X" + bound.id), formulaToFOFFormula(inner)) - case K.Exists => FOF.QuantifiedFormula(FOF.?, Seq("X" + bound.id), formulaToFOFFormula(inner)) - case K.ExistsOne => ??? 
- } - - def formulaToFOFStatement(formula: K.Formula): FOF.Statement = { - FOF.Logical(formulaToFOFFormula(formula)) - } - - def reconstructProof(file: File)(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): K.SCProof = { - val problem = Parser.problem(io.Source.fromFile(file)) - val nameMap = scala.collection.mutable.Map[String, (Int, FOF.Sequent)]() - var prems = List[K.Sequent]() - var steps = List[K.SCProofStep]() - var numberSteps = 0 - problem.formulas.foreach { - case fa: FOFAnnotated => - if fa.role == "conjecture" then () - else - val fofsequent = fa.formula match { - case FOF.Logical(formula) => FOF.Sequent(Seq(), Seq(formula)) - case s: FOF.Sequent => s - } - if fa.role == "axiom" then - val sequent = K.Sequent(fofsequent.lhs.map(convertToKernel).toSet, fofsequent.rhs.map(convertToKernel).toSet) - nameMap(fa.name) = (-prems.size - 1, fofsequent) - prems = sequent :: prems - else - annotatedStatementToProofStep(fa, e => nameMap(e)._1, e => nameMap(e)._2) match { - case Some((step, name)) => - nameMap(name) = (numberSteps, fofsequent) - numberSteps += 1 - steps = step :: steps - case None => throw new Exception(s"Proof step could not be reconstructed from ${fa.pretty}") - } - case _ => throw new Exception("Only FOF statements are supported") - } - K.SCProof(steps.reverse.toIndexedSeq, prems.reverse.toIndexedSeq) - } - - def annotatedStatementToProofStep(ann: FOFAnnotated, numbermap: String => Int, sequentmap: String => FOF.Sequent)(using - mapAtom: (String, Int) => K.AtomicLabel, - mapTerm: (String, Int) => K.TermLabel, - mapVariable: String => K.VariableLabel - ): Option[(K.SCProofStep, String)] = { - given (String => Int) = numbermap - given (String => FOF.Sequent) = sequentmap - val r = ann match { - case Inference.Hypothesis(step, name) => Some((step, name)) - case Inference.Cut(step, name) => Some((step, name)) - case Inference.LeftHypothesis(step, name) => - Some((step, name)) - case Inference.LeftNNot(step, name) => Some((step, name)) - case Inference.LeftAnd(step, name) => Some((step, name)) - case Inference.LeftNOr(step, name) => Some((step, name)) - case Inference.LeftNImp(step, name) => Some((step, name)) - case Inference.LeftNAnd(step, name) => Some((step, name)) - case Inference.LeftOr(step, name) => Some((step, name)) - case Inference.LeftImp1(step, name) => Some((step, name)) - case Inference.LeftImp2(step, name) => Some((step, name)) - case Inference.LeftNAll(step, name) => Some((step, name)) - case Inference.LeftEx(step, name) => Some((step, name)) - case Inference.LeftAll(step, name) => Some((step, name)) - case Inference.LeftNEx(step, name) => Some((step, name)) - case Inference.RightNot(step, name) => Some((step, name)) - case _ => None - } - r - } - - object Inference { - import leo.datastructures.TPTP.{Annotations, GeneralTerm, MetaFunctionData, NumberData, Integer, FOF, GeneralFormulaData, FOTData} - import K.apply - - object Number { - def unapply(ann_seq: GeneralTerm): Option[BigInt] = - ann_seq match { - case GeneralTerm(List(NumberData(Integer(n))), None) => Some(n) - case _ => None - } - } - object Term { - def unapply(ann_seq: GeneralTerm)(using mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[K.Term] = - ann_seq match { - case GeneralTerm(List(GeneralFormulaData(FOTData(term))), None) => Some(convertTermToKernel(term)) - case _ => None - } - } - object String { - def unapply(ann_seq: GeneralTerm): Option[String] = - ann_seq match { - case 
GeneralTerm(List(MetaFunctionData(string, List())), None) => Some(string) - case _ => None - } - } - object StrOrNum { - def unapply(ann_seq: GeneralTerm): Option[String] = - ann_seq match { - case String(s) => Some(s) - case Number(n) => Some(n.toString) - case _ => None - } - } - def unapply(ann_seq: Annotations): Option[(String, Seq[GeneralTerm], Seq[String])] = - ann_seq match { - case Some( - ( - GeneralTerm( - List( - MetaFunctionData( - "inference", - List( - GeneralTerm(List(MetaFunctionData(stepName, List())), None), // stepnames - GeneralTerm(List(MetaFunctionData("param", parameters)), None), // params - GeneralTerm(List(), Some(numberTerms)) - ) // numbers - ) - ), - None - ), - None - ) - ) => - Some( - ( - stepName, - parameters, - numberTerms.map { - case StrOrNum(n) => n.toString - case String(n) => n - case _ => throw new Exception(s"Expected a list of number as last parameter of inference, but got $numberTerms") - } - ) - ) - case _ => None - } - - object Hypothesis { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("hyp", Seq(StrOrNum(n), StrOrNum(m)), Seq())) => - if (sequent.lhs(n.toInt) == sequent.rhs(m.toInt)) then - val left = sequent.lhs.map(convertToKernel) - val right = sequent.rhs.map(convertToKernel) - Some((K.RestateTrue(K.Sequent(left.toSet, right.toSet)), name)) - else None - case _ => None - } - } // List(GeneralTerm(List(),Some(List(GeneralTerm(List(NumberData(Integer(6))),None), GeneralTerm(List(NumberData(Integer(5))),None))))) - - object Cut { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("cut", Seq(StrOrNum(n), StrOrNum(m)), Seq(t1, t2))) => - val formula1 = sequentmap(t1).rhs(n.toInt) - val formula2 = sequentmap(t2).lhs(m.toInt) - if (formula1 == formula2) then Some((K.Cut(convertToKernel(sequent), numbermap(t1), numbermap(t2), convertToKernel(formula1)), name)) - else throw new Exception(s"Cut inference with different formulas given in the premises") - case _ => - None - } - - } - - object LeftHypothesis { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftHyp", Seq(StrOrNum(n), StrOrNum(m)), Seq())) => - val left = sequent.lhs.map(convertToKernel) - val right = sequent.rhs.map(convertToKernel) - val formula = left(n.toInt) - if (formula == K.Neg(left(m.toInt)) || K.Neg(formula) == left(m.toInt)) then Some((K.RestateTrue(K.Sequent(left.toSet, right.toSet)), name)) - else None - case _ => - None - } - } - object LeftNNot { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => 
K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotNot", Seq(StrOrNum(n)), Seq(t1))) => - Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) - case _ => None - } - } - object LeftAnd { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftAnd", Seq(StrOrNum(n)), Seq(t1))) => - Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) - case _ => None - } - } - object LeftNOr { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotOr", Seq(StrOrNum(n)), Seq(t1))) => - Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) - case _ => None - } - } - object LeftNImp { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotImp", Seq(StrOrNum(n)), Seq(t1))) => - Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) - case _ => None - } - } - - object LeftNAnd { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotAnd", Seq(StrOrNum(n)), Seq(t1, t2))) => - val f = sequent.lhs(n.toInt) - val (a, b) = convertToKernel(f) match { - case K.ConnectorFormula(K.Neg, Seq(K.ConnectorFormula(K.And, Seq(x, y)))) => (x, y) - case _ => throw new Exception(s"Expected a negated conjunction, but got $f") - } - Some((K.LeftOr(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(K.Neg(a), K.Neg(b))), name)) - case _ => None - } - } - - object LeftOr { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftOr", Seq(StrOrNum(n)), Seq(t1, t2))) => - val f = sequent.lhs(n.toInt) - val (a, b) = convertToKernel(f) match { - case K.ConnectorFormula(K.Or, Seq(x, y)) => (x, y) - case _ => throw new Exception(s"Expected a disjunction, but got $f") - } - Some((K.LeftOr(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(a, b))), name) - case _ => None - } - } - - object LeftImp1 { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) 
=> K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftImp1", Seq(StrOrNum(n)), Seq(t1, t2))) => - val f = sequent.lhs(n.toInt) - val (a, b) = convertToKernel(f) match { - case K.ConnectorFormula(K.Implies, Seq(x, y)) => (x, y) - case _ => throw new Exception(s"Expected an implication, but got $f") - } - Some((K.LeftImplies(convertToKernel(sequent), numbermap(t1), numbermap(t2), a, b), name)) - case _ => None - } - } - - object LeftImp2 { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftImp2", Seq(StrOrNum(n)), Seq(t1, t2))) => - val f = sequent.lhs(n.toInt) - val (a, b) = convertToKernel(f) match { - case K.ConnectorFormula(K.Implies, Seq(x, y)) => (x, y) - case _ => throw new Exception(s"Expected an implication, but got $f") - } - Some((K.LeftOr(convertToKernel(sequent), Seq(numbermap(t1), numbermap(t2)), Seq(K.Neg(a), b)), name)) - case _ => None - } - } - - object LeftNAll { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotForall", Seq(StrOrNum(n), Term(xl)), Seq(t1))) => // x has to be a GeneralTerm representinf a variable, i.e. $fot(x) - val f = sequent.lhs(n.toInt) - val x = xl match - case K.Term(x: K.VariableLabel, Seq()) => x - case _ => throw new Exception(s"Expected a variable, but got $xl") - val (y: K.VariableLabel, phi: K.Formula) = convertToKernel(f) match { - case K.ConnectorFormula(K.Neg, Seq(K.BinderFormula(K.Forall, x, phi))) => (x, phi) - case _ => throw new Exception(s"Expected a universal quantification, but got $f") - } - if x == y then Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), phi, x), name)) - else Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), K.substituteVariablesInFormula(K.ConnectorFormula(K.Neg, Seq(phi)), Map(y -> xl), Seq()), x), name)) - case _ => None - } - } - - object LeftEx { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftEx", Seq(StrOrNum(n), Term(xl)), Seq(t1))) => // x has to be a GeneralTerm representinf a variable, i.e. 
$fot(x) - val f = sequent.lhs(n.toInt) - val x = xl match - case K.Term(x: K.VariableLabel, Seq()) => x - case _ => throw new Exception(s"Expected a variable, but got $xl") - val (y: K.VariableLabel, phi: K.Formula) = convertToKernel(f) match { - case K.BinderFormula(K.Exists, x, phi) => (x, phi) - case _ => throw new Exception(s"Expected an existential quantification, but got $f") - } - if x == y then Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), phi, x), name)) - else Some((K.LeftExists(convertToKernel(sequent), numbermap(t1), K.substituteVariablesInFormula(phi, Map(y -> xl), Seq()), x), name)) - case _ => None - } - } - - object LeftAll { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftForall", Seq(StrOrNum(n), Term(t)), Seq(t1))) => - val f = sequent.lhs(n.toInt) - val (x, phi) = convertToKernel(f) match { - case K.BinderFormula(K.Forall, x, phi) => (x, phi) - case _ => throw new Exception(s"Expected a universal quantification, but got $f") - } - Some((K.LeftForall(convertToKernel(sequent), numbermap(t1), phi, x, t), name)) - case _ => None - } - } - - object LeftNEx { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("leftNotEx", Seq(StrOrNum(n), Term(t)), Seq(t1))) => - val f = sequent.lhs(n.toInt) - val (x, phi) = convertToKernel(f) match { - case K.ConnectorFormula(K.Neg, Seq(K.BinderFormula(K.Exists, x, phi))) => (x, phi) - case _ => throw new Exception(s"Expected a negated existential quantification, but got $f") - } - Some((K.LeftForall(convertToKernel(sequent), numbermap(t1), K.ConnectorFormula(K.Neg, Seq(phi)), x, t), name)) - case _ => None - } - } - - object RightNot { - def unapply(ann_seq: FOFAnnotated)(using - numbermap: String => Int, - sequentmap: String => FOF.Sequent - )(using mapAtom: (String, Int) => K.AtomicLabel, mapTerm: (String, Int) => K.TermLabel, mapVariable: String => K.VariableLabel): Option[(K.SCProofStep, String)] = - ann_seq match { - case FOFAnnotated(name, role, sequent: FOF.Sequent, Inference("rightNot", Seq(StrOrNum(n)), Seq(t1))) => - Some((K.Weakening(convertToKernel(sequent), numbermap(t1)), name)) - case _ => None - } - } - - } -} diff --git a/lisa-utils/src/main/scala/lisa/utils/unification/UnificationUtils.scala b/lisa-utils/src/main/scala/lisa/utils/unification/UnificationUtils.scala index dcc762fa5..bea4c7c36 100644 --- a/lisa-utils/src/main/scala/lisa/utils/unification/UnificationUtils.scala +++ b/lisa-utils/src/main/scala/lisa/utils/unification/UnificationUtils.scala @@ -1,26 +1,399 @@ package lisa.utils.unification -import lisa.fol.FOL.{_, given} -//import lisa.fol.FOLHelpers.* - -//import lisa.kernel.fol.FOL.* -//import lisa.utils.KernelHelpers.{_, given} +import lisa.utils.fol.FOL.{_, given} +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.SimpleDeducedSteps +import lisa.utils.K +import lisa.utils.memoization.memoized +import lisa.utils.collection.Extensions.* +import lisa.utils.collection.{VecSet => Set} 
+import lisa.utils.fol.FOL /** * General utilities for unification, substitution, and rewriting */ -object UnificationUtils { +object UnificationUtils: - extension [A](seq: Seq[A]) { + /** + * Chosen equality for terms in matching and rewriting. + */ + inline def eq[A](l: Expr[A], r: Expr[A]) = isSame(l, r) + + /** + * Context containing information and constraints pertaining to matching, + * unification, and rewriting. + * + * @param boundVariables variables in terms that cannot be substituted + */ + case class RewriteContext( + boundVariables: Set[Variable[?]], + freeRules: Set[RewriteRule], + confinedRules: Set[RewriteRule], + ): + // when a context is constructed, update the global ID counter to make sure + // we aren't conflicting with variable names in the rewrite rules + RewriteContext.updateIDCounts(this) + + /** + * Checks if a variable is free under this context. + */ + def isFree[A](v: Variable[A]) = !isBound(v) + + /** + * Checks if a variable is bound under this context. + */ + def isBound[A](v: Variable[A]) = boundVariables.contains(v) + + /** + * A copy of this context with the given variable additionally bound. + */ + def withBound[A](v: Variable[A]) = + this.copy(boundVariables = boundVariables + v) + + /** + * A copy of this context with the given variables additionally bound. + */ + def withBound(vs: Iterable[Variable[?]]) = + this.copy(boundVariables = boundVariables ++ vs) + + /** + * A copy of this context with the given pair added as a _free_ rewrite + * rule, whose variables may be instantiated during rewriting. + */ + def withFreeRule[A](rule: RewriteRule) = + this.copy(freeRules = freeRules + rule) /** - * Seq.collectFirst, but for a function returning an Option. Evaluates the - * function only once per argument. Returns when the first non-`None` value - * is found. + * A copy of this context with the given pair added as a _confined_ rewrite + * rule, whose variables may *not* be instantiated during rewriting. + */ + def withConfinedRule[A](rule: RewriteRule) = + this.copy(confinedRules = confinedRules + rule) + + /** + * All rules (free + confined) in this context. + */ + def allRules: Set[RewriteRule] = freeRules ++ confinedRules + + val representativeVariable = memoized(__representativeVariable) + + private def __representativeVariable(rule: InstantiatedRewriteRule): Variable[?] = + val id = RewriteContext.freshRepresentative + rule.rule match + case TermRewriteRule(_, _) => Variable[Ind](id) + case FormulaRewriteRule(_, _) => Variable[Prop](id) + // should not reach under general use, but why not: + case r: InstantiatedRewriteRule => representativeVariable(r) + + object RewriteContext: + /** + * The empty rewrite context. + */ + def empty = RewriteContext(Set.empty, Set.empty, Set.empty) + + /** + * A rewrite context with the given variables considered bound. 
+ */ + def withBound(vars: Iterable[Variable[?]]) = + RewriteContext(vars.to(Set), Set.empty, Set.empty) + + private object IDCounter: + val reprName = "@@internalRewriteVar@@" + private var current = 0 + def setIDCountTo(limit: Int): Unit = + current = math.max(limit, current) + def nextIDCount: Int = + current += 1 + current + + import IDCounter.{reprName, setIDCountTo, nextIDCount} + + private def freshRepresentative: Identifier = + Identifier(reprName, nextIDCount) + + private def maxVarId[A](expr: Expr[A]): Int = + expr match + case Variable(id: Identifier) => id.no + case Constant(id) => 0 + case App(f, arg) => math.max(maxVarId(f), maxVarId(arg)) + case Abs(v: Variable[?], body: Expr[?]) => math.max(maxVarId(v), maxVarId(body)) + + private def updateIDCounts(ctx: RewriteContext): Unit = + val max = ctx.allRules.map(r => maxVarId(r.toFormula)).maxOption.getOrElse(0) + 1 + setIDCountTo(max) + + /** + * Immutable representation of a typed variable substitution. + * + * Wraps an immutable map while preserving variable types. + * + * Types are discarded for storage but are guaranteed to be sound by + * construction. + * + * @param assignments mappings to initialize the substitution with + */ + class Substitution private ( + protected val assignments: Map[Variable[?], Expr[?]], + protected val freeVariables: Set[Variable[?]] + ): + // invariant: + // require( + // freeVariables == assignments.keySet ++ assignments.values.flatMap(_.freeVars) + // ) + + /** + * (Optionally) retrieves a variable's mapping + */ + def apply[A](v: Variable[A]): Option[Expr[A]] = + assignments.get(v).map(_.asInstanceOf[Expr[A]]) + + /** + * Creates a new substitution with a new mapping added + */ + def +[A](mapping: (Variable[A], Expr[A])): Substitution = + val newfree = mapping._2.freeVars + mapping._1 + Substitution(assignments + mapping, freeVariables ++ newfree) + + /** + * Checks whether a variable is assigned by this substitution + */ + def contains[A](v: Variable[A]): Boolean = + assignments.contains(v) + + /** + * Checks whether any substitution contains the given variable. Needed for + * verifying ill-formed substitutions containing bound variables. * - * @param T output type under option - * @param f the function to evaluate + * Eg: if `v` is externally bound, then `x` and `f(v)` have no matcher under + * capture avoiding substitution. + */ + def substitutes[A](v: Variable[A]): Boolean = + freeVariables(v) + + def asSubstPair: Seq[SubstPair] = + assignments.map((v, e) => v := e.asInstanceOf).toSeq + + object Substitution: + /** + * The empty substitution */ + def empty: Substitution = Substitution(Map.empty, Set.empty) + + /** + * Performs first-order matching for two terms. Returns a (most-general) + * substitution from variables to terms such that `expr` substituted is equal + * to `pattern`, if one exists. + * + * Does not use rewrite rules provided by `ctx`, if any. + * + * @param expr the reference term (to substitute in) + * @param pattern the pattern to match against + * @param subst partial substitution to match under + * @param ctx (implicit) context to match under + * @return substitution (Option) from variables to terms. `None` iff a + * substitution does not exist. 
+ */ + def matchExpr[A](using ctx: RewriteContext)(expr: Expr[A], pattern: Expr[A], subst: Substitution = Substitution.empty): Option[Substitution] = + // chosen equality: ortholattice equivalence + inline def eq(l: Expr[A], r: Expr[A]) = isSame(l, r) + + if eq(expr, pattern) then + // trivial, done + Some(subst) + else + (expr, pattern) match + case (v @ Variable(_), _) if ctx.isFree(v) => + subst(v) match + case Some(e) => + // this variable has been assigned before. + // is that subst compatible with this instance? + if eq(e, pattern) then Some(subst) else None + case None => + // first encounter + Some(subst + (v -> pattern)) + case (App(fe, arge), App(fp, argp)) if fe.sort == fp.sort => + // the sort of fp is already runtime checked here; the sort of argp + // is implied by combination of static and runtime checks + matchExpr(fe, fp.asInstanceOf, subst) + .flatMap(subst => matchExpr(arge, argp.asInstanceOf, subst)) + + case (Abs(ve, fe), Abs(vp, fp)) => + val freshVar = ve.freshRename(Seq(fe, fp)) + matchExpr(using ctx.withBound(freshVar))( + fe.substitute(ve := freshVar), + fp.substitute(vp := freshVar), + subst + ).filterNot(_.substitutes(freshVar)) + + case _ => None + + + sealed trait RewriteRule: + type Base + + def l: Expr[Base] + + def r: Expr[Base] + + /** + * Flip this rewrite rule + */ + def swap: RewriteRule + + /** + * The trivial hypothesis step that can be used as a source for this rewrite + */ + def source(using lib: Library, proof: lib.Proof): proof.Fact = + val form = toFormula + lib.have(using proof)(form |- form) by SimpleDeducedSteps.Restate + + /** + * Reduce this rewrite rule to a formula representing the equivalence. + */ + def toFormula: Expr[Prop] + + /** + * The sort of the terms in this rewrite rule. + */ + def sort: K.Sort = l.sort + + + case class TermRewriteRule(l: Expr[Ind], r: Expr[Ind]) extends RewriteRule: + type Base = Ind + def swap: TermRewriteRule = TermRewriteRule(r, l) + def toFormula: Expr[Prop] = l === r + + case class FormulaRewriteRule(l: Expr[Prop], r: Expr[Prop]) extends RewriteRule: + type Base = Prop + def swap: FormulaRewriteRule = FormulaRewriteRule(r, l) + def toFormula: Expr[Prop] = l <=> r + + case class InstantiatedRewriteRule(rule: RewriteRule, subst: Substitution) extends RewriteRule: + type Base = rule.Base + def l: Expr[rule.Base] = rule.l.substitute(subst.asSubstPair*) + def r: Expr[rule.Base] = rule.r.substitute(subst.asSubstPair*) + def toFormula: Expr[Prop] = rule.toFormula.substitute(subst.asSubstPair*) + def swap: RewriteRule = InstantiatedRewriteRule(rule.swap, subst) + + + /** + * Given a single *free* rewrite rule, checks whether it rewrite `from` to + * `to` under this context. If the rewrite succeeds, returns the rule and + * the instantiation of the rule corresponding to the rewrite step. + * + * @param from term to rewrite from + * @param to term to rewrite into + * @param rule *free* rewrite rule to use + */ + private def rewriteOneWithFree[A](from: Expr[A], to: Expr[A], rule: RewriteRule {type Base = A}): Option[InstantiatedRewriteRule] = + val ctx = RewriteContext.empty + // attempt to rewrite with all bound variables discarded + rewriteOneWith(using ctx)(from, to, rule) + + /** + * Given a single rewrite rule, checks whether it rewrite `from` to `to` + * under this context. The rewrite rule is considered *confined* by the + * context. See [[rewriteOneWithFree]] for free rules. If the rewrite + * succeeds, returns the rule and the instantiation of the rule + * corresponding to the rewrite step. 
+ * + * @param ctx (implicit) context to rewrite under + * @param from term to rewrite from + * @param to term to rewrite into + * @param rule *free* rewrite rule to use + */ + private def rewriteOneWith[A](using ctx: RewriteContext)(from: Expr[A], to: Expr[A], rule: RewriteRule {type Base = A}): Option[InstantiatedRewriteRule] = + val (l: Expr[A], r: Expr[A]) = (rule.l, rule.r) + // match the left side + matchExpr(l, from, Substitution.empty) + // based on this partial substitution, try to match the right side + // note: given that first match succeeded, any extension of it is still a successful matcher for l -> from + .flatMap(partialSubst => matchExpr(r, to, partialSubst)) + // if succeeded, pair the rule together and ship out + .map(finalSubst => InstantiatedRewriteRule(rule, finalSubst)) + + /** + * Tries to find a *top-level* rewrite from `from` to `to` using the + * rewrite rules in the implicit context. The rewrite rule unifying the two + * terms is returned if one exists. + * + * @param from term to rewrite from + * @param to term to rewrite into + */ + private def rewriteOne[A] + (using ctx: RewriteContext) + (from: Expr[A], to: Expr[A]): Option[InstantiatedRewriteRule] = + // rule sort is runtime checked + lazy val confinedRewrite = ctx.confinedRules + .filter(_.sort == from.sort) + .collectFirstDefined(rule => rewriteOneWith(from, to, rule.asInstanceOf)) + lazy val freeRewrite = ctx.freeRules + .filter(_.sort == from.sort) + .collectFirstDefined(rule => rewriteOneWithFree(from, to, rule.asInstanceOf)) + + // confined rules take precedence + // local rewrites are more likely to succeed than global ones + // (anecdotally) :) + confinedRewrite.orElse(freeRewrite) + + case class RewriteResult[A](ctx: RewriteContext, usedRules: Set[InstantiatedRewriteRule], context: Expr[A]): + def toLeft: Expr[A] = + context.substitute((vars `lazyZip` rules.map(_.l)).map((v, e) => v := e.asInstanceOf)*) + def toRight: Expr[A] = + context.substitute((vars `lazyZip` rules.map(_.r)).map((v, e) => v := e.asInstanceOf)*) + def vars: Seq[Variable[?]] = usedRules.map(ctx.representativeVariable).toSeq + def lambda: Expr[A] = context + def rules: Set[InstantiatedRewriteRule] = usedRules + def substitutes(v: Variable[?]): Boolean = + usedRules.exists(_.subst.substitutes(v)) + + // invariant: + // require( (vars `zip` rules).forall((v, e) => v.Sort == rule.Base ) ) // equality is over types + + type FormulaRewriteResult = RewriteResult[Prop] + + def rewrite[A](using ctx: RewriteContext)(from: Expr[A], to: Expr[A]): Option[RewriteResult[A]] = + lazy val rule = rewriteOne(from, to) + + if eq(from, to) then + Some(RewriteResult(ctx, Set.empty, from)) + else if rule.isDefined then + val irule = rule.get + Some(RewriteResult(ctx, Set(irule), ctx.representativeVariable(irule).asInstanceOf)) + else + (from, to) match + case (App(fe, arge), App(fp, argp)) if fe.sort == fp.sort => + lazy val fun = rewrite(fe, fp.asInstanceOf) + lazy val arg = rewrite(arge, argp.asInstanceOf) + + for + f <- fun + a <- arg + yield RewriteResult(ctx, f.rules ++ a.rules, f.context #@ a.context) + + case (Abs(ve, fe), Abs(vp, fp)) => + val freshVar = ve.freshRename(Seq(fe, fp)) + rewrite(fe.substitute(ve := freshVar), fp.substitute(vp := freshVar)) + .filterNot(_.substitutes(freshVar)) + .map: + case RewriteResult(c, r, e) => + RewriteResult(c, r, Abs(freshVar, e)) + case _ => None + +end UnificationUtils + +// object UnificationUtils { +/* + extension [A](seq: Seq[A]) { + + /** + * Seq.collectFirst, but for a function returning an Option. 
Evaluates the + * function only once per argument. Returns when the first non-`None` value + * is found. + * + * @param T output type under option + * @param f the function to evaluate + */ def getFirst[T](f: A => Option[T]): Option[T] = { var res: Option[T] = None val iter = seq.iterator @@ -33,24 +406,24 @@ object UnificationUtils { } /** - * All the information required for performing rewrites. - */ + * All the information required for performing rewrites. + */ case class RewriteContext( - freeFormulaRules: Seq[(Formula, Formula)] = Seq.empty, - freeTermRules: Seq[(Term, Term)] = Seq.empty, - confinedFormulaRules: Seq[(Formula, Formula)] = Seq.empty, - confinedTermRules: Seq[(Term, Term)] = Seq.empty, + freeFormulaRules: Seq[(Expr[Prop], Expr[Prop])] = Seq.empty, + freeTermRules: Seq[(Expr[Ind], Expr[Ind])] = Seq.empty, + confinedFormulaRules: Seq[(Expr[Prop], Expr[Prop])] = Seq.empty, + confinedTermRules: Seq[(Expr[Ind], Expr[Ind])] = Seq.empty, takenFormulaVars: Set[VariableFormula] = Set.empty, takenTermVars: Set[Variable] = Set.empty ) { private var lastID: Identifier = freshId((takenFormulaVars ++ takenTermVars).map(_.id), "@@rewriteVar@@") /** - * Generates a fresh identifier with an internal label `__rewriteVar__`. - * Mutates state. - * - * @return fresh identifier - */ + * Generates a fresh identifier with an internal label `__rewriteVar__`. + * Mutates state. + * + * @return fresh identifier + */ def freshIdentifier = { lastID = freshId(Seq(lastID), "@@rewriteVar@@") lastID @@ -60,11 +433,11 @@ object UnificationUtils { def isFreeVariable(v: VariableFormula) = !takenFormulaVars.contains(v) /** - * Update the last generated fresh ID to that of another context if it is - * larger, otherwise retain the previous value. Mutates state. - * - * @param other another context - */ + * Update the last generated fresh ID to that of another context if it is + * larger, otherwise retain the previous value. Mutates state. + * + * @param other another context + */ def updateTo(other: RewriteContext) = lastID = if (other.lastID.no > lastID.no) other.lastID else lastID } @@ -75,42 +448,42 @@ object UnificationUtils { // substitutions - type TermSubstitution = Map[Variable, Term] + type TermSubstitution = Map[Variable, Expr[Ind]] val TermSubstitution = Map // don't abuse pls O.o - type FormulaSubstitution = Map[VariableFormula, Formula] + type FormulaSubstitution = Map[VariableFormula, Expr[Prop]] val FormulaSubstitution = Map /** - * Performs first-order matching for two terms. Returns a (most-general) - * substitution from variables to terms such that `first` substituted is equal TODO: Fix `first`and `second` - * to `second`, if one exists. Uses [[matchTermRecursive]] as the actual - * implementation. - * - * @param reference the reference term - * @param template the term to match - * @param takenVariables any variables in the template which cannot be - * substituted, i.e., treated as constant - * @return substitution (Option) from variables to terms. `None` iff a - * substitution does not exist. - */ - def matchTerm(reference: Term, template: Term, takenVariables: Iterable[Variable] = Iterable.empty): Option[TermSubstitution] = { + * Performs first-order matching for two terms. Returns a (most-general) + * substitution from variables to terms such that `first` substituted is equal TODO: Fix `first`and `second` + * to `second`, if one exists. Uses [[matchTermRecursive]] as the actual + * implementation. 
+ * + * @param reference the reference term + * @param template the term to match + * @param takenVariables any variables in the template which cannot be + * substituted, i.e., treated as constant + * @return substitution (Option) from variables to terms. `None` iff a + * substitution does not exist. + */ + def matchTerm(reference: Expr[Ind], template: Expr[Ind], takenVariables: Iterable[Variable] = Iterable.empty): Option[TermSubstitution] = { val context = RewriteContext(takenTermVars = takenVariables.toSet) matchTermRecursive(using context)(reference, template, TermSubstitution.empty) } /** - * Implementation for matching terms. See [[matchTerm]] for the interface. - * - * @param context all information about restricted variables and fresh name - * generation state - * @param reference the reference terms - * @param template the terms to match - * @param substitution currently accumulated susbtitutions to variables - * @return substitution (Option) from variables to terms. `None` if a - * substitution does not exist. - */ - private def matchTermRecursive(using context: RewriteContext)(reference: Term, template: Term, substitution: TermSubstitution): Option[TermSubstitution] = + * Implementation for matching terms. See [[matchTerm]] for the interface. + * + * @param context all information about restricted variables and fresh name + * generation state + * @param reference the reference terms + * @param template the terms to match + * @param substitution currently accumulated substitutions to variables + * @return substitution (Option) from variables to terms. `None` if a + * substitution does not exist. + */ + private def matchTermRecursive(using context: RewriteContext)(reference: Expr[Ind], template: Expr[Ind], substitution: TermSubstitution): Option[TermSubstitution] = if (reference == template) Some(substitution) else @@ -133,25 +506,24 @@ object UnificationUtils { } /** - * Performs first-order matching for two formulas. Returns a (most-general) - * substitution from variables to terms such that `first` substituted is equal - * to `second`, if one exists. Uses [[matchFormulaRecursive]] as the actual - * implementation. - * - * @param reference the reference formula - * @param template the formula to match - * @param takenTermVariables any variables in the template which cannot be - * substituted, i.e., treated as constant - * @param takenFormulaVariables any formula variables in the template which - * cannot be substituted, i.e., treated as constant - * @return substitution pair (Option) from formula variables to formulas, and - * variables to terms. `None` if a substitution does not exist. - */ + * Performs first-order matching for two formulas. Returns a (most-general) + * substitution from variables to terms such that `first` substituted is equal + * to `second`, if one exists. Uses [[matchFormulaRecursive]] as the actual + * implementation. + * + * @param reference the reference formula + * @param template the formula to match + * @param takenTermVariables any variables in the template which cannot be + * substituted, i.e., treated as constant + * @param takenFormulaVariables any formula variables in the template which + * cannot be substituted, i.e., treated as constant + * @return substitution pair (Option) from formula variables to formulas, and + * variables to terms. `None` if a substitution does not exist. 
+ */ def matchFormula( - reference: Formula, - template: Formula, + reference: Expr[Prop], + template: Expr[Prop], takenTermVariables: Iterable[Variable] = Iterable.empty, - takenFormulaVariables: Iterable[VariableFormula] = Iterable.empty ): Option[(FormulaSubstitution, TermSubstitution)] = { val context = RewriteContext( takenTermVars = takenTermVariables.toSet, @@ -161,20 +533,20 @@ object UnificationUtils { } /** - * Implementation for matching formulas. See [[matchFormula]] for the - * interface. - * - * @param context all information about restricted variables and fresh name generation state - * @param reference the reference formula - * @param template the formula to match - * @param formulaSubstitution currently accumulated susbtitutions to formula variables - * @param termSubstitution currently accumulated susbtitutions to term variables - * @return substitution pair (Option) from formula variables to formulas, and - * variables to terms. `None` if a substitution does not exist. - */ + * Implementation for matching formulas. See [[matchFormula]] for the + * interface. + * + * @param context all information about restricted variables and fresh name generation state + * @param reference the reference formula + * @param template the formula to match + * @param formulaSubstitution currently accumulated substitutions to formula variables + * @param termSubstitution currently accumulated substitutions to term variables + * @return substitution pair (Option) from formula variables to formulas, and + * variables to terms. `None` if a substitution does not exist. + */ private def matchFormulaRecursive(using context: RewriteContext - )(reference: Formula, template: Formula, formulaSubstitution: FormulaSubstitution, termSubstitution: TermSubstitution): Option[(FormulaSubstitution, TermSubstitution)] = { + )(reference: Expr[Prop], template: Expr[Prop], formulaSubstitution: FormulaSubstitution, termSubstitution: TermSubstitution): Option[(FormulaSubstitution, TermSubstitution)] = { if (isSame(reference, template)) Some((formulaSubstitution, termSubstitution)) else @@ -241,126 +613,126 @@ object UnificationUtils { // rewrites /** - * A term rewrite rule (`l -> r`) with an accompanying instantiation, given - * by a term substitution. - * - * @example A rule without any instantiation would be `((l -> r), - * TermSubstitution.empty)`. - * @example Commutativity of a function with instantiation can be `((f(x, y) - * -> f(y, x)), Map(x -> pair(a, b), y -> c))` - */ - type TermRule = ((Term, Term), TermSubstitution) + * A term rewrite rule (`l -> r`) with an accompanying instantiation, given + * by a term substitution. + * + * @example A rule without any instantiation would be `((l -> r), + * TermSubstitution.empty)`. + * @example Commutativity of a function with instantiation can be `((f(x, y) + * -> f(y, x)), Map(x -> pair(a, b), y -> c))` + */ + type TermRule = ((Expr[Ind], Expr[Ind]), TermSubstitution) /** - * A formula rewrite rule (`l -> r`) with an accompanying instantiation, - * given by a formula and a term substitution. - * - * @example A rule without any instantiation would be `((l -> r), - * FormulaSubstitution.empty)`. - * @example `((P(x) \/ Q -> Q /\ R(x)), Map(Q -> A \/ B, x -> f(t)))` - */ - type FormulaRule = ((Formula, Formula), (FormulaSubstitution, TermSubstitution)) + * A formula rewrite rule (`l -> r`) with an accompanying instantiation, + * given by a formula and a term substitution. 
+ * + * @example A rule without any instantiation would be `((l -> r), + * FormulaSubstitution.empty)`. + * @example `((P(x) \/ Q -> Q /\ R(x)), Map(Q -> A \/ B, x -> f(t)))` + */ + type FormulaRule = ((Expr[Prop], Expr[Prop]), (FormulaSubstitution, TermSubstitution)) /** - * A lambda representing a term, with inputs as terms. Carries extra - * information about rewrite rules used in its construction for proof - * genration later. - * - * @param termVars variables in the body to be treated as parameters closed - * under this function - * @param termRules mapping to the rules (with instantiations) used to - * construct this function; used for proof construction - * @param body the body of the function - */ + * A lambda representing a term, with inputs as terms. Carries extra + * information about rewrite rules used in its construction for proof + * genration later. + * + * @param termVars variables in the body to be treated as parameters closed + * under this function + * @param termRules mapping to the rules (with instantiations) used to + * construct this function; used for proof construction + * @param body the body of the function + */ case class TermRewriteLambda( termVars: Seq[Variable] = Seq.empty, termRules: Seq[(Variable, TermRule)] = Seq.empty, - body: Term + body: Expr[Ind] ) {} /** - * A lambda representing a formula, with inputs as terms or formulas. Carries - * extra information about rewrite rules used in its construction for proof - * geenration later. - * - * @param termVars variables in the body to be treated as parameters closed - * under this function - * @param formulaVars formula variables in the body to be treated as - * parameters closed under this function - * @param termRules mapping to the term rewrite rules (with instantiations) - * used to construct this function; used for proof construction - * @param formulaRules mapping to the formula rewrite rules (with - * instantiations) used to construct this function; used for proof - * construction - * @param body the body of the function - */ + * A lambda representing a formula, with inputs as terms or formulas. Carries + * extra information about rewrite rules used in its construction for proof + * geenration later. + * + * @param termVars variables in the body to be treated as parameters closed + * under this function + * @param formulaVars formula variables in the body to be treated as + * parameters closed under this function + * @param termRules mapping to the term rewrite rules (with instantiations) + * used to construct this function; used for proof construction + * @param formulaRules mapping to the formula rewrite rules (with + * instantiations) used to construct this function; used for proof + * construction + * @param body the body of the function + */ case class FormulaRewriteLambda( termRules: Seq[(Variable, TermRule)] = Seq.empty, formulaRules: Seq[(VariableFormula, FormulaRule)] = Seq.empty, - body: Formula + body: Expr[Prop] ) { /** - * **Unsafe** conversion to a term lambda, discarding rule and formula information - * - * Use if **know that only term rewrites were applied**. - */ - def toLambdaTF: LambdaExpression[Term, Formula, ?] = LambdaExpression(termRules.map(_._1), body, termRules.size) + * **Unsafe** conversion to a term lambda, discarding rule and formula information + * + * Use if **know that only term rewrites were applied**. + */ + def toLambdaTF: LambdaExpression[Expr[Ind], Expr[Prop], ?] 
= LambdaExpression(termRules.map(_._1), body, termRules.size) /** - * **Unsafe** conversion to a formula lambda, discarding rule and term information - * - * Use if **know that only formula rewrites were applied**. - */ - def toLambdaFF: LambdaExpression[Formula, Formula, ?] = LambdaExpression(formulaRules.map(_._1), body, formulaRules.size) + * **Unsafe** conversion to a formula lambda, discarding rule and term information + * + * Use if **know that only formula rewrites were applied**. + */ + def toLambdaFF: LambdaExpression[Expr[Prop], Expr[Prop], ?] = LambdaExpression(formulaRules.map(_._1), body, formulaRules.size) } /** - * Dummy connector used to combine formulas for convenience during rewriting - */ + * Dummy connector used to combine formulas for convenience during rewriting + */ val formulaRewriteConnector = SchematicConnectorLabel(Identifier("@@rewritesTo@@"), 2) /** - * Dummy function symbol used to combine terms for convenience during rewriting - */ + * Dummy function symbol used to combine terms for convenience during rewriting + */ val termRewriteConnector = ConstantFunctionLabel(Identifier("@@rewritesTo@@"), 2) /** - * Decides whether a term `first` be rewritten into `second` at the top level - * using the provided rewrite rule (with instantiation). - * - * Reduces to matching using [[matchTermRecursive]]. - */ - private def canRewrite(using context: RewriteContext)(first: Term, second: Term, rule: (Term, Term)): Option[TermSubstitution] = + * Decides whether a term `first` be rewritten into `second` at the top level + * using the provided rewrite rule (with instantiation). + * + * Reduces to matching using [[matchTermRecursive]]. + */ + private def canRewrite(using context: RewriteContext)(first: Expr[Ind], second: Expr[Ind], rule: (Expr[Ind], Expr[Ind])): Option[TermSubstitution] = matchTermRecursive(termRewriteConnector(first, second), termRewriteConnector(rule._1, rule._2), TermSubstitution.empty) /** - * Decides whether a formula `first` be rewritten into `second` at the top - * level using the provided rewrite rule (with instantiation). Produces the - * instantiation as output, if one exists. - * - * Reduces to matching using [[matchFormulaRecursive]]. - */ - private def canRewrite(using context: RewriteContext)(first: Formula, second: Formula, rule: (Formula, Formula)): Option[(FormulaSubstitution, TermSubstitution)] = + * Decides whether a formula `first` be rewritten into `second` at the top + * level using the provided rewrite rule (with instantiation). Produces the + * instantiation as output, if one exists. + * + * Reduces to matching using [[matchFormulaRecursive]]. + */ + private def canRewrite(using context: RewriteContext)(first: Expr[Prop], second: Expr[Prop], rule: (Expr[Prop], Expr[Prop])): Option[(FormulaSubstitution, TermSubstitution)] = matchFormulaRecursive(formulaRewriteConnector(first, second), formulaRewriteConnector(rule._1, rule._2), FormulaSubstitution.empty, TermSubstitution.empty) /** - * Decides whether a term `first` can be rewritten into another term `second` - * under the given rewrite rules and restrictions. - * - * Calls [[getContextRecursive]] as its actual implementation. 
- * - * @param first source term - * @param second destination term - * @param freeTermRules rewrite rules with unrestricted instantiations - * @param confinedTermRules rewrite rules with restricted instantiations wrt takenTermVariables - * @param takenTermVariables variables to *not* instantiate, i.e., treat as constant, for confined rules - */ + * Decides whether a term `first` can be rewritten into another term `second` + * under the given rewrite rules and restrictions. + * + * Calls [[getContextRecursive]] as its actual implementation. + * + * @param first source term + * @param second destination term + * @param freeTermRules rewrite rules with unrestricted instantiations + * @param confinedTermRules rewrite rules with restricted instantiations wrt takenTermVariables + * @param takenTermVariables variables to *not* instantiate, i.e., treat as constant, for confined rules + */ def getContextTerm( - first: Term, - second: Term, - freeTermRules: Seq[(Term, Term)], - confinedTermRules: Seq[(Term, Term)] = Seq.empty, + first: Expr[Ind], + second: Expr[Ind], + freeTermRules: Seq[(Expr[Ind], Expr[Ind])], + confinedTermRules: Seq[(Expr[Ind], Expr[Ind])] = Seq.empty, takenTermVariables: Set[Variable] = Set.empty ): Option[TermRewriteLambda] = { val context = RewriteContext( @@ -372,13 +744,13 @@ object UnificationUtils { } /** - * Inner implementation for [[getContextTerm]]. - * - * @param context all information about rewrite rules and allowed instantiations - * @param first source term - * @param second destination term - */ - private def getContextRecursive(using context: RewriteContext)(first: Term, second: Term): Option[TermRewriteLambda] = { + * Inner implementation for [[getContextTerm]]. + * + * @param context all information about rewrite rules and allowed instantiations + * @param first source term + * @param second destination term + */ + private def getContextRecursive(using context: RewriteContext)(first: Expr[Ind], second: Expr[Ind]): Option[TermRewriteLambda] = { // check if there exists a substitution lazy val validSubstitution = context.confinedTermRules @@ -434,33 +806,33 @@ object UnificationUtils { } /** - * Decides whether a formula `first` can be rewritten into another formula - * `second` under the given rewrite rules and restrictions. - * - * Calls [[getContextRecursive]] as its actual implementation. - * - * @param first source formula - * @param second destination formula - * @param freeTermRules term rewrite rules with unrestricted instantiations - * @param freeFormulaRules formula rewrite rules with unrestricted - * instantiations - * @param confinedTermRules term rewrite rules with restricted instantiations - * wrt takenTermVariables - * @param confinedTermRules formula rewrite rules with restricted - * instantiations wrt takenTermVariables - * @param takenTermVariables term variables to *not* instantiate, i.e., treat - * as constant, for confined rules - * @param takenFormulaVariables formula variables to *not* instantiate, i.e., - * treat as constant, for confined rules - */ + * Decides whether a formula `first` can be rewritten into another formula + * `second` under the given rewrite rules and restrictions. + * + * Calls [[getContextRecursive]] as its actual implementation. 
+ * + * @param first source formula + * @param second destination formula + * @param freeTermRules term rewrite rules with unrestricted instantiations + * @param freeFormulaRules formula rewrite rules with unrestricted + * instantiations + * @param confinedTermRules term rewrite rules with restricted instantiations + * wrt takenTermVariables + * @param confinedTermRules formula rewrite rules with restricted + * instantiations wrt takenTermVariables + * @param takenTermVariables term variables to *not* instantiate, i.e., treat + * as constant, for confined rules + * @param takenFormulaVariables formula variables to *not* instantiate, i.e., + * treat as constant, for confined rules + */ def getContextFormula( - first: Formula, - second: Formula, - freeTermRules: Seq[(Term, Term)] = Seq.empty, - freeFormulaRules: Seq[(Formula, Formula)] = Seq.empty, - confinedTermRules: Seq[(Term, Term)] = Seq.empty, + first: Expr[Prop], + second: Expr[Prop], + freeTermRules: Seq[(Expr[Ind], Expr[Ind])] = Seq.empty, + freeFormulaRules: Seq[(Expr[Prop], Expr[Prop])] = Seq.empty, + confinedTermRules: Seq[(Expr[Ind], Expr[Ind])] = Seq.empty, takenTermVariables: Set[Variable] = Set.empty, - confinedFormulaRules: Seq[(Formula, Formula)] = Seq.empty, + confinedFormulaRules: Seq[(Expr[Prop], Expr[Prop])] = Seq.empty, takenFormulaVariables: Set[VariableFormula] = Set.empty ): Option[FormulaRewriteLambda] = { val context = RewriteContext( @@ -475,13 +847,13 @@ object UnificationUtils { } def getContextFormulaSet( - first: Seq[Formula], - second: Seq[Formula], - freeTermRules: Seq[(Term, Term)], - freeFormulaRules: Seq[(Formula, Formula)], - confinedTermRules: Seq[(Term, Term)] = Seq.empty, + first: Seq[Expr[Prop]], + second: Seq[Expr[Prop]], + freeTermRules: Seq[(Expr[Ind], Expr[Ind])], + freeFormulaRules: Seq[(Expr[Prop], Expr[Prop])], + confinedTermRules: Seq[(Expr[Ind], Expr[Ind])] = Seq.empty, takenTermVariables: Set[Variable] = Set.empty, - confinedFormulaRules: Seq[(Formula, Formula)] = Seq.empty, + confinedFormulaRules: Seq[(Expr[Prop], Expr[Prop])] = Seq.empty, takenFormulaVariables: Set[VariableFormula] = Set.empty ): Option[Seq[FormulaRewriteLambda]] = { val context = RewriteContext( @@ -507,17 +879,17 @@ object UnificationUtils { } /** - * Inner implementation for [[getContextFormula]]. - * - * @param context all information about rewrite rules and allowed instantiations - * @param first source formula - * @param second destination formula - */ - private def getContextRecursive(using context: RewriteContext)(first: Formula, second: Formula): Option[FormulaRewriteLambda] = { + * Inner implementation for [[getContextFormula]]. 
+ * + * @param context all information about rewrite rules and allowed instantiations + * @param first source formula + * @param second destination formula + */ + private def getContextRecursive(using context: RewriteContext)(first: Expr[Prop], second: Expr[Prop]): Option[FormulaRewriteLambda] = { // check if there exists a substitution lazy val validSubstitution = context.confinedFormulaRules - .getFirst { (l: Formula, r: Formula) => + .getFirst { (l: Expr[Prop], r: Expr[Prop]) => val subst = canRewrite(using context)(first, second, (l, r)) subst.map(s => ((l, r), s)) } @@ -615,5 +987,5 @@ object UnificationUtils { } } } - -} + */ +// } diff --git a/lisa-utils/src/test/scala/lisa/ProofCheckerSuite.scala b/lisa-utils/src/test/scala/lisa/ProofCheckerSuite.scala index 5b14743b4..ff4ac8a27 100644 --- a/lisa-utils/src/test/scala/lisa/ProofCheckerSuite.scala +++ b/lisa-utils/src/test/scala/lisa/ProofCheckerSuite.scala @@ -6,56 +6,43 @@ import lisa.kernel.proof.SCProofCheckerJudgement import lisa.kernel.proof.SCProofCheckerJudgement.SCInvalidProof import lisa.kernel.proof.SequentCalculus.Sequent import lisa.kernel.proof.SequentCalculus.isSameSequent -import lisa.utils.FOLPrinter import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.Printer import org.scalatest.funsuite.AnyFunSuite import scala.language.adhocExtensions -abstract class ProofCheckerSuite(printer: Printer = FOLPrinter) extends AnyFunSuite { +abstract class ProofCheckerSuite extends AnyFunSuite { import lisa.kernel.fol.FOL.* - protected val (xl, yl, zl, wl, xpl, ypl, zpl, wpl) = ( - VariableLabel("x"), - VariableLabel("y"), - VariableLabel("z"), - VariableLabel("w"), - VariableLabel("x1"), - VariableLabel("y1"), - VariableLabel("z1"), - VariableLabel("w1") - ) protected val (x, y, z, w, xp, yp, zp, wp) = ( - VariableTerm(xl), - VariableTerm(yl), - VariableTerm(zl), - VariableTerm(wl), - VariableTerm(xpl), - VariableTerm(ypl), - VariableTerm(zpl), - VariableTerm(wpl) + Variable("x", Ind), + Variable("y", Ind), + Variable("z", Ind), + Variable("w", Ind), + Variable("x1", Ind), + Variable("y1", Ind), + Variable("z1", Ind), + Variable("w1", Ind) ) - protected val (sl, tl, ul, vl) = (VariableLabel("s"), VariableLabel("t"), VariableLabel("u"), VariableLabel("v")) - protected val (s, t, u, v) = (VariableTerm(sl), VariableTerm(tl), VariableTerm(ul), VariableTerm(vl)) + protected val (s, t, u, v) = (Variable("s", Ind), Variable("t", Ind), Variable("u", Ind), Variable("v", Ind)) def checkProof(proof: SCProof): Unit = { val judgement = checkSCProof(proof) - assert(judgement.isValid, printer.prettySCProof(judgement, true)) + assert(judgement.isValid, prettySCProof(judgement, true)) } def checkProof(proof: SCProof, expected: Sequent): Unit = { val judgement = checkSCProof(proof) - assert(judgement.isValid, "\n" + printer.prettySCProof(judgement)) - assert(isSameSequent(proof.conclusion, expected), s"(${printer.prettySequent(proof.conclusion)} did not equal ${printer.prettySequent(expected)})") + assert(judgement.isValid, "\n" + prettySCProof(judgement)) + assert(isSameSequent(proof.conclusion, expected), s"(${proof.conclusion.repr} did not equal ${expected.repr})") } - def checkIncorrectProof(incorrectProof: SCProof): Unit = { + inline def checkIncorrectProof(incorrectProof: SCProof): Unit = { assert( !checkSCProof(incorrectProof).isValid, - s"(incorrect proof with conclusion '${printer.prettySequent(incorrectProof.conclusion)}' was accepted by the proof checker)\nSequent: ${incorrectProof.conclusion}" + s"(incorrect proof with conclusion 
'${incorrectProof.conclusion.repr}' was accepted by the proof checker)\nSequent: ${incorrectProof.conclusion}" ) } } diff --git a/lisa-utils/src/test/scala/lisa/ProofTacticTestLib.scala b/lisa-utils/src/test/scala/lisa/ProofTacticTestLib.scala index ebecd9b39..2cec12523 100644 --- a/lisa-utils/src/test/scala/lisa/ProofTacticTestLib.scala +++ b/lisa-utils/src/test/scala/lisa/ProofTacticTestLib.scala @@ -1,20 +1,21 @@ package lisa.test import lisa.kernel.proof.SequentCalculus as SC -import lisa.prooflib.BasicMain -import lisa.prooflib.BasicStepTactic.* -import lisa.prooflib.Library -import lisa.prooflib.ProofTacticLib +import lisa.utils.prooflib.BasicMain +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.ProofTacticLib import org.scalatest.funsuite.AnyFunSuite import scala.collection.immutable.LazyList +import leo.datastructures.TPTP.FOF.Term trait ProofTacticTestLib extends AnyFunSuite with BasicMain { export lisa.test.TestTheoryLibrary.{_, given} - private val x: lisa.fol.FOL.Variable = variable - private val P = predicate[1] + private val x = variable[Ind] + private val P = variable[Ind >>: Prop] // generate a placeholde theorem to take ownership of proofs for test val placeholderTheorem: THMFromProof = Theorem(P(x) |- P(x)) { have(P(x) |- P(x)) by Hypothesis }.asInstanceOf diff --git a/lisa-utils/src/test/scala/lisa/TestTheoryAxioms.scala b/lisa-utils/src/test/scala/lisa/TestTheoryAxioms.scala index aeaa403d3..02765c155 100644 --- a/lisa-utils/src/test/scala/lisa/TestTheoryAxioms.scala +++ b/lisa-utils/src/test/scala/lisa/TestTheoryAxioms.scala @@ -5,11 +5,11 @@ import lisa.kernel.proof.RunningTheory import lisa.utils.KernelHelpers.{_, given} trait TestTheoryAxioms { - final val p1 = ConstantAtomicLabel("p1", 1) - final val p2 = ConstantAtomicLabel("p2", 1) - final val f1 = ConstantFunctionLabel("f1", 1) - final val fixedElement = ConstantFunctionLabel("fixedElement", 0) - final val anotherFixed = ConstantFunctionLabel("anotherElement", 0) + final val p1 = Constant("p1", Arrow(Ind, Prop)) + final val p2 = Constant("p2", Arrow(Ind, Prop)) + final val f1 = Constant("f1", Arrow(Ind, Ind)) + final val fixedElement = Constant("fixedElement", Ind) + final val anotherFixed = Constant("anotherElement", Ind) val runningTestTheory = new RunningTheory() runningTestTheory.addSymbol(p1) @@ -18,11 +18,11 @@ trait TestTheoryAxioms { runningTestTheory.addSymbol(fixedElement) runningTestTheory.addSymbol(anotherFixed) - private final val x = VariableLabel("x") - final val p1_implies_p2_f: Formula = forall(x, p1(x) ==> p2(x)) - final val ax2_f = p1(fixedElement()) - final val same_fixed_f = fixedElement() === anotherFixed() - final val fixed_point_f = forall(x, (f1(x) === fixedElement()) <=> (x === fixedElement())) + private final val x = Variable("x", Ind) + final val p1_implies_p2_f = forall(x, p1(x) ==> p2(x)) + final val ax2_f = p1(fixedElement) + final val same_fixed_f = fixedElement === anotherFixed + final val fixed_point_f = forall(x, (f1(x) === fixedElement) <=> (x === fixedElement)) val p1_implies_p2 = runningTestTheory.addAxiom("p1_implies_p2", p1_implies_p2_f).get val A2 = runningTestTheory.addAxiom("A2", ax2_f).get diff --git a/lisa-utils/src/test/scala/lisa/TestTheoryLibrary.scala b/lisa-utils/src/test/scala/lisa/TestTheoryLibrary.scala index 87b0cf73c..48c23721c 100644 --- a/lisa-utils/src/test/scala/lisa/TestTheoryLibrary.scala +++ b/lisa-utils/src/test/scala/lisa/TestTheoryLibrary.scala @@ -1,32 +1,35 @@ package lisa.test -import 
lisa.prooflib.Library +import lisa.utils.prooflib.Library object TestTheoryLibrary extends Library { val theory: TestTheory.runningTestTheory.type = TestTheory.runningTestTheory - export lisa.fol.FOL.{*, given} - - final val p1 = ConstantPredicateLabel("p1", 1) - final val p2 = ConstantPredicateLabel("p2", 1) - final val f1 = ConstantFunctionLabel("f1", 1) - final val fixedElement = Constant("fixedElement") - final val anotherFixed = Constant("anotherElement") + export lisa.utils.fol.FOL.{*, given} + final val p1 = constant[Ind >>: Prop] + final val p2 = constant[Ind >>: Prop] + final val f1 = constant[Ind >>: Ind] + final val fixedElement = constant[Ind] + final val anotherElement = constant[Ind] addSymbol(p1) addSymbol(p2) addSymbol(f1) addSymbol(fixedElement) - addSymbol(anotherFixed) + addSymbol(anotherElement) - private final val x = Variable("x") - final val p1_implies_p2_f: Formula = forall(x, p1(x) ==> p2(x)) + private final val x = variable[Ind] + final val p1_implies_p2_f: Expr[Prop] = forall(x, p1(x) ==> p2(x)) final val ax2 = p1(fixedElement) - final val same_fixed_f = fixedElement === anotherFixed + final val same_fixed_f = fixedElement === anotherElement final val fixed_point_f = forall(x, (f1(x) === fixedElement) <=> (x === fixedElement)) val p1_implies_p2 = AXIOM(TestTheory.p1_implies_p2, p1_implies_p2_f, "p1_implies_p2") val A2 = AXIOM(TestTheory.A2, ax2, "A2") + println(s"TestTheory.same_fixed: ${TestTheory.same_fixed}") + println(s"same_fixed_f : ${same_fixed_f}") + println(s"same_fixed_f.underlying : ${same_fixed_f.underlying}") + println(s"TestTheory.same_fixed.ax : ${TestTheory.same_fixed.ax}") val same_fixed = AXIOM(TestTheory.same_fixed, same_fixed_f, "same_fixed") val fixed_point = AXIOM(TestTheory.fixed_point, fixed_point_f, "fixed_point") diff --git a/lisa-utils/src/test/scala/lisa/kernel/EquivalenceCheckerTests.scala b/lisa-utils/src/test/scala/lisa/kernel/EquivalenceCheckerTests.scala index 81a39258f..3b6997284 100644 --- a/lisa-utils/src/test/scala/lisa/kernel/EquivalenceCheckerTests.scala +++ b/lisa-utils/src/test/scala/lisa/kernel/EquivalenceCheckerTests.scala @@ -2,7 +2,6 @@ package lisa.kernel import lisa.kernel.fol.FOL import lisa.kernel.fol.FOL.* -import lisa.utils.FOLPrinter import lisa.utils.KernelHelpers._ import lisa.utils.KernelHelpers.given_Conversion_Identifier_String import lisa.utils.KernelHelpers.given_Conversion_String_Identifier @@ -17,17 +16,17 @@ import scala.util.Random class EquivalenceCheckerTests extends AnyFunSuite { private val verbose = false // Turn this on to print all tested couples - def checkEquivalence(left: Formula, right: Formula): Unit = { + def checkEquivalence(left: Expression, right: Expression): Unit = { assert( isSame(left, right), - s"Couldn't prove the equivalence between ${FOLPrinter.prettyFormula(left)} and ${FOLPrinter.prettyFormula(right)}\nLeft tree: ${left}\nRight tree: ${right}" + s"Couldn't prove the equivalence between ${left.repr} and ${right.repr}\nLeft tree: ${left}\nRight tree: ${right}" ) } - def checkNonEquivalence(left: Formula, right: Formula): Unit = { + def checkNonEquivalence(left: Expression, right: Expression): Unit = { assert( !isSame(left, right), - s"Expected the checker to not be able to show equivalence between ${FOLPrinter.prettyFormula(left)} and ${FOLPrinter.prettyFormula(right)}\nLeft tree: ${left}\nRight tree: ${right}" + s"Expected the checker to not be able to show equivalence between ${left.repr} and ${right.repr}\nLeft tree: ${left}\nRight tree: ${right}" ) } @@ -41,6 +40,7 @@ class 
EquivalenceCheckerTests extends AnyFunSuite { id } } + def numbersGenerator(): () => Int = { var i = 1 () => { @@ -50,15 +50,15 @@ class EquivalenceCheckerTests extends AnyFunSuite { } } - def constantsGenerator(): () => Formula = { + def constantsGenerator(): () => Expression = { val generator = nameGenerator() () => { val id = generator() - AtomicFormula(ConstantAtomicLabel(id, 0), Seq.empty) + Constant(id, Prop) } } - def formulasGenerator(c: Double)(random: Random): () => Formula = { + def ExpressionsGenerator(c: Double)(random: Random): () => Expression = { val connectors = ArrayBuffer.empty[String] val variables = ArrayBuffer.empty[String] val nextConnectorName = nameGenerator() @@ -66,7 +66,7 @@ class EquivalenceCheckerTests extends AnyFunSuite { val gen = numbersGenerator() () => s"v${gen()}" } - def generate(p: Double): Formula = { + def generate(p: Double): Expression = { val q = random.nextDouble() if (q >= p) { @@ -81,7 +81,7 @@ class EquivalenceCheckerTests extends AnyFunSuite { // Reuse existing name connectors(random.nextInt(connectors.size)) } - AtomicFormula(ConstantAtomicLabel(name, 0), Seq.empty) + Constant(name, Prop) } else { // Branch val nextP = p * c @@ -105,9 +105,9 @@ class EquivalenceCheckerTests extends AnyFunSuite { // Binder val name = nextVariableName() variables += name - val binderTypes: IndexedSeq[BinderLabel] = IndexedSeq(Forall, Exists, ExistsOne) + val binderTypes = IndexedSeq(forall, exists) val binderType = binderTypes(random.nextInt(binderTypes.size)) - BinderFormula(binderType, VariableLabel(name), generate(nextP)) + binderType(lambda(Variable(name, Ind), generate(nextP))) } } } @@ -115,10 +115,10 @@ class EquivalenceCheckerTests extends AnyFunSuite { () => generate(c) } - def testcasesAny(generatorToTestcases: (() => Formula) => Random => Seq[(Formula, Formula)], equivalent: Boolean): Unit = { + def testcasesAny(generatorToTestcases: (() => Expression) => Random => Seq[(Expression, Expression)], equivalent: Boolean): Unit = { val random: Random = new Random(1) - def testWith(generator: () => () => Formula): Unit = { + def testWith(generator: () => () => Expression): Unit = { val cases = generatorToTestcases(generator())(random) cases.foreach { (left, right) => // For completeness we also test symmetry @@ -126,19 +126,19 @@ class EquivalenceCheckerTests extends AnyFunSuite { checkEquivalence(left, right) checkEquivalence(right, left) if (verbose) { - println(s"${FOLPrinter.prettyFormula(left)} <==> ${FOLPrinter.prettyFormula(right)}") + println(s"${left.repr} <==> ${right.repr}") } } else { checkNonEquivalence(left, right) checkNonEquivalence(right, left) if (verbose) { - println(s"${FOLPrinter.prettyFormula(left)} ${FOLPrinter.prettyFormula(right)}") + println(s"${left.repr} ${right.repr}") } } } } - def testWithRepeat(generator: () => () => Formula, n: Int): Unit = { + def testWithRepeat(generator: () => () => Expression, n: Int): Unit = { for (i <- 0 until n) { testWith(generator) } @@ -148,80 +148,64 @@ class EquivalenceCheckerTests extends AnyFunSuite { testWith(constantsGenerator) - // 2. Random formulas (small) + // 2. Random Expressions (small) - testWithRepeat(() => formulasGenerator(0.8)(random), 5) + testWithRepeat(() => ExpressionsGenerator(0.8)(random), 5) - // 3. Random formulas (larger) + // 3. 
Random Expressions (larger) - testWithRepeat(() => formulasGenerator(0.90)(random), 15) + testWithRepeat(() => ExpressionsGenerator(0.90)(random), 15) } - def testcases(f: Formula => Random => Seq[(Formula, Formula)], equivalent: Boolean): Unit = + def testcases(f: Expression => Random => Seq[(Expression, Expression)], equivalent: Boolean): Unit = testcasesAny(generator => r => f(generator())(r), equivalent) - def testcases(f: (Formula, Formula) => Random => Seq[(Formula, Formula)], equivalent: Boolean): Unit = + def testcases(f: (Expression, Expression) => Random => Seq[(Expression, Expression)], equivalent: Boolean): Unit = testcasesAny(generator => r => f(generator(), generator())(r), equivalent) - def testcases(f: (Formula, Formula, Formula) => Random => Seq[(Formula, Formula)], equivalent: Boolean): Unit = + def testcases(f: (Expression, Expression, Expression) => Random => Seq[(Expression, Expression)], equivalent: Boolean): Unit = testcasesAny(generator => r => f(generator(), generator(), generator())(r), equivalent) - def testcases(f: (Formula, Formula, Formula, Formula) => Random => Seq[(Formula, Formula)], equivalent: Boolean): Unit = + def testcases(f: (Expression, Expression, Expression, Expression) => Random => Seq[(Expression, Expression)], equivalent: Boolean): Unit = testcasesAny(generator => r => f(generator(), generator(), generator(), generator())(r), equivalent) def repeatApply[T](n: Int)(f: T => T)(initial: T): T = if (n > 0) repeatApply(n - 1)(f)(f(initial)) else initial - def commutativeShuffle(iterations: Int)(random: Random)(f: Formula): Formula = { - def transform(f: Formula): Formula = f match { - case AtomicFormula(label, args) => f - case ConnectorFormula(label, args) => - val newArgs = label match { - case And | Or | Iff => random.shuffle(args) - case _ => args - } - ConnectorFormula(label, newArgs.map(transform)) - case BinderFormula(label, bound, inner) => BinderFormula(label, bound, transform(inner)) + def commutativeShuffle(iterations: Int)(random: Random)(f: Expression): Expression = { + def transform(f: Expression): Expression = f match { + case And(a, b) => if random.nextBoolean() then and(transform(a), transform(b)) else and(transform(b), transform(a)) + case Or(a, b) => if random.nextBoolean() then or(transform(a), transform(b)) else or(transform(b), transform(a)) + case Iff(a, b) => if random.nextBoolean() then iff(transform(a), transform(b)) else iff(transform(b), transform(a)) + case Application(f, arg) => Application(transform(f), transform(arg)) + case Lambda(v, body) => Lambda(v, transform(body)) + case _ => f } repeatApply(iterations)(transform)(f) } - def associativeShuffle(iterations: Int)(random: Random)(f: Formula): Formula = { - def transform(f: Formula): Formula = f match { - case AtomicFormula(label, args) => f - // Simple for now, assume binary operations - case ConnectorFormula(label1 @ (And | Or), Seq(ConnectorFormula(label2, Seq(a1, a2)), a3)) if label1 == label2 => - if (random.nextBoolean()) { - ConnectorFormula(label1, Seq(a1, ConnectorFormula(label2, Seq(a2, a3)))) - } else { - f - } - case ConnectorFormula(label1 @ (And | Or), Seq(a1, ConnectorFormula(label2, Seq(a2, a3)))) if label1 == label2 => - if (random.nextBoolean()) { - ConnectorFormula(label1, Seq(ConnectorFormula(label2, Seq(a1, a2)), a3)) - } else { - f - } - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(transform)) - case BinderFormula(label, bound, inner) => BinderFormula(label, bound, transform(inner)) + def associativeShuffle(iterations: 
Int)(random: Random)(f: Expression): Expression = { + def transform(f: Expression): Expression = f match { + case And(And(a, b), c) => if (random.nextBoolean()) and(transform(a), and(transform(b), transform(c))) else and(and(transform(a), transform(b)), transform(c)) + case Or(Or(a, b), c) => if (random.nextBoolean()) or(transform(a), or(transform(b), transform(c))) else or(or(transform(a), transform(b)), transform(c)) + case Application(f, arg) => Application(transform(f), transform(arg)) + case Lambda(v, body) => Lambda(v, transform(body)) + case _ => f } repeatApply(iterations)(transform)(f) } - def addDoubleNegations(p: Double)(random: Random)(f: Formula): Formula = { - def transform(f: Formula): Formula = - if (random.nextDouble() < p) neg(neg(transform(f))) + def addDoubleNegations(p: Double)(random: Random)(f: Expression): Expression = { + def transform(f: Expression): Expression = + if (random.nextDouble() < p && f.sort == Prop) neg(neg(transform(f))) else f match { - case _: AtomicFormula => f - case ConnectorFormula(label, args) => ConnectorFormula(label, args.map(transform)) - case BinderFormula(label, bound, inner) => BinderFormula(label, bound, transform(inner)) + case Application(f, arg) => Application(transform(f), transform(arg)) + case Lambda(v, body) => Lambda(v, transform(body)) + case _ => f } transform(f) } - def addDeMorgans(p: Double)(random: Random)(f: Formula): Formula = { - def transform(f: Formula): Formula = f match { - case _: AtomicFormula => f - case ConnectorFormula(label, args) => - val map: Map[ConnectorLabel, ConnectorLabel] = Map(And -> Or, Or -> And) - map.get(label) match { - case Some(opposite) if random.nextDouble() < p => transform(neg(ConnectorFormula(opposite, args.map(neg(_))))) - case _ => ConnectorFormula(label, args.map(transform)) - } - case BinderFormula(label, bound, inner) => BinderFormula(label, bound, transform(inner)) + def addDeMorgans(p: Double)(random: Random)(f: Expression): Expression = { + def transform(f: Expression): Expression = f match { + case And(a, b) => if random.nextBoolean() then !or(!transform(a), !transform(b)) else and(transform(b), transform(a)) + case Or(a, b) => if random.nextBoolean() then !and(!transform(a), !transform(b)) else or(transform(b), transform(a)) + case Application(f, arg) => Application(transform(f), transform(arg)) + case Lambda(v, body) => Lambda(v, transform(body)) + case _ => f } transform(f) } @@ -364,13 +348,13 @@ class EquivalenceCheckerTests extends AnyFunSuite { } test("All allowed transformations") { - val transformations: Seq[Random => Formula => Formula] = IndexedSeq( + val transformations: Seq[Random => Expression => Expression] = IndexedSeq( r => commutativeShuffle(1)(r), r => associativeShuffle(1)(r), r => addDoubleNegations(0.02)(r), r => addDeMorgans(0.05)(r) ) - def randomTransformations(random: Random)(f: Formula): Formula = { + def randomTransformations(random: Random)(f: Expression): Expression = { val n = random.nextInt(50) Seq.fill(n)(transformations(random.nextInt(transformations.size))).foldLeft(f)((acc, e) => e(random)(acc)) } diff --git a/lisa-utils/src/test/scala/lisa/kernel/FolTests.scala b/lisa-utils/src/test/scala/lisa/kernel/FolTests.scala deleted file mode 100644 index bc769dd8c..000000000 --- a/lisa-utils/src/test/scala/lisa/kernel/FolTests.scala +++ /dev/null @@ -1,119 +0,0 @@ -package lisa.kernel - -import lisa.kernel.fol.FOL.* -import lisa.kernel.proof.RunningTheory -import lisa.kernel.proof.RunningTheory.* -import lisa.kernel.proof.SCProof -import 
lisa.kernel.proof.SCProofChecker -import lisa.kernel.proof.SequentCalculus.* -import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.Printer -import org.scalatest.funsuite.AnyFunSuite - -import scala.collection.immutable.SortedSet -import scala.language.adhocExtensions -import scala.util.Random - -class FolTests extends AnyFunSuite { - - val predicateVerifier = SCProofChecker.checkSCProof - - def nameGenerator(candidates: Seq[String], gen: Random = new Random(), l: Int = 1): String = { - if (gen.nextBoolean()) gen.nextString(1) - else candidates(gen.between(0, candidates.length)) - } - - def termGenerator(maxDepth: Int, gen: Random = new Random()): Term = { - if (maxDepth <= 1) { - val r = gen.between(0, 3) - if (r == 0) { - val name = "" + ('a' to 'e')(gen.between(0, 5)) - Term(ConstantFunctionLabel(name, 0), List()) - } else { - val name = "" + ('v' to 'z')(gen.between(0, 5)) - VariableTerm(VariableLabel(name)) - } - } else { - val r = gen.between(0, 8) - val name = "" + ('f' to 'j')(gen.between(0, 5)) - if (r == 0) { - val name = "" + ('a' to 'e')(gen.between(0, 5)) - Term(ConstantFunctionLabel(name, 0), List()) - } else if (r == 1) { - val name = "" + ('v' to 'z')(gen.between(0, 5)) - VariableTerm(VariableLabel(name)) - } - if (r <= 3) Term(ConstantFunctionLabel(name, 1), Seq(termGenerator(maxDepth - 1, gen))) - else if (r <= 5) Term(ConstantFunctionLabel(name, 2), Seq(termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen))) - else if (r == 6) Term(ConstantFunctionLabel(name, 3), Seq(termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen))) - else - Term( - ConstantFunctionLabel(name, 4), - Seq(termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen)) - ) - - } - } - - def formulaGenerator(maxDepth: Int, gen: Random = new Random()): Formula = { - if (maxDepth <= 2 || (gen.nextBoolean() && gen.nextBoolean())) { - val r = gen.between(0, 7) - if (r <= 1) { - val name = "" + ('A' to 'E')(gen.between(0, 5)) - AtomicFormula(ConstantAtomicLabel(name, 0), Seq()) - } else if (r <= 3) { - val name = "" + ('A' to 'E')(gen.between(0, 5)) - AtomicFormula(ConstantAtomicLabel(name, 1), Seq(termGenerator(maxDepth - 1, gen))) - } else if (r <= 5) { - val s = gen.between(0, 3) - if (s == 0) AtomicFormula(equality, Seq(termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen))) - else { - val name = "" + ('A' to 'E')(gen.between(0, 5)) - AtomicFormula(ConstantAtomicLabel(name, 2), Seq(termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen))) - } - } else { - val name = "" + ('A' to 'E')(gen.between(0, 5)) - AtomicFormula(ConstantAtomicLabel(name, 3), Seq(termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen), termGenerator(maxDepth - 1, gen))) - } - - } else { - val r = gen.between(0, 7) - if (r <= 1) neg(formulaGenerator(maxDepth - 1, gen)) - else if (r == 2) and(formulaGenerator(maxDepth - 1, gen), formulaGenerator(maxDepth - 1, gen)) - else if (r == 3) or(formulaGenerator(maxDepth - 1, gen), formulaGenerator(maxDepth - 1, gen)) - else if (r == 4) implies(formulaGenerator(maxDepth - 1, gen), formulaGenerator(maxDepth - 1, gen)) - else if (r == 5) { - val f = formulaGenerator(maxDepth - 1, gen) - val l = f.freeVariables.toSeq ++ Seq(x) - forall(l(gen.between(0, l.size)), f) - } else { - val f = formulaGenerator(maxDepth - 1, gen) - val l = f.freeVariables.toSeq ++ Seq(x) - exists(l(gen.between(0, l.size)), f) - } - } - - } - - 
private val x = VariableLabel("x") - private val y = VariableLabel("y") - private val z = VariableLabel("z") - private val a = AtomicFormula(ConstantAtomicLabel("A", 0), Seq()) - private val b = AtomicFormula(ConstantAtomicLabel("B", 0), Seq()) - private val fp = ConstantAtomicLabel("F", 1) - private val sT = VariableLabel("t") - - def test_some_random_formulas(n: Int, maxDepth: Int): Unit = { - (0 to n).foreach(_ => println(formulaGenerator(maxDepth))) - } - - test("Random formulas well-constructed") { - (0 to 50).foreach(_ => formulaGenerator(10)) - } - - test("Fresh variables should be fresh") { - val y1 = VariableLabel(lisa.kernel.fol.FOL.freshId(equality(VariableTerm(x), VariableTerm(x)).freeVariables.map(_.name), x.name)) - - assert(!(x.id == y1.id)) - } -} diff --git a/lisa-utils/src/test/scala/lisa/kernel/IncorrectProofsTests.scala b/lisa-utils/src/test/scala/lisa/kernel/IncorrectProofsTests.scala index 8a9ba63a1..26edde8e7 100644 --- a/lisa-utils/src/test/scala/lisa/kernel/IncorrectProofsTests.scala +++ b/lisa-utils/src/test/scala/lisa/kernel/IncorrectProofsTests.scala @@ -19,10 +19,7 @@ class IncorrectProofsTests extends ProofCheckerSuite { // Shorthand implicit def proofStepToProof(proofStep: SCProofStep): SCProof = SCProof(proofStep) - val (fl, gl, hl) = (VariableFormulaLabel("f"), VariableFormulaLabel("g"), VariableFormulaLabel("h")) - val f = AtomicFormula(fl, Seq.empty) // Some arbitrary formulas - val g = AtomicFormula(gl, Seq.empty) - val h = AtomicFormula(hl, Seq.empty) + val (f, g, h) = (Variable("f", Prop), Variable("g", Prop), Variable("h", Prop)) val incorrectProofs: Seq[SCProof] = List( SCProof( @@ -40,23 +37,23 @@ class IncorrectProofsTests extends ProofCheckerSuite { SCProof( Hypothesis(emptySeq +<< (x === y) +>> (x === y), x === y), - RightSubstEq(emptySeq +<< (x === y) +<< (x === z) +>> (z === y), 0, List(((LambdaTermTerm(Seq(), x), LambdaTermTerm(Seq(), z)))), (Seq(yl), x === y)) // wrong variable replaced + RightSubstEq(emptySeq +<< (x === y) +<< (x === z) +>> (z === y), 0, Seq((x, z)),(Seq(y), x === y)) // wrong variable replaced ), SCProof( Hypothesis(emptySeq +<< (x === y) +>> (x === y), x === y), - RightSubstEq(emptySeq +<< (x === y) +>> (z === y), 0, List(((LambdaTermTerm(Seq(), x), LambdaTermTerm(Seq(), z)))), (Seq(xl), x === y)) // missing hypothesis + RightSubstEq(emptySeq +<< (x === y) +>> (z === y), 0, Seq((x, z)), (Seq(x), x === y)) // missing hypothesis ), SCProof( Hypothesis(emptySeq +<< (x === y) +>> (x === y), x === y), - RightSubstEq(emptySeq +<< (x === y) +<< (x === z) +>> (z === y), 0, List(((LambdaTermTerm(Seq(), x), LambdaTermTerm(Seq(), z)))), (Seq(xl), x === z)) // replacement mismatch + RightSubstEq(emptySeq +<< (x === y) +<< (x === z) +>> (z === y), 0, Seq((x, z)), (Seq(x), x === z)) // replacement mismatch ), SCProof( Hypothesis(emptySeq +<< (x === y) +>> (x === y), x === y), - LeftSubstEq(emptySeq +<< (z === y) +<< (x === z) +>> (x === y), 0, List(((LambdaTermTerm(Seq(), x), LambdaTermTerm(Seq(), z)))), (Seq(yl), x === y)) + LeftSubstEq(emptySeq +<< (z === y) +<< (x === z) +>> (x === y), 0, Seq((x, z)), (Seq(y), x === y)) ), SCProof( Hypothesis(emptySeq +<< (f <=> g) +>> (f <=> g), f <=> g), - LeftSubstIff(emptySeq +<< (h <=> g) +<< (f <=> h) +>> (f <=> g), 0, List(((LambdaTermFormula(Seq(), f), LambdaTermFormula(Seq(), h)))), (Seq(gl), f <=> g)) + LeftSubstIff(emptySeq +<< (h <=> g) +<< (f <=> h) +>> (f <=> g), 0, Seq((f, h)), (Seq(g), f <=> g)) ), SCProof( Hypothesis(emptySeq +<< f +>> f, f), diff --git 
a/lisa-utils/src/test/scala/lisa/kernel/ProofTests.scala b/lisa-utils/src/test/scala/lisa/kernel/ProofTests.scala index f86a18137..11b350e6c 100644 --- a/lisa-utils/src/test/scala/lisa/kernel/ProofTests.scala +++ b/lisa-utils/src/test/scala/lisa/kernel/ProofTests.scala @@ -8,7 +8,6 @@ import lisa.kernel.proof.SCProofChecker import lisa.kernel.proof.SCProofChecker.checkSCProof import lisa.kernel.proof.SequentCalculus.* import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.Printer import org.scalatest.funsuite.AnyFunSuite import scala.language.adhocExtensions @@ -16,23 +15,23 @@ import scala.util.Random class ProofTests extends AnyFunSuite { - private val x = VariableLabel("x") - private val y = VariableLabel("y") - private val z = VariableLabel("z") - val f = SchematicFunctionLabel("f", 1) - val f2 = SchematicFunctionLabel("f2", 1) - val g = ConstantFunctionLabel("g", 2) - val g2 = SchematicFunctionLabel("g2", 2) - private val a = AtomicFormula(ConstantAtomicLabel("A", 0), Seq()) - private val b = AtomicFormula(ConstantAtomicLabel("B", 0), Seq()) - private val fp = ConstantAtomicLabel("F", 1) - val sT = VariableLabel("t") + private val x = variable + private val y = variable + private val z = variable + val f = function(1) + val f2 = function(1) + val g = cst("g", Ind >>: Ind >>: Ind) + val g2 = function(2) + private val a = cst("A", Prop) + private val b = cst("B", Prop) + private val fp = cst("F", Ind >>: Prop) + val sT = variable("t") - val X = VariableFormulaLabel("X") - val P = SchematicPredicateLabel("f", 1) - val P2 = SchematicPredicateLabel("f2", 1) - val Q = ConstantAtomicLabel("g", 2) - val Q2 = SchematicPredicateLabel("g2", 2) + val X = formulaVariable("X") + val P = predicate(1) + val P2 = predicate(1) + val Q = cst(Ind >>: Ind >>: Prop) + val Q2 = predicate(2) test("Verification of Pierce law") { val s0 = Hypothesis(a |- a, a) @@ -47,203 +46,259 @@ class ProofTests extends AnyFunSuite { test("Verification of LeftSubstEq") { { val t0 = Hypothesis(fp(x) |- fp(x), fp(x)) - val t1 = LeftSubstEq(Set(fp(y), x === y) |- fp(x), 0, List(((LambdaTermTerm(Seq(), x), LambdaTermTerm(Seq(), y)))), (Seq(sT), fp(sT))) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t1 = Hypothesis(x === y |- x === y, x === y) + val t2 = LeftSubstEq(Set(fp(y), x === y) |- fp(x), 0, Seq((x, y)), (Seq(sT), fp(sT))) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, fp(f(x))) |- exists(x, fp(f(x))), exists(x, fp(f(x)))) val t1 = LeftSubstEq( - Set(exists(x, fp(g(x, x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(f(x))), + Set(exists(x, fp(lambda(x, g(x, x))(x))), forall(y, f(y) === lambda(x, g(x, x))(y))) |- exists(x, fp(f(x))), 0, - List((LambdaTermTerm(Seq(x), f(x)), LambdaTermTerm(Seq(x), g(x, x)))), + Seq((f, lambda(x, g(x, x)))), (Seq(f2), exists(x, fp(f2(x)))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta( + Set(exists(x, fp(g(x, x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(f(x))), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, fp(f(x))) |- exists(x, fp(f(x))), exists(x, fp(f(x)))) val t1 = LeftSubstEq( - Set(exists(x, fp(g(x, x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(f(x))), + Set(exists(x, fp(lambda(x, g(x, x))(x))), forall(y, f(y) === lambda(x, g(x, x))(y))) |- exists(x, fp(f(x))), 0, - List((LambdaTermTerm(Seq(y), f(y)), LambdaTermTerm(Seq(z), g(z, z)))),
+ Seq((f, lambda(z, g(z, z)))), (Seq(f2), exists(x, fp(f2(x)))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta( + Set(exists(x, fp(g(x, x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(f(x))), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, z))))) |- exists(x, forall(y, fp(g(y, g(x, z))))), exists(x, forall(y, fp(g(y, g(x, z)))))) val t1 = LeftSubstEq( - Set(exists(x, forall(y, fp(g(g(x, z), y)))), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(y, g(x, z))))), + Set(exists(x, forall(y, fp(lambda(Seq(y, z), g(z, y))(y, g(x, z))))), forall(y, forall(z, g(y, z) === lambda(Seq(y, z), g(z, y))(y, z)))) |- exists(x, forall(y, fp(g(y, g(x, z))))), 0, - List((LambdaTermTerm(Seq(y, z), g(y, z)), LambdaTermTerm(Seq(y, z), g(z, y)))), + Seq((g, lambda(Seq(y, z), g(z, y)))), (Seq(g2), exists(x, forall(y, fp(g2(y, g(x, z)))))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, z))))) |- exists(x, forall(y, fp(g(y, g(x, z))))), exists(x, forall(y, fp(g(y, g(x, z)))))) val t1 = LeftSubstEq( - Set(exists(x, forall(y, fp(g(g(z, x), y)))), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(y, g(x, z))))), + Set( + exists(x, forall(y, fp(lambda(Seq(y, z), g(z, y))(y, lambda(Seq(y, z), g(z, y))(x, z))))), + forall(y, forall(z, g(y, z) === lambda(Seq(y, z), g(z, y))(y, z))) + ) |- exists(x, forall(y, fp(g(y, g(x, z))))), 0, - List((LambdaTermTerm(Seq(y, z), g(y, z)), LambdaTermTerm(Seq(y, z), g(z, y)))), + Seq((g, lambda(Seq(y, z), g(z, y)))), (Seq(g2), exists(x, forall(y, fp(g2(y, g2(x, z)))))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1))) + assert(judg.isValid, "\n" + judg.repr) + } + // + { + val t0 = Hypothesis(P(x) |- P(x), P(x)) + val t1 = Hypothesis(P(x) <=> P(y) |- P(x) <=> P(y), P(x) <=> P(y)) + val t2 = LeftSubstIff(Set(P(y), P(x) <=> P(y)) |- P(x), 0, Seq((P(x), P(y))), (Seq(X), X)) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) + } + { + val t0 = Hypothesis(exists(x, P(x)) |- exists(x, P(x)), exists(x, P(x))) + val t1 = LeftSubstIff( + Set(exists(x, lambda(x, Q(x, x))(x)), forall(y, P(y) <=> lambda(x, Q(x, x))(y))) |- exists(x, P(x)), + 0, + Seq((P, lambda(x, Q(x, x)))), + (Seq(P2), exists(x, P2(x))) + ) + val t2 = Beta( + Set(exists(x, Q(x, x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, P(x)), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) + } + { + val t0 = Hypothesis(exists(x, P(x)) |- exists(x, P(x)), exists(x, P(x))) + val t1 = LeftSubstIff( + Set(exists(x, lambda(z, Q(z, z))(x)), forall(y, P(y) <=> lambda(x, Q(x, x))(y))) |- exists(x, P(x)), + 0, + Seq((P, lambda(z, Q(z, z)))), + (Seq(P2), exists(x, P2(x))) + ) + val t2 = Beta( + Set(exists(x, Q(x, x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, P(x)), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) + } + { + val t0 = Hypothesis(exists(x, forall(y, Q(y, g(x, z)))) |- exists(x, forall(y, Q(y, g(x, z)))), exists(x, forall(y, Q(y, g(x, z))))) + val t1 = LeftSubstIff( + Set(exists(x, forall(y, lambda(Seq(y, z), Q(z, y))(y, g(x, z)))), forall(x, forall(y, 
Q(x, y) <=> lambda(Seq(y, z), Q(z, y))(x, y)))) |- exists(x, forall(y, Q(y, g(x, z)))), + 0, + Seq((Q, lambda(Seq(y, z), Q(z, y)))), + (Seq(Q2), exists(x, forall(y, Q2(y, g(x, z))))) + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, f(z)))))) |- exists(x, forall(y, fp(g(y, g(x, f(z)))))), exists(x, forall(y, fp(g(y, g(x, f(z))))))) val t1 = LeftSubstEq( - Set(exists(x, forall(y, fp(g(g(g(z, z), x), y)))), forall(y, f(y) === g(y, y)), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(y, g(x, f(z)))))), + Set(exists(x, forall(y, fp(lambda(Seq(y, z), g(z, y))(y, lambda(Seq(y, z), g(z, y))(x, lambda(Seq(z), g(z, z))(z)))))), forall(y, f(y) === lambda(Seq(z), g(z, z))(y)), forall(y, forall(z, g(y, z) === lambda(Seq(y, z), g(z, y))(y, z)))) |- exists(x, forall(y, fp(g(y, g(x, f(z)))))), 0, - List((LambdaTermTerm(Seq(y, z), g(y, z)), LambdaTermTerm(Seq(y, z), g(z, y))), (LambdaTermTerm(Seq(y), f(y)), LambdaTermTerm(Seq(z), g(z, z)))), + List((g, lambda(Seq(y, z), g(z, y))), (f, lambda(Seq(z), g(z, z)))), (Seq(g2, f2), exists(x, forall(y, fp(g2(y, g2(x, f2(z))))))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta(Set(exists(x, forall(y, fp(g(g(g(z, z), x), y)))), forall(y, f(y) === g(y, y)), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(y, g(x, f(z)))))), 1) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } } test("Verification of RightSubstEq") { { val t0 = Hypothesis(fp(x) |- fp(x), fp(x)) - val t1 = RightSubstEq(Set(fp(x), x === y) |- fp(y), 0, List(((LambdaTermTerm(Seq(), x), LambdaTermTerm(Seq(), y)))), (Seq(sT), fp(sT))) + val t1 = Hypothesis(x === y |- x === y, x === y) + val t2 = RightSubstEq(Set(fp(x), x === y) |- fp(y), 0, Seq((x, y)), (Seq(sT), fp(sT))) assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) } { val t0 = Hypothesis(exists(x, fp(f(x))) |- exists(x, fp(f(x))), exists(x, fp(f(x)))) val t1 = RightSubstEq( - Set(exists(x, fp(f(x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(g(x, x))), + Set(exists(x, fp(f(x))), forall(y, f(y) === lambda(x, g(x, x))(y))) |- exists(x, fp(lambda(x, g(x, x))(x))), 0, - List((LambdaTermTerm(Seq(x), f(x)), LambdaTermTerm(Seq(x), g(x, x)))), + Seq((f, lambda(x, g(x, x)))), (Seq(f2), exists(x, fp(f2(x)))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta( + Set(exists(x, fp(f(x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(g(x, x))), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, fp(f(x))) |- exists(x, fp(f(x))), exists(x, fp(f(x)))) val t1 = RightSubstEq( - Set(exists(x, fp(f(x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(g(x, x))), + Set(exists(x, fp(f(x))), forall(y, f(y) === lambda(x, g(x, x))(y))) |- exists(x, fp(lambda(z, g(z, z))(x))), 0, - List((LambdaTermTerm(Seq(y), f(y)), LambdaTermTerm(Seq(z), g(z, z)))), + Seq((f, lambda(z, g(z, z)))), (Seq(f2), exists(x, fp(f2(x)))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta( + Set(exists(x, fp(f(x))), forall(y, f(y) === g(y, y))) |- exists(x, fp(g(x, x))), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, z))))) |- exists(x, forall(y, fp(g(y, g(x, z))))), exists(x, forall(y, fp(g(y, g(x, 
z)))))) val t1 = RightSubstEq( - Set(exists(x, forall(y, fp(g(y, g(x, z))))), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(g(x, z), y)))), + Set( + exists(x, forall(y, fp(g(y, g(x, z))))), + forall(y, forall(z, g(y, z) ===lambda(Seq(y, z), g(z, y))(y, z))) + ) |- exists(x, forall(y, fp(lambda(Seq(y, z), g(z, y))(y, g(x, z))))), 0, - List((LambdaTermTerm(Seq(y, z), g(y, z)), LambdaTermTerm(Seq(y, z), g(z, y)))), + Seq((g, lambda(Seq(y, z), g(z, y)))), (Seq(g2), exists(x, forall(y, fp(g2(y, g(x, z)))))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, z))))) |- exists(x, forall(y, fp(g(y, g(x, z))))), exists(x, forall(y, fp(g(y, g(x, z)))))) val t1 = RightSubstEq( - Set(exists(x, forall(y, fp(g(y, g(x, z))))), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(g(z, x), y)))), + Set( + exists(x, forall(y, fp(g(y, g(x, z))))), + forall(y, forall(z, g(y, z) === lambda(Seq(y, z), g(z, y))(y, z))) + ) |- exists(x, forall(y, fp(lambda(Seq(y, z), g(z, y))(y, lambda(Seq(y, z), g(z, y))(x, z))))), 0, - List((LambdaTermTerm(Seq(y, z), g(y, z)), LambdaTermTerm(Seq(y, z), g(z, y)))), + Seq((g, lambda(Seq(y, z), g(z, y)))), (Seq(g2), exists(x, forall(y, fp(g2(y, g2(x, z)))))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1))) + assert(judg.isValid, "\n" + judg.repr) } - { - val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, f(z)))))) |- exists(x, forall(y, fp(g(y, g(x, f(z)))))), exists(x, forall(y, fp(g(y, g(x, f(z))))))) - val t1 = RightSubstEq( - Set(exists(x, forall(y, fp(g(y, g(x, f(z)))))), forall(y, f(y) === g(y, y)), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(g(g(z, z), x), y)))), - 0, - List((LambdaTermTerm(Seq(y, z), g(y, z)), LambdaTermTerm(Seq(y, z), g(z, y))), (LambdaTermTerm(Seq(y), f(y)), LambdaTermTerm(Seq(z), g(z, z)))), - (Seq(g2, f2), exists(x, forall(y, fp(g2(y, g2(x, f2(z))))))) - ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) - } - - } - - test("Verification of LeftSubstIff") { + // { val t0 = Hypothesis(P(x) |- P(x), P(x)) - val t1 = LeftSubstIff(Set(P(y), P(x) <=> P(y)) |- P(x), 0, List(((LambdaTermFormula(Seq(), P(x)), LambdaTermFormula(Seq(), P(y))))), (Seq(X), X)) - val pr = new SCProof(IndexedSeq(t0, t1)) - assert(checkSCProof(pr).isValid) - } - { - val t0 = Hypothesis(exists(x, P(x)) |- exists(x, P(x)), exists(x, P(x))) - val t1 = LeftSubstIff( - Set(exists(x, Q(x, x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, P(x)), - 0, - List((LambdaTermFormula(Seq(x), P(x)), LambdaTermFormula(Seq(x), Q(x, x)))), - (Seq(P2), exists(x, P2(x))) - ) + val t1 = RightSubstIff(Set(P(x), P(x) <=> P(y)) |- P(y), 0, Seq((P(x), P(y))), (Seq(X), X)) assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) } { val t0 = Hypothesis(exists(x, P(x)) |- exists(x, P(x)), exists(x, P(x))) - val t1 = LeftSubstIff( - Set(exists(x, Q(x, x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, P(x)), + val t1 = RightSubstIff( + Set(exists(x, P(x)), forall(y, P(y) <=> lambda(x, Q(x, x))(y))) |- exists(x, lambda(x, Q(x, x))(x)), 0, - List((LambdaTermFormula(Seq(y), P(y)), LambdaTermFormula(Seq(z), Q(z, z)))), + Seq((P, lambda(x, Q(x, x)))), (Seq(P2), exists(x, P2(x))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) - } - { - val t0 = Hypothesis(exists(x, forall(y, Q(y, g(x, z)))) |- 
exists(x, forall(y, Q(y, g(x, z)))), exists(x, forall(y, Q(y, g(x, z))))) - val t1 = LeftSubstIff( - Set(exists(x, forall(y, Q(g(x, z), y))), forall(x, forall(y, Q(x, y) <=> Q(y, x)))) |- exists(x, forall(y, Q(y, g(x, z)))), - 0, - List((LambdaTermFormula(Seq(y, z), Q(y, z)), LambdaTermFormula(Seq(y, z), Q(z, y)))), - (Seq(Q2), exists(x, forall(y, Q2(y, g(x, z))))) + val t2 = Beta( + Set(exists(x, P(x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, Q(x, x)), + 1 ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) - } - - } - - test("Verification of RightSubstIff") { - { - val t0 = Hypothesis(P(x) |- P(x), P(x)) - val t1 = RightSubstIff(Set(P(x), P(x) <=> P(y)) |- P(y), 0, List(((LambdaTermFormula(Seq(), P(x)), LambdaTermFormula(Seq(), P(y))))), (Seq(X), X)) - val pr = new SCProof(IndexedSeq(t0, t1)) - assert(checkSCProof(pr).isValid) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { val t0 = Hypothesis(exists(x, P(x)) |- exists(x, P(x)), exists(x, P(x))) val t1 = RightSubstIff( - Set(exists(x, P(x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, Q(x, x)), + Set(exists(x, P(x)), forall(y, P(y) <=> lambda(x, Q(x, x))(y))) |- exists(x, lambda(z, Q(z, z))(x)), 0, - List((LambdaTermFormula(Seq(x), P(x)), LambdaTermFormula(Seq(x), Q(x, x)))), + Seq((P, lambda(z, Q(z, z)))), (Seq(P2), exists(x, P2(x))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta( + Set(exists(x, P(x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, Q(x, x)), + 1 + ) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } { - val t0 = Hypothesis(exists(x, P(x)) |- exists(x, P(x)), exists(x, P(x))) + val t0 = Hypothesis(exists(x, forall(y, Q(y, g(x, z)))) |- exists(x, forall(y, Q(y, g(x, z)))), exists(x, forall(y, Q(y, g(x, z))))) val t1 = RightSubstIff( - Set(exists(x, P(x)), forall(y, P(y) <=> Q(y, y))) |- exists(x, Q(x, x)), + Set(exists(x, forall(y, Q(y, g(x, z)))), forall(x, forall(y, Q(x, y) <=> lambda(Seq(y, z), Q(z, y))(x, y)))) |- exists(x, forall(y, lambda(Seq(y, z), Q(z, y))(y, g(x, z)))), 0, - List((LambdaTermFormula(Seq(y), P(y)), LambdaTermFormula(Seq(z), Q(z, z)))), - (Seq(P2), exists(x, P2(x))) + Seq((Q, lambda(Seq(y, z), Q(z, y)))), + (Seq(Q2), exists(x, forall(y, Q2(y, g(x, z))))) ) - assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1))) + assert(judg.isValid, "\n" + judg.repr) } { - val t0 = Hypothesis(exists(x, forall(y, Q(y, g(x, z)))) |- exists(x, forall(y, Q(y, g(x, z)))), exists(x, forall(y, Q(y, g(x, z))))) - val t1 = RightSubstIff( - Set(exists(x, forall(y, Q(y, g(x, z)))), forall(x, forall(y, Q(x, y) <=> Q(y, x)))) |- exists(x, forall(y, Q(g(x, z), y))), + val t0 = Hypothesis(exists(x, forall(y, fp(g(y, g(x, f(z)))))) |- exists(x, forall(y, fp(g(y, g(x, f(z)))))), exists(x, forall(y, fp(g(y, g(x, f(z))))))) + val t1 = RightSubstEq( + Set(exists(x, forall(y, fp(g(y, g(x, f(z)))))), forall(y, f(y) === lambda(Seq(z), g(z, z))(y)), forall(y, forall(z, g(y, z) === lambda(Seq(y, z), g(z, y))(y, z)))) |- exists(x, forall(y, fp(lambda(Seq(y, z), g(z, y))(y, lambda(Seq(y, z), g(z, y))(x, lambda(Seq(z), g(z, z))(z)))))), 0, - List((LambdaTermFormula(Seq(y, z), Q(y, z)), LambdaTermFormula(Seq(y, z), Q(z, y)))), - (Seq(Q2), exists(x, forall(y, Q2(g(x, z), y)))) + List((g, lambda(Seq(y, z), g(z, y))), (f, lambda(Seq(z), g(z, z)))), + (Seq(g2, f2), exists(x, forall(y, fp(g2(y, g2(x, f2(z))))))) ) - 
assert(checkSCProof(SCProof(IndexedSeq(t0, t1))).isValid) + val t2 = Beta(Set(exists(x, forall(y, fp(g(y, g(x, f(z)))))), forall(y, f(y) === g(y, y)), forall(y, forall(z, g(y, z) === g(z, y)))) |- exists(x, forall(y, fp(g(g(g(z, z), x), y)))), 1) + val judg = checkSCProof(SCProof(IndexedSeq(t0, t1, t2))) + assert(judg.isValid, "\n" + judg.repr) } } test("Commutativity on a random large formula") { val k = 9 val r = new Random() - val vars = (0 until 1 << k).map(i => AtomicFormula(ConstantAtomicLabel(s"P$i", 0), Seq())) + val vars = (0 until 1 << k).map(i => Constant(s"P$i", Prop)) val pairs = vars.grouped(2) val sPairs = vars.grouped(2) diff --git a/lisa-utils/src/test/scala/lisa/kernel/SubstitutionTest.scala b/lisa-utils/src/test/scala/lisa/kernel/SubstitutionTest.scala index b5e59ffc7..43769b445 100644 --- a/lisa-utils/src/test/scala/lisa/kernel/SubstitutionTest.scala +++ b/lisa-utils/src/test/scala/lisa/kernel/SubstitutionTest.scala @@ -6,7 +6,6 @@ import lisa.kernel.proof.RunningTheory.* import lisa.kernel.proof.SCProof import lisa.kernel.proof.SCProofChecker import lisa.kernel.proof.SequentCalculus.* -import lisa.utils.FOLPrinter.* import lisa.utils.KernelHelpers.{_, given} import org.scalatest.compatible.Assertion import org.scalatest.funsuite.AnyFunSuite @@ -16,7 +15,7 @@ import org.scalatest.funsuite.AnyFunSuite */ class SubstitutionTest extends AnyFunSuite { private val x = variable - private val x1 = VariableLabel(Identifier("x", 1)) + private val x1 = variable private val x2 = variable private val x3 = variable private val y = variable @@ -42,160 +41,88 @@ class SubstitutionTest extends AnyFunSuite { private val d1 = connector(1) private val e2 = connector(2) - - test("Substitution in Terms") { - case class $(t: Term, m: (SchematicTermLabel, LambdaTermTerm)*) - extension (c: $) { - inline infix def _VS_(t2: Term): Assertion = { - assert(instantiateTermSchemasInTerm(c.t, c.m.toMap) == t2, "\n - " + prettyTerm(instantiateTermSchemasInTerm(c.t, c.m.toMap)) + " didn't match " + prettyTerm(t2)) - } + case class $(t: Expression, m: (Variable, Expression)*){ + inline infix def _VS_(t2: Expression): Assertion = { + assert(isSame(substituteVariables(t, m.toMap).betaNormalForm, t2), "\n - " + substituteVariables(t, m.toMap).repr + " didn't match " + t2.repr) } + } + test("First Order Substitutions") { val cases: List[Assertion] = List( - $(x, x -> x()) _VS_ x, - $(x, y -> y()) _VS_ x, - $(x, x -> y()) _VS_ y, - $(x, y -> z(), x -> y()) _VS_ y, + $(x, x -> x) _VS_ x, + $(x, y -> y) _VS_ x, + $(x, x -> y) _VS_ y, + $(x, y -> z, x -> y) _VS_ y, $(x, g -> lambda(y, f(y))) _VS_ x, - $(f(x), x -> y()) _VS_ f(y), - $(f(f(h(x, y))), x -> y()) _VS_ f(f(h(y, y))), - $(f(f(h(x, y))), x -> z()) _VS_ f(f(h(z, y))), - $(f(f(h(x, y))), x -> z(), z -> x()) _VS_ f(f(h(z, y))), - $(f(f(h(x, y))), x -> y(), y -> x()) _VS_ f(f(h(y, x))), - $(f(f(h(x, y))), z -> y(), g -> lambda(Seq(x), f(h(y, x)))) _VS_ f(f(h(x, y))), - $(f(f(h(x, y))), f -> lambda(x, x)) _VS_ h(x, y), - $(f(f(h(x, y))), f -> lambda(x, y)) _VS_ y, - $(f(f(h(x, y))), f -> lambda(x, f(f(x)))) _VS_ f(f(f(f(h(x, y))))), - $(f(f(h(x, y))), f -> lambda(x, f(f(x))), h -> lambda(Seq(x, z), h(f(x), h(g(z), x)))) _VS_ f(f(f(f(h(f(x), h(g(y), x)))))) + $(f(x), x -> y) _VS_ f(y), + $(f(f(h(x, y))), x -> y) _VS_ f(f(h(y, y))), + $(f(f(h(x, y))), x -> z) _VS_ f(f(h(z, y))), + $(f(f(h(x, y))), x -> z, z -> x) _VS_ f(f(h(z, y))), + $(f(f(h(x, y))), x -> y, y -> x) _VS_ f(f(h(y, x))), + $(Q(x), x -> x) _VS_ Q(x), + $(Q(x), y -> y) _VS_ Q(x), + 
$(c1(c1(Q(x))), x -> y) _VS_ c1(c1(Q(y))), + $(Q(f(f(h(x, y)))), x -> y) _VS_ Q(f(f(h(y, y)))), + $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z) _VS_ Q(f(f(h(z, y)))) /\ R(z, f(y)), + $(forall(x, R(x, y)), x -> z) _VS_ forall(x, R(x, y)), + $(forall(x, R(x, y)), y -> z) _VS_ forall(x, R(x, z)), + $(forall(x, P(x)), x1 -> f(x)) _VS_ forall(x, P(x)), + $(forall(x, R(x, y)) /\ P(h(x, y)), y -> z, x -> y) _VS_ forall(x, R(x, z)) /\ P(h(y, z)), + $(forall(x, R(x, y)) /\ P(h(x, y)), y -> x) _VS_ forall(y, R(y, x)) /\ P(h(x, x)), + $(X, X -> X) _VS_ X, + $(X, Y -> Y) _VS_ X, + $(X, X -> Y) _VS_ Y, + $(X, Y -> Z, X -> Y) _VS_ Y, + $(c1(X), X -> Y) _VS_ c1(Y), + $(c1(c1(e2(X, Y))), X -> Y) _VS_ c1(c1(e2(Y, Y))), + $(c1(c1(e2(X, Y))), X -> Z) _VS_ c1(c1(e2(Z, Y))), + $(c1(c1(e2(X, Y))), X -> Z, Z -> X) _VS_ c1(c1(e2(Z, Y))), + $(c1(c1(e2(X, Y))), X -> Y, Y -> X) _VS_ c1(c1(e2(Y, X))), + $(Q(x), x -> x) _VS_ Q(x), + $(Q(x), y -> y) _VS_ Q(x), + $(c1(c1(Q(x))), x -> y) _VS_ c1(c1(Q(y))), + $(Q(f(f(h(x, y)))), x -> y) _VS_ Q(f(f(h(y, y)))), + $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z) _VS_ Q(f(f(h(z, y)))) /\ R(z, f(y)), + $(forall(x, R(x, y)), x -> z) _VS_ forall(x, R(x, y)), + $(forall(x, R(x, y)), y -> z) _VS_ forall(x, R(x, z)), + $(forall(x, R(x, y)) /\ P(h(x, y)), y -> z, x -> y) _VS_ forall(x, R(x, z)) /\ P(h(y, z)), + $(forall(x, R(x, y)) /\ P(h(x, y)), y -> x) _VS_ forall(y, R(y, x)) /\ P(h(x, x)), + $(X, X -> X) _VS_ X, + $(X, Y -> Y) _VS_ X, + $(X, X -> Y) _VS_ Y, + $(X, Y -> Z, X -> Y) _VS_ Y, + $(c1(X), X -> Y) _VS_ c1(Y), + $(c1(c1(e2(X, Y))), X -> Y) _VS_ c1(c1(e2(Y, Y))), + $(c1(c1(e2(X, Y))), X -> Z) _VS_ c1(c1(e2(Z, Y))), + $(c1(c1(e2(X, Y))), X -> Z, Z -> X) _VS_ c1(c1(e2(Z, Y))), + $(c1(c1(e2(X, Y))), X -> Y, Y -> X) _VS_ c1(c1(e2(Y, X))), + $(c1(c1(e2(X, Y))), Z -> Y) _VS_ c1(c1(e2(X, Y))), + $(c1(c1(e2(X, Y))), Z -> Y) _VS_ c1(c1(e2(X, Y))), ) } - - test("Substitution of Terms in Formulas") { - case class $(f: Formula, m: (SchematicTermLabel, LambdaTermTerm)*) - extension (c: $) { - inline infix def _VS_(t2: Formula): Assertion = { - assert(isSame(instantiateTermSchemas(c.f, c.m.toMap), t2), "\n - " + prettyFormula(instantiateTermSchemas(c.f, c.m.toMap)) + " didn't match " + prettyFormula(t2)) - } - } + + test("Higher Order Substitutions, with beta normalization") { val cases: List[Assertion] = List( - $(Q(x), x -> x()) _VS_ Q(x), - $(Q(x), y -> y()) _VS_ Q(x), - $(c1(c1(Q(x))), x -> y()) _VS_ c1(c1(Q(y))), - $(Q(f(f(h(x, y)))), x -> y()) _VS_ Q(f(f(h(y, y)))), - $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z()) _VS_ Q(f(f(h(z, y)))) /\ R(z, f(y)), - $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z(), h -> lambda(Seq(x, z), g(h(z, y)))) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, f(y)), - $(R(x, h(f(z), y)), x -> z(), h -> lambda(Seq(x, z), g(h(z, y))), z -> y()) _VS_ R(z, g(h(y, y))), - $(Q(f(f(h(x, y)))) /\ R(x, h(y, f(z))), x -> z(), h -> lambda(Seq(x, z), g(h(z, y))), z -> y()) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, g(h(f(y), y))), - $(forall(x, R(x, y)), x -> z()) _VS_ forall(x, R(x, y)), - $(forall(x, R(x, y)), y -> z()) _VS_ forall(x, R(x, z)), - $(forall(x, P(x)), x1 -> f(x())) _VS_ forall(x, P(x)), - $(forall(x, R(x, y)) /\ P(h(x, y)), y -> z(), x -> y()) _VS_ forall(x, R(x, z)) /\ P(h(y, z)), - $(forall(x, R(x, y)) /\ P(h(x, y)), y -> x()) _VS_ forall(y, R(y, x)) /\ P(h(x, x)), - $(existsOne(x, R(x, y)) /\ P(h(x, y)), y -> x()) _VS_ existsOne(y, R(y, x)) /\ P(h(x, x)) - ) - } - - test("Substitution of Predicates in Formulas") { - case class $(f: Formula, m: (SchematicAtomicLabel, LambdaTermFormula)*) - 
extension (c: $) { - inline infix def _VS_(t2: Formula): Assertion = { - assert( - isSame(instantiatePredicateSchemas(c.f, c.m.toMap), t2), - "\n - " + prettyFormula(instantiatePredicateSchemas(c.f, c.m.toMap)) + " didn't match " + prettyFormula(t2) - ) - } - } - val cases: List[Assertion] = List( - $(X, X -> X()) _VS_ X, - $(X, Y -> Y()) _VS_ X, - $(X, X -> Y()) _VS_ Y, - $(X, Y -> Z(), X -> Y()) _VS_ Y, - $(c1(X), X -> Y()) _VS_ c1(Y), - $(c1(c1(e2(X, Y))), X -> Y()) _VS_ c1(c1(e2(Y, Y))), - $(c1(c1(e2(X, Y))), X -> Z()) _VS_ c1(c1(e2(Z, Y))), - $(c1(c1(e2(X, Y))), X -> Z(), Z -> X()) _VS_ c1(c1(e2(Z, Y))), - $(c1(c1(e2(X, Y))), X -> Y(), Y -> X()) _VS_ c1(c1(e2(Y, X))), - $(c1(c1(e2(X, Y))), Z -> Y()) _VS_ c1(c1(e2(X, Y))), + $(f(f(h(x, y))), z -> y, g -> lambda(x, f(h(y, x)))) _VS_ f(f(h(x, y))), + $(f(f(h(x, y))), f -> lambda(x, x)) _VS_ h(x, y), + $(f(f(h(x, y))), f -> lambda(x, y)) _VS_ y, + $(f(f(h(x, y))), f -> lambda(x, f(f(x)))) _VS_ f(f(f(f(h(x, y))))), + $(f(f(h(x, y))), f -> lambda(x, f(f(x))), h -> lambda(Seq(x, z), h(f(x), h(g(z), x)))) _VS_ f(f(f(f(h(f(x), h(g(y), x)))))), + $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z, h -> lambda(Seq(x, z), g(h(z, y)))) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, f(y)), + $(R(x, h(f(z), y)), x -> z, h -> lambda(Seq(x, z), g(h(z, y))), z -> y) _VS_ R(z, g(h(y, y))), + $(Q(f(f(h(x, y)))) /\ R(x, h(y, f(z))), x -> z, h -> lambda(Seq(x, z), g(h(z, y))), z -> y) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, g(h(f(y), y))), $(R(x, f(y)), R -> lambda(Seq(z, y), P(z) /\ Q(y))) _VS_ P(x) /\ Q(f(y)), $(R(x, f(y)) /\ R(z, z), R -> lambda(Seq(z, y), P(z) /\ Q(y))) _VS_ (P(x) /\ Q(f(y))) /\ (P(z) /\ Q(z)), - $(forall(y, R(x, f(y))), R -> lambda(Seq(x, z), (x === z) /\ P(f(y)))) _VS_ forall(y2, (x === f(y2)) /\ P(f(y))) - ) - } - - test("Substitution of Formulas in Formulas") { - case class $(f: Formula, m: (SchematicConnectorLabel, LambdaFormulaFormula)*) - extension (c: $) { - inline infix def _VS_(t2: Formula): Assertion = { - assert(instantiateConnectorSchemas(c.f, c.m.toMap) == t2, "\n - " + prettyFormula(instantiateConnectorSchemas(c.f, c.m.toMap)) + " didn't match " + prettyFormula(t2)) - } - } - - val cases: List[Assertion] = List( + $(forall(y, R(x, f(y))), R -> lambda(Seq(x, z), (x === z) /\ P(f(y)))) _VS_ forall(y2, (x === f(y2)) /\ P(f(y))), $(c1(P(x)), c1 -> lambda(X, !X)) _VS_ !P(x), $(c1(c1(e2(X, P(y)))), c1 -> lambda(X, X)) _VS_ e2(X, P(y)), $(c1(c1(e2(X, P(y)))), c1 -> lambda(X, Y)) _VS_ Y, - $(c1(c1(e2(X, P(y)))), c1 -> lambda(X, c1(c1(X)))) _VS_ c1(c1(c1(c1(e2(X, P(y)))))) - ) - } - - test("Simultaneous Substitutions in Formulas") { - case class $(f: Formula, m: ((SchematicConnectorLabel, LambdaFormulaFormula) | (SchematicAtomicLabel, LambdaTermFormula) | (SchematicTermLabel, LambdaTermTerm))*) - extension (c: $) { - inline infix def _VS_(t2: Formula): Assertion = { - @annotation.nowarn - val mCon: Map[SchematicConnectorLabel, LambdaFormulaFormula] = c.m - .collect({ - case e if e._1.isInstanceOf[SchematicConnectorLabel] => e - }) - .toMap - .asInstanceOf - @annotation.nowarn - val mPred: Map[SchematicAtomicLabel, LambdaTermFormula] = c.m - .collect({ - case e if e._1.isInstanceOf[SchematicAtomicLabel] => e - }) - .toMap - .asInstanceOf - @annotation.nowarn - val mTerm: Map[SchematicTermLabel, LambdaTermTerm] = c.m - .collect({ - case e if e._1.isInstanceOf[SchematicTermLabel] => e - }) - .toMap - .asInstanceOf - assert( - isSame(instantiateSchemas(c.f, mCon, mPred, mTerm), t2), - "\n - " + prettyFormula(instantiateSchemas(c.f, mCon, mPred, mTerm)) + " didn't 
match " + prettyFormula(t2) - ) - } - } - - val cases: List[Assertion] = List( - $(Q(x), x -> x()) _VS_ Q(x), - $(Q(x), y -> y()) _VS_ Q(x), - $(c1(c1(Q(x))), x -> y()) _VS_ c1(c1(Q(y))), - $(Q(f(f(h(x, y)))), x -> y()) _VS_ Q(f(f(h(y, y)))), - $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z()) _VS_ Q(f(f(h(z, y)))) /\ R(z, f(y)), - $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z(), h -> lambda(Seq(x, z), g(h(z, y)))) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, f(y)), - $(R(x, h(f(z), y)), x -> z(), h -> lambda(Seq(x, z), g(h(z, y))), z -> y()) _VS_ R(z, g(h(y, y))), - $(Q(f(f(h(x, y)))) /\ R(x, h(y, f(z))), x -> z(), h -> lambda(Seq(x, z), g(h(z, y))), z -> y()) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, g(h(f(y), y))), - $(forall(x, R(x, y)), x -> z()) _VS_ forall(x, R(x, y)), - $(forall(x, R(x, y)), y -> z()) _VS_ forall(x, R(x, z)), - $(forall(x, R(x, y)) /\ P(h(x, y)), y -> z(), x -> y()) _VS_ forall(x, R(x, z)) /\ P(h(y, z)), - $(forall(x, R(x, y)) /\ P(h(x, y)), y -> x()) _VS_ forall(y, R(y, x)) /\ P(h(x, x)), - $(existsOne(x, R(x, y)) /\ P(h(x, y)), y -> x()) _VS_ existsOne(y, R(y, x)) /\ P(h(x, x)), - $(X, X -> X()) _VS_ X, - $(X, Y -> Y()) _VS_ X, - $(X, X -> Y()) _VS_ Y, - $(X, Y -> Z(), X -> Y()) _VS_ Y, - $(c1(X), X -> Y()) _VS_ c1(Y), - $(c1(c1(e2(X, Y))), X -> Y()) _VS_ c1(c1(e2(Y, Y))), - $(c1(c1(e2(X, Y))), X -> Z()) _VS_ c1(c1(e2(Z, Y))), - $(c1(c1(e2(X, Y))), X -> Z(), Z -> X()) _VS_ c1(c1(e2(Z, Y))), - $(c1(c1(e2(X, Y))), X -> Y(), Y -> X()) _VS_ c1(c1(e2(Y, X))), - $(c1(c1(e2(X, Y))), Z -> Y()) _VS_ c1(c1(e2(X, Y))), + $(c1(c1(e2(X, P(y)))), c1 -> lambda(X, c1(c1(X)))) _VS_ c1(c1(c1(c1(e2(X, P(y)))))), + $(Q(f(f(h(x, y)))) /\ R(x, f(y)), x -> z, h -> lambda(Seq(x, z), g(h(z, y)))) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, f(y)), + $(R(x, h(f(z), y)), x -> z, h -> lambda(Seq(x, z), g(h(z, y))), z -> y) _VS_ R(z, g(h(y, y))), + $(Q(f(f(h(x, y)))) /\ R(x, h(y, f(z))), x -> z, h -> lambda(Seq(x, z), g(h(z, y))), z -> y) _VS_ Q(f(f(g(h(y, y))))) /\ R(z, g(h(f(y), y))), $(R(x, f(y)), R -> lambda(Seq(z, y), P(z) /\ Q(y))) _VS_ P(x) /\ Q(f(y)), $(R(x, f(y)) /\ R(z, z), R -> lambda(Seq(z, y), P(z) /\ Q(y))) _VS_ (P(x) /\ Q(f(y))) /\ (P(z) /\ Q(z)), $(forall(y, R(x, f(y))), R -> lambda(Seq(x, z), (x === z) /\ P(f(y)))) _VS_ forall(y2, (x === f(y2)) /\ P(f(y))), @@ -222,7 +149,7 @@ class SubstitutionTest extends AnyFunSuite { X -> (z === y), P -> lambda(x2, exists(y, R(x2, y) /\ P(x))) ) _VS_ (exists(y2, Q(y2) /\ ((z === y) <=> exists(x2, P(x2) /\ exists(y, R(x, y) /\ P(x))))) /\ R(y, f(y))), - $(forall(x, P(x)), x1 -> f(x())) _VS_ forall(x, P(x)), + $(forall(x, P(x)), x1 -> f(x)) _VS_ forall(x, P(x)), $( forall(x, e2(c1(e2(X, P(x))) /\ R(y, f(y)), exists(x, P(x) /\ X))), c1 -> lambda(X, exists(y, Q(y) /\ X)), diff --git a/lisa-utils/src/test/scala/lisa/kernel/TheoriesHelpersTest.scala b/lisa-utils/src/test/scala/lisa/kernel/TheoriesHelpersTest.scala index 988ead998..dc693fb1d 100644 --- a/lisa-utils/src/test/scala/lisa/kernel/TheoriesHelpersTest.scala +++ b/lisa-utils/src/test/scala/lisa/kernel/TheoriesHelpersTest.scala @@ -17,23 +17,25 @@ class TheoriesHelpersTest extends AnyFunSuite { export TestTheory.* test("theorem with incorrect statement") { - val (s0, s1) = (ConstantFunctionLabel("0", 0), ConstantFunctionLabel("1", 0)) - runningTestTheory.addSymbol(s0) - runningTestTheory.addSymbol(s1) + val (c0, c1) = (Constant("Z", Ind), Constant("S", Ind)) + runningTestTheory.addSymbol(c0) + runningTestTheory.addSymbol(c1) - val (c0, c1) = (s0(), s1()) val judgement = runningTestTheory.theorem("False theorem", c1 === c0, 
SCProof(Hypothesis((c0 === c1) |- (c0 === c1), c0 === c1)), Seq()) assert(!judgement.isValid) try { judgement.get fail("Shouldn't be able to get a theorem from an invalid judgement") } catch { - case InvalidJustificationException(msg, None) => () } + // same theorem but with correct statement - assert(runningTestTheory.theorem("True theorem", c1 === c0 |- c1 === c0, SCProof(Hypothesis((c0 === c1) |- (c0 === c1), c0 === c1)), Seq()).isValid) + + + assert(runningTestTheory.theorem("True theorem", c1 === c0 |- c1 === c0, SCProof(Hypothesis((c0 === c1) |- (c0 === c1), c0 === c1)), Seq()).isValid, + runningTestTheory.theorem("True theorem", c1 === c0 |- c1 === c0, SCProof(Hypothesis((c0 === c1) |- (c0 === c1), c0 === c1)), Seq()).repr) } } diff --git a/lisa-utils/src/test/scala/lisa/utils/BasicTacticTest.scala b/lisa-utils/src/test/scala/lisa/utils/BasicTacticTest.scala index 8d46bbcf4..2aa8c5f3b 100644 --- a/lisa-utils/src/test/scala/lisa/utils/BasicTacticTest.scala +++ b/lisa-utils/src/test/scala/lisa/utils/BasicTacticTest.scala @@ -1,39 +1,33 @@ package lisa.utils //import lisa.kernel.proof.SequentCalculus as SC -//import lisa.prooflib.BasicStepTactic.* -//import lisa.prooflib.Library -//import lisa.prooflib.ProofTacticLib -//import lisa.utils.Printer +import lisa.utils.prooflib.BasicStepTactic.* +import lisa.utils.prooflib.Library +import lisa.utils.prooflib.ProofTacticLib import lisa.test.ProofTacticTestLib //import org.scalatest.funsuite.AnyFunSuite class BasicTacticTest extends ProofTacticTestLib { - /* - given Conversion[String, Sequent] = FOLParser.parseSequent(_) - given Conversion[String, Formula] = FOLParser.parseFormula(_) - given Conversion[String, Term] = FOLParser.parseTerm(_) - given Conversion[String, VariableLabel] = s => VariableLabel(if (s.head == '?') s.tail else s) - */ - /* - val x: lisa.fol.FOL.Variable = variable - val y = variable - val z = variable - - val P = predicate[1] - val Q = predicate[1] - val R = predicate[1] - val S = predicate[2] + + + val x = variable[Ind] + val y = variable[Ind] + val z = variable[Ind] + + val P = variable[Ind >>: Prop] + val Q = variable[Ind >>: Prop] + val R = variable[Ind >>: Prop] + val S = variable[Ind >>: Ind >>: Prop] // hypothesis test("Tactic Tests: Hypothesis") { - val correct = List[lisa.fol.FOL.Sequent]( + val correct = List[lisa.utils.fol.FOL.Sequent]( (P(x) |- P(x)), (P(x) |- (P(x), Q(x))), ((P(x), Q(x)) |- (P(x), Q(x))), ((P(x), Q(x)) |- P(x)) ) - val incorrect = List[lisa.fol.FOL.Sequent]( + val incorrect = List[lisa.utils.fol.FOL.Sequent]( (P(x) |- ()), (() |- ()), (() |- P(x)), @@ -41,10 +35,10 @@ class BasicTacticTest extends ProofTacticTestLib { (Q(x) |- ()) ) - /*testTacticCases(correct, incorrect) { + testTacticCases(correct, incorrect) { Hypothesis(_) - }*/ - }*/ + } + } /* // rewrite // TODO: make this use equivalence checker tests @@ -1432,7 +1426,7 @@ class BasicTacticTest extends ProofTacticTestLib { } // instfunschema - test("Tactic Tests: InstFunSchema") { + test("Tactic Tests: InstSchema") { val x = variable val y = variable val f = SchematicFunctionLabel("f", 1) @@ -1462,12 +1456,12 @@ class BasicTacticTest extends ProofTacticTestLib { testTacticCases(correct, incorrect) { (stmt1, stmt2, termMap) => val prem = introduceSequent(stmt1) - InstFunSchema(termMap)(prem)(stmt2) + InstSchema(termMap)(prem)(stmt2) } } // instpredschema - test("Tactic Tests: InstPredSchema") { + test("Tactic Tests: InstSchema") { val x = variable val y = variable val f = SchematicPredicateLabel("f", 1) @@ -1498,7 +1492,7 @@ class 
BasicTacticTest extends ProofTacticTestLib { testTacticCases(correct, incorrect) { (stmt1, stmt2, termMap) => val prem = introduceSequent(stmt1) - InstPredSchema(termMap)(prem)(stmt2) + InstSchema(termMap)(prem)(stmt2) } } */ diff --git a/lisa-utils/src/test/scala/lisa/utils/ParserTest.scala b/lisa-utils/src/test/scala/lisa/utils/ParserTest.scala index b6e20602b..5c84347a1 100644 --- a/lisa-utils/src/test/scala/lisa/utils/ParserTest.scala +++ b/lisa-utils/src/test/scala/lisa/utils/ParserTest.scala @@ -2,15 +2,17 @@ package lisa.utils import lisa.kernel.fol.FOL._ import lisa.kernel.proof.SequentCalculus.Sequent -import lisa.utils.FOLParser import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.parsing.* import lisa.utils.{_, given} import org.scalatest.funsuite.AnyFunSuite +/** + * TODO: Port to TPTP-based parsing + */ class ParserTest extends AnyFunSuite with TestUtils { + /* test("constant") { - assert(FOLParser.parseTerm("x") == Term(cx, Seq())) + assert(FOLParser.parseTerm("x") == Ind(cx, Seq())) } test("variable") { @@ -18,29 +20,29 @@ class ParserTest extends AnyFunSuite with TestUtils { } test("constant function application") { - assert(FOLParser.parseTerm("f()") == Term(f0, Seq())) - assert(FOLParser.parseTerm("f(x)") == Term(f1, Seq(cx))) - assert(FOLParser.parseTerm("f(x, y)") == Term(f2, Seq(cx, cy))) - assert(FOLParser.parseTerm("f(x, y, z)") == Term(f3, Seq(cx, cy, cz))) - - assert(FOLParser.parseTerm("f('x)") == Term(f1, Seq(x))) - assert(FOLParser.parseTerm("f('x, 'y)") == Term(f2, Seq(x, y))) - assert(FOLParser.parseTerm("f('x, 'y, 'z)") == Term(f3, Seq(x, y, z))) + assert(FOLParser.parseTerm("f()") == Ind(f0, Seq())) + assert(FOLParser.parseTerm("f(x)") == Ind(f1, Seq(cx))) + assert(FOLParser.parseTerm("f(x, y)") == Ind(f2, Seq(cx, cy))) + assert(FOLParser.parseTerm("f(x, y, z)") == Ind(f3, Seq(cx, cy, cz))) + + assert(FOLParser.parseTerm("f('x)") == Ind(f1, Seq(x))) + assert(FOLParser.parseTerm("f('x, 'y)") == Ind(f2, Seq(x, y))) + assert(FOLParser.parseTerm("f('x, 'y, 'z)") == Ind(f3, Seq(x, y, z))) } test("schematic function application") { // FOLParser.parseTerm("?f()") -- schematic functions of 0 arguments do not exist, those are variables - assert(FOLParser.parseTerm("'f(x)") == Term(sf1, Seq(cx))) - assert(FOLParser.parseTerm("'f(x, y)") == Term(sf2, Seq(cx, cy))) - assert(FOLParser.parseTerm("'f(x, y, z)") == Term(sf3, Seq(cx, cy, cz))) + assert(FOLParser.parseTerm("'f(x)") == Ind(sf1, Seq(cx))) + assert(FOLParser.parseTerm("'f(x, y)") == Ind(sf2, Seq(cx, cy))) + assert(FOLParser.parseTerm("'f(x, y, z)") == Ind(sf3, Seq(cx, cy, cz))) - assert(FOLParser.parseTerm("'f('x)") == Term(sf1, Seq(x))) - assert(FOLParser.parseTerm("'f('x, 'y)") == Term(sf2, Seq(x, y))) - assert(FOLParser.parseTerm("'f('x, 'y, 'z)") == Term(sf3, Seq(x, y, z))) + assert(FOLParser.parseTerm("'f('x)") == Ind(sf1, Seq(x))) + assert(FOLParser.parseTerm("'f('x, 'y)") == Ind(sf2, Seq(x, y))) + assert(FOLParser.parseTerm("'f('x, 'y, 'z)") == Ind(sf3, Seq(x, y, z))) } test("nested function application") { - assert(FOLParser.parseTerm("'f('f('x), 'y)") == Term(sf2, Seq(Term(sf1, Seq(x)), y))) + assert(FOLParser.parseTerm("'f('f('x), 'y)") == Ind(sf2, Seq(Ind(sf1, Seq(x)), y))) } test("0-ary predicate") { @@ -242,17 +244,17 @@ class ParserTest extends AnyFunSuite with TestUtils { test("infix functions") { val parser = Parser(SynonymInfoBuilder().addSynonyms(plus.id, "+").build, Nil, ("+", Associativity.Left) :: Nil) - assert(parser.parseTerm("x + y") == Term(plus, Seq(cx, cy))) - 
assert(parser.parseTerm("(x + y) + z") == Term(plus, Seq(Term(plus, Seq(cx, cy)), cz))) + assert(parser.parseTerm("x + y") == Ind(plus, Seq(cx, cy))) + assert(parser.parseTerm("(x + y) + z") == Ind(plus, Seq(Ind(plus, Seq(cx, cy)), cz))) } test("mix of infix functions and infix predicates") { val parser = Parser(SynonymInfoBuilder().addSynonyms(in.id, "∊").addSynonyms(plus.id, "+").build, "∊" :: Nil, ("+", Associativity.Left) :: Nil) - assert(parser.parseFormula("(x + y) ∊ z") == AtomicFormula(in, Seq(Term(plus, Seq(cx, cy)), cz))) + assert(parser.parseFormula("(x + y) ∊ z") == AtomicFormula(in, Seq(Ind(plus, Seq(cx, cy)), cz))) assert( parser.parseFormula("x ∊ y /\\ x ∊ z /\\ (x + y) ∊ z") == ConnectorFormula( And, - Seq(ConnectorFormula(And, Seq(AtomicFormula(in, Seq(cx, cy)), AtomicFormula(in, Seq(cx, cz)))), AtomicFormula(in, Seq(Term(plus, Seq(cx, cy)), cz))) + Seq(ConnectorFormula(And, Seq(AtomicFormula(in, Seq(cx, cy)), AtomicFormula(in, Seq(cx, cz)))), AtomicFormula(in, Seq(Ind(plus, Seq(cx, cy)), cz))) ) ) } @@ -285,4 +287,6 @@ class ParserTest extends AnyFunSuite with TestUtils { |Unexpected input: expected term""".stripMargin)) } } + + */ } diff --git a/lisa-utils/src/test/scala/lisa/utils/PrinterTest.scala b/lisa-utils/src/test/scala/lisa/utils/PrinterTest.scala index 8f2132419..e5ca2100b 100644 --- a/lisa-utils/src/test/scala/lisa/utils/PrinterTest.scala +++ b/lisa-utils/src/test/scala/lisa/utils/PrinterTest.scala @@ -2,50 +2,51 @@ package lisa.utils import lisa.kernel.fol.FOL.* import lisa.kernel.proof.SequentCalculus.Sequent -import lisa.utils.FOLParser import lisa.utils.KernelHelpers.{_, given} -import lisa.utils.parsing.* import lisa.utils.{_, given} import org.scalatest.funsuite.AnyFunSuite import scala.language.adhocExtensions +/** + * TODO: Port to TPTP-based printing + */ class PrinterTest extends AnyFunSuite with TestUtils { - +/* test("Minimal parenthesization") { - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, b))) == "a ∧ b") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(And, Seq(a, b)), c))) == "a ∧ b ∧ c") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, ConnectorFormula(And, Seq(b, c))))) == "a ∧ (b ∧ c)") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(a, ConnectorFormula(And, Seq(b, c))))) == "a ∨ b ∧ c") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(ConnectorFormula(And, Seq(a, b)), c))) == "a ∧ b ∨ c") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(Or, Seq(a, b)), c))) == "(a ∨ b) ∧ c") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, ConnectorFormula(Or, Seq(b, c))))) == "a ∧ (b ∨ c)") - - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(a))) == "¬a") - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(ConnectorFormula(Neg, Seq(a))))) == "¬¬a") - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(ConnectorFormula(Neg, Seq(ConnectorFormula(And, Seq(a, b))))))) == "¬¬(a ∧ b)") + assert((multiand(Seq(a, b))).repr == "a ∧ b") + assert((multiand(Seq(multiand(Seq(a, b)), c))).repr == "a ∧ b ∧ c") + assert((multiand(Seq(a, multiand(Seq(b, c))))).repr == "a ∧ (b ∧ c)") + assert((multior(Seq(a, multiand(Seq(b, c))))).repr == "a ∨ b ∧ c") + assert((multior(Seq(multiand(Seq(a, b)), c))).repr == "a ∧ b ∨ c") + assert((multiand(Seq(multior(Seq(a, b)), c))).repr == "(a ∨ b) ∧ c") + assert((multiand(Seq(a, multior(Seq(b, c))))).repr == "a ∧ (b ∨ c)") + + assert((!a).repr == "¬a") + assert((!!a).repr == "¬¬a") + assert((!!Seq(multiand(Seq(a, 
b))))).repr == "¬¬(a ∧ b)") assert( - FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(Neg, Seq(a)), ConnectorFormula(And, Seq(ConnectorFormula(Neg, Seq(b)), ConnectorFormula(Neg, Seq(c))))))) == "¬a ∧ (¬b ∧ ¬c)" + (multiand(Seq(!a, multiand(Seq(!b, !c))))).repr == "¬a ∧ (¬b ∧ ¬c)" ) assert( - FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(And, Seq(ConnectorFormula(Neg, Seq(a)), ConnectorFormula(Neg, Seq(b)))), ConnectorFormula(Neg, Seq(c))))) == "¬a ∧ ¬b ∧ ¬c" + (multiand(Seq(multiand(Seq(ConnectorFormula(Neg, Seq(a)), ConnectorFormula(Neg, Seq(b)))), ConnectorFormula(Neg, Seq(c))))).repr == "¬a ∧ ¬b ∧ ¬c" ) - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, AtomicFormula(equality, Seq(x, x))))) == "a ∧ 'x = 'x") + assert((multiand(Seq(a, AtomicFormula(equality, Seq(x, x))))).repr == "a ∧ 'x = 'x") - assert(FOLParser.printFormula(BinderFormula(Forall, x, AtomicFormula(equality, Seq(x, x)))) == "∀'x. 'x = 'x") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, BinderFormula(Forall, x, AtomicFormula(equality, Seq(x, x)))))) == "a ∧ (∀'x. 'x = 'x)") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(BinderFormula(Forall, x, b), a))) == "(∀'x. b) ∧ a") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(And, Seq(a, BinderFormula(Forall, x, b))), a))) == "a ∧ (∀'x. b) ∧ a") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(ConnectorFormula(And, Seq(a, BinderFormula(Forall, x, b))), a))) == "a ∧ (∀'x. b) ∨ a") + assert((BinderFormula(Forall, x, AtomicFormula(equality, Seq(x, x)))).repr == "∀'x. 'x = 'x") + assert((multiand(Seq(a, BinderFormula(Forall, x, AtomicFormula(equality, Seq(x, x)))))).repr == "a ∧ (∀'x. 'x = 'x)") + assert((multiand(Seq(BinderFormula(Forall, x, b), a))).repr == "(∀'x. b) ∧ a") + assert((multiand(Seq(multiand(Seq(a, BinderFormula(Forall, x, b))), a))).repr == "a ∧ (∀'x. b) ∧ a") + assert((multior(Seq(multiand(Seq(a, BinderFormula(Forall, x, b))), a))).repr == "a ∧ (∀'x. b) ∨ a") - assert(FOLParser.printFormula(BinderFormula(Forall, x, BinderFormula(Exists, y, BinderFormula(ExistsOne, z, a)))) == "∀'x. ∃'y. ∃!'z. a") + assert((BinderFormula(Forall, x, BinderFormula(Exists, y, BinderFormula(ExistsOne, z, a)))).repr == "∀'x. ∃'y. ∃!'z. 
a") - assert(FOLParser.printFormula(AtomicFormula(ConstantAtomicLabel("f", 3), Seq(x, y, z))) == "f('x, 'y, 'z)") + assert((AtomicFormula(ConstantAtomicLabel("f", 3), Seq(x, y, z))).repr == "f('x, 'y, 'z)") } test("constant") { - assert(FOLParser.printTerm(Term(cx, Seq())) == "x") + assert(FOLParser.printTerm(Ind(cx, Seq())) == "x") } test("variable") { @@ -53,141 +54,141 @@ class PrinterTest extends AnyFunSuite with TestUtils { } test("constant function application") { - assert(FOLParser.printTerm(Term(f1, Seq(cx))) == "f(x)") - assert(FOLParser.printTerm(Term(f2, Seq(cx, cy))) == "f(x, y)") - assert(FOLParser.printTerm(Term(f3, Seq(cx, cy, cz))) == "f(x, y, z)") + assert(FOLParser.printTerm(Ind(f1, Seq(cx))) == "f(x)") + assert(FOLParser.printTerm(Ind(f2, Seq(cx, cy))) == "f(x, y)") + assert(FOLParser.printTerm(Ind(f3, Seq(cx, cy, cz))) == "f(x, y, z)") - assert(FOLParser.printTerm(Term(f1, Seq(x))) == "f('x)") - assert(FOLParser.printTerm(Term(f2, Seq(x, y))) == "f('x, 'y)") - assert(FOLParser.printTerm(Term(f3, Seq(x, y, z))) == "f('x, 'y, 'z)") + assert(FOLParser.printTerm(Ind(f1, Seq(x))) == "f('x)") + assert(FOLParser.printTerm(Ind(f2, Seq(x, y))) == "f('x, 'y)") + assert(FOLParser.printTerm(Ind(f3, Seq(x, y, z))) == "f('x, 'y, 'z)") } test("schematic function application") { - assert(FOLParser.printTerm(Term(sf1, Seq(cx))) == "'f(x)") - assert(FOLParser.printTerm(Term(sf2, Seq(cx, cy))) == "'f(x, y)") - assert(FOLParser.printTerm(Term(sf3, Seq(cx, cy, cz))) == "'f(x, y, z)") + assert(FOLParser.printTerm(Ind(sf1, Seq(cx))) == "'f(x)") + assert(FOLParser.printTerm(Ind(sf2, Seq(cx, cy))) == "'f(x, y)") + assert(FOLParser.printTerm(Ind(sf3, Seq(cx, cy, cz))) == "'f(x, y, z)") - assert(FOLParser.printTerm(Term(sf1, Seq(x))) == "'f('x)") - assert(FOLParser.printTerm(Term(sf2, Seq(x, y))) == "'f('x, 'y)") - assert(FOLParser.printTerm(Term(sf3, Seq(x, y, z))) == "'f('x, 'y, 'z)") + assert(FOLParser.printTerm(Ind(sf1, Seq(x))) == "'f('x)") + assert(FOLParser.printTerm(Ind(sf2, Seq(x, y))) == "'f('x, 'y)") + assert(FOLParser.printTerm(Ind(sf3, Seq(x, y, z))) == "'f('x, 'y, 'z)") } test("nested function application") { - assert(FOLParser.printTerm(Term(sf2, Seq(Term(sf1, Seq(x)), y))) == "'f('f('x), 'y)") + assert(FOLParser.printTerm(Ind(sf2, Seq(Ind(sf1, Seq(x)), y))) == "'f('f('x), 'y)") } test("0-ary predicate") { - assert(FOLParser.printFormula(AtomicFormula(ConstantAtomicLabel("a", 0), Seq())) == "a") + assert((AtomicFormula(ConstantAtomicLabel("a", 0), Seq())).repr == "a") } test("predicate application") { - assert(FOLParser.printFormula(AtomicFormula(ConstantAtomicLabel("p", 3), Seq(cx, cy, cz))) == "p(x, y, z)") - assert(FOLParser.printFormula(AtomicFormula(ConstantAtomicLabel("p", 3), Seq(x, y, z))) == "p('x, 'y, 'z)") + assert((AtomicFormula(ConstantAtomicLabel("p", 3), Seq(cx, cy, cz))).repr == "p(x, y, z)") + assert((AtomicFormula(ConstantAtomicLabel("p", 3), Seq(x, y, z))).repr == "p('x, 'y, 'z)") } test("equality") { - assert(FOLParser.printFormula(AtomicFormula(equality, Seq(cx, cx))) == "x = x") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, AtomicFormula(equality, Seq(x, x))))) == "a ∧ 'x = 'x") + assert((AtomicFormula(equality, Seq(cx, cx))).repr == "x = x") + assert((multiand(Seq(a, AtomicFormula(equality, Seq(x, x))))).repr == "a ∧ 'x = 'x") } test("toplevel connectors") { - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(a, b))) == "a ⇒ b") - assert(FOLParser.printFormula(ConnectorFormula(Iff, Seq(a, b))) == "a ⇔ b") + 
assert((ConnectorFormula(Implies, Seq(a, b))).repr == "a ⇒ b") + assert((ConnectorFormula(Iff, Seq(a, b))).repr == "a ⇔ b") } test("unicode connectors") { - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(a))) == "¬a") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, b))) == "a ∧ b") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(a, b))) == "a ∨ b") + assert((ConnectorFormula(Neg, Seq(a))).repr == "¬a") + assert((multiand(Seq(a, b))).repr == "a ∧ b") + assert((multior(Seq(a, b))).repr == "a ∨ b") } test("connector associativity") { - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(And, Seq(a, b)), c))) == "a ∧ b ∧ c") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(ConnectorFormula(Or, Seq(a, b)), c))) == "a ∨ b ∨ c") + assert((multiand(Seq(multiand(Seq(a, b)), c))).repr == "a ∧ b ∧ c") + assert((multior(Seq(multior(Seq(a, b)), c))).repr == "a ∨ b ∨ c") } test("and/or of 1 argument") { - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a))) == "∧(a)") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(a))) == "∨(a)") + assert((multiand(Seq(a))).repr == "∧(a)") + assert((multior(Seq(a))).repr == "∨(a)") - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(ConnectorFormula(Or, Seq(a)), ConnectorFormula(And, Seq(a))))) == "∨(a) ⇒ ∧(a)") - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(a, a))) == "a ⇒ a") - assert(FOLParser.printFormula(BinderFormula(Forall, x, ConnectorFormula(Or, Seq(a)))) == "∀'x. ∨(a)") + assert((ConnectorFormula(Implies, Seq(multior(Seq(a)), multiand(Seq(a))))).repr == "∨(a) ⇒ ∧(a)") + assert((ConnectorFormula(Implies, Seq(a, a))).repr == "a ⇒ a") + assert((BinderFormula(Forall, x, multior(Seq(a)))).repr == "∀'x. ∨(a)") } test("connectors of >2 arguments") { - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, b, c))) == "a ∧ b ∧ c") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(a, b, c))) == "a ∨ b ∨ c") + assert((multiand(Seq(a, b, c))).repr == "a ∧ b ∧ c") + assert((multior(Seq(a, b, c))).repr == "a ∨ b ∨ c") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, b, c, a))) == "a ∧ b ∧ c ∧ a") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(a, b, c, b))) == "a ∨ b ∨ c ∨ b") + assert((multiand(Seq(a, b, c, a))).repr == "a ∧ b ∧ c ∧ a") + assert((multior(Seq(a, b, c, b))).repr == "a ∨ b ∨ c ∨ b") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(ConnectorFormula(And, Seq(a, b, c)), ConnectorFormula(And, Seq(c, b, a))))) == "a ∧ b ∧ c ∨ c ∧ b ∧ a") + assert((multior(Seq(multiand(Seq(a, b, c)), multiand(Seq(c, b, a))))).repr == "a ∧ b ∧ c ∨ c ∧ b ∧ a") } test("connectors with no arguments") { - assert(FOLParser.printFormula(ConnectorFormula(And, Seq())) == "⊤") - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq())) == "⊥") + assert((multiand(Seq())).repr == "⊤") + assert((multior(Seq())).repr == "⊥") } test("connector priority") { // a ∨ (b ∧ c) - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(a, ConnectorFormula(And, Seq(b, c))))) == "a ∨ b ∧ c") + assert((multior(Seq(a, multiand(Seq(b, c))))).repr == "a ∨ b ∧ c") // (a ∧ b) ∨ c - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(ConnectorFormula(And, Seq(a, b)), c))) == "a ∧ b ∨ c") + assert((multior(Seq(multiand(Seq(a, b)), c))).repr == "a ∧ b ∨ c") // (a ∧ b) => c - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(ConnectorFormula(And, Seq(a, b)), c))) == "a ∧ b ⇒ c") + assert((ConnectorFormula(Implies, Seq(multiand(Seq(a, b)), c))).repr == "a ∧ b ⇒ 
c") // a => (b ∧ c) - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(a, ConnectorFormula(And, Seq(b, c))))) == "a ⇒ b ∧ c") + assert((ConnectorFormula(Implies, Seq(a, multiand(Seq(b, c))))).repr == "a ⇒ b ∧ c") // (a ∨ b) => c - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(ConnectorFormula(Or, Seq(a, b)), c))) == "a ∨ b ⇒ c") + assert((ConnectorFormula(Implies, Seq(multior(Seq(a, b)), c))).repr == "a ∨ b ⇒ c") // a => (b ∨ c) - assert(FOLParser.printFormula(ConnectorFormula(Implies, Seq(a, ConnectorFormula(Or, Seq(b, c))))) == "a ⇒ b ∨ c") + assert((ConnectorFormula(Implies, Seq(a, multior(Seq(b, c))))).repr == "a ⇒ b ∨ c") // (a ∧ b) <=> c - assert(FOLParser.printFormula(ConnectorFormula(Iff, Seq(ConnectorFormula(And, Seq(a, b)), c))) == "a ∧ b ⇔ c") + assert((ConnectorFormula(Iff, Seq(multiand(Seq(a, b)), c))).repr == "a ∧ b ⇔ c") // a <=> (b ∧ c) - assert(FOLParser.printFormula(ConnectorFormula(Iff, Seq(a, ConnectorFormula(And, Seq(b, c))))) == "a ⇔ b ∧ c") + assert((ConnectorFormula(Iff, Seq(a, multiand(Seq(b, c))))).repr == "a ⇔ b ∧ c") // (a ∨ b) <=> c - assert(FOLParser.printFormula(ConnectorFormula(Iff, Seq(ConnectorFormula(Or, Seq(a, b)), c))) == "a ∨ b ⇔ c") + assert((ConnectorFormula(Iff, Seq(multior(Seq(a, b)), c))).repr == "a ∨ b ⇔ c") // a <=> (b ∨ c) - assert(FOLParser.printFormula(ConnectorFormula(Iff, Seq(a, ConnectorFormula(Or, Seq(b, c))))) == "a ⇔ b ∨ c") + assert((ConnectorFormula(Iff, Seq(a, multior(Seq(b, c))))).repr == "a ⇔ b ∨ c") } test("connector parentheses") { - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(Or, Seq(a, b)), c))) == "(a ∨ b) ∧ c") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(a, ConnectorFormula(Or, Seq(b, c))))) == "a ∧ (b ∨ c)") + assert((multiand(Seq(multior(Seq(a, b)), c))).repr == "(a ∨ b) ∧ c") + assert((multiand(Seq(a, multior(Seq(b, c))))).repr == "a ∧ (b ∨ c)") } test("schematic connectors") { - assert(FOLParser.printFormula(sc1(p(x))) == "?c(p('x))") - assert(FOLParser.printFormula(iff(sc1(p(x)), sc2(p(y), p(y)))) == "?c(p('x)) ⇔ ?c(p('y), p('y))") + assert((sc1(p(x))).repr == "?c(p('x))") + assert((iff(sc1(p(x)), sc2(p(y), p(y)))).repr == "?c(p('x)) ⇔ ?c(p('y), p('y))") } test("quantifiers") { - assert(FOLParser.printFormula(BinderFormula(Forall, VariableLabel("x"), AtomicFormula(ConstantAtomicLabel("p", 0), Seq()))) == "∀'x. p") - assert(FOLParser.printFormula(BinderFormula(Exists, VariableLabel("x"), AtomicFormula(ConstantAtomicLabel("p", 0), Seq()))) == "∃'x. p") - assert(FOLParser.printFormula(BinderFormula(ExistsOne, VariableLabel("x"), AtomicFormula(ConstantAtomicLabel("p", 0), Seq()))) == "∃!'x. p") + assert((BinderFormula(Forall, VariableLabel("x"), AtomicFormula(ConstantAtomicLabel("p", 0), Seq()))).repr == "∀'x. p") + assert((BinderFormula(Exists, VariableLabel("x"), AtomicFormula(ConstantAtomicLabel("p", 0), Seq()))).repr == "∃'x. p") + assert((BinderFormula(ExistsOne, VariableLabel("x"), AtomicFormula(ConstantAtomicLabel("p", 0), Seq()))).repr == "∃!'x. p") } test("nested quantifiers") { - assert(FOLParser.printFormula(BinderFormula(Forall, x, BinderFormula(Exists, y, BinderFormula(ExistsOne, z, a)))) == "∀'x. ∃'y. ∃!'z. a") + assert((BinderFormula(Forall, x, BinderFormula(Exists, y, BinderFormula(ExistsOne, z, a)))).repr == "∀'x. ∃'y. ∃!'z. a") } test("quantifier parentheses") { - assert(FOLParser.printFormula(BinderFormula(Forall, x, ConnectorFormula(And, Seq(b, a)))) == "∀'x. b ∧ a") + assert((BinderFormula(Forall, x, multiand(Seq(b, a)))).repr == "∀'x. 
b ∧ a") assert( FOLParser.printFormula( BinderFormula( Forall, x, - ConnectorFormula(And, Seq(AtomicFormula(ConstantAtomicLabel("p", 1), Seq(x)), AtomicFormula(ConstantAtomicLabel("q", 1), Seq(x)))) + multiand(Seq(AtomicFormula(ConstantAtomicLabel("p", 1), Seq(x)), AtomicFormula(ConstantAtomicLabel("q", 1), Seq(x)))) ) ) == "∀'x. p('x) ∧ q('x)" ) - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(BinderFormula(Forall, x, b), a))) == "(∀'x. b) ∧ a") + assert((multiand(Seq(BinderFormula(Forall, x, b), a))).repr == "(∀'x. b) ∧ a") assert( FOLParser.printFormula( @@ -201,21 +202,21 @@ class PrinterTest extends AnyFunSuite with TestUtils { ) == "(∀'x. p('x)) ∧ q('x)" ) - assert(FOLParser.printFormula(ConnectorFormula(Or, Seq(ConnectorFormula(And, Seq(a, BinderFormula(Forall, x, b))), a))) == "a ∧ (∀'x. b) ∨ a") - assert(FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(And, Seq(a, BinderFormula(Forall, x, b))), a))) == "a ∧ (∀'x. b) ∧ a") + assert((multior(Seq(multiand(Seq(a, BinderFormula(Forall, x, b))), a))).repr == "a ∧ (∀'x. b) ∨ a") + assert((multiand(Seq(multiand(Seq(a, BinderFormula(Forall, x, b))), a))).repr == "a ∧ (∀'x. b) ∧ a") } test("complex formulas with connectors") { - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(ConnectorFormula(Or, Seq(a, b))))) == "¬(a ∨ b)") - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(ConnectorFormula(Neg, Seq(a))))) == "¬¬a") - assert(FOLParser.printFormula(ConnectorFormula(Neg, Seq(ConnectorFormula(Neg, Seq(ConnectorFormula(And, Seq(a, b))))))) == "¬¬(a ∧ b)") + assert((ConnectorFormula(Neg, Seq(multior(Seq(a, b))))).repr == "¬(a ∨ b)") + assert((ConnectorFormula(Neg, Seq(ConnectorFormula(Neg, Seq(a))))).repr == "¬¬a") + assert((ConnectorFormula(Neg, Seq(ConnectorFormula(Neg, Seq(multiand(Seq(a, b))))))).repr == "¬¬(a ∧ b)") assert( - FOLParser.printFormula(ConnectorFormula(And, Seq(ConnectorFormula(And, Seq(ConnectorFormula(Neg, Seq(a)), ConnectorFormula(Neg, Seq(b)))), ConnectorFormula(Neg, Seq(c))))) == "¬a ∧ ¬b ∧ ¬c" + (multiand(Seq(multiand(Seq(ConnectorFormula(Neg, Seq(a)), ConnectorFormula(Neg, Seq(b)))), ConnectorFormula(Neg, Seq(c))))).repr == "¬a ∧ ¬b ∧ ¬c" ) } test("complex formulas") { - assert(FOLParser.printFormula(BinderFormula(Forall, x, AtomicFormula(equality, Seq(x, x)))) == "∀'x. 'x = 'x") + assert((BinderFormula(Forall, x, AtomicFormula(equality, Seq(x, x)))).repr == "∀'x. 
'x = 'x") } test("sequent") { @@ -279,32 +280,34 @@ class PrinterTest extends AnyFunSuite with TestUtils { val parser = Parser(SynonymInfoBuilder().addSynonyms(prefixIn.id, in.id).build, in.id :: Nil, Nil) assert(parser.printFormula(AtomicFormula(in, Seq(cx, cy))) == "x ∊ y") assert(parser.printFormula(AtomicFormula(in, Seq(x, y))) == "'x ∊ 'y") - assert(parser.printFormula(ConnectorFormula(And, Seq(AtomicFormula(in, Seq(x, y)), a))) == "'x ∊ 'y ∧ a") - assert(parser.printFormula(ConnectorFormula(Or, Seq(a, AtomicFormula(in, Seq(x, y))))) == "a ∨ 'x ∊ 'y") + assert(parser.printFormula(multiand(Seq(AtomicFormula(in, Seq(x, y)), a))) == "'x ∊ 'y ∧ a") + assert(parser.printFormula(multior(Seq(a, AtomicFormula(in, Seq(x, y))))) == "a ∨ 'x ∊ 'y") assert(parser.printFormula(AtomicFormula(prefixIn, Seq(cx, cy))) == "x ∊ y") assert(parser.printFormula(AtomicFormula(prefixIn, Seq(x, y))) == "'x ∊ 'y") - assert(parser.printFormula(ConnectorFormula(And, Seq(AtomicFormula(prefixIn, Seq(x, y)), a))) == "'x ∊ 'y ∧ a") - assert(parser.printFormula(ConnectorFormula(Or, Seq(a, AtomicFormula(prefixIn, Seq(x, y))))) == "a ∨ 'x ∊ 'y") + assert(parser.printFormula(multiand(Seq(AtomicFormula(prefixIn, Seq(x, y)), a))) == "'x ∊ 'y ∧ a") + assert(parser.printFormula(multior(Seq(a, AtomicFormula(prefixIn, Seq(x, y))))) == "a ∨ 'x ∊ 'y") } test("infix functions") { val parser = Parser(SynonymInfoBuilder().addSynonyms(plus.id, "+").build, Nil, ("+", Associativity.Left) :: Nil) - assert(parser.printTerm(Term(plus, Seq(cx, cy))) == "x + y") - assert(parser.printTerm(Term(plus, Seq(Term(plus, Seq(cx, cy)), cz))) == "x + y + z") + assert(parser.printTerm(Ind(plus, Seq(cx, cy))) == "x + y") + assert(parser.printTerm(Ind(plus, Seq(Ind(plus, Seq(cx, cy)), cz))) == "x + y + z") } /* test("mix of infix functions and infix predicates") { val parser = Parser(SynonymInfoBuilder().addSynonyms(in.id, "∊").addSynonyms(plus.id, "+").build, "∊" :: Nil, ("+", Associativity.Left) :: Nil) - assert(parser.printFormula(AtomicFormula(in, Seq(Term(plus, Seq(cx, cy)), cz))) == "x + y ∊ z") + assert(parser.printFormula(AtomicFormula(in, Seq(Ind(plus, Seq(cx, cy)), cz))) == "x + y ∊ z") assert( parser.printFormula( ConnectorFormula( And, - Seq(ConnectorFormula(And, Seq(AtomicFormula(in, Seq(cx, cy)), AtomicFormula(in, Seq(cx, cz)))), AtomicFormula(in, Seq(Term(plus, Seq(cx, cy)), cz))) + Seq(multiand(Seq(AtomicFormula(in, Seq(cx, cy)), AtomicFormula(in, Seq(cx, cz)))), AtomicFormula(in, Seq(Ind(plus, Seq(cx, cy)), cz))) ) ) == "x ∊ y ∧ x ∊ z ∧ x + y ∊ z" ) }*/ + + */ } diff --git a/lisa-utils/src/test/scala/lisa/utils/TestUtils.scala b/lisa-utils/src/test/scala/lisa/utils/TestUtils.scala index 243bd3c1a..3239e7998 100644 --- a/lisa-utils/src/test/scala/lisa/utils/TestUtils.scala +++ b/lisa-utils/src/test/scala/lisa/utils/TestUtils.scala @@ -8,21 +8,16 @@ import lisa.utils.KernelHelpers.{_, given} import lisa.utils.{_, given} trait TestUtils { - val (a, b, c) = (ConstantAtomicLabel("a", 0), ConstantAtomicLabel("b", 0), ConstantAtomicLabel("c", 0)) - val p = ConstantAtomicLabel("p", 1) - val (x, y, z) = (VariableLabel("x"), VariableLabel("y"), VariableLabel("z")) - val (x1, y1, z1) = (VariableLabel("x1"), VariableLabel("y1"), VariableLabel("z1")) - val (xPrime, yPrime, zPrime) = (VariableLabel("x'"), VariableLabel("y'"), VariableLabel("z'")) - val (cx, cy, cz) = (ConstantFunctionLabel("x", 0), ConstantFunctionLabel("y", 0), ConstantFunctionLabel("z", 0)) - val (f0, f1, f2, f3) = (ConstantFunctionLabel("f", 0), ConstantFunctionLabel("f", 1), 
ConstantFunctionLabel("f", 2), ConstantFunctionLabel("f", 3)) - val (sf1, sf2, sf3) = (SchematicFunctionLabel("f", 1), SchematicFunctionLabel("f", 2), SchematicFunctionLabel("f", 3)) - val (sPhi1, sPhi2) = (SchematicPredicateLabel("phi", 1), SchematicPredicateLabel("phi", 2)) - val (sc1, sc2) = (SchematicConnectorLabel("c", 1), SchematicConnectorLabel("c", 2)) - val (in, plus) = (ConstantAtomicLabel("elem", 2), ConstantFunctionLabel("+", 2)) + val (a, b, c) = (Constant("a", Prop), Constant("b", Prop), Constant("c", Prop)) + val p = Constant("p", Arrow(Ind, Prop)) + val (x, y, z) = (Variable("x", Ind), Variable("y", Ind), Variable("z", Ind)) + val (x1, y1, z1) = (Variable("x1", Ind), Variable("y1", Ind), Variable("z1", Ind)) + val (xPrime, yPrime, zPrime) = (Variable("x'", Ind), Variable("y'", Ind), Variable("z'", Ind)) + val (cx, cy, cz) = (Constant("x", Ind), Constant("y", Ind), Constant("z", Ind)) + val (f0, f1, f2, f3) = (Constant("f", Ind), Constant("f", Arrow(Ind, Ind)), Constant("f", Arrow(Ind, Arrow(Ind, Ind))), Constant("f", Arrow(Ind, Arrow(Ind, Arrow(Ind, Ind))))) + val (sf1, sf2, sf3) = (Variable("f", Arrow(Ind, Ind)), Variable("f", Arrow(Ind, Arrow(Ind, Ind))), Variable("f", Arrow(Ind, Arrow(Ind, Arrow(Ind, Ind))))) + val (sPhi1, sPhi2) = (Variable("phi", Arrow(Ind, Prop)), Variable("phi", Arrow(Ind, Arrow(Ind, Prop)))) + val (sc1, sc2) = (Variable("c", Prop >>: Prop), Variable("c", Prop >>: Prop)) + val (in, plus) = (Constant("elem", Prop >>: Prop >>: Ind), Constant("+", Arrow(Ind, Arrow(Ind, Ind)))) - given Conversion[AtomicLabel, AtomicFormula] = AtomicFormula(_, Seq.empty) - - given Conversion[ConstantFunctionLabel, Term] = Term(_, Seq()) - - given Conversion[VariableLabel, Term] = VariableTerm.apply } diff --git a/lisa-utils/src/test/scala/lisa/utils/UnificationTest.scala b/lisa-utils/src/test/scala/lisa/utils/UnificationTest.scala index 1910e5de8..a94dee463 100644 --- a/lisa-utils/src/test/scala/lisa/utils/UnificationTest.scala +++ b/lisa-utils/src/test/scala/lisa/utils/UnificationTest.scala @@ -27,7 +27,7 @@ class UnificationTest extends ProofTacticTestLib { // TODO: Generate random terms, apply a random substitution and try to retrieve it? 
- val correct: List[(Term, Term, Option[Map[VariableLabel, Term]])] = List( + val correct: List[(Ind, Ind, Option[Map[VariableLabel, Ind]])] = List( (f(x, y), f(x, x), Some(Map(y -> x))), (f(x, y), f(x, y), Some(Map())), (f(x, x), f(x, x), Some(Map())), @@ -41,7 +41,7 @@ class UnificationTest extends ProofTacticTestLib { (f(x, g(y, x)), f(y, g(x, y)), Some(Map(x -> y, y -> x))) ) - val incorrect: List[(Term, Term, Option[Map[VariableLabel, Term]])] = List( + val incorrect: List[(Ind, Ind, Option[Map[VariableLabel, Ind]])] = List( (f(y, y), f(x, y), None), (f(x, y), g(x, y), None), (f(x, y), h(x), None), @@ -51,7 +51,7 @@ class UnificationTest extends ProofTacticTestLib { for ((t1, t2, res) <- (correct ++ incorrect)) if (matchTerm(t1, t2) == res) true - else fail(s"Matching test failed:\nFirst Term: $t1\nSecond Term: $t2\nExpected Result: $res\nFound: ${matchTerm(t1, t2)}\n") + else fail(s"Matching test failed:\nFirst Ind: $t1\nSecond Ind: $t2\nExpected Result: $res\nFound: ${matchTerm(t1, t2)}\n") } test("Unification Tests: First-Order Matching on Formulas") { @@ -69,7 +69,7 @@ class UnificationTest extends ProofTacticTestLib { val psi = formulaVariable val chi = formulaVariable - val correct: List[(Formula, Formula, Option[(Map[VariableFormulaLabel, Formula], Map[VariableLabel, Term])])] = List( + val correct: List[(Prop, Prop, Option[(Map[VariableFormulaLabel, Prop], Map[VariableLabel, Ind])])] = List( (P(f(x, y)), P(f(x, x)), Some(Map(), Map(y -> x))), (phi, P(f(x, y)), Some(Map(phi -> P(f(x, y))), Map())), (phi, chi, Some(Map(phi -> chi), Map())), @@ -79,7 +79,7 @@ class UnificationTest extends ProofTacticTestLib { (exists(x, P(x)), exists(y, P(y)), Some(Map(), Map())) ) - val incorrect: List[(Formula, Formula, Option[(Map[VariableFormulaLabel, Formula], Map[VariableLabel, Term])])] = List( + val incorrect: List[(Prop, Prop, Option[(Map[VariableFormulaLabel, Prop], Map[VariableLabel, Ind])])] = List( (P(f(x, y)), P(h(x)), None), (exists(x, phi), exists(x, P(x)), None), (exists(x, P(x)), exists(x, P(y)), None) @@ -87,7 +87,7 @@ class UnificationTest extends ProofTacticTestLib { for ((t1, t2, res) <- (correct ++ incorrect)) if (matchFormula(t1, t2) == res) true - else fail(s"Matching test failed:\nFirst Formula: $t1\nSecond Formula: $t2\nExpected Result: $res\nFound: ${matchFormula(t1, t2)}\n") + else fail(s"Matching test failed:\nFirst Prop: $t1\nSecond Prop: $t2\nExpected Result: $res\nFound: ${matchFormula(t1, t2)}\n") } /** @@ -103,7 +103,7 @@ class UnificationTest extends ProofTacticTestLib { val y = variable val z = variable - val correct: List[(Term, Term, Option[Map[VariableLabel, Term]])] = List( + val correct: List[(Ind, Ind, Option[Map[VariableLabel, Ind]])] = List( (f(x, y), f(x, x), Some(Map(y -> x))), (f(x, x), f(x, y), Some(Map(x -> y))), (f(x, g(x, z)), f(x, y), Some(Map(y -> g(x, z)))), @@ -119,7 +119,7 @@ class UnificationTest extends ProofTacticTestLib { (f(x, g(y, x)), f(y, g(x, y)), Some(Map(x -> y, y -> x))) ) - val incorrect: List[(Term, Term, Option[Map[VariableLabel, Term]])] = List( + val incorrect: List[(Ind, Ind, Option[Map[VariableLabel, Ind]])] = List( (f(y, y), f(x, y), None), (f(x, y), g(x, y), None), (f(x, y), h(x), None), @@ -130,7 +130,7 @@ class UnificationTest extends ProofTacticTestLib { for ((t1, t2, res) <- (correct ++ incorrect)) if (unifyTerm(t1, t2) == res) true - else fail(s"Unification test failed:\nFirst Term: $t1\nSecond Term: $t2\nExpected Result: $res\nFound: ${unifyTerm(t1, t2)}\n") + else fail(s"Unification test failed:\nFirst Ind: 
$t1\nSecond Ind: $t2\nExpected Result: $res\nFound: ${unifyTerm(t1, t2)}\n") } test("Unification Tests: First-Order Unification of Formulas") { @@ -148,7 +148,7 @@ class UnificationTest extends ProofTacticTestLib { val psi = formulaVariable val chi = formulaVariable - val correct: List[(Formula, Formula, Option[(Map[VariableFormulaLabel, Formula], Map[VariableLabel, Term])])] = List( + val correct: List[(Prop, Prop, Option[(Map[VariableFormulaLabel, Prop], Map[VariableLabel, Ind])])] = List( (P(f(x, y)), P(f(x, x)), Some(Map(), Map(y -> x))), (P(f(x, x)), P(f(x, y)), Some(Map(), Map(x -> y))), (phi, P(f(x, y)), Some(Map(phi -> P(f(x, y))), Map())), @@ -158,7 +158,7 @@ class UnificationTest extends ProofTacticTestLib { (exists(x, P(x)), exists(y, P(y)), Some(Map(), Map())) ) - val incorrect: List[(Formula, Formula, Option[(Map[VariableFormulaLabel, Formula], Map[VariableLabel, Term])])] = List( + val incorrect: List[(Prop, Prop, Option[(Map[VariableFormulaLabel, Prop], Map[VariableLabel, Ind])])] = List( (P(f(x, y)), P(h(x)), None), (P(h(x)), P(f(x, y)), None), (exists(x, phi), exists(x, P(x)), None), @@ -169,7 +169,7 @@ class UnificationTest extends ProofTacticTestLib { for ((t1, t2, res) <- (correct ++ incorrect)) if (unifyFormula(t1, t2) == res) true - else fail(s"Unification test failed:\nFirst Formula: $t1\nSecond Formula: $t2\nExpected Result: $res\nFound: ${unifyFormula(t1, t2)}\n") + else fail(s"Unification test failed:\nFirst Prop: $t1\nSecond Prop: $t2\nExpected Result: $res\nFound: ${unifyFormula(t1, t2)}\n") } */ } diff --git a/refman/kernel.tex b/refman/kernel.tex index f93de1cba..3952d64f2 100644 --- a/refman/kernel.tex +++ b/refman/kernel.tex @@ -4,164 +4,154 @@ \chapter{Lisa's Trusted Kernel} Lisa's kernel is the starting point of Lisa, formalising the foundations of the whole theorem prover. It is the only trusted code base, meaning that if it is bug-free then no further erroneous code can violate the soundness property and prove invalid statements. Hence, the two main goals of the kernel are to be efficient and trustworthy. -Lisa's foundations are based on very traditional (in the mathematical community) foundational theory of all mathematics: \textbf{First Order Logic}, expressed using \textbf{Sequent Calculus} (augmented with schematic symbols), with axioms of \textbf{Set Theory}. -While the Lisa library is built on top of Set Theory axioms, the kernel is actually theory-agnostic and is sound to use with any other set of axioms. Hence, we defer Set Theory to chapter~\ref{chapt:settheory}. +Lisa's foundations are based on traditional (in the mathematical community) foundational theory of all mathematics, but with some extensions and modifications to more closely match common mathematical practice: +\begin{itemize} + \item The syntax of Lisa's statement is an extension of first-order logic with lambda terms named \textbf{\lambdafol} (see \autoref{sec:FOL}). + \item The deductive system of Lisa's kernel is \textbf{Sequent Calculus}. + \item The axiomatic theory is \textbf{ZFC Set Theory}, but the kernel is actually theory-agnostic and is sound to use with any other set of axioms. Set theory (see \autoref{chapt:settheory}). 
+\end{itemize} -\section{First Order Logic} +\section{\lambdafol: First Order Logic with Lambda Terms} \label{sec:FOL} -\subsection{Syntax} -\begin{definition}[Terms] - In Lisa, the set of terms $\mathcal{T}$ is defined by the following grammar: - \begin{align} - \mathcal{T} := & ~\mathcal{L}_{Term}(\List[\mathcal{T}])~, - \end{align} - % - where $\mathcal{L}_{Term}$ is the set of \textit{term labels}: - % - \begin{align} - \mathcal{L}_{Term} := & ~\operatorname{ConstantTermLabel}(\textnormal{Id}, \textnormal{Arity}) \\ - \mid & ~\operatorname{SchematicTermLabel}(\textnormal{Id}, \textnormal{Arity}) - \end{align} - A label can be either \textit{constant} or \textit{schematic}, and is made of an identifier (a pair of a string and an integer, for example $x_1$) and the arity of the label (an integer). - A term is made of a term label and a list of children, whose length must be equal to the arity of the label. - A constant label of arity $0$ is called a \emph{constant}, and a schematic label of arity $0$ a \emph{variable}. - We define the abbreviation - % - $$ - \Var(x) \equiv \operatorname{SchematicTermLabel}(x, 0)~. - $$ -\end{definition} +First-order logic has many useful properties, but the way it is usually defined is not very convenient for practical use. In particular, it does not allow writing self-contained terms that bind a variable. A typical example of this is the integral: +$$ +\int_0^1 x^2 dx +$$ +$x$ is a subterm of this expression, but it is bound by the integral sign $\int ... dx$. Another example is the notation for set comprehensions: +$$ +\lbrace x \in \mathbb N \mid \exists y. x = y^2 \rbrace +$$ +Here, $x$ is again a bound variable, and moreover the expression contains a subformula. Both the integral and the comprehension are supposed to be terms: they denote elements of the universe, unlike formulas. But in pure first-order logic, terms cannot bind variables nor contain formulas. To address this, Lisa uses an extension of first-order logic with lambda terms, called \lambdafol. -Constant labels represent a fixed function symbol in some language, for example the addition ``+'' in Peano arithmetic. +\subsection{Expressions} -Schematic symbols on the other hand, are uninterpreted --- they can represent any possible term and hence can be substituted by any term. Their use will become clearer in the next section when we introduce the concept of deductions. Moreover, variables, which are schematic terms of arity 0, can be bound in formulas. \footnote{In a very traditional presentation of first order logic, we would only have variables, i.e. schematic terms of arity 0, and schematic terms of higher arity would only appear in second order logic. We defer to Part~\ref{part:theory} Section~\ref{sec:theoryfol} the explanation of why our inclusion of schematic function symbols doesn't fundamentally move us out of First Order Logic.} +The basic elements of Lisa are called \textit{Expressions}, generalising terms and formulas. Expressions are terms of the simply typed lambda-calculus, with two basic types: \textit{Prop}, or propositions, corresponding to formulas, and \textit{Ind}, or individuals, corresponding to terms. To disambiguate from Scala types and set-theoretic types, we call Prop and Ind \textit{Sorts}.
Formally: -\begin{example}[Terms]The following are typical examples of terms labels: - \begin{align*} - \emptyset & := \ConstantTermLabel(``\emptyset", 0) \\ - 7 & := \ConstantTermLabel(``7", 0) \\ - x & := \SchematicTermLabel(``x", 0) \\ - + & := \ConstantTermLabel(``{+}", 2) \\ - f & := \SchematicTermLabel(``f", 1) \\ - \end{align*} - The following are examples of Terms: - \begin{gather*} - \emptyset() := \emptyset(\Nil)\\ - 7() := 7(\Nil)\\ - x() := x(\Nil)\\ - +(7(), x())\\ - f(x()) - \end{gather*} - -\end{example} - - -\begin{definition}[Formulas] - The set of Formulas $\mathcal{F}$ is defined similarly: - % +\begin{definition}[Identifiers] + Identifiers are pairs of strings and positive integers used to name symbols. The integer part is convenient to quickly compute fresh identifiers. \begin{align} - \mathcal{F} := & ~\mathcal{L}_{Predicate}(\List[\mathcal{T}]) \\ - \mid & ~\mathcal{L}_{Connector}(\List[\mathcal{F}]) \\ - \mid & ~\mathcal{L}_{Binder}(\Var(\textnormal{Id}), \mathcal{F})~, + \textit{ID} := & ~\text{ID(\textit{String}, \textit{Int})} \\ \end{align} - % - where $\mathcal{L}_{Predicate}$ is the set of \textit{predicate labels}: - % + Identifiers cannot contain the symbols \lstinline|()[]{}?,;_| nor whitespace. The canonical representation of an identifier is + +$$ +\text{ID}(\text{"foo"}, i) = \begin{cases} + \text{foo} & \text{ if } i=0\\ + \text{foo\textunderscore}i & \text{ else} + \end{cases} +$$ + +\end{definition} + +\begin{definition}[Sorts] + Sorts are defined by the following grammar: \begin{align} - \mathcal{L}_{Predicate} := & ~\ConstantAtomicLabel(\textnormal{Id}, \textnormal{Arity}) \\ - \mid & ~\SchematicPredicateLabel(\textnormal{Id}, \textnormal{Arity})~, + \mathcal{S} := & ~\text{Prop} \mid \text{Ind} \mid \mathcal{S} \rightarrow \mathcal{S} \\ \end{align} - % - $\mathcal{L}_{Connector}$ is the set of \textit{connector labels}: - % + + $A \rightarrow B$ is the sort of expressions taking an argument of sort $A$ and returning a result of sort $B$. Note that $\rightarrow$ associates to the right, i.e. $A \rightarrow B \rightarrow C$ is the same as $A \rightarrow (B \rightarrow C)$. +\end{definition} + +\begin{definition}[Expressions] + We define sets of \textit{variables} $\mathcal{V}$ and \textit{constants} $\mathcal{C}$: \begin{align} - \mathcal{L}_{Connector} := & ~\ConstantConnectorLabel(\textnormal{Id}, \textnormal{Arity}) \\ - \mid & ~\SchematicConnectorLabel(\textnormal{Id}, \textnormal{Arity})~. + \mathcal{V} := & ~\text{Var}(\textit{ID}, \mathcal{S}) \\ + \mathcal{C} := & ~\text{Cst}(\textit{ID}, \mathcal{S}) \\ \end{align} - % - and $\mathcal{L}_{Binder}$ is the set of \textit{Binder labels}: - % + When unambiguous, we typically represent variables and constants using the representation of their identifier, optionally with the sort following a colon, as in $x: \Ind$. + + An expression is either a variable, a constant, an application of an expression to another expression, or an abstraction of a variable over an expression. Expressions are always uniquely \textit{sorted}. Formally: \begin{align}
+ \mathcal{E} := & ~\mathcal{V}: \mathcal{S} \\ + & \mid \mathcal{C}: \mathcal{S} \\ + & \mid \text{App}(\mathcal{E}: \mathcal{S}_1 \rightarrow \mathcal{S}_2, \mathcal{E}: \mathcal{S}_1): \mathcal{S}_2 \\ + & \mid \text{Abs}(\mathcal{V}: \mathcal{S}_1, \mathcal{E}: \mathcal{S}_2): \mathcal{S}_1 \rightarrow \mathcal{S}_2 \\ \end{align} + We usually represent App$(f, x)$ as $f(x)$ and Abs$(x, e)$ as $\lambda x. e$. In $\lambda x. e$, all occurrences of $x$ in $e$ are called bound. + Every expression must belong to a sort. Ill-sorted expressions are not forbidden. + + Expressions of sort \Prop are called \textit{formulas}. Expressions of sort \Ind are called \textit{terms}. + Expressions of sort $\Ind \rightarrow \Ind \rightarrow ... \rightarrow \Ind$ are called \textit{functionals}. + Expressions of sort $\Ind \rightarrow \Ind \rightarrow ... \rightarrow \Prop$ are called \textit{predicates}, and expressions of sort $\Prop \rightarrow \Prop \rightarrow ... \rightarrow \Prop$ are called \textit{connectors}. - Connectors and predicates, like terms, can exist in either constant or schematic forms. Note that connectors and predicates vary only in the type of arguments they take, so that connectors and predicates of arity 0 are essentially the same thing. Hence, Lisa, does not permit connectors of arity 0 and suggests the use of predicates instead. - A contrario to schematic terms of arity 0, schematic predicates of arity 0 can't be bound, but they still play a special role sometimes, so we introduce a special notation for them - % - $$ - \FormulaVar(X) \equiv \SchematicPredicateLabel(X, 0)~. - $$ - % - Moreover, in Lisa, a contrario to constant predicates and term symbols, which can be freely created, there is only the following finite set of constant connector symbols in Lisa: - % - $$ - \operatorname{Neg}(\neg, 1)\mid \operatorname{Implies}(\rightarrow, 2)\mid \operatorname{Iff}(\leftrightarrow, 2)\mid \operatorname{And}(\land, -1)\mid \operatorname{Or}(\lor, -1)~, - $$ - % - where the connectors And and Or are allowed to have an unrestricted arity, represented by the value $-1$. This means that a conjunction or a disjunction can have any finite number of children. - Similarly, there are only the following three binder labels: - % - $$ - \forall \mid \exists \mid \exists !~. - $$ - % - We also introduce a special constant predicate symbol, equality: - % - $$ - \operatorname{Equality}(=, 2)~.
- $$ \end{definition} -\begin{example}[Formula]The following are typical examples of formula labels: + +\begin{definition}[Constants] + We predefine some important logical constants: \begin{align*} - \True & := \ConstantAtomicLabel(``\True", 0) \\ - \False & := \ConstantAtomicLabel(``\False", 0) \\ - X & := \SchematicPredicateLabel(``X", 0) \\ - = & := \ConstantAtomicLabel(``=", 2) \\ - {\in} & := \ConstantAtomicLabel(``{\in}", 2) \\ - P & := \SchematicPredicateLabel(``P", 1) \\ - \neg & := \ConstantConnectorLabel(``{\neg}", 1) \\ - \land & := \ConstantConnectorLabel(``{\land}", -1) \\ - \lor & := \ConstantConnectorLabel(``{\lor}", -1) \\ - \rightarrow & := \ConstantConnectorLabel(``{\rightarrow}", 2) \\ - \leftrightarrow & := \ConstantConnectorLabel(``{\leftrightarrow}", 2) \\ - c & := \SchematicConnectorLabel(``{c}", 3) \\ + = \quad: & \Ind \rightarrow \Ind \rightarrow \Prop \\ + \top \quad: & \Prop \\ + \bot \quad: & \Prop \\ + \neg \quad: & \Prop \rightarrow \Prop \\ + \land \quad: & \Prop \rightarrow \Prop \rightarrow \Prop \\ + \lor \quad: & \Prop \rightarrow \Prop \rightarrow \Prop \\ + \Rightarrow \quad: & \Prop \rightarrow \Prop \rightarrow \Prop \\ + \Leftrightarrow \quad: & \Prop \rightarrow \Prop \rightarrow \Prop \\ + \forall \quad: & (\Ind \rightarrow \Prop) \rightarrow \Prop \\ + \exists \quad: & (\Ind \rightarrow \Prop) \rightarrow \Prop \\ + \epsilon \quad: & (\Ind \rightarrow \Prop) \rightarrow \Ind \end{align*} - Note that in the case of $\ConstantConnectorLabel$, the list is exhaustive: $\neg, \land, \lor, \rightarrow$ and $\leftrightarrow$ are the only logical connectors accepted by Lisa. - The following are examples of Formulas: - \begin{gather*} - \True() := \True(\Nil)\\ - X() := X(\Nil)\\ - P(x(), 7()) \\ - =(+(7(), x()), +(x(), 7()))\\ - \forall(x, =(x(), x())) \\ - \neg(\exists(x, {\in}(x(), \emptyset))) - \end{gather*} + + They have special meaning for the deduction system of Lisa, but syntactically behave the same as user-defined constants. + + We call $\forall$, $\exists$ and $\epsilon$ \textit{binders}. We often write bound expressions such as $\forall(\lambda x. P(x) \land Q(x))$ as $\forall x. P(x) \land Q(x)$, like in traditional first-order logic. +\end{definition} + +\begin{example} + The following are examples of expressions: + \begin{tabularx}{\textwidth}{|l | X|}\hline + $\Ind$ & The sort of individuals, i.e. elements of the universe such as sets, numbers, etc. \\\hline + $\emptyset: \Ind$ & The empty set \\\hline + $7: \Ind$ & The number 7 \\\hline + $\Prop$ & The sort of formulas, which can be either true or false. \\\hline + $\top: \Prop$ & The constant true \\\hline + $\bot: \Prop$ & The constant false \\\hline + $\Ind \rightarrow \Ind$ & The sort of functionals, taking one individual as argument and returning an individual. \\\hline + $\mathcal P: \Ind \rightarrow \Ind$ & The powerset operator \\\hline + $\mathcal P(\emptyset): \Ind$ & The powerset of the empty set \\\hline + $(21 + 6) / 3: \Ind$ & An arithmetic expression denoting an individual \\\hline + $\Ind \rightarrow \Ind \rightarrow \Prop$ & The sort of predicates of arity 2.
\\\hline + $\in: \Ind \rightarrow \Ind \rightarrow \Prop$ & The membership predicate \\\hline + $3 \in \mathbb N : \Prop$ & The formula stating that 3 is a natural number \\\hline + $\Prop \rightarrow \Prop \rightarrow \Prop$ & The sort of connectors of arity 2.\\\hline + $\land: \Prop \rightarrow \Prop \rightarrow \Prop$ & The conjunction connector \\\hline + $\bot \land 3 \in \mathbb N: \Prop$ & A formula \\\hline + + $\lambda x. x^2+1: \Ind \rightarrow \Ind$ & The functional mapping $x$ to $x^2+1$ \\\hline + $\lambda x. x = f(x): \Ind \rightarrow \Prop$ & The predicate mapping $x$ to whether $x$ is a fixpoint of $f$ \\\hline + \end{tabularx} \end{example} -In this document, as well as in the code documentation, we often write terms and formulas in a more conventional way, generally hiding the arity of labels and representing the label with its identifier only, preceded by an apostrophe (\lstinline|`|) if we need to precise that a symbol is schematic. When the arity is relevant, we write it with a superscript, for example: -% -\begin{gather*} - f^3(x,y,z) \equiv \operatorname{Fun}(f, 3)(\List(\Var(x), \Var(y), \Var(z)))~, -\end{gather*} -% -and -% -\begin{gather*} - \forall x. \phi \equiv \operatorname{Binder}(\forall, \Var(x), \phi)~. -\end{gather*} +\paragraph{Convention} Throughout this document, and in the code base, we adopt the following conventions: We use $e$, $e_1$, $e_2$ to denote arbitrary expressions. +We use $r$, $s$, $t$, $u$ to denote arbitrary terms, $a$, $b$, $c$ to denote constants of sort \Ind, $x$, $y$, $z$ to denote variables of sort \Ind, and $f$, $g$, $h$ to denote constants or variables of sort $\Ind \rightarrow ... \rightarrow \Ind$. -We also use other usual representations such as symbols in infix position, omitting parenthesis according to usual precedence rules, etc. +We use Greek letters such as $\phi$, $\psi$, $\tau$ to denote arbitrary formulas and $X$, $Y$, $Z$ to denote variables of sort \Prop. We use $P$, $Q$, $R$ to denote constants and variables of sort $\Ind \rightarrow ... \rightarrow \Prop$. Sets or sequences of formulas are denoted with capital Greek letters $\Pi$, $\Sigma$, $\Gamma$, $\Delta$, etc. -Finally, note that we use subscripts to emphasize that a variable is possibly free in a given term or formula: +$\equiv$ represents both $=$ and $\iff$, depending on whether the arguments are terms or formulas. -\begin{gather*} - t_{x,y,z}, ~\phi_{x,y,z}~. -\end{gather*} +\subsection{Capture-Avoiding Substitution and Beta-Reduction} +An important operation on expressions is the substitution of variables by expressions of the same sort. +\begin{definition}[Capture-avoiding Substitution of variables] + Given a base expression $t$, a variable $x: A$ and another expression $e: A$, the substitution of $x$ by $e$ inside $t$ is denoted by $t[x := e]$ and is computed by replacing all free occurrences of $x$ in $t$ by $e$. + + Formally: + \begin{align*} + (x)[x := e] \equiv\quad& e & \\ + (y)[x := e] \equiv\quad& y & \text{ if } x \neq y \\ + (c)[x := e] \equiv\quad& c & \text{ if } c \text{ is a constant} \\ + (e_1(e_2))[x := e] \equiv\quad& (e_1[x := e])(e_2[x := e]) & \\ + (\lambda x. e_1)[x := e] \equiv\quad& \lambda x. e_1 & \\ + (\lambda y. e_1)[x := e] \equiv\quad& \lambda y. e_1[x := e] &\text{ if } x \neq y \text{ and } y \notin \text{free}(e) \\ + (\lambda y. e_1)[x := e] \equiv\quad& \lambda z. e_1[y := z][x := e] &\text{ otherwise, with $z$ fresh} \\ + \end{align*} + This is called \textit{capture-avoiding substitution}, because the last two lines ensure that the free variables of $e$ stay free, independently of the names of the bound variables.
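+ For instance, substituting $x$ by $y$ in $\lambda y. x + y$ requires the last rule: naively replacing $x$ would yield $\lambda y. y + y$, in which the substituted $y$ is captured by the binder, whereas capture-avoiding substitution first renames the bound variable and gives $(\lambda y. x + y)[x := y] \equiv \lambda z. y + z$, with $z$ fresh.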
+\end{definition} + +Applications of an abstraction to an argument \textit{beta-reduce}, as usual in lambda calculus. For example, +$$(\lambda x. x^2+1)(3) \leadsto 3^2+1$$ +The simply typed lambda calculus is strongly normalizing and confluent (the Church-Rosser theorem): when we keep applying such reductions, we eventually reach a unique normal form that cannot be further reduced. This is called the \textit{beta normal form}. Two expressions with the same beta normal form are called \textit{beta-equivalent}. Moreover, if two expressions are identical up to renaming of bound variables, the expressions are called \textit{alpha-equivalent}. In Lisa, expressions whose beta normal forms are alpha-equivalent are considered logically the same. Note however that their representations as data structures may not be the same, and this can influence the behaviour of programs acting on them. -\paragraph{Convention} Throughout this document, and in the code base, we adopt the following conventions: We use $r$, $s$, $t$, $u$ to denote arbitrary terms, $a$, $b$, $c$ to denote constant term symbols of arity $0$ and $f$, $g$, $h$ to denote term symbols of non-0 arity. We precede those with an apostrophe, such as $`f$ to denote schematic symbols. We also use $x$, $y$, $z$ to denote variables, i.e. schematic terms of arity $0$. -For formulas, we use greek letters such as $\phi$, $\psi$, $\tau$ to denote arbitrary formulas, and $X$, $Y$, $Z$ to denote formula variables. We use capital letters like $P$, $Q$, $R$ to denote predicate symbols, preceding them similarly with an apostrophe for schematic predicates. Schematic connectors are rarer, but when they appear, we use for example $`C$. Sets or sequences of formulas are denoted with capital greek letters $\Pi$, $\Sigma$, $\Gamma$, $\Delta$, etc. \subsection{Substitution} \label{subsec:substitution} @@ -192,103 +182,38 @@ \subsection{Substitution} \end{definition}
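+To make the substitution and beta-reduction operations described above concrete, the following is a small, self-contained Scala sketch of capture-avoiding substitution and of a single root beta-reduction step on a toy, untyped expression datatype. It is only an illustration: the names used here (\lstinline|Var|, \lstinline|Abs|, \lstinline|subst|, \lstinline|betaStep|) are assumptions of this sketch and do not correspond to the datatypes or API of Lisa's kernel.
+\begin{lstlisting}[language=scala]
+object SubstitutionSketch:
+  sealed trait Expr
+  case class Var(name: String) extends Expr
+  case class Cst(name: String) extends Expr
+  case class App(fun: Expr, arg: Expr) extends Expr
+  case class Abs(v: String, body: Expr) extends Expr
+
+  // Free variables of an expression.
+  def free(e: Expr): Set[String] = e match
+    case Var(n)    => Set(n)
+    case Cst(_)    => Set.empty
+    case App(f, a) => free(f) ++ free(a)
+    case Abs(v, b) => free(b) - v
+
+  private var counter = 0
+  def freshName(): String = { counter += 1; s"z$counter" }
+
+  // Capture-avoiding substitution e[x := r].
+  def subst(e: Expr, x: String, r: Expr): Expr = e match
+    case Var(`x`)        => r
+    case Var(_) | Cst(_) => e
+    case App(f, a)       => App(subst(f, x, r), subst(a, x, r))
+    case Abs(`x`, _)     => e // x is shadowed: nothing to substitute below
+    case Abs(y, b) if !free(r).contains(y) => Abs(y, subst(b, x, r))
+    case Abs(y, b) => // y would capture a free variable of r: rename it first
+      val z = freshName()
+      Abs(z, subst(subst(b, y, Var(z)), x, r))
+
+  // One beta-reduction step at the root: (λx. b)(a) reduces to b[x := a].
+  def betaStep(e: Expr): Expr = e match
+    case App(Abs(x, b), a) => subst(b, x, a)
+    case other             => other
+\end{lstlisting}
+For example, \lstinline|betaStep(App(Abs("x", App(Cst("f"), Var("x"))), Cst("c")))| evaluates to \lstinline|App(Cst("f"), Cst("c"))|, mirroring $(\lambda x. f(x))(c) \leadsto f(c)$.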
-% As the definition of such substitution is rather convoluted to describe, we prefer to show examples and redirect the reader to the source code of Lisa for a technical definition. \footnote{Note that in lambda calculus, this would simply be iterated beta-reduction.} -The substitution is defined in a manner similar to that of variable substitution with the base case -% -\begin{equation*} - `f(s_1, s_2, \ldots, s_n)[`f := \lambda y_1.y_2.\ldots.y_n. t] \equiv t[y_1 := s_1][y_2 := s_2][\ldots][y_n := s_n]~, -\end{equation*} -% -where no $y_i$ is free in any $s_j$. Otherwise, the lambda term is renamed to an alpha-equivalent term with fresh variable names. - -\begin{example}[Functional terms substitution in terms] +\begin{example}[Combined substitution and beta-reduction] \phantom{ } \begin{center} \begin{tabular}{|c|r c l|c|} \noalign{\vspace{0.5em}} \hline Base term & \multicolumn{3}{c|}{Substitution} & Result \\ \hline - $`f(0, 3)$ & $`f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $0+3$ \\ - $`f(0, 3)$ & $`f$ & $\rightarrow$ & $\lambda y.x. x-y$ & $3-0$ \\ - $`f(0, 3)$ & $`f$ & $\rightarrow$ & $\lambda x.y. y+y-10$ & $3+3-10$ \\ - $10 \times {`g(x)}$ & $`g$ & $\rightarrow$ & $\lambda x. x^2$ & $10 \times x^2$ \\ - $10 \times {`g(50)}$ & $`g$ & $\rightarrow$ & $\lambda x. `f(x+2, z)$ & $10 \times {`f(50+2, z)}$ \\ - $`f(x, x+y)$ & $`f$ & $\rightarrow$ & $\lambda x.y. \cos(x-y)*y$ & $\cos(x-(x+y))*(x+y)$ \\ - \hline - \end{tabular} - \end{center} -\end{example} + $f(0, 3)$ & $f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $0+3$ \\ + $f(0, 3)$ & $f$ & $\rightarrow$ & $\lambda y.x. x-y$ & $3-0$ \\ + $f(0, 3)$ & $f$ & $\rightarrow$ & $\lambda x.y. y+y-10$ & $3+3-10$ \\ + $10 \times {g(x)}$ & $g$ & $\rightarrow$ & $\lambda x. x^2$ & $10 \times x^2$ \\ + $10 \times {g(50)}$ & $g$ & $\rightarrow$ & $\lambda x. f(x+2, z)$ & $10 \times {f(50+2, z)}$ \\ + $f(x, x+y)$ & $f$ & $\rightarrow$ & $\lambda x.y. \cos(x-y)*y$ & $\cos(x-(x+y))*(x+y)$ \\ + $f(0, 3) = f(x, x)$ & $f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $0+3 = x+x$ \\ + $\forall x. f(0, 3) = f(x, x)$ & $f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $\forall x. 0+3 = x+x$ \\ - -The definition extends naturally to substitution of schematic terms inside formulas, with capture free substitution for bound variables. For example: - -\begin{example}[Functional terms substitution in formulas] - \begin{center} - \begin{tabular}{|c|r c l|c|} - \noalign{\vspace{0.5em}} - \hline - Base formula & \multicolumn{3}{c|}{Substitution} & Result \\ - \hline - $`f(0, 3) = `f(x, x)$ & $`f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $0+3 = x+x$ \\ - $\forall x. `f(0, 3) = `f(x, x)$ & $`f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $\forall x. 0+3 = x+x$ \\ - - $\exists y. `f(y) \leq `f(5)$ & $`f$ & $\rightarrow$ & $\lambda x. x+y$ & $\exists y_1. y_1+y \leq 5+y$ \\ + $\exists y. f(y) \leq f(5)$ & $f$ & $\rightarrow$ & $\lambda x. x+y$ & $\exists y_1. y_1+y \leq 5+y$ \\ \hline \end{tabular} \end{center} \end{example} -Note that if the lambda expression contains free variables (such as $y$ in the last example), then appropriate alpha-renaming of bound variables may be needed. - -We similarly define functional formulas, except that these can take either term arguments of formulas arguments. For example, we use $\LambdaTF$ to indicate functional expressions that take terms as arguments and return a formula. Similarly, we also have $\LambdaTT$ and $\LambdaFF$.
- -% TODO: Fix this table's hbox -\begin{example}[Typical functional expressions] - \begin{center} - \begin{tabular}{|r l|} - \hline - \rule{0em}{1.3em} - $\LambdaTT(x, y)(x+y)$ & $=$ $\lambda x.y. x+y$ \\ - $\LambdaTF(x, y)(x=y)$ & $=$ $\lambda x.y. x=y$ \\ - $\LambdaFF(X, Y)(X \land Y)$ & $=$ $\lambda X.Y. X \land Y$ - \rule[-1em]{0em}{0em} \\ - \hline - \end{tabular} - \end{center} - -\end{example} - -Note that in the last case, $X$ and $Y$ are $\FormulaVar$. Substitution of functional formulas is completely analogous to (capture free!) substitution of functional terms. Note that there is no expression representing a function taking formulas as arguments and returning a term. - \subsection{The Equivalence Checker} \label{subsec:equivalencechecker} -While proving theorems, trivial syntactical transformations such as $p\land q \equiv q\land p$ significantly increase the length of proofs, which is desirable neither to the user nor the machine. Moreover, the proof checker will very often have to check whether two formulas that appear in different sequents are the same. Hence, instead of using pure syntactical equality, Lisa implements a powerful equivalence checker able to detect a class of equivalence-preserving logical transformations. For example, we would like the formulas $p\land q$ and $q\land p$ to be naturally treated as equivalent. +While proving theorems, trivial syntactical transformations such as $p\land q \equiv q\land p$ increase the length of proofs, which is desirable neither to the user nor the machine. Moreover, the proof checker will very often have to check whether two formulas that appear in different sequents are the same. Hence, instead of using pure syntactical equality, Lisa implements an equivalence checker able to detect a class of equivalence-preserving logical transformations. For example, we would like the formulas $p\land q$ and $q\land p$ to be naturally treated as equivalent. For soundness, the relation decided by the algorithm should be contained in the $\iff$ ``if and only if'' relation of first order logic. However, it is well known that this relationship is in general undecidable, and even the $\iff$ relation for propositional logic is coNP-complete. For practicality, we need a relation that is efficiently computable. -The decision procedure implemented in Lisa takes time quadratic in the size of the formula, which means that it is not significantly slower than syntactic equality. -It is based on an algorithm that decides the word problem for Ortholattices \cite{guilloudFormulaNormalizationsVerification2023}. -Ortholattices are a generalization of Boolean algebra where instead of the law of distributivity, the weaker absorption law (L9, \autoref{tab:ortholatticeLaws}) holds. In particular, every identity in the theory of ortholattices is also a theorem of propositional logic. +Orthologic is such a relation: it is a weaker theory than classical logic, because it does not include the distributivity law of $\land$ and $\lor$, but it admits a quadratic-time normalization algorithm for propositional formulas \cite{guilloudFormulaNormalizationsVerification2023}. The structure underlying orthologic (its Lindenbaum algebra) is that of ortholattices, similar to the relationship between classical logic and Boolean algebra. The laws of orthologic are shown in \autoref{tab:ortholatticeLaws}.
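+For illustration, the checker accepts standard ortholattice identities such as $\neg\neg \phi \sim_\OL \phi$, $\phi \land \psi \sim_\OL \psi \land \phi$ and the absorption law $\phi \land (\phi \lor \psi) \sim_\OL \phi$, but it does not identify $\phi \land (\psi \lor \tau)$ with $(\phi \land \psi) \lor (\phi \land \tau)$, since distributivity does not hold in ortholattices.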
+ \begin{table}[bth] \centering \begin{tabular}{r c @{\hskip 2em} | @{\hskip 2em} r c} @@ -321,12 +246,12 @@ \subsection{The Equivalence Checker} \end{theorem} Moreover, the algorithm works with structure sharing with the same complexity, which is very relevant for example when $x \leftrightarrow y$ is expanded to $(x \land y) \lor (\neg x \land \neg y)$. It can produce a normal form in this case as well. -Lisa's Kernel contains an algorithm, called the $\FOLalg{}$ Equivalence Checker which further extends OL inequality algorithm to first order logic formulas. It first expresses the formula using de Bruijn indices, then desugars $\exists. \phi$ into $\neg \forall. \neg \phi$. It then extends the OL algorithm with the rules in \autoref{tab:Olextension}. +Lisa's kernel contains a generalization of this algorithm to \lambdafol, which also includes additional reasoning rules. It first beta-normalizes expressions, expresses the formula using de Bruijn indices, and desugars $\exists. \phi$ into $\neg \forall. \neg \phi$, $\phi\Leftrightarrow\psi$ into $(\phi\Rightarrow\psi)\land(\psi\Rightarrow\phi)$, and $\phi\Rightarrow\psi$ into $\neg \phi \lor \psi$. It then applies OL normalization, with the additional rules of \autoref{tab:Olextension}. \begin{table}[ht] \centering \begin{tabular}{c | l | l} - & To decide... & Reduce to... \\ + & To decide... & Try... \\ \hline 1 & $\lbrace \land, \lor, \rightarrow, \leftrightarrow, \neg \rbrace(\vec{\phi}) \leq \psi $ & Base algorithm \\ 2 & $\phi \leq \lbrace \land, \lor, \rightarrow, \leftrightarrow, \neg \rbrace(\vec{\psi}) $ & Base algorithm \\ @@ -334,7 +259,7 @@ \subsection{The Equivalence Checker} 4 & $\phi \leq t_1 = t_2$ & $t_1 == t_2$ \\ %(s_1 \sim_\Ol t_1 \& s_2 \sim_\Ol t_2) || (s_1 \sim_\Ol t_2 \& s_2 \sim_\Ol t_1) 5 & $\forall. \phi \leq \forall. \psi$ & $\phi \leq \psi$ \\ - 6 & $\schem{C}(\phi_1,...,\phi_n) \leq \schem{C}(\psi_1,...,\psi_n)$ & $\phi_i \sim_\OL \psi_i$, for every $1 \le i \le n$ \\ + 6 & $C(\phi_1,...,\phi_n) \leq C(\psi_1,...,\psi_n)$ & $\phi_i \sim_\OL \psi_i$, for every $1 \le i \le n$ \\ 7 & Anything else & \lstinline|false| \end{tabular} @@ -343,14 +268,12 @@ \subsection{The Equivalence Checker} \label{tab:Olextension}} \end{table} - -In particular, the implementation in Lisa also takes into account symmetry and reflexivity of equality as well as alpha-equivalence, by which we mean renaming of bound variables. It also expresses $\rightarrow$ and $\leftrightarrow$ in terms of $\lor$ and $\land$. A more detailed discussion of extension of ortholattices to first-order logic, proof of correctness and implementation details can be found in \cite{guilloudFormulaNormalizationsVerification2023} and \cite{guilloudLISAModernProof2023}. -\section{Proofs in Sequent Calculus} +\section{Proofs in Sequent Calculus for \lambdafol} \label{sec:proofs_lk} \subsection{Sequent Calculus} \label{subsec:lk} @@ -456,23 +379,23 @@ \subsection{Sequent Calculus} \DisplayProof \\[5ex] - \AxiomC{$\Gamma, \phi[\schem{x} := t] \vdash \Delta$} + \AxiomC{$\Gamma, \phi[x := t] \vdash \Delta$} \RightLabel{\text { LeftForall}} - \UnaryInfC{$\Gamma, \forall \schem{x}. \phi \vdash \Delta$} + \UnaryInfC{$\Gamma, \forall x. \phi \vdash \Delta$} \DisplayProof & \AxiomC{$\Gamma \vdash \phi, \Delta$} \RightLabel{\text { RightForall}} - \UnaryInfC{$\Gamma \vdash \forall \schem{x}. \phi, \Delta$} + \UnaryInfC{$\Gamma \vdash \forall x.
\phi, \Delta$} \DisplayProof \\[5ex] \AxiomC{$\Gamma, \phi \vdash \Delta$} \RightLabel{\text { LeftExists}} - \UnaryInfC{$\Gamma, \exists \schem{x}. \phi \vdash \Delta$} + \UnaryInfC{$\Gamma, \exists x. \phi \vdash \Delta$} \DisplayProof & - \AxiomC{$\Gamma \vdash \phi[\schem{x} := t], \Delta$} + \AxiomC{$\Gamma \vdash \phi[x := t], \Delta$} \RightLabel{\text { RightExists}} - \UnaryInfC{$\Gamma \vdash \exists \schem{x}. \phi, \Delta$} + \UnaryInfC{$\Gamma \vdash \exists x. \phi, \Delta$} \DisplayProof \end{tabular} \end{center} @@ -485,7 +408,16 @@ \subsection{Sequent Calculus} \scalebox{.9}{ \begin{minipage}{\textwidth} \begin{center} - \begin{tabular}{l l} + \begin{tabular}{l r} + + \multicolumn{2}{c}{ + \AxiomC{$\Gamma \vdash \phi[x:=t], \Delta$} + \RightLabel{\text { RightEpsilon}} + \UnaryInfC{$\Gamma \vdash \phi[x:=(\epsilon x. \phi)], \Delta$} + \DisplayProof + } + \\[5ex] + \multicolumn{2}{c}{ \AxiomC{$\Gamma, \exists y \forall x. (x=y) \leftrightarrow \phi \vdash \Delta$} \RightLabel{\text { LeftExistsOne}} @@ -505,39 +437,23 @@ \subsection{Sequent Calculus} \multicolumn{2}{c}{ \AxiomC{$\Gamma \vdash \Delta$} \RightLabel{\text{ InstSchema}} - \UnaryInfC{$\Gamma[\psi(\vec{v}) := {\schem{p}(\vec{v})}] \vdash \Delta[\psi(\vec{v}) := {\schem{p}(\vec{v})}]$} + \UnaryInfC{$\Gamma[(x:A) := (e:A)] \vdash \Delta[(x:A) := (e:A)]$} \DisplayProof } \\[5ex] \multicolumn{2}{c}{ - \AxiomC{$\Gamma, \phi[\schem{f} := s] \vdash \Delta$} + \AxiomC{$\Gamma, \phi[f := s] \vdash \Delta$} \RightLabel{\text{ LeftSubstEq}} - \UnaryInfC{$\Gamma, \forall \vec x. s(\vec x) = t(\vec x), \phi[\schem{f} := t] \vdash \Delta$} + \UnaryInfC{$\Gamma, \forall \vec x. s(\vec x) \equiv t(\vec x), \phi[f := t] \vdash \Delta$} \DisplayProof } \\[5ex] \multicolumn{2}{c}{ - \AxiomC{$\Gamma \vdash \phi[\schem{f} := s], \Delta$} + \AxiomC{$\Gamma \vdash \phi[f := s], \Delta$} \RightLabel{\text{ RightSubstEq}} - \UnaryInfC{$\Gamma, \forall \vec x. s(\vec x) = t(\vec x) \vdash \phi[\schem{f} := t], \Delta$} - \DisplayProof - } - \\[5ex] - - \multicolumn{2}{c}{ - \AxiomC{$\Gamma, \phi[{\schem{p}} := a] \vdash \Delta$} - \RightLabel{\text{ LeftSubstIff}} - \UnaryInfC{$\Gamma, \forall \vec x. a(\vec x) \leftrightarrow b(\vec x), \phi[{\schem{p}} = b] \vdash \Delta$} - \DisplayProof - } - \\[5ex] - - \multicolumn{2}{c}{ - \AxiomC{$\Gamma \vdash \phi[{\schem{p}} := a], \Delta$} - \RightLabel{\text{ RightSubstIff}} - \UnaryInfC{$\Gamma, \forall \vec x. a(\vec x) \leftrightarrow b(\vec x) \vdash \phi[{\schem{p}} := b], \Delta$} + \UnaryInfC{$\Gamma, \forall \vec x. s(\vec x) \equiv t(\vec x) \vdash \phi[f := t], \Delta$} \DisplayProof } \\[5ex] @@ -675,7 +591,7 @@ \subsection{Proofs} \label{fig:exampleProofQuantifiers} \end{figure} -For every proof step, Lisa's kernel actually expects more than only the premises and conclusion of the rule. The proof step also contains some parameters indicating how the deduction rule is precisely applied. This makes proof checking much simpler, and hence more trustworthy. Outside the kernel, Lisa includes tactic which will infer such parameters automatically (see \autoref{sec:tactics}), so that in practice the user never has to write them. +For every proof step, Lisa's kernel actually expects more than just the premises and conclusion of the rule. The proof step also contains some parameters indicating how the deduction rule is precisely applied. This makes proof checking much simpler, and hence more trustworthy.
Outside the kernel, Lisa includes tactics which will infer such parameters automatically (see \autoref{chapt:tactics}), so that in practice the user never has to write them. \autoref{fig:ExampleProofPierceScala} shows how a kernel proof is written in scala. \begin{figure}[ht] @@ -686,7 +602,8 @@ \subsection{Proofs} Weakening( φ |- ( φ, ψ ), 0), RightImplies(() |- ( φ, φ ==> ψ ), 1, φ, ψ) LeftImplies(( φ ==> ψ ) ==> φ |- φ, 2, 0, φ ==> ψ, φ), - RightImplies(() |- (( φ ==> ψ ) ==> φ) ==> φ, 3, ( φ ==> ψ ) ==> φ, φ) + RightImplies(() |- (( φ ==> ψ ) ==> φ) ==> φ, + 3, ( φ ==> ψ ) ==> φ, φ) ), Seq.empty /* no imports */ ) \end{lstlisting} \caption{The proof from~\autoref{fig:exampleProof} written for Lisa's kernel. The second argument (empty here) is the sequence of proof imports. The symbols \lstinline|==>| and \lstinline||-| are ligatures for ==> and |- and are syntactic sugar defined outside the kernel.} @@ -699,7 +616,9 @@ \subsection{Proofs} SCSubproof(sp: SCProof, premises: Seq[Int]) \end{lstlisting} The first argument contain a sequent calculus proof, with one conclusion and arbitrarily many \textit{imports}. The second arguments must justify all the imports of the inner proof with previous steps of the outer proof. -A Subproof only has an organizational purpose and allows to more easily write tactics. In particular, the numbering of proof steps in the inner proof is independent of the location of the subproof step in the outer proof. More will be said about proof tactics in \autoref{sec:tactics}. +A Subproof only has an organizational purpose and makes it easier to write tactics (see \autoref{chapt:tactics}). In particular, the numbering of proof steps in the inner proof is independent of the location of the subproof step in the outer proof. + +\paragraph*{Sorry} \subsection{Proof Checker} \label{subsec:proofchecker} diff --git a/refman/lisa.pdf b/refman/lisa.pdf index a0cf24de6..8df90e74f 100644 Binary files a/refman/lisa.pdf and b/refman/lisa.pdf differ diff --git a/refman/macro.tex b/refman/macro.tex index 4a2bf14ec..caf936d0d 100644 --- a/refman/macro.tex +++ b/refman/macro.tex @@ -48,6 +48,17 @@ \newcommand*{\definitionautorefname}{Definition} +\newcommand*{\listfigurenameautorefname}{Listing} + + +% Shortcuts +\newcommand{\Prop}{\text{Prop}} +\newcommand{\Ind}{\text{Ind}} +\newcommand{\lambdafol}{$\lambda$-FOL} + + + + % Code fonts %\usepackage{lstfiracode} diff --git a/refman/prooflib.tex b/refman/prooflib.tex index 47e372f8d..59e81f538 100644 --- a/refman/prooflib.tex +++ b/refman/prooflib.tex @@ -6,15 +6,106 @@ \chapter{Developping Mathematics with Prooflib} \autoref{fig:theoryFileExample} is a reminder from \autoref{chapt:quickguide} of the canonical way to write a theory file in Lisa. \begin{figure} -\lisaCode{src/MyTheoryName.scala}{An example of a theory file in Lisa}{firstline=3} -\label{fig:theoryFileExample} +\lisaCode{src/MyTheoryName.scala}{An example of a theory file in Lisa\label{fig:theoryFileExample}}{firstline=3} \end{figure} In this chapter, we will describe how each of these construct is made possible and how they translate to statements in the Kernel. \section{Richer FOL} +The syntax of Prooflib is similar to the syntax of Lisa's kernel, but the \textit{Sorts}, such as Ind and Prop, are reflected in Scala's type system, so that well-sortedness is checked at compile time, which also provides more detailed documentation and features. Prooflib's syntax also supports custom printing, such as infix notation, special handling for binders, and more.
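+For instance, assuming the \lstinline|variable| and \lstinline|constant| helpers introduced in the next subsection, an ill-sorted application is rejected by the Scala compiler rather than at proof-checking time; the following snippet is purely illustrative:
+\begin{lstlisting}[language=scala]
+val x = variable[Ind]           // an Expr[Ind]
+val p = constant[Ind >>: Prop]  // an Expr[Ind >>: Prop]
+
+p(x)      // well-sorted: has type Expr[Prop]
+// p(p(x))   does not compile: Expr[Prop] is not an Expr[Ind]
+\end{lstlisting}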
+\subsection{Sorts and Expressions} + +\begin{definition}[Sorts]\phantom{.} + \begin{lstlisting}[language=scala] +trait Ind +trait Prop +infix trait >>:[I, O] + \end{lstlisting} + +\end{definition} + +\begin{definition}[Expressions] + Expressions in Prooflib always correspond to an underlying expression in Lisa's kernel, which can be accessed using \lstinline|myExpr.underlying|. Expressions are always \textit{sorted}, and this sort is reflected in their Scala type. + \begin{lstlisting}[language=scala] +trait Expr[S] +case class Variable[S](name: String) extends Expr[S] +case class Constant[S](name: String) extends Expr[S] +case class App[S, T](f: Expr[S >>: T], arg: Expr[S]) extends Expr[T] +case class Abs[S, T](v: Variable[S], body: Expr[T]) extends Expr[S >>: T] + \end{lstlisting} +\end{definition} +\noindent +Expressions are usually built with the following helpers: +\begin{example}\phantom{.} + + \begin{lstlisting}[language=scala] +val x = variable[Ind] // the name "x" is used automatically +val c = variable[Ind] +val ∈ = constant[Ind >>: Ind >>: Prop] +val f = variable[(Ind >>: Prop) >>: Ind] + +x ∈ c : Expr[Prop] +lambda(x, x ∈ c) : Expr[Ind >>: Prop] +f(lambda(x, x ∈ c)) : Expr[Ind] + \end{lstlisting} + +\end{example} + +Expressions also support substitutions. + +\begin{definition}[Substitution]\phantom{.} + Substitutions are most often performed with \lstinline|SubstPair|s, which guarantee well-sortedness. + \begin{lstlisting}[language=scala] +trait SubstPair extends Product: + type S + val _1: Variable[S] + val _2: Expr[S] + +(x := f(∅)) : SubstPair +g(x, y).subst(x := f(∅), y := x) // == g(f(∅), x) + \end{lstlisting} +\noindent + but can also be performed unsafely, when sorts are not necessarily known: + \begin{lstlisting}[language=scala] +// if ill-sorted, may crash in unpredictable ways. +myExpr.substituteUnsafe(Map(x -> s, y -> t)) + +// with a runtime sanity check for well-sortedness +myExpr.substituteWithCheck(Map(x -> s, y -> t)) + \end{lstlisting} +\end{definition} + +\subsection{Sequents} + +Expressions are combined into sequents, which again have an underlying sequent in the kernel. + + +\begin{definition}[Sequents] + Sequents are formally pairs of sets of \lstinline|Expr[Prop]|. + \begin{lstlisting}[language=scala] +case class Sequent(left: Set[Expr[Prop]], right: Set[Expr[Prop]]) + \end{lstlisting} + \noindent + Sequents can be built from formulas and collections of formulas: + \begin{lstlisting}[language=scala] +val s1 = (x ∈ c) |- (f(x) ∈ f(c)) +val s2 = () |- (f(x) ∈ f(c)) +val s3 = (x ∈ c) |- () +val s4 = Set(x ∈ c, y ∈ c) |- Set(x = y, x ∈ y) +val s5 = assumptions |- (x = c) + \end{lstlisting} + The logical semantics of sequents is the same as in the kernel, i.e. a sequent is valid if and only if the conjunction of its left side implies the disjunction of its right side. + However, it is usually discouraged to have multiple formulas on the right side of a sequent in theorems and lemmas, as it makes them harder to understand. Using multiple formulas on the right side of a sequent is however allowed in intermediate steps of a proof and in proof tactics.
+ + Sequents, like expressions, support substitutions: + \begin{lstlisting}[language=scala] +val s = Sequent(Set(x ∈ c), Set(f(x) ∈ f(c))) +s.substitute(x := g(∅)) + // == Sequent(Set(g(∅) ∈ c), Set(f(g(∅)) ∈ f(c))) + \end{lstlisting} +\end{definition} \section{Proof Builders} @@ -27,20 +118,20 @@ \subsection{Instantiations} \subsection{Local Definitions} \label{sec:localDefinitions} The following line of reasoning is standard in mathematical proofs. Suppose we have already proven the following fact: -$$\exists x. P(x)$$ +$$∃ x. P(x)$$ And want to prove the property $\phi$. A proof of $\phi$ using the previous theorem would naturally be obtained the following way: \begin{quotation} - Since we have proven $\exists x. P(x)$, let $c$ be an arbitrary value such that $P(c)$ holds. + Since we have proven $∃ x. P(x)$, let $c$ be an arbitrary value such that $P(c)$ holds. Hence we prove $\phi$, using the fact that $P(c)$: (...). \end{quotation} However, introducing a definition locally corresponding to a statement of the form -$$\exists x. P(x)$$ +$$∃ x. P(x)$$ is not a built-in feature of first order logic. This can however be simulated by introducing a fresh variable symbol $c$, that must stay fresh in the rest of the proof, and the assumption $P(c)$. The rest of the proof is then carried out under this assumption. When the proof is finished, the end statement should not contain $c$ free as it is a \textit{local} definition, and the assumption can be eliminated using the LeftExists and Cut rules. Such a $c$ is called a \textit{witness}. -Formally, the proof in (...) is a proof of $P(c) \vdash \phi$. This can be transformed into a proof of $\phi$ by mean of the following steps: +Formally, the proof in (...) is a proof of $P(c) ⊢ \phi$. This can be transformed into a proof of $\phi$ by means of the following steps: \begin{center} - \AxiomC{$P(c) \vdash \phi$} - \UnaryInfC{$\exists x. P(x) \vdash \phi$} + \AxiomC{$P(c) ⊢ \phi$} + \UnaryInfC{$\exists x. P(x) ⊢ \phi$} \RightLabel{\text { LeftExists}} \AxiomC{$\exists x. P(x)$} \RightLabel{\text { Cut}} @@ -48,7 +139,7 @@ \subsection{Local Definitions} \end{center} Not that for this step to be correct, $c$ must not be free in $\phi$. This correspond to the fact that $c$ is an arbitrary free symbol. -This simulation is provided by Lisa through the \lstinline|witness|{} method. It takes as argument a fact showing $\exists x. P(x)$, and introduce a new symbol with the desired property. For an example, see figure \ref{fig:localDefinitionExample}. +This simulation is provided by Lisa through the \lstinline|witness|{} method. It takes as argument a fact showing $∃ x. P(x)$, and introduces a new symbol with the desired property. For an example, see figure \ref{fig:localDefinitionExample}. \begin{figure} \begin{lstlisting}[language=lisa, frame=single] @@ -84,7 +175,7 @@ \subsection{Instantiations with ``of''} have(thesis) by RightAnd(ax of c, ax of d) } \end{lstlisting} -Here, \lstinline|ax of c| is a fact whose proven statement is again $P(c)$. It is possible to instantiate multiple $\forall$ quantifiers at once. For example if \lstinline|ax| is an axiom of the form $\forall x, \forall y, P(x, y)$, then \lstinline|ax of (c, d)| is a fact whose proven statement is $P(c, d)$. It is also possible to combine instantiation of free symbols and quantified variables. For example, if \lstinline|ax| is an axiom of the form $\forall x, \forall y, P(x, y)$, then \lstinline|ax of (c, y, P := ===)| is a fact whose proven statement is $(c = y)$.
+Here, \lstinline|ax of c| is a fact whose proven statement is again $P(c)$. It is possible to instantiate multiple $\forall$ quantifiers at once. For example, if \lstinline|ax| is an axiom of the form $∀ x, ∀ y, P(x, y)$, then \lstinline|ax of (c, d)| is a fact whose proven statement is $P(c, d)$. It is also possible to combine instantiation of free symbols and quantified variables. For example, if \lstinline|ax| is an axiom of the form $∀ x, ∀ y, P(x, y)$, then \lstinline|ax of (c, y, P := ===)| is a fact whose proven statement is $(c = y)$. Formally, the \lstinline|of| keyword takes as argument arbitrarily many terms and substitution pairs. If there is at least one term given as argument, the base fact must have a single universally quantified formula on the right (an arbitrarily many formulas on the left). The number of given terms must be at most the number of leading universal quantifiers. Moreover, a substitution cannot instantiate any locked symbol (i.e. a symbol part of an assumption or definition). The ordering of substitution pairs does not matter, but the ordering of terms does. The resulting fact is obtained by first replacing the free symbols in the formula by the given substitution pairs, and then instantiating the quantified variables in the formula by the given terms diff --git a/refman/src/MyTheoryName.scala b/refman/src/MyTheoryName.scala index b2a48eb45..83f0d7451 100644 --- a/refman/src/MyTheoryName.scala +++ b/refman/src/MyTheoryName.scala @@ -1,10 +1,10 @@ //> using scala 3.5.1 //> using jar "../../../lisa/target/scala-3.5.1/lisa-assembly-0.7.jar" object MyTheoryName extends lisa.Main: - val x = variable - val y = variable - val f = function[1] - val P = predicate[1] + val x = variable[Ind] + val y = variable[Ind] + val f = function[Ind >>: Ind] + val P = predicate[Ind >>: Prop] val fixedPointDoubleApplication = Theorem( ∀(x, P(x) ==> P(f(x))) |- P(x) ==> P(f(f(x)))