init
commit bcc9d79d34
@@ -0,0 +1,18 @@
{
    "name": "LaTeX dev container",
    "image": "sjkillen/aft-may25-2023",
    // "build": {
    //     "context": "..",
    //     "dockerfile": "../Dockerfile"
    // },
    "customizations": {
        "vscode": {
            "extensions": [
                "James-Yu.latex-workshop",
                "torn4dom4n.latex-support",
                "mike-lischke.vscode-antlr4",
                "ms-python.python"
            ]
        }
    }
}
@@ -0,0 +1,3 @@
__pycache__
.antlr
antlr4.jar
@@ -0,0 +1,218 @@
"""
Parse a knowledge base and generate an AST
Can be run directly with a program as standard input to view/debug the AST

usage: python AST.py knowledge_bases/simple.hmknf
OR
usage: python AST.py < knowledge_bases/simple.hmknf
"""


from itertools import count, product
from string import ascii_lowercase
from sys import argv, flags, stdin
from antlr4 import ParseTreeVisitor, InputStream, CommonTokenStream
from grammars.HMKNFLexer import HMKNFLexer
from grammars.HMKNFParser import HMKNFParser
from dataclasses import dataclass
from more_itertools import partition
from operator import itemgetter
from functools import reduce, partial
from enum import Enum

atom = str
Set = frozenset


@dataclass
class LRule:
    head: tuple[atom, ...]
    pbody: tuple[atom, ...]
    nbody: tuple[atom, ...]

    def text(self):
        head = ",".join(self.head)
        nbody = tuple(f"not {atom}" for atom in self.nbody)
        body = ",".join(self.pbody + nbody)
        return f"{head} :- {body}."


@dataclass
class OFormula:
    def text(self):
        if self is OConst.TRUE:
            return "TRUE"
        elif self is OConst.FALSE:
            return "FALSE"
        raise Exception("Unknown text repr")


class OConst(Enum):
    TRUE = OFormula()
    FALSE = OFormula()

    def text(self):
        return OFormula.text(self)


@dataclass
class OBinary(OFormula):
    operator: str
    left: OFormula
    right: OFormula

    def text(self):
        return f"{self.left.text()} {self.operator} {self.right.text()}"


@dataclass
class OAtom(OFormula):
    name: str

    def text(self):
        return self.name


@dataclass
class ONegation(OFormula):
    of: OFormula

    def text(self):
        return f"-{self.of.text()}"


@dataclass
class KB:
    ont: OFormula
    rules: tuple[LRule]
    katoms: Set[atom]
    oatoms: Set[atom]

    def text(self):
        ont = f"{self.ont.text()}.\n"
        rules = "\n".join(map(LRule.text, self.rules))
        return ont + rules


class HMKNFVisitor(ParseTreeVisitor):
    def __init__(self) -> None:
        self.katoms = set()
        self.oatoms = set()

    def visitKb(self, ctx: HMKNFParser.KbContext):
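        # The ontology formula is the conjunction of all o-rules, folded
        # left-to-right with "&" starting from OConst.TRUE, so an empty
        # ontology collapses to TRUE.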
        orules = (self.visit(orule) for orule in ctx.orule()) if ctx.orule() else ()
        ont = reduce(partial(OBinary, "&"), orules, OConst.TRUE)
        lrules = tuple(self.visit(lrule) for lrule in ctx.lrule())
        return KB(ont, lrules, Set(self.katoms), Set(self.oatoms))

    def visitLrule(self, ctx: HMKNFParser.LruleContext):
        head = self.visit(ctx.head()) if ctx.head() else ()
        pbody, nbody = self.visit(ctx.body()) if ctx.body() else ((), ())
        return LRule(head, pbody, nbody)

    def visitHead(self, ctx: HMKNFParser.HeadContext):
        heads = tuple(str(atom) for atom in ctx.IDENTIFIER())
        self.katoms.update(heads)
        return heads

    def visitBody(self, ctx: HMKNFParser.BodyContext):
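        # partition() splits the (name, sign) pairs by their sign flag:
        # sign == False (negated atoms) goes to nbody, sign == True to pbody.
        # Only the atom names are returned, positive body first.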
        nbody, pbody = partition(
            itemgetter(1), (self.visit(body_atom) for body_atom in ctx.katom())
        )
        return tuple(tuple(item[0] for item in part) for part in (pbody, nbody))

    def visitOrule(self, ctx: HMKNFParser.OruleContext):
        return self.visit(ctx.oformula())

    def visitDisjOrConj(self, ctx: HMKNFParser.DisjOrConjContext):
        return OBinary(
            ctx.operator.text, self.visit(ctx.oformula(0)), self.visit(ctx.oformula(1))
        )

    def visitParenth(self, ctx: HMKNFParser.ParenthContext):
        return self.visit(ctx.oformula())

    def visitImp(self, ctx: HMKNFParser.ImpContext):
        return OBinary("->", self.visit(ctx.oformula(0)), self.visit(ctx.oformula(1)))

    def visitLiteral(self, ctx: HMKNFParser.LiteralContext):
        return self.visit(ctx.oatom())

    def visitKatom(self, ctx: HMKNFParser.KatomContext):
        name, sign = self.visit(ctx.patom() if ctx.patom() else ctx.natom())
        self.katoms.add(name)
        return name, sign

    def visitOatom(self, ctx: HMKNFParser.OatomContext):
        name = str(ctx.IDENTIFIER())
        self.oatoms.add(name)
        return OAtom(name)

    def visitPatom(self, ctx: HMKNFParser.PatomContext):
        return str(ctx.IDENTIFIER()), True

    def visitNatom(self, ctx: HMKNFParser.NatomContext):
        return str(ctx.IDENTIFIER()), False

    def visitNegation(self, ctx: HMKNFParser.NegationContext):
        return ONegation(self.visit(ctx.oformula()))


def check_syntax_constraints(ast: KB):
    for rule in ast.rules:
        if len(rule.head) > 1:
            raise Exception("No rule may have more than 1 atom in its head")
    return ast


def unique_atom_namer(prefix: str):
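    # Yields an endless stream of fresh atom names: prefix + "a", ..., "z",
    # "aa", "ab", ... so that constraint rules can be given unique heads.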
    for i in count(1):
        for suffix in product(ascii_lowercase, repeat=i):
            suffix = "".join(suffix)
            yield prefix + suffix


def desugar_constraints(ast: KB):
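    # Rewrite each headless constraint ":- body." into an ordinary rule
    # "c :- body, not c." with a fresh atom c (recorded among the K-atoms),
    # so that every rule in the resulting KB has a head.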
    used_names = list()

    def add_name(name: str):
        used_names.append(name)
        return name

    names = map(add_name, unique_atom_namer("constraint_"))
    rules = tuple(
        LRule(
            (name := next(names),),
            rule.pbody,
            rule.nbody + (name,),
        )
        if not rule.head
        else rule
        for rule in ast.rules
    )
    return KB(ast.ont, rules, ast.katoms.union(used_names), ast.oatoms)


def loadProgram(fileContents) -> KB:
    input_stream = InputStream(fileContents)
    lexer = HMKNFLexer(input_stream)
    stream = CommonTokenStream(lexer)
    parser = HMKNFParser(stream)
    tree = parser.kb()

    visitor = HMKNFVisitor()
    ast = visitor.visit(tree)
    return desugar_constraints(check_syntax_constraints(ast))


def main():
    if len(argv) > 1:
        in_file = open(argv[1], "rt", encoding="utf8")
    else:
        in_file = stdin

    print(loadProgram(in_file.read()).text())


if __name__ == "__main__" and not flags.interactive:
    main()
@@ -0,0 +1,31 @@
FROM sjkillen/miktex
LABEL maintainer="sjkillen@ualberta.ca"

RUN apt-get -qq update
RUN apt-get -qq install sudo git wget default-jre
RUN apt-get -qq install software-properties-common
RUN add-apt-repository -y ppa:deadsnakes/ppa
RUN apt-get -qq install python3.11
RUN rm /usr/bin/python3
RUN ln -sT /usr/bin/python3.11 /usr/bin/python3
RUN ln -sT /usr/bin/python3.11 /usr/bin/python
RUN cd /tmp && wget https://bootstrap.pypa.io/get-pip.py && python get-pip.py

ARG USERNAME=user
ARG USER_UID=1000
ARG USER_GID=$USER_UID

# Copied from https://code.visualstudio.com/docs/remote/containers-advanced#_adding-a-nonroot-user-to-your-dev-container
# Create the user
RUN groupadd --gid $USER_GID $USERNAME \
    && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME --shell /bin/bash \
    # [Optional] Add sudo support. Omit if you don't need to install software after connecting.
    && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
    && chmod 0440 /etc/sudoers.d/$USERNAME

RUN chown -R $USERNAME:$USERNAME /miktex

USER $USERNAME

COPY requirements.txt /tmp/
RUN python -m pip install -r /tmp/requirements.txt
@@ -0,0 +1,25 @@
grammar HMKNF;

COMMENT: '#' .*? '\n' -> skip;
WS: [ \n\t\r]+ -> skip;

kb: (lrule | orule)*;

lrule: ((head body) | head | body) '.';
head: IDENTIFIER (',' IDENTIFIER)*;
body: ':-' (katom (',' katom)*)?;

orule: oformula '.';
oformula:
	'(' oformula ')' # parenth
	| oatom # literal
	| '-' oformula # negation
	| oformula (operator = ('|' | '&')) oformula # disjOrConj
	| <assoc = right> oformula '->' oformula # imp;

katom: patom | natom;
oatom: IDENTIFIER;
patom: IDENTIFIER;
natom: 'not' IDENTIFIER;

IDENTIFIER: [a-z'][A-Za-z0-9'_]*;
@@ -0,0 +1,24 @@
all: grammars

.PHONY: grammars
grammars: *.g4 grammars/__init__.py
	java -cp "${PWD}/antlr4.jar" org.antlr.v4.Tool -Dlanguage=Python3 $< -no-listener -visitor -o grammars
grammars/__init__.py: antlr4.jar requirements
	mkdir -p grammars
	touch $@

.PHONY: requirements
requirements:
	python3 -m pip install -r requirements.txt

antlr4.jar:
	rm -f *.jar
	wget https://www.antlr.org/download/antlr-4.10.1-complete.jar
	mv *.jar antlr4.jar

.PHONY: clean
clean:
	rm -rf grammars

.PHONY: test
test: ${TESTS}
@@ -0,0 +1,8 @@
# Approximation Fixpoint Theory x Hybrid MKNF KBs

- AST.py
  - Parse a knowledge base (examples in knowledge_bases)
- aft.py
  - A stable revision operator
- solver.py
  - A naive, enumerative solver that finds partial stable models
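
A minimal sketch of driving the two modules from Python (assuming the ANTLR grammars have been generated with `make`, that `knowledge_bases/simple.hmknf` exists as referenced in the module docstrings, and that the helper modules imported by `aft.py`, such as `hmknf.py` and `util.py`, are available even though they are not part of this commit):

```python
from AST import loadProgram
from aft import least_stable_fixedpoint

# Parse a knowledge base and compute its least stable fixpoint (T, P):
# T holds the atoms known to be true, P the atoms that are possibly true.
kb = loadProgram(open("knowledge_bases/simple.hmknf", encoding="utf8").read())
print(kb.text())
T, P = least_stable_fixedpoint(kb)
print(sorted(T), sorted(P))
```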
@@ -0,0 +1,99 @@
#!/usr/bin/env python3
"""
Implements code for stable revision.
When run, computes the least stable fixpoint of a knowledge base.
usage: python aft.py knowledge_bases/simple.hmknf
OR
usage: python aft.py < knowledge_bases/simple.hmknf
"""

from sys import stdin, flags, argv
from AST import KB, Set, atom, loadProgram
from hmknf import objective_knowledge
from util import printp

Kinterp = tuple[Set[atom], Set[atom]]


def add_immediate_XY(kb: KB, X: Set[atom], Y: Set[atom]) -> Set[atom]:
    """
    When X is T and Y is P, computes the atoms that must be true as a consequence of the rules and the ontology.
    When they are flipped, i.e. X = P and Y = T, it computes what is "possibly true".
    This function is monotone w.r.t. the precision ordering.
    """
    _, X, _ = objective_knowledge(kb, X, Set())
    for rule in kb.rules:
        if not X.issuperset(rule.pbody):
            continue
        if Y.intersection(rule.nbody):
            continue
        X = X.union({rule.head[0]})
    return X


def extract_OBT_entails_false(kb: KB, T: Set[atom]):
    """
    Returns the set of atoms that are objectively false w.r.t. T.
    This function is antitone w.r.t. the subset relation.
    """
    _, _, F = objective_knowledge(kb, T, Set())
    return F


def approximator(kb: KB, T: Set[atom], P: Set[atom]) -> Kinterp:
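    # One parallel application of the approximator: the lower bound is revised
    # from (T, P); the upper bound is revised from (P, T), minus any atom the
    # ontology already makes objectively false under T.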
    return (
        add_immediate_XY(kb, T, P),
        add_immediate_XY(kb, P, T) - extract_OBT_entails_false(kb, T),
    )


def fixpoint(op, initial):
    """
    Iteratively apply a function, beginning with the value initial, until a fixpoint is reached,
    e.g. op(op(op(initial))).
    """
    prev = initial
    while (current := op(prev)) != prev:
        prev = current
    return prev


def stable_revision(kb: KB, T: Set[atom], P: Set[atom]):
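    # Stable revision: fix one component, then take the least fixpoint
    # (starting from the empty set) of the resulting unary operator;
    # do this independently for each of the two components.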
    def left(T):
        return add_immediate_XY(kb, T, P)

    def right(P):
        return add_immediate_XY(kb, P, T) - extract_OBT_entails_false(kb, T)

    return (
        fixpoint(left, Set()),
        fixpoint(right, Set()),
    )


def stable_revision_extend(kb: KB, initialT: Set[atom], initialP: Set[atom]):
    def stable_revision_tuple(TP: Kinterp) -> Kinterp:
        T, P = TP
        return stable_revision(kb, T, P)

    return fixpoint(stable_revision_tuple, (initialT, initialP))


def least_stable_fixedpoint(kb: KB):
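    # Iterate stable revision to a fixpoint, starting from the least precise
    # K-interpretation: nothing known true, every K-atom possibly true.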
    return stable_revision_extend(kb, Set(), kb.katoms)


def main():
    if len(argv) > 1:
        in_file = open(argv[1], "rt", encoding="utf8")
    else:
        in_file = stdin
    kb = loadProgram(in_file.read())
    T, P = least_stable_fixedpoint(kb)
    printp(T, P)


if __name__ == "__main__" and not flags.interactive:
    main()
@@ -0,0 +1,254 @@
|
|||
\documentclass{article}
|
||||
|
||||
\input{common}
|
||||
|
||||
\title{Summary of Important Definitions}
|
||||
\date{May 11th}
|
||||
|
||||
|
||||
\begin{document}
|
||||
\maketitle
|
||||
|
||||
|
||||
|
||||
\section{Lattice Theory}
|
||||
|
||||
\begin{definition}
|
||||
A {\em partial order} $\preceq$ is a relation over a set $S$ such that for every triple of elements $x, y, z \in S$ the following hold
|
||||
\begin{itemize}
|
||||
\item (reflexivity) $x \preceq x$
|
||||
\item (antisymmetry) $(x \preceq y \land y \preceq x) \implies x = y$
|
||||
\item (transitivity) $(x \preceq y \land y \preceq z) \implies (x \preceq z)$
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
|
||||
\begin{definition}
|
||||
Given a partial order $\preceq$ over a set $S$ and a subset $X \subseteq S$, a {\em lower bound} of $X$ (resp.\ an {\em upper bound} of $X$) is an element $x \in S$ (Note that it may be the case that $x \not\in X$) such that
|
||||
\begin{itemize}
|
||||
\item $\forall y \in X, \boxed{x} \preceq y$
|
||||
\item (resp. $\forall y \in X, \boxed{y} \preceq x$)
|
||||
\end{itemize}
|
||||
The {\em greatest lower bound} of a set $X \subseteq S$ (denoted $\textbf{glb}(X)$) is the unique lower bound of $X$ that is an upper bound of the set of all lower bounds of $X$.
The {\em least upper bound} of a set $X \subseteq S$ (denoted $\textbf{lub}(X)$) is the unique upper bound of $X$ that is a lower bound of the set of all upper bounds of $X$.
|
||||
In general, $\textbf{lub}(X)$ and $\textbf{glb}(X)$ may not exist.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A (complete) lattice $\langle \mathcal{L}, \preceq \rangle$ is a set of elements $\mathcal{L}$ and a partial order $\preceq$ over $\mathcal{L}$ such that for any set $X \subseteq \mathcal{L}$
|
||||
\begin{itemize}
|
||||
\item $\textbf{lub}(X)$ and $\textbf{glb}(X)$ exist and are unique.
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\subsection{Fixpoint Operators}
|
||||
|
||||
\begin{definition}
|
||||
Given a complete lattice $\langle \mathcal{L}, \preceq \rangle$, a fixpoint operator over the lattice is a function $\Phi: \mathcal{L} \rightarrow \mathcal{L}$ that is {\em $\preceq$-monotone}
|
||||
\begin{itemize}
|
||||
\item $\Phi$ is $\preceq$-monotone if for all $x, y \in \mathcal{L}$
|
||||
\begin{align*}
|
||||
x \preceq y \implies \Phi(x) \preceq \Phi(y)
|
||||
\end{align*}
|
||||
(Note: this does not mean the function is inflationary, i.e., $x \preceq \Phi(x)$ may not hold)
|
||||
\end{itemize}
|
||||
A {\em fixpoint} is an element $x \in \mathcal{L}$ s.t. $\Phi(x) = x$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}[Knaster-Tarski]
|
||||
The set of all fixpoints of a fixpoint operator $\Phi$ on a complete lattice is itself a complete lattice. The least element of this new lattice exists and is called the least fixpoint (denoted as $\lfp~{\Phi}$)
|
||||
\end{theorem}
|
||||
|
||||
Some intuitions about lattices
|
||||
\begin{itemize}
|
||||
\item The entire lattice has a biggest element $\lub{}(\mathcal{L}) = \top$ and a smallest element $\glb{}(\mathcal{L}) = \bot$
|
||||
\item When a lattice has finite height (or a finite domain), the least fixpoint of a fixpoint operator can be computed by iteratively applying the operator to $\bot$ (see the chain sketched after this list)
|
||||
\item An operator may return an element that is not comparable to the input, however, after a comparable element is returned (either greater or less than) that comparability and direction are maintained for all subsequent iterations.
|
||||
\item Further, because $\bot$ is less than all elements in the lattice, it is always the case that $\bot \preceq \Phi(\bot)$
|
||||
\end{itemize}
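For a $\preceq$-monotone operator $\Phi$ on a lattice of finite height, the iteration just described can only move upwards and stabilises after finitely many steps:
\begin{align*}
\bot ~\preceq~ \Phi(\bot) ~\preceq~ \Phi(\Phi(\bot)) ~\preceq~ \cdots ~\preceq~ \Phi^{n}(\bot) = \Phi^{n+1}(\bot) = \lfp~\Phi
\end{align*}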
|
||||
|
||||
\section{Partial Stable Model Semantics}
|
||||
|
||||
\begin{definition}
|
||||
A (ground and normal) {\em answer set program} $\P$ is a set of rules where each rule $r$ is of the form
|
||||
\begin{align*}
|
||||
h \leftarrow a_0,~ a_1,~ \dots,~ a_n,~ \Not b_0,~\Not b_1,~\dots,~\Not b_k
|
||||
\end{align*}
|
||||
where we define the following shorthand for a rule $r \in \P$
|
||||
\begin{align*}
|
||||
\head(r) &= h\\
|
||||
\bodyp(r) &= \{ a_0,~ a_1,~ \dots,~ a_n \}\\
|
||||
\bodyn(r) &= \{ b_0,~ b_1,~ \dots,~ b_k \}
|
||||
\end{align*}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A two-valued interpretation $I$ of a program $\P$ is a set of atoms that appear in $\P$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
An interpretation $I$ is a \boxedt{model} of a program $\P$ if for each rule $r \in \P$
|
||||
\begin{itemize}
|
||||
\item If $\bodyp(r) \subseteq I$ and $\bodyn(r) \intersect I = \emptyset$ then $\head(r) \in I$.
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
An interpretation $I$ is a \boxedt{stable model} of a program $\P$ if $I$ is a model of $\P$ \textbf{and}
|
||||
for every interpretation $I' \subsetneq I$ there exists a rule $r \in \P$ such that
|
||||
\begin{itemize}
|
||||
\item $\bodyp(r) \subseteq I'$, $\bodyn(r) \intersect I = \emptyset$ (note that this is $I$ and not $I'$) and $\head(r) \not\in I'$
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A three-valued interpretation $(T, P)$ of a program $\P$ is a pair of sets of atoms such that $T \subseteq P$.
|
||||
The \boxedt{truth-ordering} respects $\ff < \uu < \tt$ and is defined for two three-valued interpretations $(T, P)$ and $(X, Y)$ as follows.
|
||||
\begin{align*}
|
||||
(T, P) \preceq_t (X, Y) \textrm{ iff } T \subseteq X \land P \subseteq Y
|
||||
\end{align*}
|
||||
The \boxedt{precision-ordering} respects the partial order $\uu < \tt$, $\uu < \ff$ and is defined for two three-valued interpretations $(T, P)$ and $(X, Y)$ as follows.
|
||||
\begin{align*}
|
||||
(T, P) \preceq_p (X, Y) \textrm{ iff } T \subseteq X \land \boxed{Y \subseteq P}
|
||||
\end{align*}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A three-valued interpretation $(T, P)$ is a {\em model} of a program $\P$ if for each rule $r \in \P$
|
||||
\begin{itemize}
|
||||
\item $\body(r) \subseteq P \land \bodyn(r) \intersect T = \emptyset$ implies $\head(r) \in P$, and
|
||||
\item $\body(r) \subseteq T \land \bodyn(r) \intersect P = \emptyset$ implies $\head(r) \in T$.
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A three-valued interpretation $(T, P)$ is a {\em stable model} of a program $\P$ if it is a model of $\P$ and if for every three-valued interpretation $(X, Y)$ such that $(X, Y) \preceq_t (T, P)$ and $(X, Y) \not= (T, P)$ there exists a rule $r \in \P$ such that either
|
||||
\begin{itemize}
|
||||
\item $\bodyp(r) \subseteq Y \land \bodyn(r) \intersect T = \emptyset$ and $\head(r) \not\in Y$ \textbf{OR}
|
||||
\item $\bodyp(r) \subseteq X \land \bodyn(r) \intersect P = \emptyset$ and $\head(r) \not\in X$
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
|
||||
\section{Approximation Fixpoint Theory}
|
||||
|
||||
We can think of a three-valued interpretation $(T, P)$ as an approximation on the set of true atoms.
|
||||
$T$ is a lower bound and $P$ is the upper bound.
|
||||
|
||||
\begin{definition}
|
||||
An {\em approximator} is a fixpoint operator on the complete lattice $\langle \wp(\mathcal{L})^2, \preceq_p \rangle$ (called a bilattice)
|
||||
\end{definition}
|
||||
Given a function $f(T, P): S^2 \rightarrow S^2$, we define two separate functions
|
||||
\begin{align*}
|
||||
f(\cdot, P)_1:~ &S \rightarrow S\\
|
||||
f(T, \cdot)_2:~ &S \rightarrow S
|
||||
\end{align*}
|
||||
such that
|
||||
\begin{align*}
|
||||
f(T, P) = \Big(&(f(\cdot, P)_1)(T), \\&(f(T, \cdot)_2)(P)\Big)
|
||||
\end{align*}
|
||||
|
||||
\begin{definition}
|
||||
Given an approximator $\Phi(T, P)$ the stable revision operator is defined as follows
|
||||
\begin{align*}
|
||||
S(T, P) = (\lfp{(\Phi(\cdot, P)_1)},~ \lfp{(\Phi(T, \cdot)_2)})
|
||||
\end{align*}
|
||||
Note: the $\lfp$ is applied to a unary operator, thus it's the least fixpoint of the lattice $\langle \wp(\mathcal{L}), \subseteq \rangle$ whose least element is $\emptyset$.
|
||||
\end{definition}
|
||||
|
||||
\subsection{An Approximator for Partial Stable Semantics}
|
||||
|
||||
\begin{align*}
|
||||
\Gamma(T, P) &\define \{ \head(r) ~|~ r \in \P, \bodyp(r) \subseteq T, \bodyn(r) \intersect P = \emptyset \}\\
\Gamma(P, T) &\define \{ \head(r) ~|~ r \in \P, \bodyp(r) \subseteq P, \bodyn(r) \intersect T = \emptyset \}\\
|
||||
\Phi(T, P) &\define \Bigl( \Gamma(T, P), \Gamma(P, T) \Bigr)
|
||||
\end{align*}
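The repository's \texttt{aft.py} implements a hybrid MKNF variant of this operator; the following is only a minimal Python sketch of $\Gamma$ itself, assuming rules are given as \texttt{(head, pbody, nbody)} triples of atom sets:
\begin{verbatim}
def gamma(rules, X, Y):
    # Heads of rules whose positive body holds in X and whose
    # negative body is disjoint from Y.
    return {head for (head, pbody, nbody) in rules
            if pbody <= X and not (nbody & Y)}

# Two mutually supporting rules: a :- b.  b :- a.
rules = [("a", {"b"}, set()), ("b", {"a"}, set())]
print(gamma(rules, {"a", "b"}, {"a", "b"}))  # {'a', 'b'}
print(gamma(rules, set(), {"a", "b"}))       # set(): lfp of gamma(., Y) is empty
\end{verbatim}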
|
||||
|
||||
Without stable revision (consider, for instance, the program $\{\, a \leftarrow b.~~ b \leftarrow a. \,\}$, whose atoms only support each other circularly), starting from $(T, P) = (\{ a, b \}, \{ a, b \})$
|
||||
\begin{align*}
|
||||
\Phi(T, P) = (\{ a, b \}, \{ a, b \}) \textrm{ (fixpoint reached)}
|
||||
\end{align*}
|
||||
With stable revision
|
||||
\begin{align*}
|
||||
S(T, P) = (\emptyset, \emptyset) \textrm{ (fixpoint reached)}
|
||||
\end{align*}
|
||||
|
||||
|
||||
\begin{proposition}
|
||||
Given a poset $\langle \mathcal{L}, \leq \rangle$,
|
||||
an operator $o: \mathcal{L} \rightarrow \mathcal{L}$ is $\leq$-monotone iff it is $\geq$-monotone
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
($\Rightarrow$)
|
||||
Initially, we have $\forall x, y \in \mathcal{L}: x \leq y \implies o(x) \leq o(y)$.
|
||||
Let $a, b \in \mathcal{L}$ such that $a \geq b$. We have $b \leq a$ and can apply our initial assumption to get $o(b) \leq o(a)$.
|
||||
This gives us $o(a) \geq o(b)$.
|
||||
We can generalize to obtain $\forall x, y \in \mathcal{L}: x \geq y \implies o(x) \geq o(y)$.
|
||||
($\Leftarrow$) The argument is symmetric.
|
||||
\end{proof}
|
||||
|
||||
% Let $\leq$ and $\leq_i$ be the orderings over the set $\mathcal{L}^2$ such that for each ${(T, P), (X, Y) \in \mathcal{L}^2}$
|
||||
% \begin{align*}
|
||||
% (a, b) \leq (x, y) &\textit{ iff } a \leq x \textrm{ and } b \leq y\\
|
||||
% (a, b) \leq_i (x, y) &\textit{ iff } a \leq x \textrm{ and } \boxed{y} \leq b
|
||||
% \end{align*}
|
||||
% \begin{proposition}
|
||||
% Given a poset $\langle \mathcal{L}^2, \leq \rangle$, an operator $o: \wp(\mathcal{L})^2 \rightarrow \wp(\mathcal{L})^2$ is $\leq$-monotone iff it is $\leq_i$-monotone
|
||||
% \end{proposition}
|
||||
% \begin{proof}
|
||||
% ($\Rightarrow$) Initially, we have ${\forall (x, y), (a, b) \in \mathcal{L}^2: (x, y) \leq (a, b) \implies o(x, y) \leq o(a, b)}$.
|
||||
% If we rearrange the variables we get
|
||||
% \begin{align*}
|
||||
% &\forall x, a \in \mathcal{L}: \forall y, b \in \mathcal{L}:\\
|
||||
% &~~~~~(x \leq a \land y \leq b) \implies ((o_1(x, y) \leq o_1(a, b)) \land (o_2(x, y) \leq o_2(a, b)))
|
||||
% \end{align*}
|
||||
|
||||
|
||||
% Let $(u, v), (i, k) \in \mathcal{L}^2$ such that $(u, v) \leq_i (i, k)$. We have $(u, k) \leq (i, v)$. We can apply the initial assumption to obtain
|
||||
% $o(u, k) \leq o(i, v)$. This is equivalent to
|
||||
% \begin{align*}
|
||||
% (o_1(u, k) \leq o_1(i, v)) \land (o_2(u, k) \leq o_2(i, v))
|
||||
% \end{align*}
|
||||
% which can be rewritten as
|
||||
% \begin{align*}
|
||||
% (o_1(u, k) \leq o_1(i, v)) \land \boxed{(o_2(i, v)) \leq o_2(u, k))}
|
||||
% \end{align*}
|
||||
% \end{proof}
|
||||
|
||||
\begin{proposition}
|
||||
An operator $A : L^2 \rightarrow L^2$ is symmetric and monotone
|
||||
with respect to both $\leq_i$ and $\leq$ if and only if there is a monotone operator
|
||||
$O : L \rightarrow L$ such that for every $x, y \in L, A(x, y) = (O (x), O (y ))$
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
($\Rightarrow$)
|
||||
From Propositions 5 and 6 we have, for any $x \in L$, that $A_1(\cdot, x)$ and $A_1(x, \cdot)$ are monotone and that $A_1(x, \cdot)$ is also antimonotone.
By Proposition 2, $A_1(x, \cdot)$ is therefore constant; denote this constant by $O(x)$.
|
||||
By the symmetric condition, we have $A_1(x, \cdot) = A_2(\cdot, x)$, thus $A(x, y) = (O(x), O(y))$. It follows from the monotonicity of $A$ that $O$ is monotone.
|
||||
|
||||
($\Leftarrow$)
|
||||
Clearly $A$ is symmetric, and $\leq$-monotone (Given that $O$ is $\leq$-monotone). Using proposition 3.1 (above) $O(x)$ is $\geq$-monotone, thus $A$ is $\leq_i$-monotone as well.
|
||||
\end{proof}
|
||||
|
||||
\section{The Polynomial Hierarchy}
|
||||
Intuitive definitions of ${\sf NP}$
|
||||
\begin{itemize}
|
||||
\item the class of problems which have algorithms that can verify solutions in polynomial time.
|
||||
\item A problem that can be solved by reducing it to a SAT expression of the form
|
||||
\begin{align*}
|
||||
\exists a,~ b,~ c,~ d,~ (a \lor b \lor c) \land (\neg a \lor \neg d \lor c)~\land \cdots
|
||||
\end{align*}
|
||||
\item (Alternating Turing machine) A problem that is solved by some path of an algorithm that is allowed to branch in parallel
|
||||
\end{itemize}
|
||||
Intuitive definitions of ${\sf NP^{NP}}$ a.k.a.\ $\Sigma_2^P$
|
||||
\begin{itemize}
|
||||
\item the class of problems whose candidate solutions can be verified by an algorithm that may itself call an ${\sf NP}$ oracle.
|
||||
\item A problem that can be solved by reducing it to a SAT expression of the form
|
||||
\begin{align*}
|
||||
\exists c,~ \forall a,~ b,~ d,~ (a \lor b \lor c) \land (\neg a \lor \neg d \lor c)~\land \cdots
|
||||
\end{align*}
|
||||
\item (Alternating Turing machine) A problem that is solved by some path of an algorithm that is allowed to branch in parallel. A branch is allowed to switch to ``ALL'' mode only once and require that all subsequent forks return success
|
||||
\end{itemize}
|
||||
|
||||
|
||||
\end{document}
|
|
@@ -0,0 +1,7 @@
*.aux
*.fdb_latexmk
*.fls
*.log
*.pdf
*.synctex.gz
*.bbl
@@ -0,0 +1,10 @@
all: document.pdf

%.pdf: %.tex
	latexmk -pdf $<

clean:
	${RM} document.pdf
	${RM} *.bbl
	${RM} *.blg
	latexmk -C
@@ -0,0 +1,131 @@
|
|||
\usepackage{amsmath}
|
||||
\usepackage{amsfonts}
|
||||
\usepackage{amssymb}
|
||||
\usepackage{amsthm}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{float}
|
||||
\usepackage{verbatim}
|
||||
\usepackage{enumerate}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{accents}
|
||||
\usepackage{bm}% boldmath
|
||||
|
||||
\usepackage[acronym,hyperfirst=false]{glossaries}
|
||||
|
||||
\newtheorem{theorem}{Theorem}[section]
|
||||
\newtheorem{proposition}{Proposition}[section]
|
||||
\newtheorem{example}{Example}
|
||||
\newtheorem{lemma}{Lemma}[section]
|
||||
\newtheorem{corollary}[theorem]{Corollary}
|
||||
\newtheorem{definition}{Definition}[section]
|
||||
\newtheorem{remark}{Remark}
|
||||
|
||||
\newcommand{\Definition}[1]{Definition~\ref{#1}}
|
||||
\newcommand{\Section}[1]{Section~\ref{#1}}
|
||||
\newcommand{\Chapter}[1]{Chapter~\ref{#1}}
|
||||
\newcommand{\Theorem}[1]{Theorem~\ref{#1}}
|
||||
\newcommand{\Proposition}[1]{Proposition~\ref{#1}}
|
||||
\newcommand{\Example}[1]{Example~\ref{#1}}
|
||||
\newcommand{\Lemma}[1]{Lemma~\ref{#1}}
|
||||
\newcommand{\Figure}[1]{Figure~\ref{#1}}
|
||||
\newcommand{\Corollary}[1]{Corollary~\ref{#1}}
|
||||
\newcommand{\Remark}[1]{Remark~\ref{#1}}
|
||||
\newcommand{\Algorithm}[1]{Algorithm~\ref{#1}}
|
||||
\newcommand{\Line}[1]{line~\ref{#1}}
|
||||
|
||||
\renewcommand{\P}{\mathcal{P}}
|
||||
\newcommand{\Not}{\textrm{\bf not\,}}
|
||||
\newcommand{\head}{head}
|
||||
\newcommand{\intersect}{\cap}
|
||||
\newcommand{\union}{\cup}
|
||||
\newcommand{\atoms}{Atoms}
|
||||
\newcommand{\bodyp}{body^{+}}
|
||||
\newcommand{\bodyn}{body^{-}}
|
||||
\newcommand{\body}{body}
|
||||
\newcommand{\op}[1]{\textbf{#1}}
|
||||
\newcommand{\pmodels}{\models}
|
||||
|
||||
\newcommand{\KB}{\mathcal{K}}
|
||||
\newcommand{\bfK}{\textit{\em \textbf{K}}}
|
||||
\renewcommand{\O}{\mathcal{O}}
|
||||
\renewcommand{\P}{\mathcal{P}}
|
||||
\newcommand{\KAK}{\textbf{KA}(\KB)}
|
||||
\newcommand{\Katom}{\textbf{K}-atom~}
|
||||
\newcommand{\Katoms}{\textbf{K}-atoms~}
|
||||
\newcommand{\comp}[1]{\overline{#1}}
|
||||
|
||||
\newcommand{\A}{\textbf{A}}
|
||||
\newcommand{\T}{\textbf{T}}
|
||||
\newcommand{\F}{\textbf{F}}
|
||||
|
||||
\newcommand{\boldK}{\bf {K}}
|
||||
\newcommand{\PP}{\mathcal{x}_{\KBB}}
|
||||
\newcommand{\M}{\mathcal{M}}
|
||||
\newcommand{\Ma}{M_0}
|
||||
\newcommand{\Mb}{M_1}
|
||||
\newcommand{\MM}{\langle M, M_1 \rangle}
|
||||
\newcommand{\N}{\mathcal{N}}
|
||||
\newcommand{\Na}{N_0}
|
||||
\newcommand{\Nb}{N_1}
|
||||
\newcommand{\NN}{\langle N, N_1 \rangle}
|
||||
\newcommand{\MN}{\langle M, N \rangle}
|
||||
\newcommand{\ff}{{\bf f}}
|
||||
\newcommand{\uu}{{\bf u}}
|
||||
\renewcommand{\tt}{{\bf t}}
|
||||
\newcommand{\mneval}[1]{(I, \MN, \MN)( #1 )}
|
||||
\newcommand{\meval}[1]{\mneval{#1}}
|
||||
\newcommand{\mmeval}[1]{\mneval{#1}}
|
||||
\newcommand{\KBA}{\textsf{KA}}
|
||||
\newcommand{\OO}{\mathcal{O}}
|
||||
\newcommand{\OBO}[1]{{{\sf OB}_{\OO,#1}}}
|
||||
\renewcommand{\P}{\mathcal{P}}
|
||||
\newcommand{\Q}{\mathcal{Q}}
|
||||
\newcommand{\Pcomp}{\P_{\textrm{\tiny COMP}}}
|
||||
\newcommand{\Ploop}{\P_{\textrm{\tiny LOOP}}}
|
||||
\newcommand{\Atoms}{Atoms}
|
||||
\newcommand{\Loops}{Loops}
|
||||
\renewcommand{\implies}{\supset}
|
||||
\renewcommand{\impliedby}{\subset}
|
||||
\newcommand{\Rule}{rule}
|
||||
\newcommand{\eval}[2]{#1\big\{#2\big\}}
|
||||
\newcommand{\lub}[1]{{\bf lub_{#1}~}}
|
||||
\newcommand{\glb}[1]{{\bf glb_{#1}~}}
|
||||
\newcommand{\lfp}{{\bf lfp}}
|
||||
\newcommand{\fix}[1]{{\bf broken} ~{#1}}
|
||||
\newcommand{\fixpoints}{\textbf{fixpoints}}
|
||||
|
||||
% https://tex.stackexchange.com/questions/118240/how-to-draw-a-border-around-character-in-a-formula
|
||||
\makeatletter
|
||||
\renewcommand{\boxed}[1]{\text{\fboxsep=.2em\fbox{\m@th$\displaystyle#1$}}}
|
||||
\makeatother
|
||||
\newcommand{\boxedt}[1]{\boxed{\textrm{#1}}}
|
||||
|
||||
\newcommand{\argblank}{\underline{\hspace{.3cm}}}
|
||||
\renewcommand{\L}{\mathcal{L}}
|
||||
\newcommand{\squnion}{\sqcup}
|
||||
\renewcommand{\implies}{\Rightarrow}
|
||||
\newcommand{\define}{\coloneqq}
|
||||
\newcommand{\lcomp}[1]{{#1}^{\mathbf{\hspace{.075em}c}}}
|
||||
|
||||
\newcommand{\noncompare}{\bowtie}
|
||||
% https://tex.stackexchange.com/questions/378087
|
||||
\makeatletter
|
||||
\newcommand\ineq{}% for safety
|
||||
\DeclareRobustCommand{\ineq}{\mathrel{\mathpalette\rance@ineq\relax}}
|
||||
\newcommand{\rance@ineq}[2]{%
|
||||
\vcenter{\hbox{%
|
||||
$\m@th#1\mkern1.5mu\underline{\mkern-1.5mu\bowtie\mkern-1.5mu}\mkern1.5mu$%
|
||||
}}%
|
||||
}
|
||||
\makeatother
|
||||
\newcommand{\noncompareeq}{\ineq}
|
||||
|
||||
|
||||
|
||||
|
||||
\newcommand{\omax}{\textbf{max}}
|
||||
\newcommand{\omin}{\textbf{min}}
|
||||
|
||||
\newcommand{\project}[2]{\Pi_{#1}(#2)}
|
||||
|
||||
\newcommand{\mknfmodels}{\models_{\mbox{\tiny{\sf MKNF}}}}
|
|
@@ -0,0 +1,46 @@
|
|||
This is BibTeX, Version 0.99d (TeX Live 2022/dev/Debian)
|
||||
Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
|
||||
The top-level auxiliary file: document.aux
|
||||
The style file: plain.bst
|
||||
Database file #1: refs.bib
|
||||
You've used 3 entries,
|
||||
2118 wiz_defined-function locations,
|
||||
521 strings with 4580 characters,
|
||||
and the built_in function-call counts, 1233 in all, are:
|
||||
= -- 127
|
||||
> -- 46
|
||||
< -- 1
|
||||
+ -- 17
|
||||
- -- 14
|
||||
* -- 90
|
||||
:= -- 198
|
||||
add.period$ -- 10
|
||||
call.type$ -- 3
|
||||
change.case$ -- 15
|
||||
chr.to.int$ -- 0
|
||||
cite$ -- 3
|
||||
duplicate$ -- 45
|
||||
empty$ -- 102
|
||||
format.name$ -- 14
|
||||
if$ -- 263
|
||||
int.to.chr$ -- 0
|
||||
int.to.str$ -- 3
|
||||
missing$ -- 3
|
||||
newline$ -- 18
|
||||
num.names$ -- 8
|
||||
pop$ -- 13
|
||||
preamble$ -- 1
|
||||
purify$ -- 12
|
||||
quote$ -- 0
|
||||
skip$ -- 35
|
||||
stack$ -- 0
|
||||
substring$ -- 112
|
||||
swap$ -- 13
|
||||
text.length$ -- 1
|
||||
text.prefix$ -- 0
|
||||
top$ -- 0
|
||||
type$ -- 12
|
||||
warning$ -- 0
|
||||
while$ -- 14
|
||||
width$ -- 4
|
||||
write$ -- 36
|
|
@@ -0,0 +1,397 @@
|
|||
\documentclass{article}
|
||||
|
||||
\usepackage{natbib}
|
||||
|
||||
\input{common}
|
||||
|
||||
\title{AFT and HMKNF}
|
||||
\author{}
|
||||
\date{May 25th 2023}
|
||||
|
||||
\begin{document}
|
||||
\maketitle
|
||||
|
||||
|
||||
\section{An Approximator for Hybrid MKNF Knowledge Bases}
|
||||
|
||||
The lattice $\langle \L^2, \preceq_p \rangle$
|
||||
|
||||
\begin{align*}
|
||||
\Gamma(T, P) &\define \{ \head(r) ~|~ r \in \P,~ \bodyp(r) \subseteq T,~ \bodyn(r) \intersect P = \emptyset \}\\
|
||||
extract(T) &\define \{ a \in \L ~|~ \OBO{T} \models \neg a \} \\
|
||||
\Phi(T, P) &\define \Bigl( \Gamma(T, P), \Gamma(P, T) \setminus extract(T) \Bigr)
|
||||
\end{align*}
|
||||
|
||||
\begin{align*}
|
||||
S(\Phi)(T, P) \define \Bigl( \lfp~\Gamma(\cdot, P),~ \lfp\,\Bigl( \Gamma(\cdot, T) \setminus extract(T) \Bigr) \Bigr)
|
||||
\end{align*}
|
||||
|
||||
\appendix
|
||||
|
||||
\section{Lattice Theory}
|
||||
|
||||
\begin{definition}
|
||||
A {\em partial order} $\preceq$ is a relation over a set $S$ such that for every triple of elements $x, y, z \in S$ the following hold
|
||||
\begin{itemize}
|
||||
\item (reflexivity) $x \preceq x$
|
||||
\item (antisymmetry) $(x \preceq y \land y \preceq x) \implies x = y$
|
||||
\item (transitivity) $(x \preceq y \land y \preceq z) \implies (x \preceq z)$
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
|
||||
\begin{definition}
|
||||
Given a partial order $\preceq$ over a set $S$ and a subset $X \subseteq S$, a {\em lower bound} of $X$ (resp.\ an {\em upper bound} of $X$) is an element $x \in S$ (Note that it may be the case that $x \not\in X$) such that
|
||||
\begin{itemize}
|
||||
\item $\forall y \in X, \boxed{x} \preceq y$
|
||||
\item (resp. $\forall y \in X, \boxed{y} \preceq x$)
|
||||
\end{itemize}
|
||||
The {\em greatest lower bound} of a set $X \subseteq S$ (denoted $\textbf{glb}(X)$) is the unique lower bound of $X$ that is an upper bound of the set of all lower bounds of $X$.
The {\em least upper bound} of a set $X \subseteq S$ (denoted $\textbf{lub}(X)$) is the unique upper bound of $X$ that is a lower bound of the set of all upper bounds of $X$.
|
||||
In general, $\textbf{lub}(X)$ and $\textbf{glb}(X)$ may not exist.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A (complete) lattice $\langle \mathcal{L}, \preceq \rangle$ is a set of elements $\mathcal{L}$ and a partial order $\preceq$ over $\mathcal{L}$ such that for any set $X \subseteq \mathcal{L}$
|
||||
\begin{itemize}
|
||||
\item $\textbf{lub}(X)$ and $\textbf{glb}(X)$ exist and are unique.
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
|
||||
\section{Partial Stable Model Semantics}
|
||||
|
||||
\begin{definition}
|
||||
A (ground and normal) {\em answer set program} $\P$ is a set of rules where each rule $r$ is of the form
|
||||
\begin{align*}
|
||||
h \leftarrow a_0,~ a_1,~ \dots,~ a_n,~ \Not b_0,~\Not b_1,~\dots,~\Not b_k
|
||||
\end{align*}
|
||||
where we define the following shorthand for a rule $r \in \P$
|
||||
\begin{align*}
|
||||
\head(r) &= h\\
|
||||
\bodyp(r) &= \{ a_0,~ a_1,~ \dots,~ a_n \}\\
|
||||
\bodyn(r) &= \{ b_0,~ b_1,~ \dots,~ b_k \}
|
||||
\end{align*}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A two-valued interpretation $I$ of a program $\P$ is a set of atoms that appear in $\P$.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
An interpretation $I$ is a \boxedt{model} of a program $\P$ if for each rule $r \in \P$
|
||||
\begin{itemize}
|
||||
\item If $\bodyp(r) \subseteq I$ and $\bodyn(r) \intersect I = \emptyset$ then $\head(r) \in I$.
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
An interpretation $I$ is a \boxedt{stable model} of a program $\P$ if $I$ is a model of $\P$ \textbf{and}
|
||||
for every interpretation $I' \subsetneq I$ there exists a rule $r \in \P$ such that
|
||||
\begin{itemize}
|
||||
\item $\bodyp(r) \subseteq I'$, $\bodyn(r) \intersect I = \emptyset$ (note that this is $I$ and not $I'$) and $\head(r) \not\in I'$
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A three-valued interpretation $(T, P)$ of a program $\P$ is a pair of sets of atoms such that $T \subseteq P$.
|
||||
The \boxedt{truth-ordering} respects $\ff < \uu < \tt$ and is defined for two three-valued interpretations $(T, P)$ and $(X, Y)$ as follows.
|
||||
\begin{align*}
|
||||
(T, P) \preceq_t (X, Y) \textrm{ iff } T \subseteq X \land P \subseteq Y
|
||||
\end{align*}
|
||||
The \boxedt{precision-ordering} respects the partial order $\uu < \tt$, $\uu < \ff$ and is defined for two three-valued interpretations $(T, P)$ and $(X, Y)$ as follows.
|
||||
\begin{align*}
|
||||
(T, P) \preceq_p (X, Y) \textrm{ iff } T \subseteq X \land \boxed{Y \subseteq P}
|
||||
\end{align*}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A three-valued interpretation $(T, P)$ is a {\em model} of a program $\P$ if for each rule $r \in \P$
|
||||
\begin{itemize}
|
||||
\item $\body(r) \subseteq P \land \bodyn(r) \intersect T = \emptyset$ implies $\head(r) \in P$, and
|
||||
\item $\body(r) \subseteq T \land \bodyn(r) \intersect P = \emptyset$ implies $\head(r) \in T$.
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A three-valued interpretation $(T, P)$ is a {\em stable model} of a program $\P$ if it is a model of $\P$ and if for every three-valued interpretation $(X, Y)$ such that $(X, Y) \preceq_t (T, P)$ and $(X, Y) \not= (T, P)$ there exists a rule $r \in \P$ such that either
|
||||
\begin{itemize}
|
||||
\item $\bodyp(r) \subseteq Y \land \bodyn(r) \intersect T = \emptyset$ and $\head(r) \not\in Y$ \textbf{OR}
|
||||
\item $\bodyp(r) \subseteq X \land \bodyn(r) \intersect P = \emptyset$ and $\head(r) \not\in X$
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
|
||||
\subsection{Fixpoint Operators}
|
||||
|
||||
\begin{definition}
|
||||
Given a complete lattice $\langle \mathcal{L}, \preceq \rangle$, a fixpoint operator over the lattice is a function $\Phi: \mathcal{L} \rightarrow \mathcal{L}$ that is {\em $\preceq$-monotone}
|
||||
\begin{itemize}
|
||||
\item $\Phi$ is $\preceq$-monotone if for all $x, y \in \mathcal{L}$
|
||||
\begin{align*}
|
||||
x \preceq y \implies \Phi(x) \preceq \Phi(y)
|
||||
\end{align*}
|
||||
(Note: this does not mean the function is inflationary, i.e., $x \preceq \Phi(x)$ may not hold)
|
||||
\end{itemize}
|
||||
A {\em fixpoint} is an element $x \in \mathcal{L}$ s.t. $\Phi(x) = x$.
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}[Knaster-Tarski]
|
||||
The set of all fixpoints of a fixpoint operator $\Phi$ on a complete lattice is itself a complete lattice. The least element of this new lattice exists and is called the least fixpoint (denoted as $\lfp~{\Phi}$)
|
||||
\end{theorem}
|
||||
|
||||
Some intuitions about lattices
|
||||
\begin{itemize}
|
||||
\item The entire lattice has a biggest element $\lub{}(\mathcal{L}) = \top$ and a smallest element $\glb{}(\mathcal{L}) = \bot$
|
||||
\item When a lattice has finite height (or a finite domain), the least fixpoint of a fixpoint operator can be computed by iteratively applying the operator to $\bot$
|
||||
\item An operator may return an element that is not comparable to the input, however, after a comparable element is returned (either greater or less than) that comparability and direction are maintained for all subsequent iterations.
|
||||
\item Further, because $\bot$ is less than all elements in the lattice, it is always the case that $\bot \preceq \Phi(\bot)$
|
||||
\end{itemize}
|
||||
|
||||
|
||||
\section{Approximation Fixpoint Theory}
|
||||
|
||||
We can think of a three-valued interpretation $(T, P)$ as an approximation on the set of true atoms.
|
||||
$T$ is a lower bound and $P$ is the upper bound.
|
||||
|
||||
\begin{definition}
|
||||
An {\em approximator} is a fixpoint operator on the complete lattice $\langle \wp(\mathcal{L})^2, \preceq_p \rangle$ (called a bilattice)
|
||||
\end{definition}
|
||||
Given a function $f(T, P): S^2 \rightarrow S^2$, we define two separate functions
|
||||
\begin{align*}
|
||||
f(\cdot, P)_1:~ &S \rightarrow S\\
|
||||
f(T, \cdot)_2:~ &S \rightarrow S
|
||||
\end{align*}
|
||||
such that
|
||||
\begin{align*}
|
||||
f(T, P) = \Big(&(f(\cdot, P)_1)(T), \\&(f(T, \cdot)_2)(P)\Big)
|
||||
\end{align*}
|
||||
|
||||
\begin{definition}
|
||||
Given an approximator $\Phi(T, P)$ the stable revision operator is defined as follows
|
||||
\begin{align*}
|
||||
S(T, P) = (\lfp{(\Phi(\cdot, P)_1)},~ \lfp{(\Phi(T, \cdot)_2)})
|
||||
\end{align*}
|
||||
Note: the $\lfp$ is applied to a unary operator, thus it's the least fixpoint of the lattice $\langle \wp(\mathcal{L}), \subseteq \rangle$ whose least element is $\emptyset$.
|
||||
\end{definition}
|
||||
|
||||
|
||||
\section{Hybrid MKNF Knowledge Bases}\label{section-prelim-hmknf}
|
||||
|
||||
Motik and Rosati \cite{motikreconciling2010} extend MKNF (Minimal Knowledge and Negation as Failure \cite{lifschitznonmonotonic1991}) to form hybrid MKNF, a nonmonotonic logic that supports classical reasoning with ontologies.
|
||||
We use Knorr et al.'s \cite{knorrlocal2011} \mbox{3-valued} semantics for hybrid MKNF,
under which there are three truth values, $\ff$ (false), $\uu$ (undefined), and $\tt$ (true), that use the
|
||||
ordering $\ff < \uu < \tt$.
|
||||
The $min$ and $max$ functions respect this ordering when applied to sets.
|
||||
|
||||
MKNF relies on the standard name assumption under which
|
||||
every first-order interpretation in an
|
||||
MKNF interpretation is required to be a Herbrand interpretation with a
|
||||
countably infinite number of additional constants.
|
||||
We use $\Delta$ to denote the set of all these constants.
|
||||
We use $\phi[x \rightarrow \alpha]$ to denote the formula obtained by replacing all free
|
||||
occurrences of variable x in $\phi$ with the term $\alpha$.
|
||||
|
||||
A (\mbox{3-valued}) \textit{MKNF structure} is a triple $(I, \M, \N)$ where $I$ is
|
||||
a (two-valued first-order) interpretation and $\M = \langle M, M_1 \rangle$ and
|
||||
$\N = \langle N, N_1 \rangle$ are pairs of sets of first-order interpretations
|
||||
such that $M \supseteq M_1$ and $N \supseteq N_1$.
|
||||
Using $\phi$ and $\sigma$ to denote MKNF formulas, the evaluation of an MKNF
|
||||
structure is defined as follows:
|
||||
\begin{align*}
|
||||
&(I, \M, \N)({p(t_1, \dots ,~t_n)}) \define
|
||||
\left\{\begin{array}{ll}
|
||||
\tt & \textrm{iff } p(t_1, \dots,~t_n) \textrm{ is \boxed{\textrm{true}} in } I
|
||||
\\
|
||||
\ff & \textrm{iff } p(t_1, \dots,~t_n) \textrm{ is \boxed{\textrm{false}} in } I
|
||||
\\
|
||||
\end{array}\right.
|
||||
\\
|
||||
&(I, \M, \N)({\neg \phi}) \define
|
||||
\left\{\begin{array}{ll}
|
||||
\tt & \textrm{iff } (I, \M, \N)({\phi}) = \ff \\
|
||||
\uu & \textrm{iff } (I, \M, \N)({\phi}) = \uu \\
|
||||
\ff & \textrm{iff } (I, \M, \N)({\phi}) = \tt \\
|
||||
\end{array}\right.
|
||||
\\
|
||||
&(I, \M, \N)({\exists x, \phi}) \define max\{(I, \M,
|
||||
\N)({\phi[x \rightarrow \alpha]}) ~|~\alpha \in \Delta\}
|
||||
\\
|
||||
&(I, \M, \N)({\forall x, \phi}) \define min\{(I, \M,
|
||||
\N)({\phi[x \rightarrow \alpha]}) ~|~\alpha \in \Delta\}
|
||||
\\
|
||||
&(I, \M, \N)({\phi \land \sigma}) \define min((I, \M, \N)({\phi}), (I,
|
||||
\M, \N)({\sigma}))
|
||||
\\
|
||||
&(I, \M, \N)({\phi \lor \sigma}) \define max((I, \M, \N)({\phi}), (I,
|
||||
\M, \N)({\sigma}))
|
||||
\\
|
||||
&(I, \M, \N)({\phi \subset \sigma}) \define \left\{ \begin{array}{ll}
|
||||
\tt & \textit{iff }\vspace{0.3em} (I, \M,
|
||||
\N)({\phi}) \geq (I, \M, \N)({\sigma}) \\
|
||||
\ff & \textrm{otherwise}
|
||||
\end{array}\right.\\
|
||||
&(I, \M, \N)({\bfK \phi}) \define
|
||||
\left\{\begin{array}{ll}
|
||||
\tt & \textrm{iff } (J, \MM, \N)({\phi}) = \tt \textrm{ \boxedt{for all}
|
||||
} J
|
||||
\in M
|
||||
\\
|
||||
\ff & \textrm{iff } (J, \MM, \N)({\phi}) = \ff \textrm{ \boxedt{for some} } J
|
||||
\in M_1
|
||||
\\
|
||||
\uu & \textrm{otherwise}
|
||||
\end{array}\right.
|
||||
\\
|
||||
&(I, \M, \N)({\Not \phi}) \define
|
||||
\left\{\begin{array}{ll}
|
||||
\tt & \textrm{iff } (J, \M, \NN)({\phi}) = \ff \textrm{ \boxedt{for some} } J
|
||||
\in N_1
|
||||
\\
|
||||
\ff & \textrm{iff } (J, \M, \NN)({\phi}) = \tt \textrm{ \boxedt{for all} } J
|
||||
\in N
|
||||
\\
|
||||
\uu & \textrm{otherwise}
|
||||
\end{array}\right.
|
||||
\end{align*}
|
||||
|
||||
While this logic captures many nonmonotonic semantics, we restrict our focus to hybrid MKNF Knowledge bases,
|
||||
a syntactic restriction that captures combining answer set programs with ontologies.
|
||||
An (MKNF) program $\P$ is a set of (MKNF) rules. A rule $r$ is written as
|
||||
follows:
|
||||
\begin{align*}\label{MKNFRules}
|
||||
\bfK h \leftarrow \bfK p_0,\dots,~\bfK p_j,~ \Not n_0,\dots,~\Not n_k.
|
||||
\end{align*}
|
||||
In the above, the atoms $h, p_0, n_0, \dots, p_j, $ and $n_k $ are function-free first-order
|
||||
atoms
|
||||
of the form $p(t_0, \dots,~t_n )$ where $p$ is a predicate and $t_0,
|
||||
\dots,~t_n$ are either constants or variables.
|
||||
We call an MKNF formula $\phi$ \textit{ground} if it does not contain variables.
|
||||
The corresponding MKNF formula $\pi(r)$ for a rule $r$ is as follows:
|
||||
\begin{align*}
|
||||
\pi(r) \define \forall \vec{x},~ \bfK h \subset \bfK p_0 \land \dots \land
|
||||
\bfK p_j \land \Not n_0 \land \dots \land \Not n_k
|
||||
\end{align*}
|
||||
where $\vec{x}$ is a vector of all variables appearing in the rule.
|
||||
We will use the following abbreviations:
|
||||
\begin{align*}
|
||||
&\pi(\P) \define \bigwedge\limits_{r \in \P} \pi(r)\\
|
||||
&\head(r) = \bfK h \\
|
||||
&\bodyp(r) = \{ \bfK p_0, \dots,~ \bfK p_j \} \\
|
||||
&\bodyn(r) = \{ \Not n_0, \dots,~ \Not n_k \} \\
|
||||
&\bfK(\bodyn(r)) = \{ \bfK a ~|~ \Not a \in \bodyn(r) \}
|
||||
\end{align*}
|
||||
An \textit{ontology} $\OO$ is a decidable description logic (DL) knowledge base translatable to first-order logic; we denote its translation with $\pi(\OO)$.
|
||||
We also assume that the ontology's entailment relation can be checked in polynomial time.
|
||||
|
||||
A {\em normal hybrid MKNF knowledge base} (or knowledge base for short) $\KB
|
||||
= (\OO, \P)$ contains a program and an ontology.
|
||||
The semantics of a knowledge base corresponds to the MKNF formula $\pi(\KB) = \pi(\P) \land \bfK \pi(\O)$.
|
||||
We assume, without loss of generality \cite{knorrlocal2011}, that any given hybrid MKNF knowledge base $\KB = (\OO, \P)$ is ground, that is, $\P$ does not contain any variables\footnote{Not every knowledge base can be grounded. The prevalent class of groundable knowledge bases is the class of knowledge bases that are DL-safe~\cite{motikreconciling2010}.}.
|
||||
This ensures that $\KB$ is decidable.
|
||||
|
||||
A {\em (\mbox{3-valued}) MKNF interpretation} $(M, N)$ is a pair of sets of
|
||||
first-order interpretations where $\emptyset \subset N \subseteq M$.
|
||||
We say an MKNF interpretation $(M, N)$ \textit{satisfies} a knowledge base $\KB
|
||||
= (\OO, \P)$ if for each $I \in M$,
|
||||
$(I, \MN, \MN) (\pi (\KB)) = \tt$.
|
||||
|
||||
\begin{definition} \label{model}
|
||||
A (\mbox{3-valued}) MKNF interpretation $(M, N)$ is a
|
||||
\textit{(\mbox{3-valued}) MKNF model} of a normal hybrid MKNF knowledge base
|
||||
$\KB$
|
||||
if $(M, N)$ satisfies $\pi(\KB)$ and for every \mbox{3-valued} MKNF
|
||||
interpretation pair $(M', N')$ where $M \subseteq M'$, $N \subseteq N'$,
|
||||
$(M, N) \not= (M', N')$, there is some $I \in M'$ s.t. $(I, \langle M', N'
|
||||
\rangle, \MN)(\pi(\KB)) \not= \tt$.
|
||||
\end{definition}
|
||||
|
||||
It is often convenient to only deal with atoms that appear inside $\P$.
|
||||
We use $\KAK$ to denote the set of \Katoms{} that appear as either $\bfK a$ or $\Not a$ in the program, and we use $\OBO{S}$ to denote the objective knowledge w.r.t.\ a set of \Katoms{} $S$.
|
||||
\begin{align*}
|
||||
\KAK &\define \{ \bfK a ~|~ r \in \P,~ \bfK a \in \Big(\{ \head(r) \} \union
|
||||
\bodyp(r) \union \bfK(\bodyn(r)) \Big) \} \\
|
||||
\OBO{S} &\define \big\{ \pi(\OO) \big\} \union \big\{ a ~|~ \bfK a \in S
|
||||
\}
|
||||
\end{align*}
|
||||
A \textit{$\bfK$-interpretation} $(T, P) \in \wp(\KAK)^2$ is a pair of sets of \Katoms{}\footnote{We use $\wp(S)$ to denote the powerset of $S$, i.e., $\wp(S) = \{ X ~|~ X \subseteq S \}$}.
|
||||
We say that $(T, P)$ is consistent if $T \subseteq P$.
|
||||
An MKNF interpretation pair $(M, N)$ uniquely {\em induces} a consistent \bfK-interpretation $(T, P)$ where for
|
||||
each $\bfK a \in \KAK$:
|
||||
\begin{align*}
|
||||
\bfK a \in \boxed{(T \intersect P)} \textrm{ if } \forall I \in M, \mmeval{\bfK a} &= \boxed{\tt}\\
|
||||
\bfK a \not\in \boxed{\hspace{0.5cm} P \hspace{0.5cm}} \textrm{ if } \forall I \in M, \mmeval{\bfK a} &= \boxed{\ff}\\
|
||||
\bfK a \in \boxed{(P \setminus T)} \textrm{ if } \forall I \in M, \mmeval{\bfK a} &= \boxed{\uu}
|
||||
\end{align*}
|
||||
We can acquire the set of false \Katoms{} by taking the complement of $P$, i.e., $F = \KAK \setminus P$ and $P = \KAK \setminus F$.
|
||||
When we use the letters $T$, $P$, and $F$, we always mean sets of true, possibly true, and false \Katoms{} respectively.
|
||||
|
||||
|
||||
We say that a \bfK-interpretation $(T, P)$ \textit{extends} to an MKNF interpretation $(M, N)$ if $(T, P)$ is consistent, $\OBO{P}$ is consistent, and
|
||||
\begin{align*}
|
||||
(M, N) = (\{ I ~|~ \OBO{T} \models I \}, \{ I ~|~ \OBO{P} \models I \})
|
||||
\end{align*}
|
||||
This operation extends $(T, P)$ to the $\subseteq$-maximal MKNF interpretation that induces $(T, P)$.
|
||||
|
||||
|
||||
We comment on how the relations \textit{induces} and \textit{extends} are related.
|
||||
\begin{remark}
|
||||
Let $(M, N)$ be an MKNF {\bf model} of an MKNF knowledge base $\KB$.
|
||||
A \bfK-interpretation $(T, P)$ that extends to $(M, N)$ exists, is unique, and is the \bfK-interpretation induced by $(M, N)$.
|
||||
\end{remark}
|
||||
We say that an MKNF interpretation $(M, N)$ \textit{weakly induces} a {\bfK-interpretation} $(T, P)$ if $(M, N)$ induces a {\bfK-interpretation} $(T^*, P^*)$ where $T \subseteq T^*$ and $P^* \subseteq P$.
|
||||
Similarly, a {\bfK-interpretation} \textit{weakly extends} to an MKNF interpretation $(M, N)$ if there exists an interpretation $(T^*, P^*)$ that extends to $(M, N)$ such that $T \subseteq T^*$, $P^* \subseteq P$.
|
||||
Intuitively, we are leveraging the knowledge ordering $\uu < \tt$ and $\uu < \ff$. A \bfK-interpretation is weakly induced by an MKNF interpretation if that MKNF interpretation induces a \bfK-interpretation that ``knows more'' than the original \bfK-interpretation.
|
||||
|
||||
There are MKNF interpretations to which no \bfK-interpretation extends; however, these are of little interest: either $\OBO{P}$ is inconsistent or $(M, N)$ is not maximal w.r.t.\ atoms that do not appear in $\KAK$. Similarly, there exist \bfK-interpretations that extend to MKNF interpretations that do not induce them. If this is the case, then the \bfK-interpretation is missing some logical consequences of the ontology and this should be corrected.
|
||||
We define the class of \bfK-interpretations that excludes the undesirable \bfK-interpretations and MKNF interpretations from focus.
|
||||
\begin{definition}\label{saturated}
|
||||
A \bfK-interpretation $(T, P)$ of an MKNF knowledge base $\KB$ is {\em saturated} if it can be extended to an MKNF interpretation $(M, N)$ that induces $(T, P)$.
|
||||
Equivalently, a \bfK-interpretation is saturated iff $(T, P)$ and $\OBO{P}$ are consistent,
|
||||
$ \OBO{P} \not\models a~\textrm{for each } \bfK a \in (\KAK \setminus P)$ and $\OBO{T} \not\models a~\textrm{for each } \bfK a \in (\KAK \setminus T)$.
|
||||
\end{definition}
|
||||
|
||||
\subsection{An Example Knowledge Base}
|
||||
Hybrid MKNF is equivalent to answer set semantics when the ontology is empty.
|
||||
The definition of an MKNF program given above is precisely the formulation of stable model semantics in the logic of MKNF~\cite{lifschitznonmonotonic1991}.
|
||||
We give an example of a knowledge base to demonstrate the combined reasoning of an answer set program with an ontology.
|
||||
\begin{example}
|
||||
Let $\KB = (\OO, \P)$ be the following knowledge base
|
||||
\begin{align*}
|
||||
\pi(\OO) &= \{ a \lor \neg b \} \\
|
||||
(\textrm{The definition of } &\P \textrm{ follows})\\
|
||||
\bfK a &\leftarrow \Not b\\
|
||||
\bfK b &\leftarrow \Not a
|
||||
\end{align*}
|
||||
This knowledge base has two MKNF models
|
||||
\begin{align*}
|
||||
&\Big(\Big\{\{ a, b \}, \{ a, \neg b\}, \{ \neg a, \neg b \}\Big\}, \Big\{\{ a, b \} \Big\}\Big) \\
|
||||
&\hspace{1cm}\textrm{ which induces the \bfK-interpretation } (\emptyset, \{ \bfK a, \bfK b \})\\
|
||||
&(M, M) \textrm{ where } M = \Big\{\{ a, \neg b\}, \{ a, b \} \Big\}\\
|
||||
&\hspace{1cm}\textrm{ which induces the \bfK-interpretation } (\{ \bfK a \}, \{ \bfK a \})
|
||||
\end{align*}
|
||||
Above we have the answer set program $\P$ that, without $\OO$, admits three partial stable models. The ontology $\OO$ encodes an exclusive choice between the atoms $a$ and $b$. The program allows for both $a$ and $b$ to be undefined.
|
||||
The use of the ontology here is effectively a filter that removes all interpretations where $a$ is false but $b$ is true.
|
||||
|
||||
The mixing of negation as failure and classical negation is not always straightforward. For example
|
||||
\begin{align*}
|
||||
\pi(\OO) &= \{ a \lor b \} \\
|
||||
\P &= \emptyset
|
||||
\end{align*}
|
||||
The above admits just one MKNF model
|
||||
\begin{align*}
|
||||
&\big(M, M \big) \textrm{ where } M = \Big\{ \{ a, \neg b \}, \{ \neg a, b \}, \{ a, b \} \Big\}\\
|
||||
&\hspace{1cm}\textrm{ which induces the \bfK-interpretation } (\emptyset, \emptyset)
|
||||
\end{align*}
|
||||
From the perspective of $\P$, which is only concerned with \bfK-interpretations, all atoms are false.
|
||||
However, the interpretation $\{ \neg a, \neg b \}$ is absent from the model which ensures that $\O$ is still satisfied.
|
||||
Recall that $\pi(\KB) = \bfK (a \lor b)$.
|
||||
|
||||
\end{example}
|
||||
|
||||
|
||||
\bibliographystyle{plain}
|
||||
\bibliography{refs}
|
||||
|
||||
|
||||
\end{document}
|
|
@@ -0,0 +1,453 @@
|
|||
|
||||
@book{romanlattices2008,
|
||||
location = {New York, {NY}},
|
||||
title = {Lattices and Ordered Sets},
|
||||
isbn = {978-0-387-78900-2 978-0-387-78901-9},
|
||||
url = {http://link.springer.com/10.1007/978-0-387-78901-9},
|
||||
publisher = {Springer New York},
|
||||
author = {Roman, Steven},
|
||||
urldate = {2021-07-28},
|
||||
year = {2008},
|
||||
langid = {english},
|
||||
doi = {10.1007/978-0-387-78901-9},
|
||||
file = {2008 - Lattices and Ordered Sets.pdf:/home/capybara/Zotero/storage/5QU8WTUR/2008 - Lattices and Ordered Sets.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@InProceedings{RRaft,
|
||||
author="Bi, Yi
|
||||
and You, Jia-Huai
|
||||
and Feng, Zhiyong",
|
||||
editor="Kontchakov, Roman
|
||||
and Mugnier, Marie-Laure",
|
||||
title="A Generalization of Approximation Fixpoint Theory and Application",
|
||||
booktitle="Web Reasoning and Rule Systems",
|
||||
year="2014",
|
||||
publisher="Springer International Publishing",
|
||||
address="Cham",
|
||||
pages="45--59"
|
||||
}
|
||||
|
||||
|
||||
@article{gelfondstable2000,
|
||||
title = {The Stable Model Semantics For Logic Programming},
|
||||
volume = {2},
|
||||
abstract = {We propose a new declarative semantics for logic programs with negation. Its formulation is quite simple; at the same time, it is more general than the iterated xed point semantics for stratied programs, and is applicable to some useful programs that are not stratied.},
|
||||
journal = {Logic Programming},
|
||||
shortjournal = {Logic Programming},
|
||||
author = {Gelfond, Michael and Lifschitz, Vladimir},
|
||||
date = {2000-12-14},
|
||||
year = "2000",
|
||||
file = {Full Text PDF:/home/gopher/Zotero/storage/LSRXUIBD/The_Stable_Model_Semantics_For_Logic_Programming.pdf:application/pdf}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
@inproceedings{aftjust,
|
||||
author = {Simon Marynissen and
|
||||
Bart Bogaerts and
|
||||
Marc Denecker},
|
||||
editor = {Zhi{-}Hua Zhou},
|
||||
title = {On the Relation Between Approximation Fixpoint Theory and Justification
|
||||
Theory},
|
||||
booktitle = {Proceedings of the Thirtieth International Joint Conference on Artificial
|
||||
Intelligence, {IJCAI} 2021, Virtual Event / Montreal, Canada, 19-27
|
||||
August 2021},
|
||||
pages = {1973--1980},
|
||||
publisher = {ijcai.org},
|
||||
year = {2021},
|
||||
url = {https://doi.org/10.24963/ijcai.2021/272},
|
||||
doi = {10.24963/ijcai.2021/272},
|
||||
timestamp = {Sat, 09 Apr 2022 12:47:19 +0200},
|
||||
biburl = {https://dblp.org/rec/conf/ijcai/Marynissen0D21.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@article{liuyou2021,
|
||||
author = {Fangfang Liu and
|
||||
Jia{-}Huai You},
|
||||
title = {Alternating Fixpoint Operator for Hybrid {MKNF} Knowledge Bases as
|
||||
an Approximator of {AFT}},
|
||||
journal = {Theory Pract. Log. Program.},
|
||||
volume = {22},
|
||||
number = {2},
|
||||
pages = {305--334},
|
||||
year = {2022},
|
||||
doi = {10.1017/S1471068421000168},
|
||||
timestamp = {Wed, 27 Apr 2022 20:10:27 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/tplp/LiuY22.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{liuyou2019,
|
||||
author = {Fangfang Liu and
|
||||
Jia{-}Huai You},
|
||||
editor = {Paul Fodor and
|
||||
Marco Montali and
|
||||
Diego Calvanese and
|
||||
Dumitru Roman},
|
||||
title = {Alternating Fixpoint Operator for Hybrid {MKNF} Knowledge Bases as
|
||||
an Approximator of {AFT}},
|
||||
booktitle = {Rules and Reasoning - Third International Joint Conference, RuleML+RR
|
||||
2019, Bolzano, Italy, September 16-19, 2019, Proceedings},
|
||||
series = {Lecture Notes in Computer Science},
|
||||
volume = {11784},
|
||||
pages = {113--127},
|
||||
publisher = {Springer},
|
||||
year = {2019},
|
||||
url = {https://doi.org/10.1007/978-3-030-31095-0\_8},
|
||||
doi = {10.1007/978-3-030-31095-0\_8},
|
||||
timestamp = {Tue, 17 Sep 2019 08:40:59 +0200},
|
||||
biburl = {https://dblp.org/rec/conf/ruleml/LiuY19.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
@article{aftagg,
|
||||
author = {Linde Vanbesien and
|
||||
Maurice Bruynooghe and
|
||||
Marc Denecker},
|
||||
title = {Analyzing Semantics of Aggregate Answer Set Programming Using Approximation
|
||||
Fixpoint Theory},
|
||||
journal = {Theory Pract. Log. Program.},
|
||||
volume = {22},
|
||||
number = {4},
|
||||
pages = {523--537},
|
||||
year = {2022},
|
||||
url = {https://doi.org/10.1017/S1471068422000126},
|
||||
doi = {10.1017/S1471068422000126},
|
||||
timestamp = {Sat, 10 Sep 2022 20:58:45 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/tplp/VanbesienBD22.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@article{DeneckerMT04,
|
||||
author = {Marc Denecker and
|
||||
Victor W. Marek and
|
||||
Miroslaw Truszczynski},
|
||||
title = {Ultimate approximation and its application in nonmonotonic knowledge
|
||||
representation systems},
|
||||
journal = {Inf. Comput.},
|
||||
volume = {192},
|
||||
number = {1},
|
||||
pages = {84--121},
|
||||
year = {2004},
|
||||
doi = {10.1016/j.ic.2004.02.004},
|
||||
timestamp = {Fri, 12 Feb 2021 22:16:22 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/iandc/DeneckerMT04.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@incollection{denecker2000approximations,
|
||||
title={Approximations, stable operators, well-founded fixpoints and applications in nonmonotonic reasoning},
|
||||
author={Denecker, Marc and Marek, Victor and Truszczy{\'n}ski, Miros{\l}aw},
|
||||
booktitle={Logic-Based Artificial Intelligence},
|
||||
pages={127--144},
|
||||
year={2000},
|
||||
doi={10.1007/978-1-4615-1567-8_6},
|
||||
publisher = {Springer},
|
||||
}
|
||||
|
||||
@article{fittingfixpointsurvey,
|
||||
author = {Melvin Fitting},
|
||||
title = {Fixpoint semantics for logic programming a survey},
|
||||
journal = {Theor. Comput. Sci.},
|
||||
volume = {278},
|
||||
number = {1-2},
|
||||
pages = {25--51},
|
||||
year = {2002},
|
||||
url = {https://doi.org/10.1016/S0304-3975(00)00330-3},
|
||||
doi = {10.1016/S0304-3975(00)00330-3},
|
||||
timestamp = {Wed, 17 Feb 2021 21:58:01 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/tcs/Fitting02.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@article{Ji17,
|
||||
author = {Jianmin Ji and
|
||||
Fangfang Liu and
|
||||
Jia{-}Huai You},
|
||||
title = {Well-founded operators for normal hybrid {MKNF} knowledge bases},
|
||||
journal = {Theory Pract. Log. Program.},
|
||||
volume = {17},
|
||||
number = {5-6},
|
||||
pages = {889--905},
|
||||
year = {2017},
|
||||
doi = {10.1017/S1471068417000291},
|
||||
timestamp = {Mon, 26 Oct 2020 08:22:48 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/tplp/JiLY17.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
@article{Fitting02,
|
||||
author = {Melvin Fitting},
|
||||
title = {Fixpoint semantics for logic programming a survey},
|
||||
journal = {Theor. Comput. Sci.},
|
||||
volume = {278},
|
||||
number = {1-2},
|
||||
pages = {25--51},
|
||||
year = {2002},
|
||||
doi = {10.1016/S0304-3975(00)00330-3},
|
||||
timestamp = {Wed, 17 Feb 2021 21:58:01 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/tcs/Fitting02.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
@article{knorrlocal2011,
|
||||
author = {Matthias Knorr and
|
||||
Jos{\'{e}} J{\'{u}}lio Alferes and
|
||||
Pascal Hitzler},
|
||||
title = {Local closed world reasoning with description logics under the well-founded
|
||||
semantics},
|
||||
journal = {Artif. Intell.},
|
||||
volume = {175},
|
||||
number = {9-10},
|
||||
pages = {1528--1554},
|
||||
year = {2011},
|
||||
doi = {10.1016/j.artint.2011.01.007},
|
||||
timestamp = {Wed, 23 Feb 2022 14:32:41 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/ai/KnorrAH11.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@article{przymusinskistable1991,
|
||||
author = {Teodor C. Przymusinski},
|
||||
title = {Stable Semantics for Disjunctive Programs},
|
||||
journal = {New Gener. Comput.},
|
||||
volume = {9},
|
||||
number = {3/4},
|
||||
pages = {401--424},
|
||||
year = {1991},
|
||||
doi = {10.1007/BF03037171},
|
||||
timestamp = {Thu, 14 May 2020 22:23:50 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/ngc/Przymusinski91.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{lifschitznonmonotonic1991,
|
||||
author = {Vladimir Lifschitz},
|
||||
editor = {John Mylopoulos and
|
||||
Raymond Reiter},
|
||||
title = {Nonmonotonic Databases and Epistemic Queries},
|
||||
booktitle = {Proceedings of the 12th International Joint Conference on Artificial
|
||||
Intelligence. Sydney, Australia, August 24-30, 1991},
|
||||
pages = {381--386},
|
||||
publisher = {Morgan Kaufmann},
|
||||
year = {1991},
|
||||
url = {http://ijcai.org/Proceedings/91-1/Papers/059.pdf},
|
||||
timestamp = {Tue, 20 Aug 2019 16:18:13 +0200},
|
||||
biburl = {https://dblp.org/rec/conf/ijcai/Lifschitz91.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@article{motikreconciling2010,
|
||||
author = {Boris Motik and
|
||||
Riccardo Rosati},
|
||||
title = {Reconciling description logics and rules},
|
||||
journal = {J. {ACM}},
|
||||
volume = {57},
|
||||
number = {5},
|
||||
pages = {30:1--30:62},
|
||||
year = {2010},
|
||||
doi = {10.1145/1754399.1754403},
|
||||
timestamp = {Mon, 16 Sep 2019 14:49:47 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/jacm/MotikR10.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@article{liuthreevalued2017,
|
||||
author = {Fangfang Liu and
|
||||
Jia{-}Huai You},
|
||||
title = {Three-valued semantics for hybrid {MKNF} knowledge bases revisited},
|
||||
journal = {Artif. Intell.},
|
||||
volume = {252},
|
||||
pages = {123--138},
|
||||
year = {2017},
|
||||
doi = {10.1016/j.artint.2017.08.003},
|
||||
timestamp = {Thu, 28 Dec 2017 16:12:02 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/ai/LiuY17.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@article{revisited,
|
||||
author = {Fangfang Liu and
|
||||
Jia{-}Huai You},
|
||||
title = {Three-valued semantics for hybrid {MKNF} knowledge bases revisited},
|
||||
journal = {Artif. Intell.},
|
||||
volume = {252},
|
||||
pages = {123--138},
|
||||
year = {2017},
|
||||
url = {https://doi.org/10.1016/j.artint.2017.08.003},
|
||||
doi = {10.1016/j.artint.2017.08.003},
|
||||
timestamp = {Thu, 28 Dec 2017 16:12:02 +0100},
|
||||
biburl = {https://dblp.org/rec/journals/ai/LiuY17.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
|
||||
@article{tarskilatticetheoretical1955,
|
||||
author = {Alfred Tarski},
|
||||
title = {{A lattice-theoretical fixpoint theorem and its applications.}},
|
||||
volume = {5},
|
||||
journal = {Pacific Journal of Mathematics},
|
||||
number = {2},
|
||||
publisher = {Pacific Journal of Mathematics, A Non-profit Corporation},
|
||||
pages = {285 -- 309},
|
||||
doi = {10.2140/pjm.1955.5.285},
|
||||
year = {1955}
|
||||
}
|
||||
|
||||
@article{gebserconflictdriven2012,
|
||||
author = {Martin Gebser and
|
||||
Benjamin Kaufmann and
|
||||
Torsten Schaub},
|
||||
title = {Conflict-driven answer set solving: From theory to practice},
|
||||
journal = {Artif. Intell.},
|
||||
volume = {187},
|
||||
pages = {52--89},
|
||||
year = {2012},
|
||||
doi = {10.1016/j.artint.2012.04.001},
|
||||
timestamp = {Fri, 09 Apr 2021 18:34:15 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/ai/GebserKS12.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{gebseradvanced2013,
|
||||
author = {Martin Gebser and
|
||||
Benjamin Kaufmann and
|
||||
Torsten Schaub},
|
||||
editor = {Francesca Rossi},
|
||||
title = {Advanced Conflict-Driven Disjunctive Answer Set Solving},
|
||||
booktitle = {{IJCAI} 2013, Proceedings of the 23rd International Joint Conference
|
||||
on Artificial Intelligence, Beijing, China, August 3-9, 2013},
|
||||
pages = {912--918},
|
||||
publisher = {{IJCAI/AAAI}},
|
||||
year = {2013},
|
||||
url = {http://www.aaai.org/ocs/index.php/IJCAI/IJCAI13/paper/view/6835},
|
||||
timestamp = {Tue, 08 Mar 2022 17:41:53 +0100},
|
||||
biburl = {https://dblp.org/rec/conf/ijcai/GebserKS13.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{killenfixpoint2021,
|
||||
author = {Spencer Killen and
|
||||
Jia{-}Huai You},
|
||||
title = {Fixpoint Characterizations of Disjunctive Hybrid {MKNF} Knowledge
|
||||
Bases},
|
||||
booktitle = {Proceedings of the International Conference on Logic Programming 2021
|
||||
Workshops co-located with the 37th International Conference on Logic
|
||||
Programming {(ICLP} 2021), Porto, Portugal (virtual), September 20th-21st,
|
||||
2021},
|
||||
series = {{CEUR} Workshop Proceedings},
|
||||
volume = {2970},
|
||||
publisher = {CEUR-WS.org},
|
||||
year = {2021},
|
||||
url = {http://ceur-ws.org/Vol-2970/aspocppaper3.pdf},
|
||||
timestamp = {Thu, 28 Oct 2021 14:11:18 +0200},
|
||||
biburl = {https://dblp.org/rec/conf/iclp/KillenY21.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{killen2022,
|
||||
author = {Spencer Killen and
|
||||
Jia{-}Huai You},
|
||||
editor = {Yuliya Lierler and
|
||||
Jos{\'{e}} F. Morales and
|
||||
Carmine Dodaro and
|
||||
Ver{\'{o}}nica Dahl and
|
||||
Martin Gebser and
|
||||
Tuncay Tekle},
|
||||
title = {A Fixpoint Characterization of Three-Valued Disjunctive Hybrid {MKNF}
|
||||
Knowledge Bases},
|
||||
booktitle = {Proceedings 38th International Conference on Logic Programming, {ICLP}
|
||||
2022 Technical Communications / Doctoral Consortium, Haifa, Israel,
|
||||
31st July 2022 - 6th August 2022},
|
||||
series = {{EPTCS}},
|
||||
volume = {364},
|
||||
pages = {51--64},
|
||||
year = {2022},
|
||||
url = {https://doi.org/10.4204/EPTCS.364.6},
|
||||
doi = {10.4204/EPTCS.364.6},
|
||||
timestamp = {Sat, 15 Oct 2022 11:57:01 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/corr/abs-2208-03087.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{killenunfounded,
|
||||
author = {Spencer Killen and
|
||||
Jia{-}Huai You},
|
||||
editor = {Meghyn Bienvenu and
|
||||
Gerhard Lakemeyer and
|
||||
Esra Erdem},
|
||||
title = {Unfounded Sets for Disjunctive Hybrid {MKNF} Knowledge Bases},
|
||||
booktitle = {Proceedings of the 18th International Conference on Principles of
|
||||
Knowledge Representation and Reasoning, {KR} 2021, Online event, November
|
||||
3-12, 2021},
|
||||
pages = {432--441},
|
||||
year = {2021},
|
||||
doi = {10.24963/kr.2021/41},
|
||||
timestamp = {Wed, 03 Nov 2021 12:47:32 +0100},
|
||||
biburl = {https://dblp.org/rec/conf/kr/KillenY21.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{heyninckapproximation2021,
|
||||
author = {Jesse Heyninck and
|
||||
Ofer Arieli},
|
||||
editor = {Meghyn Bienvenu and
|
||||
Gerhard Lakemeyer and
|
||||
Esra Erdem},
|
||||
title = {Approximation Fixpoint Theory for Non-Deterministic Operators and
|
||||
Its Application in Disjunctive Logic Programming},
|
||||
booktitle = {Proceedings of the 18th International Conference on Principles of
|
||||
Knowledge Representation and Reasoning, {KR} 2021, Online event, November
|
||||
3-12, 2021},
|
||||
pages = {334--344},
|
||||
year = {2021},
|
||||
doi = {10.24963/kr.2021/32},
|
||||
timestamp = {Wed, 03 Nov 2021 12:47:32 +0100},
|
||||
biburl = {https://dblp.org/rec/conf/kr/HeyninckA21.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@inproceedings{kaminskiefficient2015,
|
||||
author = {Tobias Kaminski and
|
||||
Matthias Knorr and
|
||||
Jo{\~{a}}o Leite},
|
||||
editor = {Qiang Yang and
|
||||
Michael J. Wooldridge},
|
||||
title = {Efficient Paraconsistent Reasoning with Ontologies and Rules},
|
||||
booktitle = {Proceedings of the Twenty-Fourth International Joint Conference on
|
||||
Artificial Intelligence, {IJCAI} 2015, Buenos Aires, Argentina, July
|
||||
25-31, 2015},
|
||||
pages = {3098--3105},
|
||||
publisher = {{AAAI} Press},
|
||||
year = {2015},
|
||||
url = {http://ijcai.org/Abstract/15/437},
|
||||
timestamp = {Wed, 23 Feb 2022 14:32:42 +0100},
|
||||
biburl = {https://dblp.org/rec/conf/ijcai/KaminskiKL15.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@article{headcycleisNP,
|
||||
author = {Rachel Ben{-}Eliyahu and
|
||||
Rina Dechter},
|
||||
title = {Propositional Semantics for Disjunctive Logic Programs},
|
||||
journal = {Ann. Math. Artif. Intell.},
|
||||
volume = {12},
|
||||
number = {1-2},
|
||||
pages = {53--87},
|
||||
year = {1994},
|
||||
doi = {10.1007/BF01530761},
|
||||
timestamp = {Sun, 28 May 2017 13:20:59 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/amai/Ben-EliyahuD94.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
token literal names:
|
||||
null
|
||||
'.'
|
||||
','
|
||||
':-'
|
||||
'('
|
||||
')'
|
||||
'-'
|
||||
'|'
|
||||
'&'
|
||||
'->'
|
||||
'not'
|
||||
null
|
||||
null
|
||||
null
|
||||
|
||||
token symbolic names:
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
COMMENT
|
||||
WS
|
||||
IDENTIFIER
|
||||
|
||||
rule names:
|
||||
kb
|
||||
lrule
|
||||
head
|
||||
body
|
||||
orule
|
||||
oformula
|
||||
katom
|
||||
oatom
|
||||
patom
|
||||
natom
|
||||
|
||||
|
||||
atn:
|
||||
[4, 1, 13, 91, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 1, 0, 1, 0, 5, 0, 23, 8, 0, 10, 0, 12, 0, 26, 9, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 33, 8, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 5, 2, 40, 8, 2, 10, 2, 12, 2, 43, 9, 2, 1, 3, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9, 3, 3, 3, 54, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 67, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 75, 8, 5, 10, 5, 12, 5, 78, 9, 5, 1, 6, 1, 6, 3, 6, 82, 8, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 0, 1, 10, 10, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 1, 1, 0, 7, 8, 92, 0, 24, 1, 0, 0, 0, 2, 32, 1, 0, 0, 0, 4, 36, 1, 0, 0, 0, 6, 44, 1, 0, 0, 0, 8, 55, 1, 0, 0, 0, 10, 66, 1, 0, 0, 0, 12, 81, 1, 0, 0, 0, 14, 83, 1, 0, 0, 0, 16, 85, 1, 0, 0, 0, 18, 87, 1, 0, 0, 0, 20, 23, 3, 2, 1, 0, 21, 23, 3, 8, 4, 0, 22, 20, 1, 0, 0, 0, 22, 21, 1, 0, 0, 0, 23, 26, 1, 0, 0, 0, 24, 22, 1, 0, 0, 0, 24, 25, 1, 0, 0, 0, 25, 1, 1, 0, 0, 0, 26, 24, 1, 0, 0, 0, 27, 28, 3, 4, 2, 0, 28, 29, 3, 6, 3, 0, 29, 33, 1, 0, 0, 0, 30, 33, 3, 4, 2, 0, 31, 33, 3, 6, 3, 0, 32, 27, 1, 0, 0, 0, 32, 30, 1, 0, 0, 0, 32, 31, 1, 0, 0, 0, 33, 34, 1, 0, 0, 0, 34, 35, 5, 1, 0, 0, 35, 3, 1, 0, 0, 0, 36, 41, 5, 13, 0, 0, 37, 38, 5, 2, 0, 0, 38, 40, 5, 13, 0, 0, 39, 37, 1, 0, 0, 0, 40, 43, 1, 0, 0, 0, 41, 39, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 5, 1, 0, 0, 0, 43, 41, 1, 0, 0, 0, 44, 53, 5, 3, 0, 0, 45, 50, 3, 12, 6, 0, 46, 47, 5, 2, 0, 0, 47, 49, 3, 12, 6, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 54, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 45, 1, 0, 0, 0, 53, 54, 1, 0, 0, 0, 54, 7, 1, 0, 0, 0, 55, 56, 3, 10, 5, 0, 56, 57, 5, 1, 0, 0, 57, 9, 1, 0, 0, 0, 58, 59, 6, 5, -1, 0, 59, 60, 5, 4, 0, 0, 60, 61, 3, 10, 5, 0, 61, 62, 5, 5, 0, 0, 62, 67, 1, 0, 0, 0, 63, 67, 3, 14, 7, 0, 64, 65, 5, 6, 0, 0, 65, 67, 3, 10, 5, 3, 66, 58, 1, 0, 0, 0, 66, 63, 1, 0, 0, 0, 66, 64, 1, 0, 0, 0, 67, 76, 1, 0, 0, 0, 68, 69, 10, 2, 0, 0, 69, 70, 7, 0, 0, 0, 70, 75, 3, 10, 5, 3, 71, 72, 10, 1, 0, 0, 72, 73, 5, 9, 0, 0, 73, 75, 3, 10, 5, 1, 74, 68, 1, 0, 0, 0, 74, 71, 1, 0, 0, 0, 75, 78, 1, 0, 0, 0, 76, 74, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 11, 1, 0, 0, 0, 78, 76, 1, 0, 0, 0, 79, 82, 3, 16, 8, 0, 80, 82, 3, 18, 9, 0, 81, 79, 1, 0, 0, 0, 81, 80, 1, 0, 0, 0, 82, 13, 1, 0, 0, 0, 83, 84, 5, 13, 0, 0, 84, 15, 1, 0, 0, 0, 85, 86, 5, 13, 0, 0, 86, 17, 1, 0, 0, 0, 87, 88, 5, 10, 0, 0, 88, 89, 5, 13, 0, 0, 89, 19, 1, 0, 0, 0, 10, 22, 24, 32, 41, 50, 53, 66, 74, 76, 81]
|
|
@ -0,0 +1,23 @@
|
|||
T__0=1
|
||||
T__1=2
|
||||
T__2=3
|
||||
T__3=4
|
||||
T__4=5
|
||||
T__5=6
|
||||
T__6=7
|
||||
T__7=8
|
||||
T__8=9
|
||||
T__9=10
|
||||
COMMENT=11
|
||||
WS=12
|
||||
IDENTIFIER=13
|
||||
'.'=1
|
||||
','=2
|
||||
':-'=3
|
||||
'('=4
|
||||
')'=5
|
||||
'-'=6
|
||||
'|'=7
|
||||
'&'=8
|
||||
'->'=9
|
||||
'not'=10
|
|
@ -0,0 +1,56 @@
|
|||
token literal names:
|
||||
null
|
||||
'.'
|
||||
','
|
||||
':-'
|
||||
'('
|
||||
')'
|
||||
'-'
|
||||
'|'
|
||||
'&'
|
||||
'->'
|
||||
'not'
|
||||
null
|
||||
null
|
||||
null
|
||||
|
||||
token symbolic names:
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
null
|
||||
COMMENT
|
||||
WS
|
||||
IDENTIFIER
|
||||
|
||||
rule names:
|
||||
T__0
|
||||
T__1
|
||||
T__2
|
||||
T__3
|
||||
T__4
|
||||
T__5
|
||||
T__6
|
||||
T__7
|
||||
T__8
|
||||
T__9
|
||||
COMMENT
|
||||
WS
|
||||
IDENTIFIER
|
||||
|
||||
channel names:
|
||||
DEFAULT_TOKEN_CHANNEL
|
||||
HIDDEN
|
||||
|
||||
mode names:
|
||||
DEFAULT_MODE
|
||||
|
||||
atn:
|
||||
[4, 0, 13, 76, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 5, 10, 54, 8, 10, 10, 10, 12, 10, 57, 9, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 4, 11, 64, 8, 11, 11, 11, 12, 11, 65, 1, 11, 1, 11, 1, 12, 1, 12, 5, 12, 72, 8, 12, 10, 12, 12, 12, 75, 9, 12, 1, 55, 0, 13, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 1, 0, 3, 3, 0, 9, 10, 13, 13, 32, 32, 2, 0, 39, 39, 97, 122, 5, 0, 39, 39, 48, 57, 65, 90, 95, 95, 97, 122, 78, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 1, 27, 1, 0, 0, 0, 3, 29, 1, 0, 0, 0, 5, 31, 1, 0, 0, 0, 7, 34, 1, 0, 0, 0, 9, 36, 1, 0, 0, 0, 11, 38, 1, 0, 0, 0, 13, 40, 1, 0, 0, 0, 15, 42, 1, 0, 0, 0, 17, 44, 1, 0, 0, 0, 19, 47, 1, 0, 0, 0, 21, 51, 1, 0, 0, 0, 23, 63, 1, 0, 0, 0, 25, 69, 1, 0, 0, 0, 27, 28, 5, 46, 0, 0, 28, 2, 1, 0, 0, 0, 29, 30, 5, 44, 0, 0, 30, 4, 1, 0, 0, 0, 31, 32, 5, 58, 0, 0, 32, 33, 5, 45, 0, 0, 33, 6, 1, 0, 0, 0, 34, 35, 5, 40, 0, 0, 35, 8, 1, 0, 0, 0, 36, 37, 5, 41, 0, 0, 37, 10, 1, 0, 0, 0, 38, 39, 5, 45, 0, 0, 39, 12, 1, 0, 0, 0, 40, 41, 5, 124, 0, 0, 41, 14, 1, 0, 0, 0, 42, 43, 5, 38, 0, 0, 43, 16, 1, 0, 0, 0, 44, 45, 5, 45, 0, 0, 45, 46, 5, 62, 0, 0, 46, 18, 1, 0, 0, 0, 47, 48, 5, 110, 0, 0, 48, 49, 5, 111, 0, 0, 49, 50, 5, 116, 0, 0, 50, 20, 1, 0, 0, 0, 51, 55, 5, 35, 0, 0, 52, 54, 9, 0, 0, 0, 53, 52, 1, 0, 0, 0, 54, 57, 1, 0, 0, 0, 55, 56, 1, 0, 0, 0, 55, 53, 1, 0, 0, 0, 56, 58, 1, 0, 0, 0, 57, 55, 1, 0, 0, 0, 58, 59, 5, 10, 0, 0, 59, 60, 1, 0, 0, 0, 60, 61, 6, 10, 0, 0, 61, 22, 1, 0, 0, 0, 62, 64, 7, 0, 0, 0, 63, 62, 1, 0, 0, 0, 64, 65, 1, 0, 0, 0, 65, 63, 1, 0, 0, 0, 65, 66, 1, 0, 0, 0, 66, 67, 1, 0, 0, 0, 67, 68, 6, 11, 0, 0, 68, 24, 1, 0, 0, 0, 69, 73, 7, 1, 0, 0, 70, 72, 7, 2, 0, 0, 71, 70, 1, 0, 0, 0, 72, 75, 1, 0, 0, 0, 73, 71, 1, 0, 0, 0, 73, 74, 1, 0, 0, 0, 74, 26, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 4, 0, 55, 65, 73, 1, 6, 0, 0]
|
|
@ -0,0 +1,84 @@
|
|||
# Generated from HMKNF.g4 by ANTLR 4.10.1
|
||||
from antlr4 import *
|
||||
from io import StringIO
|
||||
import sys
|
||||
if sys.version_info[1] > 5:
|
||||
from typing import TextIO
|
||||
else:
|
||||
from typing.io import TextIO
|
||||
|
||||
|
||||
def serializedATN():
|
||||
return [
|
||||
4,0,13,76,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,
|
||||
6,7,6,2,7,7,7,2,8,7,8,2,9,7,9,2,10,7,10,2,11,7,11,2,12,7,12,1,0,
|
||||
1,0,1,1,1,1,1,2,1,2,1,2,1,3,1,3,1,4,1,4,1,5,1,5,1,6,1,6,1,7,1,7,
|
||||
1,8,1,8,1,8,1,9,1,9,1,9,1,9,1,10,1,10,5,10,54,8,10,10,10,12,10,57,
|
||||
9,10,1,10,1,10,1,10,1,10,1,11,4,11,64,8,11,11,11,12,11,65,1,11,1,
|
||||
11,1,12,1,12,5,12,72,8,12,10,12,12,12,75,9,12,1,55,0,13,1,1,3,2,
|
||||
5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21,11,23,12,25,13,1,0,3,3,
|
||||
0,9,10,13,13,32,32,2,0,39,39,97,122,5,0,39,39,48,57,65,90,95,95,
|
||||
97,122,78,0,1,1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,
|
||||
0,0,0,0,11,1,0,0,0,0,13,1,0,0,0,0,15,1,0,0,0,0,17,1,0,0,0,0,19,1,
|
||||
0,0,0,0,21,1,0,0,0,0,23,1,0,0,0,0,25,1,0,0,0,1,27,1,0,0,0,3,29,1,
|
||||
0,0,0,5,31,1,0,0,0,7,34,1,0,0,0,9,36,1,0,0,0,11,38,1,0,0,0,13,40,
|
||||
1,0,0,0,15,42,1,0,0,0,17,44,1,0,0,0,19,47,1,0,0,0,21,51,1,0,0,0,
|
||||
23,63,1,0,0,0,25,69,1,0,0,0,27,28,5,46,0,0,28,2,1,0,0,0,29,30,5,
|
||||
44,0,0,30,4,1,0,0,0,31,32,5,58,0,0,32,33,5,45,0,0,33,6,1,0,0,0,34,
|
||||
35,5,40,0,0,35,8,1,0,0,0,36,37,5,41,0,0,37,10,1,0,0,0,38,39,5,45,
|
||||
0,0,39,12,1,0,0,0,40,41,5,124,0,0,41,14,1,0,0,0,42,43,5,38,0,0,43,
|
||||
16,1,0,0,0,44,45,5,45,0,0,45,46,5,62,0,0,46,18,1,0,0,0,47,48,5,110,
|
||||
0,0,48,49,5,111,0,0,49,50,5,116,0,0,50,20,1,0,0,0,51,55,5,35,0,0,
|
||||
52,54,9,0,0,0,53,52,1,0,0,0,54,57,1,0,0,0,55,56,1,0,0,0,55,53,1,
|
||||
0,0,0,56,58,1,0,0,0,57,55,1,0,0,0,58,59,5,10,0,0,59,60,1,0,0,0,60,
|
||||
61,6,10,0,0,61,22,1,0,0,0,62,64,7,0,0,0,63,62,1,0,0,0,64,65,1,0,
|
||||
0,0,65,63,1,0,0,0,65,66,1,0,0,0,66,67,1,0,0,0,67,68,6,11,0,0,68,
|
||||
24,1,0,0,0,69,73,7,1,0,0,70,72,7,2,0,0,71,70,1,0,0,0,72,75,1,0,0,
|
||||
0,73,71,1,0,0,0,73,74,1,0,0,0,74,26,1,0,0,0,75,73,1,0,0,0,4,0,55,
|
||||
65,73,1,6,0,0
|
||||
]
|
||||
|
||||
class HMKNFLexer(Lexer):
|
||||
|
||||
atn = ATNDeserializer().deserialize(serializedATN())
|
||||
|
||||
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
|
||||
|
||||
T__0 = 1
|
||||
T__1 = 2
|
||||
T__2 = 3
|
||||
T__3 = 4
|
||||
T__4 = 5
|
||||
T__5 = 6
|
||||
T__6 = 7
|
||||
T__7 = 8
|
||||
T__8 = 9
|
||||
T__9 = 10
|
||||
COMMENT = 11
|
||||
WS = 12
|
||||
IDENTIFIER = 13
|
||||
|
||||
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
|
||||
|
||||
modeNames = [ "DEFAULT_MODE" ]
|
||||
|
||||
literalNames = [ "<INVALID>",
|
||||
"'.'", "','", "':-'", "'('", "')'", "'-'", "'|'", "'&'", "'->'",
|
||||
"'not'" ]
|
||||
|
||||
symbolicNames = [ "<INVALID>",
|
||||
"COMMENT", "WS", "IDENTIFIER" ]
|
||||
|
||||
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
|
||||
"T__7", "T__8", "T__9", "COMMENT", "WS", "IDENTIFIER" ]
|
||||
|
||||
grammarFileName = "HMKNF.g4"
|
||||
|
||||
def __init__(self, input=None, output:TextIO = sys.stdout):
|
||||
super().__init__(input, output)
|
||||
self.checkVersion("4.10.1")
|
||||
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
|
||||
self._actions = None
|
||||
self._predicates = None
|
||||
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
T__0=1
|
||||
T__1=2
|
||||
T__2=3
|
||||
T__3=4
|
||||
T__4=5
|
||||
T__5=6
|
||||
T__6=7
|
||||
T__7=8
|
||||
T__8=9
|
||||
T__9=10
|
||||
COMMENT=11
|
||||
WS=12
|
||||
IDENTIFIER=13
|
||||
'.'=1
|
||||
','=2
|
||||
':-'=3
|
||||
'('=4
|
||||
')'=5
|
||||
'-'=6
|
||||
'|'=7
|
||||
'&'=8
|
||||
'->'=9
|
||||
'not'=10
|
|
@ -0,0 +1,806 @@
|
|||
# Generated from HMKNF.g4 by ANTLR 4.10.1
|
||||
# encoding: utf-8
|
||||
from antlr4 import *
|
||||
from io import StringIO
|
||||
import sys
|
||||
if sys.version_info[1] > 5:
|
||||
from typing import TextIO
|
||||
else:
|
||||
from typing.io import TextIO
|
||||
|
||||
def serializedATN():
|
||||
return [
|
||||
4,1,13,91,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,
|
||||
6,2,7,7,7,2,8,7,8,2,9,7,9,1,0,1,0,5,0,23,8,0,10,0,12,0,26,9,0,1,
|
||||
1,1,1,1,1,1,1,1,1,3,1,33,8,1,1,1,1,1,1,2,1,2,1,2,5,2,40,8,2,10,2,
|
||||
12,2,43,9,2,1,3,1,3,1,3,1,3,5,3,49,8,3,10,3,12,3,52,9,3,3,3,54,8,
|
||||
3,1,4,1,4,1,4,1,5,1,5,1,5,1,5,1,5,1,5,1,5,1,5,3,5,67,8,5,1,5,1,5,
|
||||
1,5,1,5,1,5,1,5,5,5,75,8,5,10,5,12,5,78,9,5,1,6,1,6,3,6,82,8,6,1,
|
||||
7,1,7,1,8,1,8,1,9,1,9,1,9,1,9,0,1,10,10,0,2,4,6,8,10,12,14,16,18,
|
||||
0,1,1,0,7,8,92,0,24,1,0,0,0,2,32,1,0,0,0,4,36,1,0,0,0,6,44,1,0,0,
|
||||
0,8,55,1,0,0,0,10,66,1,0,0,0,12,81,1,0,0,0,14,83,1,0,0,0,16,85,1,
|
||||
0,0,0,18,87,1,0,0,0,20,23,3,2,1,0,21,23,3,8,4,0,22,20,1,0,0,0,22,
|
||||
21,1,0,0,0,23,26,1,0,0,0,24,22,1,0,0,0,24,25,1,0,0,0,25,1,1,0,0,
|
||||
0,26,24,1,0,0,0,27,28,3,4,2,0,28,29,3,6,3,0,29,33,1,0,0,0,30,33,
|
||||
3,4,2,0,31,33,3,6,3,0,32,27,1,0,0,0,32,30,1,0,0,0,32,31,1,0,0,0,
|
||||
33,34,1,0,0,0,34,35,5,1,0,0,35,3,1,0,0,0,36,41,5,13,0,0,37,38,5,
|
||||
2,0,0,38,40,5,13,0,0,39,37,1,0,0,0,40,43,1,0,0,0,41,39,1,0,0,0,41,
|
||||
42,1,0,0,0,42,5,1,0,0,0,43,41,1,0,0,0,44,53,5,3,0,0,45,50,3,12,6,
|
||||
0,46,47,5,2,0,0,47,49,3,12,6,0,48,46,1,0,0,0,49,52,1,0,0,0,50,48,
|
||||
1,0,0,0,50,51,1,0,0,0,51,54,1,0,0,0,52,50,1,0,0,0,53,45,1,0,0,0,
|
||||
53,54,1,0,0,0,54,7,1,0,0,0,55,56,3,10,5,0,56,57,5,1,0,0,57,9,1,0,
|
||||
0,0,58,59,6,5,-1,0,59,60,5,4,0,0,60,61,3,10,5,0,61,62,5,5,0,0,62,
|
||||
67,1,0,0,0,63,67,3,14,7,0,64,65,5,6,0,0,65,67,3,10,5,3,66,58,1,0,
|
||||
0,0,66,63,1,0,0,0,66,64,1,0,0,0,67,76,1,0,0,0,68,69,10,2,0,0,69,
|
||||
70,7,0,0,0,70,75,3,10,5,3,71,72,10,1,0,0,72,73,5,9,0,0,73,75,3,10,
|
||||
5,1,74,68,1,0,0,0,74,71,1,0,0,0,75,78,1,0,0,0,76,74,1,0,0,0,76,77,
|
||||
1,0,0,0,77,11,1,0,0,0,78,76,1,0,0,0,79,82,3,16,8,0,80,82,3,18,9,
|
||||
0,81,79,1,0,0,0,81,80,1,0,0,0,82,13,1,0,0,0,83,84,5,13,0,0,84,15,
|
||||
1,0,0,0,85,86,5,13,0,0,86,17,1,0,0,0,87,88,5,10,0,0,88,89,5,13,0,
|
||||
0,89,19,1,0,0,0,10,22,24,32,41,50,53,66,74,76,81
|
||||
]
|
||||
|
||||
class HMKNFParser ( Parser ):
|
||||
|
||||
grammarFileName = "HMKNF.g4"
|
||||
|
||||
atn = ATNDeserializer().deserialize(serializedATN())
|
||||
|
||||
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
|
||||
|
||||
sharedContextCache = PredictionContextCache()
|
||||
|
||||
literalNames = [ "<INVALID>", "'.'", "','", "':-'", "'('", "')'", "'-'",
|
||||
"'|'", "'&'", "'->'", "'not'" ]
|
||||
|
||||
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
|
||||
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
|
||||
"<INVALID>", "<INVALID>", "<INVALID>", "COMMENT",
|
||||
"WS", "IDENTIFIER" ]
|
||||
|
||||
RULE_kb = 0
|
||||
RULE_lrule = 1
|
||||
RULE_head = 2
|
||||
RULE_body = 3
|
||||
RULE_orule = 4
|
||||
RULE_oformula = 5
|
||||
RULE_katom = 6
|
||||
RULE_oatom = 7
|
||||
RULE_patom = 8
|
||||
RULE_natom = 9
|
||||
|
||||
ruleNames = [ "kb", "lrule", "head", "body", "orule", "oformula", "katom",
|
||||
"oatom", "patom", "natom" ]
|
||||
|
||||
EOF = Token.EOF
|
||||
T__0=1
|
||||
T__1=2
|
||||
T__2=3
|
||||
T__3=4
|
||||
T__4=5
|
||||
T__5=6
|
||||
T__6=7
|
||||
T__7=8
|
||||
T__8=9
|
||||
T__9=10
|
||||
COMMENT=11
|
||||
WS=12
|
||||
IDENTIFIER=13
|
||||
|
||||
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
|
||||
super().__init__(input, output)
|
||||
self.checkVersion("4.10.1")
|
||||
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
|
||||
self._predicates = None
|
||||
|
||||
|
||||
|
||||
|
||||
class KbContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def lrule(self, i:int=None):
|
||||
if i is None:
|
||||
return self.getTypedRuleContexts(HMKNFParser.LruleContext)
|
||||
else:
|
||||
return self.getTypedRuleContext(HMKNFParser.LruleContext,i)
|
||||
|
||||
|
||||
def orule(self, i:int=None):
|
||||
if i is None:
|
||||
return self.getTypedRuleContexts(HMKNFParser.OruleContext)
|
||||
else:
|
||||
return self.getTypedRuleContext(HMKNFParser.OruleContext,i)
|
||||
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_kb
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitKb" ):
|
||||
return visitor.visitKb(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def kb(self):
|
||||
|
||||
localctx = HMKNFParser.KbContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 0, self.RULE_kb)
|
||||
self._la = 0 # Token type
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 24
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << HMKNFParser.T__2) | (1 << HMKNFParser.T__3) | (1 << HMKNFParser.T__5) | (1 << HMKNFParser.IDENTIFIER))) != 0):
|
||||
self.state = 22
|
||||
self._errHandler.sync(self)
|
||||
la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
|
||||
if la_ == 1:
|
||||
self.state = 20
|
||||
self.lrule()
|
||||
pass
|
||||
|
||||
elif la_ == 2:
|
||||
self.state = 21
|
||||
self.orule()
|
||||
pass
|
||||
|
||||
|
||||
self.state = 26
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class LruleContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def head(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.HeadContext,0)
|
||||
|
||||
|
||||
def body(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.BodyContext,0)
|
||||
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_lrule
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitLrule" ):
|
||||
return visitor.visitLrule(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def lrule(self):
|
||||
|
||||
localctx = HMKNFParser.LruleContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 2, self.RULE_lrule)
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 32
|
||||
self._errHandler.sync(self)
|
||||
la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
|
||||
if la_ == 1:
|
||||
self.state = 27
|
||||
self.head()
|
||||
self.state = 28
|
||||
self.body()
|
||||
pass
|
||||
|
||||
elif la_ == 2:
|
||||
self.state = 30
|
||||
self.head()
|
||||
pass
|
||||
|
||||
elif la_ == 3:
|
||||
self.state = 31
|
||||
self.body()
|
||||
pass
|
||||
|
||||
|
||||
self.state = 34
|
||||
self.match(HMKNFParser.T__0)
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class HeadContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def IDENTIFIER(self, i:int=None):
|
||||
if i is None:
|
||||
return self.getTokens(HMKNFParser.IDENTIFIER)
|
||||
else:
|
||||
return self.getToken(HMKNFParser.IDENTIFIER, i)
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_head
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitHead" ):
|
||||
return visitor.visitHead(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def head(self):
|
||||
|
||||
localctx = HMKNFParser.HeadContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 4, self.RULE_head)
|
||||
self._la = 0 # Token type
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 36
|
||||
self.match(HMKNFParser.IDENTIFIER)
|
||||
self.state = 41
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
while _la==HMKNFParser.T__1:
|
||||
self.state = 37
|
||||
self.match(HMKNFParser.T__1)
|
||||
self.state = 38
|
||||
self.match(HMKNFParser.IDENTIFIER)
|
||||
self.state = 43
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class BodyContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def katom(self, i:int=None):
|
||||
if i is None:
|
||||
return self.getTypedRuleContexts(HMKNFParser.KatomContext)
|
||||
else:
|
||||
return self.getTypedRuleContext(HMKNFParser.KatomContext,i)
|
||||
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_body
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitBody" ):
|
||||
return visitor.visitBody(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def body(self):
|
||||
|
||||
localctx = HMKNFParser.BodyContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 6, self.RULE_body)
|
||||
self._la = 0 # Token type
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 44
|
||||
self.match(HMKNFParser.T__2)
|
||||
self.state = 53
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
if _la==HMKNFParser.T__9 or _la==HMKNFParser.IDENTIFIER:
|
||||
self.state = 45
|
||||
self.katom()
|
||||
self.state = 50
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
while _la==HMKNFParser.T__1:
|
||||
self.state = 46
|
||||
self.match(HMKNFParser.T__1)
|
||||
self.state = 47
|
||||
self.katom()
|
||||
self.state = 52
|
||||
self._errHandler.sync(self)
|
||||
_la = self._input.LA(1)
|
||||
|
||||
|
||||
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class OruleContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def oformula(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.OformulaContext,0)
|
||||
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_orule
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitOrule" ):
|
||||
return visitor.visitOrule(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def orule(self):
|
||||
|
||||
localctx = HMKNFParser.OruleContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 8, self.RULE_orule)
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 55
|
||||
self.oformula(0)
|
||||
self.state = 56
|
||||
self.match(HMKNFParser.T__0)
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class OformulaContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_oformula
|
||||
|
||||
|
||||
def copyFrom(self, ctx:ParserRuleContext):
|
||||
super().copyFrom(ctx)
|
||||
|
||||
|
||||
class NegationContext(OformulaContext):
|
||||
|
||||
def __init__(self, parser, ctx:ParserRuleContext): # actually a HMKNFParser.OformulaContext
|
||||
super().__init__(parser)
|
||||
self.copyFrom(ctx)
|
||||
|
||||
def oformula(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.OformulaContext,0)
|
||||
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitNegation" ):
|
||||
return visitor.visitNegation(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
class DisjOrConjContext(OformulaContext):
|
||||
|
||||
def __init__(self, parser, ctx:ParserRuleContext): # actually a HMKNFParser.OformulaContext
|
||||
super().__init__(parser)
|
||||
self.operator = None # Token
|
||||
self.copyFrom(ctx)
|
||||
|
||||
def oformula(self, i:int=None):
|
||||
if i is None:
|
||||
return self.getTypedRuleContexts(HMKNFParser.OformulaContext)
|
||||
else:
|
||||
return self.getTypedRuleContext(HMKNFParser.OformulaContext,i)
|
||||
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitDisjOrConj" ):
|
||||
return visitor.visitDisjOrConj(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
class ParenthContext(OformulaContext):
|
||||
|
||||
def __init__(self, parser, ctx:ParserRuleContext): # actually a HMKNFParser.OformulaContext
|
||||
super().__init__(parser)
|
||||
self.copyFrom(ctx)
|
||||
|
||||
def oformula(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.OformulaContext,0)
|
||||
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitParenth" ):
|
||||
return visitor.visitParenth(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
class ImpContext(OformulaContext):
|
||||
|
||||
def __init__(self, parser, ctx:ParserRuleContext): # actually a HMKNFParser.OformulaContext
|
||||
super().__init__(parser)
|
||||
self.copyFrom(ctx)
|
||||
|
||||
def oformula(self, i:int=None):
|
||||
if i is None:
|
||||
return self.getTypedRuleContexts(HMKNFParser.OformulaContext)
|
||||
else:
|
||||
return self.getTypedRuleContext(HMKNFParser.OformulaContext,i)
|
||||
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitImp" ):
|
||||
return visitor.visitImp(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
class LiteralContext(OformulaContext):
|
||||
|
||||
def __init__(self, parser, ctx:ParserRuleContext): # actually a HMKNFParser.OformulaContext
|
||||
super().__init__(parser)
|
||||
self.copyFrom(ctx)
|
||||
|
||||
def oatom(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.OatomContext,0)
|
||||
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitLiteral" ):
|
||||
return visitor.visitLiteral(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
def oformula(self, _p:int=0):
|
||||
_parentctx = self._ctx
|
||||
_parentState = self.state
|
||||
localctx = HMKNFParser.OformulaContext(self, self._ctx, _parentState)
|
||||
_prevctx = localctx
|
||||
_startState = 10
|
||||
self.enterRecursionRule(localctx, 10, self.RULE_oformula, _p)
|
||||
self._la = 0 # Token type
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 66
|
||||
self._errHandler.sync(self)
|
||||
token = self._input.LA(1)
|
||||
if token in [HMKNFParser.T__3]:
|
||||
localctx = HMKNFParser.ParenthContext(self, localctx)
|
||||
self._ctx = localctx
|
||||
_prevctx = localctx
|
||||
|
||||
self.state = 59
|
||||
self.match(HMKNFParser.T__3)
|
||||
self.state = 60
|
||||
self.oformula(0)
|
||||
self.state = 61
|
||||
self.match(HMKNFParser.T__4)
|
||||
pass
|
||||
elif token in [HMKNFParser.IDENTIFIER]:
|
||||
localctx = HMKNFParser.LiteralContext(self, localctx)
|
||||
self._ctx = localctx
|
||||
_prevctx = localctx
|
||||
self.state = 63
|
||||
self.oatom()
|
||||
pass
|
||||
elif token in [HMKNFParser.T__5]:
|
||||
localctx = HMKNFParser.NegationContext(self, localctx)
|
||||
self._ctx = localctx
|
||||
_prevctx = localctx
|
||||
self.state = 64
|
||||
self.match(HMKNFParser.T__5)
|
||||
self.state = 65
|
||||
self.oformula(3)
|
||||
pass
|
||||
else:
|
||||
raise NoViableAltException(self)
|
||||
|
||||
self._ctx.stop = self._input.LT(-1)
|
||||
self.state = 76
|
||||
self._errHandler.sync(self)
|
||||
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
|
||||
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
|
||||
if _alt==1:
|
||||
if self._parseListeners is not None:
|
||||
self.triggerExitRuleEvent()
|
||||
_prevctx = localctx
|
||||
self.state = 74
|
||||
self._errHandler.sync(self)
|
||||
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
|
||||
if la_ == 1:
|
||||
localctx = HMKNFParser.DisjOrConjContext(self, HMKNFParser.OformulaContext(self, _parentctx, _parentState))
|
||||
self.pushNewRecursionContext(localctx, _startState, self.RULE_oformula)
|
||||
self.state = 68
|
||||
if not self.precpred(self._ctx, 2):
|
||||
from antlr4.error.Errors import FailedPredicateException
|
||||
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
|
||||
|
||||
self.state = 69
|
||||
localctx.operator = self._input.LT(1)
|
||||
_la = self._input.LA(1)
|
||||
if not(_la==HMKNFParser.T__6 or _la==HMKNFParser.T__7):
|
||||
localctx.operator = self._errHandler.recoverInline(self)
|
||||
else:
|
||||
self._errHandler.reportMatch(self)
|
||||
self.consume()
|
||||
self.state = 70
|
||||
self.oformula(3)
|
||||
pass
|
||||
|
||||
elif la_ == 2:
|
||||
localctx = HMKNFParser.ImpContext(self, HMKNFParser.OformulaContext(self, _parentctx, _parentState))
|
||||
self.pushNewRecursionContext(localctx, _startState, self.RULE_oformula)
|
||||
self.state = 71
|
||||
if not self.precpred(self._ctx, 1):
|
||||
from antlr4.error.Errors import FailedPredicateException
|
||||
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
|
||||
self.state = 72
|
||||
self.match(HMKNFParser.T__8)
|
||||
self.state = 73
|
||||
self.oformula(1)
|
||||
pass
|
||||
|
||||
|
||||
self.state = 78
|
||||
self._errHandler.sync(self)
|
||||
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
|
||||
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.unrollRecursionContexts(_parentctx)
|
||||
return localctx
|
||||
|
||||
|
||||
class KatomContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def patom(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.PatomContext,0)
|
||||
|
||||
|
||||
def natom(self):
|
||||
return self.getTypedRuleContext(HMKNFParser.NatomContext,0)
|
||||
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_katom
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitKatom" ):
|
||||
return visitor.visitKatom(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def katom(self):
|
||||
|
||||
localctx = HMKNFParser.KatomContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 12, self.RULE_katom)
|
||||
try:
|
||||
self.state = 81
|
||||
self._errHandler.sync(self)
|
||||
token = self._input.LA(1)
|
||||
if token in [HMKNFParser.IDENTIFIER]:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 79
|
||||
self.patom()
|
||||
pass
|
||||
elif token in [HMKNFParser.T__9]:
|
||||
self.enterOuterAlt(localctx, 2)
|
||||
self.state = 80
|
||||
self.natom()
|
||||
pass
|
||||
else:
|
||||
raise NoViableAltException(self)
|
||||
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class OatomContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def IDENTIFIER(self):
|
||||
return self.getToken(HMKNFParser.IDENTIFIER, 0)
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_oatom
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitOatom" ):
|
||||
return visitor.visitOatom(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def oatom(self):
|
||||
|
||||
localctx = HMKNFParser.OatomContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 14, self.RULE_oatom)
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 83
|
||||
self.match(HMKNFParser.IDENTIFIER)
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class PatomContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def IDENTIFIER(self):
|
||||
return self.getToken(HMKNFParser.IDENTIFIER, 0)
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_patom
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitPatom" ):
|
||||
return visitor.visitPatom(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def patom(self):
|
||||
|
||||
localctx = HMKNFParser.PatomContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 16, self.RULE_patom)
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 85
|
||||
self.match(HMKNFParser.IDENTIFIER)
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
class NatomContext(ParserRuleContext):
|
||||
__slots__ = 'parser'
|
||||
|
||||
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
|
||||
super().__init__(parent, invokingState)
|
||||
self.parser = parser
|
||||
|
||||
def IDENTIFIER(self):
|
||||
return self.getToken(HMKNFParser.IDENTIFIER, 0)
|
||||
|
||||
def getRuleIndex(self):
|
||||
return HMKNFParser.RULE_natom
|
||||
|
||||
def accept(self, visitor:ParseTreeVisitor):
|
||||
if hasattr( visitor, "visitNatom" ):
|
||||
return visitor.visitNatom(self)
|
||||
else:
|
||||
return visitor.visitChildren(self)
|
||||
|
||||
|
||||
|
||||
|
||||
def natom(self):
|
||||
|
||||
localctx = HMKNFParser.NatomContext(self, self._ctx, self.state)
|
||||
self.enterRule(localctx, 18, self.RULE_natom)
|
||||
try:
|
||||
self.enterOuterAlt(localctx, 1)
|
||||
self.state = 87
|
||||
self.match(HMKNFParser.T__9)
|
||||
self.state = 88
|
||||
self.match(HMKNFParser.IDENTIFIER)
|
||||
except RecognitionException as re:
|
||||
localctx.exception = re
|
||||
self._errHandler.reportError(self, re)
|
||||
self._errHandler.recover(self, re)
|
||||
finally:
|
||||
self.exitRule()
|
||||
return localctx
|
||||
|
||||
|
||||
|
||||
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
|
||||
if self._predicates == None:
|
||||
self._predicates = dict()
|
||||
self._predicates[5] = self.oformula_sempred
|
||||
pred = self._predicates.get(ruleIndex, None)
|
||||
if pred is None:
|
||||
raise Exception("No predicate with index:" + str(ruleIndex))
|
||||
else:
|
||||
return pred(localctx, predIndex)
|
||||
|
||||
def oformula_sempred(self, localctx:OformulaContext, predIndex:int):
|
||||
if predIndex == 0:
|
||||
return self.precpred(self._ctx, 2)
|
||||
|
||||
|
||||
if predIndex == 1:
|
||||
return self.precpred(self._ctx, 1)
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
# Generated from HMKNF.g4 by ANTLR 4.10.1
|
||||
from antlr4 import *
|
||||
if __name__ is not None and "." in __name__:
|
||||
from .HMKNFParser import HMKNFParser
|
||||
else:
|
||||
from HMKNFParser import HMKNFParser
|
||||
|
||||
# This class defines a complete generic visitor for a parse tree produced by HMKNFParser.
|
||||
|
||||
class HMKNFVisitor(ParseTreeVisitor):
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#kb.
|
||||
def visitKb(self, ctx:HMKNFParser.KbContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#lrule.
|
||||
def visitLrule(self, ctx:HMKNFParser.LruleContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#head.
|
||||
def visitHead(self, ctx:HMKNFParser.HeadContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#body.
|
||||
def visitBody(self, ctx:HMKNFParser.BodyContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#orule.
|
||||
def visitOrule(self, ctx:HMKNFParser.OruleContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#negation.
|
||||
def visitNegation(self, ctx:HMKNFParser.NegationContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#disjOrConj.
|
||||
def visitDisjOrConj(self, ctx:HMKNFParser.DisjOrConjContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#parenth.
|
||||
def visitParenth(self, ctx:HMKNFParser.ParenthContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#imp.
|
||||
def visitImp(self, ctx:HMKNFParser.ImpContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#literal.
|
||||
def visitLiteral(self, ctx:HMKNFParser.LiteralContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#katom.
|
||||
def visitKatom(self, ctx:HMKNFParser.KatomContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#oatom.
|
||||
def visitOatom(self, ctx:HMKNFParser.OatomContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#patom.
|
||||
def visitPatom(self, ctx:HMKNFParser.PatomContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
# Visit a parse tree produced by HMKNFParser#natom.
|
||||
def visitNatom(self, ctx:HMKNFParser.NatomContext):
|
||||
return self.visitChildren(ctx)
|
||||
|
||||
|
||||
|
||||
del HMKNFParser
|
|
@ -0,0 +1,64 @@
|
|||
from AST import ONegation, OFormula, OBinary, OAtom, OConst, KB, atom, Set
|
||||
from more_itertools import powerset
|
||||
|
||||
|
||||
def oformula_sat(o: OFormula, total_I: Set[atom]):
|
||||
match o:
|
||||
case OConst.TRUE:
|
||||
return True
|
||||
case OConst.FALSE:
|
||||
return False
|
||||
case OBinary("|", left, right):
|
||||
return oformula_sat(left, total_I) or oformula_sat(right, total_I)
|
||||
case OBinary("&", left, right):
|
||||
return oformula_sat(left, total_I) and oformula_sat(right, total_I)
|
||||
case OBinary("->", left, right):
|
||||
return (not oformula_sat(left, total_I)) or oformula_sat(right, total_I)
|
||||
case OAtom(name):
|
||||
return name in total_I
|
||||
case ONegation(of):
|
||||
return not oformula_sat(of, total_I)
|
||||
case _:
|
||||
raise Exception("Ontology error")
|
||||
|
||||
|
||||
def objective_knowledge_consistent(kb: KB, T: Set[atom], F: Set[atom]) -> bool:
|
||||
"OB_{T, F} has at least one model"
|
||||
undefined_atoms = (kb.katoms | kb.oatoms) - (T | F)
|
||||
for I in powerset(undefined_atoms):
|
||||
total_I = T.union(I)
|
||||
if oformula_sat(kb.ont, total_I):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def objective_knowledge(
|
||||
kb: KB, T: Set[atom], F: Set[atom]
|
||||
) -> tuple[bool, Set[atom], Set[atom]]:
|
||||
"OB_{T, F} |= a"
|
||||
undefined_atoms = (kb.katoms | kb.oatoms) - (T | F)
|
||||
entailed = kb.katoms
|
||||
entailed_false = kb.katoms
|
||||
consistent = False
|
||||
for I in powerset(undefined_atoms):
|
||||
total_I = T.union(I)
|
||||
if oformula_sat(kb.ont, total_I):
|
||||
consistent = True
|
||||
entailed = entailed.intersection(total_I)
|
||||
entailed_false = entailed_false.intersection(kb.katoms.difference(total_I))
|
||||
return consistent, T.union(entailed), F.union(entailed_false)
|
||||
|
||||
|
||||
def objective_knowledge_brave(kb: KB, T: Set[atom], F: Set[atom]):
|
||||
"OB_{T, F} |/= -a"
|
||||
undefined_atoms = (kb.katoms | kb.oatoms) - (T | F)
|
||||
entailed = Set()
|
||||
entailed_false = Set()
|
||||
consistent = False
|
||||
for I in powerset(undefined_atoms):
|
||||
total_I = T.union(I)
|
||||
if oformula_sat(kb.ont, total_I):
|
||||
consistent = True
|
||||
entailed = entailed.union(total_I)
|
||||
entailed_false = entailed_false.union(kb.katoms.difference(total_I))
|
||||
return consistent, T.union(entailed), F.union(entailed_false)
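

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; this tiny KB is built by hand
    # here and is not one of the shipped knowledge bases).
    demo_kb = KB(
        ont=OBinary("|", OAtom("a"), ONegation(OAtom("b"))),  # ontology: a | -b.
        rules=(),
        katoms=Set({"a", "b"}),
        oatoms=Set({"a", "b"}),
    )
    # Is OB_{T, F} satisfiable when a is known true and nothing is known false?
    print(objective_knowledge_consistent(demo_kb, Set({"a"}), Set()))
    # Cautious consequences from the empty pair: atoms that are true (resp. false)
    # in every model of the ontology compatible with (T, F).
    print(objective_knowledge(demo_kb, Set(), Set()))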
|
|
@ -0,0 +1,3 @@
|
|||
-b.
|
||||
a :- not b.
|
||||
b :- not a.
|
|
@ -0,0 +1,4 @@
|
|||
:- c.
|
||||
:- b.
|
||||
:- a.
|
||||
a.
|
|
@ -0,0 +1,3 @@
|
|||
antlr4-python3-runtime==4.10
|
||||
more-itertools==8.11.0
|
||||
black==23.3.0
|
|
@ -0,0 +1,56 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
An enumerative solver that will give all MKNF models of a given knowledge base
|
||||
|
||||
usage: python solver.py knowledge_bases/simple.hmknf
|
||||
OR
|
||||
usage: python solver.py < knowledge_bases/simple.hmknf
|
||||
"""
|
||||
|
||||
from sys import argv, flags, stdin
|
||||
from typing import Iterable
|
||||
|
||||
from more_itertools import peekable
|
||||
|
||||
from AST import loadProgram, atom, Set, KB
|
||||
from aft import Kinterp, add_immediate_XY, fixpoint, stable_revision
|
||||
from util import printp, powerset
|
||||
|
||||
|
||||
def solver(kb: KB) -> Iterable[Kinterp]:
|
||||
for T in powerset(kb.katoms):
|
||||
for U in powerset(kb.katoms - T):
|
||||
P = T | U
|
||||
if verify(kb, T, P):
|
||||
yield (T, P)
|
||||
|
||||
|
||||
def verify(kb: KB, T: Set[atom], P: Set[atom]) -> bool:
|
||||
"Return whether (T, P) corresponds to an MKNF model of kb"
|
||||
Tp, Pp = stable_revision(kb, T, P)
|
||||
if (Tp, Pp) != (T, P):
|
||||
return False
|
||||
|
||||
def check_rules(P: Set[atom]):
|
||||
return add_immediate_XY(kb, P, T)
|
||||
|
||||
return fixpoint(check_rules, Set()) == P
|
||||
|
||||
|
||||
def main():
|
||||
if len(argv) > 1:
|
||||
in_file = open(argv[1], "rt", encoding="utf8")
|
||||
else:
|
||||
in_file = stdin
|
||||
kb = loadProgram(in_file.read())
|
||||
models = peekable(solver(kb))
|
||||
try:
|
||||
models.peek()
|
||||
except StopIteration:
|
||||
print("no models")
|
||||
for T, P in models:
|
||||
printp(T, P)
|
||||
|
||||
|
||||
if __name__ == "__main__" and not flags.interactive:
|
||||
main()
|
|
@ -0,0 +1,64 @@
|
|||
from functools import wraps
|
||||
from AST import Set, atom
|
||||
import more_itertools
|
||||
|
||||
def powerset(items):
|
||||
return map(Set, more_itertools.powerset(items))
|
||||
|
||||
|
||||
def format_set(s: Set[atom]):
|
||||
return "{" + ", ".join(sorted(s)) + "}"
|
||||
|
||||
|
||||
def printp(*args):
|
||||
whole = "("
|
||||
sets = (format_set(arg) for arg in args)
|
||||
whole += ", ".join(sets)
|
||||
whole += ")"
|
||||
print(whole)
|
||||
|
||||
|
||||
def prints_input(*pos):
|
||||
def wrappest(fn):
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
output = fn(*args, **kwargs)
|
||||
print(
|
||||
f"Input of {fn.__name__} pos: {pos}:",
|
||||
" ".join(
|
||||
(format_set(arg) if isinstance(arg, Set) else str(arg))
|
||||
for i, arg in enumerate(args)
|
||||
if i in pos
|
||||
),
|
||||
)
|
||||
return output
|
||||
|
||||
return wrapper
|
||||
|
||||
return wrappest
|
||||
|
||||
|
||||
def prints_output(fn):
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
output = fn(*args, **kwargs)
|
||||
print(
|
||||
f"Output of {fn.__name__}:",
|
||||
format_set(output) if isinstance(output, Set) else output,
|
||||
)
|
||||
return output
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def disabled_static(return_value):
|
||||
"Disables a function by making it return a specified value instead"
|
||||
|
||||
def wrappest(fn):
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
return return_value
|
||||
|
||||
return wrapper
|
||||
|
||||
return wrappest
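

# Usage sketch (illustrative; `union` and `expensive_check` below are
# hypothetical functions, not part of this module):
#
#   @prints_input(0, 1)      # echo the first two positional arguments
#   @prints_output           # echo the return value
#   def union(a: Set, b: Set) -> Set:
#       return a | b
#
#   @disabled_static(Set())  # short-circuit a function while debugging
#   def expensive_check(s: Set) -> Set:
#       ...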
|