author     Jeff Vander Stoep <jeffv@google.com>    2015-06-18 08:11:40 -0700
committer  Jeff Vander Stoep <jeffv@google.com>    2015-06-18 15:58:11 -0700
commit     f33298730c2535fc791a8908f79dacad8b28b1d8 (patch)
tree       5e79b0aba6645a3dfc954961d6ea51048652ea14
parent     7435e5aecc8e14905931039389c132a1a00ab9f4 (diff)
download   selinux-f33298730c2535fc791a8908f79dacad8b28b1d8.tar.gz
Move python libraries to prebuilt directory

Update script syntax to work with sh/dash/bash.
Add cleanspec.

(Cherry picked from 8a2e0bafc9c56de823d8fe7e52ca6f483b4ce011)

Bug: 21928182
Change-Id: Ifc19252d8d8e178b86c51fe1f54e162a61ffc0dd
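
Note on the "sh/dash/bash" point above: the wrapper scripts previously used the bash-only [[ ... ]] test, which fails when /bin/sh is dash or another POSIX shell. The patch below switches them to the portable [ ... ] form. A minimal illustrative sketch of the difference (for context only, not part of the commit):

    #!/bin/sh
    unamestr=`uname`
    # bash-only form, breaks under dash/POSIX sh:
    #   if [[ $unamestr == "Linux" ]]; then ...
    # POSIX form used by the updated wrappers:
    if [ "$unamestr" = "Linux" -o "$unamestr" = "linux" ]; then
        echo "running on linux"
    fi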
-rw-r--r--  CleanSpec.mk                            |    3
-rwxr-xr-x  prebuilts/bin/audit2allow               |    4
-rwxr-xr-x  prebuilts/bin/audit2why                 |    6
-rwxr-xr-x  prebuilts/bin/sesearch                  |   10
-rwxr-xr-x  prebuilts/bin/sesearch.py               |  206
-rw-r--r--  prebuilts/lib/selinux/__init__.py       | 2445
-rwxr-xr-x  prebuilts/lib/selinux/_selinux.so       |  bin 333725 -> 0 bytes
-rwxr-xr-x  prebuilts/lib/selinux/audit2why.so      |  bin 243205 -> 0 bytes
-rw-r--r--  prebuilts/lib/sepolgen/__init__.py      |    0
-rw-r--r--  prebuilts/lib/sepolgen/access.py        |  331
-rw-r--r--  prebuilts/lib/sepolgen/audit.py         |  549
-rw-r--r--  prebuilts/lib/sepolgen/classperms.py    |  116
-rw-r--r--  prebuilts/lib/sepolgen/defaults.py      |   77
-rw-r--r--  prebuilts/lib/sepolgen/interfaces.py    |  509
-rw-r--r--  prebuilts/lib/sepolgen/lex.py           |  866
-rw-r--r--  prebuilts/lib/sepolgen/matching.py      |  255
-rw-r--r--  prebuilts/lib/sepolgen/module.py        |  213
-rw-r--r--  prebuilts/lib/sepolgen/objectmodel.py   |  172
-rw-r--r--  prebuilts/lib/sepolgen/output.py        |  173
-rw-r--r--  prebuilts/lib/sepolgen/policygen.py     |  402
-rw-r--r--  prebuilts/lib/sepolgen/refparser.py     | 1128
-rw-r--r--  prebuilts/lib/sepolgen/refpolicy.py     |  917
-rw-r--r--  prebuilts/lib/sepolgen/sepolgeni18n.py  |   26
-rw-r--r--  prebuilts/lib/sepolgen/util.py          |   87
-rw-r--r--  prebuilts/lib/sepolgen/yacc.py          | 2209
25 files changed, 224 insertions, 10480 deletions
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 1ac5a628..5658a407 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -1,2 +1,5 @@
# This empty CleanSpec.mk file will prevent the build system
# from descending into subdirs.
+#
+$(call add-clean-step, rm -rf $(OUT_DIR)/host/linux-x86/bin/audit2allow)
+$(call add-clean-step, rm -rf $(OUT_DIR)/host/linux-x86/bin/audit2why)
diff --git a/prebuilts/bin/audit2allow b/prebuilts/bin/audit2allow
index 7493bdc9..8b99bbd1 100755
--- a/prebuilts/bin/audit2allow
+++ b/prebuilts/bin/audit2allow
@@ -1,9 +1,9 @@
#!/usr/bin/env bash
unamestr=`uname`
-if [[ $unamestr == "Linux" ]]; then
+if [ "$unamestr" = "Linux" -o "$unamestr" = "linux" ]; then
export LD_LIBRARY_PATH=$ANDROID_BUILD_TOP/external/selinux/prebuilts/lib
- export PYTHONPATH=$ANDROID_BUILD_TOP/external/selinux/prebuilts/lib
+ export PYTHONPATH=$ANDROID_BUILD_TOP/prebuilts/python/linux-x86/2.7.5/lib/python2.7/site-packages
python $ANDROID_BUILD_TOP/external/selinux/policycoreutils/audit2allow/audit2allow "$@"
else
echo "audit2allow is only supported on linux"
diff --git a/prebuilts/bin/audit2why b/prebuilts/bin/audit2why
index c0d73afd..e612c2b4 100755
--- a/prebuilts/bin/audit2why
+++ b/prebuilts/bin/audit2why
@@ -1,10 +1,10 @@
#!/bin/sh
unamestr=`uname`
-if [[ $unamestr == "Linux" ]]; then
+if [ "$unamestr" = "Linux" -o "$unamestr" = "linux" ]; then
export LD_LIBRARY_PATH=$ANDROID_BUILD_TOP/external/selinux/prebuilts/lib
- export PYTHONPATH=$ANDROID_BUILD_TOP/external/selinux/prebuilts/lib
- python $ANDROID_BUILD_TOP/external/selinux/policycoreutils/audit2allow/audit2wy "$@"
+ export PYTHONPATH=$ANDROID_BUILD_TOP/prebuilts/python/linux-x86/2.7.5/lib/python2.7/site-packages
+ exec python $ANDROID_BUILD_TOP/external/selinux/policycoreutils/audit2allow/audit2why "$@"
else
echo "audit2why is only supported on linux"
fi
diff --git a/prebuilts/bin/sesearch b/prebuilts/bin/sesearch
new file mode 100755
index 00000000..b713046d
--- /dev/null
+++ b/prebuilts/bin/sesearch
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+unamestr=`uname`
+if [ "$unamestr" = "Linux" -o "$unamestr" = "linux" ]; then
+ export LD_LIBRARY_PATH=$ANDROID_BUILD_TOP/external/selinux/prebuilts/lib
+ export PYTHONPATH=$ANDROID_BUILD_TOP/prebuilts/python/linux-x86/2.7.5/lib/python2.7/site-packages
+ python $ANDROID_BUILD_TOP/external/selinux/prebuilts/bin/sesearch.py "$@"
+else
+ echo "sesearch is only supported on linux"
+fi
diff --git a/prebuilts/bin/sesearch.py b/prebuilts/bin/sesearch.py
new file mode 100755
index 00000000..e861db60
--- /dev/null
+++ b/prebuilts/bin/sesearch.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# Copyright 2014-2015, Tresys Technology, LLC
+#
+# This file is part of SETools.
+#
+# SETools is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# SETools is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SETools. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import print_function
+import setools
+import argparse
+import sys
+import logging
+
+parser = argparse.ArgumentParser(
+ description="SELinux policy rule search tool.",
+ epilog="TE/MLS rule searches cannot be mixed with RBAC rule searches.")
+parser.add_argument("--version", action="version", version=setools.__version__)
+parser.add_argument("policy", help="Path to the SELinux policy to search.", nargs="?")
+parser.add_argument("-v", "--verbose", action="store_true",
+ help="Print extra informational messages")
+parser.add_argument("--debug", action="store_true", dest="debug", help="Enable debugging.")
+
+rtypes = parser.add_argument_group("TE Rule Types")
+rtypes.add_argument("-A", "--allow", action="append_const",
+ const="allow", dest="tertypes",
+ help="Search allow rules.")
+rtypes.add_argument("--auditallow", action="append_const",
+ const="auditallow", dest="tertypes",
+ help="Search auditallow rules.")
+rtypes.add_argument("--dontaudit", action="append_const",
+ const="dontaudit", dest="tertypes",
+ help="Search dontaudit rules.")
+rtypes.add_argument("-T", "--type_trans", action="append_const",
+ const="type_transition", dest="tertypes",
+ help="Search type_transition rules.")
+rtypes.add_argument("--type_change", action="append_const",
+ const="type_change", dest="tertypes",
+ help="Search type_change rules.")
+rtypes.add_argument("--type_member", action="append_const",
+ const="type_member", dest="tertypes",
+ help="Search type_member rules.")
+
+rbacrtypes = parser.add_argument_group("RBAC Rule Types")
+rbacrtypes.add_argument("--role_allow", action="append_const",
+ const="allow", dest="rbacrtypes",
+ help="Search role allow rules.")
+rbacrtypes.add_argument("--role_trans", action="append_const",
+ const="role_transition", dest="rbacrtypes",
+ help="Search role_transition rules.")
+
+mlsrtypes = parser.add_argument_group("MLS Rule Types")
+mlsrtypes.add_argument("--range_trans", action="append_const",
+ const="range_transition", dest="mlsrtypes",
+ help="Search range_transition rules.")
+
+expr = parser.add_argument_group("Expressions")
+expr.add_argument("-s", "--source",
+ help="Source type/role of the TE/RBAC rule.")
+expr.add_argument("-t", "--target",
+ help="Target type/role of the TE/RBAC rule.")
+expr.add_argument("-c", "--class", dest="tclass",
+ help="Comma separated list of object classes")
+expr.add_argument("-p", "--perms", metavar="PERMS",
+ help="Comma separated list of permissions.")
+expr.add_argument("-D", "--default",
+ help="Default of the rule. (type/role/range transition rules)")
+expr.add_argument("-b", "--bool", dest="boolean", metavar="BOOL",
+ help="Comma separated list of Booleans in the conditional expression.")
+
+opts = parser.add_argument_group("Search options")
+opts.add_argument("-eb", action="store_true", dest="boolean_equal",
+ help="Match Boolean list exactly instead of matching any listed Boolean.")
+opts.add_argument("-ep", action="store_true", dest="perms_equal",
+ help="Match permission set exactly instead of matching any listed permission.")
+opts.add_argument("-ds", action="store_false", dest="source_indirect",
+ help="Match source attributes directly instead of matching member types/roles.")
+opts.add_argument("-dt", action="store_false", dest="target_indirect",
+ help="Match target attributes directly instead of matching member types/roles.")
+opts.add_argument("-rs", action="store_true", dest="source_regex",
+ help="Use regular expression matching for the source type/role.")
+opts.add_argument("-rt", action="store_true", dest="target_regex",
+ help="Use regular expression matching for the target type/role.")
+opts.add_argument("-rc", action="store_true", dest="tclass_regex",
+ help="Use regular expression matching for the object class.")
+opts.add_argument("-rd", action="store_true", dest="default_regex",
+ help="Use regular expression matching for the default type/role.")
+opts.add_argument("-rb", action="store_true", dest="boolean_regex",
+ help="Use regular expression matching for Booleans.")
+
+args = parser.parse_args()
+
+if not args.tertypes and not args.mlsrtypes and not args.rbacrtypes:
+ parser.error("At least one rule type must be specified.")
+
+if args.debug:
+ logging.basicConfig(level=logging.DEBUG,
+ format='%(asctime)s|%(levelname)s|%(name)s|%(message)s')
+elif args.verbose:
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+else:
+ logging.basicConfig(level=logging.WARNING, format='%(message)s')
+
+try:
+ p = setools.SELinuxPolicy(args.policy)
+
+ if args.tertypes:
+ q = setools.TERuleQuery(p,
+ ruletype=args.tertypes,
+ source=args.source,
+ source_indirect=args.source_indirect,
+ source_regex=args.source_regex,
+ target=args.target,
+ target_indirect=args.target_indirect,
+ target_regex=args.target_regex,
+ tclass_regex=args.tclass_regex,
+ perms_equal=args.perms_equal,
+ default=args.default,
+ default_regex=args.default_regex,
+ boolean_regex=args.boolean_regex,
+ boolean_equal=args.boolean_equal)
+
+ # these are broken out from the above statement to prevent making a list
+ # with an empty string in it (split on empty string)
+ if args.tclass:
+ if args.tclass_regex:
+ q.tclass = args.tclass
+ else:
+ q.tclass = args.tclass.split(",")
+
+ if args.perms:
+ q.perms = args.perms.split(",")
+
+ if args.boolean:
+ if args.boolean_regex:
+ q.boolean = args.boolean
+ else:
+ q.boolean = args.boolean.split(",")
+
+ for r in sorted(q.results()):
+ print(r)
+
+ if args.rbacrtypes:
+ q = setools.RBACRuleQuery(p,
+ ruletype=args.rbacrtypes,
+ source=args.source,
+ source_indirect=args.source_indirect,
+ source_regex=args.source_regex,
+ target=args.target,
+ target_indirect=args.target_indirect,
+ target_regex=args.target_regex,
+ default=args.default,
+ default_regex=args.default_regex,
+ tclass_regex=args.tclass_regex)
+
+ # these are broken out from the above statement to prevent making a list
+ # with an empty string in it (split on empty string)
+ if args.tclass:
+ if args.tclass_regex:
+ q.tclass = args.tclass
+ else:
+ q.tclass = args.tclass.split(",")
+
+ for r in sorted(q.results()):
+ print(r)
+
+ if args.mlsrtypes:
+ q = setools.MLSRuleQuery(p,
+ ruletype=args.mlsrtypes,
+ source=args.source,
+ source_regex=args.source_regex,
+ target=args.target,
+ target_regex=args.target_regex,
+ tclass_regex=args.tclass_regex,
+ default=args.default)
+
+ # these are broken out from the above statement to prevent making a list
+ # with an empty string in it (split on empty string)
+ if args.tclass:
+ if args.tclass_regex:
+ q.tclass = args.tclass
+ else:
+ q.tclass = args.tclass.split(",")
+
+ for r in sorted(q.results()):
+ print(r)
+
+except Exception as err:
+ if args.debug:
+ import traceback
+ traceback.print_exc()
+ else:
+ print(err)
+
+ sys.exit(-1)
diff --git a/prebuilts/lib/selinux/__init__.py b/prebuilts/lib/selinux/__init__.py
deleted file mode 100644
index b81b031d..00000000
--- a/prebuilts/lib/selinux/__init__.py
+++ /dev/null
@@ -1,2445 +0,0 @@
-# This file was automatically generated by SWIG (http://www.swig.org).
-# Version 2.0.11
-#
-# Do not make changes to this file unless you know what you are doing--modify
-# the SWIG interface file instead.
-
-
-
-
-
-from sys import version_info
-if version_info >= (2,6,0):
- def swig_import_helper():
- from os.path import dirname
- import imp
- fp = None
- try:
- fp, pathname, description = imp.find_module('_selinux', [dirname(__file__)])
- except ImportError:
- import _selinux
- return _selinux
- if fp is not None:
- try:
- _mod = imp.load_module('_selinux', fp, pathname, description)
- finally:
- fp.close()
- return _mod
- _selinux = swig_import_helper()
- del swig_import_helper
-else:
- import _selinux
-del version_info
-try:
- _swig_property = property
-except NameError:
- pass # Python < 2.2 doesn't have 'property'.
-def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
- if (name == "thisown"): return self.this.own(value)
- if (name == "this"):
- if type(value).__name__ == 'SwigPyObject':
- self.__dict__[name] = value
- return
- method = class_type.__swig_setmethods__.get(name,None)
- if method: return method(self,value)
- if (not static):
- self.__dict__[name] = value
- else:
- raise AttributeError("You cannot add attributes to %s" % self)
-
-def _swig_setattr(self,class_type,name,value):
- return _swig_setattr_nondynamic(self,class_type,name,value,0)
-
-def _swig_getattr(self,class_type,name):
- if (name == "thisown"): return self.this.own()
- method = class_type.__swig_getmethods__.get(name,None)
- if method: return method(self)
- raise AttributeError(name)
-
-def _swig_repr(self):
- try: strthis = "proxy of " + self.this.__repr__()
- except: strthis = ""
- return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
-
-try:
- _object = object
- _newclass = 1
-except AttributeError:
- class _object : pass
- _newclass = 0
-
-
-import shutil, os, stat
-
-DISABLED = -1
-PERMISSIVE = 0
-ENFORCING = 1
-
-def restorecon(path, recursive=False):
- """ Restore SELinux context on a given path """
-
- try:
- mode = os.lstat(path)[stat.ST_MODE]
- status, context = matchpathcon(path, mode)
- except OSError:
- path = os.path.realpath(os.path.expanduser(path))
- mode = os.lstat(path)[stat.ST_MODE]
- status, context = matchpathcon(path, mode)
-
- if status == 0:
- status, oldcontext = lgetfilecon(path)
- if context != oldcontext:
- lsetfilecon(path, context)
-
- if recursive:
- for root, dirs, files in os.walk(path):
- for name in files + dirs:
- restorecon(os.path.join(root, name))
-
-def chcon(path, context, recursive=False):
- """ Set the SELinux context on a given path """
- lsetfilecon(path, context)
- if recursive:
- for root, dirs, files in os.walk(path):
- for name in files + dirs:
- lsetfilecon(os.path.join(root,name), context)
-
-def copytree(src, dest):
- """ An SELinux-friendly shutil.copytree method """
- shutil.copytree(src, dest)
- restorecon(dest, recursive=True)
-
-def install(src, dest):
- """ An SELinux-friendly shutil.move method """
- shutil.move(src, dest)
- restorecon(dest, recursive=True)
-
-class security_id(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, security_id, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, security_id, name)
- __repr__ = _swig_repr
- __swig_setmethods__["ctx"] = _selinux.security_id_ctx_set
- __swig_getmethods__["ctx"] = _selinux.security_id_ctx_get
- if _newclass:ctx = _swig_property(_selinux.security_id_ctx_get, _selinux.security_id_ctx_set)
- __swig_setmethods__["refcnt"] = _selinux.security_id_refcnt_set
- __swig_getmethods__["refcnt"] = _selinux.security_id_refcnt_get
- if _newclass:refcnt = _swig_property(_selinux.security_id_refcnt_get, _selinux.security_id_refcnt_set)
- def __init__(self):
- this = _selinux.new_security_id()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_security_id
- __del__ = lambda self : None;
-security_id_swigregister = _selinux.security_id_swigregister
-security_id_swigregister(security_id)
-
-
-def avc_sid_to_context(*args):
- return _selinux.avc_sid_to_context(*args)
-avc_sid_to_context = _selinux.avc_sid_to_context
-
-def avc_sid_to_context_raw(*args):
- return _selinux.avc_sid_to_context_raw(*args)
-avc_sid_to_context_raw = _selinux.avc_sid_to_context_raw
-
-def avc_context_to_sid(*args):
- return _selinux.avc_context_to_sid(*args)
-avc_context_to_sid = _selinux.avc_context_to_sid
-
-def avc_context_to_sid_raw(*args):
- return _selinux.avc_context_to_sid_raw(*args)
-avc_context_to_sid_raw = _selinux.avc_context_to_sid_raw
-
-def sidget(*args):
- return _selinux.sidget(*args)
-sidget = _selinux.sidget
-
-def sidput(*args):
- return _selinux.sidput(*args)
-sidput = _selinux.sidput
-
-def avc_get_initial_sid(*args):
- return _selinux.avc_get_initial_sid(*args)
-avc_get_initial_sid = _selinux.avc_get_initial_sid
-class avc_entry_ref(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, avc_entry_ref, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, avc_entry_ref, name)
- __repr__ = _swig_repr
- __swig_setmethods__["ae"] = _selinux.avc_entry_ref_ae_set
- __swig_getmethods__["ae"] = _selinux.avc_entry_ref_ae_get
- if _newclass:ae = _swig_property(_selinux.avc_entry_ref_ae_get, _selinux.avc_entry_ref_ae_set)
- def __init__(self):
- this = _selinux.new_avc_entry_ref()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_avc_entry_ref
- __del__ = lambda self : None;
-avc_entry_ref_swigregister = _selinux.avc_entry_ref_swigregister
-avc_entry_ref_swigregister(avc_entry_ref)
-
-class avc_memory_callback(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, avc_memory_callback, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, avc_memory_callback, name)
- __repr__ = _swig_repr
- __swig_setmethods__["func_malloc"] = _selinux.avc_memory_callback_func_malloc_set
- __swig_getmethods__["func_malloc"] = _selinux.avc_memory_callback_func_malloc_get
- if _newclass:func_malloc = _swig_property(_selinux.avc_memory_callback_func_malloc_get, _selinux.avc_memory_callback_func_malloc_set)
- __swig_setmethods__["func_free"] = _selinux.avc_memory_callback_func_free_set
- __swig_getmethods__["func_free"] = _selinux.avc_memory_callback_func_free_get
- if _newclass:func_free = _swig_property(_selinux.avc_memory_callback_func_free_get, _selinux.avc_memory_callback_func_free_set)
- def __init__(self):
- this = _selinux.new_avc_memory_callback()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_avc_memory_callback
- __del__ = lambda self : None;
-avc_memory_callback_swigregister = _selinux.avc_memory_callback_swigregister
-avc_memory_callback_swigregister(avc_memory_callback)
-
-class avc_log_callback(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, avc_log_callback, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, avc_log_callback, name)
- __repr__ = _swig_repr
- __swig_setmethods__["func_log"] = _selinux.avc_log_callback_func_log_set
- __swig_getmethods__["func_log"] = _selinux.avc_log_callback_func_log_get
- if _newclass:func_log = _swig_property(_selinux.avc_log_callback_func_log_get, _selinux.avc_log_callback_func_log_set)
- __swig_setmethods__["func_audit"] = _selinux.avc_log_callback_func_audit_set
- __swig_getmethods__["func_audit"] = _selinux.avc_log_callback_func_audit_get
- if _newclass:func_audit = _swig_property(_selinux.avc_log_callback_func_audit_get, _selinux.avc_log_callback_func_audit_set)
- def __init__(self):
- this = _selinux.new_avc_log_callback()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_avc_log_callback
- __del__ = lambda self : None;
-avc_log_callback_swigregister = _selinux.avc_log_callback_swigregister
-avc_log_callback_swigregister(avc_log_callback)
-
-class avc_thread_callback(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, avc_thread_callback, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, avc_thread_callback, name)
- __repr__ = _swig_repr
- __swig_setmethods__["func_create_thread"] = _selinux.avc_thread_callback_func_create_thread_set
- __swig_getmethods__["func_create_thread"] = _selinux.avc_thread_callback_func_create_thread_get
- if _newclass:func_create_thread = _swig_property(_selinux.avc_thread_callback_func_create_thread_get, _selinux.avc_thread_callback_func_create_thread_set)
- __swig_setmethods__["func_stop_thread"] = _selinux.avc_thread_callback_func_stop_thread_set
- __swig_getmethods__["func_stop_thread"] = _selinux.avc_thread_callback_func_stop_thread_get
- if _newclass:func_stop_thread = _swig_property(_selinux.avc_thread_callback_func_stop_thread_get, _selinux.avc_thread_callback_func_stop_thread_set)
- def __init__(self):
- this = _selinux.new_avc_thread_callback()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_avc_thread_callback
- __del__ = lambda self : None;
-avc_thread_callback_swigregister = _selinux.avc_thread_callback_swigregister
-avc_thread_callback_swigregister(avc_thread_callback)
-
-class avc_lock_callback(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, avc_lock_callback, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, avc_lock_callback, name)
- __repr__ = _swig_repr
- __swig_setmethods__["func_alloc_lock"] = _selinux.avc_lock_callback_func_alloc_lock_set
- __swig_getmethods__["func_alloc_lock"] = _selinux.avc_lock_callback_func_alloc_lock_get
- if _newclass:func_alloc_lock = _swig_property(_selinux.avc_lock_callback_func_alloc_lock_get, _selinux.avc_lock_callback_func_alloc_lock_set)
- __swig_setmethods__["func_get_lock"] = _selinux.avc_lock_callback_func_get_lock_set
- __swig_getmethods__["func_get_lock"] = _selinux.avc_lock_callback_func_get_lock_get
- if _newclass:func_get_lock = _swig_property(_selinux.avc_lock_callback_func_get_lock_get, _selinux.avc_lock_callback_func_get_lock_set)
- __swig_setmethods__["func_release_lock"] = _selinux.avc_lock_callback_func_release_lock_set
- __swig_getmethods__["func_release_lock"] = _selinux.avc_lock_callback_func_release_lock_get
- if _newclass:func_release_lock = _swig_property(_selinux.avc_lock_callback_func_release_lock_get, _selinux.avc_lock_callback_func_release_lock_set)
- __swig_setmethods__["func_free_lock"] = _selinux.avc_lock_callback_func_free_lock_set
- __swig_getmethods__["func_free_lock"] = _selinux.avc_lock_callback_func_free_lock_get
- if _newclass:func_free_lock = _swig_property(_selinux.avc_lock_callback_func_free_lock_get, _selinux.avc_lock_callback_func_free_lock_set)
- def __init__(self):
- this = _selinux.new_avc_lock_callback()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_avc_lock_callback
- __del__ = lambda self : None;
-avc_lock_callback_swigregister = _selinux.avc_lock_callback_swigregister
-avc_lock_callback_swigregister(avc_lock_callback)
-
-AVC_OPT_UNUSED = _selinux.AVC_OPT_UNUSED
-AVC_OPT_SETENFORCE = _selinux.AVC_OPT_SETENFORCE
-
-def avc_init(*args):
- return _selinux.avc_init(*args)
-avc_init = _selinux.avc_init
-
-def avc_open(*args):
- return _selinux.avc_open(*args)
-avc_open = _selinux.avc_open
-
-def avc_cleanup():
- return _selinux.avc_cleanup()
-avc_cleanup = _selinux.avc_cleanup
-
-def avc_reset():
- return _selinux.avc_reset()
-avc_reset = _selinux.avc_reset
-
-def avc_destroy():
- return _selinux.avc_destroy()
-avc_destroy = _selinux.avc_destroy
-
-def avc_has_perm_noaudit(*args):
- return _selinux.avc_has_perm_noaudit(*args)
-avc_has_perm_noaudit = _selinux.avc_has_perm_noaudit
-
-def avc_has_perm(*args):
- return _selinux.avc_has_perm(*args)
-avc_has_perm = _selinux.avc_has_perm
-
-def avc_audit(*args):
- return _selinux.avc_audit(*args)
-avc_audit = _selinux.avc_audit
-
-def avc_compute_create(*args):
- return _selinux.avc_compute_create(*args)
-avc_compute_create = _selinux.avc_compute_create
-
-def avc_compute_member(*args):
- return _selinux.avc_compute_member(*args)
-avc_compute_member = _selinux.avc_compute_member
-AVC_CALLBACK_GRANT = _selinux.AVC_CALLBACK_GRANT
-AVC_CALLBACK_TRY_REVOKE = _selinux.AVC_CALLBACK_TRY_REVOKE
-AVC_CALLBACK_REVOKE = _selinux.AVC_CALLBACK_REVOKE
-AVC_CALLBACK_RESET = _selinux.AVC_CALLBACK_RESET
-AVC_CALLBACK_AUDITALLOW_ENABLE = _selinux.AVC_CALLBACK_AUDITALLOW_ENABLE
-AVC_CALLBACK_AUDITALLOW_DISABLE = _selinux.AVC_CALLBACK_AUDITALLOW_DISABLE
-AVC_CALLBACK_AUDITDENY_ENABLE = _selinux.AVC_CALLBACK_AUDITDENY_ENABLE
-AVC_CALLBACK_AUDITDENY_DISABLE = _selinux.AVC_CALLBACK_AUDITDENY_DISABLE
-AVC_CACHE_STATS = _selinux.AVC_CACHE_STATS
-class avc_cache_stats(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, avc_cache_stats, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, avc_cache_stats, name)
- __repr__ = _swig_repr
- __swig_setmethods__["entry_lookups"] = _selinux.avc_cache_stats_entry_lookups_set
- __swig_getmethods__["entry_lookups"] = _selinux.avc_cache_stats_entry_lookups_get
- if _newclass:entry_lookups = _swig_property(_selinux.avc_cache_stats_entry_lookups_get, _selinux.avc_cache_stats_entry_lookups_set)
- __swig_setmethods__["entry_hits"] = _selinux.avc_cache_stats_entry_hits_set
- __swig_getmethods__["entry_hits"] = _selinux.avc_cache_stats_entry_hits_get
- if _newclass:entry_hits = _swig_property(_selinux.avc_cache_stats_entry_hits_get, _selinux.avc_cache_stats_entry_hits_set)
- __swig_setmethods__["entry_misses"] = _selinux.avc_cache_stats_entry_misses_set
- __swig_getmethods__["entry_misses"] = _selinux.avc_cache_stats_entry_misses_get
- if _newclass:entry_misses = _swig_property(_selinux.avc_cache_stats_entry_misses_get, _selinux.avc_cache_stats_entry_misses_set)
- __swig_setmethods__["entry_discards"] = _selinux.avc_cache_stats_entry_discards_set
- __swig_getmethods__["entry_discards"] = _selinux.avc_cache_stats_entry_discards_get
- if _newclass:entry_discards = _swig_property(_selinux.avc_cache_stats_entry_discards_get, _selinux.avc_cache_stats_entry_discards_set)
- __swig_setmethods__["cav_lookups"] = _selinux.avc_cache_stats_cav_lookups_set
- __swig_getmethods__["cav_lookups"] = _selinux.avc_cache_stats_cav_lookups_get
- if _newclass:cav_lookups = _swig_property(_selinux.avc_cache_stats_cav_lookups_get, _selinux.avc_cache_stats_cav_lookups_set)
- __swig_setmethods__["cav_hits"] = _selinux.avc_cache_stats_cav_hits_set
- __swig_getmethods__["cav_hits"] = _selinux.avc_cache_stats_cav_hits_get
- if _newclass:cav_hits = _swig_property(_selinux.avc_cache_stats_cav_hits_get, _selinux.avc_cache_stats_cav_hits_set)
- __swig_setmethods__["cav_probes"] = _selinux.avc_cache_stats_cav_probes_set
- __swig_getmethods__["cav_probes"] = _selinux.avc_cache_stats_cav_probes_get
- if _newclass:cav_probes = _swig_property(_selinux.avc_cache_stats_cav_probes_get, _selinux.avc_cache_stats_cav_probes_set)
- __swig_setmethods__["cav_misses"] = _selinux.avc_cache_stats_cav_misses_set
- __swig_getmethods__["cav_misses"] = _selinux.avc_cache_stats_cav_misses_get
- if _newclass:cav_misses = _swig_property(_selinux.avc_cache_stats_cav_misses_get, _selinux.avc_cache_stats_cav_misses_set)
- def __init__(self):
- this = _selinux.new_avc_cache_stats()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_avc_cache_stats
- __del__ = lambda self : None;
-avc_cache_stats_swigregister = _selinux.avc_cache_stats_swigregister
-avc_cache_stats_swigregister(avc_cache_stats)
-
-
-def avc_av_stats():
- return _selinux.avc_av_stats()
-avc_av_stats = _selinux.avc_av_stats
-
-def avc_sid_stats():
- return _selinux.avc_sid_stats()
-avc_sid_stats = _selinux.avc_sid_stats
-
-def avc_netlink_open(*args):
- return _selinux.avc_netlink_open(*args)
-avc_netlink_open = _selinux.avc_netlink_open
-
-def avc_netlink_loop():
- return _selinux.avc_netlink_loop()
-avc_netlink_loop = _selinux.avc_netlink_loop
-
-def avc_netlink_close():
- return _selinux.avc_netlink_close()
-avc_netlink_close = _selinux.avc_netlink_close
-
-def selinux_status_open(*args):
- return _selinux.selinux_status_open(*args)
-selinux_status_open = _selinux.selinux_status_open
-
-def selinux_status_close():
- return _selinux.selinux_status_close()
-selinux_status_close = _selinux.selinux_status_close
-
-def selinux_status_updated():
- return _selinux.selinux_status_updated()
-selinux_status_updated = _selinux.selinux_status_updated
-
-def selinux_status_getenforce():
- return _selinux.selinux_status_getenforce()
-selinux_status_getenforce = _selinux.selinux_status_getenforce
-
-def selinux_status_policyload():
- return _selinux.selinux_status_policyload()
-selinux_status_policyload = _selinux.selinux_status_policyload
-
-def selinux_status_deny_unknown():
- return _selinux.selinux_status_deny_unknown()
-selinux_status_deny_unknown = _selinux.selinux_status_deny_unknown
-COMMON_FILE__IOCTL = _selinux.COMMON_FILE__IOCTL
-COMMON_FILE__READ = _selinux.COMMON_FILE__READ
-COMMON_FILE__WRITE = _selinux.COMMON_FILE__WRITE
-COMMON_FILE__CREATE = _selinux.COMMON_FILE__CREATE
-COMMON_FILE__GETATTR = _selinux.COMMON_FILE__GETATTR
-COMMON_FILE__SETATTR = _selinux.COMMON_FILE__SETATTR
-COMMON_FILE__LOCK = _selinux.COMMON_FILE__LOCK
-COMMON_FILE__RELABELFROM = _selinux.COMMON_FILE__RELABELFROM
-COMMON_FILE__RELABELTO = _selinux.COMMON_FILE__RELABELTO
-COMMON_FILE__APPEND = _selinux.COMMON_FILE__APPEND
-COMMON_FILE__UNLINK = _selinux.COMMON_FILE__UNLINK
-COMMON_FILE__LINK = _selinux.COMMON_FILE__LINK
-COMMON_FILE__RENAME = _selinux.COMMON_FILE__RENAME
-COMMON_FILE__EXECUTE = _selinux.COMMON_FILE__EXECUTE
-COMMON_FILE__SWAPON = _selinux.COMMON_FILE__SWAPON
-COMMON_FILE__QUOTAON = _selinux.COMMON_FILE__QUOTAON
-COMMON_FILE__MOUNTON = _selinux.COMMON_FILE__MOUNTON
-COMMON_SOCKET__IOCTL = _selinux.COMMON_SOCKET__IOCTL
-COMMON_SOCKET__READ = _selinux.COMMON_SOCKET__READ
-COMMON_SOCKET__WRITE = _selinux.COMMON_SOCKET__WRITE
-COMMON_SOCKET__CREATE = _selinux.COMMON_SOCKET__CREATE
-COMMON_SOCKET__GETATTR = _selinux.COMMON_SOCKET__GETATTR
-COMMON_SOCKET__SETATTR = _selinux.COMMON_SOCKET__SETATTR
-COMMON_SOCKET__LOCK = _selinux.COMMON_SOCKET__LOCK
-COMMON_SOCKET__RELABELFROM = _selinux.COMMON_SOCKET__RELABELFROM
-COMMON_SOCKET__RELABELTO = _selinux.COMMON_SOCKET__RELABELTO
-COMMON_SOCKET__APPEND = _selinux.COMMON_SOCKET__APPEND
-COMMON_SOCKET__BIND = _selinux.COMMON_SOCKET__BIND
-COMMON_SOCKET__CONNECT = _selinux.COMMON_SOCKET__CONNECT
-COMMON_SOCKET__LISTEN = _selinux.COMMON_SOCKET__LISTEN
-COMMON_SOCKET__ACCEPT = _selinux.COMMON_SOCKET__ACCEPT
-COMMON_SOCKET__GETOPT = _selinux.COMMON_SOCKET__GETOPT
-COMMON_SOCKET__SETOPT = _selinux.COMMON_SOCKET__SETOPT
-COMMON_SOCKET__SHUTDOWN = _selinux.COMMON_SOCKET__SHUTDOWN
-COMMON_SOCKET__RECVFROM = _selinux.COMMON_SOCKET__RECVFROM
-COMMON_SOCKET__SENDTO = _selinux.COMMON_SOCKET__SENDTO
-COMMON_SOCKET__RECV_MSG = _selinux.COMMON_SOCKET__RECV_MSG
-COMMON_SOCKET__SEND_MSG = _selinux.COMMON_SOCKET__SEND_MSG
-COMMON_SOCKET__NAME_BIND = _selinux.COMMON_SOCKET__NAME_BIND
-COMMON_IPC__CREATE = _selinux.COMMON_IPC__CREATE
-COMMON_IPC__DESTROY = _selinux.COMMON_IPC__DESTROY
-COMMON_IPC__GETATTR = _selinux.COMMON_IPC__GETATTR
-COMMON_IPC__SETATTR = _selinux.COMMON_IPC__SETATTR
-COMMON_IPC__READ = _selinux.COMMON_IPC__READ
-COMMON_IPC__WRITE = _selinux.COMMON_IPC__WRITE
-COMMON_IPC__ASSOCIATE = _selinux.COMMON_IPC__ASSOCIATE
-COMMON_IPC__UNIX_READ = _selinux.COMMON_IPC__UNIX_READ
-COMMON_IPC__UNIX_WRITE = _selinux.COMMON_IPC__UNIX_WRITE
-COMMON_DATABASE__CREATE = _selinux.COMMON_DATABASE__CREATE
-COMMON_DATABASE__DROP = _selinux.COMMON_DATABASE__DROP
-COMMON_DATABASE__GETATTR = _selinux.COMMON_DATABASE__GETATTR
-COMMON_DATABASE__SETATTR = _selinux.COMMON_DATABASE__SETATTR
-COMMON_DATABASE__RELABELFROM = _selinux.COMMON_DATABASE__RELABELFROM
-COMMON_DATABASE__RELABELTO = _selinux.COMMON_DATABASE__RELABELTO
-FILESYSTEM__MOUNT = _selinux.FILESYSTEM__MOUNT
-FILESYSTEM__REMOUNT = _selinux.FILESYSTEM__REMOUNT
-FILESYSTEM__UNMOUNT = _selinux.FILESYSTEM__UNMOUNT
-FILESYSTEM__GETATTR = _selinux.FILESYSTEM__GETATTR
-FILESYSTEM__RELABELFROM = _selinux.FILESYSTEM__RELABELFROM
-FILESYSTEM__RELABELTO = _selinux.FILESYSTEM__RELABELTO
-FILESYSTEM__TRANSITION = _selinux.FILESYSTEM__TRANSITION
-FILESYSTEM__ASSOCIATE = _selinux.FILESYSTEM__ASSOCIATE
-FILESYSTEM__QUOTAMOD = _selinux.FILESYSTEM__QUOTAMOD
-FILESYSTEM__QUOTAGET = _selinux.FILESYSTEM__QUOTAGET
-DIR__IOCTL = _selinux.DIR__IOCTL
-DIR__READ = _selinux.DIR__READ
-DIR__WRITE = _selinux.DIR__WRITE
-DIR__CREATE = _selinux.DIR__CREATE
-DIR__GETATTR = _selinux.DIR__GETATTR
-DIR__SETATTR = _selinux.DIR__SETATTR
-DIR__LOCK = _selinux.DIR__LOCK
-DIR__RELABELFROM = _selinux.DIR__RELABELFROM
-DIR__RELABELTO = _selinux.DIR__RELABELTO
-DIR__APPEND = _selinux.DIR__APPEND
-DIR__UNLINK = _selinux.DIR__UNLINK
-DIR__LINK = _selinux.DIR__LINK
-DIR__RENAME = _selinux.DIR__RENAME
-DIR__EXECUTE = _selinux.DIR__EXECUTE
-DIR__SWAPON = _selinux.DIR__SWAPON
-DIR__QUOTAON = _selinux.DIR__QUOTAON
-DIR__MOUNTON = _selinux.DIR__MOUNTON
-DIR__ADD_NAME = _selinux.DIR__ADD_NAME
-DIR__REMOVE_NAME = _selinux.DIR__REMOVE_NAME
-DIR__REPARENT = _selinux.DIR__REPARENT
-DIR__SEARCH = _selinux.DIR__SEARCH
-DIR__RMDIR = _selinux.DIR__RMDIR
-DIR__OPEN = _selinux.DIR__OPEN
-FILE__IOCTL = _selinux.FILE__IOCTL
-FILE__READ = _selinux.FILE__READ
-FILE__WRITE = _selinux.FILE__WRITE
-FILE__CREATE = _selinux.FILE__CREATE
-FILE__GETATTR = _selinux.FILE__GETATTR
-FILE__SETATTR = _selinux.FILE__SETATTR
-FILE__LOCK = _selinux.FILE__LOCK
-FILE__RELABELFROM = _selinux.FILE__RELABELFROM
-FILE__RELABELTO = _selinux.FILE__RELABELTO
-FILE__APPEND = _selinux.FILE__APPEND
-FILE__UNLINK = _selinux.FILE__UNLINK
-FILE__LINK = _selinux.FILE__LINK
-FILE__RENAME = _selinux.FILE__RENAME
-FILE__EXECUTE = _selinux.FILE__EXECUTE
-FILE__SWAPON = _selinux.FILE__SWAPON
-FILE__QUOTAON = _selinux.FILE__QUOTAON
-FILE__MOUNTON = _selinux.FILE__MOUNTON
-FILE__EXECUTE_NO_TRANS = _selinux.FILE__EXECUTE_NO_TRANS
-FILE__ENTRYPOINT = _selinux.FILE__ENTRYPOINT
-FILE__EXECMOD = _selinux.FILE__EXECMOD
-FILE__OPEN = _selinux.FILE__OPEN
-LNK_FILE__IOCTL = _selinux.LNK_FILE__IOCTL
-LNK_FILE__READ = _selinux.LNK_FILE__READ
-LNK_FILE__WRITE = _selinux.LNK_FILE__WRITE
-LNK_FILE__CREATE = _selinux.LNK_FILE__CREATE
-LNK_FILE__GETATTR = _selinux.LNK_FILE__GETATTR
-LNK_FILE__SETATTR = _selinux.LNK_FILE__SETATTR
-LNK_FILE__LOCK = _selinux.LNK_FILE__LOCK
-LNK_FILE__RELABELFROM = _selinux.LNK_FILE__RELABELFROM
-LNK_FILE__RELABELTO = _selinux.LNK_FILE__RELABELTO
-LNK_FILE__APPEND = _selinux.LNK_FILE__APPEND
-LNK_FILE__UNLINK = _selinux.LNK_FILE__UNLINK
-LNK_FILE__LINK = _selinux.LNK_FILE__LINK
-LNK_FILE__RENAME = _selinux.LNK_FILE__RENAME
-LNK_FILE__EXECUTE = _selinux.LNK_FILE__EXECUTE
-LNK_FILE__SWAPON = _selinux.LNK_FILE__SWAPON
-LNK_FILE__QUOTAON = _selinux.LNK_FILE__QUOTAON
-LNK_FILE__MOUNTON = _selinux.LNK_FILE__MOUNTON
-CHR_FILE__IOCTL = _selinux.CHR_FILE__IOCTL
-CHR_FILE__READ = _selinux.CHR_FILE__READ
-CHR_FILE__WRITE = _selinux.CHR_FILE__WRITE
-CHR_FILE__CREATE = _selinux.CHR_FILE__CREATE
-CHR_FILE__GETATTR = _selinux.CHR_FILE__GETATTR
-CHR_FILE__SETATTR = _selinux.CHR_FILE__SETATTR
-CHR_FILE__LOCK = _selinux.CHR_FILE__LOCK
-CHR_FILE__RELABELFROM = _selinux.CHR_FILE__RELABELFROM
-CHR_FILE__RELABELTO = _selinux.CHR_FILE__RELABELTO
-CHR_FILE__APPEND = _selinux.CHR_FILE__APPEND
-CHR_FILE__UNLINK = _selinux.CHR_FILE__UNLINK
-CHR_FILE__LINK = _selinux.CHR_FILE__LINK
-CHR_FILE__RENAME = _selinux.CHR_FILE__RENAME
-CHR_FILE__EXECUTE = _selinux.CHR_FILE__EXECUTE
-CHR_FILE__SWAPON = _selinux.CHR_FILE__SWAPON
-CHR_FILE__QUOTAON = _selinux.CHR_FILE__QUOTAON
-CHR_FILE__MOUNTON = _selinux.CHR_FILE__MOUNTON
-CHR_FILE__EXECUTE_NO_TRANS = _selinux.CHR_FILE__EXECUTE_NO_TRANS
-CHR_FILE__ENTRYPOINT = _selinux.CHR_FILE__ENTRYPOINT
-CHR_FILE__EXECMOD = _selinux.CHR_FILE__EXECMOD
-CHR_FILE__OPEN = _selinux.CHR_FILE__OPEN
-BLK_FILE__IOCTL = _selinux.BLK_FILE__IOCTL
-BLK_FILE__READ = _selinux.BLK_FILE__READ
-BLK_FILE__WRITE = _selinux.BLK_FILE__WRITE
-BLK_FILE__CREATE = _selinux.BLK_FILE__CREATE
-BLK_FILE__GETATTR = _selinux.BLK_FILE__GETATTR
-BLK_FILE__SETATTR = _selinux.BLK_FILE__SETATTR
-BLK_FILE__LOCK = _selinux.BLK_FILE__LOCK
-BLK_FILE__RELABELFROM = _selinux.BLK_FILE__RELABELFROM
-BLK_FILE__RELABELTO = _selinux.BLK_FILE__RELABELTO
-BLK_FILE__APPEND = _selinux.BLK_FILE__APPEND
-BLK_FILE__UNLINK = _selinux.BLK_FILE__UNLINK
-BLK_FILE__LINK = _selinux.BLK_FILE__LINK
-BLK_FILE__RENAME = _selinux.BLK_FILE__RENAME
-BLK_FILE__EXECUTE = _selinux.BLK_FILE__EXECUTE
-BLK_FILE__SWAPON = _selinux.BLK_FILE__SWAPON
-BLK_FILE__QUOTAON = _selinux.BLK_FILE__QUOTAON
-BLK_FILE__MOUNTON = _selinux.BLK_FILE__MOUNTON
-BLK_FILE__OPEN = _selinux.BLK_FILE__OPEN
-SOCK_FILE__IOCTL = _selinux.SOCK_FILE__IOCTL
-SOCK_FILE__READ = _selinux.SOCK_FILE__READ
-SOCK_FILE__WRITE = _selinux.SOCK_FILE__WRITE
-SOCK_FILE__CREATE = _selinux.SOCK_FILE__CREATE
-SOCK_FILE__GETATTR = _selinux.SOCK_FILE__GETATTR
-SOCK_FILE__SETATTR = _selinux.SOCK_FILE__SETATTR
-SOCK_FILE__LOCK = _selinux.SOCK_FILE__LOCK
-SOCK_FILE__RELABELFROM = _selinux.SOCK_FILE__RELABELFROM
-SOCK_FILE__RELABELTO = _selinux.SOCK_FILE__RELABELTO
-SOCK_FILE__APPEND = _selinux.SOCK_FILE__APPEND
-SOCK_FILE__UNLINK = _selinux.SOCK_FILE__UNLINK
-SOCK_FILE__LINK = _selinux.SOCK_FILE__LINK
-SOCK_FILE__RENAME = _selinux.SOCK_FILE__RENAME
-SOCK_FILE__EXECUTE = _selinux.SOCK_FILE__EXECUTE
-SOCK_FILE__SWAPON = _selinux.SOCK_FILE__SWAPON
-SOCK_FILE__QUOTAON = _selinux.SOCK_FILE__QUOTAON
-SOCK_FILE__MOUNTON = _selinux.SOCK_FILE__MOUNTON
-FIFO_FILE__IOCTL = _selinux.FIFO_FILE__IOCTL
-FIFO_FILE__READ = _selinux.FIFO_FILE__READ
-FIFO_FILE__WRITE = _selinux.FIFO_FILE__WRITE
-FIFO_FILE__CREATE = _selinux.FIFO_FILE__CREATE
-FIFO_FILE__GETATTR = _selinux.FIFO_FILE__GETATTR
-FIFO_FILE__SETATTR = _selinux.FIFO_FILE__SETATTR
-FIFO_FILE__LOCK = _selinux.FIFO_FILE__LOCK
-FIFO_FILE__RELABELFROM = _selinux.FIFO_FILE__RELABELFROM
-FIFO_FILE__RELABELTO = _selinux.FIFO_FILE__RELABELTO
-FIFO_FILE__APPEND = _selinux.FIFO_FILE__APPEND
-FIFO_FILE__UNLINK = _selinux.FIFO_FILE__UNLINK
-FIFO_FILE__LINK = _selinux.FIFO_FILE__LINK
-FIFO_FILE__RENAME = _selinux.FIFO_FILE__RENAME
-FIFO_FILE__EXECUTE = _selinux.FIFO_FILE__EXECUTE
-FIFO_FILE__SWAPON = _selinux.FIFO_FILE__SWAPON
-FIFO_FILE__QUOTAON = _selinux.FIFO_FILE__QUOTAON
-FIFO_FILE__MOUNTON = _selinux.FIFO_FILE__MOUNTON
-FIFO_FILE__OPEN = _selinux.FIFO_FILE__OPEN
-FD__USE = _selinux.FD__USE
-SOCKET__IOCTL = _selinux.SOCKET__IOCTL
-SOCKET__READ = _selinux.SOCKET__READ
-SOCKET__WRITE = _selinux.SOCKET__WRITE
-SOCKET__CREATE = _selinux.SOCKET__CREATE
-SOCKET__GETATTR = _selinux.SOCKET__GETATTR
-SOCKET__SETATTR = _selinux.SOCKET__SETATTR
-SOCKET__LOCK = _selinux.SOCKET__LOCK
-SOCKET__RELABELFROM = _selinux.SOCKET__RELABELFROM
-SOCKET__RELABELTO = _selinux.SOCKET__RELABELTO
-SOCKET__APPEND = _selinux.SOCKET__APPEND
-SOCKET__BIND = _selinux.SOCKET__BIND
-SOCKET__CONNECT = _selinux.SOCKET__CONNECT
-SOCKET__LISTEN = _selinux.SOCKET__LISTEN
-SOCKET__ACCEPT = _selinux.SOCKET__ACCEPT
-SOCKET__GETOPT = _selinux.SOCKET__GETOPT
-SOCKET__SETOPT = _selinux.SOCKET__SETOPT
-SOCKET__SHUTDOWN = _selinux.SOCKET__SHUTDOWN
-SOCKET__RECVFROM = _selinux.SOCKET__RECVFROM
-SOCKET__SENDTO = _selinux.SOCKET__SENDTO
-SOCKET__RECV_MSG = _selinux.SOCKET__RECV_MSG
-SOCKET__SEND_MSG = _selinux.SOCKET__SEND_MSG
-SOCKET__NAME_BIND = _selinux.SOCKET__NAME_BIND
-TCP_SOCKET__IOCTL = _selinux.TCP_SOCKET__IOCTL
-TCP_SOCKET__READ = _selinux.TCP_SOCKET__READ
-TCP_SOCKET__WRITE = _selinux.TCP_SOCKET__WRITE
-TCP_SOCKET__CREATE = _selinux.TCP_SOCKET__CREATE
-TCP_SOCKET__GETATTR = _selinux.TCP_SOCKET__GETATTR
-TCP_SOCKET__SETATTR = _selinux.TCP_SOCKET__SETATTR
-TCP_SOCKET__LOCK = _selinux.TCP_SOCKET__LOCK
-TCP_SOCKET__RELABELFROM = _selinux.TCP_SOCKET__RELABELFROM
-TCP_SOCKET__RELABELTO = _selinux.TCP_SOCKET__RELABELTO
-TCP_SOCKET__APPEND = _selinux.TCP_SOCKET__APPEND
-TCP_SOCKET__BIND = _selinux.TCP_SOCKET__BIND
-TCP_SOCKET__CONNECT = _selinux.TCP_SOCKET__CONNECT
-TCP_SOCKET__LISTEN = _selinux.TCP_SOCKET__LISTEN
-TCP_SOCKET__ACCEPT = _selinux.TCP_SOCKET__ACCEPT
-TCP_SOCKET__GETOPT = _selinux.TCP_SOCKET__GETOPT
-TCP_SOCKET__SETOPT = _selinux.TCP_SOCKET__SETOPT
-TCP_SOCKET__SHUTDOWN = _selinux.TCP_SOCKET__SHUTDOWN
-TCP_SOCKET__RECVFROM = _selinux.TCP_SOCKET__RECVFROM
-TCP_SOCKET__SENDTO = _selinux.TCP_SOCKET__SENDTO
-TCP_SOCKET__RECV_MSG = _selinux.TCP_SOCKET__RECV_MSG
-TCP_SOCKET__SEND_MSG = _selinux.TCP_SOCKET__SEND_MSG
-TCP_SOCKET__NAME_BIND = _selinux.TCP_SOCKET__NAME_BIND
-TCP_SOCKET__CONNECTTO = _selinux.TCP_SOCKET__CONNECTTO
-TCP_SOCKET__NEWCONN = _selinux.TCP_SOCKET__NEWCONN
-TCP_SOCKET__ACCEPTFROM = _selinux.TCP_SOCKET__ACCEPTFROM
-TCP_SOCKET__NODE_BIND = _selinux.TCP_SOCKET__NODE_BIND
-TCP_SOCKET__NAME_CONNECT = _selinux.TCP_SOCKET__NAME_CONNECT
-UDP_SOCKET__IOCTL = _selinux.UDP_SOCKET__IOCTL
-UDP_SOCKET__READ = _selinux.UDP_SOCKET__READ
-UDP_SOCKET__WRITE = _selinux.UDP_SOCKET__WRITE
-UDP_SOCKET__CREATE = _selinux.UDP_SOCKET__CREATE
-UDP_SOCKET__GETATTR = _selinux.UDP_SOCKET__GETATTR
-UDP_SOCKET__SETATTR = _selinux.UDP_SOCKET__SETATTR
-UDP_SOCKET__LOCK = _selinux.UDP_SOCKET__LOCK
-UDP_SOCKET__RELABELFROM = _selinux.UDP_SOCKET__RELABELFROM
-UDP_SOCKET__RELABELTO = _selinux.UDP_SOCKET__RELABELTO
-UDP_SOCKET__APPEND = _selinux.UDP_SOCKET__APPEND
-UDP_SOCKET__BIND = _selinux.UDP_SOCKET__BIND
-UDP_SOCKET__CONNECT = _selinux.UDP_SOCKET__CONNECT
-UDP_SOCKET__LISTEN = _selinux.UDP_SOCKET__LISTEN
-UDP_SOCKET__ACCEPT = _selinux.UDP_SOCKET__ACCEPT
-UDP_SOCKET__GETOPT = _selinux.UDP_SOCKET__GETOPT
-UDP_SOCKET__SETOPT = _selinux.UDP_SOCKET__SETOPT
-UDP_SOCKET__SHUTDOWN = _selinux.UDP_SOCKET__SHUTDOWN
-UDP_SOCKET__RECVFROM = _selinux.UDP_SOCKET__RECVFROM
-UDP_SOCKET__SENDTO = _selinux.UDP_SOCKET__SENDTO
-UDP_SOCKET__RECV_MSG = _selinux.UDP_SOCKET__RECV_MSG
-UDP_SOCKET__SEND_MSG = _selinux.UDP_SOCKET__SEND_MSG
-UDP_SOCKET__NAME_BIND = _selinux.UDP_SOCKET__NAME_BIND
-UDP_SOCKET__NODE_BIND = _selinux.UDP_SOCKET__NODE_BIND
-RAWIP_SOCKET__IOCTL = _selinux.RAWIP_SOCKET__IOCTL
-RAWIP_SOCKET__READ = _selinux.RAWIP_SOCKET__READ
-RAWIP_SOCKET__WRITE = _selinux.RAWIP_SOCKET__WRITE
-RAWIP_SOCKET__CREATE = _selinux.RAWIP_SOCKET__CREATE
-RAWIP_SOCKET__GETATTR = _selinux.RAWIP_SOCKET__GETATTR
-RAWIP_SOCKET__SETATTR = _selinux.RAWIP_SOCKET__SETATTR
-RAWIP_SOCKET__LOCK = _selinux.RAWIP_SOCKET__LOCK
-RAWIP_SOCKET__RELABELFROM = _selinux.RAWIP_SOCKET__RELABELFROM
-RAWIP_SOCKET__RELABELTO = _selinux.RAWIP_SOCKET__RELABELTO
-RAWIP_SOCKET__APPEND = _selinux.RAWIP_SOCKET__APPEND
-RAWIP_SOCKET__BIND = _selinux.RAWIP_SOCKET__BIND
-RAWIP_SOCKET__CONNECT = _selinux.RAWIP_SOCKET__CONNECT
-RAWIP_SOCKET__LISTEN = _selinux.RAWIP_SOCKET__LISTEN
-RAWIP_SOCKET__ACCEPT = _selinux.RAWIP_SOCKET__ACCEPT
-RAWIP_SOCKET__GETOPT = _selinux.RAWIP_SOCKET__GETOPT
-RAWIP_SOCKET__SETOPT = _selinux.RAWIP_SOCKET__SETOPT
-RAWIP_SOCKET__SHUTDOWN = _selinux.RAWIP_SOCKET__SHUTDOWN
-RAWIP_SOCKET__RECVFROM = _selinux.RAWIP_SOCKET__RECVFROM
-RAWIP_SOCKET__SENDTO = _selinux.RAWIP_SOCKET__SENDTO
-RAWIP_SOCKET__RECV_MSG = _selinux.RAWIP_SOCKET__RECV_MSG
-RAWIP_SOCKET__SEND_MSG = _selinux.RAWIP_SOCKET__SEND_MSG
-RAWIP_SOCKET__NAME_BIND = _selinux.RAWIP_SOCKET__NAME_BIND
-RAWIP_SOCKET__NODE_BIND = _selinux.RAWIP_SOCKET__NODE_BIND
-NODE__TCP_RECV = _selinux.NODE__TCP_RECV
-NODE__TCP_SEND = _selinux.NODE__TCP_SEND
-NODE__UDP_RECV = _selinux.NODE__UDP_RECV
-NODE__UDP_SEND = _selinux.NODE__UDP_SEND
-NODE__RAWIP_RECV = _selinux.NODE__RAWIP_RECV
-NODE__RAWIP_SEND = _selinux.NODE__RAWIP_SEND
-NODE__ENFORCE_DEST = _selinux.NODE__ENFORCE_DEST
-NODE__DCCP_RECV = _selinux.NODE__DCCP_RECV
-NODE__DCCP_SEND = _selinux.NODE__DCCP_SEND
-NODE__RECVFROM = _selinux.NODE__RECVFROM
-NODE__SENDTO = _selinux.NODE__SENDTO
-NETIF__TCP_RECV = _selinux.NETIF__TCP_RECV
-NETIF__TCP_SEND = _selinux.NETIF__TCP_SEND
-NETIF__UDP_RECV = _selinux.NETIF__UDP_RECV
-NETIF__UDP_SEND = _selinux.NETIF__UDP_SEND
-NETIF__RAWIP_RECV = _selinux.NETIF__RAWIP_RECV
-NETIF__RAWIP_SEND = _selinux.NETIF__RAWIP_SEND
-NETIF__DCCP_RECV = _selinux.NETIF__DCCP_RECV
-NETIF__DCCP_SEND = _selinux.NETIF__DCCP_SEND
-NETIF__INGRESS = _selinux.NETIF__INGRESS
-NETIF__EGRESS = _selinux.NETIF__EGRESS
-NETLINK_SOCKET__IOCTL = _selinux.NETLINK_SOCKET__IOCTL
-NETLINK_SOCKET__READ = _selinux.NETLINK_SOCKET__READ
-NETLINK_SOCKET__WRITE = _selinux.NETLINK_SOCKET__WRITE
-NETLINK_SOCKET__CREATE = _selinux.NETLINK_SOCKET__CREATE
-NETLINK_SOCKET__GETATTR = _selinux.NETLINK_SOCKET__GETATTR
-NETLINK_SOCKET__SETATTR = _selinux.NETLINK_SOCKET__SETATTR
-NETLINK_SOCKET__LOCK = _selinux.NETLINK_SOCKET__LOCK
-NETLINK_SOCKET__RELABELFROM = _selinux.NETLINK_SOCKET__RELABELFROM
-NETLINK_SOCKET__RELABELTO = _selinux.NETLINK_SOCKET__RELABELTO
-NETLINK_SOCKET__APPEND = _selinux.NETLINK_SOCKET__APPEND
-NETLINK_SOCKET__BIND = _selinux.NETLINK_SOCKET__BIND
-NETLINK_SOCKET__CONNECT = _selinux.NETLINK_SOCKET__CONNECT
-NETLINK_SOCKET__LISTEN = _selinux.NETLINK_SOCKET__LISTEN
-NETLINK_SOCKET__ACCEPT = _selinux.NETLINK_SOCKET__ACCEPT
-NETLINK_SOCKET__GETOPT = _selinux.NETLINK_SOCKET__GETOPT
-NETLINK_SOCKET__SETOPT = _selinux.NETLINK_SOCKET__SETOPT
-NETLINK_SOCKET__SHUTDOWN = _selinux.NETLINK_SOCKET__SHUTDOWN
-NETLINK_SOCKET__RECVFROM = _selinux.NETLINK_SOCKET__RECVFROM
-NETLINK_SOCKET__SENDTO = _selinux.NETLINK_SOCKET__SENDTO
-NETLINK_SOCKET__RECV_MSG = _selinux.NETLINK_SOCKET__RECV_MSG
-NETLINK_SOCKET__SEND_MSG = _selinux.NETLINK_SOCKET__SEND_MSG
-NETLINK_SOCKET__NAME_BIND = _selinux.NETLINK_SOCKET__NAME_BIND
-PACKET_SOCKET__IOCTL = _selinux.PACKET_SOCKET__IOCTL
-PACKET_SOCKET__READ = _selinux.PACKET_SOCKET__READ
-PACKET_SOCKET__WRITE = _selinux.PACKET_SOCKET__WRITE
-PACKET_SOCKET__CREATE = _selinux.PACKET_SOCKET__CREATE
-PACKET_SOCKET__GETATTR = _selinux.PACKET_SOCKET__GETATTR
-PACKET_SOCKET__SETATTR = _selinux.PACKET_SOCKET__SETATTR
-PACKET_SOCKET__LOCK = _selinux.PACKET_SOCKET__LOCK
-PACKET_SOCKET__RELABELFROM = _selinux.PACKET_SOCKET__RELABELFROM
-PACKET_SOCKET__RELABELTO = _selinux.PACKET_SOCKET__RELABELTO
-PACKET_SOCKET__APPEND = _selinux.PACKET_SOCKET__APPEND
-PACKET_SOCKET__BIND = _selinux.PACKET_SOCKET__BIND
-PACKET_SOCKET__CONNECT = _selinux.PACKET_SOCKET__CONNECT
-PACKET_SOCKET__LISTEN = _selinux.PACKET_SOCKET__LISTEN
-PACKET_SOCKET__ACCEPT = _selinux.PACKET_SOCKET__ACCEPT
-PACKET_SOCKET__GETOPT = _selinux.PACKET_SOCKET__GETOPT
-PACKET_SOCKET__SETOPT = _selinux.PACKET_SOCKET__SETOPT
-PACKET_SOCKET__SHUTDOWN = _selinux.PACKET_SOCKET__SHUTDOWN
-PACKET_SOCKET__RECVFROM = _selinux.PACKET_SOCKET__RECVFROM
-PACKET_SOCKET__SENDTO = _selinux.PACKET_SOCKET__SENDTO
-PACKET_SOCKET__RECV_MSG = _selinux.PACKET_SOCKET__RECV_MSG
-PACKET_SOCKET__SEND_MSG = _selinux.PACKET_SOCKET__SEND_MSG
-PACKET_SOCKET__NAME_BIND = _selinux.PACKET_SOCKET__NAME_BIND
-KEY_SOCKET__IOCTL = _selinux.KEY_SOCKET__IOCTL
-KEY_SOCKET__READ = _selinux.KEY_SOCKET__READ
-KEY_SOCKET__WRITE = _selinux.KEY_SOCKET__WRITE
-KEY_SOCKET__CREATE = _selinux.KEY_SOCKET__CREATE
-KEY_SOCKET__GETATTR = _selinux.KEY_SOCKET__GETATTR
-KEY_SOCKET__SETATTR = _selinux.KEY_SOCKET__SETATTR
-KEY_SOCKET__LOCK = _selinux.KEY_SOCKET__LOCK
-KEY_SOCKET__RELABELFROM = _selinux.KEY_SOCKET__RELABELFROM
-KEY_SOCKET__RELABELTO = _selinux.KEY_SOCKET__RELABELTO
-KEY_SOCKET__APPEND = _selinux.KEY_SOCKET__APPEND
-KEY_SOCKET__BIND = _selinux.KEY_SOCKET__BIND
-KEY_SOCKET__CONNECT = _selinux.KEY_SOCKET__CONNECT
-KEY_SOCKET__LISTEN = _selinux.KEY_SOCKET__LISTEN
-KEY_SOCKET__ACCEPT = _selinux.KEY_SOCKET__ACCEPT
-KEY_SOCKET__GETOPT = _selinux.KEY_SOCKET__GETOPT
-KEY_SOCKET__SETOPT = _selinux.KEY_SOCKET__SETOPT
-KEY_SOCKET__SHUTDOWN = _selinux.KEY_SOCKET__SHUTDOWN
-KEY_SOCKET__RECVFROM = _selinux.KEY_SOCKET__RECVFROM
-KEY_SOCKET__SENDTO = _selinux.KEY_SOCKET__SENDTO
-KEY_SOCKET__RECV_MSG = _selinux.KEY_SOCKET__RECV_MSG
-KEY_SOCKET__SEND_MSG = _selinux.KEY_SOCKET__SEND_MSG
-KEY_SOCKET__NAME_BIND = _selinux.KEY_SOCKET__NAME_BIND
-UNIX_STREAM_SOCKET__IOCTL = _selinux.UNIX_STREAM_SOCKET__IOCTL
-UNIX_STREAM_SOCKET__READ = _selinux.UNIX_STREAM_SOCKET__READ
-UNIX_STREAM_SOCKET__WRITE = _selinux.UNIX_STREAM_SOCKET__WRITE
-UNIX_STREAM_SOCKET__CREATE = _selinux.UNIX_STREAM_SOCKET__CREATE
-UNIX_STREAM_SOCKET__GETATTR = _selinux.UNIX_STREAM_SOCKET__GETATTR
-UNIX_STREAM_SOCKET__SETATTR = _selinux.UNIX_STREAM_SOCKET__SETATTR
-UNIX_STREAM_SOCKET__LOCK = _selinux.UNIX_STREAM_SOCKET__LOCK
-UNIX_STREAM_SOCKET__RELABELFROM = _selinux.UNIX_STREAM_SOCKET__RELABELFROM
-UNIX_STREAM_SOCKET__RELABELTO = _selinux.UNIX_STREAM_SOCKET__RELABELTO
-UNIX_STREAM_SOCKET__APPEND = _selinux.UNIX_STREAM_SOCKET__APPEND
-UNIX_STREAM_SOCKET__BIND = _selinux.UNIX_STREAM_SOCKET__BIND
-UNIX_STREAM_SOCKET__CONNECT = _selinux.UNIX_STREAM_SOCKET__CONNECT
-UNIX_STREAM_SOCKET__LISTEN = _selinux.UNIX_STREAM_SOCKET__LISTEN
-UNIX_STREAM_SOCKET__ACCEPT = _selinux.UNIX_STREAM_SOCKET__ACCEPT
-UNIX_STREAM_SOCKET__GETOPT = _selinux.UNIX_STREAM_SOCKET__GETOPT
-UNIX_STREAM_SOCKET__SETOPT = _selinux.UNIX_STREAM_SOCKET__SETOPT
-UNIX_STREAM_SOCKET__SHUTDOWN = _selinux.UNIX_STREAM_SOCKET__SHUTDOWN
-UNIX_STREAM_SOCKET__RECVFROM = _selinux.UNIX_STREAM_SOCKET__RECVFROM
-UNIX_STREAM_SOCKET__SENDTO = _selinux.UNIX_STREAM_SOCKET__SENDTO
-UNIX_STREAM_SOCKET__RECV_MSG = _selinux.UNIX_STREAM_SOCKET__RECV_MSG
-UNIX_STREAM_SOCKET__SEND_MSG = _selinux.UNIX_STREAM_SOCKET__SEND_MSG
-UNIX_STREAM_SOCKET__NAME_BIND = _selinux.UNIX_STREAM_SOCKET__NAME_BIND
-UNIX_STREAM_SOCKET__CONNECTTO = _selinux.UNIX_STREAM_SOCKET__CONNECTTO
-UNIX_STREAM_SOCKET__NEWCONN = _selinux.UNIX_STREAM_SOCKET__NEWCONN
-UNIX_STREAM_SOCKET__ACCEPTFROM = _selinux.UNIX_STREAM_SOCKET__ACCEPTFROM
-UNIX_DGRAM_SOCKET__IOCTL = _selinux.UNIX_DGRAM_SOCKET__IOCTL
-UNIX_DGRAM_SOCKET__READ = _selinux.UNIX_DGRAM_SOCKET__READ
-UNIX_DGRAM_SOCKET__WRITE = _selinux.UNIX_DGRAM_SOCKET__WRITE
-UNIX_DGRAM_SOCKET__CREATE = _selinux.UNIX_DGRAM_SOCKET__CREATE
-UNIX_DGRAM_SOCKET__GETATTR = _selinux.UNIX_DGRAM_SOCKET__GETATTR
-UNIX_DGRAM_SOCKET__SETATTR = _selinux.UNIX_DGRAM_SOCKET__SETATTR
-UNIX_DGRAM_SOCKET__LOCK = _selinux.UNIX_DGRAM_SOCKET__LOCK
-UNIX_DGRAM_SOCKET__RELABELFROM = _selinux.UNIX_DGRAM_SOCKET__RELABELFROM
-UNIX_DGRAM_SOCKET__RELABELTO = _selinux.UNIX_DGRAM_SOCKET__RELABELTO
-UNIX_DGRAM_SOCKET__APPEND = _selinux.UNIX_DGRAM_SOCKET__APPEND
-UNIX_DGRAM_SOCKET__BIND = _selinux.UNIX_DGRAM_SOCKET__BIND
-UNIX_DGRAM_SOCKET__CONNECT = _selinux.UNIX_DGRAM_SOCKET__CONNECT
-UNIX_DGRAM_SOCKET__LISTEN = _selinux.UNIX_DGRAM_SOCKET__LISTEN
-UNIX_DGRAM_SOCKET__ACCEPT = _selinux.UNIX_DGRAM_SOCKET__ACCEPT
-UNIX_DGRAM_SOCKET__GETOPT = _selinux.UNIX_DGRAM_SOCKET__GETOPT
-UNIX_DGRAM_SOCKET__SETOPT = _selinux.UNIX_DGRAM_SOCKET__SETOPT
-UNIX_DGRAM_SOCKET__SHUTDOWN = _selinux.UNIX_DGRAM_SOCKET__SHUTDOWN
-UNIX_DGRAM_SOCKET__RECVFROM = _selinux.UNIX_DGRAM_SOCKET__RECVFROM
-UNIX_DGRAM_SOCKET__SENDTO = _selinux.UNIX_DGRAM_SOCKET__SENDTO
-UNIX_DGRAM_SOCKET__RECV_MSG = _selinux.UNIX_DGRAM_SOCKET__RECV_MSG
-UNIX_DGRAM_SOCKET__SEND_MSG = _selinux.UNIX_DGRAM_SOCKET__SEND_MSG
-UNIX_DGRAM_SOCKET__NAME_BIND = _selinux.UNIX_DGRAM_SOCKET__NAME_BIND
-PROCESS__FORK = _selinux.PROCESS__FORK
-PROCESS__TRANSITION = _selinux.PROCESS__TRANSITION
-PROCESS__SIGCHLD = _selinux.PROCESS__SIGCHLD
-PROCESS__SIGKILL = _selinux.PROCESS__SIGKILL
-PROCESS__SIGSTOP = _selinux.PROCESS__SIGSTOP
-PROCESS__SIGNULL = _selinux.PROCESS__SIGNULL
-PROCESS__SIGNAL = _selinux.PROCESS__SIGNAL
-PROCESS__PTRACE = _selinux.PROCESS__PTRACE
-PROCESS__GETSCHED = _selinux.PROCESS__GETSCHED
-PROCESS__SETSCHED = _selinux.PROCESS__SETSCHED
-PROCESS__GETSESSION = _selinux.PROCESS__GETSESSION
-PROCESS__GETPGID = _selinux.PROCESS__GETPGID
-PROCESS__SETPGID = _selinux.PROCESS__SETPGID
-PROCESS__GETCAP = _selinux.PROCESS__GETCAP
-PROCESS__SETCAP = _selinux.PROCESS__SETCAP
-PROCESS__SHARE = _selinux.PROCESS__SHARE
-PROCESS__GETATTR = _selinux.PROCESS__GETATTR
-PROCESS__SETEXEC = _selinux.PROCESS__SETEXEC
-PROCESS__SETFSCREATE = _selinux.PROCESS__SETFSCREATE
-PROCESS__NOATSECURE = _selinux.PROCESS__NOATSECURE
-PROCESS__SIGINH = _selinux.PROCESS__SIGINH
-PROCESS__SETRLIMIT = _selinux.PROCESS__SETRLIMIT
-PROCESS__RLIMITINH = _selinux.PROCESS__RLIMITINH
-PROCESS__DYNTRANSITION = _selinux.PROCESS__DYNTRANSITION
-PROCESS__SETCURRENT = _selinux.PROCESS__SETCURRENT
-PROCESS__EXECMEM = _selinux.PROCESS__EXECMEM
-PROCESS__EXECSTACK = _selinux.PROCESS__EXECSTACK
-PROCESS__EXECHEAP = _selinux.PROCESS__EXECHEAP
-PROCESS__SETKEYCREATE = _selinux.PROCESS__SETKEYCREATE
-PROCESS__SETSOCKCREATE = _selinux.PROCESS__SETSOCKCREATE
-IPC__CREATE = _selinux.IPC__CREATE
-IPC__DESTROY = _selinux.IPC__DESTROY
-IPC__GETATTR = _selinux.IPC__GETATTR
-IPC__SETATTR = _selinux.IPC__SETATTR
-IPC__READ = _selinux.IPC__READ
-IPC__WRITE = _selinux.IPC__WRITE
-IPC__ASSOCIATE = _selinux.IPC__ASSOCIATE
-IPC__UNIX_READ = _selinux.IPC__UNIX_READ
-IPC__UNIX_WRITE = _selinux.IPC__UNIX_WRITE
-SEM__CREATE = _selinux.SEM__CREATE
-SEM__DESTROY = _selinux.SEM__DESTROY
-SEM__GETATTR = _selinux.SEM__GETATTR
-SEM__SETATTR = _selinux.SEM__SETATTR
-SEM__READ = _selinux.SEM__READ
-SEM__WRITE = _selinux.SEM__WRITE
-SEM__ASSOCIATE = _selinux.SEM__ASSOCIATE
-SEM__UNIX_READ = _selinux.SEM__UNIX_READ
-SEM__UNIX_WRITE = _selinux.SEM__UNIX_WRITE
-MSGQ__CREATE = _selinux.MSGQ__CREATE
-MSGQ__DESTROY = _selinux.MSGQ__DESTROY
-MSGQ__GETATTR = _selinux.MSGQ__GETATTR
-MSGQ__SETATTR = _selinux.MSGQ__SETATTR
-MSGQ__READ = _selinux.MSGQ__READ
-MSGQ__WRITE = _selinux.MSGQ__WRITE
-MSGQ__ASSOCIATE = _selinux.MSGQ__ASSOCIATE
-MSGQ__UNIX_READ = _selinux.MSGQ__UNIX_READ
-MSGQ__UNIX_WRITE = _selinux.MSGQ__UNIX_WRITE
-MSGQ__ENQUEUE = _selinux.MSGQ__ENQUEUE
-MSG__SEND = _selinux.MSG__SEND
-MSG__RECEIVE = _selinux.MSG__RECEIVE
-SHM__CREATE = _selinux.SHM__CREATE
-SHM__DESTROY = _selinux.SHM__DESTROY
-SHM__GETATTR = _selinux.SHM__GETATTR
-SHM__SETATTR = _selinux.SHM__SETATTR
-SHM__READ = _selinux.SHM__READ
-SHM__WRITE = _selinux.SHM__WRITE
-SHM__ASSOCIATE = _selinux.SHM__ASSOCIATE
-SHM__UNIX_READ = _selinux.SHM__UNIX_READ
-SHM__UNIX_WRITE = _selinux.SHM__UNIX_WRITE
-SHM__LOCK = _selinux.SHM__LOCK
-SECURITY__COMPUTE_AV = _selinux.SECURITY__COMPUTE_AV
-SECURITY__COMPUTE_CREATE = _selinux.SECURITY__COMPUTE_CREATE
-SECURITY__COMPUTE_MEMBER = _selinux.SECURITY__COMPUTE_MEMBER
-SECURITY__CHECK_CONTEXT = _selinux.SECURITY__CHECK_CONTEXT
-SECURITY__LOAD_POLICY = _selinux.SECURITY__LOAD_POLICY
-SECURITY__COMPUTE_RELABEL = _selinux.SECURITY__COMPUTE_RELABEL
-SECURITY__COMPUTE_USER = _selinux.SECURITY__COMPUTE_USER
-SECURITY__SETENFORCE = _selinux.SECURITY__SETENFORCE
-SECURITY__SETBOOL = _selinux.SECURITY__SETBOOL
-SECURITY__SETSECPARAM = _selinux.SECURITY__SETSECPARAM
-SECURITY__SETCHECKREQPROT = _selinux.SECURITY__SETCHECKREQPROT
-SYSTEM__IPC_INFO = _selinux.SYSTEM__IPC_INFO
-SYSTEM__SYSLOG_READ = _selinux.SYSTEM__SYSLOG_READ
-SYSTEM__SYSLOG_MOD = _selinux.SYSTEM__SYSLOG_MOD
-SYSTEM__SYSLOG_CONSOLE = _selinux.SYSTEM__SYSLOG_CONSOLE
-CAPABILITY__CHOWN = _selinux.CAPABILITY__CHOWN
-CAPABILITY__DAC_OVERRIDE = _selinux.CAPABILITY__DAC_OVERRIDE
-CAPABILITY__DAC_READ_SEARCH = _selinux.CAPABILITY__DAC_READ_SEARCH
-CAPABILITY__FOWNER = _selinux.CAPABILITY__FOWNER
-CAPABILITY__FSETID = _selinux.CAPABILITY__FSETID
-CAPABILITY__KILL = _selinux.CAPABILITY__KILL
-CAPABILITY__SETGID = _selinux.CAPABILITY__SETGID
-CAPABILITY__SETUID = _selinux.CAPABILITY__SETUID
-CAPABILITY__SETPCAP = _selinux.CAPABILITY__SETPCAP
-CAPABILITY__LINUX_IMMUTABLE = _selinux.CAPABILITY__LINUX_IMMUTABLE
-CAPABILITY__NET_BIND_SERVICE = _selinux.CAPABILITY__NET_BIND_SERVICE
-CAPABILITY__NET_BROADCAST = _selinux.CAPABILITY__NET_BROADCAST
-CAPABILITY__NET_ADMIN = _selinux.CAPABILITY__NET_ADMIN
-CAPABILITY__NET_RAW = _selinux.CAPABILITY__NET_RAW
-CAPABILITY__IPC_LOCK = _selinux.CAPABILITY__IPC_LOCK
-CAPABILITY__IPC_OWNER = _selinux.CAPABILITY__IPC_OWNER
-CAPABILITY__SYS_MODULE = _selinux.CAPABILITY__SYS_MODULE
-CAPABILITY__SYS_RAWIO = _selinux.CAPABILITY__SYS_RAWIO
-CAPABILITY__SYS_CHROOT = _selinux.CAPABILITY__SYS_CHROOT
-CAPABILITY__SYS_PTRACE = _selinux.CAPABILITY__SYS_PTRACE
-CAPABILITY__SYS_PACCT = _selinux.CAPABILITY__SYS_PACCT
-CAPABILITY__SYS_ADMIN = _selinux.CAPABILITY__SYS_ADMIN
-CAPABILITY__SYS_BOOT = _selinux.CAPABILITY__SYS_BOOT
-CAPABILITY__SYS_NICE = _selinux.CAPABILITY__SYS_NICE
-CAPABILITY__SYS_RESOURCE = _selinux.CAPABILITY__SYS_RESOURCE
-CAPABILITY__SYS_TIME = _selinux.CAPABILITY__SYS_TIME
-CAPABILITY__SYS_TTY_CONFIG = _selinux.CAPABILITY__SYS_TTY_CONFIG
-CAPABILITY__MKNOD = _selinux.CAPABILITY__MKNOD
-CAPABILITY__LEASE = _selinux.CAPABILITY__LEASE
-CAPABILITY__AUDIT_WRITE = _selinux.CAPABILITY__AUDIT_WRITE
-CAPABILITY__AUDIT_CONTROL = _selinux.CAPABILITY__AUDIT_CONTROL
-CAPABILITY__SETFCAP = _selinux.CAPABILITY__SETFCAP
-CAPABILITY2__MAC_OVERRIDE = _selinux.CAPABILITY2__MAC_OVERRIDE
-CAPABILITY2__MAC_ADMIN = _selinux.CAPABILITY2__MAC_ADMIN
-PASSWD__PASSWD = _selinux.PASSWD__PASSWD
-PASSWD__CHFN = _selinux.PASSWD__CHFN
-PASSWD__CHSH = _selinux.PASSWD__CHSH
-PASSWD__ROOTOK = _selinux.PASSWD__ROOTOK
-PASSWD__CRONTAB = _selinux.PASSWD__CRONTAB
-X_DRAWABLE__CREATE = _selinux.X_DRAWABLE__CREATE
-X_DRAWABLE__DESTROY = _selinux.X_DRAWABLE__DESTROY
-X_DRAWABLE__READ = _selinux.X_DRAWABLE__READ
-X_DRAWABLE__WRITE = _selinux.X_DRAWABLE__WRITE
-X_DRAWABLE__BLEND = _selinux.X_DRAWABLE__BLEND
-X_DRAWABLE__GETATTR = _selinux.X_DRAWABLE__GETATTR
-X_DRAWABLE__SETATTR = _selinux.X_DRAWABLE__SETATTR
-X_DRAWABLE__LIST_CHILD = _selinux.X_DRAWABLE__LIST_CHILD
-X_DRAWABLE__ADD_CHILD = _selinux.X_DRAWABLE__ADD_CHILD
-X_DRAWABLE__REMOVE_CHILD = _selinux.X_DRAWABLE__REMOVE_CHILD
-X_DRAWABLE__LIST_PROPERTY = _selinux.X_DRAWABLE__LIST_PROPERTY
-X_DRAWABLE__GET_PROPERTY = _selinux.X_DRAWABLE__GET_PROPERTY
-X_DRAWABLE__SET_PROPERTY = _selinux.X_DRAWABLE__SET_PROPERTY
-X_DRAWABLE__MANAGE = _selinux.X_DRAWABLE__MANAGE
-X_DRAWABLE__OVERRIDE = _selinux.X_DRAWABLE__OVERRIDE
-X_DRAWABLE__SHOW = _selinux.X_DRAWABLE__SHOW
-X_DRAWABLE__HIDE = _selinux.X_DRAWABLE__HIDE
-X_DRAWABLE__SEND = _selinux.X_DRAWABLE__SEND
-X_DRAWABLE__RECEIVE = _selinux.X_DRAWABLE__RECEIVE
-X_SCREEN__GETATTR = _selinux.X_SCREEN__GETATTR
-X_SCREEN__SETATTR = _selinux.X_SCREEN__SETATTR
-X_SCREEN__HIDE_CURSOR = _selinux.X_SCREEN__HIDE_CURSOR
-X_SCREEN__SHOW_CURSOR = _selinux.X_SCREEN__SHOW_CURSOR
-X_SCREEN__SAVER_GETATTR = _selinux.X_SCREEN__SAVER_GETATTR
-X_SCREEN__SAVER_SETATTR = _selinux.X_SCREEN__SAVER_SETATTR
-X_SCREEN__SAVER_HIDE = _selinux.X_SCREEN__SAVER_HIDE
-X_SCREEN__SAVER_SHOW = _selinux.X_SCREEN__SAVER_SHOW
-X_GC__CREATE = _selinux.X_GC__CREATE
-X_GC__DESTROY = _selinux.X_GC__DESTROY
-X_GC__GETATTR = _selinux.X_GC__GETATTR
-X_GC__SETATTR = _selinux.X_GC__SETATTR
-X_GC__USE = _selinux.X_GC__USE
-X_FONT__CREATE = _selinux.X_FONT__CREATE
-X_FONT__DESTROY = _selinux.X_FONT__DESTROY
-X_FONT__GETATTR = _selinux.X_FONT__GETATTR
-X_FONT__ADD_GLYPH = _selinux.X_FONT__ADD_GLYPH
-X_FONT__REMOVE_GLYPH = _selinux.X_FONT__REMOVE_GLYPH
-X_FONT__USE = _selinux.X_FONT__USE
-X_COLORMAP__CREATE = _selinux.X_COLORMAP__CREATE
-X_COLORMAP__DESTROY = _selinux.X_COLORMAP__DESTROY
-X_COLORMAP__READ = _selinux.X_COLORMAP__READ
-X_COLORMAP__WRITE = _selinux.X_COLORMAP__WRITE
-X_COLORMAP__GETATTR = _selinux.X_COLORMAP__GETATTR
-X_COLORMAP__ADD_COLOR = _selinux.X_COLORMAP__ADD_COLOR
-X_COLORMAP__REMOVE_COLOR = _selinux.X_COLORMAP__REMOVE_COLOR
-X_COLORMAP__INSTALL = _selinux.X_COLORMAP__INSTALL
-X_COLORMAP__UNINSTALL = _selinux.X_COLORMAP__UNINSTALL
-X_COLORMAP__USE = _selinux.X_COLORMAP__USE
-X_PROPERTY__CREATE = _selinux.X_PROPERTY__CREATE
-X_PROPERTY__DESTROY = _selinux.X_PROPERTY__DESTROY
-X_PROPERTY__READ = _selinux.X_PROPERTY__READ
-X_PROPERTY__WRITE = _selinux.X_PROPERTY__WRITE
-X_PROPERTY__APPEND = _selinux.X_PROPERTY__APPEND
-X_PROPERTY__GETATTR = _selinux.X_PROPERTY__GETATTR
-X_PROPERTY__SETATTR = _selinux.X_PROPERTY__SETATTR
-X_SELECTION__READ = _selinux.X_SELECTION__READ
-X_SELECTION__WRITE = _selinux.X_SELECTION__WRITE
-X_SELECTION__GETATTR = _selinux.X_SELECTION__GETATTR
-X_SELECTION__SETATTR = _selinux.X_SELECTION__SETATTR
-X_CURSOR__CREATE = _selinux.X_CURSOR__CREATE
-X_CURSOR__DESTROY = _selinux.X_CURSOR__DESTROY
-X_CURSOR__READ = _selinux.X_CURSOR__READ
-X_CURSOR__WRITE = _selinux.X_CURSOR__WRITE
-X_CURSOR__GETATTR = _selinux.X_CURSOR__GETATTR
-X_CURSOR__SETATTR = _selinux.X_CURSOR__SETATTR
-X_CURSOR__USE = _selinux.X_CURSOR__USE
-X_CLIENT__DESTROY = _selinux.X_CLIENT__DESTROY
-X_CLIENT__GETATTR = _selinux.X_CLIENT__GETATTR
-X_CLIENT__SETATTR = _selinux.X_CLIENT__SETATTR
-X_CLIENT__MANAGE = _selinux.X_CLIENT__MANAGE
-X_DEVICE__GETATTR = _selinux.X_DEVICE__GETATTR
-X_DEVICE__SETATTR = _selinux.X_DEVICE__SETATTR
-X_DEVICE__USE = _selinux.X_DEVICE__USE
-X_DEVICE__READ = _selinux.X_DEVICE__READ
-X_DEVICE__WRITE = _selinux.X_DEVICE__WRITE
-X_DEVICE__GETFOCUS = _selinux.X_DEVICE__GETFOCUS
-X_DEVICE__SETFOCUS = _selinux.X_DEVICE__SETFOCUS
-X_DEVICE__BELL = _selinux.X_DEVICE__BELL
-X_DEVICE__FORCE_CURSOR = _selinux.X_DEVICE__FORCE_CURSOR
-X_DEVICE__FREEZE = _selinux.X_DEVICE__FREEZE
-X_DEVICE__GRAB = _selinux.X_DEVICE__GRAB
-X_DEVICE__MANAGE = _selinux.X_DEVICE__MANAGE
-X_SERVER__GETATTR = _selinux.X_SERVER__GETATTR
-X_SERVER__SETATTR = _selinux.X_SERVER__SETATTR
-X_SERVER__RECORD = _selinux.X_SERVER__RECORD
-X_SERVER__DEBUG = _selinux.X_SERVER__DEBUG
-X_SERVER__GRAB = _selinux.X_SERVER__GRAB
-X_SERVER__MANAGE = _selinux.X_SERVER__MANAGE
-X_EXTENSION__QUERY = _selinux.X_EXTENSION__QUERY
-X_EXTENSION__USE = _selinux.X_EXTENSION__USE
-X_RESOURCE__READ = _selinux.X_RESOURCE__READ
-X_RESOURCE__WRITE = _selinux.X_RESOURCE__WRITE
-X_EVENT__SEND = _selinux.X_EVENT__SEND
-X_EVENT__RECEIVE = _selinux.X_EVENT__RECEIVE
-X_SYNTHETIC_EVENT__SEND = _selinux.X_SYNTHETIC_EVENT__SEND
-X_SYNTHETIC_EVENT__RECEIVE = _selinux.X_SYNTHETIC_EVENT__RECEIVE
-NETLINK_ROUTE_SOCKET__IOCTL = _selinux.NETLINK_ROUTE_SOCKET__IOCTL
-NETLINK_ROUTE_SOCKET__READ = _selinux.NETLINK_ROUTE_SOCKET__READ
-NETLINK_ROUTE_SOCKET__WRITE = _selinux.NETLINK_ROUTE_SOCKET__WRITE
-NETLINK_ROUTE_SOCKET__CREATE = _selinux.NETLINK_ROUTE_SOCKET__CREATE
-NETLINK_ROUTE_SOCKET__GETATTR = _selinux.NETLINK_ROUTE_SOCKET__GETATTR
-NETLINK_ROUTE_SOCKET__SETATTR = _selinux.NETLINK_ROUTE_SOCKET__SETATTR
-NETLINK_ROUTE_SOCKET__LOCK = _selinux.NETLINK_ROUTE_SOCKET__LOCK
-NETLINK_ROUTE_SOCKET__RELABELFROM = _selinux.NETLINK_ROUTE_SOCKET__RELABELFROM
-NETLINK_ROUTE_SOCKET__RELABELTO = _selinux.NETLINK_ROUTE_SOCKET__RELABELTO
-NETLINK_ROUTE_SOCKET__APPEND = _selinux.NETLINK_ROUTE_SOCKET__APPEND
-NETLINK_ROUTE_SOCKET__BIND = _selinux.NETLINK_ROUTE_SOCKET__BIND
-NETLINK_ROUTE_SOCKET__CONNECT = _selinux.NETLINK_ROUTE_SOCKET__CONNECT
-NETLINK_ROUTE_SOCKET__LISTEN = _selinux.NETLINK_ROUTE_SOCKET__LISTEN
-NETLINK_ROUTE_SOCKET__ACCEPT = _selinux.NETLINK_ROUTE_SOCKET__ACCEPT
-NETLINK_ROUTE_SOCKET__GETOPT = _selinux.NETLINK_ROUTE_SOCKET__GETOPT
-NETLINK_ROUTE_SOCKET__SETOPT = _selinux.NETLINK_ROUTE_SOCKET__SETOPT
-NETLINK_ROUTE_SOCKET__SHUTDOWN = _selinux.NETLINK_ROUTE_SOCKET__SHUTDOWN
-NETLINK_ROUTE_SOCKET__RECVFROM = _selinux.NETLINK_ROUTE_SOCKET__RECVFROM
-NETLINK_ROUTE_SOCKET__SENDTO = _selinux.NETLINK_ROUTE_SOCKET__SENDTO
-NETLINK_ROUTE_SOCKET__RECV_MSG = _selinux.NETLINK_ROUTE_SOCKET__RECV_MSG
-NETLINK_ROUTE_SOCKET__SEND_MSG = _selinux.NETLINK_ROUTE_SOCKET__SEND_MSG
-NETLINK_ROUTE_SOCKET__NAME_BIND = _selinux.NETLINK_ROUTE_SOCKET__NAME_BIND
-NETLINK_ROUTE_SOCKET__NLMSG_READ = _selinux.NETLINK_ROUTE_SOCKET__NLMSG_READ
-NETLINK_ROUTE_SOCKET__NLMSG_WRITE = _selinux.NETLINK_ROUTE_SOCKET__NLMSG_WRITE
-NETLINK_FIREWALL_SOCKET__IOCTL = _selinux.NETLINK_FIREWALL_SOCKET__IOCTL
-NETLINK_FIREWALL_SOCKET__READ = _selinux.NETLINK_FIREWALL_SOCKET__READ
-NETLINK_FIREWALL_SOCKET__WRITE = _selinux.NETLINK_FIREWALL_SOCKET__WRITE
-NETLINK_FIREWALL_SOCKET__CREATE = _selinux.NETLINK_FIREWALL_SOCKET__CREATE
-NETLINK_FIREWALL_SOCKET__GETATTR = _selinux.NETLINK_FIREWALL_SOCKET__GETATTR
-NETLINK_FIREWALL_SOCKET__SETATTR = _selinux.NETLINK_FIREWALL_SOCKET__SETATTR
-NETLINK_FIREWALL_SOCKET__LOCK = _selinux.NETLINK_FIREWALL_SOCKET__LOCK
-NETLINK_FIREWALL_SOCKET__RELABELFROM = _selinux.NETLINK_FIREWALL_SOCKET__RELABELFROM
-NETLINK_FIREWALL_SOCKET__RELABELTO = _selinux.NETLINK_FIREWALL_SOCKET__RELABELTO
-NETLINK_FIREWALL_SOCKET__APPEND = _selinux.NETLINK_FIREWALL_SOCKET__APPEND
-NETLINK_FIREWALL_SOCKET__BIND = _selinux.NETLINK_FIREWALL_SOCKET__BIND
-NETLINK_FIREWALL_SOCKET__CONNECT = _selinux.NETLINK_FIREWALL_SOCKET__CONNECT
-NETLINK_FIREWALL_SOCKET__LISTEN = _selinux.NETLINK_FIREWALL_SOCKET__LISTEN
-NETLINK_FIREWALL_SOCKET__ACCEPT = _selinux.NETLINK_FIREWALL_SOCKET__ACCEPT
-NETLINK_FIREWALL_SOCKET__GETOPT = _selinux.NETLINK_FIREWALL_SOCKET__GETOPT
-NETLINK_FIREWALL_SOCKET__SETOPT = _selinux.NETLINK_FIREWALL_SOCKET__SETOPT
-NETLINK_FIREWALL_SOCKET__SHUTDOWN = _selinux.NETLINK_FIREWALL_SOCKET__SHUTDOWN
-NETLINK_FIREWALL_SOCKET__RECVFROM = _selinux.NETLINK_FIREWALL_SOCKET__RECVFROM
-NETLINK_FIREWALL_SOCKET__SENDTO = _selinux.NETLINK_FIREWALL_SOCKET__SENDTO
-NETLINK_FIREWALL_SOCKET__RECV_MSG = _selinux.NETLINK_FIREWALL_SOCKET__RECV_MSG
-NETLINK_FIREWALL_SOCKET__SEND_MSG = _selinux.NETLINK_FIREWALL_SOCKET__SEND_MSG
-NETLINK_FIREWALL_SOCKET__NAME_BIND = _selinux.NETLINK_FIREWALL_SOCKET__NAME_BIND
-NETLINK_FIREWALL_SOCKET__NLMSG_READ = _selinux.NETLINK_FIREWALL_SOCKET__NLMSG_READ
-NETLINK_FIREWALL_SOCKET__NLMSG_WRITE = _selinux.NETLINK_FIREWALL_SOCKET__NLMSG_WRITE
-NETLINK_TCPDIAG_SOCKET__IOCTL = _selinux.NETLINK_TCPDIAG_SOCKET__IOCTL
-NETLINK_TCPDIAG_SOCKET__READ = _selinux.NETLINK_TCPDIAG_SOCKET__READ
-NETLINK_TCPDIAG_SOCKET__WRITE = _selinux.NETLINK_TCPDIAG_SOCKET__WRITE
-NETLINK_TCPDIAG_SOCKET__CREATE = _selinux.NETLINK_TCPDIAG_SOCKET__CREATE
-NETLINK_TCPDIAG_SOCKET__GETATTR = _selinux.NETLINK_TCPDIAG_SOCKET__GETATTR
-NETLINK_TCPDIAG_SOCKET__SETATTR = _selinux.NETLINK_TCPDIAG_SOCKET__SETATTR
-NETLINK_TCPDIAG_SOCKET__LOCK = _selinux.NETLINK_TCPDIAG_SOCKET__LOCK
-NETLINK_TCPDIAG_SOCKET__RELABELFROM = _selinux.NETLINK_TCPDIAG_SOCKET__RELABELFROM
-NETLINK_TCPDIAG_SOCKET__RELABELTO = _selinux.NETLINK_TCPDIAG_SOCKET__RELABELTO
-NETLINK_TCPDIAG_SOCKET__APPEND = _selinux.NETLINK_TCPDIAG_SOCKET__APPEND
-NETLINK_TCPDIAG_SOCKET__BIND = _selinux.NETLINK_TCPDIAG_SOCKET__BIND
-NETLINK_TCPDIAG_SOCKET__CONNECT = _selinux.NETLINK_TCPDIAG_SOCKET__CONNECT
-NETLINK_TCPDIAG_SOCKET__LISTEN = _selinux.NETLINK_TCPDIAG_SOCKET__LISTEN
-NETLINK_TCPDIAG_SOCKET__ACCEPT = _selinux.NETLINK_TCPDIAG_SOCKET__ACCEPT
-NETLINK_TCPDIAG_SOCKET__GETOPT = _selinux.NETLINK_TCPDIAG_SOCKET__GETOPT
-NETLINK_TCPDIAG_SOCKET__SETOPT = _selinux.NETLINK_TCPDIAG_SOCKET__SETOPT
-NETLINK_TCPDIAG_SOCKET__SHUTDOWN = _selinux.NETLINK_TCPDIAG_SOCKET__SHUTDOWN
-NETLINK_TCPDIAG_SOCKET__RECVFROM = _selinux.NETLINK_TCPDIAG_SOCKET__RECVFROM
-NETLINK_TCPDIAG_SOCKET__SENDTO = _selinux.NETLINK_TCPDIAG_SOCKET__SENDTO
-NETLINK_TCPDIAG_SOCKET__RECV_MSG = _selinux.NETLINK_TCPDIAG_SOCKET__RECV_MSG
-NETLINK_TCPDIAG_SOCKET__SEND_MSG = _selinux.NETLINK_TCPDIAG_SOCKET__SEND_MSG
-NETLINK_TCPDIAG_SOCKET__NAME_BIND = _selinux.NETLINK_TCPDIAG_SOCKET__NAME_BIND
-NETLINK_TCPDIAG_SOCKET__NLMSG_READ = _selinux.NETLINK_TCPDIAG_SOCKET__NLMSG_READ
-NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE = _selinux.NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE
-NETLINK_NFLOG_SOCKET__IOCTL = _selinux.NETLINK_NFLOG_SOCKET__IOCTL
-NETLINK_NFLOG_SOCKET__READ = _selinux.NETLINK_NFLOG_SOCKET__READ
-NETLINK_NFLOG_SOCKET__WRITE = _selinux.NETLINK_NFLOG_SOCKET__WRITE
-NETLINK_NFLOG_SOCKET__CREATE = _selinux.NETLINK_NFLOG_SOCKET__CREATE
-NETLINK_NFLOG_SOCKET__GETATTR = _selinux.NETLINK_NFLOG_SOCKET__GETATTR
-NETLINK_NFLOG_SOCKET__SETATTR = _selinux.NETLINK_NFLOG_SOCKET__SETATTR
-NETLINK_NFLOG_SOCKET__LOCK = _selinux.NETLINK_NFLOG_SOCKET__LOCK
-NETLINK_NFLOG_SOCKET__RELABELFROM = _selinux.NETLINK_NFLOG_SOCKET__RELABELFROM
-NETLINK_NFLOG_SOCKET__RELABELTO = _selinux.NETLINK_NFLOG_SOCKET__RELABELTO
-NETLINK_NFLOG_SOCKET__APPEND = _selinux.NETLINK_NFLOG_SOCKET__APPEND
-NETLINK_NFLOG_SOCKET__BIND = _selinux.NETLINK_NFLOG_SOCKET__BIND
-NETLINK_NFLOG_SOCKET__CONNECT = _selinux.NETLINK_NFLOG_SOCKET__CONNECT
-NETLINK_NFLOG_SOCKET__LISTEN = _selinux.NETLINK_NFLOG_SOCKET__LISTEN
-NETLINK_NFLOG_SOCKET__ACCEPT = _selinux.NETLINK_NFLOG_SOCKET__ACCEPT
-NETLINK_NFLOG_SOCKET__GETOPT = _selinux.NETLINK_NFLOG_SOCKET__GETOPT
-NETLINK_NFLOG_SOCKET__SETOPT = _selinux.NETLINK_NFLOG_SOCKET__SETOPT
-NETLINK_NFLOG_SOCKET__SHUTDOWN = _selinux.NETLINK_NFLOG_SOCKET__SHUTDOWN
-NETLINK_NFLOG_SOCKET__RECVFROM = _selinux.NETLINK_NFLOG_SOCKET__RECVFROM
-NETLINK_NFLOG_SOCKET__SENDTO = _selinux.NETLINK_NFLOG_SOCKET__SENDTO
-NETLINK_NFLOG_SOCKET__RECV_MSG = _selinux.NETLINK_NFLOG_SOCKET__RECV_MSG
-NETLINK_NFLOG_SOCKET__SEND_MSG = _selinux.NETLINK_NFLOG_SOCKET__SEND_MSG
-NETLINK_NFLOG_SOCKET__NAME_BIND = _selinux.NETLINK_NFLOG_SOCKET__NAME_BIND
-NETLINK_XFRM_SOCKET__IOCTL = _selinux.NETLINK_XFRM_SOCKET__IOCTL
-NETLINK_XFRM_SOCKET__READ = _selinux.NETLINK_XFRM_SOCKET__READ
-NETLINK_XFRM_SOCKET__WRITE = _selinux.NETLINK_XFRM_SOCKET__WRITE
-NETLINK_XFRM_SOCKET__CREATE = _selinux.NETLINK_XFRM_SOCKET__CREATE
-NETLINK_XFRM_SOCKET__GETATTR = _selinux.NETLINK_XFRM_SOCKET__GETATTR
-NETLINK_XFRM_SOCKET__SETATTR = _selinux.NETLINK_XFRM_SOCKET__SETATTR
-NETLINK_XFRM_SOCKET__LOCK = _selinux.NETLINK_XFRM_SOCKET__LOCK
-NETLINK_XFRM_SOCKET__RELABELFROM = _selinux.NETLINK_XFRM_SOCKET__RELABELFROM
-NETLINK_XFRM_SOCKET__RELABELTO = _selinux.NETLINK_XFRM_SOCKET__RELABELTO
-NETLINK_XFRM_SOCKET__APPEND = _selinux.NETLINK_XFRM_SOCKET__APPEND
-NETLINK_XFRM_SOCKET__BIND = _selinux.NETLINK_XFRM_SOCKET__BIND
-NETLINK_XFRM_SOCKET__CONNECT = _selinux.NETLINK_XFRM_SOCKET__CONNECT
-NETLINK_XFRM_SOCKET__LISTEN = _selinux.NETLINK_XFRM_SOCKET__LISTEN
-NETLINK_XFRM_SOCKET__ACCEPT = _selinux.NETLINK_XFRM_SOCKET__ACCEPT
-NETLINK_XFRM_SOCKET__GETOPT = _selinux.NETLINK_XFRM_SOCKET__GETOPT
-NETLINK_XFRM_SOCKET__SETOPT = _selinux.NETLINK_XFRM_SOCKET__SETOPT
-NETLINK_XFRM_SOCKET__SHUTDOWN = _selinux.NETLINK_XFRM_SOCKET__SHUTDOWN
-NETLINK_XFRM_SOCKET__RECVFROM = _selinux.NETLINK_XFRM_SOCKET__RECVFROM
-NETLINK_XFRM_SOCKET__SENDTO = _selinux.NETLINK_XFRM_SOCKET__SENDTO
-NETLINK_XFRM_SOCKET__RECV_MSG = _selinux.NETLINK_XFRM_SOCKET__RECV_MSG
-NETLINK_XFRM_SOCKET__SEND_MSG = _selinux.NETLINK_XFRM_SOCKET__SEND_MSG
-NETLINK_XFRM_SOCKET__NAME_BIND = _selinux.NETLINK_XFRM_SOCKET__NAME_BIND
-NETLINK_XFRM_SOCKET__NLMSG_READ = _selinux.NETLINK_XFRM_SOCKET__NLMSG_READ
-NETLINK_XFRM_SOCKET__NLMSG_WRITE = _selinux.NETLINK_XFRM_SOCKET__NLMSG_WRITE
-NETLINK_SELINUX_SOCKET__IOCTL = _selinux.NETLINK_SELINUX_SOCKET__IOCTL
-NETLINK_SELINUX_SOCKET__READ = _selinux.NETLINK_SELINUX_SOCKET__READ
-NETLINK_SELINUX_SOCKET__WRITE = _selinux.NETLINK_SELINUX_SOCKET__WRITE
-NETLINK_SELINUX_SOCKET__CREATE = _selinux.NETLINK_SELINUX_SOCKET__CREATE
-NETLINK_SELINUX_SOCKET__GETATTR = _selinux.NETLINK_SELINUX_SOCKET__GETATTR
-NETLINK_SELINUX_SOCKET__SETATTR = _selinux.NETLINK_SELINUX_SOCKET__SETATTR
-NETLINK_SELINUX_SOCKET__LOCK = _selinux.NETLINK_SELINUX_SOCKET__LOCK
-NETLINK_SELINUX_SOCKET__RELABELFROM = _selinux.NETLINK_SELINUX_SOCKET__RELABELFROM
-NETLINK_SELINUX_SOCKET__RELABELTO = _selinux.NETLINK_SELINUX_SOCKET__RELABELTO
-NETLINK_SELINUX_SOCKET__APPEND = _selinux.NETLINK_SELINUX_SOCKET__APPEND
-NETLINK_SELINUX_SOCKET__BIND = _selinux.NETLINK_SELINUX_SOCKET__BIND
-NETLINK_SELINUX_SOCKET__CONNECT = _selinux.NETLINK_SELINUX_SOCKET__CONNECT
-NETLINK_SELINUX_SOCKET__LISTEN = _selinux.NETLINK_SELINUX_SOCKET__LISTEN
-NETLINK_SELINUX_SOCKET__ACCEPT = _selinux.NETLINK_SELINUX_SOCKET__ACCEPT
-NETLINK_SELINUX_SOCKET__GETOPT = _selinux.NETLINK_SELINUX_SOCKET__GETOPT
-NETLINK_SELINUX_SOCKET__SETOPT = _selinux.NETLINK_SELINUX_SOCKET__SETOPT
-NETLINK_SELINUX_SOCKET__SHUTDOWN = _selinux.NETLINK_SELINUX_SOCKET__SHUTDOWN
-NETLINK_SELINUX_SOCKET__RECVFROM = _selinux.NETLINK_SELINUX_SOCKET__RECVFROM
-NETLINK_SELINUX_SOCKET__SENDTO = _selinux.NETLINK_SELINUX_SOCKET__SENDTO
-NETLINK_SELINUX_SOCKET__RECV_MSG = _selinux.NETLINK_SELINUX_SOCKET__RECV_MSG
-NETLINK_SELINUX_SOCKET__SEND_MSG = _selinux.NETLINK_SELINUX_SOCKET__SEND_MSG
-NETLINK_SELINUX_SOCKET__NAME_BIND = _selinux.NETLINK_SELINUX_SOCKET__NAME_BIND
-NETLINK_AUDIT_SOCKET__IOCTL = _selinux.NETLINK_AUDIT_SOCKET__IOCTL
-NETLINK_AUDIT_SOCKET__READ = _selinux.NETLINK_AUDIT_SOCKET__READ
-NETLINK_AUDIT_SOCKET__WRITE = _selinux.NETLINK_AUDIT_SOCKET__WRITE
-NETLINK_AUDIT_SOCKET__CREATE = _selinux.NETLINK_AUDIT_SOCKET__CREATE
-NETLINK_AUDIT_SOCKET__GETATTR = _selinux.NETLINK_AUDIT_SOCKET__GETATTR
-NETLINK_AUDIT_SOCKET__SETATTR = _selinux.NETLINK_AUDIT_SOCKET__SETATTR
-NETLINK_AUDIT_SOCKET__LOCK = _selinux.NETLINK_AUDIT_SOCKET__LOCK
-NETLINK_AUDIT_SOCKET__RELABELFROM = _selinux.NETLINK_AUDIT_SOCKET__RELABELFROM
-NETLINK_AUDIT_SOCKET__RELABELTO = _selinux.NETLINK_AUDIT_SOCKET__RELABELTO
-NETLINK_AUDIT_SOCKET__APPEND = _selinux.NETLINK_AUDIT_SOCKET__APPEND
-NETLINK_AUDIT_SOCKET__BIND = _selinux.NETLINK_AUDIT_SOCKET__BIND
-NETLINK_AUDIT_SOCKET__CONNECT = _selinux.NETLINK_AUDIT_SOCKET__CONNECT
-NETLINK_AUDIT_SOCKET__LISTEN = _selinux.NETLINK_AUDIT_SOCKET__LISTEN
-NETLINK_AUDIT_SOCKET__ACCEPT = _selinux.NETLINK_AUDIT_SOCKET__ACCEPT
-NETLINK_AUDIT_SOCKET__GETOPT = _selinux.NETLINK_AUDIT_SOCKET__GETOPT
-NETLINK_AUDIT_SOCKET__SETOPT = _selinux.NETLINK_AUDIT_SOCKET__SETOPT
-NETLINK_AUDIT_SOCKET__SHUTDOWN = _selinux.NETLINK_AUDIT_SOCKET__SHUTDOWN
-NETLINK_AUDIT_SOCKET__RECVFROM = _selinux.NETLINK_AUDIT_SOCKET__RECVFROM
-NETLINK_AUDIT_SOCKET__SENDTO = _selinux.NETLINK_AUDIT_SOCKET__SENDTO
-NETLINK_AUDIT_SOCKET__RECV_MSG = _selinux.NETLINK_AUDIT_SOCKET__RECV_MSG
-NETLINK_AUDIT_SOCKET__SEND_MSG = _selinux.NETLINK_AUDIT_SOCKET__SEND_MSG
-NETLINK_AUDIT_SOCKET__NAME_BIND = _selinux.NETLINK_AUDIT_SOCKET__NAME_BIND
-NETLINK_AUDIT_SOCKET__NLMSG_READ = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_READ
-NETLINK_AUDIT_SOCKET__NLMSG_WRITE = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_WRITE
-NETLINK_AUDIT_SOCKET__NLMSG_RELAY = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_RELAY
-NETLINK_AUDIT_SOCKET__NLMSG_READPRIV = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_READPRIV
-NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT = _selinux.NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT
-NETLINK_IP6FW_SOCKET__IOCTL = _selinux.NETLINK_IP6FW_SOCKET__IOCTL
-NETLINK_IP6FW_SOCKET__READ = _selinux.NETLINK_IP6FW_SOCKET__READ
-NETLINK_IP6FW_SOCKET__WRITE = _selinux.NETLINK_IP6FW_SOCKET__WRITE
-NETLINK_IP6FW_SOCKET__CREATE = _selinux.NETLINK_IP6FW_SOCKET__CREATE
-NETLINK_IP6FW_SOCKET__GETATTR = _selinux.NETLINK_IP6FW_SOCKET__GETATTR
-NETLINK_IP6FW_SOCKET__SETATTR = _selinux.NETLINK_IP6FW_SOCKET__SETATTR
-NETLINK_IP6FW_SOCKET__LOCK = _selinux.NETLINK_IP6FW_SOCKET__LOCK
-NETLINK_IP6FW_SOCKET__RELABELFROM = _selinux.NETLINK_IP6FW_SOCKET__RELABELFROM
-NETLINK_IP6FW_SOCKET__RELABELTO = _selinux.NETLINK_IP6FW_SOCKET__RELABELTO
-NETLINK_IP6FW_SOCKET__APPEND = _selinux.NETLINK_IP6FW_SOCKET__APPEND
-NETLINK_IP6FW_SOCKET__BIND = _selinux.NETLINK_IP6FW_SOCKET__BIND
-NETLINK_IP6FW_SOCKET__CONNECT = _selinux.NETLINK_IP6FW_SOCKET__CONNECT
-NETLINK_IP6FW_SOCKET__LISTEN = _selinux.NETLINK_IP6FW_SOCKET__LISTEN
-NETLINK_IP6FW_SOCKET__ACCEPT = _selinux.NETLINK_IP6FW_SOCKET__ACCEPT
-NETLINK_IP6FW_SOCKET__GETOPT = _selinux.NETLINK_IP6FW_SOCKET__GETOPT
-NETLINK_IP6FW_SOCKET__SETOPT = _selinux.NETLINK_IP6FW_SOCKET__SETOPT
-NETLINK_IP6FW_SOCKET__SHUTDOWN = _selinux.NETLINK_IP6FW_SOCKET__SHUTDOWN
-NETLINK_IP6FW_SOCKET__RECVFROM = _selinux.NETLINK_IP6FW_SOCKET__RECVFROM
-NETLINK_IP6FW_SOCKET__SENDTO = _selinux.NETLINK_IP6FW_SOCKET__SENDTO
-NETLINK_IP6FW_SOCKET__RECV_MSG = _selinux.NETLINK_IP6FW_SOCKET__RECV_MSG
-NETLINK_IP6FW_SOCKET__SEND_MSG = _selinux.NETLINK_IP6FW_SOCKET__SEND_MSG
-NETLINK_IP6FW_SOCKET__NAME_BIND = _selinux.NETLINK_IP6FW_SOCKET__NAME_BIND
-NETLINK_IP6FW_SOCKET__NLMSG_READ = _selinux.NETLINK_IP6FW_SOCKET__NLMSG_READ
-NETLINK_IP6FW_SOCKET__NLMSG_WRITE = _selinux.NETLINK_IP6FW_SOCKET__NLMSG_WRITE
-NETLINK_DNRT_SOCKET__IOCTL = _selinux.NETLINK_DNRT_SOCKET__IOCTL
-NETLINK_DNRT_SOCKET__READ = _selinux.NETLINK_DNRT_SOCKET__READ
-NETLINK_DNRT_SOCKET__WRITE = _selinux.NETLINK_DNRT_SOCKET__WRITE
-NETLINK_DNRT_SOCKET__CREATE = _selinux.NETLINK_DNRT_SOCKET__CREATE
-NETLINK_DNRT_SOCKET__GETATTR = _selinux.NETLINK_DNRT_SOCKET__GETATTR
-NETLINK_DNRT_SOCKET__SETATTR = _selinux.NETLINK_DNRT_SOCKET__SETATTR
-NETLINK_DNRT_SOCKET__LOCK = _selinux.NETLINK_DNRT_SOCKET__LOCK
-NETLINK_DNRT_SOCKET__RELABELFROM = _selinux.NETLINK_DNRT_SOCKET__RELABELFROM
-NETLINK_DNRT_SOCKET__RELABELTO = _selinux.NETLINK_DNRT_SOCKET__RELABELTO
-NETLINK_DNRT_SOCKET__APPEND = _selinux.NETLINK_DNRT_SOCKET__APPEND
-NETLINK_DNRT_SOCKET__BIND = _selinux.NETLINK_DNRT_SOCKET__BIND
-NETLINK_DNRT_SOCKET__CONNECT = _selinux.NETLINK_DNRT_SOCKET__CONNECT
-NETLINK_DNRT_SOCKET__LISTEN = _selinux.NETLINK_DNRT_SOCKET__LISTEN
-NETLINK_DNRT_SOCKET__ACCEPT = _selinux.NETLINK_DNRT_SOCKET__ACCEPT
-NETLINK_DNRT_SOCKET__GETOPT = _selinux.NETLINK_DNRT_SOCKET__GETOPT
-NETLINK_DNRT_SOCKET__SETOPT = _selinux.NETLINK_DNRT_SOCKET__SETOPT
-NETLINK_DNRT_SOCKET__SHUTDOWN = _selinux.NETLINK_DNRT_SOCKET__SHUTDOWN
-NETLINK_DNRT_SOCKET__RECVFROM = _selinux.NETLINK_DNRT_SOCKET__RECVFROM
-NETLINK_DNRT_SOCKET__SENDTO = _selinux.NETLINK_DNRT_SOCKET__SENDTO
-NETLINK_DNRT_SOCKET__RECV_MSG = _selinux.NETLINK_DNRT_SOCKET__RECV_MSG
-NETLINK_DNRT_SOCKET__SEND_MSG = _selinux.NETLINK_DNRT_SOCKET__SEND_MSG
-NETLINK_DNRT_SOCKET__NAME_BIND = _selinux.NETLINK_DNRT_SOCKET__NAME_BIND
-DBUS__ACQUIRE_SVC = _selinux.DBUS__ACQUIRE_SVC
-DBUS__SEND_MSG = _selinux.DBUS__SEND_MSG
-NSCD__GETPWD = _selinux.NSCD__GETPWD
-NSCD__GETGRP = _selinux.NSCD__GETGRP
-NSCD__GETHOST = _selinux.NSCD__GETHOST
-NSCD__GETSTAT = _selinux.NSCD__GETSTAT
-NSCD__ADMIN = _selinux.NSCD__ADMIN
-NSCD__SHMEMPWD = _selinux.NSCD__SHMEMPWD
-NSCD__SHMEMGRP = _selinux.NSCD__SHMEMGRP
-NSCD__SHMEMHOST = _selinux.NSCD__SHMEMHOST
-NSCD__GETSERV = _selinux.NSCD__GETSERV
-NSCD__SHMEMSERV = _selinux.NSCD__SHMEMSERV
-ASSOCIATION__SENDTO = _selinux.ASSOCIATION__SENDTO
-ASSOCIATION__RECVFROM = _selinux.ASSOCIATION__RECVFROM
-ASSOCIATION__SETCONTEXT = _selinux.ASSOCIATION__SETCONTEXT
-ASSOCIATION__POLMATCH = _selinux.ASSOCIATION__POLMATCH
-NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL
-NETLINK_KOBJECT_UEVENT_SOCKET__READ = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__READ
-NETLINK_KOBJECT_UEVENT_SOCKET__WRITE = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__WRITE
-NETLINK_KOBJECT_UEVENT_SOCKET__CREATE = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__CREATE
-NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR
-NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR
-NETLINK_KOBJECT_UEVENT_SOCKET__LOCK = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__LOCK
-NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM
-NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO
-NETLINK_KOBJECT_UEVENT_SOCKET__APPEND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__APPEND
-NETLINK_KOBJECT_UEVENT_SOCKET__BIND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__BIND
-NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT
-NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN
-NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT
-NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT
-NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT
-NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN
-NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM
-NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO
-NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG
-NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG
-NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND = _selinux.NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND
-APPLETALK_SOCKET__IOCTL = _selinux.APPLETALK_SOCKET__IOCTL
-APPLETALK_SOCKET__READ = _selinux.APPLETALK_SOCKET__READ
-APPLETALK_SOCKET__WRITE = _selinux.APPLETALK_SOCKET__WRITE
-APPLETALK_SOCKET__CREATE = _selinux.APPLETALK_SOCKET__CREATE
-APPLETALK_SOCKET__GETATTR = _selinux.APPLETALK_SOCKET__GETATTR
-APPLETALK_SOCKET__SETATTR = _selinux.APPLETALK_SOCKET__SETATTR
-APPLETALK_SOCKET__LOCK = _selinux.APPLETALK_SOCKET__LOCK
-APPLETALK_SOCKET__RELABELFROM = _selinux.APPLETALK_SOCKET__RELABELFROM
-APPLETALK_SOCKET__RELABELTO = _selinux.APPLETALK_SOCKET__RELABELTO
-APPLETALK_SOCKET__APPEND = _selinux.APPLETALK_SOCKET__APPEND
-APPLETALK_SOCKET__BIND = _selinux.APPLETALK_SOCKET__BIND
-APPLETALK_SOCKET__CONNECT = _selinux.APPLETALK_SOCKET__CONNECT
-APPLETALK_SOCKET__LISTEN = _selinux.APPLETALK_SOCKET__LISTEN
-APPLETALK_SOCKET__ACCEPT = _selinux.APPLETALK_SOCKET__ACCEPT
-APPLETALK_SOCKET__GETOPT = _selinux.APPLETALK_SOCKET__GETOPT
-APPLETALK_SOCKET__SETOPT = _selinux.APPLETALK_SOCKET__SETOPT
-APPLETALK_SOCKET__SHUTDOWN = _selinux.APPLETALK_SOCKET__SHUTDOWN
-APPLETALK_SOCKET__RECVFROM = _selinux.APPLETALK_SOCKET__RECVFROM
-APPLETALK_SOCKET__SENDTO = _selinux.APPLETALK_SOCKET__SENDTO
-APPLETALK_SOCKET__RECV_MSG = _selinux.APPLETALK_SOCKET__RECV_MSG
-APPLETALK_SOCKET__SEND_MSG = _selinux.APPLETALK_SOCKET__SEND_MSG
-APPLETALK_SOCKET__NAME_BIND = _selinux.APPLETALK_SOCKET__NAME_BIND
-PACKET__SEND = _selinux.PACKET__SEND
-PACKET__RECV = _selinux.PACKET__RECV
-PACKET__RELABELTO = _selinux.PACKET__RELABELTO
-PACKET__FLOW_IN = _selinux.PACKET__FLOW_IN
-PACKET__FLOW_OUT = _selinux.PACKET__FLOW_OUT
-PACKET__FORWARD_IN = _selinux.PACKET__FORWARD_IN
-PACKET__FORWARD_OUT = _selinux.PACKET__FORWARD_OUT
-KEY__VIEW = _selinux.KEY__VIEW
-KEY__READ = _selinux.KEY__READ
-KEY__WRITE = _selinux.KEY__WRITE
-KEY__SEARCH = _selinux.KEY__SEARCH
-KEY__LINK = _selinux.KEY__LINK
-KEY__SETATTR = _selinux.KEY__SETATTR
-KEY__CREATE = _selinux.KEY__CREATE
-CONTEXT__TRANSLATE = _selinux.CONTEXT__TRANSLATE
-CONTEXT__CONTAINS = _selinux.CONTEXT__CONTAINS
-DCCP_SOCKET__IOCTL = _selinux.DCCP_SOCKET__IOCTL
-DCCP_SOCKET__READ = _selinux.DCCP_SOCKET__READ
-DCCP_SOCKET__WRITE = _selinux.DCCP_SOCKET__WRITE
-DCCP_SOCKET__CREATE = _selinux.DCCP_SOCKET__CREATE
-DCCP_SOCKET__GETATTR = _selinux.DCCP_SOCKET__GETATTR
-DCCP_SOCKET__SETATTR = _selinux.DCCP_SOCKET__SETATTR
-DCCP_SOCKET__LOCK = _selinux.DCCP_SOCKET__LOCK
-DCCP_SOCKET__RELABELFROM = _selinux.DCCP_SOCKET__RELABELFROM
-DCCP_SOCKET__RELABELTO = _selinux.DCCP_SOCKET__RELABELTO
-DCCP_SOCKET__APPEND = _selinux.DCCP_SOCKET__APPEND
-DCCP_SOCKET__BIND = _selinux.DCCP_SOCKET__BIND
-DCCP_SOCKET__CONNECT = _selinux.DCCP_SOCKET__CONNECT
-DCCP_SOCKET__LISTEN = _selinux.DCCP_SOCKET__LISTEN
-DCCP_SOCKET__ACCEPT = _selinux.DCCP_SOCKET__ACCEPT
-DCCP_SOCKET__GETOPT = _selinux.DCCP_SOCKET__GETOPT
-DCCP_SOCKET__SETOPT = _selinux.DCCP_SOCKET__SETOPT
-DCCP_SOCKET__SHUTDOWN = _selinux.DCCP_SOCKET__SHUTDOWN
-DCCP_SOCKET__RECVFROM = _selinux.DCCP_SOCKET__RECVFROM
-DCCP_SOCKET__SENDTO = _selinux.DCCP_SOCKET__SENDTO
-DCCP_SOCKET__RECV_MSG = _selinux.DCCP_SOCKET__RECV_MSG
-DCCP_SOCKET__SEND_MSG = _selinux.DCCP_SOCKET__SEND_MSG
-DCCP_SOCKET__NAME_BIND = _selinux.DCCP_SOCKET__NAME_BIND
-DCCP_SOCKET__NODE_BIND = _selinux.DCCP_SOCKET__NODE_BIND
-DCCP_SOCKET__NAME_CONNECT = _selinux.DCCP_SOCKET__NAME_CONNECT
-MEMPROTECT__MMAP_ZERO = _selinux.MEMPROTECT__MMAP_ZERO
-DB_DATABASE__CREATE = _selinux.DB_DATABASE__CREATE
-DB_DATABASE__DROP = _selinux.DB_DATABASE__DROP
-DB_DATABASE__GETATTR = _selinux.DB_DATABASE__GETATTR
-DB_DATABASE__SETATTR = _selinux.DB_DATABASE__SETATTR
-DB_DATABASE__RELABELFROM = _selinux.DB_DATABASE__RELABELFROM
-DB_DATABASE__RELABELTO = _selinux.DB_DATABASE__RELABELTO
-DB_DATABASE__ACCESS = _selinux.DB_DATABASE__ACCESS
-DB_DATABASE__INSTALL_MODULE = _selinux.DB_DATABASE__INSTALL_MODULE
-DB_DATABASE__LOAD_MODULE = _selinux.DB_DATABASE__LOAD_MODULE
-DB_DATABASE__GET_PARAM = _selinux.DB_DATABASE__GET_PARAM
-DB_DATABASE__SET_PARAM = _selinux.DB_DATABASE__SET_PARAM
-DB_TABLE__CREATE = _selinux.DB_TABLE__CREATE
-DB_TABLE__DROP = _selinux.DB_TABLE__DROP
-DB_TABLE__GETATTR = _selinux.DB_TABLE__GETATTR
-DB_TABLE__SETATTR = _selinux.DB_TABLE__SETATTR
-DB_TABLE__RELABELFROM = _selinux.DB_TABLE__RELABELFROM
-DB_TABLE__RELABELTO = _selinux.DB_TABLE__RELABELTO
-DB_TABLE__USE = _selinux.DB_TABLE__USE
-DB_TABLE__SELECT = _selinux.DB_TABLE__SELECT
-DB_TABLE__UPDATE = _selinux.DB_TABLE__UPDATE
-DB_TABLE__INSERT = _selinux.DB_TABLE__INSERT
-DB_TABLE__DELETE = _selinux.DB_TABLE__DELETE
-DB_TABLE__LOCK = _selinux.DB_TABLE__LOCK
-DB_PROCEDURE__CREATE = _selinux.DB_PROCEDURE__CREATE
-DB_PROCEDURE__DROP = _selinux.DB_PROCEDURE__DROP
-DB_PROCEDURE__GETATTR = _selinux.DB_PROCEDURE__GETATTR
-DB_PROCEDURE__SETATTR = _selinux.DB_PROCEDURE__SETATTR
-DB_PROCEDURE__RELABELFROM = _selinux.DB_PROCEDURE__RELABELFROM
-DB_PROCEDURE__RELABELTO = _selinux.DB_PROCEDURE__RELABELTO
-DB_PROCEDURE__EXECUTE = _selinux.DB_PROCEDURE__EXECUTE
-DB_PROCEDURE__ENTRYPOINT = _selinux.DB_PROCEDURE__ENTRYPOINT
-DB_COLUMN__CREATE = _selinux.DB_COLUMN__CREATE
-DB_COLUMN__DROP = _selinux.DB_COLUMN__DROP
-DB_COLUMN__GETATTR = _selinux.DB_COLUMN__GETATTR
-DB_COLUMN__SETATTR = _selinux.DB_COLUMN__SETATTR
-DB_COLUMN__RELABELFROM = _selinux.DB_COLUMN__RELABELFROM
-DB_COLUMN__RELABELTO = _selinux.DB_COLUMN__RELABELTO
-DB_COLUMN__USE = _selinux.DB_COLUMN__USE
-DB_COLUMN__SELECT = _selinux.DB_COLUMN__SELECT
-DB_COLUMN__UPDATE = _selinux.DB_COLUMN__UPDATE
-DB_COLUMN__INSERT = _selinux.DB_COLUMN__INSERT
-DB_TUPLE__RELABELFROM = _selinux.DB_TUPLE__RELABELFROM
-DB_TUPLE__RELABELTO = _selinux.DB_TUPLE__RELABELTO
-DB_TUPLE__USE = _selinux.DB_TUPLE__USE
-DB_TUPLE__SELECT = _selinux.DB_TUPLE__SELECT
-DB_TUPLE__UPDATE = _selinux.DB_TUPLE__UPDATE
-DB_TUPLE__INSERT = _selinux.DB_TUPLE__INSERT
-DB_TUPLE__DELETE = _selinux.DB_TUPLE__DELETE
-DB_BLOB__CREATE = _selinux.DB_BLOB__CREATE
-DB_BLOB__DROP = _selinux.DB_BLOB__DROP
-DB_BLOB__GETATTR = _selinux.DB_BLOB__GETATTR
-DB_BLOB__SETATTR = _selinux.DB_BLOB__SETATTR
-DB_BLOB__RELABELFROM = _selinux.DB_BLOB__RELABELFROM
-DB_BLOB__RELABELTO = _selinux.DB_BLOB__RELABELTO
-DB_BLOB__READ = _selinux.DB_BLOB__READ
-DB_BLOB__WRITE = _selinux.DB_BLOB__WRITE
-DB_BLOB__IMPORT = _selinux.DB_BLOB__IMPORT
-DB_BLOB__EXPORT = _selinux.DB_BLOB__EXPORT
-PEER__RECV = _selinux.PEER__RECV
-X_APPLICATION_DATA__PASTE = _selinux.X_APPLICATION_DATA__PASTE
-X_APPLICATION_DATA__PASTE_AFTER_CONFIRM = _selinux.X_APPLICATION_DATA__PASTE_AFTER_CONFIRM
-X_APPLICATION_DATA__COPY = _selinux.X_APPLICATION_DATA__COPY
-class context_s_t(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, context_s_t, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, context_s_t, name)
- __repr__ = _swig_repr
- __swig_setmethods__["ptr"] = _selinux.context_s_t_ptr_set
- __swig_getmethods__["ptr"] = _selinux.context_s_t_ptr_get
- if _newclass:ptr = _swig_property(_selinux.context_s_t_ptr_get, _selinux.context_s_t_ptr_set)
- def __init__(self):
- this = _selinux.new_context_s_t()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_context_s_t
- __del__ = lambda self : None;
-context_s_t_swigregister = _selinux.context_s_t_swigregister
-context_s_t_swigregister(context_s_t)
-
-
-def context_new(*args):
- return _selinux.context_new(*args)
-context_new = _selinux.context_new
-
-def context_str(*args):
- return _selinux.context_str(*args)
-context_str = _selinux.context_str
-
-def context_free(*args):
- return _selinux.context_free(*args)
-context_free = _selinux.context_free
-
-def context_type_get(*args):
- return _selinux.context_type_get(*args)
-context_type_get = _selinux.context_type_get
-
-def context_range_get(*args):
- return _selinux.context_range_get(*args)
-context_range_get = _selinux.context_range_get
-
-def context_role_get(*args):
- return _selinux.context_role_get(*args)
-context_role_get = _selinux.context_role_get
-
-def context_user_get(*args):
- return _selinux.context_user_get(*args)
-context_user_get = _selinux.context_user_get
-
-def context_type_set(*args):
- return _selinux.context_type_set(*args)
-context_type_set = _selinux.context_type_set
-
-def context_range_set(*args):
- return _selinux.context_range_set(*args)
-context_range_set = _selinux.context_range_set
-
-def context_role_set(*args):
- return _selinux.context_role_set(*args)
-context_role_set = _selinux.context_role_set
-
-def context_user_set(*args):
- return _selinux.context_user_set(*args)
-context_user_set = _selinux.context_user_set
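(Illustrative sketch only, not part of this patch: the context helpers wrapped above are typically used to pick apart a security context string. The exact return conventions of the SWIG layer, and the sample context value, are assumptions.)

    import selinux  # the removed prebuilt bindings exposed this package

    # Parse a context string into a context handle, read its fields, free it.
    con = selinux.context_new("u:r:untrusted_app:s0")   # hypothetical context
    print(selinux.context_user_get(con))    # "u"
    print(selinux.context_role_get(con))    # "r"
    print(selinux.context_type_get(con))    # "untrusted_app"
    print(selinux.context_str(con))         # full string form
    selinux.context_free(con)               # release the underlying struct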
-SECCLASS_SECURITY = _selinux.SECCLASS_SECURITY
-SECCLASS_PROCESS = _selinux.SECCLASS_PROCESS
-SECCLASS_SYSTEM = _selinux.SECCLASS_SYSTEM
-SECCLASS_CAPABILITY = _selinux.SECCLASS_CAPABILITY
-SECCLASS_FILESYSTEM = _selinux.SECCLASS_FILESYSTEM
-SECCLASS_FILE = _selinux.SECCLASS_FILE
-SECCLASS_DIR = _selinux.SECCLASS_DIR
-SECCLASS_FD = _selinux.SECCLASS_FD
-SECCLASS_LNK_FILE = _selinux.SECCLASS_LNK_FILE
-SECCLASS_CHR_FILE = _selinux.SECCLASS_CHR_FILE
-SECCLASS_BLK_FILE = _selinux.SECCLASS_BLK_FILE
-SECCLASS_SOCK_FILE = _selinux.SECCLASS_SOCK_FILE
-SECCLASS_FIFO_FILE = _selinux.SECCLASS_FIFO_FILE
-SECCLASS_SOCKET = _selinux.SECCLASS_SOCKET
-SECCLASS_TCP_SOCKET = _selinux.SECCLASS_TCP_SOCKET
-SECCLASS_UDP_SOCKET = _selinux.SECCLASS_UDP_SOCKET
-SECCLASS_RAWIP_SOCKET = _selinux.SECCLASS_RAWIP_SOCKET
-SECCLASS_NODE = _selinux.SECCLASS_NODE
-SECCLASS_NETIF = _selinux.SECCLASS_NETIF
-SECCLASS_NETLINK_SOCKET = _selinux.SECCLASS_NETLINK_SOCKET
-SECCLASS_PACKET_SOCKET = _selinux.SECCLASS_PACKET_SOCKET
-SECCLASS_KEY_SOCKET = _selinux.SECCLASS_KEY_SOCKET
-SECCLASS_UNIX_STREAM_SOCKET = _selinux.SECCLASS_UNIX_STREAM_SOCKET
-SECCLASS_UNIX_DGRAM_SOCKET = _selinux.SECCLASS_UNIX_DGRAM_SOCKET
-SECCLASS_SEM = _selinux.SECCLASS_SEM
-SECCLASS_MSG = _selinux.SECCLASS_MSG
-SECCLASS_MSGQ = _selinux.SECCLASS_MSGQ
-SECCLASS_SHM = _selinux.SECCLASS_SHM
-SECCLASS_IPC = _selinux.SECCLASS_IPC
-SECCLASS_PASSWD = _selinux.SECCLASS_PASSWD
-SECCLASS_X_DRAWABLE = _selinux.SECCLASS_X_DRAWABLE
-SECCLASS_X_SCREEN = _selinux.SECCLASS_X_SCREEN
-SECCLASS_X_GC = _selinux.SECCLASS_X_GC
-SECCLASS_X_FONT = _selinux.SECCLASS_X_FONT
-SECCLASS_X_COLORMAP = _selinux.SECCLASS_X_COLORMAP
-SECCLASS_X_PROPERTY = _selinux.SECCLASS_X_PROPERTY
-SECCLASS_X_SELECTION = _selinux.SECCLASS_X_SELECTION
-SECCLASS_X_CURSOR = _selinux.SECCLASS_X_CURSOR
-SECCLASS_X_CLIENT = _selinux.SECCLASS_X_CLIENT
-SECCLASS_X_DEVICE = _selinux.SECCLASS_X_DEVICE
-SECCLASS_X_SERVER = _selinux.SECCLASS_X_SERVER
-SECCLASS_X_EXTENSION = _selinux.SECCLASS_X_EXTENSION
-SECCLASS_NETLINK_ROUTE_SOCKET = _selinux.SECCLASS_NETLINK_ROUTE_SOCKET
-SECCLASS_NETLINK_FIREWALL_SOCKET = _selinux.SECCLASS_NETLINK_FIREWALL_SOCKET
-SECCLASS_NETLINK_TCPDIAG_SOCKET = _selinux.SECCLASS_NETLINK_TCPDIAG_SOCKET
-SECCLASS_NETLINK_NFLOG_SOCKET = _selinux.SECCLASS_NETLINK_NFLOG_SOCKET
-SECCLASS_NETLINK_XFRM_SOCKET = _selinux.SECCLASS_NETLINK_XFRM_SOCKET
-SECCLASS_NETLINK_SELINUX_SOCKET = _selinux.SECCLASS_NETLINK_SELINUX_SOCKET
-SECCLASS_NETLINK_AUDIT_SOCKET = _selinux.SECCLASS_NETLINK_AUDIT_SOCKET
-SECCLASS_NETLINK_IP6FW_SOCKET = _selinux.SECCLASS_NETLINK_IP6FW_SOCKET
-SECCLASS_NETLINK_DNRT_SOCKET = _selinux.SECCLASS_NETLINK_DNRT_SOCKET
-SECCLASS_DBUS = _selinux.SECCLASS_DBUS
-SECCLASS_NSCD = _selinux.SECCLASS_NSCD
-SECCLASS_ASSOCIATION = _selinux.SECCLASS_ASSOCIATION
-SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET = _selinux.SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET
-SECCLASS_APPLETALK_SOCKET = _selinux.SECCLASS_APPLETALK_SOCKET
-SECCLASS_PACKET = _selinux.SECCLASS_PACKET
-SECCLASS_KEY = _selinux.SECCLASS_KEY
-SECCLASS_CONTEXT = _selinux.SECCLASS_CONTEXT
-SECCLASS_DCCP_SOCKET = _selinux.SECCLASS_DCCP_SOCKET
-SECCLASS_MEMPROTECT = _selinux.SECCLASS_MEMPROTECT
-SECCLASS_DB_DATABASE = _selinux.SECCLASS_DB_DATABASE
-SECCLASS_DB_TABLE = _selinux.SECCLASS_DB_TABLE
-SECCLASS_DB_PROCEDURE = _selinux.SECCLASS_DB_PROCEDURE
-SECCLASS_DB_COLUMN = _selinux.SECCLASS_DB_COLUMN
-SECCLASS_DB_TUPLE = _selinux.SECCLASS_DB_TUPLE
-SECCLASS_DB_BLOB = _selinux.SECCLASS_DB_BLOB
-SECCLASS_PEER = _selinux.SECCLASS_PEER
-SECCLASS_CAPABILITY2 = _selinux.SECCLASS_CAPABILITY2
-SECCLASS_X_RESOURCE = _selinux.SECCLASS_X_RESOURCE
-SECCLASS_X_EVENT = _selinux.SECCLASS_X_EVENT
-SECCLASS_X_SYNTHETIC_EVENT = _selinux.SECCLASS_X_SYNTHETIC_EVENT
-SECCLASS_X_APPLICATION_DATA = _selinux.SECCLASS_X_APPLICATION_DATA
-SECINITSID_KERNEL = _selinux.SECINITSID_KERNEL
-SECINITSID_SECURITY = _selinux.SECINITSID_SECURITY
-SECINITSID_UNLABELED = _selinux.SECINITSID_UNLABELED
-SECINITSID_FS = _selinux.SECINITSID_FS
-SECINITSID_FILE = _selinux.SECINITSID_FILE
-SECINITSID_FILE_LABELS = _selinux.SECINITSID_FILE_LABELS
-SECINITSID_INIT = _selinux.SECINITSID_INIT
-SECINITSID_ANY_SOCKET = _selinux.SECINITSID_ANY_SOCKET
-SECINITSID_PORT = _selinux.SECINITSID_PORT
-SECINITSID_NETIF = _selinux.SECINITSID_NETIF
-SECINITSID_NETMSG = _selinux.SECINITSID_NETMSG
-SECINITSID_NODE = _selinux.SECINITSID_NODE
-SECINITSID_IGMP_PACKET = _selinux.SECINITSID_IGMP_PACKET
-SECINITSID_ICMP_SOCKET = _selinux.SECINITSID_ICMP_SOCKET
-SECINITSID_TCP_SOCKET = _selinux.SECINITSID_TCP_SOCKET
-SECINITSID_SYSCTL_MODPROBE = _selinux.SECINITSID_SYSCTL_MODPROBE
-SECINITSID_SYSCTL = _selinux.SECINITSID_SYSCTL
-SECINITSID_SYSCTL_FS = _selinux.SECINITSID_SYSCTL_FS
-SECINITSID_SYSCTL_KERNEL = _selinux.SECINITSID_SYSCTL_KERNEL
-SECINITSID_SYSCTL_NET = _selinux.SECINITSID_SYSCTL_NET
-SECINITSID_SYSCTL_NET_UNIX = _selinux.SECINITSID_SYSCTL_NET_UNIX
-SECINITSID_SYSCTL_VM = _selinux.SECINITSID_SYSCTL_VM
-SECINITSID_SYSCTL_DEV = _selinux.SECINITSID_SYSCTL_DEV
-SECINITSID_KMOD = _selinux.SECINITSID_KMOD
-SECINITSID_POLICY = _selinux.SECINITSID_POLICY
-SECINITSID_SCMP_PACKET = _selinux.SECINITSID_SCMP_PACKET
-SECINITSID_DEVNULL = _selinux.SECINITSID_DEVNULL
-SECINITSID_NUM = _selinux.SECINITSID_NUM
-SELINUX_DEFAULTUSER = _selinux.SELINUX_DEFAULTUSER
-
-def get_ordered_context_list(*args):
- return _selinux.get_ordered_context_list(*args)
-get_ordered_context_list = _selinux.get_ordered_context_list
-
-def get_ordered_context_list_with_level(*args):
- return _selinux.get_ordered_context_list_with_level(*args)
-get_ordered_context_list_with_level = _selinux.get_ordered_context_list_with_level
-
-def get_default_context(*args):
- return _selinux.get_default_context(*args)
-get_default_context = _selinux.get_default_context
-
-def get_default_context_with_level(*args):
- return _selinux.get_default_context_with_level(*args)
-get_default_context_with_level = _selinux.get_default_context_with_level
-
-def get_default_context_with_role(*args):
- return _selinux.get_default_context_with_role(*args)
-get_default_context_with_role = _selinux.get_default_context_with_role
-
-def get_default_context_with_rolelevel(*args):
- return _selinux.get_default_context_with_rolelevel(*args)
-get_default_context_with_rolelevel = _selinux.get_default_context_with_rolelevel
-
-def query_user_context():
- return _selinux.query_user_context()
-query_user_context = _selinux.query_user_context
-
-def manual_user_enter_context(*args):
- return _selinux.manual_user_enter_context(*args)
-manual_user_enter_context = _selinux.manual_user_enter_context
-
-def selinux_default_type_path():
- return _selinux.selinux_default_type_path()
-selinux_default_type_path = _selinux.selinux_default_type_path
-
-def get_default_type(*args):
- return _selinux.get_default_type(*args)
-get_default_type = _selinux.get_default_type
-SELABEL_CTX_FILE = _selinux.SELABEL_CTX_FILE
-SELABEL_CTX_MEDIA = _selinux.SELABEL_CTX_MEDIA
-SELABEL_CTX_X = _selinux.SELABEL_CTX_X
-SELABEL_CTX_DB = _selinux.SELABEL_CTX_DB
-SELABEL_CTX_ANDROID_PROP = _selinux.SELABEL_CTX_ANDROID_PROP
-SELABEL_OPT_UNUSED = _selinux.SELABEL_OPT_UNUSED
-SELABEL_OPT_VALIDATE = _selinux.SELABEL_OPT_VALIDATE
-SELABEL_OPT_BASEONLY = _selinux.SELABEL_OPT_BASEONLY
-SELABEL_OPT_PATH = _selinux.SELABEL_OPT_PATH
-SELABEL_OPT_SUBSET = _selinux.SELABEL_OPT_SUBSET
-SELABEL_NOPT = _selinux.SELABEL_NOPT
-
-def selabel_open(*args):
- return _selinux.selabel_open(*args)
-selabel_open = _selinux.selabel_open
-
-def selabel_close(*args):
- return _selinux.selabel_close(*args)
-selabel_close = _selinux.selabel_close
-
-def selabel_lookup(*args):
- return _selinux.selabel_lookup(*args)
-selabel_lookup = _selinux.selabel_lookup
-
-def selabel_lookup_raw(*args):
- return _selinux.selabel_lookup_raw(*args)
-selabel_lookup_raw = _selinux.selabel_lookup_raw
-
-def selabel_partial_match(*args):
- return _selinux.selabel_partial_match(*args)
-selabel_partial_match = _selinux.selabel_partial_match
-
-def selabel_lookup_best_match(*args):
- return _selinux.selabel_lookup_best_match(*args)
-selabel_lookup_best_match = _selinux.selabel_lookup_best_match
-
-def selabel_lookup_best_match_raw(*args):
- return _selinux.selabel_lookup_best_match_raw(*args)
-selabel_lookup_best_match_raw = _selinux.selabel_lookup_best_match_raw
-
-def selabel_stats(*args):
- return _selinux.selabel_stats(*args)
-selabel_stats = _selinux.selabel_stats
-SELABEL_X_PROP = _selinux.SELABEL_X_PROP
-SELABEL_X_EXT = _selinux.SELABEL_X_EXT
-SELABEL_X_CLIENT = _selinux.SELABEL_X_CLIENT
-SELABEL_X_EVENT = _selinux.SELABEL_X_EVENT
-SELABEL_X_SELN = _selinux.SELABEL_X_SELN
-SELABEL_X_POLYPROP = _selinux.SELABEL_X_POLYPROP
-SELABEL_X_POLYSELN = _selinux.SELABEL_X_POLYSELN
-SELABEL_DB_DATABASE = _selinux.SELABEL_DB_DATABASE
-SELABEL_DB_SCHEMA = _selinux.SELABEL_DB_SCHEMA
-SELABEL_DB_TABLE = _selinux.SELABEL_DB_TABLE
-SELABEL_DB_COLUMN = _selinux.SELABEL_DB_COLUMN
-SELABEL_DB_SEQUENCE = _selinux.SELABEL_DB_SEQUENCE
-SELABEL_DB_VIEW = _selinux.SELABEL_DB_VIEW
-SELABEL_DB_PROCEDURE = _selinux.SELABEL_DB_PROCEDURE
-SELABEL_DB_BLOB = _selinux.SELABEL_DB_BLOB
-SELABEL_DB_TUPLE = _selinux.SELABEL_DB_TUPLE
-SELABEL_DB_LANGUAGE = _selinux.SELABEL_DB_LANGUAGE
-SELABEL_DB_EXCEPTION = _selinux.SELABEL_DB_EXCEPTION
-SELABEL_DB_DATATYPE = _selinux.SELABEL_DB_DATATYPE
-
-def is_selinux_enabled():
- return _selinux.is_selinux_enabled()
-is_selinux_enabled = _selinux.is_selinux_enabled
-
-def is_selinux_mls_enabled():
- return _selinux.is_selinux_mls_enabled()
-is_selinux_mls_enabled = _selinux.is_selinux_mls_enabled
-
-def getcon():
- return _selinux.getcon()
-getcon = _selinux.getcon
-
-def getcon_raw():
- return _selinux.getcon_raw()
-getcon_raw = _selinux.getcon_raw
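(For orientation only, not from the patch: the enablement check and process-context getter above are usually driven as below; the [rc, context] return shape comes from the SWIG output typemaps and is an assumption here.)

    import selinux

    if selinux.is_selinux_enabled():
        rc, con = selinux.getcon()      # context of the calling process
        if rc == 0:
            print("running as %s" % con)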
-
-def setcon(*args):
- return _selinux.setcon(*args)
-setcon = _selinux.setcon
-
-def setcon_raw(*args):
- return _selinux.setcon_raw(*args)
-setcon_raw = _selinux.setcon_raw
-
-def getpidcon(*args):
- return _selinux.getpidcon(*args)
-getpidcon = _selinux.getpidcon
-
-def getpidcon_raw(*args):
- return _selinux.getpidcon_raw(*args)
-getpidcon_raw = _selinux.getpidcon_raw
-
-def getprevcon():
- return _selinux.getprevcon()
-getprevcon = _selinux.getprevcon
-
-def getprevcon_raw():
- return _selinux.getprevcon_raw()
-getprevcon_raw = _selinux.getprevcon_raw
-
-def getexeccon():
- return _selinux.getexeccon()
-getexeccon = _selinux.getexeccon
-
-def getexeccon_raw():
- return _selinux.getexeccon_raw()
-getexeccon_raw = _selinux.getexeccon_raw
-
-def setexeccon(*args):
- return _selinux.setexeccon(*args)
-setexeccon = _selinux.setexeccon
-
-def setexeccon_raw(*args):
- return _selinux.setexeccon_raw(*args)
-setexeccon_raw = _selinux.setexeccon_raw
-
-def getfscreatecon():
- return _selinux.getfscreatecon()
-getfscreatecon = _selinux.getfscreatecon
-
-def getfscreatecon_raw():
- return _selinux.getfscreatecon_raw()
-getfscreatecon_raw = _selinux.getfscreatecon_raw
-
-def setfscreatecon(*args):
- return _selinux.setfscreatecon(*args)
-setfscreatecon = _selinux.setfscreatecon
-
-def setfscreatecon_raw(*args):
- return _selinux.setfscreatecon_raw(*args)
-setfscreatecon_raw = _selinux.setfscreatecon_raw
-
-def getkeycreatecon():
- return _selinux.getkeycreatecon()
-getkeycreatecon = _selinux.getkeycreatecon
-
-def getkeycreatecon_raw():
- return _selinux.getkeycreatecon_raw()
-getkeycreatecon_raw = _selinux.getkeycreatecon_raw
-
-def setkeycreatecon(*args):
- return _selinux.setkeycreatecon(*args)
-setkeycreatecon = _selinux.setkeycreatecon
-
-def setkeycreatecon_raw(*args):
- return _selinux.setkeycreatecon_raw(*args)
-setkeycreatecon_raw = _selinux.setkeycreatecon_raw
-
-def getsockcreatecon():
- return _selinux.getsockcreatecon()
-getsockcreatecon = _selinux.getsockcreatecon
-
-def getsockcreatecon_raw():
- return _selinux.getsockcreatecon_raw()
-getsockcreatecon_raw = _selinux.getsockcreatecon_raw
-
-def setsockcreatecon(*args):
- return _selinux.setsockcreatecon(*args)
-setsockcreatecon = _selinux.setsockcreatecon
-
-def setsockcreatecon_raw(*args):
- return _selinux.setsockcreatecon_raw(*args)
-setsockcreatecon_raw = _selinux.setsockcreatecon_raw
-
-def getfilecon(*args):
- return _selinux.getfilecon(*args)
-getfilecon = _selinux.getfilecon
-
-def getfilecon_raw(*args):
- return _selinux.getfilecon_raw(*args)
-getfilecon_raw = _selinux.getfilecon_raw
-
-def lgetfilecon(*args):
- return _selinux.lgetfilecon(*args)
-lgetfilecon = _selinux.lgetfilecon
-
-def lgetfilecon_raw(*args):
- return _selinux.lgetfilecon_raw(*args)
-lgetfilecon_raw = _selinux.lgetfilecon_raw
-
-def fgetfilecon(*args):
- return _selinux.fgetfilecon(*args)
-fgetfilecon = _selinux.fgetfilecon
-
-def fgetfilecon_raw(*args):
- return _selinux.fgetfilecon_raw(*args)
-fgetfilecon_raw = _selinux.fgetfilecon_raw
-
-def setfilecon(*args):
- return _selinux.setfilecon(*args)
-setfilecon = _selinux.setfilecon
-
-def setfilecon_raw(*args):
- return _selinux.setfilecon_raw(*args)
-setfilecon_raw = _selinux.setfilecon_raw
-
-def lsetfilecon(*args):
- return _selinux.lsetfilecon(*args)
-lsetfilecon = _selinux.lsetfilecon
-
-def lsetfilecon_raw(*args):
- return _selinux.lsetfilecon_raw(*args)
-lsetfilecon_raw = _selinux.lsetfilecon_raw
-
-def fsetfilecon(*args):
- return _selinux.fsetfilecon(*args)
-fsetfilecon = _selinux.fsetfilecon
-
-def fsetfilecon_raw(*args):
- return _selinux.fsetfilecon_raw(*args)
-fsetfilecon_raw = _selinux.fsetfilecon_raw
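(A minimal sketch of the file-labeling wrappers above, illustrative only; the [rc, context] pairs, the 0-on-success convention, and the example paths are assumptions.)

    import selinux

    rc, con = selinux.getfilecon("/system/bin/toolbox")   # hypothetical path
    if rc >= 0:
        print("label: %s" % con)
    # lsetfilecon relabels the link itself rather than its target.
    rc = selinux.lsetfilecon("/data/local/tmp/foo", con)   # hypothetical path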
-
-def getpeercon(*args):
- return _selinux.getpeercon(*args)
-getpeercon = _selinux.getpeercon
-
-def getpeercon_raw(*args):
- return _selinux.getpeercon_raw(*args)
-getpeercon_raw = _selinux.getpeercon_raw
-class av_decision(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, av_decision, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, av_decision, name)
- __repr__ = _swig_repr
- __swig_setmethods__["allowed"] = _selinux.av_decision_allowed_set
- __swig_getmethods__["allowed"] = _selinux.av_decision_allowed_get
- if _newclass:allowed = _swig_property(_selinux.av_decision_allowed_get, _selinux.av_decision_allowed_set)
- __swig_setmethods__["decided"] = _selinux.av_decision_decided_set
- __swig_getmethods__["decided"] = _selinux.av_decision_decided_get
- if _newclass:decided = _swig_property(_selinux.av_decision_decided_get, _selinux.av_decision_decided_set)
- __swig_setmethods__["auditallow"] = _selinux.av_decision_auditallow_set
- __swig_getmethods__["auditallow"] = _selinux.av_decision_auditallow_get
- if _newclass:auditallow = _swig_property(_selinux.av_decision_auditallow_get, _selinux.av_decision_auditallow_set)
- __swig_setmethods__["auditdeny"] = _selinux.av_decision_auditdeny_set
- __swig_getmethods__["auditdeny"] = _selinux.av_decision_auditdeny_get
- if _newclass:auditdeny = _swig_property(_selinux.av_decision_auditdeny_get, _selinux.av_decision_auditdeny_set)
- __swig_setmethods__["seqno"] = _selinux.av_decision_seqno_set
- __swig_getmethods__["seqno"] = _selinux.av_decision_seqno_get
- if _newclass:seqno = _swig_property(_selinux.av_decision_seqno_get, _selinux.av_decision_seqno_set)
- __swig_setmethods__["flags"] = _selinux.av_decision_flags_set
- __swig_getmethods__["flags"] = _selinux.av_decision_flags_get
- if _newclass:flags = _swig_property(_selinux.av_decision_flags_get, _selinux.av_decision_flags_set)
- def __init__(self):
- this = _selinux.new_av_decision()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_av_decision
- __del__ = lambda self : None;
-av_decision_swigregister = _selinux.av_decision_swigregister
-av_decision_swigregister(av_decision)
-
-SELINUX_AVD_FLAGS_PERMISSIVE = _selinux.SELINUX_AVD_FLAGS_PERMISSIVE
-class selinux_opt(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, selinux_opt, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, selinux_opt, name)
- __repr__ = _swig_repr
- __swig_setmethods__["type"] = _selinux.selinux_opt_type_set
- __swig_getmethods__["type"] = _selinux.selinux_opt_type_get
- if _newclass:type = _swig_property(_selinux.selinux_opt_type_get, _selinux.selinux_opt_type_set)
- __swig_setmethods__["value"] = _selinux.selinux_opt_value_set
- __swig_getmethods__["value"] = _selinux.selinux_opt_value_get
- if _newclass:value = _swig_property(_selinux.selinux_opt_value_get, _selinux.selinux_opt_value_set)
- def __init__(self):
- this = _selinux.new_selinux_opt()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_selinux_opt
- __del__ = lambda self : None;
-selinux_opt_swigregister = _selinux.selinux_opt_swigregister
-selinux_opt_swigregister(selinux_opt)
-
-class selinux_callback(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, selinux_callback, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, selinux_callback, name)
- __repr__ = _swig_repr
- __swig_setmethods__["func_log"] = _selinux.selinux_callback_func_log_set
- __swig_getmethods__["func_log"] = _selinux.selinux_callback_func_log_get
- if _newclass:func_log = _swig_property(_selinux.selinux_callback_func_log_get, _selinux.selinux_callback_func_log_set)
- __swig_setmethods__["func_audit"] = _selinux.selinux_callback_func_audit_set
- __swig_getmethods__["func_audit"] = _selinux.selinux_callback_func_audit_get
- if _newclass:func_audit = _swig_property(_selinux.selinux_callback_func_audit_get, _selinux.selinux_callback_func_audit_set)
- __swig_setmethods__["func_validate"] = _selinux.selinux_callback_func_validate_set
- __swig_getmethods__["func_validate"] = _selinux.selinux_callback_func_validate_get
- if _newclass:func_validate = _swig_property(_selinux.selinux_callback_func_validate_get, _selinux.selinux_callback_func_validate_set)
- __swig_setmethods__["func_setenforce"] = _selinux.selinux_callback_func_setenforce_set
- __swig_getmethods__["func_setenforce"] = _selinux.selinux_callback_func_setenforce_get
- if _newclass:func_setenforce = _swig_property(_selinux.selinux_callback_func_setenforce_get, _selinux.selinux_callback_func_setenforce_set)
- __swig_setmethods__["func_policyload"] = _selinux.selinux_callback_func_policyload_set
- __swig_getmethods__["func_policyload"] = _selinux.selinux_callback_func_policyload_get
- if _newclass:func_policyload = _swig_property(_selinux.selinux_callback_func_policyload_get, _selinux.selinux_callback_func_policyload_set)
- def __init__(self):
- this = _selinux.new_selinux_callback()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_selinux_callback
- __del__ = lambda self : None;
-selinux_callback_swigregister = _selinux.selinux_callback_swigregister
-selinux_callback_swigregister(selinux_callback)
-
-SELINUX_CB_LOG = _selinux.SELINUX_CB_LOG
-SELINUX_CB_AUDIT = _selinux.SELINUX_CB_AUDIT
-SELINUX_CB_VALIDATE = _selinux.SELINUX_CB_VALIDATE
-SELINUX_CB_SETENFORCE = _selinux.SELINUX_CB_SETENFORCE
-SELINUX_CB_POLICYLOAD = _selinux.SELINUX_CB_POLICYLOAD
-
-def selinux_get_callback(*args):
- return _selinux.selinux_get_callback(*args)
-selinux_get_callback = _selinux.selinux_get_callback
-
-def selinux_set_callback(*args):
- return _selinux.selinux_set_callback(*args)
-selinux_set_callback = _selinux.selinux_set_callback
-SELINUX_ERROR = _selinux.SELINUX_ERROR
-SELINUX_WARNING = _selinux.SELINUX_WARNING
-SELINUX_INFO = _selinux.SELINUX_INFO
-SELINUX_AVC = _selinux.SELINUX_AVC
-SELINUX_TRANS_DIR = _selinux.SELINUX_TRANS_DIR
-
-def security_compute_av(*args):
- return _selinux.security_compute_av(*args)
-security_compute_av = _selinux.security_compute_av
-
-def security_compute_av_raw(*args):
- return _selinux.security_compute_av_raw(*args)
-security_compute_av_raw = _selinux.security_compute_av_raw
-
-def security_compute_av_flags(*args):
- return _selinux.security_compute_av_flags(*args)
-security_compute_av_flags = _selinux.security_compute_av_flags
-
-def security_compute_av_flags_raw(*args):
- return _selinux.security_compute_av_flags_raw(*args)
-security_compute_av_flags_raw = _selinux.security_compute_av_flags_raw
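(Purely a sketch of how the access-vector wrappers above fit together; the av_decision output-argument convention and the sample contexts are assumptions, not something this patch documents.)

    import selinux

    tclass = selinux.string_to_security_class("file")
    want = selinux.string_to_av_perm(tclass, "read")
    avd = selinux.av_decision()
    rc = selinux.security_compute_av("u:r:shell:s0",
                                     "u:object_r:system_file:s0",
                                     tclass, want, avd)
    if rc == 0:
        print("allowed" if (avd.allowed & want) else "denied")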
-
-def security_compute_create(*args):
- return _selinux.security_compute_create(*args)
-security_compute_create = _selinux.security_compute_create
-
-def security_compute_create_raw(*args):
- return _selinux.security_compute_create_raw(*args)
-security_compute_create_raw = _selinux.security_compute_create_raw
-
-def security_compute_create_name(*args):
- return _selinux.security_compute_create_name(*args)
-security_compute_create_name = _selinux.security_compute_create_name
-
-def security_compute_create_name_raw(*args):
- return _selinux.security_compute_create_name_raw(*args)
-security_compute_create_name_raw = _selinux.security_compute_create_name_raw
-
-def security_compute_relabel(*args):
- return _selinux.security_compute_relabel(*args)
-security_compute_relabel = _selinux.security_compute_relabel
-
-def security_compute_relabel_raw(*args):
- return _selinux.security_compute_relabel_raw(*args)
-security_compute_relabel_raw = _selinux.security_compute_relabel_raw
-
-def security_compute_member(*args):
- return _selinux.security_compute_member(*args)
-security_compute_member = _selinux.security_compute_member
-
-def security_compute_member_raw(*args):
- return _selinux.security_compute_member_raw(*args)
-security_compute_member_raw = _selinux.security_compute_member_raw
-
-def security_compute_user(*args):
- return _selinux.security_compute_user(*args)
-security_compute_user = _selinux.security_compute_user
-
-def security_compute_user_raw(*args):
- return _selinux.security_compute_user_raw(*args)
-security_compute_user_raw = _selinux.security_compute_user_raw
-
-def security_load_policy(*args):
- return _selinux.security_load_policy(*args)
-security_load_policy = _selinux.security_load_policy
-
-def security_get_initial_context(*args):
- return _selinux.security_get_initial_context(*args)
-security_get_initial_context = _selinux.security_get_initial_context
-
-def security_get_initial_context_raw(*args):
- return _selinux.security_get_initial_context_raw(*args)
-security_get_initial_context_raw = _selinux.security_get_initial_context_raw
-
-def selinux_mkload_policy(*args):
- return _selinux.selinux_mkload_policy(*args)
-selinux_mkload_policy = _selinux.selinux_mkload_policy
-
-def selinux_init_load_policy():
- return _selinux.selinux_init_load_policy()
-selinux_init_load_policy = _selinux.selinux_init_load_policy
-class SELboolean(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, SELboolean, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, SELboolean, name)
- __repr__ = _swig_repr
- __swig_setmethods__["name"] = _selinux.SELboolean_name_set
- __swig_getmethods__["name"] = _selinux.SELboolean_name_get
- if _newclass:name = _swig_property(_selinux.SELboolean_name_get, _selinux.SELboolean_name_set)
- __swig_setmethods__["value"] = _selinux.SELboolean_value_set
- __swig_getmethods__["value"] = _selinux.SELboolean_value_get
- if _newclass:value = _swig_property(_selinux.SELboolean_value_get, _selinux.SELboolean_value_set)
- def __init__(self):
- this = _selinux.new_SELboolean()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_SELboolean
- __del__ = lambda self : None;
-SELboolean_swigregister = _selinux.SELboolean_swigregister
-SELboolean_swigregister(SELboolean)
-
-
-def security_set_boolean_list(*args):
- return _selinux.security_set_boolean_list(*args)
-security_set_boolean_list = _selinux.security_set_boolean_list
-
-def security_load_booleans(*args):
- return _selinux.security_load_booleans(*args)
-security_load_booleans = _selinux.security_load_booleans
-
-def security_check_context(*args):
- return _selinux.security_check_context(*args)
-security_check_context = _selinux.security_check_context
-
-def security_check_context_raw(*args):
- return _selinux.security_check_context_raw(*args)
-security_check_context_raw = _selinux.security_check_context_raw
-
-def security_canonicalize_context(*args):
- return _selinux.security_canonicalize_context(*args)
-security_canonicalize_context = _selinux.security_canonicalize_context
-
-def security_canonicalize_context_raw(*args):
- return _selinux.security_canonicalize_context_raw(*args)
-security_canonicalize_context_raw = _selinux.security_canonicalize_context_raw
-
-def security_getenforce():
- return _selinux.security_getenforce()
-security_getenforce = _selinux.security_getenforce
-
-def security_setenforce(*args):
- return _selinux.security_setenforce(*args)
-security_setenforce = _selinux.security_setenforce
-
-def security_deny_unknown():
- return _selinux.security_deny_unknown()
-security_deny_unknown = _selinux.security_deny_unknown
-
-def security_disable():
- return _selinux.security_disable()
-security_disable = _selinux.security_disable
-
-def security_policyvers():
- return _selinux.security_policyvers()
-security_policyvers = _selinux.security_policyvers
-
-def security_get_boolean_names():
- return _selinux.security_get_boolean_names()
-security_get_boolean_names = _selinux.security_get_boolean_names
-
-def security_get_boolean_pending(*args):
- return _selinux.security_get_boolean_pending(*args)
-security_get_boolean_pending = _selinux.security_get_boolean_pending
-
-def security_get_boolean_active(*args):
- return _selinux.security_get_boolean_active(*args)
-security_get_boolean_active = _selinux.security_get_boolean_active
-
-def security_set_boolean(*args):
- return _selinux.security_set_boolean(*args)
-security_set_boolean = _selinux.security_set_boolean
-
-def security_commit_booleans():
- return _selinux.security_commit_booleans()
-security_commit_booleans = _selinux.security_commit_booleans
-class security_class_mapping(_object):
- __swig_setmethods__ = {}
- __setattr__ = lambda self, name, value: _swig_setattr(self, security_class_mapping, name, value)
- __swig_getmethods__ = {}
- __getattr__ = lambda self, name: _swig_getattr(self, security_class_mapping, name)
- __repr__ = _swig_repr
- __swig_setmethods__["name"] = _selinux.security_class_mapping_name_set
- __swig_getmethods__["name"] = _selinux.security_class_mapping_name_get
- if _newclass:name = _swig_property(_selinux.security_class_mapping_name_get, _selinux.security_class_mapping_name_set)
- __swig_setmethods__["perms"] = _selinux.security_class_mapping_perms_set
- __swig_getmethods__["perms"] = _selinux.security_class_mapping_perms_get
- if _newclass:perms = _swig_property(_selinux.security_class_mapping_perms_get, _selinux.security_class_mapping_perms_set)
- def __init__(self):
- this = _selinux.new_security_class_mapping()
- try: self.this.append(this)
- except: self.this = this
- __swig_destroy__ = _selinux.delete_security_class_mapping
- __del__ = lambda self : None;
-security_class_mapping_swigregister = _selinux.security_class_mapping_swigregister
-security_class_mapping_swigregister(security_class_mapping)
-
-
-def selinux_set_mapping(*args):
- return _selinux.selinux_set_mapping(*args)
-selinux_set_mapping = _selinux.selinux_set_mapping
-
-def mode_to_security_class(*args):
- return _selinux.mode_to_security_class(*args)
-mode_to_security_class = _selinux.mode_to_security_class
-
-def string_to_security_class(*args):
- return _selinux.string_to_security_class(*args)
-string_to_security_class = _selinux.string_to_security_class
-
-def security_class_to_string(*args):
- return _selinux.security_class_to_string(*args)
-security_class_to_string = _selinux.security_class_to_string
-
-def security_av_perm_to_string(*args):
- return _selinux.security_av_perm_to_string(*args)
-security_av_perm_to_string = _selinux.security_av_perm_to_string
-
-def string_to_av_perm(*args):
- return _selinux.string_to_av_perm(*args)
-string_to_av_perm = _selinux.string_to_av_perm
-
-def security_av_string(*args):
- return _selinux.security_av_string(*args)
-security_av_string = _selinux.security_av_string
-
-def print_access_vector(*args):
- return _selinux.print_access_vector(*args)
-print_access_vector = _selinux.print_access_vector
-MATCHPATHCON_BASEONLY = _selinux.MATCHPATHCON_BASEONLY
-MATCHPATHCON_NOTRANS = _selinux.MATCHPATHCON_NOTRANS
-MATCHPATHCON_VALIDATE = _selinux.MATCHPATHCON_VALIDATE
-
-def set_matchpathcon_flags(*args):
- return _selinux.set_matchpathcon_flags(*args)
-set_matchpathcon_flags = _selinux.set_matchpathcon_flags
-
-def matchpathcon_init(*args):
- return _selinux.matchpathcon_init(*args)
-matchpathcon_init = _selinux.matchpathcon_init
-
-def matchpathcon_init_prefix(*args):
- return _selinux.matchpathcon_init_prefix(*args)
-matchpathcon_init_prefix = _selinux.matchpathcon_init_prefix
-
-def matchpathcon_fini():
- return _selinux.matchpathcon_fini()
-matchpathcon_fini = _selinux.matchpathcon_fini
-
-def realpath_not_final(*args):
- return _selinux.realpath_not_final(*args)
-realpath_not_final = _selinux.realpath_not_final
-
-def matchpathcon(*args):
- return _selinux.matchpathcon(*args)
-matchpathcon = _selinux.matchpathcon
-
-def matchpathcon_index(*args):
- return _selinux.matchpathcon_index(*args)
-matchpathcon_index = _selinux.matchpathcon_index
-
-def matchpathcon_filespec_add(*args):
- return _selinux.matchpathcon_filespec_add(*args)
-matchpathcon_filespec_add = _selinux.matchpathcon_filespec_add
-
-def matchpathcon_filespec_destroy():
- return _selinux.matchpathcon_filespec_destroy()
-matchpathcon_filespec_destroy = _selinux.matchpathcon_filespec_destroy
-
-def matchpathcon_filespec_eval():
- return _selinux.matchpathcon_filespec_eval()
-matchpathcon_filespec_eval = _selinux.matchpathcon_filespec_eval
-
-def matchpathcon_checkmatches(*args):
- return _selinux.matchpathcon_checkmatches(*args)
-matchpathcon_checkmatches = _selinux.matchpathcon_checkmatches
-
-def matchmediacon(*args):
- return _selinux.matchmediacon(*args)
-matchmediacon = _selinux.matchmediacon
-
-def selinux_getenforcemode():
- return _selinux.selinux_getenforcemode()
-selinux_getenforcemode = _selinux.selinux_getenforcemode
-
-def selinux_boolean_sub(*args):
- return _selinux.selinux_boolean_sub(*args)
-selinux_boolean_sub = _selinux.selinux_boolean_sub
-
-def selinux_getpolicytype():
- return _selinux.selinux_getpolicytype()
-selinux_getpolicytype = _selinux.selinux_getpolicytype
-
-def selinux_policy_root():
- return _selinux.selinux_policy_root()
-selinux_policy_root = _selinux.selinux_policy_root
-
-def selinux_set_policy_root(*args):
- return _selinux.selinux_set_policy_root(*args)
-selinux_set_policy_root = _selinux.selinux_set_policy_root
-
-def selinux_current_policy_path():
- return _selinux.selinux_current_policy_path()
-selinux_current_policy_path = _selinux.selinux_current_policy_path
-
-def selinux_binary_policy_path():
- return _selinux.selinux_binary_policy_path()
-selinux_binary_policy_path = _selinux.selinux_binary_policy_path
-
-def selinux_failsafe_context_path():
- return _selinux.selinux_failsafe_context_path()
-selinux_failsafe_context_path = _selinux.selinux_failsafe_context_path
-
-def selinux_removable_context_path():
- return _selinux.selinux_removable_context_path()
-selinux_removable_context_path = _selinux.selinux_removable_context_path
-
-def selinux_default_context_path():
- return _selinux.selinux_default_context_path()
-selinux_default_context_path = _selinux.selinux_default_context_path
-
-def selinux_user_contexts_path():
- return _selinux.selinux_user_contexts_path()
-selinux_user_contexts_path = _selinux.selinux_user_contexts_path
-
-def selinux_file_context_path():
- return _selinux.selinux_file_context_path()
-selinux_file_context_path = _selinux.selinux_file_context_path
-
-def selinux_file_context_homedir_path():
- return _selinux.selinux_file_context_homedir_path()
-selinux_file_context_homedir_path = _selinux.selinux_file_context_homedir_path
-
-def selinux_file_context_local_path():
- return _selinux.selinux_file_context_local_path()
-selinux_file_context_local_path = _selinux.selinux_file_context_local_path
-
-def selinux_file_context_subs_path():
- return _selinux.selinux_file_context_subs_path()
-selinux_file_context_subs_path = _selinux.selinux_file_context_subs_path
-
-def selinux_file_context_subs_dist_path():
- return _selinux.selinux_file_context_subs_dist_path()
-selinux_file_context_subs_dist_path = _selinux.selinux_file_context_subs_dist_path
-
-def selinux_homedir_context_path():
- return _selinux.selinux_homedir_context_path()
-selinux_homedir_context_path = _selinux.selinux_homedir_context_path
-
-def selinux_media_context_path():
- return _selinux.selinux_media_context_path()
-selinux_media_context_path = _selinux.selinux_media_context_path
-
-def selinux_virtual_domain_context_path():
- return _selinux.selinux_virtual_domain_context_path()
-selinux_virtual_domain_context_path = _selinux.selinux_virtual_domain_context_path
-
-def selinux_virtual_image_context_path():
- return _selinux.selinux_virtual_image_context_path()
-selinux_virtual_image_context_path = _selinux.selinux_virtual_image_context_path
-
-def selinux_lxc_contexts_path():
- return _selinux.selinux_lxc_contexts_path()
-selinux_lxc_contexts_path = _selinux.selinux_lxc_contexts_path
-
-def selinux_x_context_path():
- return _selinux.selinux_x_context_path()
-selinux_x_context_path = _selinux.selinux_x_context_path
-
-def selinux_sepgsql_context_path():
- return _selinux.selinux_sepgsql_context_path()
-selinux_sepgsql_context_path = _selinux.selinux_sepgsql_context_path
-
-def selinux_systemd_contexts_path():
- return _selinux.selinux_systemd_contexts_path()
-selinux_systemd_contexts_path = _selinux.selinux_systemd_contexts_path
-
-def selinux_contexts_path():
- return _selinux.selinux_contexts_path()
-selinux_contexts_path = _selinux.selinux_contexts_path
-
-def selinux_securetty_types_path():
- return _selinux.selinux_securetty_types_path()
-selinux_securetty_types_path = _selinux.selinux_securetty_types_path
-
-def selinux_booleans_subs_path():
- return _selinux.selinux_booleans_subs_path()
-selinux_booleans_subs_path = _selinux.selinux_booleans_subs_path
-
-def selinux_booleans_path():
- return _selinux.selinux_booleans_path()
-selinux_booleans_path = _selinux.selinux_booleans_path
-
-def selinux_customizable_types_path():
- return _selinux.selinux_customizable_types_path()
-selinux_customizable_types_path = _selinux.selinux_customizable_types_path
-
-def selinux_users_path():
- return _selinux.selinux_users_path()
-selinux_users_path = _selinux.selinux_users_path
-
-def selinux_usersconf_path():
- return _selinux.selinux_usersconf_path()
-selinux_usersconf_path = _selinux.selinux_usersconf_path
-
-def selinux_translations_path():
- return _selinux.selinux_translations_path()
-selinux_translations_path = _selinux.selinux_translations_path
-
-def selinux_colors_path():
- return _selinux.selinux_colors_path()
-selinux_colors_path = _selinux.selinux_colors_path
-
-def selinux_netfilter_context_path():
- return _selinux.selinux_netfilter_context_path()
-selinux_netfilter_context_path = _selinux.selinux_netfilter_context_path
-
-def selinux_path():
- return _selinux.selinux_path()
-selinux_path = _selinux.selinux_path
-
-def selinux_check_access(*args):
- return _selinux.selinux_check_access(*args)
-selinux_check_access = _selinux.selinux_check_access
-
-def selinux_check_passwd_access(*args):
- return _selinux.selinux_check_passwd_access(*args)
-selinux_check_passwd_access = _selinux.selinux_check_passwd_access
-
-def checkPasswdAccess(*args):
- return _selinux.checkPasswdAccess(*args)
-checkPasswdAccess = _selinux.checkPasswdAccess
-
-def selinux_check_securetty_context(*args):
- return _selinux.selinux_check_securetty_context(*args)
-selinux_check_securetty_context = _selinux.selinux_check_securetty_context
-
-def set_selinuxmnt(*args):
- return _selinux.set_selinuxmnt(*args)
-set_selinuxmnt = _selinux.set_selinuxmnt
-
-def selinuxfs_exists():
- return _selinux.selinuxfs_exists()
-selinuxfs_exists = _selinux.selinuxfs_exists
-
-def fini_selinuxmnt():
- return _selinux.fini_selinuxmnt()
-fini_selinuxmnt = _selinux.fini_selinuxmnt
-
-def setexecfilecon(*args):
- return _selinux.setexecfilecon(*args)
-setexecfilecon = _selinux.setexecfilecon
-
-def rpm_execcon(*args):
- return _selinux.rpm_execcon(*args)
-rpm_execcon = _selinux.rpm_execcon
-
-def is_context_customizable(*args):
- return _selinux.is_context_customizable(*args)
-is_context_customizable = _selinux.is_context_customizable
-
-def selinux_trans_to_raw_context(*args):
- return _selinux.selinux_trans_to_raw_context(*args)
-selinux_trans_to_raw_context = _selinux.selinux_trans_to_raw_context
-
-def selinux_raw_to_trans_context(*args):
- return _selinux.selinux_raw_to_trans_context(*args)
-selinux_raw_to_trans_context = _selinux.selinux_raw_to_trans_context
-
-def selinux_raw_context_to_color(*args):
- return _selinux.selinux_raw_context_to_color(*args)
-selinux_raw_context_to_color = _selinux.selinux_raw_context_to_color
-
-def getseuserbyname(*args):
- return _selinux.getseuserbyname(*args)
-getseuserbyname = _selinux.getseuserbyname
-
-def getseuser(*args):
- return _selinux.getseuser(*args)
-getseuser = _selinux.getseuser
-
-def selinux_file_context_cmp(*args):
- return _selinux.selinux_file_context_cmp(*args)
-selinux_file_context_cmp = _selinux.selinux_file_context_cmp
-
-def selinux_file_context_verify(*args):
- return _selinux.selinux_file_context_verify(*args)
-selinux_file_context_verify = _selinux.selinux_file_context_verify
-
-def selinux_lsetfilecon_default(*args):
- return _selinux.selinux_lsetfilecon_default(*args)
-selinux_lsetfilecon_default = _selinux.selinux_lsetfilecon_default
-
-def selinux_reset_config():
- return _selinux.selinux_reset_config()
-selinux_reset_config = _selinux.selinux_reset_config
-# This file is compatible with both classic and new-style classes.
-
-
diff --git a/prebuilts/lib/selinux/_selinux.so b/prebuilts/lib/selinux/_selinux.so
deleted file mode 100755
index f4a045f3..00000000
--- a/prebuilts/lib/selinux/_selinux.so
+++ /dev/null
Binary files differ
diff --git a/prebuilts/lib/selinux/audit2why.so b/prebuilts/lib/selinux/audit2why.so
deleted file mode 100755
index e7daaabc..00000000
--- a/prebuilts/lib/selinux/audit2why.so
+++ /dev/null
Binary files differ
diff --git a/prebuilts/lib/sepolgen/__init__.py b/prebuilts/lib/sepolgen/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/prebuilts/lib/sepolgen/__init__.py
+++ /dev/null
diff --git a/prebuilts/lib/sepolgen/access.py b/prebuilts/lib/sepolgen/access.py
deleted file mode 100644
index cf132104..00000000
--- a/prebuilts/lib/sepolgen/access.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-Classes representing basic access.
-
-SELinux - at the most basic level - represents access as
-the 4-tuple subject (type or context), target (type or context),
-object class, permission. The policy language elaborates this basic
-access to facilitate more concise rules (e.g., allow rules can have multiple
-source or target types - see refpolicy for more information).
-
-This module has objects for representing the most basic access (AccessVector)
-and sets of that access (AccessVectorSet). These objects are used in Madison
-in a variety of ways, but they are the fundamental representation of access.
-"""
-
-import refpolicy
-from selinux import audit2why
-
-def is_idparam(id):
- """Determine if an id is a parameter in the form $N, where N is
- an integer.
-
- Returns:
- True if the id is a parameter
- False if the id is not a parameter
- """
- if len(id) > 1 and id[0] == '$':
- try:
- int(id[1:])
- except ValueError:
- return False
- return True
- else:
- return False
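# Editorial sketch, not part of the deleted file: a quick illustration of
# is_idparam. The identifiers below are made up for the example.
assert is_idparam("$1")            # "$" followed by an integer
assert is_idparam("$12")
assert not is_idparam("httpd_t")   # ordinary type name
assert not is_idparam("$foo")      # "$" not followed by an integer
assert not is_idparam("$")         # too short to be a parameter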
-
-class AccessVector:
- """
- An access vector is the basic unit of access in SELinux.
-
- Access vectors are the most basic representation of access within
- SELinux. It represents the access a source type has to a target
- type in terms of an object class and a set of permissions.
-
- Access vectors are distinct from AVRules in that they can only
- store a single source type, target type, and object class. The
- simplicity of AccessVectors makes them useful for storing access
- in a form that is easy to search and compare.
-
- The source, target, and object class are stored as strings. No checking
- is done to verify that the strings are valid SELinux identifiers.
- Identifiers in the form $N (where N is an integer) are reserved as
- interface parameters and are treated as wild cards in many
- circumstances.
-
- Properties:
- .src_type - The source type allowed access. [String or None]
- .tgt_type - The target type to which access is allowed. [String or None]
- .obj_class - The object class to which access is allowed. [String or None]
- .perms - The permissions allowed to the object class. [IdSet]
- .audit_msgs - The audit messages that generated this access vector [List of strings]
- """
- def __init__(self, init_list=None):
- if init_list:
- self.from_list(init_list)
- else:
- self.src_type = None
- self.tgt_type = None
- self.obj_class = None
- self.perms = refpolicy.IdSet()
- self.audit_msgs = []
- self.type = audit2why.TERULE
- self.data = []
-
- # The direction of the information flow represented by this
- # access vector - used for matching
- self.info_flow_dir = None
-
- def from_list(self, list):
- """Initialize an access vector from a list.
-
- Initialize an access vector from a list treating the list as
- positional arguments - i.e., 0 = src_type, 1 = tgt_type, etc.
- All of the list elements 3 and greater are treated as perms.
- For example, the list ['foo_t', 'bar_t', 'file', 'read', 'write']
- would create an access vector list with the source type 'foo_t',
- target type 'bar_t', object class 'file', and permissions 'read'
- and 'write'.
-
- This format is useful for very simple storage to strings or disc
- (see to_list) and for initializing access vectors.
- """
- if len(list) < 4:
- raise ValueError("List must contain at least four elements %s" % str(list))
- self.src_type = list[0]
- self.tgt_type = list[1]
- self.obj_class = list[2]
- self.perms = refpolicy.IdSet(list[3:])
-
- def to_list(self):
- """
- Convert an access vector to a list.
-
- Convert an access vector to a list treating the list as positional
- values. See from_list for more information on how an access vector
- is represented in a list.
- """
- l = [self.src_type, self.tgt_type, self.obj_class]
- l.extend(self.perms)
- return l
-
- def __str__(self):
- return self.to_string()
-
- def to_string(self):
- return "allow %s %s:%s %s;" % (self.src_type, self.tgt_type,
- self.obj_class, self.perms.to_space_str())
-
- def __cmp__(self, other):
- if self.src_type != other.src_type:
- return cmp(self.src_type, other.src_type)
- if self.tgt_type != other.tgt_type:
- return cmp(self.tgt_type, other.tgt_type)
- if self.obj_class != other.obj_class:
- return cmp(self.obj_class, other.obj_class)
- if len(self.perms) != len(other.perms):
- return cmp(len(self.perms), len(other.perms))
- x = list(self.perms)
- x.sort()
- y = list(other.perms)
- y.sort()
- for pa, pb in zip(x, y):
- if pa != pb:
- return cmp(pa, pb)
- return 0
-
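# Editorial sketch, not part of the deleted file: building an AccessVector
# from the positional-list form that from_list describes, reusing the type
# names from that docstring's example.
av = AccessVector(["foo_t", "bar_t", "file", "read", "write"])
print av.to_string()   # roughly: allow foo_t bar_t:file { read write };
print av.to_list()     # ['foo_t', 'bar_t', 'file', ...] - perm order may vary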
-def avrule_to_access_vectors(avrule):
- """Convert an avrule into a list of access vectors.
-
- AccessVectors and AVRules are similar, but differ in that
- an AVRule can have more than one source type, target type, and
- object class. This function expands a single avrule into a
- list of one or more AccessVectors representing the access
- defined in the AVRule.
-
-
- """
- if isinstance(avrule, AccessVector):
- return [avrule]
- a = []
- for src_type in avrule.src_types:
- for tgt_type in avrule.tgt_types:
- for obj_class in avrule.obj_classes:
- access = AccessVector()
- access.src_type = src_type
- access.tgt_type = tgt_type
- access.obj_class = obj_class
- access.perms = avrule.perms.copy()
- a.append(access)
- return a
-
-class AccessVectorSet:
- """A non-overlapping set of access vectors.
-
- An AccessVectorSet is designed to store one or more access vectors
- that are non-overlapping. Access can be added to the set
- incrementally and access vectors will be added or merged as
- necessary. For example, adding the following access vectors using
- add_av:
- allow $1 etc_t : read;
- allow $1 etc_t : write;
- allow $1 var_log_t : read;
- Would result in an access vector set with the access vectors:
- allow $1 etc_t : { read write};
- allow $1 var_log_t : read;
- """
- def __init__(self):
- """Initialize an access vector set.
- """
- self.src = {}
- # The information flow direction of this access vector
- # set - see objectmodel.py for more information. This
- # is stored here to speed up searching - see matching.py.
- self.info_dir = None
-
- def __iter__(self):
- """Iterate over all of the unique access vectors in the set."""
- for tgts in self.src.values():
- for objs in tgts.values():
- for av in objs.values():
- yield av
-
- def __len__(self):
- """Return the number of unique access vectors in the set.
-
- Because of the internal representation of the access vector set,
- __len__ is not a constant time operation. Worst case is O(N)
- where N is the number of unique access vectors, but the common
- case is probably better.
- """
- l = 0
- for tgts in self.src.values():
- for objs in tgts.values():
- l += len(objs)
- return l
-
- def to_list(self):
- """Return the unique access vectors in the set as a list.
-
- The format of the returned list is a set of nested lists,
- each access vector represented by a list. This format is
- designed to be simply serializable to a file.
-
- For example, consider an access vector set with the following
- access vectors:
- allow $1 user_t : file read;
- allow $1 etc_t : file { read write};
- to_list would return the following:
- [[$1, user_t, file, read]
- [$1, etc_t, file, read, write]]
-
- See AccessVector.to_list for more information.
- """
- l = []
- for av in self:
- l.append(av.to_list())
-
- return l
-
- def from_list(self, l):
- """Add access vectors stored in a list.
-
- See to_list for more information on the list format that this
- method accepts.
-
- This will add all of the access from the list. Any existing
- access vectors in the set will be retained.
- """
- for av in l:
- self.add_av(AccessVector(av))
-
- def add(self, src_type, tgt_type, obj_class, perms, audit_msg=None, avc_type=audit2why.TERULE, data=[]):
- """Add an access vector to the set.
- """
- tgt = self.src.setdefault(src_type, { })
- cls = tgt.setdefault(tgt_type, { })
-
- if cls.has_key((obj_class, avc_type)):
- access = cls[obj_class, avc_type]
- else:
- access = AccessVector()
- access.src_type = src_type
- access.tgt_type = tgt_type
- access.obj_class = obj_class
- access.data = data
- access.type = avc_type
- cls[obj_class, avc_type] = access
-
- access.perms.update(perms)
- if audit_msg:
- access.audit_msgs.append(audit_msg)
-
- def add_av(self, av, audit_msg=None):
- """Add an access vector to the set."""
- self.add(av.src_type, av.tgt_type, av.obj_class, av.perms, audit_msg)
-
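# Editorial sketch, not part of the deleted file: the merging behaviour
# described in the AccessVectorSet docstring, with an explicit object class
# added since add() requires one.
s = AccessVectorSet()
s.add("$1", "etc_t", "file", ["read"])
s.add("$1", "etc_t", "file", ["write"])
s.add("$1", "var_log_t", "file", ["read"])
print len(s)           # 2 - the two etc_t entries are merged
for av in s:
    print av.to_string()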
-
-def avs_extract_types(avs):
- types = refpolicy.IdSet()
- for av in avs:
- types.add(av.src_type)
- types.add(av.tgt_type)
-
- return types
-
-def avs_extract_obj_perms(avs):
- perms = { }
- for av in avs:
- if perms.has_key(av.obj_class):
- s = perms[av.obj_class]
- else:
- s = refpolicy.IdSet()
- perms[av.obj_class] = s
- s.update(av.perms)
- return perms
-
-class RoleTypeSet:
- """A non-overlapping set of role type statements.
-
- This class allows the incremental addition of role type statements and
- maintains a non-overlapping list of statements.
- """
- def __init__(self):
- """Initialize an access vector set."""
- self.role_types = {}
-
- def __iter__(self):
- """Iterate over all of the unique role allow statements in the set."""
- for role_type in self.role_types.values():
- yield role_type
-
- def __len__(self):
- """Return the number of unique role allow statements."""
- return len(self.role_types.keys())
-
- def add(self, role, type):
- if self.role_types.has_key(role):
- role_type = self.role_types[role]
- else:
- role_type = refpolicy.RoleType()
- role_type.role = role
- self.role_types[role] = role_type
-
- role_type.types.add(type)
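# Editorial sketch, not part of the deleted file: RoleTypeSet merges the
# types added for a given role. The role and type names are illustrative.
roles = RoleTypeSet()
roles.add("staff_r", "staff_t")
roles.add("staff_r", "user_t")
print len(roles)       # 1 - both types collected under the single staff_r entry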
diff --git a/prebuilts/lib/sepolgen/audit.py b/prebuilts/lib/sepolgen/audit.py
deleted file mode 100644
index 56919be7..00000000
--- a/prebuilts/lib/sepolgen/audit.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-import refpolicy
-import access
-import re
-import sys
-
-# Convenience functions
-
-def get_audit_boot_msgs():
- """Obtain all of the avc and policy load messages from the audit
- log. This function uses ausearch and requires that the current
- process have sufficient rights to run ausearch.
-
- Returns:
- string containing all of the audit messages returned by ausearch.
- """
- import subprocess
- import time
- fd=open("/proc/uptime", "r")
- off=float(fd.read().split()[0])
- fd.close()
- s = time.localtime(time.time() - off)
- bootdate = time.strftime("%x", s)
- boottime = time.strftime("%X", s)
- output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR", "-ts", bootdate, boottime],
- stdout=subprocess.PIPE).communicate()[0]
- return output
-
-def get_audit_msgs():
- """Obtain all of the avc and policy load messages from the audit
- log. This function uses ausearch and requires that the current
- process have sufficient rights to run ausearch.
-
- Returns:
- string containing all of the audit messages returned by ausearch.
- """
- import subprocess
- output = subprocess.Popen(["/sbin/ausearch", "-m", "AVC,USER_AVC,MAC_POLICY_LOAD,DAEMON_START,SELINUX_ERR"],
- stdout=subprocess.PIPE).communicate()[0]
- return output
-
-def get_dmesg_msgs():
- """Obtain all of the avc and policy load messages from /bin/dmesg.
-
- Returns:
- string containing all of the audit messages returned by dmesg.
- """
- import subprocess
- output = subprocess.Popen(["/bin/dmesg"],
- stdout=subprocess.PIPE).communicate()[0]
- return output
-
-# Classes representing audit messages
-
-class AuditMessage:
- """Base class for all objects representing audit messages.
-
- AuditMessage is a base class for all audit messages and only
- provides storage for the raw message (as a string) and a
- parsing function that does nothing.
- """
- def __init__(self, message):
- self.message = message
- self.header = ""
-
- def from_split_string(self, recs):
- """Parse a string that has been split into records by space into
- an audit message.
-
- This method should be overridden by subclasses. Error reporting
- should be done by raising ValueError exceptions.
- """
- for msg in recs:
- fields = msg.split("=")
- if len(fields) != 2:
- if msg[:6] == "audit(":
- self.header = msg
- return
- else:
- continue
-
- if fields[0] == "msg":
- self.header = fields[1]
- return
-
-
-class InvalidMessage(AuditMessage):
- """Class representing invalid audit messages. This is used to differentiate
- between audit messages that aren't recognized (that should return None from
- the audit message parser) and a message that is recognized but is malformed
- in some way.
- """
- def __init__(self, message):
- AuditMessage.__init__(self, message)
-
-class PathMessage(AuditMessage):
- """Class representing a path message"""
- def __init__(self, message):
- AuditMessage.__init__(self, message)
- self.path = ""
-
- def from_split_string(self, recs):
- AuditMessage.from_split_string(self, recs)
-
- for msg in recs:
- fields = msg.split("=")
- if len(fields) != 2:
- continue
- if fields[0] == "path":
- self.path = fields[1][1:-1]
- return
-import selinux.audit2why as audit2why
-
-avcdict = {}
-
-class AVCMessage(AuditMessage):
- """AVC message representing an access denial or granted message.
-
- This is a very basic class and does not represent all possible fields
- in an avc message. Currently the fields are:
- scontext - context for the source (process) that generated the message
- tcontext - context for the target
- tclass - object class for the target (only one)
- comm - the process name
- exe - the on-disc binary
- path - the path of the target
- access - list of accesses that were allowed or denied
- denial - boolean indicating whether this was a denial (True) or granted
- (False) message.
-
- An example audit message generated from the audit daemon looks like (line breaks
- added):
- 'type=AVC msg=audit(1155568085.407:10877): avc: denied { search } for
- pid=677 comm="python" name="modules" dev=dm-0 ino=13716388
- scontext=user_u:system_r:setroubleshootd_t:s0
- tcontext=system_u:object_r:modules_object_t:s0 tclass=dir'
-
- An example audit message stored in syslog (not processed by the audit daemon - line
- breaks added):
- 'Sep 12 08:26:43 dhcp83-5 kernel: audit(1158064002.046:4): avc: denied { read }
- for pid=2496 comm="bluez-pin" name=".gdm1K3IFT" dev=dm-0 ino=3601333
- scontext=user_u:system_r:bluetooth_helper_t:s0-s0:c0
- tcontext=system_u:object_r:xdm_tmp_t:s0 tclass=file
- """
- def __init__(self, message):
- AuditMessage.__init__(self, message)
- self.scontext = refpolicy.SecurityContext()
- self.tcontext = refpolicy.SecurityContext()
- self.tclass = ""
- self.comm = ""
- self.exe = ""
- self.path = ""
- self.name = ""
- self.accesses = []
- self.denial = True
- self.type = audit2why.TERULE
-
- def __parse_access(self, recs, start):
- # This is kind of sucky - the access is in a space separated
- # list like '{ read write }', which doesn't fit particularly well with splitting
- # the string on spaces. This function takes the list of recs and a starting
- # position one beyond the open brace. It then adds the accesses until it finds
- # the close brace or the end of the list (which is an error if reached without
- # seeing a close brace).
- found_close = False
- i = start
- if i == (len(recs) - 1):
- raise ValueError("AVC message in invalid format [%s]\n" % self.message)
- while i < len(recs):
- if recs[i] == "}":
- found_close = True
- break
- self.accesses.append(recs[i])
- i = i + 1
- if not found_close:
- raise ValueError("AVC message in invalid format [%s]\n" % self.message)
- return i + 1
-
-
- def from_split_string(self, recs):
- AuditMessage.from_split_string(self, recs)
- # FUTURE - fully parse avc messages and store all possible fields
- # Required fields
- found_src = False
- found_tgt = False
- found_class = False
- found_access = False
-
- for i in range(len(recs)):
- if recs[i] == "{":
- i = self.__parse_access(recs, i + 1)
- found_access = True
- continue
- elif recs[i] == "granted":
- self.denial = False
-
- fields = recs[i].split("=")
- if len(fields) != 2:
- continue
- if fields[0] == "scontext":
- self.scontext = refpolicy.SecurityContext(fields[1])
- found_src = True
- elif fields[0] == "tcontext":
- self.tcontext = refpolicy.SecurityContext(fields[1])
- found_tgt = True
- elif fields[0] == "tclass":
- self.tclass = fields[1]
- found_class = True
- elif fields[0] == "comm":
- self.comm = fields[1][1:-1]
- elif fields[0] == "exe":
- self.exe = fields[1][1:-1]
- elif fields[0] == "name":
- self.name = fields[1][1:-1]
-
- if not found_src or not found_tgt or not found_class or not found_access:
- raise ValueError("AVC message in invalid format [%s]\n" % self.message)
- self.analyze()
-
- def analyze(self):
- tcontext = self.tcontext.to_string()
- scontext = self.scontext.to_string()
- access_tuple = tuple( self.accesses)
- self.data = []
-
- if (scontext, tcontext, self.tclass, access_tuple) in avcdict.keys():
- self.type, self.data = avcdict[(scontext, tcontext, self.tclass, access_tuple)]
- else:
- self.type, self.data = audit2why.analyze(scontext, tcontext, self.tclass, self.accesses);
- if self.type == audit2why.NOPOLICY:
- self.type = audit2why.TERULE
- if self.type == audit2why.BADTCON:
- raise ValueError("Invalid Target Context %s\n" % tcontext)
- if self.type == audit2why.BADSCON:
- raise ValueError("Invalid Source Context %s\n" % scontext)
- if self.type == audit2why.BADTCLASS:
- raise ValueError("Invalid Type Class %s\n" % self.tclass)
- if self.type == audit2why.BADPERM:
- raise ValueError("Invalid permission %s\n" % " ".join(self.accesses))
- if self.type == audit2why.BADCOMPUTE:
- raise ValueError("Error during access vector computation")
-
- if self.type == audit2why.CONSTRAINT:
- self.data = [ self.data ]
- if self.scontext.user != self.tcontext.user:
- self.data.append(("user (%s)" % self.scontext.user, 'user (%s)' % self.tcontext.user))
- if self.scontext.role != self.tcontext.role and self.tcontext.role != "object_r":
- self.data.append(("role (%s)" % self.scontext.role, 'role (%s)' % self.tcontext.role))
- if self.scontext.level != self.tcontext.level:
- self.data.append(("level (%s)" % self.scontext.level, 'level (%s)' % self.tcontext.level))
-
- avcdict[(scontext, tcontext, self.tclass, access_tuple)] = (self.type, self.data)
-
-class PolicyLoadMessage(AuditMessage):
- """Audit message indicating that the policy was reloaded."""
- def __init__(self, message):
- AuditMessage.__init__(self, message)
-
-class DaemonStartMessage(AuditMessage):
- """Audit message indicating that a daemon was started."""
- def __init__(self, message):
- AuditMessage.__init__(self, message)
- self.auditd = False
-
- def from_split_string(self, recs):
- AuditMessage.from_split_string(self, recs)
- if "auditd" in recs:
- self.auditd = True
-
-
-class ComputeSidMessage(AuditMessage):
- """Audit message indicating that a sid was not valid.
-
- Compute sid messages are generated on attempting to create a security
- context that is not valid. Security contexts are invalid if the role is
- not authorized for the user or the type is not authorized for the role.
-
- This class does not store all of the fields from the compute sid message -
- just the type and role.
- """
- def __init__(self, message):
- AuditMessage.__init__(self, message)
- self.invalid_context = refpolicy.SecurityContext()
- self.scontext = refpolicy.SecurityContext()
- self.tcontext = refpolicy.SecurityContext()
- self.tclass = ""
-
- def from_split_string(self, recs):
- AuditMessage.from_split_string(self, recs)
- if len(recs) < 10:
- raise ValueError("Split string does not represent a valid compute sid message")
-
- try:
- self.invalid_context = refpolicy.SecurityContext(recs[5])
- self.scontext = refpolicy.SecurityContext(recs[7].split("=")[1])
- self.tcontext = refpolicy.SecurityContext(recs[8].split("=")[1])
- self.tclass = recs[9].split("=")[1]
- except:
- raise ValueError("Split string does not represent a valid compute sid message")
- def output(self):
- return "role %s types %s;\n" % (self.invalid_context.role, self.invalid_context.type)
-
-# Parser for audit messages
-
-class AuditParser:
- """Parser for audit messages.
-
- This class parses audit messages and stores them according to their message
- type. This is not a general purpose audit message parser - it only extracts
- selinux related messages.
-
- Each audit message is stored in one of four lists:
- avc_msgs - avc denial or granted messages. Messages are stored in
- AVCMessage objects.
- compute_sid_msgs - invalid sid messages. Messages are stored in
- ComputeSidMessage objects.
- invalid_msgs - selinux related messages that are not valid. Messages
- are stored in InvalidMessage objects.
- policy_load_msgs - policy load messages. Messages are stored in
- PolicyLoadMessage objects.
-
- These lists will be reset when a policy load message is seen if
- AuditParser.last_load_only is set to true. It is assumed that messages
- are fed to the parser in chronological order - time stamps are not
- parsed.
- """
- def __init__(self, last_load_only=False):
- self.__initialize()
- self.last_load_only = last_load_only
-
- def __initialize(self):
- self.avc_msgs = []
- self.compute_sid_msgs = []
- self.invalid_msgs = []
- self.policy_load_msgs = []
- self.path_msgs = []
- self.by_header = { }
- self.check_input_file = False
-
- # Low-level parsing function - tries to determine if this audit
- # message is an SELinux related message and then parses it into
- # the appropriate AuditMessage subclass. This function deliberately
- # does not impose policy (e.g., on policy load message) or store
- # messages, to make it as simple and reusable as possible.
- #
- # Return values:
- # None - no recognized audit message found in this line
- #
- # InvalidMessage - a recognized but invalid message was found.
- #
- # AuditMessage (or subclass) - object representing a parsed
- # and valid audit message.
- def __parse_line(self, line):
- rec = line.split()
- for i in rec:
- found = False
- if i == "avc:" or i == "message=avc:" or i == "msg='avc:":
- msg = AVCMessage(line)
- found = True
- elif i == "security_compute_sid:":
- msg = ComputeSidMessage(line)
- found = True
- elif i == "type=MAC_POLICY_LOAD" or i == "type=1403":
- msg = PolicyLoadMessage(line)
- found = True
- elif i == "type=AVC_PATH":
- msg = PathMessage(line)
- found = True
- elif i == "type=DAEMON_START":
- msg = DaemonStartMessage(line)
- found = True
-
- if found:
- self.check_input_file = True
- try:
- msg.from_split_string(rec)
- except ValueError:
- msg = InvalidMessage(line)
- return msg
- return None
-
- # Higher-level parse function - take a line, parse it into an
- # AuditMessage object, and store it in the appropriate list.
- # This function will optionally reset all of the lists when
- # it sees a load policy message depending on the value of
- # self.last_load_only.
- def __parse(self, line):
- msg = self.__parse_line(line)
- if msg is None:
- return
-
- # Append to the correct list
- if isinstance(msg, PolicyLoadMessage):
- if self.last_load_only:
- self.__initialize()
- elif isinstance(msg, DaemonStartMessage):
- # We initialize every time the auditd is started. This
- # is less than ideal, but unfortunately it is the only
- # way to catch reboots since the initial policy load
- # by init is not stored in the audit log.
- if msg.auditd and self.last_load_only:
- self.__initialize()
- self.policy_load_msgs.append(msg)
- elif isinstance(msg, AVCMessage):
- self.avc_msgs.append(msg)
- elif isinstance(msg, ComputeSidMessage):
- self.compute_sid_msgs.append(msg)
- elif isinstance(msg, InvalidMessage):
- self.invalid_msgs.append(msg)
- elif isinstance(msg, PathMessage):
- self.path_msgs.append(msg)
-
- # Group by audit header
- if msg.header != "":
- if self.by_header.has_key(msg.header):
- self.by_header[msg.header].append(msg)
- else:
- self.by_header[msg.header] = [msg]
-
-
- # Post processing will add additional information from AVC messages
- # from related messages - only works on messages generated by
- # the audit system.
- def __post_process(self):
- for value in self.by_header.values():
- avc = []
- path = None
- for msg in value:
- if isinstance(msg, PathMessage):
- path = msg
- elif isinstance(msg, AVCMessage):
- avc.append(msg)
- if len(avc) > 0 and path:
- for a in avc:
- a.path = path.path
-
- def parse_file(self, input):
- """Parse the contents of a file object. This method can be called
- multiple times (along with parse_string)."""
- line = input.readline()
- while line:
- self.__parse(line)
- line = input.readline()
- if not self.check_input_file:
- sys.stderr.write("Nothing to do\n")
- sys.exit(0)
- self.__post_process()
-
- def parse_string(self, input):
- """Parse a string containing audit messages - messages should
- be separated by new lines. This method can be called multiple
- times (along with parse_file)."""
- lines = input.split('\n')
- for l in lines:
- self.__parse(l)
- self.__post_process()
-
- def to_role(self, role_filter=None):
- """Return role type statements matching the specified filter.
-
- Only messages matching the optional filter are included; with no
- filter, all roles are returned.
-
- Params:
- role_filter - [optional] Filter object used to filter the
- output.
- Returns:
- RoleTypeSet representing the roles and types from the invalid
- contexts in the audit logs parsed by this object.
- """
- role_types = access.RoleTypeSet()
- for cs in self.compute_sid_msgs:
- if not role_filter or role_filter.filter(cs):
- role_types.add(cs.invalid_context.role, cs.invalid_context.type)
-
- return role_types
-
- def to_access(self, avc_filter=None, only_denials=True):
- """Convert the audit logs' access into an access vector set.
-
- Convert the audit logs into an access vector set, optionally
- filtering the results with the passed in filter object.
-
- Filter objects are object instances with a .filter method
- that takes an access vector and returns True if the message
- should be included in the final output and False otherwise.
-
- Params:
- avc_filter - [optional] Filter object used to filter the
- output.
- Returns:
- Access vector set representing the denied access in the
- audit logs parsed by this object.
- """
- av_set = access.AccessVectorSet()
- for avc in self.avc_msgs:
- if avc.denial != True and only_denials:
- continue
- if avc_filter:
- if avc_filter.filter(avc):
- av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,
- avc.accesses, avc, avc_type=avc.type, data=avc.data)
- else:
- av_set.add(avc.scontext.type, avc.tcontext.type, avc.tclass,
- avc.accesses, avc, avc_type=avc.type, data=avc.data)
- return av_set
-
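# Editorial sketch, not part of the deleted file: feeding the denial from
# the AVCMessage docstring through AuditParser. AVCMessage.analyze() asks
# audit2why about the denial, so an initialized policy is assumed here.
parser = AuditParser()
parser.parse_string('type=AVC msg=audit(1155568085.407:10877): avc: denied { search } for '
                    'pid=677 comm="python" name="modules" dev=dm-0 ino=13716388 '
                    'scontext=user_u:system_r:setroubleshootd_t:s0 '
                    'tcontext=system_u:object_r:modules_object_t:s0 tclass=dir')
for av in parser.to_access():
    print av.to_string()   # roughly: allow setroubleshootd_t modules_object_t:dir { search };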
-class AVCTypeFilter:
- def __init__(self, regex):
- self.regex = re.compile(regex)
-
- def filter(self, avc):
- if self.regex.match(avc.scontext.type):
- return True
- if self.regex.match(avc.tcontext.type):
- return True
- return False
-
-class ComputeSidTypeFilter:
- def __init__(self, regex):
- self.regex = re.compile(regex)
-
- def filter(self, avc):
- if self.regex.match(avc.invalid_context.type):
- return True
- if self.regex.match(avc.scontext.type):
- return True
- if self.regex.match(avc.tcontext.type):
- return True
- return False
-
-
diff --git a/prebuilts/lib/sepolgen/classperms.py b/prebuilts/lib/sepolgen/classperms.py
deleted file mode 100644
index c925dee1..00000000
--- a/prebuilts/lib/sepolgen/classperms.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-import sys
-
-tokens = ('DEFINE',
- 'NAME',
- 'TICK',
- 'SQUOTE',
- 'OBRACE',
- 'CBRACE',
- 'SEMI',
- 'OPAREN',
- 'CPAREN',
- 'COMMA')
-
-reserved = {
- 'define' : 'DEFINE' }
-
-t_TICK = r'\`'
-t_SQUOTE = r'\''
-t_OBRACE = r'\{'
-t_CBRACE = r'\}'
-t_SEMI = r'\;'
-t_OPAREN = r'\('
-t_CPAREN = r'\)'
-t_COMMA = r'\,'
-
-t_ignore = " \t\n"
-
-def t_NAME(t):
- r'[a-zA-Z_][a-zA-Z0-9_]*'
- t.type = reserved.get(t.value,'NAME')
- return t
-
-def t_error(t):
- print "Illegal character '%s'" % t.value[0]
- t.skip(1)
-
-import lex
-lex.lex()
-
-def p_statements(p):
- '''statements : define_stmt
- | define_stmt statements
- '''
- if len(p) == 2:
- p[0] = [p[1]]
- else:
- p[0] = [p[1]] + p[2]
-
-def p_define_stmt(p):
- # This sucks - corresponds to 'define(`foo',`{ read write }')
- '''define_stmt : DEFINE OPAREN TICK NAME SQUOTE COMMA TICK list SQUOTE CPAREN
- '''
-
- p[0] = [p[4], p[8]]
-
-def p_list(p):
- '''list : NAME
- | OBRACE names CBRACE
- '''
- if p[1] == "{":
- p[0] = p[2]
- else:
- p[0] = [p[1]]
-
-def p_names(p):
- '''names : NAME
- | NAME names
- '''
- if len(p) == 2:
- p[0] = [p[1]]
- else:
- p[0] = [p[1]] + p[2]
-
-def p_error(p):
- print "Syntax error on line %d %s [type=%s]" % (p.lineno, p.value, p.type)
-
-import yacc
-yacc.yacc()
-
-
-f = open("all_perms.spt")
-txt = f.read()
-f.close()
-
-#lex.input(txt)
-#while 1:
-# tok = lex.token()
-# if not tok:
-# break
-# print tok
-
-test = "define(`foo',`{ read write append }')"
-test2 = """define(`all_filesystem_perms',`{ mount remount unmount getattr relabelfrom relabelto transition associate quotamod quotaget }')
-define(`all_security_perms',`{ compute_av compute_create compute_member check_context load_policy compute_relabel compute_user setenforce setbool setsecparam setcheckreqprot }')
-"""
-result = yacc.parse(txt)
-print result
-
diff --git a/prebuilts/lib/sepolgen/defaults.py b/prebuilts/lib/sepolgen/defaults.py
deleted file mode 100644
index 218bc7c4..00000000
--- a/prebuilts/lib/sepolgen/defaults.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-import os
-import re
-
-# Select the correct location for the development files based on a
-# path variable (optionally read from a configuration file)
-class PathChoooser(object):
- def __init__(self, pathname):
- self.config = dict()
- if not os.path.exists(pathname):
- self.config_pathname = "(defaults)"
- self.config["SELINUX_DEVEL_PATH"] = "/usr/share/selinux/default:/usr/share/selinux/mls:/usr/share/selinux/devel"
- return
- self.config_pathname = pathname
- ignore = re.compile(r"^\s*(?:#.+)?$")
- consider = re.compile(r"^\s*(\w+)\s*=\s*(.+?)\s*$")
- for lineno, line in enumerate(open(pathname)):
- if ignore.match(line): continue
- mo = consider.match(line)
- if not mo:
- raise ValueError, "%s:%d: line is not in key = value format" % (pathname, lineno+1)
- self.config[mo.group(1)] = mo.group(2)
-
- # We're only exporting one useful function, so why not be a function
- def __call__(self, testfilename, pathset="SELINUX_DEVEL_PATH"):
- paths = self.config.get(pathset, None)
- if paths is None:
- raise ValueError, "%s was not in %s" % (pathset, self.config_pathname)
- paths = paths.split(":")
- for p in paths:
- target = os.path.join(p, testfilename)
- if os.path.exists(target): return target
- return os.path.join(paths[0], testfilename)
-
-
-"""
-Various default settings, including file and directory locations.
-"""
-
-def data_dir():
- return "/var/lib/sepolgen"
-
-def perm_map():
- return data_dir() + "/perm_map"
-
-def interface_info():
- return data_dir() + "/interface_info"
-
-def attribute_info():
- return data_dir() + "/attribute_info"
-
-def refpolicy_makefile():
- chooser = PathChoooser("/etc/selinux/sepolgen.conf")
- return chooser("Makefile")
-
-def headers():
- chooser = PathChoooser("/etc/selinux/sepolgen.conf")
- return chooser("include")
-
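# Editorial sketch, not part of the deleted file: the defaults helpers
# either return fixed locations under /var/lib/sepolgen or resolve paths
# through PathChoooser and /etc/selinux/sepolgen.conf; the second output
# below depends on which SELINUX_DEVEL_PATH entry exists on the system.
print perm_map()             # /var/lib/sepolgen/perm_map
print refpolicy_makefile()   # e.g. /usr/share/selinux/devel/Makefile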
diff --git a/prebuilts/lib/sepolgen/interfaces.py b/prebuilts/lib/sepolgen/interfaces.py
deleted file mode 100644
index 88a6dc30..00000000
--- a/prebuilts/lib/sepolgen/interfaces.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-Classes for representing and manipulating interfaces.
-"""
-
-import access
-import refpolicy
-import itertools
-import objectmodel
-import matching
-
-from sepolgeni18n import _
-
-import copy
-
-class Param:
- """
- Object representing a parameter for an interface.
- """
- def __init__(self):
- self.__name = ""
- self.type = refpolicy.SRC_TYPE
- self.obj_classes = refpolicy.IdSet()
- self.required = True
-
- def set_name(self, name):
- if not access.is_idparam(name):
- raise ValueError("Name [%s] is not a param" % name)
- self.__name = name
-
- def get_name(self):
- return self.__name
-
- name = property(get_name, set_name)
-
- num = property(fget=lambda self: int(self.name[1:]))
-
- def __repr__(self):
- return "<sepolgen.policygen.Param instance [%s, %s, %s]>" % \
- (self.name, refpolicy.field_to_str[self.type], " ".join(self.obj_classes))
-
-
-# Helper for extract perms
-def __param_insert(name, type, av, params):
- ret = 0
- if name in params:
- p = params[name]
- # The entries are identical - we're done
- if type == p.type:
- return
- # Handle implicitly typed objects (like process)
- if (type == refpolicy.SRC_TYPE or type == refpolicy.TGT_TYPE) and \
- (p.type == refpolicy.TGT_TYPE or p.type == refpolicy.SRC_TYPE):
- #print name, refpolicy.field_to_str[p.type]
- # If the object is not implicitly typed, tell the
- # caller there is a likely conflict.
- ret = 1
- if av:
- avobjs = [av.obj_class]
- else:
- avobjs = []
- for obj in itertools.chain(p.obj_classes, avobjs):
- if obj in objectmodel.implicitly_typed_objects:
- ret = 0
- break
- # "Promote" to a SRC_TYPE as this is the likely usage.
- # We do this even if the above test fails on purpose
- # as there is really no sane way to resolve the conflict
- # here. The caller can take other actions if needed.
- p.type = refpolicy.SRC_TYPE
- else:
- # There is some conflict - no way to resolve it really
- # so we just leave the first entry and tell the caller
- # there was a conflict.
- ret = 1
- else:
- p = Param()
- p.name = name
- p.type = type
- params[p.name] = p
-
- if av:
- p.obj_classes.add(av.obj_class)
- return ret
-
-
-
-def av_extract_params(av, params):
- """Extract the parameters from an access vector.
-
- Extract the parameters (in the form $N) from an access
- vector, storing them as Param objects in a dictionary.
- Some attempt is made at resolving conflicts with other
- entries in the dict, but if an unresolvable conflict is
- found it is reported to the caller.
-
- The goal here is to figure out how interface parameters are
- actually used in the interface - e.g., that $1 is a domain used as
- a SRC_TYPE. In general an interface will look like this:
-
- interface(`foo', `
- allow $1 foo : file read;
- ')
-
- This is simple to figure out - $1 is a SRC_TYPE. A few interfaces
- are more complex, for example:
-
- interface(`foo_trans',`
- domain_auto_trans($1,fingerd_exec_t,fingerd_t)
-
- allow $1 fingerd_t:fd use;
- allow fingerd_t $1:fd use;
- allow fingerd_t $1:fifo_file rw_file_perms;
- allow fingerd_t $1:process sigchld;
- ')
-
- Here the usage seems ambiguous, but it is not. $1 is still a domain
- and therefore should be returned as a SRC_TYPE.
-
- Returns:
- 0 - success
- 1 - conflict found
- """
- ret = 0
- found_src = False
- if access.is_idparam(av.src_type):
- if __param_insert(av.src_type, refpolicy.SRC_TYPE, av, params) == 1:
- ret = 1
-
- if access.is_idparam(av.tgt_type):
- if __param_insert(av.tgt_type, refpolicy.TGT_TYPE, av, params) == 1:
- ret = 1
-
- if access.is_idparam(av.obj_class):
- if __param_insert(av.obj_class, refpolicy.OBJ_CLASS, av, params) == 1:
- ret = 1
-
- for perm in av.perms:
- if access.is_idparam(perm):
- if __param_insert(perm, refpolicy.PERM, av, params) == 1:
- ret = 1
-
- return ret
-
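# Editorial sketch, not part of the deleted file: extracting parameters from
# the simple interface shown in the docstring above, using the module's own
# access and refpolicy imports.
params = {}
av = access.AccessVector(["$1", "foo", "file", "read"])
conflict = av_extract_params(av, params)
print conflict                                    # 0 - no conflict
print params["$1"].type == refpolicy.SRC_TYPE     # True - $1 is used as a source type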
-def role_extract_params(role, params):
- if access.is_idparam(role.role):
- return __param_insert(role.role, refpolicy.ROLE, None, params)
-
-def type_rule_extract_params(rule, params):
- def extract_from_set(set, type):
- ret = 0
- for x in set:
- if access.is_idparam(x):
- if __param_insert(x, type, None, params):
- ret = 1
- return ret
-
- ret = 0
- if extract_from_set(rule.src_types, refpolicy.SRC_TYPE):
- ret = 1
-
- if extract_from_set(rule.tgt_types, refpolicy.TGT_TYPE):
- ret = 1
-
- if extract_from_set(rule.obj_classes, refpolicy.OBJ_CLASS):
- ret = 1
-
- if access.is_idparam(rule.dest_type):
- if __param_insert(rule.dest_type, refpolicy.DEST_TYPE, None, params):
- ret = 1
-
- return ret
-
-def ifcall_extract_params(ifcall, params):
- ret = 0
- for arg in ifcall.args:
- if access.is_idparam(arg):
- # Assume interface arguments are source types. Fairly safe
- # assumption for most interfaces
- if __param_insert(arg, refpolicy.SRC_TYPE, None, params):
- ret = 1
-
- return ret
-
-class AttributeVector:
- def __init__(self):
- self.name = ""
- self.access = access.AccessVectorSet()
-
- def add_av(self, av):
- self.access.add_av(av)
-
-class AttributeSet:
- def __init__(self):
- self.attributes = { }
-
- def add_attr(self, attr):
- self.attributes[attr.name] = attr
-
- def from_file(self, fd):
- def parse_attr(line):
- fields = line[1:-1].split()
- if len(fields) != 2 or fields[0] != "Attribute":
- raise SyntaxError("Syntax error Attribute statement %s" % line)
- a = AttributeVector()
- a.name = fields[1]
-
- return a
-
- a = None
- for line in fd:
- line = line[:-1]
- if line[0] == "[":
- if a:
- self.add_attr(a)
- a = parse_attr(line)
- elif a:
- l = line.split(",")
- av = access.AccessVector(l)
- a.add_av(av)
- if a:
- self.add_attr(a)
-
-class InterfaceVector:
- def __init__(self, interface=None, attributes={}):
- # Enabled is a loose concept currently - we are essentially
- # not enabling interfaces that we can't handle currently.
- # See InterfaceVector.add_ifv for more information.
- self.enabled = True
- self.name = ""
- # The access that is enabled by this interface - eventually
- # this will include indirect access from typeattribute
- # statements.
- self.access = access.AccessVectorSet()
- # Parameters are stored in a dictionary (key: param name
- # value: Param object).
- self.params = { }
- if interface:
- self.from_interface(interface, attributes)
- self.expanded = False
-
- def from_interface(self, interface, attributes={}):
- self.name = interface.name
-
- # Add allow rules
- for avrule in interface.avrules():
- if avrule.rule_type != refpolicy.AVRule.ALLOW:
- continue
- # Handle some policy bugs
- if "dontaudit" in interface.name:
- #print "allow rule in interface: %s" % interface
- continue
- avs = access.avrule_to_access_vectors(avrule)
- for av in avs:
- self.add_av(av)
-
- # Add typeattribute access
- if attributes:
- for typeattribute in interface.typeattributes():
- for attr in typeattribute.attributes:
- if not attributes.attributes.has_key(attr):
- # print "missing attribute " + attr
- continue
- attr_vec = attributes.attributes[attr]
- for a in attr_vec.access:
- av = copy.copy(a)
- if av.src_type == attr_vec.name:
- av.src_type = typeattribute.type
- if av.tgt_type == attr_vec.name:
- av.tgt_type = typeattribute.type
- self.add_av(av)
-
-
- # Extract parameters from roles
- for role in interface.roles():
- if role_extract_params(role, self.params):
- pass
- #print "found conflicting role param %s for interface %s" % \
- # (role.name, interface.name)
- # Extract parameters from type rules
- for rule in interface.typerules():
- if type_rule_extract_params(rule, self.params):
- pass
- #print "found conflicting params in rule %s in interface %s" % \
- # (str(rule), interface.name)
-
- for ifcall in interface.interface_calls():
- if ifcall_extract_params(ifcall, self.params):
- pass
- #print "found conflicting params in ifcall %s in interface %s" % \
- # (str(ifcall), interface.name)
-
-
- def add_av(self, av):
- if av_extract_params(av, self.params) == 1:
- pass
- #print "found conflicting perms [%s]" % str(av)
- self.access.add_av(av)
-
- def to_string(self):
- s = []
- s.append("[InterfaceVector %s]" % self.name)
- for av in self.access:
- s.append(str(av))
- return "\n".join(s)
-
- def __str__(self):
- return self.__repr__()
-
- def __repr__(self):
- return "<InterfaceVector %s:%s>" % (self.name, self.enabled)
-
-
-class InterfaceSet:
- def __init__(self, output=None):
- self.interfaces = { }
- self.tgt_type_map = { }
- self.tgt_type_all = []
- self.output = output
-
- def o(self, str):
- if self.output:
- self.output.write(str + "\n")
-
- def to_file(self, fd):
- for iv in self.interfaces.values():
- fd.write("[InterfaceVector %s " % iv.name)
- for param in iv.params.values():
- fd.write("%s:%s " % (param.name, refpolicy.field_to_str[param.type]))
- fd.write("]\n")
- avl = iv.access.to_list()
- for av in avl:
- fd.write(",".join(av))
- fd.write("\n")
-
- def from_file(self, fd):
- def parse_ifv(line):
- fields = line[1:-1].split()
- if len(fields) < 2 or fields[0] != "InterfaceVector":
- raise SyntaxError("Syntax error InterfaceVector statement %s" % line)
- ifv = InterfaceVector()
- ifv.name = fields[1]
- if len(fields) == 2:
- return
- for field in fields[2:]:
- p = field.split(":")
- if len(p) != 2:
- raise SyntaxError("Invalid param in InterfaceVector statement %s" % line)
- param = Param()
- param.name = p[0]
- param.type = refpolicy.str_to_field[p[1]]
- ifv.params[param.name] = param
- return ifv
-
- ifv = None
- for line in fd:
- line = line[:-1]
- if line[0] == "[":
- if ifv:
- self.add_ifv(ifv)
- ifv = parse_ifv(line)
- elif ifv:
- l = line.split(",")
- av = access.AccessVector(l)
- ifv.add_av(av)
- if ifv:
- self.add_ifv(ifv)
-
- self.index()
-
- def add_ifv(self, ifv):
- self.interfaces[ifv.name] = ifv
-
- def index(self):
- for ifv in self.interfaces.values():
- tgt_types = set()
- for av in ifv.access:
- if access.is_idparam(av.tgt_type):
- self.tgt_type_all.append(ifv)
- tgt_types = set()
- break
- tgt_types.add(av.tgt_type)
-
- for type in tgt_types:
- l = self.tgt_type_map.setdefault(type, [])
- l.append(ifv)
-
- def add(self, interface, attributes={}):
- ifv = InterfaceVector(interface, attributes)
- self.add_ifv(ifv)
-
- def add_headers(self, headers, output=None, attributes={}):
- for i in itertools.chain(headers.interfaces(), headers.templates()):
- self.add(i, attributes)
-
- self.expand_ifcalls(headers)
- self.index()
-
- def map_param(self, id, ifcall):
- if access.is_idparam(id):
- num = int(id[1:])
- if num > len(ifcall.args):
- # Tell caller to drop this because it must have
- # been generated from an optional param.
- return None
- else:
- arg = ifcall.args[num - 1]
- if isinstance(arg, list):
- return arg
- else:
- return [arg]
- else:
- return [id]
-
- def map_add_av(self, ifv, av, ifcall):
- src_types = self.map_param(av.src_type, ifcall)
- if src_types is None:
- return
-
- tgt_types = self.map_param(av.tgt_type, ifcall)
- if tgt_types is None:
- return
-
- obj_classes = self.map_param(av.obj_class, ifcall)
- if obj_classes is None:
- return
-
- new_perms = refpolicy.IdSet()
- for perm in av.perms:
- p = self.map_param(perm, ifcall)
- if p is None:
- continue
- else:
- new_perms.update(p)
- if len(new_perms) == 0:
- return
-
- for src_type in src_types:
- for tgt_type in tgt_types:
- for obj_class in obj_classes:
- ifv.access.add(src_type, tgt_type, obj_class, new_perms)
-
- def do_expand_ifcalls(self, interface, if_by_name):
- # Descend an interface call tree adding the access
- # from each interface. This is a depth first walk
- # of the tree.
-
- stack = [(interface, None)]
- ifv = self.interfaces[interface.name]
- ifv.expanded = True
-
- while len(stack) > 0:
- cur, cur_ifcall = stack.pop(-1)
-
- cur_ifv = self.interfaces[cur.name]
- if cur != interface:
-
- for av in cur_ifv.access:
- self.map_add_av(ifv, av, cur_ifcall)
-
- # If we have already fully expanded this interface
- # there is no reason to descend further.
- if cur_ifv.expanded:
- continue
-
- for ifcall in cur.interface_calls():
- if ifcall.ifname == interface.name:
- self.o(_("Found circular interface class"))
- return
- try:
- newif = if_by_name[ifcall.ifname]
- except KeyError:
- self.o(_("Missing interface definition for %s" % ifcall.ifname))
- continue
-
- stack.append((newif, ifcall))
-
-
- def expand_ifcalls(self, headers):
- # Create a map of interface names to interfaces -
- # this mirrors the interface vector map we already
- # have.
- if_by_name = { }
-
- for i in itertools.chain(headers.interfaces(), headers.templates()):
- if_by_name[i.name] = i
-
-
- for interface in itertools.chain(headers.interfaces(), headers.templates()):
- self.do_expand_ifcalls(interface, if_by_name)
-
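The tail of interfaces.py shown above is the InterfaceSet cache: interface vectors are built from parsed reference-policy headers and can be round-tripped through the plain-text format written by to_file() and read back by from_file(). As a rough usage sketch (the sepolgen.refparser.parse_headers call, the header path, and the cache file name are assumptions for illustration, not taken from this patch):

import sepolgen.interfaces as interfaces
import sepolgen.refparser as refparser

# Parse the refpolicy .if headers once (path is only an example).
headers = refparser.parse_headers("/usr/share/selinux/devel/include")

# Build the interface vectors and persist them as a plain-text cache.
ifset = interfaces.InterfaceSet()
ifset.add_headers(headers)
with open("interface_info", "w") as fd:
    ifset.to_file(fd)

# A later run can reload the cache instead of re-parsing every header.
cached = interfaces.InterfaceSet()
with open("interface_info") as fd:
    cached.from_file(fd)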
diff --git a/prebuilts/lib/sepolgen/lex.py b/prebuilts/lib/sepolgen/lex.py
deleted file mode 100644
index c1493665..00000000
--- a/prebuilts/lib/sepolgen/lex.py
+++ /dev/null
@@ -1,866 +0,0 @@
-#-----------------------------------------------------------------------------
-# ply: lex.py
-#
-# Author: David M. Beazley (dave@dabeaz.com)
-#
-# Copyright (C) 2001-2006, David M. Beazley
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See the file COPYING for a complete copy of the LGPL.
-#-----------------------------------------------------------------------------
-
-__version__ = "2.2"
-
-import re, sys, types
-
-# Regular expression used to match valid token names
-_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
-
-# Available instance types. This is used when lexers are defined by a class.
-# It's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
-
-try:
- _INSTANCETYPE = (types.InstanceType, types.ObjectType)
-except AttributeError:
- _INSTANCETYPE = types.InstanceType
- class object: pass # Note: needed if no new-style classes present
-
-# Exception thrown when invalid token encountered and no default error
-# handler is defined.
-class LexError(Exception):
- def __init__(self,message,s):
- self.args = (message,)
- self.text = s
-
-# Token class
-class LexToken(object):
- def __str__(self):
- return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
- def __repr__(self):
- return str(self)
- def skip(self,n):
- self.lexer.skip(n)
-
-# -----------------------------------------------------------------------------
-# Lexer class
-#
-# This class encapsulates all of the methods and data associated with a lexer.
-#
-# input() - Store a new string in the lexer
-# token() - Get the next token
-# -----------------------------------------------------------------------------
-
-class Lexer:
- def __init__(self):
- self.lexre = None # Master regular expression. This is a list of
- # tuples (re,findex) where re is a compiled
- # regular expression and findex is a list
- # mapping regex group numbers to rules
- self.lexretext = None # Current regular expression strings
- self.lexstatere = {} # Dictionary mapping lexer states to master regexs
- self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
- self.lexstate = "INITIAL" # Current lexer state
- self.lexstatestack = [] # Stack of lexer states
- self.lexstateinfo = None # State information
- self.lexstateignore = {} # Dictionary of ignored characters for each state
- self.lexstateerrorf = {} # Dictionary of error functions for each state
- self.lexreflags = 0 # Optional re compile flags
- self.lexdata = None # Actual input data (as a string)
- self.lexpos = 0 # Current position in input text
- self.lexlen = 0 # Length of the input text
- self.lexerrorf = None # Error rule (if any)
- self.lextokens = None # List of valid tokens
- self.lexignore = "" # Ignored characters
- self.lexliterals = "" # Literal characters that can be passed through
- self.lexmodule = None # Module
- self.lineno = 1 # Current line number
- self.lexdebug = 0 # Debugging mode
- self.lexoptimize = 0 # Optimized mode
-
- def clone(self,object=None):
- c = Lexer()
- c.lexstatere = self.lexstatere
- c.lexstateinfo = self.lexstateinfo
- c.lexstateretext = self.lexstateretext
- c.lexstate = self.lexstate
- c.lexstatestack = self.lexstatestack
- c.lexstateignore = self.lexstateignore
- c.lexstateerrorf = self.lexstateerrorf
- c.lexreflags = self.lexreflags
- c.lexdata = self.lexdata
- c.lexpos = self.lexpos
- c.lexlen = self.lexlen
- c.lextokens = self.lextokens
- c.lexdebug = self.lexdebug
- c.lineno = self.lineno
- c.lexoptimize = self.lexoptimize
- c.lexliterals = self.lexliterals
- c.lexmodule = self.lexmodule
-
- # If the object parameter has been supplied, it means we are attaching the
- # lexer to a new object. In this case, we have to rebind all methods in
- # the lexstatere and lexstateerrorf tables.
-
- if object:
- newtab = { }
- for key, ritem in self.lexstatere.items():
- newre = []
- for cre, findex in ritem:
- newfindex = []
- for f in findex:
- if not f or not f[0]:
- newfindex.append(f)
- continue
- newfindex.append((getattr(object,f[0].__name__),f[1]))
- newre.append((cre,newfindex))
- newtab[key] = newre
- c.lexstatere = newtab
- c.lexstateerrorf = { }
- for key, ef in self.lexstateerrorf.items():
- c.lexstateerrorf[key] = getattr(object,ef.__name__)
- c.lexmodule = object
-
- # Set up other attributes
- c.begin(c.lexstate)
- return c
-
- # ------------------------------------------------------------
- # writetab() - Write lexer information to a table file
- # ------------------------------------------------------------
- def writetab(self,tabfile):
- tf = open(tabfile+".py","w")
- tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
- tf.write("_lextokens = %s\n" % repr(self.lextokens))
- tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
- tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
- tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
-
- tabre = { }
- for key, lre in self.lexstatere.items():
- titem = []
- for i in range(len(lre)):
- titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
- tabre[key] = titem
-
- tf.write("_lexstatere = %s\n" % repr(tabre))
- tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
-
- taberr = { }
- for key, ef in self.lexstateerrorf.items():
- if ef:
- taberr[key] = ef.__name__
- else:
- taberr[key] = None
- tf.write("_lexstateerrorf = %s\n" % repr(taberr))
- tf.close()
-
- # ------------------------------------------------------------
- # readtab() - Read lexer information from a tab file
- # ------------------------------------------------------------
- def readtab(self,tabfile,fdict):
- exec "import %s as lextab" % tabfile
- self.lextokens = lextab._lextokens
- self.lexreflags = lextab._lexreflags
- self.lexliterals = lextab._lexliterals
- self.lexstateinfo = lextab._lexstateinfo
- self.lexstateignore = lextab._lexstateignore
- self.lexstatere = { }
- self.lexstateretext = { }
- for key,lre in lextab._lexstatere.items():
- titem = []
- txtitem = []
- for i in range(len(lre)):
- titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
- txtitem.append(lre[i][0])
- self.lexstatere[key] = titem
- self.lexstateretext[key] = txtitem
- self.lexstateerrorf = { }
- for key,ef in lextab._lexstateerrorf.items():
- self.lexstateerrorf[key] = fdict[ef]
- self.begin('INITIAL')
-
- # ------------------------------------------------------------
- # input() - Push a new string into the lexer
- # ------------------------------------------------------------
- def input(self,s):
- if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
- raise ValueError, "Expected a string"
- self.lexdata = s
- self.lexpos = 0
- self.lexlen = len(s)
-
- # ------------------------------------------------------------
- # begin() - Changes the lexing state
- # ------------------------------------------------------------
- def begin(self,state):
- if not self.lexstatere.has_key(state):
- raise ValueError, "Undefined state"
- self.lexre = self.lexstatere[state]
- self.lexretext = self.lexstateretext[state]
- self.lexignore = self.lexstateignore.get(state,"")
- self.lexerrorf = self.lexstateerrorf.get(state,None)
- self.lexstate = state
-
- # ------------------------------------------------------------
- # push_state() - Changes the lexing state and saves old on stack
- # ------------------------------------------------------------
- def push_state(self,state):
- self.lexstatestack.append(self.lexstate)
- self.begin(state)
-
- # ------------------------------------------------------------
- # pop_state() - Restores the previous state
- # ------------------------------------------------------------
- def pop_state(self):
- self.begin(self.lexstatestack.pop())
-
- # ------------------------------------------------------------
- # current_state() - Returns the current lexing state
- # ------------------------------------------------------------
- def current_state(self):
- return self.lexstate
-
- # ------------------------------------------------------------
- # skip() - Skip ahead n characters
- # ------------------------------------------------------------
- def skip(self,n):
- self.lexpos += n
-
- # ------------------------------------------------------------
- # token() - Return the next token from the Lexer
- #
- # Note: This function has been carefully implemented to be as fast
- # as possible. Don't make changes unless you really know what
- # you are doing
- # ------------------------------------------------------------
- def token(self):
- # Make local copies of frequently referenced attributes
- lexpos = self.lexpos
- lexlen = self.lexlen
- lexignore = self.lexignore
- lexdata = self.lexdata
-
- while lexpos < lexlen:
- # This provides a short-circuit for whitespace, tabs, and other ignored characters
- if lexdata[lexpos] in lexignore:
- lexpos += 1
- continue
-
- # Look for a regular expression match
- for lexre,lexindexfunc in self.lexre:
- m = lexre.match(lexdata,lexpos)
- if not m: continue
-
- # Set last match in lexer so that rules can access it if they want
- self.lexmatch = m
-
- # Create a token for return
- tok = LexToken()
- tok.value = m.group()
- tok.lineno = self.lineno
- tok.lexpos = lexpos
- tok.lexer = self
-
- lexpos = m.end()
- i = m.lastindex
- func,tok.type = lexindexfunc[i]
- self.lexpos = lexpos
-
- if not func:
- # If no token type was set, it's an ignored token
- if tok.type: return tok
- break
-
- # if func not callable, it means it's an ignored token
- if not callable(func):
- break
-
- # If token is processed by a function, call it
- newtok = func(tok)
-
- # Every rule function must return a token; if it returns nothing, we move on to the next token
- if not newtok:
- lexpos = self.lexpos # This is here in case user has updated lexpos.
- break
-
- # Verify type of the token. If not in the token map, raise an error
- if not self.lexoptimize:
- if not self.lextokens.has_key(newtok.type):
- raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
- func.func_code.co_filename, func.func_code.co_firstlineno,
- func.__name__, newtok.type),lexdata[lexpos:])
-
- return newtok
- else:
- # No match, see if in literals
- if lexdata[lexpos] in self.lexliterals:
- tok = LexToken()
- tok.value = lexdata[lexpos]
- tok.lineno = self.lineno
- tok.lexer = self
- tok.type = tok.value
- tok.lexpos = lexpos
- self.lexpos = lexpos + 1
- return tok
-
- # No match. Call t_error() if defined.
- if self.lexerrorf:
- tok = LexToken()
- tok.value = self.lexdata[lexpos:]
- tok.lineno = self.lineno
- tok.type = "error"
- tok.lexer = self
- tok.lexpos = lexpos
- self.lexpos = lexpos
- newtok = self.lexerrorf(tok)
- if lexpos == self.lexpos:
- # Error method didn't change text position at all. This is an error.
- raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
- lexpos = self.lexpos
- if not newtok: continue
- return newtok
-
- self.lexpos = lexpos
- raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
-
- self.lexpos = lexpos + 1
- if self.lexdata is None:
- raise RuntimeError, "No input string given with input()"
- return None
-
-# -----------------------------------------------------------------------------
-# _validate_file()
-#
-# This checks to see if there are duplicated t_rulename() functions or strings
-# in the parser input file. This is done using a simple regular expression
-# match on each line in the filename.
-# -----------------------------------------------------------------------------
-
-def _validate_file(filename):
- import os.path
- base,ext = os.path.splitext(filename)
- if ext != '.py': return 1 # No idea what the file is. Return OK
-
- try:
- f = open(filename)
- lines = f.readlines()
- f.close()
- except IOError:
- return 1 # Oh well
-
- fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
- sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
- counthash = { }
- linen = 1
- noerror = 1
- for l in lines:
- m = fre.match(l)
- if not m:
- m = sre.match(l)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
- noerror = 0
- linen += 1
- return noerror
-
-# -----------------------------------------------------------------------------
-# _funcs_to_names()
-#
-# Given a list of regular expression functions, this converts it to a list
-# suitable for output to a table file
-# -----------------------------------------------------------------------------
-
-def _funcs_to_names(funclist):
- result = []
- for f in funclist:
- if f and f[0]:
- result.append((f[0].__name__,f[1]))
- else:
- result.append(f)
- return result
-
-# -----------------------------------------------------------------------------
-# _names_to_funcs()
-#
-# Given a list of regular expression function names, this converts it back to
-# functions.
-# -----------------------------------------------------------------------------
-
-def _names_to_funcs(namelist,fdict):
- result = []
- for n in namelist:
- if n and n[0]:
- result.append((fdict[n[0]],n[1]))
- else:
- result.append(n)
- return result
-
-# -----------------------------------------------------------------------------
-# _form_master_re()
-#
-# This function takes a list of all of the regex components and attempts to
-# form the master regular expression. Given limitations in the Python re
-# module, it may be necessary to break the master regex into separate expressions.
-# -----------------------------------------------------------------------------
-
-def _form_master_re(relist,reflags,ldict):
- if not relist: return []
- regex = "|".join(relist)
- try:
- lexre = re.compile(regex,re.VERBOSE | reflags)
-
- # Build the index to function map for the matching engine
- lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
- for f,i in lexre.groupindex.items():
- handle = ldict.get(f,None)
- if type(handle) in (types.FunctionType, types.MethodType):
- lexindexfunc[i] = (handle,handle.__name__[2:])
- elif handle is not None:
- # If rule was specified as a string, we build an anonymous
- # callback function to carry out the action
- if f.find("ignore_") > 0:
- lexindexfunc[i] = (None,None)
- print "IGNORE", f
- else:
- lexindexfunc[i] = (None, f[2:])
-
- return [(lexre,lexindexfunc)],[regex]
- except Exception,e:
- m = int(len(relist)/2)
- if m == 0: m = 1
- llist, lre = _form_master_re(relist[:m],reflags,ldict)
- rlist, rre = _form_master_re(relist[m:],reflags,ldict)
- return llist+rlist, lre+rre
-
-# -----------------------------------------------------------------------------
-# def _statetoken(s,names)
-#
-# Given a declaration name s of the form "t_" and a dictionary whose keys are
-# state names, this function returns a tuple (states,tokenname) where states
-# is a tuple of state names and tokenname is the name of the token. For example,
-# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
-# -----------------------------------------------------------------------------
-
-def _statetoken(s,names):
- nonstate = 1
- parts = s.split("_")
- for i in range(1,len(parts)):
- if not names.has_key(parts[i]) and parts[i] != 'ANY': break
- if i > 1:
- states = tuple(parts[1:i])
- else:
- states = ('INITIAL',)
-
- if 'ANY' in states:
- states = tuple(names.keys())
-
- tokenname = "_".join(parts[i:])
- return (states,tokenname)
-
-# -----------------------------------------------------------------------------
-# lex(module)
-#
-# Build all of the regular expression rules from definitions in the supplied module
-# -----------------------------------------------------------------------------
-def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
- global lexer
- ldict = None
- stateinfo = { 'INITIAL' : 'inclusive'}
- error = 0
- files = { }
- lexobj = Lexer()
- lexobj.lexdebug = debug
- lexobj.lexoptimize = optimize
- global token,input
-
- if nowarn: warn = 0
- else: warn = 1
-
- if object: module = object
-
- if module:
- # User supplied a module object.
- if isinstance(module, types.ModuleType):
- ldict = module.__dict__
- elif isinstance(module, _INSTANCETYPE):
- _items = [(k,getattr(module,k)) for k in dir(module)]
- ldict = { }
- for (i,v) in _items:
- ldict[i] = v
- else:
- raise ValueError,"Expected a module or instance"
- lexobj.lexmodule = module
-
- else:
- # No module given. We might be able to get information from the caller.
- try:
- raise RuntimeError
- except RuntimeError:
- e,b,t = sys.exc_info()
- f = t.tb_frame
- f = f.f_back # Walk out to our calling function
- ldict = f.f_globals # Grab its globals dictionary
-
- if optimize and lextab:
- try:
- lexobj.readtab(lextab,ldict)
- token = lexobj.token
- input = lexobj.input
- lexer = lexobj
- return lexobj
-
- except ImportError:
- pass
-
- # Get the tokens, states, and literals variables (if any)
- if (module and isinstance(module,_INSTANCETYPE)):
- tokens = getattr(module,"tokens",None)
- states = getattr(module,"states",None)
- literals = getattr(module,"literals","")
- else:
- tokens = ldict.get("tokens",None)
- states = ldict.get("states",None)
- literals = ldict.get("literals","")
-
- if not tokens:
- raise SyntaxError,"lex: module does not define 'tokens'"
- if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
- raise SyntaxError,"lex: tokens must be a list or tuple."
-
- # Build a dictionary of valid token names
- lexobj.lextokens = { }
- if not optimize:
- for n in tokens:
- if not _is_identifier.match(n):
- print "lex: Bad token name '%s'" % n
- error = 1
- if warn and lexobj.lextokens.has_key(n):
- print "lex: Warning. Token '%s' multiply defined." % n
- lexobj.lextokens[n] = None
- else:
- for n in tokens: lexobj.lextokens[n] = None
-
- if debug:
- print "lex: tokens = '%s'" % lexobj.lextokens.keys()
-
- try:
- for c in literals:
- if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
- print "lex: Invalid literal %s. Must be a single character" % repr(c)
- error = 1
- continue
-
- except TypeError:
- print "lex: Invalid literals specification. literals must be a sequence of characters."
- error = 1
-
- lexobj.lexliterals = literals
-
- # Build statemap
- if states:
- if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
- print "lex: states must be defined as a tuple or list."
- error = 1
- else:
- for s in states:
- if not isinstance(s,types.TupleType) or len(s) != 2:
- print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
- error = 1
- continue
- name, statetype = s
- if not isinstance(name,types.StringType):
- print "lex: state name %s must be a string" % repr(name)
- error = 1
- continue
- if not (statetype == 'inclusive' or statetype == 'exclusive'):
- print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
- error = 1
- continue
- if stateinfo.has_key(name):
- print "lex: state '%s' already defined." % name
- error = 1
- continue
- stateinfo[name] = statetype
-
- # Get a list of symbols with the t_ or s_ prefix
- tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
-
- # Now build up a list of functions and a list of strings
-
- funcsym = { } # Symbols defined as functions
- strsym = { } # Symbols defined as strings
- toknames = { } # Mapping of symbols to token names
-
- for s in stateinfo.keys():
- funcsym[s] = []
- strsym[s] = []
-
- ignore = { } # Ignore strings by state
- errorf = { } # Error functions by state
-
- if len(tsymbols) == 0:
- raise SyntaxError,"lex: no rules of the form t_rulename are defined."
-
- for f in tsymbols:
- t = ldict[f]
- states, tokname = _statetoken(f,stateinfo)
- toknames[f] = tokname
-
- if callable(t):
- for s in states: funcsym[s].append((f,t))
- elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
- for s in states: strsym[s].append((f,t))
- else:
- print "lex: %s not defined as a function or string" % f
- error = 1
-
- # Sort the functions by line number
- for f in funcsym.values():
- f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
-
- # Sort the strings by regular expression length
- for s in strsym.values():
- s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
-
- regexs = { }
-
- # Build the master regular expressions
- for state in stateinfo.keys():
- regex_list = []
-
- # Add rules defined by functions first
- for fname, f in funcsym[state]:
- line = f.func_code.co_firstlineno
- file = f.func_code.co_filename
- files[file] = None
- tokname = toknames[fname]
-
- ismethod = isinstance(f, types.MethodType)
-
- if not optimize:
- nargs = f.func_code.co_argcount
- if ismethod:
- reqargs = 2
- else:
- reqargs = 1
- if nargs > reqargs:
- print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
- error = 1
- continue
-
- if nargs < reqargs:
- print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
- error = 1
- continue
-
- if tokname == 'ignore':
- print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
- error = 1
- continue
-
- if tokname == 'error':
- errorf[state] = f
- continue
-
- if f.__doc__:
- if not optimize:
- try:
- c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
- if c.match(""):
- print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
- error = 1
- continue
- except re.error,e:
- print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
- if '#' in f.__doc__:
- print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
- error = 1
- continue
-
- if debug:
- print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
-
- # Okay. The regular expression seemed okay. Let's append it to the master regular
- # expression we're building
-
- regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
- else:
- print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
-
- # Now add all of the simple rules
- for name,r in strsym[state]:
- tokname = toknames[name]
-
- if tokname == 'ignore':
- ignore[state] = r
- continue
-
- if not optimize:
- if tokname == 'error':
- raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
- error = 1
- continue
-
- if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
- print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
- error = 1
- continue
- try:
- c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
- if (c.match("")):
- print "lex: Regular expression for rule '%s' matches empty string." % name
- error = 1
- continue
- except re.error,e:
- print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
- if '#' in r:
- print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
-
- error = 1
- continue
- if debug:
- print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
-
- regex_list.append("(?P<%s>%s)" % (name,r))
-
- if not regex_list:
- print "lex: No rules defined for state '%s'" % state
- error = 1
-
- regexs[state] = regex_list
-
-
- if not optimize:
- for f in files.keys():
- if not _validate_file(f):
- error = 1
-
- if error:
- raise SyntaxError,"lex: Unable to build lexer."
-
- # From this point forward, we're reasonably confident that we can build the lexer.
- # No more errors will be generated, but there might be some warning messages.
-
- # Build the master regular expressions
-
- for state in regexs.keys():
- lexre, re_text = _form_master_re(regexs[state],reflags,ldict)
- lexobj.lexstatere[state] = lexre
- lexobj.lexstateretext[state] = re_text
- if debug:
- for i in range(len(re_text)):
- print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
-
- # For inclusive states, we need to add the INITIAL state
- for state,type in stateinfo.items():
- if state != "INITIAL" and type == 'inclusive':
- lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
- lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
-
- lexobj.lexstateinfo = stateinfo
- lexobj.lexre = lexobj.lexstatere["INITIAL"]
- lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
-
- # Set up ignore variables
- lexobj.lexstateignore = ignore
- lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
-
- # Set up error functions
- lexobj.lexstateerrorf = errorf
- lexobj.lexerrorf = errorf.get("INITIAL",None)
- if warn and not lexobj.lexerrorf:
- print "lex: Warning. no t_error rule is defined."
-
- # Check state information for ignore and error rules
- for s,stype in stateinfo.items():
- if stype == 'exclusive':
- if warn and not errorf.has_key(s):
- print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
- if warn and not ignore.has_key(s) and lexobj.lexignore:
- print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
- elif stype == 'inclusive':
- if not errorf.has_key(s):
- errorf[s] = errorf.get("INITIAL",None)
- if not ignore.has_key(s):
- ignore[s] = ignore.get("INITIAL","")
-
-
- # Create global versions of the token() and input() functions
- token = lexobj.token
- input = lexobj.input
- lexer = lexobj
-
- # If in optimize mode, we write the lextab
- if lextab and optimize:
- lexobj.writetab(lextab)
-
- return lexobj
-
-# -----------------------------------------------------------------------------
-# runmain()
-#
-# This runs the lexer as a main program
-# -----------------------------------------------------------------------------
-
-def runmain(lexer=None,data=None):
- if not data:
- try:
- filename = sys.argv[1]
- f = open(filename)
- data = f.read()
- f.close()
- except IndexError:
- print "Reading from standard input (type EOF to end):"
- data = sys.stdin.read()
-
- if lexer:
- _input = lexer.input
- else:
- _input = input
- _input(data)
- if lexer:
- _token = lexer.token
- else:
- _token = token
-
- while 1:
- tok = _token()
- if not tok: break
- print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
-
-
-# -----------------------------------------------------------------------------
-# @TOKEN(regex)
-#
-# This decorator can be used to set the regular expression on a rule function
-# when its docstring needs to be supplied in an alternative way
-# -----------------------------------------------------------------------------
-
-def TOKEN(r):
- def set_doc(f):
- f.__doc__ = r
- return f
- return set_doc
-
-# Alternative spelling of the TOKEN decorator
-Token = TOKEN
-
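The lex.py removed above is a vendored copy of PLY 2.2, which sepolgen's refparser uses to tokenize policy sources. For orientation, a minimal lexer in the style this module expects (Python 2, matching the bundled code; the token set below is made up for illustration):

import lex   # upstream PLY ships this module as ply.lex

tokens = ('IDENT', 'NUMBER')

# Simple rules can be plain regex strings...
t_IDENT = r'[a-zA-Z_][a-zA-Z0-9_]*'
t_ignore = ' \t'

# ...while rules that need an action are functions whose docstring is the regex.
def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    # Skip the offending character instead of raising LexError.
    t.lexer.skip(1)

lexer = lex.lex()           # builds the master regex from this module's globals
lexer.input("allow 42")
while True:
    tok = lexer.token()     # returns None at end of input
    if not tok:
        break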
diff --git a/prebuilts/lib/sepolgen/matching.py b/prebuilts/lib/sepolgen/matching.py
deleted file mode 100644
index d56dd92d..00000000
--- a/prebuilts/lib/sepolgen/matching.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-Classes and algorithms for matching requested access to access vectors.
-"""
-
-import access
-import objectmodel
-import itertools
-
-class Match:
- def __init__(self, interface=None, dist=0):
- self.interface = interface
- self.dist = dist
- self.info_dir_change = False
-
- def __cmp__(self, other):
- if self.dist == other.dist:
- if self.info_dir_change:
- if other.info_dir_change:
- return 0
- else:
- return 1
- else:
- if other.info_dir_change:
- return -1
- else:
- return 0
- else:
- if self.dist < other.dist:
- return -1
- else:
- return 1
-
-class MatchList:
- DEFAULT_THRESHOLD = 150
- def __init__(self):
- # Match objects that pass the threshold
- self.children = []
- # Match objects over the threshold
- self.bastards = []
- self.threshold = self.DEFAULT_THRESHOLD
- self.allow_info_dir_change = False
- self.av = None
-
- def best(self):
- if len(self.children):
- return self.children[0]
- if len(self.bastards):
- return self.bastards[0]
- return None
-
- def __len__(self):
- # Only return the length of the matches so
- # that this can be used to test if there is
- # a match.
- return len(self.children) + len(self.bastards)
-
- def __iter__(self):
- return iter(self.children)
-
- def all(self):
- return itertools.chain(self.children, self.bastards)
-
- def append(self, match):
- if match.dist <= self.threshold:
- if not match.info_dir_change or self.allow_info_dir_change:
- self.children.append(match)
- else:
- self.bastards.append(match)
- else:
- self.bastards.append(match)
-
- def sort(self):
- self.children.sort()
- self.bastards.sort()
-
-
-class AccessMatcher:
- def __init__(self, perm_maps=None):
- self.type_penalty = 10
- self.obj_penalty = 10
- if perm_maps:
- self.perm_maps = perm_maps
- else:
- self.perm_maps = objectmodel.PermMappings()
- # We want a change in the information flow direction
- # to be a strong penalty - stronger than access to
- # a few unrelated types.
- self.info_dir_penalty = 100
-
- def type_distance(self, a, b):
- if a == b or access.is_idparam(b):
- return 0
- else:
- return -self.type_penalty
-
-
- def perm_distance(self, av_req, av_prov):
- # First check that we have enough perms
- diff = av_req.perms.difference(av_prov.perms)
-
- if len(diff) != 0:
- total = self.perm_maps.getdefault_distance(av_req.obj_class, diff)
- return -total
- else:
- diff = av_prov.perms.difference(av_req.perms)
- return self.perm_maps.getdefault_distance(av_req.obj_class, diff)
-
- def av_distance(self, req, prov):
- """Determine the 'distance' between 2 access vectors.
-
- This function is used to find an access vector that matches
- a 'required' access. To do this we compute a signed numeric
- value that indicates how close the req access is to the
- 'provided' access vector. The closer the value is to 0
- the closer the match, with 0 being an exact match.
-
- A value over 0 indicates that the prov access vector provides more
- access than the req (in practice, this means that the source type,
- target type, and object class are the same and the perms in prov are
- a superset of those in req).
-
- A value under 0 indicates that the prov access vector provides less - or
- unrelated - access than the req access. A different type or object class will
- result in a very low value.
-
- The values other than 0 should only be interpreted relative to
- one another - they have no exact meaning and are likely to
- change.
-
- Params:
- req - [AccessVector] The access that is required. This is the
- access being matched.
- prov - [AccessVector] The access provided. This is the potential
- match that is being evaluated for req.
- Returns:
- 0 : Exact match between the access vectors.
-
- < 0 : The prov av does not provide all of the access in req.
- A smaller value indicates that the access is further from matching.
-
- > 0 : The prov av provides more access than req. The larger
- the value the more access over req.
- """
- # FUTURE - this is _very_ expensive and probably needs some
- # thorough performance work. This version is meant to give
- # meaningful results relatively simply.
- dist = 0
-
- # Get the difference between the types. The addition is safe
- # here because type_distance only returns 0 or negative.
- dist += self.type_distance(req.src_type, prov.src_type)
- dist += self.type_distance(req.tgt_type, prov.tgt_type)
-
- # Object class distance
- if req.obj_class != prov.obj_class and not access.is_idparam(prov.obj_class):
- dist -= self.obj_penalty
-
- # Permission distance
-
- # If this av doesn't have a matching source type, target type, and object class
- # count all of the permissions against it. Otherwise determine the perm
- # distance and dir.
- if dist < 0:
- pdist = self.perm_maps.getdefault_distance(prov.obj_class, prov.perms)
- else:
- pdist = self.perm_distance(req, prov)
-
- # Combine the perm and other distance
- if dist < 0:
- if pdist < 0:
- return dist + pdist
- else:
- return dist - pdist
- elif dist >= 0:
- if pdist < 0:
- return pdist - dist
- else:
- return dist + pdist
-
- def av_set_match(self, av_set, av):
- """
-
- """
- dist = None
-
- # Get the distance for each access vector
- for x in av_set:
- tmp = self.av_distance(av, x)
- if dist is None:
- dist = tmp
- elif tmp >= 0:
- if dist >= 0:
- dist += tmp
- else:
- dist = tmp + -dist
- else:
- if dist < 0:
- dist += tmp
- else:
- dist -= tmp
-
- # Penalize for information flow - we want to avoid suggesting an
- # interface that adds write access when the requested access does
- # not include any write. We are much less concerned about the reverse.
- av_dir = self.perm_maps.getdefault_direction(av.obj_class, av.perms)
-
- if av_set.info_dir is None:
- av_set.info_dir = objectmodel.FLOW_NONE
- for x in av_set:
- av_set.info_dir = av_set.info_dir | \
- self.perm_maps.getdefault_direction(x.obj_class, x.perms)
- if (av_dir & objectmodel.FLOW_WRITE == 0) and (av_set.info_dir & objectmodel.FLOW_WRITE):
- if dist < 0:
- dist -= self.info_dir_penalty
- else:
- dist += self.info_dir_penalty
-
- return dist
-
- def search_ifs(self, ifset, av, match_list):
- match_list.av = av
- for iv in itertools.chain(ifset.tgt_type_all,
- ifset.tgt_type_map.get(av.tgt_type, [])):
- if not iv.enabled:
- #print "iv %s not enabled" % iv.name
- continue
-
- dist = self.av_set_match(iv.access, av)
- if dist >= 0:
- m = Match(iv, dist)
- match_list.append(m)
-
-
- match_list.sort()
-
-
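A rough sketch of how the matching classes above are driven (the real callers live in policygen.py and audit2allow; the interface name, types, and permissions below are invented for illustration): every candidate InterfaceVector is scored against the requested access and MatchList keeps the results ordered by distance.

import sepolgen.access as access
import sepolgen.interfaces as interfaces
import sepolgen.matching as matching

# Build a one-interface InterfaceSet by hand; normally it comes from parsed
# refpolicy headers as in the interfaces.py sketch earlier.
ifv = interfaces.InterfaceVector()
ifv.name = "example_read_config"
ifv.add_av(access.AccessVector(["$1", "httpd_config_t", "file", "read", "getattr"]))
ifset = interfaces.InterfaceSet()
ifset.add_ifv(ifv)
ifset.index()

# The access we want to allow: src_type, tgt_type, obj_class, perms.
av = access.AccessVector(["httpd_t", "httpd_config_t", "file", "read"])

matcher = matching.AccessMatcher()
results = matching.MatchList()
matcher.search_ifs(ifset, av, results)

best = results.best()
if best is not None:
    print best.interface.name, best.dist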
diff --git a/prebuilts/lib/sepolgen/module.py b/prebuilts/lib/sepolgen/module.py
deleted file mode 100644
index 7fc94434..00000000
--- a/prebuilts/lib/sepolgen/module.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-Utilities for dealing with the compilation of modules and creation
-of module trees.
-"""
-
-import defaults
-
-import selinux
-
-import re
-import tempfile
-import commands
-import os
-import os.path
-import subprocess
-import shutil
-
-def is_valid_name(modname):
- """Check that a module name is valid.
- """
- m = re.findall("[^a-zA-Z0-9_\-\.]", modname)
- if len(m) == 0 and modname[0].isalpha():
- return True
- else:
- return False
-
-class ModuleTree:
- def __init__(self, modname):
- self.modname = modname
- self.dirname = None
-
- def dir_name(self):
- return self.dirname
-
- def te_name(self):
- return self.dirname + "/" + self.modname + ".te"
-
- def fc_name(self):
- return self.dirname + "/" + self.modname + ".fc"
-
- def if_name(self):
- return self.dirname + "/" + self.modname + ".if"
-
- def package_name(self):
- return self.dirname + "/" + self.modname + ".pp"
-
- def makefile_name(self):
- return self.dirname + "/Makefile"
-
- def create(self, parent_dirname, makefile_include=None):
- self.dirname = parent_dirname + "/" + self.modname
- os.mkdir(self.dirname)
- fd = open(self.makefile_name(), "w")
- if makefile_include:
- fd.write("include " + makefile_include)
- else:
- fd.write("include " + defaults.refpolicy_makefile())
- fd.close()
-
- # Create empty files for the standard refpolicy
- # module files
- open(self.te_name(), "w").close()
- open(self.fc_name(), "w").close()
- open(self.if_name(), "w").close()
-
-def modname_from_sourcename(sourcename):
- return os.path.splitext(os.path.split(sourcename)[1])[0]
-
-class ModuleCompiler:
- """ModuleCompiler eases running of the module compiler.
-
- The ModuleCompiler class encapsulates running the commandline
- module compiler (checkmodule) and module packager (semodule_package).
- You are likely interested in the create_module_package method.
-
- Several options are controlled via parameters (these only affect the
- non-refpol builds):
-
- .mls [boolean] Generate an MLS module (by passing -M to
- checkmodule). True to generate an MLS module, false
- otherwise.
-
- .module [boolean] Generate a module instead of a base module.
- True to generate a module, false to generate a base.
-
- .checkmodule [string] Fully qualified path to the module compiler.
- Default is /usr/bin/checkmodule.
-
- .semodule_package [string] Fully qualified path to the module
- packager. Defaults to /usr/bin/semodule_package.
- .output [file object] File object used to write verbose
- output of the compilation and packaging process.
- """
- def __init__(self, output=None):
- """Create a ModuleCompiler instance, optionally with an
- output file object for verbose output of the compilation process.
- """
- self.mls = selinux.is_selinux_mls_enabled()
- self.module = True
- self.checkmodule = "/usr/bin/checkmodule"
- self.semodule_package = "/usr/bin/semodule_package"
- self.output = output
- self.last_output = ""
- self.refpol_makefile = defaults.refpolicy_makefile()
- self.make = "/usr/bin/make"
-
- def o(self, str):
- if self.output:
- self.output.write(str + "\n")
- self.last_output = str
-
- def run(self, command):
- self.o(command)
- rc, output = commands.getstatusoutput(command)
- self.o(output)
-
- return rc
-
- def gen_filenames(self, sourcename):
- """Generate the module and policy package filenames from
- a source file name. The source file must be in the form
- of "foo.te". This will generate "foo.mod" and "foo.pp".
-
- Returns a tuple with (modname, policypackage).
- """
- splitname = sourcename.split(".")
- if len(splitname) < 2:
- raise RuntimeError("invalid sourcefile name %s (must end in .te)", sourcename)
- # Handle other periods in the filename correctly
- basename = ".".join(splitname[0:-1])
- modname = basename + ".mod"
- packagename = basename + ".pp"
-
- return (modname, packagename)
-
- def create_module_package(self, sourcename, refpolicy=True):
- """Create a module package saved in a packagename from a
- sourcename.
-
- The create_module_package creates a module package saved in a
- file named sourcename (.pp is the standard extension) from a
- source file (.te is the standard extension). The source file
- should contain SELinux policy statements appropriate for a
- base or non-base module (depending on the setting of .module).
-
- Only file names are accepted, not open file objects or
- descriptors because the command line SELinux tools are used.
-
- On error a RuntimeError will be raised with a descriptive
- error message.
- """
- if refpolicy:
- self.refpol_build(sourcename)
- else:
- modname, packagename = self.gen_filenames(sourcename)
- self.compile(sourcename, modname)
- self.package(modname, packagename)
- os.unlink(modname)
-
- def refpol_build(self, sourcename):
- # Compile
- command = self.make + " -f " + self.refpol_makefile
- rc = self.run(command)
-
- # Raise an error if the process failed
- if rc != 0:
- raise RuntimeError("compilation failed:\n%s" % self.last_output)
-
- def compile(self, sourcename, modname):
- s = [self.checkmodule]
- if self.mls:
- s.append("-M")
- if self.module:
- s.append("-m")
- s.append("-o")
- s.append(modname)
- s.append(sourcename)
-
- rc = self.run(" ".join(s))
- if rc != 0:
- raise RuntimeError("compilation failed:\n%s" % self.last_output)
-
- def package(self, modname, packagename):
- s = [self.semodule_package]
- s.append("-o")
- s.append(packagename)
- s.append("-m")
- s.append(modname)
-
- rc = self.run(" ".join(s))
- if rc != 0:
- raise RuntimeError("packaging failed [%s]" % self.last_output)
-
-
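For orientation, a short sketch of the deleted ModuleCompiler in its non-refpolicy mode (the .te file name is an example; checkmodule and semodule_package must exist at the paths the class assumes):

import sys
import sepolgen.module as module

mc = module.ModuleCompiler(output=sys.stdout)   # verbose output is optional
# Compiles local.te with checkmodule, packages it with semodule_package,
# leaves local.pp behind, and raises RuntimeError if either step fails.
mc.create_module_package("local.te", refpolicy=False)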
diff --git a/prebuilts/lib/sepolgen/objectmodel.py b/prebuilts/lib/sepolgen/objectmodel.py
deleted file mode 100644
index 88c8a1f4..00000000
--- a/prebuilts/lib/sepolgen/objectmodel.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-This module provides knowledge of object classes and permissions. It should
-be used to keep this knowledge from leaking into the more generic parts of
-the policy generation.
-"""
-
-# Objects that can be implicitly typed - these objects do
-# not _have_ to be implicitly typed (e.g., sockets can be
-# explicitly labeled), but they often are.
-#
-# File is in this list for /proc/self
-#
-# This list is useful when dealing with rules that have a
-# type (or param) used as both a subject and object. For
-# example:
-#
-# allow httpd_t httpd_t : socket read;
-#
-# This rule makes sense because the socket was (presumably) created
-# by a process with the type httpd_t.
-implicitly_typed_objects = ["socket", "fd", "process", "file", "lnk_file", "fifo_file",
- "dbus", "capability", "unix_stream_socket"]
-
-#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-#
-#Information Flow
-#
-# All of the permissions in SELinux can be described in terms of
-# information flow. For example, a read of a file is a flow of
-# information from that file to the process reading. Viewing
-# permissions in these terms can be used to model a variety of
-# security properties.
-#
-# Here we have some infrastructure for understanding permissions
-# in terms of information flow
-#
-#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-
-# Information flow deals with information either flowing from a subject
-# to an object ("write") or to a subject from an object ("read"). Read
-# or write is described from the subject's point of view. It is also
-# possible for a permission to represent both a read and a write (though
-# the flow is typically asymmetric in terms of bandwidth). It is also
-# possible for a permission to not flow information (meaning that the
-# result is a pure side effect).
-#
-# The following constants are for representing the directionality
-# of information flow.
-FLOW_NONE = 0
-FLOW_READ = 1
-FLOW_WRITE = 2
-FLOW_BOTH = FLOW_READ | FLOW_WRITE
-
-# These are used by the parser and for nice display of the directions
-str_to_dir = { "n" : FLOW_NONE, "r" : FLOW_READ, "w" : FLOW_WRITE, "b" : FLOW_BOTH }
-dir_to_str = { FLOW_NONE : "n", FLOW_READ : "r", FLOW_WRITE : "w", FLOW_BOTH : "b" }
-
-class PermMap:
- """A mapping between a permission and its information flow properties.
-
- PermMap represents the information flow properties of a single permission
- including the direction (read, write, etc.) and an abstract representation
- of the bandwidth of the flow (weight).
- """
- def __init__(self, perm, dir, weight):
- self.perm = perm
- self.dir = dir
- self.weight = weight
-
- def __repr__(self):
- return "<sepolgen.objectmodel.PermMap %s %s %d>" % (self.perm,
- dir_to_str[self.dir],
- self.weight)
-
-class PermMappings:
- """The information flow properties of a set of object classes and permissions.
-
- PermMappings maps one or more classes and permissions to their PermMap objects
- describing their information flow characteristics.
- """
- def __init__(self):
- self.classes = { }
- self.default_weight = 5
- self.default_dir = FLOW_BOTH
-
- def from_file(self, fd):
- """Read the permission mappings from a file. This reads the format used
- by Apol in the setools suite.
- """
- # This parsing is deliberately picky and bails at the least error. It
- # is assumed that the permission map file will be shipped as part
- # of sepolgen and not user modified, so this is a reasonable design
- # choice. If user supplied permission mappings are needed the parser
- # should be made a little more robust and give better error messages.
- cur = None
- for line in fd:
- fields = line.split()
- if len(fields) == 0 or len(fields) == 1 or fields[0] == "#":
- continue
- if fields[0] == "class":
- c = fields[1]
- if self.classes.has_key(c):
- raise ValueError("duplicate class in perm map")
- self.classes[c] = { }
- cur = self.classes[c]
- else:
- if len(fields) != 3:
- raise ValueError("error in object classs permissions")
- if cur is None:
- raise ValueError("permission outside of class")
- pm = PermMap(fields[0], str_to_dir[fields[1]], int(fields[2]))
- cur[pm.perm] = pm
-
- def get(self, obj, perm):
- """Get the permission map for the object permission.
-
- Returns:
- PermMap representing the permission
- Raises:
- KeyError if the object or permission is not defined
- """
- return self.classes[obj][perm]
-
- def getdefault(self, obj, perm):
- """Get the permission map for the object permission or a default.
-
- getdefault is the same as get except that a default PermMap is
- returned if the object class or permission is not defined. The
- default is FLOW_BOTH with a weight of 5.
- """
- try:
- pm = self.classes[obj][perm]
- except KeyError:
- return PermMap(perm, self.default_dir, self.default_weight)
- return pm
-
- def getdefault_direction(self, obj, perms):
- dir = FLOW_NONE
- for perm in perms:
- pm = self.getdefault(obj, perm)
- dir = dir | pm.dir
- return dir
-
- def getdefault_distance(self, obj, perms):
- total = 0
- for perm in perms:
- pm = self.getdefault(obj, perm)
- total += pm.weight
-
- return total
-
-
-
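The perm-map parser above reads the Apol-style format: a "class <name>" line followed by one "<permission> <n|r|w|b> <weight>" line per permission. A small self-contained illustration (the mappings below are invented, not the shipped perm map):

import StringIO
import sepolgen.objectmodel as objectmodel

perm_map = StringIO.StringIO("""class file
read r 10
write w 10
getattr r 3
""")

pm = objectmodel.PermMappings()
pm.from_file(perm_map)

pm.getdefault_direction("file", ["read", "getattr"])   # FLOW_READ
pm.getdefault_distance("file", ["read", "write"])      # 20
pm.getdefault("file", "lock")   # unknown perm -> default PermMap (FLOW_BOTH, weight 5)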
diff --git a/prebuilts/lib/sepolgen/output.py b/prebuilts/lib/sepolgen/output.py
deleted file mode 100644
index 739452db..00000000
--- a/prebuilts/lib/sepolgen/output.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-Classes and functions for the output of reference policy modules.
-
-This module takes a refpolicy.Module object and formats it for
-output using the ModuleWriter object. By separating the output
-in this way the other parts of Madison can focus solely on
-generating policy. This keeps the semantic / syntactic issues
-cleanly separated from the formatting issues.
-"""
-
-import refpolicy
-import util
-
-class ModuleWriter:
- def __init__(self):
- self.fd = None
- self.module = None
- self.sort = True
- self.requires = True
-
- def write(self, module, fd):
- self.module = module
-
- if self.sort:
- sort_filter(self.module)
-
- # FIXME - make this handle nesting
- for node, depth in refpolicy.walktree(self.module, showdepth=True):
- fd.write("%s\n" % str(node))
-
-# Helper functions for sort_filter - this is all done old school
-# C style rather than with polymorphic methods because this sorting
-# is specific to output. It is not necessarily the comparison you
-# want generally.
-
-# Compare two IdSets - we could probably do something clever
-# with set difference here, but this works.
-def id_set_cmp(x, y):
- xl = util.set_to_list(x)
- xl.sort()
- yl = util.set_to_list(y)
- yl.sort()
-
- if len(xl) != len(yl):
- return cmp(xl[0], yl[0])
- for v in zip(xl, yl):
- if v[0] != v[1]:
- return cmp(v[0], v[1])
- return 0
-
-# Compare two avrules
-def avrule_cmp(a, b):
- ret = id_set_cmp(a.src_types, b.src_types)
- if ret != 0:
- return ret
- ret = id_set_cmp(a.tgt_types, b.tgt_types)
- if ret != 0:
- return ret
- ret = id_set_cmp(a.obj_classes, b.obj_classes)
- if ret != 0:
- return ret
-
- # At this point, who cares - just return something
- return cmp(len(a.perms), len(b.perms))
-
-# Compare two interface calls
-def ifcall_cmp(a, b):
- if a.args[0] != b.args[0]:
- return cmp(a.args[0], b.args[0])
- return cmp(a.ifname, b.ifname)
-
-# Compare two avrules or interface calls
-def rule_cmp(a, b):
- if isinstance(a, refpolicy.InterfaceCall):
- if isinstance(b, refpolicy.InterfaceCall):
- return ifcall_cmp(a, b)
- else:
- return id_set_cmp([a.args[0]], b.src_types)
- else:
- if isinstance(b, refpolicy.AVRule):
- return avrule_cmp(a,b)
- else:
- return id_set_cmp(a.src_types, [b.args[0]])
-
-def role_type_cmp(a, b):
- return cmp(a.role, b.role)
-
-def sort_filter(module):
- """Sort and group the output for readability.
- """
- def sort_node(node):
- c = []
-
- # Module statement
- for mod in node.module_declarations():
- c.append(mod)
- c.append(refpolicy.Comment())
-
- # Requires
- for require in node.requires():
- c.append(require)
- c.append(refpolicy.Comment())
-
- # Rules
- #
- # We are going to group output by source type (which
- # we assume is the first argument for interfaces).
- rules = []
- rules.extend(node.avrules())
- rules.extend(node.interface_calls())
- rules.sort(rule_cmp)
-
- cur = None
- sep_rules = []
- for rule in rules:
- if isinstance(rule, refpolicy.InterfaceCall):
- x = rule.args[0]
- else:
- x = util.first(rule.src_types)
-
- if cur != x:
- if cur:
- sep_rules.append(refpolicy.Comment())
- cur = x
- comment = refpolicy.Comment()
- comment.lines.append("============= %s ==============" % cur)
- sep_rules.append(comment)
- sep_rules.append(rule)
-
- c.extend(sep_rules)
-
-
- ras = []
- ras.extend(node.role_types())
- ras.sort(role_type_cmp)
- if len(ras):
- comment = refpolicy.Comment()
- comment.lines.append("============= ROLES ==============")
- c.append(comment)
-
-
- c.extend(ras)
-
- # Everything else
- for child in node.children:
- if child not in c:
- c.append(child)
-
- node.children = c
-
- for node in module.nodes():
- sort_node(node)
-
-
diff --git a/prebuilts/lib/sepolgen/policygen.py b/prebuilts/lib/sepolgen/policygen.py
deleted file mode 100644
index 5f385775..00000000
--- a/prebuilts/lib/sepolgen/policygen.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-"""
-classes and algorithms for the generation of SELinux policy.
-"""
-
-import itertools
-import textwrap
-
-import refpolicy
-import objectmodel
-import access
-import interfaces
-import matching
-import selinux.audit2why as audit2why
-try:
- from setools import *
-except:
- pass
-
-# Constants for the level of explanation from the generation
-# routines
-NO_EXPLANATION = 0
-SHORT_EXPLANATION = 1
-LONG_EXPLANATION = 2
-
-class PolicyGenerator:
- """Generate a reference policy module from access vectors.
-
- PolicyGenerator generates a new reference policy module
- or updates an existing module based on requested access
- in the form of access vectors.
-
- It generates allow rules and optionally module require
- statements and reference policy interfaces. By default
- only allow rules are generated. The methods .set_gen_refpol
- and .set_gen_requires turn on interface generation and
- requires generation respectively.
-
- PolicyGenerator can also optionally add comments explaining
- why a particular access was allowed based on the audit
- messages that generated the access. The access vectors
- passed in must have the .audit_msgs field set correctly
- and .explain set to SHORT|LONG_EXPLANATION to enable this
- feature.
-
- The module created by PolicyGenerator can be passed to
- output.ModuleWriter to output a text representation.
- """
- def __init__(self, module=None):
- """Initialize a PolicyGenerator with an optional
- existing module.
-
- If the module parameter is not None then access
- will be added to the passed in module. Otherwise
- a new reference policy module will be created.
- """
- self.ifgen = None
- self.explain = NO_EXPLANATION
- self.gen_requires = False
- if module:
- self.module = module
- else:
- self.module = refpolicy.Module()
-
- self.dontaudit = False
-
- self.domains = None
- def set_gen_refpol(self, if_set=None, perm_maps=None):
- """Set whether reference policy interfaces are generated.
-
- To turn on interface generation pass in an interface set
- to use for interface generation. To turn off interface
- generation pass in None.
-
- If interface generation is enabled requires generation
- will also be enabled.
- """
- if if_set:
- self.ifgen = InterfaceGenerator(if_set, perm_maps)
- self.gen_requires = True
- else:
- self.ifgen = None
- self.__set_module_style()
-
-
- def set_gen_requires(self, status=True):
- """Set whether module requires are generated.
-
- Passing in true will turn on requires generation and
- False will disable generation. If requires generation is
- disabled interface generation will also be disabled and
- can only be re-enabled via .set_gen_refpol.
- """
- self.gen_requires = status
-
- def set_gen_explain(self, explain=SHORT_EXPLANATION):
- """Set whether access is explained.
- """
- self.explain = explain
-
- def set_gen_dontaudit(self, dontaudit):
- self.dontaudit = dontaudit
-
- def __set_module_style(self):
- if self.ifgen:
- refpolicy = True
- else:
- refpolicy = False
- for mod in self.module.module_declarations():
- mod.refpolicy = refpolicy
-
- def set_module_name(self, name, version="1.0"):
- """Set the name of the module and optionally the version.
- """
- # find an existing module declaration
- m = None
- for mod in self.module.module_declarations():
- m = mod
- if not m:
- m = refpolicy.ModuleDeclaration()
- self.module.children.insert(0, m)
- m.name = name
- m.version = version
- if self.ifgen:
- m.refpolicy = True
- else:
- m.refpolicy = False
-
- def get_module(self):
- """Return the generated module."""
- # Generate the requires
- if self.gen_requires:
- gen_requires(self.module)
-
- return self.module
-
- def __add_allow_rules(self, avs):
- for av in avs:
- rule = refpolicy.AVRule(av)
- if self.dontaudit:
- rule.rule_type = rule.DONTAUDIT
- rule.comment = ""
- if self.explain:
- rule.comment = str(refpolicy.Comment(explain_access(av, verbosity=self.explain)))
- if av.type == audit2why.ALLOW:
- rule.comment += "\n#!!!! This avc is allowed in the current policy"
- if av.type == audit2why.DONTAUDIT:
- rule.comment += "\n#!!!! This avc has a dontaudit rule in the current policy"
-
- if av.type == audit2why.BOOLEAN:
- if len(av.data) > 1:
- rule.comment += "\n#!!!! This avc can be allowed using one of the these booleans:\n# %s" % ", ".join(map(lambda x: x[0], av.data))
- else:
- rule.comment += "\n#!!!! This avc can be allowed using the boolean '%s'" % av.data[0][0]
-
- if av.type == audit2why.CONSTRAINT:
- rule.comment += "\n#!!!! This avc is a constraint violation. You would need to modify the attributes of either the source or target types to allow this access."
- rule.comment += "\n#Constraint rule: "
- rule.comment += "\n\t" + av.data[0]
- for reason in av.data[1:]:
- rule.comment += "\n#\tPossible cause is the source %s and target %s are different." % reason
-
- try:
- if ( av.type == audit2why.TERULE and
- "write" in av.perms and
- ( "dir" in av.obj_class or "open" in av.perms )):
- if not self.domains:
- self.domains = seinfo(ATTRIBUTE, name="domain")[0]["types"]
- types=[]
-
- for i in map(lambda x: x[TCONTEXT], sesearch([ALLOW], {SCONTEXT: av.src_type, CLASS: av.obj_class, PERMS: av.perms})):
- if i not in self.domains:
- types.append(i)
- if len(types) == 1:
- rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following type:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
- elif len(types) >= 1:
- rule.comment += "\n#!!!! The source type '%s' can write to a '%s' of the following types:\n# %s\n" % ( av.src_type, av.obj_class, ", ".join(types))
- except:
- pass
- self.module.children.append(rule)
-
-
- def add_access(self, av_set):
- """Add the access from the access vector set to this
- module.
- """
- # Use the interface generator to split the access
- # into raw allow rules and interfaces. After this
- # raw_allow will contain the access that should be
- # emitted as raw allow rules and the interface calls will
- # be added to the module.
- if self.ifgen:
- raw_allow, ifcalls = self.ifgen.gen(av_set, self.explain)
- self.module.children.extend(ifcalls)
- else:
- raw_allow = av_set
-
- # Generate the raw allow rules from the filtered list
- self.__add_allow_rules(raw_allow)
-
- def add_role_types(self, role_type_set):
- for role_type in role_type_set:
- self.module.children.append(role_type)
-
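-# A minimal usage sketch, assuming `avs` is an access.AccessVectorSet (for
-# example produced by sepolgen.audit's AuditParser.to_access()) and that the
-# output and sys modules are imported by the caller:
-#
-#   gen = PolicyGenerator()
-#   gen.set_module_name("local")
-#   gen.add_access(avs)
-#   output.ModuleWriter().write(gen.get_module(), sys.stdout)
-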
-def explain_access(av, ml=None, verbosity=SHORT_EXPLANATION):
- """Explain why a policy statement was generated.
-
- Return a string containing a text explanation of
- why a policy statement was generated. The string is
- commented and wrapped and can be directly inserted
- into a policy.
-
- Params:
- av - access vector representing the access. Should
- have .audit_msgs set appropriately.
- verbosity - the amount of explanation provided. Should
- be set to NO_EXPLANATION, SHORT_EXPLANATION, or
- LONG_EXPLANATION.
- Returns:
- list of strings - strings explaining the access or an empty
- string if verbosity=NO_EXPLANATION or there is not sufficient
- information to provide an explanation.
- """
- s = []
-
- def explain_interfaces():
- if not ml:
- return
- s.append(" Interface options:")
- for match in ml.all():
- ifcall = call_interface(match.interface, ml.av)
- s.append(' %s # [%d]' % (ifcall.to_string(), match.dist))
-
-
- # Format the raw audit data to explain why the
- # access was requested - either long or short.
- if verbosity == LONG_EXPLANATION:
- for msg in av.audit_msgs:
- s.append(' %s' % msg.header)
- s.append(' scontext="%s" tcontext="%s"' %
- (str(msg.scontext), str(msg.tcontext)))
- s.append(' class="%s" perms="%s"' %
- (msg.tclass, refpolicy.list_to_space_str(msg.accesses)))
- s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
- s.extend(textwrap.wrap('message="' + msg.message + '"', 80, initial_indent=" ",
- subsequent_indent=" "))
- explain_interfaces()
- elif verbosity:
- s.append(' src="%s" tgt="%s" class="%s", perms="%s"' %
- (av.src_type, av.tgt_type, av.obj_class, av.perms.to_space_str()))
- # For the short display we are only going to use the additional information
- # from the first audit message. For the vast majority of cases this info
- # will always be the same anyway.
- if len(av.audit_msgs) > 0:
- msg = av.audit_msgs[0]
- s.append(' comm="%s" exe="%s" path="%s"' % (msg.comm, msg.exe, msg.path))
- explain_interfaces()
- return s
-
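-# The returned list is typically wrapped in a refpolicy.Comment, e.g.:
-#
-#   rule.comment = str(refpolicy.Comment(explain_access(av, verbosity=SHORT_EXPLANATION)))
-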
-def param_comp(a, b):
- return cmp(b.num, a.num)
-
-def call_interface(interface, av):
- params = []
- args = []
-
- params.extend(interface.params.values())
- params.sort(param_comp)
-
- ifcall = refpolicy.InterfaceCall()
- ifcall.ifname = interface.name
-
- for i in range(len(params)):
- if params[i].type == refpolicy.SRC_TYPE:
- ifcall.args.append(av.src_type)
- elif params[i].type == refpolicy.TGT_TYPE:
- ifcall.args.append(av.tgt_type)
- elif params[i].type == refpolicy.OBJ_CLASS:
- ifcall.args.append(av.obj_class)
- else:
- print params[i].type
- assert(0)
-
- assert(len(ifcall.args) > 0)
-
- return ifcall
-
-class InterfaceGenerator:
- def __init__(self, ifs, perm_maps=None):
- self.ifs = ifs
- self.hack_check_ifs(ifs)
- self.matcher = matching.AccessMatcher(perm_maps)
- self.calls = []
-
- def hack_check_ifs(self, ifs):
- # FIXME: Disable interfaces we can't call - this is a hack.
- # Because we don't handle roles, multiple parameters, etc.,
- # etc., we must make certain we can actually use a returned
- # interface.
- for x in ifs.interfaces.values():
- params = []
- params.extend(x.params.values())
- params.sort(param_comp)
- for i in range(len(params)):
- # Check that the parameter position matches
- # the number (e.g., $1 is the first arg). This
- # will fail if the parser missed something.
- if (i + 1) != params[i].num:
- x.enabled = False
- break
- # Check that we can handle the param type (currently
- # excludes roles).
- if params[i].type not in [refpolicy.SRC_TYPE, refpolicy.TGT_TYPE,
- refpolicy.OBJ_CLASS]:
- x.enabled = False
- break
-
- def gen(self, avs, verbosity):
- raw_av = self.match(avs)
- ifcalls = []
- for ml in self.calls:
- ifcall = call_interface(ml.best().interface, ml.av)
- if verbosity:
- ifcall.comment = refpolicy.Comment(explain_access(ml.av, ml, verbosity))
- ifcalls.append((ifcall, ml))
-
- d = []
- for ifcall, ifs in ifcalls:
- found = False
- for o_ifcall in d:
- if o_ifcall.matches(ifcall):
- if o_ifcall.comment and ifcall.comment:
- o_ifcall.comment.merge(ifcall.comment)
- found = True
- if not found:
- d.append(ifcall)
-
- return (raw_av, d)
-
-
- def match(self, avs):
- raw_av = []
- for av in avs:
- ans = matching.MatchList()
- self.matcher.search_ifs(self.ifs, av, ans)
- if len(ans):
- self.calls.append(ans)
- else:
- raw_av.append(av)
-
- return raw_av
-
-
-def gen_requires(module):
- """Add require statements to the module.
- """
- def collect_requires(node):
- r = refpolicy.Require()
- for avrule in node.avrules():
- r.types.update(avrule.src_types)
- r.types.update(avrule.tgt_types)
- for obj in avrule.obj_classes:
- r.add_obj_class(obj, avrule.perms)
-
- for ifcall in node.interface_calls():
- for arg in ifcall.args:
- # FIXME - handle non-type arguments when we
- # can actually figure those out.
- r.types.add(arg)
-
- for role_type in node.role_types():
- r.roles.add(role_type.role)
- r.types.update(role_type.types)
-
- r.types.discard("self")
-
- node.children.insert(0, r)
-
- # FUTURE - this is untested on modules with any sort of
- # nesting
- for node in module.nodes():
- collect_requires(node)
-
-
diff --git a/prebuilts/lib/sepolgen/refparser.py b/prebuilts/lib/sepolgen/refparser.py
deleted file mode 100644
index 83542d3e..00000000
--- a/prebuilts/lib/sepolgen/refparser.py
+++ /dev/null
@@ -1,1128 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006-2007 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-# OVERVIEW
-#
-#
-# This is a parser for the refpolicy policy "language" - i.e., the
-# normal SELinux policy language plus the refpolicy style M4 macro
-# constructs on top of that base language. This parser is primarily
-# aimed at parsing the policy headers in order to create an abstract
-# policy representation suitable for generating policy.
-#
-# Both the lexer and parser are included in this file. They are implemented
-# using the Ply library (included with sepolgen).
-
-import sys
-import os
-import re
-import traceback
-
-import refpolicy
-import access
-import defaults
-
-import lex
-import yacc
-
-# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-#
-# lexer
-#
-# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-
-tokens = (
- # basic tokens, punctuation
- 'TICK',
- 'SQUOTE',
- 'OBRACE',
- 'CBRACE',
- 'SEMI',
- 'COLON',
- 'OPAREN',
- 'CPAREN',
- 'COMMA',
- 'MINUS',
- 'TILDE',
- 'ASTERISK',
- 'AMP',
- 'BAR',
- 'EXPL',
- 'EQUAL',
- 'FILENAME',
- 'IDENTIFIER',
- 'NUMBER',
- 'PATH',
- 'IPV6_ADDR',
- # reserved words
- # module
- 'MODULE',
- 'POLICY_MODULE',
- 'REQUIRE',
- # flask
- 'SID',
- 'GENFSCON',
- 'FS_USE_XATTR',
- 'FS_USE_TRANS',
- 'FS_USE_TASK',
- 'PORTCON',
- 'NODECON',
- 'NETIFCON',
- 'PIRQCON',
- 'IOMEMCON',
- 'IOPORTCON',
- 'PCIDEVICECON',
- 'DEVICETREECON',
- # object classes
- 'CLASS',
- # types and attributes
- 'TYPEATTRIBUTE',
- 'ROLEATTRIBUTE',
- 'TYPE',
- 'ATTRIBUTE',
- 'ATTRIBUTE_ROLE',
- 'ALIAS',
- 'TYPEALIAS',
- # conditional policy
- 'BOOL',
- 'TRUE',
- 'FALSE',
- 'IF',
- 'ELSE',
- # users and roles
- 'ROLE',
- 'TYPES',
- # rules
- 'ALLOW',
- 'DONTAUDIT',
- 'AUDITALLOW',
- 'NEVERALLOW',
- 'PERMISSIVE',
- 'TYPE_TRANSITION',
- 'TYPE_CHANGE',
- 'TYPE_MEMBER',
- 'RANGE_TRANSITION',
- 'ROLE_TRANSITION',
- # refpolicy keywords
- 'OPT_POLICY',
- 'INTERFACE',
- 'TUNABLE_POLICY',
- 'GEN_REQ',
- 'TEMPLATE',
- 'GEN_CONTEXT',
- # m4
- 'IFELSE',
- 'IFDEF',
- 'IFNDEF',
- 'DEFINE'
- )
-
-# All reserved keywords - see t_IDENTIFIER for how these are matched in
-# the lexer.
-reserved = {
- # module
- 'module' : 'MODULE',
- 'policy_module' : 'POLICY_MODULE',
- 'require' : 'REQUIRE',
- # flask
- 'sid' : 'SID',
- 'genfscon' : 'GENFSCON',
- 'fs_use_xattr' : 'FS_USE_XATTR',
- 'fs_use_trans' : 'FS_USE_TRANS',
- 'fs_use_task' : 'FS_USE_TASK',
- 'portcon' : 'PORTCON',
- 'nodecon' : 'NODECON',
- 'netifcon' : 'NETIFCON',
- 'pirqcon' : 'PIRQCON',
- 'iomemcon' : 'IOMEMCON',
- 'ioportcon' : 'IOPORTCON',
- 'pcidevicecon' : 'PCIDEVICECON',
- 'devicetreecon' : 'DEVICETREECON',
- # object classes
- 'class' : 'CLASS',
- # types and attributes
- 'typeattribute' : 'TYPEATTRIBUTE',
- 'roleattribute' : 'ROLEATTRIBUTE',
- 'type' : 'TYPE',
- 'attribute' : 'ATTRIBUTE',
- 'attribute_role' : 'ATTRIBUTE_ROLE',
- 'alias' : 'ALIAS',
- 'typealias' : 'TYPEALIAS',
- # conditional policy
- 'bool' : 'BOOL',
- 'true' : 'TRUE',
- 'false' : 'FALSE',
- 'if' : 'IF',
- 'else' : 'ELSE',
- # users and roles
- 'role' : 'ROLE',
- 'types' : 'TYPES',
- # rules
- 'allow' : 'ALLOW',
- 'dontaudit' : 'DONTAUDIT',
- 'auditallow' : 'AUDITALLOW',
- 'neverallow' : 'NEVERALLOW',
- 'permissive' : 'PERMISSIVE',
- 'type_transition' : 'TYPE_TRANSITION',
- 'type_change' : 'TYPE_CHANGE',
- 'type_member' : 'TYPE_MEMBER',
- 'range_transition' : 'RANGE_TRANSITION',
- 'role_transition' : 'ROLE_TRANSITION',
- # refpolicy keywords
- 'optional_policy' : 'OPT_POLICY',
- 'interface' : 'INTERFACE',
- 'tunable_policy' : 'TUNABLE_POLICY',
- 'gen_require' : 'GEN_REQ',
- 'template' : 'TEMPLATE',
- 'gen_context' : 'GEN_CONTEXT',
- # M4
- 'ifelse' : 'IFELSE',
- 'ifndef' : 'IFNDEF',
- 'ifdef' : 'IFDEF',
- 'define' : 'DEFINE'
- }
-
-# The ply lexer allows definition of tokens in 2 ways: regular expressions
-# or functions.
-
-# Simple regex tokens
-t_TICK = r'\`'
-t_SQUOTE = r'\''
-t_OBRACE = r'\{'
-t_CBRACE = r'\}'
-# This will handle spurious extra ';' via the +
-t_SEMI = r'\;+'
-t_COLON = r'\:'
-t_OPAREN = r'\('
-t_CPAREN = r'\)'
-t_COMMA = r'\,'
-t_MINUS = r'\-'
-t_TILDE = r'\~'
-t_ASTERISK = r'\*'
-t_AMP = r'\&'
-t_BAR = r'\|'
-t_EXPL = r'\!'
-t_EQUAL = r'\='
-t_NUMBER = r'[0-9\.]+'
-t_PATH = r'/[a-zA-Z0-9)_\.\*/]*'
-#t_IPV6_ADDR = r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]{0,4}:)*'
-
-# Ignore whitespace - this is a special token for ply that more efficiently
-# ignores uninteresting tokens.
-t_ignore = " \t"
-
-# More complex tokens
-def t_IPV6_ADDR(t):
- r'[a-fA-F0-9]{0,4}:[a-fA-F0-9]{0,4}:([a-fA-F0-9]|:)*'
- # This is a function simply to force it sooner into
- # the regex list
- return t
-
-def t_m4comment(t):
- r'dnl.*\n'
- # Ignore all comments
- t.lexer.lineno += 1
-
-def t_refpolicywarn1(t):
- r'define.*refpolicywarn\(.*\n'
- # Ignore refpolicywarn statements - they sometimes
- # contain text that we can't parse.
- t.skip(1)
-
-def t_refpolicywarn(t):
- r'refpolicywarn\(.*\n'
- # Ignore refpolicywarn statements - they sometimes
- # contain text that we can't parse.
- t.lexer.lineno += 1
-
-def t_IDENTIFIER(t):
- r'[a-zA-Z_\$][a-zA-Z0-9_\-\+\.\$\*~]*'
- # Handle any keywords
- t.type = reserved.get(t.value,'IDENTIFIER')
- return t
-
-def t_FILENAME(t):
- r'\"[a-zA-Z0-9_\-\+\.\$\*~ :]+\"'
- # Handle any keywords
- t.type = reserved.get(t.value,'FILENAME')
- return t
-
-def t_comment(t):
- r'\#.*\n'
- # Ignore all comments
- t.lexer.lineno += 1
-
-def t_error(t):
- print "Illegal character '%s'" % t.value[0]
- t.skip(1)
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += len(t.value)
-
-# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-#
-# Parser
-#
-# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-
-# Global data used during parsing - making it global is easier than
-# passing the state through the parsing functions.
-
-# m is the top-level data structure (stands for modules).
-m = None
-# error is either None (indicating no error) or a string error message.
-error = None
-parse_file = ""
-# spt is the support macros (e.g., obj/perm sets) - it is an instance of
-# refpolicy.SupportMacros and should always be present during parsing
-# though it may not contain any macros.
-spt = None
-success = True
-
-# utilities
-def collect(stmts, parent, val=None):
- if stmts is None:
- return
- for s in stmts:
- if s is None:
- continue
- s.parent = parent
- if val is not None:
- parent.children.insert(0, (val, s))
- else:
- parent.children.insert(0, s)
-
-def expand(ids, s):
- for id in ids:
- if spt.has_key(id):
- s.update(spt.by_name(id))
- else:
- s.add(id)
-
-# Top-level non-terminal
-def p_statements(p):
- '''statements : statement
- | statements statement
- | empty
- '''
- if len(p) == 2 and p[1]:
- m.children.append(p[1])
- elif len(p) > 2 and p[2]:
- m.children.append(p[2])
-
-def p_statement(p):
- '''statement : interface
- | template
- | obj_perm_set
- | policy
- | policy_module_stmt
- | module_stmt
- '''
- p[0] = p[1]
-
-def p_empty(p):
- 'empty :'
- pass
-
-#
-# Reference policy language constructs
-#
-
-# This is for the policy module statement (e.g., policy_module(foo,1.2.0)).
-# We have a separate non-terminal for this (as opposed to the basic language
-# module statement and interface calls) to make it easier to identify.
-def p_policy_module_stmt(p):
- 'policy_module_stmt : POLICY_MODULE OPAREN IDENTIFIER COMMA NUMBER CPAREN'
- m = refpolicy.ModuleDeclaration()
- m.name = p[3]
- m.version = p[5]
- m.refpolicy = True
- p[0] = m
-
-def p_interface(p):
- '''interface : INTERFACE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
- '''
- x = refpolicy.Interface(p[4])
- collect(p[8], x)
- p[0] = x
-
-def p_template(p):
- '''template : TEMPLATE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
- | DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
- '''
- x = refpolicy.Template(p[4])
- collect(p[8], x)
- p[0] = x
-
-def p_define(p):
- '''define : DEFINE OPAREN TICK IDENTIFIER SQUOTE CPAREN'''
- # This is for defining single M4 values (to be used later in ifdef statements).
- # Example: define(`sulogin_no_pam'). We don't currently do anything with these
- # but we should in the future when we correctly resolve ifdef statements.
- p[0] = None
-
-def p_interface_stmts(p):
- '''interface_stmts : policy
- | interface_stmts policy
- | empty
- '''
- if len(p) == 2 and p[1]:
- p[0] = p[1]
- elif len(p) > 2:
- if not p[1]:
- if p[2]:
- p[0] = p[2]
- elif not p[2]:
- p[0] = p[1]
- else:
- p[0] = p[1] + p[2]
-
-def p_optional_policy(p):
- '''optional_policy : OPT_POLICY OPAREN TICK interface_stmts SQUOTE CPAREN
- | OPT_POLICY OPAREN TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
- '''
- o = refpolicy.OptionalPolicy()
- collect(p[4], o, val=True)
- if len(p) > 7:
- collect(p[8], o, val=False)
- p[0] = [o]
-
-def p_tunable_policy(p):
- '''tunable_policy : TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
- | TUNABLE_POLICY OPAREN TICK cond_expr SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN
- '''
- x = refpolicy.TunablePolicy()
- x.cond_expr = p[4]
- collect(p[8], x, val=True)
- if len(p) > 11:
- collect(p[12], x, val=False)
- p[0] = [x]
-
-def p_ifelse(p):
- '''ifelse : IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
- | IFELSE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
- '''
-# x = refpolicy.IfDef(p[4])
-# v = True
-# collect(p[8], x, val=v)
-# if len(p) > 12:
-# collect(p[12], x, val=False)
-# p[0] = [x]
- pass
-
-
-def p_ifdef(p):
- '''ifdef : IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
- | IFNDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
- | IFDEF OPAREN TICK IDENTIFIER SQUOTE COMMA TICK interface_stmts SQUOTE COMMA TICK interface_stmts SQUOTE CPAREN optional_semi
- '''
- x = refpolicy.IfDef(p[4])
- if p[1] == 'ifdef':
- v = True
- else:
- v = False
- collect(p[8], x, val=v)
- if len(p) > 12:
- collect(p[12], x, val=False)
- p[0] = [x]
-
-def p_interface_call(p):
- '''interface_call : IDENTIFIER OPAREN interface_call_param_list CPAREN
- | IDENTIFIER OPAREN CPAREN
- | IDENTIFIER OPAREN interface_call_param_list CPAREN SEMI'''
- # Allow spurious semi-colons at the end of interface calls
- i = refpolicy.InterfaceCall(ifname=p[1])
- if len(p) > 4:
- i.args.extend(p[3])
- p[0] = i
-
-def p_interface_call_param(p):
- '''interface_call_param : IDENTIFIER
- | IDENTIFIER MINUS IDENTIFIER
- | nested_id_set
- | TRUE
- | FALSE
- | FILENAME
- '''
- # Intentionally let single identifiers pass through:
- # a list means a set, a non-list means a single identifier.
- if len(p) == 2:
- p[0] = p[1]
- else:
- p[0] = [p[1], "-" + p[3]]
-
-def p_interface_call_param_list(p):
- '''interface_call_param_list : interface_call_param
- | interface_call_param_list COMMA interface_call_param
- '''
- if len(p) == 2:
- p[0] = [p[1]]
- else:
- p[0] = p[1] + [p[3]]
-
-
-def p_obj_perm_set(p):
- 'obj_perm_set : DEFINE OPAREN TICK IDENTIFIER SQUOTE COMMA TICK names SQUOTE CPAREN'
- s = refpolicy.ObjPermSet(p[4])
- s.perms = p[8]
- p[0] = s
-
-#
-# Basic SELinux policy language
-#
-
-def p_policy(p):
- '''policy : policy_stmt
- | optional_policy
- | tunable_policy
- | ifdef
- | ifelse
- | conditional
- '''
- p[0] = p[1]
-
-def p_policy_stmt(p):
- '''policy_stmt : gen_require
- | avrule_def
- | typerule_def
- | typeattribute_def
- | roleattribute_def
- | interface_call
- | role_def
- | role_allow
- | permissive
- | type_def
- | typealias_def
- | attribute_def
- | attribute_role_def
- | range_transition_def
- | role_transition_def
- | bool
- | define
- | initial_sid
- | genfscon
- | fs_use
- | portcon
- | nodecon
- | netifcon
- | pirqcon
- | iomemcon
- | ioportcon
- | pcidevicecon
- | devicetreecon
- '''
- if p[1]:
- p[0] = [p[1]]
-
-def p_module_stmt(p):
- 'module_stmt : MODULE IDENTIFIER NUMBER SEMI'
- m = refpolicy.ModuleDeclaration()
- m.name = p[2]
- m.version = p[3]
- m.refpolicy = False
- p[0] = m
-
-def p_gen_require(p):
- '''gen_require : GEN_REQ OPAREN TICK requires SQUOTE CPAREN
- | REQUIRE OBRACE requires CBRACE'''
- # We ignore the require statements - they are redundant data from our point-of-view.
- # Checkmodule will verify them later anyway so we just assume that they match what
- # is in the rest of the interface.
- pass
-
-def p_requires(p):
- '''requires : require
- | requires require
- | ifdef
- | requires ifdef
- '''
- pass
-
-def p_require(p):
- '''require : TYPE comma_list SEMI
- | ROLE comma_list SEMI
- | ATTRIBUTE comma_list SEMI
- | ATTRIBUTE_ROLE comma_list SEMI
- | CLASS comma_list SEMI
- | BOOL comma_list SEMI
- '''
- pass
-
-def p_security_context(p):
- '''security_context : IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER
- | IDENTIFIER COLON IDENTIFIER COLON IDENTIFIER COLON mls_range_def'''
- # This will likely need some updates to handle complex levels
- s = refpolicy.SecurityContext()
- s.user = p[1]
- s.role = p[3]
- s.type = p[5]
- if len(p) > 6:
- s.level = p[7]
-
- p[0] = s
-
-def p_gen_context(p):
- '''gen_context : GEN_CONTEXT OPAREN security_context COMMA mls_range_def CPAREN
- '''
- # We actually store gen_context statements in a SecurityContext
- # object - it knows how to output either a bare context or a
- # gen_context statement.
- s = p[3]
- s.level = p[5]
-
- p[0] = s
-
-def p_context(p):
- '''context : security_context
- | gen_context
- '''
- p[0] = p[1]
-
-def p_initial_sid(p):
- '''initial_sid : SID IDENTIFIER context'''
- s = refpolicy.InitialSid()
- s.name = p[2]
- s.context = p[3]
- p[0] = s
-
-def p_genfscon(p):
- '''genfscon : GENFSCON IDENTIFIER PATH context'''
-
- g = refpolicy.GenfsCon()
- g.filesystem = p[2]
- g.path = p[3]
- g.context = p[4]
-
- p[0] = g
-
-def p_fs_use(p):
- '''fs_use : FS_USE_XATTR IDENTIFIER context SEMI
- | FS_USE_TASK IDENTIFIER context SEMI
- | FS_USE_TRANS IDENTIFIER context SEMI
- '''
- f = refpolicy.FilesystemUse()
- if p[1] == "fs_use_xattr":
- f.type = refpolicy.FilesystemUse.XATTR
- elif p[1] == "fs_use_task":
- f.type = refpolicy.FilesystemUse.TASK
- elif p[1] == "fs_use_trans":
- f.type = refpolicy.FilesystemUse.TRANS
-
- f.filesystem = p[2]
- f.context = p[3]
-
- p[0] = f
-
-def p_portcon(p):
- '''portcon : PORTCON IDENTIFIER NUMBER context
- | PORTCON IDENTIFIER NUMBER MINUS NUMBER context'''
- c = refpolicy.PortCon()
- c.port_type = p[2]
- if len(p) == 5:
- c.port_number = p[3]
- c.context = p[4]
- else:
- c.port_number = p[3] + "-" + p[4]
- c.context = p[5]
-
- p[0] = c
-
-def p_nodecon(p):
- '''nodecon : NODECON NUMBER NUMBER context
- | NODECON IPV6_ADDR IPV6_ADDR context
- '''
- n = refpolicy.NodeCon()
- n.start = p[2]
- n.end = p[3]
- n.context = p[4]
-
- p[0] = n
-
-def p_netifcon(p):
- 'netifcon : NETIFCON IDENTIFIER context context'
- n = refpolicy.NetifCon()
- n.interface = p[2]
- n.interface_context = p[3]
- n.packet_context = p[4]
-
- p[0] = n
-
-def p_pirqcon(p):
- 'pirqcon : PIRQCON NUMBER context'
- c = refpolicy.PirqCon()
- c.pirq_number = p[2]
- c.context = p[3]
-
- p[0] = c
-
-def p_iomemcon(p):
- '''iomemcon : IOMEMCON NUMBER context
- | IOMEMCON NUMBER MINUS NUMBER context'''
- c = refpolicy.IomemCon()
- if len(p) == 4:
- c.device_mem = p[2]
- c.context = p[3]
- else:
- c.device_mem = p[2] + "-" + p[3]
- c.context = p[4]
-
- p[0] = c
-
-def p_ioportcon(p):
- '''ioportcon : IOPORTCON NUMBER context
- | IOPORTCON NUMBER MINUS NUMBER context'''
- c = refpolicy.IoportCon()
- if len(p) == 4:
- c.ioport = p[2]
- c.context = p[3]
- else:
- c.ioport = p[2] + "-" + p[3]
- c.context = p[4]
-
- p[0] = c
-
-def p_pcidevicecon(p):
- 'pcidevicecon : PCIDEVICECON NUMBER context'
- c = refpolicy.PciDeviceCon()
- c.device = p[2]
- c.context = p[3]
-
- p[0] = c
-
-def p_devicetreecon(p):
- 'devicetreecon : DEVICETREECON NUMBER context'
- c = refpolicy.DeviceTreeCon()
- c.path = p[2]
- c.context = p[3]
-
- p[0] = c
-
-def p_mls_range_def(p):
- '''mls_range_def : mls_level_def MINUS mls_level_def
- | mls_level_def
- '''
- p[0] = p[1]
- if len(p) > 2:
- p[0] = p[0] + "-" + p[3]
-
-def p_mls_level_def(p):
- '''mls_level_def : IDENTIFIER COLON comma_list
- | IDENTIFIER
- '''
- p[0] = p[1]
- if len(p) > 2:
- p[0] = p[0] + ":" + ",".join(p[3])
-
-def p_type_def(p):
- '''type_def : TYPE IDENTIFIER COMMA comma_list SEMI
- | TYPE IDENTIFIER SEMI
- | TYPE IDENTIFIER ALIAS names SEMI
- | TYPE IDENTIFIER ALIAS names COMMA comma_list SEMI
- '''
- t = refpolicy.Type(p[2])
- if len(p) == 6:
- if p[3] == ',':
- t.attributes.update(p[4])
- else:
- t.aliases = p[4]
- elif len(p) > 4:
- t.aliases = p[4]
- if len(p) == 8:
- t.attributes.update(p[6])
- p[0] = t
-
-def p_attribute_def(p):
- 'attribute_def : ATTRIBUTE IDENTIFIER SEMI'
- a = refpolicy.Attribute(p[2])
- p[0] = a
-
-def p_attribute_role_def(p):
- 'attribute_role_def : ATTRIBUTE_ROLE IDENTIFIER SEMI'
- a = refpolicy.Attribute_Role(p[2])
- p[0] = a
-
-def p_typealias_def(p):
- 'typealias_def : TYPEALIAS IDENTIFIER ALIAS names SEMI'
- t = refpolicy.TypeAlias()
- t.type = p[2]
- t.aliases = p[4]
- p[0] = t
-
-def p_role_def(p):
- '''role_def : ROLE IDENTIFIER TYPES comma_list SEMI
- | ROLE IDENTIFIER SEMI'''
- r = refpolicy.Role()
- r.role = p[2]
- if len(p) > 4:
- r.types.update(p[4])
- p[0] = r
-
-def p_role_allow(p):
- 'role_allow : ALLOW names names SEMI'
- r = refpolicy.RoleAllow()
- r.src_roles = p[2]
- r.tgt_roles = p[3]
- p[0] = r
-
-def p_permissive(p):
- 'permissive : PERMISSIVE names SEMI'
- # Permissive statements are parsed but ignored.
- pass
-
-def p_avrule_def(p):
- '''avrule_def : ALLOW names names COLON names names SEMI
- | DONTAUDIT names names COLON names names SEMI
- | AUDITALLOW names names COLON names names SEMI
- | NEVERALLOW names names COLON names names SEMI
- '''
- a = refpolicy.AVRule()
- if p[1] == 'dontaudit':
- a.rule_type = refpolicy.AVRule.DONTAUDIT
- elif p[1] == 'auditallow':
- a.rule_type = refpolicy.AVRule.AUDITALLOW
- elif p[1] == 'neverallow':
- a.rule_type = refpolicy.AVRule.NEVERALLOW
- a.src_types = p[2]
- a.tgt_types = p[3]
- a.obj_classes = p[5]
- a.perms = p[6]
- p[0] = a
-
-def p_typerule_def(p):
- '''typerule_def : TYPE_TRANSITION names names COLON names IDENTIFIER SEMI
- | TYPE_TRANSITION names names COLON names IDENTIFIER FILENAME SEMI
- | TYPE_TRANSITION names names COLON names IDENTIFIER IDENTIFIER SEMI
- | TYPE_CHANGE names names COLON names IDENTIFIER SEMI
- | TYPE_MEMBER names names COLON names IDENTIFIER SEMI
- '''
- t = refpolicy.TypeRule()
- if p[1] == 'type_change':
- t.rule_type = refpolicy.TypeRule.TYPE_CHANGE
- elif p[1] == 'type_member':
- t.rule_type = refpolicy.TypeRule.TYPE_MEMBER
- t.src_types = p[2]
- t.tgt_types = p[3]
- t.obj_classes = p[5]
- t.dest_type = p[6]
- if len(p) > 8:
- t.file_name = p[7]
- p[0] = t
-
-def p_bool(p):
- '''bool : BOOL IDENTIFIER TRUE SEMI
- | BOOL IDENTIFIER FALSE SEMI'''
- b = refpolicy.Bool()
- b.name = p[2]
- if p[3] == "true":
- b.state = True
- else:
- b.state = False
- p[0] = b
-
-def p_conditional(p):
- ''' conditional : IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE
- | IF OPAREN cond_expr CPAREN OBRACE interface_stmts CBRACE ELSE OBRACE interface_stmts CBRACE
- '''
- c = refpolicy.Conditional()
- c.cond_expr = p[3]
- collect(p[6], c, val=True)
- if len(p) > 8:
- collect(p[10], c, val=False)
- p[0] = [c]
-
-def p_typeattribute_def(p):
- '''typeattribute_def : TYPEATTRIBUTE IDENTIFIER comma_list SEMI'''
- t = refpolicy.TypeAttribute()
- t.type = p[2]
- t.attributes.update(p[3])
- p[0] = t
-
-def p_roleattribute_def(p):
- '''roleattribute_def : ROLEATTRIBUTE IDENTIFIER comma_list SEMI'''
- t = refpolicy.RoleAttribute()
- t.role = p[2]
- t.roleattributes.update(p[3])
- p[0] = t
-
-def p_range_transition_def(p):
- '''range_transition_def : RANGE_TRANSITION names names COLON names mls_range_def SEMI
- | RANGE_TRANSITION names names names SEMI'''
- pass
-
-def p_role_transition_def(p):
- '''role_transition_def : ROLE_TRANSITION names names names SEMI'''
- pass
-
-def p_cond_expr(p):
- '''cond_expr : IDENTIFIER
- | EXPL cond_expr
- | cond_expr AMP AMP cond_expr
- | cond_expr BAR BAR cond_expr
- | cond_expr EQUAL EQUAL cond_expr
- | cond_expr EXPL EQUAL cond_expr
- '''
- l = len(p)
- if l == 2:
- p[0] = [p[1]]
- elif l == 3:
- p[0] = [p[1]] + p[2]
- else:
- p[0] = p[1] + [p[2] + p[3]] + p[4]
-
-
-#
-# Basic terminals
-#
-
-# Identifiers and lists of identifiers. These must
-# be handled somewhat gracefully. Names returns an IdSet and care must
-# be taken that this is _assigned_ to an object to correctly update
-# all of the flags (as opposed to using update). The other terminals
-# return lists - this is to preserve ordering when it is important for
-# parsing (for example, interface_call must retain the ordering). Other
-# times the list should be used to update an IdSet.
-
-def p_names(p):
- '''names : identifier
- | nested_id_set
- | asterisk
- | TILDE identifier
- | TILDE nested_id_set
- | IDENTIFIER MINUS IDENTIFIER
- '''
- s = refpolicy.IdSet()
- if len(p) < 3:
- expand(p[1], s)
- elif len(p) == 3:
- expand(p[2], s)
- s.compliment = True
- else:
- expand([p[1]], s)
- s.add("-" + p[3])
- p[0] = s
-
-def p_identifier(p):
- 'identifier : IDENTIFIER'
- p[0] = [p[1]]
-
-def p_asterisk(p):
- 'asterisk : ASTERISK'
- p[0] = [p[1]]
-
-def p_nested_id_set(p):
- '''nested_id_set : OBRACE nested_id_list CBRACE
- '''
- p[0] = p[2]
-
-def p_nested_id_list(p):
- '''nested_id_list : nested_id_element
- | nested_id_list nested_id_element
- '''
- if len(p) == 2:
- p[0] = p[1]
- else:
- p[0] = p[1] + p[2]
-
-def p_nested_id_element(p):
- '''nested_id_element : identifier
- | MINUS IDENTIFIER
- | nested_id_set
- '''
- if len(p) == 2:
- p[0] = p[1]
- else:
- # For now just leave the '-'
- str = "-" + p[2]
- p[0] = [str]
-
-def p_comma_list(p):
- '''comma_list : nested_id_list
- | comma_list COMMA nested_id_list
- '''
- if len(p) > 2:
- p[1] = p[1] + p[3]
- p[0] = p[1]
-
-def p_optional_semi(p):
- '''optional_semi : SEMI
- | empty'''
- pass
-
-
-#
-# Interface to the parser
-#
-
-def p_error(tok):
- global error, parse_file, success, parser
- error = "%s: Syntax error on line %d %s [type=%s]" % (parse_file, tok.lineno, tok.value, tok.type)
- print error
- success = False
-
-def prep_spt(spt):
- if not spt:
- return { }
- map = {}
- for x in spt:
- map[x.name] = x
- return map
-
-parser = None
-lexer = None
-def create_globals(module, support, debug):
- global parser, lexer, m, spt
-
- if not parser:
- lexer = lex.lex()
- parser = yacc.yacc(method="LALR", debug=debug, write_tables=0)
-
- if module is not None:
- m = module
- else:
- m = refpolicy.Module()
-
- if not support:
- spt = refpolicy.SupportMacros()
- else:
- spt = support
-
-def parse(text, module=None, support=None, debug=False):
- create_globals(module, support, debug)
- global error, parser, lexer, success
-
- success = True
-
- try:
- parser.parse(text, debug=debug, lexer=lexer)
- except Exception, e:
- parser = None
- lexer = None
- error = "internal parser error: %s" % str(e) + "\n" + traceback.format_exc()
-
- if not success:
- # force the parser and lexer to be rebuilt - we have some problems otherwise
- parser = None
- msg = 'could not parse text: "%s"' % error
- raise ValueError(msg)
- return m
-
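-# A minimal usage sketch: parsing a small interface definition into a
-# refpolicy.Module (hypothetical type and interface names; header trees are
-# normally handled by parse_headers() below):
-#
-#   text = """
-#   interface(`foo_read_conf',`
-#       allow $1 foo_conf_t:file { getattr open read };
-#   ')
-#   """
-#   mod = parse(text)
-#   for i in mod.interfaces():
-#       print i.name
-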
-def list_headers(root):
- modules = []
- support_macros = None
-
- for dirpath, dirnames, filenames in os.walk(root):
- for name in filenames:
- modname = os.path.splitext(name)
- filename = os.path.join(dirpath, name)
-
- if modname[1] == '.spt':
- if name == "obj_perm_sets.spt":
- support_macros = filename
- elif len(re.findall("patterns", modname[0])):
- modules.append((modname[0], filename))
- elif modname[1] == '.if':
- modules.append((modname[0], filename))
-
- return (modules, support_macros)
-
-
-def parse_headers(root, output=None, expand=True, debug=False):
- import util
-
- headers = refpolicy.Headers()
-
- modules = []
- support_macros = None
-
- if os.path.isfile(root):
- name = os.path.split(root)[1]
- if name == '':
- raise ValueError("Invalid file name %s" % root)
- modname = os.path.splitext(name)
- modules.append((modname[0], root))
- all_modules, support_macros = list_headers(defaults.headers())
- else:
- modules, support_macros = list_headers(root)
-
- if expand and not support_macros:
- raise ValueError("could not find support macros (obj_perm_sets.spt)")
-
- def o(msg):
- if output:
- output.write(msg)
-
- def parse_file(f, module, spt=None):
- global parse_file
- if debug:
- o("parsing file %s\n" % f)
- try:
- fd = open(f)
- txt = fd.read()
- fd.close()
- parse_file = f
- parse(txt, module, spt, debug)
- except IOError, e:
- return
- except ValueError, e:
- raise ValueError("error parsing file %s: %s" % (f, str(e)))
-
- spt = None
- if support_macros:
- o("Parsing support macros (%s): " % support_macros)
- spt = refpolicy.SupportMacros()
- parse_file(support_macros, spt)
-
- headers.children.append(spt)
-
- # FIXME: Total hack - add in can_exec rather than parse the insanity
- # of misc_macros. We are just going to pretend that this is an interface
- # to make the expansion work correctly.
- can_exec = refpolicy.Interface("can_exec")
- av = access.AccessVector(["$1","$2","file","execute_no_trans","open", "read",
- "getattr","lock","execute","ioctl"])
-
- can_exec.children.append(refpolicy.AVRule(av))
- headers.children.append(can_exec)
-
- o("done.\n")
-
- if output and not debug:
- status = util.ConsoleProgressBar(sys.stdout, steps=len(modules))
- status.start("Parsing interface files")
-
- failures = []
- for x in modules:
- m = refpolicy.Module()
- m.name = x[0]
- try:
- if expand:
- parse_file(x[1], m, spt)
- else:
- parse_file(x[1], m)
- except ValueError, e:
- o(str(e) + "\n")
- failures.append(x[1])
- continue
-
- headers.children.append(m)
- if output and not debug:
- status.step()
-
- if len(failures):
- o("failed to parse some headers: %s" % ", ".join(failures))
-
- return headers
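-# A typical call, assuming a refpolicy-style headers tree such as the one
-# returned by defaults.headers():
-#
-#   headers = parse_headers(defaults.headers(), output=sys.stdout)
-#   for mod in headers.modules():
-#       print mod.name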
diff --git a/prebuilts/lib/sepolgen/refpolicy.py b/prebuilts/lib/sepolgen/refpolicy.py
deleted file mode 100644
index b8ed5c19..00000000
--- a/prebuilts/lib/sepolgen/refpolicy.py
+++ /dev/null
@@ -1,917 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-import string
-import itertools
-import selinux
-
-# OVERVIEW
-#
-# This file contains objects and functions used to represent the reference
-# policy (including the headers, M4 macros, and policy language statements).
-#
-# This representation is very different from the semantic representation
-# used in libsepol. Instead, it is a more typical abstract representation
-# used by the first stage of compilers. It is basically a parse tree.
-#
-# This choice is intentional as it allows us to handle the unprocessed
-# M4 statements - including the $1 style arguments - and to more easily generate
-# the data structures that we need for policy generation.
-#
-
-# Constants for referring to fields
-SRC_TYPE = 0
-TGT_TYPE = 1
-OBJ_CLASS = 2
-PERMS = 3
-ROLE = 4
-DEST_TYPE = 5
-
-# String representations of the above constants
-field_to_str = ["source", "target", "object", "permission", "role", "destination" ]
-str_to_field = { "source" : SRC_TYPE, "target" : TGT_TYPE, "object" : OBJ_CLASS,
- "permission" : PERMS, "role" : ROLE, "destination" : DEST_TYPE }
-
-# Base Classes
-
-class PolicyBase:
- def __init__(self, parent=None):
- self.parent = parent
- self.comment = None
-
-class Node(PolicyBase):
- """Base class objects produced from parsing the reference policy.
-
- The Node class is used as the base class for any non-leaf
- object produced by parsing the reference policy. This object
- should contain a reference to its parent (or None for a top-level
- object) and 0 or more children.
-
- The general idea here is to have a very simple tree structure. Children
- are not separated out by type. Instead the tree structure represents
- fairly closely the real structure of the policy statements.
-
- The object should be iterable - by default over all children but
- subclasses are free to provide additional iterators over a subset
- of their children (see Interface for example).
- """
-
- def __init__(self, parent=None):
- PolicyBase.__init__(self, parent)
- self.children = []
-
- def __iter__(self):
- return iter(self.children)
-
- # Not all of the iterators will return something on all Nodes, but
- # they won't explode either. Putting them here is just easier.
-
- # Top level nodes
-
- def nodes(self):
- return itertools.ifilter(lambda x: isinstance(x, Node), walktree(self))
-
- def modules(self):
- return itertools.ifilter(lambda x: isinstance(x, Module), walktree(self))
-
- def interfaces(self):
- return itertools.ifilter(lambda x: isinstance(x, Interface), walktree(self))
-
- def templates(self):
- return itertools.ifilter(lambda x: isinstance(x, Template), walktree(self))
-
- def support_macros(self):
- return itertools.ifilter(lambda x: isinstance(x, SupportMacros), walktree(self))
-
- # Common policy statements
-
- def module_declarations(self):
- return itertools.ifilter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))
-
- def interface_calls(self):
- return itertools.ifilter(lambda x: isinstance(x, InterfaceCall), walktree(self))
-
- def avrules(self):
- return itertools.ifilter(lambda x: isinstance(x, AVRule), walktree(self))
-
- def typerules(self):
- return itertools.ifilter(lambda x: isinstance(x, TypeRule), walktree(self))
-
- def typeattributes(self):
- """Iterate over all of the TypeAttribute children of this Interface."""
- return itertools.ifilter(lambda x: isinstance(x, TypeAttribute), walktree(self))
-
- def roleattributes(self):
- """Iterate over all of the RoleAttribute children of this Interface."""
- return itertools.ifilter(lambda x: isinstance(x, RoleAttribute), walktree(self))
-
- def requires(self):
- return itertools.ifilter(lambda x: isinstance(x, Require), walktree(self))
-
- def roles(self):
- return itertools.ifilter(lambda x: isinstance(x, Role), walktree(self))
-
- def role_allows(self):
- return itertools.ifilter(lambda x: isinstance(x, RoleAllow), walktree(self))
-
- def role_types(self):
- return itertools.ifilter(lambda x: isinstance(x, RoleType), walktree(self))
-
- def __str__(self):
- if self.comment:
- return str(self.comment) + "\n" + self.to_string()
- else:
- return self.to_string()
-
- def __repr__(self):
- return "<%s(%s)>" % (self.__class__.__name__, self.to_string())
-
- def to_string(self):
- return ""
-
-
-class Leaf(PolicyBase):
- def __init__(self, parent=None):
- PolicyBase.__init__(self, parent)
-
- def __str__(self):
- if self.comment:
- return str(self.comment) + "\n" + self.to_string()
- else:
- return self.to_string()
-
- def __repr__(self):
- return "<%s(%s)>" % (self.__class__.__name__, self.to_string())
-
- def to_string(self):
- return ""
-
-
-
-# Utility functions
-
-def walktree(node, depthfirst=True, showdepth=False, type=None):
- """Iterate over a Node and its Children.
-
- The walktree function iterates over a tree containing Nodes and
- leaf objects. The iteration can perform a depth first or a breadth
- first traversal of the tree (controlled by the depthfirst
- parameter). The passed in node will be returned.
-
- This function will only work correctly for trees - arbitrary graphs
- will likely cause infinite looping.
- """
- # We control depth first versus breadth first by
- # how we pop items off of the node stack.
- if depthfirst:
- index = -1
- else:
- index = 0
-
- stack = [(node, 0)]
- while len(stack) > 0:
- cur, depth = stack.pop(index)
- if showdepth:
- yield cur, depth
- else:
- yield cur
-
- # If the node is not a Node instance it must
- # be a leaf - so no need to add it to the stack
- if isinstance(cur, Node):
- items = []
- i = len(cur.children) - 1
- while i >= 0:
- if type is None or isinstance(cur.children[i], type):
- items.append((cur.children[i], depth + 1))
- i -= 1
-
- stack.extend(items)
-
-def walknode(node, type=None):
- """Iterate over the direct children of a Node.
-
- The walktree function iterates over the children of a Node.
- Unlike walktree it does not return the passed in node or
- the children of any Node objects (that is, it does not go
- beyond the current level in the tree).
- """
- for x in node:
- if type is None or isinstance(x, type):
- yield x
-
-
-def list_to_space_str(s, cont=('{', '}')):
- """Convert a set (or any sequence type) into a string representation
- formatted to match SELinux space separated list conventions.
-
- For example the list ['read', 'write'] would be converted into:
- '{ read write }'
- """
- l = len(s)
- str = ""
- if l < 1:
- raise ValueError("cannot convert 0 len set to string")
- str = " ".join(s)
- if l == 1:
- return str
- else:
- return cont[0] + " " + str + " " + cont[1]
-
-def list_to_comma_str(s):
- l = len(s)
- if l < 1:
- raise ValueError("cannot conver 0 len set to comma string")
-
- return ", ".join(s)
-
-# Basic SELinux types
-
-class IdSet(set):
- def __init__(self, list=None):
- if list:
- set.__init__(self, list)
- else:
- set.__init__(self)
- self.compliment = False
-
- def to_space_str(self):
- return list_to_space_str(self)
-
- def to_comma_str(self):
- return list_to_comma_str(self)
-
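-# For example (element ordering within the result is not guaranteed):
-#
-#   IdSet(["read", "write"]).to_space_str()   ->  "{ read write }"
-#   IdSet(["read", "write"]).to_comma_str()   ->  "read, write"
-#   IdSet(["read"]).to_space_str()            ->  "read"
-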
-class SecurityContext(Leaf):
- """An SELinux security context with optional MCS / MLS fields."""
- def __init__(self, context=None, parent=None):
- """Create a SecurityContext object, optionally from a string.
-
- Parameters:
- [context] - string representing a security context. Same format
- as a string passed to the from_string method.
- """
- Leaf.__init__(self, parent)
- self.user = ""
- self.role = ""
- self.type = ""
- self.level = None
- if context is not None:
- self.from_string(context)
-
- def from_string(self, context):
- """Parse a string representing a context into a SecurityContext.
-
- The string should be in the standard format - e.g.,
- 'user:role:type:level'.
-
- Raises ValueError if the string is not parsable as a security context.
- """
- fields = context.split(":")
- if len(fields) < 3:
- raise ValueError("context string [%s] not in a valid format" % context)
-
- self.user = fields[0]
- self.role = fields[1]
- self.type = fields[2]
- if len(fields) > 3:
- # FUTURE - normalize level fields to allow more comparisons to succeed.
- self.level = string.join(fields[3:], ':')
- else:
- self.level = None
-
- def __eq__(self, other):
- """Compare two SecurityContext objects - all fields must be exactly the
- the same for the comparison to work. It is possible for the level fields
- to be semantically the same yet syntactically different - in this case
- this function will return false.
- """
- return self.user == other.user and \
- self.role == other.role and \
- self.type == other.type and \
- self.level == other.level
-
- def to_string(self, default_level=None):
- """Return a string representing this security context.
-
- By default, the string will contain an MCS / MLS level
- potentially from the default which is passed in if none was
- set.
-
- Arguments:
- default_level - the default level to use if self.level is an
- empty string.
-
- Returns:
- A string representing the security context in the form
- 'user:role:type:level'.
- """
- fields = [self.user, self.role, self.type]
- if self.level is None:
- if default_level is None:
- if selinux.is_selinux_mls_enabled() == 1:
- fields.append("s0")
- else:
- fields.append(default_level)
- else:
- fields.append(self.level)
- return ":".join(fields)
-
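-# For example:
-#
-#   ctx = SecurityContext("system_u:object_r:etc_t:s0")
-#   ctx.user, ctx.role, ctx.type, ctx.level
-#       -> ("system_u", "object_r", "etc_t", "s0")
-#   ctx.to_string() -> "system_u:object_r:etc_t:s0"
-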
-class ObjectClass(Leaf):
- """SELinux object class and permissions.
-
- This class is a basic representation of an SELinux object
- class - it does not represent separate common permissions -
- just the union of the common and class specific permissions.
- It is meant to be convenient for policy generation.
- """
- def __init__(self, name="", parent=None):
- Leaf.__init__(self, parent)
- self.name = name
- self.perms = IdSet()
-
-# Basic statements
-
-class TypeAttribute(Leaf):
- """SElinux typeattribute statement.
-
- This class represents a typeattribute statement.
- """
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.type = ""
- self.attributes = IdSet()
-
- def to_string(self):
- return "typeattribute %s %s;" % (self.type, self.attributes.to_comma_str())
-
-class RoleAttribute(Leaf):
- """SElinux roleattribute statement.
-
- This class represents a roleattribute statement.
- """
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.role = ""
- self.roleattributes = IdSet()
-
- def to_string(self):
- return "roleattribute %s %s;" % (self.role, self.roleattributes.to_comma_str())
-
-
-class Role(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.role = ""
- self.types = IdSet()
-
- def to_string(self):
- s = ""
- for t in self.types:
- s += "role %s types %s;\n" % (self.role, t)
- return s
-
-class Type(Leaf):
- def __init__(self, name="", parent=None):
- Leaf.__init__(self, parent)
- self.name = name
- self.attributes = IdSet()
- self.aliases = IdSet()
-
- def to_string(self):
- s = "type %s" % self.name
- if len(self.aliases) > 0:
- s = s + "alias %s" % self.aliases.to_space_str()
- if len(self.attributes) > 0:
- s = s + ", %s" % self.attributes.to_comma_str()
- return s + ";"
-
-class TypeAlias(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.type = ""
- self.aliases = IdSet()
-
- def to_string(self):
- return "typealias %s alias %s;" % (self.type, self.aliases.to_space_str())
-
-class Attribute(Leaf):
- def __init__(self, name="", parent=None):
- Leaf.__init__(self, parent)
- self.name = name
-
- def to_string(self):
- return "attribute %s;" % self.name
-
-class Attribute_Role(Leaf):
- def __init__(self, name="", parent=None):
- Leaf.__init__(self, parent)
- self.name = name
-
- def to_string(self):
- return "attribute_role %s;" % self.name
-
-
-# Classes representing rules
-
-class AVRule(Leaf):
- """SELinux access vector (AV) rule.
-
- The AVRule class represents all varieties of AV rules including
- allow, dontaudit, auditallow, and neverallow (indicated by the flags
- self.ALLOW, self.DONTAUDIT, self.AUDITALLOW, and self.NEVERALLOW
- respectively).
-
- The source and target types, object classes, and perms are all represented
- by sets containing strings. Sets are used to make it simple to add
- strings repeatedly while avoiding duplicates.
-
- No checking is done to make certain that the symbols are valid or
- consistent (e.g., perms that don't match the object classes). It is
- even possible to put invalid types like '$1' into the rules to allow
- storage of the reference policy interfaces.
- """
- ALLOW = 0
- DONTAUDIT = 1
- AUDITALLOW = 2
- NEVERALLOW = 3
-
- def __init__(self, av=None, parent=None):
- Leaf.__init__(self, parent)
- self.src_types = IdSet()
- self.tgt_types = IdSet()
- self.obj_classes = IdSet()
- self.perms = IdSet()
- self.rule_type = self.ALLOW
- if av:
- self.from_av(av)
-
- def __rule_type_str(self):
- if self.rule_type == self.ALLOW:
- return "allow"
- elif self.rule_type == self.DONTAUDIT:
- return "dontaudit"
- elif self.rule_type == self.AUDITALLOW:
- return "auditallow"
- else:
- return "neverallow"
-
- def from_av(self, av):
- """Add the access from an access vector to this allow
- rule.
- """
- self.src_types.add(av.src_type)
- if av.src_type == av.tgt_type:
- self.tgt_types.add("self")
- else:
- self.tgt_types.add(av.tgt_type)
- self.obj_classes.add(av.obj_class)
- self.perms.update(av.perms)
-
- def to_string(self):
- """Return a string representation of the rule
- that is a valid policy language representation (assuming
- that the types, object class, etc. are valid).
- """
- return "%s %s %s:%s %s;" % (self.__rule_type_str(),
- self.src_types.to_space_str(),
- self.tgt_types.to_space_str(),
- self.obj_classes.to_space_str(),
- self.perms.to_space_str())
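-
-# For example, building an allow rule from an access vector (hypothetical
-# type names; permission ordering within the braces is not guaranteed):
-#
-#   av = access.AccessVector(["foo_t", "foo_conf_t", "file", "read", "open"])
-#   AVRule(av).to_string()  ->  "allow foo_t foo_conf_t:file { read open };"
-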
-class TypeRule(Leaf):
- """SELinux type rules.
-
- This class is very similar to the AVRule class, but is for representing
- the type rules (type_trans, type_change, and type_member). The major
- difference is the lack of perms and the single destination type.
- """
- TYPE_TRANSITION = 0
- TYPE_CHANGE = 1
- TYPE_MEMBER = 2
-
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.src_types = IdSet()
- self.tgt_types = IdSet()
- self.obj_classes = IdSet()
- self.dest_type = ""
- self.rule_type = self.TYPE_TRANSITION
-
- def __rule_type_str(self):
- if self.rule_type == self.TYPE_TRANSITION:
- return "type_transition"
- elif self.rule_type == self.TYPE_CHANGE:
- return "type_change"
- else:
- return "type_member"
-
- def to_string(self):
- return "%s %s %s:%s %s;" % (self.__rule_type_str(),
- self.src_types.to_space_str(),
- self.tgt_types.to_space_str(),
- self.obj_classes.to_space_str(),
- self.dest_type)
-
-class RoleAllow(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.src_roles = IdSet()
- self.tgt_roles = IdSet()
-
- def to_string(self):
- return "allow %s %s;" % (self.src_roles.to_comma_str(),
- self.tgt_roles.to_comma_str())
-
-class RoleType(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.role = ""
- self.types = IdSet()
-
- def to_string(self):
- s = ""
- for t in self.types:
- s += "role %s types %s;\n" % (self.role, t)
- return s
-
-class ModuleDeclaration(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.name = ""
- self.version = ""
- self.refpolicy = False
-
- def to_string(self):
- if self.refpolicy:
- return "policy_module(%s, %s)" % (self.name, self.version)
- else:
- return "module %s %s;" % (self.name, self.version)
-
-class Conditional(Node):
- def __init__(self, parent=None):
- Node.__init__(self, parent)
- self.cond_expr = []
-
- def to_string(self):
- return "[If %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
-
-class Bool(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.name = ""
- self.state = False
-
- def to_string(self):
- s = "bool %s " % self.name
- if self.state:
- return s + "true"
- else:
- return s + "false"
-
-class InitialSid(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.name = ""
- self.context = None
-
- def to_string(self):
- return "sid %s %s" % (self.name, str(self.context))
-
-class GenfsCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.filesystem = ""
- self.path = ""
- self.context = None
-
- def to_string(self):
- return "genfscon %s %s %s" % (self.filesystem, self.path, str(self.context))
-
-class FilesystemUse(Leaf):
- XATTR = 1
- TRANS = 2
- TASK = 3
-
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.type = self.XATTR
- self.filesystem = ""
- self.context = None
-
- def to_string(self):
- s = ""
- if self.type == self.XATTR:
- s = "fs_use_xattr "
- elif self.type == self.TRANS:
- s = "fs_use_trans "
- elif self.type == self.TASK:
- s = "fs_use_task "
-
- return "%s %s %s;" % (s, self.filesystem, str(self.context))
-
-class PortCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.port_type = ""
- self.port_number = ""
- self.context = None
-
- def to_string(self):
- return "portcon %s %s %s" % (self.port_type, self.port_number, str(self.context))
-
-class NodeCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.start = ""
- self.end = ""
- self.context = None
-
- def to_string(self):
- return "nodecon %s %s %s" % (self.start, self.end, str(self.context))
-
-class NetifCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.interface = ""
- self.interface_context = None
- self.packet_context = None
-
- def to_string(self):
- return "netifcon %s %s %s" % (self.interface, str(self.interface_context),
- str(self.packet_context))
-class PirqCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.pirq_number = ""
- self.context = None
-
- def to_string(self):
- return "pirqcon %s %s" % (self.pirq_number, str(self.context))
-
-class IomemCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.device_mem = ""
- self.context = None
-
- def to_string(self):
- return "iomemcon %s %s" % (self.device_mem, str(self.context))
-
-class IoportCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.ioport = ""
- self.context = None
-
- def to_string(self):
- return "ioportcon %s %s" % (self.ioport, str(self.context))
-
-class PciDeviceCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.device = ""
- self.context = None
-
- def to_string(self):
- return "pcidevicecon %s %s" % (self.device, str(self.context))
-
-class DeviceTreeCon(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.path = ""
- self.context = None
-
- def to_string(self):
- return "devicetreecon %s %s" % (self.path, str(self.context))
-
-# Reference policy specific types
-
-def print_tree(head):
- for node, depth in walktree(head, showdepth=True):
- s = ""
- for i in range(depth):
- s = s + "\t"
- print s + str(node)
-
-
-class Headers(Node):
- def __init__(self, parent=None):
- Node.__init__(self, parent)
-
- def to_string(self):
- return "[Headers]"
-
-
-class Module(Node):
- def __init__(self, parent=None):
- Node.__init__(self, parent)
-
- def to_string(self):
- return ""
-
-class Interface(Node):
- """A reference policy interface definition.
-
- This class represents a reference policy interface definition.
- """
- def __init__(self, name="", parent=None):
- Node.__init__(self, parent)
- self.name = name
-
- def to_string(self):
- return "[Interface name: %s]" % self.name
-
-class TunablePolicy(Node):
- def __init__(self, parent=None):
- Node.__init__(self, parent)
- self.cond_expr = []
-
- def to_string(self):
- return "[Tunable Policy %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
-
-class Template(Node):
- def __init__(self, name="", parent=None):
- Node.__init__(self, parent)
- self.name = name
-
- def to_string(self):
- return "[Template name: %s]" % self.name
-
-class IfDef(Node):
- def __init__(self, name="", parent=None):
- Node.__init__(self, parent)
- self.name = name
-
- def to_string(self):
- return "[Ifdef name: %s]" % self.name
-
-class InterfaceCall(Leaf):
- def __init__(self, ifname="", parent=None):
- Leaf.__init__(self, parent)
- self.ifname = ifname
- self.args = []
- self.comments = []
-
- def matches(self, other):
- if self.ifname != other.ifname:
- return False
- if len(self.args) != len(other.args):
- return False
- for a,b in zip(self.args, other.args):
- if a != b:
- return False
- return True
-
- def to_string(self):
- s = "%s(" % self.ifname
- i = 0
- for a in self.args:
- if isinstance(a, list):
- str = list_to_space_str(a)
- else:
- str = a
-
- if i != 0:
- s = s + ", %s" % str
- else:
- s = s + str
- i += 1
- return s + ")"
-
-class OptionalPolicy(Node):
- def __init__(self, parent=None):
- Node.__init__(self, parent)
-
- def to_string(self):
- return "[Optional Policy]"
-
-class SupportMacros(Node):
- def __init__(self, parent=None):
- Node.__init__(self, parent)
- self.map = None
-
- def to_string(self):
- return "[Support Macros]"
-
- def __expand_perm(self, perm):
- # Recursive expansion - the assumption is that these
- # are ordered correctly so that no macro is used before
- # it is defined
- s = set()
- if self.map.has_key(perm):
- for p in self.by_name(perm):
- s.update(self.__expand_perm(p))
- else:
- s.add(perm)
- return s
-
- def __gen_map(self):
- self.map = {}
- for x in self:
- exp_perms = set()
- for perm in x.perms:
- exp_perms.update(self.__expand_perm(perm))
- self.map[x.name] = exp_perms
-
- def by_name(self, name):
- if not self.map:
- self.__gen_map()
- return self.map[name]
-
- def has_key(self, name):
- if not self.map:
- self.__gen_map()
- return self.map.has_key(name)
-
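The recursive expansion in __expand_perm above can be hard to picture from the class alone, so here is a standalone sketch of the same idea using a plain dict; the macro names and permissions are invented, not taken from any real policy.

    # Hypothetical macro map: a macro may reference other macros.
    macros = {
        "rw_file_perms": ["read", "write", "getattr"],
        "manage_file_perms": ["rw_file_perms", "create", "unlink"],
    }

    def expand(perm):
        # Same shape as SupportMacros.__expand_perm: recurse on macro names,
        # collect concrete permissions in a set.
        s = set()
        if perm in macros:
            for p in macros[perm]:
                s.update(expand(p))
        else:
            s.add(perm)
        return s

    # expand("manage_file_perms") yields read, write, getattr, create, unlink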
-class Require(Leaf):
- def __init__(self, parent=None):
- Leaf.__init__(self, parent)
- self.types = IdSet()
- self.obj_classes = { }
- self.roles = IdSet()
- self.data = IdSet()
- self.users = IdSet()
-
- def add_obj_class(self, obj_class, perms):
- p = self.obj_classes.setdefault(obj_class, IdSet())
- p.update(perms)
-
-
- def to_string(self):
- s = []
- s.append("require {")
- for type in self.types:
- s.append("\ttype %s;" % type)
- for obj_class, perms in self.obj_classes.items():
- s.append("\tclass %s %s;" % (obj_class, perms.to_space_str()))
- for role in self.roles:
- s.append("\trole %s;" % role)
- for bool in self.data:
- s.append("\tbool %s;" % bool)
- for user in self.users:
- s.append("\tuser %s;" % user)
- s.append("}")
-
- # Handle empty requires
- if len(s) == 2:
- return ""
-
- return "\n".join(s)
-
-
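For the Require block, this hedged sketch shows the kind of text to_string() emits, again assuming the sepolgen package is importable; the type and class names are illustrative only.

    from sepolgen import refpolicy

    req = refpolicy.Require()
    req.types.add("mydomain_t")
    req.add_obj_class("file", ["read", "write"])
    # Emits something like:
    # require {
    #         type mydomain_t;
    #         class file { read write };
    # }
    print req.to_string()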
-class ObjPermSet:
- def __init__(self, name):
- self.name = name
- self.perms = set()
-
- def to_string(self):
- return "define(`%s', `%s')" % (self.name, self.perms.to_space_str())
-
-class ClassMap:
- def __init__(self, obj_class, perms):
- self.obj_class = obj_class
- self.perms = perms
-
- def to_string(self):
- return self.obj_class + ": " + self.perms
-
-class Comment:
- def __init__(self, l=None):
- if l:
- self.lines = l
- else:
- self.lines = []
-
- def to_string(self):
- # If there are no lines, treat this as a spacer between
- # policy statements and return a new line.
- if len(self.lines) == 0:
- return ""
- else:
- out = []
- for line in self.lines:
- out.append("#" + line)
- return "\n".join(out)
-
- def merge(self, other):
- if len(other.lines):
- for line in other.lines:
- if line != "":
- self.lines.append(line)
-
- def __str__(self):
- return self.to_string()
-
-
diff --git a/prebuilts/lib/sepolgen/sepolgeni18n.py b/prebuilts/lib/sepolgen/sepolgeni18n.py
deleted file mode 100644
index 998c4356..00000000
--- a/prebuilts/lib/sepolgen/sepolgeni18n.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-try:
- import gettext
- t = gettext.translation( 'yumex' )
- _ = t.gettext
-except:
- def _(str):
- return str
diff --git a/prebuilts/lib/sepolgen/util.py b/prebuilts/lib/sepolgen/util.py
deleted file mode 100644
index 74a11f51..00000000
--- a/prebuilts/lib/sepolgen/util.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
-#
-# Copyright (C) 2006 Red Hat
-# see file 'COPYING' for use and warranty information
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 only
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-class ConsoleProgressBar:
- def __init__(self, out, steps=100, indicator='#'):
- self.blocks = 0
- self.current = 0
- self.steps = steps
- self.indicator = indicator
- self.out = out
- self.done = False
-
- def start(self, message=None):
- self.done = False
- if message:
- self.out.write('\n%s:\n' % message)
- self.out.write('%--10---20---30---40---50---60---70---80---90--100\n')
-
- def step(self, n=1):
- self.current += n
-
- old = self.blocks
- self.blocks = int(round(self.current / float(self.steps) * 100) / 2)
-
- if self.blocks > 50:
- self.blocks = 50
-
- new = self.blocks - old
-
- self.out.write(self.indicator * new)
- self.out.flush()
-
- if self.blocks == 50 and not self.done:
- self.done = True
- self.out.write("\n")
-
-def set_to_list(s):
- l = []
- l.extend(s)
- return l
-
-def first(s, sorted=False):
- """
- Return the first element of a set.
-
- It is sometimes useful to return the first element from a set but,
- because sets are not indexable, this is rather hard. This function
- will return the first element from a set. If sorted is True, then
- the set will first be sorted (making this an expensive operation).
- Otherwise a random element will be returned (as sets are not ordered).
- """
- if not len(s):
- raise IndexError("empty containter")
-
- if sorted:
- l = set_to_list(s)
- l.sort()
- return l[0]
- else:
- for x in s:
- return x
-
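A brief usage note for first(), assuming the sepolgen package is importable:

    from sepolgen.util import first

    # With sorted=True the result is deterministic; without it, any element
    # of the set may come back.
    print first(set(["b", "a", "c"]), sorted=True)   # prints: a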
-if __name__ == "__main__":
- import sys
- import time
- p = ConsoleProgressBar(sys.stdout, steps=999)
- p.start("computing pi")
- for i in range(999):
- p.step()
- time.sleep(0.001)
-
diff --git a/prebuilts/lib/sepolgen/yacc.py b/prebuilts/lib/sepolgen/yacc.py
deleted file mode 100644
index bc4536de..00000000
--- a/prebuilts/lib/sepolgen/yacc.py
+++ /dev/null
@@ -1,2209 +0,0 @@
-#-----------------------------------------------------------------------------
-# ply: yacc.py
-#
-# Author(s): David M. Beazley (dave@dabeaz.com)
-#
-# Copyright (C) 2001-2006, David M. Beazley
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See the file COPYING for a complete copy of the LGPL.
-#
-#
-# This implements an LR parser that is constructed from grammar rules defined
-# as Python functions. The grammar is specified by supplying the BNF inside
-# Python documentation strings. The inspiration for this technique was borrowed
-# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
-# Spark and the GNU bison utility.
-#
-# The current implementation is only somewhat object-oriented. The
-# LR parser itself is defined in terms of an object (which allows multiple
-# parsers to co-exist). However, most of the variables used during table
-# construction are defined in terms of global variables. Users shouldn't
-# notice unless they are trying to define multiple parsers at the same
-# time using threads (in which case they should have their head examined).
-#
-# This implementation supports both SLR and LALR(1) parsing. LALR(1)
-# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
-# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
-# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
-# by the more efficient DeRemer and Pennello algorithm.
-#
-# :::::::: WARNING :::::::
-#
-# Construction of LR parsing tables is fairly complicated and expensive.
-# To make this module run fast, a *LOT* of work has been put into
-# optimization---often at the expense of readability and what might be
-# considered good Python "coding style." Modify the code at your
-# own risk!
-# ----------------------------------------------------------------------------
-
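To make the docstring-as-grammar idea above concrete, here is a minimal, hedged sketch of how a grammar might be fed to this module. It assumes the sibling lex.py from this same directory is importable as lex and this file as yacc; the token names and rules are invented for illustration and are not part of sepolgen's own grammar.

    import lex
    import yacc

    tokens = ('NUMBER', 'PLUS')

    t_PLUS = r'\+'
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        raise SyntaxError("illegal character %r" % t.value[0])

    def p_expression_plus(p):
        'expression : expression PLUS term'
        p[0] = p[1] + p[3]

    def p_expression_term(p):
        'expression : term'
        p[0] = p[1]

    def p_term_number(p):
        'term : NUMBER'
        p[0] = p[1]

    def p_error(p):
        raise SyntaxError("parse error at %r" % (p,))

    lexer = lex.lex()        # build the lexer from the t_ rules above
    parser = yacc.yacc()     # build the LR tables from the p_ rules above
    print parser.parse("1 + 2 + 3", lexer=lexer)   # prints: 6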
-__version__ = "2.2"
-
-#-----------------------------------------------------------------------------
-# === User configurable parameters ===
-#
-# Change these to modify the default behavior of yacc (if you wish)
-#-----------------------------------------------------------------------------
-
-yaccdebug = 1 # Debugging mode. If set, yacc generates a
- # 'parser.out' file in the current directory
-
-debug_file = 'parser.out' # Default name of the debugging file
-tab_module = 'parsetab' # Default name of the table module
-default_lr = 'LALR' # Default LR table generation method
-
-error_count = 3 # Number of symbols that must be shifted to leave recovery mode
-
-import re, types, sys, cStringIO, hashlib, os.path
-
-# Exception raised for yacc-related errors
-class YaccError(Exception): pass
-
-#-----------------------------------------------------------------------------
-# === LR Parsing Engine ===
-#
-# The following classes are used for the LR parser itself. These are not
-# used during table construction and are independent of the actual LR
-# table generation algorithm
-#-----------------------------------------------------------------------------
-
-# This class is used to hold non-terminal grammar symbols during parsing.
-# It normally has the following attributes set:
-# .type = Grammar symbol type
-# .value = Symbol value
-# .lineno = Starting line number
-# .endlineno = Ending line number (optional, set automatically)
-# .lexpos = Starting lex position
-# .endlexpos = Ending lex position (optional, set automatically)
-
-class YaccSymbol:
- def __str__(self): return self.type
- def __repr__(self): return str(self)
-
-# This class is a wrapper around the objects actually passed to each
-# grammar rule. Index lookup and assignment actually assign the
-# .value attribute of the underlying YaccSymbol object.
-# The lineno() method returns the line number of a given
-# item (or 0 if not defined). The linespan() method returns
-# a tuple of (startline,endline) representing the range of lines
-# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
-# representing the range of positional information for a symbol.
-
-class YaccProduction:
- def __init__(self,s,stack=None):
- self.slice = s
- self.pbstack = []
- self.stack = stack
-
- def __getitem__(self,n):
- if type(n) == types.IntType:
- if n >= 0: return self.slice[n].value
- else: return self.stack[n].value
- else:
- return [s.value for s in self.slice[n.start:n.stop:n.step]]
-
- def __setitem__(self,n,v):
- self.slice[n].value = v
-
- def __len__(self):
- return len(self.slice)
-
- def lineno(self,n):
- return getattr(self.slice[n],"lineno",0)
-
- def linespan(self,n):
- startline = getattr(self.slice[n],"lineno",0)
- endline = getattr(self.slice[n],"endlineno",startline)
- return startline,endline
-
- def lexpos(self,n):
- return getattr(self.slice[n],"lexpos",0)
-
- def lexspan(self,n):
- startpos = getattr(self.slice[n],"lexpos",0)
- endpos = getattr(self.slice[n],"endlexpos",startpos)
- return startpos,endpos
-
- def pushback(self,n):
- if n <= 0:
- raise ValueError, "Expected a positive value"
- if n > (len(self.slice)-1):
- raise ValueError, "Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1)
- for i in range(0,n):
- self.pbstack.append(self.slice[-i-1])
-
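As a hedged illustration of the interface described in the comment above, a grammar rule function might use its YaccProduction argument like this; NAME, EQUALS, and the rule itself are invented and would have to exist in the enclosing grammar.

    def p_assignment(p):
        'assignment : NAME EQUALS expression'
        # Index lookups read the .value of the matched symbols; assigning to
        # p[0] sets the value of the assignment nonterminal itself.
        p[0] = (p[1], p[3])
        line = p.lineno(1)                      # line where NAME was seen (0 if unset)
        first_line, last_line = p.linespan(3)   # line range covered by the expression
        start_pos, end_pos = p.lexspan(3)       # lex position range of the expression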
-# The LR Parsing engine. This is defined as a class so that multiple parsers
-# can exist in the same process. A user never instantiates this directly.
-# Instead, the global yacc() function should be used to create a suitable Parser
-# object.
-
-class Parser:
- def __init__(self,magic=None):
-
- # This is a hack to keep users from trying to instantiate a Parser
- # object directly.
-
- if magic != "xyzzy":
- raise YaccError, "Can't instantiate Parser. Use yacc() instead."
-
- # Reset internal state
- self.productions = None # List of productions
- self.errorfunc = None # Error handling function
- self.action = { } # LR Action table
- self.goto = { } # LR goto table
- self.require = { } # Attribute require table
- self.method = "Unknown LR" # Table construction method used
-
- def errok(self):
- self.errorcount = 0
-
- def restart(self):
- del self.statestack[:]
- del self.symstack[:]
- sym = YaccSymbol()
- sym.type = '$end'
- self.symstack.append(sym)
- self.statestack.append(0)
-
- def parse(self,input=None,lexer=None,debug=0):
- lookahead = None # Current lookahead symbol
- lookaheadstack = [ ] # Stack of lookahead symbols
- actions = self.action # Local reference to action table
- goto = self.goto # Local reference to goto table
- prod = self.productions # Local reference to production list
- pslice = YaccProduction(None) # Production object passed to grammar rules
- pslice.parser = self # Parser object
- self.errorcount = 0 # Used during error recovery
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- import lex
- lexer = lex.lexer
-
- pslice.lexer = lexer
-
- # If input was supplied, pass to lexer
- if input:
- lexer.input(input)
-
- # Tokenize function
- get_token = lexer.token
-
- statestack = [ ] # Stack of parsing states
- self.statestack = statestack
- symstack = [ ] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = '$end'
- symstack.append(sym)
-
- while 1:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
- if debug > 1:
- print 'state', statestack[-1]
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = '$end'
- if debug:
- errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
-
- # Check the action table
- s = statestack[-1]
- ltype = lookahead.type
- t = actions.get((s,ltype),None)
-
- if debug > 1:
- print 'action', t
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- if ltype == '$end':
- # Error, end of input
- sys.stderr.write("yacc: Parse error. EOF\n")
- return
- statestack.append(t)
- if debug > 1:
- sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if self.errorcount > 0:
- self.errorcount -= 1
-
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
- if debug > 1:
- sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
- try:
- sym.lineno = targ[1].lineno
- sym.endlineno = getattr(targ[-1],"endlineno",targ[-1].lineno)
- sym.lexpos = targ[1].lexpos
- sym.endlexpos = getattr(targ[-1],"endlexpos",targ[-1].lexpos)
- except AttributeError:
- sym.lineno = 0
- del symstack[-plen:]
- del statestack[-plen:]
- else:
- sym.lineno = 0
- targ = [ sym ]
- pslice.slice = targ
- pslice.pbstack = []
- # Call the grammar rule with our special slice object
- p.func(pslice)
-
- # If there was a pushback, put that on the stack
- if pslice.pbstack:
- lookaheadstack.append(lookahead)
- for _t in pslice.pbstack:
- lookaheadstack.append(_t)
- lookahead = None
-
- symstack.append(sym)
- statestack.append(goto[statestack[-1],pname])
- continue
-
- if t == 0:
- n = symstack[-1]
- return getattr(n,"value",None)
- sys.stderr.write(errorlead + "\n")
-
- if t is None:
- if debug:
- sys.stderr.write(errorlead + "\n")
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the tokenstack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
- # In addition to pushing the error token, we call
- # the user defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if not self.errorcount:
- self.errorcount = error_count
- errtoken = lookahead
- if errtoken.type == '$end':
- errtoken = None # End of file!
- if self.errorfunc:
- global errok,token,restart
- errok = self.errok # Set some special functions available in error recovery
- token = get_token
- restart = self.restart
- tok = self.errorfunc(errtoken)
- del errok, token, restart # Delete special functions
-
- if not self.errorcount:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
- else: lineno = 0
- if lineno:
- sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
- else:
- sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
- else:
- sys.stderr.write("yacc: Parse error in input. EOF\n")
- return
-
- else:
- self.errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != '$end':
- lookahead = None
- errtoken = None
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == '$end':
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- lookahead = None
- continue
- t = YaccSymbol()
- t.type = 'error'
- if hasattr(lookahead,"lineno"):
- t.lineno = lookahead.lineno
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- symstack.pop()
- statestack.pop()
-
- continue
-
- # Call an error function here
- raise RuntimeError, "yacc: internal parser error!!!\n"
-
-# -----------------------------------------------------------------------------
-# === Parser Construction ===
-#
-# The following functions and variables are used to implement the yacc() function
-# itself. This is pretty hairy stuff involving lots of error checking,
-# construction of LR items, kernels, and so forth. Although a lot of
-# this work is done using global variables, the resulting Parser object
-# is completely self contained--meaning that it is safe to repeatedly
-# call yacc() with different grammars in the same application.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# validate_file()
-#
-# This function checks to see if there are duplicated p_rulename() functions
-# in the parser module file. Without this function, it is really easy for
-# users to make mistakes by cutting and pasting code fragments (and it's a real
-# bugger to try and figure out why the resulting parser doesn't work). Therefore,
-# we just do a little regular expression pattern matching of def statements
-# to try and detect duplicates.
-# -----------------------------------------------------------------------------
-
-def validate_file(filename):
- base,ext = os.path.splitext(filename)
- if ext != '.py': return 1 # No idea. Assume it's okay.
-
- try:
- f = open(filename)
- lines = f.readlines()
- f.close()
- except IOError:
- return 1 # Oh well
-
- # Match def p_funcname(
- fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
- counthash = { }
- linen = 1
- noerror = 1
- for l in lines:
- m = fre.match(l)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
- noerror = 0
- linen += 1
- return noerror
-
-# This function looks for functions that might be grammar rules, but which don't have the proper p_ prefix.
-def validate_dict(d):
- for n,v in d.items():
- if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
- if n[0:2] == 't_': continue
-
- if n[0:2] == 'p_':
- sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
- if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
- try:
- doc = v.__doc__.split(" ")
- if doc[1] == ':':
- sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
- except StandardError:
- pass
-
-# -----------------------------------------------------------------------------
-# === GRAMMAR FUNCTIONS ===
-#
-# The following global variables and functions are used to store, manipulate,
-# and verify the grammar rules specified by the user.
-# -----------------------------------------------------------------------------
-
-# Initialize all of the global variables used during grammar construction
-def initialize_vars():
- global Productions, Prodnames, Prodmap, Terminals
- global Nonterminals, First, Follow, Precedence, LRitems
- global Errorfunc, Signature, Requires
-
- Productions = [None] # A list of all of the productions. The first
- # entry is always reserved for the purpose of
- # building an augmented grammar
-
- Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
- # productions of that nonterminal.
-
- Prodmap = { } # A dictionary that is only used to detect duplicate
- # productions.
-
- Terminals = { } # A dictionary mapping the names of terminal symbols to a
- # list of the rules where they are used.
-
- Nonterminals = { } # A dictionary mapping names of nonterminals to a list
- # of rule numbers where they are used.
-
- First = { } # A dictionary of precomputed FIRST(x) symbols
-
- Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
-
- Precedence = { } # Precedence rules for each terminal. Contains tuples of the
- # form ('right',level) or ('nonassoc', level) or ('left',level)
-
- LRitems = [ ] # A list of all LR items for the grammar. These are the
- # productions with the "dot" like E -> E . PLUS E
-
- Errorfunc = None # User defined error handler
-
- Signature = hashlib.sha256() # Digital signature of the grammar rules, precedence
- # and other information. Used to determine when a
- # parsing table needs to be regenerated.
-
- Requires = { } # Requires list
-
- # File objects used when creating the parser.out debugging file
- global _vf, _vfc
- _vf = cStringIO.StringIO()
- _vfc = cStringIO.StringIO()
-
-# -----------------------------------------------------------------------------
-# class Production:
-#
-# This class stores the raw information about a single production or grammar rule.
-# It has a few required attributes:
-#
-# name - Name of the production (nonterminal)
-# prod - A list of symbols making up its production
-# number - Production number.
-#
-# In addition, a few additional attributes are used to help with debugging or
-# optimization of table generation.
-#
-# file - File where production action is defined.
-# lineno - Line number where action is defined
-# func - Action function
-# prec - Precedence level
-# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
-# then lr_next refers to 'E -> E PLUS . E'
-# lr_index - LR item index (location of the ".") in the prod list.
-# lookaheads - LALR lookahead symbols for this item
-# len - Length of the production (number of symbols on right hand side)
-# -----------------------------------------------------------------------------
-
-class Production:
- def __init__(self,**kw):
- for k,v in kw.items():
- setattr(self,k,v)
- self.lr_index = -1
- self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
- self.lr1_added = 0 # Flag indicating whether or not added to LR1
- self.usyms = [ ]
- self.lookaheads = { }
- self.lk_added = { }
- self.setnumbers = [ ]
-
- def __str__(self):
- if self.prod:
- s = "%s -> %s" % (self.name," ".join(self.prod))
- else:
- s = "%s -> <empty>" % self.name
- return s
-
- def __repr__(self):
- return str(self)
-
- # Compute lr_items from the production
- def lr_item(self,n):
- if n > len(self.prod): return None
- p = Production()
- p.name = self.name
- p.prod = list(self.prod)
- p.number = self.number
- p.lr_index = n
- p.lookaheads = { }
- p.setnumbers = self.setnumbers
- p.prod.insert(n,".")
- p.prod = tuple(p.prod)
- p.len = len(p.prod)
- p.usyms = self.usyms
-
- # Precompute list of productions immediately following
- try:
- p.lrafter = Prodnames[p.prod[n+1]]
- except (IndexError,KeyError),e:
- p.lrafter = []
- try:
- p.lrbefore = p.prod[n-1]
- except IndexError:
- p.lrbefore = None
-
- return p
-
-class MiniProduction:
- pass
-
-# regex matching identifiers
-_is_identifier = re.compile(r'^[a-zA-Z0-9_-~]+$')
-
-# -----------------------------------------------------------------------------
-# add_production()
-#
-# Given an action function, this function assembles a production rule.
-# The production rule is assumed to be found in the function's docstring.
-# This rule has the general syntax:
-#
-# name1 ::= production1
-# | production2
-# | production3
-# ...
-# | productionn
-# name2 ::= production1
-# | production2
-# ...
-# -----------------------------------------------------------------------------
-
-def add_production(f,file,line,prodname,syms):
-
- if Terminals.has_key(prodname):
- sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
- return -1
- if prodname == 'error':
- sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
- return -1
-
- if not _is_identifier.match(prodname):
- sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
- return -1
-
- for x in range(len(syms)):
- s = syms[x]
- if s[0] in "'\"":
- try:
- c = eval(s)
- if (len(c) > 1):
- sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
- return -1
- if not Terminals.has_key(c):
- Terminals[c] = []
- syms[x] = c
- continue
- except SyntaxError:
- pass
- if not _is_identifier.match(s) and s != '%prec':
- sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
- return -1
-
- # See if the rule is already in the rulemap
- map = "%s -> %s" % (prodname,syms)
- if Prodmap.has_key(map):
- m = Prodmap[map]
- sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
- sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
- return -1
-
- p = Production()
- p.name = prodname
- p.prod = syms
- p.file = file
- p.line = line
- p.func = f
- p.number = len(Productions)
-
-
- Productions.append(p)
- Prodmap[map] = p
- if not Nonterminals.has_key(prodname):
- Nonterminals[prodname] = [ ]
-
- # Add all terminals to Terminals
- i = 0
- while i < len(p.prod):
- t = p.prod[i]
- if t == '%prec':
- try:
- precname = p.prod[i+1]
- except IndexError:
- sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
- return -1
-
- prec = Precedence.get(precname,None)
- if not prec:
- sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
- return -1
- else:
- p.prec = prec
- del p.prod[i]
- del p.prod[i]
- continue
-
- if Terminals.has_key(t):
- Terminals[t].append(p.number)
- # Is a terminal. We'll assign a precedence to p based on this
- if not hasattr(p,"prec"):
- p.prec = Precedence.get(t,('right',0))
- else:
- if not Nonterminals.has_key(t):
- Nonterminals[t] = [ ]
- Nonterminals[t].append(p.number)
- i += 1
-
- if not hasattr(p,"prec"):
- p.prec = ('right',0)
-
- # Set final length of productions
- p.len = len(p.prod)
- p.prod = tuple(p.prod)
-
- # Calculate unique syms in the production
- p.usyms = [ ]
- for s in p.prod:
- if s not in p.usyms:
- p.usyms.append(s)
-
- # Add to the global productions list
- try:
- Prodnames[p.name].append(p)
- except KeyError:
- Prodnames[p.name] = [ p ]
- return 0
-
-# Given a raw rule function, this function rips out its doc string
-# and adds rules to the grammar
-
-def add_function(f):
- line = f.func_code.co_firstlineno
- file = f.func_code.co_filename
- error = 0
-
- if isinstance(f,types.MethodType):
- reqdargs = 2
- else:
- reqdargs = 1
-
- if f.func_code.co_argcount > reqdargs:
- sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
- return -1
-
- if f.func_code.co_argcount < reqdargs:
- sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
- return -1
-
- if f.__doc__:
- # Split the doc string into lines
- pstrings = f.__doc__.splitlines()
- lastp = None
- dline = line
- for ps in pstrings:
- dline += 1
- p = ps.split()
- if not p: continue
- try:
- if p[0] == '|':
- # This is a continuation of a previous rule
- if not lastp:
- sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
- return -1
- prodname = lastp
- if len(p) > 1:
- syms = p[1:]
- else:
- syms = [ ]
- else:
- prodname = p[0]
- lastp = prodname
- assign = p[1]
- if len(p) > 2:
- syms = p[2:]
- else:
- syms = [ ]
- if assign != ':' and assign != '::=':
- sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
- return -1
-
-
- e = add_production(f,file,dline,prodname,syms)
- error += e
-
-
- except StandardError:
- sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
- error -= 1
- else:
- sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
- return error
-
-
-# Cycle checking code (Michael Dyck)
-
-def compute_reachable():
- '''
- Find each symbol that can be reached from the start symbol.
- Print a warning for any nonterminals that can't be reached.
- (Unused terminals have already had their warning.)
- '''
- Reachable = { }
- for s in Terminals.keys() + Nonterminals.keys():
- Reachable[s] = 0
-
- mark_reachable_from( Productions[0].prod[0], Reachable )
-
- for s in Nonterminals.keys():
- if not Reachable[s]:
- sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
-
-def mark_reachable_from(s, Reachable):
- '''
- Mark all symbols that are reachable from symbol s.
- '''
- if Reachable[s]:
- # We've already reached symbol s.
- return
- Reachable[s] = 1
- for p in Prodnames.get(s,[]):
- for r in p.prod:
- mark_reachable_from(r, Reachable)
-
-# -----------------------------------------------------------------------------
-# compute_terminates()
-#
-# This function looks at the various parsing rules and tries to detect
-# infinite recursion cycles (grammar rules where there is no possible way
-# to derive a string of only terminals).
-# -----------------------------------------------------------------------------
-def compute_terminates():
- '''
- Raise an error for any symbols that don't terminate.
- '''
- Terminates = {}
-
- # Terminals:
- for t in Terminals.keys():
- Terminates[t] = 1
-
- Terminates['$end'] = 1
-
- # Nonterminals:
-
- # Initialize to false:
- for n in Nonterminals.keys():
- Terminates[n] = 0
-
- # Then propagate termination until no change:
- while 1:
- some_change = 0
- for (n,pl) in Prodnames.items():
- # Nonterminal n terminates iff any of its productions terminates.
- for p in pl:
- # Production p terminates iff all of its rhs symbols terminate.
- for s in p.prod:
- if not Terminates[s]:
- # The symbol s does not terminate,
- # so production p does not terminate.
- p_terminates = 0
- break
- else:
- # didn't break from the loop,
- # so every symbol s terminates
- # so production p terminates.
- p_terminates = 1
-
- if p_terminates:
- # symbol n terminates!
- if not Terminates[n]:
- Terminates[n] = 1
- some_change = 1
- # Don't need to consider any more productions for this n.
- break
-
- if not some_change:
- break
-
- some_error = 0
- for (s,terminates) in Terminates.items():
- if not terminates:
- if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
- # s is used-but-not-defined, and we've already warned of that,
- # so it would be overkill to say that it's also non-terminating.
- pass
- else:
- sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
- some_error = 1
-
- return some_error
-
-# -----------------------------------------------------------------------------
-# verify_productions()
-#
-# This function examines all of the supplied rules to see if they seem valid.
-# -----------------------------------------------------------------------------
-def verify_productions(cycle_check=1):
- error = 0
- for p in Productions:
- if not p: continue
-
- for s in p.prod:
- if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
- sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
- error = 1
- continue
-
- unused_tok = 0
- # Now verify all of the tokens
- if yaccdebug:
- _vf.write("Unused terminals:\n\n")
- for s,v in Terminals.items():
- if s != 'error' and not v:
- sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
- if yaccdebug: _vf.write(" %s\n"% s)
- unused_tok += 1
-
- # Print out all of the productions
- if yaccdebug:
- _vf.write("\nGrammar\n\n")
- for i in range(1,len(Productions)):
- _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
-
- unused_prod = 0
- # Verify the use of all productions
- for s,v in Nonterminals.items():
- if not v:
- p = Prodnames[s][0]
- sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
- unused_prod += 1
-
-
- if unused_tok == 1:
- sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
- if unused_tok > 1:
- sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
-
- if unused_prod == 1:
- sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
- if unused_prod > 1:
- sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
-
- if yaccdebug:
- _vf.write("\nTerminals, with rules where they appear\n\n")
- ks = Terminals.keys()
- ks.sort()
- for k in ks:
- _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
- _vf.write("\nNonterminals, with rules where they appear\n\n")
- ks = Nonterminals.keys()
- ks.sort()
- for k in ks:
- _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
-
- if (cycle_check):
- compute_reachable()
- error += compute_terminates()
-# error += check_cycles()
- return error
-
-# -----------------------------------------------------------------------------
-# build_lritems()
-#
-# This function walks the list of productions and builds a complete set of the
-# LR items. The LR items are stored in two ways: First, they are uniquely
-# numbered and placed in the list _lritems. Second, a linked list of LR items
-# is built for each production. For example:
-#
-# E -> E PLUS E
-#
-# Creates the list
-#
-# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
-# -----------------------------------------------------------------------------
-
-def build_lritems():
- for p in Productions:
- lastlri = p
- lri = p.lr_item(0)
- i = 0
- while 1:
- lri = p.lr_item(i)
- lastlri.lr_next = lri
- if not lri: break
- lri.lr_num = len(LRitems)
- LRitems.append(lri)
- lastlri = lri
- i += 1
-
- # In order for the rest of the parser generator to work, we need to
- # guarantee that no more lritems are generated. Therefore, we nuke
- # the p.lr_item method. (Only used in debugging)
- # Production.lr_item = None
-
-# -----------------------------------------------------------------------------
-# add_precedence()
-#
-# Given a list of precedence rules, add to the precedence table.
-# -----------------------------------------------------------------------------
-
-def add_precedence(plist):
- plevel = 0
- error = 0
- for p in plist:
- plevel += 1
- try:
- prec = p[0]
- terms = p[1:]
- if prec != 'left' and prec != 'right' and prec != 'nonassoc':
- sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
- return -1
- for t in terms:
- if Precedence.has_key(t):
- sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
- error += 1
- continue
- Precedence[t] = (prec,plevel)
- except:
- sys.stderr.write("yacc: Invalid precedence table.\n")
- error += 1
-
- return error
-
-# -----------------------------------------------------------------------------
-# augment_grammar()
-#
-# Compute the augmented grammar. This is just a rule S' -> start where start
-# is the starting symbol.
-# -----------------------------------------------------------------------------
-
-def augment_grammar(start=None):
- if not start:
- start = Productions[1].name
- Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
- Productions[0].usyms = [ start ]
- Nonterminals[start].append(0)
-
-
-# -------------------------------------------------------------------------
-# first()
-#
-# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
-#
-# During execution of compute_first1, the result may be incomplete.
-# Afterward (e.g., when called from compute_follow()), it will be complete.
-# -------------------------------------------------------------------------
-def first(beta):
-
- # We are computing First(x1,x2,x3,...,xn)
- result = [ ]
- for x in beta:
- x_produces_empty = 0
-
- # Add all the non-<empty> symbols of First[x] to the result.
- for f in First[x]:
- if f == '<empty>':
- x_produces_empty = 1
- else:
- if f not in result: result.append(f)
-
- if x_produces_empty:
- # We have to consider the next x in beta,
- # i.e. stay in the loop.
- pass
- else:
- # We don't have to consider any further symbols in beta.
- break
- else:
- # There was no 'break' from the loop,
- # so x_produces_empty was true for all x in beta,
- # so beta produces empty as well.
- result.append('<empty>')
-
- return result
-
-
-# FOLLOW(x)
-# Given a non-terminal, this function computes the set of all symbols
-# that might follow it. Dragon book, p. 189.
-
-def compute_follow(start=None):
- # Add '$end' to the follow list of the start symbol
- for k in Nonterminals.keys():
- Follow[k] = [ ]
-
- if not start:
- start = Productions[1].name
-
- Follow[start] = [ '$end' ]
-
- while 1:
- didadd = 0
- for p in Productions[1:]:
- # Here is the production set
- for i in range(len(p.prod)):
- B = p.prod[i]
- if Nonterminals.has_key(B):
- # Okay. We got a non-terminal in a production
- fst = first(p.prod[i+1:])
- hasempty = 0
- for f in fst:
- if f != '<empty>' and f not in Follow[B]:
- Follow[B].append(f)
- didadd = 1
- if f == '<empty>':
- hasempty = 1
- if hasempty or i == (len(p.prod)-1):
- # Add elements of follow(a) to follow(b)
- for f in Follow[p.name]:
- if f not in Follow[B]:
- Follow[B].append(f)
- didadd = 1
- if not didadd: break
-
- if 0 and yaccdebug:
- _vf.write('\nFollow:\n')
- for k in Nonterminals.keys():
- _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
-
-# -------------------------------------------------------------------------
-# compute_first1()
-#
-# Compute the value of FIRST1(X) for all symbols
-# -------------------------------------------------------------------------
-def compute_first1():
-
- # Terminals:
- for t in Terminals.keys():
- First[t] = [t]
-
- First['$end'] = ['$end']
- First['#'] = ['#'] # what's this for?
-
- # Nonterminals:
-
- # Initialize to the empty set:
- for n in Nonterminals.keys():
- First[n] = []
-
- # Then propagate symbols until no change:
- while 1:
- some_change = 0
- for n in Nonterminals.keys():
- for p in Prodnames[n]:
- for f in first(p.prod):
- if f not in First[n]:
- First[n].append( f )
- some_change = 1
- if not some_change:
- break
-
- if 0 and yaccdebug:
- _vf.write('\nFirst:\n')
- for k in Nonterminals.keys():
- _vf.write("%-20s : %s\n" %
- (k, " ".join([str(s) for s in First[k]])))
-
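A small worked example of the fixed points that first() and compute_follow() reach, for an invented toy grammar (not sepolgen's):

    expression : expression PLUS term
               | term
    term       : NUMBER

Here FIRST(term) = FIRST(expression) = { NUMBER }. FOLLOW(expression) = { PLUS, $end }, since expression is followed by PLUS in the first production and is the start symbol; and FOLLOW(term) = FOLLOW(expression) = { PLUS, $end }, because term only appears at the end of a right-hand side.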
-# -----------------------------------------------------------------------------
-# === SLR Generation ===
-#
-# The following functions are used to construct SLR (Simple LR) parsing tables
-# as described on p.221-229 of the dragon book.
-# -----------------------------------------------------------------------------
-
-# Global variables for the LR parsing engine
-def lr_init_vars():
- global _lr_action, _lr_goto, _lr_method
- global _lr_goto_cache, _lr0_cidhash
-
- _lr_action = { } # Action table
- _lr_goto = { } # Goto table
- _lr_method = "Unknown" # LR method used
- _lr_goto_cache = { }
- _lr0_cidhash = { }
-
-
-# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
-# prodlist is a list of productions.
-
-_add_count = 0 # Counter used to detect cycles
-
-def lr0_closure(I):
- global _add_count
-
- _add_count += 1
- prodlist = Productions
-
- # Add everything in I to J
- J = I[:]
- didadd = 1
- while didadd:
- didadd = 0
- for j in J:
- for x in j.lrafter:
- if x.lr0_added == _add_count: continue
- # Add B --> .G to J
- J.append(x.lr_next)
- x.lr0_added = _add_count
- didadd = 1
-
- return J
-
-# Compute the LR(0) goto function goto(I,X) where I is a set
-# of LR(0) items and X is a grammar symbol. This function is written
-# in a way that guarantees uniqueness of the generated goto sets
-# (i.e. the same goto set will never be returned as two different Python
-# objects). With uniqueness, we can later do fast set comparisons using
-# id(obj) instead of element-wise comparison.
-
-def lr0_goto(I,x):
- # First we look for a previously cached entry
- g = _lr_goto_cache.get((id(I),x),None)
- if g: return g
-
- # Now we generate the goto set in a way that guarantees uniqueness
- # of the result
-
- s = _lr_goto_cache.get(x,None)
- if not s:
- s = { }
- _lr_goto_cache[x] = s
-
- gs = [ ]
- for p in I:
- n = p.lr_next
- if n and n.lrbefore == x:
- s1 = s.get(id(n),None)
- if not s1:
- s1 = { }
- s[id(n)] = s1
- gs.append(n)
- s = s1
- g = s.get('$end',None)
- if not g:
- if gs:
- g = lr0_closure(gs)
- s['$end'] = g
- else:
- s['$end'] = gs
- _lr_goto_cache[(id(I),x)] = g
- return g
-
-_lr0_cidhash = { }
-
-# Compute the LR(0) sets of item function
-def lr0_items():
-
- C = [ lr0_closure([Productions[0].lr_next]) ]
- i = 0
- for I in C:
- _lr0_cidhash[id(I)] = i
- i += 1
-
- # Loop over the items in C and each grammar symbols
- i = 0
- while i < len(C):
- I = C[i]
- i += 1
-
- # Collect all of the symbols that could possibly be in the goto(I,X) sets
- asyms = { }
- for ii in I:
- for s in ii.usyms:
- asyms[s] = None
-
- for x in asyms.keys():
- g = lr0_goto(I,x)
- if not g: continue
- if _lr0_cidhash.has_key(id(g)): continue
- _lr0_cidhash[id(g)] = len(C)
- C.append(g)
-
- return C
-
-# -----------------------------------------------------------------------------
-# ==== LALR(1) Parsing ====
-#
-# LALR(1) parsing is almost exactly the same as SLR except that instead of
-# relying upon Follow() sets when performing reductions, a more selective
-# lookahead set that incorporates the state of the LR(0) machine is utilized.
-# Thus, we mainly just have to focus on calculating the lookahead sets.
-#
-# The method used here is due to DeRemer and Pennello (1982).
-#
-# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
-# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
-# Vol. 4, No. 4, Oct. 1982, pp. 615-649
-#
-# Further details can also be found in:
-#
-# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
-# McGraw-Hill Book Company, (1985).
-#
-# Note: This implementation is a complete replacement of the LALR(1)
-# implementation in PLY-1.x releases. That version was based on
-# a less efficient algorithm and it had bugs in its implementation.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# compute_nullable_nonterminals()
-#
-# Creates a dictionary containing all of the non-terminals that might produce
-# an empty production.
-# -----------------------------------------------------------------------------
-
-def compute_nullable_nonterminals():
- nullable = {}
- num_nullable = 0
- while 1:
- for p in Productions[1:]:
- if p.len == 0:
- nullable[p.name] = 1
- continue
- for t in p.prod:
- if not nullable.has_key(t): break
- else:
- nullable[p.name] = 1
- if len(nullable) == num_nullable: break
- num_nullable = len(nullable)
- return nullable
-
-# -----------------------------------------------------------------------------
-# find_nonterminal_transitions(C)
-#
-# Given a set of LR(0) items, this function finds all of the non-terminal
-# transitions. These are transitions in which a dot appears immediately before
-# a non-terminal. Returns a list of tuples of the form (state,N) where state
-# is the state number and N is the nonterminal symbol.
-#
-# The input C is the set of LR(0) items.
-# -----------------------------------------------------------------------------
-
-def find_nonterminal_transitions(C):
- trans = []
- for state in range(len(C)):
- for p in C[state]:
- if p.lr_index < p.len - 1:
- t = (state,p.prod[p.lr_index+1])
- if Nonterminals.has_key(t[1]):
- if t not in trans: trans.append(t)
- state = state + 1
- return trans
-
-# -----------------------------------------------------------------------------
-# dr_relation()
-#
-# Computes the DR(p,A) relationships for non-terminal transitions. The input
-# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
-#
-# Returns a list of terminals.
-# -----------------------------------------------------------------------------
-
-def dr_relation(C,trans,nullable):
- dr_set = { }
- state,N = trans
- terms = []
-
- g = lr0_goto(C[state],N)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index+1]
- if Terminals.has_key(a):
- if a not in terms: terms.append(a)
-
- # This extra bit is to handle the start state
- if state == 0 and N == Productions[0].prod[0]:
- terms.append('$end')
-
- return terms
-
-# -----------------------------------------------------------------------------
-# reads_relation()
-#
-# Computes the READS() relation (p,A) READS (t,C).
-# -----------------------------------------------------------------------------
-
-def reads_relation(C, trans, empty):
- # Look for empty transitions
- rel = []
- state, N = trans
-
- g = lr0_goto(C[state],N)
- j = _lr0_cidhash.get(id(g),-1)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index + 1]
- if empty.has_key(a):
- rel.append((j,a))
-
- return rel
-
-# -----------------------------------------------------------------------------
-# compute_lookback_includes()
-#
-# Determines the lookback and includes relations
-#
-# LOOKBACK:
-#
-# This relation is determined by running the LR(0) state machine forward.
-# For example, starting with a production "N : . A B C", we run it forward
-# to obtain "N : A B C ." We then build a relationship between this final
-# state and the starting state. These relationships are stored in a dictionary
-# lookdict.
-#
-# INCLUDES:
-#
-# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
-#
-# This relation is used to determine non-terminal transitions that occur
-# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
-# if the following holds:
-#
-# B -> LAT, where T -> epsilon and p' -L-> p
-#
-# L is essentially a prefix (which may be empty), T is a suffix that must be
-# able to derive an empty string. State p' must lead to state p with the string L.
-#
-# -----------------------------------------------------------------------------
-
-def compute_lookback_includes(C,trans,nullable):
-
- lookdict = {} # Dictionary of lookback relations
- includedict = {} # Dictionary of include relations
-
- # Make a dictionary of non-terminal transitions
- dtrans = {}
- for t in trans:
- dtrans[t] = 1
-
- # Loop over all transitions and compute lookbacks and includes
- for state,N in trans:
- lookb = []
- includes = []
- for p in C[state]:
- if p.name != N: continue
-
- # Okay, we have a name match. We now follow the production all the way
- # through the state machine until we get the . on the right hand side
-
- lr_index = p.lr_index
- j = state
- while lr_index < p.len - 1:
- lr_index = lr_index + 1
- t = p.prod[lr_index]
-
- # Check to see if this symbol and state are a non-terminal transition
- if dtrans.has_key((j,t)):
- # Yes. Okay, there is some chance that this is an includes relation
- # the only way to know for certain is whether the rest of the
- # production derives empty
-
- li = lr_index + 1
- while li < p.len:
- if Terminals.has_key(p.prod[li]): break # No forget it
- if not nullable.has_key(p.prod[li]): break
- li = li + 1
- else:
- # Appears to be a relation between (j,t) and (state,N)
- includes.append((j,t))
-
- g = lr0_goto(C[j],t) # Go to next set
- j = _lr0_cidhash.get(id(g),-1) # Go to next state
-
- # When we get here, j is the final state, now we have to locate the production
- for r in C[j]:
- if r.name != p.name: continue
- if r.len != p.len: continue
- i = 0
- # This loop is comparing a production ". A B C" with "A B C ."
- while i < r.lr_index:
- if r.prod[i] != p.prod[i+1]: break
- i = i + 1
- else:
- lookb.append((j,r))
- for i in includes:
- if not includedict.has_key(i): includedict[i] = []
- includedict[i].append((state,N))
- lookdict[(state,N)] = lookb
-
- return lookdict,includedict
-
-# -----------------------------------------------------------------------------
-# digraph()
-# traverse()
-#
-# The following two functions are used to compute set valued functions
-# of the form:
-#
-# F(x) = F'(x) U U{F(y) | x R y}
-#
-# This is used to compute the values of Read() sets as well as FOLLOW sets
-# in LALR(1) generation.
-#
-# Inputs: X - An input set
-# R - A relation
-# FP - Set-valued function
-# ------------------------------------------------------------------------------
-
-def digraph(X,R,FP):
- N = { }
- for x in X:
- N[x] = 0
- stack = []
- F = { }
- for x in X:
- if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
- return F
-
-def traverse(x,N,stack,F,X,R,FP):
- stack.append(x)
- d = len(stack)
- N[x] = d
- F[x] = FP(x) # F(X) <- F'(x)
-
- rel = R(x) # Get y's related to x
- for y in rel:
- if N[y] == 0:
- traverse(y,N,stack,F,X,R,FP)
- N[x] = min(N[x],N[y])
- for a in F.get(y,[]):
- if a not in F[x]: F[x].append(a)
- if N[x] == d:
- N[stack[-1]] = sys.maxint
- F[stack[-1]] = F[x]
- element = stack.pop()
- while element != x:
- N[stack[-1]] = sys.maxint
- F[stack[-1]] = F[x]
- element = stack.pop()
-
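A hedged usage sketch for digraph() with a tiny hand-made relation, assuming this module is importable as sepolgen.yacc; the element names are arbitrary.

    from sepolgen.yacc import digraph

    X = ['x1', 'x2']                              # the input set
    R = lambda x: ['x2'] if x == 'x1' else []     # x1 is related to x2
    FP = lambda x: {'x1': ['a'], 'x2': ['b']}[x]  # base sets F'(x)

    # F(x2) = ['b']; F(x1) = F'(x1) union F(x2) = ['a', 'b']
    print digraph(X, R, FP)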
-# -----------------------------------------------------------------------------
-# compute_read_sets()
-#
-# Given a set of LR(0) items, this function computes the read sets.
-#
-# Inputs: C = Set of LR(0) items
-# ntrans = Set of nonterminal transitions
-# nullable = Set of empty transitions
-#
-# Returns a set containing the read sets
-# -----------------------------------------------------------------------------
-
-def compute_read_sets(C, ntrans, nullable):
- FP = lambda x: dr_relation(C,x,nullable)
- R = lambda x: reads_relation(C,x,nullable)
- F = digraph(ntrans,R,FP)
- return F
-
-# -----------------------------------------------------------------------------
-# compute_follow_sets()
-#
-# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
-# and an include set, this function computes the follow sets
-#
-# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
-#
-# Inputs:
-# ntrans = Set of nonterminal transitions
-# readsets = Readset (previously computed)
-# inclsets = Include sets (previously computed)
-#
-# Returns a set containing the follow sets
-# -----------------------------------------------------------------------------
-
-def compute_follow_sets(ntrans,readsets,inclsets):
- FP = lambda x: readsets[x]
- R = lambda x: inclsets.get(x,[])
- F = digraph(ntrans,R,FP)
- return F
-
-# -----------------------------------------------------------------------------
-# add_lookaheads()
-#
-# Attaches the lookahead symbols to grammar rules.
-#
-# Inputs: lookbacks - Set of lookback relations
-# followset - Computed follow set
-#
-# This function directly attaches the lookaheads to productions contained
-# in the lookbacks set
-# -----------------------------------------------------------------------------
-
-def add_lookaheads(lookbacks,followset):
- for trans,lb in lookbacks.items():
- # Loop over productions in lookback
- for state,p in lb:
- if not p.lookaheads.has_key(state):
- p.lookaheads[state] = []
- f = followset.get(trans,[])
- for a in f:
- if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
-
-# -----------------------------------------------------------------------------
-# add_lalr_lookaheads()
-#
-# This function does all of the work of adding lookahead information for use
-# with LALR parsing
-# -----------------------------------------------------------------------------
-
-def add_lalr_lookaheads(C):
- # Determine all of the nullable nonterminals
- nullable = compute_nullable_nonterminals()
-
- # Find all non-terminal transitions
- trans = find_nonterminal_transitions(C)
-
- # Compute read sets
- readsets = compute_read_sets(C,trans,nullable)
-
- # Compute lookback/includes relations
- lookd, included = compute_lookback_includes(C,trans,nullable)
-
- # Compute LALR FOLLOW sets
- followsets = compute_follow_sets(trans,readsets,included)
-
- # Add all of the lookaheads
- add_lookaheads(lookd,followsets)
-
-# -----------------------------------------------------------------------------
-# lr_parse_table()
-#
-# This function constructs the parse tables for SLR or LALR
-# -----------------------------------------------------------------------------
-def lr_parse_table(method):
- global _lr_method
- goto = _lr_goto # Goto array
- action = _lr_action # Action array
- actionp = { } # Action production array (temporary)
-
- _lr_method = method
-
- n_srconflict = 0
- n_rrconflict = 0
-
- if yaccdebug:
- sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
- _vf.write("\n\nParsing method: %s\n\n" % method)
-
- # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
- # This determines the number of states
-
- C = lr0_items()
-
- if method == 'LALR':
- add_lalr_lookaheads(C)
-
- # Build the parser table, state by state
- st = 0
- for I in C:
- # Loop over each production in I
- actlist = [ ] # List of actions
-
- if yaccdebug:
- _vf.write("\nstate %d\n\n" % st)
- for p in I:
- _vf.write(" (%d) %s\n" % (p.number, str(p)))
- _vf.write("\n")
-
- for p in I:
- try:
- if p.prod[-1] == ".":
- if p.name == "S'":
- # Start symbol. Accept!
- action[st,"$end"] = 0
- actionp[st,"$end"] = p
- else:
- # We are at the end of a production. Reduce!
- if method == 'LALR':
- laheads = p.lookaheads[st]
- else:
- laheads = Follow[p.name]
- for a in laheads:
- actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
- r = action.get((st,a),None)
- if r is not None:
- # Whoa. Have a shift/reduce or reduce/reduce conflict
- if r > 0:
- # Need to decide on shift or reduce here
- # By default we favor shifting. Need to add
- # some precedence rules here.
- sprec,slevel = Productions[actionp[st,a].number].prec
- rprec,rlevel = Precedence.get(a,('right',0))
- if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
- # We really need to reduce here.
- action[st,a] = -p.number
- actionp[st,a] = p
- if not slevel and not rlevel:
- _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
- n_srconflict += 1
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- action[st,a] = None
- else:
- # Hmmm. Guess we'll keep the shift
- if not rlevel:
- _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
- n_srconflict +=1
- elif r < 0:
- # Reduce/reduce conflict. In this case, we favor the rule
- # that was defined first in the grammar file
- oldp = Productions[-r]
- pp = Productions[p.number]
- if oldp.line > pp.line:
- action[st,a] = -p.number
- actionp[st,a] = p
- # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
- n_rrconflict += 1
- _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, actionp[st,a].number, actionp[st,a]))
- _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,actionp[st,a].number, actionp[st,a]))
- else:
- sys.stderr.write("Unknown conflict in state %d\n" % st)
- else:
- action[st,a] = -p.number
- actionp[st,a] = p
- else:
- i = p.lr_index
- a = p.prod[i+1] # Get symbol right after the "."
- if Terminals.has_key(a):
- g = lr0_goto(I,a)
- j = _lr0_cidhash.get(id(g),-1)
- if j >= 0:
- # We are in a shift state
- actlist.append((a,p,"shift and go to state %d" % j))
- r = action.get((st,a),None)
- if r is not None:
- # Whoa have a shift/reduce or shift/shift conflict
- if r > 0:
- if r != j:
- sys.stderr.write("Shift/shift conflict in state %d\n" % st)
- elif r < 0:
- # Do a precedence check.
- # - if precedence of reduce rule is higher, we reduce.
- # - if precedence of reduce is same and left assoc, we reduce.
- # - otherwise we shift
- rprec,rlevel = Productions[actionp[st,a].number].prec
- sprec,slevel = Precedence.get(a,('right',0))
- if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
- # We decide to shift here... highest precedence to shift
- action[st,a] = j
- actionp[st,a] = p
- if not rlevel:
- n_srconflict += 1
- _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- action[st,a] = None
- else:
- # Hmmm. Guess we'll keep the reduce
- if not slevel and not rlevel:
- n_srconflict +=1
- _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
- _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
-
- else:
- sys.stderr.write("Unknown conflict in state %d\n" % st)
- else:
- action[st,a] = j
- actionp[st,a] = p
-
- except StandardError,e:
- raise YaccError, "Hosed in lr_parse_table", e
-
- # Print the actions associated with each terminal
- if yaccdebug:
- _actprint = { }
- for a,p,m in actlist:
- if action.has_key((st,a)):
- if p is actionp[st,a]:
- _vf.write(" %-15s %s\n" % (a,m))
- _actprint[(a,m)] = 1
- _vf.write("\n")
- for a,p,m in actlist:
- if action.has_key((st,a)):
- if p is not actionp[st,a]:
- if not _actprint.has_key((a,m)):
- _vf.write(" ! %-15s [ %s ]\n" % (a,m))
- _actprint[(a,m)] = 1
-
- # Construct the goto table for this state
- if yaccdebug:
- _vf.write("\n")
- nkeys = { }
- for ii in I:
- for s in ii.usyms:
- if Nonterminals.has_key(s):
- nkeys[s] = None
- for n in nkeys.keys():
- g = lr0_goto(I,n)
- j = _lr0_cidhash.get(id(g),-1)
- if j >= 0:
- goto[st,n] = j
- if yaccdebug:
- _vf.write(" %-30s shift and go to state %d\n" % (n,j))
-
- st += 1
-
- if yaccdebug:
- if n_srconflict == 1:
- sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
- if n_srconflict > 1:
- sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
- if n_rrconflict == 1:
- sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
- if n_rrconflict > 1:
- sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
-
-# -----------------------------------------------------------------------------
-# ==== LR Utility functions ====
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# _lr_write_tables()
-#
-# This function writes the LR parsing tables to a file
-# -----------------------------------------------------------------------------
-
-def lr_write_tables(modulename=tab_module,outputdir=''):
- filename = os.path.join(outputdir,modulename) + ".py"
- try:
- f = open(filename,"w")
-
- f.write("""
-# %s
-# This file is automatically generated. Do not edit.
-
-_lr_method = %s
-
-_lr_signature = %s
-""" % (filename, repr(_lr_method), repr(Signature.digest())))
-
- # Change smaller to 0 to go back to original tables
- smaller = 1
-
- # Factor out names to try and make smaller
- if smaller:
- items = { }
-
- for k,v in _lr_action.items():
- i = items.get(k[1])
- if not i:
- i = ([],[])
- items[k[1]] = i
- i[0].append(k[0])
- i[1].append(v)
-
- f.write("\n_lr_action_items = {")
- for k,v in items.items():
- f.write("%r:([" % k)
- for i in v[0]:
- f.write("%r," % i)
- f.write("],[")
- for i in v[1]:
- f.write("%r," % i)
-
- f.write("]),")
- f.write("}\n")
-
- f.write("""
-_lr_action = { }
-for _k, _v in _lr_action_items.items():
- for _x,_y in zip(_v[0],_v[1]):
- _lr_action[(_x,_k)] = _y
-del _lr_action_items
-""")
-
- else:
- f.write("\n_lr_action = { ");
- for k,v in _lr_action.items():
- f.write("(%r,%r):%r," % (k[0],k[1],v))
- f.write("}\n");
-
- if smaller:
- # Factor out names to try and make smaller
- items = { }
-
- for k,v in _lr_goto.items():
- i = items.get(k[1])
- if not i:
- i = ([],[])
- items[k[1]] = i
- i[0].append(k[0])
- i[1].append(v)
-
- f.write("\n_lr_goto_items = {")
- for k,v in items.items():
- f.write("%r:([" % k)
- for i in v[0]:
- f.write("%r," % i)
- f.write("],[")
- for i in v[1]:
- f.write("%r," % i)
-
- f.write("]),")
- f.write("}\n")
-
- f.write("""
-_lr_goto = { }
-for _k, _v in _lr_goto_items.items():
- for _x,_y in zip(_v[0],_v[1]):
- _lr_goto[(_x,_k)] = _y
-del _lr_goto_items
-""")
- else:
- f.write("\n_lr_goto = { ");
- for k,v in _lr_goto.items():
- f.write("(%r,%r):%r," % (k[0],k[1],v))
- f.write("}\n");
-
- # Write production table
- f.write("_lr_productions = [\n")
- for p in Productions:
- if p:
- if (p.func):
- f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
- else:
- f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
- else:
- f.write(" None,\n")
- f.write("]\n")
-
- f.close()
-
- except IOError,e:
- print "Unable to create '%s'" % filename
- print e
- return
-
-def lr_read_tables(module=tab_module,optimize=0):
- global _lr_action, _lr_goto, _lr_productions, _lr_method
- try:
- exec "import %s as parsetab" % module
-
- if (optimize) or (Signature.digest() == parsetab._lr_signature):
- _lr_action = parsetab._lr_action
- _lr_goto = parsetab._lr_goto
- _lr_productions = parsetab._lr_productions
- _lr_method = parsetab._lr_method
- return 1
- else:
- return 0
-
- except (ImportError,AttributeError):
- return 0
-
-
-# Available instance types. This is used when parsers are defined by a class.
-# It's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
-
-try:
- _INSTANCETYPE = (types.InstanceType, types.ObjectType)
-except AttributeError:
- _INSTANCETYPE = types.InstanceType
-
-# -----------------------------------------------------------------------------
-# yacc(module)
-#
-# Build the parser module
-# -----------------------------------------------------------------------------
-
-def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
- global yaccdebug
- yaccdebug = debug
-
- initialize_vars()
- files = { }
- error = 0
-
-
- # Add parsing method to signature
- Signature.update(method)
-
- # If a "module" parameter was supplied, extract its dictionary.
- # Note: a module may in fact be an instance as well.
-
- if module:
- # User supplied a module object.
- if isinstance(module, types.ModuleType):
- ldict = module.__dict__
- elif isinstance(module, _INSTANCETYPE):
- _items = [(k,getattr(module,k)) for k in dir(module)]
- ldict = { }
- for i in _items:
- ldict[i[0]] = i[1]
- else:
- raise ValueError,"Expected a module"
-
- else:
- # No module given. We might be able to get information from the caller.
- # Throw an exception and unwind the traceback to get the globals
-
- try:
- raise RuntimeError
- except RuntimeError:
- e,b,t = sys.exc_info()
- f = t.tb_frame
- f = f.f_back # Walk out to our calling function
- ldict = f.f_globals # Grab its globals dictionary
-
- # Add starting symbol to signature
- if not start:
- start = ldict.get("start",None)
- if start:
- Signature.update(start)
-
-    # If running in optimized mode, try to load the previously generated parse tables
-
- if (optimize and lr_read_tables(tabmodule,1)):
- # Read parse table
- del Productions[:]
- for p in _lr_productions:
- if not p:
- Productions.append(None)
- else:
- m = MiniProduction()
- m.name = p[0]
- m.len = p[1]
- m.file = p[3]
- m.line = p[4]
- if p[2]:
- m.func = ldict[p[2]]
- Productions.append(m)
-
- else:
- # Get the tokens map
- if (module and isinstance(module,_INSTANCETYPE)):
- tokens = getattr(module,"tokens",None)
- else:
- tokens = ldict.get("tokens",None)
-
- if not tokens:
- raise YaccError,"module does not define a list 'tokens'"
- if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
- raise YaccError,"tokens must be a list or tuple."
-
- # Check to see if a requires dictionary is defined.
- requires = ldict.get("require",None)
- if requires:
- if not (isinstance(requires,types.DictType)):
- raise YaccError,"require must be a dictionary."
-
- for r,v in requires.items():
- try:
- if not (isinstance(v,types.ListType)):
- raise TypeError
- v1 = [x.split(".") for x in v]
- Requires[r] = v1
- except StandardError:
- print "Invalid specification for rule '%s' in require. Expected a list of strings" % r
-
-
-        # Build the dictionary of terminals. We record an entry in the
-        # dictionary to track whether or not a terminal is actually
-        # used in the grammar
-
- if 'error' in tokens:
- print "yacc: Illegal token 'error'. Is a reserved word."
- raise YaccError,"Illegal token name"
-
- for n in tokens:
- if Terminals.has_key(n):
- print "yacc: Warning. Token '%s' multiply defined." % n
- Terminals[n] = [ ]
-
- Terminals['error'] = [ ]
-
- # Get the precedence map (if any)
- prec = ldict.get("precedence",None)
- if prec:
- if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
- raise YaccError,"precedence must be a list or tuple."
- add_precedence(prec)
- Signature.update(repr(prec))
-
- for n in tokens:
- if not Precedence.has_key(n):
- Precedence[n] = ('right',0) # Default, right associative, 0 precedence
-
- # Look for error handler
- ef = ldict.get('p_error',None)
- if ef:
- if isinstance(ef,types.FunctionType):
- ismethod = 0
- elif isinstance(ef, types.MethodType):
- ismethod = 1
- else:
- raise YaccError,"'p_error' defined, but is not a function or method."
- eline = ef.func_code.co_firstlineno
- efile = ef.func_code.co_filename
- files[efile] = None
-
- if (ef.func_code.co_argcount != 1+ismethod):
- raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
- global Errorfunc
- Errorfunc = ef
- else:
- print "yacc: Warning. no p_error() function is defined."
-
- # Get the list of built-in functions with p_ prefix
- symbols = [ldict[f] for f in ldict.keys()
- if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
- and ldict[f].__name__ != 'p_error')]
-
- # Check for non-empty symbols
- if len(symbols) == 0:
- raise YaccError,"no rules of the form p_rulename are defined."
-
- # Sort the symbols by line number
- symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
-
- # Add all of the symbols to the grammar
- for f in symbols:
- if (add_function(f)) < 0:
- error += 1
- else:
- files[f.func_code.co_filename] = None
-
- # Make a signature of the docstrings
- for f in symbols:
- if f.__doc__:
- Signature.update(f.__doc__)
-
- lr_init_vars()
-
- if error:
- raise YaccError,"Unable to construct parser."
-
- if not lr_read_tables(tabmodule):
-
- # Validate files
- for filename in files.keys():
- if not validate_file(filename):
- error = 1
-
- # Validate dictionary
- validate_dict(ldict)
-
- if start and not Prodnames.has_key(start):
- raise YaccError,"Bad starting symbol '%s'" % start
-
- augment_grammar(start)
- error = verify_productions(cycle_check=check_recursion)
- otherfunc = [ldict[f] for f in ldict.keys()
- if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
-
- if error:
- raise YaccError,"Unable to construct parser."
-
- build_lritems()
- compute_first1()
- compute_follow(start)
-
- if method in ['SLR','LALR']:
- lr_parse_table(method)
- else:
- raise YaccError, "Unknown parsing method '%s'" % method
-
- if write_tables:
- lr_write_tables(tabmodule,outputdir)
-
- if yaccdebug:
- try:
- f = open(os.path.join(outputdir,debugfile),"w")
- f.write(_vfc.getvalue())
- f.write("\n\n")
- f.write(_vf.getvalue())
- f.close()
- except IOError,e:
- print "yacc: can't create '%s'" % debugfile,e
-
- # Made it here. Create a parser object and set up its internal state.
- # Set global parse() method to bound method of parser object.
-
- p = Parser("xyzzy")
- p.productions = Productions
- p.errorfunc = Errorfunc
- p.action = _lr_action
- p.goto = _lr_goto
- p.method = _lr_method
- p.require = Requires
-
- global parse
- parse = p.parse
-
- global parser
- parser = p
-
- # Clean up all of the globals we created
- if (not optimize):
- yacc_cleanup()
- return p
-
-# yacc_cleanup function. Delete all of the global variables
-# used during table construction
-
-def yacc_cleanup():
- global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
- del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
-
- global Productions, Prodnames, Prodmap, Terminals
- global Nonterminals, First, Follow, Precedence, LRitems
- global Errorfunc, Signature, Requires
-
- del Productions, Prodnames, Prodmap, Terminals
- del Nonterminals, First, Follow, Precedence, LRitems
- del Errorfunc, Signature, Requires
-
- global _vf, _vfc
- del _vf, _vfc
-
-
-# Stub that raises an error if parsing is attempted without first calling yacc()
-def parse(*args,**kwargs):
- raise YaccError, "yacc: No parser built with yacc()"
-