Diffstat (limited to 'python/helpers/pydev/third_party')
-rw-r--r--  python/helpers/pydev/third_party/pkgutil_old.py   591
-rw-r--r--  python/helpers/pydev/third_party/pluginbase.py    454
-rw-r--r--  python/helpers/pydev/third_party/uuid_old.py      541
3 files changed, 1586 insertions(+), 0 deletions(-)
diff --git a/python/helpers/pydev/third_party/pkgutil_old.py b/python/helpers/pydev/third_party/pkgutil_old.py
new file mode 100644
index 000000000000..ce072ec9ef75
--- /dev/null
+++ b/python/helpers/pydev/third_party/pkgutil_old.py
@@ -0,0 +1,591 @@
+"""Utilities to support packages."""
+
+# NOTE: This module must remain compatible with Python 2.3, as it is shared
+# by setuptools for distribution with Python 2.3 and up.
+
+import os
+import sys
+import imp
+import os.path
+from types import ModuleType
+
+__all__ = [
+ 'get_importer', 'iter_importers', 'get_loader', 'find_loader',
+ 'walk_packages', 'iter_modules', 'get_data',
+ 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
+]
+
+def read_code(stream):
+ # This helper is needed in order for the PEP 302 emulation to
+ # correctly handle compiled files
+ import marshal
+
+ magic = stream.read(4)
+ if magic != imp.get_magic():
+ return None
+
+ stream.read(4) # Skip timestamp
+ return marshal.load(stream)
+
+
+def simplegeneric(func):
+ """Make a trivial single-dispatch generic function"""
+ registry = {}
+ def wrapper(*args, **kw):
+ ob = args[0]
+ try:
+ cls = ob.__class__
+ except AttributeError:
+ cls = type(ob)
+ try:
+ mro = cls.__mro__
+ except AttributeError:
+ try:
+ class cls(cls, object):
+ pass
+ mro = cls.__mro__[1:]
+ except TypeError:
+ mro = object, # must be an ExtensionClass or some such :(
+ for t in mro:
+ if t in registry:
+ return registry[t](*args, **kw)
+ else:
+ return func(*args, **kw)
+ try:
+ wrapper.__name__ = func.__name__
+ except (TypeError, AttributeError):
+ pass # Python 2.3 doesn't allow functions to be renamed
+
+ def register(typ, func=None):
+ if func is None:
+ return lambda f: register(typ, f)
+ registry[typ] = func
+ return func
+
+ wrapper.__dict__ = func.__dict__
+ wrapper.__doc__ = func.__doc__
+ wrapper.register = register
+ return wrapper
+
+
+def walk_packages(path=None, prefix='', onerror=None):
+ """Yields (module_loader, name, ispkg) for all modules recursively
+ on path, or, if path is None, all accessible modules.
+
+ 'path' should be either None or a list of paths to look for
+ modules in.
+
+ 'prefix' is a string to output on the front of every module name
+ on output.
+
+ Note that this function must import all *packages* (NOT all
+ modules!) on the given path, in order to access the __path__
+ attribute to find submodules.
+
+ 'onerror' is a function which gets called with one argument (the
+ name of the package which was being imported) if any exception
+ occurs while trying to import a package. If no onerror function is
+ supplied, ImportErrors are caught and ignored, while all other
+ exceptions are propagated, terminating the search.
+
+ Examples:
+
+ # list all modules python can access
+ walk_packages()
+
+ # list all submodules of ctypes
+ walk_packages(ctypes.__path__, ctypes.__name__+'.')
+ """
+
+ def seen(p, m={}):
+ if p in m:
+ return True
+ m[p] = True
+
+ for importer, name, ispkg in iter_modules(path, prefix):
+ yield importer, name, ispkg
+
+ if ispkg:
+ try:
+ __import__(name)
+ except ImportError:
+ if onerror is not None:
+ onerror(name)
+ except Exception:
+ if onerror is not None:
+ onerror(name)
+ else:
+ raise
+ else:
+ path = getattr(sys.modules[name], '__path__', None) or []
+
+ # don't traverse path items we've seen before
+ path = [p for p in path if not seen(p)]
+
+ for item in walk_packages(path, name+'.', onerror):
+ yield item
+
+
+def iter_modules(path=None, prefix=''):
+ """Yields (module_loader, name, ispkg) for all submodules on path,
+ or, if path is None, all top-level modules on sys.path.
+
+ 'path' should be either None or a list of paths to look for
+ modules in.
+
+ 'prefix' is a string to output on the front of every module name
+ on output.
+ """
+
+ if path is None:
+ importers = iter_importers()
+ else:
+ importers = map(get_importer, path)
+
+ yielded = {}
+ for i in importers:
+ for name, ispkg in iter_importer_modules(i, prefix):
+ if name not in yielded:
+ yielded[name] = 1
+ yield i, name, ispkg
+
+
+#@simplegeneric
+def iter_importer_modules(importer, prefix=''):
+ if not hasattr(importer, 'iter_modules'):
+ return []
+ return importer.iter_modules(prefix)
+
+iter_importer_modules = simplegeneric(iter_importer_modules)
+
+
+class ImpImporter:
+ """PEP 302 Importer that wraps Python's "classic" import algorithm
+
+ ImpImporter(dirname) produces a PEP 302 importer that searches that
+ directory. ImpImporter(None) produces a PEP 302 importer that searches
+ the current sys.path, plus any modules that are frozen or built-in.
+
+ Note that ImpImporter does not currently support being used by placement
+ on sys.meta_path.
+ """
+
+ def __init__(self, path=None):
+ self.path = path
+
+ def find_module(self, fullname, path=None):
+ # Note: we ignore 'path' argument since it is only used via meta_path
+ subname = fullname.split(".")[-1]
+ if subname != fullname and self.path is None:
+ return None
+ if self.path is None:
+ path = None
+ else:
+ path = [os.path.realpath(self.path)]
+ try:
+ file, filename, etc = imp.find_module(subname, path)
+ except ImportError:
+ return None
+ return ImpLoader(fullname, file, filename, etc)
+
+ def iter_modules(self, prefix=''):
+ if self.path is None or not os.path.isdir(self.path):
+ return
+
+ yielded = {}
+ import inspect
+ try:
+ filenames = os.listdir(self.path)
+ except OSError:
+ # ignore unreadable directories like import does
+ filenames = []
+ filenames.sort() # handle packages before same-named modules
+
+ for fn in filenames:
+ modname = inspect.getmodulename(fn)
+ if modname=='__init__' or modname in yielded:
+ continue
+
+ path = os.path.join(self.path, fn)
+ ispkg = False
+
+ if not modname and os.path.isdir(path) and '.' not in fn:
+ modname = fn
+ try:
+ dircontents = os.listdir(path)
+ except OSError:
+ # ignore unreadable directories like import does
+ dircontents = []
+ for fn in dircontents:
+ subname = inspect.getmodulename(fn)
+ if subname=='__init__':
+ ispkg = True
+ break
+ else:
+ continue # not a package
+
+ if modname and '.' not in modname:
+ yielded[modname] = 1
+ yield prefix + modname, ispkg
+
+
+class ImpLoader:
+ """PEP 302 Loader that wraps Python's "classic" import algorithm
+ """
+ code = source = None
+
+ def __init__(self, fullname, file, filename, etc):
+ self.file = file
+ self.filename = filename
+ self.fullname = fullname
+ self.etc = etc
+
+ def load_module(self, fullname):
+ self._reopen()
+ try:
+ mod = imp.load_module(fullname, self.file, self.filename, self.etc)
+ finally:
+ if self.file:
+ self.file.close()
+ # Note: we don't set __loader__ because we want the module to look
+ # normal; i.e. this is just a wrapper for standard import machinery
+ return mod
+
+ def get_data(self, pathname):
+ return open(pathname, "rb").read()
+
+ def _reopen(self):
+ if self.file and self.file.closed:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ self.file = open(self.filename, 'rU')
+ elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
+ self.file = open(self.filename, 'rb')
+
+ def _fix_name(self, fullname):
+ if fullname is None:
+ fullname = self.fullname
+ elif fullname != self.fullname:
+ raise ImportError("Loader for module %s cannot handle "
+ "module %s" % (self.fullname, fullname))
+ return fullname
+
+ def is_package(self, fullname):
+ fullname = self._fix_name(fullname)
+ return self.etc[2]==imp.PKG_DIRECTORY
+
+ def get_code(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ if self.code is None:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ source = self.get_source(fullname)
+ self.code = compile(source, self.filename, 'exec')
+ elif mod_type==imp.PY_COMPILED:
+ self._reopen()
+ try:
+ self.code = read_code(self.file)
+ finally:
+ self.file.close()
+ elif mod_type==imp.PKG_DIRECTORY:
+ self.code = self._get_delegate().get_code()
+ return self.code
+
+ def get_source(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ if self.source is None:
+ mod_type = self.etc[2]
+ if mod_type==imp.PY_SOURCE:
+ self._reopen()
+ try:
+ self.source = self.file.read()
+ finally:
+ self.file.close()
+ elif mod_type==imp.PY_COMPILED:
+ if os.path.exists(self.filename[:-1]):
+ f = open(self.filename[:-1], 'rU')
+ self.source = f.read()
+ f.close()
+ elif mod_type==imp.PKG_DIRECTORY:
+ self.source = self._get_delegate().get_source()
+ return self.source
+
+
+ def _get_delegate(self):
+ return ImpImporter(self.filename).find_module('__init__')
+
+ def get_filename(self, fullname=None):
+ fullname = self._fix_name(fullname)
+ mod_type = self.etc[2]
+ if self.etc[2]==imp.PKG_DIRECTORY:
+ return self._get_delegate().get_filename()
+ elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
+ return self.filename
+ return None
+
+
+try:
+ import zipimport
+ from zipimport import zipimporter
+
+ def iter_zipimport_modules(importer, prefix=''):
+ dirlist = zipimport._zip_directory_cache[importer.archive].keys()
+ dirlist.sort()
+ _prefix = importer.prefix
+ plen = len(_prefix)
+ yielded = {}
+ import inspect
+ for fn in dirlist:
+ if not fn.startswith(_prefix):
+ continue
+
+ fn = fn[plen:].split(os.sep)
+
+ if len(fn)==2 and fn[1].startswith('__init__.py'):
+ if fn[0] not in yielded:
+ yielded[fn[0]] = 1
+ yield fn[0], True
+
+ if len(fn)!=1:
+ continue
+
+ modname = inspect.getmodulename(fn[0])
+ if modname=='__init__':
+ continue
+
+ if modname and '.' not in modname and modname not in yielded:
+ yielded[modname] = 1
+ yield prefix + modname, False
+
+ iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+ pass
+
+
+def get_importer(path_item):
+ """Retrieve a PEP 302 importer for the given path item
+
+ The returned importer is cached in sys.path_importer_cache
+ if it was newly created by a path hook.
+
+ If there is no importer, a wrapper around the basic import
+ machinery is returned. This wrapper is never inserted into
+ the importer cache (None is inserted instead).
+
+ The cache (or part of it) can be cleared manually if a
+ rescan of sys.path_hooks is necessary.
+ """
+ try:
+ importer = sys.path_importer_cache[path_item]
+ except KeyError:
+ for path_hook in sys.path_hooks:
+ try:
+ importer = path_hook(path_item)
+ break
+ except ImportError:
+ pass
+ else:
+ importer = None
+ sys.path_importer_cache.setdefault(path_item, importer)
+
+ if importer is None:
+ try:
+ importer = ImpImporter(path_item)
+ except ImportError:
+ importer = None
+ return importer
+
+
+def iter_importers(fullname=""):
+ """Yield PEP 302 importers for the given module name
+
+ If fullname contains a '.', the importers will be for the package
+ containing fullname, otherwise they will be importers for sys.meta_path,
+ sys.path, and Python's "classic" import machinery, in that order. If
+ the named module is in a package, that package is imported as a side
+ effect of invoking this function.
+
+ Non PEP 302 mechanisms (e.g. the Windows registry) used by the
+ standard import machinery to find files in alternative locations
+ are partially supported, but are searched AFTER sys.path. Normally,
+ these locations are searched BEFORE sys.path, preventing sys.path
+ entries from shadowing them.
+
+ For this to cause a visible difference in behaviour, there must
+ be a module or package name that is accessible via both sys.path
+ and one of the non PEP 302 file system mechanisms. In this case,
+ the emulation will find the former version, while the builtin
+ import mechanism will find the latter.
+
+ Items of the following types can be affected by this discrepancy:
+ imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
+ """
+ if fullname.startswith('.'):
+ raise ImportError("Relative module names not supported")
+ if '.' in fullname:
+ # Get the containing package's __path__
+ pkg = '.'.join(fullname.split('.')[:-1])
+ if pkg not in sys.modules:
+ __import__(pkg)
+ path = getattr(sys.modules[pkg], '__path__', None) or []
+ else:
+ for importer in sys.meta_path:
+ yield importer
+ path = sys.path
+ for item in path:
+ yield get_importer(item)
+ if '.' not in fullname:
+ yield ImpImporter()
+
+def get_loader(module_or_name):
+ """Get a PEP 302 "loader" object for module_or_name
+
+ If the module or package is accessible via the normal import
+ mechanism, a wrapper around the relevant part of that machinery
+ is returned. Returns None if the module cannot be found or imported.
+ If the named module is not already imported, its containing package
+ (if any) is imported, in order to establish the package __path__.
+
+ This function uses iter_importers(), and is thus subject to the same
+ limitations regarding platform-specific special import locations such
+ as the Windows registry.
+ """
+ if module_or_name in sys.modules:
+ module_or_name = sys.modules[module_or_name]
+ if isinstance(module_or_name, ModuleType):
+ module = module_or_name
+ loader = getattr(module, '__loader__', None)
+ if loader is not None:
+ return loader
+ fullname = module.__name__
+ else:
+ fullname = module_or_name
+ return find_loader(fullname)
+
+def find_loader(fullname):
+ """Find a PEP 302 "loader" object for fullname
+
+ If fullname contains dots, path must be the containing package's __path__.
+ Returns None if the module cannot be found or imported. This function uses
+ iter_importers(), and is thus subject to the same limitations regarding
+ platform-specific special import locations such as the Windows registry.
+ """
+ for importer in iter_importers(fullname):
+ loader = importer.find_module(fullname)
+ if loader is not None:
+ return loader
+
+ return None
+
+
+def extend_path(path, name):
+ """Extend a package's path.
+
+ Intended use is to place the following code in a package's __init__.py:
+
+ from pkgutil import extend_path
+ __path__ = extend_path(__path__, __name__)
+
+ This will add to the package's __path__ all subdirectories of
+ directories on sys.path named after the package. This is useful
+ if one wants to distribute different parts of a single logical
+ package as multiple directories.
+
+ It also looks for *.pkg files beginning where * matches the name
+ argument. This feature is similar to *.pth files (see site.py),
+ except that it doesn't special-case lines starting with 'import'.
+ A *.pkg file is trusted at face value: apart from checking for
+ duplicates, all entries found in a *.pkg file are added to the
+ path, regardless of whether they exist on the filesystem. (This
+ is a feature.)
+
+ If the input path is not a list (as is the case for frozen
+ packages) it is returned unchanged. The input path is not
+ modified; an extended copy is returned. Items are only appended
+ to the copy at the end.
+
+ It is assumed that sys.path is a sequence. Items of sys.path that
+ are not (unicode or 8-bit) strings referring to existing
+ directories are ignored. Unicode items of sys.path that cause
+ errors when used as filenames may cause this function to raise an
+ exception (in line with os.path.isdir() behavior).
+ """
+
+ if not isinstance(path, list):
+ # This could happen e.g. when this is called from inside a
+ # frozen package. Return the path unchanged in that case.
+ return path
+
+ pname = os.path.join(*name.split('.')) # Reconstitute as relative path
+ # Just in case os.extsep != '.'
+ sname = os.extsep.join(name.split('.'))
+ sname_pkg = sname + os.extsep + "pkg"
+ init_py = "__init__" + os.extsep + "py"
+
+ path = path[:] # Start with a copy of the existing path
+
+ for dir in sys.path:
+ if not isinstance(dir, basestring) or not os.path.isdir(dir):
+ continue
+ subdir = os.path.join(dir, pname)
+ # XXX This may still add duplicate entries to path on
+ # case-insensitive filesystems
+ initfile = os.path.join(subdir, init_py)
+ if subdir not in path and os.path.isfile(initfile):
+ path.append(subdir)
+ # XXX Is this the right thing for subpackages like zope.app?
+ # It looks for a file named "zope.app.pkg"
+ pkgfile = os.path.join(dir, sname_pkg)
+ if os.path.isfile(pkgfile):
+ try:
+ f = open(pkgfile)
+ except IOError, msg:
+ sys.stderr.write("Can't open %s: %s\n" %
+ (pkgfile, msg))
+ else:
+ for line in f:
+ line = line.rstrip('\n')
+ if not line or line.startswith('#'):
+ continue
+ path.append(line) # Don't check for existence!
+ f.close()
+
+ return path
+
+def get_data(package, resource):
+ """Get a resource from a package.
+
+ This is a wrapper round the PEP 302 loader get_data API. The package
+ argument should be the name of a package, in standard module format
+ (foo.bar). The resource argument should be in the form of a relative
+ filename, using '/' as the path separator. The parent directory name '..'
+ is not allowed, and nor is a rooted name (starting with a '/').
+
+ The function returns a binary string, which is the contents of the
+ specified resource.
+
+ For packages located in the filesystem, which have already been imported,
+ this is the rough equivalent of
+
+ d = os.path.dirname(sys.modules[package].__file__)
+ data = open(os.path.join(d, resource), 'rb').read()
+
+ If the package cannot be located or loaded, or it uses a PEP 302 loader
+ which does not support get_data(), then None is returned.
+ """
+
+ loader = get_loader(package)
+ if loader is None or not hasattr(loader, 'get_data'):
+ return None
+ mod = sys.modules.get(package) or loader.load_module(package)
+ if mod is None or not hasattr(mod, '__file__'):
+ return None
+
+ # Modify the resource name to be compatible with the loader.get_data
+ # signature - an os.path format "filename" starting with the dirname of
+ # the package's __file__
+ parts = resource.split('/')
+ parts.insert(0, os.path.dirname(mod.__file__))
+ resource_name = os.path.join(*parts)
+ return loader.get_data(resource_name)
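A minimal usage sketch for the vendored pkgutil_old module added above, assuming the third_party directory is on sys.path; 'mypackage' and the resource path are hypothetical names used only for illustration.

    import pkgutil_old as pkgutil

    # Enumerate top-level modules reachable from sys.path without importing them.
    for importer, name, ispkg in pkgutil.iter_modules():
        print name, ispkg

    # Read a resource shipped next to a package's __init__.py; returns a byte
    # string, or None if the package or its loader cannot provide it.
    data = pkgutil.get_data('mypackage', 'templates/default.cfg')  # hypothetical names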
diff --git a/python/helpers/pydev/third_party/pluginbase.py b/python/helpers/pydev/third_party/pluginbase.py
new file mode 100644
index 000000000000..0ad6404eee00
--- /dev/null
+++ b/python/helpers/pydev/third_party/pluginbase.py
@@ -0,0 +1,454 @@
+# -*- coding: utf-8 -*-
+"""
+ pluginbase
+ ~~~~~~~~~~
+
+ Pluginbase is a module for Python that provides a system for building
+ plugin based applications.
+
+ :copyright: (c) Copyright 2014 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+
+from pydevd_constants import IS_PY24, IS_PY3K
+
+
+if IS_PY24:
+ from third_party.uuid_old import uuid4
+else:
+ from uuid import uuid4
+
+if IS_PY3K:
+ import pkgutil
+else:
+ import pkgutil_old as pkgutil
+
+import errno
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+import threading
+
+from types import ModuleType
+from weakref import ref as weakref
+
+
+PY2 = sys.version_info[0] == 2
+if PY2:
+ text_type = unicode
+ string_types = (unicode, str)
+ from cStringIO import StringIO as NativeBytesIO
+else:
+ text_type = str
+ string_types = (str,)
+ from io import BytesIO as NativeBytesIO
+
+
+_local = threading.local()
+
+_internalspace = ModuleType(__name__ + '._internalspace')
+_internalspace.__path__ = []
+sys.modules[_internalspace.__name__] = _internalspace
+
+
+def get_plugin_source(module=None, stacklevel=None):
+ """Returns the :class:`PluginSource` for the current module or the given
+ module. The module can be provided by name (in which case an import
+ will be attempted) or as a module object.
+
+ If no plugin source can be discovered, the return value from this method
+ is `None`.
+
+ This function can be very useful if additional data has been attached
+ to the plugin source. For instance this could allow plugins to get
+ access to a back reference to the application that created them.
+
+ :param module: optionally the module to locate the plugin source of.
+ :param stacklevel: defines how many levels up the module should search
+ for before it discovers the plugin frame. The
+ default is 0. This can be useful for writing wrappers
+ around this function.
+ """
+ if module is None:
+ frm = sys._getframe((stacklevel or 0) + 1)
+ name = frm.f_globals['__name__']
+ glob = frm.f_globals
+ elif isinstance(module, string_types):
+ frm = sys._getframe(1)
+ name = module
+ glob = __import__(module, frm.f_globals,
+ frm.f_locals, ['__dict__']).__dict__
+ else:
+ name = module.__name__
+ glob = module.__dict__
+ return _discover_space(name, glob)
+
+
+def _discover_space(name, globals):
+ try:
+ return _local.space_stack[-1]
+ except (AttributeError, IndexError):
+ pass
+
+ if '__pluginbase_state__' in globals:
+ return globals['__pluginbase_state__'].source
+
+ mod_name = globals.get('__name__')
+ if mod_name is not None and \
+ mod_name.startswith(_internalspace.__name__ + '.'):
+ end = mod_name.find('.', len(_internalspace.__name__) + 1)
+ space = sys.modules.get(mod_name[:end])
+ if space is not None:
+ return space.__pluginbase_state__.source
+
+
+def _shutdown_module(mod):
+ members = list(mod.__dict__.items())
+ for key, value in members:
+ if key[:1] != '_':
+ setattr(mod, key, None)
+ for key, value in members:
+ setattr(mod, key, None)
+
+
+def _to_bytes(s):
+ if isinstance(s, text_type):
+ return s.encode('utf-8')
+ return s
+
+
+class _IntentionallyEmptyModule(ModuleType):
+
+ def __getattr__(self, name):
+ try:
+ return ModuleType.__getattr__(self, name)
+ except AttributeError:
+ if name[:2] == '__':
+ raise
+ raise RuntimeError(
+ 'Attempted to import from a plugin base module (%s) without '
+ 'having a plugin source activated. To solve this error '
+ 'you have to move the import into a "with" block of the '
+ 'associated plugin source.' % self.__name__)
+
+
+class _PluginSourceModule(ModuleType):
+
+ def __init__(self, source):
+ modname = '%s.%s' % (_internalspace.__name__, source.spaceid)
+ ModuleType.__init__(self, modname)
+ self.__pluginbase_state__ = PluginBaseState(source)
+
+ @property
+ def __path__(self):
+ try:
+ ps = self.__pluginbase_state__.source
+ except AttributeError:
+ return []
+ return ps.searchpath + ps.base.searchpath
+
+
+def _setup_base_package(module_name):
+ try:
+ mod = __import__(module_name, None, None, ['__name__'])
+ except ImportError:
+ mod = None
+ if '.' in module_name:
+ parent_mod = __import__(module_name.rsplit('.', 1)[0],
+ None, None, ['__name__'])
+ else:
+ parent_mod = None
+
+ if mod is None:
+ mod = _IntentionallyEmptyModule(module_name)
+ if parent_mod is not None:
+ setattr(parent_mod, module_name.rsplit('.', 1)[-1], mod)
+ sys.modules[module_name] = mod
+
+
+class PluginBase(object):
+ """The plugin base acts as a control object around a dummy Python
+ package that acts as a container for plugins. Usually each
+ application creates exactly one base object for all plugins.
+
+ :param package: the name of the package that acts as the plugin base.
+ Usually this module does not exist. Unless you know
+ what you are doing you should not create this module
+ on the file system.
+ :param searchpath: optionally a shared search path for modules that
+ will be used by all plugin sources registered.
+ """
+
+ def __init__(self, package, searchpath=None):
+ #: the name of the dummy package.
+ self.package = package
+ if searchpath is None:
+ searchpath = []
+ #: the default search path shared by all plugins as list.
+ self.searchpath = searchpath
+ _setup_base_package(package)
+
+ def make_plugin_source(self, *args, **kwargs):
+ """Creats a plugin source for this plugin base and returns it.
+ All parameters are forwarded to :class:`PluginSource`.
+ """
+ return PluginSource(self, *args, **kwargs)
+
+
+class PluginSource(object):
+ """The plugin source is what ultimately decides where plugins are
+ loaded from. Plugin bases can have multiple plugin sources which act
+ as isolation layers. While this is not a security system, it
+ generally prevents plugins from different sources from accidentally
+ cross-talking.
+
+ Once a plugin source has been created it can be used in a ``with``
+ statement to change the behavior of the ``import`` statement in the
+ block to define which source to load the plugins from::
+
+ plugin_source = plugin_base.make_plugin_source(
+ searchpath=['./path/to/plugins', './path/to/more/plugins'])
+
+ with plugin_source:
+ from myapplication.plugins import my_plugin
+
+ :param base: the base this plugin source belongs to.
+ :param identifier: optionally a stable identifier. If it's not defined
+ a random identifier is picked. It's useful to set this
+ to a stable value to have consistent tracebacks
+ between restarts and to support pickle.
+ :param searchpath: a list of paths where plugins are looked for.
+ :param persist: optionally this can be set to `True` and the plugins
+ will not be cleaned up when the plugin source gets
+ garbage collected.
+ """
+ # Set these here to false by default so that a completely failing
+ # constructor does not fuck up the destructor.
+ persist = False
+ mod = None
+
+ def __init__(self, base, identifier=None, searchpath=None,
+ persist=False):
+ #: indicates if this plugin source persists or not.
+ self.persist = persist
+ if identifier is None:
+ identifier = str(uuid4())
+ #: the identifier for this source.
+ self.identifier = identifier
+ #: A reference to the plugin base that created this source.
+ self.base = base
+ #: a list of paths where plugins are searched in.
+ self.searchpath = searchpath
+ #: The internal module name of the plugin source as it appears
+ #: in the :mod:`pluginsource._internalspace`.
+ div = None
+ self.spaceid = '_sp' + md5(
+ _to_bytes(self.base.package) + _to_bytes('|') +
+ _to_bytes(self.identifier)
+ ).hexdigest()
+ #: a reference to the module on the internal
+ #: :mod:`pluginsource._internalspace`.
+ self.mod = _PluginSourceModule(self)
+
+ if hasattr(_internalspace, self.spaceid):
+ raise RuntimeError('This plugin source already exists.')
+ sys.modules[self.mod.__name__] = self.mod
+ setattr(_internalspace, self.spaceid, self.mod)
+
+ def __del__(self):
+ if not self.persist:
+ self.cleanup()
+
+ def list_plugins(self):
+ """Returns a sorted list of all plugins that are available in this
+ plugin source. This can be useful to automatically discover plugins
+ that are available and is usually used together with
+ :meth:`load_plugin`.
+ """
+ rv = []
+ for _, modname, ispkg in pkgutil.iter_modules(self.mod.__path__):
+ rv.append(modname)
+ return sorted(rv)
+
+ def load_plugin(self, name):
+ """This automatically loads a plugin by the given name from the
+ current source and returns the module. This is a convenient
+ alternative to the import statement and saves you from invoking
+ ``__import__`` or a similar function yourself.
+
+ :param name: the name of the plugin to load.
+ """
+ if '.' in name:
+ raise ImportError('Plugin names cannot contain dots.')
+
+ #with self:
+ # return __import__(self.base.package + '.' + name,
+ # globals(), {}, ['__name__'])
+
+ self.__assert_not_cleaned_up()
+ _local.__dict__.setdefault('space_stack', []).append(self)
+ try:
+ res = __import__(self.base.package + '.' + name,
+ globals(), {}, ['__name__'])
+ return res
+ finally:
+ try:
+ _local.space_stack.pop()
+ except (AttributeError, IndexError):
+ pass
+
+ def open_resource(self, plugin, filename):
+ """This function locates a resource inside the plugin and returns
+ a byte stream to the contents of it. If the resource cannot be
+ loaded an :exc:`IOError` will be raised. Only plugins that are
+ real Python packages can contain resources. Plain old Python
+ modules do not allow this for obvious reasons.
+
+ .. versionadded:: 0.3
+
+ :param plugin: the name of the plugin to open the resource of.
+ :param filename: the name of the file within the plugin to open.
+ """
+ mod = self.load_plugin(plugin)
+ fn = getattr(mod, '__file__', None)
+ if fn is not None:
+ if fn.endswith(('.pyc', '.pyo')):
+ fn = fn[:-1]
+ if os.path.isfile(fn):
+ return open(os.path.join(os.path.dirname(fn), filename), 'rb')
+ buf = pkgutil.get_data(self.mod.__name__ + '.' + plugin, filename)
+ if buf is None:
+ raise IOError(errno.ENOENT, 'Could not find resource')
+ return NativeBytesIO(buf)
+
+ def cleanup(self):
+ """Cleans up all loaded plugins manually. This is necessary to
+ call only if :attr:`persist` is enabled. Otherwise this happens
+ automatically when the source gets garbage collected.
+ """
+ self.__cleanup()
+
+ def __cleanup(self, _sys=sys, _shutdown_module=_shutdown_module):
+ # The default parameters are necessary because this can be fired
+ # from the destructor and so late when the interpreter shuts down
+ # that these functions and modules might be gone.
+ if self.mod is None:
+ return
+ modname = self.mod.__name__
+ self.mod.__pluginbase_state__ = None
+ self.mod = None
+ try:
+ delattr(_internalspace, self.spaceid)
+ except AttributeError:
+ pass
+ prefix = modname + '.'
+ _sys.modules.pop(modname)
+ for key, value in list(_sys.modules.items()):
+ if not key.startswith(prefix):
+ continue
+ mod = _sys.modules.pop(key, None)
+ if mod is None:
+ continue
+ _shutdown_module(mod)
+
+ def __assert_not_cleaned_up(self):
+ if self.mod is None:
+ raise RuntimeError('The plugin source was already cleaned up.')
+
+ def __enter__(self):
+ self.__assert_not_cleaned_up()
+ _local.__dict__.setdefault('space_stack', []).append(self)
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ try:
+ _local.space_stack.pop()
+ except (AttributeError, IndexError):
+ pass
+
+ def _rewrite_module_path(self, modname):
+ self.__assert_not_cleaned_up()
+ if modname == self.base.package:
+ return self.mod.__name__
+ elif modname.startswith(self.base.package + '.'):
+ pieces = modname.split('.')
+ return self.mod.__name__ + '.' + '.'.join(
+ pieces[self.base.package.count('.') + 1:])
+
+
+class PluginBaseState(object):
+ __slots__ = ('_source',)
+
+ def __init__(self, source):
+ if source.persist:
+ self._source = lambda: source
+ else:
+ self._source = weakref(source)
+
+ @property
+ def source(self):
+ rv = self._source()
+ if rv is None:
+ raise AttributeError('Plugin source went away')
+ return rv
+
+
+class _ImportHook(ModuleType):
+
+ def __init__(self, name, system_import):
+ ModuleType.__init__(self, name)
+ self._system_import = system_import
+ self.enabled = True
+
+ def enable(self):
+ """Enables the import hook which drives the plugin base system.
+ This is the default.
+ """
+ self.enabled = True
+
+ def disable(self):
+ """Disables the import hook and restores the default import system
+ behavior. This effectively breaks pluginbase but can be useful
+ for testing purposes.
+ """
+ self.enabled = False
+
+ def plugin_import(self, name, globals=None, locals=None,
+ fromlist=None, level=-2):
+ import_name = name
+ if self.enabled:
+ ref_globals = globals
+ if ref_globals is None:
+ ref_globals = sys._getframe(1).f_globals
+ space = _discover_space(name, ref_globals)
+ if space is not None:
+ actual_name = space._rewrite_module_path(name)
+ if actual_name is not None:
+ import_name = actual_name
+ if level == -2:
+ # fake impossible value; default value depends on version
+ if IS_PY24:
+ # the level parameter was added in version 2.5
+ return self._system_import(import_name, globals, locals, fromlist)
+ elif IS_PY3K:
+ # default value for level parameter in python 3
+ level = 0
+ else:
+ # default value for level parameter in other versions
+ level = -1
+ return self._system_import(import_name, globals, locals,
+ fromlist, level)
+
+
+try:
+ import __builtin__ as builtins
+except ImportError:
+ import builtins
+
+import_hook = _ImportHook(__name__ + '.import_hook', builtins.__import__)
+builtins.__import__ = import_hook.plugin_import
+sys.modules[import_hook.__name__] = import_hook
+del builtins
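A minimal usage sketch for the pluginbase module added above, assuming the third_party directory is on sys.path; the package name, identifier, search path, and plugin names are hypothetical, and only the API shown in the file (PluginBase, make_plugin_source, list_plugins, load_plugin, open_resource) is used.

    from pluginbase import PluginBase

    # One base per application; 'myapp.plugins' is a dummy container package
    # that normally does not exist on disk.
    plugin_base = PluginBase(package='myapp.plugins')

    # A source ties the base to concrete plugin directories; a stable
    # identifier keeps tracebacks consistent between runs.
    plugin_source = plugin_base.make_plugin_source(
        searchpath=['./plugins'], identifier='myapp')

    for name in plugin_source.list_plugins():       # discover available plugins
        plugin = plugin_source.load_plugin(name)    # import from this source only

    # Package-style plugins can also expose bundled resources as byte streams.
    stream = plugin_source.open_resource('some_plugin', 'data.txt')  # hypothetical names

Note that importing the module installs an import hook by replacing builtins.__import__ (see the bottom of the file); that hook is what redirects plugin imports into the per-source internal namespace.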
diff --git a/python/helpers/pydev/third_party/uuid_old.py b/python/helpers/pydev/third_party/uuid_old.py
new file mode 100644
index 000000000000..ae3da25ca557
--- /dev/null
+++ b/python/helpers/pydev/third_party/uuid_old.py
@@ -0,0 +1,541 @@
+r"""UUID objects (universally unique identifiers) according to RFC 4122.
+
+This module provides immutable UUID objects (class UUID) and the functions
+uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
+UUIDs as specified in RFC 4122.
+
+If all you want is a unique ID, you should probably call uuid1() or uuid4().
+Note that uuid1() may compromise privacy since it creates a UUID containing
+the computer's network address. uuid4() creates a random UUID.
+
+Typical usage:
+
+ >>> import uuid
+
+ # make a UUID based on the host ID and current time
+ >>> uuid.uuid1()
+ UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
+
+ # make a UUID using an MD5 hash of a namespace UUID and a name
+ >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
+ UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
+
+ # make a random UUID
+ >>> uuid.uuid4()
+ UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
+
+ # make a UUID using a SHA-1 hash of a namespace UUID and a name
+ >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
+ UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
+
+ # make a UUID from a string of hex digits (braces and hyphens ignored)
+ >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
+
+ # convert a UUID to a string of hex digits in standard form
+ >>> str(x)
+ '00010203-0405-0607-0809-0a0b0c0d0e0f'
+
+ # get the raw 16 bytes of the UUID
+ >>> x.bytes
+ '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
+
+ # make a UUID from a 16-byte string
+ >>> uuid.UUID(bytes=x.bytes)
+ UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
+"""
+
+__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
+
+RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
+ 'reserved for NCS compatibility', 'specified in RFC 4122',
+ 'reserved for Microsoft compatibility', 'reserved for future definition']
+
+class UUID(object):
+ """Instances of the UUID class represent UUIDs as specified in RFC 4122.
+ UUID objects are immutable, hashable, and usable as dictionary keys.
+ Converting a UUID to a string with str() yields something in the form
+ '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
+ five possible forms: a similar string of hexadecimal digits, or a tuple
+ of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
+ 48-bit values respectively) as an argument named 'fields', or a string
+ of 16 bytes (with all the integer fields in big-endian order) as an
+ argument named 'bytes', or a string of 16 bytes (with the first three
+ fields in little-endian order) as an argument named 'bytes_le', or a
+ single 128-bit integer as an argument named 'int'.
+
+ UUIDs have these read-only attributes:
+
+ bytes the UUID as a 16-byte string (containing the six
+ integer fields in big-endian byte order)
+
+ bytes_le the UUID as a 16-byte string (with time_low, time_mid,
+ and time_hi_version in little-endian byte order)
+
+ fields a tuple of the six integer fields of the UUID,
+ which are also available as six individual attributes
+ and two derived attributes:
+
+ time_low the first 32 bits of the UUID
+ time_mid the next 16 bits of the UUID
+ time_hi_version the next 16 bits of the UUID
+ clock_seq_hi_variant the next 8 bits of the UUID
+ clock_seq_low the next 8 bits of the UUID
+ node the last 48 bits of the UUID
+
+ time the 60-bit timestamp
+ clock_seq the 14-bit sequence number
+
+ hex the UUID as a 32-character hexadecimal string
+
+ int the UUID as a 128-bit integer
+
+ urn the UUID as a URN as specified in RFC 4122
+
+ variant the UUID variant (one of the constants RESERVED_NCS,
+ RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
+
+ version the UUID version number (1 through 5, meaningful only
+ when the variant is RFC_4122)
+ """
+
+ def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
+ int=None, version=None):
+ r"""Create a UUID from either a string of 32 hexadecimal digits,
+ a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
+ in little-endian order as the 'bytes_le' argument, a tuple of six
+ integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
+ 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
+ the 'fields' argument, or a single 128-bit integer as the 'int'
+ argument. When a string of hex digits is given, curly braces,
+ hyphens, and a URN prefix are all optional. For example, these
+ expressions all yield the same UUID:
+
+ UUID('{12345678-1234-5678-1234-567812345678}')
+ UUID('12345678123456781234567812345678')
+ UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
+ UUID(bytes='\x12\x34\x56\x78'*4)
+ UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
+ '\x12\x34\x56\x78\x12\x34\x56\x78')
+ UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
+ UUID(int=0x12345678123456781234567812345678)
+
+ Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
+ be given. The 'version' argument is optional; if given, the resulting
+ UUID will have its variant and version set according to RFC 4122,
+ overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
+ """
+
+ if [hex, bytes, bytes_le, fields, int].count(None) != 4:
+ raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
+ if hex is not None:
+ hex = hex.replace('urn:', '').replace('uuid:', '')
+ hex = hex.strip('{}').replace('-', '')
+ if len(hex) != 32:
+ raise ValueError('badly formed hexadecimal UUID string')
+ int = long(hex, 16)
+ if bytes_le is not None:
+ if len(bytes_le) != 16:
+ raise ValueError('bytes_le is not a 16-char string')
+ bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
+ bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
+ bytes_le[8:])
+ if bytes is not None:
+ if len(bytes) != 16:
+ raise ValueError('bytes is not a 16-char string')
+ int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
+ if fields is not None:
+ if len(fields) != 6:
+ raise ValueError('fields is not a 6-tuple')
+ (time_low, time_mid, time_hi_version,
+ clock_seq_hi_variant, clock_seq_low, node) = fields
+ if not 0 <= time_low < 1<<32L:
+ raise ValueError('field 1 out of range (need a 32-bit value)')
+ if not 0 <= time_mid < 1<<16L:
+ raise ValueError('field 2 out of range (need a 16-bit value)')
+ if not 0 <= time_hi_version < 1<<16L:
+ raise ValueError('field 3 out of range (need a 16-bit value)')
+ if not 0 <= clock_seq_hi_variant < 1<<8L:
+ raise ValueError('field 4 out of range (need an 8-bit value)')
+ if not 0 <= clock_seq_low < 1<<8L:
+ raise ValueError('field 5 out of range (need an 8-bit value)')
+ if not 0 <= node < 1<<48L:
+ raise ValueError('field 6 out of range (need a 48-bit value)')
+ clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
+ int = ((time_low << 96L) | (time_mid << 80L) |
+ (time_hi_version << 64L) | (clock_seq << 48L) | node)
+ if int is not None:
+ if not 0 <= int < 1<<128L:
+ raise ValueError('int is out of range (need a 128-bit value)')
+ if version is not None:
+ if not 1 <= version <= 5:
+ raise ValueError('illegal version number')
+ # Set the variant to RFC 4122.
+ int &= ~(0xc000 << 48L)
+ int |= 0x8000 << 48L
+ # Set the version number.
+ int &= ~(0xf000 << 64L)
+ int |= version << 76L
+ self.__dict__['int'] = int
+
+ def __cmp__(self, other):
+ if isinstance(other, UUID):
+ return cmp(self.int, other.int)
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(self.int)
+
+ def __int__(self):
+ return self.int
+
+ def __repr__(self):
+ return 'UUID(%r)' % str(self)
+
+ def __setattr__(self, name, value):
+ raise TypeError('UUID objects are immutable')
+
+ def __str__(self):
+ hex = '%032x' % self.int
+ return '%s-%s-%s-%s-%s' % (
+ hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
+
+ def get_bytes(self):
+ bytes = ''
+ for shift in range(0, 128, 8):
+ bytes = chr((self.int >> shift) & 0xff) + bytes
+ return bytes
+
+ bytes = property(get_bytes)
+
+ def get_bytes_le(self):
+ bytes = self.bytes
+ return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
+ bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
+
+ bytes_le = property(get_bytes_le)
+
+ def get_fields(self):
+ return (self.time_low, self.time_mid, self.time_hi_version,
+ self.clock_seq_hi_variant, self.clock_seq_low, self.node)
+
+ fields = property(get_fields)
+
+ def get_time_low(self):
+ return self.int >> 96L
+
+ time_low = property(get_time_low)
+
+ def get_time_mid(self):
+ return (self.int >> 80L) & 0xffff
+
+ time_mid = property(get_time_mid)
+
+ def get_time_hi_version(self):
+ return (self.int >> 64L) & 0xffff
+
+ time_hi_version = property(get_time_hi_version)
+
+ def get_clock_seq_hi_variant(self):
+ return (self.int >> 56L) & 0xff
+
+ clock_seq_hi_variant = property(get_clock_seq_hi_variant)
+
+ def get_clock_seq_low(self):
+ return (self.int >> 48L) & 0xff
+
+ clock_seq_low = property(get_clock_seq_low)
+
+ def get_time(self):
+ return (((self.time_hi_version & 0x0fffL) << 48L) |
+ (self.time_mid << 32L) | self.time_low)
+
+ time = property(get_time)
+
+ def get_clock_seq(self):
+ return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
+ self.clock_seq_low)
+
+ clock_seq = property(get_clock_seq)
+
+ def get_node(self):
+ return self.int & 0xffffffffffff
+
+ node = property(get_node)
+
+ def get_hex(self):
+ return '%032x' % self.int
+
+ hex = property(get_hex)
+
+ def get_urn(self):
+ return 'urn:uuid:' + str(self)
+
+ urn = property(get_urn)
+
+ def get_variant(self):
+ if not self.int & (0x8000 << 48L):
+ return RESERVED_NCS
+ elif not self.int & (0x4000 << 48L):
+ return RFC_4122
+ elif not self.int & (0x2000 << 48L):
+ return RESERVED_MICROSOFT
+ else:
+ return RESERVED_FUTURE
+
+ variant = property(get_variant)
+
+ def get_version(self):
+ # The version bits are only meaningful for RFC 4122 UUIDs.
+ if self.variant == RFC_4122:
+ return int((self.int >> 76L) & 0xf)
+
+ version = property(get_version)
+
+def _find_mac(command, args, hw_identifiers, get_index):
+ import os
+ for dir in ['', '/sbin/', '/usr/sbin']:
+ executable = os.path.join(dir, command)
+ if not os.path.exists(executable):
+ continue
+
+ try:
+ # LC_ALL to get English output, 2>/dev/null to
+ # prevent output on stderr
+ cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
+ pipe = os.popen(cmd)
+ except IOError:
+ continue
+
+ for line in pipe:
+ words = line.lower().split()
+ for i in range(len(words)):
+ if words[i] in hw_identifiers:
+ return int(words[get_index(i)].replace(':', ''), 16)
+ return None
+
+def _ifconfig_getnode():
+ """Get the hardware address on Unix by running ifconfig."""
+
+ # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
+ for args in ('', '-a', '-av'):
+ mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
+ if mac:
+ return mac
+
+ import socket
+ ip_addr = socket.gethostbyname(socket.gethostname())
+
+ # Try getting the MAC addr from arp based on our IP address (Solaris).
+ mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
+ if mac:
+ return mac
+
+ # This might work on HP-UX.
+ mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
+ if mac:
+ return mac
+
+ return None
+
+def _ipconfig_getnode():
+ """Get the hardware address on Windows by running ipconfig.exe."""
+ import os, re
+ dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
+ try:
+ import ctypes
+ buffer = ctypes.create_string_buffer(300)
+ ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
+ dirs.insert(0, buffer.value.decode('mbcs'))
+ except:
+ pass
+ for dir in dirs:
+ try:
+ pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
+ except IOError:
+ continue
+ for line in pipe:
+ value = line.split(':')[-1].strip().lower()
+ if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
+ return int(value.replace('-', ''), 16)
+
+def _netbios_getnode():
+ """Get the hardware address on Windows using NetBIOS calls.
+ See http://support.microsoft.com/kb/118623 for details."""
+ import win32wnet, netbios
+ ncb = netbios.NCB()
+ ncb.Command = netbios.NCBENUM
+ ncb.Buffer = adapters = netbios.LANA_ENUM()
+ adapters._pack()
+ if win32wnet.Netbios(ncb) != 0:
+ return
+ adapters._unpack()
+ for i in range(adapters.length):
+ ncb.Reset()
+ ncb.Command = netbios.NCBRESET
+ ncb.Lana_num = ord(adapters.lana[i])
+ if win32wnet.Netbios(ncb) != 0:
+ continue
+ ncb.Reset()
+ ncb.Command = netbios.NCBASTAT
+ ncb.Lana_num = ord(adapters.lana[i])
+ ncb.Callname = '*'.ljust(16)
+ ncb.Buffer = status = netbios.ADAPTER_STATUS()
+ if win32wnet.Netbios(ncb) != 0:
+ continue
+ status._unpack()
+ bytes = map(ord, status.adapter_address)
+ return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
+ (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
+
+# Thanks to Thomas Heller for ctypes and for his help with its use here.
+
+# If ctypes is available, use it to find system routines for UUID generation.
+_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
+try:
+ import ctypes, ctypes.util
+ _buffer = ctypes.create_string_buffer(16)
+
+ # The uuid_generate_* routines are provided by libuuid on at least
+ # Linux and FreeBSD, and provided by libc on Mac OS X.
+ for libname in ['uuid', 'c']:
+ try:
+ lib = ctypes.CDLL(ctypes.util.find_library(libname))
+ except:
+ continue
+ if hasattr(lib, 'uuid_generate_random'):
+ _uuid_generate_random = lib.uuid_generate_random
+ if hasattr(lib, 'uuid_generate_time'):
+ _uuid_generate_time = lib.uuid_generate_time
+
+ # On Windows prior to 2000, UuidCreate gives a UUID containing the
+ # hardware address. On Windows 2000 and later, UuidCreate makes a
+ # random UUID and UuidCreateSequential gives a UUID containing the
+ # hardware address. These routines are provided by the RPC runtime.
+ # NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
+ # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
+ # to bear any relationship to the MAC address of any network device
+ # on the box.
+ try:
+ lib = ctypes.windll.rpcrt4
+ except:
+ lib = None
+ _UuidCreate = getattr(lib, 'UuidCreateSequential',
+ getattr(lib, 'UuidCreate', None))
+except:
+ pass
+
+def _unixdll_getnode():
+ """Get the hardware address on Unix using ctypes."""
+ _uuid_generate_time(_buffer)
+ return UUID(bytes=_buffer.raw).node
+
+def _windll_getnode():
+ """Get the hardware address on Windows using ctypes."""
+ if _UuidCreate(_buffer) == 0:
+ return UUID(bytes=_buffer.raw).node
+
+def _random_getnode():
+ """Get a random node ID, with eighth bit set as suggested by RFC 4122."""
+ import random
+ return random.randrange(0, 1<<48L) | 0x010000000000L
+
+_node = None
+
+def getnode():
+ """Get the hardware address as a 48-bit positive integer.
+
+ The first time this runs, it may launch a separate program, which could
+ be quite slow. If all attempts to obtain the hardware address fail, we
+ choose a random 48-bit number with its eighth bit set to 1 as recommended
+ in RFC 4122.
+ """
+
+ global _node
+ if _node is not None:
+ return _node
+
+ import sys
+ if sys.platform == 'win32':
+ getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
+ else:
+ getters = [_unixdll_getnode, _ifconfig_getnode]
+
+ for getter in getters + [_random_getnode]:
+ try:
+ _node = getter()
+ except:
+ continue
+ if _node is not None:
+ return _node
+
+_last_timestamp = None
+
+def uuid1(node=None, clock_seq=None):
+ """Generate a UUID from a host ID, sequence number, and the current time.
+ If 'node' is not given, getnode() is used to obtain the hardware
+ address. If 'clock_seq' is given, it is used as the sequence number;
+ otherwise a random 14-bit sequence number is chosen."""
+
+ # When the system provides a version-1 UUID generator, use it (but don't
+ # use UuidCreate here because its UUIDs don't conform to RFC 4122).
+ if _uuid_generate_time and node is clock_seq is None:
+ _uuid_generate_time(_buffer)
+ return UUID(bytes=_buffer.raw)
+
+ global _last_timestamp
+ import time
+ nanoseconds = int(time.time() * 1e9)
+ # 0x01b21dd213814000 is the number of 100-ns intervals between the
+ # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
+ timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
+ if timestamp <= _last_timestamp:
+ timestamp = _last_timestamp + 1
+ _last_timestamp = timestamp
+ if clock_seq is None:
+ import random
+ clock_seq = random.randrange(1<<14L) # instead of stable storage
+ time_low = timestamp & 0xffffffffL
+ time_mid = (timestamp >> 32L) & 0xffffL
+ time_hi_version = (timestamp >> 48L) & 0x0fffL
+ clock_seq_low = clock_seq & 0xffL
+ clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
+ if node is None:
+ node = getnode()
+ return UUID(fields=(time_low, time_mid, time_hi_version,
+ clock_seq_hi_variant, clock_seq_low, node), version=1)
+
+def uuid3(namespace, name):
+ """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
+ import md5
+ hash = md5.md5(namespace.bytes + name).digest()
+ return UUID(bytes=hash[:16], version=3)
+
+def uuid4():
+ """Generate a random UUID."""
+
+ # When the system provides a version-4 UUID generator, use it.
+ if _uuid_generate_random:
+ _uuid_generate_random(_buffer)
+ return UUID(bytes=_buffer.raw)
+
+ # Otherwise, get randomness from urandom or the 'random' module.
+ try:
+ import os
+ return UUID(bytes=os.urandom(16), version=4)
+ except:
+ import random
+ bytes = [chr(random.randrange(256)) for i in range(16)]
+ return UUID(bytes=bytes, version=4)
+
+def uuid5(namespace, name):
+ """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
+ import sha
+ hash = sha.sha(namespace.bytes + name).digest()
+ return UUID(bytes=hash[:16], version=5)
+
+# The following standard UUIDs are for use with uuid3() or uuid5().
+
+NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
+NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
+NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
+NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
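A minimal usage sketch for the vendored uuid_old module added above, mirroring the examples in its module docstring; it assumes the third_party directory is on sys.path (pluginbase imports it as third_party.uuid_old on Python 2.4).

    import uuid_old as uuid

    u = uuid.uuid4()                     # random UUID (version 4)
    print str(u)                         # e.g. '16fd2706-8baf-433b-82eb-8c7fada847da'
    print u.hex, u.version, u.variant    # 32-char hex string, 4, 'specified in RFC 4122'

    # Name-based UUIDs are deterministic for a given namespace and name.
    n = uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')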