author     Emre Kultursay <emrekultursay@google.com>  2021-08-09 18:07:11 -0700
committer  Emre Kultursay <emrekultursay@google.com>  2021-08-10 21:51:26 -0700
commit     ae5977f952ff699c34cd73a482a0c23891638789 (patch)
tree       d2a606fe3809c2369ed4fa3597b6c74e527b0f68
parent     e00f16d0e5e58d4cb83cf959094534bfc9863ff5 (diff)
download   windows-x86-ae5977f952ff699c34cd73a482a0c23891638789.tar.gz
Upgrade to Python 3.9 (Windows)
Taken from: ab/7630817

Bug: 172971586
Test: n/a
Change-Id: Ia5b2abf912dba5dc1c0ecd457914693ceeafd39b
-rw-r--r--  x64/DLLs/_asyncio.pyd  bin 60928 -> 61952 bytes
-rw-r--r--  x64/DLLs/_bz2.pyd  bin 81920 -> 81920 bytes
-rw-r--r--  x64/DLLs/_ctypes.pyd  bin 129024 -> 130048 bytes
-rw-r--r--  x64/DLLs/_decimal.pyd  bin 305152 -> 307200 bytes
-rw-r--r--  x64/DLLs/_elementtree.pyd  bin 208896 -> 206848 bytes
-rw-r--r--  x64/DLLs/_hashlib.pyd  bin 36352 -> 55296 bytes
-rw-r--r--  x64/DLLs/_lzma.pyd  bin 236544 -> 179200 bytes
-rw-r--r--  x64/DLLs/_msi.pyd  bin 31744 -> 31744 bytes
-rw-r--r--  x64/DLLs/_multiprocessing.pyd  bin 22528 -> 22016 bytes
-rw-r--r--  x64/DLLs/_overlapped.pyd  bin 39424 -> 39424 bytes
-rw-r--r--  x64/DLLs/_queue.pyd  bin 21504 -> 22016 bytes
-rw-r--r--  x64/DLLs/_socket.pyd  bin 73216 -> 75264 bytes
-rw-r--r--  x64/DLLs/_sqlite3.pyd  bin 74752 -> 75776 bytes
-rw-r--r--  x64/DLLs/_ssl.pyd  bin 123392 -> 159232 bytes
-rw-r--r--  x64/DLLs/_uuid.pyd  bin 0 -> 15360 bytes
-rw-r--r--  x64/DLLs/_zoneinfo.pyd  bin 0 -> 38912 bytes
-rw-r--r--  x64/DLLs/libcrypto-1_1.dll  bin 3381792 -> 3399200 bytes
-rw-r--r--  x64/DLLs/libssl-1_1.dll  bin 686112 -> 689184 bytes
-rw-r--r--  x64/DLLs/pyexpat.pyd  bin 199680 -> 200704 bytes
-rw-r--r--  x64/DLLs/select.pyd  bin 19456 -> 20480 bytes
-rw-r--r--  x64/DLLs/sqlite3.dll  bin 1296384 -> 1375232 bytes
-rw-r--r--  x64/DLLs/unicodedata.pyd  bin 1091072 -> 1115136 bytes
-rw-r--r--  x64/DLLs/winsound.pyd  bin 21504 -> 21504 bytes
-rw-r--r--  x64/LICENSE.txt  275
-rw-r--r--  x64/Lib/__future__.py  27
-rw-r--r--  x64/Lib/_aix_support.py  89
-rw-r--r--  x64/Lib/_bootsubprocess.py  97
-rw-r--r--  x64/Lib/_collections_abc.py  27
-rw-r--r--  x64/Lib/_dummy_thread.py  193
-rw-r--r--  x64/Lib/_osx_support.py  117
-rw-r--r--  x64/Lib/_pydecimal.py  8
-rw-r--r--  x64/Lib/_pyio.py  17
-rw-r--r--  x64/Lib/_strptime.py  2
-rw-r--r--  x64/Lib/_weakrefset.py  3
-rw-r--r--  x64/Lib/aifc.py  6
-rw-r--r--  x64/Lib/antigravity.py  2
-rw-r--r--  x64/Lib/argparse.py  92
-rw-r--r--  x64/Lib/ast.py  1143
-rw-r--r--  x64/Lib/asynchat.py  2
-rw-r--r--  x64/Lib/asyncio/__init__.py  2
-rw-r--r--  x64/Lib/asyncio/base_events.py  33
-rw-r--r--  x64/Lib/asyncio/base_futures.py  25
-rw-r--r--  x64/Lib/asyncio/base_tasks.py  13
-rw-r--r--  x64/Lib/asyncio/events.py  35
-rw-r--r--  x64/Lib/asyncio/exceptions.py  3
-rw-r--r--  x64/Lib/asyncio/futures.py  30
-rw-r--r--  x64/Lib/asyncio/locks.py  83
-rw-r--r--  x64/Lib/asyncio/proactor_events.py  21
-rw-r--r--  x64/Lib/asyncio/queues.py  3
-rw-r--r--  x64/Lib/asyncio/runners.py  6
-rw-r--r--  x64/Lib/asyncio/selector_events.py  72
-rw-r--r--  x64/Lib/asyncio/sslproto.py  1
-rw-r--r--  x64/Lib/asyncio/tasks.py  118
-rw-r--r--  x64/Lib/asyncio/threads.py  25
-rw-r--r--  x64/Lib/asyncio/transports.py  4
-rw-r--r--  x64/Lib/asyncio/unix_events.py  91
-rw-r--r--  x64/Lib/asyncio/windows_events.py  18
-rw-r--r--  x64/Lib/asyncore.py  4
-rw-r--r--  x64/Lib/base64.py  16
-rw-r--r--  x64/Lib/bdb.py  18
-rw-r--r--  x64/Lib/binhex.py  47
-rw-r--r--  x64/Lib/bisect.py  2
-rw-r--r--  x64/Lib/bz2.py  13
-rw-r--r--  x64/Lib/cProfile.py  23
-rw-r--r--  x64/Lib/cgi.py  8
-rw-r--r--  x64/Lib/codecs.py  15
-rw-r--r--  x64/Lib/codeop.py  29
-rw-r--r--  x64/Lib/collections/__init__.py  440
-rw-r--r--  x64/Lib/compileall.py  209
-rw-r--r--  x64/Lib/concurrent/futures/_base.py  24
-rw-r--r--  x64/Lib/concurrent/futures/process.py  535
-rw-r--r--  x64/Lib/concurrent/futures/thread.py  65
-rw-r--r--  x64/Lib/contextlib.py  42
-rw-r--r--  x64/Lib/copyreg.py  4
-rw-r--r--  x64/Lib/crypt.py  10
-rw-r--r--  x64/Lib/ctypes/__init__.py  3
-rw-r--r--  x64/Lib/ctypes/macholib/dyld.py  12
-rw-r--r--  x64/Lib/ctypes/util.py  32
-rw-r--r--  x64/Lib/curses/__init__.py  14
-rw-r--r--  x64/Lib/dataclasses.py  28
-rw-r--r--  x64/Lib/datetime.py  49
-rw-r--r--  x64/Lib/difflib.py  14
-rw-r--r--  x64/Lib/dis.py  2
-rw-r--r--  x64/Lib/doctest.py  11
-rw-r--r--  x64/Lib/dummy_threading.py  78
-rw-r--r--  x64/Lib/email/_header_value_parser.py  9
-rw-r--r--  x64/Lib/email/contentmanager.py  14
-rw-r--r--  x64/Lib/email/generator.py  6
-rw-r--r--  x64/Lib/email/headerregistry.py  35
-rw-r--r--  x64/Lib/email/message.py  4
-rw-r--r--  x64/Lib/email/utils.py  16
-rw-r--r--  x64/Lib/encodings/aliases.py  4
-rw-r--r--  x64/Lib/encodings/mac_centeuro.py  307
-rw-r--r--  x64/Lib/encodings/punycode.py  2
-rw-r--r--  x64/Lib/enum.py  91
-rw-r--r--  x64/Lib/filecmp.py  8
-rw-r--r--  x64/Lib/fileinput.py  3
-rw-r--r--  x64/Lib/fnmatch.py  69
-rw-r--r--  x64/Lib/fractions.py  73
-rw-r--r--  x64/Lib/ftplib.py  44
-rw-r--r--  x64/Lib/functools.py  45
-rw-r--r--  x64/Lib/getpass.py  2
-rw-r--r--  x64/Lib/gettext.py  8
-rw-r--r--  x64/Lib/graphlib.py  246
-rw-r--r--  x64/Lib/gzip.py  8
-rw-r--r--  x64/Lib/hashlib.py  23
-rw-r--r--  x64/Lib/hmac.py  55
-rw-r--r--  x64/Lib/html/parser.py  8
-rw-r--r--  x64/Lib/http/__init__.py  7
-rw-r--r--  x64/Lib/http/client.py  25
-rw-r--r--  x64/Lib/http/cookies.py  3
-rw-r--r--  x64/Lib/http/server.py  43
-rw-r--r--  x64/Lib/imaplib.py  59
-rw-r--r--  x64/Lib/imghdr.py  2
-rw-r--r--  x64/Lib/importlib/_bootstrap.py  26
-rw-r--r--  x64/Lib/importlib/_bootstrap_external.py  47
-rw-r--r--  x64/Lib/importlib/_common.py  62
-rw-r--r--  x64/Lib/importlib/abc.py  88
-rw-r--r--  x64/Lib/importlib/metadata.py  30
-rw-r--r--  x64/Lib/importlib/resources.py  162
-rw-r--r--  x64/Lib/importlib/util.py  4
-rw-r--r--  x64/Lib/inspect.py  93
-rw-r--r--  x64/Lib/ipaddress.py  123
-rw-r--r--  x64/Lib/json/__init__.py  11
-rw-r--r--  x64/Lib/json/tool.py  41
-rw-r--r--  x64/Lib/keyword.py  17
-rw-r--r--  x64/Lib/linecache.py  42
-rw-r--r--  x64/Lib/locale.py  2
-rw-r--r--  x64/Lib/logging/__init__.py  74
-rw-r--r--  x64/Lib/logging/config.py  1
-rw-r--r--  x64/Lib/logging/handlers.py  112
-rw-r--r--  x64/Lib/mailbox.py  5
-rw-r--r--  x64/Lib/mailcap.py  1
-rw-r--r--  x64/Lib/mimetypes.py  3
-rw-r--r--  x64/Lib/modulefinder.py  21
-rw-r--r--  x64/Lib/msilib/__init__.py  2
-rw-r--r--  x64/Lib/multiprocessing/connection.py  10
-rw-r--r--  x64/Lib/multiprocessing/context.py  7
-rw-r--r--  x64/Lib/multiprocessing/forkserver.py  16
-rw-r--r--  x64/Lib/multiprocessing/managers.py  61
-rw-r--r--  x64/Lib/multiprocessing/pool.py  11
-rw-r--r--  x64/Lib/multiprocessing/popen_fork.py  8
-rw-r--r--  x64/Lib/multiprocessing/process.py  10
-rw-r--r--  x64/Lib/multiprocessing/queues.py  20
-rw-r--r--  x64/Lib/multiprocessing/resource_sharer.py  6
-rw-r--r--  x64/Lib/multiprocessing/shared_memory.py  88
-rw-r--r--  x64/Lib/multiprocessing/synchronize.py  2
-rw-r--r--  x64/Lib/multiprocessing/util.py  33
-rw-r--r--  x64/Lib/nntplib.py  163
-rw-r--r--  x64/Lib/opcode.py  33
-rw-r--r--  x64/Lib/os.py  70
-rw-r--r--  x64/Lib/pathlib.py  173
-rw-r--r--  x64/Lib/pdb.py  17
-rw-r--r--  x64/Lib/pickle.py  28
-rw-r--r--  x64/Lib/pkgutil.py  70
-rw-r--r--  x64/Lib/platform.py  181
-rw-r--r--  x64/Lib/plistlib.py  198
-rw-r--r--  x64/Lib/poplib.py  4
-rw-r--r--  x64/Lib/pprint.py  27
-rw-r--r--  x64/Lib/profile.py  23
-rw-r--r--  x64/Lib/pstats.py  63
-rw-r--r--  x64/Lib/py_compile.py  9
-rw-r--r--  x64/Lib/pydoc.py  125
-rw-r--r--  x64/Lib/pydoc_data/topics.py  1554
-rw-r--r--  x64/Lib/queue.py  5
-rw-r--r--  x64/Lib/quopri.py  8
-rw-r--r--  x64/Lib/random.py  580
-rw-r--r--  x64/Lib/re.py  6
-rw-r--r--  x64/Lib/runpy.py  9
-rw-r--r--  x64/Lib/secrets.py  3
-rw-r--r--  x64/Lib/selectors.py  37
-rw-r--r--  x64/Lib/shlex.py  4
-rw-r--r--  x64/Lib/shutil.py  28
-rw-r--r--  x64/Lib/site.py  25
-rw-r--r--  x64/Lib/smtpd.py  2
-rw-r--r--  x64/Lib/smtplib.py  21
-rw-r--r--  x64/Lib/sndhdr.py  2
-rw-r--r--  x64/Lib/socket.py  37
-rw-r--r--  x64/Lib/socketserver.py  2
-rw-r--r--  x64/Lib/ssl.py  2
-rw-r--r--  x64/Lib/statistics.py  160
-rw-r--r--  x64/Lib/string.py  46
-rw-r--r--  x64/Lib/subprocess.py  179
-rw-r--r--  x64/Lib/sunau.py  7
-rw-r--r--  x64/Lib/symbol.py  9
-rw-r--r--  x64/Lib/symtable.py  27
-rw-r--r--  x64/Lib/sysconfig.py  19
-rw-r--r--  x64/Lib/tarfile.py  33
-rw-r--r--  x64/Lib/tempfile.py  14
-rw-r--r--  x64/Lib/threading.py  100
-rw-r--r--  x64/Lib/timeit.py  3
-rw-r--r--  x64/Lib/trace.py  25
-rw-r--r--  x64/Lib/traceback.py  40
-rw-r--r--  x64/Lib/tracemalloc.py  42
-rw-r--r--  x64/Lib/types.py  3
-rw-r--r--  x64/Lib/typing.py  1092
-rw-r--r--  x64/Lib/urllib/parse.py  68
-rw-r--r--  x64/Lib/urllib/request.py  96
-rw-r--r--  x64/Lib/urllib/response.py  4
-rw-r--r--  x64/Lib/uuid.py  415
-rw-r--r--  x64/Lib/wave.py  20
-rw-r--r--  x64/Lib/weakref.py  74
-rw-r--r--  x64/Lib/webbrowser.py  22
-rw-r--r--  x64/Lib/xml/dom/expatbuilder.py  8
-rw-r--r--  x64/Lib/xml/dom/minidom.py  49
-rw-r--r--  x64/Lib/xml/dom/xmlbuilder.py  1
-rw-r--r--  x64/Lib/xml/etree/ElementInclude.py  56
-rw-r--r--  x64/Lib/xml/etree/ElementTree.py  114
-rw-r--r--  x64/Lib/xml/sax/__init__.py  2
-rw-r--r--  x64/Lib/xml/sax/expatreader.py  14
-rw-r--r--  x64/Lib/xmlrpc/client.py  30
-rw-r--r--  x64/Lib/xmlrpc/server.py  2
-rw-r--r--  x64/Lib/zipfile.py  170
-rw-r--r--  x64/Lib/zoneinfo/__init__.py  31
-rw-r--r--  x64/Lib/zoneinfo/_common.py  165
-rw-r--r--  x64/Lib/zoneinfo/_tzpath.py  175
-rw-r--r--  x64/Lib/zoneinfo/_zoneinfo.py  752
-rw-r--r--  x64/include/Python-ast.h  64
-rw-r--r--  x64/include/Python.h  5
-rw-r--r--  x64/include/abstract.h  8
-rw-r--r--  x64/include/asdl.h  4
-rw-r--r--  x64/include/ast.h  8
-rw-r--r--  x64/include/boolobject.h  2
-rw-r--r--  x64/include/bytearrayobject.h  24
-rw-r--r--  x64/include/bytesobject.h  152
-rw-r--r--  x64/include/cellobject.h  2
-rw-r--r--  x64/include/ceval.h  121
-rw-r--r--  x64/include/classobject.h  6
-rw-r--r--  x64/include/code.h  168
-rw-r--r--  x64/include/compile.h  20
-rw-r--r--  x64/include/complexobject.h  2
-rw-r--r--  x64/include/context.h  9
-rw-r--r--  x64/include/cpython/abstract.h  195
-rw-r--r--  x64/include/cpython/bytearrayobject.h  20
-rw-r--r--  x64/include/cpython/bytesobject.h  118
-rw-r--r--  x64/include/cpython/ceval.h  38
-rw-r--r--  x64/include/cpython/code.h  165
-rw-r--r--  x64/include/cpython/dictobject.h  2
-rw-r--r--  x64/include/cpython/fileobject.h  8
-rw-r--r--  x64/include/cpython/fileutils.h  165
-rw-r--r--  x64/include/cpython/frameobject.h  84
-rw-r--r--  x64/include/cpython/import.h  50
-rw-r--r--  x64/include/cpython/initconfig.h  34
-rw-r--r--  x64/include/cpython/listobject.h  43
-rw-r--r--  x64/include/cpython/methodobject.h  35
-rw-r--r--  x64/include/cpython/object.h  154
-rw-r--r--  x64/include/cpython/objimpl.h  152
-rw-r--r--  x64/include/cpython/pyerrors.h  24
-rw-r--r--  x64/include/cpython/pylifecycle.h  10
-rw-r--r--  x64/include/cpython/pystate.h  38
-rw-r--r--  x64/include/cpython/sysmodule.h  5
-rw-r--r--  x64/include/cpython/traceback.h  2
-rw-r--r--  x64/include/cpython/unicodeobject.h  65
-rw-r--r--  x64/include/datetime.h  10
-rw-r--r--  x64/include/dictobject.h  2
-rw-r--r--  x64/include/errcode.h  1
-rw-r--r--  x64/include/exports.h  30
-rw-r--r--  x64/include/fileobject.h  7
-rw-r--r--  x64/include/fileutils.h  161
-rw-r--r--  x64/include/floatobject.h  14
-rw-r--r--  x64/include/frameobject.h  84
-rw-r--r--  x64/include/funcobject.h  8
-rw-r--r--  x64/include/genericaliasobject.h  14
-rw-r--r--  x64/include/genobject.h  23
-rw-r--r--  x64/include/import.h  57
-rw-r--r--  x64/include/internal/pegen_interface.h  46
-rw-r--r--  x64/include/internal/pycore_abstract.h  22
-rw-r--r--  x64/include/internal/pycore_atomic.h  3
-rw-r--r--  x64/include/internal/pycore_bytes_methods.h (renamed from x64/include/bytes_methods.h)  4
-rw-r--r--  x64/include/internal/pycore_byteswap.h  88
-rw-r--r--  x64/include/internal/pycore_call.h  39
-rw-r--r--  x64/include/internal/pycore_ceval.h  119
-rw-r--r--  x64/include/internal/pycore_context.h  2
-rw-r--r--  x64/include/internal/pycore_dtoa.h (renamed from x64/include/dtoa.h)  12
-rw-r--r--  x64/include/internal/pycore_gc.h  179
-rw-r--r--  x64/include/internal/pycore_gil.h  4
-rw-r--r--  x64/include/internal/pycore_hamt.h  2
-rw-r--r--  x64/include/internal/pycore_hashtable.h  148
-rw-r--r--  x64/include/internal/pycore_import.h  22
-rw-r--r--  x64/include/internal/pycore_initconfig.h  9
-rw-r--r--  x64/include/internal/pycore_interp.h  192
-rw-r--r--  x64/include/internal/pycore_object.h  49
-rw-r--r--  x64/include/internal/pycore_pathconfig.h  7
-rw-r--r--  x64/include/internal/pycore_pyerrors.h  30
-rw-r--r--  x64/include/internal/pycore_pylifecycle.h  68
-rw-r--r--  x64/include/internal/pycore_pymem.h  168
-rw-r--r--  x64/include/internal/pycore_pystate.h  323
-rw-r--r--  x64/include/internal/pycore_runtime.h  144
-rw-r--r--  x64/include/internal/pycore_sysmodule.h  24
-rw-r--r--  x64/include/internal/pycore_traceback.h  7
-rw-r--r--  x64/include/internal/pycore_tupleobject.h  2
-rw-r--r--  x64/include/internal/pycore_warnings.h  4
-rw-r--r--  x64/include/iterobject.h  5
-rw-r--r--  x64/include/listobject.h  61
-rw-r--r--  x64/include/longobject.h  4
-rw-r--r--  x64/include/memoryobject.h  2
-rw-r--r--  x64/include/methodobject.h  73
-rw-r--r--  x64/include/modsupport.h  7
-rw-r--r--  x64/include/moduleobject.h  2
-rw-r--r--  x64/include/node.h  1
-rw-r--r--  x64/include/object.h  287
-rw-r--r--  x64/include/objimpl.h  115
-rw-r--r--  x64/include/odictobject.h  2
-rw-r--r--  x64/include/opcode.h  28
-rw-r--r--  x64/include/patchlevel.h  6
-rw-r--r--  x64/include/picklebufobject.h  2
-rw-r--r--  x64/include/py_curses.h  3
-rw-r--r--  x64/include/pycapsule.h  2
-rw-r--r--  x64/include/pyconfig.h  24
-rw-r--r--  x64/include/pydebug.h  2
-rw-r--r--  x64/include/pyerrors.h  27
-rw-r--r--  x64/include/pyfpe.h  3
-rw-r--r--  x64/include/pyframe.h  22
-rw-r--r--  x64/include/pyhash.h  4
-rw-r--r--  x64/include/pymacro.h  28
-rw-r--r--  x64/include/pymath.h  10
-rw-r--r--  x64/include/pymem.h  35
-rw-r--r--  x64/include/pyport.h  86
-rw-r--r--  x64/include/pystate.h  24
-rw-r--r--  x64/include/pythonrun.h  17
-rw-r--r--  x64/include/pythread.h  18
-rw-r--r--  x64/include/rangeobject.h  2
-rw-r--r--  x64/include/setobject.h  11
-rw-r--r--  x64/include/sliceobject.h  2
-rw-r--r--  x64/include/structseq.h  2
-rw-r--r--  x64/include/symtable.h  2
-rw-r--r--  x64/include/token.h  4
-rw-r--r--  x64/include/traceback.h  6
-rw-r--r--  x64/include/tupleobject.h  4
-rw-r--r--  x64/include/typeslots.h  5
-rw-r--r--  x64/include/unicodeobject.h  13
-rw-r--r--  x64/include/weakrefobject.h  6
-rw-r--r--  x64/libs/_uuid.lib  bin 0 -> 1698 bytes
-rw-r--r--  x64/libs/_zoneinfo.lib  bin 0 -> 1766 bytes
-rw-r--r--  x64/libs/python38.lib  bin 356124 -> 0 bytes
-rw-r--r--  x64/libs/python39.lib  bin 0 -> 361556 bytes
-rw-r--r--  x64/libs/sqlite3.lib  bin 59780 -> 62124 bytes
-rw-r--r--  x64/python.exe  bin 93184 -> 93184 bytes
-rw-r--r--  x64/python38.dll  bin 4934144 -> 0 bytes
-rw-r--r--  x64/python39.dll  bin 0 -> 5232128 bytes
-rw-r--r--  x64/pythonw.exe  bin 91648 -> 91648 bytes
-rw-r--r--  x64/vcruntime140.dll  bin 0 -> 87864 bytes
342 files changed, 12453 insertions, 7098 deletions
diff --git a/x64/DLLs/_asyncio.pyd b/x64/DLLs/_asyncio.pyd
index 05e2f57..bf84608 100644
--- a/x64/DLLs/_asyncio.pyd
+++ b/x64/DLLs/_asyncio.pyd
Binary files differ
diff --git a/x64/DLLs/_bz2.pyd b/x64/DLLs/_bz2.pyd
index d656c8c..1071382 100644
--- a/x64/DLLs/_bz2.pyd
+++ b/x64/DLLs/_bz2.pyd
Binary files differ
diff --git a/x64/DLLs/_ctypes.pyd b/x64/DLLs/_ctypes.pyd
index 089e5ab..d9a2798 100644
--- a/x64/DLLs/_ctypes.pyd
+++ b/x64/DLLs/_ctypes.pyd
Binary files differ
diff --git a/x64/DLLs/_decimal.pyd b/x64/DLLs/_decimal.pyd
index 6164989..5865dac 100644
--- a/x64/DLLs/_decimal.pyd
+++ b/x64/DLLs/_decimal.pyd
Binary files differ
diff --git a/x64/DLLs/_elementtree.pyd b/x64/DLLs/_elementtree.pyd
index a44f07b..ffd096a 100644
--- a/x64/DLLs/_elementtree.pyd
+++ b/x64/DLLs/_elementtree.pyd
Binary files differ
diff --git a/x64/DLLs/_hashlib.pyd b/x64/DLLs/_hashlib.pyd
index d7a26da..e1a9c85 100644
--- a/x64/DLLs/_hashlib.pyd
+++ b/x64/DLLs/_hashlib.pyd
Binary files differ
diff --git a/x64/DLLs/_lzma.pyd b/x64/DLLs/_lzma.pyd
index c4cca6a..d5b3c27 100644
--- a/x64/DLLs/_lzma.pyd
+++ b/x64/DLLs/_lzma.pyd
Binary files differ
diff --git a/x64/DLLs/_msi.pyd b/x64/DLLs/_msi.pyd
index 0daf56b..fabf1f7 100644
--- a/x64/DLLs/_msi.pyd
+++ b/x64/DLLs/_msi.pyd
Binary files differ
diff --git a/x64/DLLs/_multiprocessing.pyd b/x64/DLLs/_multiprocessing.pyd
index 0a50e92..1f4409d 100644
--- a/x64/DLLs/_multiprocessing.pyd
+++ b/x64/DLLs/_multiprocessing.pyd
Binary files differ
diff --git a/x64/DLLs/_overlapped.pyd b/x64/DLLs/_overlapped.pyd
index e62db9e..4da5398 100644
--- a/x64/DLLs/_overlapped.pyd
+++ b/x64/DLLs/_overlapped.pyd
Binary files differ
diff --git a/x64/DLLs/_queue.pyd b/x64/DLLs/_queue.pyd
index 0c5551a..f1b428c 100644
--- a/x64/DLLs/_queue.pyd
+++ b/x64/DLLs/_queue.pyd
Binary files differ
diff --git a/x64/DLLs/_socket.pyd b/x64/DLLs/_socket.pyd
index 7e2681a..a9b2c7c 100644
--- a/x64/DLLs/_socket.pyd
+++ b/x64/DLLs/_socket.pyd
Binary files differ
diff --git a/x64/DLLs/_sqlite3.pyd b/x64/DLLs/_sqlite3.pyd
index a33a919..ee937b9 100644
--- a/x64/DLLs/_sqlite3.pyd
+++ b/x64/DLLs/_sqlite3.pyd
Binary files differ
diff --git a/x64/DLLs/_ssl.pyd b/x64/DLLs/_ssl.pyd
index 0ac7493..83d13d1 100644
--- a/x64/DLLs/_ssl.pyd
+++ b/x64/DLLs/_ssl.pyd
Binary files differ
diff --git a/x64/DLLs/_uuid.pyd b/x64/DLLs/_uuid.pyd
new file mode 100644
index 0000000..f5748f4
--- /dev/null
+++ b/x64/DLLs/_uuid.pyd
Binary files differ
diff --git a/x64/DLLs/_zoneinfo.pyd b/x64/DLLs/_zoneinfo.pyd
new file mode 100644
index 0000000..00715f9
--- /dev/null
+++ b/x64/DLLs/_zoneinfo.pyd
Binary files differ
diff --git a/x64/DLLs/libcrypto-1_1.dll b/x64/DLLs/libcrypto-1_1.dll
index d65bef8..5d95d97 100644
--- a/x64/DLLs/libcrypto-1_1.dll
+++ b/x64/DLLs/libcrypto-1_1.dll
Binary files differ
diff --git a/x64/DLLs/libssl-1_1.dll b/x64/DLLs/libssl-1_1.dll
index 6971c00..b321efd 100644
--- a/x64/DLLs/libssl-1_1.dll
+++ b/x64/DLLs/libssl-1_1.dll
Binary files differ
diff --git a/x64/DLLs/pyexpat.pyd b/x64/DLLs/pyexpat.pyd
index f9c9bd5..ac31240 100644
--- a/x64/DLLs/pyexpat.pyd
+++ b/x64/DLLs/pyexpat.pyd
Binary files differ
diff --git a/x64/DLLs/select.pyd b/x64/DLLs/select.pyd
index 3680f7d..0c9984d 100644
--- a/x64/DLLs/select.pyd
+++ b/x64/DLLs/select.pyd
Binary files differ
diff --git a/x64/DLLs/sqlite3.dll b/x64/DLLs/sqlite3.dll
index 87206ac..b2c5d85 100644
--- a/x64/DLLs/sqlite3.dll
+++ b/x64/DLLs/sqlite3.dll
Binary files differ
diff --git a/x64/DLLs/unicodedata.pyd b/x64/DLLs/unicodedata.pyd
index 1bc764e..6742413 100644
--- a/x64/DLLs/unicodedata.pyd
+++ b/x64/DLLs/unicodedata.pyd
Binary files differ
diff --git a/x64/DLLs/winsound.pyd b/x64/DLLs/winsound.pyd
index 892a358..69b7699 100644
--- a/x64/DLLs/winsound.pyd
+++ b/x64/DLLs/winsound.pyd
Binary files differ
diff --git a/x64/LICENSE.txt b/x64/LICENSE.txt
index edfeb1c..8fb1f45 100644
--- a/x64/LICENSE.txt
+++ b/x64/LICENSE.txt
@@ -59,6 +59,17 @@ direction to make these releases possible.
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================
+Python software and documentation are licensed under the
+Python Software Foundation License Version 2.
+
+Starting with Python 3.8.6, examples, recipes, and other code in
+the documentation are dual licensed under the PSF License Version 2
+and the Zero-Clause BSD license.
+
+Some software incorporated into Python is under different licenses.
+The licenses are listed with code falling under that license.
+
+
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
@@ -252,6 +263,20 @@ FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
+----------------------------------------------------------------------
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
@@ -338,131 +363,131 @@ bzip2/libbzip2 version 1.0.6 of 6 September 2010
--------------------------------------------------------------------------
-
- LICENSE ISSUES
- ==============
-
- The OpenSSL toolkit stays under a double license, i.e. both the conditions of
- the OpenSSL License and the original SSLeay license apply to the toolkit.
- See below for the actual license texts.
-
- OpenSSL License
- ---------------
-
-/* ====================================================================
- * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- * software must display the following acknowledgment:
- * "This product includes software developed by the OpenSSL Project
- * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- * endorse or promote products derived from this software without
- * prior written permission. For written permission, please contact
- * openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- * nor may "OpenSSL" appear in their names without prior written
- * permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- * acknowledgment:
- * "This product includes software developed by the OpenSSL Project
- * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com). This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
- Original SSLeay License
- -----------------------
-
-/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to. The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code. The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * "This product includes cryptographic software written by
- * Eric Young (eay@cryptsoft.com)"
- * The word 'cryptographic' can be left out if the rouines from the library
- * being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- * the apps directory (application code) you must include an acknowledgement:
- * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed. i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
+
+ LICENSE ISSUES
+ ==============
+
+ The OpenSSL toolkit stays under a double license, i.e. both the conditions of
+ the OpenSSL License and the original SSLeay license apply to the toolkit.
+ See below for the actual license texts.
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
See source files for details.
diff --git a/x64/Lib/__future__.py b/x64/Lib/__future__.py
index e113568..0e7b555 100644
--- a/x64/Lib/__future__.py
+++ b/x64/Lib/__future__.py
@@ -66,18 +66,20 @@ __all__ = ["all_feature_names"] + all_feature_names
# code.h and used by compile.h, so that an editor search will find them here.
# However, they're not exported in __all__, because they don't really belong to
# this module.
-CO_NESTED = 0x0010 # nested_scopes
-CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
-CO_FUTURE_DIVISION = 0x2000 # division
-CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
-CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
-CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function
-CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
-CO_FUTURE_BARRY_AS_BDFL = 0x40000
-CO_FUTURE_GENERATOR_STOP = 0x80000 # StopIteration becomes RuntimeError in generators
-CO_FUTURE_ANNOTATIONS = 0x100000 # annotations become strings at runtime
+CO_NESTED = 0x0010 # nested_scopes
+CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
+CO_FUTURE_DIVISION = 0x20000 # division
+CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default
+CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement
+CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function
+CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals
+CO_FUTURE_BARRY_AS_BDFL = 0x400000
+CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators
+CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime
+
class _Feature:
+
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
@@ -88,7 +90,6 @@ class _Feature:
This is a 5-tuple, of the same form as sys.version_info.
"""
-
return self.optional
def getMandatoryRelease(self):
@@ -97,7 +98,6 @@ class _Feature:
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
-
return self.mandatory
def __repr__(self):
@@ -105,6 +105,7 @@ class _Feature:
self.mandatory,
self.compiler_flag))
+
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
@@ -142,5 +143,5 @@ generator_stop = _Feature((3, 5, 0, "beta", 1),
CO_FUTURE_GENERATOR_STOP)
annotations = _Feature((3, 7, 0, "beta", 1),
- (4, 0, 0, "alpha", 0),
+ (3, 10, 0, "alpha", 0),
CO_FUTURE_ANNOTATIONS)
diff --git a/x64/Lib/_aix_support.py b/x64/Lib/_aix_support.py
new file mode 100644
index 0000000..4550493
--- /dev/null
+++ b/x64/Lib/_aix_support.py
@@ -0,0 +1,89 @@
+"""Shared AIX support functions."""
+
+import sys
+import sysconfig
+
+try:
+ import subprocess
+except ImportError: # pragma: no cover
+ # _aix_support is used in distutils by setup.py to build C extensions,
+ # before subprocess dependencies like _posixsubprocess are available.
+ import _bootsubprocess as subprocess
+
+
+def _aix_tag(vrtl, bd):
+ # type: (List[int], int) -> str
+ # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default)
+ _sz = 32 if sys.maxsize == (2**31-1) else 64
+ # vrtl[version, release, technology_level]
+ return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], bd, _sz)
+
+
+# extract version, release and technology level from a VRMF string
+def _aix_vrtl(vrmf):
+ # type: (str) -> List[int]
+ v, r, tl = vrmf.split(".")[:3]
+ return [int(v[-1]), int(r), int(tl)]
+
+
+def _aix_bosmp64():
+ # type: () -> Tuple[str, int]
+ """
+ Return a Tuple[str, int] e.g., ['7.1.4.34', 1806]
+ The fileset bos.mp64 is the AIX kernel. It's VRMF and builddate
+ reflect the current ABI levels of the runtime environment.
+ """
+ # We expect all AIX systems to have lslpp installed in this location
+ out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.mp64"])
+ out = out.decode("utf-8")
+ out = out.strip().split(":") # type: ignore
+ # Use str() and int() to help mypy see types
+ return (str(out[2]), int(out[-1]))
+
+
+def aix_platform():
+ # type: () -> str
+ """
+ AIX filesets are identified by four decimal values: V.R.M.F.
+ V (version) and R (release) can be retreived using ``uname``
+ Since 2007, starting with AIX 5.3 TL7, the M value has been
+ included with the fileset bos.mp64 and represents the Technology
+ Level (TL) of AIX. The F (Fix) value also increases, but is not
+ relevant for comparing releases and binary compatibility.
+ For binary compatibility the so-called builddate is needed.
+ Again, the builddate of an AIX release is associated with bos.mp64.
+ AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\
+ support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html
+
+ For pep425 purposes the AIX platform tag becomes:
+ "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(v, r, tl, builddate, bitsize)
+ e.g., "aix-6107-1415-32" for AIX 6.1 TL7 bd 1415, 32-bit
+ and, "aix-6107-1415-64" for AIX 6.1 TL7 bd 1415, 64-bit
+ """
+ vrmf, bd = _aix_bosmp64()
+ return _aix_tag(_aix_vrtl(vrmf), bd)
+
+
+# extract vrtl from the BUILD_GNU_TYPE as an int
+def _aix_bgt():
+ # type: () -> List[int]
+ gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE")
+ if not gnu_type:
+ raise ValueError("BUILD_GNU_TYPE is not defined")
+ return _aix_vrtl(vrmf=gnu_type)
+
+
+def aix_buildtag():
+ # type: () -> str
+ """
+ Return the platform_tag of the system Python was built on.
+ """
+ # AIX_BUILDDATE is defined by configure with:
+ # lslpp -Lcq bos.mp64 | awk -F: '{ print $NF }'
+ build_date = sysconfig.get_config_var("AIX_BUILDDATE")
+ try:
+ build_date = int(build_date)
+ except (ValueError, TypeError):
+ raise ValueError(f"AIX_BUILDDATE is not defined or invalid: "
+ f"{build_date!r}")
+ return _aix_tag(_aix_bgt(), build_date)
diff --git a/x64/Lib/_bootsubprocess.py b/x64/Lib/_bootsubprocess.py
new file mode 100644
index 0000000..014782f
--- /dev/null
+++ b/x64/Lib/_bootsubprocess.py
@@ -0,0 +1,97 @@
+"""
+Basic subprocess implementation for POSIX which only uses os functions. Only
+implement features required by setup.py to build C extension modules when
+subprocess is unavailable. setup.py is not used on Windows.
+"""
+import os
+
+
+# distutils.spawn used by distutils.command.build_ext
+# calls subprocess.Popen().wait()
+class Popen:
+ def __init__(self, cmd, env=None):
+ self._cmd = cmd
+ self._env = env
+ self.returncode = None
+
+ def wait(self):
+ pid = os.fork()
+ if pid == 0:
+ # Child process
+ try:
+ if self._env is not None:
+ os.execve(self._cmd[0], self._cmd, self._env)
+ else:
+ os.execv(self._cmd[0], self._cmd)
+ finally:
+ os._exit(1)
+ else:
+ # Parent process
+ _, status = os.waitpid(pid, 0)
+ self.returncode = os.waitstatus_to_exitcode(status)
+
+ return self.returncode
+
+
+def _check_cmd(cmd):
+ # Use regex [a-zA-Z0-9./-]+: reject empty string, space, etc.
+ safe_chars = []
+ for first, last in (("a", "z"), ("A", "Z"), ("0", "9")):
+ for ch in range(ord(first), ord(last) + 1):
+ safe_chars.append(chr(ch))
+ safe_chars.append("./-")
+ safe_chars = ''.join(safe_chars)
+
+ if isinstance(cmd, (tuple, list)):
+ check_strs = cmd
+ elif isinstance(cmd, str):
+ check_strs = [cmd]
+ else:
+ return False
+
+ for arg in check_strs:
+ if not isinstance(arg, str):
+ return False
+ if not arg:
+ # reject empty string
+ return False
+ for ch in arg:
+ if ch not in safe_chars:
+ return False
+
+ return True
+
+
+# _aix_support used by distutil.util calls subprocess.check_output()
+def check_output(cmd, **kwargs):
+ if kwargs:
+ raise NotImplementedError(repr(kwargs))
+
+ if not _check_cmd(cmd):
+ raise ValueError(f"unsupported command: {cmd!r}")
+
+ tmp_filename = "check_output.tmp"
+ if not isinstance(cmd, str):
+ cmd = " ".join(cmd)
+ cmd = f"{cmd} >{tmp_filename}"
+
+ try:
+ # system() spawns a shell
+ status = os.system(cmd)
+ exitcode = os.waitstatus_to_exitcode(status)
+ if exitcode:
+ raise ValueError(f"Command {cmd!r} returned non-zero "
+ f"exit status {exitcode!r}")
+
+ try:
+ with open(tmp_filename, "rb") as fp:
+ stdout = fp.read()
+ except FileNotFoundError:
+ stdout = b''
+ finally:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+
+ return stdout
diff --git a/x64/Lib/_collections_abc.py b/x64/Lib/_collections_abc.py
index 2b2ddba..36cd993 100644
--- a/x64/Lib/_collections_abc.py
+++ b/x64/Lib/_collections_abc.py
@@ -9,6 +9,8 @@ Unit tests are in test_collections.
from abc import ABCMeta, abstractmethod
import sys
+GenericAlias = type(list[int])
+
__all__ = ["Awaitable", "Coroutine",
"AsyncIterable", "AsyncIterator", "AsyncGenerator",
"Hashable", "Iterable", "Iterator", "Generator", "Reversible",
@@ -110,6 +112,8 @@ class Awaitable(metaclass=ABCMeta):
return _check_methods(C, "__await__")
return NotImplemented
+ __class_getitem__ = classmethod(GenericAlias)
+
class Coroutine(Awaitable):
@@ -169,6 +173,8 @@ class AsyncIterable(metaclass=ABCMeta):
return _check_methods(C, "__aiter__")
return NotImplemented
+ __class_getitem__ = classmethod(GenericAlias)
+
class AsyncIterator(AsyncIterable):
@@ -255,6 +261,8 @@ class Iterable(metaclass=ABCMeta):
return _check_methods(C, "__iter__")
return NotImplemented
+ __class_getitem__ = classmethod(GenericAlias)
+
class Iterator(Iterable):
@@ -274,6 +282,7 @@ class Iterator(Iterable):
return _check_methods(C, '__iter__', '__next__')
return NotImplemented
+
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
@@ -353,6 +362,7 @@ class Generator(Iterator):
'send', 'throw', 'close')
return NotImplemented
+
Generator.register(generator)
@@ -385,6 +395,9 @@ class Container(metaclass=ABCMeta):
return _check_methods(C, "__contains__")
return NotImplemented
+ __class_getitem__ = classmethod(GenericAlias)
+
+
class Collection(Sized, Iterable, Container):
__slots__ = ()
@@ -395,6 +408,7 @@ class Collection(Sized, Iterable, Container):
return _check_methods(C, "__len__", "__iter__", "__contains__")
return NotImplemented
+
class Callable(metaclass=ABCMeta):
__slots__ = ()
@@ -409,6 +423,8 @@ class Callable(metaclass=ABCMeta):
return _check_methods(C, "__call__")
return NotImplemented
+ __class_getitem__ = classmethod(GenericAlias)
+
### SETS ###
@@ -550,6 +566,7 @@ class Set(Collection):
h = 590923713
return h
+
Set.register(frozenset)
@@ -632,6 +649,7 @@ class MutableSet(Set):
self.discard(value)
return self
+
MutableSet.register(set)
@@ -688,6 +706,7 @@ class Mapping(Collection):
__reversed__ = None
+
Mapping.register(mappingproxy)
@@ -704,6 +723,8 @@ class MappingView(Sized):
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
+ __class_getitem__ = classmethod(GenericAlias)
+
class KeysView(MappingView, Set):
@@ -719,6 +740,7 @@ class KeysView(MappingView, Set):
def __iter__(self):
yield from self._mapping
+
KeysView.register(dict_keys)
@@ -743,6 +765,7 @@ class ItemsView(MappingView, Set):
for key in self._mapping:
yield (key, self._mapping[key])
+
ItemsView.register(dict_items)
@@ -761,6 +784,7 @@ class ValuesView(MappingView, Collection):
for key in self._mapping:
yield self._mapping[key]
+
ValuesView.register(dict_values)
@@ -847,6 +871,7 @@ class MutableMapping(Mapping):
self[key] = default
return default
+
MutableMapping.register(dict)
@@ -914,6 +939,7 @@ class Sequence(Reversible, Collection):
'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v is value or v == value)
+
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
@@ -1000,5 +1026,6 @@ class MutableSequence(Sequence):
self.extend(values)
return self
+
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
diff --git a/x64/Lib/_dummy_thread.py b/x64/Lib/_dummy_thread.py
deleted file mode 100644
index 2e46a07..0000000
--- a/x64/Lib/_dummy_thread.py
+++ /dev/null
@@ -1,193 +0,0 @@
-"""Drop-in replacement for the thread module.
-
-Meant to be used as a brain-dead substitute so that threaded code does
-not need to be rewritten for when the thread module is not present.
-
-Suggested usage is::
-
- try:
- import _thread
- except ImportError:
- import _dummy_thread as _thread
-
-"""
-# Exports only things specified by thread documentation;
-# skipping obsolete synonyms allocate(), start_new(), exit_thread().
-__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
- 'interrupt_main', 'LockType', 'RLock']
-
-# A dummy value
-TIMEOUT_MAX = 2**31
-
-# NOTE: this module can be imported early in the extension building process,
-# and so top level imports of other modules should be avoided. Instead, all
-# imports are done when needed on a function-by-function basis. Since threads
-# are disabled, the import lock should not be an issue anyway (??).
-
-error = RuntimeError
-
-def start_new_thread(function, args, kwargs={}):
- """Dummy implementation of _thread.start_new_thread().
-
- Compatibility is maintained by making sure that ``args`` is a
- tuple and ``kwargs`` is a dictionary. If an exception is raised
- and it is SystemExit (which can be done by _thread.exit()) it is
- caught and nothing is done; all other exceptions are printed out
- by using traceback.print_exc().
-
- If the executed function calls interrupt_main the KeyboardInterrupt will be
- raised when the function returns.
-
- """
- if type(args) != type(tuple()):
- raise TypeError("2nd arg must be a tuple")
- if type(kwargs) != type(dict()):
- raise TypeError("3rd arg must be a dict")
- global _main
- _main = False
- try:
- function(*args, **kwargs)
- except SystemExit:
- pass
- except:
- import traceback
- traceback.print_exc()
- _main = True
- global _interrupt
- if _interrupt:
- _interrupt = False
- raise KeyboardInterrupt
-
-def exit():
- """Dummy implementation of _thread.exit()."""
- raise SystemExit
-
-def get_ident():
- """Dummy implementation of _thread.get_ident().
-
- Since this module should only be used when _threadmodule is not
- available, it is safe to assume that the current process is the
- only thread. Thus a constant can be safely returned.
- """
- return 1
-
-def allocate_lock():
- """Dummy implementation of _thread.allocate_lock()."""
- return LockType()
-
-def stack_size(size=None):
- """Dummy implementation of _thread.stack_size()."""
- if size is not None:
- raise error("setting thread stack size not supported")
- return 0
-
-def _set_sentinel():
- """Dummy implementation of _thread._set_sentinel()."""
- return LockType()
-
-class LockType(object):
- """Class implementing dummy implementation of _thread.LockType.
-
- Compatibility is maintained by maintaining self.locked_status
- which is a boolean that stores the state of the lock. Pickling of
- the lock, though, should not be done since if the _thread module is
- then used with an unpickled ``lock()`` from here problems could
- occur from this class not having atomic methods.
-
- """
-
- def __init__(self):
- self.locked_status = False
-
- def acquire(self, waitflag=None, timeout=-1):
- """Dummy implementation of acquire().
-
- For blocking calls, self.locked_status is automatically set to
- True and returned appropriately based on value of
- ``waitflag``. If it is non-blocking, then the value is
- actually checked and not set if it is already acquired. This
- is all done so that threading.Condition's assert statements
- aren't triggered and throw a little fit.
-
- """
- if waitflag is None or waitflag:
- self.locked_status = True
- return True
- else:
- if not self.locked_status:
- self.locked_status = True
- return True
- else:
- if timeout > 0:
- import time
- time.sleep(timeout)
- return False
-
- __enter__ = acquire
-
- def __exit__(self, typ, val, tb):
- self.release()
-
- def release(self):
- """Release the dummy lock."""
- # XXX Perhaps shouldn't actually bother to test? Could lead
- # to problems for complex, threaded code.
- if not self.locked_status:
- raise error
- self.locked_status = False
- return True
-
- def locked(self):
- return self.locked_status
-
- def __repr__(self):
- return "<%s %s.%s object at %s>" % (
- "locked" if self.locked_status else "unlocked",
- self.__class__.__module__,
- self.__class__.__qualname__,
- hex(id(self))
- )
-
-
-class RLock(LockType):
- """Dummy implementation of threading._RLock.
-
- Re-entrant lock can be aquired multiple times and needs to be released
- just as many times. This dummy implemention does not check wheter the
- current thread actually owns the lock, but does accounting on the call
- counts.
- """
- def __init__(self):
- super().__init__()
- self._levels = 0
-
- def acquire(self, waitflag=None, timeout=-1):
- """Aquire the lock, can be called multiple times in succession.
- """
- locked = super().acquire(waitflag, timeout)
- if locked:
- self._levels += 1
- return locked
-
- def release(self):
- """Release needs to be called once for every call to acquire().
- """
- if self._levels == 0:
- raise error
- if self._levels == 1:
- super().release()
- self._levels -= 1
-
-# Used to signal that interrupt_main was called in a "thread"
-_interrupt = False
-# True when not executing in a "thread"
-_main = True
-
-def interrupt_main():
- """Set _interrupt flag to True to have start_new_thread raise
- KeyboardInterrupt upon exiting."""
- if _main:
- raise KeyboardInterrupt
- else:
- global _interrupt
- _interrupt = True
diff --git a/x64/Lib/_osx_support.py b/x64/Lib/_osx_support.py
index db6674e..37975fe 100644
--- a/x64/Lib/_osx_support.py
+++ b/x64/Lib/_osx_support.py
@@ -52,7 +52,7 @@ def _find_executable(executable, path=None):
return executable
-def _read_output(commandstring):
+def _read_output(commandstring, capture_stderr=False):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
@@ -67,7 +67,10 @@ def _read_output(commandstring):
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
- cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
+ if capture_stderr:
+ cmd = "%s >'%s' 2>&1" % (commandstring, fp.name)
+ else:
+ cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
@@ -110,6 +113,26 @@ def _get_system_version():
return _SYSTEM_VERSION
+_SYSTEM_VERSION_TUPLE = None
+def _get_system_version_tuple():
+ """
+ Return the macOS system version as a tuple
+
+ The return value is safe to use to compare
+ two version numbers.
+ """
+ global _SYSTEM_VERSION_TUPLE
+ if _SYSTEM_VERSION_TUPLE is None:
+ osx_version = _get_system_version()
+ if osx_version:
+ try:
+ _SYSTEM_VERSION_TUPLE = tuple(int(i) for i in osx_version.split('.'))
+ except ValueError:
+ _SYSTEM_VERSION_TUPLE = ()
+
+ return _SYSTEM_VERSION_TUPLE
+
+
def _remove_original_values(_config_vars):
"""Remove original unmodified values for testing"""
# This is needed for higher-level cross-platform tests of get_platform.
@@ -125,6 +148,33 @@ def _save_modified_value(_config_vars, cv, newvalue):
_config_vars[_INITPRE + cv] = oldvalue
_config_vars[cv] = newvalue
+
+_cache_default_sysroot = None
+def _default_sysroot(cc):
+ """ Returns the root of the default SDK for this system, or '/' """
+ global _cache_default_sysroot
+
+ if _cache_default_sysroot is not None:
+ return _cache_default_sysroot
+
+ contents = _read_output('%s -c -E -v - </dev/null' % (cc,), True)
+ in_incdirs = False
+ for line in contents.splitlines():
+ if line.startswith("#include <...>"):
+ in_incdirs = True
+ elif line.startswith("End of search list"):
+ in_incdirs = False
+ elif in_incdirs:
+ line = line.strip()
+ if line == '/usr/include':
+ _cache_default_sysroot = '/'
+ elif line.endswith(".sdk/usr/include"):
+ _cache_default_sysroot = line[:-12]
+ if _cache_default_sysroot is None:
+ _cache_default_sysroot = '/'
+
+ return _cache_default_sysroot
+
def _supports_universal_builds():
"""Returns True if universal builds are supported on this system"""
# As an approximation, we assume that if we are running on 10.4 or above,
@@ -132,14 +182,18 @@ def _supports_universal_builds():
# builds, in particular -isysroot and -arch arguments to the compiler. This
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
- osx_version = _get_system_version()
- if osx_version:
- try:
- osx_version = tuple(int(i) for i in osx_version.split('.'))
- except ValueError:
- osx_version = ''
+ osx_version = _get_system_version_tuple()
return bool(osx_version >= (10, 4)) if osx_version else False
+def _supports_arm64_builds():
+ """Returns True if arm64 builds are supported on this system"""
+ # There are two sets of systems supporting macOS/arm64 builds:
+ # 1. macOS 11 and later, unconditionally
+ # 2. macOS 10.15 with Xcode 12.2 or later
+ # For now the second category is ignored.
+ osx_version = _get_system_version_tuple()
+ return osx_version >= (11, 0) if osx_version else False
+
def _find_appropriate_compiler(_config_vars):
"""Find appropriate C compiler for extension module builds"""
@@ -211,7 +265,7 @@ def _remove_universal_flags(_config_vars):
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
- flags = re.sub('-isysroot [^ \t]*', ' ', flags)
+ flags = re.sub(r'-isysroot\s*\S+', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
@@ -287,7 +341,7 @@ def _check_for_unavailable_sdk(_config_vars):
# to /usr and /System/Library by either a standalone CLT
# package or the CLT component within Xcode.
cflags = _config_vars.get('CFLAGS', '')
- m = re.search(r'-isysroot\s+(\S+)', cflags)
+ m = re.search(r'-isysroot\s*(\S+)', cflags)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
@@ -295,7 +349,7 @@ def _check_for_unavailable_sdk(_config_vars):
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
- flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
+ flags = re.sub(r'-isysroot\s*\S+(?:\s|$)', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
@@ -320,7 +374,7 @@ def compiler_fixup(compiler_so, cc_args):
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
- stripSysroot = '-isysroot' in cc_args
+ stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
@@ -331,6 +385,12 @@ def compiler_fixup(compiler_so, cc_args):
except ValueError:
break
+ elif not _supports_arm64_builds():
+ # Look for "-arch arm64" and drop that
+ for idx in reversed(range(len(compiler_so))):
+ if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64":
+ del compiler_so[idx:idx+2]
+
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
@@ -338,23 +398,34 @@ def compiler_fixup(compiler_so, cc_args):
if stripSysroot:
while True:
- try:
- index = compiler_so.index('-isysroot')
+ indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
+ if not indices:
+ break
+ index = indices[0]
+ if compiler_so[index] == '-isysroot':
# Strip this argument and the next one:
del compiler_so[index:index+2]
- except ValueError:
- break
+ else:
+ # It's '-isysroot/some/path' in one arg
+ del compiler_so[index:index+1]
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
- if '-isysroot' in cc_args:
- idx = cc_args.index('-isysroot')
- sysroot = cc_args[idx+1]
- elif '-isysroot' in compiler_so:
- idx = compiler_so.index('-isysroot')
- sysroot = compiler_so[idx+1]
+ argvar = cc_args
+ indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]
+ if not indices:
+ argvar = compiler_so
+ indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
+
+ for idx in indices:
+ if argvar[idx] == '-isysroot':
+ sysroot = argvar[idx+1]
+ break
+ else:
+ sysroot = argvar[idx][len('-isysroot'):]
+ break
if sysroot and not os.path.isdir(sysroot):
from distutils import log
@@ -470,6 +541,8 @@ def get_platform_osx(_config_vars, osname, release, machine):
if len(archs) == 1:
machine = archs[0]
+ elif archs == ('arm64', 'x86_64'):
+ machine = 'universal2'
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
diff --git a/x64/Lib/_pydecimal.py b/x64/Lib/_pydecimal.py
index c14d8ca..ab989e5 100644
--- a/x64/Lib/_pydecimal.py
+++ b/x64/Lib/_pydecimal.py
@@ -140,8 +140,11 @@ __all__ = [
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
- # C version: compile time choice that enables the thread local context
- 'HAVE_THREADS'
+ # C version: compile time choice that enables the thread local context (deprecated, now always true)
+ 'HAVE_THREADS',
+
+ # C version: compile time choice that enables the coroutine local context
+ 'HAVE_CONTEXTVAR'
]
__xname__ = __name__ # sys.modules lookup (--without-threads)
@@ -172,6 +175,7 @@ ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
+HAVE_CONTEXTVAR = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
diff --git a/x64/Lib/_pyio.py b/x64/Lib/_pyio.py
index fd31b8c..4804ed2 100644
--- a/x64/Lib/_pyio.py
+++ b/x64/Lib/_pyio.py
@@ -36,6 +36,8 @@ BlockingIOError = BlockingIOError
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
_IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
+# Does open() check its 'errors' argument?
+_CHECK_ERRORS = _IOBASE_EMITS_UNRAISABLE
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
@@ -802,6 +804,9 @@ class _BufferedIOMixin(BufferedIOBase):
return pos
def truncate(self, pos=None):
+ self._checkClosed()
+ self._checkWritable()
+
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
@@ -1571,7 +1576,7 @@ class FileIO(RawIOBase):
raise IsADirectoryError(errno.EISDIR,
os.strerror(errno.EISDIR), file)
except AttributeError:
- # Ignore the AttribueError if stat.S_ISDIR or errno.EISDIR
+ # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
# don't exist.
pass
self._blksize = getattr(fdfstat, 'st_blksize', 0)
@@ -2026,6 +2031,8 @@ class TextIOWrapper(TextIOBase):
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
+ if _CHECK_ERRORS:
+ codecs.lookup_error(errors)
self._buffer = buffer
self._decoded_chars = '' # buffer for text returned from decoder
@@ -2295,7 +2302,7 @@ class TextIOWrapper(TextIOBase):
return not eof
def _pack_cookie(self, position, dec_flags=0,
- bytes_to_feed=0, need_eof=0, chars_to_skip=0):
+ bytes_to_feed=0, need_eof=False, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
@@ -2309,7 +2316,7 @@ class TextIOWrapper(TextIOBase):
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
- return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
+ return position, dec_flags, bytes_to_feed, bool(need_eof), chars_to_skip
def tell(self):
if not self._seekable:
@@ -2383,7 +2390,7 @@ class TextIOWrapper(TextIOBase):
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
- need_eof = 0
+ need_eof = False
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
@@ -2400,7 +2407,7 @@ class TextIOWrapper(TextIOBase):
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
- need_eof = 1
+ need_eof = True
if chars_decoded < chars_to_skip:
raise OSError("can't reconstruct logical file position")
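
The `_CHECK_ERRORS` flag added above makes the pure-Python `open()`/`TextIOWrapper` validate the `errors` argument eagerly, but only on debug builds or under `python -X dev`. The validation is simply `codecs.lookup_error`, as this standalone sketch shows:

    import codecs

    codecs.lookup_error('replace')          # known handler: returns a callable
    try:
        codecs.lookup_error('no-such-handler')
    except LookupError as exc:
        # In dev mode, open(..., errors='no-such-handler') now fails at
        # open() time with this error instead of at the first decode.
        print(exc)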
diff --git a/x64/Lib/_strptime.py b/x64/Lib/_strptime.py
index f4f3c0b..5df37f5 100644
--- a/x64/Lib/_strptime.py
+++ b/x64/Lib/_strptime.py
@@ -182,7 +182,7 @@ class TimeRE(dict):
self.locale_time = LocaleTime()
base = super()
base.__init__({
- # The " \d" part of the regex is to make %c from ANSI C work
+ # The " [1-9]" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
diff --git a/x64/Lib/_weakrefset.py b/x64/Lib/_weakrefset.py
index 7a84823..b267780 100644
--- a/x64/Lib/_weakrefset.py
+++ b/x64/Lib/_weakrefset.py
@@ -3,6 +3,7 @@
# by abc.py to load everything else at startup.
from _weakref import ref
+from types import GenericAlias
__all__ = ['WeakSet']
@@ -197,3 +198,5 @@ class WeakSet:
def __repr__(self):
return repr(self.data)
+
+ __class_getitem__ = classmethod(GenericAlias)
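
With `__class_getitem__` in place, `WeakSet` can be parameterized at runtime, PEP 585-style. A small illustration (the `Listener` class is hypothetical):

    from weakref import WeakSet

    class Listener: ...

    # The annotation is evaluated at module level and no longer raises.
    live_listeners: WeakSet[Listener] = WeakSet()
    print(WeakSet[Listener])   # a types.GenericAlias for WeakSet[Listener]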
diff --git a/x64/Lib/aifc.py b/x64/Lib/aifc.py
index 1916e7e..ed5da7d 100644
--- a/x64/Lib/aifc.py
+++ b/x64/Lib/aifc.py
@@ -138,7 +138,7 @@ import struct
import builtins
import warnings
-__all__ = ["Error", "open", "openfp"]
+__all__ = ["Error", "open"]
class Error(Exception):
pass
@@ -920,10 +920,6 @@ def open(f, mode=None):
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
-def openfp(f, mode=None):
- warnings.warn("aifc.openfp is deprecated since Python 3.7. "
- "Use aifc.open instead.", DeprecationWarning, stacklevel=2)
- return open(f, mode=mode)
if __name__ == '__main__':
import sys
diff --git a/x64/Lib/antigravity.py b/x64/Lib/antigravity.py
index c6f174c..6dc5207 100644
--- a/x64/Lib/antigravity.py
+++ b/x64/Lib/antigravity.py
@@ -12,6 +12,6 @@ def geohash(latitude, longitude, datedow):
'''
# https://xkcd.com/426/
- h = hashlib.md5(datedow).hexdigest()
+ h = hashlib.md5(datedow, usedforsecurity=False).hexdigest()
p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
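
`usedforsecurity=False` is a Python 3.9 addition to the hashlib constructors: it marks a digest as non-cryptographic so that restricted environments (e.g. FIPS-enforcing OpenSSL builds) can still permit MD5 here. A hedged sketch — whether the keyword is accepted can depend on the underlying hashlib build:

    import hashlib

    # Non-security fingerprinting use; may raise TypeError on builds
    # whose hashlib does not accept the keyword.
    digest = hashlib.md5(b'2005-05-26-10458.68', usedforsecurity=False).hexdigest()
    print(digest)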
diff --git a/x64/Lib/argparse.py b/x64/Lib/argparse.py
index 2dad5f1..2fb1da5 100644
--- a/x64/Lib/argparse.py
+++ b/x64/Lib/argparse.py
@@ -67,6 +67,7 @@ __all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
+ 'BooleanOptionalAction',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
@@ -86,7 +87,6 @@ __all__ = [
import os as _os
import re as _re
-import shutil as _shutil
import sys as _sys
from gettext import gettext as _, ngettext
@@ -129,7 +129,7 @@ class _AttributeHolder(object):
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
- return sorted(self.__dict__.items())
+ return list(self.__dict__.items())
def _get_args(self):
return []
@@ -166,7 +166,8 @@ class HelpFormatter(object):
# default setting for width
if width is None:
- width = _shutil.get_terminal_size().columns
+ import shutil
+ width = shutil.get_terminal_size().columns
width -= 2
self._prog = prog
@@ -263,7 +264,7 @@ class HelpFormatter(object):
invocations.append(get_invocation(subaction))
# update the maximum item length
- invocation_length = max([len(s) for s in invocations])
+ invocation_length = max(map(len, invocations))
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
@@ -454,7 +455,7 @@ class HelpFormatter(object):
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
- part = '%s' % option_string
+ part = action.format_usage()
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
@@ -590,7 +591,11 @@ class HelpFormatter(object):
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
- result = '[%s [%s ...]]' % get_metavar(2)
+ metavar = get_metavar(1)
+ if len(metavar) == 2:
+ result = '[%s [%s ...]]' % metavar
+ else:
+ result = '[%s ...]' % metavar
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
@@ -842,9 +847,52 @@ class Action(_AttributeHolder):
]
return [(name, getattr(self, name)) for name in names]
+ def format_usage(self):
+ return self.option_strings[0]
+
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
+class BooleanOptionalAction(Action):
+ def __init__(self,
+ option_strings,
+ dest,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None):
+
+ _option_strings = []
+ for option_string in option_strings:
+ _option_strings.append(option_string)
+
+ if option_string.startswith('--'):
+ option_string = '--no-' + option_string[2:]
+ _option_strings.append(option_string)
+
+ if help is not None and default is not None:
+ help += f" (default: {default})"
+
+ super().__init__(
+ option_strings=_option_strings,
+ dest=dest,
+ nargs=0,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ if option_string in self.option_strings:
+ setattr(namespace, self.dest, not option_string.startswith('--no-'))
+
+ def format_usage(self):
+ return ' | '.join(self.option_strings)
+
class _StoreAction(Action):
@@ -1490,10 +1538,8 @@ class _ActionsContainer(object):
# strings starting with two prefix characters are long options
option_strings.append(option_string)
- if option_string[0] in self.prefix_chars:
- if len(option_string) > 1:
- if option_string[1] in self.prefix_chars:
- long_option_strings.append(option_string)
+ if len(option_string) > 1 and option_string[1] in self.prefix_chars:
+ long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
@@ -1633,6 +1679,8 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/-help option
- allow_abbrev -- Allow long options to be abbreviated unambiguously
+ - exit_on_error -- Determines whether or not ArgumentParser exits with
+ error info when an error occurs
"""
def __init__(self,
@@ -1647,7 +1695,8 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
argument_default=None,
conflict_handler='error',
add_help=True,
- allow_abbrev=True):
+ allow_abbrev=True,
+ exit_on_error=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
@@ -1666,6 +1715,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
self.allow_abbrev = allow_abbrev
+ self.exit_on_error = exit_on_error
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
@@ -1796,15 +1846,19 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
- try:
+ if self.exit_on_error:
+ try:
+ namespace, args = self._parse_known_args(args, namespace)
+ except ArgumentError:
+ err = _sys.exc_info()[1]
+ self.error(str(err))
+ else:
namespace, args = self._parse_known_args(args, namespace)
- if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
- args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
- delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
- return namespace, args
- except ArgumentError:
- err = _sys.exc_info()[1]
- self.error(str(err))
+
+ if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
+ args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
+ delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
+ return namespace, args
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
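
Two user-visible argparse features land in this file: `BooleanOptionalAction`, which synthesizes a `--no-` negative for each long flag, and the `exit_on_error=False` constructor flag, which lets `ArgumentError` propagate instead of calling `parser.error()`. A usage sketch, assuming a Python 3.9 runtime:

    import argparse

    parser = argparse.ArgumentParser(exit_on_error=False)
    parser.add_argument('--cache', action=argparse.BooleanOptionalAction, default=True)
    parser.add_argument('--retries', type=int)

    print(parser.parse_args([]))               # Namespace(cache=True, retries=None)
    print(parser.parse_args(['--no-cache']))   # Namespace(cache=False, retries=None)

    try:
        parser.parse_args(['--retries', 'many'])
    except argparse.ArgumentError as exc:
        # Raised instead of exiting, because exit_on_error=False.
        print('caught:', exc)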
diff --git a/x64/Lib/ast.py b/x64/Lib/ast.py
index b45f1e4..ecd4895 100644
--- a/x64/Lib/ast.py
+++ b/x64/Lib/ast.py
@@ -24,7 +24,10 @@
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
+import sys
from _ast import *
+from contextlib import contextmanager, nullcontext
+from enum import IntEnum, auto
def parse(source, filename='<unknown>', mode='exec', *,
@@ -59,11 +62,12 @@ def literal_eval(node_or_string):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
+ def _raise_malformed_node(node):
+ raise ValueError(f'malformed node or string: {node!r}')
def _convert_num(node):
- if isinstance(node, Constant):
- if type(node.value) in (int, float, complex):
- return node.value
- raise ValueError('malformed node or string: ' + repr(node))
+ if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
+ _raise_malformed_node(node)
+ return node.value
def _convert_signed_num(node):
if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
operand = _convert_num(node.operand)
@@ -81,7 +85,12 @@ def literal_eval(node_or_string):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
+ elif (isinstance(node, Call) and isinstance(node.func, Name) and
+ node.func.id == 'set' and node.args == node.keywords == []):
+ return set()
elif isinstance(node, Dict):
+ if len(node.keys) != len(node.values):
+ _raise_malformed_node(node)
return dict(zip(map(_convert, node.keys),
map(_convert, node.values)))
elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):
@@ -96,7 +105,7 @@ def literal_eval(node_or_string):
return _convert(node_or_string)
-def dump(node, annotate_fields=True, include_attributes=False):
+def dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
"""
Return a formatted dump of the tree in node. This is mainly useful for
debugging purposes. If annotate_fields is true (by default),
@@ -104,35 +113,63 @@ def dump(node, annotate_fields=True, include_attributes=False):
If annotate_fields is false, the result string will be more compact by
omitting unambiguous field names. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
- include_attributes can be set to true.
+ include_attributes can be set to true. If indent is a non-negative
+ integer or string, then the tree will be pretty-printed with that indent
+ level. None (the default) selects the single line representation.
"""
- def _format(node):
+ def _format(node, level=0):
+ if indent is not None:
+ level += 1
+ prefix = '\n' + indent * level
+ sep = ',\n' + indent * level
+ else:
+ prefix = ''
+ sep = ', '
if isinstance(node, AST):
+ cls = type(node)
args = []
+ allsimple = True
keywords = annotate_fields
- for field in node._fields:
+ for name in node._fields:
try:
- value = getattr(node, field)
+ value = getattr(node, name)
except AttributeError:
keywords = True
+ continue
+ if value is None and getattr(cls, name, ...) is None:
+ keywords = True
+ continue
+ value, simple = _format(value, level)
+ allsimple = allsimple and simple
+ if keywords:
+ args.append('%s=%s' % (name, value))
else:
- if keywords:
- args.append('%s=%s' % (field, _format(value)))
- else:
- args.append(_format(value))
+ args.append(value)
if include_attributes and node._attributes:
- for a in node._attributes:
+ for name in node._attributes:
try:
- args.append('%s=%s' % (a, _format(getattr(node, a))))
+ value = getattr(node, name)
except AttributeError:
- pass
- return '%s(%s)' % (node.__class__.__name__, ', '.join(args))
+ continue
+ if value is None and getattr(cls, name, ...) is None:
+ continue
+ value, simple = _format(value, level)
+ allsimple = allsimple and simple
+ args.append('%s=%s' % (name, value))
+ if allsimple and len(args) <= 3:
+ return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args
+ return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False
elif isinstance(node, list):
- return '[%s]' % ', '.join(_format(x) for x in node)
- return repr(node)
+ if not node:
+ return '[]', True
+ return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False
+ return repr(node), True
+
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
- return _format(node)
+ if indent is not None and not isinstance(indent, str):
+ indent = ' ' * indent
+ return _format(node)[0]
def copy_location(new_node, old_node):
@@ -141,9 +178,14 @@ def copy_location(new_node, old_node):
attributes) from *old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset', 'end_lineno', 'end_col_offset':
- if attr in old_node._attributes and attr in new_node._attributes \
- and hasattr(old_node, attr):
- setattr(new_node, attr, getattr(old_node, attr))
+ if attr in old_node._attributes and attr in new_node._attributes:
+ value = getattr(old_node, attr, None)
+ # end_lineno and end_col_offset are optional attributes, and they
+ # should be copied whether the value is None or not.
+ if value is not None or (
+ hasattr(old_node, attr) and attr.startswith("end_")
+ ):
+ setattr(new_node, attr, value)
return new_node
@@ -162,7 +204,7 @@ def fix_missing_locations(node):
else:
lineno = node.lineno
if 'end_lineno' in node._attributes:
- if not hasattr(node, 'end_lineno'):
+ if getattr(node, 'end_lineno', None) is None:
node.end_lineno = end_lineno
else:
end_lineno = node.end_lineno
@@ -172,7 +214,7 @@ def fix_missing_locations(node):
else:
col_offset = node.col_offset
if 'end_col_offset' in node._attributes:
- if not hasattr(node, 'end_col_offset'):
+ if getattr(node, 'end_col_offset', None) is None:
node.end_col_offset = end_col_offset
else:
end_col_offset = node.end_col_offset
@@ -191,8 +233,11 @@ def increment_lineno(node, n=1):
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
- if 'end_lineno' in child._attributes:
- child.end_lineno = getattr(child, 'end_lineno', 0) + n
+ if (
+ "end_lineno" in child._attributes
+ and (end_lineno := getattr(child, "end_lineno", 0)) is not None
+ ):
+ child.end_lineno = end_lineno + n
return node
@@ -274,7 +319,7 @@ def _splitlines_no_ff(source):
def _pad_whitespace(source):
- """Replace all chars except '\f\t' in a line with spaces."""
+ r"""Replace all chars except '\f\t' in a line with spaces."""
result = ''
for c in source:
if c in '\f\t':
@@ -294,6 +339,8 @@ def get_source_segment(source, node, *, padded=False):
be padded with spaces to match its original position.
"""
try:
+ if node.end_lineno is None or node.end_col_offset is None:
+ return None
lineno = node.lineno - 1
end_lineno = node.end_lineno - 1
col_offset = node.col_offset
@@ -386,7 +433,7 @@ class NodeVisitor(object):
else:
import warnings
warnings.warn(f"{method} is deprecated; add visit_Constant",
- PendingDeprecationWarning, 2)
+ DeprecationWarning, 2)
return visitor(node)
return self.generic_visit(node)
@@ -408,11 +455,11 @@ class NodeTransformer(NodeVisitor):
class RewriteName(NodeTransformer):
def visit_Name(self, node):
- return copy_location(Subscript(
+ return Subscript(
value=Name(id='data', ctx=Load()),
- slice=Index(value=Str(s=node.id)),
+ slice=Constant(value=node.id),
ctx=node.ctx
- ), node)
+ )
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
@@ -450,20 +497,26 @@ class NodeTransformer(NodeVisitor):
return node
-# The following code is for backward compatibility.
-# It will be removed in future.
+# If the ast module is loaded more than once, only add deprecated methods once
+if not hasattr(Constant, 'n'):
+ # The following code is for backward compatibility.
+ # It will be removed in future.
-def _getter(self):
- return self.value
+ def _getter(self):
+ """Deprecated. Use value instead."""
+ return self.value
-def _setter(self, value):
- self.value = value
+ def _setter(self, value):
+ self.value = value
-Constant.n = property(_getter, _setter)
-Constant.s = property(_getter, _setter)
+ Constant.n = property(_getter, _setter)
+ Constant.s = property(_getter, _setter)
class _ABC(type):
+ def __init__(cls, *args):
+ cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead"""
+
def __instancecheck__(cls, inst):
if not isinstance(inst, Constant):
return False
@@ -480,6 +533,13 @@ class _ABC(type):
return type.__instancecheck__(cls, inst)
def _new(cls, *args, **kwargs):
+ for key in kwargs:
+ if key not in cls._fields:
+ # arbitrary keyword arguments are accepted
+ continue
+ pos = cls._fields.index(key)
+ if pos < len(args):
+ raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}")
if cls in _const_types:
return Constant(*args, **kwargs)
return Constant.__new__(cls, *args, **kwargs)
@@ -517,6 +577,7 @@ _const_types = {
_const_types_not = {
Num: (bool,),
}
+
_const_node_type_names = {
bool: 'NameConstant', # should be before int
type(None): 'NameConstant',
@@ -527,3 +588,1005 @@ _const_node_type_names = {
bytes: 'Bytes',
type(...): 'Ellipsis',
}
+
+class slice(AST):
+ """Deprecated AST node class."""
+
+class Index(slice):
+ """Deprecated AST node class. Use the index value directly instead."""
+ def __new__(cls, value, **kwargs):
+ return value
+
+class ExtSlice(slice):
+ """Deprecated AST node class. Use ast.Tuple instead."""
+ def __new__(cls, dims=(), **kwargs):
+ return Tuple(list(dims), Load(), **kwargs)
+
+# If the ast module is loaded more than once, only add deprecated methods once
+if not hasattr(Tuple, 'dims'):
+ # The following code is for backward compatibility.
+ # It will be removed in future.
+
+ def _dims_getter(self):
+ """Deprecated. Use elts instead."""
+ return self.elts
+
+ def _dims_setter(self, value):
+ self.elts = value
+
+ Tuple.dims = property(_dims_getter, _dims_setter)
+
+class Suite(mod):
+ """Deprecated AST node class. Unused in Python 3."""
+
+class AugLoad(expr_context):
+ """Deprecated AST node class. Unused in Python 3."""
+
+class AugStore(expr_context):
+ """Deprecated AST node class. Unused in Python 3."""
+
+class Param(expr_context):
+ """Deprecated AST node class. Unused in Python 3."""
+
+
+# Large float and imaginary literals get turned into infinities in the AST.
+# We unparse those infinities to INFSTR.
+_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
+
+class _Precedence(IntEnum):
+ """Precedence table that originated from python grammar."""
+
+ TUPLE = auto()
+ YIELD = auto() # 'yield', 'yield from'
+ TEST = auto() # 'if'-'else', 'lambda'
+ OR = auto() # 'or'
+ AND = auto() # 'and'
+ NOT = auto() # 'not'
+ CMP = auto() # '<', '>', '==', '>=', '<=', '!=',
+ # 'in', 'not in', 'is', 'is not'
+ EXPR = auto()
+ BOR = EXPR # '|'
+ BXOR = auto() # '^'
+ BAND = auto() # '&'
+ SHIFT = auto() # '<<', '>>'
+ ARITH = auto() # '+', '-'
+ TERM = auto() # '*', '@', '/', '%', '//'
+ FACTOR = auto() # unary '+', '-', '~'
+ POWER = auto() # '**'
+ AWAIT = auto() # 'await'
+ ATOM = auto()
+
+ def next(self):
+ try:
+ return self.__class__(self + 1)
+ except ValueError:
+ return self
+
+
+_SINGLE_QUOTES = ("'", '"')
+_MULTI_QUOTES = ('"""', "'''")
+_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES)
+
+class _Unparser(NodeVisitor):
+ """Methods in this class recursively traverse an AST and
+ output source code for the abstract syntax; original formatting
+ is disregarded."""
+
+ def __init__(self, *, _avoid_backslashes=False):
+ self._source = []
+ self._buffer = []
+ self._precedences = {}
+ self._type_ignores = {}
+ self._indent = 0
+ self._avoid_backslashes = _avoid_backslashes
+
+ def interleave(self, inter, f, seq):
+ """Call f on each item in seq, calling inter() in between."""
+ seq = iter(seq)
+ try:
+ f(next(seq))
+ except StopIteration:
+ pass
+ else:
+ for x in seq:
+ inter()
+ f(x)
+
+ def items_view(self, traverser, items):
+ """Traverse and separate the given *items* with a comma and append it to
+ the buffer. If *items* is a single item sequence, a trailing comma
+ will be added."""
+ if len(items) == 1:
+ traverser(items[0])
+ self.write(",")
+ else:
+ self.interleave(lambda: self.write(", "), traverser, items)
+
+ def maybe_newline(self):
+ """Adds a newline if it isn't the start of generated source"""
+ if self._source:
+ self.write("\n")
+
+ def fill(self, text=""):
+ """Indent a piece of text and append it, according to the current
+ indentation level"""
+ self.maybe_newline()
+ self.write(" " * self._indent + text)
+
+ def write(self, text):
+ """Append a piece of text"""
+ self._source.append(text)
+
+ def buffer_writer(self, text):
+ self._buffer.append(text)
+
+ @property
+ def buffer(self):
+ value = "".join(self._buffer)
+ self._buffer.clear()
+ return value
+
+ @contextmanager
+ def block(self, *, extra = None):
+ """A context manager for preparing the source for blocks. It adds
+ the character ':', increases the indentation on enter and decreases
+ the indentation on exit. If *extra* is given, it will be directly
+ appended after the colon character.
+ """
+ self.write(":")
+ if extra:
+ self.write(extra)
+ self._indent += 1
+ yield
+ self._indent -= 1
+
+ @contextmanager
+ def delimit(self, start, end):
+ """A context manager for preparing the source for expressions. It adds
+ *start* to the buffer and enters, after exit it adds *end*."""
+
+ self.write(start)
+ yield
+ self.write(end)
+
+ def delimit_if(self, start, end, condition):
+ if condition:
+ return self.delimit(start, end)
+ else:
+ return nullcontext()
+
+ def require_parens(self, precedence, node):
+ """Shortcut to adding precedence related parens"""
+ return self.delimit_if("(", ")", self.get_precedence(node) > precedence)
+
+ def get_precedence(self, node):
+ return self._precedences.get(node, _Precedence.TEST)
+
+ def set_precedence(self, precedence, *nodes):
+ for node in nodes:
+ self._precedences[node] = precedence
+
+ def get_raw_docstring(self, node):
+ """If a docstring node is found in the body of the *node* parameter,
+ return that docstring node, None otherwise.
+
+ Logic mirrored from ``_PyAST_GetDocString``."""
+ if not isinstance(
+ node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)
+ ) or len(node.body) < 1:
+ return None
+ node = node.body[0]
+ if not isinstance(node, Expr):
+ return None
+ node = node.value
+ if isinstance(node, Constant) and isinstance(node.value, str):
+ return node
+
+ def get_type_comment(self, node):
+ comment = self._type_ignores.get(node.lineno) or node.type_comment
+ if comment is not None:
+ return f" # type: {comment}"
+
+ def traverse(self, node):
+ if isinstance(node, list):
+ for item in node:
+ self.traverse(item)
+ else:
+ super().visit(node)
+
+ def visit(self, node):
+ """Outputs a source code string that, if converted back to an ast
+ (using ast.parse) will generate an AST equivalent to *node*"""
+ self._source = []
+ self.traverse(node)
+ return "".join(self._source)
+
+ def _write_docstring_and_traverse_body(self, node):
+ if (docstring := self.get_raw_docstring(node)):
+ self._write_docstring(docstring)
+ self.traverse(node.body[1:])
+ else:
+ self.traverse(node.body)
+
+ def visit_Module(self, node):
+ self._type_ignores = {
+ ignore.lineno: f"ignore{ignore.tag}"
+ for ignore in node.type_ignores
+ }
+ self._write_docstring_and_traverse_body(node)
+ self._type_ignores.clear()
+
+ def visit_FunctionType(self, node):
+ with self.delimit("(", ")"):
+ self.interleave(
+ lambda: self.write(", "), self.traverse, node.argtypes
+ )
+
+ self.write(" -> ")
+ self.traverse(node.returns)
+
+ def visit_Expr(self, node):
+ self.fill()
+ self.set_precedence(_Precedence.YIELD, node.value)
+ self.traverse(node.value)
+
+ def visit_NamedExpr(self, node):
+ with self.require_parens(_Precedence.TUPLE, node):
+ self.set_precedence(_Precedence.ATOM, node.target, node.value)
+ self.traverse(node.target)
+ self.write(" := ")
+ self.traverse(node.value)
+
+ def visit_Import(self, node):
+ self.fill("import ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.names)
+
+ def visit_ImportFrom(self, node):
+ self.fill("from ")
+ self.write("." * node.level)
+ if node.module:
+ self.write(node.module)
+ self.write(" import ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.names)
+
+ def visit_Assign(self, node):
+ self.fill()
+ for target in node.targets:
+ self.traverse(target)
+ self.write(" = ")
+ self.traverse(node.value)
+ if type_comment := self.get_type_comment(node):
+ self.write(type_comment)
+
+ def visit_AugAssign(self, node):
+ self.fill()
+ self.traverse(node.target)
+ self.write(" " + self.binop[node.op.__class__.__name__] + "= ")
+ self.traverse(node.value)
+
+ def visit_AnnAssign(self, node):
+ self.fill()
+ with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)):
+ self.traverse(node.target)
+ self.write(": ")
+ self.traverse(node.annotation)
+ if node.value:
+ self.write(" = ")
+ self.traverse(node.value)
+
+ def visit_Return(self, node):
+ self.fill("return")
+ if node.value:
+ self.write(" ")
+ self.traverse(node.value)
+
+ def visit_Pass(self, node):
+ self.fill("pass")
+
+ def visit_Break(self, node):
+ self.fill("break")
+
+ def visit_Continue(self, node):
+ self.fill("continue")
+
+ def visit_Delete(self, node):
+ self.fill("del ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.targets)
+
+ def visit_Assert(self, node):
+ self.fill("assert ")
+ self.traverse(node.test)
+ if node.msg:
+ self.write(", ")
+ self.traverse(node.msg)
+
+ def visit_Global(self, node):
+ self.fill("global ")
+ self.interleave(lambda: self.write(", "), self.write, node.names)
+
+ def visit_Nonlocal(self, node):
+ self.fill("nonlocal ")
+ self.interleave(lambda: self.write(", "), self.write, node.names)
+
+ def visit_Await(self, node):
+ with self.require_parens(_Precedence.AWAIT, node):
+ self.write("await")
+ if node.value:
+ self.write(" ")
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+
+ def visit_Yield(self, node):
+ with self.require_parens(_Precedence.YIELD, node):
+ self.write("yield")
+ if node.value:
+ self.write(" ")
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+
+ def visit_YieldFrom(self, node):
+ with self.require_parens(_Precedence.YIELD, node):
+ self.write("yield from ")
+ if not node.value:
+ raise ValueError("Node can't be used without a value attribute.")
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+
+ def visit_Raise(self, node):
+ self.fill("raise")
+ if not node.exc:
+ if node.cause:
+ raise ValueError(f"Node can't use cause without an exception.")
+ return
+ self.write(" ")
+ self.traverse(node.exc)
+ if node.cause:
+ self.write(" from ")
+ self.traverse(node.cause)
+
+ def visit_Try(self, node):
+ self.fill("try")
+ with self.block():
+ self.traverse(node.body)
+ for ex in node.handlers:
+ self.traverse(ex)
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+ if node.finalbody:
+ self.fill("finally")
+ with self.block():
+ self.traverse(node.finalbody)
+
+ def visit_ExceptHandler(self, node):
+ self.fill("except")
+ if node.type:
+ self.write(" ")
+ self.traverse(node.type)
+ if node.name:
+ self.write(" as ")
+ self.write(node.name)
+ with self.block():
+ self.traverse(node.body)
+
+ def visit_ClassDef(self, node):
+ self.maybe_newline()
+ for deco in node.decorator_list:
+ self.fill("@")
+ self.traverse(deco)
+ self.fill("class " + node.name)
+ with self.delimit_if("(", ")", condition = node.bases or node.keywords):
+ comma = False
+ for e in node.bases:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+ for e in node.keywords:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+
+ with self.block():
+ self._write_docstring_and_traverse_body(node)
+
+ def visit_FunctionDef(self, node):
+ self._function_helper(node, "def")
+
+ def visit_AsyncFunctionDef(self, node):
+ self._function_helper(node, "async def")
+
+ def _function_helper(self, node, fill_suffix):
+ self.maybe_newline()
+ for deco in node.decorator_list:
+ self.fill("@")
+ self.traverse(deco)
+ def_str = fill_suffix + " " + node.name
+ self.fill(def_str)
+ with self.delimit("(", ")"):
+ self.traverse(node.args)
+ if node.returns:
+ self.write(" -> ")
+ self.traverse(node.returns)
+ with self.block(extra=self.get_type_comment(node)):
+ self._write_docstring_and_traverse_body(node)
+
+ def visit_For(self, node):
+ self._for_helper("for ", node)
+
+ def visit_AsyncFor(self, node):
+ self._for_helper("async for ", node)
+
+ def _for_helper(self, fill, node):
+ self.fill(fill)
+ self.traverse(node.target)
+ self.write(" in ")
+ self.traverse(node.iter)
+ with self.block(extra=self.get_type_comment(node)):
+ self.traverse(node.body)
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+
+ def visit_If(self, node):
+ self.fill("if ")
+ self.traverse(node.test)
+ with self.block():
+ self.traverse(node.body)
+ # collapse nested ifs into equivalent elifs.
+ while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If):
+ node = node.orelse[0]
+ self.fill("elif ")
+ self.traverse(node.test)
+ with self.block():
+ self.traverse(node.body)
+ # final else
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+
+ def visit_While(self, node):
+ self.fill("while ")
+ self.traverse(node.test)
+ with self.block():
+ self.traverse(node.body)
+ if node.orelse:
+ self.fill("else")
+ with self.block():
+ self.traverse(node.orelse)
+
+ def visit_With(self, node):
+ self.fill("with ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.items)
+ with self.block(extra=self.get_type_comment(node)):
+ self.traverse(node.body)
+
+ def visit_AsyncWith(self, node):
+ self.fill("async with ")
+ self.interleave(lambda: self.write(", "), self.traverse, node.items)
+ with self.block(extra=self.get_type_comment(node)):
+ self.traverse(node.body)
+
+ def _str_literal_helper(
+ self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False
+ ):
+ """Helper for writing string literals, minimizing escapes.
+ Returns the tuple (string literal to write, possible quote types).
+ """
+ def escape_char(c):
+ # \n and \t are non-printable, but we only escape them if
+ # escape_special_whitespace is True
+ if not escape_special_whitespace and c in "\n\t":
+ return c
+ # Always escape backslashes and other non-printable characters
+ if c == "\\" or not c.isprintable():
+ return c.encode("unicode_escape").decode("ascii")
+ return c
+
+ escaped_string = "".join(map(escape_char, string))
+ possible_quotes = quote_types
+ if "\n" in escaped_string:
+ possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES]
+ possible_quotes = [q for q in possible_quotes if q not in escaped_string]
+ if not possible_quotes:
+ # If there aren't any possible_quotes, fallback to using repr
+ # on the original string. Try to use a quote from quote_types,
+ # e.g., so that we use triple quotes for docstrings.
+ string = repr(string)
+ quote = next((q for q in quote_types if string[0] in q), string[0])
+ return string[1:-1], [quote]
+ if escaped_string:
+ # Sort so that we prefer '''"''' over """\""""
+ possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1])
+ # If we're using triple quotes and we'd need to escape a final
+ # quote, escape it
+ if possible_quotes[0][0] == escaped_string[-1]:
+ assert len(possible_quotes[0]) == 3
+ escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1]
+ return escaped_string, possible_quotes
+
+ def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):
+ """Write string literal value with a best effort attempt to avoid backslashes."""
+ string, quote_types = self._str_literal_helper(string, quote_types=quote_types)
+ quote_type = quote_types[0]
+ self.write(f"{quote_type}{string}{quote_type}")
+
+ def visit_JoinedStr(self, node):
+ self.write("f")
+ if self._avoid_backslashes:
+ self._fstring_JoinedStr(node, self.buffer_writer)
+ self._write_str_avoiding_backslashes(self.buffer)
+ return
+
+ # If we don't need to avoid backslashes globally (i.e., we only need
+ # to avoid them inside FormattedValues), it's cosmetically preferred
+ # to use escaped whitespace. That is, it's preferred to use backslashes
+ # for cases like: f"{x}\n". To accomplish this, we keep track of what
+ # in our buffer corresponds to FormattedValues and what corresponds to
+ # Constant parts of the f-string, and allow escapes accordingly.
+ buffer = []
+ for value in node.values:
+ meth = getattr(self, "_fstring_" + type(value).__name__)
+ meth(value, self.buffer_writer)
+ buffer.append((self.buffer, isinstance(value, Constant)))
+ new_buffer = []
+ quote_types = _ALL_QUOTES
+ for value, is_constant in buffer:
+ # Repeatedly narrow down the list of possible quote_types
+ value, quote_types = self._str_literal_helper(
+ value, quote_types=quote_types,
+ escape_special_whitespace=is_constant
+ )
+ new_buffer.append(value)
+ value = "".join(new_buffer)
+ quote_type = quote_types[0]
+ self.write(f"{quote_type}{value}{quote_type}")
+
+ def visit_FormattedValue(self, node):
+ self.write("f")
+ self._fstring_FormattedValue(node, self.buffer_writer)
+ self._write_str_avoiding_backslashes(self.buffer)
+
+ def _fstring_JoinedStr(self, node, write):
+ for value in node.values:
+ meth = getattr(self, "_fstring_" + type(value).__name__)
+ meth(value, write)
+
+ def _fstring_Constant(self, node, write):
+ if not isinstance(node.value, str):
+ raise ValueError("Constants inside JoinedStr should be a string.")
+ value = node.value.replace("{", "{{").replace("}", "}}")
+ write(value)
+
+ def _fstring_FormattedValue(self, node, write):
+ write("{")
+ unparser = type(self)(_avoid_backslashes=True)
+ unparser.set_precedence(_Precedence.TEST.next(), node.value)
+ expr = unparser.visit(node.value)
+ if expr.startswith("{"):
+ write(" ") # Separate pair of opening brackets as "{ {"
+ if "\\" in expr:
+ raise ValueError("Unable to avoid backslash in f-string expression part")
+ write(expr)
+ if node.conversion != -1:
+ conversion = chr(node.conversion)
+ if conversion not in "sra":
+ raise ValueError("Unknown f-string conversion.")
+ write(f"!{conversion}")
+ if node.format_spec:
+ write(":")
+ meth = getattr(self, "_fstring_" + type(node.format_spec).__name__)
+ meth(node.format_spec, write)
+ write("}")
+
+ def visit_Name(self, node):
+ self.write(node.id)
+
+ def _write_docstring(self, node):
+ self.fill()
+ if node.kind == "u":
+ self.write("u")
+ self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES)
+
+ def _write_constant(self, value):
+ if isinstance(value, (float, complex)):
+ # Substitute overflowing decimal literal for AST infinities.
+ self.write(repr(value).replace("inf", _INFSTR))
+ elif self._avoid_backslashes and isinstance(value, str):
+ self._write_str_avoiding_backslashes(value)
+ else:
+ self.write(repr(value))
+
+ def visit_Constant(self, node):
+ value = node.value
+ if isinstance(value, tuple):
+ with self.delimit("(", ")"):
+ self.items_view(self._write_constant, value)
+ elif value is ...:
+ self.write("...")
+ else:
+ if node.kind == "u":
+ self.write("u")
+ self._write_constant(node.value)
+
+ def visit_List(self, node):
+ with self.delimit("[", "]"):
+ self.interleave(lambda: self.write(", "), self.traverse, node.elts)
+
+ def visit_ListComp(self, node):
+ with self.delimit("[", "]"):
+ self.traverse(node.elt)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_GeneratorExp(self, node):
+ with self.delimit("(", ")"):
+ self.traverse(node.elt)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_SetComp(self, node):
+ with self.delimit("{", "}"):
+ self.traverse(node.elt)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_DictComp(self, node):
+ with self.delimit("{", "}"):
+ self.traverse(node.key)
+ self.write(": ")
+ self.traverse(node.value)
+ for gen in node.generators:
+ self.traverse(gen)
+
+ def visit_comprehension(self, node):
+ if node.is_async:
+ self.write(" async for ")
+ else:
+ self.write(" for ")
+ self.set_precedence(_Precedence.TUPLE, node.target)
+ self.traverse(node.target)
+ self.write(" in ")
+ self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs)
+ self.traverse(node.iter)
+ for if_clause in node.ifs:
+ self.write(" if ")
+ self.traverse(if_clause)
+
+ def visit_IfExp(self, node):
+ with self.require_parens(_Precedence.TEST, node):
+ self.set_precedence(_Precedence.TEST.next(), node.body, node.test)
+ self.traverse(node.body)
+ self.write(" if ")
+ self.traverse(node.test)
+ self.write(" else ")
+ self.set_precedence(_Precedence.TEST, node.orelse)
+ self.traverse(node.orelse)
+
+ def visit_Set(self, node):
+ if not node.elts:
+ raise ValueError("Set node should have at least one item")
+ with self.delimit("{", "}"):
+ self.interleave(lambda: self.write(", "), self.traverse, node.elts)
+
+ def visit_Dict(self, node):
+ def write_key_value_pair(k, v):
+ self.traverse(k)
+ self.write(": ")
+ self.traverse(v)
+
+ def write_item(item):
+ k, v = item
+ if k is None:
+ # for dictionary unpacking operator in dicts {**{'y': 2}}
+ # see PEP 448 for details
+ self.write("**")
+ self.set_precedence(_Precedence.EXPR, v)
+ self.traverse(v)
+ else:
+ write_key_value_pair(k, v)
+
+ with self.delimit("{", "}"):
+ self.interleave(
+ lambda: self.write(", "), write_item, zip(node.keys, node.values)
+ )
+
+ def visit_Tuple(self, node):
+ with self.delimit("(", ")"):
+ self.items_view(self.traverse, node.elts)
+
+ unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}
+ unop_precedence = {
+ "not": _Precedence.NOT,
+ "~": _Precedence.FACTOR,
+ "+": _Precedence.FACTOR,
+ "-": _Precedence.FACTOR,
+ }
+
+ def visit_UnaryOp(self, node):
+ operator = self.unop[node.op.__class__.__name__]
+ operator_precedence = self.unop_precedence[operator]
+ with self.require_parens(operator_precedence, node):
+ self.write(operator)
+ # factor prefixes (+, -, ~) shouldn't be separated
+ # from the value they belong to (e.g.: +1 instead of + 1)
+ if operator_precedence is not _Precedence.FACTOR:
+ self.write(" ")
+ self.set_precedence(operator_precedence, node.operand)
+ self.traverse(node.operand)
+
+ binop = {
+ "Add": "+",
+ "Sub": "-",
+ "Mult": "*",
+ "MatMult": "@",
+ "Div": "/",
+ "Mod": "%",
+ "LShift": "<<",
+ "RShift": ">>",
+ "BitOr": "|",
+ "BitXor": "^",
+ "BitAnd": "&",
+ "FloorDiv": "//",
+ "Pow": "**",
+ }
+
+ binop_precedence = {
+ "+": _Precedence.ARITH,
+ "-": _Precedence.ARITH,
+ "*": _Precedence.TERM,
+ "@": _Precedence.TERM,
+ "/": _Precedence.TERM,
+ "%": _Precedence.TERM,
+ "<<": _Precedence.SHIFT,
+ ">>": _Precedence.SHIFT,
+ "|": _Precedence.BOR,
+ "^": _Precedence.BXOR,
+ "&": _Precedence.BAND,
+ "//": _Precedence.TERM,
+ "**": _Precedence.POWER,
+ }
+
+ binop_rassoc = frozenset(("**",))
+ def visit_BinOp(self, node):
+ operator = self.binop[node.op.__class__.__name__]
+ operator_precedence = self.binop_precedence[operator]
+ with self.require_parens(operator_precedence, node):
+ if operator in self.binop_rassoc:
+ left_precedence = operator_precedence.next()
+ right_precedence = operator_precedence
+ else:
+ left_precedence = operator_precedence
+ right_precedence = operator_precedence.next()
+
+ self.set_precedence(left_precedence, node.left)
+ self.traverse(node.left)
+ self.write(f" {operator} ")
+ self.set_precedence(right_precedence, node.right)
+ self.traverse(node.right)
+
+ cmpops = {
+ "Eq": "==",
+ "NotEq": "!=",
+ "Lt": "<",
+ "LtE": "<=",
+ "Gt": ">",
+ "GtE": ">=",
+ "Is": "is",
+ "IsNot": "is not",
+ "In": "in",
+ "NotIn": "not in",
+ }
+
+ def visit_Compare(self, node):
+ with self.require_parens(_Precedence.CMP, node):
+ self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators)
+ self.traverse(node.left)
+ for o, e in zip(node.ops, node.comparators):
+ self.write(" " + self.cmpops[o.__class__.__name__] + " ")
+ self.traverse(e)
+
+ boolops = {"And": "and", "Or": "or"}
+ boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR}
+
+ def visit_BoolOp(self, node):
+ operator = self.boolops[node.op.__class__.__name__]
+ operator_precedence = self.boolop_precedence[operator]
+
+ def increasing_level_traverse(node):
+ nonlocal operator_precedence
+ operator_precedence = operator_precedence.next()
+ self.set_precedence(operator_precedence, node)
+ self.traverse(node)
+
+ with self.require_parens(operator_precedence, node):
+ s = f" {operator} "
+ self.interleave(lambda: self.write(s), increasing_level_traverse, node.values)
+
+ def visit_Attribute(self, node):
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+ # Special case: 3.__abs__() is a syntax error, so if node.value
+ # is an integer literal then we need to either parenthesize
+ # it or add an extra space to get 3 .__abs__().
+ if isinstance(node.value, Constant) and isinstance(node.value.value, int):
+ self.write(" ")
+ self.write(".")
+ self.write(node.attr)
+
+ def visit_Call(self, node):
+ self.set_precedence(_Precedence.ATOM, node.func)
+ self.traverse(node.func)
+ with self.delimit("(", ")"):
+ comma = False
+ for e in node.args:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+ for e in node.keywords:
+ if comma:
+ self.write(", ")
+ else:
+ comma = True
+ self.traverse(e)
+
+ def visit_Subscript(self, node):
+ def is_simple_tuple(slice_value):
+ # when unparsing a non-empty tuple, the parentheses can be safely
+ # omitted if there aren't any elements that explicitly require
+ # parentheses (such as starred expressions).
+ return (
+ isinstance(slice_value, Tuple)
+ and slice_value.elts
+ and not any(isinstance(elt, Starred) for elt in slice_value.elts)
+ )
+
+ self.set_precedence(_Precedence.ATOM, node.value)
+ self.traverse(node.value)
+ with self.delimit("[", "]"):
+ if is_simple_tuple(node.slice):
+ self.items_view(self.traverse, node.slice.elts)
+ else:
+ self.traverse(node.slice)
+
+ def visit_Starred(self, node):
+ self.write("*")
+ self.set_precedence(_Precedence.EXPR, node.value)
+ self.traverse(node.value)
+
+ def visit_Ellipsis(self, node):
+ self.write("...")
+
+ def visit_Slice(self, node):
+ if node.lower:
+ self.traverse(node.lower)
+ self.write(":")
+ if node.upper:
+ self.traverse(node.upper)
+ if node.step:
+ self.write(":")
+ self.traverse(node.step)
+
+ def visit_arg(self, node):
+ self.write(node.arg)
+ if node.annotation:
+ self.write(": ")
+ self.traverse(node.annotation)
+
+ def visit_arguments(self, node):
+ first = True
+ # normal arguments
+ all_args = node.posonlyargs + node.args
+ defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults
+ for index, elements in enumerate(zip(all_args, defaults), 1):
+ a, d = elements
+ if first:
+ first = False
+ else:
+ self.write(", ")
+ self.traverse(a)
+ if d:
+ self.write("=")
+ self.traverse(d)
+ if index == len(node.posonlyargs):
+ self.write(", /")
+
+ # varargs, or bare '*' if no varargs but keyword-only arguments present
+ if node.vararg or node.kwonlyargs:
+ if first:
+ first = False
+ else:
+ self.write(", ")
+ self.write("*")
+ if node.vararg:
+ self.write(node.vararg.arg)
+ if node.vararg.annotation:
+ self.write(": ")
+ self.traverse(node.vararg.annotation)
+
+ # keyword-only arguments
+ if node.kwonlyargs:
+ for a, d in zip(node.kwonlyargs, node.kw_defaults):
+ self.write(", ")
+ self.traverse(a)
+ if d:
+ self.write("=")
+ self.traverse(d)
+
+ # kwargs
+ if node.kwarg:
+ if first:
+ first = False
+ else:
+ self.write(", ")
+ self.write("**" + node.kwarg.arg)
+ if node.kwarg.annotation:
+ self.write(": ")
+ self.traverse(node.kwarg.annotation)
+
+ def visit_keyword(self, node):
+ if node.arg is None:
+ self.write("**")
+ else:
+ self.write(node.arg)
+ self.write("=")
+ self.traverse(node.value)
+
+ def visit_Lambda(self, node):
+ with self.require_parens(_Precedence.TEST, node):
+ self.write("lambda ")
+ self.traverse(node.args)
+ self.write(": ")
+ self.set_precedence(_Precedence.TEST, node.body)
+ self.traverse(node.body)
+
+ def visit_alias(self, node):
+ self.write(node.name)
+ if node.asname:
+ self.write(" as " + node.asname)
+
+ def visit_withitem(self, node):
+ self.traverse(node.context_expr)
+ if node.optional_vars:
+ self.write(" as ")
+ self.traverse(node.optional_vars)
+
+def unparse(ast_obj):
+ unparser = _Unparser()
+ return unparser.visit(ast_obj)
+
+
+def main():
+ import argparse
+
+ parser = argparse.ArgumentParser(prog='python -m ast')
+ parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?',
+ default='-',
+ help='the file to parse; defaults to stdin')
+ parser.add_argument('-m', '--mode', default='exec',
+ choices=('exec', 'single', 'eval', 'func_type'),
+ help='specify what kind of code must be parsed')
+ parser.add_argument('--no-type-comments', default=True, action='store_false',
+ help="don't add information about type comments")
+ parser.add_argument('-a', '--include-attributes', action='store_true',
+ help='include attributes such as line numbers and '
+ 'column offsets')
+ parser.add_argument('-i', '--indent', type=int, default=3,
+ help='indentation of nodes (number of spaces)')
+ args = parser.parse_args()
+
+ with args.infile as infile:
+ source = infile.read()
+ tree = parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments)
+ print(dump(tree, include_attributes=args.include_attributes, indent=args.indent))
+
+if __name__ == '__main__':
+ main()
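
Besides hardening `literal_eval` (empty `set()` calls, malformed `Dict` nodes) and adding the `indent` parameter to `dump`, this file gains a full precedence-aware `_Unparser`, exposed as `ast.unparse` — an approximate inverse of `ast.parse`. A quick round trip:

    import ast

    tree = ast.parse('x = {**a, "k": f(1 + 2) ** 3}')
    print(ast.dump(tree, indent=4))   # pretty-printed node tree (new in 3.9)
    print(ast.unparse(tree))          # x = {**a, 'k': f(1 + 2) ** 3}

    print(ast.literal_eval('set()')) # set(), now accepted as a literal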
diff --git a/x64/Lib/asynchat.py b/x64/Lib/asynchat.py
index fc1146a..f4ba361 100644
--- a/x64/Lib/asynchat.py
+++ b/x64/Lib/asynchat.py
@@ -117,7 +117,7 @@ class async_chat(asyncore.dispatcher):
data = self.recv(self.ac_in_buffer_size)
except BlockingIOError:
return
- except OSError as why:
+ except OSError:
self.handle_error()
return
diff --git a/x64/Lib/asyncio/__init__.py b/x64/Lib/asyncio/__init__.py
index 28c2e2c..eb84bfb 100644
--- a/x64/Lib/asyncio/__init__.py
+++ b/x64/Lib/asyncio/__init__.py
@@ -17,6 +17,7 @@ from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
+from .threads import *
from .transports import *
# Exposed for _asynciomodule.c to implement now deprecated
@@ -35,6 +36,7 @@ __all__ = (base_events.__all__ +
streams.__all__ +
subprocess.__all__ +
tasks.__all__ +
+ threads.__all__ +
transports.__all__)
if sys.platform == 'win32': # pragma: no cover
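
The re-exported `asyncio.threads` module provides `asyncio.to_thread`, which runs a blocking callable in the loop's default executor while preserving context variables. A minimal sketch:

    import asyncio, time

    def blocking_io(n):
        time.sleep(n)          # stands in for real blocking work
        return f'slept {n}s'

    async def main():
        # Runs in a worker thread; the event loop stays responsive.
        result = await asyncio.to_thread(blocking_io, 0.1)
        print(result)

    asyncio.run(main())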
diff --git a/x64/Lib/asyncio/base_events.py b/x64/Lib/asyncio/base_events.py
index 799013d..b2d446a 100644
--- a/x64/Lib/asyncio/base_events.py
+++ b/x64/Lib/asyncio/base_events.py
@@ -410,6 +410,8 @@ class BaseEventLoop(events.AbstractEventLoop):
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
+ # Set to True when `loop.shutdown_default_executor` is called.
+ self._executor_shutdown_called = False
def __repr__(self):
return (
@@ -507,6 +509,10 @@ class BaseEventLoop(events.AbstractEventLoop):
if self._closed:
raise RuntimeError('Event loop is closed')
+ def _check_default_executor(self):
+ if self._executor_shutdown_called:
+ raise RuntimeError('Executor shutdown has been called')
+
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
@@ -547,6 +553,26 @@ class BaseEventLoop(events.AbstractEventLoop):
'asyncgen': agen
})
+ async def shutdown_default_executor(self):
+ """Schedule the shutdown of the default executor."""
+ self._executor_shutdown_called = True
+ if self._default_executor is None:
+ return
+ future = self.create_future()
+ thread = threading.Thread(target=self._do_shutdown, args=(future,))
+ thread.start()
+ try:
+ await future
+ finally:
+ thread.join()
+
+ def _do_shutdown(self, future):
+ try:
+ self._default_executor.shutdown(wait=True)
+ self.call_soon_threadsafe(future.set_result, None)
+ except Exception as ex:
+ self.call_soon_threadsafe(future.set_exception, ex)
+
def _check_running(self):
if self.is_running():
raise RuntimeError('This event loop is already running')
@@ -640,6 +666,7 @@ class BaseEventLoop(events.AbstractEventLoop):
self._closed = True
self._ready.clear()
self._scheduled.clear()
+ self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
@@ -776,8 +803,12 @@ class BaseEventLoop(events.AbstractEventLoop):
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
+ # Only check when the default executor is being used
+ self._check_default_executor()
if executor is None:
- executor = concurrent.futures.ThreadPoolExecutor()
+ executor = concurrent.futures.ThreadPoolExecutor(
+ thread_name_prefix='asyncio'
+ )
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
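
`shutdown_default_executor()` gives the loop a deterministic way to join the default `ThreadPoolExecutor`'s worker threads; `asyncio.run()` calls it automatically during teardown in 3.9. With manual loop management it would look roughly like this:

    import asyncio

    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(loop.run_in_executor(None, print, 'in worker'))
    finally:
        # Wait for the default executor's threads before closing the loop
        # (asyncio.run() now does this for you).
        loop.run_until_complete(loop.shutdown_default_executor())
        loop.close()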
diff --git a/x64/Lib/asyncio/base_futures.py b/x64/Lib/asyncio/base_futures.py
index 22f2980..2c01ac9 100644
--- a/x64/Lib/asyncio/base_futures.py
+++ b/x64/Lib/asyncio/base_futures.py
@@ -1,6 +1,7 @@
__all__ = ()
import reprlib
+from _thread import get_ident
from . import format_helpers
@@ -41,6 +42,16 @@ def _format_callbacks(cb):
return f'cb=[{cb}]'
+# bpo-42183: _repr_running is needed for repr protection
+# when a Future or Task result contains itself directly or indirectly.
+# The logic is borrowed from @reprlib.recursive_repr decorator.
+# Unfortunately, the direct decorator usage is impossible because of
+# AttributeError: '_asyncio.Task' object has no attribute '__module__' error.
+#
+# After fixing this thing we can return to the decorator based approach.
+_repr_running = set()
+
+
def _future_repr_info(future):
# (Future) -> str
"""helper function for Future.__repr__"""
@@ -49,9 +60,17 @@ def _future_repr_info(future):
if future._exception is not None:
info.append(f'exception={future._exception!r}')
else:
- # use reprlib to limit the length of the output, especially
- # for very long strings
- result = reprlib.repr(future._result)
+ key = id(future), get_ident()
+ if key in _repr_running:
+ result = '...'
+ else:
+ _repr_running.add(key)
+ try:
+ # use reprlib to limit the length of the output, especially
+ # for very long strings
+ result = reprlib.repr(future._result)
+ finally:
+ _repr_running.discard(key)
info.append(f'result={result}')
if future._callbacks:
info.append(_format_callbacks(future._callbacks))
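
The `_repr_running` guard mirrors `reprlib.recursive_repr`: keying on `(id(obj), thread id)` breaks the infinite recursion that occurs when a Future's result contains the Future itself. The same pattern in isolation (`safe_repr` is an illustrative stand-in, not asyncio API):

    from _thread import get_ident

    _repr_running = set()

    def safe_repr(obj):
        key = (id(obj), get_ident())
        if key in _repr_running:
            return '...'           # already printing this object on this thread
        _repr_running.add(key)
        try:
            return f'[{", ".join(map(safe_repr, obj))}]'
        finally:
            _repr_running.discard(key)

    lst = []
    lst.append(lst)                # self-referential container
    print(safe_repr(lst))          # [...]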
diff --git a/x64/Lib/asyncio/base_tasks.py b/x64/Lib/asyncio/base_tasks.py
index e2da462..09bb171 100644
--- a/x64/Lib/asyncio/base_tasks.py
+++ b/x64/Lib/asyncio/base_tasks.py
@@ -24,11 +24,18 @@ def _task_repr_info(task):
def _task_get_stack(task, limit):
frames = []
- try:
- # 'async def' coroutines
+ if hasattr(task._coro, 'cr_frame'):
+ # case 1: 'async def' coroutines
f = task._coro.cr_frame
- except AttributeError:
+ elif hasattr(task._coro, 'gi_frame'):
+ # case 2: legacy coroutines
f = task._coro.gi_frame
+ elif hasattr(task._coro, 'ag_frame'):
+ # case 3: async generators
+ f = task._coro.ag_frame
+ else:
+ # case 4: unknown objects
+ f = None
if f is not None:
while f is not None:
if limit is not None:
diff --git a/x64/Lib/asyncio/events.py b/x64/Lib/asyncio/events.py
index ca08663..0dce87b 100644
--- a/x64/Lib/asyncio/events.py
+++ b/x64/Lib/asyncio/events.py
@@ -19,7 +19,6 @@ import sys
import threading
from . import format_helpers
-from . import exceptions
class Handle:
@@ -119,20 +118,24 @@ class TimerHandle(Handle):
return hash(self._when)
def __lt__(self, other):
- return self._when < other._when
+ if isinstance(other, TimerHandle):
+ return self._when < other._when
+ return NotImplemented
def __le__(self, other):
- if self._when < other._when:
- return True
- return self.__eq__(other)
+ if isinstance(other, TimerHandle):
+ return self._when < other._when or self.__eq__(other)
+ return NotImplemented
def __gt__(self, other):
- return self._when > other._when
+ if isinstance(other, TimerHandle):
+ return self._when > other._when
+ return NotImplemented
def __ge__(self, other):
- if self._when > other._when:
- return True
- return self.__eq__(other)
+ if isinstance(other, TimerHandle):
+ return self._when > other._when or self.__eq__(other)
+ return NotImplemented
def __eq__(self, other):
if isinstance(other, TimerHandle):
@@ -142,10 +145,6 @@ class TimerHandle(Handle):
self._cancelled == other._cancelled)
return NotImplemented
- def __ne__(self, other):
- equal = self.__eq__(other)
- return NotImplemented if equal is NotImplemented else not equal
-
def cancel(self):
if not self._cancelled:
self._loop._timer_handle_cancelled(self)
@@ -249,6 +248,10 @@ class AbstractEventLoop:
"""Shutdown all active asynchronous generators."""
raise NotImplementedError
+ async def shutdown_default_executor(self):
+ """Schedule the shutdown of the default executor."""
+ raise NotImplementedError
+
# Methods scheduling callbacks. All these return Handles.
def _timer_handle_cancelled(self, handle):
@@ -280,7 +283,7 @@ class AbstractEventLoop:
def call_soon_threadsafe(self, callback, *args):
raise NotImplementedError
- async def run_in_executor(self, executor, func, *args):
+ def run_in_executor(self, executor, func, *args):
raise NotImplementedError
def set_default_executor(self, executor):
@@ -393,7 +396,7 @@ class AbstractEventLoop:
The return value is a Server object, which can be used to stop
the service.
- path is a str, representing a file systsem path to bind the
+ path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
@@ -632,7 +635,7 @@ class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
"""
if (self._local._loop is None and
not self._local._set_called and
- isinstance(threading.current_thread(), threading._MainThread)):
+ threading.current_thread() is threading.main_thread()):
self.set_event_loop(self.new_event_loop())
if self._local._loop is None:
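The comparison changes above follow the standard rich-comparison protocol: return NotImplemented for foreign operand types so Python can try the reflected operation, and raise TypeError for ordering when both sides decline. A self-contained sketch of that pattern, with a hypothetical When class standing in for TimerHandle:

    class When:
        def __init__(self, when):
            self._when = when

        def __eq__(self, other):
            if isinstance(other, When):
                return self._when == other._when
            return NotImplemented  # let the other operand have a go

        def __lt__(self, other):
            if isinstance(other, When):
                return self._when < other._when
            return NotImplemented

    print(When(1) < When(2))   # True
    print(When(1) == 'x')      # False: both sides return NotImplemented
    try:
        When(1) < 'x'
    except TypeError:
        print('unorderable')   # ordering has no fallback, so TypeError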
diff --git a/x64/Lib/asyncio/exceptions.py b/x64/Lib/asyncio/exceptions.py
index e03602e..f07e448 100644
--- a/x64/Lib/asyncio/exceptions.py
+++ b/x64/Lib/asyncio/exceptions.py
@@ -34,8 +34,9 @@ class IncompleteReadError(EOFError):
- expected: total number of expected bytes (or None if unknown)
"""
def __init__(self, partial, expected):
+ r_expected = 'undefined' if expected is None else repr(expected)
super().__init__(f'{len(partial)} bytes read on a total of '
- f'{expected!r} expected bytes')
+ f'{r_expected} expected bytes')
self.partial = partial
self.expected = expected
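The change above only affects the message text when the expected byte count is unknown. A quick illustration, runnable as-is on 3.9:

    import asyncio

    err = asyncio.IncompleteReadError(partial=b'abc', expected=None)
    # 'undefined' now appears instead of repr(None):
    print(err)  # 3 bytes read on a total of undefined expected bytes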
diff --git a/x64/Lib/asyncio/futures.py b/x64/Lib/asyncio/futures.py
index 9afda22..bed4da5 100644
--- a/x64/Lib/asyncio/futures.py
+++ b/x64/Lib/asyncio/futures.py
@@ -51,6 +51,9 @@ class Future:
_exception = None
_loop = None
_source_traceback = None
+ _cancel_message = None
+ # A saved CancelledError for later chaining as an exception context.
+ _cancelled_exc = None
# This field is used for a dual purpose:
# - Its presence is a marker to declare that a class implements
@@ -103,6 +106,9 @@ class Future:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
+ def __class_getitem__(cls, type):
+ return cls
+
@property
def _log_traceback(self):
return self.__log_traceback
@@ -120,7 +126,22 @@ class Future:
raise RuntimeError("Future object is not initialized.")
return loop
- def cancel(self):
+ def _make_cancelled_error(self):
+ """Create the CancelledError to raise if the Future is cancelled.
+
+ This should only be called once when handling a cancellation since
+ it erases the saved context exception value.
+ """
+ if self._cancel_message is None:
+ exc = exceptions.CancelledError()
+ else:
+ exc = exceptions.CancelledError(self._cancel_message)
+ exc.__context__ = self._cancelled_exc
+ # Remove the reference since we don't need this anymore.
+ self._cancelled_exc = None
+ return exc
+
+ def cancel(self, msg=None):
"""Cancel the future and schedule callbacks.
If the future is already done or cancelled, return False. Otherwise,
@@ -131,6 +152,7 @@ class Future:
if self._state != _PENDING:
return False
self._state = _CANCELLED
+ self._cancel_message = msg
self.__schedule_callbacks()
return True
@@ -170,7 +192,8 @@ class Future:
the future is done and has an exception set, this exception is raised.
"""
if self._state == _CANCELLED:
- raise exceptions.CancelledError
+ exc = self._make_cancelled_error()
+ raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Result is not ready.')
self.__log_traceback = False
@@ -187,7 +210,8 @@ class Future:
InvalidStateError.
"""
if self._state == _CANCELLED:
- raise exceptions.CancelledError
+ exc = self._make_cancelled_error()
+ raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Exception is not set.')
self.__log_traceback = False
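cancel() growing a msg parameter and _make_cancelled_error() together let a cancellation reason travel with the CancelledError. A minimal sketch of the 3.9 behaviour (the msg parameter is new in this release and not available earlier):

    import asyncio

    async def main():
        fut = asyncio.get_running_loop().create_future()
        fut.cancel(msg='gave up waiting')
        try:
            await fut   # result() raises _make_cancelled_error()
        except asyncio.CancelledError as exc:
            print(exc.args)  # ('gave up waiting',)

    asyncio.run(main())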
diff --git a/x64/Lib/asyncio/locks.py b/x64/Lib/asyncio/locks.py
index d94daeb..f1ce732 100644
--- a/x64/Lib/asyncio/locks.py
+++ b/x64/Lib/asyncio/locks.py
@@ -3,96 +3,13 @@
__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
import collections
-import types
import warnings
from . import events
-from . import futures
from . import exceptions
-from .import coroutines
-
-
-class _ContextManager:
- """Context manager.
-
- This enables the following idiom for acquiring and releasing a
- lock around a block:
-
- with (yield from lock):
- <block>
-
- while failing loudly when accidentally using:
-
- with lock:
- <block>
-
- Deprecated, use 'async with' statement:
- async with lock:
- <block>
- """
-
- def __init__(self, lock):
- self._lock = lock
-
- def __enter__(self):
- # We have no use for the "as ..." clause in the with
- # statement for locks.
- return None
-
- def __exit__(self, *args):
- try:
- self._lock.release()
- finally:
- self._lock = None # Crudely prevent reuse.
class _ContextManagerMixin:
- def __enter__(self):
- raise RuntimeError(
- '"yield from" should be used as context manager expression')
-
- def __exit__(self, *args):
- # This must exist because __enter__ exists, even though that
- # always raises; that's how the with-statement works.
- pass
-
- @types.coroutine
- def __iter__(self):
- # This is not a coroutine. It is meant to enable the idiom:
- #
- # with (yield from lock):
- # <block>
- #
- # as an alternative to:
- #
- # yield from lock.acquire()
- # try:
- # <block>
- # finally:
- # lock.release()
- # Deprecated, use 'async with' statement:
- # async with lock:
- # <block>
- warnings.warn("'with (yield from lock)' is deprecated "
- "use 'async with lock' instead",
- DeprecationWarning, stacklevel=2)
- yield from self.acquire()
- return _ContextManager(self)
-
- # The flag is needed for legacy asyncio.iscoroutine()
- __iter__._is_coroutine = coroutines._is_coroutine
-
- async def __acquire_ctx(self):
- await self.acquire()
- return _ContextManager(self)
-
- def __await__(self):
- warnings.warn("'with await lock' is deprecated "
- "use 'async with lock' instead",
- DeprecationWarning, stacklevel=2)
- # To make "with await lock" work.
- return self.__acquire_ctx().__await__()
-
async def __aenter__(self):
await self.acquire()
# We have no use for the "as ..." clause in the with
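With the "yield from" plumbing removed, "async with" is the only remaining way to hold a lock around a block. For reference, the surviving idiom:

    import asyncio

    async def main():
        lock = asyncio.Lock()
        async with lock:          # acquire ... release, exception-safe
            print(lock.locked())  # True
        print(lock.locked())      # False

    asyncio.run(main())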
diff --git a/x64/Lib/asyncio/proactor_events.py b/x64/Lib/asyncio/proactor_events.py
index 830d8ed..b4cd414 100644
--- a/x64/Lib/asyncio/proactor_events.py
+++ b/x64/Lib/asyncio/proactor_events.py
@@ -711,7 +711,7 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
raise exceptions.SendfileNotAvailableError("not a regular file")
try:
fsize = os.fstat(fileno).st_size
- except OSError as err:
+ except OSError:
raise exceptions.SendfileNotAvailableError("not a regular file")
blocksize = count if count else fsize
if not blocksize:
@@ -766,6 +766,14 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
try:
if f is not None:
f.result() # may raise
+ if self._self_reading_future is not f:
+ # When we scheduled this Future, we assigned it to
+ # _self_reading_future. If it's not there now, something has
+ # tried to cancel the loop while this callback was still in the
+ # queue (see windows_events.ProactorEventLoop.run_forever). In
+ # that case stop here instead of continuing to schedule a new
+ # iteration.
+ return
f = self._proactor.recv(self._ssock, 4096)
except exceptions.CancelledError:
# _close_self_pipe() has been called, stop waiting for data
@@ -783,8 +791,17 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
f.add_done_callback(self._loop_self_reading)
def _write_to_self(self):
+ # This may be called from a different thread, possibly after
+ # _close_self_pipe() has been called or even while it is
+ # running. Guard for self._csock being None or closed. When
+ # a socket is closed, send() raises OSError (with errno set to
+ # EBADF, but let's not rely on the exact error code).
+ csock = self._csock
+ if csock is None:
+ return
+
try:
- self._csock.send(b'\0')
+ csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
diff --git a/x64/Lib/asyncio/queues.py b/x64/Lib/asyncio/queues.py
index 390ae9a..cd3f7c6 100644
--- a/x64/Lib/asyncio/queues.py
+++ b/x64/Lib/asyncio/queues.py
@@ -76,6 +76,9 @@ class Queue:
def __str__(self):
return f'<{type(self).__name__} {self._format()}>'
+ def __class_getitem__(cls, type):
+ return cls
+
def _format(self):
result = f'maxsize={self._maxsize!r}'
if getattr(self, '_queue', None):
diff --git a/x64/Lib/asyncio/runners.py b/x64/Lib/asyncio/runners.py
index 2e37e18..268635d 100644
--- a/x64/Lib/asyncio/runners.py
+++ b/x64/Lib/asyncio/runners.py
@@ -5,7 +5,7 @@ from . import events
from . import tasks
-def run(main, *, debug=False):
+def run(main, *, debug=None):
"""Execute the coroutine and return the result.
This function runs the passed coroutine, taking care of
@@ -39,12 +39,14 @@ def run(main, *, debug=False):
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
- loop.set_debug(debug)
+ if debug is not None:
+ loop.set_debug(debug)
return loop.run_until_complete(main)
finally:
try:
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
+ loop.run_until_complete(loop.shutdown_default_executor())
finally:
events.set_event_loop(None)
loop.close()
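Two behaviour changes land here: debug=None means asyncio.run() no longer clobbers a debug mode enabled via PYTHONASYNCIODEBUG or -X dev, and the default executor is now shut down before the loop closes. Usage is unchanged:

    import asyncio

    async def main():
        print(asyncio.get_running_loop().get_debug())

    asyncio.run(main())              # inherits the global debug setting
    asyncio.run(main(), debug=True)  # explicit override still works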
diff --git a/x64/Lib/asyncio/selector_events.py b/x64/Lib/asyncio/selector_events.py
index a05cbb6..59cb6b1 100644
--- a/x64/Lib/asyncio/selector_events.py
+++ b/x64/Lib/asyncio/selector_events.py
@@ -133,14 +133,16 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
- if csock is not None:
- try:
- csock.send(b'\0')
- except OSError:
- if self._debug:
- logger.debug("Fail to write a null byte into the "
- "self-pipe socket",
- exc_info=True)
+ if csock is None:
+ return
+
+ try:
+ csock.send(b'\0')
+ except OSError:
+ if self._debug:
+ logger.debug("Fail to write a null byte into the "
+ "self-pipe socket",
+ exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100,
@@ -266,6 +268,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
(handle, writer))
if reader is not None:
reader.cancel()
+ return handle
def _remove_reader(self, fd):
if self.is_closed():
@@ -302,6 +305,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
(reader, handle))
if writer is not None:
writer.cancel()
+ return handle
def _remove_writer(self, fd):
"""Remove a writer callback."""
@@ -329,7 +333,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
- return self._add_reader(fd, callback, *args)
+ self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
@@ -339,7 +343,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
- return self._add_writer(fd, callback, *args)
+ self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
@@ -362,13 +366,15 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
pass
fut = self.create_future()
fd = sock.fileno()
- self.add_reader(fd, self._sock_recv, fut, sock, n)
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_recv, fut, sock, n)
fut.add_done_callback(
- functools.partial(self._sock_read_done, fd))
+ functools.partial(self._sock_read_done, fd, handle=handle))
return await fut
- def _sock_read_done(self, fd, fut):
- self.remove_reader(fd)
+ def _sock_read_done(self, fd, fut, handle=None):
+ if handle is None or not handle.cancelled():
+ self.remove_reader(fd)
def _sock_recv(self, fut, sock, n):
# _sock_recv() can add itself as an I/O callback if the operation can't
@@ -401,9 +407,10 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
pass
fut = self.create_future()
fd = sock.fileno()
- self.add_reader(fd, self._sock_recv_into, fut, sock, buf)
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_recv_into, fut, sock, buf)
fut.add_done_callback(
- functools.partial(self._sock_read_done, fd))
+ functools.partial(self._sock_read_done, fd, handle=handle))
return await fut
def _sock_recv_into(self, fut, sock, buf):
@@ -446,11 +453,12 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
fut = self.create_future()
fd = sock.fileno()
- fut.add_done_callback(
- functools.partial(self._sock_write_done, fd))
+ self._ensure_fd_no_transport(fd)
# use a trick with a list in closure to store a mutable state
- self.add_writer(fd, self._sock_sendall, fut, sock,
- memoryview(data), [n])
+ handle = self._add_writer(fd, self._sock_sendall, fut, sock,
+ memoryview(data), [n])
+ fut.add_done_callback(
+ functools.partial(self._sock_write_done, fd, handle=handle))
return await fut
def _sock_sendall(self, fut, sock, view, pos):
@@ -502,9 +510,11 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
# connection runs in background. We have to wait until the socket
# becomes writable to be notified when the connection succeed or
# fails.
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_writer(
+ fd, self._sock_connect_cb, fut, sock, address)
fut.add_done_callback(
- functools.partial(self._sock_write_done, fd))
- self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
+ functools.partial(self._sock_write_done, fd, handle=handle))
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
@@ -512,8 +522,9 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
else:
fut.set_result(None)
- def _sock_write_done(self, fd, fut):
- self.remove_writer(fd)
+ def _sock_write_done(self, fd, fut, handle=None):
+ if handle is None or not handle.cancelled():
+ self.remove_writer(fd)
def _sock_connect_cb(self, fut, sock, address):
if fut.done():
@@ -546,20 +557,19 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
- self._sock_accept(fut, False, sock)
+ self._sock_accept(fut, sock)
return await fut
- def _sock_accept(self, fut, registered, sock):
+ def _sock_accept(self, fut, sock):
fd = sock.fileno()
- if registered:
- self.remove_reader(fd)
- if fut.done():
- return
try:
conn, address = sock.accept()
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
- self.add_reader(fd, self._sock_accept, fut, True, sock)
+ self._ensure_fd_no_transport(fd)
+ handle = self._add_reader(fd, self._sock_accept, fut, sock)
+ fut.add_done_callback(
+ functools.partial(self._sock_read_done, fd, handle=handle))
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
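Throughout this file the socket helpers now register their I/O callbacks via _add_reader()/_add_writer(), keep the returned Handle, and skip the unregister step when that handle was already cancelled. The public API is unchanged; a short usage reminder:

    import asyncio
    import socket

    async def main():
        loop = asyncio.get_running_loop()
        a, b = socket.socketpair()
        a.setblocking(False)
        b.setblocking(False)
        await loop.sock_sendall(a, b'ping')
        print(await loop.sock_recv(b, 4))   # b'ping'
        a.close()
        b.close()

    asyncio.run(main())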
diff --git a/x64/Lib/asyncio/sslproto.py b/x64/Lib/asyncio/sslproto.py
index 3eca6b4..cad25b2 100644
--- a/x64/Lib/asyncio/sslproto.py
+++ b/x64/Lib/asyncio/sslproto.py
@@ -5,7 +5,6 @@ try:
except ImportError: # pragma: no cover
ssl = None
-from . import base_events
from . import constants
from . import protocols
from . import transports
diff --git a/x64/Lib/asyncio/tasks.py b/x64/Lib/asyncio/tasks.py
index 38d9827..f486b67 100644
--- a/x64/Lib/asyncio/tasks.py
+++ b/x64/Lib/asyncio/tasks.py
@@ -113,34 +113,6 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
# status is still pending
_log_destroy_pending = True
- @classmethod
- def current_task(cls, loop=None):
- """Return the currently running task in an event loop or None.
-
- By default the current task for the current event loop is returned.
-
- None is returned when called not in the context of a Task.
- """
- warnings.warn("Task.current_task() is deprecated since Python 3.7, "
- "use asyncio.current_task() instead",
- DeprecationWarning,
- stacklevel=2)
- if loop is None:
- loop = events.get_event_loop()
- return current_task(loop)
-
- @classmethod
- def all_tasks(cls, loop=None):
- """Return a set of all tasks for an event loop.
-
- By default all tasks for the current event loop are returned.
- """
- warnings.warn("Task.all_tasks() is deprecated since Python 3.7, "
- "use asyncio.all_tasks() instead",
- DeprecationWarning,
- stacklevel=2)
- return _all_tasks_compat(loop)
-
def __init__(self, coro, *, loop=None, name=None):
super().__init__(loop=loop)
if self._source_traceback:
@@ -175,6 +147,9 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
self._loop.call_exception_handler(context)
super().__del__()
+ def __class_getitem__(cls, type):
+ return cls
+
def _repr_info(self):
return base_tasks._task_repr_info(self)
@@ -227,7 +202,7 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
"""
return base_tasks._task_print_stack(self, limit, file)
- def cancel(self):
+ def cancel(self, msg=None):
"""Request that this task cancel itself.
This arranges for a CancelledError to be thrown into the
@@ -251,13 +226,14 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
if self.done():
return False
if self._fut_waiter is not None:
- if self._fut_waiter.cancel():
+ if self._fut_waiter.cancel(msg=msg):
# Leave self._fut_waiter; it may be a Task that
# catches and ignores the cancellation so we may have
# to cancel it again later.
return True
# It must be the case that self.__step is already scheduled.
self._must_cancel = True
+ self._cancel_message = msg
return True
def __step(self, exc=None):
@@ -266,7 +242,7 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
f'_step(): already done: {self!r}, {exc!r}')
if self._must_cancel:
if not isinstance(exc, exceptions.CancelledError):
- exc = exceptions.CancelledError()
+ exc = self._make_cancelled_error()
self._must_cancel = False
coro = self._coro
self._fut_waiter = None
@@ -284,10 +260,12 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
if self._must_cancel:
# Task is cancelled right before coro stops.
self._must_cancel = False
- super().cancel()
+ super().cancel(msg=self._cancel_message)
else:
super().set_result(exc.value)
- except exceptions.CancelledError:
+ except exceptions.CancelledError as exc:
+ # Save the original exception so we can chain it later.
+ self._cancelled_exc = exc
super().cancel() # I.e., Future.cancel(self).
except (KeyboardInterrupt, SystemExit) as exc:
super().set_exception(exc)
@@ -316,7 +294,8 @@ class Task(futures._PyFuture): # Inherit Python Task implementation
self.__wakeup, context=self._context)
self._fut_waiter = result
if self._must_cancel:
- if self._fut_waiter.cancel():
+ if self._fut_waiter.cancel(
+ msg=self._cancel_message):
self._must_cancel = False
else:
new_exc = RuntimeError(
@@ -394,7 +373,7 @@ ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the Futures and coroutines given by fs to complete.
- The sequence futures must not be empty.
+ The fs iterable must not be empty.
Coroutines will be wrapped in Tasks.
@@ -421,7 +400,15 @@ async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
"and scheduled for removal in Python 3.10.",
DeprecationWarning, stacklevel=2)
- fs = {ensure_future(f, loop=loop) for f in set(fs)}
+ fs = set(fs)
+
+ if any(coroutines.iscoroutine(f) for f in fs):
+ warnings.warn("The explicit passing of coroutine objects to "
+ "asyncio.wait() is deprecated since Python 3.8, and "
+ "scheduled for removal in Python 3.11.",
+ DeprecationWarning, stacklevel=2)
+
+ fs = {ensure_future(f, loop=loop) for f in fs}
return await _wait(fs, timeout, return_when, loop)
@@ -460,8 +447,13 @@ async def wait_for(fut, timeout, *, loop=None):
if fut.done():
return fut.result()
- fut.cancel()
- raise exceptions.TimeoutError()
+ await _cancel_and_wait(fut, loop=loop)
+ try:
+ fut.result()
+ except exceptions.CancelledError as exc:
+ raise exceptions.TimeoutError() from exc
+ else:
+ raise exceptions.TimeoutError()
waiter = loop.create_future()
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -475,9 +467,12 @@ async def wait_for(fut, timeout, *, loop=None):
try:
await waiter
except exceptions.CancelledError:
- fut.remove_done_callback(cb)
- fut.cancel()
- raise
+ if fut.done():
+ return fut.result()
+ else:
+ fut.remove_done_callback(cb)
+ fut.cancel()
+ raise
if fut.done():
return fut.result()
@@ -487,7 +482,15 @@ async def wait_for(fut, timeout, *, loop=None):
# after wait_for() returns.
# See https://bugs.python.org/issue32751
await _cancel_and_wait(fut, loop=loop)
- raise exceptions.TimeoutError()
+ # In case task cancellation failed with some
+ # exception, we should re-raise it
+ # See https://bugs.python.org/issue40607
+ try:
+ fut.result()
+ except exceptions.CancelledError as exc:
+ raise exceptions.TimeoutError() from exc
+ else:
+ raise exceptions.TimeoutError()
finally:
timeout_handle.cancel()
@@ -572,7 +575,7 @@ def as_completed(fs, *, loop=None, timeout=None):
Note: The futures 'f' are not necessarily members of fs.
"""
if futures.isfuture(fs) or coroutines.iscoroutine(fs):
- raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
+ raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")
from .queues import Queue # Import here to avoid circular import problem.
done = Queue(loop=loop)
@@ -699,12 +702,12 @@ class _GatheringFuture(futures.Future):
self._children = children
self._cancel_requested = False
- def cancel(self):
+ def cancel(self, msg=None):
if self.done():
return False
ret = False
for child in self._children:
- if child.cancel():
+ if child.cancel(msg=msg):
ret = True
if ret:
# If any child tasks were actually cancelled, we should
@@ -736,6 +739,13 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
the outer Future is *not* cancelled in this case. (This is to
prevent the cancellation of one child to cause other children to
be cancelled.)
+
+ If *return_exceptions* is False, cancelling gather() after it
+ has been marked done won't cancel any submitted awaitables.
+ For instance, gather can be marked done after propagating an
+ exception to the caller, therefore, calling ``gather.cancel()``
+ after catching an exception (raised by one of the awaitables) from
+ gather won't cancel any other awaitables.
"""
if not coros_or_futures:
if loop is None:
@@ -763,7 +773,7 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
# Check if 'fut' is cancelled first, as
# 'fut.exception()' will *raise* a CancelledError
# instead of returning it.
- exc = exceptions.CancelledError()
+ exc = fut._make_cancelled_error()
outer.set_exception(exc)
return
else:
@@ -779,10 +789,15 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
for fut in children:
if fut.cancelled():
- # Check if 'fut' is cancelled first, as
- # 'fut.exception()' will *raise* a CancelledError
- # instead of returning it.
- res = exceptions.CancelledError()
+ # Check if 'fut' is cancelled first, as 'fut.exception()'
+ # will *raise* a CancelledError instead of returning it.
+ # Also, since we're adding the exception return value
+ # to 'results' instead of raising it, don't bother
+ # setting __context__. This also lets us preserve
+ # calling '_make_cancelled_error()' at most once.
+ res = exceptions.CancelledError(
+ '' if fut._cancel_message is None else
+ fut._cancel_message)
else:
res = fut.exception()
if res is None:
@@ -793,7 +808,8 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
# If gather is being cancelled we must propagate the
# cancellation regardless of *return_exceptions* argument.
# See issue 32684.
- outer.set_exception(exceptions.CancelledError())
+ exc = fut._make_cancelled_error()
+ outer.set_exception(exc)
else:
outer.set_result(results)
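Task.cancel() now forwards the message to whatever future the task is waiting on, so the coroutine being cancelled can see why. A minimal sketch (the message is delivered to the cancelled coroutine itself; in 3.9.0 it is not re-attached when the awaiter observes the task's own CancelledError):

    import asyncio

    async def worker():
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError as exc:
            print('cancelled with:', exc.args)  # ('shutting down',)
            raise                               # stay cancelled

    async def main():
        task = asyncio.create_task(worker())
        await asyncio.sleep(0)                  # let worker start waiting
        task.cancel(msg='shutting down')
        try:
            await task
        except asyncio.CancelledError:
            print(task.cancelled())             # True

    asyncio.run(main())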
diff --git a/x64/Lib/asyncio/threads.py b/x64/Lib/asyncio/threads.py
new file mode 100644
index 0000000..34b7513
--- /dev/null
+++ b/x64/Lib/asyncio/threads.py
@@ -0,0 +1,25 @@
+"""High-level support for working with threads in asyncio"""
+
+import functools
+import contextvars
+
+from . import events
+
+
+__all__ = "to_thread",
+
+
+async def to_thread(func, /, *args, **kwargs):
+ """Asynchronously run function *func* in a separate thread.
+
+ Any *args and **kwargs supplied for this function are directly passed
+ to *func*. Also, the current :class:`contextvars.Context` is propagated,
+ allowing context variables from the main thread to be accessed in the
+ separate thread.
+
+ Return a coroutine that can be awaited to get the eventual result of *func*.
+ """
+ loop = events.get_running_loop()
+ ctx = contextvars.copy_context()
+ func_call = functools.partial(ctx.run, func, *args, **kwargs)
+ return await loop.run_in_executor(None, func_call)
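asyncio.to_thread() is the new public entry point for pushing a blocking call onto the default executor while keeping the caller's contextvars. Typical use:

    import asyncio
    import time

    def blocking_io(n):
        time.sleep(0.1)       # stands in for any blocking call
        return n * 2

    async def main():
        # The event loop stays responsive while blocking_io runs in a
        # worker thread; args and kwargs pass straight through.
        print(await asyncio.to_thread(blocking_io, 21))   # 42

    asyncio.run(main())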
diff --git a/x64/Lib/asyncio/transports.py b/x64/Lib/asyncio/transports.py
index 513b1c0..45e155c 100644
--- a/x64/Lib/asyncio/transports.py
+++ b/x64/Lib/asyncio/transports.py
@@ -29,8 +29,8 @@ class BaseTransport:
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
- protocol's connection_lost() method will (eventually) called
- with None as its argument.
+ protocol's connection_lost() method will (eventually) be
+ called with None as its argument.
"""
raise NotImplementedError
diff --git a/x64/Lib/asyncio/unix_events.py b/x64/Lib/asyncio/unix_events.py
index 8c0a574..f34a5b4 100644
--- a/x64/Lib/asyncio/unix_events.py
+++ b/x64/Lib/asyncio/unix_events.py
@@ -29,7 +29,7 @@ from .log import logger
__all__ = (
'SelectorEventLoop',
'AbstractChildWatcher', 'SafeChildWatcher',
- 'FastChildWatcher',
+ 'FastChildWatcher', 'PidfdChildWatcher',
'MultiLoopChildWatcher', 'ThreadedChildWatcher',
'DefaultEventLoopPolicy',
)
@@ -101,7 +101,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
try:
# Register a dummy signal handler to ask Python to write the signal
- # number in the wakup file descriptor. _process_self_data() will
+ # number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
@@ -330,7 +330,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
async def _sock_sendfile_native(self, sock, file, offset, count):
try:
os.sendfile
- except AttributeError as exc:
+ except AttributeError:
raise exceptions.SendfileNotAvailableError(
"os.sendfile() is not available")
try:
@@ -339,7 +339,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
raise exceptions.SendfileNotAvailableError("not a regular file")
try:
fsize = os.fstat(fileno).st_size
- except OSError as err:
+ except OSError:
raise exceptions.SendfileNotAvailableError("not a regular file")
blocksize = count if count else fsize
if not blocksize:
@@ -878,6 +878,84 @@ class AbstractChildWatcher:
raise NotImplementedError()
+class PidfdChildWatcher(AbstractChildWatcher):
+ """Child watcher implementation using Linux's pid file descriptors.
+
+ This child watcher polls process file descriptors (pidfds) to await child
+ process termination. In some respects, PidfdChildWatcher is a "Goldilocks"
+ child watcher implementation. It doesn't require signals or threads, doesn't
+ interfere with any processes launched outside the event loop, and scales
+ linearly with the number of subprocesses launched by the event loop. The
+ main disadvantage is that pidfds are specific to Linux, and only work on
+ recent (5.3+) kernels.
+ """
+
+ def __init__(self):
+ self._loop = None
+ self._callbacks = {}
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ pass
+
+ def is_active(self):
+ return self._loop is not None and self._loop.is_running()
+
+ def close(self):
+ self.attach_loop(None)
+
+ def attach_loop(self, loop):
+ if self._loop is not None and loop is None and self._callbacks:
+ warnings.warn(
+ 'A loop is being detached '
+ 'from a child watcher with pending handlers',
+ RuntimeWarning)
+ for pidfd, _, _ in self._callbacks.values():
+ self._loop._remove_reader(pidfd)
+ os.close(pidfd)
+ self._callbacks.clear()
+ self._loop = loop
+
+ def add_child_handler(self, pid, callback, *args):
+ existing = self._callbacks.get(pid)
+ if existing is not None:
+ self._callbacks[pid] = existing[0], callback, args
+ else:
+ pidfd = os.pidfd_open(pid)
+ self._loop._add_reader(pidfd, self._do_wait, pid)
+ self._callbacks[pid] = pidfd, callback, args
+
+ def _do_wait(self, pid):
+ pidfd, callback, args = self._callbacks.pop(pid)
+ self._loop._remove_reader(pidfd)
+ try:
+ _, status = os.waitpid(pid, 0)
+ except ChildProcessError:
+ # The child process is already reaped
+ # (may happen if waitpid() is called elsewhere).
+ returncode = 255
+ logger.warning(
+ "child process pid %d exit status already read: "
+ " will report returncode 255",
+ pid)
+ else:
+ returncode = _compute_returncode(status)
+
+ os.close(pidfd)
+ callback(pid, returncode, *args)
+
+ def remove_child_handler(self, pid):
+ try:
+ pidfd, _, _ = self._callbacks.pop(pid)
+ except KeyError:
+ return False
+ self._loop._remove_reader(pidfd)
+ os.close(pidfd)
+ return True
+
+
def _compute_returncode(status):
if os.WIFSIGNALED(status):
# The child process died because of a signal.
@@ -1346,8 +1424,7 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
with events._lock:
if self._watcher is None: # pragma: no branch
self._watcher = ThreadedChildWatcher()
- if isinstance(threading.current_thread(),
- threading._MainThread):
+ if threading.current_thread() is threading.main_thread():
self._watcher.attach_loop(self._local._loop)
def set_event_loop(self, loop):
@@ -1361,7 +1438,7 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
super().set_event_loop(loop)
if (self._watcher is not None and
- isinstance(threading.current_thread(), threading._MainThread)):
+ threading.current_thread() is threading.main_thread()):
self._watcher.attach_loop(loop)
def get_child_watcher(self):
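PidfdChildWatcher is opt-in in 3.9; nothing selects it automatically. A sketch of how it would be enabled, guarded for the Linux 5.3+ requirement called out in the docstring:

    import asyncio
    import os
    import sys

    async def main():
        proc = await asyncio.create_subprocess_exec(
            sys.executable, '-c', 'print("hello from child")')
        return await proc.wait()

    if sys.platform == 'linux' and hasattr(os, 'pidfd_open'):
        asyncio.set_child_watcher(asyncio.PidfdChildWatcher())
    print(asyncio.run(main()))   # 0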
diff --git a/x64/Lib/asyncio/windows_events.py b/x64/Lib/asyncio/windows_events.py
index ac51109..5e7cd79 100644
--- a/x64/Lib/asyncio/windows_events.py
+++ b/x64/Lib/asyncio/windows_events.py
@@ -75,9 +75,9 @@ class _OverlappedFuture(futures.Future):
self._loop.call_exception_handler(context)
self._ov = None
- def cancel(self):
+ def cancel(self, msg=None):
self._cancel_overlapped()
- return super().cancel()
+ return super().cancel(msg=msg)
def set_exception(self, exception):
super().set_exception(exception)
@@ -149,9 +149,9 @@ class _BaseWaitHandleFuture(futures.Future):
self._unregister_wait_cb(None)
- def cancel(self):
+ def cancel(self, msg=None):
self._unregister_wait()
- return super().cancel()
+ return super().cancel(msg=msg)
def set_exception(self, exception):
self._unregister_wait()
@@ -318,8 +318,12 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
if self._self_reading_future is not None:
ov = self._self_reading_future._ov
self._self_reading_future.cancel()
- # self_reading_future was just cancelled so it will never be signalled
- # Unregister it otherwise IocpProactor.close will wait for it forever
+ # self_reading_future was just cancelled so if it hasn't been
+ # finished yet, it never will be (it's possible that it has
+ # already finished and its callback is waiting in the queue,
+ # where it could still happen if the event loop is restarted).
+ # Unregister it otherwise IocpProactor.close will wait for it
+ # forever
if ov is not None:
self._proactor._unregister(ov)
self._self_reading_future = None
@@ -469,7 +473,7 @@ class IocpProactor:
else:
ov.ReadFileInto(conn.fileno(), buf)
except BrokenPipeError:
- return self._result(b'')
+ return self._result(0)
def finish_recv(trans, key, ov):
try:
diff --git a/x64/Lib/asyncore.py b/x64/Lib/asyncore.py
index 0e92be3..ce16f11 100644
--- a/x64/Lib/asyncore.py
+++ b/x64/Lib/asyncore.py
@@ -228,7 +228,7 @@ class dispatcher:
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
- sock.setblocking(0)
+ sock.setblocking(False)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
@@ -280,7 +280,7 @@ class dispatcher:
def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
self.family_and_type = family, type
sock = socket.socket(family, type)
- sock.setblocking(0)
+ sock.setblocking(False)
self.set_socket(sock)
def set_socket(self, sock, map=None):
diff --git a/x64/Lib/base64.py b/x64/Lib/base64.py
index 2e70223..a28109f 100644
--- a/x64/Lib/base64.py
+++ b/x64/Lib/base64.py
@@ -531,28 +531,12 @@ def encodebytes(s):
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
-def encodestring(s):
- """Legacy alias of encodebytes()."""
- import warnings
- warnings.warn("encodestring() is a deprecated alias since 3.1, "
- "use encodebytes()",
- DeprecationWarning, 2)
- return encodebytes(s)
-
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytes object."""
_input_type_check(s)
return binascii.a2b_base64(s)
-def decodestring(s):
- """Legacy alias of decodebytes()."""
- import warnings
- warnings.warn("decodestring() is a deprecated alias since Python 3.1, "
- "use decodebytes()",
- DeprecationWarning, 2)
- return decodebytes(s)
-
# Usable as a script...
def main():
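With the legacy aliases gone, code still calling encodestring()/decodestring() fails with AttributeError on 3.9; the bytes-named functions are a drop-in replacement:

    import base64

    encoded = base64.encodebytes(b'hello world')
    print(encoded)                      # b'aGVsbG8gd29ybGQ=\n'
    print(base64.decodebytes(encoded))  # b'hello world'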
diff --git a/x64/Lib/bdb.py b/x64/Lib/bdb.py
index 18491da..b18a061 100644
--- a/x64/Lib/bdb.py
+++ b/x64/Lib/bdb.py
@@ -611,26 +611,11 @@ class Bdb:
# This method is more useful to debug a single function call.
- def runcall(*args, **kwds):
+ def runcall(self, func, /, *args, **kwds):
"""Debug a single function call.
Return the result of the function call.
"""
- if len(args) >= 2:
- self, func, *args = args
- elif not args:
- raise TypeError("descriptor 'runcall' of 'Bdb' object "
- "needs an argument")
- elif 'func' in kwds:
- func = kwds.pop('func')
- self, *args = args
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('runcall expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
self.reset()
sys.settrace(self.trace_dispatch)
res = None
@@ -642,7 +627,6 @@ class Bdb:
self.quitting = True
sys.settrace(None)
return res
- runcall.__text_signature__ = '($self, func, /, *args, **kwds)'
def set_trace():
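Here (and in cProfile below) the hand-rolled *args unpacking is replaced by a real positional-only parameter (PEP 570), so func can no longer collide with a caller's keyword of the same name. A toy illustration of the signature's effect (the Runner class is hypothetical):

    class Runner:
        def runcall(self, func, /, *args, **kwds):
            # 'func' is positional-only: a 'func' keyword lands in kwds.
            return func(*args, **kwds)

    def show(**kwds):
        return sorted(kwds)

    print(Runner().runcall(show, func='just a keyword'))   # ['func']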
diff --git a/x64/Lib/binhex.py b/x64/Lib/binhex.py
index 56b5f85..ace5217 100644
--- a/x64/Lib/binhex.py
+++ b/x64/Lib/binhex.py
@@ -21,10 +21,16 @@ hexbin(inputfilename, outputfilename)
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
+import binascii
+import contextlib
import io
import os
import struct
-import binascii
+import warnings
+
+warnings.warn('the binhex module is deprecated', DeprecationWarning,
+ stacklevel=2)
+
__all__ = ["binhex","hexbin","Error"]
@@ -76,6 +82,16 @@ class openrsrc:
def close(self):
pass
+
+# DeprecationWarning is already emitted on "import binhex". There is no need
+# to repeat the warning at each call to deprecated binascii functions.
+@contextlib.contextmanager
+def _ignore_deprecation_warning():
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', '', DeprecationWarning)
+ yield
+
+
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
@@ -93,23 +109,25 @@ class _Hqxcoderengine:
self.data = self.data[todo:]
if not data:
return
- self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
+ with _ignore_deprecation_warning():
+ self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata) - self.linelen:
last = first + self.linelen
- self.ofp.write(self.hqxdata[first:last] + b'\n')
+ self.ofp.write(self.hqxdata[first:last] + b'\r')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
- self.ofp.write(self.hqxdata + b':\n')
+ self.ofp.write(self.hqxdata + b':\r')
def close(self):
if self.data:
- self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
+ with _ignore_deprecation_warning():
+ self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
@@ -125,13 +143,15 @@ class _Rlecoderengine:
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
- rledata = binascii.rlecode_hqx(self.data)
+ with _ignore_deprecation_warning():
+ rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = b''
def close(self):
if self.data:
- rledata = binascii.rlecode_hqx(self.data)
+ with _ignore_deprecation_warning():
+ rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
@@ -276,7 +296,8 @@ class _Hqxdecoderengine:
#
while True:
try:
- decdatacur, self.eof = binascii.a2b_hqx(data)
+ with _ignore_deprecation_warning():
+ decdatacur, self.eof = binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
@@ -312,8 +333,9 @@ class _Rledecoderengine:
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
if self.ifp.eof:
- self.post_buffer = self.post_buffer + \
- binascii.rledecode_hqx(self.pre_buffer)
+ with _ignore_deprecation_warning():
+ self.post_buffer = self.post_buffer + \
+ binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = b''
return
@@ -340,8 +362,9 @@ class _Rledecoderengine:
else:
mark = mark - 1
- self.post_buffer = self.post_buffer + \
- binascii.rledecode_hqx(self.pre_buffer[:mark])
+ with _ignore_deprecation_warning():
+ self.post_buffer = self.post_buffer + \
+ binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
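_ignore_deprecation_warning() is a thin wrapper over the standard warnings machinery; the same effect can be had inline wherever a known-deprecated call must stay quiet:

    import warnings

    def noisy():
        warnings.warn('old API', DeprecationWarning, stacklevel=2)
        return 42

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        print(noisy())   # 42, and the warning is suppressed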
diff --git a/x64/Lib/bisect.py b/x64/Lib/bisect.py
index 9786fc9..8f3f6a3 100644
--- a/x64/Lib/bisect.py
+++ b/x64/Lib/bisect.py
@@ -29,6 +29,7 @@ def bisect_right(a, x, lo=0, hi=None):
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
+ # Use __lt__ to match the logic in list.sort() and in heapq
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
@@ -63,6 +64,7 @@ def bisect_left(a, x, lo=0, hi=None):
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
+ # Use __lt__ to match the logic in list.sort() and in heapq
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
diff --git a/x64/Lib/bz2.py b/x64/Lib/bz2.py
index 21e8ff4..ce07ebe 100644
--- a/x64/Lib/bz2.py
+++ b/x64/Lib/bz2.py
@@ -12,7 +12,6 @@ __author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"
from builtins import open as _builtin_open
import io
import os
-import warnings
import _compression
from threading import RLock
@@ -24,8 +23,6 @@ _MODE_READ = 1
# Value 2 no longer used
_MODE_WRITE = 3
-_sentinel = object()
-
class BZ2File(_compression.BaseStream):
@@ -38,7 +35,7 @@ class BZ2File(_compression.BaseStream):
returned as bytes, and data to be written should be given as bytes.
"""
- def __init__(self, filename, mode="r", buffering=_sentinel, compresslevel=9):
+ def __init__(self, filename, mode="r", *, compresslevel=9):
"""Open a bzip2-compressed file.
If filename is a str, bytes, or PathLike object, it gives the
@@ -49,8 +46,6 @@ class BZ2File(_compression.BaseStream):
'x' for creating exclusively, or 'a' for appending. These can
equivalently be given as 'rb', 'wb', 'xb', and 'ab'.
- buffering is ignored since Python 3.0. Its use is deprecated.
-
If mode is 'w', 'x' or 'a', compresslevel can be a number between 1
and 9 specifying the level of compression: 1 produces the least
compression, and 9 (default) produces the most compression.
@@ -65,12 +60,6 @@ class BZ2File(_compression.BaseStream):
self._closefp = False
self._mode = _MODE_CLOSED
- if buffering is not _sentinel:
- warnings.warn("Use of 'buffering' argument is deprecated and ignored "
- "since Python 3.0.",
- DeprecationWarning,
- stacklevel=2)
-
if not (1 <= compresslevel <= 9):
raise ValueError("compresslevel must be between 1 and 9")
diff --git a/x64/Lib/cProfile.py b/x64/Lib/cProfile.py
index 369d02e..59b4699 100644
--- a/x64/Lib/cProfile.py
+++ b/x64/Lib/cProfile.py
@@ -103,28 +103,12 @@ class Profile(_lsprof.Profiler):
return self
# This method is more useful to profile a single function call.
- def runcall(*args, **kw):
- if len(args) >= 2:
- self, func, *args = args
- elif not args:
- raise TypeError("descriptor 'runcall' of 'Profile' object "
- "needs an argument")
- elif 'func' in kw:
- func = kw.pop('func')
- self, *args = args
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('runcall expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
+ def runcall(self, func, /, *args, **kw):
self.enable()
try:
return func(*args, **kw)
finally:
self.disable()
- runcall.__text_signature__ = '($self, func, /, *args, **kw)'
def __enter__(self):
self.enable()
@@ -168,6 +152,11 @@ def main():
(options, args) = parser.parse_args()
sys.argv[:] = args
+ # The script that we're profiling may chdir, so capture the absolute path
+ # to the output file at startup.
+ if options.outfile is not None:
+ options.outfile = os.path.abspath(options.outfile)
+
if len(args) > 0:
if options.module:
code = "run_module(modname, run_name='__main__')"
diff --git a/x64/Lib/cgi.py b/x64/Lib/cgi.py
index c22c71b..77ab703 100644
--- a/x64/Lib/cgi.py
+++ b/x64/Lib/cgi.py
@@ -200,7 +200,10 @@ def parse_multipart(fp, pdict, encoding="utf-8", errors="replace"):
ctype = "multipart/form-data; boundary={}".format(boundary)
headers = Message()
headers.set_type(ctype)
- headers['Content-Length'] = pdict['CONTENT-LENGTH']
+ try:
+ headers['Content-Length'] = pdict['CONTENT-LENGTH']
+ except KeyError:
+ pass
fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors,
environ={'REQUEST_METHOD': 'POST'})
return {k: fs.getlist(k) for k in fs}
@@ -736,7 +739,8 @@ class FieldStorage:
last_line_lfend = True
_read = 0
while 1:
- if self.limit is not None and _read >= self.limit:
+
+ if self.limit is not None and 0 <= self.limit <= _read:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
diff --git a/x64/Lib/codecs.py b/x64/Lib/codecs.py
index 21c45a7..7f23e97 100644
--- a/x64/Lib/codecs.py
+++ b/x64/Lib/codecs.py
@@ -905,11 +905,16 @@ def open(filename, mode='r', encoding=None, errors='strict', buffering=-1):
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
- info = lookup(encoding)
- srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
- # Add attributes to simplify introspection
- srw.encoding = encoding
- return srw
+
+ try:
+ info = lookup(encoding)
+ srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
+ # Add attributes to simplify introspection
+ srw.encoding = encoding
+ return srw
+ except:
+ file.close()
+ raise
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
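The try/except wrapper above stops codecs.open() from leaking the freshly opened file object when the encoding lookup fails. Observable behaviour, assuming a writable working directory:

    import codecs

    try:
        codecs.open('demo.txt', 'w', encoding='no-such-codec')
    except LookupError:
        # the underlying file object is now closed rather than leaked
        print('unknown encoding rejected cleanly')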
diff --git a/x64/Lib/codeop.py b/x64/Lib/codeop.py
index 0fa677f..4c10470 100644
--- a/x64/Lib/codeop.py
+++ b/x64/Lib/codeop.py
@@ -57,6 +57,7 @@ Compile():
"""
import __future__
+import warnings
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
@@ -80,18 +81,23 @@ def _maybe_compile(compiler, source, filename, symbol):
try:
code = compiler(source, filename, symbol)
- except SyntaxError as err:
+ except SyntaxError:
pass
- try:
- code1 = compiler(source + "\n", filename, symbol)
- except SyntaxError as e:
- err1 = e
+ # Catch syntax warnings after the first compile
+ # to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
- try:
- code2 = compiler(source + "\n\n", filename, symbol)
- except SyntaxError as e:
- err2 = e
+ try:
+ code1 = compiler(source + "\n", filename, symbol)
+ except SyntaxError as e:
+ err1 = e
+
+ try:
+ code2 = compiler(source + "\n\n", filename, symbol)
+ except SyntaxError as e:
+ err2 = e
try:
if code:
@@ -112,7 +118,8 @@ def compile_command(source, filename="<input>", symbol="single"):
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
- symbol -- optional grammar start symbol; "single" (default) or "eval"
+ symbol -- optional grammar start symbol; "single" (default), "exec"
+ or "eval"
Return value / exceptions raised:
@@ -133,7 +140,7 @@ class Compile:
self.flags = PyCF_DONT_IMPLY_DEDENT
def __call__(self, source, filename, symbol):
- codeob = compile(source, filename, symbol, self.flags, 1)
+ codeob = compile(source, filename, symbol, self.flags, True)
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
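compile_command() keeps its contract of returning None for input that merely looks unfinished, and the docstring now admits 'exec' as a start symbol. For example:

    import codeop

    # complete input compiles to a code object
    print(codeop.compile_command('x = 1', symbol='exec') is not None)  # True
    # incomplete input returns None so a REPL can keep reading lines
    print(codeop.compile_command('if True:', symbol='exec'))           # None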
diff --git a/x64/Lib/collections/__init__.py b/x64/Lib/collections/__init__.py
index cadf1c7..bc69a67 100644
--- a/x64/Lib/collections/__init__.py
+++ b/x64/Lib/collections/__init__.py
@@ -14,17 +14,30 @@ list, set, and tuple.
'''
-__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
- 'UserString', 'Counter', 'OrderedDict', 'ChainMap']
+__all__ = [
+ 'ChainMap',
+ 'Counter',
+ 'OrderedDict',
+ 'UserDict',
+ 'UserList',
+ 'UserString',
+ 'defaultdict',
+ 'deque',
+ 'namedtuple',
+]
import _collections_abc
-from operator import itemgetter as _itemgetter, eq as _eq
-from keyword import iskeyword as _iskeyword
-import sys as _sys
import heapq as _heapq
-from _weakref import proxy as _proxy
-from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
+import sys as _sys
+
+from itertools import chain as _chain
+from itertools import repeat as _repeat
+from itertools import starmap as _starmap
+from keyword import iskeyword as _iskeyword
+from operator import eq as _eq
+from operator import itemgetter as _itemgetter
from reprlib import recursive_repr as _recursive_repr
+from _weakref import proxy as _proxy
try:
from _collections import deque
@@ -48,12 +61,13 @@ def __getattr__(name):
import warnings
warnings.warn("Using or importing the ABCs from 'collections' instead "
"of from 'collections.abc' is deprecated since Python 3.3, "
- "and in 3.9 it will stop working",
+ "and in 3.10 it will stop working",
DeprecationWarning, stacklevel=2)
globals()[name] = obj
return obj
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
+
################################################################################
### OrderedDict
################################################################################
@@ -293,6 +307,24 @@ class OrderedDict(dict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def __or__(self, other):
+ if not isinstance(other, dict):
+ return NotImplemented
+ new = self.__class__(self)
+ new.update(other)
+ return new
+
+ def __ror__(self, other):
+ if not isinstance(other, dict):
+ return NotImplemented
+ new = self.__class__(other)
+ new.update(self)
+ return new
+
try:
from _collections import OrderedDict
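These methods wire OrderedDict into PEP 584's dict union operators while preserving the subclass type. For instance:

    from collections import OrderedDict

    a = OrderedDict(x=1, y=2)
    b = {'y': 20, 'z': 30}
    print(a | b)   # OrderedDict([('x', 1), ('y', 20), ('z', 30)])
    a |= b         # in-place union via update()
    print(a)       # OrderedDict([('x', 1), ('y', 20), ('z', 30)])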
@@ -381,18 +413,23 @@ def namedtuple(typename, field_names, *, rename=False, defaults=None, module=Non
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
- arg_list = repr(field_names).replace("'", "")[1:-1]
+ arg_list = ', '.join(field_names)
+ if num_fields == 1:
+ arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
- s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))'
- namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'}
- # Note: exec() has the side-effect of interning the field names
- exec(s, namespace)
- __new__ = namespace['__new__']
+ namespace = {
+ '_tuple_new': tuple_new,
+ '__builtins__': None,
+ '__name__': f'namedtuple_{typename}',
+ }
+ code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
+ __new__ = eval(code, namespace)
+ __new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@@ -429,8 +466,14 @@ def namedtuple(typename, field_names, *, rename=False, defaults=None, module=Non
return _tuple(self)
# Modify function metadata to help with introspection and debugging
- for method in (__new__, _make.__func__, _replace,
- __repr__, _asdict, __getnewargs__):
+ for method in (
+ __new__,
+ _make.__func__,
+ _replace,
+ __repr__,
+ _asdict,
+ __getnewargs__,
+ ):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
@@ -440,8 +483,6 @@ def namedtuple(typename, field_names, *, rename=False, defaults=None, module=Non
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
- # alternate spelling for backward compatibility
- '_fields_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
@@ -548,7 +589,7 @@ class Counter(dict):
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
- super(Counter, self).__init__()
+ super().__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
@@ -632,7 +673,8 @@ class Counter(dict):
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
- super(Counter, self).update(iterable) # fast path when counter is empty
+ # fast path when counter is empty
+ super().update(iterable)
else:
_count_elements(self, iterable)
if kwds:
@@ -679,13 +721,14 @@ class Counter(dict):
def __repr__(self):
if not self:
- return '%s()' % self.__class__.__name__
+ return f'{self.__class__.__name__}()'
try:
- items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
- return '%s({%s})' % (self.__class__.__name__, items)
+ # dict() preserves the ordering returned by most_common()
+ d = dict(self.most_common())
except TypeError:
# handle case where values are not orderable
- return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
+ d = dict(self)
+ return f'{self.__class__.__name__}({d!r})'
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
@@ -695,6 +738,13 @@ class Counter(dict):
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
+ #
+ # Rich comparison operators for multiset subset and superset tests
+ # are deliberately omitted due to semantic conflicts with the
+ # existing inherited dict equality method. Subset and superset
+ # semantics ignore zero counts and require that p≤q ∧ p≥q → p=q;
+ # however, that would not be the case for p=Counter(a=1, b=0)
+ # and q=Counter(a=1) where the dictionaries are not equal.
def __add__(self, other):
'''Add counts from two counters.
@@ -899,7 +949,7 @@ class ChainMap(_collections_abc.MutableMapping):
def __iter__(self):
d = {}
for mapping in reversed(self.maps):
- d.update(mapping) # reuses stored hash values if possible
+ d.update(dict.fromkeys(mapping)) # reuses stored hash values if possible
return iter(d)
def __contains__(self, key):
@@ -943,7 +993,7 @@ class ChainMap(_collections_abc.MutableMapping):
try:
del self.maps[0][key]
except KeyError:
- raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+ raise KeyError(f'Key not found in the first mapping: {key!r}')
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
@@ -957,12 +1007,31 @@ class ChainMap(_collections_abc.MutableMapping):
try:
return self.maps[0].pop(key, *args)
except KeyError:
- raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+ raise KeyError(f'Key not found in the first mapping: {key!r}')
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
+ def __ior__(self, other):
+ self.maps[0].update(other)
+ return self
+
+ def __or__(self, other):
+ if not isinstance(other, _collections_abc.Mapping):
+ return NotImplemented
+ m = self.copy()
+ m.maps[0].update(other)
+ return m
+
+ def __ror__(self, other):
+ if not isinstance(other, _collections_abc.Mapping):
+ return NotImplemented
+ m = dict(other)
+ for child in reversed(self.maps):
+ m.update(child)
+ return self.__class__(m)
+
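ChainMap gets the same PEP 584 operators with chain-aware semantics: | copies the map and merges into the first mapping, while |= writes through to maps[0]. For instance:

    from collections import ChainMap

    child, parent = {'a': 1}, {'a': 0, 'b': 2}
    cm = ChainMap(child, parent)
    print((cm | {'c': 3})['c'])   # 3; 'child' itself is untouched
    cm |= {'a': 99}               # in-place: updates maps[0]
    print(child)                  # {'a': 99}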
################################################################################
### UserDict
@@ -971,38 +1040,29 @@ class ChainMap(_collections_abc.MutableMapping):
class UserDict(_collections_abc.MutableMapping):
# Start by filling-out the abstract methods
- def __init__(*args, **kwargs):
- if not args:
- raise TypeError("descriptor '__init__' of 'UserDict' object "
- "needs an argument")
- self, *args = args
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- if args:
- dict = args[0]
- elif 'dict' in kwargs:
- dict = kwargs.pop('dict')
- import warnings
- warnings.warn("Passing 'dict' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- dict = None
+ def __init__(self, dict=None, /, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if kwargs:
self.update(kwargs)
- __init__.__text_signature__ = '($self, dict=None, /, **kwargs)'
- def __len__(self): return len(self.data)
+ def __len__(self):
+ return len(self.data)
+
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
- def __setitem__(self, key, item): self.data[key] = item
- def __delitem__(self, key): del self.data[key]
+
+ def __setitem__(self, key, item):
+ self.data[key] = item
+
+ def __delitem__(self, key):
+ del self.data[key]
+
def __iter__(self):
return iter(self.data)
@@ -1011,7 +1071,30 @@ class UserDict(_collections_abc.MutableMapping):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
- def __repr__(self): return repr(self.data)
+ def __repr__(self):
+ return repr(self.data)
+
+ def __or__(self, other):
+ if isinstance(other, UserDict):
+ return self.__class__(self.data | other.data)
+ if isinstance(other, dict):
+ return self.__class__(self.data | other)
+ return NotImplemented
+
+ def __ror__(self, other):
+ if isinstance(other, UserDict):
+ return self.__class__(other.data | self.data)
+ if isinstance(other, dict):
+ return self.__class__(other | self.data)
+ return NotImplemented
+
+ def __ior__(self, other):
+ if isinstance(other, UserDict):
+ self.data |= other.data
+ else:
+ self.data |= other
+ return self
+
def __copy__(self):
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
@@ -1040,13 +1123,13 @@ class UserDict(_collections_abc.MutableMapping):
return d
-
################################################################################
### UserList
################################################################################
class UserList(_collections_abc.MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
+
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
@@ -1057,35 +1140,60 @@ class UserList(_collections_abc.MutableSequence):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
- def __repr__(self): return repr(self.data)
- def __lt__(self, other): return self.data < self.__cast(other)
- def __le__(self, other): return self.data <= self.__cast(other)
- def __eq__(self, other): return self.data == self.__cast(other)
- def __gt__(self, other): return self.data > self.__cast(other)
- def __ge__(self, other): return self.data >= self.__cast(other)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __lt__(self, other):
+ return self.data < self.__cast(other)
+
+ def __le__(self, other):
+ return self.data <= self.__cast(other)
+
+ def __eq__(self, other):
+ return self.data == self.__cast(other)
+
+ def __gt__(self, other):
+ return self.data > self.__cast(other)
+
+ def __ge__(self, other):
+ return self.data >= self.__cast(other)
+
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
- def __contains__(self, item): return item in self.data
- def __len__(self): return len(self.data)
+
+ def __contains__(self, item):
+ return item in self.data
+
+ def __len__(self):
+ return len(self.data)
+
def __getitem__(self, i):
if isinstance(i, slice):
return self.__class__(self.data[i])
else:
return self.data[i]
- def __setitem__(self, i, item): self.data[i] = item
- def __delitem__(self, i): del self.data[i]
+
+ def __setitem__(self, i, item):
+ self.data[i] = item
+
+ def __delitem__(self, i):
+ del self.data[i]
+
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
+
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
+
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
@@ -1094,28 +1202,53 @@ class UserList(_collections_abc.MutableSequence):
else:
self.data += list(other)
return self
+
def __mul__(self, n):
- return self.__class__(self.data*n)
+ return self.__class__(self.data * n)
+
__rmul__ = __mul__
+
def __imul__(self, n):
self.data *= n
return self
+
def __copy__(self):
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"][:]
return inst
- def append(self, item): self.data.append(item)
- def insert(self, i, item): self.data.insert(i, item)
- def pop(self, i=-1): return self.data.pop(i)
- def remove(self, item): self.data.remove(item)
- def clear(self): self.data.clear()
- def copy(self): return self.__class__(self)
- def count(self, item): return self.data.count(item)
- def index(self, item, *args): return self.data.index(item, *args)
- def reverse(self): self.data.reverse()
- def sort(self, /, *args, **kwds): self.data.sort(*args, **kwds)
+
+ def append(self, item):
+ self.data.append(item)
+
+ def insert(self, i, item):
+ self.data.insert(i, item)
+
+ def pop(self, i=-1):
+ return self.data.pop(i)
+
+ def remove(self, item):
+ self.data.remove(item)
+
+ def clear(self):
+ self.data.clear()
+
+ def copy(self):
+ return self.__class__(self)
+
+ def count(self, item):
+ return self.data.count(item)
+
+ def index(self, item, *args):
+ return self.data.index(item, *args)
+
+ def reverse(self):
+ self.data.reverse()
+
+ def sort(self, /, *args, **kwds):
+ self.data.sort(*args, **kwds)
+
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
@@ -1123,12 +1256,12 @@ class UserList(_collections_abc.MutableSequence):
self.data.extend(other)
-
################################################################################
### UserString
################################################################################
class UserString(_collections_abc.Sequence):
+
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
@@ -1136,12 +1269,25 @@ class UserString(_collections_abc.Sequence):
self.data = seq.data[:]
else:
self.data = str(seq)
- def __str__(self): return str(self.data)
- def __repr__(self): return repr(self.data)
- def __int__(self): return int(self.data)
- def __float__(self): return float(self.data)
- def __complex__(self): return complex(self.data)
- def __hash__(self): return hash(self.data)
+
+ def __str__(self):
+ return str(self.data)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __int__(self):
+ return int(self.data)
+
+ def __float__(self):
+ return float(self.data)
+
+ def __complex__(self):
+ return complex(self.data)
+
+ def __hash__(self):
+ return hash(self.data)
+
def __getnewargs__(self):
return (self.data[:],)
@@ -1149,18 +1295,22 @@ class UserString(_collections_abc.Sequence):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
+
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
+
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
+
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
+
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
@@ -1171,102 +1321,188 @@ class UserString(_collections_abc.Sequence):
char = char.data
return char in self.data
- def __len__(self): return len(self.data)
- def __getitem__(self, index): return self.__class__(self.data[index])
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, index):
+ return self.__class__(self.data[index])
+
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
+
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
+
def __mul__(self, n):
- return self.__class__(self.data*n)
+ return self.__class__(self.data * n)
+
__rmul__ = __mul__
+
def __mod__(self, args):
return self.__class__(self.data % args)
+
def __rmod__(self, template):
return self.__class__(str(template) % self)
+
# the following methods are defined in alphabetical order:
- def capitalize(self): return self.__class__(self.data.capitalize())
+ def capitalize(self):
+ return self.__class__(self.data.capitalize())
+
def casefold(self):
return self.__class__(self.data.casefold())
+
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
+
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
+
+ def removeprefix(self, prefix, /):
+ if isinstance(prefix, UserString):
+ prefix = prefix.data
+ return self.__class__(self.data.removeprefix(prefix))
+
+ def removesuffix(self, suffix, /):
+ if isinstance(suffix, UserString):
+ suffix = suffix.data
+ return self.__class__(self.data.removesuffix(suffix))
+
def encode(self, encoding='utf-8', errors='strict'):
encoding = 'utf-8' if encoding is None else encoding
errors = 'strict' if errors is None else errors
return self.data.encode(encoding, errors)
+
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
+
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
+
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
+
def format(self, /, *args, **kwds):
return self.data.format(*args, **kwds)
+
def format_map(self, mapping):
return self.data.format_map(mapping)
+
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
- def isalpha(self): return self.data.isalpha()
- def isalnum(self): return self.data.isalnum()
- def isascii(self): return self.data.isascii()
- def isdecimal(self): return self.data.isdecimal()
- def isdigit(self): return self.data.isdigit()
- def isidentifier(self): return self.data.isidentifier()
- def islower(self): return self.data.islower()
- def isnumeric(self): return self.data.isnumeric()
- def isprintable(self): return self.data.isprintable()
- def isspace(self): return self.data.isspace()
- def istitle(self): return self.data.istitle()
- def isupper(self): return self.data.isupper()
- def join(self, seq): return self.data.join(seq)
+
+ def isalpha(self):
+ return self.data.isalpha()
+
+ def isalnum(self):
+ return self.data.isalnum()
+
+ def isascii(self):
+ return self.data.isascii()
+
+ def isdecimal(self):
+ return self.data.isdecimal()
+
+ def isdigit(self):
+ return self.data.isdigit()
+
+ def isidentifier(self):
+ return self.data.isidentifier()
+
+ def islower(self):
+ return self.data.islower()
+
+ def isnumeric(self):
+ return self.data.isnumeric()
+
+ def isprintable(self):
+ return self.data.isprintable()
+
+ def isspace(self):
+ return self.data.isspace()
+
+ def istitle(self):
+ return self.data.istitle()
+
+ def isupper(self):
+ return self.data.isupper()
+
+ def join(self, seq):
+ return self.data.join(seq)
+
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
- def lower(self): return self.__class__(self.data.lower())
- def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+
+ def lower(self):
+ return self.__class__(self.data.lower())
+
+ def lstrip(self, chars=None):
+ return self.__class__(self.data.lstrip(chars))
+
maketrans = str.maketrans
+
def partition(self, sep):
return self.data.partition(sep)
+
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
+
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
+
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
+
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
+
def rpartition(self, sep):
return self.data.rpartition(sep)
+
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
+
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
+
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
- def splitlines(self, keepends=False): return self.data.splitlines(keepends)
+
+ def splitlines(self, keepends=False):
+ return self.data.splitlines(keepends)
+
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
- def strip(self, chars=None): return self.__class__(self.data.strip(chars))
- def swapcase(self): return self.__class__(self.data.swapcase())
- def title(self): return self.__class__(self.data.title())
+
+ def strip(self, chars=None):
+ return self.__class__(self.data.strip(chars))
+
+ def swapcase(self):
+ return self.__class__(self.data.swapcase())
+
+ def title(self):
+ return self.__class__(self.data.title())
+
def translate(self, *args):
return self.__class__(self.data.translate(*args))
- def upper(self): return self.__class__(self.data.upper())
- def zfill(self, width): return self.__class__(self.data.zfill(width))
+
+ def upper(self):
+ return self.__class__(self.data.upper())
+
+ def zfill(self, width):
+ return self.__class__(self.data.zfill(width))
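The collections changes above add the PEP 584 union operators (`|`, `|=`) to UserDict and mirror the new 3.9 str methods removeprefix()/removesuffix() on UserString. A minimal sketch of the resulting behavior (the literals are illustrative only):

    from collections import UserDict, UserString

    d = UserDict({"a": 1})
    merged = d | {"b": 2}          # __or__ returns a new UserDict
    d |= {"c": 3}                  # __ior__ updates d.data in place
    print(type(merged).__name__, dict(merged))   # UserDict {'a': 1, 'b': 2}

    s = UserString("v3.9.0")
    print(s.removeprefix("v"))     # 3.9.0  (still a UserString)
    print(s.removesuffix(".0"))    # v3.9   (still a UserString)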
diff --git a/x64/Lib/compileall.py b/x64/Lib/compileall.py
index 49306d9..fe7f450 100644
--- a/x64/Lib/compileall.py
+++ b/x64/Lib/compileall.py
@@ -15,12 +15,14 @@ import sys
import importlib.util
import py_compile
import struct
+import filecmp
from functools import partial
+from pathlib import Path
__all__ = ["compile_dir","compile_file","compile_path"]
-def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0):
+def _walk_dir(dir, maxlevels, quiet=0):
if quiet < 2 and isinstance(dir, os.PathLike):
dir = os.fspath(dir)
if not quiet:
@@ -36,37 +38,49 @@ def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0):
if name == '__pycache__':
continue
fullname = os.path.join(dir, name)
- if ddir is not None:
- dfile = os.path.join(ddir, name)
- else:
- dfile = None
if not os.path.isdir(fullname):
yield fullname
elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
os.path.isdir(fullname) and not os.path.islink(fullname)):
- yield from _walk_dir(fullname, ddir=dfile,
- maxlevels=maxlevels - 1, quiet=quiet)
+ yield from _walk_dir(fullname, maxlevels=maxlevels - 1,
+ quiet=quiet)
-def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
- quiet=0, legacy=False, optimize=-1, workers=1,
- invalidation_mode=None):
+def compile_dir(dir, maxlevels=None, ddir=None, force=False,
+ rx=None, quiet=0, legacy=False, optimize=-1, workers=1,
+ invalidation_mode=None, *, stripdir=None,
+ prependdir=None, limit_sl_dest=None, hardlink_dupes=False):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
- maxlevels: maximum recursion level (default 10)
+ maxlevels: maximum recursion level (default `sys.getrecursionlimit()`)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: full output with False or 0, errors only with 1,
no output with 2
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
- optimize: optimization level or -1 for level of the interpreter
+ optimize: int or list of optimization levels or -1 for level of
+ the interpreter. Multiple levels lead to multiple compiled
+ files, each with one optimization level.
workers: maximum number of parallel workers
invalidation_mode: how the up-to-dateness of the pyc will be checked
+ stripdir: part of path to left-strip from source file path
+ prependdir: path to prepend to beginning of original file path, applied
+ after stripdir
+ limit_sl_dest: ignore symlinks if they are pointing outside of
+ the defined path
+ hardlink_dupes: hardlink duplicated pyc files
"""
ProcessPoolExecutor = None
+ if ddir is not None and (stripdir is not None or prependdir is not None):
+ raise ValueError(("Destination dir (ddir) cannot be used "
+ "in combination with stripdir or prependdir"))
+ if ddir is not None:
+ stripdir = dir
+ prependdir = ddir
+ ddir = None
if workers < 0:
raise ValueError('workers must be greater or equal to 0')
if workers != 1:
@@ -76,8 +90,9 @@ def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
from concurrent.futures import ProcessPoolExecutor
except ImportError:
workers = 1
- files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels,
- ddir=ddir)
+ if maxlevels is None:
+ maxlevels = sys.getrecursionlimit()
+ files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels)
success = True
if workers != 1 and ProcessPoolExecutor is not None:
# If workers == 0, let ProcessPoolExecutor choose
@@ -88,19 +103,27 @@ def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
rx=rx, quiet=quiet,
legacy=legacy,
optimize=optimize,
- invalidation_mode=invalidation_mode),
+ invalidation_mode=invalidation_mode,
+ stripdir=stripdir,
+ prependdir=prependdir,
+ limit_sl_dest=limit_sl_dest,
+ hardlink_dupes=hardlink_dupes),
files)
success = min(results, default=True)
else:
for file in files:
if not compile_file(file, ddir, force, rx, quiet,
- legacy, optimize, invalidation_mode):
+ legacy, optimize, invalidation_mode,
+ stripdir=stripdir, prependdir=prependdir,
+ limit_sl_dest=limit_sl_dest,
+ hardlink_dupes=hardlink_dupes):
success = False
return success
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
legacy=False, optimize=-1,
- invalidation_mode=None):
+ invalidation_mode=None, *, stripdir=None, prependdir=None,
+ limit_sl_dest=None, hardlink_dupes=False):
"""Byte-compile one file.
Arguments (only fullname is required):
@@ -112,32 +135,85 @@ def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
quiet: full output with False or 0, errors only with 1,
no output with 2
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
- optimize: optimization level or -1 for level of the interpreter
+ optimize: int or list of optimization levels or -1 for level of
+ the interpreter. Multiple levels lead to multiple compiled
+ files, each with one optimization level.
invalidation_mode: how the up-to-dateness of the pyc will be checked
+ stripdir: part of path to left-strip from source file path
+ prependdir: path to prepend to beginning of original file path, applied
+ after stripdir
+ limit_sl_dest: ignore symlinks if they are pointing outside of
+ the defined path.
+ hardlink_dupes: hardlink duplicated pyc files
"""
+
+ if ddir is not None and (stripdir is not None or prependdir is not None):
+ raise ValueError(("Destination dir (ddir) cannot be used "
+ "in combination with stripdir or prependdir"))
+
success = True
if quiet < 2 and isinstance(fullname, os.PathLike):
fullname = os.fspath(fullname)
name = os.path.basename(fullname)
+
+ dfile = None
+
if ddir is not None:
dfile = os.path.join(ddir, name)
- else:
- dfile = None
+
+ if stripdir is not None:
+ fullname_parts = fullname.split(os.path.sep)
+ stripdir_parts = stripdir.split(os.path.sep)
+ ddir_parts = list(fullname_parts)
+
+ for spart, opart in zip(stripdir_parts, fullname_parts):
+ if spart == opart:
+ ddir_parts.remove(spart)
+
+ dfile = os.path.join(*ddir_parts)
+
+ if prependdir is not None:
+ if dfile is None:
+ dfile = os.path.join(prependdir, fullname)
+ else:
+ dfile = os.path.join(prependdir, dfile)
+
+ if isinstance(optimize, int):
+ optimize = [optimize]
+
+ # Use set() to remove duplicates.
+ # Use sorted() to create pyc files in a deterministic order.
+ optimize = sorted(set(optimize))
+
+ if hardlink_dupes and len(optimize) < 2:
+ raise ValueError("Hardlinking of duplicated bytecode makes sense "
+ "only for more than one optimization level")
+
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
+
+ if limit_sl_dest is not None and os.path.islink(fullname):
+ if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents:
+ return success
+
+ opt_cfiles = {}
+
if os.path.isfile(fullname):
- if legacy:
- cfile = fullname + 'c'
- else:
- if optimize >= 0:
- opt = optimize if optimize >= 1 else ''
- cfile = importlib.util.cache_from_source(
- fullname, optimization=opt)
+ for opt_level in optimize:
+ if legacy:
+ opt_cfiles[opt_level] = fullname + 'c'
else:
- cfile = importlib.util.cache_from_source(fullname)
- cache_dir = os.path.dirname(cfile)
+ if opt_level >= 0:
+ opt = opt_level if opt_level >= 1 else ''
+ cfile = (importlib.util.cache_from_source(
+ fullname, optimization=opt))
+ opt_cfiles[opt_level] = cfile
+ else:
+ cfile = importlib.util.cache_from_source(fullname)
+ opt_cfiles[opt_level] = cfile
+
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
@@ -145,18 +221,28 @@ def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
0, mtime)
- with open(cfile, 'rb') as chandle:
- actual = chandle.read(12)
- if expect == actual:
+ for cfile in opt_cfiles.values():
+ with open(cfile, 'rb') as chandle:
+ actual = chandle.read(12)
+ if expect != actual:
+ break
+ else:
return success
except OSError:
pass
if not quiet:
print('Compiling {!r}...'.format(fullname))
try:
- ok = py_compile.compile(fullname, cfile, dfile, True,
- optimize=optimize,
- invalidation_mode=invalidation_mode)
+ for index, opt_level in enumerate(optimize):
+ cfile = opt_cfiles[opt_level]
+ ok = py_compile.compile(fullname, cfile, dfile, True,
+ optimize=opt_level,
+ invalidation_mode=invalidation_mode)
+ if index > 0 and hardlink_dupes:
+ previous_cfile = opt_cfiles[optimize[index - 1]]
+ if filecmp.cmp(cfile, previous_cfile, shallow=False):
+ os.unlink(cfile)
+ os.link(previous_cfile, cfile)
except py_compile.PyCompileError as err:
success = False
if quiet >= 2:
@@ -225,7 +311,7 @@ def main():
parser = argparse.ArgumentParser(
description='Utilities to support installing Python libraries.')
parser.add_argument('-l', action='store_const', const=0,
- default=10, dest='maxlevels',
+ default=None, dest='maxlevels',
help="don't recurse into subdirectories")
parser.add_argument('-r', type=int, dest='recursion',
help=('control the maximum recursion level. '
@@ -243,6 +329,20 @@ def main():
'compile-time tracebacks and in runtime '
'tracebacks in cases where the source file is '
'unavailable'))
+ parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir',
+ default=None,
+ help=('part of path to left-strip from path '
+ 'to source file - for example buildroot. '
+ '`-d` and `-s` options cannot be '
+ 'specified together.'))
+ parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir',
+ default=None,
+ help=('path to add as prefix to path '
+ 'to source file - for example / to make '
+ 'it absolute when some part is removed '
+ 'by `-s` option. '
+ '`-d` and `-p` options cannot be '
+ 'specified together.'))
parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
help=('skip files matching the regular expression; '
'the regexp is searched for in the full path '
@@ -265,6 +365,15 @@ def main():
'"checked-hash" if the SOURCE_DATE_EPOCH '
'environment variable is set, and '
'"timestamp" otherwise.'))
+ parser.add_argument('-o', action='append', type=int, dest='opt_levels',
+ help=('Optimization levels to run compilation with. '
+ 'Default is -1, which uses the optimization level of '
+ 'the Python interpreter itself (specified by -O).'))
+ parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest',
+ help='Ignore symlinks pointing outside of the DIR')
+ parser.add_argument('--hardlink-dupes', action='store_true',
+ dest='hardlink_dupes',
+ help='Hardlink duplicated pyc files')
args = parser.parse_args()
compile_dests = args.compile_dest
@@ -273,12 +382,26 @@ def main():
import re
args.rx = re.compile(args.rx)
+ if args.limit_sl_dest == "":
+ args.limit_sl_dest = None
if args.recursion is not None:
maxlevels = args.recursion
else:
maxlevels = args.maxlevels
+ if args.opt_levels is None:
+ args.opt_levels = [-1]
+
+ if len(args.opt_levels) == 1 and args.hardlink_dupes:
+ parser.error(("Hardlinking of duplicated bytecode makes sense "
+ "only for more than one optimization level."))
+
+ if args.ddir is not None and (
+ args.stripdir is not None or args.prependdir is not None
+ ):
+ parser.error("-d cannot be used in combination with -s or -p")
+
# if flist is provided then load it
if args.flist:
try:
@@ -303,13 +426,23 @@ def main():
if os.path.isfile(dest):
if not compile_file(dest, args.ddir, args.force, args.rx,
args.quiet, args.legacy,
- invalidation_mode=invalidation_mode):
+ invalidation_mode=invalidation_mode,
+ stripdir=args.stripdir,
+ prependdir=args.prependdir,
+ optimize=args.opt_levels,
+ limit_sl_dest=args.limit_sl_dest,
+ hardlink_dupes=args.hardlink_dupes):
success = False
else:
if not compile_dir(dest, maxlevels, args.ddir,
args.force, args.rx, args.quiet,
args.legacy, workers=args.workers,
- invalidation_mode=invalidation_mode):
+ invalidation_mode=invalidation_mode,
+ stripdir=args.stripdir,
+ prependdir=args.prependdir,
+ optimize=args.opt_levels,
+ limit_sl_dest=args.limit_sl_dest,
+ hardlink_dupes=args.hardlink_dupes):
success = False
return success
else:
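Taken together, the new compileall keyword arguments can be exercised as below; the paths are hypothetical, optimize now accepts a list of levels, and hardlink_dupes collapses byte-identical pyc files across those levels (it requires at least two):

    import compileall

    compileall.compile_dir(
        'build/lib',               # hypothetical source tree
        stripdir='build',          # left-strip this prefix from recorded paths
        prependdir='/usr',         # then prepend this one (ddir must stay unset)
        optimize=[0, 1, 2],        # one pyc per optimization level
        hardlink_dupes=True,       # hardlink levels whose bytecode is identical
        quiet=1,
    )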
diff --git a/x64/Lib/concurrent/futures/_base.py b/x64/Lib/concurrent/futures/_base.py
index 6001e3b..00eb548 100644
--- a/x64/Lib/concurrent/futures/_base.py
+++ b/x64/Lib/concurrent/futures/_base.py
@@ -7,6 +7,7 @@ import collections
import logging
import threading
import time
+import types
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
@@ -544,10 +545,12 @@ class Future(object):
self._condition.notify_all()
self._invoke_callbacks()
+ __class_getitem__ = classmethod(types.GenericAlias)
+
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
- def submit(*args, **kwargs):
+ def submit(self, fn, /, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
@@ -556,21 +559,7 @@ class Executor(object):
Returns:
A Future representing the given call.
"""
- if len(args) >= 2:
- pass
- elif not args:
- raise TypeError("descriptor 'submit' of 'Executor' object "
- "needs an argument")
- elif 'fn' in kwargs:
- import warnings
- warnings.warn("Passing 'fn' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('submit expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
raise NotImplementedError()
- submit.__text_signature__ = '($self, fn, /, *args, **kwargs)'
def map(self, fn, *iterables, timeout=None, chunksize=1):
"""Returns an iterator equivalent to map(fn, iter).
@@ -616,7 +605,7 @@ class Executor(object):
future.cancel()
return result_iterator()
- def shutdown(self, wait=True):
+ def shutdown(self, wait=True, *, cancel_futures=False):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
@@ -626,6 +615,9 @@ class Executor(object):
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
+ cancel_futures: If True then shutdown will cancel all pending
+ futures. Futures that are completed or running will not be
+ cancelled.
"""
pass
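Two user-visible effects of the _base.py changes, sketched here with ThreadPoolExecutor (any concrete executor behaves the same way):

    import time
    from concurrent.futures import Future, ThreadPoolExecutor

    f: Future[int]                 # Future is now subscriptable (GenericAlias)

    ex = ThreadPoolExecutor(max_workers=1)
    pending = [ex.submit(time.sleep, 1) for _ in range(10)]
    # New in 3.9: cancel whatever is still queued before shutting down.
    ex.shutdown(wait=True, cancel_futures=True)
    print(sum(p.cancelled() for p in pending))   # most of the ten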
diff --git a/x64/Lib/concurrent/futures/process.py b/x64/Lib/concurrent/futures/process.py
index 2b2b78e..90bc98b 100644
--- a/x64/Lib/concurrent/futures/process.py
+++ b/x64/Lib/concurrent/futures/process.py
@@ -45,11 +45,9 @@ Process #1..n:
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-import atexit
import os
from concurrent.futures import _base
import queue
-from queue import Full
import multiprocessing as mp
import multiprocessing.connection
from multiprocessing.queues import Queue
@@ -60,19 +58,6 @@ import itertools
import sys
import traceback
-# Workers are created as daemon threads and processes. This is done to allow the
-# interpreter to exit when there are still idle processes in a
-# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
-# allowing workers to die with the interpreter has two undesirable properties:
-# - The workers would still be running during interpreter shutdown,
-# meaning that they would fail in unpredictable ways.
-# - The workers could be killed while evaluating a work item, which could
-# be bad if the callable being evaluated has external side-effects e.g.
-# writing to a file.
-#
-# To work around this problem, an exit handler is installed which tells the
-# workers to exit when their work queues are empty and then waits until the
-# threads/processes finish.
_threads_wakeups = weakref.WeakKeyDictionary()
_global_shutdown = False
@@ -80,18 +65,23 @@ _global_shutdown = False
class _ThreadWakeup:
def __init__(self):
+ self._closed = False
self._reader, self._writer = mp.Pipe(duplex=False)
def close(self):
- self._writer.close()
- self._reader.close()
+ if not self._closed:
+ self._closed = True
+ self._writer.close()
+ self._reader.close()
def wakeup(self):
- self._writer.send_bytes(b"")
+ if not self._closed:
+ self._writer.send_bytes(b"")
def clear(self):
- while self._reader.poll():
- self._reader.recv_bytes()
+ if not self._closed:
+ while self._reader.poll():
+ self._reader.recv_bytes()
def _python_exit():
@@ -99,10 +89,17 @@ def _python_exit():
_global_shutdown = True
items = list(_threads_wakeups.items())
for _, thread_wakeup in items:
+ # call not protected by ProcessPoolExecutor._shutdown_lock
thread_wakeup.wakeup()
for t, _ in items:
t.join()
+# Register for `_python_exit()` to be called just before joining all
+# non-daemon threads. This is used instead of `atexit.register()` for
+# compatibility with subinterpreters, which no longer support daemon threads.
+# See bpo-39812 for context.
+threading._register_atexit(_python_exit)
+
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
@@ -160,8 +157,11 @@ class _CallItem(object):
class _SafeQueue(Queue):
"""Safe Queue set exception to the future object linked to a job"""
- def __init__(self, max_size=0, *, ctx, pending_work_items):
+ def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock,
+ thread_wakeup):
self.pending_work_items = pending_work_items
+ self.shutdown_lock = shutdown_lock
+ self.thread_wakeup = thread_wakeup
super().__init__(max_size, ctx=ctx)
def _on_queue_feeder_error(self, e, obj):
@@ -169,8 +169,11 @@ class _SafeQueue(Queue):
tb = traceback.format_exception(type(e), e, e.__traceback__)
e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb)))
work_item = self.pending_work_items.pop(obj.work_id, None)
- # work_item can be None if another process terminated. In this case,
- # the queue_manager_thread fails all work_items with BrokenProcessPool
+ with self.shutdown_lock:
+ self.thread_wakeup.wakeup()
+ # work_item can be None if another process terminated. In this
+ # case, the executor_manager_thread fails all work_items
+ # with BrokenProcessPool
if work_item is not None:
work_item.future.set_exception(e)
else:
@@ -186,6 +189,7 @@ def _get_chunks(*iterables, chunksize):
return
yield chunk
+
def _process_chunk(fn, chunk):
""" Processes a chunk of an iterable passed to map.
@@ -249,120 +253,132 @@ def _process_worker(call_queue, result_queue, initializer, initargs):
del call_item
-def _add_call_item_to_queue(pending_work_items,
- work_ids,
- call_queue):
- """Fills call_queue with _WorkItems from pending_work_items.
+class _ExecutorManagerThread(threading.Thread):
+ """Manages the communication between this process and the worker processes.
- This function never blocks.
+ The manager is run in a local thread.
Args:
- pending_work_items: A dict mapping work ids to _WorkItems e.g.
- {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
- work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
- are consumed and the corresponding _WorkItems from
- pending_work_items are transformed into _CallItems and put in
- call_queue.
- call_queue: A multiprocessing.Queue that will be filled with _CallItems
- derived from _WorkItems.
+ executor: A reference to the ProcessPoolExecutor that owns
+ this thread. A weakref will be owned by the manager as well as
+ references to internal objects used to introspect the state of
+ the executor.
"""
- while True:
- if call_queue.full():
- return
- try:
- work_id = work_ids.get(block=False)
- except queue.Empty:
- return
- else:
- work_item = pending_work_items[work_id]
-
- if work_item.future.set_running_or_notify_cancel():
- call_queue.put(_CallItem(work_id,
- work_item.fn,
- work_item.args,
- work_item.kwargs),
- block=True)
- else:
- del pending_work_items[work_id]
- continue
+ def __init__(self, executor):
+ # Store references to necessary internals of the executor.
+
+ # A _ThreadWakeup to allow waking up the queue_manager_thread from the
+ # main Thread and avoid deadlocks caused by permanently locked queues.
+ self.thread_wakeup = executor._executor_manager_thread_wakeup
+ self.shutdown_lock = executor._shutdown_lock
+
+ # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
+ # to determine if the ProcessPoolExecutor has been garbage collected
+ # and that the manager can exit.
+ # When the executor gets garbage collected, the weakref callback
+ # will wake up the queue management thread so that it can terminate
+ # if there is no pending work item.
+ def weakref_cb(_,
+ thread_wakeup=self.thread_wakeup,
+ shutdown_lock=self.shutdown_lock):
+ mp.util.debug('Executor collected: triggering callback for'
+ ' QueueManager wakeup')
+ with shutdown_lock:
+ thread_wakeup.wakeup()
-def _queue_management_worker(executor_reference,
- processes,
- pending_work_items,
- work_ids_queue,
- call_queue,
- result_queue,
- thread_wakeup):
- """Manages the communication between this process and the worker processes.
+ self.executor_reference = weakref.ref(executor, weakref_cb)
- This function is run in a local thread.
+ # A list of the ctx.Process instances used as workers.
+ self.processes = executor._processes
- Args:
- executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
- this thread. Used to determine if the ProcessPoolExecutor has been
- garbage collected and that this function can exit.
- process: A list of the ctx.Process instances used as
- workers.
- pending_work_items: A dict mapping work ids to _WorkItems e.g.
- {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
- work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
- call_queue: A ctx.Queue that will be filled with _CallItems
- derived from _WorkItems for processing by the process workers.
- result_queue: A ctx.SimpleQueue of _ResultItems generated by the
- process workers.
- thread_wakeup: A _ThreadWakeup to allow waking up the
- queue_manager_thread from the main Thread and avoid deadlocks
- caused by permanently locked queues.
- """
- executor = None
+ # A ctx.Queue that will be filled with _CallItems derived from
+ # _WorkItems for processing by the process workers.
+ self.call_queue = executor._call_queue
- def shutting_down():
- return (_global_shutdown or executor is None
- or executor._shutdown_thread)
+ # A ctx.SimpleQueue of _ResultItems generated by the process workers.
+ self.result_queue = executor._result_queue
- def shutdown_worker():
- # This is an upper bound on the number of children alive.
- n_children_alive = sum(p.is_alive() for p in processes.values())
- n_children_to_stop = n_children_alive
- n_sentinels_sent = 0
- # Send the right number of sentinels, to make sure all children are
- # properly terminated.
- while n_sentinels_sent < n_children_to_stop and n_children_alive > 0:
- for i in range(n_children_to_stop - n_sentinels_sent):
- try:
- call_queue.put_nowait(None)
- n_sentinels_sent += 1
- except Full:
- break
- n_children_alive = sum(p.is_alive() for p in processes.values())
+ # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
+ self.work_ids_queue = executor._work_ids
- # Release the queue's resources as soon as possible.
- call_queue.close()
- # If .join() is not called on the created processes then
- # some ctx.Queue methods may deadlock on Mac OS X.
- for p in processes.values():
- p.join()
+ # A dict mapping work ids to _WorkItems e.g.
+ # {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
+ self.pending_work_items = executor._pending_work_items
- result_reader = result_queue._reader
- wakeup_reader = thread_wakeup._reader
- readers = [result_reader, wakeup_reader]
+ super().__init__()
- while True:
- _add_call_item_to_queue(pending_work_items,
- work_ids_queue,
- call_queue)
+ def run(self):
+ # Main loop for the executor manager thread.
+
+ while True:
+ self.add_call_item_to_queue()
+
+ result_item, is_broken, cause = self.wait_result_broken_or_wakeup()
+ if is_broken:
+ self.terminate_broken(cause)
+ return
+ if result_item is not None:
+ self.process_result_item(result_item)
+ # Delete reference to result_item to avoid keeping references
+ # while waiting on new results.
+ del result_item
+
+ # attempt to increment idle process count
+ executor = self.executor_reference()
+ if executor is not None:
+ executor._idle_worker_semaphore.release()
+ del executor
+
+ if self.is_shutting_down():
+ self.flag_executor_shutting_down()
+
+ # Since no new work items can be added, it is safe to shutdown
+ # this thread if there are no pending work items.
+ if not self.pending_work_items:
+ self.join_executor_internals()
+ return
+
+ def add_call_item_to_queue(self):
+ # Fills call_queue with _WorkItems from pending_work_items.
+ # This function never blocks.
+ while True:
+ if self.call_queue.full():
+ return
+ try:
+ work_id = self.work_ids_queue.get(block=False)
+ except queue.Empty:
+ return
+ else:
+ work_item = self.pending_work_items[work_id]
+
+ if work_item.future.set_running_or_notify_cancel():
+ self.call_queue.put(_CallItem(work_id,
+ work_item.fn,
+ work_item.args,
+ work_item.kwargs),
+ block=True)
+ else:
+ del self.pending_work_items[work_id]
+ continue
+
+ def wait_result_broken_or_wakeup(self):
# Wait for a result to be ready in the result_queue while checking
# that all worker processes are still running, or for a wake up
# signal to be sent. The wake up signals come either from new tasks being
# submitted, from the executor being shutdown/gc-ed, or from the
# shutdown of the python interpreter.
- worker_sentinels = [p.sentinel for p in processes.values()]
+ result_reader = self.result_queue._reader
+ assert not self.thread_wakeup._closed
+ wakeup_reader = self.thread_wakeup._reader
+ readers = [result_reader, wakeup_reader]
+ worker_sentinels = [p.sentinel for p in self.processes.values()]
ready = mp.connection.wait(readers + worker_sentinels)
cause = None
is_broken = True
+ result_item = None
if result_reader in ready:
try:
result_item = result_reader.recv()
@@ -372,79 +388,138 @@ def _queue_management_worker(executor_reference,
elif wakeup_reader in ready:
is_broken = False
- result_item = None
- thread_wakeup.clear()
- if is_broken:
- # Mark the process pool broken so that submits fail right now.
- executor = executor_reference()
- if executor is not None:
- executor._broken = ('A child process terminated '
- 'abruptly, the process pool is not '
- 'usable anymore')
- executor._shutdown_thread = True
- executor = None
- bpe = BrokenProcessPool("A process in the process pool was "
- "terminated abruptly while the future was "
- "running or pending.")
- if cause is not None:
- bpe.__cause__ = _RemoteTraceback(
- f"\n'''\n{''.join(cause)}'''")
- # All futures in flight must be marked failed
- for work_id, work_item in pending_work_items.items():
- work_item.future.set_exception(bpe)
- # Delete references to object. See issue16284
- del work_item
- pending_work_items.clear()
- # Terminate remaining workers forcibly: the queues or their
- # locks may be in a dirty state and block forever.
- for p in processes.values():
- p.terminate()
- shutdown_worker()
- return
+
+ with self.shutdown_lock:
+ self.thread_wakeup.clear()
+
+ return result_item, is_broken, cause
+
+ def process_result_item(self, result_item):
+ # Process the received result_item. This can be either the PID of a
+ # worker that exited gracefully, or a _ResultItem.
+
if isinstance(result_item, int):
# Clean shutdown of a worker using its PID
# (avoids marking the executor broken)
- assert shutting_down()
- p = processes.pop(result_item)
+ assert self.is_shutting_down()
+ p = self.processes.pop(result_item)
p.join()
- if not processes:
- shutdown_worker()
+ if not self.processes:
+ self.join_executor_internals()
return
- elif result_item is not None:
- work_item = pending_work_items.pop(result_item.work_id, None)
+ else:
+ # Received a _ResultItem so mark the future as completed.
+ work_item = self.pending_work_items.pop(result_item.work_id, None)
# work_item can be None if another process terminated (see above)
if work_item is not None:
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
- # Delete references to object. See issue16284
- del work_item
- # Delete reference to result_item
- del result_item
- # Check whether we should start shutting down.
- executor = executor_reference()
+ def is_shutting_down(self):
+ # Check whether we should start shutting down the executor.
+ executor = self.executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
- if shutting_down():
- try:
- # Flag the executor as shutting down as early as possible if it
- # is not gc-ed yet.
- if executor is not None:
- executor._shutdown_thread = True
- # Since no new work items can be added, it is safe to shutdown
- # this thread if there are no pending work items.
- if not pending_work_items:
- shutdown_worker()
- return
- except Full:
- # This is not a problem: we will eventually be woken up (in
- # result_queue.get()) and be able to send a sentinel again.
- pass
- executor = None
+ return (_global_shutdown or executor is None
+ or executor._shutdown_thread)
+
+ def terminate_broken(self, cause):
+ # Terminate the executor because it is in a broken state. The cause
+ # argument can be used to display more information on the error that
+ # led the executor into becoming broken.
+
+ # Mark the process pool broken so that submits fail right now.
+ executor = self.executor_reference()
+ if executor is not None:
+ executor._broken = ('A child process terminated '
+ 'abruptly, the process pool is not '
+ 'usable anymore')
+ executor._shutdown_thread = True
+ executor = None
+
+ # All pending tasks are to be marked failed with the following
+ # BrokenProcessPool error
+ bpe = BrokenProcessPool("A process in the process pool was "
+ "terminated abruptly while the future was "
+ "running or pending.")
+ if cause is not None:
+ bpe.__cause__ = _RemoteTraceback(
+ f"\n'''\n{''.join(cause)}'''")
+
+ # Mark pending tasks as failed.
+ for work_id, work_item in self.pending_work_items.items():
+ work_item.future.set_exception(bpe)
+ # Delete references to object. See issue16284
+ del work_item
+ self.pending_work_items.clear()
+
+ # Terminate remaining workers forcibly: the queues or their
+ # locks may be in a dirty state and block forever.
+ for p in self.processes.values():
+ p.terminate()
+
+ # clean up resources
+ self.join_executor_internals()
+
+ def flag_executor_shutting_down(self):
+ # Flag the executor as shutting down as early as possible if it is
+ # not gc-ed yet, and cancel remaining tasks if requested.
+ executor = self.executor_reference()
+ if executor is not None:
+ executor._shutdown_thread = True
+ # Cancel pending work items if requested.
+ if executor._cancel_pending_futures:
+ # Cancel all pending futures and update pending_work_items
+ # to only have futures that are currently running.
+ new_pending_work_items = {}
+ for work_id, work_item in self.pending_work_items.items():
+ if not work_item.future.cancel():
+ new_pending_work_items[work_id] = work_item
+ self.pending_work_items = new_pending_work_items
+ # Drain work_ids_queue since we no longer need to
+ # add items to the call queue.
+ while True:
+ try:
+ self.work_ids_queue.get_nowait()
+ except queue.Empty:
+ break
+ # Make sure we do this only once to not waste time looping
+ # on running processes over and over.
+ executor._cancel_pending_futures = False
+
+ def shutdown_workers(self):
+ n_children_to_stop = self.get_n_children_alive()
+ n_sentinels_sent = 0
+ # Send the right number of sentinels, to make sure all children are
+ # properly terminated.
+ while (n_sentinels_sent < n_children_to_stop
+ and self.get_n_children_alive() > 0):
+ for i in range(n_children_to_stop - n_sentinels_sent):
+ try:
+ self.call_queue.put_nowait(None)
+ n_sentinels_sent += 1
+ except queue.Full:
+ break
+
+ def join_executor_internals(self):
+ self.shutdown_workers()
+ # Release the queue's resources as soon as possible.
+ self.call_queue.close()
+ self.call_queue.join_thread()
+ with self.shutdown_lock:
+ self.thread_wakeup.close()
+ # If .join() is not called on the created processes then
+ # some ctx.Queue methods may deadlock on Mac OS X.
+ for p in self.processes.values():
+ p.join()
+
+ def get_n_children_alive(self):
+ # This is an upper bound on the number of children alive.
+ return sum(p.is_alive() for p in self.processes.values())
_system_limits_checked = False
@@ -535,7 +610,7 @@ class ProcessPoolExecutor(_base.Executor):
self._initargs = initargs
# Management thread
- self._queue_management_thread = None
+ self._executor_manager_thread = None
# Map of pids to processes
self._processes = {}
@@ -543,9 +618,21 @@ class ProcessPoolExecutor(_base.Executor):
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
+ self._idle_worker_semaphore = threading.Semaphore(0)
self._broken = False
self._queue_count = 0
self._pending_work_items = {}
+ self._cancel_pending_futures = False
+
+ # _ThreadWakeup is a communication channel used to interrupt the wait
+ # of the main loop of executor_manager_thread from another thread (e.g.
+ # when calling executor.submit or executor.shutdown). We do not use the
+ # _result_queue to send wakeup signals to the executor_manager_thread
+ # as it could result in a deadlock if a worker process dies with the
+ # _result_queue write lock still acquired.
+ #
+ # _shutdown_lock must be locked to access _ThreadWakeup.
+ self._executor_manager_thread_wakeup = _ThreadWakeup()
# Create communication channels for the executor
# Make the call queue slightly larger than the number of processes to
@@ -554,7 +641,9 @@ class ProcessPoolExecutor(_base.Executor):
queue_size = self._max_workers + EXTRA_QUEUED_CALLS
self._call_queue = _SafeQueue(
max_size=queue_size, ctx=self._mp_context,
- pending_work_items=self._pending_work_items)
+ pending_work_items=self._pending_work_items,
+ shutdown_lock=self._shutdown_lock,
+ thread_wakeup=self._executor_manager_thread_wakeup)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
# processes anyway, so silence the tracebacks.
@@ -562,43 +651,21 @@ class ProcessPoolExecutor(_base.Executor):
self._result_queue = mp_context.SimpleQueue()
self._work_ids = queue.Queue()
- # _ThreadWakeup is a communication channel used to interrupt the wait
- # of the main loop of queue_manager_thread from another thread (e.g.
- # when calling executor.submit or executor.shutdown). We do not use the
- # _result_queue to send the wakeup signal to the queue_manager_thread
- # as it could result in a deadlock if a worker process dies with the
- # _result_queue write lock still acquired.
- self._queue_management_thread_wakeup = _ThreadWakeup()
-
- def _start_queue_management_thread(self):
- if self._queue_management_thread is None:
- # When the executor gets garbarge collected, the weakref callback
- # will wake up the queue management thread so that it can terminate
- # if there is no pending work item.
- def weakref_cb(_,
- thread_wakeup=self._queue_management_thread_wakeup):
- mp.util.debug('Executor collected: triggering callback for'
- ' QueueManager wakeup')
- thread_wakeup.wakeup()
+ def _start_executor_manager_thread(self):
+ if self._executor_manager_thread is None:
# Start the processes so that their sentinels are known.
- self._adjust_process_count()
- self._queue_management_thread = threading.Thread(
- target=_queue_management_worker,
- args=(weakref.ref(self, weakref_cb),
- self._processes,
- self._pending_work_items,
- self._work_ids,
- self._call_queue,
- self._result_queue,
- self._queue_management_thread_wakeup),
- name="QueueManagerThread")
- self._queue_management_thread.daemon = True
- self._queue_management_thread.start()
- _threads_wakeups[self._queue_management_thread] = \
- self._queue_management_thread_wakeup
+ self._executor_manager_thread = _ExecutorManagerThread(self)
+ self._executor_manager_thread.start()
+ _threads_wakeups[self._executor_manager_thread] = \
+ self._executor_manager_thread_wakeup
def _adjust_process_count(self):
- for _ in range(len(self._processes), self._max_workers):
+ # if there's an idle process, we don't need to spawn a new one.
+ if self._idle_worker_semaphore.acquire(blocking=False):
+ return
+
+ process_count = len(self._processes)
+ if process_count < self._max_workers:
p = self._mp_context.Process(
target=_process_worker,
args=(self._call_queue,
@@ -608,22 +675,7 @@ class ProcessPoolExecutor(_base.Executor):
p.start()
self._processes[p.pid] = p
- def submit(*args, **kwargs):
- if len(args) >= 2:
- self, fn, *args = args
- elif not args:
- raise TypeError("descriptor 'submit' of 'ProcessPoolExecutor' object "
- "needs an argument")
- elif 'fn' in kwargs:
- fn = kwargs.pop('fn')
- self, *args = args
- import warnings
- warnings.warn("Passing 'fn' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('submit expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
+ def submit(self, fn, /, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
raise BrokenProcessPool(self._broken)
@@ -640,11 +692,11 @@ class ProcessPoolExecutor(_base.Executor):
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
- self._queue_management_thread_wakeup.wakeup()
+ self._executor_manager_thread_wakeup.wakeup()
- self._start_queue_management_thread()
+ self._adjust_process_count()
+ self._start_executor_manager_thread()
return f
- submit.__text_signature__ = _base.Executor.submit.__text_signature__
submit.__doc__ = _base.Executor.submit.__doc__
def map(self, fn, *iterables, timeout=None, chunksize=1):
@@ -676,29 +728,24 @@ class ProcessPoolExecutor(_base.Executor):
timeout=timeout)
return _chain_from_iterable_of_lists(results)
- def shutdown(self, wait=True):
+ def shutdown(self, wait=True, *, cancel_futures=False):
with self._shutdown_lock:
+ self._cancel_pending_futures = cancel_futures
self._shutdown_thread = True
- if self._queue_management_thread:
- # Wake up queue management thread
- self._queue_management_thread_wakeup.wakeup()
- if wait:
- self._queue_management_thread.join()
+ if self._executor_manager_thread_wakeup is not None:
+ # Wake up queue management thread
+ self._executor_manager_thread_wakeup.wakeup()
+
+ if self._executor_manager_thread is not None and wait:
+ self._executor_manager_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
- self._queue_management_thread = None
- if self._call_queue is not None:
- self._call_queue.close()
- if wait:
- self._call_queue.join_thread()
- self._call_queue = None
+ self._executor_manager_thread = None
+ self._call_queue = None
+ if self._result_queue is not None and wait:
+ self._result_queue.close()
self._result_queue = None
self._processes = None
-
- if self._queue_management_thread_wakeup:
- self._queue_management_thread_wakeup.close()
- self._queue_management_thread_wakeup = None
+ self._executor_manager_thread_wakeup = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
-
-atexit.register(_python_exit)
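Besides folding _queue_management_worker into _ExecutorManagerThread, the diff makes _adjust_process_count() run per submit() behind an idle-worker semaphore, so worker processes are spawned lazily instead of all at once. A small way to observe the reuse (the pids are illustrative):

    import os
    from concurrent.futures import ProcessPoolExecutor

    if __name__ == '__main__':
        with ProcessPoolExecutor(max_workers=4) as ex:
            # Quick sequential tasks reuse one idle worker; the pool no
            # longer eagerly starts all four processes up front.
            print(ex.submit(os.getpid).result())
            print(ex.submit(os.getpid).result())   # likely the same pid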
diff --git a/x64/Lib/concurrent/futures/thread.py b/x64/Lib/concurrent/futures/thread.py
index 9e669b2..b7a2cac 100644
--- a/x64/Lib/concurrent/futures/thread.py
+++ b/x64/Lib/concurrent/futures/thread.py
@@ -5,41 +5,36 @@
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
-import atexit
from concurrent.futures import _base
import itertools
import queue
import threading
+import types
import weakref
import os
-# Workers are created as daemon threads. This is done to allow the interpreter
-# to exit when there are still idle threads in a ThreadPoolExecutor's thread
-# pool (i.e. shutdown() was not called). However, allowing workers to die with
-# the interpreter has two undesirable properties:
-# - The workers would still be running during interpreter shutdown,
-# meaning that they would fail in unpredictable ways.
-# - The workers could be killed while evaluating a work item, which could
-# be bad if the callable being evaluated has external side-effects e.g.
-# writing to a file.
-#
-# To work around this problem, an exit handler is installed which tells the
-# workers to exit when their work queues are empty and then waits until the
-# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
+# Lock that ensures that new workers are not created while the interpreter is
+# shutting down. Must be held while mutating _threads_queues and _shutdown.
+_global_shutdown_lock = threading.Lock()
def _python_exit():
global _shutdown
- _shutdown = True
+ with _global_shutdown_lock:
+ _shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
-atexit.register(_python_exit)
+# Register for `_python_exit()` to be called just before joining all
+# non-daemon threads. This is used instead of `atexit.register()` for
+# compatibility with subinterpreters, which no longer support daemon threads.
+# See bpo-39812 for context.
+threading._register_atexit(_python_exit)
class _WorkItem(object):
@@ -62,6 +57,8 @@ class _WorkItem(object):
else:
self.future.set_result(result)
+ __class_getitem__ = classmethod(types.GenericAlias)
+
def _worker(executor_reference, work_queue, initializer, initargs):
if initializer is not None:
@@ -155,23 +152,8 @@ class ThreadPoolExecutor(_base.Executor):
self._initializer = initializer
self._initargs = initargs
- def submit(*args, **kwargs):
- if len(args) >= 2:
- self, fn, *args = args
- elif not args:
- raise TypeError("descriptor 'submit' of 'ThreadPoolExecutor' object "
- "needs an argument")
- elif 'fn' in kwargs:
- fn = kwargs.pop('fn')
- self, *args = args
- import warnings
- warnings.warn("Passing 'fn' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('submit expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
- with self._shutdown_lock:
+ def submit(self, fn, /, *args, **kwargs):
+ with self._shutdown_lock, _global_shutdown_lock:
if self._broken:
raise BrokenThreadPool(self._broken)
@@ -187,7 +169,6 @@ class ThreadPoolExecutor(_base.Executor):
self._work_queue.put(w)
self._adjust_thread_count()
return f
- submit.__text_signature__ = _base.Executor.submit.__text_signature__
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
@@ -209,7 +190,6 @@ class ThreadPoolExecutor(_base.Executor):
self._work_queue,
self._initializer,
self._initargs))
- t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
@@ -227,9 +207,22 @@ class ThreadPoolExecutor(_base.Executor):
if work_item is not None:
work_item.future.set_exception(BrokenThreadPool(self._broken))
- def shutdown(self, wait=True):
+ def shutdown(self, wait=True, *, cancel_futures=False):
with self._shutdown_lock:
self._shutdown = True
+ if cancel_futures:
+ # Drain all work items from the queue, and then cancel their
+ # associated futures.
+ while True:
+ try:
+ work_item = self._work_queue.get_nowait()
+ except queue.Empty:
+ break
+ if work_item is not None:
+ work_item.future.cancel()
+
+ # Send a wake-up to prevent threads calling
+ # _work_queue.get(block=True) from permanently blocking.
self._work_queue.put(None)
if wait:
for t in self._threads:
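With `t.daemon = True` gone, worker threads are non-daemonic, and orderly exit now hinges on threading._register_atexit() running _python_exit() before those threads are joined. In user terms, a script may fall off the end without calling shutdown() and queued work still completes, as in this sketch:

    from concurrent.futures import ThreadPoolExecutor

    ex = ThreadPoolExecutor(max_workers=2)
    for i in range(4):
        ex.submit(print, 'task', i)
    # No explicit shutdown(): _python_exit() sends the None sentinels and
    # joins the non-daemon workers before the interpreter exits.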
diff --git a/x64/Lib/contextlib.py b/x64/Lib/contextlib.py
index 94dc2bf..ff92d9f 100644
--- a/x64/Lib/contextlib.py
+++ b/x64/Lib/contextlib.py
@@ -4,7 +4,7 @@ import sys
import _collections_abc
from collections import deque
from functools import wraps
-from types import MethodType
+from types import MethodType, GenericAlias
__all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext",
"AbstractContextManager", "AbstractAsyncContextManager",
@@ -16,6 +16,8 @@ class AbstractContextManager(abc.ABC):
"""An abstract base class for context managers."""
+ __class_getitem__ = classmethod(GenericAlias)
+
def __enter__(self):
"""Return `self` upon entering the runtime context."""
return self
@@ -36,6 +38,8 @@ class AbstractAsyncContextManager(abc.ABC):
"""An abstract base class for asynchronous context managers."""
+ __class_getitem__ = classmethod(GenericAlias)
+
async def __aenter__(self):
"""Return `self` upon entering the runtime context."""
return self
@@ -426,26 +430,11 @@ class _BaseExitStack:
self._push_cm_exit(cm, _exit)
return result
- def callback(*args, **kwds):
+ def callback(self, callback, /, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
- if len(args) >= 2:
- self, callback, *args = args
- elif not args:
- raise TypeError("descriptor 'callback' of '_BaseExitStack' object "
- "needs an argument")
- elif 'callback' in kwds:
- callback = kwds.pop('callback')
- self, *args = args
- import warnings
- warnings.warn("Passing 'callback' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('callback expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
_exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
@@ -453,7 +442,6 @@ class _BaseExitStack:
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper)
return callback # Allow use as a decorator
- callback.__text_signature__ = '($self, callback, /, *args, **kwds)'
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods."""
@@ -587,26 +575,11 @@ class AsyncExitStack(_BaseExitStack, AbstractAsyncContextManager):
self._push_async_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
- def push_async_callback(*args, **kwds):
+ def push_async_callback(self, callback, /, *args, **kwds):
"""Registers an arbitrary coroutine function and arguments.
Cannot suppress exceptions.
"""
- if len(args) >= 2:
- self, callback, *args = args
- elif not args:
- raise TypeError("descriptor 'push_async_callback' of "
- "'AsyncExitStack' object needs an argument")
- elif 'callback' in kwds:
- callback = kwds.pop('callback')
- self, *args = args
- import warnings
- warnings.warn("Passing 'callback' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('push_async_callback expected at least 1 '
- 'positional argument, got %d' % (len(args)-1))
-
_exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
@@ -614,7 +587,6 @@ class AsyncExitStack(_BaseExitStack, AbstractAsyncContextManager):
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper, False)
return callback # Allow use as a decorator
- push_async_callback.__text_signature__ = '($self, callback, /, *args, **kwds)'
async def aclose(self):
"""Immediately unwind the context stack."""
diff --git a/x64/Lib/copyreg.py b/x64/Lib/copyreg.py
index dfc463c..7ab8c12 100644
--- a/x64/Lib/copyreg.py
+++ b/x64/Lib/copyreg.py
@@ -48,6 +48,7 @@ def _reconstructor(cls, base, state):
return obj
_HEAPTYPE = 1<<9
+_new_type = type(int.__new__)
# Python code for object.__reduce_ex__ for protocols 0 and 1
@@ -57,6 +58,9 @@ def _reduce_ex(self, proto):
for base in cls.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
+ new = base.__new__
+ if isinstance(new, _new_type) and new.__self__ is base:
+ break
else:
base = object # not really reachable
if base is object:
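The copyreg hunk teaches the protocol-0/1 reducer to stop at the first base that supplies its own C-level __new__ (previously it stopped only at non-heap types). The check in isolation, mirroring the diff:

    _new_type = type(int.__new__)   # builtin_function_or_method

    # int defines its own static __new__, so the search stops there...
    print(isinstance(int.__new__, _new_type) and int.__new__.__self__ is int)  # True

    class MyInt(int):
        pass

    # ...while a heap subclass merely inherits it (__self__ is still int).
    print(MyInt.__new__.__self__ is MyInt)   # False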
diff --git a/x64/Lib/crypt.py b/x64/Lib/crypt.py
index 8846602..33dbc46 100644
--- a/x64/Lib/crypt.py
+++ b/x64/Lib/crypt.py
@@ -10,6 +10,7 @@ except ModuleNotFoundError:
else:
raise ImportError("The required _crypt module was not built as part of CPython")
+import errno
import string as _string
from random import SystemRandom as _SystemRandom
from collections import namedtuple as _namedtuple
@@ -88,7 +89,14 @@ def _add_method(name, *args, rounds=None):
method = _Method(name, *args)
globals()['METHOD_' + name] = method
salt = mksalt(method, rounds=rounds)
- result = crypt('', salt)
+ result = None
+ try:
+ result = crypt('', salt)
+ except OSError as e:
+ # Not all libc libraries support all encryption methods.
+ if e.errno == errno.EINVAL:
+ return False
+ raise
if result and len(result) == method.total_size:
methods.append(method)
return True
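The net effect: probing an encryption method the local libc rejects now reports it as unavailable instead of raising. The same probe-and-degrade pattern in isolation (the names below are illustrative, not from crypt.py):

    import errno

    def probe(fn, *args):
        try:
            return fn(*args)
        except OSError as e:
            if e.errno == errno.EINVAL:   # libc rejects this salt format
                return None               # method unsupported, not fatal
            raise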
diff --git a/x64/Lib/ctypes/__init__.py b/x64/Lib/ctypes/__init__.py
index 8f09911..4afa4eb 100644
--- a/x64/Lib/ctypes/__init__.py
+++ b/x64/Lib/ctypes/__init__.py
@@ -1,6 +1,7 @@
"""create and manipulate C data types in Python"""
import os as _os, sys as _sys
+import types as _types
__version__ = "1.1.0"
@@ -450,6 +451,8 @@ class LibraryLoader(object):
def LoadLibrary(self, name):
return self._dlltype(name)
+ __class_getitem__ = classmethod(_types.GenericAlias)
+
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
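Wiring __class_getitem__ to types.GenericAlias makes a class subscriptable for annotations (PEP 585 groundwork). A self-contained sketch with a stand-in class:

    import types

    class Loader:
        __class_getitem__ = classmethod(types.GenericAlias)

    alias = Loader[int]
    print(alias)         # __main__.Loader[int]
    print(type(alias))   # <class 'types.GenericAlias'>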
diff --git a/x64/Lib/ctypes/macholib/dyld.py b/x64/Lib/ctypes/macholib/dyld.py
index 9d86b05..1c3f8fd 100644
--- a/x64/Lib/ctypes/macholib/dyld.py
+++ b/x64/Lib/ctypes/macholib/dyld.py
@@ -6,6 +6,11 @@ import os
from ctypes.macholib.framework import framework_info
from ctypes.macholib.dylib import dylib_info
from itertools import *
+try:
+ from _ctypes import _dyld_shared_cache_contains_path
+except ImportError:
+ def _dyld_shared_cache_contains_path(*args):
+ raise NotImplementedError
__all__ = [
'dyld_find', 'framework_find',
@@ -122,8 +127,15 @@ def dyld_find(name, executable_path=None, env=None):
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
+
if os.path.isfile(path):
return path
+ try:
+ if _dyld_shared_cache_contains_path(path):
+ return path
+ except NotImplementedError:
+ pass
+
raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
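The ImportError stub keeps non-macOS builds importable while letting callers treat NotImplementedError as "not in the shared cache". The same graceful-degradation idiom in isolation (in_shared_cache is a hypothetical wrapper, not part of the patch):

    try:
        from _ctypes import _dyld_shared_cache_contains_path
    except ImportError:
        def _dyld_shared_cache_contains_path(*args):
            raise NotImplementedError

    def in_shared_cache(path):
        try:
            return _dyld_shared_cache_contains_path(path)
        except NotImplementedError:
            return False   # accelerator absent: fall back to isfile checks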
diff --git a/x64/Lib/ctypes/util.py b/x64/Lib/ctypes/util.py
index 97973bc..0c2510e 100644
--- a/x64/Lib/ctypes/util.py
+++ b/x64/Lib/ctypes/util.py
@@ -93,6 +93,12 @@ elif os.name == "posix":
# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
import re, tempfile
+ def _is_elf(filename):
+ "Return True if the given file is an ELF file"
+ elf_header = b'\x7fELF'
+ with open(filename, 'br') as thefile:
+ return thefile.read(4) == elf_header
+
def _findLib_gcc(name):
# Run GCC's linker with the -t (aka --trace) option and examine the
# library name it prints out. The GCC command will fail because we
@@ -130,10 +136,17 @@ elif os.name == "posix":
# Raised if the file was already removed, which is the normal
# behaviour of GCC if linking fails
pass
- res = re.search(expr, trace)
+ res = re.findall(expr, trace)
if not res:
return None
- return os.fsdecode(res.group(0))
+
+ for file in res:
+ # Check if the given file is an elf file: gcc can report
+ # some files that are linker scripts and not actual
+ # shared objects. See bpo-41976 for more details
+ if not _is_elf(file):
+ continue
+ return os.fsdecode(file)
if sys.platform == "sunos5":
@@ -299,17 +312,22 @@ elif os.name == "posix":
stderr=subprocess.PIPE,
universal_newlines=True)
out, _ = p.communicate()
- res = re.search(expr, os.fsdecode(out))
- if res:
- result = res.group(0)
- except Exception as e:
+ res = re.findall(expr, os.fsdecode(out))
+ for file in res:
+ # Check if the given file is an elf file: gcc can report
+ # some files that are linker scripts and not actual
+ # shared objects. See bpo-41976 for more details
+ if not _is_elf(file):
+ continue
+ return os.fsdecode(file)
+ except Exception:
pass # result will be None
return result
def find_library(name):
# See issue #9998
return _findSoname_ldconfig(name) or \
- _get_soname(_findLib_gcc(name) or _findLib_ld(name))
+ _get_soname(_findLib_gcc(name)) or _get_soname(_findLib_ld(name))
################################################################
# test code
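The filter keys on the 4-byte ELF magic: on glibc systems, 'libc.so' is frequently a plain-text linker script that gcc's trace reports but that cannot be loaded as a shared object (bpo-41976). A standalone version of the check:

    def is_elf(filename):
        # Every ELF object starts with the magic b'\x7fELF'; linker
        # scripts are plain text and fail this test.
        with open(filename, 'rb') as f:
            return f.read(4) == b'\x7fELF'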
diff --git a/x64/Lib/curses/__init__.py b/x64/Lib/curses/__init__.py
index 24ff3ca..69270bf 100644
--- a/x64/Lib/curses/__init__.py
+++ b/x64/Lib/curses/__init__.py
@@ -60,7 +60,7 @@ except NameError:
# raises an exception, wrapper() will restore the terminal to a sane state so
# you can read the resulting traceback.
-def wrapper(*args, **kwds):
+def wrapper(func, /, *args, **kwds):
"""Wrapper function that initializes curses and calls another function,
restoring normal keyboard/screen behavior on error.
The callable object 'func' is then passed the main window 'stdscr'
@@ -68,17 +68,6 @@ def wrapper(*args, **kwds):
wrapper().
"""
- if args:
- func, *args = args
- elif 'func' in kwds:
- func = kwds.pop('func')
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('wrapper expected at least 1 positional argument, '
- 'got %d' % len(args))
-
try:
# Initialize curses
stdscr = initscr()
@@ -110,4 +99,3 @@ def wrapper(*args, **kwds):
echo()
nocbreak()
endwin()
-wrapper.__text_signature__ = '(func, /, *args, **kwds)'
diff --git a/x64/Lib/dataclasses.py b/x64/Lib/dataclasses.py
index 74f7929..530d3e9 100644
--- a/x64/Lib/dataclasses.py
+++ b/x64/Lib/dataclasses.py
@@ -7,6 +7,7 @@ import keyword
import builtins
import functools
import _thread
+from types import GenericAlias
__all__ = ['dataclass',
@@ -199,11 +200,7 @@ _POST_INIT_NAME = '__post_init__'
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)')
-class _InitVarMeta(type):
- def __getitem__(self, params):
- return InitVar(params)
-
-class InitVar(metaclass=_InitVarMeta):
+class InitVar:
__slots__ = ('type', )
def __init__(self, type):
@@ -217,6 +214,9 @@ class InitVar(metaclass=_InitVarMeta):
type_name = repr(self.type)
return f'dataclasses.InitVar[{type_name}]'
+ def __class_getitem__(cls, type):
+ return InitVar(type)
+
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
@@ -285,6 +285,8 @@ class Field:
# it.
func(self.default, owner, name)
+ __class_getitem__ = classmethod(GenericAlias)
+
class _DataclassParams:
__slots__ = ('init',
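With the metaclass gone, InitVar[...] now goes through __class_getitem__, and Field becomes subscriptable via GenericAlias. A quick sketch of InitVar in use (standard dataclasses semantics, unchanged by this patch):

    from dataclasses import dataclass, InitVar

    @dataclass
    class C:
        x: int
        scale: InitVar[int] = 1   # InitVar[int] -> InitVar.__class_getitem__

        def __post_init__(self, scale):
            self.x *= scale

    print(C(2, scale=3).x)   # 6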
@@ -1092,7 +1094,7 @@ def _asdict_inner(obj, dict_factory):
# method, because:
# - it does not recurse in to the namedtuple fields and
# convert them to dicts (using dict_factory).
- # - I don't actually want to return a dict here. The the main
+ # - I don't actually want to return a dict here. The main
# use case here is json.dumps, and it handles converting
# namedtuples to lists. Admittedly we're losing some
# information here when we produce a json list instead of a
@@ -1231,7 +1233,7 @@ def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
unsafe_hash=unsafe_hash, frozen=frozen)
-def replace(*args, **changes):
+def replace(obj, /, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@@ -1245,17 +1247,6 @@ def replace(*args, **changes):
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
- if len(args) > 1:
- raise TypeError(f'replace() takes 1 positional argument but {len(args)} were given')
- if args:
- obj, = args
- elif 'obj' in changes:
- obj = changes.pop('obj')
- import warnings
- warnings.warn("Passing 'obj' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError("replace() missing 1 required positional argument: 'obj'")
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
@@ -1291,4 +1282,3 @@ def replace(*args, **changes):
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
-replace.__text_signature__ = '(obj, /, **kwargs)'
diff --git a/x64/Lib/datetime.py b/x64/Lib/datetime.py
index 0adf1dd..e508d99 100644
--- a/x64/Lib/datetime.py
+++ b/x64/Lib/datetime.py
@@ -4,6 +4,10 @@ See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
+__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo",
+ "MINYEAR", "MAXYEAR")
+
+
import time as _time
import math as _math
import sys
@@ -1091,7 +1095,7 @@ class date:
return self.toordinal() % 7 or 7
def isocalendar(self):
- """Return a 3-tuple containing ISO year, week number, and weekday.
+ """Return a named tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
@@ -1116,7 +1120,7 @@ class date:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
- return year, week+1, day+1
+ return _IsoCalendarDate(year, week+1, day+1)
# Pickle support.
@@ -1206,6 +1210,36 @@ class tzinfo:
else:
return (self.__class__, args, state)
+
+class IsoCalendarDate(tuple):
+
+ def __new__(cls, year, week, weekday, /):
+ return super().__new__(cls, (year, week, weekday))
+
+ @property
+ def year(self):
+ return self[0]
+
+ @property
+ def week(self):
+ return self[1]
+
+ @property
+ def weekday(self):
+ return self[2]
+
+ def __reduce__(self):
+ # This code is intended to pickle the object without making the
+ # class public. See https://bugs.python.org/msg352381
+ return (tuple, (tuple(self),))
+
+ def __repr__(self):
+ return (f'{self.__class__.__name__}'
+ f'(year={self[0]}, week={self[1]}, weekday={self[2]})')
+
+
+_IsoCalendarDate = IsoCalendarDate
+del IsoCalendarDate
_tzinfo_class = tzinfo
class time:
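The visible change: date.isocalendar() now returns an object with named fields while still unpacking like the old 3-tuple (and it pickles back to a plain tuple, per the __reduce__ above). For example, on Python >= 3.9:

    from datetime import date

    ic = date(2021, 1, 4).isocalendar()    # a Monday in ISO week 1
    print(ic.year, ic.week, ic.weekday)    # 2021 1 1
    year, week, weekday = ic               # tuple unpacking still works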
@@ -1418,7 +1452,8 @@ class time:
part is omitted if self.microsecond == 0.
The optional argument timespec specifies the number of additional
- terms of the time to include.
+ terms of the time to include. Valid options are 'auto', 'hours',
+ 'minutes', 'seconds', 'milliseconds' and 'microseconds'.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond, timespec)
@@ -1544,7 +1579,7 @@ class time:
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
- return (time, self._getstate(protocol))
+ return (self.__class__, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
@@ -1555,6 +1590,7 @@ time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
+
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
@@ -1902,7 +1938,8 @@ class datetime(date):
time, default 'T'.
The optional argument timespec specifies the number of additional
- terms of the time to include.
+ terms of the time to include. Valid options are 'auto', 'hours',
+ 'minutes', 'seconds', 'milliseconds' and 'microseconds'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
@@ -2510,7 +2547,7 @@ else:
_format_time, _format_offset, _is_leap, _isoweek1monday, _math,
_ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
_divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
- _parse_hh_mm_ss_ff)
+ _parse_hh_mm_ss_ff, _IsoCalendarDate)
# XXX Since import * above excludes names that start with _,
# docstring does not get overwritten. In the future, it may be
# appropriate to maintain a single module level docstring and
diff --git a/x64/Lib/difflib.py b/x64/Lib/difflib.py
index 5d75643..0dda80d 100644
--- a/x64/Lib/difflib.py
+++ b/x64/Lib/difflib.py
@@ -32,6 +32,7 @@ __all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
from heapq import nlargest as _nlargest
from collections import namedtuple as _namedtuple
+from types import GenericAlias
Match = _namedtuple('Match', 'a b size')
@@ -129,7 +130,7 @@ class SequenceMatcher:
set_seq2(b)
Set the second sequence to be compared.
- find_longest_match(alo, ahi, blo, bhi)
+ find_longest_match(alo=0, ahi=None, blo=0, bhi=None)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
@@ -333,9 +334,11 @@ class SequenceMatcher:
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
- def find_longest_match(self, alo, ahi, blo, bhi):
+ def find_longest_match(self, alo=0, ahi=None, blo=0, bhi=None):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
+ By default it will find the longest match in the entirety of a and b.
+
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
@@ -390,6 +393,10 @@ class SequenceMatcher:
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
+ if ahi is None:
+ ahi = len(a)
+ if bhi is None:
+ bhi = len(b)
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
@@ -685,6 +692,9 @@ class SequenceMatcher:
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
+ __class_getitem__ = classmethod(GenericAlias)
+
+
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
diff --git a/x64/Lib/dis.py b/x64/Lib/dis.py
index 10e5f7f..e289e17 100644
--- a/x64/Lib/dis.py
+++ b/x64/Lib/dis.py
@@ -542,7 +542,7 @@ def _test():
import argparse
parser = argparse.ArgumentParser()
- parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
+ parser.add_argument('infile', type=argparse.FileType('rb'), nargs='?', default='-')
args = parser.parse_args()
with args.infile as infile:
source = infile.read()
diff --git a/x64/Lib/doctest.py b/x64/Lib/doctest.py
index dcbcfe5..baa503c 100644
--- a/x64/Lib/doctest.py
+++ b/x64/Lib/doctest.py
@@ -211,6 +211,13 @@ def _normalize_module(module, depth=2):
else:
raise TypeError("Expected a module, string, or None")
+def _newline_convert(data):
+ # We have two cases to cover and we need to make sure we do
+ # them in the right order
+ for newline in ('\r\n', '\r'):
+ data = data.replace(newline, '\n')
+ return data
+
def _load_testfile(filename, package, module_relative, encoding):
if module_relative:
package = _normalize_module(package, 3)
@@ -221,7 +228,7 @@ def _load_testfile(filename, package, module_relative, encoding):
file_contents = file_contents.decode(encoding)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
- return file_contents.replace(os.linesep, '\n'), filename
+ return _newline_convert(file_contents), filename
with open(filename, encoding=encoding) as f:
return f.read(), filename
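The ordering in _newline_convert is load-bearing: replacing bare '\r' first would turn every '\r\n' into '\n\n'. A quick demonstration of the two-pass conversion:

    def newline_convert(data):
        for newline in ('\r\n', '\r'):   # CRLF must be handled before CR
            data = data.replace(newline, '\n')
        return data

    print(newline_convert('a\r\nb\rc').splitlines())   # ['a', 'b', 'c']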
@@ -1327,7 +1334,7 @@ class DocTestRunner:
try:
# Don't blink! This is where the user's code gets run.
exec(compile(example.source, filename, "single",
- compileflags, 1), test.globs)
+ compileflags, True), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
diff --git a/x64/Lib/dummy_threading.py b/x64/Lib/dummy_threading.py
deleted file mode 100644
index 1bb7eee..0000000
--- a/x64/Lib/dummy_threading.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
-
-The module ``_dummy_threading`` is added to ``sys.modules`` in order
-to not have ``threading`` considered imported. Had ``threading`` been
-directly imported it would have made all subsequent imports succeed
-regardless of whether ``_thread`` was available which is not desired.
-
-"""
-from sys import modules as sys_modules
-
-import _dummy_thread
-
-# Declaring now so as to not have to nest ``try``s to get proper clean-up.
-holding_thread = False
-holding_threading = False
-holding__threading_local = False
-
-try:
- # Could have checked if ``_thread`` was not in sys.modules and gone
- # a different route, but decided to mirror technique used with
- # ``threading`` below.
- if '_thread' in sys_modules:
- held_thread = sys_modules['_thread']
- holding_thread = True
- # Must have some module named ``_thread`` that implements its API
- # in order to initially import ``threading``.
- sys_modules['_thread'] = sys_modules['_dummy_thread']
-
- if 'threading' in sys_modules:
- # If ``threading`` is already imported, might as well prevent
- # trying to import it more than needed by saving it if it is
- # already imported before deleting it.
- held_threading = sys_modules['threading']
- holding_threading = True
- del sys_modules['threading']
-
- if '_threading_local' in sys_modules:
- # If ``_threading_local`` is already imported, might as well prevent
- # trying to import it more than needed by saving it if it is
- # already imported before deleting it.
- held__threading_local = sys_modules['_threading_local']
- holding__threading_local = True
- del sys_modules['_threading_local']
-
- import threading
- # Need a copy of the code kept somewhere...
- sys_modules['_dummy_threading'] = sys_modules['threading']
- del sys_modules['threading']
- sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
- del sys_modules['_threading_local']
- from _dummy_threading import *
- from _dummy_threading import __all__
-
-finally:
- # Put back ``threading`` if we overwrote earlier
-
- if holding_threading:
- sys_modules['threading'] = held_threading
- del held_threading
- del holding_threading
-
- # Put back ``_threading_local`` if we overwrote earlier
-
- if holding__threading_local:
- sys_modules['_threading_local'] = held__threading_local
- del held__threading_local
- del holding__threading_local
-
- # Put back ``thread`` if we overwrote, else del the entry we made
- if holding_thread:
- sys_modules['_thread'] = held_thread
- del held_thread
- else:
- del sys_modules['_thread']
- del holding_thread
-
- del _dummy_thread
- del sys_modules
diff --git a/x64/Lib/email/_header_value_parser.py b/x64/Lib/email/_header_value_parser.py
index 9c55ef7..51d355f 100644
--- a/x64/Lib/email/_header_value_parser.py
+++ b/x64/Lib/email/_header_value_parser.py
@@ -1218,12 +1218,21 @@ def get_bare_quoted_string(value):
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
+ valid_ew = False
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
+ valid_ew = True
except errors.HeaderParseError:
token, value = get_qcontent(value)
+ # Collapse the whitespace between two encoded words that occur in a
+ # bare-quoted-string.
+ if valid_ew and len(bare_quoted_string) > 1:
+ if (bare_quoted_string[-1].token_type == 'fws' and
+ bare_quoted_string[-2].token_type == 'encoded-word'):
+ bare_quoted_string[-1] = EWWhiteSpaceTerminal(
+ bare_quoted_string[-1], 'fws')
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
diff --git a/x64/Lib/email/contentmanager.py b/x64/Lib/email/contentmanager.py
index b904ded..b91fb0e 100644
--- a/x64/Lib/email/contentmanager.py
+++ b/x64/Lib/email/contentmanager.py
@@ -146,13 +146,13 @@ def _encode_text(string, charset, cte, policy):
def normal_body(lines): return b'\n'.join(lines) + b'\n'
if cte==None:
# Use heuristics to decide on the "best" encoding.
- try:
- return '7bit', normal_body(lines).decode('ascii')
- except UnicodeDecodeError:
- pass
- if (policy.cte_type == '8bit' and
- max(len(x) for x in lines) <= policy.max_line_length):
- return '8bit', normal_body(lines).decode('ascii', 'surrogateescape')
+ if max((len(x) for x in lines), default=0) <= policy.max_line_length:
+ try:
+ return '7bit', normal_body(lines).decode('ascii')
+ except UnicodeDecodeError:
+ pass
+ if policy.cte_type == '8bit':
+ return '8bit', normal_body(lines).decode('ascii', 'surrogateescape')
sniff = embedded_body(lines[:10])
sniff_qp = quoprimime.body_encode(sniff.decode('latin-1'),
policy.max_line_length)
diff --git a/x64/Lib/email/generator.py b/x64/Lib/email/generator.py
index ae670c2..c9b1216 100644
--- a/x64/Lib/email/generator.py
+++ b/x64/Lib/email/generator.py
@@ -186,7 +186,11 @@ class Generator:
# If we munged the cte, copy the message again and re-fix the CTE.
if munge_cte:
msg = deepcopy(msg)
- msg.replace_header('content-transfer-encoding', munge_cte[0])
+ # Preserve the header order if the CTE header already exists.
+ if msg.get('content-transfer-encoding') is None:
+ msg['Content-Transfer-Encoding'] = munge_cte[0]
+ else:
+ msg.replace_header('content-transfer-encoding', munge_cte[0])
msg.replace_header('content-type', munge_cte[1])
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
diff --git a/x64/Lib/email/headerregistry.py b/x64/Lib/email/headerregistry.py
index 8d1a202..5d84fc0 100644
--- a/x64/Lib/email/headerregistry.py
+++ b/x64/Lib/email/headerregistry.py
@@ -31,6 +31,11 @@ class Address:
without any Content Transfer Encoding.
"""
+
+ inputs = ''.join(filter(None, (display_name, username, domain, addr_spec)))
+ if '\r' in inputs or '\n' in inputs:
+ raise ValueError("invalid arguments; address parts cannot contain CR or LF")
+
# This clause with its potential 'raise' may only happen when an
# application program creates an Address object using an addr_spec
# keyword. The email library code itself must always supply username
@@ -69,11 +74,9 @@ class Address:
"""The addr_spec (username@domain) portion of the address, quoted
according to RFC 5322 rules, but with no Content Transfer Encoding.
"""
- nameset = set(self.username)
- if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS):
- lp = parser.quote_string(self.username)
- else:
- lp = self.username
+ lp = self.username
+ if not parser.DOT_ATOM_ENDS.isdisjoint(lp):
+ lp = parser.quote_string(lp)
if self.domain:
return lp + '@' + self.domain
if not lp:
@@ -86,19 +89,17 @@ class Address:
self.display_name, self.username, self.domain)
def __str__(self):
- nameset = set(self.display_name)
- if len(nameset) > len(nameset-parser.SPECIALS):
- disp = parser.quote_string(self.display_name)
- else:
- disp = self.display_name
+ disp = self.display_name
+ if not parser.SPECIALS.isdisjoint(disp):
+ disp = parser.quote_string(disp)
if disp:
addr_spec = '' if self.addr_spec=='<>' else self.addr_spec
return "{} <{}>".format(disp, addr_spec)
return self.addr_spec
def __eq__(self, other):
- if type(other) != type(self):
- return False
+ if not isinstance(other, Address):
+ return NotImplemented
return (self.display_name == other.display_name and
self.username == other.username and
self.domain == other.domain)
@@ -141,17 +142,15 @@ class Group:
if self.display_name is None and len(self.addresses)==1:
return str(self.addresses[0])
disp = self.display_name
- if disp is not None:
- nameset = set(disp)
- if len(nameset) > len(nameset-parser.SPECIALS):
- disp = parser.quote_string(disp)
+ if disp is not None and not parser.SPECIALS.isdisjoint(disp):
+ disp = parser.quote_string(disp)
adrstr = ", ".join(str(x) for x in self.addresses)
adrstr = ' ' + adrstr if adrstr else adrstr
return "{}:{};".format(disp, adrstr)
def __eq__(self, other):
- if type(other) != type(self):
- return False
+ if not isinstance(other, Group):
+ return NotImplemented
return (self.display_name == other.display_name and
self.addresses == other.addresses)
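The old size comparison and the new isdisjoint() test answer the same question -- does the name contain any special character? -- but isdisjoint() short-circuits and builds no intermediate set. An illustrative reduction (this SPECIALS is a stand-in, not the parser's actual constant):

    SPECIALS = set('()<>@,:;.\\"[]')
    for name in ('Alice', 'Alice, PhD'):
        needs_quoting = not SPECIALS.isdisjoint(name)
        print(name, '->', 'quote' if needs_quoting else 'bare')
    # Alice -> bare
    # Alice, PhD -> quote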
diff --git a/x64/Lib/email/message.py b/x64/Lib/email/message.py
index 1262602..3701b30 100644
--- a/x64/Lib/email/message.py
+++ b/x64/Lib/email/message.py
@@ -141,7 +141,7 @@ class Message:
header. For backward compatibility reasons, if maxheaderlen is
not specified it defaults to 0, so you must override it explicitly
if you want a different maxheaderlen. 'policy' is passed to the
- Generator instance used to serialize the mesasge; if it is not
+ Generator instance used to serialize the message; if it is not
specified the policy associated with the message instance is used.
If the message object contains binary data that is not encoded
@@ -958,7 +958,7 @@ class MIMEPart(Message):
header. maxheaderlen is retained for backward compatibility with the
base Message class, but defaults to None, meaning that the policy value
for max_line_length controls the header maximum length. 'policy' is
- passed to the Generator instance used to serialize the mesasge; if it
+ passed to the Generator instance used to serialize the message; if it
is not specified the policy associated with the message instance is
used.
"""
diff --git a/x64/Lib/email/utils.py b/x64/Lib/email/utils.py
index 858f620..1a7719d 100644
--- a/x64/Lib/email/utils.py
+++ b/x64/Lib/email/utils.py
@@ -81,7 +81,7 @@ def formataddr(pair, charset='utf-8'):
If the first element of pair is false, then the second element is
returned unmodified.
- Optional charset if given is the character set that is used to encode
+ The optional charset is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'.
@@ -259,21 +259,13 @@ def decode_params(params):
params is a sequence of 2-tuples containing (param name, string value).
"""
- # Copy params so we don't mess with the original
- params = params[:]
- new_params = []
+ new_params = [params[0]]
# Map parameter's name to a list of continuations. The values are a
# 3-tuple of the continuation number, the string value, and a flag
# specifying whether a particular segment is %-encoded.
rfc2231_params = {}
- name, value = params.pop(0)
- new_params.append((name, value))
- while params:
- name, value = params.pop(0)
- if name.endswith('*'):
- encoded = True
- else:
- encoded = False
+ for name, value in params[1:]:
+ encoded = name.endswith('*')
value = unquote(value)
mo = rfc2231_continuation.match(name)
if mo:
diff --git a/x64/Lib/encodings/aliases.py b/x64/Lib/encodings/aliases.py
index 2444f9f..d85afd6 100644
--- a/x64/Lib/encodings/aliases.py
+++ b/x64/Lib/encodings/aliases.py
@@ -450,6 +450,7 @@ aliases = {
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
+ 'mac_centeuro' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
@@ -493,9 +494,6 @@ aliases = {
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
- # tactis codec
- 'tis260' : 'tactis',
-
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
diff --git a/x64/Lib/encodings/mac_centeuro.py b/x64/Lib/encodings/mac_centeuro.py
deleted file mode 100644
index 5785a0e..0000000
--- a/x64/Lib/encodings/mac_centeuro.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-centeuro',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- '\x00' # 0x00 -> CONTROL CHARACTER
- '\x01' # 0x01 -> CONTROL CHARACTER
- '\x02' # 0x02 -> CONTROL CHARACTER
- '\x03' # 0x03 -> CONTROL CHARACTER
- '\x04' # 0x04 -> CONTROL CHARACTER
- '\x05' # 0x05 -> CONTROL CHARACTER
- '\x06' # 0x06 -> CONTROL CHARACTER
- '\x07' # 0x07 -> CONTROL CHARACTER
- '\x08' # 0x08 -> CONTROL CHARACTER
- '\t' # 0x09 -> CONTROL CHARACTER
- '\n' # 0x0A -> CONTROL CHARACTER
- '\x0b' # 0x0B -> CONTROL CHARACTER
- '\x0c' # 0x0C -> CONTROL CHARACTER
- '\r' # 0x0D -> CONTROL CHARACTER
- '\x0e' # 0x0E -> CONTROL CHARACTER
- '\x0f' # 0x0F -> CONTROL CHARACTER
- '\x10' # 0x10 -> CONTROL CHARACTER
- '\x11' # 0x11 -> CONTROL CHARACTER
- '\x12' # 0x12 -> CONTROL CHARACTER
- '\x13' # 0x13 -> CONTROL CHARACTER
- '\x14' # 0x14 -> CONTROL CHARACTER
- '\x15' # 0x15 -> CONTROL CHARACTER
- '\x16' # 0x16 -> CONTROL CHARACTER
- '\x17' # 0x17 -> CONTROL CHARACTER
- '\x18' # 0x18 -> CONTROL CHARACTER
- '\x19' # 0x19 -> CONTROL CHARACTER
- '\x1a' # 0x1A -> CONTROL CHARACTER
- '\x1b' # 0x1B -> CONTROL CHARACTER
- '\x1c' # 0x1C -> CONTROL CHARACTER
- '\x1d' # 0x1D -> CONTROL CHARACTER
- '\x1e' # 0x1E -> CONTROL CHARACTER
- '\x1f' # 0x1F -> CONTROL CHARACTER
- ' ' # 0x20 -> SPACE
- '!' # 0x21 -> EXCLAMATION MARK
- '"' # 0x22 -> QUOTATION MARK
- '#' # 0x23 -> NUMBER SIGN
- '$' # 0x24 -> DOLLAR SIGN
- '%' # 0x25 -> PERCENT SIGN
- '&' # 0x26 -> AMPERSAND
- "'" # 0x27 -> APOSTROPHE
- '(' # 0x28 -> LEFT PARENTHESIS
- ')' # 0x29 -> RIGHT PARENTHESIS
- '*' # 0x2A -> ASTERISK
- '+' # 0x2B -> PLUS SIGN
- ',' # 0x2C -> COMMA
- '-' # 0x2D -> HYPHEN-MINUS
- '.' # 0x2E -> FULL STOP
- '/' # 0x2F -> SOLIDUS
- '0' # 0x30 -> DIGIT ZERO
- '1' # 0x31 -> DIGIT ONE
- '2' # 0x32 -> DIGIT TWO
- '3' # 0x33 -> DIGIT THREE
- '4' # 0x34 -> DIGIT FOUR
- '5' # 0x35 -> DIGIT FIVE
- '6' # 0x36 -> DIGIT SIX
- '7' # 0x37 -> DIGIT SEVEN
- '8' # 0x38 -> DIGIT EIGHT
- '9' # 0x39 -> DIGIT NINE
- ':' # 0x3A -> COLON
- ';' # 0x3B -> SEMICOLON
- '<' # 0x3C -> LESS-THAN SIGN
- '=' # 0x3D -> EQUALS SIGN
- '>' # 0x3E -> GREATER-THAN SIGN
- '?' # 0x3F -> QUESTION MARK
- '@' # 0x40 -> COMMERCIAL AT
- 'A' # 0x41 -> LATIN CAPITAL LETTER A
- 'B' # 0x42 -> LATIN CAPITAL LETTER B
- 'C' # 0x43 -> LATIN CAPITAL LETTER C
- 'D' # 0x44 -> LATIN CAPITAL LETTER D
- 'E' # 0x45 -> LATIN CAPITAL LETTER E
- 'F' # 0x46 -> LATIN CAPITAL LETTER F
- 'G' # 0x47 -> LATIN CAPITAL LETTER G
- 'H' # 0x48 -> LATIN CAPITAL LETTER H
- 'I' # 0x49 -> LATIN CAPITAL LETTER I
- 'J' # 0x4A -> LATIN CAPITAL LETTER J
- 'K' # 0x4B -> LATIN CAPITAL LETTER K
- 'L' # 0x4C -> LATIN CAPITAL LETTER L
- 'M' # 0x4D -> LATIN CAPITAL LETTER M
- 'N' # 0x4E -> LATIN CAPITAL LETTER N
- 'O' # 0x4F -> LATIN CAPITAL LETTER O
- 'P' # 0x50 -> LATIN CAPITAL LETTER P
- 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- 'R' # 0x52 -> LATIN CAPITAL LETTER R
- 'S' # 0x53 -> LATIN CAPITAL LETTER S
- 'T' # 0x54 -> LATIN CAPITAL LETTER T
- 'U' # 0x55 -> LATIN CAPITAL LETTER U
- 'V' # 0x56 -> LATIN CAPITAL LETTER V
- 'W' # 0x57 -> LATIN CAPITAL LETTER W
- 'X' # 0x58 -> LATIN CAPITAL LETTER X
- 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- '[' # 0x5B -> LEFT SQUARE BRACKET
- '\\' # 0x5C -> REVERSE SOLIDUS
- ']' # 0x5D -> RIGHT SQUARE BRACKET
- '^' # 0x5E -> CIRCUMFLEX ACCENT
- '_' # 0x5F -> LOW LINE
- '`' # 0x60 -> GRAVE ACCENT
- 'a' # 0x61 -> LATIN SMALL LETTER A
- 'b' # 0x62 -> LATIN SMALL LETTER B
- 'c' # 0x63 -> LATIN SMALL LETTER C
- 'd' # 0x64 -> LATIN SMALL LETTER D
- 'e' # 0x65 -> LATIN SMALL LETTER E
- 'f' # 0x66 -> LATIN SMALL LETTER F
- 'g' # 0x67 -> LATIN SMALL LETTER G
- 'h' # 0x68 -> LATIN SMALL LETTER H
- 'i' # 0x69 -> LATIN SMALL LETTER I
- 'j' # 0x6A -> LATIN SMALL LETTER J
- 'k' # 0x6B -> LATIN SMALL LETTER K
- 'l' # 0x6C -> LATIN SMALL LETTER L
- 'm' # 0x6D -> LATIN SMALL LETTER M
- 'n' # 0x6E -> LATIN SMALL LETTER N
- 'o' # 0x6F -> LATIN SMALL LETTER O
- 'p' # 0x70 -> LATIN SMALL LETTER P
- 'q' # 0x71 -> LATIN SMALL LETTER Q
- 'r' # 0x72 -> LATIN SMALL LETTER R
- 's' # 0x73 -> LATIN SMALL LETTER S
- 't' # 0x74 -> LATIN SMALL LETTER T
- 'u' # 0x75 -> LATIN SMALL LETTER U
- 'v' # 0x76 -> LATIN SMALL LETTER V
- 'w' # 0x77 -> LATIN SMALL LETTER W
- 'x' # 0x78 -> LATIN SMALL LETTER X
- 'y' # 0x79 -> LATIN SMALL LETTER Y
- 'z' # 0x7A -> LATIN SMALL LETTER Z
- '{' # 0x7B -> LEFT CURLY BRACKET
- '|' # 0x7C -> VERTICAL LINE
- '}' # 0x7D -> RIGHT CURLY BRACKET
- '~' # 0x7E -> TILDE
- '\x7f' # 0x7F -> CONTROL CHARACTER
- '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- '\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
- '\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
- '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- '\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
- '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- '\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
- '\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
- '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- '\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
- '\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
- '\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
- '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- '\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
- '\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
- '\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
- '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- '\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
- '\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
- '\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
- '\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- '\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
- '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- '\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
- '\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
- '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- '\u2020' # 0xA0 -> DAGGER
- '\xb0' # 0xA1 -> DEGREE SIGN
- '\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
- '\xa3' # 0xA3 -> POUND SIGN
- '\xa7' # 0xA4 -> SECTION SIGN
- '\u2022' # 0xA5 -> BULLET
- '\xb6' # 0xA6 -> PILCROW SIGN
- '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- '\xae' # 0xA8 -> REGISTERED SIGN
- '\xa9' # 0xA9 -> COPYRIGHT SIGN
- '\u2122' # 0xAA -> TRADE MARK SIGN
- '\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
- '\xa8' # 0xAC -> DIAERESIS
- '\u2260' # 0xAD -> NOT EQUAL TO
- '\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
- '\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
- '\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
- '\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
- '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- '\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
- '\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
- '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- '\u2211' # 0xB7 -> N-ARY SUMMATION
- '\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
- '\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
- '\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
- '\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
- '\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
- '\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
- '\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
- '\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
- '\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
- '\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
- '\xac' # 0xC2 -> NOT SIGN
- '\u221a' # 0xC3 -> SQUARE ROOT
- '\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
- '\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
- '\u2206' # 0xC6 -> INCREMENT
- '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- '\xa0' # 0xCA -> NO-BREAK SPACE
- '\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
- '\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- '\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
- '\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
- '\u2013' # 0xD0 -> EN DASH
- '\u2014' # 0xD1 -> EM DASH
- '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- '\xf7' # 0xD6 -> DIVISION SIGN
- '\u25ca' # 0xD7 -> LOZENGE
- '\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
- '\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
- '\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
- '\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
- '\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- '\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- '\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
- '\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
- '\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
- '\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
- '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- '\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
- '\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
- '\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
- '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- '\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
- '\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
- '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- '\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
- '\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
- '\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
- '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- '\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
- '\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
- '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- '\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
- '\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- '\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
- '\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
- '\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
- '\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
- '\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
- '\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
- '\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- '\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
- '\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
- '\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
- '\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/x64/Lib/encodings/punycode.py b/x64/Lib/encodings/punycode.py
index 66c5101..1c57264 100644
--- a/x64/Lib/encodings/punycode.py
+++ b/x64/Lib/encodings/punycode.py
@@ -143,7 +143,7 @@ def decode_generalized_number(extended, extpos, bias, errors):
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
- % extended[extpos])
+ % extended[extpos-1])
else:
return extpos, None
t = T(j, bias)
diff --git a/x64/Lib/enum.py b/x64/Lib/enum.py
index 108d389..ebadd9f 100644
--- a/x64/Lib/enum.py
+++ b/x64/Lib/enum.py
@@ -60,6 +60,7 @@ class _EnumDict(dict):
self._member_names = []
self._last_values = []
self._ignore = []
+ self._auto_called = False
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
@@ -77,6 +78,9 @@ class _EnumDict(dict):
):
raise ValueError('_names_ are reserved for future Enum use')
if key == '_generate_next_value_':
+ # check if members already defined as auto()
+ if self._auto_called:
+ raise TypeError("_generate_next_value_ must be defined before members")
setattr(self, '_generate_next_value', value)
elif key == '_ignore_':
if isinstance(value, str):
@@ -102,6 +106,7 @@ class _EnumDict(dict):
if isinstance(value, auto):
if value.value == _auto_null:
value.value = self._generate_next_value(key, 1, len(self._member_names), self._last_values[:])
+ self._auto_called = True
value = value.value
self._member_names.append(key)
self._last_values.append(value)
@@ -118,10 +123,12 @@ class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
+ # check that previous enum members do not exist
+ metacls._check_for_existing_members(cls, bases)
# create the namespace dict
enum_dict = _EnumDict()
# inherit previous flags and _generate_next_value_ function
- member_type, first_enum = metacls._get_mixins_(bases)
+ member_type, first_enum = metacls._get_mixins_(cls, bases)
if first_enum is not None:
enum_dict['_generate_next_value_'] = getattr(first_enum, '_generate_next_value_', None)
return enum_dict
@@ -137,7 +144,7 @@ class EnumMeta(type):
ignore = classdict['_ignore_']
for key in ignore:
classdict.pop(key, None)
- member_type, first_enum = metacls._get_mixins_(bases)
+ member_type, first_enum = metacls._get_mixins_(cls, bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
@@ -244,7 +251,11 @@ class EnumMeta(type):
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
+ # however, if the method is defined in the Enum itself, don't replace
+ # it
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+ if name in classdict:
+ continue
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
@@ -392,7 +403,7 @@ class EnumMeta(type):
"""
metacls = cls.__class__
bases = (cls, ) if type is None else (type, cls)
- _, first_enum = cls._get_mixins_(bases)
+ _, first_enum = cls._get_mixins_(cls, bases)
classdict = metacls.__prepare__(class_name, bases)
# special processing needed for names?
@@ -420,7 +431,7 @@ class EnumMeta(type):
if module is None:
try:
module = sys._getframe(2).f_globals['__name__']
- except (AttributeError, ValueError, KeyError) as exc:
+ except (AttributeError, ValueError, KeyError):
pass
if module is None:
_make_class_unpicklable(enum_class)
@@ -464,14 +475,15 @@ class EnumMeta(type):
module_globals[name] = cls
return cls
- def _convert(cls, *args, **kwargs):
- import warnings
- warnings.warn("_convert is deprecated and will be removed in 3.9, use "
- "_convert_ instead.", DeprecationWarning, stacklevel=2)
- return cls._convert_(*args, **kwargs)
+ @staticmethod
+ def _check_for_existing_members(class_name, bases):
+ for chain in bases:
+ for base in chain.__mro__:
+ if issubclass(base, Enum) and base._member_names_:
+ raise TypeError("%s: cannot extend enumeration %r" % (class_name, base.__name__))
@staticmethod
- def _get_mixins_(bases):
+ def _get_mixins_(class_name, bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
@@ -482,14 +494,25 @@ class EnumMeta(type):
return object, Enum
def _find_data_type(bases):
+ data_types = []
for chain in bases:
+ candidate = None
for base in chain.__mro__:
if base is object:
continue
elif '__new__' in base.__dict__:
if issubclass(base, Enum):
continue
- return base
+ data_types.append(candidate or base)
+ break
+ elif not issubclass(base, Enum):
+ candidate = base
+ if len(data_types) > 1:
+ raise TypeError('%r: too many data types: %r' % (class_name, data_types))
+ elif data_types:
+ return data_types[0]
+ else:
+ return None
# ensure final parent class is an Enum derivative, find any concrete
# data type, and check that Enum has no members
@@ -583,7 +606,7 @@ class Enum(metaclass=EnumMeta):
if isinstance(result, cls):
return result
else:
- ve_exc = ValueError("%r is not a valid %s" % (value, cls.__name__))
+ ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__))
if result is None and exc is None:
raise ve_exc
elif exc is None:
@@ -605,7 +628,7 @@ class Enum(metaclass=EnumMeta):
@classmethod
def _missing_(cls, value):
- raise ValueError("%r is not a valid %s" % (value, cls.__name__))
+ return None
def __repr__(self):
return "<%s.%s: %r>" % (
@@ -628,8 +651,9 @@ class Enum(metaclass=EnumMeta):
# we can get strange results with the Enum name showing up instead of
# the value
- # pure Enum branch
- if self._member_type_ is object:
+ # pure Enum branch, or branch with __str__ explicitly overridden
+ str_overridden = type(self).__str__ != Enum.__str__
+ if self._member_type_ is object or str_overridden:
cls = str
val = str(self)
# mix-in branch
@@ -711,7 +735,7 @@ class Flag(Enum):
# verify all bits are accounted for
_, extra_flags = _decompose(cls, value)
if extra_flags:
- raise ValueError("%r is not a valid %s" % (value, cls.__name__))
+ raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
# construct a singleton enum pseudo-member
pseudo_member = object.__new__(cls)
pseudo_member._name_ = None
@@ -785,7 +809,7 @@ class IntFlag(int, Flag):
@classmethod
def _missing_(cls, value):
if not isinstance(value, int):
- raise ValueError("%r is not a valid %s" % (value, cls.__name__))
+ raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
new_member = cls._create_pseudo_member_(value)
return new_member
@@ -866,28 +890,20 @@ def _decompose(flag, value):
# _decompose is only called if the value is not named
not_covered = value
negative = value < 0
- # issue29167: wrap accesses to _value2member_map_ in a list to avoid race
- # conditions between iterating over it and having more pseudo-
- # members added to it
- if negative:
- # only check for named flags
- flags_to_check = [
- (m, v)
- for v, m in list(flag._value2member_map_.items())
- if m.name is not None
- ]
- else:
- # check for named flags and powers-of-two flags
- flags_to_check = [
- (m, v)
- for v, m in list(flag._value2member_map_.items())
- if m.name is not None or _power_of_two(v)
- ]
members = []
- for member, member_value in flags_to_check:
+ for member in flag:
+ member_value = member.value
if member_value and member_value & value == member_value:
members.append(member)
not_covered &= ~member_value
+ if not negative:
+ tmp = not_covered
+ while tmp:
+ flag_value = 2 ** _high_bit(tmp)
+ if flag_value in flag._value2member_map_:
+ members.append(flag._value2member_map_[flag_value])
+ not_covered &= ~flag_value
+ tmp &= ~flag_value
if not members and value in flag._value2member_map_:
members.append(flag._value2member_map_[value])
members.sort(key=lambda m: m._value_, reverse=True)
@@ -895,8 +911,3 @@ def _decompose(flag, value):
# we have the breakdown, don't need the value member itself
members.pop(0)
return members, not_covered
-
-def _power_of_two(value):
- if value < 1:
- return False
- return value == 2 ** _high_bit(value)
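Two behavioral notes from the enum hunks above: extending an enum that already has members now fails at class-creation time via _check_for_existing_members, and the default _missing_ returns None so EnumMeta raises the single canonical ValueError itself. A quick check of the former (message wording per the code above):

    from enum import Enum

    class Color(Enum):
        RED = 1

    try:
        class MoreColor(Color):
            BLUE = 2
    except TypeError as e:
        print(e)   # MoreColor: cannot extend enumeration 'Color'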
diff --git a/x64/Lib/filecmp.py b/x64/Lib/filecmp.py
index e5ad839..7a4da6b 100644
--- a/x64/Lib/filecmp.py
+++ b/x64/Lib/filecmp.py
@@ -13,6 +13,7 @@ Functions:
import os
import stat
from itertools import filterfalse
+from types import GenericAlias
__all__ = ['clear_cache', 'cmp', 'dircmp', 'cmpfiles', 'DEFAULT_IGNORES']
@@ -156,12 +157,12 @@ class dircmp:
ok = 1
try:
a_stat = os.stat(a_path)
- except OSError as why:
+ except OSError:
# print('Can\'t stat', a_path, ':', why.args[1])
ok = 0
try:
b_stat = os.stat(b_path)
- except OSError as why:
+ except OSError:
# print('Can\'t stat', b_path, ':', why.args[1])
ok = 0
@@ -247,6 +248,9 @@ class dircmp:
self.methodmap[attr](self)
return getattr(self, attr)
+ __class_getitem__ = classmethod(GenericAlias)
+
+
def cmpfiles(a, b, common, shallow=True):
"""Compare common files in two directories.
diff --git a/x64/Lib/fileinput.py b/x64/Lib/fileinput.py
index c1b0ec9..0c31f93 100644
--- a/x64/Lib/fileinput.py
+++ b/x64/Lib/fileinput.py
@@ -73,6 +73,7 @@ XXX Possible additions:
"""
import sys, os
+from types import GenericAlias
__all__ = ["input", "close", "nextfile", "filename", "lineno", "filelineno",
"fileno", "isfirstline", "isstdin", "FileInput", "hook_compressed",
@@ -391,6 +392,8 @@ class FileInput:
def isstdin(self):
return self._isstdin
+ __class_getitem__ = classmethod(GenericAlias)
+
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
diff --git a/x64/Lib/fnmatch.py b/x64/Lib/fnmatch.py
index b98e641..0eb1802 100644
--- a/x64/Lib/fnmatch.py
+++ b/x64/Lib/fnmatch.py
@@ -16,6 +16,12 @@ import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
+# Build a thread-safe incrementing counter to help create unique regexp group
+# names across calls.
+from itertools import count
+_nextgroupnum = count().__next__
+del count
+
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
@@ -77,15 +83,19 @@ def translate(pat):
There is no way to quote meta-characters.
"""
+ STAR = object()
+ res = []
+ add = res.append
i, n = 0, len(pat)
- res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
- res = res + '.*'
+ # compress consecutive `*` into one
+ if (not res) or res[-1] is not STAR:
+ add(STAR)
elif c == '?':
- res = res + '.'
+ add('.')
elif c == '[':
j = i
if j < n and pat[j] == '!':
@@ -95,7 +105,7 @@ def translate(pat):
while j < n and pat[j] != ']':
j = j+1
if j >= n:
- res = res + '\\['
+ add('\\[')
else:
stuff = pat[i:j]
if '--' not in stuff:
@@ -122,7 +132,52 @@ def translate(pat):
stuff = '^' + stuff[1:]
elif stuff[0] in ('^', '['):
stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
+ add(f'[{stuff}]')
+ else:
+ add(re.escape(c))
+ assert i == n
+
+ # Deal with STARs.
+ inp = res
+ res = []
+ add = res.append
+ i, n = 0, len(inp)
+ # Fixed pieces at the start?
+ while i < n and inp[i] is not STAR:
+ add(inp[i])
+ i += 1
+ # Now deal with STAR fixed STAR fixed ...
+ # For an interior `STAR fixed` pairing, we want to do a minimal
+ # .*? match followed by `fixed`, with no possibility of backtracking.
+ # We can't spell that directly, but can trick it into working by matching
+ # .*?fixed
+ # in a lookahead assertion, save the matched part in a group, then
+ # consume that group via a backreference. If the overall match fails,
+ # the lookahead assertion won't try alternatives. So the translation is:
+ # (?=(?P<name>.*?fixed))(?P=name)
+ # Group names are created as needed: g0, g1, g2, ...
+ # The numbers are obtained from _nextgroupnum() to ensure they're unique
+ # across calls and across threads. This is because people rely on the
+ # undocumented ability to join multiple translate() results together via
+ # "|" to build large regexps matching "one of many" shell patterns.
+ while i < n:
+ assert inp[i] is STAR
+ i += 1
+ if i == n:
+ add(".*")
+ break
+ assert inp[i] is not STAR
+ fixed = []
+ while i < n and inp[i] is not STAR:
+ fixed.append(inp[i])
+ i += 1
+ fixed = "".join(fixed)
+ if i == n:
+ add(".*")
+ add(fixed)
else:
- res = res + re.escape(c)
- return r'(?s:%s)\Z' % res
+ groupnum = _nextgroupnum()
+ add(f"(?=(?P<g{groupnum}>.*?{fixed}))(?P=g{groupnum})")
+ assert i == n
+ res = "".join(res)
+ return fr'(?s:{res})\Z'
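The payoff of the lookahead trick: once the minimal .*? has located the fixed part, the group is consumed via backreference and the engine cannot backtrack into it, avoiding the pathological behavior of naive '.*' chains. Observed on Python >= 3.9 (group numbers vary between calls):

    import fnmatch, re

    pat = fnmatch.translate('a*b*c')
    print(pat)   # e.g. (?s:a(?=(?P<g0>.*?b))(?P=g0).*c)\Z
    print(bool(re.match(pat, 'aXXbYYc')))   # True
    print(bool(re.match(pat, 'aXXcYYb')))   # False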
diff --git a/x64/Lib/fractions.py b/x64/Lib/fractions.py
index e4fcc89..de3e23b 100644
--- a/x64/Lib/fractions.py
+++ b/x64/Lib/fractions.py
@@ -10,31 +10,9 @@ import operator
import re
import sys
-__all__ = ['Fraction', 'gcd']
+__all__ = ['Fraction']
-
-def gcd(a, b):
- """Calculate the Greatest Common Divisor of a and b.
-
- Unless b==0, the result will have the same sign as b (so that when
- b is divided by it, the result comes out positive).
- """
- import warnings
- warnings.warn('fractions.gcd() is deprecated. Use math.gcd() instead.',
- DeprecationWarning, 2)
- if type(a) is int is type(b):
- if (b or a) < 0:
- return -math.gcd(a, b)
- return math.gcd(a, b)
- return _gcd(a, b)
-
-def _gcd(a, b):
- # Supports non-integers for backward compatibility.
- while b:
- a, b = b, a%b
- return a
-
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
@@ -177,13 +155,9 @@ class Fraction(numbers.Rational):
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
if _normalize:
- if type(numerator) is int is type(denominator):
- # *very* normal case
- g = math.gcd(numerator, denominator)
- if denominator < 0:
- g = -g
- else:
- g = _gcd(numerator, denominator)
+ g = math.gcd(numerator, denominator)
+ if denominator < 0:
+ g = -g
numerator //= g
denominator //= g
self._numerator = numerator
@@ -556,23 +530,34 @@ class Fraction(numbers.Rational):
def __hash__(self):
"""hash(self)"""
- # XXX since this method is expensive, consider caching the result
-
- # In order to make sure that the hash of a Fraction agrees
- # with the hash of a numerically equal integer, float or
- # Decimal instance, we follow the rules for numeric hashes
- # outlined in the documentation. (See library docs, 'Built-in
- # Types').
+ # To make sure that the hash of a Fraction agrees with the hash
+ # of a numerically equal integer, float or Decimal instance, we
+ # follow the rules for numeric hashes outlined in the
+ # documentation. (See library docs, 'Built-in Types').
- # dinv is the inverse of self._denominator modulo the prime
- # _PyHASH_MODULUS, or 0 if self._denominator is divisible by
- # _PyHASH_MODULUS.
- dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
- if not dinv:
+ try:
+ dinv = pow(self._denominator, -1, _PyHASH_MODULUS)
+ except ValueError:
+ # ValueError means there is no modular inverse.
hash_ = _PyHASH_INF
else:
- hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
- result = hash_ if self >= 0 else -hash_
+ # The general algorithm now specifies that the absolute value of
+ # the hash is
+ # (|N| * dinv) % P
+ # where N is self._numerator and P is _PyHASH_MODULUS. That's
+ # optimized here in two ways: first, for a non-negative int i,
+ # hash(i) == i % P, but the int hash implementation doesn't need
+ # to divide, and is faster than doing % P explicitly. So we do
+ # hash(|N| * dinv)
+ # instead. Second, N is unbounded, so its product with dinv may
+ # be arbitrarily expensive to compute. The final answer is the
+ # same if we use the bounded |N| % P instead, which can again
+ # be done with an int hash() call. If 0 <= i < P, hash(i) == i,
+ # so this nested hash() call wastes a bit of time making a
+ # redundant copy when |N| < P, but can save an arbitrarily large
+ # amount of computation for large |N|.
+ hash_ = hash(hash(abs(self._numerator)) * dinv)
+ result = hash_ if self._numerator >= 0 else -hash_
return -2 if result == -1 else result
def __eq__(a, b):
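The rewrite leans on three-argument pow() with exponent -1 (available since Python 3.8), which computes a modular inverse directly and raises ValueError when none exists -- exactly the case mapped to _PyHASH_INF. For instance:

    P = 2**31 - 1             # a Mersenne prime, standing in for
                              # _PyHASH_MODULUS in this sketch
    dinv = pow(3, -1, P)
    print(dinv)               # 1431655765
    print((3 * dinv) % P)     # 1, confirming the inverse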
diff --git a/x64/Lib/ftplib.py b/x64/Lib/ftplib.py
index 58a46bc..1f760ed 100644
--- a/x64/Lib/ftplib.py
+++ b/x64/Lib/ftplib.py
@@ -72,17 +72,17 @@ B_CRLF = b'\r\n'
# The class itself
class FTP:
-
'''An FTP client class.
To create a connection, call the class using these arguments:
- host, user, passwd, acct, timeout
+ host, user, passwd, acct, timeout, source_address, encoding
The first four arguments are all strings, and have default value ''.
- timeout must be numeric and defaults to None if not passed,
- meaning that no timeout will be set on any ftp socket(s)
+ The parameter 'timeout' must be numeric and defaults to None if not
+ passed, meaning that no timeout will be set on any ftp socket(s).
If a timeout is passed, then this is now the default timeout for all ftp
socket operations for this instance.
+ The last parameter is the encoding of filenames, which defaults to utf-8.
Then use self.connect() with optional host and port argument.
@@ -103,14 +103,16 @@ class FTP:
file = None
welcome = None
passiveserver = 1
- encoding = "latin-1"
- # Initialization method (called by class instantiation).
- # Initialize host to localhost, port to standard ftp port
- # Optional arguments are host (for connect()),
- # and user, passwd, acct (for login())
def __init__(self, host='', user='', passwd='', acct='',
- timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
+ timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
+ encoding='utf-8'):
+ """Initialization method (called by class instantiation).
+ Initialize host to localhost, port to standard ftp port.
+ Optional arguments are host (for connect()),
+ and user, passwd, acct (for login()).
+ """
+ self.encoding = encoding
self.source_address = source_address
self.timeout = timeout
if host:
@@ -146,6 +148,8 @@ class FTP:
self.port = port
if timeout != -999:
self.timeout = timeout
+ if self.timeout is not None and not self.timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
if source_address is not None:
self.source_address = source_address
sys.audit("ftplib.connect", self, self.host, self.port)
@@ -704,9 +708,10 @@ else:
'''
ssl_version = ssl.PROTOCOL_TLS_CLIENT
- def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
- certfile=None, context=None,
- timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
+ def __init__(self, host='', user='', passwd='', acct='',
+ keyfile=None, certfile=None, context=None,
+ timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
+ encoding='utf-8'):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
@@ -725,12 +730,13 @@ else:
keyfile=keyfile)
self.context = context
self._prot_p = False
- FTP.__init__(self, host, user, passwd, acct, timeout, source_address)
+ super().__init__(host, user, passwd, acct,
+ timeout, source_address, encoding=encoding)
def login(self, user='', passwd='', acct='', secure=True):
if secure and not isinstance(self.sock, ssl.SSLSocket):
self.auth()
- return FTP.login(self, user, passwd, acct)
+ return super().login(user, passwd, acct)
def auth(self):
'''Set up secure control connection by using TLS/SSL.'''
@@ -740,8 +746,7 @@ else:
resp = self.voidcmd('AUTH TLS')
else:
resp = self.voidcmd('AUTH SSL')
- self.sock = self.context.wrap_socket(self.sock,
- server_hostname=self.host)
+ self.sock = self.context.wrap_socket(self.sock, server_hostname=self.host)
self.file = self.sock.makefile(mode='r', encoding=self.encoding)
return resp
@@ -778,7 +783,7 @@ else:
# --- Overridden FTP methods
def ntransfercmd(self, cmd, rest=None):
- conn, size = FTP.ntransfercmd(self, cmd, rest)
+ conn, size = super().ntransfercmd(cmd, rest)
if self._prot_p:
conn = self.context.wrap_socket(conn,
server_hostname=self.host)
@@ -823,7 +828,6 @@ def parse227(resp):
'''Parse the '227' response for a PASV request.
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
Return ('host.addr.as.numbers', port#) tuple.'''
-
if resp[:3] != '227':
raise error_reply(resp)
global _227_re
@@ -843,7 +847,6 @@ def parse229(resp, peer):
'''Parse the '229' response for an EPSV request.
Raises error_proto if it does not contain '(|||port|)'
Return ('host.addr.as.numbers', port#) tuple.'''
-
if resp[:3] != '229':
raise error_reply(resp)
left = resp.find('(')
@@ -865,7 +868,6 @@ def parse257(resp):
'''Parse the '257' response for a MKD or PWD request.
This is a response to a MKD or PWD request: a directory name.
Returns the directoryname in the 257 reply.'''
-
if resp[:3] != '257':
raise error_reply(resp)
if resp[3:5] != ' "':
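A short sketch of the new ftplib surface described above; the host name is hypothetical, and no connection is attempted because the timeout=0 check fires before any socket is created:

from ftplib import FTP

ftp = FTP(encoding='latin-1')            # encoding is keyword-only, defaults to utf-8
try:
    ftp.connect('ftp.example.com', timeout=0)
except ValueError:
    pass                                 # non-blocking sockets are rejected up front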
diff --git a/x64/Lib/functools.py b/x64/Lib/functools.py
index b41dea7..5cab497 100644
--- a/x64/Lib/functools.py
+++ b/x64/Lib/functools.py
@@ -10,14 +10,16 @@
# See C source code for _functools credits/copyright
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
- 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
- 'partialmethod', 'singledispatch', 'singledispatchmethod']
+ 'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce',
+ 'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
+ 'cached_property']
from abc import get_cache_token
from collections import namedtuple
# import types, weakref # Deferred to single_dispatch()
from reprlib import recursive_repr
from _thread import RLock
+from types import GenericAlias
################################################################################
@@ -94,6 +96,8 @@ def _gt_from_lt(self, other, NotImplemented=NotImplemented):
def _le_from_lt(self, other, NotImplemented=NotImplemented):
'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'
op_result = self.__lt__(other)
+ if op_result is NotImplemented:
+ return op_result
return op_result or self == other
def _ge_from_lt(self, other, NotImplemented=NotImplemented):
@@ -134,6 +138,8 @@ def _lt_from_gt(self, other, NotImplemented=NotImplemented):
def _ge_from_gt(self, other, NotImplemented=NotImplemented):
'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'
op_result = self.__gt__(other)
+ if op_result is NotImplemented:
+ return op_result
return op_result or self == other
def _le_from_gt(self, other, NotImplemented=NotImplemented):
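A hedged illustration of why the added guards matter: without them, the generated methods evaluate "op_result or self == other" while op_result is NotImplemented, which relies on the deprecated truthiness of NotImplemented instead of deferring to the reflected operation:

from functools import total_ordering

@total_ordering
class Version:
    def __init__(self, n):
        self.n = n
    def __eq__(self, other):
        return self.n == other.n if isinstance(other, Version) else NotImplemented
    def __lt__(self, other):
        return self.n < other.n if isinstance(other, Version) else NotImplemented

assert Version(1) <= Version(2)          # derived __le__ works as before
try:
    Version(1) >= object()               # NotImplemented now propagates cleanly
except TypeError:
    pass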
@@ -345,23 +351,7 @@ class partialmethod(object):
callables as instance methods.
"""
- def __init__(*args, **keywords):
- if len(args) >= 2:
- self, func, *args = args
- elif not args:
- raise TypeError("descriptor '__init__' of partialmethod "
- "needs an argument")
- elif 'func' in keywords:
- func = keywords.pop('func')
- self, *args = args
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError("type 'partialmethod' takes at least one argument, "
- "got %d" % (len(args)-1))
- args = tuple(args)
-
+ def __init__(self, func, /, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
@@ -379,7 +369,6 @@ class partialmethod(object):
self.func = func
self.args = args
self.keywords = keywords
- __init__.__text_signature__ = '($self, func, /, *args, **keywords)'
def __repr__(self):
args = ", ".join(map(repr, self.args))
@@ -423,6 +412,9 @@ class partialmethod(object):
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
+ __class_getitem__ = classmethod(GenericAlias)
+
+
# Helper functions
def _unwrap_partial(func):
@@ -516,6 +508,7 @@ def lru_cache(maxsize=128, typed=False):
# The user_function was passed in directly via the maxsize argument
user_function, maxsize = maxsize, 128
wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+ wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
return update_wrapper(wrapper, user_function)
elif maxsize is not None:
raise TypeError(
@@ -523,6 +516,7 @@ def lru_cache(maxsize=128, typed=False):
def decorating_function(user_function):
wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+ wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
return update_wrapper(wrapper, user_function)
return decorating_function
@@ -650,6 +644,15 @@ except ImportError:
################################################################################
+### cache -- simplified access to the infinity cache
+################################################################################
+
+def cache(user_function, /):
+ 'Simple lightweight unbounded cache. Sometimes called "memoize".'
+ return lru_cache(maxsize=None)(user_function)
+
+
+################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
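A minimal sketch of the new cache decorator together with the cache_parameters() hook added to lru_cache wrappers above:

from functools import cache

@cache
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(64)                                  # repeated calls hit the cache
assert fib.cache_parameters() == {'maxsize': None, 'typed': False}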
@@ -973,3 +976,5 @@ class cached_property:
)
raise TypeError(msg) from None
return val
+
+ __class_getitem__ = classmethod(GenericAlias)
diff --git a/x64/Lib/getpass.py b/x64/Lib/getpass.py
index 36e17e4..6911f41 100644
--- a/x64/Lib/getpass.py
+++ b/x64/Lib/getpass.py
@@ -52,7 +52,7 @@ def unix_getpass(prompt='Password: ', stream=None):
stack.enter_context(input)
if not stream:
stream = input
- except OSError as e:
+ except OSError:
# If that fails, see if stdin can be controlled.
stack.close()
try:
diff --git a/x64/Lib/gettext.py b/x64/Lib/gettext.py
index b98f501..77b67ae 100644
--- a/x64/Lib/gettext.py
+++ b/x64/Lib/gettext.py
@@ -46,7 +46,6 @@ internationalized, to the local language and cultural habits.
# find this format documented anywhere.
-import locale
import os
import re
import sys
@@ -210,6 +209,7 @@ def c2py(plural):
def _expand_lang(loc):
+ import locale
loc = locale.normalize(loc)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
@@ -278,6 +278,7 @@ class NullTranslations:
import warnings
warnings.warn('lgettext() is deprecated, use gettext() instead',
DeprecationWarning, 2)
+ import locale
if self._fallback:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'.*\blgettext\b.*',
@@ -299,6 +300,7 @@ class NullTranslations:
import warnings
warnings.warn('lngettext() is deprecated, use ngettext() instead',
DeprecationWarning, 2)
+ import locale
if self._fallback:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'.*\blngettext\b.*',
@@ -462,6 +464,7 @@ class GNUTranslations(NullTranslations):
import warnings
warnings.warn('lgettext() is deprecated, use gettext() instead',
DeprecationWarning, 2)
+ import locale
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
@@ -476,6 +479,7 @@ class GNUTranslations(NullTranslations):
import warnings
warnings.warn('lngettext() is deprecated, use ngettext() instead',
DeprecationWarning, 2)
+ import locale
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
except KeyError:
@@ -668,6 +672,7 @@ def ldgettext(domain, message):
import warnings
warnings.warn('ldgettext() is deprecated, use dgettext() instead',
DeprecationWarning, 2)
+ import locale
codeset = _localecodesets.get(domain)
try:
with warnings.catch_warnings():
@@ -695,6 +700,7 @@ def ldngettext(domain, msgid1, msgid2, n):
import warnings
warnings.warn('ldngettext() is deprecated, use dngettext() instead',
DeprecationWarning, 2)
+ import locale
codeset = _localecodesets.get(domain)
try:
with warnings.catch_warnings():
diff --git a/x64/Lib/graphlib.py b/x64/Lib/graphlib.py
new file mode 100644
index 0000000..d0e7a48
--- /dev/null
+++ b/x64/Lib/graphlib.py
@@ -0,0 +1,246 @@
+__all__ = ["TopologicalSorter", "CycleError"]
+
+_NODE_OUT = -1
+_NODE_DONE = -2
+
+
+class _NodeInfo:
+ __slots__ = "node", "npredecessors", "successors"
+
+ def __init__(self, node):
+ # The node this class is augmenting.
+ self.node = node
+
+ # Number of predecessors, generally >= 0. When this value falls to 0,
+ # the node is returned by get_ready() and this is set to _NODE_OUT;
+ # when the node is marked done by a call to done(), it is set to _NODE_DONE.
+ self.npredecessors = 0
+
+ # List of successor nodes. The list can contain duplicated elements as
+ # long as they're all reflected in the successor's npredecessors attribute.
+ self.successors = []
+
+
+class CycleError(ValueError):
+ """Subclass of ValueError raised by TopologicalSorter.prepare if cycles
+ exist in the working graph.
+
+ If multiple cycles exist, only one of them, chosen arbitrarily, will be
+ reported and included in the exception. The detected cycle can be accessed
+ via the second element in the *args* attribute of the exception instance and
+ consists of a list of nodes, such that each node is, in the graph, an
+ immediate predecessor of the next node in the list. In the reported list,
+ the first and the last node will be the same, to make it clear that it is
+ cyclic.
+ """
+
+ pass
+
+
+class TopologicalSorter:
+ """Provides functionality to topologically sort a graph of hashable nodes"""
+
+ def __init__(self, graph=None):
+ self._node2info = {}
+ self._ready_nodes = None
+ self._npassedout = 0
+ self._nfinished = 0
+
+ if graph is not None:
+ for node, predecessors in graph.items():
+ self.add(node, *predecessors)
+
+ def _get_nodeinfo(self, node):
+ if (result := self._node2info.get(node)) is None:
+ self._node2info[node] = result = _NodeInfo(node)
+ return result
+
+ def add(self, node, *predecessors):
+ """Add a new node and its predecessors to the graph.
+
+ Both the *node* and all elements in *predecessors* must be hashable.
+
+ If called multiple times with the same node argument, the set of dependencies
+ will be the union of all dependencies passed in.
+
+ It is possible to add a node with no dependencies (*predecessors* is not provided)
+ as well as to provide a dependency twice. If a node that has not been provided before
+ is included among *predecessors*, it will be automatically added to the graph with
+ no predecessors of its own.
+
+ Raises ValueError if called after "prepare".
+ """
+ if self._ready_nodes is not None:
+ raise ValueError("Nodes cannot be added after a call to prepare()")
+
+ # Create the node -> predecessor edges
+ nodeinfo = self._get_nodeinfo(node)
+ nodeinfo.npredecessors += len(predecessors)
+
+ # Create the predecessor -> node edges
+ for pred in predecessors:
+ pred_info = self._get_nodeinfo(pred)
+ pred_info.successors.append(node)
+
+ def prepare(self):
+ """Mark the graph as finished and check for cycles in the graph.
+
+ If any cycle is detected, "CycleError" will be raised, but "get_ready" can
+ still be used to obtain as many nodes as possible until cycles block more
+ progress. After a call to this function, the graph cannot be modified and
+ therefore no more nodes can be added using "add".
+ """
+ if self._ready_nodes is not None:
+ raise ValueError("cannot prepare() more than once")
+
+ self._ready_nodes = [
+ i.node for i in self._node2info.values() if i.npredecessors == 0
+ ]
+ # ready_nodes is set before we look for cycles on purpose:
+ # if the user wants to catch the CycleError, that's fine,
+ # they can continue using the instance to grab as many
+ # nodes as possible before cycles block more progress
+ cycle = self._find_cycle()
+ if cycle:
+ raise CycleError("nodes are in a cycle", cycle)
+
+ def get_ready(self):
+ """Return a tuple of all the nodes that are ready.
+
+ Initially it returns all nodes with no predecessors; once those are marked
+ as processed by calling "done", further calls will return all new nodes that
+ have all their predecessors already processed. Once no more progress can be made,
+ empty tuples are returned.
+
+ Raises ValueError if called without calling "prepare" previously.
+ """
+ if self._ready_nodes is None:
+ raise ValueError("prepare() must be called first")
+
+ # Get the nodes that are ready and mark them
+ result = tuple(self._ready_nodes)
+ n2i = self._node2info
+ for node in result:
+ n2i[node].npredecessors = _NODE_OUT
+
+ # Clean the list of nodes that are ready and update
+ # the counter of nodes that we have returned.
+ self._ready_nodes.clear()
+ self._npassedout += len(result)
+
+ return result
+
+ def is_active(self):
+ """Return ``True`` if more progress can be made and ``False`` otherwise.
+
+ Progress can be made if cycles do not block the resolution and either there
+ are still nodes ready that haven't yet been returned by "get_ready" or the
+ number of nodes marked "done" is less than the number that have been returned
+ by "get_ready".
+
+ Raises ValueError if called without calling "prepare" previously.
+ """
+ if self._ready_nodes is None:
+ raise ValueError("prepare() must be called first")
+ return self._nfinished < self._npassedout or bool(self._ready_nodes)
+
+ def __bool__(self):
+ return self.is_active()
+
+ def done(self, *nodes):
+ """Marks a set of nodes returned by "get_ready" as processed.
+
+ This method unblocks any successor of each node in *nodes* for being returned
+ in the future by a call to "get_ready".
+
+ Raises :exec:`ValueError` if any node in *nodes* has already been marked as
+ processed by a previous call to this method, if a node was not added to the
+ graph by using "add" or if called without calling "prepare" previously or if
+ node has not yet been returned by "get_ready".
+ """
+
+ if self._ready_nodes is None:
+ raise ValueError("prepare() must be called first")
+
+ n2i = self._node2info
+
+ for node in nodes:
+
+ # Check if we know about this node (it was added previously using add())
+ if (nodeinfo := n2i.get(node)) is None:
+ raise ValueError(f"node {node!r} was not added using add()")
+
+ # If the node has not been returned (marked as ready) previously, inform the user.
+ stat = nodeinfo.npredecessors
+ if stat != _NODE_OUT:
+ if stat >= 0:
+ raise ValueError(
+ f"node {node!r} was not passed out (still not ready)"
+ )
+ elif stat == _NODE_DONE:
+ raise ValueError(f"node {node!r} was already marked done")
+ else:
+ assert False, f"node {node!r}: unknown status {stat}"
+
+ # Mark the node as processed
+ nodeinfo.npredecessors = _NODE_DONE
+
+ # Go to all the successors and reduce the number of predecessors, collecting all the ones
+ # that are ready to be returned in the next get_ready() call.
+ for successor in nodeinfo.successors:
+ successor_info = n2i[successor]
+ successor_info.npredecessors -= 1
+ if successor_info.npredecessors == 0:
+ self._ready_nodes.append(successor)
+ self._nfinished += 1
+
+ def _find_cycle(self):
+ n2i = self._node2info
+ stack = []
+ itstack = []
+ seen = set()
+ node2stacki = {}
+
+ for node in n2i:
+ if node in seen:
+ continue
+
+ while True:
+ if node in seen:
+ # If we have already seen the node and it is in the
+ # current stack, we have found a cycle.
+ if node in node2stacki:
+ return stack[node2stacki[node] :] + [node]
+ # else go on to get next successor
+ else:
+ seen.add(node)
+ itstack.append(iter(n2i[node].successors).__next__)
+ node2stacki[node] = len(stack)
+ stack.append(node)
+
+ # Backtrack to the topmost stack entry with
+ # at least one more successor to try.
+ while stack:
+ try:
+ node = itstack[-1]()
+ break
+ except StopIteration:
+ del node2stacki[stack.pop()]
+ itstack.pop()
+ else:
+ break
+ return None
+
+ def static_order(self):
+ """Returns an iterable of nodes in a topological order.
+
+ The particular order that is returned may depend on the specific
+ order in which the items were inserted in the graph.
+
+ Using this method does not require calling "prepare" or "done". If any
+ cycle is detected, :exc:`CycleError` will be raised.
+ """
+ self.prepare()
+ while self.is_active():
+ node_group = self.get_ready()
+ yield from node_group
+ self.done(*node_group)
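A usage sketch for the new module; the graph maps each node to the set of its predecessors, and the node names here are arbitrary:

from graphlib import TopologicalSorter

graph = {"b": {"a"}, "c": {"a"}, "d": {"b", "c"}}
print(list(TopologicalSorter(graph).static_order()))   # e.g. ['a', 'b', 'c', 'd']

# The lower-level API supports incremental, possibly parallel, processing:
ts = TopologicalSorter(graph)
ts.prepare()
while ts.is_active():
    ready = ts.get_ready()        # nodes whose predecessors are all done
    for node in ready:            # hand these off to workers, etc.
        ts.done(node)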
diff --git a/x64/Lib/gzip.py b/x64/Lib/gzip.py
index 87b553d..e422773 100644
--- a/x64/Lib/gzip.py
+++ b/x64/Lib/gzip.py
@@ -177,6 +177,7 @@ class GzipFile(_compression.BaseStream):
filename = ''
else:
filename = os.fspath(filename)
+ origmode = mode
if mode is None:
mode = getattr(fileobj, 'mode', 'rb')
@@ -187,6 +188,13 @@ class GzipFile(_compression.BaseStream):
self.name = filename
elif mode.startswith(('w', 'a', 'x')):
+ if origmode is None:
+ import warnings
+ warnings.warn(
+ "GzipFile was opened for writing, but this will "
+ "change in future Python releases. "
+ "Specify the mode argument for opening it for writing.",
+ FutureWarning, 2)
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
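A sketch of the new FutureWarning; the tiny BytesIO subclass is a stand-in (its mode attribute is fabricated for the example) so that no real file is needed:

import gzip
import io

class _WritableBuf(io.BytesIO):
    mode = 'wb'                   # lets GzipFile infer a write mode

# mode was inferred from the fileobj, not passed, so this emits FutureWarning:
gzip.GzipFile(fileobj=_WritableBuf()).close()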
diff --git a/x64/Lib/hashlib.py b/x64/Lib/hashlib.py
index 56873b7..58c340d 100644
--- a/x64/Lib/hashlib.py
+++ b/x64/Lib/hashlib.py
@@ -70,9 +70,12 @@ __all__ = __always_supported + ('new', 'algorithms_guaranteed',
__builtin_constructor_cache = {}
+# Prefer our blake2 implementation
+# OpenSSL 1.1.0 comes with a limited implementation of blake2b/s. The OpenSSL
+# implementations neither support keyed blake2 (blake2 MAC) nor advanced
+# features like salt, personalization, or tree hashing. OpenSSL hash-only
+# variants are available as 'blake2b512' and 'blake2s256', though.
__block_openssl_constructor = {
- 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512',
- 'shake_128', 'shake_256',
'blake2b', 'blake2s',
}
@@ -122,13 +125,16 @@ def __get_builtin_constructor(name):
def __get_openssl_constructor(name):
if name in __block_openssl_constructor:
- # Prefer our blake2 and sha3 implementation.
+ # Prefer our builtin blake2 implementation.
return __get_builtin_constructor(name)
try:
+ # MD5, SHA1, and SHA2 are in all supported OpenSSL versions
+ # SHA3/shake are available in OpenSSL 1.1.1+
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
- # defined but the hash not actually available thanks to OpenSSL.
- f()
+ # defined but the hash not actually available. Don't fall back to
+ # builtin if the current security policy blocks a digest, bpo#40695.
+ f(usedforsecurity=False)
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
@@ -148,13 +154,10 @@ def __hash_new(name, data=b'', **kwargs):
optionally initialized with data (which must be a bytes-like object).
"""
if name in __block_openssl_constructor:
- # Prefer our blake2 and sha3 implementation
- # OpenSSL 1.1.0 comes with a limited implementation of blake2b/s.
- # It does neither support keyed blake2 nor advanced features like
- # salt, personal, tree hashing or SSE.
+ # Prefer our builtin blake2 implementation.
return __get_builtin_constructor(name)(data, **kwargs)
try:
- return _hashlib.new(name, data)
+ return _hashlib.new(name, data, **kwargs)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
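Sketch of the two behaviors above: keyed blake2 stays on the builtin implementation, and keyword arguments such as usedforsecurity now reach the OpenSSL constructors:

import hashlib

# Keyed blake2 (a MAC) keeps using the builtin implementation:
mac = hashlib.new('blake2b', b'message', key=b'secret', digest_size=32)
print(mac.hexdigest())

# Keyword arguments are forwarded to _hashlib.new, e.g. the flag consulted
# by restrictive security policies:
md5 = hashlib.new('md5', usedforsecurity=False)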
diff --git a/x64/Lib/hmac.py b/x64/Lib/hmac.py
index b769876..180bc37 100644
--- a/x64/Lib/hmac.py
+++ b/x64/Lib/hmac.py
@@ -4,14 +4,15 @@ Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
-from _operator import _compare_digest as compare_digest
try:
import _hashlib as _hashopenssl
except ImportError:
_hashopenssl = None
_openssl_md_meths = None
+ from _operator import _compare_digest as compare_digest
else:
_openssl_md_meths = frozenset(_hashopenssl.openssl_md_meth_names)
+ compare_digest = _hashopenssl.compare_digest
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
@@ -30,6 +31,10 @@ class HMAC:
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
+ __slots__ = (
+ "_digest_cons", "_inner", "_outer", "block_size", "digest_size"
+ )
+
def __init__(self, key, msg=None, digestmod=''):
"""Create a new HMAC object.
@@ -51,18 +56,18 @@ class HMAC:
raise TypeError("Missing required parameter 'digestmod'.")
if callable(digestmod):
- self.digest_cons = digestmod
+ self._digest_cons = digestmod
elif isinstance(digestmod, str):
- self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
+ self._digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
else:
- self.digest_cons = lambda d=b'': digestmod.new(d)
+ self._digest_cons = lambda d=b'': digestmod.new(d)
- self.outer = self.digest_cons()
- self.inner = self.digest_cons()
- self.digest_size = self.inner.digest_size
+ self._outer = self._digest_cons()
+ self._inner = self._digest_cons()
+ self.digest_size = self._inner.digest_size
- if hasattr(self.inner, 'block_size'):
- blocksize = self.inner.block_size
+ if hasattr(self._inner, 'block_size'):
+ blocksize = self._inner.block_size
if blocksize < 16:
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
@@ -79,21 +84,33 @@ class HMAC:
self.block_size = blocksize
if len(key) > blocksize:
- key = self.digest_cons(key).digest()
+ key = self._digest_cons(key).digest()
key = key.ljust(blocksize, b'\0')
- self.outer.update(key.translate(trans_5C))
- self.inner.update(key.translate(trans_36))
+ self._outer.update(key.translate(trans_5C))
+ self._inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
@property
def name(self):
- return "hmac-" + self.inner.name
+ return "hmac-" + self._inner.name
+
+ @property
+ def digest_cons(self):
+ return self._digest_cons
+
+ @property
+ def inner(self):
+ return self._inner
+
+ @property
+ def outer(self):
+ return self._outer
def update(self, msg):
"""Feed data from msg into this hashing object."""
- self.inner.update(msg)
+ self._inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
@@ -102,10 +119,10 @@ class HMAC:
"""
# Call __new__ directly to avoid the expensive __init__.
other = self.__class__.__new__(self.__class__)
- other.digest_cons = self.digest_cons
+ other._digest_cons = self._digest_cons
other.digest_size = self.digest_size
- other.inner = self.inner.copy()
- other.outer = self.outer.copy()
+ other._inner = self._inner.copy()
+ other._outer = self._outer.copy()
return other
def _current(self):
@@ -113,8 +130,8 @@ class HMAC:
To be used only internally with digest() and hexdigest().
"""
- h = self.outer.copy()
- h.update(self.inner.digest())
+ h = self._outer.copy()
+ h.update(self._inner.digest())
return h
def digest(self):
diff --git a/x64/Lib/html/parser.py b/x64/Lib/html/parser.py
index de81879..6083077 100644
--- a/x64/Lib/html/parser.py
+++ b/x64/Lib/html/parser.py
@@ -9,7 +9,6 @@
import re
-import warnings
import _markupbase
from html import unescape
@@ -461,10 +460,3 @@ class HTMLParser(_markupbase.ParserBase):
def unknown_decl(self, data):
pass
-
- # Internal -- helper to remove special character quoting
- def unescape(self, s):
- warnings.warn('The unescape method is deprecated and will be removed '
- 'in 3.5, use html.unescape() instead.',
- DeprecationWarning, stacklevel=2)
- return unescape(s)
diff --git a/x64/Lib/http/__init__.py b/x64/Lib/http/__init__.py
index 350afe7..37be765 100644
--- a/x64/Lib/http/__init__.py
+++ b/x64/Lib/http/__init__.py
@@ -17,6 +17,9 @@ class HTTPStatus(IntEnum):
* RFC 2774: An HTTP Extension Framework
* RFC 7725: An HTTP Status Code to Report Legal Obstacles
* RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2)
+ * RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0)
+ * RFC 8297: An HTTP Status Code for Indicating Hints
+ * RFC 8470: Using Early Data in HTTP
"""
def __new__(cls, value, phrase, description=''):
obj = int.__new__(cls, value)
@@ -31,6 +34,7 @@ class HTTPStatus(IntEnum):
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
+ EARLY_HINTS = 103, 'Early Hints'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
@@ -100,11 +104,14 @@ class HTTPStatus(IntEnum):
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
+ IM_A_TEAPOT = (418, 'I\'m a Teapot',
+ 'Server refuses to brew coffee because it is a teapot.')
MISDIRECTED_REQUEST = (421, 'Misdirected Request',
'Server is not able to produce a response')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
+ TOO_EARLY = 425, 'Too Early'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
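Quick illustration of the newly added members:

from http import HTTPStatus

assert HTTPStatus.EARLY_HINTS == 103
assert HTTPStatus.IM_A_TEAPOT.phrase == "I'm a Teapot"
assert HTTPStatus.TOO_EARLY == 425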
diff --git a/x64/Lib/http/client.py b/x64/Lib/http/client.py
index 33a4347..c2ad047 100644
--- a/x64/Lib/http/client.py
+++ b/x64/Lib/http/client.py
@@ -147,6 +147,10 @@ _contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
+# These characters are not allowed within HTTP method names
+# to prevent http header injection.
+_contains_disallowed_method_pchar_re = re.compile('[\x00-\x1f]')
+
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
@@ -828,6 +832,8 @@ class HTTPConnection:
(self.host, self.port) = self._get_hostport(host, port)
+ self._validate_host(self.host)
+
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
@@ -1085,6 +1091,8 @@ class HTTPConnection:
else:
raise CannotSendRequest(self.__state)
+ self._validate_method(method)
+
# Save the method for use later in the response phase
self._method = method
@@ -1175,6 +1183,15 @@ class HTTPConnection:
# ASCII also helps prevent CVE-2019-9740.
return request.encode('ascii')
+ def _validate_method(self, method):
+ """Validate a method name for putrequest."""
+ # prevent http header injection
+ match = _contains_disallowed_method_pchar_re.search(method)
+ if match:
+ raise ValueError(
+ f"method can't contain control characters. {method!r} "
+ f"(found at least {match.group()!r})")
+
def _validate_path(self, url):
"""Validate a url for putrequest."""
# Prevent CVE-2019-9740.
@@ -1183,6 +1200,14 @@ class HTTPConnection:
raise InvalidURL(f"URL can't contain control characters. {url!r} "
f"(found at least {match.group()!r})")
+ def _validate_host(self, host):
+ """Validate a host so it doesn't contain control characters."""
+ # Prevent CVE-2019-18348.
+ match = _contains_disallowed_url_pchar_re.search(host)
+ if match:
+ raise InvalidURL(f"URL can't contain control characters. {host!r} "
+ f"(found at least {match.group()!r})")
+
def putheader(self, header, *values):
"""Send a request header line to the server.
diff --git a/x64/Lib/http/cookies.py b/x64/Lib/http/cookies.py
index 6694f54..35ac2dc 100644
--- a/x64/Lib/http/cookies.py
+++ b/x64/Lib/http/cookies.py
@@ -131,6 +131,7 @@ Finis.
#
import re
import string
+import types
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
@@ -419,6 +420,8 @@ class Morsel(dict):
# Return the result
return _semispacejoin(result)
+ __class_getitem__ = classmethod(types.GenericAlias)
+
#
# Pattern for finding cookie
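Illustrative one-liner for the __class_getitem__ addition: Morsel can now be parameterized in annotations:

from http.cookies import Morsel

morsels: list[Morsel[str]] = []       # Morsel[str] is a types.GenericAlias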
diff --git a/x64/Lib/http/server.py b/x64/Lib/http/server.py
index 38f7acc..def05f4 100644
--- a/x64/Lib/http/server.py
+++ b/x64/Lib/http/server.py
@@ -639,11 +639,17 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
"""
server_version = "SimpleHTTP/" + __version__
+ extensions_map = _encodings_map_default = {
+ '.gz': 'application/gzip',
+ '.Z': 'application/octet-stream',
+ '.bz2': 'application/x-bzip2',
+ '.xz': 'application/x-xz',
+ }
def __init__(self, *args, directory=None, **kwargs):
if directory is None:
directory = os.getcwd()
- self.directory = directory
+ self.directory = os.fspath(directory)
super().__init__(*args, **kwargs)
def do_GET(self):
@@ -866,25 +872,16 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
slow) to look inside the data to make a better guess.
"""
-
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
- else:
- return self.extensions_map['']
-
- if not mimetypes.inited:
- mimetypes.init() # try to read system mime.types
- extensions_map = mimetypes.types_map.copy()
- extensions_map.update({
- '': 'application/octet-stream', # Default
- '.py': 'text/plain',
- '.c': 'text/plain',
- '.h': 'text/plain',
- })
+ guess, _ = mimetypes.guess_type(path)
+ if guess:
+ return guess
+ return 'application/octet-stream'
# Utilities for CGIHTTPRequestHandler
@@ -1015,8 +1012,10 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
"""
collapsed_path = _url_collapse_path(self.path)
dir_sep = collapsed_path.find('/', 1)
- head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
- if head in self.cgi_directories:
+ while dir_sep > 0 and collapsed_path[:dir_sep] not in self.cgi_directories:
+ dir_sep = collapsed_path.find('/', dir_sep+1)
+ if dir_sep > 0:
+ head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
self.cgi_info = head, tail
return True
return False
@@ -1124,12 +1123,7 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
referer = self.headers.get('referer')
if referer:
env['HTTP_REFERER'] = referer
- accept = []
- for line in self.headers.getallmatchingheaders('accept'):
- if line[:1] in "\t\n\r ":
- accept.append(line.strip())
- else:
- accept = accept + line[7:].split(',')
+ accept = self.headers.get_all('accept', ())
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.get('user-agent')
if ua:
@@ -1165,8 +1159,9 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
- if sts:
- self.log_error("CGI script exit status %#x", sts)
+ exitcode = os.waitstatus_to_exitcode(sts)
+ if exitcode:
+ self.log_error(f"CGI script exit code {exitcode}")
return
# Child
try:
diff --git a/x64/Lib/imaplib.py b/x64/Lib/imaplib.py
index 822d9d6..d9720f2 100644
--- a/x64/Lib/imaplib.py
+++ b/x64/Lib/imaplib.py
@@ -98,6 +98,7 @@ Commands = {
'THREAD': ('SELECTED',),
'UID': ('SELECTED',),
'UNSUBSCRIBE': ('AUTH', 'SELECTED'),
+ 'UNSELECT': ('SELECTED',),
}
# Patterns to match server responses
@@ -135,10 +136,13 @@ class IMAP4:
r"""IMAP4 client class.
- Instantiate with: IMAP4([host[, port]])
+ Instantiate with: IMAP4([host[, port[, timeout=None]]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port).
+ timeout - socket timeout (default: None)
+ If timeout is not given or is None,
+ the global default socket timeout is used
All IMAP4rev1 commands are supported by methods of the same
name (in lower-case).
@@ -181,7 +185,7 @@ class IMAP4:
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
- def __init__(self, host='', port=IMAP4_PORT):
+ def __init__(self, host='', port=IMAP4_PORT, timeout=None):
self.debug = Debug
self.state = 'LOGOUT'
self.literal = None # A literal argument to a command
@@ -195,7 +199,7 @@ class IMAP4:
# Open socket to server.
- self.open(host, port)
+ self.open(host, port, timeout)
try:
self._connect()
@@ -284,15 +288,20 @@ class IMAP4:
# Overridable methods
- def _create_socket(self):
+ def _create_socket(self, timeout):
# Default value of IMAP4.host is '', but socket.getaddrinfo()
# (which is used by socket.create_connection()) expects None
# as a default value for host.
+ if timeout is not None and not timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
host = None if not self.host else self.host
sys.audit("imaplib.open", self, self.host, self.port)
- return socket.create_connection((host, self.port))
+ address = (host, self.port)
+ if timeout is not None:
+ return socket.create_connection(address, timeout)
+ return socket.create_connection(address)
- def open(self, host = '', port = IMAP4_PORT):
+ def open(self, host='', port=IMAP4_PORT, timeout=None):
"""Setup connection to remote server on "host:port"
(default: localhost:standard IMAP4 port).
This connection will be used by the routines:
@@ -300,7 +309,7 @@ class IMAP4:
"""
self.host = host
self.port = port
- self.sock = self._create_socket()
+ self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
@@ -502,7 +511,7 @@ class IMAP4:
def enable(self, capability):
"""Send an RFC5161 enable string to the server.
- (typ, [data]) = <intance>.enable(capability)
+ (typ, [data]) = <instance>.enable(capability)
"""
if 'ENABLE' not in self.capabilities:
raise IMAP4.error("Server does not support ENABLE")
@@ -894,6 +903,22 @@ class IMAP4:
return self._simple_command('UNSUBSCRIBE', mailbox)
+ def unselect(self):
+ """Free server's resources associated with the selected mailbox
+ and return the server to the authenticated state.
+ This command performs the same actions as CLOSE, except
+ that no messages are permanently removed from the currently
+ selected mailbox.
+
+ (typ, [data]) = <instance>.unselect()
+ """
+ try:
+ typ, data = self._simple_command('UNSELECT')
+ finally:
+ self.state = 'AUTH'
+ return typ, data
+
+
def xatom(self, name, *args):
"""Allow simple extension commands
notified by server in CAPABILITY response.
@@ -1261,7 +1286,7 @@ if HAVE_SSL:
"""IMAP4 client class over SSL connection
- Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile[, ssl_context]]]]])
+ Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile[, ssl_context[, timeout=None]]]]]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 SSL port);
@@ -1271,13 +1296,15 @@ if HAVE_SSL:
and private key (default: None)
Note: if ssl_context is provided, then parameters keyfile or
certfile should not be set otherwise ValueError is raised.
+ timeout - socket timeout (default: None). If timeout is not given or is None,
+ the global default socket timeout is used
for more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, host='', port=IMAP4_SSL_PORT, keyfile=None,
- certfile=None, ssl_context=None):
+ certfile=None, ssl_context=None, timeout=None):
if ssl_context is not None and keyfile is not None:
raise ValueError("ssl_context and keyfile arguments are mutually "
"exclusive")
@@ -1294,20 +1321,20 @@ if HAVE_SSL:
ssl_context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.ssl_context = ssl_context
- IMAP4.__init__(self, host, port)
+ IMAP4.__init__(self, host, port, timeout)
- def _create_socket(self):
- sock = IMAP4._create_socket(self)
+ def _create_socket(self, timeout):
+ sock = IMAP4._create_socket(self, timeout)
return self.ssl_context.wrap_socket(sock,
server_hostname=self.host)
- def open(self, host='', port=IMAP4_SSL_PORT):
+ def open(self, host='', port=IMAP4_SSL_PORT, timeout=None):
"""Setup connection to remote server on "host:port".
(default: localhost:standard IMAP4 SSL port).
This connection will be used by the routines:
read, readline, send, shutdown.
"""
- IMAP4.open(self, host, port)
+ IMAP4.open(self, host, port, timeout)
__all__.append("IMAP4_SSL")
@@ -1329,7 +1356,7 @@ class IMAP4_stream(IMAP4):
IMAP4.__init__(self)
- def open(self, host = None, port = None):
+ def open(self, host=None, port=None, timeout=None):
"""Setup a stream connection.
This connection will be used by the routines:
read, readline, send, shutdown.
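Sketch of the new timeout plumbing; the host is hypothetical, and with timeout=0 the error is raised before any DNS lookup or connection attempt. The UNSELECT usage is shown as comments since it needs a live server:

import imaplib

try:
    imaplib.IMAP4('imap.example.com', timeout=0)
except ValueError:
    pass    # non-blocking sockets are rejected in _create_socket()

# With a live connection, the new UNSELECT command leaves the mailbox
# without expunging messages:
#   M = imaplib.IMAP4('imap.example.com', timeout=10)
#   M.login(...); M.select('INBOX')
#   M.unselect()        # state goes back to AUTH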
diff --git a/x64/Lib/imghdr.py b/x64/Lib/imghdr.py
index 76e8abb..6e01fd8 100644
--- a/x64/Lib/imghdr.py
+++ b/x64/Lib/imghdr.py
@@ -152,7 +152,7 @@ def testall(list, recursive, toplevel):
if recursive or toplevel:
print('recursing down:')
import glob
- names = glob.glob(os.path.join(filename, '*'))
+ names = glob.glob(os.path.join(glob.escape(filename), '*'))
testall(names, recursive, 0)
else:
print('*** directory (use -r) ***')
diff --git a/x64/Lib/importlib/_bootstrap.py b/x64/Lib/importlib/_bootstrap.py
index 32deef1..e00b27e 100644
--- a/x64/Lib/importlib/_bootstrap.py
+++ b/x64/Lib/importlib/_bootstrap.py
@@ -67,6 +67,7 @@ class _ModuleLock:
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
+ seen = set()
while True:
lock = _blocking_on.get(tid)
if lock is None:
@@ -74,6 +75,14 @@ class _ModuleLock:
tid = lock.owner
if tid == me:
return True
+ if tid in seen:
+ # bpo 38091: the chain of tid's we encounter here
+ # eventually leads to a fixpoint or a cycle, but
+ # does not reach 'me'. This means we would not
+ # actually deadlock. This can happen if other
+ # threads are at the beginning of acquire() below.
+ return False
+ seen.add(tid)
def acquire(self):
"""
@@ -371,7 +380,7 @@ class ModuleSpec:
self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
- return False
+ return NotImplemented
@property
def cached(self):
@@ -713,6 +722,8 @@ class BuiltinImporter:
"""
+ _ORIGIN = "built-in"
+
@staticmethod
def module_repr(module):
"""Return repr for the module.
@@ -720,14 +731,14 @@ class BuiltinImporter:
The method is deprecated. The import machinery does the job itself.
"""
- return '<module {!r} (built-in)>'.format(module.__name__)
+ return f'<module {module.__name__!r} ({BuiltinImporter._ORIGIN})>'
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if path is not None:
return None
if _imp.is_builtin(fullname):
- return spec_from_loader(fullname, cls, origin='built-in')
+ return spec_from_loader(fullname, cls, origin=cls._ORIGIN)
else:
return None
@@ -873,7 +884,7 @@ def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
- raise ValueError('attempted relative import beyond top-level package')
+ raise ImportError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
@@ -976,7 +987,12 @@ def _find_and_load_unlocked(name, import_):
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
- setattr(parent_module, name.rpartition('.')[2], module)
+ child = name.rpartition('.')[2]
+ try:
+ setattr(parent_module, child, module)
+ except AttributeError:
+ msg = f"Cannot set an attribute on {parent!r} for child module {child!r}"
+ _warnings.warn(msg, ImportWarning)
return module
diff --git a/x64/Lib/importlib/_bootstrap_external.py b/x64/Lib/importlib/_bootstrap_external.py
index b8ac482..25a3f8c 100644
--- a/x64/Lib/importlib/_bootstrap_external.py
+++ b/x64/Lib/importlib/_bootstrap_external.py
@@ -34,8 +34,8 @@ def _make_relax_case():
key = b'PYTHONCASEOK'
def _relax_case():
- """True if filenames must be checked case-insensitively."""
- return key in _os.environ
+ """True if filenames must be checked case-insensitively and ignore environment flags are not set."""
+ return not sys.flags.ignore_environment and key in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
@@ -271,6 +271,13 @@ _code_type = type(_write_atomic.__code__)
# Python 3.8b2 3412 (Swap the position of positional args and positional
# only args in ast.arguments #37593)
# Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830)
+# Python 3.9a0 3420 (add LOAD_ASSERTION_ERROR #34880)
+# Python 3.9a0 3421 (simplified bytecode for with blocks #32949)
+# Python 3.9a0 3422 (remove BEGIN_FINALLY, END_FINALLY, CALL_FINALLY, POP_FINALLY bytecodes #33387)
+# Python 3.9a2 3423 (add IS_OP, CONTAINS_OP and JUMP_IF_NOT_EXC_MATCH bytecodes #39156)
+# Python 3.9a2 3424 (simplify bytecodes for *value unpacking)
+# Python 3.9a2 3425 (simplify bytecodes for **value unpacking)
+
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
@@ -279,7 +286,7 @@ _code_type = type(_write_atomic.__code__)
# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
# in PC/launcher.c must also be updated.
-MAGIC_NUMBER = (3413).to_bytes(2, 'little') + b'\r\n'
+MAGIC_NUMBER = (3425).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
_PYCACHE = '__pycache__'
@@ -709,9 +716,9 @@ class WindowsRegistryFinder:
@classmethod
def _open_registry(cls, key):
try:
- return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
+ return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
except OSError:
- return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
+ return winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
@@ -723,7 +730,7 @@ class WindowsRegistryFinder:
sys_version='%d.%d' % sys.version_info[:2])
try:
with cls._open_registry(key) as hkey:
- filepath = _winreg.QueryValue(hkey, '')
+ filepath = winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@@ -1577,14 +1584,7 @@ def _setup(_bootstrap_module):
sys = _bootstrap.sys
_imp = _bootstrap._imp
- # Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
- for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
- if builtin_name not in sys.modules:
- builtin_module = _bootstrap._builtin_from_name(builtin_name)
- else:
- builtin_module = sys.modules[builtin_name]
- setattr(self_module, builtin_name, builtin_module)
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
@@ -1603,23 +1603,22 @@ def _setup(_bootstrap_module):
continue
else:
raise ImportError('importlib requires posix or nt')
+
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
setattr(self_module, '_pathseps_with_colon', {f':{s}' for s in path_separators})
- # Directly load the _thread module (needed during bootstrap).
- thread_module = _bootstrap._builtin_from_name('_thread')
- setattr(self_module, '_thread', thread_module)
-
- # Directly load the _weakref module (needed during bootstrap).
- weakref_module = _bootstrap._builtin_from_name('_weakref')
- setattr(self_module, '_weakref', weakref_module)
-
- # Directly load the winreg module (needed during bootstrap).
+ # Directly load built-in modules needed during bootstrap.
+ builtin_names = ['_io', '_warnings', 'marshal']
if builtin_os == 'nt':
- winreg_module = _bootstrap._builtin_from_name('winreg')
- setattr(self_module, '_winreg', winreg_module)
+ builtin_names.append('winreg')
+ for builtin_name in builtin_names:
+ if builtin_name not in sys.modules:
+ builtin_module = _bootstrap._builtin_from_name(builtin_name)
+ else:
+ builtin_module = sys.modules[builtin_name]
+ setattr(self_module, builtin_name, builtin_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
diff --git a/x64/Lib/importlib/_common.py b/x64/Lib/importlib/_common.py
new file mode 100644
index 0000000..c1204f0
--- /dev/null
+++ b/x64/Lib/importlib/_common.py
@@ -0,0 +1,62 @@
+import os
+import pathlib
+import zipfile
+import tempfile
+import functools
+import contextlib
+
+
+def from_package(package):
+ """
+ Return a Traversable object for the given package.
+
+ """
+ return fallback_resources(package.__spec__)
+
+
+def fallback_resources(spec):
+ package_directory = pathlib.Path(spec.origin).parent
+ try:
+ archive_path = spec.loader.archive
+ rel_path = package_directory.relative_to(archive_path)
+ return zipfile.Path(archive_path, str(rel_path) + '/')
+ except Exception:
+ pass
+ return package_directory
+
+
+@contextlib.contextmanager
+def _tempfile(reader, suffix=''):
+ # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
+ # blocks due to the need to close the temporary file to work on Windows
+ # properly.
+ fd, raw_path = tempfile.mkstemp(suffix=suffix)
+ try:
+ os.write(fd, reader())
+ os.close(fd)
+ yield pathlib.Path(raw_path)
+ finally:
+ try:
+ os.remove(raw_path)
+ except FileNotFoundError:
+ pass
+
+
+@functools.singledispatch
+@contextlib.contextmanager
+def as_file(path):
+ """
+ Given a Traversable object, return that object as a
+ path on the local file system in a context manager.
+ """
+ with _tempfile(path.read_bytes, suffix=path.name) as local:
+ yield local
+
+
+@as_file.register(pathlib.Path)
+@contextlib.contextmanager
+def _(path):
+ """
+ Degenerate behavior for pathlib.Path objects.
+ """
+ yield path
diff --git a/x64/Lib/importlib/abc.py b/x64/Lib/importlib/abc.py
index 4b2d3de..b8a9bb1 100644
--- a/x64/Lib/importlib/abc.py
+++ b/x64/Lib/importlib/abc.py
@@ -10,10 +10,11 @@ except ImportError as exc:
_frozen_importlib = None
try:
import _frozen_importlib_external
-except ImportError as exc:
+except ImportError:
_frozen_importlib_external = _bootstrap_external
import abc
import warnings
+from typing import Protocol, runtime_checkable
def _register(abstract_cls, *classes):
@@ -386,3 +387,88 @@ class ResourceReader(metaclass=abc.ABCMeta):
_register(ResourceReader, machinery.SourceFileLoader)
+
+
+@runtime_checkable
+class Traversable(Protocol):
+ """
+ An object with a subset of pathlib.Path methods suitable for
+ traversing directories and opening files.
+ """
+
+ @abc.abstractmethod
+ def iterdir(self):
+ """
+ Yield Traversable objects in self
+ """
+
+ @abc.abstractmethod
+ def read_bytes(self):
+ """
+ Read contents of self as bytes
+ """
+
+ @abc.abstractmethod
+ def read_text(self, encoding=None):
+ """
+ Read contents of self as text
+ """
+
+ @abc.abstractmethod
+ def is_dir(self):
+ """
+ Return True if self is a dir
+ """
+
+ @abc.abstractmethod
+ def is_file(self):
+ """
+ Return True if self is a file
+ """
+
+ @abc.abstractmethod
+ def joinpath(self, child):
+ """
+ Return Traversable child in self
+ """
+
+ @abc.abstractmethod
+ def __truediv__(self, child):
+ """
+ Return Traversable child in self
+ """
+
+ @abc.abstractmethod
+ def open(self, mode='r', *args, **kwargs):
+ """
+ mode may be 'r' or 'rb' to open as text or binary. Return a handle
+ suitable for reading (same as pathlib.Path.open).
+
+ When opening as text, accepts encoding parameters such as those
+ accepted by io.TextIOWrapper.
+ """
+
+ @abc.abstractproperty
+ def name(self):
+ # type: () -> str
+ """
+ The base name of this object without any parent references.
+ """
+
+
+class TraversableResources(ResourceReader):
+ @abc.abstractmethod
+ def files(self):
+ """Return a Traversable object for the loaded package."""
+
+ def open_resource(self, resource):
+ return self.files().joinpath(resource).open('rb')
+
+ def resource_path(self, resource):
+ raise FileNotFoundError(resource)
+
+ def is_resource(self, path):
+ return self.files().joinpath(path).is_file()
+
+ def contents(self):
+ return (item.name for item in self.files().iterdir())
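Because Traversable is declared @runtime_checkable, conformance can be tested structurally; pathlib.Path already provides every required member:

import pathlib
from importlib.abc import Traversable

assert isinstance(pathlib.Path('.'), Traversable)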
diff --git a/x64/Lib/importlib/metadata.py b/x64/Lib/importlib/metadata.py
index 831f593..ffa0cba 100644
--- a/x64/Lib/importlib/metadata.py
+++ b/x64/Lib/importlib/metadata.py
@@ -79,6 +79,16 @@ class EntryPoint(
return functools.reduce(getattr, attrs, module)
@property
+ def module(self):
+ match = self.pattern.match(self.value)
+ return match.group('module')
+
+ @property
+ def attr(self):
+ match = self.pattern.match(self.value)
+ return match.group('attr')
+
+ @property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
@@ -170,7 +180,7 @@ class Distribution:
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
- dist = next(dists, None)
+ dist = next(iter(dists), None)
if dist is not None:
return dist
else:
@@ -213,6 +223,17 @@ class Distribution:
)
return filter(None, declared)
+ @classmethod
+ def _local(cls, root='.'):
+ from pep517 import build, meta
+ system = build.compat_system(root)
+ builder = functools.partial(
+ meta.build,
+ source_dir=root,
+ system=system,
+ )
+ return PathDistribution(zipfile.Path(meta.build_as_zip(builder)))
+
@property
def metadata(self):
"""Return the parsed metadata for this Distribution.
@@ -391,7 +412,7 @@ class FastPath:
def __init__(self, root):
self.root = root
- self.base = os.path.basename(root).lower()
+ self.base = os.path.basename(self.root).lower()
def joinpath(self, child):
return pathlib.Path(self.root, child)
@@ -408,8 +429,8 @@ class FastPath:
names = zip_path.root.namelist()
self.joinpath = zip_path.joinpath
- return (
- posixpath.split(child)[0]
+ return dict.fromkeys(
+ child.split(posixpath.sep, 1)[0]
for child in names
)
@@ -475,7 +496,6 @@ class MetadataPathFinder(DistributionFinder):
)
-
class PathDistribution(Distribution):
def __init__(self, path):
"""Construct a distribution from a path to the metadata directory.
diff --git a/x64/Lib/importlib/resources.py b/x64/Lib/importlib/resources.py
index fc3a1c9..b803a01 100644
--- a/x64/Lib/importlib/resources.py
+++ b/x64/Lib/importlib/resources.py
@@ -1,23 +1,25 @@
import os
-import tempfile
from . import abc as resources_abc
+from . import _common
+from ._common import as_file
from contextlib import contextmanager, suppress
from importlib import import_module
from importlib.abc import ResourceLoader
from io import BytesIO, TextIOWrapper
from pathlib import Path
from types import ModuleType
-from typing import Iterable, Iterator, Optional, Set, Union # noqa: F401
+from typing import ContextManager, Iterable, Optional, Union
from typing import cast
from typing.io import BinaryIO, TextIO
-from zipimport import ZipImportError
__all__ = [
'Package',
'Resource',
+ 'as_file',
'contents',
+ 'files',
'is_resource',
'open_binary',
'open_text',
@@ -31,24 +33,23 @@ Package = Union[str, ModuleType]
Resource = Union[str, os.PathLike]
+def _resolve(name) -> ModuleType:
+ """If name is a string, resolve to a module."""
+ if hasattr(name, '__spec__'):
+ return name
+ return import_module(name)
+
+
def _get_package(package) -> ModuleType:
"""Take a package name or module object and return the module.
- If a name, the module is imported. If the passed or imported module
+ If a name, the module is imported. If the resolved module
object is not a package, raise an exception.
"""
- if hasattr(package, '__spec__'):
- if package.__spec__.submodule_search_locations is None:
- raise TypeError('{!r} is not a package'.format(
- package.__spec__.name))
- else:
- return package
- else:
- module = import_module(package)
- if module.__spec__.submodule_search_locations is None:
- raise TypeError('{!r} is not a package'.format(package))
- else:
- return module
+ module = _resolve(package)
+ if module.__spec__.submodule_search_locations is None:
+ raise TypeError('{!r} is not a package'.format(package))
+ return module
def _normalize_path(path) -> str:
@@ -59,8 +60,7 @@ def _normalize_path(path) -> str:
parent, file_name = os.path.split(path)
if parent:
raise ValueError('{!r} must be only a file name'.format(path))
- else:
- return file_name
+ return file_name
def _get_resource_reader(
@@ -89,8 +89,8 @@ def open_binary(package: Package, resource: Resource) -> BinaryIO:
reader = _get_resource_reader(package)
if reader is not None:
return reader.open_resource(resource)
- _check_location(package)
- absolute_package_path = os.path.abspath(package.__spec__.origin)
+ absolute_package_path = os.path.abspath(
+ package.__spec__.origin or 'non-existent file')
package_path = os.path.dirname(absolute_package_path)
full_path = os.path.join(package_path, resource)
try:
@@ -109,8 +109,7 @@ def open_binary(package: Package, resource: Resource) -> BinaryIO:
message = '{!r} resource not found in {!r}'.format(
resource, package_name)
raise FileNotFoundError(message)
- else:
- return BytesIO(data)
+ return BytesIO(data)
def open_text(package: Package,
@@ -118,39 +117,12 @@ def open_text(package: Package,
encoding: str = 'utf-8',
errors: str = 'strict') -> TextIO:
"""Return a file-like object opened for text reading of the resource."""
- resource = _normalize_path(resource)
- package = _get_package(package)
- reader = _get_resource_reader(package)
- if reader is not None:
- return TextIOWrapper(reader.open_resource(resource), encoding, errors)
- _check_location(package)
- absolute_package_path = os.path.abspath(package.__spec__.origin)
- package_path = os.path.dirname(absolute_package_path)
- full_path = os.path.join(package_path, resource)
- try:
- return open(full_path, mode='r', encoding=encoding, errors=errors)
- except OSError:
- # Just assume the loader is a resource loader; all the relevant
- # importlib.machinery loaders are and an AttributeError for
- # get_data() will make it clear what is needed from the loader.
- loader = cast(ResourceLoader, package.__spec__.loader)
- data = None
- if hasattr(package.__spec__.loader, 'get_data'):
- with suppress(OSError):
- data = loader.get_data(full_path)
- if data is None:
- package_name = package.__spec__.name
- message = '{!r} resource not found in {!r}'.format(
- resource, package_name)
- raise FileNotFoundError(message)
- else:
- return TextIOWrapper(BytesIO(data), encoding, errors)
+ return TextIOWrapper(
+ open_binary(package, resource), encoding=encoding, errors=errors)
def read_binary(package: Package, resource: Resource) -> bytes:
"""Return the binary contents of the resource."""
- resource = _normalize_path(resource)
- package = _get_package(package)
with open_binary(package, resource) as fp:
return fp.read()
@@ -164,14 +136,20 @@ def read_text(package: Package,
The decoding-related arguments have the same semantics as those of
bytes.decode().
"""
- resource = _normalize_path(resource)
- package = _get_package(package)
with open_text(package, resource, encoding, errors) as fp:
return fp.read()
-@contextmanager
-def path(package: Package, resource: Resource) -> Iterator[Path]:
+def files(package: Package) -> resources_abc.Traversable:
+ """
+ Get a Traversable resource from a package
+ """
+ return _common.from_package(_get_package(package))
+
+
+def path(
+ package: Package, resource: Resource,
+ ) -> 'ContextManager[Path]':
"""A context manager providing a file path object to the resource.
If the resource does not already exist on its own on the file system,
@@ -180,39 +158,23 @@ def path(package: Package, resource: Resource) -> Iterator[Path]:
raised if the file was deleted prior to the context manager
exiting).
"""
- resource = _normalize_path(resource)
- package = _get_package(package)
- reader = _get_resource_reader(package)
- if reader is not None:
- try:
- yield Path(reader.resource_path(resource))
- return
- except FileNotFoundError:
- pass
- else:
- _check_location(package)
- # Fall-through for both the lack of resource_path() *and* if
- # resource_path() raises FileNotFoundError.
- package_directory = Path(package.__spec__.origin).parent
- file_path = package_directory / resource
- if file_path.exists():
- yield file_path
- else:
- with open_binary(package, resource) as fp:
- data = fp.read()
- # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
- # blocks due to the need to close the temporary file to work on
- # Windows properly.
- fd, raw_path = tempfile.mkstemp()
- try:
- os.write(fd, data)
- os.close(fd)
- yield Path(raw_path)
- finally:
- try:
- os.remove(raw_path)
- except FileNotFoundError:
- pass
+ reader = _get_resource_reader(_get_package(package))
+ return (
+ _path_from_reader(reader, resource)
+ if reader else
+ _common.as_file(files(package).joinpath(_normalize_path(resource)))
+ )
+
+
+@contextmanager
+def _path_from_reader(reader, resource):
+ norm_resource = _normalize_path(resource)
+ with suppress(FileNotFoundError):
+ yield Path(reader.resource_path(norm_resource))
+ return
+ opener_reader = reader.open_resource(norm_resource)
+ with _common._tempfile(opener_reader.read, suffix=norm_resource) as res:
+ yield res
def is_resource(package: Package, name: str) -> bool:
@@ -225,17 +187,10 @@ def is_resource(package: Package, name: str) -> bool:
reader = _get_resource_reader(package)
if reader is not None:
return reader.is_resource(name)
- try:
- package_contents = set(contents(package))
- except (NotADirectoryError, FileNotFoundError):
- return False
+ package_contents = set(contents(package))
if name not in package_contents:
return False
- # Just because the given file_name lives as an entry in the package's
- # contents doesn't necessarily mean it's a resource. Directories are not
- # resources, so let's try to find out if it's a directory or not.
- path = Path(package.__spec__.origin).parent / name
- return path.is_file()
+ return (_common.from_package(package) / name).is_file()
def contents(package: Package) -> Iterable[str]:
@@ -250,10 +205,11 @@ def contents(package: Package) -> Iterable[str]:
if reader is not None:
return reader.contents()
# Is the package a namespace package? By definition, namespace packages
- # cannot have resources. We could use _check_location() and catch the
- # exception, but that's extra work, so just inline the check.
- elif package.__spec__.origin is None or not package.__spec__.has_location:
+ # cannot have resources.
+ namespace = (
+ package.__spec__.origin is None or
+ package.__spec__.origin == 'namespace'
+ )
+ if namespace or not package.__spec__.has_location:
return ()
- else:
- package_directory = Path(package.__spec__.origin).parent
- return os.listdir(package_directory)
+ return list(item.name for item in _common.from_package(package).iterdir())
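The rewritten importlib.resources delegates to the files()/Traversable API that 3.9 introduces; a minimal sketch of the resulting usage ('mypkg' and 'data.txt' are hypothetical names):

    from importlib import resources

    # files() returns a Traversable rooted at the package.
    text = resources.files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')

    # path() still yields a real filesystem path, materializing a
    # temporary file only when the resource is not already on disk.
    with resources.path('mypkg', 'data.txt') as p:
        print(p)
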
diff --git a/x64/Lib/importlib/util.py b/x64/Lib/importlib/util.py
index 201e0f4..269a6fa 100644
--- a/x64/Lib/importlib/util.py
+++ b/x64/Lib/importlib/util.py
@@ -29,8 +29,8 @@ def resolve_name(name, package):
if not name.startswith('.'):
return name
elif not package:
- raise ValueError(f'no package specified for {repr(name)} '
- '(required for relative module names)')
+ raise ImportError(f'no package specified for {repr(name)} '
+ '(required for relative module names)')
level = 0
for character in name:
if character != '.':
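With this change, resolving a relative module name without a package raises ImportError instead of ValueError; roughly:

    from importlib.util import resolve_name

    print(resolve_name('.runners', 'asyncio'))  # 'asyncio.runners'
    try:
        resolve_name('.runners', None)
    except ImportError as exc:  # was ValueError before this change
        print(exc)
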
diff --git a/x64/Lib/inspect.py b/x64/Lib/inspect.py
index 3ff395c..18bed90 100644
--- a/x64/Lib/inspect.py
+++ b/x64/Lib/inspect.py
@@ -32,6 +32,7 @@ __author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import abc
+import ast
import dis
import collections.abc
import enum
@@ -741,7 +742,7 @@ def getmodule(object, _filename=None):
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
- for modname, module in list(sys.modules.items()):
+ for modname, module in sys.modules.copy().items():
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
@@ -769,6 +770,42 @@ def getmodule(object, _filename=None):
if builtinobject is object:
return builtin
+
+class ClassFoundException(Exception):
+ pass
+
+
+class _ClassFinder(ast.NodeVisitor):
+
+ def __init__(self, qualname):
+ self.stack = []
+ self.qualname = qualname
+
+ def visit_FunctionDef(self, node):
+ self.stack.append(node.name)
+ self.stack.append('<locals>')
+ self.generic_visit(node)
+ self.stack.pop()
+ self.stack.pop()
+
+ visit_AsyncFunctionDef = visit_FunctionDef
+
+ def visit_ClassDef(self, node):
+ self.stack.append(node.name)
+ if self.qualname == '.'.join(self.stack):
+ # Return the decorator for the class if present
+ if node.decorator_list:
+ line_number = node.decorator_list[0].lineno
+ else:
+ line_number = node.lineno
+
+ # decrement by one since line numbers start at one but list indexing starts at zero
+ line_number -= 1
+ raise ClassFoundException(line_number)
+ self.generic_visit(node)
+ self.stack.pop()
+
+
def findsource(object):
"""Return the entire source file and starting line number for an object.
@@ -801,25 +838,15 @@ def findsource(object):
return lines, 0
if isclass(object):
- name = object.__name__
- pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
- # make some effort to find the best matching class definition:
- # use the one with the least indentation, which is the one
- # that's most probably not inside a function definition.
- candidates = []
- for i in range(len(lines)):
- match = pat.match(lines[i])
- if match:
- # if it's at toplevel, it's already the best one
- if lines[i][0] == 'c':
- return lines, i
- # else add whitespace to candidate list
- candidates.append((match.group(1), i))
- if candidates:
- # this will sort by whitespace, and by line number,
- # less whitespace first
- candidates.sort()
- return lines, candidates[0][1]
+ qualname = object.__qualname__
+ source = ''.join(lines)
+ tree = ast.parse(source)
+ class_finder = _ClassFinder(qualname)
+ try:
+ class_finder.visit(tree)
+ except ClassFoundException as e:
+ line_number = e.args[0]
+ return lines, line_number
else:
raise OSError('could not find class definition')
@@ -837,7 +864,12 @@ def findsource(object):
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
- if pat.match(lines[lnum]): break
+ try:
+ line = lines[lnum]
+ except IndexError:
+ raise OSError('lineno is out of bounds')
+ if pat.match(line):
+ break
lnum = lnum - 1
return lines, lnum
raise OSError('could not find code object')
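findsource() now walks the module's AST and matches the class by __qualname__, so identically named classes at different nesting levels no longer collide; a small illustration (run as a script file):

    import inspect

    class Outer:
        class Inner:
            pass

    # Matched via Outer.Inner.__qualname__ ('Outer.Inner') rather than a
    # regex search for 'class Inner', so the nested definition is found.
    print(inspect.getsource(Outer.Inner))
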
@@ -899,6 +931,7 @@ class BlockFinder:
self.indecorator = False
self.decoratorhasargs = False
self.last = 1
+ self.body_col0 = None
def tokeneater(self, type, token, srowcol, erowcol, line):
if not self.started and not self.indecorator:
@@ -930,6 +963,8 @@ class BlockFinder:
elif self.passline:
pass
elif type == tokenize.INDENT:
+ if self.body_col0 is None and self.started:
+ self.body_col0 = erowcol[1]
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
@@ -939,6 +974,10 @@ class BlockFinder:
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
+ elif type == tokenize.COMMENT:
+ if self.body_col0 is not None and srowcol[1] >= self.body_col0:
+ # Include comments if indented at least as much as the block
+ self.last = srowcol[0]
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
# any other token on the same indentation level end the previous
# block as well, except the pseudo-tokens COMMENT and NL.
@@ -1136,7 +1175,6 @@ def getfullargspec(func):
varkw = None
posonlyargs = []
kwonlyargs = []
- defaults = ()
annotations = {}
defaults = ()
kwdefaults = {}
@@ -2603,7 +2641,7 @@ class BoundArguments:
Has the following public attributes:
- * arguments : OrderedDict
+ * arguments : dict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
@@ -2703,7 +2741,7 @@ class BoundArguments:
# Signature.bind_partial().
continue
new_arguments.append((name, val))
- self.arguments = OrderedDict(new_arguments)
+ self.arguments = dict(new_arguments)
def __eq__(self, other):
if self is other:
@@ -2771,7 +2809,7 @@ class Signature:
top_kind = _POSITIONAL_ONLY
kind_defaults = False
- for idx, param in enumerate(parameters):
+ for param in parameters:
kind = param.kind
name = param.name
@@ -2806,8 +2844,7 @@ class Signature:
params[name] = param
else:
- params = OrderedDict(((param.name, param)
- for param in parameters))
+ params = OrderedDict((param.name, param) for param in parameters)
self._parameters = types.MappingProxyType(params)
self._return_annotation = return_annotation
@@ -2889,7 +2926,7 @@ class Signature:
def _bind(self, args, kwargs, *, partial=False):
"""Private method. Don't use directly."""
- arguments = OrderedDict()
+ arguments = {}
parameters = iter(self.parameters.values())
parameters_ex = ()
diff --git a/x64/Lib/ipaddress.py b/x64/Lib/ipaddress.py
index 873c764..bc662c4 100644
--- a/x64/Lib/ipaddress.py
+++ b/x64/Lib/ipaddress.py
@@ -560,6 +560,8 @@ class _IPAddressBase:
return self.__class__, (str(self),)
+_address_fmt_re = None
+
@functools.total_ordering
class _BaseAddress(_IPAddressBase):
@@ -618,6 +620,55 @@ class _BaseAddress(_IPAddressBase):
def __reduce__(self):
return self.__class__, (self._ip,)
+ def __format__(self, fmt):
+ """Returns an IP address as a formatted string.
+
+ Supported presentation types are:
+ 's': returns the IP address as a string (default)
+ 'b': converts to binary and returns a zero-padded string
+ 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string
+ 'n': the same as 'b' for IPv4 and 'x' for IPv6
+
+ For binary and hex presentation types, the alternate form specifier
+ '#' and the grouping option '_' are supported.
+ """
+
+ # Support string formatting
+ if not fmt or fmt[-1] == 's':
+ return format(str(self), fmt)
+
+ # From here on down, support for 'bnXx'
+ global _address_fmt_re
+ if _address_fmt_re is None:
+ import re
+ _address_fmt_re = re.compile('(#?)(_?)([xbnX])')
+
+ m = _address_fmt_re.fullmatch(fmt)
+ if not m:
+ return super().__format__(fmt)
+
+ alternate, grouping, fmt_base = m.groups()
+
+ # Set some defaults
+ if fmt_base == 'n':
+ if self._version == 4:
+ fmt_base = 'b' # Binary is default for ipv4
+ else:
+ fmt_base = 'x' # Hex is default for ipv6
+
+ if fmt_base == 'b':
+ padlen = self._max_prefixlen
+ else:
+ padlen = self._max_prefixlen // 4
+
+ if grouping:
+ padlen += padlen // 4 - 1
+
+ if alternate:
+ padlen += 2 # 0b or 0x
+
+ return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}')
+
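The new __format__ accepts 'b', 'x', 'X' and 'n' with the '#' and '_' options; for example:

    from ipaddress import ip_address

    addr = ip_address('192.168.0.1')
    print(f'{addr:b}')    # 32-bit zero-padded binary string
    print(f'{addr:#_x}')  # 0xc0a8_0001
    print(f'{addr:n}')    # same as 'b' for IPv4, 'x' for IPv6
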
@functools.total_ordering
class _BaseNetwork(_IPAddressBase):
@@ -1073,7 +1124,6 @@ class _BaseNetwork(_IPAddressBase):
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
-
class _BaseV4:
"""Base IPv4 object.
@@ -1347,7 +1397,7 @@ class IPv4Interface(IPv4Address):
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
- if not address_equal or address_equal is NotImplemented:
+ if address_equal is NotImplemented or not address_equal:
return address_equal
try:
return self.network == other.network
@@ -1370,7 +1420,7 @@ class IPv4Interface(IPv4Address):
return False
def __hash__(self):
- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
__reduce__ = _IPAddressBase.__reduce__
@@ -1416,7 +1466,7 @@ class IPv4Network(_BaseV4, _BaseNetwork):
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
- '192.0.0.2/0.0.0.255'
+ '192.0.2.0/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
@@ -1458,6 +1508,8 @@ class IPv4Network(_BaseV4, _BaseNetwork):
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
+ elif self._prefixlen == (self._max_prefixlen):
+ self.hosts = lambda: [IPv4Address(addr)]
@property
@functools.lru_cache()
@@ -1785,6 +1837,26 @@ class _BaseV6:
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
+ @staticmethod
+ def _split_scope_id(ip_str):
+ """Helper function to parse IPv6 string address with scope id.
+
+ See RFC 4007 for details.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ (addr, scope_id) tuple.
+
+ """
+ addr, sep, scope_id = ip_str.partition('%')
+ if not sep:
+ scope_id = None
+ elif not scope_id or '%' in scope_id:
+ raise AddressValueError('Invalid IPv6 address: "%r"' % ip_str)
+ return addr, scope_id
+
@property
def max_prefixlen(self):
return self._max_prefixlen
@@ -1798,7 +1870,7 @@ class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
- __slots__ = ('_ip', '__weakref__')
+ __slots__ = ('_ip', '_scope_id', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
@@ -1821,12 +1893,14 @@ class IPv6Address(_BaseV6, _BaseAddress):
if isinstance(address, int):
self._check_int_address(address)
self._ip = address
+ self._scope_id = None
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
self._ip = int.from_bytes(address, 'big')
+ self._scope_id = None
return
# Assume input argument to be string or any object representation
@@ -1834,8 +1908,37 @@ class IPv6Address(_BaseV6, _BaseAddress):
addr_str = str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
+ addr_str, self._scope_id = self._split_scope_id(addr_str)
+
self._ip = self._ip_int_from_string(addr_str)
+ def __str__(self):
+ ip_str = super().__str__()
+ return ip_str + '%' + self._scope_id if self._scope_id else ip_str
+
+ def __hash__(self):
+ return hash((self._ip, self._scope_id))
+
+ def __eq__(self, other):
+ address_equal = super().__eq__(other)
+ if address_equal is NotImplemented:
+ return NotImplemented
+ if not address_equal:
+ return False
+ return self._scope_id == getattr(other, '_scope_id', None)
+
+ @property
+ def scope_id(self):
+ """Identifier of a particular zone of the address's scope.
+
+ See RFC 4007 for details.
+
+ Returns:
+ A string identifying the zone of the address if specified, else None.
+
+ """
+ return self._scope_id
+
@property
def packed(self):
"""The binary representation of this address."""
@@ -1989,12 +2092,12 @@ class IPv6Interface(IPv6Address):
return self.network.hostmask
def __str__(self):
- return '%s/%d' % (self._string_from_ip_int(self._ip),
+ return '%s/%d' % (super().__str__(),
self._prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
- if not address_equal or address_equal is NotImplemented:
+ if address_equal is NotImplemented or not address_equal:
return address_equal
try:
return self.network == other.network
@@ -2007,7 +2110,7 @@ class IPv6Interface(IPv6Address):
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
- return NotImplemented
+ return address_less
try:
return (self.network < other.network or
self.network == other.network and address_less)
@@ -2017,7 +2120,7 @@ class IPv6Interface(IPv6Address):
return False
def __hash__(self):
- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+ return hash((self._ip, self._prefixlen, int(self.network.network_address)))
__reduce__ = _IPAddressBase.__reduce__
@@ -2110,6 +2213,8 @@ class IPv6Network(_BaseV6, _BaseNetwork):
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
+ elif self._prefixlen == self._max_prefixlen:
+ self.hosts = lambda: [IPv6Address(addr)]
def hosts(self):
"""Generate Iterator over usable hosts in a network.
diff --git a/x64/Lib/json/__init__.py b/x64/Lib/json/__init__.py
index 1ba8b48..2c52bde 100644
--- a/x64/Lib/json/__init__.py
+++ b/x64/Lib/json/__init__.py
@@ -329,8 +329,6 @@ def loads(s, *, cls=None, object_hook=None, parse_float=None,
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
-
- The ``encoding`` argument is ignored and deprecated since Python 3.1.
"""
if isinstance(s, str):
if s.startswith('\ufeff'):
@@ -342,15 +340,6 @@ def loads(s, *, cls=None, object_hook=None, parse_float=None,
f'not {s.__class__.__name__}')
s = s.decode(detect_encoding(s), 'surrogatepass')
- if "encoding" in kw:
- import warnings
- warnings.warn(
- "'encoding' is ignored and deprecated. It will be removed in Python 3.9",
- DeprecationWarning,
- stacklevel=2
- )
- del kw['encoding']
-
if (cls is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None and not kw):
diff --git a/x64/Lib/json/tool.py b/x64/Lib/json/tool.py
index 8db9ea4..5dee0a7 100644
--- a/x64/Lib/json/tool.py
+++ b/x64/Lib/json/tool.py
@@ -30,26 +30,49 @@ def main():
default=sys.stdout)
parser.add_argument('--sort-keys', action='store_true', default=False,
help='sort the output of dictionaries alphabetically by key')
+ parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false',
+ help='disable escaping of non-ASCII characters')
parser.add_argument('--json-lines', action='store_true', default=False,
- help='parse input using the jsonlines format')
+ help='parse input using the JSON Lines format. '
+ 'Use with --no-indent or --compact to produce valid JSON Lines output.')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--indent', default=4, type=int,
+ help='separate items with newlines and use this number '
+ 'of spaces for indentation')
+ group.add_argument('--tab', action='store_const', dest='indent',
+ const='\t', help='separate items with newlines and use '
+ 'tabs for indentation')
+ group.add_argument('--no-indent', action='store_const', dest='indent',
+ const=None,
+ help='separate items with spaces rather than newlines')
+ group.add_argument('--compact', action='store_true',
+ help='suppress all whitespace separation (most compact)')
options = parser.parse_args()
- infile = options.infile
- outfile = options.outfile
- sort_keys = options.sort_keys
- json_lines = options.json_lines
- with infile, outfile:
+ dump_args = {
+ 'sort_keys': options.sort_keys,
+ 'indent': options.indent,
+ 'ensure_ascii': options.ensure_ascii,
+ }
+ if options.compact:
+ dump_args['indent'] = None
+ dump_args['separators'] = ',', ':'
+
+ with options.infile as infile, options.outfile as outfile:
try:
- if json_lines:
+ if options.json_lines:
objs = (json.loads(line) for line in infile)
else:
objs = (json.load(infile), )
for obj in objs:
- json.dump(obj, outfile, sort_keys=sort_keys, indent=4)
+ json.dump(obj, outfile, **dump_args)
outfile.write('\n')
except ValueError as e:
raise SystemExit(e)
if __name__ == '__main__':
- main()
+ try:
+ main()
+ except BrokenPipeError as exc:
+ sys.exit(exc.errno)
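The new json.tool options map directly onto json.dump() arguments; --compact, for instance, corresponds to this sketch:

    import json

    obj = {'b': 'ü', 'a': [1, 2]}
    # --compact --sort-keys --no-ensure-ascii
    print(json.dumps(obj, indent=None, separators=(',', ':'),
                     sort_keys=True, ensure_ascii=False))
    # {"a":[1,2],"b":"ü"}
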
diff --git a/x64/Lib/keyword.py b/x64/Lib/keyword.py
index ddcbb25..59fcfb0 100644
--- a/x64/Lib/keyword.py
+++ b/x64/Lib/keyword.py
@@ -1,23 +1,25 @@
-"""Keywords (from "Grammar/Grammar")
+"""Keywords (from "Grammar/python.gram")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree and run:
- python3 -m Parser.pgen.keywordgen Grammar/Grammar \
- Grammar/Tokens \
- Lib/keyword.py
+ PYTHONPATH=Tools/peg_generator python3 -m pegen.keywordgen \
+ Grammar/Grammar \
+ Grammar/Tokens \
+ Lib/keyword.py
Alternatively, you can run 'make regen-keyword'.
"""
-__all__ = ["iskeyword", "kwlist"]
+__all__ = ["iskeyword", "issoftkeyword", "kwlist", "softkwlist"]
kwlist = [
'False',
'None',
'True',
+ '__peg_parser__',
'and',
'as',
'assert',
@@ -52,4 +54,9 @@ kwlist = [
'yield'
]
+softkwlist = [
+
+]
+
iskeyword = frozenset(kwlist).__contains__
+issoftkeyword = frozenset(softkwlist).__contains__
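Usage of the new soft-keyword API (the generated softkwlist is empty in this snapshot, so issoftkeyword() returns False for everything here):

    import keyword

    print(keyword.iskeyword('yield'))      # True
    print(keyword.iskeyword('match'))      # False in 3.9
    print(keyword.issoftkeyword('match'))  # False here; populated in later releases
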
diff --git a/x64/Lib/linecache.py b/x64/Lib/linecache.py
index 3afcce1..fa5dbd0 100644
--- a/x64/Lib/linecache.py
+++ b/x64/Lib/linecache.py
@@ -10,17 +10,8 @@ import sys
import os
import tokenize
-__all__ = ["getline", "clearcache", "checkcache"]
+__all__ = ["getline", "clearcache", "checkcache", "lazycache"]
-def getline(filename, lineno, module_globals=None):
- lines = getlines(filename, module_globals)
- if 1 <= lineno <= len(lines):
- return lines[lineno-1]
- else:
- return ''
-
-
-# The cache
# The cache. Maps filenames to either a thunk which will provide source code,
# or a tuple (size, mtime, lines, fullname) once loaded.
@@ -29,9 +20,17 @@ cache = {}
def clearcache():
"""Clear the cache entirely."""
+ cache.clear()
- global cache
- cache = {}
+
+def getline(filename, lineno, module_globals=None):
+ """Get a line for a Python source file from the cache.
+ Update the cache if it doesn't contain an entry for this file already."""
+
+ lines = getlines(filename, module_globals)
+ if 1 <= lineno <= len(lines):
+ return lines[lineno - 1]
+ return ''
def getlines(filename, module_globals=None):
@@ -56,11 +55,10 @@ def checkcache(filename=None):
if filename is None:
filenames = list(cache.keys())
+ elif filename in cache:
+ filenames = [filename]
else:
- if filename in cache:
- filenames = [filename]
- else:
- return
+ return
for filename in filenames:
entry = cache[filename]
@@ -73,10 +71,10 @@ def checkcache(filename=None):
try:
stat = os.stat(fullname)
except OSError:
- del cache[filename]
+ cache.pop(filename, None)
continue
if size != stat.st_size or mtime != stat.st_mtime:
- del cache[filename]
+ cache.pop(filename, None)
def updatecache(filename, module_globals=None):
@@ -86,7 +84,7 @@ def updatecache(filename, module_globals=None):
if filename in cache:
if len(cache[filename]) != 1:
- del cache[filename]
+ cache.pop(filename, None)
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
@@ -109,8 +107,10 @@ def updatecache(filename, module_globals=None):
# for this module.
return []
cache[filename] = (
- len(data), None,
- [line+'\n' for line in data.splitlines()], fullname
+ len(data),
+ None,
+ [line + '\n' for line in data.splitlines()],
+ fullname
)
return cache[filename][2]
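linecache's public behavior is unchanged by this cleanup; for reference ('example.py' is a hypothetical file):

    import linecache

    line = linecache.getline('example.py', 1)  # first line, or '' if unavailable
    linecache.checkcache()   # drop entries whose files changed on disk
    linecache.clearcache()
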
diff --git a/x64/Lib/locale.py b/x64/Lib/locale.py
index dd8a085..1a4e9f6 100644
--- a/x64/Lib/locale.py
+++ b/x64/Lib/locale.py
@@ -279,6 +279,8 @@ def currency(val, symbol=True, grouping=False, international=False):
if precedes:
s = smb + (separated and ' ' or '') + s
else:
+ if international and smb[-1] == ' ':
+ smb = smb[:-1]
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
diff --git a/x64/Lib/logging/__init__.py b/x64/Lib/logging/__init__.py
index 0cfaec8..7b169a1 100644
--- a/x64/Lib/logging/__init__.py
+++ b/x64/Lib/logging/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -18,7 +18,7 @@
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
-Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
@@ -234,11 +234,9 @@ if not hasattr(os, 'register_at_fork'): # Windows and friends.
def _register_at_fork_reinit_lock(instance):
pass # no-op when os.register_at_fork does not exist.
else:
- # A collection of instances with a createLock method (logging.Handler)
+ # A collection of instances with a _at_fork_reinit method (logging.Handler)
# to be called in the child after forking. The weakref avoids us keeping
- # discarded Handler instances alive. A set is used to avoid accumulating
- # duplicate registrations as createLock() is responsible for registering
- # a new Handler instance with this set in the first place.
+ # discarded Handler instances alive.
_at_fork_reinit_lock_weakset = weakref.WeakSet()
def _register_at_fork_reinit_lock(instance):
@@ -249,16 +247,12 @@ else:
_releaseLock()
def _after_at_fork_child_reinit_locks():
- # _acquireLock() was called in the parent before forking.
for handler in _at_fork_reinit_lock_weakset:
- try:
- handler.createLock()
- except Exception as err:
- # Similar to what PyErr_WriteUnraisable does.
- print("Ignoring exception from logging atfork", instance,
- "._reinit_lock() method:", err, file=sys.stderr)
- _releaseLock() # Acquired by os.register_at_fork(before=.
+ handler._at_fork_reinit()
+ # _acquireLock() was called in the parent before forking.
+ # The lock is reinitialized to unlocked state.
+ _lock._at_fork_reinit()
os.register_at_fork(before=_acquireLock,
after_in_child=_after_at_fork_child_reinit_locks,
@@ -515,7 +509,7 @@ class Formatter(object):
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
- the style-dependent default value, "%(message)s", "{message}", or
+ style-dependent default value, "%(message)s", "{message}", or
"${message}", is used.
The Formatter can be initialized with a format string which makes use of
@@ -603,8 +597,9 @@ class Formatter(object):
if datefmt:
s = time.strftime(datefmt, ct)
else:
- t = time.strftime(self.default_time_format, ct)
- s = self.default_msec_format % (t, record.msecs)
+ s = time.strftime(self.default_time_format, ct)
+ if self.default_msec_format:
+ s = self.default_msec_format % (s, record.msecs)
return s
def formatException(self, ei):
@@ -753,8 +748,8 @@ class Filter(object):
"""
Determine if the specified record is to be logged.
- Is the specified record to be logged? Returns 0 for no, nonzero for
- yes. If deemed appropriate, the record may be modified in-place.
+ Returns True if the record should be logged, or False otherwise.
+ If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
@@ -891,6 +886,9 @@ class Handler(Filterer):
self.lock = threading.RLock()
_register_at_fork_reinit_lock(self)
+ def _at_fork_reinit(self):
+ self.lock._at_fork_reinit()
+
def acquire(self):
"""
Acquire the I/O thread lock.
@@ -1122,7 +1120,7 @@ class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
- def __init__(self, filename, mode='a', encoding=None, delay=False):
+ def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None):
"""
Open the specified file and use it as the stream for logging.
"""
@@ -1133,6 +1131,7 @@ class FileHandler(StreamHandler):
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
+ self.errors = errors
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
@@ -1169,7 +1168,8 @@ class FileHandler(StreamHandler):
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
- return open(self.baseFilename, self.mode, encoding=self.encoding)
+ return open(self.baseFilename, self.mode, encoding=self.encoding,
+ errors=self.errors)
def emit(self, record):
"""
@@ -1931,15 +1931,20 @@ def basicConfig(**kwargs):
attached to the root logger are removed and closed, before
carrying out the configuration as specified by the other
arguments.
+ encoding If specified together with a filename, this encoding is passed to
+ the created FileHandler, causing it to be used when the file is
+ opened.
+ errors If specified together with a filename, this value is passed to the
+ created FileHandler, causing it to be used when the file is
+ opened in text mode. If not specified, the default value is
+ `backslashreplace`.
+
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
- .. versionchanged:: 3.8
- Added the ``force`` parameter.
-
.. versionchanged:: 3.2
Added the ``style`` parameter.
@@ -1949,12 +1954,20 @@ def basicConfig(**kwargs):
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
``stream``.
+
+ .. versionchanged:: 3.8
+ Added the ``force`` parameter.
+
+ .. versionchanged:: 3.9
+ Added the ``encoding`` and ``errors`` parameters.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
force = kwargs.pop('force', False)
+ encoding = kwargs.pop('encoding', None)
+ errors = kwargs.pop('errors', 'backslashreplace')
if force:
for h in root.handlers[:]:
root.removeHandler(h)
@@ -1973,7 +1986,10 @@ def basicConfig(**kwargs):
filename = kwargs.pop("filename", None)
mode = kwargs.pop("filemode", 'a')
if filename:
- h = FileHandler(filename, mode)
+ if 'b' in mode:
+ errors = None
+ h = FileHandler(filename, mode,
+ encoding=encoding, errors=errors)
else:
stream = kwargs.pop("stream", None)
h = StreamHandler(stream)
@@ -2009,10 +2025,9 @@ def getLogger(name=None):
If no name is specified, return the root logger.
"""
- if name:
- return Logger.manager.getLogger(name)
- else:
+ if not name or isinstance(name, str) and name == root.name:
return root
+ return Logger.manager.getLogger(name)
def critical(msg, *args, **kwargs):
"""
@@ -2151,6 +2166,9 @@ class NullHandler(Handler):
def createLock(self):
self.lock = None
+ def _at_fork_reinit(self):
+ pass
+
# Warnings integration
_warnings_showwarning = None
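A short sketch of the encoding/errors parameters added to basicConfig() and FileHandler ('app.log' is an arbitrary name):

    import logging

    logging.basicConfig(filename='app.log',
                        encoding='utf-8',
                        errors='backslashreplace',  # the new default
                        level=logging.INFO)
    logging.info('café')  # written to app.log as UTF-8
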
diff --git a/x64/Lib/logging/config.py b/x64/Lib/logging/config.py
index 3cd5fea..fd3aded 100644
--- a/x64/Lib/logging/config.py
+++ b/x64/Lib/logging/config.py
@@ -143,6 +143,7 @@ def _install_handlers(cp, formatters):
kwargs = section.get("kwargs", '{}')
kwargs = eval(kwargs, vars(logging))
h = klass(*args, **kwargs)
+ h.name = hand
if "level" in section:
level = section["level"]
h.setLevel(level)
diff --git a/x64/Lib/logging/handlers.py b/x64/Lib/logging/handlers.py
index 34ff7a0..867ef4e 100644
--- a/x64/Lib/logging/handlers.py
+++ b/x64/Lib/logging/handlers.py
@@ -48,15 +48,19 @@ class BaseRotatingHandler(logging.FileHandler):
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
- def __init__(self, filename, mode, encoding=None, delay=False):
+ namer = None
+ rotator = None
+
+ def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
"""
Use the specified filename for streamed logging
"""
- logging.FileHandler.__init__(self, filename, mode, encoding, delay)
+ logging.FileHandler.__init__(self, filename, mode=mode,
+ encoding=encoding, delay=delay,
+ errors=errors)
self.mode = mode
self.encoding = encoding
- self.namer = None
- self.rotator = None
+ self.errors = errors
def emit(self, record):
"""
@@ -117,7 +121,8 @@ class RotatingFileHandler(BaseRotatingHandler):
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
- def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
+ def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
+ encoding=None, delay=False, errors=None):
"""
Open the specified file and use it as the stream for logging.
@@ -145,7 +150,8 @@ class RotatingFileHandler(BaseRotatingHandler):
# on each run.
if maxBytes > 0:
mode = 'a'
- BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
+ BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
+ delay=delay, errors=errors)
self.maxBytes = maxBytes
self.backupCount = backupCount
@@ -196,8 +202,11 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
- def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
- BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
+ def __init__(self, filename, when='h', interval=1, backupCount=0,
+ encoding=None, delay=False, utc=False, atTime=None,
+ errors=None):
+ BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
+ delay=delay, errors=errors)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
@@ -431,8 +440,11 @@ class WatchedFileHandler(logging.FileHandler):
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
- def __init__(self, filename, mode='a', encoding=None, delay=False):
- logging.FileHandler.__init__(self, filename, mode, encoding, delay)
+ def __init__(self, filename, mode='a', encoding=None, delay=False,
+ errors=None):
+ logging.FileHandler.__init__(self, filename, mode=mode,
+ encoding=encoding, delay=delay,
+ errors=errors)
self.dev, self.ino = -1, -1
self._statstream()
@@ -730,6 +742,10 @@ class SysLogHandler(logging.Handler):
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
+ LOG_NTP = 12 # NTP subsystem
+ LOG_SECURITY = 13 # Log audit
+ LOG_CONSOLE = 14 # Log alert
+ LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
@@ -757,27 +773,30 @@ class SysLogHandler(logging.Handler):
}
facility_names = {
- "auth": LOG_AUTH,
- "authpriv": LOG_AUTHPRIV,
- "cron": LOG_CRON,
- "daemon": LOG_DAEMON,
- "ftp": LOG_FTP,
- "kern": LOG_KERN,
- "lpr": LOG_LPR,
- "mail": LOG_MAIL,
- "news": LOG_NEWS,
- "security": LOG_AUTH, # DEPRECATED
- "syslog": LOG_SYSLOG,
- "user": LOG_USER,
- "uucp": LOG_UUCP,
- "local0": LOG_LOCAL0,
- "local1": LOG_LOCAL1,
- "local2": LOG_LOCAL2,
- "local3": LOG_LOCAL3,
- "local4": LOG_LOCAL4,
- "local5": LOG_LOCAL5,
- "local6": LOG_LOCAL6,
- "local7": LOG_LOCAL7,
+ "auth": LOG_AUTH,
+ "authpriv": LOG_AUTHPRIV,
+ "console": LOG_CONSOLE,
+ "cron": LOG_CRON,
+ "daemon": LOG_DAEMON,
+ "ftp": LOG_FTP,
+ "kern": LOG_KERN,
+ "lpr": LOG_LPR,
+ "mail": LOG_MAIL,
+ "news": LOG_NEWS,
+ "ntp": LOG_NTP,
+ "security": LOG_SECURITY,
+ "solaris-cron": LOG_SOLCRON,
+ "syslog": LOG_SYSLOG,
+ "user": LOG_USER,
+ "uucp": LOG_UUCP,
+ "local0": LOG_LOCAL0,
+ "local1": LOG_LOCAL1,
+ "local2": LOG_LOCAL2,
+ "local3": LOG_LOCAL3,
+ "local4": LOG_LOCAL4,
+ "local5": LOG_LOCAL5,
+ "local6": LOG_LOCAL6,
+ "local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
@@ -1154,6 +1173,20 @@ class HTTPHandler(logging.Handler):
"""
return record.__dict__
+ def getConnection(self, host, secure):
+ """
+ Get an HTTP[S]Connection.
+
+ Override when a custom connection is required, for example if
+ there is a proxy.
+ """
+ import http.client
+ if secure:
+ connection = http.client.HTTPSConnection(host, context=self.context)
+ else:
+ connection = http.client.HTTPConnection(host)
+ return connection
+
def emit(self, record):
"""
Emit a record.
@@ -1161,12 +1194,9 @@ class HTTPHandler(logging.Handler):
Send the record to the Web server as a percent-encoded dictionary
"""
try:
- import http.client, urllib.parse
+ import urllib.parse
host = self.host
- if self.secure:
- h = http.client.HTTPSConnection(host, context=self.context)
- else:
- h = http.client.HTTPConnection(host)
+ h = self.getConnection(host, self.secure)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
@@ -1242,7 +1272,7 @@ class BufferingHandler(logging.Handler):
"""
self.acquire()
try:
- self.buffer = []
+ self.buffer.clear()
finally:
self.release()
@@ -1294,7 +1324,11 @@ class MemoryHandler(BufferingHandler):
"""
Set the target handler for this handler.
"""
- self.target = target
+ self.acquire()
+ try:
+ self.target = target
+ finally:
+ self.release()
def flush(self):
"""
@@ -1309,7 +1343,7 @@ class MemoryHandler(BufferingHandler):
if self.target:
for record in self.buffer:
self.target.handle(record)
- self.buffer = []
+ self.buffer.clear()
finally:
self.release()
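The new HTTPHandler.getConnection() hook makes custom transports possible without overriding emit(); a hedged sketch for routing through a proxy ('proxy.example.com' is made up, and the secure flag is ignored for brevity):

    import http.client
    import logging.handlers

    class ProxyHTTPHandler(logging.handlers.HTTPHandler):
        def getConnection(self, host, secure):
            # Tunnel every request through a hypothetical local proxy.
            conn = http.client.HTTPConnection('proxy.example.com', 8080)
            conn.set_tunnel(host)
            return conn
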
diff --git a/x64/Lib/mailbox.py b/x64/Lib/mailbox.py
index 5b4e864..70da07e 100644
--- a/x64/Lib/mailbox.py
+++ b/x64/Lib/mailbox.py
@@ -18,6 +18,7 @@ import email.message
import email.generator
import io
import contextlib
+from types import GenericAlias
try:
import fcntl
except ImportError:
@@ -260,6 +261,8 @@ class Mailbox:
else:
raise TypeError('Invalid message type: %s' % type(message))
+ __class_getitem__ = classmethod(GenericAlias)
+
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
@@ -2015,6 +2018,8 @@ class _ProxyFile:
return False
return self._file.closed
+ __class_getitem__ = classmethod(GenericAlias)
+
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
diff --git a/x64/Lib/mailcap.py b/x64/Lib/mailcap.py
index bd0fc09..ae416a8 100644
--- a/x64/Lib/mailcap.py
+++ b/x64/Lib/mailcap.py
@@ -251,6 +251,7 @@ def test():
else:
print("Executing:", command)
sts = os.system(command)
+ sts = os.waitstatus_to_exitcode(sts)
if sts:
print("Exit status:", sts)
diff --git a/x64/Lib/mimetypes.py b/x64/Lib/mimetypes.py
index f33b658..92c2a47 100644
--- a/x64/Lib/mimetypes.py
+++ b/x64/Lib/mimetypes.py
@@ -372,7 +372,7 @@ def init(files=None):
def read_mime_types(file):
try:
- f = open(file)
+ f = open(file, encoding='utf-8')
except OSError:
return None
with f:
@@ -401,6 +401,7 @@ def _default_mime_types():
'.Z': 'compress',
'.bz2': 'bzip2',
'.xz': 'xz',
+ '.br': 'br',
}
# Before adding new types, make sure they are either registered with IANA,
diff --git a/x64/Lib/modulefinder.py b/x64/Lib/modulefinder.py
index e0d2998..cb455f4 100644
--- a/x64/Lib/modulefinder.py
+++ b/x64/Lib/modulefinder.py
@@ -5,9 +5,8 @@ import importlib._bootstrap_external
import importlib.machinery
import marshal
import os
+import io
import sys
-import types
-import warnings
LOAD_CONST = dis.opmap['LOAD_CONST']
@@ -80,23 +79,20 @@ def _find_module(name, path=None):
if isinstance(spec.loader, importlib.machinery.SourceFileLoader):
kind = _PY_SOURCE
- mode = "r"
elif isinstance(spec.loader, importlib.machinery.ExtensionFileLoader):
kind = _C_EXTENSION
- mode = "rb"
elif isinstance(spec.loader, importlib.machinery.SourcelessFileLoader):
kind = _PY_COMPILED
- mode = "rb"
else: # Should never happen.
return None, None, ("", "", _SEARCH_ERROR)
- file = open(file_path, mode)
+ file = io.open_code(file_path)
suffix = os.path.splitext(file_path)[-1]
- return file, file_path, (suffix, mode, kind)
+ return file, file_path, (suffix, "rb", kind)
class Module:
@@ -160,15 +156,15 @@ class ModuleFinder:
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
- with open(pathname) as fp:
- stuff = ("", "r", _PY_SOURCE)
+ with io.open_code(pathname) as fp:
+ stuff = ("", "rb", _PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
- with open(pathname) as fp:
- stuff = (ext, "r", _PY_SOURCE)
+ with io.open_code(pathname) as fp:
+ stuff = (ext, "rb", _PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None, level=-1):
@@ -322,6 +318,7 @@ class ModuleFinder:
except ImportError:
self.msgout(3, "import_module ->", None)
return None
+
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
@@ -340,7 +337,7 @@ class ModuleFinder:
self.msgout(2, "load_module ->", m)
return m
if type == _PY_SOURCE:
- co = compile(fp.read()+'\n', pathname, 'exec')
+ co = compile(fp.read(), pathname, 'exec')
elif type == _PY_COMPILED:
try:
data = fp.read()
diff --git a/x64/Lib/msilib/__init__.py b/x64/Lib/msilib/__init__.py
index 0bc8dd9..0e85aa2 100644
--- a/x64/Lib/msilib/__init__.py
+++ b/x64/Lib/msilib/__init__.py
@@ -116,7 +116,7 @@ def add_data(db, table, values):
raise TypeError("Unsupported type %s" % field.__class__.__name__)
try:
v.Modify(MSIMODIFY_INSERT, r)
- except Exception as e:
+ except Exception:
raise MSIError("Could not insert "+repr(values)+" into "+table)
r.ClearData()
diff --git a/x64/Lib/multiprocessing/connection.py b/x64/Lib/multiprocessing/connection.py
index c9f995e..510e4b5 100644
--- a/x64/Lib/multiprocessing/connection.py
+++ b/x64/Lib/multiprocessing/connection.py
@@ -73,6 +73,11 @@ def arbitrary_address(family):
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
+ # Prefer abstract sockets if possible to avoid problems with the address
+ # size. When coding portable applications, some implementations have
+ # sun_path as short as 92 bytes in the sockaddr_un struct.
+ if util.abstract_sockets_supported:
+ return f"\0listener-{os.getpid()}-{next(_mmap_counter)}"
return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
@@ -102,7 +107,7 @@ def address_type(address):
return 'AF_INET'
elif type(address) is str and address.startswith('\\\\'):
return 'AF_PIPE'
- elif type(address) is str:
+ elif type(address) is str or util.is_abstract_socket_namespace(address):
return 'AF_UNIX'
else:
raise ValueError('address type of %r unrecognized' % address)
@@ -597,7 +602,8 @@ class SocketListener(object):
self._family = family
self._last_accepted = None
- if family == 'AF_UNIX':
+ if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):
+ # Linux abstract socket namespaces do not need to be explicitly unlinked
self._unlink = util.Finalize(
self, os.unlink, args=(address,), exitpriority=0
)
diff --git a/x64/Lib/multiprocessing/context.py b/x64/Lib/multiprocessing/context.py
index 5f8e0f0..8d0525d 100644
--- a/x64/Lib/multiprocessing/context.py
+++ b/x64/Lib/multiprocessing/context.py
@@ -257,10 +257,11 @@ class DefaultContext(BaseContext):
if sys.platform == 'win32':
return ['spawn']
else:
+ methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
if reduction.HAVE_SEND_HANDLE:
- return ['fork', 'spawn', 'forkserver']
- else:
- return ['fork', 'spawn']
+ methods.append('forkserver')
+ return methods
+
#
# Context types for fixed start method
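The reordering above makes 'spawn' the default start method on macOS while keeping 'fork' first elsewhere on POSIX; observable as:

    import multiprocessing as mp

    print(mp.get_start_method())       # 'spawn' on macOS/Windows, 'fork' on Linux
    print(mp.get_all_start_methods())  # e.g. ['spawn', 'fork', 'forkserver'] on macOS
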
diff --git a/x64/Lib/multiprocessing/forkserver.py b/x64/Lib/multiprocessing/forkserver.py
index 87ebef6..22a911a 100644
--- a/x64/Lib/multiprocessing/forkserver.py
+++ b/x64/Lib/multiprocessing/forkserver.py
@@ -55,7 +55,8 @@ class ForkServer(object):
os.waitpid(self._forkserver_pid, 0)
self._forkserver_pid = None
- os.unlink(self._forkserver_address)
+ if not util.is_abstract_socket_namespace(self._forkserver_address):
+ os.unlink(self._forkserver_address)
self._forkserver_address = None
def set_forkserver_preload(self, modules_names):
@@ -135,7 +136,8 @@ class ForkServer(object):
with socket.socket(socket.AF_UNIX) as listener:
address = connection.arbitrary_address('AF_UNIX')
listener.bind(address)
- os.chmod(address, 0o600)
+ if not util.is_abstract_socket_namespace(address):
+ os.chmod(address, 0o600)
listener.listen()
# all client processes own the write end of the "alive" pipe;
@@ -235,14 +237,8 @@ def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
break
child_w = pid_to_fd.pop(pid, None)
if child_w is not None:
- if os.WIFSIGNALED(sts):
- returncode = -os.WTERMSIG(sts)
- else:
- if not os.WIFEXITED(sts):
- raise AssertionError(
- "Child {0:n} status is {1:n}".format(
- pid,sts))
- returncode = os.WEXITSTATUS(sts)
+ returncode = os.waitstatus_to_exitcode(sts)
+
# Send exit code to client process
try:
write_signed(child_w, returncode)
diff --git a/x64/Lib/multiprocessing/managers.py b/x64/Lib/multiprocessing/managers.py
index 7e1818b..0eb16c6 100644
--- a/x64/Lib/multiprocessing/managers.py
+++ b/x64/Lib/multiprocessing/managers.py
@@ -21,6 +21,7 @@ import signal
import array
import queue
import time
+import types
import os
from os import getpid
@@ -59,7 +60,7 @@ if view_types[0] is not list: # only needed in Py3.0
class Token(object):
'''
- Type to uniquely indentify a shared object
+ Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
@@ -248,7 +249,7 @@ class Server(object):
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
- except KeyError as second_ke:
+ except KeyError:
raise ke
if methodname not in exposed:
@@ -296,7 +297,7 @@ class Server(object):
try:
try:
send(msg)
- except Exception as e:
+ except Exception:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
@@ -360,36 +361,10 @@ class Server(object):
finally:
self.stop_event.set()
- def create(*args, **kwds):
+ def create(self, c, typeid, /, *args, **kwds):
'''
Create a new shared object and return its id
'''
- if len(args) >= 3:
- self, c, typeid, *args = args
- elif not args:
- raise TypeError("descriptor 'create' of 'Server' object "
- "needs an argument")
- else:
- if 'typeid' not in kwds:
- raise TypeError('create expected at least 2 positional '
- 'arguments, got %d' % (len(args)-1))
- typeid = kwds.pop('typeid')
- if len(args) >= 2:
- self, c, *args = args
- import warnings
- warnings.warn("Passing 'typeid' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- if 'c' not in kwds:
- raise TypeError('create expected at least 2 positional '
- 'arguments, got %d' % (len(args)-1))
- c = kwds.pop('c')
- self, *args = args
- import warnings
- warnings.warn("Passing 'c' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- args = tuple(args)
-
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
@@ -421,7 +396,6 @@ class Server(object):
self.incref(c, ident)
return ident, tuple(exposed)
- create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)'
def get_methods(self, c, token):
'''
@@ -821,7 +795,7 @@ class BaseProxy(object):
def _callmethod(self, methodname, args=(), kwds={}):
'''
- Try to call a method of the referrent and return a copy of the result
+ Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
@@ -1156,6 +1130,8 @@ class ValueProxy(BaseProxy):
return self._callmethod('set', (value,))
value = property(get, set)
+ __class_getitem__ = classmethod(types.GenericAlias)
+
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
@@ -1289,30 +1265,23 @@ if HAS_SHMEM:
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
+ address = self.address
+ # The address of Linux abstract namespaces can be bytes
+ if isinstance(address, bytes):
+ address = os.fsdecode(address)
self.shared_memory_context = \
- _SharedMemoryTracker(f"shmm_{self.address}_{getpid()}")
+ _SharedMemoryTracker(f"shm_{address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
- def create(*args, **kwargs):
+ def create(self, c, typeid, /, *args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
- if len(args) >= 3:
- typeod = args[2]
- elif 'typeid' in kwargs:
- typeid = kwargs['typeid']
- elif not args:
- raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
- "object needs an argument")
- else:
- raise TypeError('create expected at least 2 positional '
- 'arguments, got %d' % (len(args)-1))
if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
- return Server.create(*args, **kwargs)
- create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'
+ return Server.create(self, c, typeid, *args, **kwargs)
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
diff --git a/x64/Lib/multiprocessing/pool.py b/x64/Lib/multiprocessing/pool.py
index b223d6a..bbe05a5 100644
--- a/x64/Lib/multiprocessing/pool.py
+++ b/x64/Lib/multiprocessing/pool.py
@@ -20,8 +20,8 @@ import queue
import threading
import time
import traceback
+import types
import warnings
-from queue import Empty
# If threading is available then ThreadPool should be provided. Therefore
# we avoid top-level imports which are liable to fail on some systems.
@@ -651,8 +651,6 @@ class Pool(object):
def terminate(self):
util.debug('terminating pool')
self._state = TERMINATE
- self._worker_handler._state = TERMINATE
- self._change_notifier.put(None)
self._terminate()
def join(self):
@@ -682,7 +680,12 @@ class Pool(object):
# this is guaranteed to only be called once
util.debug('finalizing pool')
+ # Notify that the worker_handler state has been changed so the
+ # _handle_workers loop can be unblocked (and exited) in order to
+ # send the finalization sentinel all the workers.
worker_handler._state = TERMINATE
+ change_notifier.put(None)
+
task_handler._state = TERMINATE
util.debug('helping task handler/workers to finish')
@@ -777,6 +780,8 @@ class ApplyResult(object):
del self._cache[self._job]
self._pool = None
+ __class_getitem__ = classmethod(types.GenericAlias)
+
AsyncResult = ApplyResult # create alias -- see #17805
#
diff --git a/x64/Lib/multiprocessing/popen_fork.py b/x64/Lib/multiprocessing/popen_fork.py
index 11e2160..625981c 100644
--- a/x64/Lib/multiprocessing/popen_fork.py
+++ b/x64/Lib/multiprocessing/popen_fork.py
@@ -25,16 +25,12 @@ class Popen(object):
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, flag)
- except OSError as e:
+ except OSError:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
if pid == self.pid:
- if os.WIFSIGNALED(sts):
- self.returncode = -os.WTERMSIG(sts)
- else:
- assert os.WIFEXITED(sts), "Status is {:n}".format(sts)
- self.returncode = os.WEXITSTATUS(sts)
+ self.returncode = os.waitstatus_to_exitcode(sts)
return self.returncode
def wait(self, timeout=None):
diff --git a/x64/Lib/multiprocessing/process.py b/x64/Lib/multiprocessing/process.py
index be13c07..0b2e0b4 100644
--- a/x64/Lib/multiprocessing/process.py
+++ b/x64/Lib/multiprocessing/process.py
@@ -317,12 +317,12 @@ class BaseProcess(object):
finally:
util._exit_function()
except SystemExit as e:
- if not e.args:
- exitcode = 1
- elif isinstance(e.args[0], int):
- exitcode = e.args[0]
+ if e.code is None:
+ exitcode = 0
+ elif isinstance(e.code, int):
+ exitcode = e.code
else:
- sys.stderr.write(str(e.args[0]) + '\n')
+ sys.stderr.write(str(e.code) + '\n')
exitcode = 1
except:
exitcode = 1
diff --git a/x64/Lib/multiprocessing/queues.py b/x64/Lib/multiprocessing/queues.py
index d112db2..a290181 100644
--- a/x64/Lib/multiprocessing/queues.py
+++ b/x64/Lib/multiprocessing/queues.py
@@ -14,6 +14,7 @@ import os
import threading
import collections
import time
+import types
import weakref
import errno
@@ -48,8 +49,7 @@ class Queue(object):
self._sem = ctx.BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False
-
- self._after_fork()
+ self._reset()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
@@ -62,11 +62,17 @@ class Queue(object):
def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
- self._after_fork()
+ self._reset()
def _after_fork(self):
debug('Queue._after_fork()')
- self._notempty = threading.Condition(threading.Lock())
+ self._reset(after_fork=True)
+
+ def _reset(self, after_fork=False):
+ if after_fork:
+ self._notempty._at_fork_reinit()
+ else:
+ self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
@@ -340,6 +346,10 @@ class SimpleQueue(object):
else:
self._wlock = ctx.Lock()
+ def close(self):
+ self._reader.close()
+ self._writer.close()
+
def empty(self):
return not self._poll()
@@ -366,3 +376,5 @@ class SimpleQueue(object):
else:
with self._wlock:
self._writer.send_bytes(obj)
+
+ __class_getitem__ = classmethod(types.GenericAlias)
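SimpleQueue gains an explicit close(); a minimal round-trip:

    from multiprocessing import SimpleQueue

    q = SimpleQueue()
    q.put('done')
    print(q.get())
    q.close()  # releases both underlying pipe connections
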
diff --git a/x64/Lib/multiprocessing/resource_sharer.py b/x64/Lib/multiprocessing/resource_sharer.py
index 8d5c990..6607650 100644
--- a/x64/Lib/multiprocessing/resource_sharer.py
+++ b/x64/Lib/multiprocessing/resource_sharer.py
@@ -63,7 +63,6 @@ class _ResourceSharer(object):
def __init__(self):
self._key = 0
self._cache = {}
- self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
@@ -113,10 +112,7 @@ class _ResourceSharer(object):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
- # If self._lock was locked at the time of the fork, it may be broken
- # -- see issue 6721. Replace it without letting it be gc'ed.
- self._old_locks.append(self._lock)
- self._lock = threading.Lock()
+ self._lock._at_fork_reinit()
if self._listener is not None:
self._listener.close()
self._listener = None
diff --git a/x64/Lib/multiprocessing/shared_memory.py b/x64/Lib/multiprocessing/shared_memory.py
index 184e367..122b3fc 100644
--- a/x64/Lib/multiprocessing/shared_memory.py
+++ b/x64/Lib/multiprocessing/shared_memory.py
@@ -14,6 +14,7 @@ import os
import errno
import struct
import secrets
+import types
if os.name == "nt":
import _winapi
@@ -75,6 +76,8 @@ class SharedMemory:
raise ValueError("'size' must be a positive integer")
if create:
self._flags = _O_CREX | os.O_RDWR
+ if size == 0:
+ raise ValueError("'size' must be a positive number different from zero")
if name is None and not self._flags & os.O_EXCL:
raise ValueError("'name' can only be None if create=True")
@@ -251,6 +254,15 @@ class ShareableList:
packing format for any storable value must require no more than 8
characters to describe its format."""
+ # The shared memory area is organized as follows:
+ # - 8 bytes: number of items (N) as a 64-bit integer
+ # - (N + 1) * 8 bytes: offsets of each element from the start of the
+ # data area
+ # - K bytes: the data area storing item values (with encoding and size
+ # depending on their respective types)
+ # - N * 8 bytes: `struct` format string for each element
+ # - N bytes: index into _back_transforms_mapping for each element
+ # (for reconstructing the corresponding Python value)
_types_mapping = {
int: "q",
float: "d",
@@ -282,7 +294,8 @@ class ShareableList:
return 3 # NoneType
def __init__(self, sequence=None, *, name=None):
- if sequence is not None:
+ if name is None or sequence is not None:
+ sequence = sequence or ()
_formats = [
self._types_mapping[type(item)]
if not isinstance(item, (str, bytes))
@@ -293,10 +306,14 @@ class ShareableList:
]
self._list_len = len(_formats)
assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
- self._allocated_bytes = tuple(
- self._alignment if fmt[-1] != "s" else int(fmt[:-1])
- for fmt in _formats
- )
+ offset = 0
+ # The offsets of each list element into the shared memory's
+ # data area (0 meaning the start of the data area, not the start
+ # of the shared memory area).
+ self._allocated_offsets = [0]
+ for fmt in _formats:
+ offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1])
+ self._allocated_offsets.append(offset)
_recreation_codes = [
self._extract_recreation_code(item) for item in sequence
]
@@ -307,13 +324,9 @@ class ShareableList:
self._format_back_transform_codes
)
+ self.shm = SharedMemory(name, create=True, size=requested_size)
else:
- requested_size = 8 # Some platforms require > 0.
-
- if name is not None and sequence is None:
self.shm = SharedMemory(name)
- else:
- self.shm = SharedMemory(name, create=True, size=requested_size)
if sequence is not None:
_enc = _encoding
@@ -322,7 +335,7 @@ class ShareableList:
self.shm.buf,
0,
self._list_len,
- *(self._allocated_bytes)
+ *(self._allocated_offsets)
)
struct.pack_into(
"".join(_formats),
@@ -345,10 +358,12 @@ class ShareableList:
else:
self._list_len = len(self) # Obtains size from offset 0 in buffer.
- self._allocated_bytes = struct.unpack_from(
- self._format_size_metainfo,
- self.shm.buf,
- 1 * 8
+ self._allocated_offsets = list(
+ struct.unpack_from(
+ self._format_size_metainfo,
+ self.shm.buf,
+ 1 * 8
+ )
)
def _get_packing_format(self, position):
@@ -370,7 +385,6 @@ class ShareableList:
def _get_back_transform(self, position):
"Gets the back transformation function for a single value."
- position = position if position >= 0 else position + self._list_len
if (position >= self._list_len) or (self._list_len < 0):
raise IndexError("Requested position out of range.")
@@ -387,7 +401,6 @@ class ShareableList:
"""Sets the packing format and back transformation code for a
single value in the list at the specified position."""
- position = position if position >= 0 else position + self._list_len
if (position >= self._list_len) or (self._list_len < 0):
raise IndexError("Requested position out of range.")
@@ -407,9 +420,9 @@ class ShareableList:
)
def __getitem__(self, position):
+ position = position if position >= 0 else position + self._list_len
try:
- offset = self._offset_data_start \
- + sum(self._allocated_bytes[:position])
+ offset = self._offset_data_start + self._allocated_offsets[position]
(v,) = struct.unpack_from(
self._get_packing_format(position),
self.shm.buf,
@@ -424,23 +437,29 @@ class ShareableList:
return v
def __setitem__(self, position, value):
+ position = position if position >= 0 else position + self._list_len
try:
- offset = self._offset_data_start \
- + sum(self._allocated_bytes[:position])
+ item_offset = self._allocated_offsets[position]
+ offset = self._offset_data_start + item_offset
current_format = self._get_packing_format(position)
except IndexError:
raise IndexError("assignment index out of range")
if not isinstance(value, (str, bytes)):
new_format = self._types_mapping[type(value)]
+ encoded_value = value
else:
- if len(value) > self._allocated_bytes[position]:
- raise ValueError("exceeds available storage for existing str")
+ allocated_length = self._allocated_offsets[position + 1] - item_offset
+
+ encoded_value = (value.encode(_encoding)
+ if isinstance(value, str) else value)
+ if len(encoded_value) > allocated_length:
+ raise ValueError("bytes/str item exceeds available storage")
if current_format[-1] == "s":
new_format = current_format
else:
new_format = self._types_mapping[str] % (
- self._allocated_bytes[position],
+ allocated_length,
)
self._set_packing_format_and_transform(
@@ -448,8 +467,7 @@ class ShareableList:
new_format,
value
)
- value = value.encode(_encoding) if isinstance(value, str) else value
- struct.pack_into(new_format, self.shm.buf, offset, value)
+ struct.pack_into(new_format, self.shm.buf, offset, encoded_value)
def __reduce__(self):
return partial(self.__class__, name=self.shm.name), ()
@@ -462,33 +480,35 @@ class ShareableList:
@property
def format(self):
- "The struct packing format used by all currently stored values."
+ "The struct packing format used by all currently stored items."
return "".join(
self._get_packing_format(i) for i in range(self._list_len)
)
@property
def _format_size_metainfo(self):
- "The struct packing format used for metainfo on storage sizes."
- return f"{self._list_len}q"
+ "The struct packing format used for the items' storage offsets."
+ return "q" * (self._list_len + 1)
@property
def _format_packing_metainfo(self):
- "The struct packing format used for the values' packing formats."
+ "The struct packing format used for the items' packing formats."
return "8s" * self._list_len
@property
def _format_back_transform_codes(self):
- "The struct packing format used for the values' back transforms."
+ "The struct packing format used for the items' back transforms."
return "b" * self._list_len
@property
def _offset_data_start(self):
- return (self._list_len + 1) * 8 # 8 bytes per "q"
+ # - 8 bytes for the list length
+ # - (N + 1) * 8 bytes for the element offsets
+ return (self._list_len + 2) * 8
@property
def _offset_packing_formats(self):
- return self._offset_data_start + sum(self._allocated_bytes)
+ return self._offset_data_start + self._allocated_offsets[-1]
@property
def _offset_back_transform_codes(self):
@@ -508,3 +528,5 @@ class ShareableList:
return position
else:
raise ValueError(f"{value!r} not in this container")
+
+ __class_getitem__ = classmethod(types.GenericAlias)
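
The offset table introduced above replaces per-item byte counts with cumulative offsets, so item lookup becomes a single index instead of a running sum. A minimal sketch of the same bookkeeping, assuming a hypothetical three-item formats list (the real class derives _formats from the input sequence):

    _alignment = 8
    formats = ["q", "8s", "d"]   # assumed example: int, 8-byte str slot, float
    offsets = [0]
    offset = 0
    for fmt in formats:
        # fixed-size items take one alignment unit; "Ns" strings take N bytes
        offset += _alignment if fmt[-1] != "s" else int(fmt[:-1])
        offsets.append(offset)
    # offsets == [0, 8, 16, 24]; item i starts at data_start + offsets[i], and
    # offsets[i + 1] - offsets[i] is its allocated size (what __setitem__ checks).
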
diff --git a/x64/Lib/multiprocessing/synchronize.py b/x64/Lib/multiprocessing/synchronize.py
index 4fcbefc..d0be48f 100644
--- a/x64/Lib/multiprocessing/synchronize.py
+++ b/x64/Lib/multiprocessing/synchronize.py
@@ -270,7 +270,7 @@ class Condition(object):
def notify(self, n=1):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(
- False), ('notify: Should not have been able to acquire'
+ False), ('notify: Should not have been able to acquire '
+ '_wait_semaphore')
# to take account of timeouts since last notify*() we subtract
diff --git a/x64/Lib/multiprocessing/util.py b/x64/Lib/multiprocessing/util.py
index 745f2b2..21f2a7e 100644
--- a/x64/Lib/multiprocessing/util.py
+++ b/x64/Lib/multiprocessing/util.py
@@ -102,6 +102,29 @@ def log_to_stderr(level=None):
_log_to_stderr = True
return _logger
+
+# Abstract socket support
+
+def _platform_supports_abstract_sockets():
+ if sys.platform == "linux":
+ return True
+ if hasattr(sys, 'getandroidapilevel'):
+ return True
+ return False
+
+
+def is_abstract_socket_namespace(address):
+ if not address:
+ return False
+ if isinstance(address, bytes):
+ return address[0] == 0
+ elif isinstance(address, str):
+ return address[0] == "\0"
+ raise TypeError(f'address type of {address!r} unrecognized')
+
+
+abstract_sockets_supported = _platform_supports_abstract_sockets()
+
#
# Function returning a temp directory which will be removed on exit
#
@@ -344,13 +367,13 @@ atexit.register(_exit_function)
class ForkAwareThreadLock(object):
def __init__(self):
- self._reset()
- register_after_fork(self, ForkAwareThreadLock._reset)
-
- def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
+ register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)
+
+ def _at_fork_reinit(self):
+ self._lock._at_fork_reinit()
def __enter__(self):
return self._lock.__enter__()
@@ -429,7 +452,7 @@ def spawnv_passfds(path, args, passfds):
return _posixsubprocess.fork_exec(
args, [os.fsencode(path)], True, passfds, None, None,
-1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
- False, False, None)
+ False, False, None, None, None, -1, None)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
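
The new helper classifies an address as belonging to the Linux abstract socket namespace by its leading NUL byte. A quick illustration, with behavior assumed from the code above (abstract sockets themselves exist only on Linux/Android):

    from multiprocessing import util

    util.is_abstract_socket_namespace(b"\0listener")    # True: leading NUL byte
    util.is_abstract_socket_namespace("/tmp/listener")  # False: filesystem path
    util.abstract_sockets_supported                     # True only on Linux/Android
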
diff --git a/x64/Lib/nntplib.py b/x64/Lib/nntplib.py
index 9036f36..f6e746e 100644
--- a/x64/Lib/nntplib.py
+++ b/x64/Lib/nntplib.py
@@ -67,7 +67,6 @@ import re
import socket
import collections
import datetime
-import warnings
import sys
try:
@@ -294,7 +293,7 @@ if _have_ssl:
# The classes themselves
-class _NNTPBase:
+class NNTP:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (when receiving)
# by this class.
@@ -310,13 +309,18 @@ class _NNTPBase:
encoding = 'utf-8'
errors = 'surrogateescape'
- def __init__(self, file, host,
- readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
+ def __init__(self, host, port=NNTP_PORT, user=None, password=None,
+ readermode=None, usenetrc=False,
+ timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- - file: file-like object (open for read/write in binary mode)
- - host: hostname of the server
+ - host: hostname to connect to
+ - port: port to connect to (default the standard NNTP port)
+ - user: username to authenticate with
+ - password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
+ - usenetrc: allow loading username and password from ~/.netrc file
+ if not specified explicitly
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
@@ -326,7 +330,24 @@ class _NNTPBase:
readermode.
"""
self.host = host
- self.file = file
+ self.port = port
+ self.sock = self._create_socket(timeout)
+ self.file = None
+ try:
+ self.file = self.sock.makefile("rwb")
+ self._base_init(readermode)
+ if user or usenetrc:
+ self.login(user, password, usenetrc)
+ except:
+ if self.file:
+ self.file.close()
+ self.sock.close()
+ raise
+
+ def _base_init(self, readermode):
+ """Partial initialization for the NNTP protocol.
+ This instance method is extracted to support the test code.
+ """
self.debugging = 0
self.welcome = self._getresp()
@@ -371,6 +392,12 @@ class _NNTPBase:
if is_connected():
self._close()
+ def _create_socket(self, timeout):
+ if timeout is not None and not timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
+ sys.audit("nntplib.connect", self, self.host, self.port)
+ return socket.create_connection((self.host, self.port), timeout)
+
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
@@ -834,44 +861,6 @@ class _NNTPBase:
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
- def xgtitle(self, group, *, file=None):
- """Process an XGTITLE command (optional server extension) Arguments:
- - group: group name wildcard (i.e. news.*)
- Returns:
- - resp: server response if successful
- - list: list of (name,title) strings"""
- warnings.warn("The XGTITLE extension is not actively used, "
- "use descriptions() instead",
- DeprecationWarning, 2)
- line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
- resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
- lines = []
- for raw_line in raw_lines:
- match = line_pat.search(raw_line.strip())
- if match:
- lines.append(match.group(1, 2))
- return resp, lines
-
- def xpath(self, id):
- """Process an XPATH command (optional server extension) Arguments:
- - id: Message id of article
- Returns:
- resp: server response if successful
- path: directory path to article
- """
- warnings.warn("The XPATH extension is not actively used",
- DeprecationWarning, 2)
-
- resp = self._shortcmd('XPATH {0}'.format(id))
- if not resp.startswith('223'):
- raise NNTPReplyError(resp)
- try:
- [resp_num, path] = resp.split()
- except ValueError:
- raise NNTPReplyError(resp) from None
- else:
- return resp, path
-
def date(self):
"""Process the DATE command.
Returns:
@@ -927,8 +916,12 @@ class _NNTPBase:
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
- self.file.close()
- del self.file
+ try:
+ if self.file:
+ self.file.close()
+ del self.file
+ finally:
+ self.sock.close()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
@@ -1018,54 +1011,8 @@ class _NNTPBase:
raise NNTPError("TLS failed to start.")
-class NNTP(_NNTPBase):
-
- def __init__(self, host, port=NNTP_PORT, user=None, password=None,
- readermode=None, usenetrc=False,
- timeout=_GLOBAL_DEFAULT_TIMEOUT):
- """Initialize an instance. Arguments:
- - host: hostname to connect to
- - port: port to connect to (default the standard NNTP port)
- - user: username to authenticate with
- - password: password to use with username
- - readermode: if true, send 'mode reader' command after
- connecting.
- - usenetrc: allow loading username and password from ~/.netrc file
- if not specified explicitly
- - timeout: timeout (in seconds) used for socket connections
-
- readermode is sometimes necessary if you are connecting to an
- NNTP server on the local machine and intend to call
- reader-specific commands, such as `group'. If you get
- unexpected NNTPPermanentErrors, you might need to set
- readermode.
- """
- self.host = host
- self.port = port
- sys.audit("nntplib.connect", self, host, port)
- self.sock = socket.create_connection((host, port), timeout)
- file = None
- try:
- file = self.sock.makefile("rwb")
- _NNTPBase.__init__(self, file, host,
- readermode, timeout)
- if user or usenetrc:
- self.login(user, password, usenetrc)
- except:
- if file:
- file.close()
- self.sock.close()
- raise
-
- def _close(self):
- try:
- _NNTPBase._close(self)
- finally:
- self.sock.close()
-
-
if _have_ssl:
- class NNTP_SSL(_NNTPBase):
+ class NNTP_SSL(NNTP):
def __init__(self, host, port=NNTP_SSL_PORT,
user=None, password=None, ssl_context=None,
@@ -1074,27 +1021,19 @@ if _have_ssl:
"""This works identically to NNTP.__init__, except for the change
in default port and the `ssl_context` argument for SSL connections.
"""
- sys.audit("nntplib.connect", self, host, port)
- self.sock = socket.create_connection((host, port), timeout)
- file = None
+ self.ssl_context = ssl_context
+ super().__init__(host, port, user, password, readermode,
+ usenetrc, timeout)
+
+ def _create_socket(self, timeout):
+ sock = super()._create_socket(timeout)
try:
- self.sock = _encrypt_on(self.sock, ssl_context, host)
- file = self.sock.makefile("rwb")
- _NNTPBase.__init__(self, file, host,
- readermode=readermode, timeout=timeout)
- if user or usenetrc:
- self.login(user, password, usenetrc)
+ sock = _encrypt_on(sock, self.ssl_context, self.host)
except:
- if file:
- file.close()
- self.sock.close()
+ sock.close()
raise
-
- def _close(self):
- try:
- _NNTPBase._close(self)
- finally:
- self.sock.close()
+ else:
+ return sock
__all__.append("NNTP_SSL")
diff --git a/x64/Lib/opcode.py b/x64/Lib/opcode.py
index 3fb716b..ac1aa53 100644
--- a/x64/Lib/opcode.py
+++ b/x64/Lib/opcode.py
@@ -21,8 +21,7 @@ try:
except ImportError:
pass
-cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
- 'is not', 'exception match', 'BAD')
+cmp_op = ('<', '<=', '==', '!=', '>', '>=')
hasconst = []
hasname = []
@@ -84,10 +83,12 @@ def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
+def_op('RERAISE', 48)
+def_op('WITH_EXCEPT_START', 49)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
-def_op('BEGIN_FINALLY', 53)
+
def_op('END_ASYNC_FOR', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
@@ -109,20 +110,20 @@ def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
-
+def_op('LOAD_ASSERTION_ERROR', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
-def_op('WITH_CLEANUP_START', 81)
-def_op('WITH_CLEANUP_FINISH', 82)
+
+def_op('LIST_TO_TUPLE', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
-def_op('END_FINALLY', 88)
+
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
@@ -158,6 +159,10 @@ jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
+def_op('IS_OP', 117)
+def_op('CONTAINS_OP', 118)
+
+jabs_op('JUMP_IF_NOT_EXC_MATCH', 121)
jrel_op('SETUP_FINALLY', 122) # Distance to target address
def_op('LOAD_FAST', 124) # Local variable number
@@ -195,22 +200,18 @@ hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
-def_op('BUILD_LIST_UNPACK', 149)
-def_op('BUILD_MAP_UNPACK', 150)
-def_op('BUILD_MAP_UNPACK_WITH_CALL', 151)
-def_op('BUILD_TUPLE_UNPACK', 152)
-def_op('BUILD_SET_UNPACK', 153)
-
jrel_op('SETUP_ASYNC_WITH', 154)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
-def_op('BUILD_TUPLE_UNPACK_WITH_CALL', 158)
name_op('LOAD_METHOD', 160)
def_op('CALL_METHOD', 161)
-jrel_op('CALL_FINALLY', 162)
-def_op('POP_FINALLY', 163)
+
+def_op('LIST_EXTEND', 162)
+def_op('SET_UPDATE', 163)
+def_op('DICT_MERGE', 164)
+def_op('DICT_UPDATE', 165)
del def_op, name_op, jrel_op, jabs_op
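
The opcode reshuffle mirrors the 3.9 compiler: identity and membership tests move off COMPARE_OP onto dedicated opcodes, and the unpacking opcodes are replaced by LIST_EXTEND/LIST_TO_TUPLE. On a 3.9 interpreter this is visible with dis:

    import dis

    dis.dis("x is None")   # emits IS_OP instead of COMPARE_OP 'is'
    dis.dis("x in y")      # emits CONTAINS_OP
    dis.dis("(*a, *b)")    # builds via LIST_EXTEND, then LIST_TO_TUPLE
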
diff --git a/x64/Lib/os.py b/x64/Lib/os.py
index 253cad1..b794159 100644
--- a/x64/Lib/os.py
+++ b/x64/Lib/os.py
@@ -28,6 +28,8 @@ import stat as st
from _collections_abc import _check_methods
+GenericAlias = type(list[int])
+
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
@@ -336,7 +338,10 @@ def walk(top, topdown=True, onerror=None, followlinks=False):
dirs.remove('CVS') # don't visit CVS directories
"""
- top = fspath(top)
+ sys.audit("os.walk", top, topdown, onerror, followlinks)
+ return _walk(fspath(top), topdown, onerror, followlinks)
+
+def _walk(top, topdown, onerror, followlinks):
dirs = []
nondirs = []
walk_dirs = []
@@ -410,11 +415,11 @@ def walk(top, topdown=True, onerror=None, followlinks=False):
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
- yield from walk(new_path, topdown, onerror, followlinks)
+ yield from _walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
- yield from walk(new_path, topdown, onerror, followlinks)
+ yield from _walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
@@ -455,6 +460,7 @@ if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd:
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
+ sys.audit("os.fwalk", top, topdown, onerror, follow_symlinks, dir_fd)
if not isinstance(top, int) or not hasattr(top, '__index__'):
top = fspath(top)
# Note: To guard against symlink races, we use the standard
@@ -654,17 +660,15 @@ def get_exec_path(env=None):
return path_list.split(pathsep)
-# Change environ to automatically call putenv(), unsetenv if they exist.
-from _collections_abc import MutableMapping
+# Change environ to automatically call putenv() and unsetenv()
+from _collections_abc import MutableMapping, Mapping
class _Environ(MutableMapping):
- def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
+ def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
- self.putenv = putenv
- self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
@@ -678,12 +682,12 @@ class _Environ(MutableMapping):
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
- self.putenv(key, value)
+ putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
- self.unsetenv(encodedkey)
+ unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
@@ -712,21 +716,23 @@ class _Environ(MutableMapping):
self[key] = value
return self[key]
-try:
- _putenv = putenv
-except NameError:
- _putenv = lambda key, value: None
-else:
- if "putenv" not in __all__:
- __all__.append("putenv")
+ def __ior__(self, other):
+ self.update(other)
+ return self
-try:
- _unsetenv = unsetenv
-except NameError:
- _unsetenv = lambda key: _putenv(key, "")
-else:
- if "unsetenv" not in __all__:
- __all__.append("unsetenv")
+ def __or__(self, other):
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ new = dict(self)
+ new.update(other)
+ return new
+
+ def __ror__(self, other):
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ new = dict(other)
+ new.update(self)
+ return new
def _createenviron():
if name == 'nt':
@@ -755,8 +761,7 @@ def _createenviron():
data = environ
return _Environ(data,
encodekey, decode,
- encode, decode,
- _putenv, _unsetenv)
+ encode, decode)
# unicode environ
environ = _createenviron()
@@ -781,8 +786,7 @@ if supports_bytes_environ:
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
- _check_bytes, bytes,
- _putenv, _unsetenv)
+ _check_bytes, bytes)
del _check_bytes
def getenvb(key, default=None):
@@ -862,12 +866,8 @@ if _exists("fork") and not _exists("spawnv") and _exists("execv"):
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
- elif WIFSIGNALED(sts):
- return -WTERMSIG(sts)
- elif WIFEXITED(sts):
- return WEXITSTATUS(sts)
- else:
- raise OSError("Not stopped, signaled or exited???")
+
+ return waitstatus_to_exitcode(sts)
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
@@ -1076,6 +1076,8 @@ class PathLike(abc.ABC):
return _check_methods(subclass, '__fspath__')
return NotImplemented
+ __class_getitem__ = classmethod(GenericAlias)
+
if name == 'nt':
class _AddedDllDirectory:
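
Beyond the audit hooks, _Environ gains the PEP 584 merge operators, so os.environ now composes with plain dicts. Illustrative usage on 3.9 (the variable name is arbitrary):

    import os

    merged = os.environ | {"DEBUG": "1"}       # new plain dict; environ unchanged
    os.environ |= {"DEBUG": "1"}               # in-place update; also calls putenv()
    d = {"DEBUG": "0"} | os.environ            # __ror__: environ's values win on conflict
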
diff --git a/x64/Lib/pathlib.py b/x64/Lib/pathlib.py
index 015370a..147be2f 100644
--- a/x64/Lib/pathlib.py
+++ b/x64/Lib/pathlib.py
@@ -329,7 +329,10 @@ class _PosixFlavour(_Flavour):
# parent dir
path, _, _ = path.rpartition(sep)
continue
- newpath = path + sep + name
+ if path.endswith(sep):
+ newpath = path + name
+ else:
+ newpath = path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
@@ -447,6 +450,20 @@ class _NormalAccessor(_Accessor):
def readlink(self, path):
return os.readlink(path)
+ def owner(self, path):
+ try:
+ import pwd
+ return pwd.getpwuid(self.stat(path).st_uid).pw_name
+ except ImportError:
+ raise NotImplementedError("Path.owner() is unsupported on this system")
+
+ def group(self, path):
+ try:
+ import grp
+ return grp.getgrgid(self.stat(path).st_gid).gr_name
+ except ImportError:
+ raise NotImplementedError("Path.group() is unsupported on this system")
+
_normal_accessor = _NormalAccessor()
@@ -527,25 +544,29 @@ class _WildcardSelector(_Selector):
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
- entries = list(scandir(parent_path))
+ with scandir(parent_path) as scandir_it:
+ entries = list(scandir_it)
for entry in entries:
- entry_is_dir = False
- try:
- entry_is_dir = entry.is_dir()
- except OSError as e:
- if not _ignore_error(e):
- raise
- if not self.dironly or entry_is_dir:
- name = entry.name
- if self.match(name):
- path = parent_path._make_child_relpath(name)
- for p in self.successor._select_from(path, is_dir, exists, scandir):
- yield p
+ if self.dironly:
+ try:
+ # "entry.is_dir()" can raise PermissionError
+ # in some cases (see bpo-38894), which is not
+ # among the errors ignored by _ignore_error()
+ if not entry.is_dir():
+ continue
+ except OSError as e:
+ if not _ignore_error(e):
+ raise
+ continue
+ name = entry.name
+ if self.match(name):
+ path = parent_path._make_child_relpath(name)
+ for p in self.successor._select_from(path, is_dir, exists, scandir):
+ yield p
except PermissionError:
return
-
class _RecursiveWildcardSelector(_Selector):
def __init__(self, pat, child_parts, flavour):
@@ -554,7 +575,8 @@ class _RecursiveWildcardSelector(_Selector):
def _iterate_directories(self, parent_path, is_dir, scandir):
yield parent_path
try:
- entries = list(scandir(parent_path))
+ with scandir(parent_path) as scandir_it:
+ entries = list(scandir_it)
for entry in entries:
entry_is_dir = False
try:
@@ -780,6 +802,9 @@ class PurePath(object):
return NotImplemented
return self._cparts >= other._cparts
+ def __class_getitem__(cls, type):
+ return cls
+
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
@@ -848,6 +873,10 @@ class PurePath(object):
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
+ def with_stem(self, stem):
+ """Return a new path with the stem changed."""
+ return self.with_name(stem + self.suffix)
+
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed. If the path
has no suffix, add given suffix. If the given suffix is an empty
@@ -896,11 +925,21 @@ class PurePath(object):
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
- raise ValueError("{!r} does not start with {!r}"
+ raise ValueError("{!r} is not in the subpath of {!r}"
+ " OR one path is relative and the other is absolute."
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
+ def is_relative_to(self, *other):
+ """Return True if the path is relative to another path or False.
+ """
+ try:
+ self.relative_to(*other)
+ return True
+ except ValueError:
+ return False
+
@property
def parts(self):
"""An object providing sequence-like access to the
@@ -1024,7 +1063,6 @@ class Path(PurePath):
"""
__slots__ = (
'_accessor',
- '_closed',
)
def __new__(cls, *args, **kwargs):
@@ -1041,7 +1079,6 @@ class Path(PurePath):
# Private non-constructor arguments
template=None,
):
- self._closed = False
if template is not None:
self._accessor = template._accessor
else:
@@ -1054,15 +1091,18 @@ class Path(PurePath):
return self._from_parsed_parts(self._drv, self._root, parts)
def __enter__(self):
- if self._closed:
- self._raise_closed()
return self
def __exit__(self, t, v, tb):
- self._closed = True
-
- def _raise_closed(self):
- raise ValueError("I/O operation on closed path")
+ # https://bugs.python.org/issue39682
+ # In previous versions of pathlib, this method marked this path as
+ # closed; subsequent attempts to perform I/O would raise an IOError.
+ # This functionality was never documented, and had the effect of
+ # making Path objects mutable, contrary to PEP 428. In Python 3.9 the
+ # _closed attribute was removed, and this method was made a no-op.
+ # This method and __enter__()/__exit__() should be deprecated and
+ # removed in the future.
+ pass
def _opener(self, name, flags, mode=0o666):
# A stub for the opener argument to built-in open()
@@ -1073,8 +1113,6 @@ class Path(PurePath):
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
- if self._closed:
- self._raise_closed()
return self._accessor.open(self, flags, mode)
# Public API
@@ -1101,27 +1139,24 @@ class Path(PurePath):
try:
other_st = other_path.stat()
except AttributeError:
- other_st = os.stat(other_path)
+ other_st = self._accessor.stat(other_path)
return os.path.samestat(st, other_st)
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
- if self._closed:
- self._raise_closed()
for name in self._accessor.listdir(self):
if name in {'.', '..'}:
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
- if self._closed:
- self._raise_closed()
def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given relative pattern.
"""
+ sys.audit("pathlib.Path.glob", self, pattern)
if not pattern:
raise ValueError("Unacceptable pattern: {!r}".format(pattern))
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
@@ -1136,6 +1171,7 @@ class Path(PurePath):
directories) matching the given relative pattern, anywhere in
this subtree.
"""
+ sys.audit("pathlib.Path.rglob", self, pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
@@ -1151,8 +1187,6 @@ class Path(PurePath):
Use resolve() to get the canonical path to a file.
"""
# XXX untested yet!
- if self._closed:
- self._raise_closed()
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
@@ -1167,8 +1201,6 @@ class Path(PurePath):
normalizing it (for example turning slashes into backslashes under
Windows).
"""
- if self._closed:
- self._raise_closed()
s = self._flavour.resolve(self, strict=strict)
if s is None:
# No symlink resolution => for consistency, raise an error if
@@ -1192,15 +1224,13 @@ class Path(PurePath):
"""
Return the login name of the file owner.
"""
- import pwd
- return pwd.getpwuid(self.stat().st_uid).pw_name
+ return self._accessor.owner(self)
def group(self):
"""
Return the group name of the file gid.
"""
- import grp
- return grp.getgrgid(self.stat().st_gid).gr_name
+ return self._accessor.group(self)
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
@@ -1208,8 +1238,6 @@ class Path(PurePath):
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
- if self._closed:
- self._raise_closed()
return io.open(self, mode, buffering, encoding, errors, newline,
opener=self._opener)
@@ -1246,12 +1274,19 @@ class Path(PurePath):
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data)
+ def readlink(self):
+ """
+ Return the path to which the symbolic link points.
+ """
+ path = self._accessor.readlink(self)
+ obj = self._from_parts((path,), init=False)
+ obj._init(template=self)
+ return obj
+
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
- if self._closed:
- self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
@@ -1273,8 +1308,6 @@ class Path(PurePath):
"""
Create a new directory at this given path.
"""
- if self._closed:
- self._raise_closed()
try:
self._accessor.mkdir(self, mode)
except FileNotFoundError:
@@ -1292,8 +1325,6 @@ class Path(PurePath):
"""
Change the permissions of the path, like os.chmod().
"""
- if self._closed:
- self._raise_closed()
self._accessor.chmod(self, mode)
def lchmod(self, mode):
@@ -1301,8 +1332,6 @@ class Path(PurePath):
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
- if self._closed:
- self._raise_closed()
self._accessor.lchmod(self, mode)
def unlink(self, missing_ok=False):
@@ -1310,8 +1339,6 @@ class Path(PurePath):
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
- if self._closed:
- self._raise_closed()
try:
self._accessor.unlink(self)
except FileNotFoundError:
@@ -1322,8 +1349,6 @@ class Path(PurePath):
"""
Remove this directory. The directory must be empty.
"""
- if self._closed:
- self._raise_closed()
self._accessor.rmdir(self)
def lstat(self):
@@ -1331,36 +1356,37 @@ class Path(PurePath):
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
- if self._closed:
- self._raise_closed()
return self._accessor.lstat(self)
def link_to(self, target):
"""
Create a hard link pointing to a path named target.
"""
- if self._closed:
- self._raise_closed()
self._accessor.link_to(self, target)
def rename(self, target):
"""
- Rename this path to the given path,
- and return a new Path instance pointing to the given path.
+ Rename this path to the target path.
+
+ The target path may be absolute or relative. Relative paths are
+ interpreted relative to the current working directory, *not* the
+ directory of the Path object.
+
+ Returns the new Path instance pointing to the target path.
"""
- if self._closed:
- self._raise_closed()
self._accessor.rename(self, target)
return self.__class__(target)
def replace(self, target):
"""
- Rename this path to the given path, clobbering the existing
- destination if it exists, and return a new Path instance
- pointing to the given path.
+ Rename this path to the target path, overwriting if that path exists.
+
+ The target path may be absolute or relative. Relative paths are
+ interpreted relative to the current working directory, *not* the
+ directory of the Path object.
+
+ Returns the new Path instance pointing to the target path.
"""
- if self._closed:
- self._raise_closed()
self._accessor.replace(self, target)
return self.__class__(target)
@@ -1369,8 +1395,6 @@ class Path(PurePath):
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
- if self._closed:
- self._raise_closed()
self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
@@ -1431,9 +1455,8 @@ class Path(PurePath):
if not self.exists() or not self.is_dir():
return False
- parent = Path(self.parent)
try:
- parent_dev = parent.stat().st_dev
+ parent_dev = self.parent.stat().st_dev
except OSError:
return False
@@ -1441,7 +1464,7 @@ class Path(PurePath):
if dev != parent_dev:
return True
ino = self.stat().st_ino
- parent_ino = parent.stat().st_ino
+ parent_ino = self.parent.stat().st_ino
return ino == parent_ino
def is_symlink(self):
@@ -1549,11 +1572,5 @@ class WindowsPath(Path, PureWindowsPath):
"""
__slots__ = ()
- def owner(self):
- raise NotImplementedError("Path.owner() is unsupported on this system")
-
- def group(self):
- raise NotImplementedError("Path.group() is unsupported on this system")
-
def is_mount(self):
raise NotImplementedError("Path.is_mount() is unsupported on this system")
diff --git a/x64/Lib/pdb.py b/x64/Lib/pdb.py
index bf503f1..d7d9571 100644
--- a/x64/Lib/pdb.py
+++ b/x64/Lib/pdb.py
@@ -79,6 +79,7 @@ import glob
import pprint
import signal
import inspect
+import tokenize
import traceback
import linecache
@@ -93,7 +94,7 @@ __all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
- fp = open(filename)
+ fp = tokenize.open(filename)
except OSError:
return None
# consumer of this info expects the first line to be 1
@@ -473,7 +474,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
except Exception:
ret = []
# Then, try to complete file names as well.
- globs = glob.glob(text + '*')
+ globs = glob.glob(glob.escape(text) + '*')
for fn in globs:
if os.path.isdir(fn):
ret.append(fn + '/')
@@ -1311,21 +1312,21 @@ class Pdb(bdb.Bdb, cmd.Cmd):
# _getval() already printed the error
return
code = None
- # Is it a function?
+ # Is it an instance method?
try:
- code = value.__code__
+ code = value.__func__.__code__
except Exception:
pass
if code:
- self.message('Function %s' % code.co_name)
+ self.message('Method %s' % code.co_name)
return
- # Is it an instance method?
+ # Is it a function?
try:
- code = value.__func__.__code__
+ code = value.__code__
except Exception:
pass
if code:
- self.message('Method %s' % code.co_name)
+ self.message('Function %s' % code.co_name)
return
# Is it a class?
if value.__class__ is type:
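
The completion fix matters when the word being completed itself contains glob metacharacters; glob.escape keeps them literal. Roughly what the new code relies on (filename is hypothetical):

    import glob

    # Unescaped, "file[1]*" treats [1] as a character class and matches "file1...";
    # escaping first matches names that literally start with "file[1]".
    glob.glob(glob.escape("file[1]") + "*")
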
diff --git a/x64/Lib/pickle.py b/x64/Lib/pickle.py
index 515cb8a..e63a8b6 100644
--- a/x64/Lib/pickle.py
+++ b/x64/Lib/pickle.py
@@ -13,7 +13,7 @@ Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
- loads(string) -> object
+ loads(bytes) -> object
Misc variables:
@@ -339,8 +339,10 @@ def whichmodule(obj, name):
return module_name
# Protect the iteration by using a list copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
- for module_name, module in list(sys.modules.items()):
- if module_name == '__main__' or module is None:
+ for module_name, module in sys.modules.copy().items():
+ if (module_name == '__main__'
+ or module_name == '__mp_main__' # bpo-42406
+ or module is None):
continue
try:
if _getattribute(module, name)[0] is obj:
@@ -1604,17 +1606,29 @@ class _Unpickler:
def load_get(self):
i = int(self.readline()[:-1])
- self.append(self.memo[i])
+ try:
+ self.append(self.memo[i])
+ except KeyError:
+ msg = f'Memo value not found at index {i}'
+ raise UnpicklingError(msg) from None
dispatch[GET[0]] = load_get
def load_binget(self):
i = self.read(1)[0]
- self.append(self.memo[i])
+ try:
+ self.append(self.memo[i])
+ except KeyError as exc:
+ msg = f'Memo value not found at index {i}'
+ raise UnpicklingError(msg) from None
dispatch[BINGET[0]] = load_binget
def load_long_binget(self):
i, = unpack('<I', self.read(4))
- self.append(self.memo[i])
+ try:
+ self.append(self.memo[i])
+ except KeyError as exc:
+ msg = f'Memo value not found at index {i}'
+ raise UnpicklingError(msg) from None
dispatch[LONG_BINGET[0]] = load_long_binget
def load_put(self):
@@ -1749,7 +1763,7 @@ def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict",
return _Unpickler(file, fix_imports=fix_imports, buffers=buffers,
encoding=encoding, errors=errors).load()
-def _loads(s, *, fix_imports=True, encoding="ASCII", errors="strict",
+def _loads(s, /, *, fix_imports=True, encoding="ASCII", errors="strict",
buffers=None):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
diff --git a/x64/Lib/pkgutil.py b/x64/Lib/pkgutil.py
index 8474a77..4c18467 100644
--- a/x64/Lib/pkgutil.py
+++ b/x64/Lib/pkgutil.py
@@ -7,6 +7,7 @@ import importlib.util
import importlib.machinery
import os
import os.path
+import re
import sys
from types import ModuleType
import warnings
@@ -635,3 +636,72 @@ def get_data(package, resource):
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
+
+
+_DOTTED_WORDS = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+_NAME_PATTERN = re.compile(f'^(?P<pkg>{_DOTTED_WORDS})(?P<cln>:(?P<obj>{_DOTTED_WORDS})?)?$', re.U)
+del _DOTTED_WORDS
+
+def resolve_name(name):
+ """
+ Resolve a name to an object.
+
+ It is expected that `name` will be a string in one of the following
+ formats, where W is shorthand for a valid Python identifier and dot stands
+ for a literal period in these pseudo-regexes:
+
+ W(.W)*
+ W(.W)*:(W(.W)*)?
+
+ The first form is intended for backward compatibility only. It assumes that
+ some part of the dotted name is a package, and the rest is an object
+ somewhere within that package, possibly nested inside other objects.
+ Because the place where the package stops and the object hierarchy starts
+ can't be inferred by inspection, repeated attempts to import must be done
+ with this form.
+
+ In the second form, the caller makes the division point clear through the
+ provision of a single colon: the dotted name to the left of the colon is a
+ package to be imported, and the dotted name to the right is the object
+ hierarchy within that package. Only one import is needed in this form. If
+ it ends with the colon, then a module object is returned.
+
+ The function will return an object (which might be a module), or raise one
+ of the following exceptions:
+
+ ValueError - if `name` isn't in a recognised format
+ ImportError - if an import failed when it shouldn't have
+ AttributeError - if a failure occurred when traversing the object hierarchy
+ within the imported package to get to the desired object
+ """
+ m = _NAME_PATTERN.match(name)
+ if not m:
+ raise ValueError(f'invalid format: {name!r}')
+ gd = m.groupdict()
+ if gd.get('cln'):
+ # there is a colon - a one-step import is all that's needed
+ mod = importlib.import_module(gd['pkg'])
+ parts = gd.get('obj')
+ parts = parts.split('.') if parts else []
+ else:
+ # no colon - have to iterate to find the package boundary
+ parts = name.split('.')
+ modname = parts.pop(0)
+ # first part *must* be a module/package.
+ mod = importlib.import_module(modname)
+ while parts:
+ p = parts[0]
+ s = f'{modname}.{p}'
+ try:
+ mod = importlib.import_module(s)
+ parts.pop(0)
+ modname = s
+ except ImportError:
+ break
+ # if we reach this point, mod is the module, already imported, and
+ # parts is the list of parts in the object hierarchy to be traversed, or
+ # an empty list if just the module is wanted.
+ result = mod
+ for p in parts:
+ result = getattr(result, p)
+ return result
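
Both documented forms of resolve_name are straightforward to demonstrate (stdlib names chosen purely for illustration):

    import pkgutil

    pkgutil.resolve_name("os.path:join")   # explicit split: one import, then getattr
    pkgutil.resolve_name("os.path.join")   # legacy form: iterative imports
    pkgutil.resolve_name("xml.dom:")       # trailing colon returns the module itself
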
diff --git a/x64/Lib/platform.py b/x64/Lib/platform.py
index 6fbb7b0..e9f50ab 100644
--- a/x64/Lib/platform.py
+++ b/x64/Lib/platform.py
@@ -116,6 +116,9 @@ import collections
import os
import re
import sys
+import subprocess
+import functools
+import itertools
### Globals & Constants
@@ -395,9 +398,9 @@ def win32_ver(release='', version='', csd='', ptype=''):
else:
try:
cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
- with winreg.OpenKeyEx(HKEY_LOCAL_MACHINE, cvkey) as key:
- ptype = QueryValueEx(key, 'CurrentType')[0]
- except:
+ with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key:
+ ptype = winreg.QueryValueEx(key, 'CurrentType')[0]
+ except OSError:
pass
return release, version, csd, ptype
@@ -600,22 +603,6 @@ def _follow_symlinks(filepath):
os.path.join(os.path.dirname(filepath), os.readlink(filepath)))
return filepath
-def _syscmd_uname(option, default=''):
-
- """ Interface to the system's uname command.
- """
- if sys.platform in ('dos', 'win32', 'win16'):
- # XXX Others too ?
- return default
-
- import subprocess
- try:
- output = subprocess.check_output(('uname', option),
- stderr=subprocess.DEVNULL,
- text=True)
- except (OSError, subprocess.CalledProcessError):
- return default
- return (output.strip() or default)
def _syscmd_file(target, default=''):
@@ -736,13 +723,90 @@ def architecture(executable=sys.executable, bits='', linkage=''):
return bits, linkage
+
+def _get_machine_win32():
+ # Try to use the PROCESSOR_* environment variables
+ # available on Win XP and later; see
+ # http://support.microsoft.com/kb/888731 and
+ # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
+
+ # WOW64 processes mask the native architecture
+ return (
+ os.environ.get('PROCESSOR_ARCHITEW6432', '') or
+ os.environ.get('PROCESSOR_ARCHITECTURE', '')
+ )
+
+
+class _Processor:
+ @classmethod
+ def get(cls):
+ func = getattr(cls, f'get_{sys.platform}', cls.from_subprocess)
+ return func() or ''
+
+ def get_win32():
+ return os.environ.get('PROCESSOR_IDENTIFIER', _get_machine_win32())
+
+ def get_OpenVMS():
+ try:
+ import vms_lib
+ except ImportError:
+ pass
+ else:
+ csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0)
+ return 'Alpha' if cpu_number >= 128 else 'VAX'
+
+ def from_subprocess():
+ """
+ Fall back to `uname -p`
+ """
+ try:
+ return subprocess.check_output(
+ ['uname', '-p'],
+ stderr=subprocess.DEVNULL,
+ text=True,
+ ).strip()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+
+
+def _unknown_as_blank(val):
+ return '' if val == 'unknown' else val
+
+
### Portable uname() interface
-uname_result = collections.namedtuple("uname_result",
- "system node release version machine processor")
+class uname_result(
+ collections.namedtuple(
+ "uname_result_base",
+ "system node release version machine")
+ ):
+ """
+ A uname_result that's largely compatible with a
+ simple namedtuple except that 'processor' is
+ resolved late and cached to avoid calling "uname -p"
+ except when needed.
+ """
+
+ @functools.cached_property
+ def processor(self):
+ return _unknown_as_blank(_Processor.get())
+
+ def __iter__(self):
+ return itertools.chain(
+ super().__iter__(),
+ (self.processor,)
+ )
+
+ def __getitem__(self, key):
+ return tuple(iter(self))[key]
+
+ def __len__(self):
+ return len(tuple(iter(self)))
+
_uname_cache = None
+
def uname():
""" Fairly portable uname interface. Returns a tuple
@@ -756,52 +820,30 @@ def uname():
"""
global _uname_cache
- no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
- processor = ''
-
# Get some infos from the builtin os.uname API...
try:
- system, node, release, version, machine = os.uname()
+ system, node, release, version, machine = infos = os.uname()
except AttributeError:
- no_os_uname = 1
-
- if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
- # Hmm, no there is either no uname or uname has returned
- #'unknowns'... we'll have to poke around the system then.
- if no_os_uname:
- system = sys.platform
- release = ''
- version = ''
- node = _node()
- machine = ''
+ system = sys.platform
+ node = _node()
+ release = version = machine = ''
+ infos = ()
- use_syscmd_ver = 1
+ if not any(infos):
+ # uname is not available
# Try win32_ver() on win32 platforms
if system == 'win32':
release, version, csd, ptype = win32_ver()
- if release and version:
- use_syscmd_ver = 0
- # Try to use the PROCESSOR_* environment variables
- # available on Win XP and later; see
- # http://support.microsoft.com/kb/888731 and
- # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
- if not machine:
- # WOW64 processes mask the native architecture
- if "PROCESSOR_ARCHITEW6432" in os.environ:
- machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
- else:
- machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
- if not processor:
- processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
+ machine = machine or _get_machine_win32()
# Try the 'ver' system command available on some
# platforms
- if use_syscmd_ver:
+ if not (release and version):
system, release, version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
@@ -841,42 +883,15 @@ def uname():
if not release or release == '0':
release = version
version = ''
- # Get processor information
- try:
- import vms_lib
- except ImportError:
- pass
- else:
- csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0)
- if (cpu_number >= 128):
- processor = 'Alpha'
- else:
- processor = 'VAX'
- if not processor:
- # Get processor information from the uname system command
- processor = _syscmd_uname('-p', '')
-
- #If any unknowns still exist, replace them with ''s, which are more portable
- if system == 'unknown':
- system = ''
- if node == 'unknown':
- node = ''
- if release == 'unknown':
- release = ''
- if version == 'unknown':
- version = ''
- if machine == 'unknown':
- machine = ''
- if processor == 'unknown':
- processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
- _uname_cache = uname_result(system, node, release, version,
- machine, processor)
+ vals = system, node, release, version, machine
+ # Replace 'unknown' values with the more portable ''
+ _uname_cache = uname_result(*map(_unknown_as_blank, vals))
return _uname_cache
### Direct interfaces to some of the uname() return values
@@ -1202,7 +1217,7 @@ def platform(aliased=0, terse=0):
elif system in ('Linux',):
# check for libc vs. glibc
- libcname, libcversion = libc_ver(sys.executable)
+ libcname, libcversion = libc_ver()
platform = _platform(system, release, machine, processor,
'with',
libcname+libcversion)
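
The lazy `processor` field means constructing the uname tuple no longer shells out; the subprocess runs only when the attribute (or the full tuple) is actually consumed. Observable behavior on 3.9, assumed from the class above:

    import platform

    u = platform.uname()
    u.machine      # available immediately, no subprocess involved
    u.processor    # first access may run `uname -p`; the result is cached
    tuple(u)       # iterating forces the processor field as well
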
diff --git a/x64/Lib/plistlib.py b/x64/Lib/plistlib.py
index 04f8a87..2eeebe4 100644
--- a/x64/Lib/plistlib.py
+++ b/x64/Lib/plistlib.py
@@ -46,14 +46,11 @@ Parse Plist example:
print(pl["aKey"])
"""
__all__ = [
- "readPlist", "writePlist", "readPlistFromBytes", "writePlistToBytes",
- "Data", "InvalidFileException", "FMT_XML", "FMT_BINARY",
- "load", "dump", "loads", "dumps", "UID"
+ "InvalidFileException", "FMT_XML", "FMT_BINARY", "load", "dump", "loads", "dumps", "UID"
]
import binascii
import codecs
-import contextlib
import datetime
import enum
from io import BytesIO
@@ -61,7 +58,6 @@ import itertools
import os
import re
import struct
-from warnings import warn
from xml.parsers.expat import ParserCreate
@@ -69,112 +65,6 @@ PlistFormat = enum.Enum('PlistFormat', 'FMT_XML FMT_BINARY', module=__name__)
globals().update(PlistFormat.__members__)
-#
-#
-# Deprecated functionality
-#
-#
-
-
-@contextlib.contextmanager
-def _maybe_open(pathOrFile, mode):
- if isinstance(pathOrFile, str):
- with open(pathOrFile, mode) as fp:
- yield fp
-
- else:
- yield pathOrFile
-
-
-def readPlist(pathOrFile):
- """
- Read a .plist from a path or file. pathOrFile should either
- be a file name, or a readable binary file object.
-
- This function is deprecated, use load instead.
- """
- warn("The readPlist function is deprecated, use load() instead",
- DeprecationWarning, 2)
-
- with _maybe_open(pathOrFile, 'rb') as fp:
- return load(fp, fmt=None, use_builtin_types=False)
-
-def writePlist(value, pathOrFile):
- """
- Write 'value' to a .plist file. 'pathOrFile' may either be a
- file name or a (writable) file object.
-
- This function is deprecated, use dump instead.
- """
- warn("The writePlist function is deprecated, use dump() instead",
- DeprecationWarning, 2)
- with _maybe_open(pathOrFile, 'wb') as fp:
- dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False)
-
-
-def readPlistFromBytes(data):
- """
- Read a plist data from a bytes object. Return the root object.
-
- This function is deprecated, use loads instead.
- """
- warn("The readPlistFromBytes function is deprecated, use loads() instead",
- DeprecationWarning, 2)
- return load(BytesIO(data), fmt=None, use_builtin_types=False)
-
-
-def writePlistToBytes(value):
- """
- Return 'value' as a plist-formatted bytes object.
-
- This function is deprecated, use dumps instead.
- """
- warn("The writePlistToBytes function is deprecated, use dumps() instead",
- DeprecationWarning, 2)
- f = BytesIO()
- dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False)
- return f.getvalue()
-
-
-class Data:
- """
- Wrapper for binary data.
-
- This class is deprecated, use a bytes object instead.
- """
-
- def __init__(self, data):
- if not isinstance(data, bytes):
- raise TypeError("data must be as bytes")
- self.data = data
-
- @classmethod
- def fromBase64(cls, data):
- # base64.decodebytes just calls binascii.a2b_base64;
- # it seems overkill to use both base64 and binascii.
- return cls(_decode_base64(data))
-
- def asBase64(self, maxlinelength=76):
- return _encode_base64(self.data, maxlinelength)
-
- def __eq__(self, other):
- if isinstance(other, self.__class__):
- return self.data == other.data
- elif isinstance(other, bytes):
- return self.data == other
- else:
- return NotImplemented
-
- def __repr__(self):
- return "%s(%s)" % (self.__class__.__name__, repr(self.data))
-
-#
-#
-# End of deprecated functionality
-#
-#
-
-
class UID:
def __init__(self, data):
if not isinstance(data, int):
@@ -202,7 +92,6 @@ class UID:
def __hash__(self):
return hash(self.data)
-
#
# XML support
#
@@ -273,11 +162,10 @@ def _escape(text):
return text
class _PlistParser:
- def __init__(self, use_builtin_types, dict_type):
+ def __init__(self, dict_type):
self.stack = []
self.current_key = None
self.root = None
- self._use_builtin_types = use_builtin_types
self._dict_type = dict_type
def parse(self, fileobj):
@@ -285,9 +173,16 @@ class _PlistParser:
self.parser.StartElementHandler = self.handle_begin_element
self.parser.EndElementHandler = self.handle_end_element
self.parser.CharacterDataHandler = self.handle_data
+ self.parser.EntityDeclHandler = self.handle_entity_decl
self.parser.ParseFile(fileobj)
return self.root
+ def handle_entity_decl(self, entity_name, is_parameter_entity, value, base, system_id, public_id, notation_name):
+ # Reject plist files with entity declarations to avoid XML vulnerabilities in expat.
+ # Regular plist files don't contain those declarations, and Apple's plutil tool does not
+ # accept them either.
+ raise InvalidFileException("XML entity declarations are not supported in plist files")
+
def handle_begin_element(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
@@ -357,7 +252,11 @@ class _PlistParser:
self.add_object(False)
def end_integer(self):
- self.add_object(int(self.get_data()))
+ raw = self.get_data()
+ if raw.startswith('0x') or raw.startswith('0X'):
+ self.add_object(int(raw, 16))
+ else:
+ self.add_object(int(raw))
def end_real(self):
self.add_object(float(self.get_data()))
@@ -366,11 +265,7 @@ class _PlistParser:
self.add_object(self.get_data())
def end_data(self):
- if self._use_builtin_types:
- self.add_object(_decode_base64(self.get_data()))
-
- else:
- self.add_object(Data.fromBase64(self.get_data()))
+ self.add_object(_decode_base64(self.get_data()))
def end_date(self):
self.add_object(_date_from_string(self.get_data()))
@@ -452,9 +347,6 @@ class _PlistWriter(_DumbXMLWriter):
elif isinstance(value, dict):
self.write_dict(value)
- elif isinstance(value, Data):
- self.write_data(value)
-
elif isinstance(value, (bytes, bytearray)):
self.write_bytes(value)
@@ -467,9 +359,6 @@ class _PlistWriter(_DumbXMLWriter):
else:
raise TypeError("unsupported type: %s" % type(value))
- def write_data(self, data):
- self.write_bytes(data.data)
-
def write_bytes(self, data):
self.begin_element("data")
self._indent_level -= 1
@@ -563,8 +452,7 @@ class _BinaryPlistParser:
see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
"""
- def __init__(self, use_builtin_types, dict_type):
- self._use_builtin_types = use_builtin_types
+ def __init__(self, dict_type):
self._dict_type = dict_type
def parse(self, fp):
@@ -589,7 +477,7 @@ class _BinaryPlistParser:
return self._read_object(top_object)
except (OSError, IndexError, struct.error, OverflowError,
- UnicodeDecodeError):
+ ValueError):
raise InvalidFileException()
def _get_size(self, tokenL):
@@ -605,7 +493,7 @@ class _BinaryPlistParser:
def _read_ints(self, n, size):
data = self._fp.read(size * n)
if size in _BINARY_FORMAT:
- return struct.unpack('>' + _BINARY_FORMAT[size] * n, data)
+ return struct.unpack(f'>{n}{_BINARY_FORMAT[size]}', data)
else:
if not size or len(data) != size * n:
raise InvalidFileException()
@@ -664,18 +552,23 @@ class _BinaryPlistParser:
elif tokenH == 0x40: # data
s = self._get_size(tokenL)
- if self._use_builtin_types:
- result = self._fp.read(s)
- else:
- result = Data(self._fp.read(s))
+ result = self._fp.read(s)
+ if len(result) != s:
+ raise InvalidFileException()
elif tokenH == 0x50: # ascii string
s = self._get_size(tokenL)
- result = self._fp.read(s).decode('ascii')
+ data = self._fp.read(s)
+ if len(data) != s:
+ raise InvalidFileException()
+ result = data.decode('ascii')
elif tokenH == 0x60: # unicode string
- s = self._get_size(tokenL)
- result = self._fp.read(s * 2).decode('utf-16be')
+ s = self._get_size(tokenL) * 2
+ data = self._fp.read(s)
+ if len(data) != s:
+ raise InvalidFileException()
+ result = data.decode('utf-16be')
elif tokenH == 0x80: # UID
# used by Key-Archiver plist files
@@ -700,9 +593,11 @@ class _BinaryPlistParser:
obj_refs = self._read_refs(s)
result = self._dict_type()
self._objects[ref] = result
- for k, o in zip(key_refs, obj_refs):
- result[self._read_object(k)] = self._read_object(o)
-
+ try:
+ for k, o in zip(key_refs, obj_refs):
+ result[self._read_object(k)] = self._read_object(o)
+ except TypeError:
+ raise InvalidFileException()
else:
raise InvalidFileException()
@@ -716,7 +611,7 @@ def _count_to_size(count):
elif count < 1 << 16:
return 2
- elif count << 1 << 32:
+ elif count < 1 << 32:
return 4
else:
@@ -783,10 +678,6 @@ class _BinaryPlistWriter (object):
if (type(value), value) in self._objtable:
return
- elif isinstance(value, Data):
- if (type(value.data), value.data) in self._objtable:
- return
-
elif id(value) in self._objidtable:
return
@@ -795,8 +686,6 @@ class _BinaryPlistWriter (object):
self._objlist.append(value)
if isinstance(value, _scalars):
self._objtable[(type(value), value)] = refnum
- elif isinstance(value, Data):
- self._objtable[(type(value.data), value.data)] = refnum
else:
self._objidtable[id(value)] = refnum
@@ -826,8 +715,6 @@ class _BinaryPlistWriter (object):
def _getrefnum(self, value):
if isinstance(value, _scalars):
return self._objtable[(type(value), value)]
- elif isinstance(value, Data):
- return self._objtable[(type(value.data), value.data)]
else:
return self._objidtable[id(value)]
@@ -885,10 +772,6 @@ class _BinaryPlistWriter (object):
f = (value - datetime.datetime(2001, 1, 1)).total_seconds()
self._fp.write(struct.pack('>Bd', 0x33, f))
- elif isinstance(value, Data):
- self._write_size(0x40, len(value.data))
- self._fp.write(value.data)
-
elif isinstance(value, (bytes, bytearray)):
self._write_size(0x40, len(value))
self._fp.write(value)
@@ -970,7 +853,7 @@ _FORMATS={
}
-def load(fp, *, fmt=None, use_builtin_types=True, dict_type=dict):
+def load(fp, *, fmt=None, dict_type=dict):
"""Read a .plist file. 'fp' should be a readable and binary file object.
Return the unpacked root object (which usually is a dictionary).
"""
@@ -988,17 +871,16 @@ def load(fp, *, fmt=None, use_builtin_types=True, dict_type=dict):
else:
P = _FORMATS[fmt]['parser']
- p = P(use_builtin_types=use_builtin_types, dict_type=dict_type)
+ p = P(dict_type=dict_type)
return p.parse(fp)
-def loads(value, *, fmt=None, use_builtin_types=True, dict_type=dict):
+def loads(value, *, fmt=None, dict_type=dict):
"""Read a .plist file from a bytes object.
Return the unpacked root object (which usually is a dictionary).
"""
fp = BytesIO(value)
- return load(
- fp, fmt=fmt, use_builtin_types=use_builtin_types, dict_type=dict_type)
+ return load(fp, fmt=fmt, dict_type=dict_type)
def dump(value, fp, *, fmt=FMT_XML, sort_keys=True, skipkeys=False):
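
With Data and the readPlist family gone, bytes round-trip directly, and the hardened parser rejects entity declarations and truncated data runs. A minimal round-trip (keys are arbitrary):

    import plistlib

    blob = plistlib.dumps({"name": "demo", "payload": b"\x00\x01"})
    plistlib.loads(blob)   # {'name': 'demo', 'payload': b'\x00\x01'}
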
diff --git a/x64/Lib/poplib.py b/x64/Lib/poplib.py
index e3bd2ab..0f85873 100644
--- a/x64/Lib/poplib.py
+++ b/x64/Lib/poplib.py
@@ -107,6 +107,8 @@ class POP3:
self.welcome = self._getresp()
def _create_socket(self, timeout):
+ if timeout is not None and not timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
@@ -385,7 +387,7 @@ class POP3:
for capline in rawcaps:
capnm, capargs = _parsecap(capline)
caps[capnm] = capargs
- except error_proto as _err:
+ except error_proto:
raise error_proto('-ERR CAPA not supported by server')
return caps
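
The zero-timeout guard fires before any connection attempt, so it can be observed without a reachable server (hostname is a placeholder):

    import poplib

    try:
        poplib.POP3("pop.example.invalid", timeout=0)
    except ValueError as exc:
        print(exc)   # Non-blocking socket (timeout=0) is not supported
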
diff --git a/x64/Lib/pprint.py b/x64/Lib/pprint.py
index 4bfcc31..7c1118a 100644
--- a/x64/Lib/pprint.py
+++ b/x64/Lib/pprint.py
@@ -342,6 +342,33 @@ class PrettyPrinter:
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
+ def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level):
+ if type(object) is _types.SimpleNamespace:
+ # The SimpleNamespace repr is "namespace" instead of the class
+ # name, so we do the same here. For subclasses, use the class name.
+ cls_name = 'namespace'
+ else:
+ cls_name = object.__class__.__name__
+ indent += len(cls_name) + 1
+ delimnl = ',\n' + ' ' * indent
+ items = object.__dict__.items()
+ last_index = len(items) - 1
+
+ stream.write(cls_name + '(')
+ for i, (key, ent) in enumerate(items):
+ stream.write(key)
+ stream.write('=')
+
+ last = i == last_index
+ self._format(ent, stream, indent + len(key) + 1,
+ allowance if last else 1,
+ context, level)
+ if not last:
+ stream.write(delimnl)
+ stream.write(')')
+
+ _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
+
def _format_dict_items(self, items, stream, indent, allowance, context,
level):
write = stream.write
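
The new dispatch entry gives SimpleNamespace the same width-aware treatment dicts already get. For example:

    import pprint
    import types

    ns = types.SimpleNamespace(alpha=1, beta=list(range(10)))
    pprint.pprint(ns, width=30)
    # wraps roughly as:
    # namespace(alpha=1,
    #           beta=[...])
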
diff --git a/x64/Lib/profile.py b/x64/Lib/profile.py
index 1346297..5cb017e 100644
--- a/x64/Lib/profile.py
+++ b/x64/Lib/profile.py
@@ -425,29 +425,13 @@ class Profile:
return self
# This method is more useful to profile a single function call.
- def runcall(*args, **kw):
- if len(args) >= 2:
- self, func, *args = args
- elif not args:
- raise TypeError("descriptor 'runcall' of 'Profile' object "
- "needs an argument")
- elif 'func' in kw:
- func = kw.pop('func')
- self, *args = args
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('runcall expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
+ def runcall(self, func, /, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
- runcall.__text_signature__ = '($self, func, /, *args, **kw)'
#******************************************************************
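
Note: runcall() now declares func as positional-only, replacing the old *args self-shuffling shim; passing func (or self) by keyword, deprecated in 3.8, is a plain TypeError in 3.9. A short sketch:

    import profile

    def work(n, scale=1):
        return sum(i * scale for i in range(n))

    p = profile.Profile()
    result = p.runcall(work, 10_000, scale=2)   # func is positional-only
    # p.runcall(func=work)  # TypeError in 3.9
    p.print_stats()
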
@@ -587,6 +571,11 @@ def main():
(options, args) = parser.parse_args()
sys.argv[:] = args
+ # The script that we're profiling may chdir, so capture the absolute path
+ # to the output file at startup.
+ if options.outfile is not None:
+ options.outfile = os.path.abspath(options.outfile)
+
if len(args) > 0:
if options.module:
import runpy
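
Note: the -o path is resolved eagerly because the profiled script may chdir. The equivalent effect, standalone (the paths are illustrative):

    import os
    import tempfile

    outfile = 'stats.out'                  # as given on the command line
    outfile = os.path.abspath(outfile)     # pinned before the script runs
    os.chdir(tempfile.gettempdir())        # a chdir inside the script...
    print(outfile)                         # ...no longer moves the output
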
diff --git a/x64/Lib/pstats.py b/x64/Lib/pstats.py
index 4b419a8..0f93ae0 100644
--- a/x64/Lib/pstats.py
+++ b/x64/Lib/pstats.py
@@ -25,11 +25,13 @@ import os
import time
import marshal
import re
+
from enum import Enum
from functools import cmp_to_key
+from dataclasses import dataclass
+from typing import Dict
-__all__ = ["Stats", "SortKey"]
-
+__all__ = ["Stats", "SortKey", "FunctionProfile", "StatsProfile"]
class SortKey(str, Enum):
CALLS = 'calls', 'ncalls'
@@ -43,15 +45,31 @@ class SortKey(str, Enum):
TIME = 'time', 'tottime'
def __new__(cls, *values):
- obj = str.__new__(cls)
-
- obj._value_ = values[0]
+ value = values[0]
+ obj = str.__new__(cls, value)
+ obj._value_ = value
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
+@dataclass(unsafe_hash=True)
+class FunctionProfile:
+ ncalls: int
+ tottime: float
+ percall_tottime: float
+ cumtime: float
+ percall_cumtime: float
+ file_name: str
+ line_number: int
+
+@dataclass(unsafe_hash=True)
+class StatsProfile:
+ '''Class for keeping track of the overall profile statistics.'''
+ total_tt: float
+ func_profiles: Dict[str, FunctionProfile]
+
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
@@ -333,6 +351,41 @@ class Stats:
return new_list, msg
+ def get_stats_profile(self):
+ """This method returns an instance of StatsProfile, which contains a mapping
+ of function names to instances of FunctionProfile. Each FunctionProfile
+ instance holds information related to the function's profile such as how
+ long the function took to run, how many times it was called, etc...
+ """
+ func_list = self.fcn_list[:] if self.fcn_list else list(self.stats.keys())
+ if not func_list:
+ return StatsProfile(0, {})
+
+ total_tt = float(f8(self.total_tt))
+ func_profiles = {}
+ stats_profile = StatsProfile(total_tt, func_profiles)
+
+ for func in func_list:
+ cc, nc, tt, ct, callers = self.stats[func]
+ file_name, line_number, func_name = func
+ ncalls = str(nc) if nc == cc else (str(nc) + '/' + str(cc))
+ tottime = float(f8(tt))
+ percall_tottime = -1 if nc == 0 else float(f8(tt/nc))
+ cumtime = float(f8(ct))
+ percall_cumtime = -1 if cc == 0 else float(f8(ct/cc))
+ func_profile = FunctionProfile(
+ ncalls,
+ tottime, # time spent in this function alone
+ percall_tottime,
+ cumtime, # time spent in the function plus all functions that this function called
+ percall_cumtime,
+ file_name,
+ line_number
+ )
+ func_profiles[func_name] = func_profile
+
+ return stats_profile
+
def get_print_list(self, sel_list):
width = self.max_name_len
if self.fcn_list:
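
Note: get_stats_profile() exposes the report as data rather than printed text: a StatsProfile whose func_profiles maps function names to FunctionProfile dataclasses. A sketch of typical use:

    import cProfile
    import pstats

    pr = cProfile.Profile()
    pr.enable()
    sorted(range(1000), key=str)    # some work to measure
    pr.disable()

    sp = pstats.Stats(pr).get_stats_profile()
    print(sp.total_tt)                         # total time, as a float
    for name, fp in sp.func_profiles.items():
        print(name, fp.ncalls, fp.cumtime)     # fields, not text rows
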
diff --git a/x64/Lib/py_compile.py b/x64/Lib/py_compile.py
index 2173689..a81f493 100644
--- a/x64/Lib/py_compile.py
+++ b/x64/Lib/py_compile.py
@@ -197,12 +197,10 @@ def main(args=None):
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
- if quiet < 2:
- sys.stderr.write("%s\n" % error.msg)
+ sys.stderr.write("%s\n" % error.msg)
except OSError as error:
rv = 1
- if quiet < 2:
- sys.stderr.write("%s\n" % error)
+ sys.stderr.write("%s\n" % error)
else:
for filename in args:
try:
@@ -210,8 +208,7 @@ def main(args=None):
except PyCompileError as error:
# return value to indicate at least one failure
rv = 1
- if quiet < 2:
- sys.stderr.write("%s\n" % error.msg)
+ sys.stderr.write("%s\n" % error.msg)
return rv
if __name__ == "__main__":
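
Note: main() now reports every failure on stderr unconditionally and returns 1. The equivalent of one loop iteration, with a hypothetical file name:

    import sys
    import py_compile

    try:
        py_compile.compile('broken.py', doraise=True)
    except py_compile.PyCompileError as error:
        sys.stderr.write("%s\n" % error.msg)   # syntax error in the source
        sys.exit(1)
    except OSError as error:
        sys.stderr.write("%s\n" % error)       # e.g. the file is missing
        sys.exit(1)
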
diff --git a/x64/Lib/pydoc.py b/x64/Lib/pydoc.py
index dc3377d..35ef3eb 100644
--- a/x64/Lib/pydoc.py
+++ b/x64/Lib/pydoc.py
@@ -90,9 +90,101 @@ def pathdirs():
normdirs.append(normdir)
return dirs
+def _findclass(func):
+ cls = sys.modules.get(func.__module__)
+ if cls is None:
+ return None
+ for name in func.__qualname__.split('.')[:-1]:
+ cls = getattr(cls, name)
+ if not inspect.isclass(cls):
+ return None
+ return cls
+
+def _finddoc(obj):
+ if inspect.ismethod(obj):
+ name = obj.__func__.__name__
+ self = obj.__self__
+ if (inspect.isclass(self) and
+ getattr(getattr(self, name, None), '__func__') is obj.__func__):
+ # classmethod
+ cls = self
+ else:
+ cls = self.__class__
+ elif inspect.isfunction(obj):
+ name = obj.__name__
+ cls = _findclass(obj)
+ if cls is None or getattr(cls, name) is not obj:
+ return None
+ elif inspect.isbuiltin(obj):
+ name = obj.__name__
+ self = obj.__self__
+ if (inspect.isclass(self) and
+ self.__qualname__ + '.' + name == obj.__qualname__):
+ # classmethod
+ cls = self
+ else:
+ cls = self.__class__
+ # Should be tested before isdatadescriptor().
+ elif isinstance(obj, property):
+ func = obj.fget
+ name = func.__name__
+ cls = _findclass(func)
+ if cls is None or getattr(cls, name) is not obj:
+ return None
+ elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj):
+ name = obj.__name__
+ cls = obj.__objclass__
+ if getattr(cls, name) is not obj:
+ return None
+ if inspect.ismemberdescriptor(obj):
+ slots = getattr(cls, '__slots__', None)
+ if isinstance(slots, dict) and name in slots:
+ return slots[name]
+ else:
+ return None
+ for base in cls.__mro__:
+ try:
+ doc = _getowndoc(getattr(base, name))
+ except AttributeError:
+ continue
+ if doc is not None:
+ return doc
+ return None
+
+def _getowndoc(obj):
+ """Get the documentation string for an object if it is not
+ inherited from its class."""
+ try:
+ doc = object.__getattribute__(obj, '__doc__')
+ if doc is None:
+ return None
+ if obj is not type:
+ typedoc = type(obj).__doc__
+ if isinstance(typedoc, str) and typedoc == doc:
+ return None
+ return doc
+ except AttributeError:
+ return None
+
+def _getdoc(object):
+ """Get the documentation string for an object.
+
+ All tabs are expanded to spaces. To clean up docstrings that are
+ indented to line up with blocks of code, any whitespace that can be
+ uniformly removed from the second line onwards is removed."""
+ doc = _getowndoc(object)
+ if doc is None:
+ try:
+ doc = _finddoc(object)
+ except (AttributeError, TypeError):
+ return None
+ if not isinstance(doc, str):
+ return None
+ return inspect.cleandoc(doc)
+
def getdoc(object):
"""Get the doc string or comments for an object."""
- result = inspect.getdoc(object) or inspect.getcomments(object)
+ result = _getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
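
Note: the new helpers let pydoc recover a docstring from the defining class (walking the MRO, or a property's getter), while _getowndoc() rejects a docstring merely inherited from the object's type. A sketch of the visible effect, with module-level classes:

    import pydoc

    class Base:
        def ping(self):
            """Say hello."""

    class Child(Base):
        def ping(self):          # overrides without a docstring
            pass

    print(pydoc.getdoc(Child.ping))   # 'Say hello.', found via the MRO
    print(repr(pydoc.getdoc(42)))     # '': int.__doc__ is not 42's own
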
@@ -584,7 +676,7 @@ class HTMLDoc(Doc):
escape = escape or self.escape
results = []
here = 0
- pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
+ pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
@@ -825,11 +917,8 @@ class HTMLDoc(Doc):
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
- if callable(value) or inspect.isdatadescriptor(value):
- doc = getattr(value, "__doc__", None)
- else:
- doc = None
- if doc is None:
+ doc = getdoc(value)
+ if not doc:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
@@ -1309,10 +1398,7 @@ location listed above.
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
- if callable(value) or inspect.isdatadescriptor(value):
- doc = getdoc(value)
- else:
- doc = None
+ doc = getdoc(value)
try:
obj = getattr(object, name)
except AttributeError:
@@ -1448,8 +1534,10 @@ location listed above.
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
- if doc is not None:
- line += '\n' + self.indent(str(doc))
+ if not doc:
+ doc = getdoc(object)
+ if doc:
+ line += '\n' + self.indent(str(doc)) + '\n'
return line
class _PlainTextDoc(TextDoc):
@@ -1672,11 +1760,15 @@ def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
- inspect.isdatadescriptor(object)):
+ inspect.isdatadescriptor(object) or
+ _getdoc(object)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
- object = type(object)
- desc += ' object'
+ if hasattr(object, '__origin__'):
+ object = object.__origin__
+ else:
+ object = type(object)
+ desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
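
Note: render_doc() now prefers an object's own docstring, and for parameterized generics (which carry __origin__) it documents the underlying class instead of the alias's type. A hedged sketch:

    import pydoc

    # list[int].__origin__ is list, so the body documents list itself
    # rather than types.GenericAlias.
    text = pydoc.render_doc(list[int])
    print('class list' in text)   # True
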
@@ -1725,6 +1817,7 @@ class Helper:
'False': '',
'None': '',
'True': '',
+ '__peg_parser__': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
diff --git a/x64/Lib/pydoc_data/topics.py b/x64/Lib/pydoc_data/topics.py
index f1fdb7f..d8dd8c5 100644
--- a/x64/Lib/pydoc_data/topics.py
+++ b/x64/Lib/pydoc_data/topics.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Mon Feb 24 21:52:17 2020
+# Autogenerated by Sphinx on Mon Dec 7 15:00:07 2020
topics = {'assert': 'The "assert" statement\n'
'**********************\n'
'\n'
@@ -99,27 +99,26 @@ topics = {'assert': 'The "assert" statement\n'
'assigned,\n'
' from left to right, to the corresponding targets.\n'
'\n'
- ' * If the target list contains one target prefixed with an\n'
- ' asterisk, called a “starred” target: The object must be '
- 'an\n'
- ' iterable with at least as many items as there are targets '
- 'in the\n'
- ' target list, minus one. The first items of the iterable '
- 'are\n'
- ' assigned, from left to right, to the targets before the '
+ ' * If the target list contains one target prefixed with an '
+ 'asterisk,\n'
+ ' called a “starred” target: The object must be an iterable '
+ 'with at\n'
+ ' least as many items as there are targets in the target '
+ 'list, minus\n'
+ ' one. The first items of the iterable are assigned, from '
+ 'left to\n'
+ ' right, to the targets before the starred target. The '
+ 'final items\n'
+ ' of the iterable are assigned to the targets after the '
'starred\n'
- ' target. The final items of the iterable are assigned to '
- 'the\n'
- ' targets after the starred target. A list of the remaining '
- 'items\n'
- ' in the iterable is then assigned to the starred target '
- '(the list\n'
- ' can be empty).\n'
+ ' target. A list of the remaining items in the iterable is '
+ 'then\n'
+ ' assigned to the starred target (the list can be empty).\n'
'\n'
' * Else: The object must be an iterable with the same number '
- 'of\n'
- ' items as there are targets in the target list, and the '
- 'items are\n'
+ 'of items\n'
+ ' as there are targets in the target list, and the items '
+ 'are\n'
' assigned, from left to right, to the corresponding '
'targets.\n'
'\n'
@@ -135,10 +134,10 @@ topics = {'assert': 'The "assert" statement\n'
'in the\n'
' current local namespace.\n'
'\n'
- ' * Otherwise: the name is bound to the object in the global\n'
- ' namespace or the outer namespace determined by '
- '"nonlocal",\n'
- ' respectively.\n'
+ ' * Otherwise: the name is bound to the object in the global '
+ 'namespace\n'
+ ' or the outer namespace determined by "nonlocal", '
+ 'respectively.\n'
'\n'
' The name is rebound if it was already bound. This may cause '
'the\n'
@@ -225,26 +224,27 @@ topics = {'assert': 'The "assert" statement\n'
'called with\n'
' appropriate arguments.\n'
'\n'
- '* If the target is a slicing: The primary expression in the\n'
- ' reference is evaluated. It should yield a mutable sequence '
- 'object\n'
- ' (such as a list). The assigned object should be a sequence '
- 'object\n'
- ' of the same type. Next, the lower and upper bound '
- 'expressions are\n'
- ' evaluated, insofar they are present; defaults are zero and '
- 'the\n'
- ' sequence’s length. The bounds should evaluate to integers. '
- 'If\n'
- ' either bound is negative, the sequence’s length is added to '
- 'it. The\n'
- ' resulting bounds are clipped to lie between zero and the '
+ '* If the target is a slicing: The primary expression in the '
+ 'reference\n'
+ ' is evaluated. It should yield a mutable sequence object '
+ '(such as a\n'
+ ' list). The assigned object should be a sequence object of '
+ 'the same\n'
+ ' type. Next, the lower and upper bound expressions are '
+ 'evaluated,\n'
+ ' insofar they are present; defaults are zero and the '
'sequence’s\n'
- ' length, inclusive. Finally, the sequence object is asked to '
- 'replace\n'
- ' the slice with the items of the assigned sequence. The '
- 'length of\n'
- ' the slice may be different from the length of the assigned '
+ ' length. The bounds should evaluate to integers. If either '
+ 'bound is\n'
+ ' negative, the sequence’s length is added to it. The '
+ 'resulting\n'
+ ' bounds are clipped to lie between zero and the sequence’s '
+ 'length,\n'
+ ' inclusive. Finally, the sequence object is asked to replace '
+ 'the\n'
+ ' slice with the items of the assigned sequence. The length '
+ 'of the\n'
+ ' slice may be different from the length of the assigned '
'sequence,\n'
' thus changing the length of the target sequence, if the '
'target\n'
@@ -514,8 +514,8 @@ topics = {'assert': 'The "assert" statement\n'
'is semantically equivalent to:\n'
'\n'
' manager = (EXPRESSION)\n'
- ' aexit = type(manager).__aexit__\n'
' aenter = type(manager).__aenter__\n'
+ ' aexit = type(manager).__aexit__\n'
' value = await aenter(manager)\n'
' hit_except = False\n'
'\n'
@@ -544,13 +544,17 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
- '[1] The exception is propagated to the invocation stack unless\n'
- ' there is a "finally" clause which happens to raise another\n'
- ' exception. That new exception causes the old one to be lost.\n'
+ '[1] The exception is propagated to the invocation stack unless '
+ 'there\n'
+ ' is a "finally" clause which happens to raise another '
+ 'exception.\n'
+ ' That new exception causes the old one to be lost.\n'
'\n'
- '[2] A string literal appearing as the first statement in the\n'
- ' function body is transformed into the function’s "__doc__"\n'
- ' attribute and therefore the function’s *docstring*.\n'
+ '[2] A string literal appearing as the first statement in the '
+ 'function\n'
+ ' body is transformed into the function’s "__doc__" attribute '
+ 'and\n'
+ ' therefore the function’s *docstring*.\n'
'\n'
'[3] A string literal appearing as the first statement in the class\n'
' body is transformed into the namespace’s "__doc__" item and\n'
@@ -688,11 +692,18 @@ topics = {'assert': 'The "assert" statement\n'
'needs, for\n'
' example, "object.__getattribute__(self, name)".\n'
'\n'
- ' Note: This method may still be bypassed when looking '
- 'up special\n'
- ' methods as the result of implicit invocation via '
- 'language syntax\n'
- ' or built-in functions. See Special method lookup.\n'
+ ' Note:\n'
+ '\n'
+ ' This method may still be bypassed when looking up '
+ 'special methods\n'
+ ' as the result of implicit invocation via language '
+ 'syntax or\n'
+ ' built-in functions. See Special method lookup.\n'
+ '\n'
+ ' For certain sensitive attribute accesses, raises an '
+ 'auditing event\n'
+ ' "object.__getattr__" with arguments "obj" and '
+ '"name".\n'
'\n'
'object.__setattr__(self, name, value)\n'
'\n'
@@ -710,6 +721,11 @@ topics = {'assert': 'The "assert" statement\n'
'for example,\n'
' "object.__setattr__(self, name, value)".\n'
'\n'
+ ' For certain sensitive attribute assignments, raises '
+ 'an auditing\n'
+ ' event "object.__setattr__" with arguments "obj", '
+ '"name", "value".\n'
+ '\n'
'object.__delattr__(self, name)\n'
'\n'
' Like "__setattr__()" but for attribute deletion '
@@ -718,6 +734,11 @@ topics = {'assert': 'The "assert" statement\n'
'obj.name" is\n'
' meaningful for the object.\n'
'\n'
+ ' For certain sensitive attribute deletions, raises an '
+ 'auditing event\n'
+ ' "object.__delattr__" with arguments "obj" and '
+ '"name".\n'
+ '\n'
'object.__dir__(self)\n'
'\n'
' Called when "dir()" is called on the object. A '
@@ -776,15 +797,16 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' sys.modules[__name__].__class__ = VerboseModule\n'
'\n'
- 'Note: Defining module "__getattr__" and setting module '
- '"__class__"\n'
- ' only affect lookups made using the attribute access '
- 'syntax –\n'
- ' directly accessing the module globals (whether by code '
- 'within the\n'
- ' module, or via a reference to the module’s globals '
- 'dictionary) is\n'
- ' unaffected.\n'
+ 'Note:\n'
+ '\n'
+ ' Defining module "__getattr__" and setting module '
+ '"__class__" only\n'
+ ' affect lookups made using the attribute access syntax '
+ '– directly\n'
+ ' accessing the module globals (whether by code within '
+ 'the module, or\n'
+ ' via a reference to the module’s globals dictionary) is '
+ 'unaffected.\n'
'\n'
'Changed in version 3.5: "__class__" module attribute is '
'now writable.\n'
@@ -867,12 +889,14 @@ topics = {'assert': 'The "assert" statement\n'
'created. The\n'
' descriptor has been assigned to *name*.\n'
'\n'
- ' Note: "__set_name__()" is only called implicitly as '
- 'part of the\n'
- ' "type" constructor, so it will need to be called '
- 'explicitly with\n'
- ' the appropriate parameters when a descriptor is '
- 'added to a class\n'
+ ' Note:\n'
+ '\n'
+ ' "__set_name__()" is only called implicitly as part '
+ 'of the "type"\n'
+ ' constructor, so it will need to be called '
+ 'explicitly with the\n'
+ ' appropriate parameters when a descriptor is added '
+ 'to a class\n'
' after initial creation:\n'
'\n'
' class A:\n'
@@ -979,12 +1003,13 @@ topics = {'assert': 'The "assert" statement\n'
'define both\n'
'"__get__()" and "__set__()", while non-data descriptors '
'have just the\n'
- '"__get__()" method. Data descriptors with "__set__()" '
- 'and "__get__()"\n'
- 'defined always override a redefinition in an instance '
- 'dictionary. In\n'
- 'contrast, non-data descriptors can be overridden by '
- 'instances.\n'
+ '"__get__()" method. Data descriptors with "__get__()" '
+ 'and "__set__()"\n'
+ '(and/or "__delete__()") defined always override a '
+ 'redefinition in an\n'
+ 'instance dictionary. In contrast, non-data descriptors '
+ 'can be\n'
+ 'overridden by instances.\n'
'\n'
'Python methods (including "staticmethod()" and '
'"classmethod()") are\n'
@@ -1032,10 +1057,9 @@ topics = {'assert': 'The "assert" statement\n'
'--------------------------\n'
'\n'
'* When inheriting from a class without *__slots__*, the '
- '*__dict__*\n'
- ' and *__weakref__* attribute of the instances will '
- 'always be\n'
- ' accessible.\n'
+ '*__dict__* and\n'
+ ' *__weakref__* attribute of the instances will always '
+ 'be accessible.\n'
'\n'
'* Without a *__dict__* variable, instances cannot be '
'assigned new\n'
@@ -1050,14 +1074,12 @@ topics = {'assert': 'The "assert" statement\n'
' declaration.\n'
'\n'
'* Without a *__weakref__* variable for each instance, '
- 'classes\n'
- ' defining *__slots__* do not support weak references to '
- 'its\n'
- ' instances. If weak reference support is needed, then '
- 'add\n'
- ' "\'__weakref__\'" to the sequence of strings in the '
- '*__slots__*\n'
- ' declaration.\n'
+ 'classes defining\n'
+ ' *__slots__* do not support weak references to their '
+ 'instances. If weak\n'
+ ' reference support is needed, then add '
+ '"\'__weakref__\'" to the\n'
+ ' sequence of strings in the *__slots__* declaration.\n'
'\n'
'* *__slots__* are implemented at the class level by '
'creating\n'
@@ -1070,24 +1092,23 @@ topics = {'assert': 'The "assert" statement\n'
' attribute would overwrite the descriptor assignment.\n'
'\n'
'* The action of a *__slots__* declaration is not limited '
- 'to the\n'
- ' class where it is defined. *__slots__* declared in '
- 'parents are\n'
- ' available in child classes. However, child subclasses '
- 'will get a\n'
- ' *__dict__* and *__weakref__* unless they also define '
- '*__slots__*\n'
- ' (which should only contain names of any *additional* '
- 'slots).\n'
+ 'to the class\n'
+ ' where it is defined. *__slots__* declared in parents '
+ 'are available\n'
+ ' in child classes. However, child subclasses will get a '
+ '*__dict__*\n'
+ ' and *__weakref__* unless they also define *__slots__* '
+ '(which should\n'
+ ' only contain names of any *additional* slots).\n'
'\n'
'* If a class defines a slot also defined in a base '
- 'class, the\n'
- ' instance variable defined by the base class slot is '
- 'inaccessible\n'
- ' (except by retrieving its descriptor directly from the '
- 'base class).\n'
- ' This renders the meaning of the program undefined. In '
- 'the future, a\n'
+ 'class, the instance\n'
+ ' variable defined by the base class slot is '
+ 'inaccessible (except by\n'
+ ' retrieving its descriptor directly from the base '
+ 'class). This\n'
+ ' renders the meaning of the program undefined. In the '
+ 'future, a\n'
' check may be added to prevent this.\n'
'\n'
'* Nonempty *__slots__* does not work for classes derived '
@@ -1096,9 +1117,9 @@ topics = {'assert': 'The "assert" statement\n'
'"bytes" and "tuple".\n'
'\n'
'* Any non-string iterable may be assigned to '
- '*__slots__*. Mappings\n'
- ' may also be used; however, in the future, special '
- 'meaning may be\n'
+ '*__slots__*. Mappings may\n'
+ ' also be used; however, in the future, special meaning '
+ 'may be\n'
' assigned to the values corresponding to each key.\n'
'\n'
'* *__class__* assignment works only if both classes have '
@@ -1114,9 +1135,9 @@ topics = {'assert': 'The "assert" statement\n'
' raise "TypeError".\n'
'\n'
'* If an iterator is used for *__slots__* then a '
- 'descriptor is\n'
- ' created for each of the iterator’s values. However, '
- 'the *__slots__*\n'
+ 'descriptor is created\n'
+ ' for each of the iterator’s values. However, the '
+ '*__slots__*\n'
' attribute will be an empty iterator.\n',
'attribute-references': 'Attribute references\n'
'********************\n'
@@ -1458,8 +1479,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' Called when the instance is “called” as a function; if '
'this method\n'
- ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
- ' "x.__call__(arg1, arg2, ...)".\n',
+ ' is defined, "x(arg1, arg2, ...)" roughly translates to\n'
+ ' "type(x).__call__(x, arg1, ...)".\n',
'calls': 'Calls\n'
'*****\n'
'\n'
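
Note: the rewording makes explicit that implicit calls look the special method up on the type, so an instance attribute named __call__ is ignored. A sketch:

    class Greeter:
        def __call__(self):
            return 'from the type'

    g = Greeter()
    g.__call__ = lambda: 'from the instance'   # not used by g()
    print(g())            # 'from the type', i.e. type(g).__call__(g)
    print(g.__call__())   # 'from the instance'
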
@@ -1475,8 +1496,8 @@ topics = {'assert': 'The "assert" statement\n'
' | starred_and_keywords ["," '
'keywords_arguments]\n'
' | keywords_arguments\n'
- ' positional_arguments ::= ["*"] expression ("," ["*"] '
- 'expression)*\n'
+ ' positional_arguments ::= positional_item ("," positional_item)*\n'
+ ' positional_item ::= assignment_expression | "*" expression\n'
' starred_and_keywords ::= ("*" expression | keyword_item)\n'
' ("," "*" expression | "," '
'keyword_item)*\n'
@@ -1716,6 +1737,10 @@ topics = {'assert': 'The "assert" statement\n'
'for\n'
'function decorators. The result is then bound to the class name.\n'
'\n'
+ 'Changed in version 3.9: Classes may be decorated with any valid\n'
+ '"assignment_expression". Previously, the grammar was much more\n'
+ 'restrictive; see **PEP 614** for details.\n'
+ '\n'
'**Programmer’s note:** Variables defined in the class definition '
'are\n'
'class attributes; they are shared by instances. Instance '
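
Note: PEP 614 in practice: a decorator may now be any assignment_expression, not just a dotted name with an optional call. A self-contained sketch:

    registry = {}

    def register(name):
        def deco(fn):
            registry[name] = fn
            return fn
        return deco

    decorators = {'default': register('default')}

    @decorators['default']       # a SyntaxError before Python 3.9
    def handler():
        return 'ok'

    assert registry['default'] is handler
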
@@ -1872,15 +1897,15 @@ topics = {'assert': 'The "assert" statement\n'
' value is false. A counter-intuitive implication is that '
'not-a-number\n'
' values are not equal to themselves. For example, if "x =\n'
- ' float(\'NaN\')", "3 < x", "x < 3", "x == x", "x != x" are '
- 'all false.\n'
- ' This behavior is compliant with IEEE 754.\n'
+ ' float(\'NaN\')", "3 < x", "x < 3" and "x == x" are all '
+ 'false, while "x\n'
+ ' != x" is true. This behavior is compliant with IEEE 754.\n'
'\n'
'* "None" and "NotImplemented" are singletons. **PEP 8** '
- 'advises\n'
- ' that comparisons for singletons should always be done with '
- '"is" or\n'
- ' "is not", never the equality operators.\n'
+ 'advises that\n'
+ ' comparisons for singletons should always be done with "is" '
+ 'or "is\n'
+ ' not", never the equality operators.\n'
'\n'
'* Binary sequences (instances of "bytes" or "bytearray") can '
'be\n'
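
Note: the corrected NaN paragraph, as executable checks:

    x = float('NaN')
    print(3 < x, x < 3, x == x)   # False False False
    print(x != x)                 # True, per IEEE 754
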
@@ -1896,15 +1921,15 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' Strings and binary sequences cannot be directly compared.\n'
'\n'
- '* Sequences (instances of "tuple", "list", or "range") can '
- 'be\n'
- ' compared only within each of their types, with the '
- 'restriction that\n'
- ' ranges do not support order comparison. Equality '
- 'comparison across\n'
- ' these types results in inequality, and ordering comparison '
- 'across\n'
- ' these types raises "TypeError".\n'
+ '* Sequences (instances of "tuple", "list", or "range") can be '
+ 'compared\n'
+ ' only within each of their types, with the restriction that '
+ 'ranges do\n'
+ ' not support order comparison. Equality comparison across '
+ 'these\n'
+ ' types results in inequality, and ordering comparison across '
+ 'these\n'
+ ' types raises "TypeError".\n'
'\n'
' Sequences compare lexicographically using comparison of\n'
' corresponding elements. The built-in containers typically '
@@ -1928,8 +1953,8 @@ topics = {'assert': 'The "assert" statement\n'
' false because the type is not the same).\n'
'\n'
' * Collections that support order comparison are ordered the '
- 'same\n'
- ' as their first unequal elements (for example, "[1,2,x] <= '
+ 'same as\n'
+ ' their first unequal elements (for example, "[1,2,x] <= '
'[1,2,y]"\n'
' has the same value as "x <= y"). If a corresponding '
'element does\n'
@@ -1947,8 +1972,8 @@ topics = {'assert': 'The "assert" statement\n'
'"TypeError".\n'
'\n'
'* Sets (instances of "set" or "frozenset") can be compared '
- 'within\n'
- ' and across their types.\n'
+ 'within and\n'
+ ' across their types.\n'
'\n'
' They define order comparison operators to mean subset and '
'superset\n'
@@ -1967,8 +1992,8 @@ topics = {'assert': 'The "assert" statement\n'
' Comparison of sets enforces reflexivity of its elements.\n'
'\n'
'* Most other built-in types have no comparison methods '
- 'implemented,\n'
- ' so they inherit the default comparison behavior.\n'
+ 'implemented, so\n'
+ ' they inherit the default comparison behavior.\n'
'\n'
'User-defined classes that customize their comparison behavior '
'should\n'
@@ -2017,10 +2042,10 @@ topics = {'assert': 'The "assert" statement\n'
' "total_ordering()" decorator.\n'
'\n'
'* The "hash()" result should be consistent with equality. '
- 'Objects\n'
- ' that are equal should either have the same hash value, or '
- 'be marked\n'
- ' as unhashable.\n'
+ 'Objects that\n'
+ ' are equal should either have the same hash value, or be '
+ 'marked as\n'
+ ' unhashable.\n'
'\n'
'Python does not enforce these consistency rules. In fact, '
'the\n'
@@ -2186,8 +2211,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'The "if" statement is used for conditional execution:\n'
'\n'
- ' if_stmt ::= "if" expression ":" suite\n'
- ' ("elif" expression ":" suite)*\n'
+ ' if_stmt ::= "if" assignment_expression ":" suite\n'
+ ' ("elif" assignment_expression ":" suite)*\n'
' ["else" ":" suite]\n'
'\n'
'It selects exactly one of the suites by evaluating the '
@@ -2210,7 +2235,7 @@ topics = {'assert': 'The "assert" statement\n'
'an\n'
'expression is true:\n'
'\n'
- ' while_stmt ::= "while" expression ":" suite\n'
+ ' while_stmt ::= "while" assignment_expression ":" suite\n'
' ["else" ":" suite]\n'
'\n'
'This repeatedly tests the expression and, if it is true, '
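
Note: the grammar shown above admits a named expression directly in the while header. A short sketch:

    import io

    stream = io.StringIO('one\ntwo\n')
    while line := stream.readline():   # loop ends when '' is returned
        print(line.rstrip())
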
@@ -2294,10 +2319,11 @@ topics = {'assert': 'The "assert" statement\n'
':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, '
'2]".\n'
'\n'
- 'Note: There is a subtlety when the sequence is being modified by '
- 'the\n'
- ' loop (this can only occur for mutable sequences, e.g. lists). '
- 'An\n'
+ 'Note:\n'
+ '\n'
+ ' There is a subtlety when the sequence is being modified by the '
+ 'loop\n'
+ ' (this can only occur for mutable sequences, e.g. lists). An\n'
' internal counter is used to keep track of which item is used '
'next,\n'
' and this is incremented on each iteration. When this counter '
@@ -2520,8 +2546,8 @@ topics = {'assert': 'The "assert" statement\n'
'follows:\n'
'\n'
'1. The context expression (the expression given in the '
- '"with_item")\n'
- ' is evaluated to obtain a context manager.\n'
+ '"with_item") is\n'
+ ' evaluated to obtain a context manager.\n'
'\n'
'2. The context manager’s "__enter__()" is loaded for later use.\n'
'\n'
@@ -2529,13 +2555,15 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'4. The context manager’s "__enter__()" method is invoked.\n'
'\n'
- '5. If a target was included in the "with" statement, the return\n'
- ' value from "__enter__()" is assigned to it.\n'
+ '5. If a target was included in the "with" statement, the return '
+ 'value\n'
+ ' from "__enter__()" is assigned to it.\n'
'\n'
- ' Note: The "with" statement guarantees that if the '
- '"__enter__()"\n'
- ' method returns without an error, then "__exit__()" will '
- 'always be\n'
+ ' Note:\n'
+ '\n'
+ ' The "with" statement guarantees that if the "__enter__()" '
+ 'method\n'
+ ' returns without an error, then "__exit__()" will always be\n'
' called. Thus, if an error occurs during the assignment to '
'the\n'
' target list, it will be treated the same as an error '
@@ -2625,8 +2653,8 @@ topics = {'assert': 'The "assert" statement\n'
'[parameter_list] ")"\n'
' ["->" expression] ":" suite\n'
' decorators ::= decorator+\n'
- ' decorator ::= "@" dotted_name ["(" '
- '[argument_list [","]] ")"] NEWLINE\n'
+ ' decorator ::= "@" assignment_expression '
+ 'NEWLINE\n'
' dotted_name ::= identifier ("." identifier)*\n'
' parameter_list ::= defparameter ("," '
'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n'
@@ -2680,6 +2708,11 @@ topics = {'assert': 'The "assert" statement\n'
'the name\n'
'"func".\n'
'\n'
+ 'Changed in version 3.9: Functions may be decorated with any '
+ 'valid\n'
+ '"assignment_expression". Previously, the grammar was much more\n'
+ 'restrictive; see **PEP 614** for details.\n'
+ '\n'
'When one or more *parameters* have the form *parameter* "="\n'
'*expression*, the function is said to have “default parameter '
'values.”\n'
@@ -2881,6 +2914,10 @@ topics = {'assert': 'The "assert" statement\n'
'function decorators. The result is then bound to the class '
'name.\n'
'\n'
+ 'Changed in version 3.9: Classes may be decorated with any valid\n'
+ '"assignment_expression". Previously, the grammar was much more\n'
+ 'restrictive; see **PEP 614** for details.\n'
+ '\n'
'**Programmer’s note:** Variables defined in the class definition '
'are\n'
'class attributes; they are shared by instances. Instance '
@@ -3009,8 +3046,8 @@ topics = {'assert': 'The "assert" statement\n'
'is semantically equivalent to:\n'
'\n'
' manager = (EXPRESSION)\n'
- ' aexit = type(manager).__aexit__\n'
' aenter = type(manager).__aenter__\n'
+ ' aexit = type(manager).__aexit__\n'
' value = await aenter(manager)\n'
' hit_except = False\n'
'\n'
@@ -3040,14 +3077,17 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
- '[1] The exception is propagated to the invocation stack unless\n'
- ' there is a "finally" clause which happens to raise another\n'
- ' exception. That new exception causes the old one to be '
- 'lost.\n'
+ '[1] The exception is propagated to the invocation stack unless '
+ 'there\n'
+ ' is a "finally" clause which happens to raise another '
+ 'exception.\n'
+ ' That new exception causes the old one to be lost.\n'
'\n'
- '[2] A string literal appearing as the first statement in the\n'
- ' function body is transformed into the function’s "__doc__"\n'
- ' attribute and therefore the function’s *docstring*.\n'
+ '[2] A string literal appearing as the first statement in the '
+ 'function\n'
+ ' body is transformed into the function’s "__doc__" attribute '
+ 'and\n'
+ ' therefore the function’s *docstring*.\n'
'\n'
'[3] A string literal appearing as the first statement in the '
'class\n'
@@ -3136,7 +3176,7 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'When a description of an arithmetic operator below uses the '
'phrase\n'
- '“the numeric arguments are converted to a common type,” this '
+ '“the numeric arguments are converted to a common type”, this '
'means\n'
'that the operator implementation for built-in types works as '
'follows:\n'
@@ -3146,8 +3186,8 @@ topics = {'assert': 'The "assert" statement\n'
' complex;\n'
'\n'
'* otherwise, if either argument is a floating point number, '
- 'the\n'
- ' other is converted to floating point;\n'
+ 'the other\n'
+ ' is converted to floating point;\n'
'\n'
'* otherwise, both must be integers and no conversion is '
'necessary.\n'
@@ -3257,7 +3297,9 @@ topics = {'assert': 'The "assert" statement\n'
'for\n'
' objects that still exist when the interpreter exits.\n'
'\n'
- ' Note: "del x" doesn’t directly call "x.__del__()" — the '
+ ' Note:\n'
+ '\n'
+ ' "del x" doesn’t directly call "x.__del__()" — the '
'former\n'
' decrements the reference count for "x" by one, and the '
'latter is\n'
@@ -3281,13 +3323,15 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' See also: Documentation for the "gc" module.\n'
'\n'
- ' Warning: Due to the precarious circumstances under '
- 'which\n'
- ' "__del__()" methods are invoked, exceptions that occur '
- 'during\n'
- ' their execution are ignored, and a warning is printed '
- 'to\n'
- ' "sys.stderr" instead. In particular:\n'
+ ' Warning:\n'
+ '\n'
+ ' Due to the precarious circumstances under which '
+ '"__del__()"\n'
+ ' methods are invoked, exceptions that occur during '
+ 'their execution\n'
+ ' are ignored, and a warning is printed to "sys.stderr" '
+ 'instead.\n'
+ ' In particular:\n'
'\n'
' * "__del__()" can be invoked when arbitrary code is '
'being\n'
@@ -3300,22 +3344,20 @@ topics = {'assert': 'The "assert" statement\n'
' that gets interrupted to execute "__del__()".\n'
'\n'
' * "__del__()" can be executed during interpreter '
- 'shutdown. As\n'
- ' a consequence, the global variables it needs to '
- 'access\n'
- ' (including other modules) may already have been '
- 'deleted or set\n'
- ' to "None". Python guarantees that globals whose name '
- 'begins\n'
- ' with a single underscore are deleted from their '
- 'module before\n'
- ' other globals are deleted; if no other references to '
- 'such\n'
- ' globals exist, this may help in assuring that '
- 'imported modules\n'
- ' are still available at the time when the "__del__()" '
- 'method is\n'
- ' called.\n'
+ 'shutdown. As a\n'
+ ' consequence, the global variables it needs to access '
+ '(including\n'
+ ' other modules) may already have been deleted or set '
+ 'to "None".\n'
+ ' Python guarantees that globals whose name begins '
+ 'with a single\n'
+ ' underscore are deleted from their module before '
+ 'other globals\n'
+ ' are deleted; if no other references to such globals '
+ 'exist, this\n'
+ ' may help in assuring that imported modules are still '
+ 'available\n'
+ ' at the time when the "__del__()" method is called.\n'
'\n'
'object.__repr__(self)\n'
'\n'
@@ -3400,7 +3442,7 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' Changed in version 3.7: "object.__format__(x, \'\')" is '
'now\n'
- ' equivalent to "str(x)" rather than "format(str(self), '
+ ' equivalent to "str(x)" rather than "format(str(x), '
'\'\')".\n'
'\n'
'object.__lt__(self, other)\n'
@@ -3434,16 +3476,21 @@ topics = {'assert': 'The "assert" statement\n'
' on the value to determine if the result is true or '
'false.\n'
'\n'
- ' By default, "__ne__()" delegates to "__eq__()" and '
- 'inverts the\n'
- ' result unless it is "NotImplemented". There are no '
- 'other implied\n'
- ' relationships among the comparison operators, for '
- 'example, the\n'
- ' truth of "(x<y or x==y)" does not imply "x<=y". To '
- 'automatically\n'
- ' generate ordering operations from a single root '
- 'operation, see\n'
+ ' By default, "object" implements "__eq__()" by using '
+ '"is", returning\n'
+ ' "NotImplemented" in the case of a false comparison: '
+ '"True if x is y\n'
+ ' else NotImplemented". For "__ne__()", by default it '
+ 'delegates to\n'
+ ' "__eq__()" and inverts the result unless it is '
+ '"NotImplemented".\n'
+ ' There are no other implied relationships among the '
+ 'comparison\n'
+ ' operators or default implementations; for example, the '
+ 'truth of\n'
+ ' "(x<y or x==y)" does not imply "x<=y". To automatically '
+ 'generate\n'
+ ' ordering operations from a single root operation, see\n'
' "functools.total_ordering()".\n'
'\n'
' See the paragraph on "__hash__()" for some important '
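
Note: the clarified defaults, as executable checks: object.__eq__() compares by identity, and __ne__() inverts whatever __eq__() returns:

    class Point:
        def __init__(self, x):
            self.x = x

    a, b = Point(1), Point(1)
    print(a == b)   # False: default __eq__ falls back to "is"
    print(a == a)   # True
    print(a != b)   # True: __ne__ delegates to __eq__ and inverts
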
@@ -3491,19 +3538,21 @@ topics = {'assert': 'The "assert" statement\n'
' def __hash__(self):\n'
' return hash((self.name, self.nick, self.color))\n'
'\n'
- ' Note: "hash()" truncates the value returned from an '
- 'object’s\n'
- ' custom "__hash__()" method to the size of a '
- '"Py_ssize_t". This\n'
- ' is typically 8 bytes on 64-bit builds and 4 bytes on '
- '32-bit\n'
- ' builds. If an object’s "__hash__()" must '
- 'interoperate on builds\n'
- ' of different bit sizes, be sure to check the width on '
- 'all\n'
- ' supported builds. An easy way to do this is with '
- '"python -c\n'
- ' "import sys; print(sys.hash_info.width)"".\n'
+ ' Note:\n'
+ '\n'
+ ' "hash()" truncates the value returned from an object’s '
+ 'custom\n'
+ ' "__hash__()" method to the size of a "Py_ssize_t". '
+ 'This is\n'
+ ' typically 8 bytes on 64-bit builds and 4 bytes on '
+ '32-bit builds.\n'
+ ' If an object’s "__hash__()" must interoperate on '
+ 'builds of\n'
+ ' different bit sizes, be sure to check the width on all '
+ 'supported\n'
+ ' builds. An easy way to do this is with "python -c '
+ '"import sys;\n'
+ ' print(sys.hash_info.width)"".\n'
'\n'
' If a class does not define an "__eq__()" method it '
'should not\n'
@@ -3561,22 +3610,24 @@ topics = {'assert': 'The "assert" statement\n'
' hashable by an "isinstance(obj, '
'collections.abc.Hashable)" call.\n'
'\n'
- ' Note: By default, the "__hash__()" values of str and '
- 'bytes\n'
- ' objects are “salted” with an unpredictable random '
- 'value.\n'
- ' Although they remain constant within an individual '
- 'Python\n'
- ' process, they are not predictable between repeated '
- 'invocations of\n'
- ' Python.This is intended to provide protection against '
- 'a denial-\n'
- ' of-service caused by carefully-chosen inputs that '
- 'exploit the\n'
- ' worst case performance of a dict insertion, O(n^2) '
- 'complexity.\n'
- ' See '
- 'http://www.ocert.org/advisories/ocert-2011-003.html for\n'
+ ' Note:\n'
+ '\n'
+ ' By default, the "__hash__()" values of str and bytes '
+ 'objects are\n'
+ ' “salted” with an unpredictable random value. Although '
+ 'they\n'
+ ' remain constant within an individual Python process, '
+ 'they are not\n'
+ ' predictable between repeated invocations of '
+ 'Python. This is\n'
+ ' intended to provide protection against a '
+ 'denial-of-service caused\n'
+ ' by carefully-chosen inputs that exploit the worst '
+ 'case\n'
+ ' performance of a dict insertion, O(n^2) complexity. '
+ 'See\n'
+ ' http://www.ocert.org/advisories/ocert-2011-003.html '
+ 'for\n'
' details. Changing hash values affects the iteration '
'order of sets.\n'
' Python has never made guarantees about this ordering '
@@ -3966,7 +4017,7 @@ topics = {'assert': 'The "assert" statement\n'
'is\n'
' first hit. The arguments are the same as for "break".\n'
'\n'
- 'cl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n'
+ 'cl(ear) [filename:lineno | bpnumber ...]\n'
'\n'
' With a *filename:lineno* argument, clear all the breakpoints '
'at\n'
@@ -3976,7 +4027,7 @@ topics = {'assert': 'The "assert" statement\n'
'first\n'
' ask confirmation).\n'
'\n'
- 'disable [bpnumber [bpnumber ...]]\n'
+ 'disable [bpnumber ...]\n'
'\n'
' Disable the breakpoints given as a space separated list of\n'
' breakpoint numbers. Disabling a breakpoint means it cannot '
@@ -3985,7 +4036,7 @@ topics = {'assert': 'The "assert" statement\n'
'breakpoint, it\n'
' remains in the list of breakpoints and can be (re-)enabled.\n'
'\n'
- 'enable [bpnumber [bpnumber ...]]\n'
+ 'enable [bpnumber ...]\n'
'\n'
' Enable the breakpoints specified.\n'
'\n'
@@ -4156,9 +4207,11 @@ topics = {'assert': 'The "assert" statement\n'
'its\n'
' value.\n'
'\n'
- ' Note: "print()" can also be used, but is not a debugger '
- 'command —\n'
- ' this executes the Python "print()" function.\n'
+ ' Note:\n'
+ '\n'
+ ' "print()" can also be used, but is not a debugger command — '
+ 'this\n'
+ ' executes the Python "print()" function.\n'
'\n'
'pp expression\n'
'\n'
@@ -4284,13 +4337,14 @@ topics = {'assert': 'The "assert" statement\n'
' the current environment).\n'
'\n'
'retval\n'
- 'Print the return value for the last return of a function.\n'
+ '\n'
+ ' Print the return value for the last return of a function.\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] Whether a frame is considered to originate in a certain '
- 'module\n'
- ' is determined by the "__name__" in the frame globals.\n',
+ 'module is\n'
+ ' determined by the "__name__" in the frame globals.\n',
'del': 'The "del" statement\n'
'*******************\n'
'\n'
@@ -4402,8 +4456,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'The "if" statement is used for conditional execution:\n'
'\n'
- ' if_stmt ::= "if" expression ":" suite\n'
- ' ("elif" expression ":" suite)*\n'
+ ' if_stmt ::= "if" assignment_expression ":" suite\n'
+ ' ("elif" assignment_expression ":" suite)*\n'
' ["else" ":" suite]\n'
'\n'
'It selects exactly one of the suites by evaluating the expressions '
@@ -4470,13 +4524,15 @@ topics = {'assert': 'The "assert" statement\n'
'about the\n'
'exceptional condition.\n'
'\n'
- 'Note: Exception messages are not part of the Python API. '
- 'Their\n'
- ' contents may change from one version of Python to the next '
- 'without\n'
- ' warning and should not be relied on by code which will run '
- 'under\n'
- ' multiple versions of the interpreter.\n'
+ 'Note:\n'
+ '\n'
+ ' Exception messages are not part of the Python API. Their '
+ 'contents\n'
+ ' may change from one version of Python to the next without '
+ 'warning\n'
+ ' and should not be relied on by code which will run under '
+ 'multiple\n'
+ ' versions of the interpreter.\n'
'\n'
'See also the description of the "try" statement in section The '
'try\n'
@@ -4486,10 +4542,9 @@ topics = {'assert': 'The "assert" statement\n'
'-[ Footnotes ]-\n'
'\n'
'[1] This limitation occurs because the code that is executed '
- 'by\n'
- ' these operations is not available at the time the module '
- 'is\n'
- ' compiled.\n',
+ 'by these\n'
+ ' operations is not available at the time the module is '
+ 'compiled.\n',
'execmodel': 'Execution model\n'
'***************\n'
'\n'
@@ -4511,9 +4566,13 @@ topics = {'assert': 'The "assert" statement\n'
'(a\n'
'command specified on the interpreter command line with the '
'"-c"\n'
- 'option) is a code block. The string argument passed to the '
- 'built-in\n'
- 'functions "eval()" and "exec()" is a code block.\n'
+ 'option) is a code block. A module run as a top level script (as '
+ 'module\n'
+ '"__main__") from the command line using a "-m" argument is also '
+ 'a code\n'
+ 'block. The string argument passed to the built-in functions '
+ '"eval()"\n'
+ 'and "exec()" is a code block.\n'
'\n'
'A code block is executed in an *execution frame*. A frame '
'contains\n'
@@ -4791,13 +4850,15 @@ topics = {'assert': 'The "assert" statement\n'
'about the\n'
'exceptional condition.\n'
'\n'
- 'Note: Exception messages are not part of the Python API. '
- 'Their\n'
- ' contents may change from one version of Python to the next '
- 'without\n'
- ' warning and should not be relied on by code which will run '
- 'under\n'
- ' multiple versions of the interpreter.\n'
+ 'Note:\n'
+ '\n'
+ ' Exception messages are not part of the Python API. Their '
+ 'contents\n'
+ ' may change from one version of Python to the next without '
+ 'warning\n'
+ ' and should not be relied on by code which will run under '
+ 'multiple\n'
+ ' versions of the interpreter.\n'
'\n'
'See also the description of the "try" statement in section The '
'try\n'
@@ -4806,11 +4867,10 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
- '[1] This limitation occurs because the code that is executed '
- 'by\n'
- ' these operations is not available at the time the module '
- 'is\n'
- ' compiled.\n',
+ '[1] This limitation occurs because the code that is executed by '
+ 'these\n'
+ ' operations is not available at the time the module is '
+ 'compiled.\n',
'exprlists': 'Expression lists\n'
'****************\n'
'\n'
@@ -4819,7 +4879,7 @@ topics = {'assert': 'The "assert" statement\n'
'[","]\n'
' starred_expression ::= expression | (starred_item ",")* '
'[starred_item]\n'
- ' starred_item ::= expression | "*" or_expr\n'
+ ' starred_item ::= assignment_expression | "*" or_expr\n'
'\n'
'Except when part of a list or set display, an expression list\n'
'containing at least one comma yields a tuple. The length of '
@@ -4929,8 +4989,11 @@ topics = {'assert': 'The "assert" statement\n'
'i\n'
':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n'
'\n'
- 'Note: There is a subtlety when the sequence is being modified by the\n'
- ' loop (this can only occur for mutable sequences, e.g. lists). An\n'
+ 'Note:\n'
+ '\n'
+ ' There is a subtlety when the sequence is being modified by the '
+ 'loop\n'
+ ' (this can only occur for mutable sequences, e.g. lists). An\n'
' internal counter is used to keep track of which item is used next,\n'
' and this is incremented on each iteration. When this counter has\n'
' reached the length of the sequence the loop terminates. This '
@@ -5129,11 +5192,11 @@ topics = {'assert': 'The "assert" statement\n'
'only\n'
'supported by the numeric types.\n'
'\n'
- 'A general convention is that an empty format string ("""") '
+ 'A general convention is that an empty format specification '
'produces\n'
'the same result as if you had called "str()" on the value. '
'A non-empty\n'
- 'format string typically modifies the result.\n'
+ 'format specification typically modifies the result.\n'
'\n'
'The general form of a *standard format specifier* is:\n'
'\n'
@@ -5244,24 +5307,23 @@ topics = {'assert': 'The "assert" statement\n'
'for the\n'
'conversion. The alternate form is defined differently for '
'different\n'
- 'types. This option is only valid for integer, float, '
- 'complex and\n'
- 'Decimal types. For integers, when binary, octal, or '
- 'hexadecimal output\n'
- 'is used, this option adds the prefix respective "\'0b\'", '
- '"\'0o\'", or\n'
- '"\'0x\'" to the output value. For floats, complex and '
- 'Decimal the\n'
- 'alternate form causes the result of the conversion to '
- 'always contain a\n'
- 'decimal-point character, even if no digits follow it. '
- 'Normally, a\n'
- 'decimal-point character appears in the result of these '
- 'conversions\n'
- 'only if a digit follows it. In addition, for "\'g\'" and '
- '"\'G\'"\n'
- 'conversions, trailing zeros are not removed from the '
- 'result.\n'
+ 'types. This option is only valid for integer, float and '
+ 'complex\n'
+ 'types. For integers, when binary, octal, or hexadecimal '
+ 'output is\n'
+ 'used, this option adds the prefix respective "\'0b\'", '
+ '"\'0o\'", or "\'0x\'"\n'
+ 'to the output value. For float and complex the alternate '
+ 'form causes\n'
+ 'the result of the conversion to always contain a '
+ 'decimal-point\n'
+ 'character, even if no digits follow it. Normally, a '
+ 'decimal-point\n'
+ 'character appears in the result of these conversions only '
+ 'if a digit\n'
+ 'follows it. In addition, for "\'g\'" and "\'G\'" '
+ 'conversions, trailing\n'
+ 'zeros are not removed from the result.\n'
'\n'
'The "\',\'" option signals the use of a comma for a '
'thousands separator.\n'
@@ -5399,9 +5461,8 @@ topics = {'assert': 'The "assert" statement\n'
'the integer\n'
'to a floating point number before formatting.\n'
'\n'
- 'The available presentation types for floating point and '
- 'decimal values\n'
- 'are:\n'
+ 'The available presentation types for "float" and "Decimal" '
+ 'values are:\n'
'\n'
' '
'+-----------+------------------------------------------------------------+\n'
@@ -5410,24 +5471,50 @@ topics = {'assert': 'The "assert" statement\n'
'|\n'
' '
'|===========|============================================================|\n'
- ' | "\'e\'" | Exponent notation. Prints the number in '
- 'scientific |\n'
- ' | | notation using the letter ‘e’ to indicate '
- 'the exponent. |\n'
- ' | | The default precision is '
- '"6". |\n'
+ ' | "\'e\'" | Scientific notation. For a given '
+ 'precision "p", formats |\n'
+ ' | | the number in scientific notation with the '
+ 'letter ‘e’ |\n'
+ ' | | separating the coefficient from the '
+ 'exponent. The |\n'
+ ' | | coefficient has one digit before and "p" '
+ 'digits after the |\n'
+ ' | | decimal point, for a total of "p + 1" '
+ 'significant digits. |\n'
+ ' | | With no precision given, uses a precision '
+ 'of "6" digits |\n'
+ ' | | after the decimal point for "float", and '
+ 'shows all |\n'
+ ' | | coefficient digits for "Decimal". If no '
+ 'digits follow the |\n'
+ ' | | decimal point, the decimal point is also '
+ 'removed unless |\n'
+ ' | | the "#" option is '
+ 'used. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
- ' | "\'E\'" | Exponent notation. Same as "\'e\'" '
- 'except it uses an upper |\n'
+ ' | "\'E\'" | Scientific notation. Same as "\'e\'" '
+ 'except it uses an upper |\n'
' | | case ‘E’ as the separator '
'character. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
- ' | "\'f\'" | Fixed-point notation. Displays the '
- 'number as a fixed-point |\n'
- ' | | number. The default precision is '
- '"6". |\n'
+ ' | "\'f\'" | Fixed-point notation. For a given '
+ 'precision "p", formats |\n'
+ ' | | the number as a decimal number with '
+ 'exactly "p" digits |\n'
+ ' | | following the decimal point. With no '
+ 'precision given, uses |\n'
+ ' | | a precision of "6" digits after the '
+ 'decimal point for |\n'
+ ' | | "float", and uses a precision large enough '
+ 'to show all |\n'
+ ' | | coefficient digits for "Decimal". If no '
+ 'digits follow the |\n'
+ ' | | decimal point, the decimal point is also '
+ 'removed unless |\n'
+ ' | | the "#" option is '
+ 'used. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'F\'" | Fixed-point notation. Same as "\'f\'", '
@@ -5473,9 +5560,14 @@ topics = {'assert': 'The "assert" statement\n'
' | | regardless of the precision. A precision '
'of "0" is |\n'
' | | treated as equivalent to a precision of '
- '"1". The default |\n'
- ' | | precision is '
- '"6". |\n'
+ '"1". With no |\n'
+ ' | | precision given, uses a precision of "6" '
+ 'significant |\n'
+ ' | | digits for "float", and shows all '
+ 'coefficient digits for |\n'
+ ' | | '
+ '"Decimal". '
+ '|\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'G\'" | General format. Same as "\'g\'" except '
@@ -5688,8 +5780,8 @@ topics = {'assert': 'The "assert" statement\n'
'[parameter_list] ")"\n'
' ["->" expression] ":" suite\n'
' decorators ::= decorator+\n'
- ' decorator ::= "@" dotted_name ["(" '
- '[argument_list [","]] ")"] NEWLINE\n'
+ ' decorator ::= "@" assignment_expression '
+ 'NEWLINE\n'
' dotted_name ::= identifier ("." identifier)*\n'
' parameter_list ::= defparameter ("," '
'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n'
@@ -5743,6 +5835,11 @@ topics = {'assert': 'The "assert" statement\n'
'the name\n'
'"func".\n'
'\n'
+ 'Changed in version 3.9: Functions may be decorated with any '
+ 'valid\n'
+ '"assignment_expression". Previously, the grammar was much more\n'
+ 'restrictive; see **PEP 614** for details.\n'
+ '\n'
'When one or more *parameters* have the form *parameter* "="\n'
'*expression*, the function is said to have “default parameter '
'values.”\n'
@@ -5933,25 +6030,26 @@ topics = {'assert': 'The "assert" statement\n'
'defined.\n'
' See section The import statement.\n'
'\n'
- ' Note: The name "_" is often used in conjunction with\n'
+ ' Note:\n'
+ '\n'
+ ' The name "_" is often used in conjunction with\n'
' internationalization; refer to the documentation for the\n'
' "gettext" module for more information on this '
'convention.\n'
'\n'
'"__*__"\n'
- ' System-defined names. These names are defined by the '
- 'interpreter\n'
- ' and its implementation (including the standard library). '
- 'Current\n'
- ' system names are discussed in the Special method names '
- 'section and\n'
- ' elsewhere. More will likely be defined in future versions '
- 'of\n'
- ' Python. *Any* use of "__*__" names, in any context, that '
- 'does not\n'
- ' follow explicitly documented use, is subject to breakage '
- 'without\n'
- ' warning.\n'
+ ' System-defined names, informally known as “dunder” names. '
+ 'These\n'
+ ' names are defined by the interpreter and its '
+ 'implementation\n'
+ ' (including the standard library). Current system names are\n'
+ ' discussed in the Special method names section and '
+ 'elsewhere. More\n'
+ ' will likely be defined in future versions of Python. *Any* '
+ 'use of\n'
+ ' "__*__" names, in any context, that does not follow '
+ 'explicitly\n'
+ ' documented use, is subject to breakage without warning.\n'
'\n'
'"__*"\n'
' Class-private names. Names in this category, when used '
@@ -6038,8 +6136,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'A non-normative HTML file listing all valid identifier '
'characters for\n'
- 'Unicode 4.1 can be found at https://www.dcl.hpi.uni-\n'
- 'potsdam.de/home/loewis/table-3131.html.\n'
+ 'Unicode 4.1 can be found at\n'
+ 'https://www.unicode.org/Public/13.0.0/ucd/DerivedCoreProperties.txt\n'
'\n'
'\n'
'Keywords\n'
@@ -6080,26 +6178,28 @@ topics = {'assert': 'The "assert" statement\n'
'defined.\n'
' See section The import statement.\n'
'\n'
- ' Note: The name "_" is often used in conjunction with\n'
+ ' Note:\n'
+ '\n'
+ ' The name "_" is often used in conjunction with\n'
' internationalization; refer to the documentation for '
'the\n'
' "gettext" module for more information on this '
'convention.\n'
'\n'
'"__*__"\n'
- ' System-defined names. These names are defined by the '
- 'interpreter\n'
- ' and its implementation (including the standard library). '
- 'Current\n'
- ' system names are discussed in the Special method names '
- 'section and\n'
- ' elsewhere. More will likely be defined in future versions '
- 'of\n'
- ' Python. *Any* use of "__*__" names, in any context, that '
- 'does not\n'
- ' follow explicitly documented use, is subject to breakage '
- 'without\n'
- ' warning.\n'
+ ' System-defined names, informally known as “dunder” names. '
+ 'These\n'
+ ' names are defined by the interpreter and its '
+ 'implementation\n'
+ ' (including the standard library). Current system names '
+ 'are\n'
+ ' discussed in the Special method names section and '
+ 'elsewhere. More\n'
+ ' will likely be defined in future versions of Python. '
+ '*Any* use of\n'
+ ' "__*__" names, in any context, that does not follow '
+ 'explicitly\n'
+ ' documented use, is subject to breakage without warning.\n'
'\n'
'"__*"\n'
' Class-private names. Names in this category, when used '
@@ -6114,8 +6214,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'The "if" statement is used for conditional execution:\n'
'\n'
- ' if_stmt ::= "if" expression ":" suite\n'
- ' ("elif" expression ":" suite)*\n'
+ ' if_stmt ::= "if" assignment_expression ":" suite\n'
+ ' ("elif" assignment_expression ":" suite)*\n'
' ["else" ":" suite]\n'
'\n'
'It selects exactly one of the suites by evaluating the expressions '
@@ -6165,8 +6265,9 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'1. find a module, loading and initializing it if necessary\n'
'\n'
- '2. define a name or names in the local namespace for the scope\n'
- ' where the "import" statement occurs.\n'
+ '2. define a name or names in the local namespace for the scope '
+ 'where\n'
+ ' the "import" statement occurs.\n'
'\n'
'When the statement contains multiple clauses (separated by commas) '
'the\n'
@@ -6192,8 +6293,9 @@ topics = {'assert': 'The "assert" statement\n'
'made\n'
'available in the local namespace in one of three ways:\n'
'\n'
- '* If the module name is followed by "as", then the name following\n'
- ' "as" is bound directly to the imported module.\n'
+ '* If the module name is followed by "as", then the name following '
+ '"as"\n'
+ ' is bound directly to the imported module.\n'
'\n'
'* If no other name is specified, and the module being imported is '
'a\n'
@@ -6342,8 +6444,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'* other future statements.\n'
'\n'
- 'The only feature in Python 3.7 that requires using the future\n'
- 'statement is "annotations".\n'
+ 'The only feature that requires using the future statement is\n'
+ '"annotations" (see **PEP 563**).\n'
'\n'
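           + 'For example:\n'
           + '\n'
           + '   from __future__ import annotations\n'
           + '\n'
           + 'With this future statement, annotations are no longer evaluated '
           + 'at\n'
           + 'function definition time; they are stored as string literals\n'
           + 'instead.\n'
           + '\n'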
'All historical features enabled by the future statement are still\n'
'recognized by Python 3. The list includes "absolute_import",\n'
@@ -6871,15 +6973,18 @@ topics = {'assert': 'The "assert" statement\n'
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
- ' Note: If the right operand’s type is a subclass of the '
- 'left\n'
- ' operand’s type and that subclass provides the '
- 'reflected method\n'
- ' for the operation, this method will be called before '
- 'the left\n'
- ' operand’s non-reflected method. This behavior allows '
- 'subclasses\n'
- ' to override their ancestors’ operations.\n'
+ ' Note:\n'
+ '\n'
+ ' If the right operand’s type is a subclass of the left '
+ 'operand’s\n'
+ ' type and that subclass provides a different '
+ 'implementation of the\n'
+ ' reflected method for the operation, this method will '
+ 'be called\n'
+ ' before the left operand’s non-reflected method. This '
+ 'behavior\n'
+ ' allows subclasses to override their ancestors’ '
+ 'operations.\n'
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
@@ -6923,6 +7028,16 @@ topics = {'assert': 'The "assert" statement\n'
'the data\n'
' model.\n'
'\n'
+ ' Note:\n'
+ '\n'
+ ' Due to a bug in the dispatching mechanism for "**=", a '
+ 'class that\n'
+ ' defines "__ipow__()" but returns "NotImplemented" '
+ 'would fail to\n'
+ ' fall back to "x.__pow__(y)" and "y.__rpow__(x)". This '
+ 'bug is\n'
+ ' fixed in Python 3.10.\n'
+ '\n'
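           +       '     For example (an illustrative sketch):\n'
           +       '\n'
           +       '        class Num:\n'
           +       '            def __ipow__(self, other):\n'
           +       '                return NotImplemented\n'
           +       '            def __pow__(self, other):\n'
           +       '                return 42\n'
           +       '\n'
           +       '     Before the fix, "x **= 2" with "x = Num()" raises\n'
           +       '     "TypeError" instead of falling back to "x.__pow__(2)".\n'
           +       '\n'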
'object.__neg__(self)\n'
'object.__pos__(self)\n'
'object.__abs__(self)\n'
@@ -6984,7 +7099,7 @@ topics = {'assert': 'The "assert" statement\n'
'program is represented by objects or by relations between '
'objects. (In\n'
'a sense, and in conformance to Von Neumann’s model of a “stored\n'
- 'program computer,” code is also represented by objects.)\n'
+ 'program computer”, code is also represented by objects.)\n'
'\n'
'Every object has an identity, a type and a value. An object’s\n'
'*identity* never changes once it has been created; you may think '
@@ -7199,8 +7314,8 @@ topics = {'assert': 'The "assert" statement\n'
'-[ Footnotes ]-\n'
'\n'
'[1] While "abs(x%y) < abs(y)" is true mathematically, '
- 'for floats\n'
- ' it may not be true numerically due to roundoff. For '
+ 'for floats it\n'
+ ' may not be true numerically due to roundoff. For '
'example, and\n'
' assuming a platform on which a Python float is an '
'IEEE 754 double-\n'
@@ -7265,22 +7380,22 @@ topics = {'assert': 'The "assert" statement\n'
'"unicodedata.normalize()".\n'
'\n'
'[4] Due to automatic garbage-collection, free lists, and '
- 'the\n'
- ' dynamic nature of descriptors, you may notice '
- 'seemingly unusual\n'
- ' behaviour in certain uses of the "is" operator, like '
- 'those\n'
- ' involving comparisons between instance methods, or '
- 'constants.\n'
- ' Check their documentation for more info.\n'
+ 'the dynamic\n'
+ ' nature of descriptors, you may notice seemingly '
+ 'unusual behaviour\n'
+ ' in certain uses of the "is" operator, like those '
+ 'involving\n'
+ ' comparisons between instance methods, or constants. '
+ 'Check their\n'
+ ' documentation for more info.\n'
'\n'
'[5] The "%" operator is also used for string formatting; '
'the same\n'
' precedence applies.\n'
'\n'
'[6] The power operator "**" binds less tightly than an '
- 'arithmetic\n'
- ' or bitwise unary operator on its right, that is, '
+ 'arithmetic or\n'
+ ' bitwise unary operator on its right, that is, '
'"2**-1" is "0.5".\n',
'pass': 'The "pass" statement\n'
'********************\n'
@@ -7570,9 +7685,11 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' New in version 3.4.\n'
'\n'
- 'Note: Slicing is done exclusively with the following three '
- 'methods.\n'
- ' A call like\n'
+ 'Note:\n'
+ '\n'
+ ' Slicing is done exclusively with the following three '
+ 'methods. A\n'
+ ' call like\n'
'\n'
' a[1:2] = b\n'
'\n'
@@ -7603,7 +7720,9 @@ topics = {'assert': 'The "assert" statement\n'
'the\n'
' container), "KeyError" should be raised.\n'
'\n'
- ' Note: "for" loops expect that an "IndexError" will be '
+ ' Note:\n'
+ '\n'
+ ' "for" loops expect that an "IndexError" will be '
'raised for\n'
' illegal indexes to allow proper detection of the end '
'of the\n'
@@ -7839,26 +7958,26 @@ topics = {'assert': 'The "assert" statement\n'
'-[ Footnotes ]-\n'
'\n'
'[1] Additional information on these special methods may be '
- 'found\n'
- ' in the Python Reference Manual (Basic customization).\n'
+ 'found in\n'
+ ' the Python Reference Manual (Basic customization).\n'
'\n'
'[2] As a consequence, the list "[1, 2]" is considered equal '
- 'to\n'
- ' "[1.0, 2.0]", and similarly for tuples.\n'
+ 'to "[1.0,\n'
+ ' 2.0]", and similarly for tuples.\n'
'\n'
'[3] They must have since the parser can’t tell the type of '
'the\n'
' operands.\n'
'\n'
'[4] Cased characters are those with general category '
- 'property\n'
- ' being one of “Lu” (Letter, uppercase), “Ll” (Letter, '
- 'lowercase),\n'
- ' or “Lt” (Letter, titlecase).\n'
- '\n'
- '[5] To format only a tuple you should therefore provide a\n'
- ' singleton tuple whose only element is the tuple to be '
- 'formatted.\n',
+ 'property being\n'
+ ' one of “Lu” (Letter, uppercase), “Ll” (Letter, '
+ 'lowercase), or “Lt”\n'
+ ' (Letter, titlecase).\n'
+ '\n'
+ '[5] To format only a tuple you should therefore provide a '
+ 'singleton\n'
+ ' tuple whose only element is the tuple to be formatted.\n',
'specialnames': 'Special method names\n'
'********************\n'
'\n'
@@ -8003,7 +8122,9 @@ topics = {'assert': 'The "assert" statement\n'
'for\n'
' objects that still exist when the interpreter exits.\n'
'\n'
- ' Note: "del x" doesn’t directly call "x.__del__()" — the '
+ ' Note:\n'
+ '\n'
+ ' "del x" doesn’t directly call "x.__del__()" — the '
'former\n'
' decrements the reference count for "x" by one, and the '
'latter is\n'
@@ -8027,12 +8148,15 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' See also: Documentation for the "gc" module.\n'
'\n'
- ' Warning: Due to the precarious circumstances under which\n'
- ' "__del__()" methods are invoked, exceptions that occur '
- 'during\n'
- ' their execution are ignored, and a warning is printed '
- 'to\n'
- ' "sys.stderr" instead. In particular:\n'
+ ' Warning:\n'
+ '\n'
+ ' Due to the precarious circumstances under which '
+ '"__del__()"\n'
+ ' methods are invoked, exceptions that occur during their '
+ 'execution\n'
+ ' are ignored, and a warning is printed to "sys.stderr" '
+ 'instead.\n'
+ ' In particular:\n'
'\n'
' * "__del__()" can be invoked when arbitrary code is '
'being\n'
@@ -8045,22 +8169,20 @@ topics = {'assert': 'The "assert" statement\n'
' that gets interrupted to execute "__del__()".\n'
'\n'
' * "__del__()" can be executed during interpreter '
- 'shutdown. As\n'
- ' a consequence, the global variables it needs to '
- 'access\n'
- ' (including other modules) may already have been '
- 'deleted or set\n'
- ' to "None". Python guarantees that globals whose name '
- 'begins\n'
- ' with a single underscore are deleted from their '
- 'module before\n'
- ' other globals are deleted; if no other references to '
- 'such\n'
- ' globals exist, this may help in assuring that '
- 'imported modules\n'
- ' are still available at the time when the "__del__()" '
- 'method is\n'
- ' called.\n'
+ 'shutdown. As a\n'
+ ' consequence, the global variables it needs to access '
+ '(including\n'
+ ' other modules) may already have been deleted or set '
+ 'to "None".\n'
+ ' Python guarantees that globals whose name begins with '
+ 'a single\n'
+ ' underscore are deleted from their module before other '
+ 'globals\n'
+ ' are deleted; if no other references to such globals '
+ 'exist, this\n'
+ ' may help in assuring that imported modules are still '
+ 'available\n'
+ ' at the time when the "__del__()" method is called.\n'
'\n'
'object.__repr__(self)\n'
'\n'
@@ -8145,7 +8267,7 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' Changed in version 3.7: "object.__format__(x, \'\')" is '
'now\n'
- ' equivalent to "str(x)" rather than "format(str(self), '
+ ' equivalent to "str(x)" rather than "format(str(x), '
'\'\')".\n'
'\n'
'object.__lt__(self, other)\n'
@@ -8179,16 +8301,21 @@ topics = {'assert': 'The "assert" statement\n'
' on the value to determine if the result is true or '
'false.\n'
'\n'
- ' By default, "__ne__()" delegates to "__eq__()" and '
- 'inverts the\n'
- ' result unless it is "NotImplemented". There are no other '
- 'implied\n'
- ' relationships among the comparison operators, for '
- 'example, the\n'
- ' truth of "(x<y or x==y)" does not imply "x<=y". To '
- 'automatically\n'
- ' generate ordering operations from a single root '
- 'operation, see\n'
+ ' By default, "object" implements "__eq__()" by using "is", '
+ 'returning\n'
+ ' "NotImplemented" in the case of a false comparison: "True '
+ 'if x is y\n'
+ ' else NotImplemented". For "__ne__()", by default it '
+ 'delegates to\n'
+ ' "__eq__()" and inverts the result unless it is '
+ '"NotImplemented".\n'
+ ' There are no other implied relationships among the '
+ 'comparison\n'
+ ' operators or default implementations; for example, the '
+ 'truth of\n'
+ ' "(x<y or x==y)" does not imply "x<=y". To automatically '
+ 'generate\n'
+ ' ordering operations from a single root operation, see\n'
' "functools.total_ordering()".\n'
'\n'
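           +       '   For example, with the default implementations (the class '
           +       'name\n'
           +       '   is a placeholder):\n'
           +       '\n'
           +       '      >>> class Box:\n'
           +       '      ...     pass\n'
           +       '      >>> a, b = Box(), Box()\n'
           +       '      >>> a == a, a == b\n'
           +       '      (True, False)\n'
           +       '\n'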
' See the paragraph on "__hash__()" for some important '
@@ -8236,19 +8363,21 @@ topics = {'assert': 'The "assert" statement\n'
' def __hash__(self):\n'
' return hash((self.name, self.nick, self.color))\n'
'\n'
- ' Note: "hash()" truncates the value returned from an '
- 'object’s\n'
- ' custom "__hash__()" method to the size of a '
- '"Py_ssize_t". This\n'
- ' is typically 8 bytes on 64-bit builds and 4 bytes on '
- '32-bit\n'
- ' builds. If an object’s "__hash__()" must interoperate '
- 'on builds\n'
- ' of different bit sizes, be sure to check the width on '
- 'all\n'
- ' supported builds. An easy way to do this is with '
- '"python -c\n'
- ' "import sys; print(sys.hash_info.width)"".\n'
+ ' Note:\n'
+ '\n'
+ ' "hash()" truncates the value returned from an object’s '
+ 'custom\n'
+ ' "__hash__()" method to the size of a "Py_ssize_t". '
+ 'This is\n'
+ ' typically 8 bytes on 64-bit builds and 4 bytes on '
+ '32-bit builds.\n'
+ ' If an object’s "__hash__()" must interoperate on '
+ 'builds of\n'
+ ' different bit sizes, be sure to check the width on all '
+ 'supported\n'
+ ' builds. An easy way to do this is with "python -c '
+ '"import sys;\n'
+ ' print(sys.hash_info.width)"".\n'
'\n'
' If a class does not define an "__eq__()" method it should '
'not\n'
@@ -8304,21 +8433,22 @@ topics = {'assert': 'The "assert" statement\n'
' hashable by an "isinstance(obj, '
'collections.abc.Hashable)" call.\n'
'\n'
- ' Note: By default, the "__hash__()" values of str and '
- 'bytes\n'
- ' objects are “salted” with an unpredictable random '
- 'value.\n'
- ' Although they remain constant within an individual '
- 'Python\n'
- ' process, they are not predictable between repeated '
- 'invocations of\n'
- ' Python.This is intended to provide protection against a '
- 'denial-\n'
- ' of-service caused by carefully-chosen inputs that '
- 'exploit the\n'
- ' worst case performance of a dict insertion, O(n^2) '
- 'complexity.\n'
- ' See http://www.ocert.org/advisories/ocert-2011-003.html '
+ ' Note:\n'
+ '\n'
+ ' By default, the "__hash__()" values of str and bytes '
+ 'objects are\n'
+ ' “salted” with an unpredictable random value. Although '
+ 'they\n'
+ ' remain constant within an individual Python process, '
+ 'they are not\n'
           +       'predictable between repeated invocations of Python. This '
+ 'is\n'
+ ' intended to provide protection against a '
+ 'denial-of-service caused\n'
+ ' by carefully-chosen inputs that exploit the worst case\n'
+ ' performance of a dict insertion, O(n^2) complexity. '
+ 'See\n'
+ ' http://www.ocert.org/advisories/ocert-2011-003.html '
'for\n'
           '       details. Changing hash values affects the iteration '
'order of sets.\n'
@@ -8407,11 +8537,17 @@ topics = {'assert': 'The "assert" statement\n'
'needs, for\n'
' example, "object.__getattribute__(self, name)".\n'
'\n'
- ' Note: This method may still be bypassed when looking up '
- 'special\n'
- ' methods as the result of implicit invocation via '
- 'language syntax\n'
- ' or built-in functions. See Special method lookup.\n'
+ ' Note:\n'
+ '\n'
+ ' This method may still be bypassed when looking up '
+ 'special methods\n'
+ ' as the result of implicit invocation via language '
+ 'syntax or\n'
+ ' built-in functions. See Special method lookup.\n'
+ '\n'
+ ' For certain sensitive attribute accesses, raises an '
+ 'auditing event\n'
+ ' "object.__getattr__" with arguments "obj" and "name".\n'
'\n'
'object.__setattr__(self, name, value)\n'
'\n'
@@ -8429,6 +8565,11 @@ topics = {'assert': 'The "assert" statement\n'
'example,\n'
' "object.__setattr__(self, name, value)".\n'
'\n'
+ ' For certain sensitive attribute assignments, raises an '
+ 'auditing\n'
+ ' event "object.__setattr__" with arguments "obj", "name", '
+ '"value".\n'
+ '\n'
'object.__delattr__(self, name)\n'
'\n'
' Like "__setattr__()" but for attribute deletion instead '
@@ -8437,6 +8578,10 @@ topics = {'assert': 'The "assert" statement\n'
'obj.name" is\n'
' meaningful for the object.\n'
'\n'
+ ' For certain sensitive attribute deletions, raises an '
+ 'auditing event\n'
+ ' "object.__delattr__" with arguments "obj" and "name".\n'
+ '\n'
'object.__dir__(self)\n'
'\n'
' Called when "dir()" is called on the object. A sequence '
@@ -8495,15 +8640,16 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' sys.modules[__name__].__class__ = VerboseModule\n'
'\n'
- 'Note: Defining module "__getattr__" and setting module '
- '"__class__"\n'
- ' only affect lookups made using the attribute access syntax '
- '–\n'
- ' directly accessing the module globals (whether by code '
- 'within the\n'
- ' module, or via a reference to the module’s globals '
- 'dictionary) is\n'
- ' unaffected.\n'
+ 'Note:\n'
+ '\n'
+ ' Defining module "__getattr__" and setting module '
+ '"__class__" only\n'
+ ' affect lookups made using the attribute access syntax – '
+ 'directly\n'
+ ' accessing the module globals (whether by code within the '
+ 'module, or\n'
+ ' via a reference to the module’s globals dictionary) is '
+ 'unaffected.\n'
'\n'
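           +       'For example, a minimal module "__getattr__" (the module and\n'
           +       'attribute names are illustrative):\n'
           +       '\n'
           +       '   # mod.py\n'
           +       '   def __getattr__(name):\n'
           +       "       if name == 'answer':\n"
           +       '           return 42\n'
           +       '       raise AttributeError(name)\n'
           +       '\n'
           +       'After "import mod", the lookup "mod.answer" returns "42", '
           +       'while\n'
           +       'direct access to "mod.__dict__" remains unaffected.\n'
           +       '\n'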
'Changed in version 3.5: "__class__" module attribute is now '
'writable.\n'
@@ -8586,12 +8732,14 @@ topics = {'assert': 'The "assert" statement\n'
'The\n'
' descriptor has been assigned to *name*.\n'
'\n'
- ' Note: "__set_name__()" is only called implicitly as part '
- 'of the\n'
- ' "type" constructor, so it will need to be called '
- 'explicitly with\n'
- ' the appropriate parameters when a descriptor is added '
- 'to a class\n'
+ ' Note:\n'
+ '\n'
+ ' "__set_name__()" is only called implicitly as part of '
+ 'the "type"\n'
+ ' constructor, so it will need to be called explicitly '
+ 'with the\n'
+ ' appropriate parameters when a descriptor is added to a '
+ 'class\n'
' after initial creation:\n'
'\n'
' class A:\n'
@@ -8696,12 +8844,13 @@ topics = {'assert': 'The "assert" statement\n'
'both\n'
'"__get__()" and "__set__()", while non-data descriptors have '
'just the\n'
- '"__get__()" method. Data descriptors with "__set__()" and '
- '"__get__()"\n'
- 'defined always override a redefinition in an instance '
- 'dictionary. In\n'
- 'contrast, non-data descriptors can be overridden by '
- 'instances.\n'
+ '"__get__()" method. Data descriptors with "__get__()" and '
+ '"__set__()"\n'
+ '(and/or "__delete__()") defined always override a '
+ 'redefinition in an\n'
+ 'instance dictionary. In contrast, non-data descriptors can '
+ 'be\n'
+ 'overridden by instances.\n'
'\n'
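           +       'For example (a minimal sketch; the names are placeholders):\n'
           +       '\n'
           +       '   class Ten:  # data descriptor\n'
           +       '       def __get__(self, obj, objtype=None):\n'
           +       '           return 10\n'
           +       '       def __set__(self, obj, value):\n'
           +       "           raise AttributeError('read-only')\n"
           +       '\n'
           +       '   class A:\n'
           +       '       x = Ten()\n'
           +       '\n'
           +       'For an instance "a = A()", the lookup "a.x" always yields '
           +       '"10";\n'
           +       'an "x" entry placed directly in "a.__dict__" cannot shadow '
           +       'the\n'
           +       'data descriptor.\n'
           +       '\n'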
'Python methods (including "staticmethod()" and '
'"classmethod()") are\n'
@@ -8749,10 +8898,9 @@ topics = {'assert': 'The "assert" statement\n'
'~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
'\n'
'* When inheriting from a class without *__slots__*, the '
- '*__dict__*\n'
- ' and *__weakref__* attribute of the instances will always '
- 'be\n'
- ' accessible.\n'
+ '*__dict__* and\n'
+ ' *__weakref__* attribute of the instances will always be '
+ 'accessible.\n'
'\n'
'* Without a *__dict__* variable, instances cannot be '
'assigned new\n'
@@ -8766,13 +8914,12 @@ topics = {'assert': 'The "assert" statement\n'
' declaration.\n'
'\n'
'* Without a *__weakref__* variable for each instance, '
- 'classes\n'
- ' defining *__slots__* do not support weak references to '
- 'its\n'
- ' instances. If weak reference support is needed, then add\n'
- ' "\'__weakref__\'" to the sequence of strings in the '
- '*__slots__*\n'
- ' declaration.\n'
+ 'classes defining\n'
+ ' *__slots__* do not support weak references to its '
+ 'instances. If weak\n'
+ ' reference support is needed, then add "\'__weakref__\'" to '
+ 'the\n'
+ ' sequence of strings in the *__slots__* declaration.\n'
'\n'
'* *__slots__* are implemented at the class level by '
'creating\n'
@@ -8785,23 +8932,22 @@ topics = {'assert': 'The "assert" statement\n'
' attribute would overwrite the descriptor assignment.\n'
'\n'
'* The action of a *__slots__* declaration is not limited to '
- 'the\n'
- ' class where it is defined. *__slots__* declared in '
- 'parents are\n'
- ' available in child classes. However, child subclasses will '
- 'get a\n'
- ' *__dict__* and *__weakref__* unless they also define '
- '*__slots__*\n'
- ' (which should only contain names of any *additional* '
- 'slots).\n'
+ 'the class\n'
+ ' where it is defined. *__slots__* declared in parents are '
+ 'available\n'
+ ' in child classes. However, child subclasses will get a '
+ '*__dict__*\n'
+ ' and *__weakref__* unless they also define *__slots__* '
+ '(which should\n'
+ ' only contain names of any *additional* slots).\n'
'\n'
'* If a class defines a slot also defined in a base class, '
- 'the\n'
- ' instance variable defined by the base class slot is '
- 'inaccessible\n'
- ' (except by retrieving its descriptor directly from the '
- 'base class).\n'
- ' This renders the meaning of the program undefined. In the '
+ 'the instance\n'
+ ' variable defined by the base class slot is inaccessible '
+ '(except by\n'
+ ' retrieving its descriptor directly from the base class). '
+ 'This\n'
+ ' renders the meaning of the program undefined. In the '
'future, a\n'
' check may be added to prevent this.\n'
'\n'
@@ -8811,9 +8957,9 @@ topics = {'assert': 'The "assert" statement\n'
'and "tuple".\n'
'\n'
'* Any non-string iterable may be assigned to *__slots__*. '
- 'Mappings\n'
- ' may also be used; however, in the future, special meaning '
- 'may be\n'
+ 'Mappings may\n'
+ ' also be used; however, in the future, special meaning may '
+ 'be\n'
' assigned to the values corresponding to each key.\n'
'\n'
'* *__class__* assignment works only if both classes have the '
@@ -8829,8 +8975,8 @@ topics = {'assert': 'The "assert" statement\n'
' raise "TypeError".\n'
'\n'
'* If an iterator is used for *__slots__* then a descriptor '
- 'is\n'
- ' created for each of the iterator’s values. However, the '
+ 'is created\n'
+ ' for each of the iterator’s values. However, the '
'*__slots__*\n'
' attribute will be an empty iterator.\n'
'\n'
@@ -8883,9 +9029,11 @@ topics = {'assert': 'The "assert" statement\n'
'does nothing,\n'
' but raises an error if it is called with any arguments.\n'
'\n'
- ' Note: The metaclass hint "metaclass" is consumed by the '
- 'rest of\n'
- ' the type machinery, and is never passed to '
+ ' Note:\n'
+ '\n'
+ ' The metaclass hint "metaclass" is consumed by the rest '
+ 'of the\n'
+ ' type machinery, and is never passed to '
'"__init_subclass__"\n'
' implementations. The actual metaclass (rather than the '
'explicit\n'
@@ -8953,9 +9101,10 @@ topics = {'assert': 'The "assert" statement\n'
'tuple may\n'
'be empty, in such case the original base is ignored.\n'
'\n'
- 'See also: **PEP 560** - Core support for typing module and '
- 'generic\n'
- ' types\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 560** - Core support for typing module and generic '
+ 'types\n'
'\n'
'\n'
'Determining the appropriate metaclass\n'
@@ -9012,7 +9161,7 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'If the metaclass has no "__prepare__" attribute, then the '
'class\n'
- 'namespace is initialised as an empty "dict()".\n'
+ 'namespace is initialised as an empty ordered mapping.\n'
'\n'
'See also:\n'
'\n'
@@ -9213,9 +9362,10 @@ topics = {'assert': 'The "assert" statement\n'
'type hints,\n'
'other usage is discouraged.\n'
'\n'
- 'See also: **PEP 560** - Core support for typing module and '
- 'generic\n'
- ' types\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 560** - Core support for typing module and generic '
+ 'types\n'
'\n'
'\n'
'Emulating callable objects\n'
@@ -9225,8 +9375,8 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' Called when the instance is “called” as a function; if '
'this method\n'
- ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
- ' "x.__call__(arg1, arg2, ...)".\n'
+ ' is defined, "x(arg1, arg2, ...)" roughly translates to\n'
+ ' "type(x).__call__(x, arg1, ...)".\n'
'\n'
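           +       '   For example:\n'
           +       '\n'
           +       '      >>> class Adder:\n'
           +       '      ...     def __call__(self, a, b):\n'
           +       '      ...         return a + b\n'
           +       '      >>> add = Adder()\n'
           +       '      >>> add(2, 3)\n'
           +       '      5\n'
           +       '\n'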
'\n'
'Emulating container types\n'
@@ -9327,9 +9477,11 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' New in version 3.4.\n'
'\n'
- 'Note: Slicing is done exclusively with the following three '
- 'methods.\n'
- ' A call like\n'
+ 'Note:\n'
+ '\n'
+ ' Slicing is done exclusively with the following three '
+ 'methods. A\n'
+ ' call like\n'
'\n'
' a[1:2] = b\n'
'\n'
@@ -9360,8 +9512,10 @@ topics = {'assert': 'The "assert" statement\n'
'the\n'
' container), "KeyError" should be raised.\n'
'\n'
- ' Note: "for" loops expect that an "IndexError" will be '
- 'raised for\n'
+ ' Note:\n'
+ '\n'
+ ' "for" loops expect that an "IndexError" will be raised '
+ 'for\n'
' illegal indexes to allow proper detection of the end of '
'the\n'
' sequence.\n'
@@ -9551,15 +9705,18 @@ topics = {'assert': 'The "assert" statement\n'
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
- ' Note: If the right operand’s type is a subclass of the '
- 'left\n'
- ' operand’s type and that subclass provides the reflected '
- 'method\n'
- ' for the operation, this method will be called before '
- 'the left\n'
- ' operand’s non-reflected method. This behavior allows '
- 'subclasses\n'
- ' to override their ancestors’ operations.\n'
+ ' Note:\n'
+ '\n'
+ ' If the right operand’s type is a subclass of the left '
+ 'operand’s\n'
+ ' type and that subclass provides a different '
+ 'implementation of the\n'
+ ' reflected method for the operation, this method will be '
+ 'called\n'
+ ' before the left operand’s non-reflected method. This '
+ 'behavior\n'
+ ' allows subclasses to override their ancestors’ '
+ 'operations.\n'
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
@@ -9603,6 +9760,16 @@ topics = {'assert': 'The "assert" statement\n'
'the data\n'
' model.\n'
'\n'
+ ' Note:\n'
+ '\n'
+ ' Due to a bug in the dispatching mechanism for "**=", a '
+ 'class that\n'
+ ' defines "__ipow__()" but returns "NotImplemented" would '
+ 'fail to\n'
+ ' fall back to "x.__pow__(y)" and "y.__rpow__(x)". This '
+ 'bug is\n'
+ ' fixed in Python 3.10.\n'
+ '\n'
'object.__neg__(self)\n'
'object.__pos__(self)\n'
'object.__abs__(self)\n'
@@ -9909,9 +10076,20 @@ topics = {'assert': 'The "assert" statement\n'
'For a list\n'
' of possible encodings, see section Standard Encodings.\n'
'\n'
     +        '   By default, the *errors* argument is not checked for '
     +        'performance\n'
     +        '   reasons, but only used at the first encoding '
+ 'error. Enable the\n'
+ ' Python Development Mode, or use a debug build to check '
+ '*errors*.\n'
+ '\n'
' Changed in version 3.1: Support for keyword arguments '
'added.\n'
'\n'
     +        '   Changed in version 3.9: The *errors* argument is now checked in '
+ 'development\n'
+ ' mode and in debug mode.\n'
+ '\n'
'str.endswith(suffix[, start[, end]])\n'
'\n'
' Return "True" if the string ends with the specified '
@@ -9966,11 +10144,13 @@ topics = {'assert': 'The "assert" statement\n'
'"-1" if\n'
' *sub* is not found.\n'
'\n'
- ' Note: The "find()" method should be used only if you '
- 'need to know\n'
- ' the position of *sub*. To check if *sub* is a '
- 'substring or not,\n'
- ' use the "in" operator:\n'
+ ' Note:\n'
+ '\n'
+ ' The "find()" method should be used only if you need '
+ 'to know the\n'
+ ' position of *sub*. To check if *sub* is a substring '
+ 'or not, use\n'
+ ' the "in" operator:\n'
'\n'
" >>> 'Py' in 'Python'\n"
' True\n'
@@ -9999,8 +10179,9 @@ topics = {'assert': 'The "assert" statement\n'
' formatting options that can be specified in format '
'strings.\n'
'\n'
- ' Note: When formatting a number ("int", "float", '
- '"complex",\n'
+ ' Note:\n'
+ '\n'
+ ' When formatting a number ("int", "float", "complex",\n'
' "decimal.Decimal" and subclasses) with the "n" type '
'(ex:\n'
' "\'{:n}\'.format(1234)"), the function temporarily '
@@ -10205,6 +10386,15 @@ topics = {'assert': 'The "assert" statement\n'
'"False"\n'
' otherwise.\n'
'\n'
+ " >>> 'BANANA'.isupper()\n"
+ ' True\n'
+ " >>> 'banana'.isupper()\n"
+ ' False\n'
+ " >>> 'baNana'.isupper()\n"
+ ' False\n'
+ " >>> ' '.isupper()\n"
+ ' False\n'
+ '\n'
'str.join(iterable)\n'
'\n'
' Return a string which is the concatenation of the '
@@ -10253,6 +10443,16 @@ topics = {'assert': 'The "assert" statement\n'
" >>> 'www.example.com'.lstrip('cmowz.')\n"
" 'example.com'\n"
'\n'
+ ' See "str.removeprefix()" for a method that will remove '
+ 'a single\n'
+ ' prefix string rather than all of a set of characters. '
+ 'For example:\n'
+ '\n'
+ " >>> 'Arthur: three!'.lstrip('Arthur: ')\n"
+ " 'ee!'\n"
+ " >>> 'Arthur: three!'.removeprefix('Arthur: ')\n"
+ " 'three!'\n"
+ '\n'
'static str.maketrans(x[, y[, z]])\n'
'\n'
' This static method returns a translation table usable '
@@ -10289,6 +10489,35 @@ topics = {'assert': 'The "assert" statement\n'
'followed by\n'
' two empty strings.\n'
'\n'
+ 'str.removeprefix(prefix, /)\n'
+ '\n'
+ ' If the string starts with the *prefix* string, return\n'
+ ' "string[len(prefix):]". Otherwise, return a copy of the '
+ 'original\n'
+ ' string:\n'
+ '\n'
+ " >>> 'TestHook'.removeprefix('Test')\n"
+ " 'Hook'\n"
+ " >>> 'BaseTestCase'.removeprefix('Test')\n"
+ " 'BaseTestCase'\n"
+ '\n'
+ ' New in version 3.9.\n'
+ '\n'
+ 'str.removesuffix(suffix, /)\n'
+ '\n'
+ ' If the string ends with the *suffix* string and that '
+ '*suffix* is\n'
+ ' not empty, return "string[:-len(suffix)]". Otherwise, '
+ 'return a copy\n'
+ ' of the original string:\n'
+ '\n'
+ " >>> 'MiscTests'.removesuffix('Tests')\n"
+ " 'Misc'\n"
+ " >>> 'TmpDirMixin'.removesuffix('Tests')\n"
+ " 'TmpDirMixin'\n"
+ '\n'
+ ' New in version 3.9.\n'
+ '\n'
'str.replace(old, new[, count])\n'
'\n'
' Return a copy of the string with all occurrences of '
@@ -10366,6 +10595,16 @@ topics = {'assert': 'The "assert" statement\n'
" >>> 'mississippi'.rstrip('ipz')\n"
" 'mississ'\n"
'\n'
+ ' See "str.removesuffix()" for a method that will remove '
+ 'a single\n'
+ ' suffix string rather than all of a set of characters. '
+ 'For example:\n'
+ '\n'
+ " >>> 'Monty Python'.rstrip(' Python')\n"
+ " 'M'\n"
+ " >>> 'Monty Python'.removesuffix(' Python')\n"
+ " 'Monty'\n"
+ '\n'
'str.split(sep=None, maxsplit=-1)\n'
'\n'
' Return a list of the words in the string, using *sep* '
@@ -10847,17 +11086,20 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'2. Unlike in Standard C, exactly two hex digits are required.\n'
'\n'
- '3. In a bytes literal, hexadecimal and octal escapes denote the\n'
- ' byte with the given value. In a string literal, these escapes\n'
- ' denote a Unicode character with the given value.\n'
+ '3. In a bytes literal, hexadecimal and octal escapes denote the '
+ 'byte\n'
+ ' with the given value. In a string literal, these escapes '
+ 'denote a\n'
+ ' Unicode character with the given value.\n'
'\n'
'4. Changed in version 3.3: Support for name aliases [1] has been\n'
' added.\n'
'\n'
'5. Exactly four hex digits are required.\n'
'\n'
- '6. Any Unicode character can be encoded this way. Exactly eight\n'
- ' hex digits are required.\n'
+ '6. Any Unicode character can be encoded this way. Exactly eight '
+ 'hex\n'
+ ' digits are required.\n'
'\n'
'Unlike Standard C, all unrecognized escape sequences are left in '
'the\n'
@@ -10899,9 +11141,10 @@ topics = {'assert': 'The "assert" statement\n'
'subscriptions': 'Subscriptions\n'
'*************\n'
'\n'
- 'A subscription selects an item of a sequence (string, tuple '
- 'or list)\n'
- 'or mapping (dictionary) object:\n'
+ 'Subscription of a sequence (string, tuple or list) or '
+ 'mapping\n'
+ '(dictionary) object usually selects an item from the '
+ 'collection:\n'
'\n'
' subscription ::= primary "[" expression_list "]"\n'
'\n'
@@ -10952,7 +11195,13 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'A string’s items are characters. A character is not a '
'separate data\n'
- 'type but a string of exactly one character.\n',
+ 'type but a string of exactly one character.\n'
+ '\n'
+ 'Subscription of certain *classes* or *types* creates a '
+ 'generic alias.\n'
+ 'In this case, user-defined classes can support subscription '
+ 'by\n'
                  + 'providing a "__class_getitem__()" classmethod. For example:\n'
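                  + '\n'
                  + '   >>> list[int]\n'
                  + '   list[int]\n',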
'truth': 'Truth Value Testing\n'
'*******************\n'
'\n'
@@ -11170,10 +11419,17 @@ topics = {'assert': 'The "assert" statement\n'
'for\n'
' the operands provided. (The interpreter will then try the\n'
' reflected operation, or some other fallback, depending on the\n'
- ' operator.) Its truth value is true.\n'
+ ' operator.) It should not be evaluated in a boolean context.\n'
'\n'
' See Implementing the arithmetic operations for more details.\n'
'\n'
+ ' Changed in version 3.9: Evaluating "NotImplemented" in a '
+ 'boolean\n'
+ ' context is deprecated. While it currently evaluates as true, it\n'
+ ' will emit a "DeprecationWarning". It will raise a "TypeError" in '
+ 'a\n'
+ ' future version of Python.\n'
+ '\n'
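   + '   For example, code such as:\n'
   + '\n'
   + '      if x.__eq__(y):  # may be NotImplemented, which is truthy\n'
   + '          ...\n'
   + '\n'
   + '   should compare the result with "NotImplemented" explicitly, or\n'
   + '   simply use "x == y".\n'
   + '\n'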
'Ellipsis\n'
' This type has a single value. There is a single object with '
'this\n'
@@ -11191,6 +11447,27 @@ topics = {'assert': 'The "assert" statement\n'
'representation\n'
' in computers.\n'
'\n'
+ ' The string representations of the numeric classes, computed by\n'
+ ' "__repr__()" and "__str__()", have the following properties:\n'
+ '\n'
+ ' * They are valid numeric literals which, when passed to their '
+ 'class\n'
+ ' constructor, produce an object having the value of the '
+ 'original\n'
+ ' numeric.\n'
+ '\n'
+ ' * The representation is in base 10, when possible.\n'
+ '\n'
+ ' * Leading zeros, possibly excepting a single zero before a '
+ 'decimal\n'
+ ' point, are not shown.\n'
+ '\n'
+ ' * Trailing zeros, possibly excepting a single zero after a '
+ 'decimal\n'
+ ' point, are not shown.\n'
+ '\n'
+ ' * A sign is shown only when the number is negative.\n'
+ '\n'
' Python distinguishes between integers, floating point numbers, '
'and\n'
' complex numbers:\n'
@@ -11432,6 +11709,16 @@ topics = {'assert': 'The "assert" statement\n'
' then they can be used interchangeably to index the same\n'
' dictionary entry.\n'
'\n'
+ ' Dictionaries preserve insertion order, meaning that keys will '
+ 'be\n'
   + '   produced in the same order they were added to the '
   + 'dictionary.\n'
   + '   Replacing an existing key does not change the order; '
   + 'however,\n'
   + '   removing a key and re-inserting it will add it to '
+ 'the\n'
+ ' end instead of keeping its old place.\n'
+ '\n'
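   + '   For example:\n'
   + '\n'
   + "      >>> d = {'a': 1, 'b': 2}\n"
   + "      >>> del d['a']\n"
   + "      >>> d['a'] = 3\n"
   + '      >>> d\n'
   + "      {'b': 2, 'a': 3}\n"
   + '\n'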
' Dictionaries are mutable; they can be created by the "{...}"\n'
' notation (see section Dictionary displays).\n'
'\n'
@@ -11440,6 +11727,13 @@ topics = {'assert': 'The "assert" statement\n'
'"collections"\n'
' module.\n'
'\n'
+ ' Changed in version 3.7: Dictionaries did not preserve '
+ 'insertion\n'
+ ' order in versions of Python before 3.6. In CPython 3.6,\n'
+ ' insertion order was preserved, but it was considered an\n'
+ ' implementation detail at that time rather than a language\n'
+ ' guarantee.\n'
+ '\n'
'Callable types\n'
' These are the types to which the function call operation (see\n'
' section Calls) can be applied:\n'
@@ -12225,6 +12519,21 @@ topics = {'assert': 'The "assert" statement\n'
'positional\n'
' argument and a possibly empty set of keyword arguments.\n'
'\n'
+ ' Dictionaries can be created by several means:\n'
+ '\n'
+ ' * Use a comma-separated list of "key: value" pairs within '
+ 'braces:\n'
+ ' "{\'jack\': 4098, \'sjoerd\': 4127}" or "{4098: '
+ "'jack', 4127:\n"
+ ' \'sjoerd\'}"\n'
+ '\n'
          +       '   * Use a pair of braces to denote the empty dictionary: '
          +       '"{}"\n'
          +       '\n'
          +       '   * Use a dict comprehension: "{x: x ** 2 for x in '
          +       'range(10)}"\n'
+ '\n'
+ ' * Use the type constructor: "dict()", "dict([(\'foo\', '
+ "100), ('bar',\n"
+ ' 200)])", "dict(foo=100, bar=200)"\n'
+ '\n'
' If no positional argument is given, an empty dictionary '
'is created.\n'
' If a positional argument is given and it is a mapping '
@@ -12262,7 +12571,8 @@ topics = {'assert': 'The "assert" statement\n'
" >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))\n"
" >>> d = dict([('two', 2), ('one', 1), ('three', 3)])\n"
" >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n"
- ' >>> a == b == c == d == e\n'
+ " >>> f = dict({'one': 1, 'three': 3}, two=2)\n"
+ ' >>> a == b == c == d == e == f\n'
' True\n'
'\n'
' Providing keyword arguments as in the first example only '
@@ -12460,6 +12770,29 @@ topics = {'assert': 'The "assert" statement\n'
' >>> d.values() == d.values()\n'
' False\n'
'\n'
+ ' d | other\n'
+ '\n'
+ ' Create a new dictionary with the merged keys and '
+ 'values of *d*\n'
+ ' and *other*, which must both be dictionaries. The '
+ 'values of\n'
+ ' *other* take priority when *d* and *other* share '
+ 'keys.\n'
+ '\n'
+ ' New in version 3.9.\n'
+ '\n'
+ ' d |= other\n'
+ '\n'
+ ' Update the dictionary *d* with keys and values from '
+ '*other*,\n'
+ ' which may be either a *mapping* or an *iterable* of '
+ 'key/value\n'
+ ' pairs. The values of *other* take priority when *d* '
+ 'and *other*\n'
+ ' share keys.\n'
+ '\n'
+ ' New in version 3.9.\n'
+ '\n'
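          +       '     For example:\n'
          +       '\n'
          +       "        >>> d = {'spam': 1, 'eggs': 2}\n"
          +       "        >>> e = {'eggs': 3, 'ham': 4}\n"
          +       '        >>> d | e\n'
          +       "        {'spam': 1, 'eggs': 3, 'ham': 4}\n"
          +       '        >>> d |= e\n'
          +       '        >>> d\n'
          +       "        {'spam': 1, 'eggs': 3, 'ham': 4}\n"
          +       '\n'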
' Dictionaries compare equal if and only if they have the '
'same "(key,\n'
' value)" pairs (regardless of ordering). Order comparisons '
@@ -12507,9 +12840,11 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
' Changed in version 3.8: Dictionaries are now reversible.\n'
'\n'
- 'See also: "types.MappingProxyType" can be used to create a '
- 'read-only\n'
- ' view of a "dict".\n'
+ 'See also:\n'
+ '\n'
+ ' "types.MappingProxyType" can be used to create a read-only '
+ 'view of a\n'
+ ' "dict".\n'
'\n'
'\n'
'Dictionary view objects\n'
@@ -12893,13 +13228,14 @@ topics = {'assert': 'The "assert" statement\n'
'"None", it\n'
' is treated like "1".\n'
'\n'
- '6. Concatenating immutable sequences always results in a new\n'
- ' object. This means that building up a sequence by repeated\n'
- ' concatenation will have a quadratic runtime cost in the '
- 'total\n'
- ' sequence length. To get a linear runtime cost, you must '
- 'switch to\n'
- ' one of the alternatives below:\n'
+ '6. Concatenating immutable sequences always results in a new '
+ 'object.\n'
+ ' This means that building up a sequence by repeated '
+ 'concatenation\n'
+ ' will have a quadratic runtime cost in the total sequence '
+ 'length.\n'
+ ' To get a linear runtime cost, you must switch to one of the\n'
+ ' alternatives below:\n'
'\n'
' * if concatenating "str" objects, you can build a list and '
'use\n'
@@ -12917,24 +13253,25 @@ topics = {'assert': 'The "assert" statement\n'
' * for other types, investigate the relevant class '
'documentation\n'
'\n'
- '7. Some sequence types (such as "range") only support item\n'
- ' sequences that follow specific patterns, and hence don’t '
- 'support\n'
- ' sequence concatenation or repetition.\n'
- '\n'
- '8. "index" raises "ValueError" when *x* is not found in *s*. '
- 'Not\n'
- ' all implementations support passing the additional arguments '
- '*i*\n'
- ' and *j*. These arguments allow efficient searching of '
- 'subsections\n'
- ' of the sequence. Passing the extra arguments is roughly '
- 'equivalent\n'
- ' to using "s[i:j].index(x)", only without copying any data and '
- 'with\n'
- ' the returned index being relative to the start of the '
+ '7. Some sequence types (such as "range") only support item '
+ 'sequences\n'
+ ' that follow specific patterns, and hence don’t support '
'sequence\n'
- ' rather than the start of the slice.\n'
+ ' concatenation or repetition.\n'
+ '\n'
+ '8. "index" raises "ValueError" when *x* is not found in *s*. Not '
+ 'all\n'
+ ' implementations support passing the additional arguments *i* '
+ 'and\n'
+ ' *j*. These arguments allow efficient searching of subsections '
+ 'of\n'
+ ' the sequence. Passing the extra arguments is roughly '
+ 'equivalent to\n'
+ ' using "s[i:j].index(x)", only without copying any data and '
+ 'with the\n'
+ ' returned index being relative to the start of the sequence '
+ 'rather\n'
+ ' than the start of the slice.\n'
'\n'
'\n'
'Immutable Sequence Types\n'
@@ -13062,17 +13399,17 @@ topics = {'assert': 'The "assert" statement\n'
'1. *t* must have the same length as the slice it is replacing.\n'
'\n'
'2. The optional argument *i* defaults to "-1", so that by '
- 'default\n'
- ' the last item is removed and returned.\n'
+ 'default the\n'
+ ' last item is removed and returned.\n'
'\n'
'3. "remove()" raises "ValueError" when *x* is not found in *s*.\n'
'\n'
- '4. The "reverse()" method modifies the sequence in place for\n'
- ' economy of space when reversing a large sequence. To remind '
- 'users\n'
- ' that it operates by side effect, it does not return the '
- 'reversed\n'
- ' sequence.\n'
+ '4. The "reverse()" method modifies the sequence in place for '
+ 'economy\n'
+ ' of space when reversing a large sequence. To remind users '
+ 'that it\n'
+ ' operates by side effect, it does not return the reversed '
+ 'sequence.\n'
'\n'
'5. "clear()" and "copy()" are included for consistency with the\n'
' interfaces of mutable containers that don’t support slicing\n'
@@ -13109,9 +13446,9 @@ topics = {'assert': 'The "assert" statement\n'
' * Using a pair of square brackets to denote the empty list: '
'"[]"\n'
'\n'
- ' * Using square brackets, separating items with commas: '
- '"[a]",\n'
- ' "[a, b, c]"\n'
+ ' * Using square brackets, separating items with commas: "[a]", '
+ '"[a,\n'
+ ' b, c]"\n'
'\n'
' * Using a list comprehension: "[x for x in iterable]"\n'
'\n'
@@ -13414,9 +13751,9 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'See also:\n'
'\n'
- ' * The linspace recipe shows how to implement a lazy version '
- 'of\n'
- ' range suitable for floating point applications.\n',
+ ' * The linspace recipe shows how to implement a lazy version of '
+ 'range\n'
+ ' suitable for floating point applications.\n',
'typesseq-mutable': 'Mutable Sequence Types\n'
'**********************\n'
'\n'
@@ -13527,19 +13864,18 @@ topics = {'assert': 'The "assert" statement\n'
'replacing.\n'
'\n'
'2. The optional argument *i* defaults to "-1", so that '
- 'by default\n'
- ' the last item is removed and returned.\n'
+ 'by default the\n'
+ ' last item is removed and returned.\n'
'\n'
'3. "remove()" raises "ValueError" when *x* is not found '
'in *s*.\n'
'\n'
'4. The "reverse()" method modifies the sequence in place '
- 'for\n'
- ' economy of space when reversing a large sequence. To '
- 'remind users\n'
- ' that it operates by side effect, it does not return '
- 'the reversed\n'
- ' sequence.\n'
+ 'for economy\n'
+ ' of space when reversing a large sequence. To remind '
+ 'users that it\n'
+ ' operates by side effect, it does not return the '
+ 'reversed sequence.\n'
'\n'
'5. "clear()" and "copy()" are included for consistency '
'with the\n'
@@ -13589,7 +13925,7 @@ topics = {'assert': 'The "assert" statement\n'
'The "while" statement is used for repeated execution as long as an\n'
'expression is true:\n'
'\n'
- ' while_stmt ::= "while" expression ":" suite\n'
+ ' while_stmt ::= "while" assignment_expression ":" suite\n'
' ["else" ":" suite]\n'
'\n'
'This repeatedly tests the expression and, if it is true, executes '
@@ -13622,8 +13958,9 @@ topics = {'assert': 'The "assert" statement\n'
'The execution of the "with" statement with one “item” proceeds as\n'
'follows:\n'
'\n'
- '1. The context expression (the expression given in the "with_item")\n'
- ' is evaluated to obtain a context manager.\n'
+ '1. The context expression (the expression given in the "with_item") '
+ 'is\n'
+ ' evaluated to obtain a context manager.\n'
'\n'
'2. The context manager’s "__enter__()" is loaded for later use.\n'
'\n'
@@ -13631,12 +13968,15 @@ topics = {'assert': 'The "assert" statement\n'
'\n'
'4. The context manager’s "__enter__()" method is invoked.\n'
'\n'
- '5. If a target was included in the "with" statement, the return\n'
- ' value from "__enter__()" is assigned to it.\n'
+ '5. If a target was included in the "with" statement, the return '
+ 'value\n'
+ ' from "__enter__()" is assigned to it.\n'
+ '\n'
+ ' Note:\n'
'\n'
- ' Note: The "with" statement guarantees that if the "__enter__()"\n'
- ' method returns without an error, then "__exit__()" will always '
- 'be\n'
+ ' The "with" statement guarantees that if the "__enter__()" '
+ 'method\n'
+ ' returns without an error, then "__exit__()" will always be\n'
' called. Thus, if an error occurs during the assignment to the\n'
' target list, it will be treated the same as an error occurring\n'
' within the suite would be. See step 6 below.\n'
diff --git a/x64/Lib/queue.py b/x64/Lib/queue.py
index 5bb0431..10dbcbc 100644
--- a/x64/Lib/queue.py
+++ b/x64/Lib/queue.py
@@ -1,6 +1,7 @@
'''A multi-producer, multi-consumer queue.'''
import threading
+import types
from collections import deque
from heapq import heappush, heappop
from time import monotonic as time
@@ -216,6 +217,8 @@ class Queue:
def _get(self):
return self.queue.popleft()
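+
+    # Allow runtime parameterized generics such as Queue[int] (PEP 585).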
+ __class_getitem__ = classmethod(types.GenericAlias)
+
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
@@ -316,6 +319,8 @@ class _PySimpleQueue:
'''Return the approximate size of the queue (not reliable!).'''
return len(self._queue)
+ __class_getitem__ = classmethod(types.GenericAlias)
+
if SimpleQueue is None:
SimpleQueue = _PySimpleQueue
diff --git a/x64/Lib/quopri.py b/x64/Lib/quopri.py
index cbd979a..08899c5 100644
--- a/x64/Lib/quopri.py
+++ b/x64/Lib/quopri.py
@@ -204,11 +204,11 @@ def main():
print("-t: quote tabs")
print("-d: decode; default encode")
sys.exit(2)
- deco = 0
- tabs = 0
+ deco = False
+ tabs = False
for o, a in opts:
- if o == '-t': tabs = 1
- if o == '-d': deco = 1
+ if o == '-t': tabs = True
+ if o == '-d': deco = True
if tabs and deco:
sys.stdout = sys.stderr
print("-t and -d are mutually exclusive")
diff --git a/x64/Lib/random.py b/x64/Lib/random.py
index 365a019..a6454f5 100644
--- a/x64/Lib/random.py
+++ b/x64/Lib/random.py
@@ -1,5 +1,9 @@
"""Random variable generators.
+ bytes
+ -----
+ uniform bytes (values between 0 and 255)
+
integers
--------
uniform within range
@@ -37,14 +41,20 @@ General notes on the underlying Mersenne Twister core generator:
"""
+# Translated by Guido van Rossum from C source provided by
+# Adrian Baddeley. Adapted by Raymond Hettinger for use with
+# the Mersenne Twister and os.urandom() core generators.
+
from warnings import warn as _warn
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
+from math import tau as TWOPI, floor as _floor
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from itertools import accumulate as _accumulate, repeat as _repeat
from bisect import bisect as _bisect
import os as _os
+import _random
try:
# hashlib is pretty heavy to load, try lean internal module first
@@ -53,28 +63,40 @@ except ImportError:
# fallback to official implementation
from hashlib import sha512 as _sha512
-
-__all__ = ["Random","seed","random","uniform","randint","choice","sample",
- "randrange","shuffle","normalvariate","lognormvariate",
- "expovariate","vonmisesvariate","gammavariate","triangular",
- "gauss","betavariate","paretovariate","weibullvariate",
- "getstate","setstate", "getrandbits", "choices",
- "SystemRandom"]
-
-NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
-TWOPI = 2.0*_pi
+__all__ = [
+ "Random",
+ "SystemRandom",
+ "betavariate",
+ "choice",
+ "choices",
+ "expovariate",
+ "gammavariate",
+ "gauss",
+ "getrandbits",
+ "getstate",
+ "lognormvariate",
+ "normalvariate",
+ "paretovariate",
+ "randint",
+ "random",
+ "randrange",
+ "sample",
+ "seed",
+ "setstate",
+ "shuffle",
+ "triangular",
+ "uniform",
+ "vonmisesvariate",
+ "weibullvariate",
+]
+
+NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0)
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
-RECIP_BPF = 2**-BPF
+RECIP_BPF = 2 ** -BPF
-# Translated by Guido van Rossum from C source provided by
-# Adrian Baddeley. Adapted by Raymond Hettinger for use with
-# the Mersenne Twister and os.urandom() core generators.
-
-import _random
-
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
@@ -100,28 +122,11 @@ class Random(_random.Random):
self.seed(x)
self.gauss_next = None
- def __init_subclass__(cls, /, **kwargs):
- """Control how subclasses generate random integers.
-
- The algorithm a subclass can use depends on the random() and/or
- getrandbits() implementation available to it and determines
- whether it can generate random integers from arbitrarily large
- ranges.
- """
-
- for c in cls.__mro__:
- if '_randbelow' in c.__dict__:
- # just inherit it
- break
- if 'getrandbits' in c.__dict__:
- cls._randbelow = cls._randbelow_with_getrandbits
- break
- if 'random' in c.__dict__:
- cls._randbelow = cls._randbelow_without_getrandbits
- break
-
def seed(self, a=None, version=2):
- """Initialize internal state from hashable object.
+ """Initialize internal state from a seed.
+
+ The only supported seed types are None, int, float,
+ str, bytes, and bytearray.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
@@ -143,12 +148,20 @@ class Random(_random.Random):
x ^= len(a)
a = -2 if x == -1 else x
- if version == 2 and isinstance(a, (str, bytes, bytearray)):
+ elif version == 2 and isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
+ elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)):
+ _warn('Seeding based on hashing is deprecated\n'
+ 'since Python 3.9 and will be removed in a subsequent '
+ 'version. The only \n'
+ 'supported seed types are: None, '
+ 'int, float, str, bytes, and bytearray.',
+ DeprecationWarning, 2)
+
super().seed(a)
self.gauss_next = None
@@ -169,7 +182,7 @@ class Random(_random.Random):
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
- internalstate = tuple(x % (2**32) for x in internalstate)
+ internalstate = tuple(x % (2 ** 32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
@@ -178,15 +191,18 @@ class Random(_random.Random):
"Random.setstate() of version %s" %
(version, self.VERSION))
-## ---- Methods below this point do not need to be overridden when
-## ---- subclassing for the purpose of using a different core generator.
-## -------------------- pickle support -------------------
+ ## -------------------------------------------------------
+ ## ---- Methods below this point do not need to be overridden or extended
+ ## ---- when subclassing for the purpose of using a different core generator.
+
+
+ ## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
- def __getstate__(self): # for pickle
+ def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
@@ -195,9 +211,83 @@ class Random(_random.Random):
def __reduce__(self):
return self.__class__, (), self.getstate()
-## -------------------- integer methods -------------------
- def randrange(self, start, stop=None, step=1, _int=int):
+ ## ---- internal support method for evenly distributed integers ----
+
+ def __init_subclass__(cls, /, **kwargs):
+ """Control how subclasses generate random integers.
+
+ The algorithm a subclass can use depends on the random() and/or
+ getrandbits() implementation available to it and determines
+ whether it can generate random integers from arbitrarily large
+ ranges.
+ """
+
+ for c in cls.__mro__:
+ if '_randbelow' in c.__dict__:
+ # just inherit it
+ break
+ if 'getrandbits' in c.__dict__:
+ cls._randbelow = cls._randbelow_with_getrandbits
+ break
+ if 'random' in c.__dict__:
+ cls._randbelow = cls._randbelow_without_getrandbits
+ break
+
+ def _randbelow_with_getrandbits(self, n):
+ "Return a random int in the range [0,n). Returns 0 if n==0."
+
+ if not n:
+ return 0
+ getrandbits = self.getrandbits
+ k = n.bit_length() # don't use (n-1) here because n can be 1
+ r = getrandbits(k) # 0 <= r < 2**k
+ while r >= n:
+ r = getrandbits(k)
+ return r
+
+ def _randbelow_without_getrandbits(self, n, maxsize=1<<BPF):
+ """Return a random int in the range [0,n). Returns 0 if n==0.
+
+ The implementation does not use getrandbits, but only random.
+ """
+
+ random = self.random
+ if n >= maxsize:
+ _warn("Underlying random() generator does not supply \n"
+ "enough bits to choose from a population range this large.\n"
+ "To remove the range limitation, add a getrandbits() method.")
+ return _floor(random() * n)
+ if n == 0:
+ return 0
+ rem = maxsize % n
+ limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
+ r = random()
+ while r >= limit:
+ r = random()
+ return _floor(r * maxsize) % n
+
+ _randbelow = _randbelow_with_getrandbits
+
+
+ ## --------------------------------------------------------
+ ## ---- Methods below this point generate custom distributions
+ ## ---- based on the methods defined above. They do not
+ ## ---- directly touch the underlying generator and only
+ ## ---- access randomness through the methods: random(),
+ ## ---- getrandbits(), or _randbelow().
+
+
+ ## -------------------- bytes methods ---------------------
+
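+    # randbytes() draws its bits from getrandbits(), so subclasses
+    # that override getrandbits() change randbytes() as well.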
+ def randbytes(self, n):
+ """Generate n random bytes."""
+ return self.getrandbits(n * 8).to_bytes(n, 'little')
+
+
+ ## -------------------- integer methods -------------------
+
+ def randrange(self, start, stop=None, step=1):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
@@ -207,7 +297,7 @@ class Random(_random.Random):
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
- istart = _int(start)
+ istart = int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
@@ -216,7 +306,7 @@ class Random(_random.Random):
raise ValueError("empty range for randrange()")
# stop argument supplied.
- istop = _int(stop)
+ istop = int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
@@ -226,7 +316,7 @@ class Random(_random.Random):
raise ValueError("empty range for randrange() (%d, %d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
- istep = _int(step)
+ istep = int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
@@ -239,7 +329,7 @@ class Random(_random.Random):
if n <= 0:
raise ValueError("empty range for randrange()")
- return istart + istep*self._randbelow(n)
+ return istart + istep * self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
@@ -247,48 +337,13 @@ class Random(_random.Random):
return self.randrange(a, b+1)
- def _randbelow_with_getrandbits(self, n):
- "Return a random int in the range [0,n). Raises ValueError if n==0."
- getrandbits = self.getrandbits
- k = n.bit_length() # don't use (n-1) here because n can be 1
- r = getrandbits(k) # 0 <= r < 2**k
- while r >= n:
- r = getrandbits(k)
- return r
-
- def _randbelow_without_getrandbits(self, n, int=int, maxsize=1<<BPF):
- """Return a random int in the range [0,n). Raises ValueError if n==0.
-
- The implementation does not use getrandbits, but only random.
- """
-
- random = self.random
- if n >= maxsize:
- _warn("Underlying random() generator does not supply \n"
- "enough bits to choose from a population range this large.\n"
- "To remove the range limitation, add a getrandbits() method.")
- return int(random() * n)
- if n == 0:
- raise ValueError("Boundary cannot be zero")
- rem = maxsize % n
- limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
- r = random()
- while r >= limit:
- r = random()
- return int(r*maxsize) % n
-
- _randbelow = _randbelow_with_getrandbits
-
-## -------------------- sequence methods -------------------
+ ## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
- try:
- i = self._randbelow(len(seq))
- except ValueError:
- raise IndexError('Cannot choose from an empty sequence') from None
- return seq[i]
+ # raises IndexError if seq is empty
+ return seq[self._randbelow(len(seq))]
def shuffle(self, x, random=None):
"""Shuffle list x in place, and return None.
@@ -303,16 +358,20 @@ class Random(_random.Random):
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
- j = randbelow(i+1)
+ j = randbelow(i + 1)
x[i], x[j] = x[j], x[i]
else:
- _int = int
+ _warn('The *random* parameter to shuffle() has been deprecated\n'
+ 'since Python 3.9 and will be removed in a subsequent '
+ 'version.',
+ DeprecationWarning, 2)
+ floor = _floor
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
- j = _int(random() * (i+1))
+ j = floor(random() * (i + 1))
x[i], x[j] = x[j], x[i]
- def sample(self, population, k):
+ def sample(self, population, k, *, counts=None):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
@@ -325,9 +384,21 @@ class Random(_random.Random):
population contains repeats, then each occurrence is a possible
selection in the sample.
- To choose a sample in a range of integers, use range as an argument.
- This is especially fast and space efficient for sampling from a
- large population: sample(range(10000000), 60)
+ Repeated elements can be specified one at a time or with the optional
+ counts parameter. For example:
+
+ sample(['red', 'blue'], counts=[4, 2], k=5)
+
+ is equivalent to:
+
+ sample(['red', 'red', 'red', 'red', 'blue', 'blue'], k=5)
+
+ To choose a sample from a range of integers, use range() for the
+ population argument. This is especially fast and space efficient
+ for sampling from a large population:
+
+ sample(range(10000000), 60)
+
"""
# Sampling without replacement entails tracking either potential
@@ -354,24 +425,40 @@ class Random(_random.Random):
# causing them to eat more entropy than necessary.
if isinstance(population, _Set):
+            _warn('Sampling from a set is deprecated\n'
+ 'since Python 3.9 and will be removed in a subsequent version.',
+ DeprecationWarning, 2)
population = tuple(population)
if not isinstance(population, _Sequence):
- raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
- randbelow = self._randbelow
+ raise TypeError("Population must be a sequence. For dicts or sets, use sorted(d).")
n = len(population)
+ if counts is not None:
+ cum_counts = list(_accumulate(counts))
+ if len(cum_counts) != n:
+ raise ValueError('The number of counts does not match the population')
+ total = cum_counts.pop()
+ if not isinstance(total, int):
+ raise TypeError('Counts must be integers')
+ if total <= 0:
+ raise ValueError('Total of counts must be greater than zero')
+ selections = sample(range(total), k=k)
+ bisect = _bisect
+ return [population[bisect(cum_counts, s)] for s in selections]
+ randbelow = self._randbelow
if not 0 <= k <= n:
raise ValueError("Sample larger than population or is negative")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
- setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
+ setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
- # An n-length list is smaller than a k-length set
+ # An n-length list is smaller than a k-length set.
+ # Invariant: non-selected at pool[0 : n-i]
pool = list(population)
- for i in range(k): # invariant: non-selected at [0,n-i)
- j = randbelow(n-i)
+ for i in range(k):
+ j = randbelow(n - i)
result[i] = pool[j]
- pool[j] = pool[n-i-1] # move non-selected item into vacancy
+ pool[j] = pool[n - i - 1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
@@ -394,29 +481,28 @@ class Random(_random.Random):
n = len(population)
if cum_weights is None:
if weights is None:
- _int = int
+ floor = _floor
n += 0.0 # convert to float for a small speed improvement
- return [population[_int(random() * n)] for i in _repeat(None, k)]
+ return [population[floor(random() * n)] for i in _repeat(None, k)]
cum_weights = list(_accumulate(weights))
elif weights is not None:
raise TypeError('Cannot specify both weights and cumulative weights')
if len(cum_weights) != n:
raise ValueError('The number of weights does not match the population')
- bisect = _bisect
total = cum_weights[-1] + 0.0 # convert to float
+ if total <= 0.0:
+ raise ValueError('Total of weights must be greater than zero')
+ bisect = _bisect
hi = n - 1
return [population[bisect(cum_weights, random() * total, 0, hi)]
for i in _repeat(None, k)]
-## -------------------- real-valued distributions -------------------
-## -------------------- uniform distribution -------------------
+ ## -------------------- real-valued distributions -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
- return a + (b-a) * self.random()
-
-## -------------------- triangular --------------------
+ return a + (b - a) * self.random()
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
@@ -438,32 +524,64 @@ class Random(_random.Random):
low, high = high, low
return low + (high - low) * _sqrt(u * c)
-## -------------------- normal distribution --------------------
-
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
- # mu = mean, sigma = standard deviation
-
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
- while 1:
+ while True:
u1 = random()
u2 = 1.0 - random()
- z = NV_MAGICCONST*(u1-0.5)/u2
- zz = z*z/4.0
+ z = NV_MAGICCONST * (u1 - 0.5) / u2
+ zz = z * z / 4.0
if zz <= -_log(u2):
break
- return mu + z*sigma
+ return mu + z * sigma
+
+ def gauss(self, mu, sigma):
+ """Gaussian distribution.
-## -------------------- lognormal distribution --------------------
+ mu is the mean, and sigma is the standard deviation. This is
+ slightly faster than the normalvariate() function.
+
+ Not thread-safe without a lock around calls.
+
+ """
+ # When x and y are two variables from [0, 1), uniformly
+ # distributed, then
+ #
+ # cos(2*pi*x)*sqrt(-2*log(1-y))
+ # sin(2*pi*x)*sqrt(-2*log(1-y))
+ #
+ # are two *independent* variables with normal distribution
+ # (mu = 0, sigma = 1).
+ # (Lambert Meertens)
+ # (corrected version; bug discovered by Mike Miller, fixed by LM)
+
+ # Multithreading note: When two threads call this function
+ # simultaneously, it is possible that they will receive the
+ # same return value. The window is very small though. To
+ # avoid this, you have to use a lock around all calls. (I
+ # didn't want to slow this down in the serial case by using a
+ # lock here.)
+
+ random = self.random
+ z = self.gauss_next
+ self.gauss_next = None
+ if z is None:
+ x2pi = random() * TWOPI
+ g2rad = _sqrt(-2.0 * _log(1.0 - random()))
+ z = _cos(x2pi) * g2rad
+ self.gauss_next = _sin(x2pi) * g2rad
+
+ return mu + z * sigma
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
@@ -475,8 +593,6 @@ class Random(_random.Random):
"""
return _exp(self.normalvariate(mu, sigma))
-## -------------------- exponential distribution --------------------
-
def expovariate(self, lambd):
"""Exponential distribution.
@@ -492,9 +608,7 @@ class Random(_random.Random):
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
- return -_log(1.0 - self.random())/lambd
-
-## -------------------- von Mises distribution --------------------
+ return -_log(1.0 - self.random()) / lambd
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
@@ -505,10 +619,6 @@ class Random(_random.Random):
to a uniform random angle over the range 0 to 2*pi.
"""
- # mu: mean angle (in radians between 0 and 2*pi)
- # kappa: concentration parameter kappa (>= 0)
- # if kappa = 0 generate uniform random angle
-
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
@@ -523,7 +633,7 @@ class Random(_random.Random):
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
- while 1:
+ while True:
u1 = random()
z = _cos(_pi * u1)
@@ -542,8 +652,6 @@ class Random(_random.Random):
return theta
-## -------------------- gamma distribution --------------------
-
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
@@ -556,7 +664,6 @@ class Random(_random.Random):
math.gamma(alpha) * beta ** alpha
"""
-
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
@@ -577,32 +684,31 @@ class Random(_random.Random):
while 1:
u1 = random()
- if not 1e-7 < u1 < .9999999:
+ if not 1e-7 < u1 < 0.9999999:
continue
u2 = 1.0 - random()
- v = _log(u1/(1.0-u1))/ainv
- x = alpha*_exp(v)
- z = u1*u1*u2
- r = bbb+ccc*v-x
- if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
+ v = _log(u1 / (1.0 - u1)) / ainv
+ x = alpha * _exp(v)
+ z = u1 * u1 * u2
+ r = bbb + ccc * v - x
+ if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1/beta)
return -_log(1.0 - random()) * beta
- else: # alpha is between 0 and 1 (exclusive)
-
+ else:
+ # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
-
- while 1:
+ while True:
u = random()
- b = (_e + alpha)/_e
- p = b*u
+ b = (_e + alpha) / _e
+ p = b * u
if p <= 1.0:
- x = p ** (1.0/alpha)
+ x = p ** (1.0 / alpha)
else:
- x = -_log((b-p)/alpha)
+ x = -_log((b - p) / alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
@@ -611,61 +717,6 @@ class Random(_random.Random):
break
return x * beta
-## -------------------- Gauss (faster alternative) --------------------
-
- def gauss(self, mu, sigma):
- """Gaussian distribution.
-
- mu is the mean, and sigma is the standard deviation. This is
- slightly faster than the normalvariate() function.
-
- Not thread-safe without a lock around calls.
-
- """
-
- # When x and y are two variables from [0, 1), uniformly
- # distributed, then
- #
- # cos(2*pi*x)*sqrt(-2*log(1-y))
- # sin(2*pi*x)*sqrt(-2*log(1-y))
- #
- # are two *independent* variables with normal distribution
- # (mu = 0, sigma = 1).
- # (Lambert Meertens)
- # (corrected version; bug discovered by Mike Miller, fixed by LM)
-
- # Multithreading note: When two threads call this function
- # simultaneously, it is possible that they will receive the
- # same return value. The window is very small though. To
- # avoid this, you have to use a lock around all calls. (I
- # didn't want to slow this down in the serial case by using a
- # lock here.)
-
- random = self.random
- z = self.gauss_next
- self.gauss_next = None
- if z is None:
- x2pi = random() * TWOPI
- g2rad = _sqrt(-2.0 * _log(1.0 - random()))
- z = _cos(x2pi) * g2rad
- self.gauss_next = _sin(x2pi) * g2rad
-
- return mu + z*sigma
-
-## -------------------- beta --------------------
-## See
-## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
-## for Ivan Frohne's insightful analysis of why the original implementation:
-##
-## def betavariate(self, alpha, beta):
-## # Discrete Event Simulation in C, pp 87-88.
-##
-## y = self.expovariate(alpha)
-## z = self.expovariate(1.0/beta)
-## return z/(y+z)
-##
-## was dead wrong, and how it probably got that way.
-
def betavariate(self, alpha, beta):
"""Beta distribution.
@@ -673,25 +724,32 @@ class Random(_random.Random):
Returned values range between 0 and 1.
"""
+ ## See
+ ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
+ ## for Ivan Frohne's insightful analysis of why the original implementation:
+ ##
+ ## def betavariate(self, alpha, beta):
+ ## # Discrete Event Simulation in C, pp 87-88.
+ ##
+ ## y = self.expovariate(alpha)
+ ## z = self.expovariate(1.0/beta)
+ ## return z/(y+z)
+ ##
+ ## was dead wrong, and how it probably got that way.
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.0)
- if y == 0:
- return 0.0
- else:
+ if y:
return y / (y + self.gammavariate(beta, 1.0))
-
-## -------------------- Pareto --------------------
+ return 0.0
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
- return 1.0 / u ** (1.0/alpha)
-
-## -------------------- Weibull --------------------
+ return 1.0 / u ** (1.0 / alpha)
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
@@ -702,16 +760,20 @@ class Random(_random.Random):
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
- return alpha * (-_log(u)) ** (1.0/beta)
+ return alpha * (-_log(u)) ** (1.0 / beta)
+
+## ------------------------------------------------------------------
## --------------- Operating System Random Source ------------------
+
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
+
"""
def random(self):
@@ -720,12 +782,18 @@ class SystemRandom(Random):
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
- if k <= 0:
- raise ValueError('number of bits must be greater than zero')
+ if k < 0:
+ raise ValueError('number of bits must be non-negative')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
+ def randbytes(self, n):
+ """Generate n random bytes."""
+ # os.urandom(n) fails with ValueError for n < 0
+ # and returns an empty bytes string for n == 0.
+ return _urandom(n)
+
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
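
The relaxed getrandbits() contract and the new randbytes() method can be exercised directly; a sketch relying only on behavior shown in this hunk (illustrative):

from random import SystemRandom

sr = SystemRandom()
print(sr.getrandbits(0))      # now returns 0; k == 0 was rejected before 3.9
print(sr.getrandbits(16))     # an int in range(2**16)
print(sr.randbytes(8).hex())  # eight bytes straight from the OS source

try:
    sr.randbytes(-1)          # the ValueError comes from os.urandom() itself
except ValueError as exc:
    print(exc)
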
@@ -735,51 +803,11 @@ class SystemRandom(Random):
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
-## -------------------- test program --------------------
-
-def _test_generator(n, func, args):
- import time
- print(n, 'times', func.__name__)
- total = 0.0
- sqsum = 0.0
- smallest = 1e10
- largest = -1e10
- t0 = time.perf_counter()
- for i in range(n):
- x = func(*args)
- total += x
- sqsum = sqsum + x*x
- smallest = min(x, smallest)
- largest = max(x, largest)
- t1 = time.perf_counter()
- print(round(t1-t0, 3), 'sec,', end=' ')
- avg = total/n
- stddev = _sqrt(sqsum/n - avg*avg)
- print('avg %g, stddev %g, min %g, max %g\n' % \
- (avg, stddev, smallest, largest))
-
-
-def _test(N=2000):
- _test_generator(N, random, ())
- _test_generator(N, normalvariate, (0.0, 1.0))
- _test_generator(N, lognormvariate, (0.0, 1.0))
- _test_generator(N, vonmisesvariate, (0.0, 1.0))
- _test_generator(N, gammavariate, (0.01, 1.0))
- _test_generator(N, gammavariate, (0.1, 1.0))
- _test_generator(N, gammavariate, (0.1, 2.0))
- _test_generator(N, gammavariate, (0.5, 1.0))
- _test_generator(N, gammavariate, (0.9, 1.0))
- _test_generator(N, gammavariate, (1.0, 1.0))
- _test_generator(N, gammavariate, (2.0, 1.0))
- _test_generator(N, gammavariate, (20.0, 1.0))
- _test_generator(N, gammavariate, (200.0, 1.0))
- _test_generator(N, gauss, (0.0, 1.0))
- _test_generator(N, betavariate, (3.0, 3.0))
- _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
+# ----------------------------------------------------------------------
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
-#(both in the user's code and in the Python libraries), but that's fine
+# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
@@ -806,6 +834,50 @@ weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
+randbytes = _inst.randbytes
+
+
+## ------------------------------------------------------
+## ----------------- test program -----------------------
+
+def _test_generator(n, func, args):
+ from statistics import stdev, fmean as mean
+ from time import perf_counter
+
+ t0 = perf_counter()
+ data = [func(*args) for i in range(n)]
+ t1 = perf_counter()
+
+ xbar = mean(data)
+ sigma = stdev(data, xbar)
+ low = min(data)
+ high = max(data)
+
+ print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}')
+ print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high))
+
+
+def _test(N=2000):
+ _test_generator(N, random, ())
+ _test_generator(N, normalvariate, (0.0, 1.0))
+ _test_generator(N, lognormvariate, (0.0, 1.0))
+ _test_generator(N, vonmisesvariate, (0.0, 1.0))
+ _test_generator(N, gammavariate, (0.01, 1.0))
+ _test_generator(N, gammavariate, (0.1, 1.0))
+ _test_generator(N, gammavariate, (0.1, 2.0))
+ _test_generator(N, gammavariate, (0.5, 1.0))
+ _test_generator(N, gammavariate, (0.9, 1.0))
+ _test_generator(N, gammavariate, (1.0, 1.0))
+ _test_generator(N, gammavariate, (2.0, 1.0))
+ _test_generator(N, gammavariate, (20.0, 1.0))
+ _test_generator(N, gammavariate, (200.0, 1.0))
+ _test_generator(N, gauss, (0.0, 1.0))
+ _test_generator(N, betavariate, (3.0, 3.0))
+ _test_generator(N, triangular, (0.0, 1.0, 1.0 / 3.0))
+
+
+## ------------------------------------------------------
+## ------------------ fork support ---------------------
if hasattr(_os, "fork"):
_os.register_at_fork(after_in_child=_inst.seed)
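
The register_at_fork() hook registered above reseeds the shared module-level instance in each child, so forked workers stop replaying the parent's stream. A POSIX-only sketch (assumes os.fork is available, illustrative):

import os
import random

random.random()  # give the shared instance some state
pid = os.fork()
if pid == 0:
    # Child: the after_in_child hook has already reseeded _inst,
    # so this draw diverges from the parent's next draw.
    print('child :', random.random())
    os._exit(0)
print('parent:', random.random())
os.waitpid(pid, 0)
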
diff --git a/x64/Lib/re.py b/x64/Lib/re.py
index 8f1d55d..bfb7b1c 100644
--- a/x64/Lib/re.py
+++ b/x64/Lib/re.py
@@ -44,7 +44,7 @@ The special characters are:
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
- (?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
+ (?aiLmsux) The letters set the corresponding flags defined below.
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
@@ -97,7 +97,9 @@ This module exports the following functions:
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
-Some of the functions in this module takes flags as optional parameters:
+Each function other than purge and escape can take an optional 'flags' argument
+consisting of one or more of the following module constants, joined by "|".
+A, L, and U are mutually exclusive.
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
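
A quick sketch of the reworded flag documentation, joining module constants with "|" (illustrative):

import re

# ASCII restricts \w to [a-zA-Z0-9_] even for str patterns;
# IGNORECASE is OR-ed in simply to show the flags combining.
print(re.findall(r'\w+', 'Füße Fuss', flags=re.ASCII | re.IGNORECASE))
# -> ['F', 'e', 'Fuss']
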
diff --git a/x64/Lib/runpy.py b/x64/Lib/runpy.py
index 8adc91e..7e1e1ac 100644
--- a/x64/Lib/runpy.py
+++ b/x64/Lib/runpy.py
@@ -15,6 +15,7 @@ import importlib.machinery # importlib first so we can test #15386 via -m
import importlib.util
import io
import types
+import os
from pkgutil import read_code, get_importer
__all__ = [
@@ -132,6 +133,9 @@ def _get_module_details(mod_name, error=ImportError):
# importlib, where the latter raises other errors for cases where
# pkgutil previously raised ImportError
msg = "Error while finding module specification for {!r} ({}: {})"
+ if mod_name.endswith(".py"):
+ msg += (f". Try using '{mod_name[:-3]}' instead of "
+ f"'{mod_name}' as the module name.")
raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
if spec is None:
raise error("No module named %s" % mod_name)
@@ -229,11 +233,12 @@ def _get_main_module_details(error=ImportError):
def _get_code_from_file(run_name, fname):
# Check for a compiled file first
- with io.open_code(fname) as f:
+ decoded_path = os.path.abspath(os.fsdecode(fname))
+ with io.open_code(decoded_path) as f:
code = read_code(f)
if code is None:
# That didn't work, so try it as normal source code
- with io.open_code(fname) as f:
+ with io.open_code(decoded_path) as f:
code = compile(f.read(), fname, 'exec')
return code, fname
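
A sketch of the friendlier message; the hint is appended only when find_spec() itself errors out, which is what happens when a '.py' suffix makes a plain module look like a package path (the module name here is arbitrary, illustrative):

import runpy

try:
    runpy.run_module('statistics.py')  # a file name passed as a module name
except ImportError as exc:
    # 3.9 appends: Try using 'statistics' instead of 'statistics.py'
    # as the module name.
    print(exc)
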
diff --git a/x64/Lib/secrets.py b/x64/Lib/secrets.py
index 1304342..a546efb 100644
--- a/x64/Lib/secrets.py
+++ b/x64/Lib/secrets.py
@@ -14,7 +14,6 @@ __all__ = ['choice', 'randbelow', 'randbits', 'SystemRandom',
import base64
import binascii
-import os
from hmac import compare_digest
from random import SystemRandom
@@ -44,7 +43,7 @@ def token_bytes(nbytes=None):
"""
if nbytes is None:
nbytes = DEFAULT_ENTROPY
- return os.urandom(nbytes)
+ return _sysrand.randbytes(nbytes)
def token_hex(nbytes=None):
"""Return a random text string, in hexadecimal.
diff --git a/x64/Lib/selectors.py b/x64/Lib/selectors.py
index a9a0801..bb15a1c 100644
--- a/x64/Lib/selectors.py
+++ b/x64/Lib/selectors.py
@@ -57,6 +57,7 @@ if sys.version_info >= (3, 5):
SelectorKey.data.__doc__ = ('''Optional opaque data associated to this file object.
For example, this could be used to store a per-client session ID.''')
+
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
@@ -552,7 +553,10 @@ if hasattr(select, 'kqueue'):
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
- max_ev = len(self._fd_to_key)
+ # If max_ev is 0, kqueue will ignore the timeout. For consistent
+ # behavior with the other selector classes, we prevent that here
+ # (using max). See https://bugs.python.org/issue29255
+ max_ev = max(len(self._fd_to_key), 1)
ready = []
try:
kev_list = self._selector.control(None, max_ev, timeout)
@@ -577,16 +581,39 @@ if hasattr(select, 'kqueue'):
super().close()
+def _can_use(method):
+ """Check if we can use the selector depending upon the
+ operating system. """
+ # Implementation based upon https://github.com/sethmlarson/selectors2/blob/master/selectors2.py
+ selector = getattr(select, method, None)
+ if selector is None:
+ # select module does not implement method
+ return False
+ # check if the OS and Kernel actually support the method. Call may fail with
+ # OSError: [Errno 38] Function not implemented
+ try:
+ selector_obj = selector()
+ if method == 'poll':
+ # check that poll actually works
+ selector_obj.poll(0)
+ else:
+ # close epoll, kqueue, and devpoll fd
+ selector_obj.close()
+ return True
+ except OSError:
+ return False
+
+
# Choose the best implementation, roughly:
# epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
-if 'KqueueSelector' in globals():
+if _can_use('kqueue'):
DefaultSelector = KqueueSelector
-elif 'EpollSelector' in globals():
+elif _can_use('epoll'):
DefaultSelector = EpollSelector
-elif 'DevpollSelector' in globals():
+elif _can_use('devpoll'):
DefaultSelector = DevpollSelector
-elif 'PollSelector' in globals():
+elif _can_use('poll'):
DefaultSelector = PollSelector
else:
DefaultSelector = SelectSelector
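
With the probing above, DefaultSelector reflects what the running kernel actually supports rather than what the select module merely exposes. A small end-to-end sketch (illustrative):

import selectors
import socket

sel = selectors.DefaultSelector()
print(type(sel).__name__)  # e.g. EpollSelector or KqueueSelector

a, b = socket.socketpair()
sel.register(b, selectors.EVENT_READ)
a.send(b'ping')
for key, events in sel.select(timeout=1):
    print(key.fileobj.recv(4))  # b'ping'
sel.close()
a.close()
b.close()
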
diff --git a/x64/Lib/shlex.py b/x64/Lib/shlex.py
index c817274..4801a6c 100644
--- a/x64/Lib/shlex.py
+++ b/x64/Lib/shlex.py
@@ -304,6 +304,10 @@ class shlex:
def split(s, comments=False, posix=True):
"""Split the string *s* using shell-like syntax."""
+ if s is None:
+ import warnings
+ warnings.warn("Passing None for 's' to shlex.split() is deprecated.",
+ DeprecationWarning, stacklevel=2)
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
diff --git a/x64/Lib/shutil.py b/x64/Lib/shutil.py
index 1f05d80..f0e833d 100644
--- a/x64/Lib/shutil.py
+++ b/x64/Lib/shutil.py
@@ -53,6 +53,9 @@ COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 64 * 1024
_USE_CP_SENDFILE = hasattr(os, "sendfile") and sys.platform.startswith("linux")
_HAS_FCOPYFILE = posix and hasattr(posix, "_fcopyfile") # macOS
+# CMD defaults in Windows 10
+_WIN_DEFAULT_PATHEXT = ".COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC"
+
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
@@ -708,7 +711,7 @@ def rmtree(path, ignore_errors=False, onerror=None):
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
- onerror(os.lstat, path, sys.exc_info())
+ onerror(os.open, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
@@ -741,8 +744,20 @@ def rmtree(path, ignore_errors=False, onerror=None):
rmtree.avoids_symlink_attacks = _use_fd_functions
def _basename(path):
- # A basename() variant which first strips the trailing slash, if present.
- # Thus we always get the last component of the path, even for directories.
+ """A basename() variant which first strips the trailing slash, if present.
+ Thus we always get the last component of the path, even for directories.
+
+ path: Union[PathLike, str]
+
+ e.g.
+ >>> os.path.basename('/bar/foo')
+ 'foo'
+ >>> os.path.basename('/bar/foo/')
+ ''
+ >>> _basename('/bar/foo/')
+ 'foo'
+ """
+ path = os.fspath(path)
sep = os.path.sep + (os.path.altsep or '')
return os.path.basename(path.rstrip(sep))
@@ -781,7 +796,10 @@ def move(src, dst, copy_function=copy2):
os.rename(src, dst)
return
+ # Using _basename instead of os.path.basename is important, as we must
+ # ignore any trailing slash to avoid the basename returning ''
real_dst = os.path.join(dst, _basename(src))
+
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
@@ -1400,7 +1418,9 @@ def which(cmd, mode=os.F_OK | os.X_OK, path=None):
path.insert(0, curdir)
# PATHEXT is necessary to check on Windows.
- pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+ pathext_source = os.getenv("PATHEXT") or _WIN_DEFAULT_PATHEXT
+ pathext = [ext for ext in pathext_source.split(os.pathsep) if ext]
+
if use_bytes:
pathext = [os.fsencode(ext) for ext in pathext]
# See if the given file matches any of the expected path extensions.
diff --git a/x64/Lib/site.py b/x64/Lib/site.py
index a065ab0..9e617af 100644
--- a/x64/Lib/site.py
+++ b/x64/Lib/site.py
@@ -334,13 +334,22 @@ def getsitepackages(prefixes=None):
continue
seen.add(prefix)
+ libdirs = [sys.platlibdir]
+ if sys.platlibdir != "lib":
+ libdirs.append("lib")
+
if os.sep == '/':
- sitepackages.append(os.path.join(prefix, "lib",
- "python%d.%d" % sys.version_info[:2],
- "site-packages"))
+ for libdir in libdirs:
+ path = os.path.join(prefix, libdir,
+ "python%d.%d" % sys.version_info[:2],
+ "site-packages")
+ sitepackages.append(path)
else:
sitepackages.append(prefix)
- sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
+
+ for libdir in libdirs:
+ path = os.path.join(prefix, libdir, "site-packages")
+ sitepackages.append(path)
return sitepackages
def addsitepackages(known_paths, prefixes=None):
@@ -444,9 +453,9 @@ def enablerlcompleter():
def write_history():
try:
readline.write_history_file(history)
- except (FileNotFoundError, PermissionError):
- # home directory does not exist or is not writable
- # https://bugs.python.org/issue19891
+ except OSError:
+ # bpo-19891, bpo-41193: Home directory does not exist
+ # or is not writable, or the filesystem is read-only.
pass
atexit.register(write_history)
@@ -590,7 +599,7 @@ def _script():
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
- 2 - uses site directory is disabled by super user
+ 2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
diff --git a/x64/Lib/smtpd.py b/x64/Lib/smtpd.py
index 8103ca9..8f1a22e 100644
--- a/x64/Lib/smtpd.py
+++ b/x64/Lib/smtpd.py
@@ -779,6 +779,8 @@ class PureProxy(SMTPServer):
class MailmanProxy(PureProxy):
def __init__(self, *args, **kwargs):
+ warn('MailmanProxy is deprecated and will be removed '
+ 'in future', DeprecationWarning, 2)
if 'enable_SMTPUTF8' in kwargs and kwargs['enable_SMTPUTF8']:
raise ValueError("MailmanProxy does not support SMTPUTF8.")
super(PureProxy, self).__init__(*args, **kwargs)
diff --git a/x64/Lib/smtplib.py b/x64/Lib/smtplib.py
index 8e3d4bf..7808ba0 100644
--- a/x64/Lib/smtplib.py
+++ b/x64/Lib/smtplib.py
@@ -303,6 +303,8 @@ class SMTP:
def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
+ if timeout is not None and not timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
if self.debuglevel > 0:
self._print_debug('connect: to', (host, port), self.source_address)
return socket.create_connection((host, port), timeout,
@@ -333,8 +335,6 @@ class SMTP:
raise OSError("nonnumeric port")
if not port:
port = self.default_port
- if self.debuglevel > 0:
- self._print_debug('connect:', (host, port))
sys.audit("smtplib.connect", self, host, port)
self.sock = self._get_socket(host, port, self.timeout)
self.file = None
@@ -1032,13 +1032,12 @@ if _have_ssl:
keyfile=keyfile)
self.context = context
SMTP.__init__(self, host, port, local_hostname, timeout,
- source_address)
+ source_address)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
- new_socket = socket.create_connection((host, port), timeout,
- self.source_address)
+ new_socket = super()._get_socket(host, port, timeout)
new_socket = self.context.wrap_socket(new_socket,
server_hostname=self._host)
return new_socket
@@ -1067,19 +1066,23 @@ class LMTP(SMTP):
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
- source_address=None):
+ source_address=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Initialize a new instance."""
- SMTP.__init__(self, host, port, local_hostname=local_hostname,
- source_address=source_address)
+ super().__init__(host, port, local_hostname=local_hostname,
+ source_address=source_address, timeout=timeout)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
- return SMTP.connect(self, host, port, source_address=source_address)
+ return super().connect(host, port, source_address=source_address)
+
+ if self.timeout is not None and not self.timeout:
+ raise ValueError('Non-blocking socket (timeout=0) is not supported')
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.settimeout(self.timeout)
self.file = None
self.sock.connect(host)
except OSError:
diff --git a/x64/Lib/sndhdr.py b/x64/Lib/sndhdr.py
index 5943531..96595c6 100644
--- a/x64/Lib/sndhdr.py
+++ b/x64/Lib/sndhdr.py
@@ -241,7 +241,7 @@ def testall(list, recursive, toplevel):
if recursive or toplevel:
print('recursing down:')
import glob
- names = glob.glob(os.path.join(filename, '*'))
+ names = glob.glob(os.path.join(glob.escape(filename), '*'))
testall(names, recursive, 0)
else:
print('*** directory (use -r) ***')
diff --git a/x64/Lib/socket.py b/x64/Lib/socket.py
index f83f36d..cafa573 100644
--- a/x64/Lib/socket.py
+++ b/x64/Lib/socket.py
@@ -12,6 +12,8 @@ Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
+send_fds() -- send file descriptors to the socket.
+recv_fds() -- receive file descriptors from the socket.
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
@@ -104,7 +106,6 @@ def _intenum_converter(value, enum_klass):
except ValueError:
return value
-_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
@@ -543,6 +544,40 @@ def fromfd(fd, family, type, proto=0):
nfd = dup(fd)
return socket(family, type, proto, nfd)
+if hasattr(_socket.socket, "sendmsg"):
+ import array
+
+ def send_fds(sock, buffers, fds, flags=0, address=None):
+ """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer
+
+ Send the list of file descriptors fds over an AF_UNIX socket.
+ """
+ return sock.sendmsg(buffers, [(_socket.SOL_SOCKET,
+ _socket.SCM_RIGHTS, array.array("i", fds))])
+ __all__.append("send_fds")
+
+if hasattr(_socket.socket, "recvmsg"):
+ import array
+
+ def recv_fds(sock, bufsize, maxfds, flags=0):
+ """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file
+ descriptors, msg_flags, address)
+
+ Receive up to maxfds file descriptors returning the message
+ data and a list containing the descriptors.
+ """
+ # Array of ints
+ fds = array.array("i")
+ msg, ancdata, flags, addr = sock.recvmsg(bufsize,
+ _socket.CMSG_LEN(maxfds * fds.itemsize))
+ for cmsg_level, cmsg_type, cmsg_data in ancdata:
+ if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS):
+ fds.frombytes(cmsg_data[:
+ len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
+
+ return msg, list(fds), flags, addr
+ __all__.append("recv_fds")
+
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
diff --git a/x64/Lib/socketserver.py b/x64/Lib/socketserver.py
index 1ad028f..57c1ae6 100644
--- a/x64/Lib/socketserver.py
+++ b/x64/Lib/socketserver.py
@@ -374,7 +374,7 @@ class BaseServer:
"""
print('-'*40, file=sys.stderr)
- print('Exception happened during processing of request from',
+ print('Exception occurred during processing of request from',
client_address, file=sys.stderr)
import traceback
traceback.print_exc()
diff --git a/x64/Lib/ssl.py b/x64/Lib/ssl.py
index 0726cae..30f4e59 100644
--- a/x64/Lib/ssl.py
+++ b/x64/Lib/ssl.py
@@ -252,7 +252,7 @@ class _TLSMessageType(_IntEnum):
if sys.platform == "win32":
from _ssl import enum_certificates, enum_crls
-from socket import socket, AF_INET, SOCK_STREAM, create_connection
+from socket import socket, SOCK_STREAM, create_connection
from socket import SOL_SOCKET, SO_TYPE
import socket as _socket
import base64 # for DER-to-PEM translation
diff --git a/x64/Lib/statistics.py b/x64/Lib/statistics.py
index 1e95c0b..f9d3802 100644
--- a/x64/Lib/statistics.py
+++ b/x64/Lib/statistics.py
@@ -163,7 +163,7 @@ def _sum(data, start=0):
T = _coerce(int, type(start))
for typ, values in groupby(data, type):
T = _coerce(T, typ) # or raise TypeError
- for n,d in map(_exact_ratio, values):
+ for n, d in map(_exact_ratio, values):
count += 1
partials[d] = partials_get(d, 0) + n
if None in partials:
@@ -261,7 +261,7 @@ def _convert(value, T):
return T(value)
except TypeError:
if issubclass(T, Decimal):
- return T(value.numerator)/T(value.denominator)
+ return T(value.numerator) / T(value.denominator)
else:
raise
@@ -277,8 +277,8 @@ def _find_lteq(a, x):
def _find_rteq(a, l, x):
'Locate the rightmost value exactly equal to x'
i = bisect_right(a, x, lo=l)
- if i != (len(a)+1) and a[i-1] == x:
- return i-1
+ if i != (len(a) + 1) and a[i - 1] == x:
+ return i - 1
raise ValueError
@@ -315,7 +315,7 @@ def mean(data):
raise StatisticsError('mean requires at least one data point')
T, total, count = _sum(data)
assert count == n
- return _convert(total/n, T)
+ return _convert(total / n, T)
def fmean(data):
@@ -403,11 +403,11 @@ def harmonic_mean(data):
else:
raise TypeError('unsupported type')
try:
- T, total, count = _sum(1/x for x in _fail_neg(data, errmsg))
+ T, total, count = _sum(1 / x for x in _fail_neg(data, errmsg))
except ZeroDivisionError:
return 0
assert count == n
- return _convert(n/total, T)
+ return _convert(n / total, T)
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
@@ -428,11 +428,11 @@ def median(data):
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
- if n%2 == 1:
- return data[n//2]
+ if n % 2 == 1:
+ return data[n // 2]
else:
- i = n//2
- return (data[i - 1] + data[i])/2
+ i = n // 2
+ return (data[i - 1] + data[i]) / 2
def median_low(data):
@@ -451,10 +451,10 @@ def median_low(data):
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
- if n%2 == 1:
- return data[n//2]
+ if n % 2 == 1:
+ return data[n // 2]
else:
- return data[n//2 - 1]
+ return data[n // 2 - 1]
def median_high(data):
@@ -473,7 +473,7 @@ def median_high(data):
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
- return data[n//2]
+ return data[n // 2]
def median_grouped(data, interval=1):
@@ -510,15 +510,15 @@ def median_grouped(data, interval=1):
return data[0]
# Find the value at the midpoint. Remember this corresponds to the
# centre of the class interval.
- x = data[n//2]
+ x = data[n // 2]
for obj in (x, interval):
if isinstance(obj, (str, bytes)):
raise TypeError('expected number but got %r' % obj)
try:
- L = x - interval/2 # The lower limit of the median interval.
+ L = x - interval / 2 # The lower limit of the median interval.
except TypeError:
# Mixed type. For now we just coerce to float.
- L = float(x) - float(interval)/2
+ L = float(x) - float(interval) / 2
# Uses bisection search to search for x in data with log(n) time complexity
# Find the position of leftmost occurrence of x in data
@@ -528,7 +528,7 @@ def median_grouped(data, interval=1):
l2 = _find_rteq(data, l1, x)
cf = l1
f = l2 - l1 + 1
- return L + interval*(n/2 - cf)/f
+ return L + interval * (n / 2 - cf) / f
def mode(data):
@@ -554,8 +554,7 @@ def mode(data):
If *data* is empty, ``mode``, raises StatisticsError.
"""
- data = iter(data)
- pairs = Counter(data).most_common(1)
+ pairs = Counter(iter(data)).most_common(1)
try:
return pairs[0][0]
except IndexError:
@@ -597,7 +596,7 @@ def multimode(data):
# For sample data where there is a positive probability for values
# beyond the range of the data, the R6 exclusive method is a
# reasonable choice. Consider a random sample of nine values from a
-# population with a uniform distribution from 0.0 to 100.0. The
+# population with a uniform distribution from 0.0 to 1.0. The
# distribution of the third ranked sample point is described by
# betavariate(alpha=3, beta=7) which has mode=0.250, median=0.286, and
# mean=0.300. Only the latter (which corresponds with R6) gives the
@@ -643,9 +642,8 @@ def quantiles(data, *, n=4, method='exclusive'):
m = ld - 1
result = []
for i in range(1, n):
- j = i * m // n
- delta = i*m - j*n
- interpolated = (data[j] * (n - delta) + data[j+1] * delta) / n
+ j, delta = divmod(i * m, n)
+ interpolated = (data[j] * (n - delta) + data[j + 1] * delta) / n
result.append(interpolated)
return result
if method == 'exclusive':
@@ -655,7 +653,7 @@ def quantiles(data, *, n=4, method='exclusive'):
j = i * m // n # rescale i to m/n
j = 1 if j < 1 else ld-1 if j > ld-1 else j # clamp to 1 .. ld-1
delta = i*m - j*n # exact integer math
- interpolated = (data[j-1] * (n - delta) + data[j] * delta) / n
+ interpolated = (data[j - 1] * (n - delta) + data[j] * delta) / n
result.append(interpolated)
return result
raise ValueError(f'Unknown method: {method!r}')
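
The divmod() rewrite is behavior-preserving: j and delta are just the quotient and remainder of i*m by n. A tiny check against the inclusive method (illustrative):

from statistics import quantiles

data = [1, 2, 3, 4, 5]
# m = len(data) - 1 = 4, so with n=4 every divmod(i * m, n) gives
# delta == 0 and the cut points land exactly on the data.
print(quantiles(data, n=4, method='inclusive'))  # [2.0, 3.0, 4.0]
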
@@ -682,14 +680,16 @@ def _ss(data, c=None):
calculated from ``c`` as given. Use the second case with care, as it can
lead to garbage results.
"""
- if c is None:
- c = mean(data)
+ if c is not None:
+ T, total, count = _sum((x-c)**2 for x in data)
+ return (T, total)
+ c = mean(data)
T, total, count = _sum((x-c)**2 for x in data)
# The following sum should mathematically equal zero, but due to rounding
# error may not.
- U, total2, count2 = _sum((x-c) for x in data)
+ U, total2, count2 = _sum((x - c) for x in data)
assert T == U and count == count2
- total -= total2**2/len(data)
+ total -= total2 ** 2 / len(data)
assert not total < 0, 'negative sum of square deviations: %f' % total
return (T, total)
@@ -738,7 +738,7 @@ def variance(data, xbar=None):
if n < 2:
raise StatisticsError('variance requires at least two data points')
T, ss = _ss(data, xbar)
- return _convert(ss/(n-1), T)
+ return _convert(ss / (n - 1), T)
def pvariance(data, mu=None):
@@ -782,7 +782,7 @@ def pvariance(data, mu=None):
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
T, ss = _ss(data, mu)
- return _convert(ss/n, T)
+ return _convert(ss / n, T)
def stdev(data, xbar=None):
@@ -894,6 +894,13 @@ def _normal_dist_inv_cdf(p, mu, sigma):
return mu + (x * sigma)
+# If available, use C implementation
+try:
+ from _statistics import _normal_dist_inv_cdf
+except ImportError:
+ pass
+
+
class NormalDist:
"Normal distribution of a random variable"
# https://en.wikipedia.org/wiki/Normal_distribution
@@ -984,7 +991,7 @@ class NormalDist:
if not isinstance(other, NormalDist):
raise TypeError('Expected another NormalDist instance')
X, Y = self, other
- if (Y._sigma, Y._mu) < (X._sigma, X._mu): # sort to assure commutativity
+ if (Y._sigma, Y._mu) < (X._sigma, X._mu): # sort to assure commutativity
X, Y = Y, X
X_var, Y_var = X.variance, Y.variance
if not X_var or not Y_var:
@@ -999,6 +1006,17 @@ class NormalDist:
x2 = (a - b) / dv
return 1.0 - (fabs(Y.cdf(x1) - X.cdf(x1)) + fabs(Y.cdf(x2) - X.cdf(x2)))
+ def zscore(self, x):
+ """Compute the Standard Score. (x - mean) / stdev
+
+ Describes *x* in terms of the number of standard deviations
+ above or below the mean of the normal distribution.
+ """
+ # https://www.statisticshowto.com/probability-and-statistics/z-score/
+ if not self._sigma:
+ raise StatisticsError('zscore() not defined when sigma is zero')
+ return (x - self._mu) / self._sigma
+
@property
def mean(self):
"Arithmetic mean of the normal distribution."
@@ -1100,79 +1118,3 @@ class NormalDist:
def __repr__(self):
return f'{type(self).__name__}(mu={self._mu!r}, sigma={self._sigma!r})'
-
-# If available, use C implementation
-try:
- from _statistics import _normal_dist_inv_cdf
-except ImportError:
- pass
-
-
-if __name__ == '__main__':
-
- # Show math operations computed analytically in comparsion
- # to a monte carlo simulation of the same operations
-
- from math import isclose
- from operator import add, sub, mul, truediv
- from itertools import repeat
- import doctest
-
- g1 = NormalDist(10, 20)
- g2 = NormalDist(-5, 25)
-
- # Test scaling by a constant
- assert (g1 * 5 / 5).mean == g1.mean
- assert (g1 * 5 / 5).stdev == g1.stdev
-
- n = 100_000
- G1 = g1.samples(n)
- G2 = g2.samples(n)
-
- for func in (add, sub):
- print(f'\nTest {func.__name__} with another NormalDist:')
- print(func(g1, g2))
- print(NormalDist.from_samples(map(func, G1, G2)))
-
- const = 11
- for func in (add, sub, mul, truediv):
- print(f'\nTest {func.__name__} with a constant:')
- print(func(g1, const))
- print(NormalDist.from_samples(map(func, G1, repeat(const))))
-
- const = 19
- for func in (add, sub, mul):
- print(f'\nTest constant with {func.__name__}:')
- print(func(const, g1))
- print(NormalDist.from_samples(map(func, repeat(const), G1)))
-
- def assert_close(G1, G2):
- assert isclose(G1.mean, G1.mean, rel_tol=0.01), (G1, G2)
- assert isclose(G1.stdev, G2.stdev, rel_tol=0.01), (G1, G2)
-
- X = NormalDist(-105, 73)
- Y = NormalDist(31, 47)
- s = 32.75
- n = 100_000
-
- S = NormalDist.from_samples([x + s for x in X.samples(n)])
- assert_close(X + s, S)
-
- S = NormalDist.from_samples([x - s for x in X.samples(n)])
- assert_close(X - s, S)
-
- S = NormalDist.from_samples([x * s for x in X.samples(n)])
- assert_close(X * s, S)
-
- S = NormalDist.from_samples([x / s for x in X.samples(n)])
- assert_close(X / s, S)
-
- S = NormalDist.from_samples([x + y for x, y in zip(X.samples(n),
- Y.samples(n))])
- assert_close(X + Y, S)
-
- S = NormalDist.from_samples([x - y for x, y in zip(X.samples(n),
- Y.samples(n))])
- assert_close(X - Y, S)
-
- print(doctest.testmod())
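
A short sketch of the new zscore() method added in this file (illustrative):

from statistics import NormalDist, StatisticsError

iq = NormalDist(100, 15)
print(iq.zscore(130))  # 2.0 -- (130 - 100) / 15

try:
    NormalDist(50, 0).zscore(60)
except StatisticsError as exc:
    print(exc)  # zscore() not defined when sigma is zero
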
diff --git a/x64/Lib/string.py b/x64/Lib/string.py
index b423ff5..489777b 100644
--- a/x64/Lib/string.py
+++ b/x64/Lib/string.py
@@ -54,30 +54,7 @@ from collections import ChainMap as _ChainMap
_sentinel_dict = {}
-class _TemplateMetaclass(type):
- pattern = r"""
- %(delim)s(?:
- (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
- (?P<named>%(id)s) | # delimiter and a Python identifier
- {(?P<braced>%(bid)s)} | # delimiter and a braced identifier
- (?P<invalid>) # Other ill-formed delimiter exprs
- )
- """
-
- def __init__(cls, name, bases, dct):
- super(_TemplateMetaclass, cls).__init__(name, bases, dct)
- if 'pattern' in dct:
- pattern = cls.pattern
- else:
- pattern = _TemplateMetaclass.pattern % {
- 'delim' : _re.escape(cls.delimiter),
- 'id' : cls.idpattern,
- 'bid' : cls.braceidpattern or cls.idpattern,
- }
- cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
-
-
-class Template(metaclass=_TemplateMetaclass):
+class Template:
"""A string class for supporting $-substitutions."""
delimiter = '$'
@@ -89,6 +66,24 @@ class Template(metaclass=_TemplateMetaclass):
braceidpattern = None
flags = _re.IGNORECASE
+ def __init_subclass__(cls):
+ super().__init_subclass__()
+ if 'pattern' in cls.__dict__:
+ pattern = cls.pattern
+ else:
+ delim = _re.escape(cls.delimiter)
+ id = cls.idpattern
+ bid = cls.braceidpattern or cls.idpattern
+ pattern = fr"""
+ {delim}(?:
+ (?P<escaped>{delim}) | # Escape sequence of two delimiters
+ (?P<named>{id}) | # delimiter and a Python identifier
+ {{(?P<braced>{bid})}} | # delimiter and a braced identifier
+ (?P<invalid>) # Other ill-formed delimiter exprs
+ )
+ """
+ cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
+
def __init__(self, template):
self.template = template
@@ -146,6 +141,9 @@ class Template(metaclass=_TemplateMetaclass):
self.pattern)
return self.pattern.sub(convert, self.template)
+# Initialize Template.pattern. __init_subclass__() is automatically called
+# only for subclasses, not for the Template class itself.
+Template.__init_subclass__()
########################################################################
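
With the metaclass gone, pattern compilation happens in __init_subclass__, so a subclass overriding delimiter still gets its regex rebuilt; the explicit call above covers Template itself. A sketch (illustrative):

from string import Template

class PercentTemplate(Template):
    delimiter = '%'  # __init_subclass__ recompiles cls.pattern for us

t = PercentTemplate('%greeting, %%literal')
print(t.substitute(greeting='hello'))  # hello, %literal
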
diff --git a/x64/Lib/subprocess.py b/x64/Lib/subprocess.py
index 5c2c2f0..f1d829a 100644
--- a/x64/Lib/subprocess.py
+++ b/x64/Lib/subprocess.py
@@ -52,7 +52,16 @@ import threading
import warnings
import contextlib
from time import monotonic as _time
+import types
+try:
+ import pwd
+except ImportError:
+ pwd = None
+try:
+ import grp
+except ImportError:
+ grp = None
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "run", "CalledProcessError", "DEVNULL",
@@ -317,7 +326,7 @@ def _args_from_interpreter_flags():
if dev_mode:
args.extend(('-X', 'dev'))
for opt in ('faulthandler', 'tracemalloc', 'importtime',
- 'showalloccount', 'showrefcount', 'utf8'):
+ 'showrefcount', 'utf8', 'oldparser'):
if opt in xoptions:
value = xoptions[opt]
if value is True:
@@ -438,6 +447,9 @@ class CompletedProcess(object):
args.append('stderr={!r}'.format(self.stderr))
return "{}({})".format(type(self).__name__, ', '.join(args))
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+
def check_returncode(self):
"""Raise CalledProcessError if the exit code is non-zero."""
if self.returncode:
@@ -719,6 +731,14 @@ class Popen(object):
start_new_session (POSIX only)
+ group (POSIX only)
+
+ extra_groups (POSIX only)
+
+ user (POSIX only)
+
+ umask (POSIX only)
+
pass_fds (POSIX only)
encoding and errors: Text mode encoding and error handling to use for
@@ -735,7 +755,8 @@ class Popen(object):
shell=False, cwd=None, env=None, universal_newlines=None,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
- pass_fds=(), *, encoding=None, errors=None, text=None):
+ pass_fds=(), *, user=None, group=None, extra_groups=None,
+ encoding=None, errors=None, text=None, umask=-1):
"""Create new Popen instance."""
_cleanup()
# Held while anything is calling waitpid before returncode has been
@@ -833,6 +854,78 @@ class Popen(object):
else:
line_buffering = False
+ gid = None
+ if group is not None:
+ if not hasattr(os, 'setregid'):
+ raise ValueError("The 'group' parameter is not supported on the "
+ "current platform")
+
+ elif isinstance(group, str):
+ if grp is None:
+ raise ValueError("The group parameter cannot be a string "
+ "on systems without the grp module")
+
+ gid = grp.getgrnam(group).gr_gid
+ elif isinstance(group, int):
+ gid = group
+ else:
+ raise TypeError("Group must be a string or an integer, not {}"
+ .format(type(group)))
+
+ if gid < 0:
+ raise ValueError(f"Group ID cannot be negative, got {gid}")
+
+ gids = None
+ if extra_groups is not None:
+ if not hasattr(os, 'setgroups'):
+ raise ValueError("The 'extra_groups' parameter is not "
+ "supported on the current platform")
+
+ elif isinstance(extra_groups, str):
+ raise ValueError("Groups must be a list, not a string")
+
+ gids = []
+ for extra_group in extra_groups:
+ if isinstance(extra_group, str):
+ if grp is None:
+ raise ValueError("Items in extra_groups cannot be "
+ "strings on systems without the "
+ "grp module")
+
+ gids.append(grp.getgrnam(extra_group).gr_gid)
+ elif isinstance(extra_group, int):
+ gids.append(extra_group)
+ else:
+ raise TypeError("Items in extra_groups must be a string "
+ "or integer, not {}"
+ .format(type(extra_group)))
+
+ # make sure that the gids are all positive here so we can do less
+ # checking in the C code
+ for gid_check in gids:
+ if gid_check < 0:
+ raise ValueError(f"Group ID cannot be negative, got {gid_check}")
+
+ uid = None
+ if user is not None:
+ if not hasattr(os, 'setreuid'):
+ raise ValueError("The 'user' parameter is not supported on "
+ "the current platform")
+
+ elif isinstance(user, str):
+ if pwd is None:
+ raise ValueError("The user parameter cannot be a string "
+ "on systems without the pwd module")
+
+ uid = pwd.getpwnam(user).pw_uid
+ elif isinstance(user, int):
+ uid = user
+ else:
+ raise TypeError("User must be a string or an integer")
+
+ if uid < 0:
+ raise ValueError(f"User ID cannot be negative, got {uid}")
+
try:
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
@@ -857,7 +950,9 @@ class Popen(object):
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
- restore_signals, start_new_session)
+ restore_signals,
+ gid, gids, uid, umask,
+ start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
@@ -887,6 +982,17 @@ class Popen(object):
raise
+ def __repr__(self):
+ obj_repr = (
+ f"<{self.__class__.__name__}: "
+ f"returncode: {self.returncode} args: {list(self.args)!r}>"
+ )
+ if len(obj_repr) > 80:
+ obj_repr = obj_repr[:76] + "...>"
+ return obj_repr
+
+ __class_getitem__ = classmethod(types.GenericAlias)
+
@property
def universal_newlines(self):
# universal_newlines as retained as an alias of text_mode for API
@@ -1227,7 +1333,10 @@ class Popen(object):
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
- unused_restore_signals, unused_start_new_session):
+ unused_restore_signals,
+ unused_gid, unused_gids, unused_uid,
+ unused_umask,
+ unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
@@ -1553,7 +1662,9 @@ class Popen(object):
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
- restore_signals, start_new_session):
+ restore_signals,
+ gid, gids, uid, umask,
+ start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
@@ -1588,7 +1699,11 @@ class Popen(object):
and (p2cread == -1 or p2cread > 2)
and (c2pwrite == -1 or c2pwrite > 2)
and (errwrite == -1 or errwrite > 2)
- and not start_new_session):
+ and not start_new_session
+ and gid is None
+ and gids is None
+ and uid is None
+ and umask < 0):
self._posix_spawn(args, executable, env, restore_signals,
p2cread, p2cwrite,
c2pread, c2pwrite,
@@ -1641,7 +1756,9 @@ class Popen(object):
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
- restore_signals, start_new_session, preexec_fn)
+ restore_signals, start_new_session,
+ gid, gids, uid, umask,
+ preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
@@ -1703,23 +1820,17 @@ class Popen(object):
raise child_exception_type(err_msg)
- def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
- _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
- _WEXITSTATUS=os.WEXITSTATUS, _WIFSTOPPED=os.WIFSTOPPED,
- _WSTOPSIG=os.WSTOPSIG):
+ def _handle_exitstatus(self, sts,
+ waitstatus_to_exitcode=os.waitstatus_to_exitcode,
+ _WIFSTOPPED=os.WIFSTOPPED,
+ _WSTOPSIG=os.WSTOPSIG):
"""All callers to this function MUST hold self._waitpid_lock."""
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
- if _WIFSIGNALED(sts):
- self.returncode = -_WTERMSIG(sts)
- elif _WIFEXITED(sts):
- self.returncode = _WEXITSTATUS(sts)
- elif _WIFSTOPPED(sts):
+ if _WIFSTOPPED(sts):
self.returncode = -_WSTOPSIG(sts)
else:
- # Should never happen
- raise SubprocessError("Unknown child exit status!")
-
+ self.returncode = waitstatus_to_exitcode(sts)
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
@@ -1926,9 +2037,35 @@ class Popen(object):
def send_signal(self, sig):
"""Send a signal to the process."""
- # Skip signalling a process that we know has already died.
- if self.returncode is None:
+ # bpo-38630: Polling reduces the risk of sending a signal to the
+ # wrong process if the process completed, the Popen.returncode
+ # attribute is still None, and the pid has been reassigned
+ # (recycled) to a different process. This race condition can
+ # happen in two cases.
+ #
+ # Case 1. Thread A calls Popen.poll(), thread B calls
+ # Popen.send_signal(). In thread A, waitpid() succeed and returns
+ # the exit status. Thread B calls kill() because poll() in thread A
+ # did not set returncode yet. Calling poll() in thread B prevents
+ # the race condition thanks to Popen._waitpid_lock.
+ #
+ # Case 2. waitpid(pid, 0) has been called directly, without
+ # using Popen methods: returncode is still None in this case.
+ # Calling Popen.poll() will set returncode to a default value,
+ # since waitpid() fails with ProcessLookupError.
+ self.poll()
+ if self.returncode is not None:
+ # Skip signalling a process that we know has already died.
+ return
+
+ # The race condition can still happen if the race condition
+ # described above happens between the returncode test
+ # and the kill() call.
+ try:
os.kill(self.pid, sig)
+ except ProcessLookupError:
+ # Suppress the race condition error; bpo-40550.
+ pass
def terminate(self):
"""Terminate the process with SIGTERM
diff --git a/x64/Lib/sunau.py b/x64/Lib/sunau.py
index 129502b..79750a9 100644
--- a/x64/Lib/sunau.py
+++ b/x64/Lib/sunau.py
@@ -104,7 +104,7 @@ is destroyed.
"""
from collections import namedtuple
-import warnings
+
_sunau_params = namedtuple('_sunau_params',
'nchannels sampwidth framerate nframes comptype compname')
@@ -524,8 +524,3 @@ def open(f, mode=None):
return Au_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
-
-def openfp(f, mode=None):
- warnings.warn("sunau.openfp is deprecated since Python 3.7. "
- "Use sunau.open instead.", DeprecationWarning, stacklevel=2)
- return open(f, mode=mode)
diff --git a/x64/Lib/symbol.py b/x64/Lib/symbol.py
index 36e0eec..aaac8c9 100644
--- a/x64/Lib/symbol.py
+++ b/x64/Lib/symbol.py
@@ -11,6 +11,15 @@
#
# make regen-symbol
+import warnings
+
+warnings.warn(
+ "The symbol module is deprecated and will be removed "
+ "in future versions of Python",
+ DeprecationWarning,
+ stacklevel=2,
+)
+
#--start constants--
single_input = 256
file_input = 257
diff --git a/x64/Lib/symtable.py b/x64/Lib/symtable.py
index 5bea7cf..521540f 100644
--- a/x64/Lib/symtable.py
+++ b/x64/Lib/symtable.py
@@ -34,7 +34,7 @@ class SymbolTableFactory:
_newSymbolTable = SymbolTableFactory()
-class SymbolTable(object):
+class SymbolTable:
def __init__(self, raw_table, filename):
self._table = raw_table
@@ -47,7 +47,7 @@ class SymbolTable(object):
else:
kind = "%s " % self.__class__.__name__
- if self._table.name == "global":
+ if self._table.name == "top":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
@@ -82,10 +82,6 @@ class SymbolTable(object):
def has_children(self):
return bool(self._table.children)
- def has_exec(self):
- """Return true if the scope uses exec. Deprecated method."""
- return False
-
def get_identifiers(self):
return self._table.symbols.keys()
@@ -94,7 +90,9 @@ class SymbolTable(object):
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
- sym = self._symbols[name] = Symbol(name, flags, namespaces)
+ module_scope = (self._table.name == "top")
+ sym = self._symbols[name] = Symbol(name, flags, namespaces,
+ module_scope=module_scope)
return sym
def get_symbols(self):
@@ -167,13 +165,14 @@ class Class(SymbolTable):
return self.__methods
-class Symbol(object):
+class Symbol:
- def __init__(self, name, flags, namespaces=None):
+ def __init__(self, name, flags, namespaces=None, *, module_scope=False):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
+ self.__module_scope = module_scope
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
@@ -188,7 +187,10 @@ class Symbol(object):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
- return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
+ """Return *True* if the sysmbol is global.
+ """
+ return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
+ or (self.__module_scope and self.__flags & DEF_BOUND))
def is_nonlocal(self):
return bool(self.__flags & DEF_NONLOCAL)
@@ -197,7 +199,10 @@ class Symbol(object):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
- return bool(self.__flags & DEF_BOUND)
+ """Return *True* if the symbol is local.
+ """
+ return bool(self.__scope in (LOCAL, CELL)
+ or (self.__module_scope and self.__flags & DEF_BOUND))
def is_annotated(self):
return bool(self.__flags & DEF_ANNOT)
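
A sketch of the module_scope refinement: a name bound at the top level now answers True to both queries (illustrative):

import symtable

table = symtable.symtable('x = 1', '<demo>', 'exec')
sym = table.lookup('x')
print(sym.is_local(), sym.is_global())  # True True at module scope
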
diff --git a/x64/Lib/sysconfig.py b/x64/Lib/sysconfig.py
index b9e2faf..bf04ac5 100644
--- a/x64/Lib/sysconfig.py
+++ b/x64/Lib/sysconfig.py
@@ -20,10 +20,10 @@ __all__ = [
_INSTALL_SCHEMES = {
'posix_prefix': {
- 'stdlib': '{installed_base}/lib/python{py_version_short}',
- 'platstdlib': '{platbase}/lib/python{py_version_short}',
+ 'stdlib': '{installed_base}/{platlibdir}/python{py_version_short}',
+ 'platstdlib': '{platbase}/{platlibdir}/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
- 'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
+ 'platlib': '{platbase}/{platlibdir}/python{py_version_short}/site-packages',
'include':
'{installed_base}/include/python{py_version_short}{abiflags}',
'platinclude':
@@ -62,10 +62,10 @@ _INSTALL_SCHEMES = {
'data': '{userbase}',
},
'posix_user': {
- 'stdlib': '{userbase}/lib/python{py_version_short}',
- 'platstdlib': '{userbase}/lib/python{py_version_short}',
+ 'stdlib': '{userbase}/{platlibdir}/python{py_version_short}',
+ 'platstdlib': '{userbase}/{platlibdir}/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
- 'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
+ 'platlib': '{userbase}/{platlibdir}/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data': '{userbase}',
@@ -84,8 +84,6 @@ _INSTALL_SCHEMES = {
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
- # FIXME don't rely on sys.version here, its format is an implementation detail
- # of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = '%d.%d' % sys.version_info[:2]
_PY_VERSION_SHORT_NO_DOT = '%d%d' % sys.version_info[:2]
@@ -539,6 +537,7 @@ def get_config_vars(*args):
_CONFIG_VARS['installed_platbase'] = _BASE_EXEC_PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
+ _CONFIG_VARS['platlibdir'] = sys.platlibdir
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
@@ -547,6 +546,7 @@ def get_config_vars(*args):
if os.name == 'nt':
_init_non_posix(_CONFIG_VARS)
+ _CONFIG_VARS['TZPATH'] = ''
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# For backward compatibility, see issue19555
@@ -665,7 +665,8 @@ def get_platform():
machine += ".%s" % bitness[sys.maxsize]
# fall through to standard osname-release-machine representation
elif osname[:3] == "aix":
- return "%s-%s.%s" % (osname, version, release)
+ from _aix_support import aix_platform
+ return aix_platform()
elif osname[:6] == "cygwin":
osname = "cygwin"
import re
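
# Editor's note: an illustrative sketch (not part of the diff). sys.platlibdir
# (new in 3.9; usually "lib", but e.g. "lib64" on some 64-bit distros) now
# feeds the posix install schemes through the {platlibdir} placeholder.
import sys
import sysconfig

print(sys.platlibdir)                          # "lib" on most platforms
print(sysconfig.get_config_var("platlibdir"))  # exposed as a config var too
print(sysconfig.get_path("stdlib"))            # expands {platlibdir} on posix
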
diff --git a/x64/Lib/tarfile.py b/x64/Lib/tarfile.py
index d31b9cb..1d15612 100644
--- a/x64/Lib/tarfile.py
+++ b/x64/Lib/tarfile.py
@@ -420,6 +420,8 @@ class _Stream:
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC1952
+ self.name = os.path.basename(self.name)
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
@@ -930,6 +932,14 @@ class TarInfo(object):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
+ has_device_fields = info.get("type") in (CHRTYPE, BLKTYPE)
+ if has_device_fields:
+ devmajor = itn(info.get("devmajor", 0), 8, format)
+ devminor = itn(info.get("devminor", 0), 8, format)
+ else:
+ devmajor = stn("", 8, encoding, errors)
+ devminor = stn("", 8, encoding, errors)
+
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
@@ -943,8 +953,8 @@ class TarInfo(object):
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
- itn(info.get("devmajor", 0), 8, format),
- itn(info.get("devminor", 0), 8, format),
+ devmajor,
+ devminor,
stn(info.get("prefix", ""), 155, encoding, errors)
]
@@ -1241,6 +1251,8 @@ class TarInfo(object):
length, keyword = match.groups()
length = int(length)
+ if length == 0:
+ raise InvalidHeaderError("invalid header")
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf-8" as the encoding and "strict"
@@ -2081,9 +2093,10 @@ class TarFile(object):
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
- a filename or a TarInfo object. If `member' is a regular file or a
- link, an io.BufferedReader object is returned. Otherwise, None is
- returned.
+ a filename or a TarInfo object. If `member' is a regular file or
+ a link, an io.BufferedReader object is returned. For all other
+ existing members, None is returned. If `member' does not appear
+ in the archive, KeyError is raised.
"""
self._check("r")
@@ -2224,6 +2237,9 @@ class TarFile(object):
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
+ if os.path.lexists(targetpath):
+ # Avoid FileExistsError on following os.symlink.
+ os.unlink(targetpath)
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
@@ -2459,9 +2475,14 @@ class TarFile(object):
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
+
+ 'name' should be a string, file, or file-like object.
"""
try:
- t = open(name)
+ if hasattr(name, "read"):
+ t = open(fileobj=name)
+ else:
+ t = open(name)
t.close()
return True
except TarError:
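
# Editor's note: an illustrative sketch (not part of the diff). is_tarfile()
# now accepts an open binary file or file-like object in addition to a path.
import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tf:
    data = b"hello"
    info = tarfile.TarInfo(name="hello.txt")
    info.size = len(data)
    tf.addfile(info, io.BytesIO(data))
buf.seek(0)
print(tarfile.is_tarfile(buf))  # expected: True on Python 3.9
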
diff --git a/x64/Lib/tempfile.py b/x64/Lib/tempfile.py
index 6287554..770f72c 100644
--- a/x64/Lib/tempfile.py
+++ b/x64/Lib/tempfile.py
@@ -44,6 +44,7 @@ import shutil as _shutil
import errno as _errno
from random import Random as _Random
import sys as _sys
+import types as _types
import weakref as _weakref
import _thread
_allocate_lock = _thread.allocate_lock
@@ -307,8 +308,7 @@ def mkstemp(suffix=None, prefix=None, dir=None, text=False):
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
- mode. Else (the default) the file is opened in binary mode. On
- some operating systems, this makes no difference.
+ mode. Else (the default) the file is opened in binary mode.
If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
same type. If they are bytes, the returned name will be bytes; str
@@ -643,6 +643,8 @@ class SpooledTemporaryFile:
'encoding': encoding, 'newline': newline,
'dir': dir, 'errors': errors}
+ __class_getitem__ = classmethod(_types.GenericAlias)
+
def _check(self, file):
if self._rolled: return
max_size = self._max_size
@@ -735,11 +737,7 @@ class SpooledTemporaryFile:
return self._file.readlines(*args)
def seek(self, *args):
- self._file.seek(*args)
-
- @property
- def softspace(self):
- return self._file.softspace
+ return self._file.seek(*args)
def tell(self):
return self._file.tell()
@@ -830,3 +828,5 @@ class TemporaryDirectory(object):
def cleanup(self):
if self._finalizer.detach():
self._rmtree(self.name)
+
+ __class_getitem__ = classmethod(_types.GenericAlias)
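
# Editor's note: an illustrative sketch (not part of the diff). The new
# __class_getitem__ hooks make these classes subscriptable in annotations,
# returning a types.GenericAlias.
import tempfile

alias = tempfile.SpooledTemporaryFile[bytes]
print(alias)             # tempfile.SpooledTemporaryFile[bytes]
print(alias.__origin__)  # <class 'tempfile.SpooledTemporaryFile'>

def scratch_dir() -> tempfile.TemporaryDirectory[str]:
    return tempfile.TemporaryDirectory()
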
diff --git a/x64/Lib/threading.py b/x64/Lib/threading.py
index 2f6ac70..d96d99a 100644
--- a/x64/Lib/threading.py
+++ b/x64/Lib/threading.py
@@ -3,6 +3,7 @@
import os as _os
import sys as _sys
import _thread
+import functools
from time import monotonic as _time
from _weakrefset import WeakSet
@@ -121,6 +122,11 @@ class _RLock:
hex(id(self))
)
+ def _at_fork_reinit(self):
+ self._block._at_fork_reinit()
+ self._owner = None
+ self._count = 0
+
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
@@ -243,6 +249,10 @@ class Condition:
pass
self._waiters = _deque()
+ def _at_fork_reinit(self):
+ self._lock._at_fork_reinit()
+ self._waiters.clear()
+
def __enter__(self):
return self._lock.__enter__()
@@ -261,7 +271,7 @@ class Condition:
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if _lock doesn't have _is_owned().
- if self._lock.acquire(0):
+ if self._lock.acquire(False):
self._lock.release()
return False
else:
@@ -438,16 +448,19 @@ class Semaphore:
__enter__ = acquire
- def release(self):
- """Release a semaphore, incrementing the internal counter by one.
+ def release(self, n=1):
+ """Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
+ if n < 1:
+ raise ValueError('n must be one or more')
with self._cond:
- self._value += 1
- self._cond.notify()
+ self._value += n
+ for i in range(n):
+ self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
@@ -474,8 +487,8 @@ class BoundedSemaphore(Semaphore):
Semaphore.__init__(self, value)
self._initial_value = value
- def release(self):
- """Release a semaphore, incrementing the internal counter by one.
+ def release(self, n=1):
+ """Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
@@ -484,11 +497,14 @@ class BoundedSemaphore(Semaphore):
raise a ValueError.
"""
+ if n < 1:
+ raise ValueError('n must be one or more')
with self._cond:
- if self._value >= self._initial_value:
+ if self._value + n > self._initial_value:
raise ValueError("Semaphore released too many times")
- self._value += 1
- self._cond.notify()
+ self._value += n
+ for i in range(n):
+ self._cond.notify()
class Event:
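
# Editor's note: an illustrative sketch (not part of the diff). release()
# now takes n, waking up to n waiters in one call; BoundedSemaphore still
# rejects releases past the initial value.
import threading

sem = threading.BoundedSemaphore(2)
sem.acquire()
sem.acquire()
sem.release(n=2)   # one call instead of two
try:
    sem.release()  # would exceed the initial value of 2
except ValueError as exc:
    print(exc)     # "Semaphore released too many times"
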
@@ -506,9 +522,9 @@ class Event:
self._cond = Condition(Lock())
self._flag = False
- def _reset_internal_locks(self):
- # private! called by Thread._reset_internal_locks by _after_fork()
- self._cond.__init__(Lock())
+ def _at_fork_reinit(self):
+ # Private method called by Thread._reset_internal_locks()
+ self._cond._at_fork_reinit()
def is_set(self):
"""Return true if and only if the internal flag is true."""
@@ -808,9 +824,14 @@ class Thread:
def _reset_internal_locks(self, is_alive):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
- self._started._reset_internal_locks()
+ self._started._at_fork_reinit()
if is_alive:
- self._set_tstate_lock()
+ # bpo-42350: If the fork happens when the thread is already stopped
+ # (ex: after threading._shutdown() has been called), _tstate_lock
+ # is None. Do nothing in this case.
+ if self._tstate_lock is not None:
+ self._tstate_lock._at_fork_reinit()
+ self._tstate_lock.acquire()
else:
# The thread isn't alive after fork: it doesn't have a tstate
# anymore.
@@ -846,6 +867,7 @@ class Thread:
if self._started.is_set():
raise RuntimeError("threads can only be started once")
+
with _active_limbo_lock:
_limbo[self] = self
try:
@@ -1082,16 +1104,6 @@ class Thread:
self._wait_for_tstate_lock(False)
return not self._is_stopped
- def isAlive(self):
- """Return whether the thread is alive.
-
- This method is deprecated, use is_alive() instead.
- """
- import warnings
- warnings.warn('isAlive() is deprecated, use is_alive() instead',
- DeprecationWarning, stacklevel=2)
- return self.is_alive()
-
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
@@ -1344,6 +1356,27 @@ def enumerate():
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
+
+_threading_atexits = []
+_SHUTTING_DOWN = False
+
+def _register_atexit(func, *arg, **kwargs):
+ """CPython internal: register *func* to be called before joining threads.
+
+ The registered *func* is called with its arguments just before all
+ non-daemon threads are joined in `_shutdown()`. It serves a similar
+ purpose to `atexit.register()`, but its functions are called prior to
+ threading shutdown instead of interpreter shutdown.
+
+ For similarity to atexit, the registered functions are called in reverse.
+ """
+ if _SHUTTING_DOWN:
+ raise RuntimeError("can't register atexit after shutdown")
+
+ call = functools.partial(func, *arg, **kwargs)
+ _threading_atexits.append(call)
+
+
from _thread import stack_size
# Create the main thread object,
@@ -1365,6 +1398,8 @@ def _shutdown():
# _shutdown() was already called
return
+ global _SHUTTING_DOWN
+ _SHUTTING_DOWN = True
# Main thread
tlock = _main_thread._tstate_lock
# The main thread isn't finished yet, so its thread state lock can't have
@@ -1374,6 +1409,11 @@ def _shutdown():
tlock.release()
_main_thread._stop()
+ # Call registered threading atexit functions before threads are joined.
+ # Order is reversed, similar to atexit.
+ for atexit_call in reversed(_threading_atexits):
+ atexit_call()
+
# Join all non-daemon threads
while True:
with _shutdown_locks_lock:
@@ -1421,7 +1461,15 @@ def _after_fork():
# fork() only copied the current thread; clear references to others.
new_active = {}
- current = current_thread()
+
+ try:
+ current = _active[get_ident()]
+ except KeyError:
+ # fork() was called in a thread which was not spawned
+ # by threading.Thread. For example, a thread spawned
+ # by thread.start_new_thread().
+ current = _MainThread()
+
_main_thread = current
# reset _shutdown() locks: threads re-register their _tstate_lock below
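
# Editor's note: an illustrative sketch (not part of the diff).
# _register_atexit() is CPython-internal; callbacks run inside _shutdown(),
# before non-daemon threads are joined, in reverse registration order.
import threading

threading._register_atexit(print, "registered first, runs second")
threading._register_atexit(print, "registered second, runs first")
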
diff --git a/x64/Lib/timeit.py b/x64/Lib/timeit.py
index c0362bc..6c3ec01 100644
--- a/x64/Lib/timeit.py
+++ b/x64/Lib/timeit.py
@@ -29,7 +29,8 @@ argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
-successive powers of 10 until the total time is at least 0.2 seconds.
+increasing numbers from the sequence 1, 2, 5, 10, 20, 50, ... until the
+total time is at least 0.2 seconds.
Note: there is a certain baseline overhead associated with executing a
pass statement. It differs between versions. The code here doesn't try
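
# Editor's note: an illustrative sketch (not part of the diff). The reworded
# docstring matches Timer.autorange(), which tries 1, 2, 5, 10, 20, 50, ...
# loops until the total time reaches 0.2 seconds.
import timeit

number, elapsed = timeit.Timer("sum(range(100))").autorange()
print(number, elapsed)  # e.g. 200000 loops over >= 0.2s (machine-dependent)
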
diff --git a/x64/Lib/trace.py b/x64/Lib/trace.py
index a447357..c505d8b 100644
--- a/x64/Lib/trace.py
+++ b/x64/Lib/trace.py
@@ -287,8 +287,9 @@ class CoverageResults:
if self.outfile:
# try and store counts and module info into self.outfile
try:
- pickle.dump((self.counts, self.calledfuncs, self.callers),
- open(self.outfile, 'wb'), 1)
+ with open(self.outfile, 'wb') as f:
+ pickle.dump((self.counts, self.calledfuncs, self.callers),
+ f, 1)
except OSError as err:
print("Can't save counts files because %s" % err, file=sys.stderr)
@@ -452,22 +453,7 @@ class Trace:
sys.settrace(None)
threading.settrace(None)
- def runfunc(*args, **kw):
- if len(args) >= 2:
- self, func, *args = args
- elif not args:
- raise TypeError("descriptor 'runfunc' of 'Trace' object "
- "needs an argument")
- elif 'func' in kw:
- func = kw.pop('func')
- self, *args = args
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError('runfunc expected at least 1 positional argument, '
- 'got %d' % (len(args)-1))
-
+ def runfunc(self, func, /, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
@@ -477,7 +463,6 @@ class Trace:
if not self.donothing:
sys.settrace(None)
return result
- runfunc.__text_signature__ = '($self, func, /, *args, **kw)'
def file_module_function_of(self, frame):
code = frame.f_code
@@ -731,7 +716,7 @@ def main():
sys.argv = [opts.progname, *opts.arguments]
sys.path[0] = os.path.dirname(opts.progname)
- with open(opts.progname) as fp:
+ with open(opts.progname, 'rb') as fp:
code = compile(fp.read(), opts.progname, 'exec')
# try to emulate __main__ namespace as much as possible
globs = {
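
# Editor's note: an illustrative sketch (not part of the diff). runfunc() now
# has a real positional-only signature, so 'func' can no longer be a keyword.
import trace

tracer = trace.Trace(count=False, trace=False)
print(tracer.runfunc(pow, 2, 10))  # 1024
try:
    tracer.runfunc(func=pow)       # 'func' is positional-only now
except TypeError as exc:
    print(exc)
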
diff --git a/x64/Lib/traceback.py b/x64/Lib/traceback.py
index ab35da9..fb34de9 100644
--- a/x64/Lib/traceback.py
+++ b/x64/Lib/traceback.py
@@ -500,7 +500,6 @@ class TracebackException:
_seen=_seen)
else:
context = None
- self.exc_traceback = exc_traceback
self.__cause__ = cause
self.__context__ = context
self.__suppress_context__ = \
@@ -538,7 +537,9 @@ class TracebackException:
self.__cause__._load_lines()
def __eq__(self, other):
- return self.__dict__ == other.__dict__
+ if isinstance(other, TracebackException):
+ return self.__dict__ == other.__dict__
+ return NotImplemented
def __str__(self):
return self._str
@@ -549,7 +550,7 @@ class TracebackException:
The return value is a generator of strings, each ending in a newline.
Normally, the generator emits a single string; however, for
- SyntaxError exceptions, it emites several lines that (when
+ SyntaxError exceptions, it emits several lines that (when
printed) display detailed information about where the syntax
error occurred.
@@ -567,23 +568,30 @@ class TracebackException:
if not issubclass(self.exc_type, SyntaxError):
yield _format_final_exc_line(stype, self._str)
- return
+ else:
+ yield from self._format_syntax_error(stype)
- # It was a syntax error; show exactly where the problem was found.
+ def _format_syntax_error(self, stype):
+ """Format SyntaxError exceptions (internal helper)."""
+ # Show exactly where the problem was found.
filename = self.filename or "<string>"
lineno = str(self.lineno) or '?'
yield ' File "{}", line {}\n'.format(filename, lineno)
- badline = self.text
- offset = self.offset
- if badline is not None:
- yield ' {}\n'.format(badline.strip())
- if offset is not None:
- caretspace = badline.rstrip('\n')
- offset = min(len(caretspace), offset) - 1
- caretspace = caretspace[:offset].lstrip()
+ text = self.text
+ if text is not None:
+ # text = " foo\n"
+ # rtext = " foo"
+ # ltext = "foo"
+ rtext = text.rstrip('\n')
+ ltext = rtext.lstrip(' \n\f')
+ spaces = len(rtext) - len(ltext)
+ yield ' {}\n'.format(ltext)
+ # Convert 1-based column offset to 0-based index into stripped text
+ caret = (self.offset or 0) - 1 - spaces
+ if caret >= 0:
# non-space whitespace (likes tabs) must be kept for alignment
- caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+ caretspace = ((c if c.isspace() else ' ') for c in ltext[:caret])
yield ' {}^\n'.format(''.join(caretspace))
msg = self.msg or "<no detail available>"
yield "{}: {}\n".format(stype, msg)
@@ -608,7 +616,7 @@ class TracebackException:
not self.__suppress_context__):
yield from self.__context__.format(chain=chain)
yield _context_message
- if self.exc_traceback is not None:
+ if self.stack:
yield 'Traceback (most recent call last):\n'
- yield from self.stack.format()
+ yield from self.stack.format()
yield from self.format_exception_only()
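
# Editor's note: an illustrative sketch (not part of the diff). __eq__ now
# returns NotImplemented for foreign types instead of poking other.__dict__,
# so comparisons against arbitrary objects fall back to False.
import traceback

try:
    1 / 0
except ZeroDivisionError as exc:
    te = traceback.TracebackException.from_exception(exc)

print(te == te)      # True
print(te == "nope")  # False, instead of an AttributeError
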
diff --git a/x64/Lib/tracemalloc.py b/x64/Lib/tracemalloc.py
index 2c1ac3b..69b4170 100644
--- a/x64/Lib/tracemalloc.py
+++ b/x64/Lib/tracemalloc.py
@@ -43,6 +43,8 @@ class Statistic:
return hash((self.traceback, self.size, self.count))
def __eq__(self, other):
+ if not isinstance(other, Statistic):
+ return NotImplemented
return (self.traceback == other.traceback
and self.size == other.size
and self.count == other.count)
@@ -84,6 +86,8 @@ class StatisticDiff:
self.count, self.count_diff))
def __eq__(self, other):
+ if not isinstance(other, StatisticDiff):
+ return NotImplemented
return (self.traceback == other.traceback
and self.size == other.size
and self.size_diff == other.size_diff
@@ -153,9 +157,13 @@ class Frame:
return self._frame[1]
def __eq__(self, other):
+ if not isinstance(other, Frame):
+ return NotImplemented
return (self._frame == other._frame)
def __lt__(self, other):
+ if not isinstance(other, Frame):
+ return NotImplemented
return (self._frame < other._frame)
def __hash__(self):
@@ -174,15 +182,20 @@ class Traceback(Sequence):
Sequence of Frame instances sorted from the oldest frame
to the most recent frame.
"""
- __slots__ = ("_frames",)
+ __slots__ = ("_frames", '_total_nframe')
- def __init__(self, frames):
+ def __init__(self, frames, total_nframe=None):
Sequence.__init__(self)
# frames is a tuple of frame tuples: see Frame constructor for the
# format of a frame tuple; it is reversed, because _tracemalloc
# returns frames sorted from most recent to oldest, but the
# Python API expects oldest to most recent
self._frames = tuple(reversed(frames))
+ self._total_nframe = total_nframe
+
+ @property
+ def total_nframe(self):
+ return self._total_nframe
def __len__(self):
return len(self._frames)
@@ -200,16 +213,25 @@ class Traceback(Sequence):
return hash(self._frames)
def __eq__(self, other):
+ if not isinstance(other, Traceback):
+ return NotImplemented
return (self._frames == other._frames)
def __lt__(self, other):
+ if not isinstance(other, Traceback):
+ return NotImplemented
return (self._frames < other._frames)
def __str__(self):
return str(self[0])
def __repr__(self):
- return "<Traceback %r>" % (tuple(self),)
+ s = "<Traceback %r" % tuple(self)
+ if self._total_nframe is None:
+ s += ">"
+ else:
+ s += f" total_nframe={self.total_nframe}>"
+ return s
def format(self, limit=None, most_recent_first=False):
lines = []
@@ -268,9 +290,11 @@ class Trace:
@property
def traceback(self):
- return Traceback(self._trace[2])
+ return Traceback(*self._trace[2:])
def __eq__(self, other):
+ if not isinstance(other, Trace):
+ return NotImplemented
return (self._trace == other._trace)
def __hash__(self):
@@ -303,6 +327,8 @@ class _Traces(Sequence):
return trace._trace in self._traces
def __eq__(self, other):
+ if not isinstance(other, _Traces):
+ return NotImplemented
return (self._traces == other._traces)
def __repr__(self):
@@ -362,7 +388,7 @@ class Filter(BaseFilter):
return self._match_frame(filename, lineno)
def _match(self, trace):
- domain, size, traceback = trace
+ domain, size, traceback, total_nframe = trace
res = self._match_traceback(traceback)
if self.domain is not None:
if self.inclusive:
@@ -382,7 +408,7 @@ class DomainFilter(BaseFilter):
return self._domain
def _match(self, trace):
- domain, size, traceback = trace
+ domain, size, traceback, total_nframe = trace
return (domain == self.domain) ^ (not self.inclusive)
@@ -459,7 +485,7 @@ class Snapshot:
tracebacks = {}
if not cumulative:
for trace in self.traces._traces:
- domain, size, trace_traceback = trace
+ domain, size, trace_traceback, total_nframe = trace
try:
traceback = tracebacks[trace_traceback]
except KeyError:
@@ -480,7 +506,7 @@ class Snapshot:
else:
# cumulative statistics
for trace in self.traces._traces:
- domain, size, trace_traceback = trace
+ domain, size, trace_traceback, total_nframe = trace
for frame in trace_traceback:
try:
traceback = tracebacks[frame]
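
# Editor's note: an illustrative sketch (not part of the diff). Traces now
# carry a fourth field, surfaced as Traceback.total_nframe: the number of
# frames at allocation time, which may exceed the stored frame count.
import tracemalloc

tracemalloc.start(5)  # store at most 5 frames per trace
snapshot = tracemalloc.take_snapshot()
tracemalloc.stop()
if snapshot.traces:
    tb = snapshot.traces[0].traceback
    print(len(tb), tb.total_nframe)  # stored frames vs. frames at allocation
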
diff --git a/x64/Lib/types.py b/x64/Lib/types.py
index ea3c0b2..ad2020e 100644
--- a/x64/Lib/types.py
+++ b/x64/Lib/types.py
@@ -293,4 +293,7 @@ def coroutine(func):
return wrapped
+GenericAlias = type(list[int])
+
+
__all__ = [n for n in globals() if n[:1] != '_']
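
# Editor's note: an illustrative sketch (not part of the diff). GenericAlias
# is simply the type of a subscripted builtin, now exported so that the
# tempfile and typing changes elsewhere in this commit can import it.
import types

alias = list[int]
print(type(alias) is types.GenericAlias)     # True
print(types.GenericAlias(dict, (str, int)))  # dict[str, int]
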
diff --git a/x64/Lib/typing.py b/x64/Lib/typing.py
index 7aab8db..f5316ab 100644
--- a/x64/Lib/typing.py
+++ b/x64/Lib/typing.py
@@ -26,11 +26,12 @@ import operator
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
-from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType
+from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
+ 'Annotated',
'Any',
'Callable',
'ClassVar',
@@ -140,8 +141,9 @@ def _type_check(arg, msg, is_argument=True):
if (isinstance(arg, _GenericAlias) and
arg.__origin__ in invalid_generic_forms):
raise TypeError(f"{arg} is not valid as type argument")
- if (isinstance(arg, _SpecialForm) and arg not in (Any, NoReturn) or
- arg in (Generic, Protocol)):
+ if arg in (Any, NoReturn):
+ return arg
+ if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
raise TypeError(f"Plain {arg} is not valid as type argument")
if isinstance(arg, (type, TypeVar, ForwardRef)):
return arg
@@ -158,6 +160,8 @@ def _type_repr(obj):
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
+ if isinstance(obj, types.GenericAlias):
+ return repr(obj)
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
@@ -179,43 +183,37 @@ def _collect_type_vars(types):
for t in types:
if isinstance(t, TypeVar) and t not in tvars:
tvars.append(t)
- if isinstance(t, _GenericAlias) and not t._special:
+ if isinstance(t, (_GenericAlias, GenericAlias)):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
-def _subs_tvars(tp, tvars, subs):
- """Substitute type variables 'tvars' with substitutions 'subs'.
- These two must have the same length.
- """
- if not isinstance(tp, _GenericAlias):
- return tp
- new_args = list(tp.__args__)
- for a, arg in enumerate(tp.__args__):
- if isinstance(arg, TypeVar):
- for i, tvar in enumerate(tvars):
- if arg == tvar:
- new_args[a] = subs[i]
- else:
- new_args[a] = _subs_tvars(arg, tvars, subs)
- if tp.__origin__ is Union:
- return Union[tuple(new_args)]
- return tp.copy_with(tuple(new_args))
-
-
-def _check_generic(cls, parameters):
+def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
- if not cls.__parameters__:
+ if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
- elen = len(cls.__parameters__)
if alen != elen:
raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
f" actual {alen}, expected {elen}")
+def _deduplicate(params):
+ # Weed out strict duplicates, preserving the first of each occurrence.
+ all_params = set(params)
+ if len(all_params) < len(params):
+ new_params = []
+ for t in params:
+ if t in all_params:
+ new_params.append(t)
+ all_params.remove(t)
+ params = new_params
+ assert not all_params, all_params
+ return params
+
+
def _remove_dups_flatten(parameters):
"""An internal helper for Union creation and substitution: flatten Unions
among parameters, then remove duplicates.
@@ -223,58 +221,68 @@ def _remove_dups_flatten(parameters):
# Flatten out Union[Union[...], ...].
params = []
for p in parameters:
- if isinstance(p, _GenericAlias) and p.__origin__ is Union:
+ if isinstance(p, _UnionGenericAlias):
params.extend(p.__args__)
elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
params.extend(p[1:])
else:
params.append(p)
- # Weed out strict duplicates, preserving the first of each occurrence.
- all_params = set(params)
- if len(all_params) < len(params):
- new_params = []
- for t in params:
- if t in all_params:
- new_params.append(t)
- all_params.remove(t)
- params = new_params
- assert not all_params, all_params
+
+ return tuple(_deduplicate(params))
+
+
+def _flatten_literal_params(parameters):
+ """An internal helper for Literal creation: flatten Literals among parameters"""
+ params = []
+ for p in parameters:
+ if isinstance(p, _LiteralGenericAlias):
+ params.extend(p.__args__)
+ else:
+ params.append(p)
return tuple(params)
_cleanups = []
-def _tp_cache(func):
+def _tp_cache(func=None, /, *, typed=False):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
- cached = functools.lru_cache()(func)
- _cleanups.append(cached.cache_clear)
+ def decorator(func):
+ cached = functools.lru_cache(typed=typed)(func)
+ _cleanups.append(cached.cache_clear)
- @functools.wraps(func)
- def inner(*args, **kwds):
- try:
- return cached(*args, **kwds)
- except TypeError:
- pass # All real errors (not unhashable args) are raised below.
- return func(*args, **kwds)
- return inner
+ @functools.wraps(func)
+ def inner(*args, **kwds):
+ try:
+ return cached(*args, **kwds)
+ except TypeError:
+ pass # All real errors (not unhashable args) are raised below.
+ return func(*args, **kwds)
+ return inner
+ if func is not None:
+ return decorator(func)
-def _eval_type(t, globalns, localns):
- """Evaluate all forward reverences in the given type t.
+ return decorator
+
+def _eval_type(t, globalns, localns, recursive_guard=frozenset()):
+ """Evaluate all forward references in the given type t.
For use of globalns and localns see the docstring for get_type_hints().
+ recursive_guard is used to prevent infinite recursion
+ with recursive ForwardRef.
"""
if isinstance(t, ForwardRef):
- return t._evaluate(globalns, localns)
- if isinstance(t, _GenericAlias):
- ev_args = tuple(_eval_type(a, globalns, localns) for a in t.__args__)
+ return t._evaluate(globalns, localns, recursive_guard)
+ if isinstance(t, (_GenericAlias, GenericAlias)):
+ ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)
if ev_args == t.__args__:
return t
- res = t.copy_with(ev_args)
- res._special = t._special
- return res
+ if isinstance(t, GenericAlias):
+ return GenericAlias(t.__origin__, ev_args)
+ else:
+ return t.copy_with(ev_args)
return t
@@ -289,6 +297,7 @@ class _Final:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
+ __slots__ = ()
def __copy__(self):
return self
@@ -297,37 +306,18 @@ class _Immutable:
return self
-class _SpecialForm(_Final, _Immutable, _root=True):
- """Internal indicator of special typing constructs.
- See _doc instance attribute for specific docs.
- """
-
- __slots__ = ('_name', '_doc')
-
- def __new__(cls, *args, **kwds):
- """Constructor.
+# Internal indicator of special typing constructs.
+# See __doc__ instance attribute for specific docs.
+class _SpecialForm(_Final, _root=True):
+ __slots__ = ('_name', '__doc__', '_getitem')
- This only exists to give a better error message in case
- someone tries to subclass a special typing object (not a good idea).
- """
- if (len(args) == 3 and
- isinstance(args[0], str) and
- isinstance(args[1], tuple)):
- # Close enough.
- raise TypeError(f"Cannot subclass {cls!r}")
- return super().__new__(cls)
-
- def __init__(self, name, doc):
- self._name = name
- self._doc = doc
+ def __init__(self, getitem):
+ self._getitem = getitem
+ self._name = getitem.__name__
+ self.__doc__ = getitem.__doc__
- def __eq__(self, other):
- if not isinstance(other, _SpecialForm):
- return NotImplemented
- return self._name == other._name
-
- def __hash__(self):
- return hash((self._name,))
+ def __mro_entries__(self, bases):
+ raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return 'typing.' + self._name
@@ -346,31 +336,17 @@ class _SpecialForm(_Final, _Immutable, _root=True):
@_tp_cache
def __getitem__(self, parameters):
- if self._name in ('ClassVar', 'Final'):
- item = _type_check(parameters, f'{self._name} accepts only single type.')
- return _GenericAlias(self, (item,))
- if self._name == 'Union':
- if parameters == ():
- raise TypeError("Cannot take a Union of no types.")
- if not isinstance(parameters, tuple):
- parameters = (parameters,)
- msg = "Union[arg, ...]: each arg must be a type."
- parameters = tuple(_type_check(p, msg) for p in parameters)
- parameters = _remove_dups_flatten(parameters)
- if len(parameters) == 1:
- return parameters[0]
- return _GenericAlias(self, parameters)
- if self._name == 'Optional':
- arg = _type_check(parameters, "Optional[t] requires a single type.")
- return Union[arg, type(None)]
- if self._name == 'Literal':
- # There is no '_type_check' call because arguments to Literal[...] are
- # values, not types.
- return _GenericAlias(self, parameters)
- raise TypeError(f"{self} is not subscriptable")
-
-
-Any = _SpecialForm('Any', doc=
+ return self._getitem(self, parameters)
+
+
+class _LiteralSpecialForm(_SpecialForm, _root=True):
+ @_tp_cache(typed=True)
+ def __getitem__(self, parameters):
+ return self._getitem(self, parameters)
+
+
+@_SpecialForm
+def Any(self, parameters):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
@@ -380,9 +356,11 @@ Any = _SpecialForm('Any', doc=
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
- """)
+ """
+ raise TypeError(f"{self} is not subscriptable")
-NoReturn = _SpecialForm('NoReturn', doc=
+@_SpecialForm
+def NoReturn(self, parameters):
"""Special type indicating functions that never return.
Example::
@@ -393,9 +371,11 @@ NoReturn = _SpecialForm('NoReturn', doc=
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
- """)
+ """
+ raise TypeError(f"{self} is not subscriptable")
-ClassVar = _SpecialForm('ClassVar', doc=
+@_SpecialForm
+def ClassVar(self, parameters):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
@@ -410,9 +390,12 @@ ClassVar = _SpecialForm('ClassVar', doc=
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
- """)
+ """
+ item = _type_check(parameters, f'{self} accepts only single type.')
+ return _GenericAlias(self, (item,))
-Final = _SpecialForm('Final', doc=
+@_SpecialForm
+def Final(self, parameters):
"""Special typing construct to indicate final names to type checkers.
A final name cannot be re-assigned or overridden in a subclass.
@@ -428,9 +411,12 @@ Final = _SpecialForm('Final', doc=
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
- """)
+ """
+ item = _type_check(parameters, f'{self} accepts only single type.')
+ return _GenericAlias(self, (item,))
-Union = _SpecialForm('Union', doc=
+@_SpecialForm
+def Union(self, parameters):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
@@ -455,15 +441,29 @@ Union = _SpecialForm('Union', doc=
- You cannot subclass or instantiate a union.
- You can use Optional[X] as a shorthand for Union[X, None].
- """)
-
-Optional = _SpecialForm('Optional', doc=
+ """
+ if parameters == ():
+ raise TypeError("Cannot take a Union of no types.")
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+ msg = "Union[arg, ...]: each arg must be a type."
+ parameters = tuple(_type_check(p, msg) for p in parameters)
+ parameters = _remove_dups_flatten(parameters)
+ if len(parameters) == 1:
+ return parameters[0]
+ return _UnionGenericAlias(self, parameters)
+
+@_SpecialForm
+def Optional(self, parameters):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
- """)
+ """
+ arg = _type_check(parameters, f"{self} requires a single type.")
+ return Union[arg, type(None)]
-Literal = _SpecialForm('Literal', doc=
+@_LiteralSpecialForm
+def Literal(self, parameters):
"""Special typing form to define literal types (a.k.a. value types).
This form can be used to indicate to type checkers that the corresponding
@@ -480,10 +480,23 @@ Literal = _SpecialForm('Literal', doc=
open_helper('/some/path', 'r') # Passes type check
open_helper('/other/path', 'typo') # Error in type checker
- Literal[...] cannot be subclassed. At runtime, an arbitrary value
- is allowed as type argument to Literal[...], but type checkers may
- impose restrictions.
- """)
+ Literal[...] cannot be subclassed. At runtime, an arbitrary value
+ is allowed as type argument to Literal[...], but type checkers may
+ impose restrictions.
+ """
+ # There is no '_type_check' call because arguments to Literal[...] are
+ # values, not types.
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+
+ parameters = _flatten_literal_params(parameters)
+
+ try:
+ parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))
+ except TypeError: # unhashable parameters
+ pass
+
+ return _LiteralGenericAlias(self, parameters)
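
# Editor's note: an illustrative sketch (not part of the diff). Literal now
# flattens nested literals and deduplicates by (value, type), so 1 and True
# stay distinct even though 1 == True.
from typing import Literal

print(Literal[Literal[1, 2], 3])       # typing.Literal[1, 2, 3]
print(Literal[1, 1, True])             # typing.Literal[1, True]
print(Literal[1, 2] == Literal[2, 1])  # True: order-insensitive equality
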
class ForwardRef(_Final, _root=True):
@@ -506,7 +519,9 @@ class ForwardRef(_Final, _root=True):
self.__forward_value__ = None
self.__forward_is_argument__ = is_argument
- def _evaluate(self, globalns, localns):
+ def _evaluate(self, globalns, localns, recursive_guard):
+ if self.__forward_arg__ in recursive_guard:
+ return self
if not self.__forward_evaluated__ or localns is not globalns:
if globalns is None and localns is None:
globalns = localns = {}
@@ -514,10 +529,14 @@ class ForwardRef(_Final, _root=True):
globalns = localns
elif localns is None:
localns = globalns
- self.__forward_value__ = _type_check(
+ type_ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.",
- is_argument=self.__forward_is_argument__)
+ is_argument=self.__forward_is_argument__,
+ )
+ self.__forward_value__ = _eval_type(
+ type_, globalns, localns, recursive_guard | {self.__forward_arg__}
+ )
self.__forward_evaluated__ = True
return self.__forward_value__
@@ -581,7 +600,7 @@ class TypeVar(_Final, _Immutable, _root=True):
"""
__slots__ = ('__name__', '__bound__', '__constraints__',
- '__covariant__', '__contravariant__')
+ '__covariant__', '__contravariant__', '__dict__')
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False):
@@ -600,7 +619,10 @@ class TypeVar(_Final, _Immutable, _root=True):
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
- def_mod = sys._getframe(1).f_globals['__name__'] # for pickling
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') # for pickling
+ except (AttributeError, ValueError):
+ def_mod = None
if def_mod != 'typing':
self.__module__ = def_mod
@@ -617,34 +639,10 @@ class TypeVar(_Final, _Immutable, _root=True):
return self.__name__
-# Special typing constructs Union, Optional, Generic, Callable and Tuple
-# use three special attributes for internal bookkeeping of generic types:
-# * __parameters__ is a tuple of unique free type parameters of a generic
-# type, for example, Dict[T, T].__parameters__ == (T,);
-# * __origin__ keeps a reference to a type that was subscripted,
-# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
-# the type.
-# * __args__ is a tuple of all arguments used in subscripting,
-# e.g., Dict[T, int].__args__ == (T, int).
-
-
-# Mapping from non-generic type names that have a generic alias in typing
-# but with a different name.
-_normalize_alias = {'list': 'List',
- 'tuple': 'Tuple',
- 'dict': 'Dict',
- 'set': 'Set',
- 'frozenset': 'FrozenSet',
- 'deque': 'Deque',
- 'defaultdict': 'DefaultDict',
- 'type': 'Type',
- 'Set': 'AbstractSet'}
-
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
-
-class _GenericAlias(_Final, _root=True):
+class _BaseGenericAlias(_Final, _root=True):
"""The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
@@ -653,24 +651,88 @@ class _GenericAlias(_Final, _root=True):
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
- def __init__(self, origin, params, *, inst=True, special=False, name=None):
+ def __init__(self, origin, *, inst=True, name=None):
self._inst = inst
- self._special = special
- if special and name is None:
- orig_name = origin.__name__
- name = _normalize_alias.get(orig_name, orig_name)
self._name = name
+ self.__origin__ = origin
+ self.__slots__ = None # This is not documented.
+
+ def __call__(self, *args, **kwargs):
+ if not self._inst:
+ raise TypeError(f"Type {self._name} cannot be instantiated; "
+ f"use {self.__origin__.__name__}() instead")
+ result = self.__origin__(*args, **kwargs)
+ try:
+ result.__orig_class__ = self
+ except AttributeError:
+ pass
+ return result
+
+ def __mro_entries__(self, bases):
+ res = []
+ if self.__origin__ not in bases:
+ res.append(self.__origin__)
+ i = bases.index(self)
+ for b in bases[i+1:]:
+ if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
+ break
+ else:
+ res.append(Generic)
+ return tuple(res)
+
+ def __getattr__(self, attr):
+ # We are careful for copy and pickle.
+ # Also for simplicity we just don't relay all dunder names
+ if '__origin__' in self.__dict__ and not _is_dunder(attr):
+ return getattr(self.__origin__, attr)
+ raise AttributeError(attr)
+
+ def __setattr__(self, attr, val):
+ if _is_dunder(attr) or attr in ('_name', '_inst', '_nparams'):
+ super().__setattr__(attr, val)
+ else:
+ setattr(self.__origin__, attr, val)
+
+ def __instancecheck__(self, obj):
+ return self.__subclasscheck__(type(obj))
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Subscripted generics cannot be used with"
+ " class and instance checks")
+
+
+# Special typing constructs Union, Optional, Generic, Callable and Tuple
+# use three special attributes for internal bookkeeping of generic types:
+# * __parameters__ is a tuple of unique free type parameters of a generic
+# type, for example, Dict[T, T].__parameters__ == (T,);
+# * __origin__ keeps a reference to a type that was subscripted,
+# e.g., Union[T, int].__origin__ == Union, or the non-generic version of
+# the type.
+# * __args__ is a tuple of all arguments used in subscripting,
+# e.g., Dict[T, int].__args__ == (T, int).
+
+
+class _GenericAlias(_BaseGenericAlias, _root=True):
+ def __init__(self, origin, params, *, inst=True, name=None):
+ super().__init__(origin, inst=inst, name=name)
if not isinstance(params, tuple):
params = (params,)
- self.__origin__ = origin
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in params)
self.__parameters__ = _collect_type_vars(params)
- self.__slots__ = None # This is not documented.
if not name:
self.__module__ = origin.__module__
+ def __eq__(self, other):
+ if not isinstance(other, _GenericAlias):
+ return NotImplemented
+ return (self.__origin__ == other.__origin__
+ and self.__args__ == other.__args__)
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__args__))
+
@_tp_cache
def __getitem__(self, params):
if self.__origin__ in (Generic, Protocol):
@@ -680,125 +742,119 @@ class _GenericAlias(_Final, _root=True):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
- _check_generic(self, params)
- return _subs_tvars(self, self.__parameters__, params)
+ _check_generic(self, params, len(self.__parameters__))
+
+ subst = dict(zip(self.__parameters__, params))
+ new_args = []
+ for arg in self.__args__:
+ if isinstance(arg, TypeVar):
+ arg = subst[arg]
+ elif isinstance(arg, (_GenericAlias, GenericAlias)):
+ subparams = arg.__parameters__
+ if subparams:
+ subargs = tuple(subst[x] for x in subparams)
+ arg = arg[subargs]
+ new_args.append(arg)
+ return self.copy_with(tuple(new_args))
def copy_with(self, params):
- # We don't copy self._special.
- return _GenericAlias(self.__origin__, params, name=self._name, inst=self._inst)
+ return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
def __repr__(self):
- if (self._name != 'Callable' or
- len(self.__args__) == 2 and self.__args__[0] is Ellipsis):
- if self._name:
- name = 'typing.' + self._name
- else:
- name = _type_repr(self.__origin__)
- if not self._special:
- args = f'[{", ".join([_type_repr(a) for a in self.__args__])}]'
- else:
- args = ''
- return (f'{name}{args}')
- if self._special:
- return 'typing.Callable'
- return (f'typing.Callable'
- f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
- f'{_type_repr(self.__args__[-1])}]')
-
- def __eq__(self, other):
- if not isinstance(other, _GenericAlias):
- return NotImplemented
- if self.__origin__ != other.__origin__:
- return False
- if self.__origin__ is Union and other.__origin__ is Union:
- return frozenset(self.__args__) == frozenset(other.__args__)
- return self.__args__ == other.__args__
-
- def __hash__(self):
- if self.__origin__ is Union:
- return hash((Union, frozenset(self.__args__)))
- return hash((self.__origin__, self.__args__))
+ if self._name:
+ name = 'typing.' + self._name
+ else:
+ name = _type_repr(self.__origin__)
+ args = ", ".join([_type_repr(a) for a in self.__args__])
+ return f'{name}[{args}]'
- def __call__(self, *args, **kwargs):
- if not self._inst:
- raise TypeError(f"Type {self._name} cannot be instantiated; "
- f"use {self._name.lower()}() instead")
- result = self.__origin__(*args, **kwargs)
- try:
- result.__orig_class__ = self
- except AttributeError:
- pass
- return result
+ def __reduce__(self):
+ if self._name:
+ origin = globals()[self._name]
+ else:
+ origin = self.__origin__
+ args = tuple(self.__args__)
+ if len(args) == 1 and not isinstance(args[0], tuple):
+ args, = args
+ return operator.getitem, (origin, args)
def __mro_entries__(self, bases):
if self._name: # generic version of an ABC or built-in class
- res = []
- if self.__origin__ not in bases:
- res.append(self.__origin__)
- i = bases.index(self)
- if not any(isinstance(b, _GenericAlias) or issubclass(b, Generic)
- for b in bases[i+1:]):
- res.append(Generic)
- return tuple(res)
+ return super().__mro_entries__(bases)
if self.__origin__ is Generic:
if Protocol in bases:
return ()
i = bases.index(self)
for b in bases[i+1:]:
- if isinstance(b, _GenericAlias) and b is not self:
+ if isinstance(b, _BaseGenericAlias) and b is not self:
return ()
return (self.__origin__,)
- def __getattr__(self, attr):
- # We are careful for copy and pickle.
- # Also for simplicity we just don't relay all dunder names
- if '__origin__' in self.__dict__ and not _is_dunder(attr):
- return getattr(self.__origin__, attr)
- raise AttributeError(attr)
- def __setattr__(self, attr, val):
- if _is_dunder(attr) or attr in ('_name', '_inst', '_special'):
- super().__setattr__(attr, val)
+# _nparams is the number of accepted parameters, e.g. 0 for Hashable,
+# 1 for List and 2 for Dict. It may be -1 if variable number of
+# parameters are accepted (needs custom __getitem__).
+
+class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
+ def __init__(self, origin, nparams, *, inst=True, name=None):
+ if name is None:
+ name = origin.__name__
+ super().__init__(origin, inst=inst, name=name)
+ self._nparams = nparams
+ if origin.__module__ == 'builtins':
+ self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
- setattr(self.__origin__, attr, val)
+ self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
- def __instancecheck__(self, obj):
- return self.__subclasscheck__(type(obj))
+ @_tp_cache
+ def __getitem__(self, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ msg = "Parameters to generic types must be types."
+ params = tuple(_type_check(p, msg) for p in params)
+ _check_generic(self, params, self._nparams)
+ return self.copy_with(params)
+
+ def copy_with(self, params):
+ return _GenericAlias(self.__origin__, params,
+ name=self._name, inst=self._inst)
+
+ def __repr__(self):
+ return 'typing.' + self._name
def __subclasscheck__(self, cls):
- if self._special:
- if not isinstance(cls, _GenericAlias):
- return issubclass(cls, self.__origin__)
- if cls._special:
- return issubclass(cls.__origin__, self.__origin__)
- raise TypeError("Subscripted generics cannot be used with"
- " class and instance checks")
+ if isinstance(cls, _SpecialGenericAlias):
+ return issubclass(cls.__origin__, self.__origin__)
+ if not isinstance(cls, _GenericAlias):
+ return issubclass(cls, self.__origin__)
+ return super().__subclasscheck__(cls)
def __reduce__(self):
- if self._special:
- return self._name
+ return self._name
- if self._name:
- origin = globals()[self._name]
- else:
- origin = self.__origin__
- if (origin is Callable and
- not (len(self.__args__) == 2 and self.__args__[0] is Ellipsis)):
- args = list(self.__args__[:-1]), self.__args__[-1]
- else:
- args = tuple(self.__args__)
- if len(args) == 1 and not isinstance(args[0], tuple):
- args, = args
- return operator.getitem, (origin, args)
+class _CallableGenericAlias(_GenericAlias, _root=True):
+ def __repr__(self):
+ assert self._name == 'Callable'
+ if len(self.__args__) == 2 and self.__args__[0] is Ellipsis:
+ return super().__repr__()
+ return (f'typing.Callable'
+ f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
+ f'{_type_repr(self.__args__[-1])}]')
+
+ def __reduce__(self):
+ args = self.__args__
+ if not (len(args) == 2 and args[0] is ...):
+ args = list(args[:-1]), args[-1]
+ return operator.getitem, (Callable, args)
+
+
+class _CallableType(_SpecialGenericAlias, _root=True):
+ def copy_with(self, params):
+ return _CallableGenericAlias(self.__origin__, params,
+ name=self._name, inst=self._inst)
-class _VariadicGenericAlias(_GenericAlias, _root=True):
- """Same as _GenericAlias above but for variadic aliases. Currently,
- this is used only by special internal aliases: Tuple and Callable.
- """
def __getitem__(self, params):
- if self._name != 'Callable' or not self._special:
- return self.__getitem_inner__(params)
if not isinstance(params, tuple) or len(params) != 2:
raise TypeError("Callable must be used as "
"Callable[[arg, ...], result].")
@@ -814,29 +870,69 @@ class _VariadicGenericAlias(_GenericAlias, _root=True):
@_tp_cache
def __getitem_inner__(self, params):
- if self.__origin__ is tuple and self._special:
- if params == ():
- return self.copy_with((_TypingEmpty,))
- if not isinstance(params, tuple):
- params = (params,)
- if len(params) == 2 and params[1] is ...:
- msg = "Tuple[t, ...]: t must be a type."
- p = _type_check(params[0], msg)
- return self.copy_with((p, _TypingEllipsis))
- msg = "Tuple[t0, t1, ...]: each t must be a type."
- params = tuple(_type_check(p, msg) for p in params)
- return self.copy_with(params)
- if self.__origin__ is collections.abc.Callable and self._special:
- args, result = params
- msg = "Callable[args, result]: result must be a type."
- result = _type_check(result, msg)
- if args is Ellipsis:
- return self.copy_with((_TypingEllipsis, result))
- msg = "Callable[[arg, ...], result]: each arg must be a type."
- args = tuple(_type_check(arg, msg) for arg in args)
- params = args + (result,)
- return self.copy_with(params)
- return super().__getitem__(params)
+ args, result = params
+ msg = "Callable[args, result]: result must be a type."
+ result = _type_check(result, msg)
+ if args is Ellipsis:
+ return self.copy_with((_TypingEllipsis, result))
+ msg = "Callable[[arg, ...], result]: each arg must be a type."
+ args = tuple(_type_check(arg, msg) for arg in args)
+ params = args + (result,)
+ return self.copy_with(params)
+
+
+class _TupleType(_SpecialGenericAlias, _root=True):
+ @_tp_cache
+ def __getitem__(self, params):
+ if params == ():
+ return self.copy_with((_TypingEmpty,))
+ if not isinstance(params, tuple):
+ params = (params,)
+ if len(params) == 2 and params[1] is ...:
+ msg = "Tuple[t, ...]: t must be a type."
+ p = _type_check(params[0], msg)
+ return self.copy_with((p, _TypingEllipsis))
+ msg = "Tuple[t0, t1, ...]: each t must be a type."
+ params = tuple(_type_check(p, msg) for p in params)
+ return self.copy_with(params)
+
+
+class _UnionGenericAlias(_GenericAlias, _root=True):
+ def copy_with(self, params):
+ return Union[params]
+
+ def __eq__(self, other):
+ if not isinstance(other, _UnionGenericAlias):
+ return NotImplemented
+ return set(self.__args__) == set(other.__args__)
+
+ def __hash__(self):
+ return hash(frozenset(self.__args__))
+
+ def __repr__(self):
+ args = self.__args__
+ if len(args) == 2:
+ if args[0] is type(None):
+ return f'typing.Optional[{_type_repr(args[1])}]'
+ elif args[1] is type(None):
+ return f'typing.Optional[{_type_repr(args[0])}]'
+ return super().__repr__()
+
+
+def _value_and_type_iter(parameters):
+ return ((p, type(p)) for p in parameters)
+
+
+class _LiteralGenericAlias(_GenericAlias, _root=True):
+
+ def __eq__(self, other):
+ if not isinstance(other, _LiteralGenericAlias):
+ return NotImplemented
+
+ return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))
+
+ def __hash__(self):
+ return hash(frozenset(_value_and_type_iter(self.__args__)))
class Generic:
@@ -862,16 +958,6 @@ class Generic:
__slots__ = ()
_is_protocol = False
- def __new__(cls, *args, **kwds):
- if cls in (Generic, Protocol):
- raise TypeError(f"Type {cls.__name__} cannot be instantiated; "
- "it can be used only as a base class")
- if super().__new__ is object.__new__ and cls.__init__ is not object.__init__:
- obj = super().__new__(cls)
- else:
- obj = super().__new__(cls, *args, **kwds)
- return obj
-
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
@@ -891,7 +977,7 @@ class Generic:
f"Parameters to {cls.__name__}[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
- _check_generic(cls, params)
+ _check_generic(cls, params, len(cls.__parameters__))
return _GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
@@ -946,7 +1032,7 @@ _TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
'__init__', '__module__', '__new__', '__slots__',
- '__subclasshook__', '__weakref__']
+ '__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
@@ -980,7 +1066,7 @@ def _no_init(self, *args, **kwargs):
def _allow_reckless_class_cheks():
- """Allow instnance and class checks for special stdlib modules.
+ """Allow instance and class checks for special stdlib modules.
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
@@ -1118,6 +1204,100 @@ class Protocol(Generic, metaclass=_ProtocolMeta):
cls.__init__ = _no_init
+class _AnnotatedAlias(_GenericAlias, _root=True):
+ """Runtime representation of an annotated type.
+
+ At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+ with extra annotations. The alias behaves like a normal typing alias,
+ instantiating is the same as instantiating the underlying type, binding
+ it to types is also the same.
+ """
+ def __init__(self, origin, metadata):
+ if isinstance(origin, _AnnotatedAlias):
+ metadata = origin.__metadata__ + metadata
+ origin = origin.__origin__
+ super().__init__(origin, origin)
+ self.__metadata__ = metadata
+
+ def copy_with(self, params):
+ assert len(params) == 1
+ new_type = params[0]
+ return _AnnotatedAlias(new_type, self.__metadata__)
+
+ def __repr__(self):
+ return "typing.Annotated[{}, {}]".format(
+ _type_repr(self.__origin__),
+ ", ".join(repr(a) for a in self.__metadata__)
+ )
+
+ def __reduce__(self):
+ return operator.getitem, (
+ Annotated, (self.__origin__,) + self.__metadata__
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, _AnnotatedAlias):
+ return NotImplemented
+ return (self.__origin__ == other.__origin__
+ and self.__metadata__ == other.__metadata__)
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__metadata__))
+
+
+class Annotated:
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise TypeError("Type Annotated cannot be instantiated.")
+
+ @_tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be used "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ msg = "Annotated[t, ...]: t must be a type."
+ origin = _type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return _AnnotatedAlias(origin, metadata)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError(
+ "Cannot subclass {}.Annotated".format(cls.__module__)
+ )
+
+
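
# Editor's note: an illustrative sketch (not part of the diff). Annotated
# wraps a type with arbitrary metadata that typing itself ignores; the
# names below (UserId, lookup) are made up for the example.
from typing import Annotated, get_type_hints

UserId = Annotated[int, "primary key"]

def lookup(user_id: UserId) -> str: ...

print(get_type_hints(lookup))                       # {'user_id': <class 'int'>, 'return': <class 'str'>}
print(get_type_hints(lookup, include_extras=True))  # keeps the Annotated wrapper
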
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol.
@@ -1179,12 +1359,13 @@ _allowed_types = (types.FunctionType, types.BuiltinFunctionType,
WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)
-def get_type_hints(obj, globalns=None, localns=None):
+def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
- forward references encoded as string literals, and if necessary
- adds Optional[t] if a default value equal to None is set.
+ forward references encoded as string literals, adds Optional[t] if a
+ default value equal to None is set and recursively replaces all
+ 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
@@ -1228,7 +1409,7 @@ def get_type_hints(obj, globalns=None, localns=None):
value = ForwardRef(value, is_argument=False)
value = _eval_type(value, base_globals, localns)
hints[name] = value
- return hints
+ return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
@@ -1262,14 +1443,32 @@ def get_type_hints(obj, globalns=None, localns=None):
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
- return hints
+ return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
+
+
+def _strip_annotations(t):
+ """Strips the annotations from a given type.
+ """
+ if isinstance(t, _AnnotatedAlias):
+ return _strip_annotations(t.__origin__)
+ if isinstance(t, _GenericAlias):
+ stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return t.copy_with(stripped_args)
+ if isinstance(t, GenericAlias):
+ stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return GenericAlias(t.__origin__, stripped_args)
+ return t
def get_origin(tp):
"""Get the unsubscripted version of a type.
- This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar.
- Return None for unsupported types. Examples::
+ This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+ and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
@@ -1279,7 +1478,9 @@ def get_origin(tp):
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
"""
- if isinstance(tp, _GenericAlias):
+ if isinstance(tp, _AnnotatedAlias):
+ return Annotated
+ if isinstance(tp, (_BaseGenericAlias, GenericAlias)):
return tp.__origin__
if tp is Generic:
return Generic
@@ -1297,11 +1498,15 @@ def get_args(tp):
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
+ if isinstance(tp, _AnnotatedAlias):
+ return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, _GenericAlias):
res = tp.__args__
- if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+ if tp.__origin__ is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
+ if isinstance(tp, GenericAlias):
+ return tp.__args__
return ()
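
A sketch of the extended get_origin()/get_args() behaviour, including the new PEP 585 GenericAlias branch (illustrative, not part of the patch):

    from typing import Annotated, Callable, get_args, get_origin

    assert get_origin(Annotated[int, "metres"]) is Annotated
    assert get_args(Annotated[int, "metres"]) == (int, "metres")
    assert get_origin(list[int]) is list          # plain types.GenericAlias (PEP 585)
    assert get_args(list[int]) == (int,)
    assert get_args(Callable[[int], str]) == ([int], str)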
@@ -1429,21 +1634,20 @@ AnyStr = TypeVar('AnyStr', bytes, str)
# Various ABCs mimicking those in collections.abc.
-def _alias(origin, params, inst=True):
- return _GenericAlias(origin, params, special=True, inst=inst)
-
-Hashable = _alias(collections.abc.Hashable, ()) # Not generic.
-Awaitable = _alias(collections.abc.Awaitable, T_co)
-Coroutine = _alias(collections.abc.Coroutine, (T_co, T_contra, V_co))
-AsyncIterable = _alias(collections.abc.AsyncIterable, T_co)
-AsyncIterator = _alias(collections.abc.AsyncIterator, T_co)
-Iterable = _alias(collections.abc.Iterable, T_co)
-Iterator = _alias(collections.abc.Iterator, T_co)
-Reversible = _alias(collections.abc.Reversible, T_co)
-Sized = _alias(collections.abc.Sized, ()) # Not generic.
-Container = _alias(collections.abc.Container, T_co)
-Collection = _alias(collections.abc.Collection, T_co)
-Callable = _VariadicGenericAlias(collections.abc.Callable, (), special=True)
+_alias = _SpecialGenericAlias
+
+Hashable = _alias(collections.abc.Hashable, 0) # Not generic.
+Awaitable = _alias(collections.abc.Awaitable, 1)
+Coroutine = _alias(collections.abc.Coroutine, 3)
+AsyncIterable = _alias(collections.abc.AsyncIterable, 1)
+AsyncIterator = _alias(collections.abc.AsyncIterator, 1)
+Iterable = _alias(collections.abc.Iterable, 1)
+Iterator = _alias(collections.abc.Iterator, 1)
+Reversible = _alias(collections.abc.Reversible, 1)
+Sized = _alias(collections.abc.Sized, 0) # Not generic.
+Container = _alias(collections.abc.Container, 1)
+Collection = _alias(collections.abc.Collection, 1)
+Callable = _CallableType(collections.abc.Callable, 2)
Callable.__doc__ = \
"""Callable type; Callable[[int], str] is a function of (int) -> str.
@@ -1454,15 +1658,16 @@ Callable.__doc__ = \
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
-AbstractSet = _alias(collections.abc.Set, T_co)
-MutableSet = _alias(collections.abc.MutableSet, T)
+AbstractSet = _alias(collections.abc.Set, 1, name='AbstractSet')
+MutableSet = _alias(collections.abc.MutableSet, 1)
# NOTE: Mapping is only covariant in the value type.
-Mapping = _alias(collections.abc.Mapping, (KT, VT_co))
-MutableMapping = _alias(collections.abc.MutableMapping, (KT, VT))
-Sequence = _alias(collections.abc.Sequence, T_co)
-MutableSequence = _alias(collections.abc.MutableSequence, T)
-ByteString = _alias(collections.abc.ByteString, ()) # Not generic
-Tuple = _VariadicGenericAlias(tuple, (), inst=False, special=True)
+Mapping = _alias(collections.abc.Mapping, 2)
+MutableMapping = _alias(collections.abc.MutableMapping, 2)
+Sequence = _alias(collections.abc.Sequence, 1)
+MutableSequence = _alias(collections.abc.MutableSequence, 1)
+ByteString = _alias(collections.abc.ByteString, 0) # Not generic
+# Tuple accepts a variable number of parameters.
+Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
Tuple.__doc__ = \
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
@@ -1472,24 +1677,24 @@ Tuple.__doc__ = \
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
-List = _alias(list, T, inst=False)
-Deque = _alias(collections.deque, T)
-Set = _alias(set, T, inst=False)
-FrozenSet = _alias(frozenset, T_co, inst=False)
-MappingView = _alias(collections.abc.MappingView, T_co)
-KeysView = _alias(collections.abc.KeysView, KT)
-ItemsView = _alias(collections.abc.ItemsView, (KT, VT_co))
-ValuesView = _alias(collections.abc.ValuesView, VT_co)
-ContextManager = _alias(contextlib.AbstractContextManager, T_co)
-AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, T_co)
-Dict = _alias(dict, (KT, VT), inst=False)
-DefaultDict = _alias(collections.defaultdict, (KT, VT))
-OrderedDict = _alias(collections.OrderedDict, (KT, VT))
-Counter = _alias(collections.Counter, T)
-ChainMap = _alias(collections.ChainMap, (KT, VT))
-Generator = _alias(collections.abc.Generator, (T_co, T_contra, V_co))
-AsyncGenerator = _alias(collections.abc.AsyncGenerator, (T_co, T_contra))
-Type = _alias(type, CT_co, inst=False)
+List = _alias(list, 1, inst=False, name='List')
+Deque = _alias(collections.deque, 1, name='Deque')
+Set = _alias(set, 1, inst=False, name='Set')
+FrozenSet = _alias(frozenset, 1, inst=False, name='FrozenSet')
+MappingView = _alias(collections.abc.MappingView, 1)
+KeysView = _alias(collections.abc.KeysView, 1)
+ItemsView = _alias(collections.abc.ItemsView, 2)
+ValuesView = _alias(collections.abc.ValuesView, 1)
+ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
+AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
+Dict = _alias(dict, 2, inst=False, name='Dict')
+DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
+OrderedDict = _alias(collections.OrderedDict, 2)
+Counter = _alias(collections.Counter, 1)
+ChainMap = _alias(collections.ChainMap, 2)
+Generator = _alias(collections.abc.Generator, 3)
+AsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)
+Type = _alias(type, 1, inst=False, name='Type')
Type.__doc__ = \
"""A special construct usable to annotate class objects.
@@ -1585,50 +1790,41 @@ class SupportsRound(Protocol[T_co]):
pass
-def _make_nmtuple(name, types):
- msg = "NamedTuple('Name', [(f0, t0), (f1, t1), ...]); each t must be a type"
- types = [(n, _type_check(t, msg)) for n, t in types]
- nm_tpl = collections.namedtuple(name, [n for n, t in types])
- # Prior to PEP 526, only _field_types attribute was assigned.
- # Now __annotations__ are used and _field_types is deprecated (remove in 3.9)
- nm_tpl.__annotations__ = nm_tpl._field_types = dict(types)
- try:
- nm_tpl.__module__ = sys._getframe(2).f_globals.get('__name__', '__main__')
- except (AttributeError, ValueError):
- pass
+def _make_nmtuple(name, types, module, defaults = ()):
+ fields = [n for n, t in types]
+ types = {n: _type_check(t, f"field {n} annotation must be a type")
+ for n, t in types}
+ nm_tpl = collections.namedtuple(name, fields,
+ defaults=defaults, module=module)
+ nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = types
return nm_tpl
# attributes prohibited to set in NamedTuple class syntax
-_prohibited = ('__new__', '__init__', '__slots__', '__getnewargs__',
- '_fields', '_field_defaults', '_field_types',
- '_make', '_replace', '_asdict', '_source')
+_prohibited = frozenset({'__new__', '__init__', '__slots__', '__getnewargs__',
+ '_fields', '_field_defaults',
+ '_make', '_replace', '_asdict', '_source'})
-_special = ('__module__', '__name__', '__annotations__')
+_special = frozenset({'__module__', '__name__', '__annotations__'})
class NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
- if ns.get('_root', False):
- return super().__new__(cls, typename, bases, ns)
+ assert bases[0] is _NamedTuple
types = ns.get('__annotations__', {})
- nm_tpl = _make_nmtuple(typename, types.items())
- defaults = []
- defaults_dict = {}
+ default_names = []
for field_name in types:
if field_name in ns:
- default_value = ns[field_name]
- defaults.append(default_value)
- defaults_dict[field_name] = default_value
- elif defaults:
- raise TypeError("Non-default namedtuple field {field_name} cannot "
- "follow default field(s) {default_names}"
- .format(field_name=field_name,
- default_names=', '.join(defaults_dict.keys())))
- nm_tpl.__new__.__annotations__ = dict(types)
- nm_tpl.__new__.__defaults__ = tuple(defaults)
- nm_tpl._field_defaults = defaults_dict
+ default_names.append(field_name)
+ elif default_names:
+ raise TypeError(f"Non-default namedtuple field {field_name} "
+ f"cannot follow default field"
+ f"{'s' if len(default_names) > 1 else ''} "
+ f"{', '.join(default_names)}")
+ nm_tpl = _make_nmtuple(typename, types.items(),
+ defaults=[ns[n] for n in default_names],
+ module=ns['__module__'])
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited:
@@ -1638,7 +1834,7 @@ class NamedTupleMeta(type):
return nm_tpl
-class NamedTuple(metaclass=NamedTupleMeta):
+def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
@@ -1662,99 +1858,81 @@ class NamedTuple(metaclass=NamedTupleMeta):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
- _root = True
-
- def __new__(*args, **kwargs):
- if not args:
- raise TypeError('NamedTuple.__new__(): not enough arguments')
- cls, *args = args # allow the "cls" keyword be passed
- if args:
- typename, *args = args # allow the "typename" keyword be passed
- elif 'typename' in kwargs:
- typename = kwargs.pop('typename')
- import warnings
- warnings.warn("Passing 'typename' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError("NamedTuple.__new__() missing 1 required positional "
- "argument: 'typename'")
- if args:
- try:
- fields, = args # allow the "fields" keyword be passed
- except ValueError:
- raise TypeError(f'NamedTuple.__new__() takes from 2 to 3 '
- f'positional arguments but {len(args) + 2} '
- f'were given') from None
- elif 'fields' in kwargs and len(kwargs) == 1:
- fields = kwargs.pop('fields')
- import warnings
- warnings.warn("Passing 'fields' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- fields = None
-
- if fields is None:
- fields = kwargs.items()
- elif kwargs:
- raise TypeError("Either list of fields or keywords"
- " can be provided to NamedTuple, not both")
- return _make_nmtuple(typename, fields)
- __new__.__text_signature__ = '($cls, typename, fields=None, /, **kwargs)'
-
-
-def _dict_new(cls, /, *args, **kwargs):
- return dict(*args, **kwargs)
-
-
-def _typeddict_new(cls, typename, fields=None, /, *, total=True, **kwargs):
if fields is None:
- fields = kwargs
+ fields = kwargs.items()
elif kwargs:
- raise TypeError("TypedDict takes either a dict or keyword arguments,"
- " but not both")
-
- ns = {'__annotations__': dict(fields), '__total__': total}
+ raise TypeError("Either list of fields or keywords"
+ " can be provided to NamedTuple, not both")
try:
- # Setting correct module is necessary to make typed dict classes pickleable.
- ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+ module = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
- pass
+ module = None
+ return _make_nmtuple(typename, fields, module=module)
- return _TypedDictMeta(typename, (), ns)
+_NamedTuple = type.__new__(NamedTupleMeta, 'NamedTuple', (), {})
+def _namedtuple_mro_entries(bases):
+ if len(bases) > 1:
+ raise TypeError("Multiple inheritance with NamedTuple is not supported")
+ assert bases[0] is NamedTuple
+ return (_NamedTuple,)
-def _check_fails(cls, other):
- # Typed dicts are only for static structural subtyping.
- raise TypeError('TypedDict does not support instance and class checks')
+NamedTuple.__mro_entries__ = _namedtuple_mro_entries
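
Both NamedTuple syntaxes keep working after this class-to-function rewrite; a small sketch (not from the patch):

    from typing import NamedTuple

    class Employee(NamedTuple):
        name: str
        id: int = 3

    assert Employee('Guido').id == 3
    # Declaring a non-default field after a default one still raises TypeError,
    # now from the rewritten NamedTupleMeta above.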
class _TypedDictMeta(type):
def __new__(cls, name, bases, ns, total=True):
"""Create new typed dict class object.
- This method is called directly when TypedDict is subclassed,
- or via _typeddict_new when TypedDict is instantiated. This way
+ This method is called when TypedDict is subclassed,
+ or when TypedDict is instantiated. This way
TypedDict supports all three syntax forms described in its docstring.
- Subclasses and instances of TypedDict return actual dictionaries
- via _dict_new.
+ Subclasses and instances of TypedDict return actual dictionaries.
"""
- ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
- tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns)
-
- anns = ns.get('__annotations__', {})
+ for base in bases:
+ if type(base) is not _TypedDictMeta:
+ raise TypeError('cannot inherit from both a TypedDict type '
+ 'and a non-TypedDict base class')
+ tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
+
+ annotations = {}
+ own_annotations = ns.get('__annotations__', {})
+ own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
- anns = {n: _type_check(tp, msg) for n, tp in anns.items()}
+ own_annotations = {
+ n: _type_check(tp, msg) for n, tp in own_annotations.items()
+ }
+ required_keys = set()
+ optional_keys = set()
+
for base in bases:
- anns.update(base.__dict__.get('__annotations__', {}))
- tp_dict.__annotations__ = anns
+ annotations.update(base.__dict__.get('__annotations__', {}))
+ required_keys.update(base.__dict__.get('__required_keys__', ()))
+ optional_keys.update(base.__dict__.get('__optional_keys__', ()))
+
+ annotations.update(own_annotations)
+ if total:
+ required_keys.update(own_annotation_keys)
+ else:
+ optional_keys.update(own_annotation_keys)
+
+ tp_dict.__annotations__ = annotations
+ tp_dict.__required_keys__ = frozenset(required_keys)
+ tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
- __instancecheck__ = __subclasscheck__ = _check_fails
+ __call__ = dict # static method
+
+ def __subclasscheck__(cls, other):
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError('TypedDict does not support instance and class checks')
+
+ __instancecheck__ = __subclasscheck__
-class TypedDict(dict, metaclass=_TypedDictMeta):
+def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
@@ -1773,8 +1951,9 @@ class TypedDict(dict, metaclass=_TypedDictMeta):
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
- The type info can be accessed via Point2D.__annotations__. TypedDict
- supports two additional equivalent forms::
+ The type info can be accessed via the Point2D.__annotations__ dict, and
+ the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+ TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
@@ -1795,6 +1974,23 @@ class TypedDict(dict, metaclass=_TypedDictMeta):
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
+ if fields is None:
+ fields = kwargs
+ elif kwargs:
+ raise TypeError("TypedDict takes either a dict or keyword arguments,"
+ " but not both")
+
+ ns = {'__annotations__': dict(fields), '__total__': total}
+ try:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return _TypedDictMeta(typename, (), ns)
+
+_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
+TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
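
A sketch of the new __required_keys__/__optional_keys__ metadata (illustrative class names; instantiation still just builds a plain dict):

    from typing import TypedDict

    class Point2D(TypedDict):
        x: int
        y: int

    class Point3D(Point2D, total=False):
        z: int

    assert Point3D.__required_keys__ == frozenset({'x', 'y'})
    assert Point3D.__optional_keys__ == frozenset({'z'})
    assert Point3D(x=1, y=2) == {'x': 1, 'y': 2}   # __call__ is dict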
def NewType(name, tp):
@@ -1992,8 +2188,8 @@ class io:
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
-Pattern = _alias(stdlib_re.Pattern, AnyStr)
-Match = _alias(stdlib_re.Match, AnyStr)
+Pattern = _alias(stdlib_re.Pattern, 1)
+Match = _alias(stdlib_re.Match, 1)
class re:
"""Wrapper namespace for re type aliases."""
diff --git a/x64/Lib/urllib/parse.py b/x64/Lib/urllib/parse.py
index e2b6f13..ea897c3 100644
--- a/x64/Lib/urllib/parse.py
+++ b/x64/Lib/urllib/parse.py
@@ -29,6 +29,7 @@ test_urlparse.py provides a good indicator of parsing behavior.
import re
import sys
+import types
import collections
import warnings
@@ -176,6 +177,8 @@ class _NetlocResultMixinBase(object):
raise ValueError("Port out of range 0-65535")
return port
+ __class_getitem__ = classmethod(types.GenericAlias)
+
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
__slots__ = ()
@@ -366,9 +369,23 @@ del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
- Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
- Note that we don't break the components up in smaller bits
- (e.g. netloc is a single string) and we don't expand % escapes."""
+
+ The result is a named 6-tuple with fields corresponding to the
+ above. It is either a ParseResult or ParseResultBytes object,
+ depending on the type of the url parameter.
+
+ The username, password, hostname, and port sub-components of netloc
+ can also be accessed as attributes of the returned object.
+
+ The scheme argument provides the default value of the scheme
+ component when no scheme is found in url.
+
+ If allow_fragments is False, no attempt is made to separate the
+ fragment component from the previous component, which can be either
+ path or query.
+
+ Note that % escapes are not expanded.
+ """
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
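
As the expanded docstring says, the components come back as attributes of a named tuple (sketch, not part of the patch):

    from urllib.parse import urlparse

    r = urlparse('http://user:pw@example.org:8042/over/there;param?name=ferret#nose')
    assert (r.scheme, r.params, r.query, r.fragment) == ('http', 'param', 'name=ferret', 'nose')
    assert (r.username, r.hostname, r.port) == ('user', 'example.org', 8042)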
@@ -417,9 +434,24 @@ def _checknetloc(netloc):
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
- Return a 5-tuple: (scheme, netloc, path, query, fragment).
- Note that we don't break the components up in smaller bits
- (e.g. netloc is a single string) and we don't expand % escapes."""
+
+ The result is a named 5-tuple with fields corresponding to the
+ above. It is either a SplitResult or SplitResultBytes object,
+ depending on the type of the url parameter.
+
+ The username, password, hostname, and port sub-components of netloc
+ can also be accessed as attributes of the returned object.
+
+ The scheme argument provides the default value of the scheme
+ component when no scheme is found in url.
+
+ If allow_fragments is False, no attempt is made to separate the
+ fragment component from the previous component, which can be either
+ path or query.
+
+ Note that % escapes are not expanded.
+ """
+
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
@@ -431,31 +463,11 @@ def urlsplit(url, scheme='', allow_fragments=True):
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
- if url[:i] == 'http': # optimize the common case
- url = url[i+1:]
- if url[:2] == '//':
- netloc, url = _splitnetloc(url, 2)
- if (('[' in netloc and ']' not in netloc) or
- (']' in netloc and '[' not in netloc)):
- raise ValueError("Invalid IPv6 URL")
- if allow_fragments and '#' in url:
- url, fragment = url.split('#', 1)
- if '?' in url:
- url, query = url.split('?', 1)
- _checknetloc(netloc)
- v = SplitResult('http', netloc, url, query, fragment)
- _parse_cache[key] = v
- return _coerce_result(v)
for c in url[:i]:
if c not in scheme_chars:
break
else:
- # make sure "url" is not actually a port number (in which case
- # "scheme" is really part of the path)
- rest = url[i+1:]
- if not rest or any(c not in '0123456789' for c in rest):
- # not a port number
- scheme, url = url[:i].lower(), rest
+ scheme, url = url[:i].lower(), url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
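
With the deleted block gone, an all-digit remainder no longer suppresses scheme parsing (and the 'http' fast path is removed); an illustrative before/after:

    from urllib.parse import urlsplit

    r = urlsplit('path:80')
    assert (r.scheme, r.path) == ('path', '80')   # earlier versions returned ('', 'path:80')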
@@ -631,6 +643,8 @@ def unquote(string, encoding='utf-8', errors='replace'):
unquote('abc%20def') -> 'abc def'.
"""
+ if isinstance(string, bytes):
+ return unquote_to_bytes(string).decode(encoding, errors)
if '%' not in string:
string.split
return string
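
With the added bytes branch, unquote() now accepts bytes input instead of failing on the '%' containment test (sketch):

    from urllib.parse import unquote

    assert unquote('abc%20def') == 'abc def'
    assert unquote(b'abc%20def') == 'abc def'   # bytes are routed through unquote_to_bytes()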
diff --git a/x64/Lib/urllib/request.py b/x64/Lib/urllib/request.py
index 6f6577b..a8c870b 100644
--- a/x64/Lib/urllib/request.py
+++ b/x64/Lib/urllib/request.py
@@ -163,18 +163,10 @@ def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
The *cadefault* parameter is ignored.
- This function always returns an object which can work as a context
- manager and has methods such as
- * geturl() - return the URL of the resource retrieved, commonly used to
- determine if a redirect was followed
-
- * info() - return the meta-information of the page, such as headers, in the
- form of an email.message_from_string() instance (see Quick Reference to
- HTTP Headers)
-
- * getcode() - return the HTTP status code of the response. Raises URLError
- on errors.
+ This function always returns an object which can work as a
+ context manager and has the properties url, headers, and status.
+ See urllib.response.addinfourl for more detail on these properties.
For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse
object slightly modified. In addition to the three new methods above, the
@@ -945,8 +937,15 @@ class AbstractBasicAuthHandler:
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
- rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
- 'realm=(["\']?)([^"\']*)\\2', re.I)
+ rx = re.compile('(?:^|,)' # start of the string or ','
+ '[ \t]*' # optional whitespaces
+ '([^ \t]+)' # scheme like "Basic"
+ '[ \t]+' # mandatory whitespaces
+ # realm=xxx
+ # realm='xxx'
+ # realm="xxx"
+ 'realm=(["\']?)([^"\']*)\\2',
+ re.I)
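
Standalone sketch of the reworked pattern (the header value is invented): anchored on '^' or ',', it matches every challenge in a multi-challenge header rather than only the first one found:

    import re

    rx = re.compile('(?:^|,)[ \t]*([^ \t]+)[ \t]+realm=(["\']?)([^"\']*)\\2', re.I)
    header = 'Basic realm="shire", Bearer realm="mordor"'
    print([(m.group(1), m.group(3)) for m in rx.finditer(header)])
    # [('Basic', 'shire'), ('Bearer', 'mordor')]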
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
@@ -958,27 +957,51 @@ class AbstractBasicAuthHandler:
self.passwd = password_mgr
self.add_password = self.passwd.add_password
+ def _parse_realm(self, header):
+ # parse WWW-Authenticate header: accept multiple challenges per header
+ found_challenge = False
+ for mo in AbstractBasicAuthHandler.rx.finditer(header):
+ scheme, quote, realm = mo.groups()
+ if quote not in ['"', "'"]:
+ warnings.warn("Basic Auth Realm was unquoted",
+ UserWarning, 3)
+
+ yield (scheme, realm)
+
+ found_challenge = True
+
+ if not found_challenge:
+ if header:
+ scheme = header.split()[0]
+ else:
+ scheme = ''
+ yield (scheme, None)
+
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
- # XXX could be multiple headers
- authreq = headers.get(authreq, None)
+ headers = headers.get_all(authreq)
+ if not headers:
+ # no header found
+ return
- if authreq:
- scheme = authreq.split()[0]
- if scheme.lower() != 'basic':
- raise ValueError("AbstractBasicAuthHandler does not"
- " support the following scheme: '%s'" %
- scheme)
- else:
- mo = AbstractBasicAuthHandler.rx.search(authreq)
- if mo:
- scheme, quote, realm = mo.groups()
- if quote not in ['"',"'"]:
- warnings.warn("Basic Auth Realm was unquoted",
- UserWarning, 2)
- if scheme.lower() == 'basic':
- return self.retry_http_basic_auth(host, req, realm)
+ unsupported = None
+ for header in headers:
+ for scheme, realm in self._parse_realm(header):
+ if scheme.lower() != 'basic':
+ unsupported = scheme
+ continue
+
+ if realm is not None:
+ # Use the first matching Basic challenge.
+ # Ignore following challenges even if they use the Basic
+ # scheme.
+ return self.retry_http_basic_auth(host, req, realm)
+
+ if unsupported is not None:
+ raise ValueError("AbstractBasicAuthHandler does not "
+ "support the following scheme: %r"
+ % (scheme,))
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
@@ -1146,7 +1169,9 @@ class AbstractDigestAuthHandler:
req.selector)
# NOTE: As per RFC 2617, when server sends "auth,auth-int", the client could use either `auth`
# or `auth-int` to the response back. we use `auth` to send the response back.
- if 'auth' in qop.split(','):
+ if qop is None:
+ respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
+ elif 'auth' in qop.split(','):
if nonce == self.last_nonce:
self.nonce_count += 1
else:
@@ -1156,8 +1181,6 @@ class AbstractDigestAuthHandler:
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, 'auth', H(A2))
respdig = KD(H(A1), noncebit)
- elif qop is None:
- respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
raise URLError("qop '%s' is not supported." % qop)
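
A sketch of the reordered qop-less branch, with H and KD written out as the MD5 callables the handler uses for algorithm='MD5' (all input values here are invented):

    import hashlib

    def H(x):                          # MD5 per RFC 2617
        return hashlib.md5(x.encode('ascii')).hexdigest()

    def KD(secret, data):
        return H('%s:%s' % (secret, data))

    A1 = 'user:realm@example.org:pw'   # invented credentials
    A2 = 'GET:/dir/index.html'
    nonce = 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
    respdig = KD(H(A1), '%s:%s' % (nonce, H(A2)))   # the "qop is None" branch above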
@@ -1788,7 +1811,7 @@ class URLopener:
hdrs = fp.info()
fp.close()
return url2pathname(_splithost(url1)[1]), hdrs
- except OSError as msg:
+ except OSError:
pass
fp = self.open(url, data)
try:
@@ -2573,6 +2596,11 @@ def _proxy_bypass_macosx_sysconf(host, proxy_settings):
mask = 8 * (m.group(1).count('.') + 1)
else:
mask = int(mask[1:])
+
+ if mask < 0 or mask > 32:
+ # System libraries ignore invalid prefix lengths
+ continue
+
mask = 32 - mask
if (hostIP >> mask) == (base >> mask):
diff --git a/x64/Lib/urllib/response.py b/x64/Lib/urllib/response.py
index 4778118..5a2c3cc 100644
--- a/x64/Lib/urllib/response.py
+++ b/x64/Lib/urllib/response.py
@@ -73,6 +73,10 @@ class addinfourl(addinfo):
self.url = url
self.code = code
+ @property
+ def status(self):
+ return self.code
+
def getcode(self):
return self.code
diff --git a/x64/Lib/uuid.py b/x64/Lib/uuid.py
index 188e16b..5ae0a3e 100644
--- a/x64/Lib/uuid.py
+++ b/x64/Lib/uuid.py
@@ -45,7 +45,6 @@ Typical usage:
"""
import os
-import platform
import sys
from enum import Enum
@@ -54,10 +53,19 @@ from enum import Enum
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
# The recognized platforms - known behaviors
-_AIX = platform.system() == 'AIX'
-_DARWIN = platform.system() == 'Darwin'
-_LINUX = platform.system() == 'Linux'
-_WINDOWS = platform.system() == 'Windows'
+if sys.platform in ('win32', 'darwin'):
+ _AIX = _LINUX = False
+else:
+ import platform
+ _platform_system = platform.system()
+ _AIX = _platform_system == 'AIX'
+ _LINUX = _platform_system == 'Linux'
+
+_MAC_DELIM = b':'
+_MAC_OMITS_LEADING_ZEROES = False
+if _AIX:
+ _MAC_DELIM = b'.'
+ _MAC_OMITS_LEADING_ZEROES = True
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
@@ -347,24 +355,32 @@ class UUID:
if self.variant == RFC_4122:
return int((self.int >> 76) & 0xf)
-def _popen(command, *args):
- import os, shutil, subprocess
- executable = shutil.which(command)
- if executable is None:
- path = os.pathsep.join(('/sbin', '/usr/sbin'))
- executable = shutil.which(command, path=path)
+
+def _get_command_stdout(command, *args):
+ import io, os, shutil, subprocess
+
+ try:
+ path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep)
+ path_dirs.extend(['/sbin', '/usr/sbin'])
+ executable = shutil.which(command, path=os.pathsep.join(path_dirs))
if executable is None:
return None
- # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output
- # on stderr (Note: we don't have an example where the words we search
- # for are actually localized, but in theory some system could do so.)
- env = dict(os.environ)
- env['LC_ALL'] = 'C'
- proc = subprocess.Popen((executable,) + args,
- stdout=subprocess.PIPE,
- stderr=subprocess.DEVNULL,
- env=env)
- return proc
+ # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output
+ # on stderr (Note: we don't have an example where the words we search
+ # for are actually localized, but in theory some system could do so.)
+ env = dict(os.environ)
+ env['LC_ALL'] = 'C'
+ proc = subprocess.Popen((executable,) + args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.DEVNULL,
+ env=env)
+ if not proc:
+ return None
+ stdout, stderr = proc.communicate()
+ return io.BytesIO(stdout)
+ except (OSError, subprocess.SubprocessError):
+ return None
+
# For MAC (a.k.a. IEEE 802, or EUI-48) addresses, the second least significant
# bit of the first octet signifies whether the MAC address is universally (0)
@@ -384,40 +400,114 @@ def _popen(command, *args):
def _is_universal(mac):
return not (mac & (1 << 41))
-def _find_mac(command, args, hw_identifiers, get_index):
+
+def _find_mac_near_keyword(command, args, keywords, get_word_index):
+ """Searches a command's output for a MAC address near a keyword.
+
+ Each line of words in the output is case-insensitively searched for
+ any of the given keywords. Upon a match, get_word_index is invoked
+ to pick a word from the line, given the index of the match. For
+ example, lambda i: 0 would get the first word on the line, while
+ lambda i: i - 1 would get the word preceding the keyword.
+ """
+ stdout = _get_command_stdout(command, args)
+ if stdout is None:
+ return None
+
first_local_mac = None
- try:
- proc = _popen(command, *args.split())
- if not proc:
- return None
- with proc:
- for line in proc.stdout:
- words = line.lower().rstrip().split()
- for i in range(len(words)):
- if words[i] in hw_identifiers:
- try:
- word = words[get_index(i)]
- mac = int(word.replace(b':', b''), 16)
- if _is_universal(mac):
- return mac
- first_local_mac = first_local_mac or mac
- except (ValueError, IndexError):
- # Virtual interfaces, such as those provided by
- # VPNs, do not have a colon-delimited MAC address
- # as expected, but a 16-byte HWAddr separated by
- # dashes. These should be ignored in favor of a
- # real MAC address
- pass
- except OSError:
- pass
+ for line in stdout:
+ words = line.lower().rstrip().split()
+ for i in range(len(words)):
+ if words[i] in keywords:
+ try:
+ word = words[get_word_index(i)]
+ mac = int(word.replace(_MAC_DELIM, b''), 16)
+ except (ValueError, IndexError):
+ # Virtual interfaces, such as those provided by
+ # VPNs, do not have a colon-delimited MAC address
+ # as expected, but a 16-byte HWAddr separated by
+ # dashes. These should be ignored in favor of a
+ # real MAC address
+ pass
+ else:
+ if _is_universal(mac):
+ return mac
+ first_local_mac = first_local_mac or mac
return first_local_mac or None
+
+def _parse_mac(word):
+ # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'),
+ # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8').
+ #
+ # Virtual interfaces, such as those provided by VPNs, do not have a
+ # colon-delimited MAC address as expected, but a 16-byte HWAddr separated
+ # by dashes. These should be ignored in favor of a real MAC address
+ parts = word.split(_MAC_DELIM)
+ if len(parts) != 6:
+ return
+ if _MAC_OMITS_LEADING_ZEROES:
+ # (Only) on AIX the macaddr value given is not prefixed by 0, e.g.
+ # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0
+ # not
+ # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0
+ if not all(1 <= len(part) <= 2 for part in parts):
+ return
+ hexstr = b''.join(part.rjust(2, b'0') for part in parts)
+ else:
+ if not all(len(part) == 2 for part in parts):
+ return
+ hexstr = b''.join(parts)
+ try:
+ return int(hexstr, 16)
+ except ValueError:
+ return
+
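What the new helper accepts and rejects on a non-AIX platform (illustrative; assumes the defaults _MAC_DELIM=b':' and no omitted leading zeroes):

    from uuid import _parse_mac   # private helper defined above

    assert _parse_mac(b'52:54:00:9d:0e:67') == 0x5254009d0e67
    assert _parse_mac(b'fe80::5054:ff:fe9') is None   # IPv6-like, wrong part count
    assert _parse_mac(b'5:54:00:9d:0e:67') is None    # omitted leading zero, non-AIX
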
+
+def _find_mac_under_heading(command, args, heading):
+ """Looks for a MAC address under a heading in a command's output.
+
+ The first line of words in the output is searched for the given
+ heading. Words at the same word index as the heading in subsequent
+ lines are then examined to see if they look like MAC addresses.
+ """
+ stdout = _get_command_stdout(command, args)
+ if stdout is None:
+ return None
+
+ keywords = stdout.readline().rstrip().split()
+ try:
+ column_index = keywords.index(heading)
+ except ValueError:
+ return None
+
+ first_local_mac = None
+ for line in stdout:
+ words = line.rstrip().split()
+ try:
+ word = words[column_index]
+ except IndexError:
+ continue
+
+ mac = _parse_mac(word)
+ if mac is None:
+ continue
+ if _is_universal(mac):
+ return mac
+ if first_local_mac is None:
+ first_local_mac = mac
+
+ return first_local_mac
+
+
+# The following functions call external programs to 'get' a macaddr value to
+# be used as a basis for a UUID

def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
keywords = (b'hwaddr', b'ether', b'address:', b'lladdr')
for args in ('', '-a', '-av'):
- mac = _find_mac('ifconfig', args, keywords, lambda i: i+1)
+ mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1)
if mac:
return mac
return None
@@ -425,7 +515,7 @@ def _ifconfig_getnode():
def _ip_getnode():
"""Get the hardware address on Unix by running ip."""
# This works on Linux with iproute2.
- mac = _find_mac('ip', 'link', [b'link/ether'], lambda i: i+1)
+ mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1)
if mac:
return mac
return None
@@ -439,17 +529,17 @@ def _arp_getnode():
return None
# Try getting the MAC addr from arp based on our IP address (Solaris).
- mac = _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1)
+ mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1)
if mac:
return mac
# This works on OpenBSD
- mac = _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1)
+ mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1)
if mac:
return mac
# This works on Linux, FreeBSD and NetBSD
- mac = _find_mac('arp', '-an', [os.fsencode('(%s)' % ip_addr)],
+ mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)],
lambda i: i+2)
# Return None instead of 0.
if mac:
@@ -459,210 +549,52 @@ def _arp_getnode():
def _lanscan_getnode():
"""Get the hardware address on Unix by running lanscan."""
# This might work on HP-UX.
- return _find_mac('lanscan', '-ai', [b'lan0'], lambda i: 0)
+ return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0)
def _netstat_getnode():
"""Get the hardware address on Unix by running netstat."""
- # This might work on AIX, Tru64 UNIX.
- first_local_mac = None
- try:
- proc = _popen('netstat', '-ia')
- if not proc:
- return None
- with proc:
- words = proc.stdout.readline().rstrip().split()
- try:
- i = words.index(b'Address')
- except ValueError:
- return None
- for line in proc.stdout:
- try:
- words = line.rstrip().split()
- word = words[i]
- if len(word) == 17 and word.count(b':') == 5:
- mac = int(word.replace(b':', b''), 16)
- if _is_universal(mac):
- return mac
- first_local_mac = first_local_mac or mac
- except (ValueError, IndexError):
- pass
- except OSError:
- pass
- return first_local_mac or None
+ # This works on AIX and might work on Tru64 UNIX.
+ return _find_mac_under_heading('netstat', '-ian', b'Address')
def _ipconfig_getnode():
- """Get the hardware address on Windows by running ipconfig.exe."""
- import os, re, subprocess
- first_local_mac = None
- dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
- try:
- import ctypes
- buffer = ctypes.create_string_buffer(300)
- ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
- dirs.insert(0, buffer.value.decode('mbcs'))
- except:
- pass
- for dir in dirs:
- try:
- proc = subprocess.Popen([os.path.join(dir, 'ipconfig'), '/all'],
- stdout=subprocess.PIPE,
- encoding="oem")
- except OSError:
- continue
- with proc:
- for line in proc.stdout:
- value = line.split(':')[-1].strip().lower()
- if re.fullmatch('(?:[0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
- mac = int(value.replace('-', ''), 16)
- if _is_universal(mac):
- return mac
- first_local_mac = first_local_mac or mac
- return first_local_mac or None
+ """[DEPRECATED] Get the hardware address on Windows."""
+ # bpo-40501: UuidCreateSequential() is now the only supported approach
+ return _windll_getnode()
def _netbios_getnode():
- """Get the hardware address on Windows using NetBIOS calls.
- See http://support.microsoft.com/kb/118623 for details."""
- import win32wnet, netbios
- first_local_mac = None
- ncb = netbios.NCB()
- ncb.Command = netbios.NCBENUM
- ncb.Buffer = adapters = netbios.LANA_ENUM()
- adapters._pack()
- if win32wnet.Netbios(ncb) != 0:
- return None
- adapters._unpack()
- for i in range(adapters.length):
- ncb.Reset()
- ncb.Command = netbios.NCBRESET
- ncb.Lana_num = ord(adapters.lana[i])
- if win32wnet.Netbios(ncb) != 0:
- continue
- ncb.Reset()
- ncb.Command = netbios.NCBASTAT
- ncb.Lana_num = ord(adapters.lana[i])
- ncb.Callname = '*'.ljust(16)
- ncb.Buffer = status = netbios.ADAPTER_STATUS()
- if win32wnet.Netbios(ncb) != 0:
- continue
- status._unpack()
- bytes = status.adapter_address[:6]
- if len(bytes) != 6:
- continue
- mac = int.from_bytes(bytes, 'big')
- if _is_universal(mac):
- return mac
- first_local_mac = first_local_mac or mac
- return first_local_mac or None
+ """[DEPRECATED] Get the hardware address on Windows."""
+ # bpo-40501: UuidCreateSequential() is now the only supported approach
+ return _windll_getnode()
-_generate_time_safe = _UuidCreate = None
-_has_uuid_generate_time_safe = None
-
# Import optional C extension at toplevel, to help disabling it when testing
try:
import _uuid
+ _generate_time_safe = getattr(_uuid, "generate_time_safe", None)
+ _UuidCreate = getattr(_uuid, "UuidCreate", None)
+ _has_uuid_generate_time_safe = _uuid.has_uuid_generate_time_safe
except ImportError:
_uuid = None
+ _generate_time_safe = None
+ _UuidCreate = None
+ _has_uuid_generate_time_safe = None
def _load_system_functions():
- """
- Try to load platform-specific functions for generating uuids.
- """
- global _generate_time_safe, _UuidCreate, _has_uuid_generate_time_safe
-
- if _has_uuid_generate_time_safe is not None:
- return
-
- _has_uuid_generate_time_safe = False
-
- if sys.platform == "darwin" and int(os.uname().release.split('.')[0]) < 9:
- # The uuid_generate_* functions are broken on MacOS X 10.5, as noted
- # in issue #8621 the function generates the same sequence of values
- # in the parent process and all children created using fork (unless
- # those children use exec as well).
- #
- # Assume that the uuid_generate functions are broken from 10.5 onward,
- # the test can be adjusted when a later version is fixed.
- pass
- elif _uuid is not None:
- _generate_time_safe = _uuid.generate_time_safe
- _has_uuid_generate_time_safe = _uuid.has_uuid_generate_time_safe
- return
-
- try:
- # If we couldn't find an extension module, try ctypes to find
- # system routines for UUID generation.
- # Thanks to Thomas Heller for ctypes and for his help with its use here.
- import ctypes
- import ctypes.util
-
- # The uuid_generate_* routines are provided by libuuid on at least
- # Linux and FreeBSD, and provided by libc on Mac OS X.
- _libnames = ['uuid']
- if not sys.platform.startswith('win'):
- _libnames.append('c')
- for libname in _libnames:
- try:
- lib = ctypes.CDLL(ctypes.util.find_library(libname))
- except Exception: # pragma: nocover
- continue
- # Try to find the safe variety first.
- if hasattr(lib, 'uuid_generate_time_safe'):
- _uuid_generate_time_safe = lib.uuid_generate_time_safe
- # int uuid_generate_time_safe(uuid_t out);
- def _generate_time_safe():
- _buffer = ctypes.create_string_buffer(16)
- res = _uuid_generate_time_safe(_buffer)
- return bytes(_buffer.raw), res
- _has_uuid_generate_time_safe = True
- break
-
- elif hasattr(lib, 'uuid_generate_time'): # pragma: nocover
- _uuid_generate_time = lib.uuid_generate_time
- # void uuid_generate_time(uuid_t out);
- _uuid_generate_time.restype = None
- def _generate_time_safe():
- _buffer = ctypes.create_string_buffer(16)
- _uuid_generate_time(_buffer)
- return bytes(_buffer.raw), None
- break
-
- # On Windows prior to 2000, UuidCreate gives a UUID containing the
- # hardware address. On Windows 2000 and later, UuidCreate makes a
- # random UUID and UuidCreateSequential gives a UUID containing the
- # hardware address. These routines are provided by the RPC runtime.
- # NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
- # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
- # to bear any relationship to the MAC address of any network device
- # on the box.
- try:
- lib = ctypes.windll.rpcrt4
- except:
- lib = None
- _UuidCreate = getattr(lib, 'UuidCreateSequential',
- getattr(lib, 'UuidCreate', None))
-
- except Exception as exc:
- import warnings
- warnings.warn(f"Could not find fallback ctypes uuid functions: {exc}",
- ImportWarning)
+ """[DEPRECATED] Platform-specific functions loaded at import time"""
def _unix_getnode():
- """Get the hardware address on Unix using the _uuid extension module
- or ctypes."""
- _load_system_functions()
- uuid_time, _ = _generate_time_safe()
- return UUID(bytes=uuid_time).node
+ """Get the hardware address on Unix using the _uuid extension module."""
+ if _generate_time_safe:
+ uuid_time, _ = _generate_time_safe()
+ return UUID(bytes=uuid_time).node
def _windll_getnode():
- """Get the hardware address on Windows using ctypes."""
- import ctypes
- _load_system_functions()
- _buffer = ctypes.create_string_buffer(16)
- if _UuidCreate(_buffer) == 0:
- return UUID(bytes=bytes_(_buffer.raw)).node
+ """Get the hardware address on Windows using the _uuid extension module."""
+ if _UuidCreate:
+ uuid_bytes = _UuidCreate()
+ return UUID(bytes_le=uuid_bytes).node
def _random_getnode():
"""Get a random node ID."""
@@ -688,10 +620,11 @@ def _random_getnode():
# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...)
if _LINUX:
_OS_GETTERS = [_ip_getnode, _ifconfig_getnode]
-elif _DARWIN:
+elif sys.platform == 'darwin':
_OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode]
-elif _WINDOWS:
- _OS_GETTERS = [_netbios_getnode, _ipconfig_getnode]
+elif sys.platform == 'win32':
+ # bpo-40201: _windll_getnode will always succeed, so these are not needed
+ _OS_GETTERS = []
elif _AIX:
_OS_GETTERS = [_netstat_getnode]
else:
@@ -706,7 +639,7 @@ else:
_node = None
-def getnode(*, getters=None):
+def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
@@ -738,7 +671,6 @@ def uuid1(node=None, clock_seq=None):
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
- _load_system_functions()
if _generate_time_safe is not None and node is clock_seq is None:
uuid_time, safely_generated = _generate_time_safe()
try:
@@ -772,8 +704,11 @@ def uuid1(node=None, clock_seq=None):
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
- hash = md5(namespace.bytes + bytes(name, "utf-8")).digest()
- return UUID(bytes=hash[:16], version=3)
+ digest = md5(
+ namespace.bytes + bytes(name, "utf-8"),
+ usedforsecurity=False
+ ).digest()
+ return UUID(bytes=digest[:16], version=3)
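
The generated values are unchanged; usedforsecurity=False only tells FIPS-restricted hashlib builds that MD5 is used for fingerprinting here, not for security. The module's long-standing example still holds:

    import uuid

    assert str(uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')) == '6fa459ea-ee8a-3ca4-894e-db77e160355e'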
def uuid4():
"""Generate a random UUID."""
diff --git a/x64/Lib/wave.py b/x64/Lib/wave.py
index 823f091..b707119 100644
--- a/x64/Lib/wave.py
+++ b/x64/Lib/wave.py
@@ -71,9 +71,15 @@ The close() method is called automatically when the class instance
is destroyed.
"""
+from chunk import Chunk
+from collections import namedtuple
+import audioop
import builtins
+import struct
+import sys
+
-__all__ = ["open", "openfp", "Error", "Wave_read", "Wave_write"]
+__all__ = ["open", "Error", "Wave_read", "Wave_write"]
class Error(Exception):
pass
@@ -82,13 +88,6 @@ WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'i'
-import audioop
-import struct
-import sys
-from chunk import Chunk
-from collections import namedtuple
-import warnings
-
_wave_params = namedtuple('_wave_params',
'nchannels sampwidth framerate nframes comptype compname')
@@ -512,8 +511,3 @@ def open(f, mode=None):
return Wave_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
-
-def openfp(f, mode=None):
- warnings.warn("wave.openfp is deprecated since Python 3.7. "
- "Use wave.open instead.", DeprecationWarning, stacklevel=2)
- return open(f, mode=mode)
diff --git a/x64/Lib/weakref.py b/x64/Lib/weakref.py
index 9d70089..5fa851d 100644
--- a/x64/Lib/weakref.py
+++ b/x64/Lib/weakref.py
@@ -33,6 +33,9 @@ __all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakSet", "WeakMethod", "finalize"]
+_collections_abc.Set.register(WeakSet)
+_collections_abc.MutableSet.register(WeakSet)
+
class WeakMethod(ref):
"""
A custom `weakref.ref` subclass which simulates a weak reference to
@@ -75,14 +78,14 @@ class WeakMethod(ref):
if not self._alive or not other._alive:
return self is other
return ref.__eq__(self, other) and self._func_ref == other._func_ref
- return False
+ return NotImplemented
def __ne__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is not other
return ref.__ne__(self, other) or self._func_ref != other._func_ref
- return True
+ return NotImplemented
__hash__ = ref.__hash__
@@ -307,6 +310,25 @@ class WeakValueDictionary(_collections_abc.MutableMapping):
self._commit_removals()
return list(self.data.values())
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def __or__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.copy()
+ c.update(other)
+ return c
+ return NotImplemented
+
+ def __ror__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.__class__()
+ c.update(other)
+ c.update(self)
+ return c
+ return NotImplemented
+
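A sketch of the PEP 584 union operators added here (WeakKeyDictionary below gains the same set); the strong references a and b keep the entries alive:

    import weakref

    class C: pass
    a, b = C(), C()
    d = weakref.WeakValueDictionary({'a': a})
    assert sorted(d | {'b': b}) == ['a', 'b']   # | returns a new WeakValueDictionary
    d |= {'b': b}
    assert sorted(d) == ['a', 'b']
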
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
@@ -485,6 +507,25 @@ class WeakKeyDictionary(_collections_abc.MutableMapping):
if len(kwargs):
self.update(kwargs)
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def __or__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.copy()
+ c.update(other)
+ return c
+ return NotImplemented
+
+ def __ror__(self, other):
+ if isinstance(other, _collections_abc.Mapping):
+ c = self.__class__()
+ c.update(other)
+ c.update(self)
+ return c
+ return NotImplemented
+
class finalize:
"""Class for finalization of weakrefable objects
@@ -514,33 +555,7 @@ class finalize:
class _Info:
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
- def __init__(*args, **kwargs):
- if len(args) >= 3:
- self, obj, func, *args = args
- elif not args:
- raise TypeError("descriptor '__init__' of 'finalize' object "
- "needs an argument")
- else:
- if 'func' not in kwargs:
- raise TypeError('finalize expected at least 2 positional '
- 'arguments, got %d' % (len(args)-1))
- func = kwargs.pop('func')
- if len(args) >= 2:
- self, obj, *args = args
- import warnings
- warnings.warn("Passing 'func' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- if 'obj' not in kwargs:
- raise TypeError('finalize expected at least 2 positional '
- 'arguments, got %d' % (len(args)-1))
- obj = kwargs.pop('obj')
- self, *args = args
- import warnings
- warnings.warn("Passing 'obj' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- args = tuple(args)
-
+ def __init__(self, obj, func, /, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
@@ -556,7 +571,6 @@ class finalize:
info.index = next(self._index_iter)
self._registry[self] = info
finalize._dirty = True
- __init__.__text_signature__ = '($self, obj, func, /, *args, **kwargs)'
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
diff --git a/x64/Lib/webbrowser.py b/x64/Lib/webbrowser.py
index 1ef179a..6023c1e 100644
--- a/x64/Lib/webbrowser.py
+++ b/x64/Lib/webbrowser.py
@@ -69,6 +69,14 @@ def get(using=None):
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
+ """Display url using the default browser.
+
+ If possible, open url in a location determined by new.
+ - 0: the same browser window (the default).
+ - 1: a new browser window.
+ - 2: a new browser page ("tab").
+ If possible, autoraise raises the window (the default) or not.
+ """
if _tryorder is None:
with _lock:
if _tryorder is None:
@@ -80,9 +88,17 @@ def open(url, new=0, autoraise=True):
return False
def open_new(url):
+ """Open url in a new window of the default browser.
+
+ If not possible, then open url in the only browser window.
+ """
return open(url, 1)
def open_new_tab(url):
+ """Open url in a new page ("tab") of the default browser.
+
+ If not possible, then the behavior becomes equivalent to open_new().
+ """
return open(url, 2)
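
The newly documented entry points in action (sketch; launches the user's actual browser):

    import webbrowser

    webbrowser.open('https://docs.python.org/3.9/')           # new=0: reuse a window if possible
    webbrowser.open_new_tab('https://docs.python.org/3.9/')   # equivalent to open(url, new=2)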
@@ -397,7 +413,7 @@ class Grail(BaseBrowser):
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
- filename = os.path.join(tempdir, user + "-*")
+ filename = os.path.join(glob.escape(tempdir), glob.escape(user) + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
@@ -529,12 +545,12 @@ def register_standard_browsers():
register(browser, None, BackgroundBrowser(browser))
else:
# Prefer X browsers if present
- if os.environ.get("DISPLAY"):
+ if os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY"):
try:
cmd = "xdg-settings get default-web-browser".split()
raw_result = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
result = raw_result.decode().strip()
- except (FileNotFoundError, subprocess.CalledProcessError):
+ except (FileNotFoundError, subprocess.CalledProcessError, PermissionError, NotADirectoryError) :
pass
else:
global _os_preferred_browser
diff --git a/x64/Lib/xml/dom/expatbuilder.py b/x64/Lib/xml/dom/expatbuilder.py
index 2bd835b..199c22d 100644
--- a/x64/Lib/xml/dom/expatbuilder.py
+++ b/x64/Lib/xml/dom/expatbuilder.py
@@ -204,11 +204,11 @@ class ExpatBuilder:
buffer = file.read(16*1024)
if not buffer:
break
- parser.Parse(buffer, 0)
+ parser.Parse(buffer, False)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
first_buffer = False
- parser.Parse("", True)
+ parser.Parse(b"", True)
except ParseEscape:
pass
doc = self.document
@@ -637,7 +637,7 @@ class FragmentBuilder(ExpatBuilder):
nsattrs = self._getNSattrs() # get ns decls from node's ancestors
document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
try:
- parser.Parse(document, 1)
+ parser.Parse(document, True)
except:
self.reset()
raise
@@ -697,7 +697,7 @@ class FragmentBuilder(ExpatBuilder):
self.fragment = self.document.createDocumentFragment()
self.curNode = self.fragment
try:
- parser.Parse(self._source, 1)
+ parser.Parse(self._source, True)
finally:
self.curNode = old_cur_node
self.document = old_document
diff --git a/x64/Lib/xml/dom/minidom.py b/x64/Lib/xml/dom/minidom.py
index 464420b..d09ef5e 100644
--- a/x64/Lib/xml/dom/minidom.py
+++ b/x64/Lib/xml/dom/minidom.py
@@ -43,10 +43,11 @@ class Node(xml.dom.Node):
def __bool__(self):
return True
- def toxml(self, encoding=None):
- return self.toprettyxml("", "", encoding)
+ def toxml(self, encoding=None, standalone=None):
+ return self.toprettyxml("", "", encoding, standalone)
- def toprettyxml(self, indent="\t", newl="\n", encoding=None):
+ def toprettyxml(self, indent="\t", newl="\n", encoding=None,
+ standalone=None):
if encoding is None:
writer = io.StringIO()
else:
@@ -56,7 +57,7 @@ class Node(xml.dom.Node):
newline='\n')
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
- self.writexml(writer, "", indent, newl, encoding)
+ self.writexml(writer, "", indent, newl, encoding, standalone)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
@@ -718,6 +719,14 @@ class Element(Node):
Node.unlink(self)
def getAttribute(self, attname):
+ """Returns the value of the specified attribute.
+
+ Returns the value of the element's attribute named attname as
+ a string. An empty string is returned if the element does not
+ have such an attribute. Note that an empty string may also be
+ returned as an explicitly given attribute value, use the
+ hasAttribute method to distinguish these two cases.
+ """
if self._attrs is None:
return ""
try:
@@ -828,6 +837,11 @@ class Element(Node):
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
+ """Checks whether the element has an attribute with the specified name.
+
+ Returns True if the element has an attribute with the specified name.
+ Otherwise, returns False.
+ """
if self._attrs is None:
return False
return name in self._attrs
@@ -838,6 +852,11 @@ class Element(Node):
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
+ """Returns all descendant elements with the given tag name.
+
+ Returns the list of all descendant elements (not direct children
+ only) with the specified tag name.
+ """
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
@@ -848,6 +867,11 @@ class Element(Node):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
+ """Write an XML element to a file-like object
+
+ Write the element to the writer object that must provide
+ a write method (e.g. a file or StringIO object).
+ """
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
@@ -1787,12 +1811,17 @@ class Document(Node, DocumentLS):
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
- def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
- if encoding is None:
- writer.write('<?xml version="1.0" ?>'+newl)
- else:
- writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
- encoding, newl))
+ def writexml(self, writer, indent="", addindent="", newl="", encoding=None,
+ standalone=None):
+ declarations = []
+
+ if encoding:
+ declarations.append(f'encoding="{encoding}"')
+ if standalone is not None:
+ declarations.append(f'standalone="{"yes" if standalone else "no"}"')
+
+ writer.write(f'<?xml version="1.0" {" ".join(declarations)}?>{newl}')
+
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
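
The new standalone argument threads from toxml()/toprettyxml() down into the document declaration built above; a sketch:

    from xml.dom.minidom import parseString

    doc = parseString('<root><a/></root>')
    doc.toxml(encoding='utf-8', standalone=True)
    # b'<?xml version="1.0" encoding="utf-8" standalone="yes"?><root><a/></root>'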
diff --git a/x64/Lib/xml/dom/xmlbuilder.py b/x64/Lib/xml/dom/xmlbuilder.py
index 213ab14..8a20026 100644
--- a/x64/Lib/xml/dom/xmlbuilder.py
+++ b/x64/Lib/xml/dom/xmlbuilder.py
@@ -1,7 +1,6 @@
"""Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
-import warnings
import xml.dom
from xml.dom.NodeFilter import NodeFilter
diff --git a/x64/Lib/xml/etree/ElementInclude.py b/x64/Lib/xml/etree/ElementInclude.py
index 963470e..5303062 100644
--- a/x64/Lib/xml/etree/ElementInclude.py
+++ b/x64/Lib/xml/etree/ElementInclude.py
@@ -50,18 +50,28 @@
import copy
from . import ElementTree
+from urllib.parse import urljoin
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
+# For security reasons, the inclusion depth is limited to this read-only value by default.
+DEFAULT_MAX_INCLUSION_DEPTH = 6
+
+
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
+
+class LimitedRecursiveIncludeError(FatalIncludeError):
+ pass
+
+
##
# Default loader. This loader reads an included resource from disk.
#
@@ -92,13 +102,33 @@ def default_loader(href, parse, encoding=None):
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
+# @param base_url The base URL of the original file, to resolve
+# relative include file references.
+# @param max_depth The maximum number of recursive inclusions.
+# Limited to reduce the risk of malicious content explosion.
+# Pass a negative value to disable the limitation.
+# @throws LimitedRecursiveIncludeError If the {@link max_depth} was exceeded.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
-# @throws OSError If the function fails to load a given resource.
+# @throws IOError If the function fails to load a given resource.
+# @returns the node or its replacement if it was an XInclude node
-def include(elem, loader=None):
+def include(elem, loader=None, base_url=None,
+ max_depth=DEFAULT_MAX_INCLUSION_DEPTH):
+ if max_depth is None:
+ max_depth = -1
+ elif max_depth < 0:
+ raise ValueError("expected non-negative depth or None for 'max_depth', got %r" % max_depth)
+
+ if hasattr(elem, 'getroot'):
+ elem = elem.getroot()
if loader is None:
loader = default_loader
+
+ _include(elem, loader, base_url, max_depth, set())
+
+
+def _include(elem, loader, base_url, max_depth, _parent_hrefs):
# look for xinclude elements
i = 0
while i < len(elem):
@@ -106,14 +136,24 @@ def include(elem, loader=None):
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
+ if base_url:
+ href = urljoin(base_url, href)
parse = e.get("parse", "xml")
if parse == "xml":
+ if href in _parent_hrefs:
+ raise FatalIncludeError("recursive include of %s" % href)
+ if max_depth == 0:
+ raise LimitedRecursiveIncludeError(
+ "maximum xinclude depth reached when including file %s" % href)
+ _parent_hrefs.add(href)
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
- node = copy.copy(node)
+ node = copy.copy(node) # FIXME: this makes little sense with recursive includes
+ _include(node, loader, href, max_depth - 1, _parent_hrefs)
+ _parent_hrefs.remove(href)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
@@ -123,11 +163,13 @@ def include(elem, loader=None):
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
+ if e.tail:
+ text += e.tail
if i:
node = elem[i-1]
- node.tail = (node.tail or "") + text + (e.tail or "")
+ node.tail = (node.tail or "") + text
else:
- elem.text = (elem.text or "") + text + (e.tail or "")
+ elem.text = (elem.text or "") + text
del elem[i]
continue
else:
@@ -139,5 +181,5 @@ def include(elem, loader=None):
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
- include(e, loader)
- i = i + 1
+ _include(e, loader, base_url, max_depth, _parent_hrefs)
+ i += 1
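
A minimal sketch of the extended include() API above; "doc.xml" is a hypothetical input and the depth limit shown is arbitrary:

    # Sketch: XInclude processing with the new base_url/max_depth parameters.
    from xml.etree import ElementTree, ElementInclude

    tree = ElementTree.parse("doc.xml")  # hypothetical file with xi:include
    ElementInclude.include(
        tree.getroot(),
        base_url="doc.xml",  # resolve relative href values against this URL
        max_depth=2,         # LimitedRecursiveIncludeError beyond two levels
    )
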
diff --git a/x64/Lib/xml/etree/ElementTree.py b/x64/Lib/xml/etree/ElementTree.py
index 645e999..7a26900 100644
--- a/x64/Lib/xml/etree/ElementTree.py
+++ b/x64/Lib/xml/etree/ElementTree.py
@@ -76,7 +76,7 @@ __all__ = [
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
- "iselement", "iterparse",
+ "indent", "iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
@@ -195,6 +195,13 @@ class Element:
original tree.
"""
+ warnings.warn(
+ "elem.copy() is deprecated. Use copy.copy(elem) instead.",
+ DeprecationWarning
+ )
+ return self.__copy__()
+
+ def __copy__(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
@@ -273,19 +280,6 @@ class Element:
# assert iselement(element)
self._children.remove(subelement)
- def getchildren(self):
- """(Deprecated) Return all subelements.
-
- Elements are returned in document order.
-
- """
- warnings.warn(
- "This method will be removed in future versions. "
- "Use 'list(elem)' or iteration over elem instead.",
- DeprecationWarning, stacklevel=2
- )
- return self._children
-
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
@@ -409,15 +403,6 @@ class Element:
for e in self._children:
yield from e.iter(tag)
- # compatibility
- def getiterator(self, tag=None):
- warnings.warn(
- "This method will be removed in future versions. "
- "Use 'elem.iter()' or 'list(elem.iter())' instead.",
- DeprecationWarning, stacklevel=2
- )
- return list(self.iter(tag))
-
def itertext(self):
"""Create text iterator.
@@ -617,15 +602,6 @@ class ElementTree:
# assert self._root is not None
return self._root.iter(tag)
- # compatibility
- def getiterator(self, tag=None):
- warnings.warn(
- "This method will be removed in future versions. "
- "Use 'tree.iter()' or 'list(tree.iter())' instead.",
- DeprecationWarning, stacklevel=2
- )
- return list(self.iter(tag))
-
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
@@ -1081,15 +1057,15 @@ def _escape_attrib(text):
text = text.replace(">", "&gt;")
if "\"" in text:
text = text.replace("\"", "&quot;")
- # The following business with carriage returns is to satisfy
- # Section 2.11 of the XML specification, stating that
- # CR or CR LN should be replaced with just LN
+ # Although section 2.11 of the XML specification states that CR or
+ # CR LN should be replaced with just LN, it applies only to EOLNs
+ # that take part in organizing the file into lines. Within attributes,
+ # we are replacing these with entity numbers, so they do not count.
# http://www.w3.org/TR/REC-xml/#sec-line-ends
- if "\r\n" in text:
- text = text.replace("\r\n", "\n")
+ # The current solution, contained in the following six lines, was
+ # discussed in issues 17582 and 39011.
if "\r" in text:
- text = text.replace("\r", "\n")
- #The following four lines are issue 17582
+ text = text.replace("\r", "&#13;")
if "\n" in text:
text = text.replace("\n", "&#10;")
if "\t" in text:
@@ -1185,6 +1161,57 @@ def dump(elem):
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
+
+def indent(tree, space=" ", level=0):
+ """Indent an XML document by inserting newlines and indentation space
+ after elements.
+
+ *tree* is the ElementTree or Element to modify. The (root) element
+ itself will not be changed, but the tail text of all elements in its
+ subtree will be adapted.
+
+ *space* is the whitespace to insert for each indentation level, two
+ space characters by default.
+
+ *level* is the initial indentation level. Setting this to a higher
+ value than 0 can be used for indenting subtrees that are more deeply
+ nested inside of a document.
+ """
+ if isinstance(tree, ElementTree):
+ tree = tree.getroot()
+ if level < 0:
+ raise ValueError(f"Initial indentation level must be >= 0, got {level}")
+ if not len(tree):
+ return
+
+ # Reduce the memory consumption by reusing indentation strings.
+ indentations = ["\n" + level * space]
+
+ def _indent_children(elem, level):
+ # Start a new indentation level for the first child.
+ child_level = level + 1
+ try:
+ child_indentation = indentations[child_level]
+ except IndexError:
+ child_indentation = indentations[level] + space
+ indentations.append(child_indentation)
+
+ if not elem.text or not elem.text.strip():
+ elem.text = child_indentation
+
+ for child in elem:
+ if len(child):
+ _indent_children(child, child_level)
+ if not child.tail or not child.tail.strip():
+ child.tail = child_indentation
+
+ # Dedent after the last child by overwriting the previous indentation.
+ if not child.tail.strip():
+ child.tail = indentations[level]
+
+ _indent_children(tree, 0)
+
+
# --------------------------------------------------------------------
# parsing
@@ -1690,14 +1717,14 @@ class XMLParser:
def feed(self, data):
"""Feed encoded data to parser."""
try:
- self.parser.Parse(data, 0)
+ self.parser.Parse(data, False)
except self._error as v:
self._raiseerror(v)
def close(self):
"""Finish feeding data to parser and return element structure."""
try:
- self.parser.Parse("", 1) # end of data
+ self.parser.Parse(b"", True) # end of data
except self._error as v:
self._raiseerror(v)
try:
@@ -1849,6 +1876,11 @@ class C14NWriterTarget:
self._declared_ns_stack[-1].append((uri, prefix))
return f'{prefix}:{tag}' if prefix else tag, tag, uri
+ if not uri:
+ # As soon as a default namespace is defined,
+ # anything that has no namespace (and thus, no prefix) goes there.
+ return tag, tag, uri
+
raise ValueError(f'Namespace "{uri}" is not declared in scope')
def data(self, data):
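
The new indent() helper added above can be exercised like this (a sketch assuming Python 3.9; element names are illustrative):

    # Sketch: in-place pretty-printing via ElementTree.indent().
    import xml.etree.ElementTree as ET

    root = ET.fromstring("<data><item>1</item><item>2</item></data>")
    ET.indent(root, space="  ")  # rewrites .text/.tail whitespace in place
    print(ET.tostring(root, encoding="unicode"))
    # <data>
    #   <item>1</item>
    #   <item>2</item>
    # </data>
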
diff --git a/x64/Lib/xml/sax/__init__.py b/x64/Lib/xml/sax/__init__.py
index a0f5d40..17b7587 100644
--- a/x64/Lib/xml/sax/__init__.py
+++ b/x64/Lib/xml/sax/__init__.py
@@ -78,7 +78,7 @@ def make_parser(parser_list=()):
for parser_name in list(parser_list) + default_parser_list:
try:
return _create_parser(parser_name)
- except ImportError as e:
+ except ImportError:
import sys
if parser_name in sys.modules:
# The parser module was found, but importing it
diff --git a/x64/Lib/xml/sax/expatreader.py b/x64/Lib/xml/sax/expatreader.py
index 5066ffc..e334ac9 100644
--- a/x64/Lib/xml/sax/expatreader.py
+++ b/x64/Lib/xml/sax/expatreader.py
@@ -93,7 +93,7 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
- self._parsing = 0
+ self._parsing = False
self._entity_stack = []
self._external_ges = 0
self._interning = None
@@ -203,10 +203,10 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
# IncrementalParser methods
- def feed(self, data, isFinal = 0):
+ def feed(self, data, isFinal=False):
if not self._parsing:
self.reset()
- self._parsing = 1
+ self._parsing = True
self._cont_handler.startDocument()
try:
@@ -237,13 +237,13 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
# If we are completing an external entity, do nothing here
return
try:
- self.feed("", isFinal = 1)
+ self.feed(b"", isFinal=True)
self._cont_handler.endDocument()
- self._parsing = 0
+ self._parsing = False
# break cycle created by expat handlers pointing to our methods
self._parser = None
finally:
- self._parsing = 0
+ self._parsing = False
if self._parser is not None:
# Keep ErrorColumnNumber and ErrorLineNumber after closing.
parser = _ClosedParser()
@@ -307,7 +307,7 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
- self._parsing = 0
+ self._parsing = False
self._entity_stack = []
# Locator methods
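
The feed()/close() changes above keep the incremental contract; a sketch with an illustrative payload:

    # Sketch: incremental SAX parsing. isFinal is now a bool, and close()
    # internally calls feed(b"", isFinal=True) as in the hunk above.
    import xml.sax
    from xml.sax.handler import ContentHandler

    class Counter(ContentHandler):
        def __init__(self):
            super().__init__()
            self.count = 0

        def startElement(self, name, attrs):
            self.count += 1

    parser = xml.sax.make_parser()
    counter = Counter()
    parser.setContentHandler(counter)
    for chunk in (b"<root>", b"<item/>", b"</root>"):
        parser.feed(chunk)
    parser.close()
    print(counter.count)  # 2
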
diff --git a/x64/Lib/xmlrpc/client.py b/x64/Lib/xmlrpc/client.py
index b987574..d15d60d 100644
--- a/x64/Lib/xmlrpc/client.py
+++ b/x64/Lib/xmlrpc/client.py
@@ -313,31 +313,38 @@ class DateTime:
s = self.timetuple()
o = other.timetuple()
else:
- otype = (hasattr(other, "__class__")
- and other.__class__.__name__
- or type(other))
- raise TypeError("Can't compare %s and %s" %
- (self.__class__.__name__, otype))
+ s = self
+ o = NotImplemented
return s, o
def __lt__(self, other):
s, o = self.make_comparable(other)
+ if o is NotImplemented:
+ return NotImplemented
return s < o
def __le__(self, other):
s, o = self.make_comparable(other)
+ if o is NotImplemented:
+ return NotImplemented
return s <= o
def __gt__(self, other):
s, o = self.make_comparable(other)
+ if o is NotImplemented:
+ return NotImplemented
return s > o
def __ge__(self, other):
s, o = self.make_comparable(other)
+ if o is NotImplemented:
+ return NotImplemented
return s >= o
def __eq__(self, other):
s, o = self.make_comparable(other)
+ if o is NotImplemented:
+ return NotImplemented
return s == o
def timetuple(self):
@@ -435,7 +442,7 @@ class ExpatParser:
target.xml(encoding, None)
def feed(self, data):
- self._parser.Parse(data, 0)
+ self._parser.Parse(data, False)
def close(self):
try:
@@ -1414,15 +1421,14 @@ class ServerProxy:
# establish a "logical" server connection
# get the url
- type, uri = urllib.parse._splittype(uri)
- if type not in ("http", "https"):
+ p = urllib.parse.urlparse(uri)
+ if p.scheme not in ("http", "https"):
raise OSError("unsupported XML-RPC protocol")
- self.__host, self.__handler = urllib.parse._splithost(uri)
- if not self.__handler:
- self.__handler = "/RPC2"
+ self.__host = p.netloc
+ self.__handler = p.path or "/RPC2"
if transport is None:
- if type == "https":
+ if p.scheme == "https":
handler = SafeTransport
extra_kwargs = {"context": context}
else:
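
The switch away from the private _splittype/_splithost helpers boils down to this public parsing (the endpoint is hypothetical):

    # Sketch: deriving host and handler the way ServerProxy now does.
    from urllib.parse import urlparse

    p = urlparse("http://example.com:8080/RPC2")
    assert p.scheme in ("http", "https")
    host = p.netloc              # 'example.com:8080'
    handler = p.path or "/RPC2"  # default handler when the path is empty
    print(host, handler)
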
diff --git a/x64/Lib/xmlrpc/server.py b/x64/Lib/xmlrpc/server.py
index 32aba4d..287e324 100644
--- a/x64/Lib/xmlrpc/server.py
+++ b/x64/Lib/xmlrpc/server.py
@@ -732,7 +732,7 @@ class ServerHTMLDoc(pydoc.HTMLDoc):
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
- pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
+ pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
diff --git a/x64/Lib/zipfile.py b/x64/Lib/zipfile.py
index 5dc6516..816f858 100644
--- a/x64/Lib/zipfile.py
+++ b/x64/Lib/zipfile.py
@@ -4,7 +4,6 @@ Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import binascii
-import functools
import importlib.util
import io
import itertools
@@ -17,7 +16,6 @@ import sys
import threading
import time
import contextlib
-from collections import OrderedDict
try:
import zlib # We may need its compression method
@@ -38,7 +36,8 @@ except ImportError:
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
- "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
+ "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile",
+ "Path"]
class BadZipFile(Exception):
pass
@@ -378,11 +377,11 @@ class ZipInfo (object):
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
+ self.compress_size = 0 # Size of the compressed file
+ self.file_size = 0 # Size of the uncompressed file
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
- # compress_size Size of the compressed file
- # file_size Size of the uncompressed file
def __repr__(self):
result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
@@ -467,44 +466,23 @@ class ZipInfo (object):
if ln+4 > len(extra):
raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
if tp == 0x0001:
- if ln >= 24:
- counts = unpack('<QQQ', extra[4:28])
- elif ln == 16:
- counts = unpack('<QQ', extra[4:20])
- elif ln == 8:
- counts = unpack('<Q', extra[4:12])
- elif ln == 0:
- counts = ()
- else:
- raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
-
- idx = 0
-
+ data = extra[4:ln+4]
# ZIP64 extension (large files and/or large archives)
- if self.file_size in (0xffffffffffffffff, 0xffffffff):
- if len(counts) <= idx:
- raise BadZipFile(
- "Corrupt zip64 extra field. File size not found."
- )
- self.file_size = counts[idx]
- idx += 1
-
- if self.compress_size == 0xFFFFFFFF:
- if len(counts) <= idx:
- raise BadZipFile(
- "Corrupt zip64 extra field. Compress size not found."
- )
- self.compress_size = counts[idx]
- idx += 1
-
- if self.header_offset == 0xffffffff:
- if len(counts) <= idx:
- raise BadZipFile(
- "Corrupt zip64 extra field. Header offset not found."
- )
- old = self.header_offset
- self.header_offset = counts[idx]
- idx+=1
+ try:
+ if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF):
+ field = "File size"
+ self.file_size, = unpack('<Q', data[:8])
+ data = data[8:]
+ if self.compress_size == 0xFFFF_FFFF:
+ field = "Compress size"
+ self.compress_size, = unpack('<Q', data[:8])
+ data = data[8:]
+ if self.header_offset == 0xFFFF_FFFF:
+ field = "Header offset"
+ self.header_offset, = unpack('<Q', data[:8])
+ except struct.error:
+ raise BadZipFile(f"Corrupt zip64 extra field. "
+ f"{field} not found.") from None
extra = extra[ln+4:]
@@ -912,12 +890,16 @@ class ZipExtFile(io.BufferedIOBase):
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
+ if self.closed:
+ raise ValueError("I/O operation on closed file.")
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
+ if self.closed:
+ raise ValueError("read from closed file.")
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
@@ -1054,9 +1036,13 @@ class ZipExtFile(io.BufferedIOBase):
super().close()
def seekable(self):
+ if self.closed:
+ raise ValueError("I/O operation on closed file.")
return self._seekable
def seek(self, offset, whence=0):
+ if self.closed:
+ raise ValueError("seek on closed file.")
if not self._seekable:
raise io.UnsupportedOperation("underlying stream is not seekable")
curr_pos = self.tell()
@@ -1105,6 +1091,8 @@ class ZipExtFile(io.BufferedIOBase):
return self.tell()
def tell(self):
+ if self.closed:
+ raise ValueError("tell on closed file.")
if not self._seekable:
raise io.UnsupportedOperation("underlying stream is not seekable")
filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
@@ -1546,7 +1534,7 @@ class ZipFile:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
- if zinfo.flag_bits & 0x800:
+ if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
@@ -1584,9 +1572,7 @@ class ZipFile:
"another write handle open on it. "
"Close the first handle before opening another.")
- # Sizes and CRC are overwritten with correct data after processing the file
- if not hasattr(zinfo, 'file_size'):
- zinfo.file_size = 0
+ # Size and CRC are overwritten with correct data after processing the file
zinfo.compress_size = 0
zinfo.CRC = 0
@@ -1882,25 +1868,15 @@ class ZipFile:
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
- try:
- filename, flag_bits = zinfo._encodeFilenameFlags()
- centdir = struct.pack(structCentralDir,
- stringCentralDir, create_version,
- zinfo.create_system, extract_version, zinfo.reserved,
- flag_bits, zinfo.compress_type, dostime, dosdate,
- zinfo.CRC, compress_size, file_size,
- len(filename), len(extra_data), len(zinfo.comment),
- 0, zinfo.internal_attr, zinfo.external_attr,
- header_offset)
- except DeprecationWarning:
- print((structCentralDir, stringCentralDir, create_version,
- zinfo.create_system, extract_version, zinfo.reserved,
- zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
- zinfo.CRC, compress_size, file_size,
- len(zinfo.filename), len(extra_data), len(zinfo.comment),
- 0, zinfo.internal_attr, zinfo.external_attr,
- header_offset), file=sys.stderr)
- raise
+ filename, flag_bits = zinfo._encodeFilenameFlags()
+ centdir = struct.pack(structCentralDir,
+ stringCentralDir, create_version,
+ zinfo.create_system, extract_version, zinfo.reserved,
+ flag_bits, zinfo.compress_type, dostime, dosdate,
+ zinfo.CRC, compress_size, file_size,
+ len(filename), len(extra_data), len(zinfo.comment),
+ 0, zinfo.internal_attr, zinfo.external_attr,
+ header_offset)
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
@@ -1942,6 +1918,8 @@ class ZipFile:
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
+ if self.mode == "a":
+ self.fp.truncate()
self.fp.flush()
def _fpclose(self, fp):
@@ -2125,24 +2103,6 @@ class PyZipFile(ZipFile):
return (fname, archivename)
-def _unique_everseen(iterable, key=None):
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen = set()
- seen_add = seen.add
- if key is None:
- for element in itertools.filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
-
-
def _parents(path):
"""
Given a path with elements separated by
@@ -2184,6 +2144,18 @@ def _ancestry(path):
path, tail = posixpath.split(path)
+_dedupe = dict.fromkeys
+"""Deduplicate an iterable in original order"""
+
+
+def _difference(minuend, subtrahend):
+ """
+ Return items in minuend not in subtrahend, retaining order
+ with O(1) lookup.
+ """
+ return itertools.filterfalse(set(subtrahend).__contains__, minuend)
+
+
class CompleteDirs(ZipFile):
"""
A ZipFile subclass that ensures that implied directories
@@ -2193,13 +2165,8 @@ class CompleteDirs(ZipFile):
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
- # Deduplicate entries in original order
- implied_dirs = OrderedDict.fromkeys(
- p + posixpath.sep for p in parents
- # Cast names to a set for O(1) lookups
- if p + posixpath.sep not in set(names)
- )
- return implied_dirs
+ as_dirs = (p + posixpath.sep for p in parents)
+ return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super(CompleteDirs, self).namelist()
@@ -2328,20 +2295,31 @@ class Path:
self.root = FastLookup.make(root)
self.at = at
- @property
- def open(self):
- return functools.partial(self.root.open, self.at)
+ def open(self, mode='r', *args, **kwargs):
+ """
+ Open this entry as text or binary following the semantics
+ of ``pathlib.Path.open()`` by passing arguments through
+ to io.TextIOWrapper().
+ """
+ pwd = kwargs.pop('pwd', None)
+ zip_mode = mode[0]
+ stream = self.root.open(self.at, zip_mode, pwd=pwd)
+ if 'b' in mode:
+ if args or kwargs:
+ raise ValueError("encoding args invalid for binary operation")
+ return stream
+ return io.TextIOWrapper(stream, *args, **kwargs)
@property
def name(self):
return posixpath.basename(self.at.rstrip("/"))
def read_text(self, *args, **kwargs):
- with self.open() as strm:
- return io.TextIOWrapper(strm, *args, **kwargs).read()
+ with self.open('r', *args, **kwargs) as strm:
+ return strm.read()
def read_bytes(self):
- with self.open() as strm:
+ with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
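
A sketch of the reworked zipfile.Path.open(), which now mirrors pathlib semantics (in-memory archive for illustration):

    # Sketch: text mode wraps the stream in io.TextIOWrapper; 'b' in the
    # mode returns the raw binary stream, per the open() rewrite above.
    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("notes.txt", "hello\n")

    entry = zipfile.Path(zipfile.ZipFile(buf)) / "notes.txt"
    with entry.open("r", encoding="utf-8") as f:
        print(f.read())          # hello
    print(entry.read_bytes())    # b'hello\n'
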
diff --git a/x64/Lib/zoneinfo/__init__.py b/x64/Lib/zoneinfo/__init__.py
new file mode 100644
index 0000000..f5510ee
--- /dev/null
+++ b/x64/Lib/zoneinfo/__init__.py
@@ -0,0 +1,31 @@
+__all__ = [
+ "ZoneInfo",
+ "reset_tzpath",
+ "available_timezones",
+ "TZPATH",
+ "ZoneInfoNotFoundError",
+ "InvalidTZPathWarning",
+]
+
+from . import _tzpath
+from ._common import ZoneInfoNotFoundError
+
+try:
+ from _zoneinfo import ZoneInfo
+except ImportError: # pragma: nocover
+ from ._zoneinfo import ZoneInfo
+
+reset_tzpath = _tzpath.reset_tzpath
+available_timezones = _tzpath.available_timezones
+InvalidTZPathWarning = _tzpath.InvalidTZPathWarning
+
+
+def __getattr__(name):
+ if name == "TZPATH":
+ return _tzpath.TZPATH
+ else:
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
+def __dir__():
+ return sorted(list(globals()) + ["TZPATH"])
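
TZPATH is served through the module-level __getattr__ (PEP 562) defined above, so it always reflects the latest reset_tzpath() call; a sketch:

    # Sketch: TZPATH is resolved at attribute-access time.
    import zoneinfo

    print(zoneinfo.TZPATH)        # platform-dependent tuple of search paths
    zoneinfo.reset_tzpath(to=())  # empty search path: tzdata package only
    print(zoneinfo.TZPATH)        # ()
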
diff --git a/x64/Lib/zoneinfo/_common.py b/x64/Lib/zoneinfo/_common.py
new file mode 100644
index 0000000..41c898f
--- /dev/null
+++ b/x64/Lib/zoneinfo/_common.py
@@ -0,0 +1,165 @@
+import struct
+
+
+def load_tzdata(key):
+ import importlib.resources
+
+ components = key.split("/")
+ package_name = ".".join(["tzdata.zoneinfo"] + components[:-1])
+ resource_name = components[-1]
+
+ try:
+ return importlib.resources.open_binary(package_name, resource_name)
+ except (ImportError, FileNotFoundError, UnicodeEncodeError):
+ # There are three types of exception that can be raised that all amount
+ # to "we cannot find this key":
+ #
+ # ImportError: If package_name doesn't exist (e.g. if tzdata is not
+ # installed, or if there's an error in the folder name like
+ # Amrica/New_York)
+ # FileNotFoundError: If resource_name doesn't exist in the package
+ # (e.g. Europe/Krasnoy)
+ # UnicodeEncodeError: If package_name or resource_name are not UTF-8,
+ # such as keys containing a surrogate character.
+ raise ZoneInfoNotFoundError(f"No time zone found with key {key}")
+
+
+def load_data(fobj):
+ header = _TZifHeader.from_file(fobj)
+
+ if header.version == 1:
+ time_size = 4
+ time_type = "l"
+ else:
+ # Version 2+ has 64-bit integer transition times
+ time_size = 8
+ time_type = "q"
+
+ # Version 2+ also starts with a Version 1 header and data, which
+ # we need to skip now
+ skip_bytes = (
+ header.timecnt * 5 # Transition times and types
+ + header.typecnt * 6 # Local time type records
+ + header.charcnt # Time zone designations
+ + header.leapcnt * 8 # Leap second records
+ + header.isstdcnt # Standard/wall indicators
+ + header.isutcnt # UT/local indicators
+ )
+
+ fobj.seek(skip_bytes, 1)
+
+ # Now we need to read the second header, which is not the same
+ # as the first
+ header = _TZifHeader.from_file(fobj)
+
+ typecnt = header.typecnt
+ timecnt = header.timecnt
+ charcnt = header.charcnt
+
+ # The data portion starts with timecnt transitions and indices
+ if timecnt:
+ trans_list_utc = struct.unpack(
+ f">{timecnt}{time_type}", fobj.read(timecnt * time_size)
+ )
+ trans_idx = struct.unpack(f">{timecnt}B", fobj.read(timecnt))
+ else:
+ trans_list_utc = ()
+ trans_idx = ()
+
+ # Read the ttinfo struct, (utoff, isdst, abbrind)
+ if typecnt:
+ utcoff, isdst, abbrind = zip(
+ *(struct.unpack(">lbb", fobj.read(6)) for i in range(typecnt))
+ )
+ else:
+ utcoff = ()
+ isdst = ()
+ abbrind = ()
+
+ # Now read the abbreviations. They are null-terminated strings, indexed
+ # not by position in the array but by position in the unsplit
+ # abbreviation string. I suppose this makes more sense in C, which uses
+ # null to terminate the strings, but it's inconvenient here...
+ abbr_vals = {}
+ abbr_chars = fobj.read(charcnt)
+
+ def get_abbr(idx):
+ # Gets a string starting at idx and running until the next \x00
+ #
+ # We cannot pre-populate abbr_vals by splitting on \x00 because there
+ # are some zones that use subsets of longer abbreviations, like so:
+ #
+ # LMT\x00AHST\x00HDT\x00
+ #
+ # Where the idx to abbr mapping should be:
+ #
+ # {0: "LMT", 4: "AHST", 5: "HST", 9: "HDT"}
+ if idx not in abbr_vals:
+ span_end = abbr_chars.find(b"\x00", idx)
+ abbr_vals[idx] = abbr_chars[idx:span_end].decode()
+
+ return abbr_vals[idx]
+
+ abbr = tuple(get_abbr(idx) for idx in abbrind)
+
+ # The remainder of the file consists of leap seconds (currently unused) and
+ # the standard/wall and ut/local indicators, which are metadata we don't need.
+ # In version 2 files, we need to skip the unnecessary data to get at the TZ string:
+ if header.version >= 2:
+ # Each leap second record has size (time_size + 4)
+ skip_bytes = header.isutcnt + header.isstdcnt + header.leapcnt * 12
+ fobj.seek(skip_bytes, 1)
+
+ c = fobj.read(1) # Should be \n
+ assert c == b"\n", c
+
+ tz_bytes = b""
+ while (c := fobj.read(1)) != b"\n":
+ tz_bytes += c
+
+ tz_str = tz_bytes
+ else:
+ tz_str = None
+
+ return trans_idx, trans_list_utc, utcoff, isdst, abbr, tz_str
+
+
+class _TZifHeader:
+ __slots__ = [
+ "version",
+ "isutcnt",
+ "isstdcnt",
+ "leapcnt",
+ "timecnt",
+ "typecnt",
+ "charcnt",
+ ]
+
+ def __init__(self, *args):
+ assert len(self.__slots__) == len(args)
+ for attr, val in zip(self.__slots__, args):
+ setattr(self, attr, val)
+
+ @classmethod
+ def from_file(cls, stream):
+ # The header starts with a 4-byte "magic" value
+ if stream.read(4) != b"TZif":
+ raise ValueError("Invalid TZif file: magic not found")
+
+ _version = stream.read(1)
+ if _version == b"\x00":
+ version = 1
+ else:
+ version = int(_version)
+ stream.read(15)
+
+ args = (version,)
+
+ # Slots are defined in the order that the bytes are arranged
+ args = args + struct.unpack(">6l", stream.read(24))
+
+ return cls(*args)
+
+
+class ZoneInfoNotFoundError(KeyError):
+ """Exception raised when a ZoneInfo key is not found."""
diff --git a/x64/Lib/zoneinfo/_tzpath.py b/x64/Lib/zoneinfo/_tzpath.py
new file mode 100644
index 0000000..9513611
--- /dev/null
+++ b/x64/Lib/zoneinfo/_tzpath.py
@@ -0,0 +1,175 @@
+import os
+import sysconfig
+
+
+def reset_tzpath(to=None):
+ global TZPATH
+
+ tzpaths = to
+ if tzpaths is not None:
+ if isinstance(tzpaths, (str, bytes)):
+ raise TypeError(
+ f"tzpaths must be a list or tuple, "
+ + f"not {type(tzpaths)}: {tzpaths!r}"
+ )
+
+ if not all(map(os.path.isabs, tzpaths)):
+ raise ValueError(_get_invalid_paths_message(tzpaths))
+ base_tzpath = tzpaths
+ else:
+ env_var = os.environ.get("PYTHONTZPATH", None)
+ if env_var is not None:
+ base_tzpath = _parse_python_tzpath(env_var)
+ else:
+ base_tzpath = _parse_python_tzpath(
+ sysconfig.get_config_var("TZPATH")
+ )
+
+ TZPATH = tuple(base_tzpath)
+
+
+def _parse_python_tzpath(env_var):
+ if not env_var:
+ return ()
+
+ raw_tzpath = env_var.split(os.pathsep)
+ new_tzpath = tuple(filter(os.path.isabs, raw_tzpath))
+
+ # If anything has been filtered out, we will warn about it
+ if len(new_tzpath) != len(raw_tzpath):
+ import warnings
+
+ msg = _get_invalid_paths_message(raw_tzpath)
+
+ warnings.warn(
+ "Invalid paths specified in PYTHONTZPATH environment variable."
+ + msg,
+ InvalidTZPathWarning,
+ )
+
+ return new_tzpath
+
+
+def _get_invalid_paths_message(tzpaths):
+ invalid_paths = (path for path in tzpaths if not os.path.isabs(path))
+
+ prefix = "\n "
+ indented_str = prefix + prefix.join(invalid_paths)
+
+ return (
+ "Paths should be absolute but found the following relative paths:"
+ + indented_str
+ )
+
+
+def find_tzfile(key):
+ """Retrieve the path to a TZif file from a key."""
+ _validate_tzfile_path(key)
+ for search_path in TZPATH:
+ filepath = os.path.join(search_path, key)
+ if os.path.isfile(filepath):
+ return filepath
+
+ return None
+
+
+_TEST_PATH = os.path.normpath(os.path.join("_", "_"))[:-1]
+
+
+def _validate_tzfile_path(path, _base=_TEST_PATH):
+ if os.path.isabs(path):
+ raise ValueError(
+ f"ZoneInfo keys may not be absolute paths, got: {path}"
+ )
+
+ # We only care about the kinds of path normalizations that would change the
+ # length of the key - e.g. a/../b -> a/b, or a/b/ -> a/b. On Windows,
+ # normpath will also change from a/b to a\b, but that would still preserve
+ # the length.
+ new_path = os.path.normpath(path)
+ if len(new_path) != len(path):
+ raise ValueError(
+ f"ZoneInfo keys must be normalized relative paths, got: {path}"
+ )
+
+ resolved = os.path.normpath(os.path.join(_base, new_path))
+ if not resolved.startswith(_base):
+ raise ValueError(
+ f"ZoneInfo keys must refer to subdirectories of TZPATH, got: {path}"
+ )
+
+
+del _TEST_PATH
+
+
+def available_timezones():
+ """Returns a set containing all available time zones.
+
+ .. caution::
+
+ This may attempt to open a large number of files, since the best way to
+ determine whether a given file on the time zone search path is a valid
+ time zone is to open it and check for the "magic string" at the beginning.
+ """
+ from importlib import resources
+
+ valid_zones = set()
+
+ # Start with loading from the tzdata package if it exists: this has a
+ # pre-assembled list of zones that only requires opening one file.
+ try:
+ with resources.open_text("tzdata", "zones") as f:
+ for zone in f:
+ zone = zone.strip()
+ if zone:
+ valid_zones.add(zone)
+ except (ImportError, FileNotFoundError):
+ pass
+
+ def valid_key(fpath):
+ try:
+ with open(fpath, "rb") as f:
+ return f.read(4) == b"TZif"
+ except Exception: # pragma: nocover
+ return False
+
+ for tz_root in TZPATH:
+ if not os.path.exists(tz_root):
+ continue
+
+ for root, dirnames, files in os.walk(tz_root):
+ if root == tz_root:
+ # right/ and posix/ are special directories and shouldn't be
+ # included in the output of available zones
+ if "right" in dirnames:
+ dirnames.remove("right")
+ if "posix" in dirnames:
+ dirnames.remove("posix")
+
+ for file in files:
+ fpath = os.path.join(root, file)
+
+ key = os.path.relpath(fpath, start=tz_root)
+ if os.sep != "/": # pragma: nocover
+ key = key.replace(os.sep, "/")
+
+ if not key or key in valid_zones:
+ continue
+
+ if valid_key(fpath):
+ valid_zones.add(key)
+
+ if "posixrules" in valid_zones:
+ # posixrules is a special symlink-only time zone; where it exists, it
+ # should not be included in the output
+ valid_zones.remove("posixrules")
+
+ return valid_zones
+
+
+class InvalidTZPathWarning(RuntimeWarning):
+ """Warning raised if an invalid path is specified in PYTHONTZPATH."""
+
+
+TZPATH = ()
+reset_tzpath()
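
A sketch of the PYTHONTZPATH filtering implemented above (the paths are hypothetical):

    # Sketch: relative entries are dropped with an InvalidTZPathWarning.
    import os
    import warnings
    import zoneinfo

    os.environ["PYTHONTZPATH"] = os.pathsep.join(
        ["/usr/share/zoneinfo", "relative/zoneinfo"]
    )
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        zoneinfo.reset_tzpath()  # re-reads PYTHONTZPATH
    print(zoneinfo.TZPATH)       # ('/usr/share/zoneinfo',)
    print([type(w.message).__name__ for w in caught])
    # ['InvalidTZPathWarning']
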
diff --git a/x64/Lib/zoneinfo/_zoneinfo.py b/x64/Lib/zoneinfo/_zoneinfo.py
new file mode 100644
index 0000000..9810637
--- /dev/null
+++ b/x64/Lib/zoneinfo/_zoneinfo.py
@@ -0,0 +1,752 @@
+import bisect
+import calendar
+import collections
+import functools
+import re
+import weakref
+from datetime import datetime, timedelta, tzinfo
+
+from . import _common, _tzpath
+
+EPOCH = datetime(1970, 1, 1)
+EPOCHORDINAL = datetime(1970, 1, 1).toordinal()
+
+# It is relatively expensive to construct new timedelta objects, and in most
+# cases we're looking at the same deltas, like integer numbers of hours, etc.
+# To improve speed and memory use, we'll keep a dictionary with references
+# to the ones we've already used so far.
+#
+# Loading every time zone in the 2020a version of the time zone database
+# requires 447 timedeltas, which requires approximately the amount of space
+# that ZoneInfo("America/New_York") with 236 transitions takes up, so we will
+# set the cache size to 512 so that in the common case we always get cache
+# hits, but specifically crafted ZoneInfo objects don't leak arbitrary amounts
+# of memory.
+@functools.lru_cache(maxsize=512)
+def _load_timedelta(seconds):
+ return timedelta(seconds=seconds)
+
+
+class ZoneInfo(tzinfo):
+ _strong_cache_size = 8
+ _strong_cache = collections.OrderedDict()
+ _weak_cache = weakref.WeakValueDictionary()
+ __module__ = "zoneinfo"
+
+ def __init_subclass__(cls):
+ cls._strong_cache = collections.OrderedDict()
+ cls._weak_cache = weakref.WeakValueDictionary()
+
+ def __new__(cls, key):
+ instance = cls._weak_cache.get(key, None)
+ if instance is None:
+ instance = cls._weak_cache.setdefault(key, cls._new_instance(key))
+ instance._from_cache = True
+
+ # Update the "strong" cache
+ cls._strong_cache[key] = cls._strong_cache.pop(key, instance)
+
+ if len(cls._strong_cache) > cls._strong_cache_size:
+ cls._strong_cache.popitem(last=False)
+
+ return instance
+
+ @classmethod
+ def no_cache(cls, key):
+ obj = cls._new_instance(key)
+ obj._from_cache = False
+
+ return obj
+
+ @classmethod
+ def _new_instance(cls, key):
+ obj = super().__new__(cls)
+ obj._key = key
+ obj._file_path = obj._find_tzfile(key)
+
+ if obj._file_path is not None:
+ file_obj = open(obj._file_path, "rb")
+ else:
+ file_obj = _common.load_tzdata(key)
+
+ with file_obj as f:
+ obj._load_file(f)
+
+ return obj
+
+ @classmethod
+ def from_file(cls, fobj, /, key=None):
+ obj = super().__new__(cls)
+ obj._key = key
+ obj._file_path = None
+ obj._load_file(fobj)
+ obj._file_repr = repr(fobj)
+
+ # Disable pickling for objects created from files
+ obj.__reduce__ = obj._file_reduce
+
+ return obj
+
+ @classmethod
+ def clear_cache(cls, *, only_keys=None):
+ if only_keys is not None:
+ for key in only_keys:
+ cls._weak_cache.pop(key, None)
+ cls._strong_cache.pop(key, None)
+
+ else:
+ cls._weak_cache.clear()
+ cls._strong_cache.clear()
+
+ @property
+ def key(self):
+ return self._key
+
+ def utcoffset(self, dt):
+ return self._find_trans(dt).utcoff
+
+ def dst(self, dt):
+ return self._find_trans(dt).dstoff
+
+ def tzname(self, dt):
+ return self._find_trans(dt).tzname
+
+ def fromutc(self, dt):
+ """Convert from datetime in UTC to datetime in local time"""
+
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ timestamp = self._get_local_timestamp(dt)
+ num_trans = len(self._trans_utc)
+
+ if num_trans >= 1 and timestamp < self._trans_utc[0]:
+ tti = self._tti_before
+ fold = 0
+ elif (
+ num_trans == 0 or timestamp > self._trans_utc[-1]
+ ) and not isinstance(self._tz_after, _ttinfo):
+ tti, fold = self._tz_after.get_trans_info_fromutc(
+ timestamp, dt.year
+ )
+ elif num_trans == 0:
+ tti = self._tz_after
+ fold = 0
+ else:
+ idx = bisect.bisect_right(self._trans_utc, timestamp)
+
+ if num_trans > 1 and timestamp >= self._trans_utc[1]:
+ tti_prev, tti = self._ttinfos[idx - 2 : idx]
+ elif timestamp > self._trans_utc[-1]:
+ tti_prev = self._ttinfos[-1]
+ tti = self._tz_after
+ else:
+ tti_prev = self._tti_before
+ tti = self._ttinfos[0]
+
+ # Detect fold
+ shift = tti_prev.utcoff - tti.utcoff
+ fold = shift.total_seconds() > timestamp - self._trans_utc[idx - 1]
+ dt += tti.utcoff
+ if fold:
+ return dt.replace(fold=1)
+ else:
+ return dt
+
+ def _find_trans(self, dt):
+ if dt is None:
+ if self._fixed_offset:
+ return self._tz_after
+ else:
+ return _NO_TTINFO
+
+ ts = self._get_local_timestamp(dt)
+
+ lt = self._trans_local[dt.fold]
+
+ num_trans = len(lt)
+
+ if num_trans and ts < lt[0]:
+ return self._tti_before
+ elif not num_trans or ts > lt[-1]:
+ if isinstance(self._tz_after, _TZStr):
+ return self._tz_after.get_trans_info(ts, dt.year, dt.fold)
+ else:
+ return self._tz_after
+ else:
+ # idx is the transition that occurs after this timestamp, so we
+ # subtract off 1 to get the current ttinfo
+ idx = bisect.bisect_right(lt, ts) - 1
+ assert idx >= 0
+ return self._ttinfos[idx]
+
+ def _get_local_timestamp(self, dt):
+ return (
+ (dt.toordinal() - EPOCHORDINAL) * 86400
+ + dt.hour * 3600
+ + dt.minute * 60
+ + dt.second
+ )
+
+ def __str__(self):
+ if self._key is not None:
+ return f"{self._key}"
+ else:
+ return repr(self)
+
+ def __repr__(self):
+ if self._key is not None:
+ return f"{self.__class__.__name__}(key={self._key!r})"
+ else:
+ return f"{self.__class__.__name__}.from_file({self._file_repr})"
+
+ def __reduce__(self):
+ return (self.__class__._unpickle, (self._key, self._from_cache))
+
+ def _file_reduce(self):
+ import pickle
+
+ raise pickle.PicklingError(
+ "Cannot pickle a ZoneInfo file created from a file stream."
+ )
+
+ @classmethod
+ def _unpickle(cls, key, from_cache, /):
+ if from_cache:
+ return cls(key)
+ else:
+ return cls.no_cache(key)
+
+ def _find_tzfile(self, key):
+ return _tzpath.find_tzfile(key)
+
+ def _load_file(self, fobj):
+ # Retrieve all the data as it exists in the zoneinfo file
+ trans_idx, trans_utc, utcoff, isdst, abbr, tz_str = _common.load_data(
+ fobj
+ )
+
+ # Infer the DST offsets (needed for .dst()) from the data
+ dstoff = self._utcoff_to_dstoff(trans_idx, utcoff, isdst)
+
+ # Convert all the transition times (UTC) into "seconds since 1970-01-01 local time"
+ trans_local = self._ts_to_local(trans_idx, trans_utc, utcoff)
+
+ # Construct `_ttinfo` objects for each transition in the file
+ _ttinfo_list = [
+ _ttinfo(
+ _load_timedelta(utcoffset), _load_timedelta(dstoffset), tzname
+ )
+ for utcoffset, dstoffset, tzname in zip(utcoff, dstoff, abbr)
+ ]
+
+ self._trans_utc = trans_utc
+ self._trans_local = trans_local
+ self._ttinfos = [_ttinfo_list[idx] for idx in trans_idx]
+
+ # Find the first non-DST transition
+ for i in range(len(isdst)):
+ if not isdst[i]:
+ self._tti_before = _ttinfo_list[i]
+ break
+ else:
+ if self._ttinfos:
+ self._tti_before = self._ttinfos[0]
+ else:
+ self._tti_before = None
+
+ # Set the "fallback" time zone
+ if tz_str is not None and tz_str != b"":
+ self._tz_after = _parse_tz_str(tz_str.decode())
+ else:
+ if not self._ttinfos and not _ttinfo_list:
+ raise ValueError("No time zone information found.")
+
+ if self._ttinfos:
+ self._tz_after = self._ttinfos[-1]
+ else:
+ self._tz_after = _ttinfo_list[-1]
+
+ # Determine if this is a "fixed offset" zone, meaning that the output
+ # of the utcoffset, dst and tzname functions does not depend on the
+ # specific datetime passed.
+ #
+ # We make three simplifying assumptions here:
+ #
+ # 1. If _tz_after is not a _ttinfo, it has transitions that might
+ # actually occur (it is possible to construct TZ strings that
+ # specify STD and DST but no transitions ever occur, such as
+ # AAA0BBB,0/0,J365/25).
+ # 2. If _ttinfo_list contains more than one _ttinfo object, the objects
+ # represent different offsets.
+ # 3. _ttinfo_list contains no unused _ttinfos (in which case an
+ # otherwise fixed-offset zone with extra _ttinfos defined may
+ # appear to *not* be a fixed offset zone).
+ #
+ # Violations to these assumptions would be fairly exotic, and exotic
+ # zones should almost certainly not be used with datetime.time (the
+ # only thing that would be affected by this).
+ if len(_ttinfo_list) > 1 or not isinstance(self._tz_after, _ttinfo):
+ self._fixed_offset = False
+ elif not _ttinfo_list:
+ self._fixed_offset = True
+ else:
+ self._fixed_offset = _ttinfo_list[0] == self._tz_after
+
+ @staticmethod
+ def _utcoff_to_dstoff(trans_idx, utcoffsets, isdsts):
+ # Now we must transform our ttis and abbrs into `_ttinfo` objects,
+ # but there is an issue: .dst() must return a timedelta with the
+ # difference between utcoffset() and the "standard" offset, but
+ # the "base offset" and "DST offset" are not encoded in the file;
+ # we can infer what they are from the isdst flag, but it is not
+ # sufficient to just look at the last standard offset, because
+ # occasionally countries will shift both DST offset and base offset.
+
+ typecnt = len(isdsts)
+ dstoffs = [0] * typecnt # Provisionally assign all to 0.
+ dst_cnt = sum(isdsts)
+ dst_found = 0
+
+ for i in range(1, len(trans_idx)):
+ if dst_cnt == dst_found:
+ break
+
+ idx = trans_idx[i]
+
+ dst = isdsts[idx]
+
+ # We're only going to look at daylight saving time
+ if not dst:
+ continue
+
+ # Skip any offsets that have already been assigned
+ if dstoffs[idx] != 0:
+ continue
+
+ dstoff = 0
+ utcoff = utcoffsets[idx]
+
+ comp_idx = trans_idx[i - 1]
+
+ if not isdsts[comp_idx]:
+ dstoff = utcoff - utcoffsets[comp_idx]
+
+ if not dstoff and idx < (typecnt - 1):
+ comp_idx = trans_idx[i + 1]
+
+ # If the following transition is also DST and we couldn't
+ # find the DST offset by this point, we're going to have to
+ # skip it and hope this transition gets assigned later
+ if isdsts[comp_idx]:
+ continue
+
+ dstoff = utcoff - utcoffsets[comp_idx]
+
+ if dstoff:
+ dst_found += 1
+ dstoffs[idx] = dstoff
+ else:
+ # If we didn't find a valid value for a given index, we'll end up
+ # with dstoff = 0 for something where `isdst=1`. This is obviously
+ # wrong - one hour will be a much better guess than 0
+ for idx in range(typecnt):
+ if not dstoffs[idx] and isdsts[idx]:
+ dstoffs[idx] = 3600
+
+ return dstoffs
+
+ @staticmethod
+ def _ts_to_local(trans_idx, trans_list_utc, utcoffsets):
+ """Generate number of seconds since 1970 *in the local time*.
+
+ This is necessary to easily find the transition times in local time"""
+ if not trans_list_utc:
+ return [[], []]
+
+ # Start with the timestamps and modify in-place
+ trans_list_wall = [list(trans_list_utc), list(trans_list_utc)]
+
+ if len(utcoffsets) > 1:
+ offset_0 = utcoffsets[0]
+ offset_1 = utcoffsets[trans_idx[0]]
+ if offset_1 > offset_0:
+ offset_1, offset_0 = offset_0, offset_1
+ else:
+ offset_0 = offset_1 = utcoffsets[0]
+
+ trans_list_wall[0][0] += offset_0
+ trans_list_wall[1][0] += offset_1
+
+ for i in range(1, len(trans_idx)):
+ offset_0 = utcoffsets[trans_idx[i - 1]]
+ offset_1 = utcoffsets[trans_idx[i]]
+
+ if offset_1 > offset_0:
+ offset_1, offset_0 = offset_0, offset_1
+
+ trans_list_wall[0][i] += offset_0
+ trans_list_wall[1][i] += offset_1
+
+ return trans_list_wall
+
+
+class _ttinfo:
+ __slots__ = ["utcoff", "dstoff", "tzname"]
+
+ def __init__(self, utcoff, dstoff, tzname):
+ self.utcoff = utcoff
+ self.dstoff = dstoff
+ self.tzname = tzname
+
+ def __eq__(self, other):
+ return (
+ self.utcoff == other.utcoff
+ and self.dstoff == other.dstoff
+ and self.tzname == other.tzname
+ )
+
+ def __repr__(self): # pragma: nocover
+ return (
+ f"{self.__class__.__name__}"
+ + f"({self.utcoff}, {self.dstoff}, {self.tzname})"
+ )
+
+
+_NO_TTINFO = _ttinfo(None, None, None)
+
+
+class _TZStr:
+ __slots__ = (
+ "std",
+ "dst",
+ "start",
+ "end",
+ "get_trans_info",
+ "get_trans_info_fromutc",
+ "dst_diff",
+ )
+
+ def __init__(
+ self, std_abbr, std_offset, dst_abbr, dst_offset, start=None, end=None
+ ):
+ self.dst_diff = dst_offset - std_offset
+ std_offset = _load_timedelta(std_offset)
+ self.std = _ttinfo(
+ utcoff=std_offset, dstoff=_load_timedelta(0), tzname=std_abbr
+ )
+
+ self.start = start
+ self.end = end
+
+ dst_offset = _load_timedelta(dst_offset)
+ delta = _load_timedelta(self.dst_diff)
+ self.dst = _ttinfo(utcoff=dst_offset, dstoff=delta, tzname=dst_abbr)
+
+ # These are assertions because the constructor should only be called
+ # by functions that would fail before passing start or end
+ assert start is not None, "No transition start specified"
+ assert end is not None, "No transition end specified"
+
+ self.get_trans_info = self._get_trans_info
+ self.get_trans_info_fromutc = self._get_trans_info_fromutc
+
+ def transitions(self, year):
+ start = self.start.year_to_epoch(year)
+ end = self.end.year_to_epoch(year)
+ return start, end
+
+ def _get_trans_info(self, ts, year, fold):
+ """Get the information about the current transition - tti"""
+ start, end = self.transitions(year)
+
+ # With fold = 0, the period (denominated in local time) with the
+ # smaller offset starts at the end of the gap and ends at the end of
+ # the fold; with fold = 1, it runs from the start of the gap to the
+ # beginning of the fold.
+ #
+ # So in order to determine the DST boundaries we need to know both
+ # the fold and whether DST is positive or negative (rare), and it
+ # turns out that this boils down to fold XNOR is_positive.
+ if fold == (self.dst_diff >= 0):
+ end -= self.dst_diff
+ else:
+ start += self.dst_diff
+
+ if start < end:
+ isdst = start <= ts < end
+ else:
+ isdst = not (end <= ts < start)
+
+ return self.dst if isdst else self.std
+
+ def _get_trans_info_fromutc(self, ts, year):
+ start, end = self.transitions(year)
+ start -= self.std.utcoff.total_seconds()
+ end -= self.dst.utcoff.total_seconds()
+
+ if start < end:
+ isdst = start <= ts < end
+ else:
+ isdst = not (end <= ts < start)
+
+ # For positive DST, the ambiguous period is one dst_diff after the end
+ # of DST; for negative DST, the ambiguous period is one dst_diff before
+ # the start of DST.
+ if self.dst_diff > 0:
+ ambig_start = end
+ ambig_end = end + self.dst_diff
+ else:
+ ambig_start = start
+ ambig_end = start - self.dst_diff
+
+ fold = ambig_start <= ts < ambig_end
+
+ return (self.dst if isdst else self.std, fold)
+
+
+def _post_epoch_days_before_year(year):
+ """Get the number of days between 1970-01-01 and YEAR-01-01"""
+ y = year - 1
+ return y * 365 + y // 4 - y // 100 + y // 400 - EPOCHORDINAL
+
+
+class _DayOffset:
+ __slots__ = ["d", "julian", "hour", "minute", "second"]
+
+ def __init__(self, d, julian, hour=2, minute=0, second=0):
+ if not (0 + julian) <= d <= 365:
+ min_day = 0 + julian
+ raise ValueError(f"d must be in [{min_day}, 365], not: {d}")
+
+ self.d = d
+ self.julian = julian
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+
+ def year_to_epoch(self, year):
+ days_before_year = _post_epoch_days_before_year(year)
+
+ d = self.d
+ if self.julian and d >= 59 and calendar.isleap(year):
+ d += 1
+
+ epoch = (days_before_year + d) * 86400
+ epoch += self.hour * 3600 + self.minute * 60 + self.second
+
+ return epoch
+
+
+class _CalendarOffset:
+ __slots__ = ["m", "w", "d", "hour", "minute", "second"]
+
+ _DAYS_BEFORE_MONTH = (
+ -1,
+ 0,
+ 31,
+ 59,
+ 90,
+ 120,
+ 151,
+ 181,
+ 212,
+ 243,
+ 273,
+ 304,
+ 334,
+ )
+
+ def __init__(self, m, w, d, hour=2, minute=0, second=0):
+ if not 0 < m <= 12:
+ raise ValueError("m must be in (0, 12]")
+
+ if not 0 < w <= 5:
+ raise ValueError("w must be in (0, 5]")
+
+ if not 0 <= d <= 6:
+ raise ValueError("d must be in [0, 6]")
+
+ self.m = m
+ self.w = w
+ self.d = d
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+
+ @classmethod
+ def _ymd2ord(cls, year, month, day):
+ return (
+ _post_epoch_days_before_year(year)
+ + cls._DAYS_BEFORE_MONTH[month]
+ + (month > 2 and calendar.isleap(year))
+ + day
+ )
+
+ # TODO: These are not actually epoch dates as they are expressed in local time
+ def year_to_epoch(self, year):
+ """Calculates the datetime of the occurrence from the year"""
+ # We know year and month, we need to convert w, d into day of month
+ #
+ # Week 1 is the first week in which day `d` (where 0 = Sunday) appears.
+ # Week 5 represents the last occurrence of day `d`, so we need to know
+ # the range of the month.
+ first_day, days_in_month = calendar.monthrange(year, self.m)
+
+ # This equation seems magical, so I'll break it down:
+ # 1. calendar says 0 = Monday, POSIX says 0 = Sunday
+ # so we need first_day + 1 to get 1 = Monday -> 7 = Sunday,
+ # which is still equivalent because this math is mod 7
+ # 2. Get first day - desired day mod 7: -1 % 7 = 6, so we don't need
+ # to do anything to adjust negative numbers.
+ # 3. Add 1 because month days are a 1-based index.
+ month_day = (self.d - (first_day + 1)) % 7 + 1
+
+ # Now use a 0-based index version of `w` to calculate the w-th
+ # occurrence of `d`
+ month_day += (self.w - 1) * 7
+
+ # month_day will only be > days_in_month if w was 5, and `w` means
+ # "last occurrence of `d`", so now we just check if we over-shot the
+ # end of the month and if so knock off 1 week.
+ if month_day > days_in_month:
+ month_day -= 7
+
+ ordinal = self._ymd2ord(year, self.m, month_day)
+ epoch = ordinal * 86400
+ epoch += self.hour * 3600 + self.minute * 60 + self.second
+ return epoch
+
+
+def _parse_tz_str(tz_str):
+ # The tz string has the format:
+ #
+ # std[offset[dst[offset],start[/time],end[/time]]]
+ #
+ # std and dst must be 3 or more characters long and must not contain
+ # a leading colon, embedded digits, commas, nor plus or minus signs;
+ # The spaces between "std" and "offset" are only for display and are
+ # not actually present in the string.
+ #
+ # The format of the offset is ``[+|-]hh[:mm[:ss]]``
+
+ offset_str, *start_end_str = tz_str.split(",", 1)
+
+ # fmt: off
+ parser_re = re.compile(
+ r"(?P<std>[^<0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
+ r"((?P<stdoff>[+-]?\d{1,2}(:\d{2}(:\d{2})?)?)" +
+ r"((?P<dst>[^0-9:.+-]+|<[a-zA-Z0-9+\-]+>)" +
+ r"((?P<dstoff>[+-]?\d{1,2}(:\d{2}(:\d{2})?)?))?" +
+ r")?" + # dst
+ r")?$" # stdoff
+ )
+ # fmt: on
+
+ m = parser_re.match(offset_str)
+
+ if m is None:
+ raise ValueError(f"{tz_str} is not a valid TZ string")
+
+ std_abbr = m.group("std")
+ dst_abbr = m.group("dst")
+ dst_offset = None
+
+ std_abbr = std_abbr.strip("<>")
+
+ if dst_abbr:
+ dst_abbr = dst_abbr.strip("<>")
+
+ if std_offset := m.group("stdoff"):
+ try:
+ std_offset = _parse_tz_delta(std_offset)
+ except ValueError as e:
+ raise ValueError(f"Invalid STD offset in {tz_str}") from e
+ else:
+ std_offset = 0
+
+ if dst_abbr is not None:
+ if dst_offset := m.group("dstoff"):
+ try:
+ dst_offset = _parse_tz_delta(dst_offset)
+ except ValueError as e:
+ raise ValueError(f"Invalid DST offset in {tz_str}") from e
+ else:
+ dst_offset = std_offset + 3600
+
+ if not start_end_str:
+ raise ValueError(f"Missing transition rules: {tz_str}")
+
+ start_end_strs = start_end_str[0].split(",", 1)
+ try:
+ start, end = (_parse_dst_start_end(x) for x in start_end_strs)
+ except ValueError as e:
+ raise ValueError(f"Invalid TZ string: {tz_str}") from e
+
+ return _TZStr(std_abbr, std_offset, dst_abbr, dst_offset, start, end)
+ elif start_end_str:
+ raise ValueError(f"Transition rule present without DST: {tz_str}")
+ else:
+ # This is a static ttinfo, don't return _TZStr
+ return _ttinfo(
+ _load_timedelta(std_offset), _load_timedelta(0), std_abbr
+ )
+
+
+def _parse_dst_start_end(dststr):
+ date, *time = dststr.split("/")
+ if date[0] == "M":
+ n_is_julian = False
+ m = re.match(r"M(\d{1,2})\.(\d).(\d)$", date)
+ if m is None:
+ raise ValueError(f"Invalid dst start/end date: {dststr}")
+ date_offset = tuple(map(int, m.groups()))
+ offset = _CalendarOffset(*date_offset)
+ else:
+ if date[0] == "J":
+ n_is_julian = True
+ date = date[1:]
+ else:
+ n_is_julian = False
+
+ doy = int(date)
+ offset = _DayOffset(doy, n_is_julian)
+
+ if time:
+ time_components = list(map(int, time[0].split(":")))
+ n_components = len(time_components)
+ if n_components < 3:
+ time_components.extend([0] * (3 - n_components))
+ offset.hour, offset.minute, offset.second = time_components
+
+ return offset
+
+
+def _parse_tz_delta(tz_delta):
+ match = re.match(
+ r"(?P<sign>[+-])?(?P<h>\d{1,2})(:(?P<m>\d{2})(:(?P<s>\d{2}))?)?",
+ tz_delta,
+ )
+ # Anything passed to this function should already have hit an equivalent
+ # regular expression to find the section to parse.
+ assert match is not None, tz_delta
+
+ h, m, s = (
+ int(v) if v is not None else 0
+ for v in map(match.group, ("h", "m", "s"))
+ )
+
+ total = h * 3600 + m * 60 + s
+
+ if not -86400 < total < 86400:
+ raise ValueError(
+ f"Offset must be strictly between -24h and +24h: {tz_delta}"
+ )
+
+ # Yes, +5 maps to an offset of -5h
+ if match.group("sign") != "-":
+ total *= -1
+
+ return total
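
A sketch of the fold disambiguation the machinery above provides (assumes "America/New_York" is resolvable via TZPATH or the tzdata package):

    # Sketch: the repeated 1:30 local hour at the 2020 DST fall-back.
    from datetime import datetime
    from zoneinfo import ZoneInfo

    tz = ZoneInfo("America/New_York")
    first = datetime(2020, 11, 1, 1, 30, tzinfo=tz)           # first pass
    second = datetime(2020, 11, 1, 1, 30, fold=1, tzinfo=tz)  # repeated hour
    print(first.tzname(), second.tzname())  # EDT EST
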
diff --git a/x64/include/Python-ast.h b/x64/include/Python-ast.h
index 5fe4f2b..e7afa1e 100644
--- a/x64/include/Python-ast.h
+++ b/x64/include/Python-ast.h
@@ -6,6 +6,7 @@
extern "C" {
#endif
+#ifndef Py_LIMITED_API
#include "asdl.h"
#undef Yield /* undefine macro conflicting with <winbase.h> */
@@ -16,10 +17,7 @@ typedef struct _stmt *stmt_ty;
typedef struct _expr *expr_ty;
-typedef enum _expr_context { Load=1, Store=2, Del=3, AugLoad=4, AugStore=5,
- Param=6 } expr_context_ty;
-
-typedef struct _slice *slice_ty;
+typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;
typedef enum _boolop { And=1, Or=2 } boolop_ty;
@@ -50,7 +48,7 @@ typedef struct _type_ignore *type_ignore_ty;
enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
- FunctionType_kind=4, Suite_kind=5};
+ FunctionType_kind=4};
struct _mod {
enum _mod_kind kind;
union {
@@ -72,10 +70,6 @@ struct _mod {
expr_ty returns;
} FunctionType;
- struct {
- asdl_seq *body;
- } Suite;
-
} v;
};
@@ -236,7 +230,7 @@ enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
FormattedValue_kind=18, JoinedStr_kind=19, Constant_kind=20,
Attribute_kind=21, Subscript_kind=22, Starred_kind=23,
- Name_kind=24, List_kind=25, Tuple_kind=26};
+ Name_kind=24, List_kind=25, Tuple_kind=26, Slice_kind=27};
struct _expr {
enum _expr_kind kind;
union {
@@ -349,7 +343,7 @@ struct _expr {
struct {
expr_ty value;
- slice_ty slice;
+ expr_ty slice;
expr_context_ty ctx;
} Subscript;
@@ -373,32 +367,17 @@ struct _expr {
expr_context_ty ctx;
} Tuple;
- } v;
- int lineno;
- int col_offset;
- int end_lineno;
- int end_col_offset;
-};
-
-enum _slice_kind {Slice_kind=1, ExtSlice_kind=2, Index_kind=3};
-struct _slice {
- enum _slice_kind kind;
- union {
struct {
expr_ty lower;
expr_ty upper;
expr_ty step;
} Slice;
- struct {
- asdl_seq *dims;
- } ExtSlice;
-
- struct {
- expr_ty value;
- } Index;
-
} v;
+ int lineno;
+ int col_offset;
+ int end_lineno;
+ int end_col_offset;
};
struct _comprehension {
@@ -448,6 +427,10 @@ struct _arg {
struct _keyword {
identifier arg;
expr_ty value;
+ int lineno;
+ int col_offset;
+ int end_lineno;
+ int end_col_offset;
};
struct _alias {
@@ -482,8 +465,6 @@ mod_ty _Py_Interactive(asdl_seq * body, PyArena *arena);
mod_ty _Py_Expression(expr_ty body, PyArena *arena);
#define FunctionType(a0, a1, a2) _Py_FunctionType(a0, a1, a2)
mod_ty _Py_FunctionType(asdl_seq * argtypes, expr_ty returns, PyArena *arena);
-#define Suite(a0, a1) _Py_Suite(a0, a1)
-mod_ty _Py_Suite(asdl_seq * body, PyArena *arena);
#define FunctionDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) _Py_FunctionDef(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
stmt_ty _Py_FunctionDef(identifier name, arguments_ty args, asdl_seq * body,
asdl_seq * decorator_list, expr_ty returns, string
@@ -653,7 +634,7 @@ expr_ty _Py_Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
#define Subscript(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Subscript(a0, a1, a2, a3, a4, a5, a6, a7)
-expr_ty _Py_Subscript(expr_ty value, slice_ty slice, expr_context_ty ctx, int
+expr_ty _Py_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
#define Starred(a0, a1, a2, a3, a4, a5, a6) _Py_Starred(a0, a1, a2, a3, a4, a5, a6)
@@ -672,12 +653,10 @@ expr_ty _Py_List(asdl_seq * elts, expr_context_ty ctx, int lineno, int
expr_ty _Py_Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
-#define Slice(a0, a1, a2, a3) _Py_Slice(a0, a1, a2, a3)
-slice_ty _Py_Slice(expr_ty lower, expr_ty upper, expr_ty step, PyArena *arena);
-#define ExtSlice(a0, a1) _Py_ExtSlice(a0, a1)
-slice_ty _Py_ExtSlice(asdl_seq * dims, PyArena *arena);
-#define Index(a0, a1) _Py_Index(a0, a1)
-slice_ty _Py_Index(expr_ty value, PyArena *arena);
+#define Slice(a0, a1, a2, a3, a4, a5, a6, a7) _Py_Slice(a0, a1, a2, a3, a4, a5, a6, a7)
+expr_ty _Py_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno, int
+ col_offset, int end_lineno, int end_col_offset, PyArena
+ *arena);
#define comprehension(a0, a1, a2, a3, a4) _Py_comprehension(a0, a1, a2, a3, a4)
comprehension_ty _Py_comprehension(expr_ty target, expr_ty iter, asdl_seq *
ifs, int is_async, PyArena *arena);
@@ -695,8 +674,10 @@ arguments_ty _Py_arguments(asdl_seq * posonlyargs, asdl_seq * args, arg_ty
arg_ty _Py_arg(identifier arg, expr_ty annotation, string type_comment, int
lineno, int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
-#define keyword(a0, a1, a2) _Py_keyword(a0, a1, a2)
-keyword_ty _Py_keyword(identifier arg, expr_ty value, PyArena *arena);
+#define keyword(a0, a1, a2, a3, a4, a5, a6) _Py_keyword(a0, a1, a2, a3, a4, a5, a6)
+keyword_ty _Py_keyword(identifier arg, expr_ty value, int lineno, int
+ col_offset, int end_lineno, int end_col_offset, PyArena
+ *arena);
#define alias(a0, a1, a2) _Py_alias(a0, a1, a2)
alias_ty _Py_alias(identifier name, identifier asname, PyArena *arena);
#define withitem(a0, a1, a2) _Py_withitem(a0, a1, a2)
@@ -708,6 +689,7 @@ type_ignore_ty _Py_TypeIgnore(int lineno, string tag, PyArena *arena);
PyObject* PyAST_mod2obj(mod_ty t);
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
int PyAST_Check(PyObject* obj);
+#endif /* !Py_LIMITED_API */
#ifdef __cplusplus
}
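
The header changes above track the 3.9 AST simplification, which is observable from Python (a sketch):

    # Sketch: a subscript's slice is now a plain expression; Index is gone
    # and ExtSlice is replaced by an ordinary Tuple.
    import ast

    print(ast.dump(ast.parse("x[0]", mode="eval").body.slice))
    # Constant(value=0)   (3.8: Index(value=Constant(value=0)))
    print(type(ast.parse("x[0:1, 2]", mode="eval").body.slice).__name__)
    # Tuple               (3.8: ExtSlice)
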
diff --git a/x64/include/Python.h b/x64/include/Python.h
index d6e5b13..dcd0a57 100644
--- a/x64/include/Python.h
+++ b/x64/include/Python.h
@@ -114,12 +114,15 @@
#include "classobject.h"
#include "fileobject.h"
#include "pycapsule.h"
+#include "code.h"
+#include "pyframe.h"
#include "traceback.h"
#include "sliceobject.h"
#include "cellobject.h"
#include "iterobject.h"
#include "genobject.h"
#include "descrobject.h"
+#include "genericaliasobject.h"
#include "warnings.h"
#include "weakrefobject.h"
#include "structseq.h"
@@ -130,6 +133,7 @@
#include "pyerrors.h"
#include "cpython/initconfig.h"
+#include "pythread.h"
#include "pystate.h"
#include "context.h"
@@ -152,7 +156,6 @@
#include "pyctype.h"
#include "pystrtod.h"
#include "pystrcmp.h"
-#include "dtoa.h"
#include "fileutils.h"
#include "pyfpe.h"
#include "tracemalloc.h"
diff --git a/x64/include/abstract.h b/x64/include/abstract.h
index 777fd70..bb51c66 100644
--- a/x64/include/abstract.h
+++ b/x64/include/abstract.h
@@ -141,6 +141,12 @@ extern "C" {
#endif
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+/* Call a callable Python object without any arguments */
+PyAPI_FUNC(PyObject *) PyObject_CallNoArgs(PyObject *func);
+#endif
+
+
/* Call a callable Python object 'callable' with arguments given by the
tuple 'args' and keywords arguments given by the dictionary 'kwargs'.
@@ -696,7 +702,7 @@ PyAPI_FUNC(PyObject *) PySequence_Fast(PyObject *o, const char* m);
(PyList_Check(o) ? PyList_GET_ITEM(o, i) : PyTuple_GET_ITEM(o, i))
/* Return a pointer to the underlying item array for
- an object retured by PySequence_Fast */
+ an object returned by PySequence_Fast */
#define PySequence_Fast_ITEMS(sf) \
(PyList_Check(sf) ? ((PyListObject *)(sf))->ob_item \
: ((PyTupleObject *)(sf))->ob_item)
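
A short sketch of the call helper added above: PyObject_CallNoArgs() avoids
building an empty tuple for PyObject_CallObject(). It assumes a build
targeting the limited API >= 0x03090000, or the full API.

#include <Python.h>

static PyObject *
call_no_args(PyObject *func)
{
    return PyObject_CallNoArgs(func);   /* new reference, NULL on error */
}
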
diff --git a/x64/include/asdl.h b/x64/include/asdl.h
index fc6d223..e962560 100644
--- a/x64/include/asdl.h
+++ b/x64/include/asdl.h
@@ -1,11 +1,10 @@
+#ifndef Py_LIMITED_API
#ifndef Py_ASDL_H
#define Py_ASDL_H
typedef PyObject * identifier;
typedef PyObject * string;
-typedef PyObject * bytes;
typedef PyObject * object;
-typedef PyObject * singleton;
typedef PyObject * constant;
/* It would be nice if the code generated by asdl_c.py was completely
@@ -44,3 +43,4 @@ asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);
#endif
#endif /* !Py_ASDL_H */
+#endif /* Py_LIMITED_API */
diff --git a/x64/include/ast.h b/x64/include/ast.h
index f1d7348..a8c52af 100644
--- a/x64/include/ast.h
+++ b/x64/include/ast.h
@@ -1,3 +1,4 @@
+#ifndef Py_LIMITED_API
#ifndef Py_AST_H
#define Py_AST_H
#ifdef __cplusplus
@@ -19,19 +20,16 @@ PyAPI_FUNC(mod_ty) PyAST_FromNodeObject(
PyObject *filename,
PyArena *arena);
-#ifndef Py_LIMITED_API
-
/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
PyAPI_FUNC(PyObject *) _PyAST_ExprAsUnicode(expr_ty);
/* Return the borrowed reference to the first literal string in the
- sequence of statemnts or NULL if it doesn't start from a literal string.
+ sequence of statements or NULL if it doesn't start from a literal string.
Doesn't set exception. */
PyAPI_FUNC(PyObject *) _PyAST_GetDocString(asdl_seq *);
-#endif /* !Py_LIMITED_API */
-
#ifdef __cplusplus
}
#endif
#endif /* !Py_AST_H */
+#endif /* !Py_LIMITED_API */
diff --git a/x64/include/boolobject.h b/x64/include/boolobject.h
index 7cc2f1f..bb8044a 100644
--- a/x64/include/boolobject.h
+++ b/x64/include/boolobject.h
@@ -9,7 +9,7 @@ extern "C" {
PyAPI_DATA(PyTypeObject) PyBool_Type;
-#define PyBool_Check(x) (Py_TYPE(x) == &PyBool_Type)
+#define PyBool_Check(x) Py_IS_TYPE(x, &PyBool_Type)
/* Py_False and Py_True are the only two bools in existence.
Don't forget to apply Py_INCREF() when returning either!!! */
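
The pattern above recurs throughout this commit: exact-type checks move from
comparing Py_TYPE(x) against a type object to the new Py_IS_TYPE() macro.
A minimal sketch:

#include <Python.h>

static int
is_exact_bool(PyObject *op)
{
    return Py_IS_TYPE(op, &PyBool_Type);   /* 1 for True/False, else 0 */
}
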
diff --git a/x64/include/bytearrayobject.h b/x64/include/bytearrayobject.h
index a757b88..9e95433 100644
--- a/x64/include/bytearrayobject.h
+++ b/x64/include/bytearrayobject.h
@@ -18,25 +18,13 @@ extern "C" {
* to contain a char pointer, not an unsigned char pointer.
*/
-/* Object layout */
-#ifndef Py_LIMITED_API
-typedef struct {
- PyObject_VAR_HEAD
- Py_ssize_t ob_alloc; /* How many bytes allocated in ob_bytes */
- char *ob_bytes; /* Physical backing buffer */
- char *ob_start; /* Logical start inside ob_bytes */
- /* XXX(nnorwitz): should ob_exports be Py_ssize_t? */
- int ob_exports; /* How many buffer exports */
-} PyByteArrayObject;
-#endif
-
/* Type object */
PyAPI_DATA(PyTypeObject) PyByteArray_Type;
PyAPI_DATA(PyTypeObject) PyByteArrayIter_Type;
/* Type check macros */
#define PyByteArray_Check(self) PyObject_TypeCheck(self, &PyByteArray_Type)
-#define PyByteArray_CheckExact(self) (Py_TYPE(self) == &PyByteArray_Type)
+#define PyByteArray_CheckExact(self) Py_IS_TYPE(self, &PyByteArray_Type)
/* Direct API functions */
PyAPI_FUNC(PyObject *) PyByteArray_FromObject(PyObject *);
@@ -46,14 +34,10 @@ PyAPI_FUNC(Py_ssize_t) PyByteArray_Size(PyObject *);
PyAPI_FUNC(char *) PyByteArray_AsString(PyObject *);
PyAPI_FUNC(int) PyByteArray_Resize(PyObject *, Py_ssize_t);
-/* Macros, trading safety for speed */
#ifndef Py_LIMITED_API
-#define PyByteArray_AS_STRING(self) \
- (assert(PyByteArray_Check(self)), \
- Py_SIZE(self) ? ((PyByteArrayObject *)(self))->ob_start : _PyByteArray_empty_string)
-#define PyByteArray_GET_SIZE(self) (assert(PyByteArray_Check(self)), Py_SIZE(self))
-
-PyAPI_DATA(char) _PyByteArray_empty_string[];
+# define Py_CPYTHON_BYTEARRAYOBJECT_H
+# include "cpython/bytearrayobject.h"
+# undef Py_CPYTHON_BYTEARRAYOBJECT_H
#endif
#ifdef __cplusplus
diff --git a/x64/include/bytesobject.h b/x64/include/bytesobject.h
index 3fde4a2..5062d8d 100644
--- a/x64/include/bytesobject.h
+++ b/x64/include/bytesobject.h
@@ -27,26 +27,12 @@ functions should be applied to nil objects.
/* Caching the hash (ob_shash) saves recalculation of a string's hash value.
This significantly speeds up dict lookups. */
-#ifndef Py_LIMITED_API
-typedef struct {
- PyObject_VAR_HEAD
- Py_hash_t ob_shash;
- char ob_sval[1];
-
- /* Invariants:
- * ob_sval contains space for 'ob_size+1' elements.
- * ob_sval[ob_size] == 0.
- * ob_shash is the hash of the string or -1 if not computed yet.
- */
-} PyBytesObject;
-#endif
-
PyAPI_DATA(PyTypeObject) PyBytes_Type;
PyAPI_DATA(PyTypeObject) PyBytesIter_Type;
#define PyBytes_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_BYTES_SUBCLASS)
-#define PyBytes_CheckExact(op) (Py_TYPE(op) == &PyBytes_Type)
+#define PyBytes_CheckExact(op) Py_IS_TYPE(op, &PyBytes_Type)
PyAPI_FUNC(PyObject *) PyBytes_FromStringAndSize(const char *, Py_ssize_t);
PyAPI_FUNC(PyObject *) PyBytes_FromString(const char *);
@@ -60,40 +46,9 @@ PyAPI_FUNC(char *) PyBytes_AsString(PyObject *);
PyAPI_FUNC(PyObject *) PyBytes_Repr(PyObject *, int);
PyAPI_FUNC(void) PyBytes_Concat(PyObject **, PyObject *);
PyAPI_FUNC(void) PyBytes_ConcatAndDel(PyObject **, PyObject *);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t);
-PyAPI_FUNC(PyObject*) _PyBytes_FormatEx(
- const char *format,
- Py_ssize_t format_len,
- PyObject *args,
- int use_bytearray);
-PyAPI_FUNC(PyObject*) _PyBytes_FromHex(
- PyObject *string,
- int use_bytearray);
-#endif
PyAPI_FUNC(PyObject *) PyBytes_DecodeEscape(const char *, Py_ssize_t,
const char *, Py_ssize_t,
const char *);
-#ifndef Py_LIMITED_API
-/* Helper for PyBytes_DecodeEscape that detects invalid escape chars. */
-PyAPI_FUNC(PyObject *) _PyBytes_DecodeEscape(const char *, Py_ssize_t,
- const char *, Py_ssize_t,
- const char *,
- const char **);
-#endif
-
-/* Macro, trading safety for speed */
-#ifndef Py_LIMITED_API
-#define PyBytes_AS_STRING(op) (assert(PyBytes_Check(op)), \
- (((PyBytesObject *)(op))->ob_sval))
-#define PyBytes_GET_SIZE(op) (assert(PyBytes_Check(op)),Py_SIZE(op))
-#endif
-
-/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*,
- x must be an iterable object. */
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _PyBytes_Join(PyObject *sep, PyObject *x);
-#endif
/* Provides access to the internal data buffer and size of a string
object or the default encoded version of a Unicode object. Passing
@@ -108,28 +63,6 @@ PyAPI_FUNC(int) PyBytes_AsStringAndSize(
strings) */
);
-/* Using the current locale, insert the thousands grouping
- into the string pointed to by buffer. For the argument descriptions,
- see Objects/stringlib/localeutil.h */
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(Py_ssize_t) _PyBytes_InsertThousandsGroupingLocale(char *buffer,
- Py_ssize_t n_buffer,
- char *digits,
- Py_ssize_t n_digits,
- Py_ssize_t min_width);
-
-/* Using explicit passed-in values, insert the thousands grouping
- into the string pointed to by buffer. For the argument descriptions,
- see Objects/stringlib/localeutil.h */
-PyAPI_FUNC(Py_ssize_t) _PyBytes_InsertThousandsGrouping(char *buffer,
- Py_ssize_t n_buffer,
- char *digits,
- Py_ssize_t n_digits,
- Py_ssize_t min_width,
- const char *grouping,
- const char *thousands_sep);
-#endif
-
/* Flags used by string formatting */
#define F_LJUST (1<<0)
#define F_SIGN (1<<1)
@@ -138,85 +71,10 @@ PyAPI_FUNC(Py_ssize_t) _PyBytes_InsertThousandsGrouping(char *buffer,
#define F_ZERO (1<<4)
#ifndef Py_LIMITED_API
-/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer".
- A _PyBytesWriter variable must be declared at the end of variables in a
- function to optimize the memory allocation on the stack. */
-typedef struct {
- /* bytes, bytearray or NULL (when the small buffer is used) */
- PyObject *buffer;
-
- /* Number of allocated size. */
- Py_ssize_t allocated;
-
- /* Minimum number of allocated bytes,
- incremented by _PyBytesWriter_Prepare() */
- Py_ssize_t min_size;
-
- /* If non-zero, use a bytearray instead of a bytes object for buffer. */
- int use_bytearray;
-
- /* If non-zero, overallocate the buffer (default: 0).
- This flag must be zero if use_bytearray is non-zero. */
- int overallocate;
-
- /* Stack buffer */
- int use_small_buffer;
- char small_buffer[512];
-} _PyBytesWriter;
-
-/* Initialize a bytes writer
-
- By default, the overallocation is disabled. Set the overallocate attribute
- to control the allocation of the buffer. */
-PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer);
-
-/* Get the buffer content and reset the writer.
- Return a bytes object, or a bytearray object if use_bytearray is non-zero.
- Raise an exception and return NULL on error. */
-PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer,
- void *str);
-
-/* Deallocate memory of a writer (clear its internal buffer). */
-PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer);
-
-/* Allocate the buffer to write size bytes.
- Return the pointer to the beginning of buffer data.
- Raise an exception and return NULL on error. */
-PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer,
- Py_ssize_t size);
-
-/* Ensure that the buffer is large enough to write *size* bytes.
- Add size to the writer minimum size (min_size attribute).
-
- str is the current pointer inside the buffer.
- Return the updated current pointer inside the buffer.
- Raise an exception and return NULL on error. */
-PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer,
- void *str,
- Py_ssize_t size);
-
-/* Resize the buffer to make it larger.
- The new buffer may be larger than size bytes because of overallocation.
- Return the updated current pointer inside the buffer.
- Raise an exception and return NULL on error.
-
- Note: size must be greater than the number of allocated bytes in the writer.
-
- This function doesn't use the writer minimum size (min_size attribute).
-
- See also _PyBytesWriter_Prepare().
- */
-PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer,
- void *str,
- Py_ssize_t size);
-
-/* Write bytes.
- Raise an exception and return NULL on error. */
-PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer,
- void *str,
- const void *bytes,
- Py_ssize_t size);
-#endif /* Py_LIMITED_API */
+# define Py_CPYTHON_BYTESOBJECT_H
+# include "cpython/bytesobject.h"
+# undef Py_CPYTHON_BYTESOBJECT_H
+#endif
#ifdef __cplusplus
}
diff --git a/x64/include/cellobject.h b/x64/include/cellobject.h
index 2f9b5b7..f12aa90 100644
--- a/x64/include/cellobject.h
+++ b/x64/include/cellobject.h
@@ -13,7 +13,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyCell_Type;
-#define PyCell_Check(op) (Py_TYPE(op) == &PyCell_Type)
+#define PyCell_Check(op) Py_IS_TYPE(op, &PyCell_Type)
PyAPI_FUNC(PyObject *) PyCell_New(PyObject *);
PyAPI_FUNC(PyObject *) PyCell_Get(PyObject *);
diff --git a/x64/include/ceval.h b/x64/include/ceval.h
index 36fd014..0f372e2 100644
--- a/x64/include/ceval.h
+++ b/x64/include/ceval.h
@@ -8,52 +8,30 @@ extern "C" {
/* Interface to random parts in ceval.c */
/* PyEval_CallObjectWithKeywords(), PyEval_CallObject(), PyEval_CallFunction
- * and PyEval_CallMethod are kept for backward compatibility: PyObject_Call(),
- * PyObject_CallFunction() and PyObject_CallMethod() are recommended to call
- * a callable object.
+ * and PyEval_CallMethod are deprecated. Since they are officially part of the
+ * stable ABI (PEP 384), they must be kept for backward compatibility.
+ * PyObject_Call(), PyObject_CallFunction() and PyObject_CallMethod() are
+ * recommended to call a callable object.
*/
-PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords(
+Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallObjectWithKeywords(
PyObject *callable,
PyObject *args,
PyObject *kwargs);
-/* Inline this */
+/* Deprecated since PyEval_CallObjectWithKeywords is deprecated */
#define PyEval_CallObject(callable, arg) \
PyEval_CallObjectWithKeywords(callable, arg, (PyObject *)NULL)
-PyAPI_FUNC(PyObject *) PyEval_CallFunction(PyObject *callable,
- const char *format, ...);
-PyAPI_FUNC(PyObject *) PyEval_CallMethod(PyObject *obj,
- const char *name,
- const char *format, ...);
-
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *);
-PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *);
-PyAPI_FUNC(void) _PyEval_SetCoroutineOriginTrackingDepth(int new_depth);
-PyAPI_FUNC(int) _PyEval_GetCoroutineOriginTrackingDepth(void);
-PyAPI_FUNC(void) _PyEval_SetAsyncGenFirstiter(PyObject *);
-PyAPI_FUNC(PyObject *) _PyEval_GetAsyncGenFirstiter(void);
-PyAPI_FUNC(void) _PyEval_SetAsyncGenFinalizer(PyObject *);
-PyAPI_FUNC(PyObject *) _PyEval_GetAsyncGenFinalizer(void);
-#endif
-
-struct _frame; /* Avoid including frameobject.h */
+Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallFunction(
+ PyObject *callable, const char *format, ...);
+Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyEval_CallMethod(
+ PyObject *obj, const char *name, const char *format, ...);
PyAPI_FUNC(PyObject *) PyEval_GetBuiltins(void);
PyAPI_FUNC(PyObject *) PyEval_GetGlobals(void);
PyAPI_FUNC(PyObject *) PyEval_GetLocals(void);
-PyAPI_FUNC(struct _frame *) PyEval_GetFrame(void);
-
-#ifndef Py_LIMITED_API
-/* Helper to look up a builtin object */
-PyAPI_FUNC(PyObject *) _PyEval_GetBuiltinId(_Py_Identifier *);
-/* Look at the current frame's (if any) code's co_flags, and turn on
- the corresponding compiler flags in cf->cf_flags. Return 1 if any
- flag was set, else return 0. */
-PyAPI_FUNC(int) PyEval_MergeCompilerFlags(PyCompilerFlags *cf);
-#endif
+PyAPI_FUNC(PyFrameObject *) PyEval_GetFrame(void);
PyAPI_FUNC(int) Py_AddPendingCall(int (*func)(void *), void *arg);
PyAPI_FUNC(int) Py_MakePendingCalls(void);
@@ -86,41 +64,8 @@ PyAPI_FUNC(int) Py_MakePendingCalls(void);
PyAPI_FUNC(void) Py_SetRecursionLimit(int);
PyAPI_FUNC(int) Py_GetRecursionLimit(void);
-#define Py_EnterRecursiveCall(where) \
- (_Py_MakeRecCheck(PyThreadState_GET()->recursion_depth) && \
- _Py_CheckRecursiveCall(where))
-#define Py_LeaveRecursiveCall() \
- do{ if(_Py_MakeEndRecCheck(PyThreadState_GET()->recursion_depth)) \
- PyThreadState_GET()->overflowed = 0; \
- } while(0)
-PyAPI_FUNC(int) _Py_CheckRecursiveCall(const char *where);
-
-/* Due to the macros in which it's used, _Py_CheckRecursionLimit is in
- the stable ABI. It should be removed therefrom when possible.
-*/
-PyAPI_DATA(int) _Py_CheckRecursionLimit;
-
-#ifdef USE_STACKCHECK
-/* With USE_STACKCHECK, trigger stack checks in _Py_CheckRecursiveCall()
- on every 64th call to Py_EnterRecursiveCall.
-*/
-# define _Py_MakeRecCheck(x) \
- (++(x) > _Py_CheckRecursionLimit || \
- ++(PyThreadState_GET()->stackcheck_counter) > 64)
-#else
-# define _Py_MakeRecCheck(x) (++(x) > _Py_CheckRecursionLimit)
-#endif
-
-/* Compute the "lower-water mark" for a recursion limit. When
- * Py_LeaveRecursiveCall() is called with a recursion depth below this mark,
- * the overflowed flag is reset to 0. */
-#define _Py_RecursionLimitLowerWaterMark(limit) \
- (((limit) > 200) \
- ? ((limit) - 50) \
- : (3 * ((limit) >> 2)))
-
-#define _Py_MakeEndRecCheck(x) \
- (--(x) < _Py_RecursionLimitLowerWaterMark(_Py_CheckRecursionLimit))
+PyAPI_FUNC(int) Py_EnterRecursiveCall(const char *where);
+PyAPI_FUNC(void) Py_LeaveRecursiveCall(void);
#define Py_ALLOW_RECURSION \
do { unsigned char _old = PyThreadState_GET()->recursion_critical;\
@@ -133,11 +78,8 @@ PyAPI_DATA(int) _Py_CheckRecursionLimit;
PyAPI_FUNC(const char *) PyEval_GetFuncName(PyObject *);
PyAPI_FUNC(const char *) PyEval_GetFuncDesc(PyObject *);
-PyAPI_FUNC(PyObject *) PyEval_EvalFrame(struct _frame *);
-PyAPI_FUNC(PyObject *) PyEval_EvalFrameEx(struct _frame *f, int exc);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(struct _frame *f, int exc);
-#endif
+PyAPI_FUNC(PyObject *) PyEval_EvalFrame(PyFrameObject *);
+PyAPI_FUNC(PyObject *) PyEval_EvalFrameEx(PyFrameObject *f, int exc);
/* Interface for threads.
@@ -177,9 +119,6 @@ PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(struct _frame *f, int exc);
WARNING: NEVER NEST CALLS TO Py_BEGIN_ALLOW_THREADS AND
Py_END_ALLOW_THREADS!!!
- The function PyEval_InitThreads() should be called only from
- init_thread() in "_threadmodule.c".
-
Note that not yet all candidates have been converted to use this
mechanism!
*/
@@ -187,22 +126,17 @@ PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(struct _frame *f, int exc);
PyAPI_FUNC(PyThreadState *) PyEval_SaveThread(void);
PyAPI_FUNC(void) PyEval_RestoreThread(PyThreadState *);
-PyAPI_FUNC(int) PyEval_ThreadsInitialized(void);
-PyAPI_FUNC(void) PyEval_InitThreads(void);
+Py_DEPRECATED(3.9) PyAPI_FUNC(int) PyEval_ThreadsInitialized(void);
+Py_DEPRECATED(3.9) PyAPI_FUNC(void) PyEval_InitThreads(void);
+/* PyEval_AcquireLock() and PyEval_ReleaseLock() are part of the stable ABI.
+ * They will be removed from this header file in a future version,
+ * but they will remain in the ABI until Python 4.0.
+ */
Py_DEPRECATED(3.2) PyAPI_FUNC(void) PyEval_AcquireLock(void);
-/* Py_DEPRECATED(3.2) */ PyAPI_FUNC(void) PyEval_ReleaseLock(void);
+Py_DEPRECATED(3.2) PyAPI_FUNC(void) PyEval_ReleaseLock(void);
PyAPI_FUNC(void) PyEval_AcquireThread(PyThreadState *tstate);
PyAPI_FUNC(void) PyEval_ReleaseThread(PyThreadState *tstate);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(void) _PyEval_SetSwitchInterval(unsigned long microseconds);
-PyAPI_FUNC(unsigned long) _PyEval_GetSwitchInterval(void);
-#endif
-
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc);
-#endif
-
#define Py_BEGIN_ALLOW_THREADS { \
PyThreadState *_save; \
_save = PyEval_SaveThread();
@@ -211,11 +145,6 @@ PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc);
#define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \
}
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
-PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
-#endif
-
/* Masks and values used by FORMAT_VALUE opcode. */
#define FVC_MASK 0x3
#define FVC_NONE 0x0
@@ -225,6 +154,12 @@ PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
#define FVS_MASK 0x4
#define FVS_HAVE_SPEC 0x4
+#ifndef Py_LIMITED_API
+# define Py_CPYTHON_CEVAL_H
+# include "cpython/ceval.h"
+# undef Py_CPYTHON_CEVAL_H
+#endif
+
#ifdef __cplusplus
}
#endif
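
Py_EnterRecursiveCall() and Py_LeaveRecursiveCall() are now real functions
instead of macros reaching into the thread state. A hedged usage sketch:

#include <Python.h>

static PyObject *
careful_repr(PyObject *arg)
{
    PyObject *res;
    if (Py_EnterRecursiveCall(" while getting a repr")) {
        return NULL;                /* RecursionError already set */
    }
    res = PyObject_Repr(arg);
    Py_LeaveRecursiveCall();
    return res;
}
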
diff --git a/x64/include/classobject.h b/x64/include/classobject.h
index c83303c..1952f67 100644
--- a/x64/include/classobject.h
+++ b/x64/include/classobject.h
@@ -19,7 +19,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyMethod_Type;
-#define PyMethod_Check(op) ((op)->ob_type == &PyMethod_Type)
+#define PyMethod_Check(op) Py_IS_TYPE(op, &PyMethod_Type)
PyAPI_FUNC(PyObject *) PyMethod_New(PyObject *, PyObject *);
@@ -33,8 +33,6 @@ PyAPI_FUNC(PyObject *) PyMethod_Self(PyObject *);
#define PyMethod_GET_SELF(meth) \
(((PyMethodObject *)meth) -> im_self)
-PyAPI_FUNC(int) PyMethod_ClearFreeList(void);
-
typedef struct {
PyObject_HEAD
PyObject *func;
@@ -42,7 +40,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyInstanceMethod_Type;
-#define PyInstanceMethod_Check(op) ((op)->ob_type == &PyInstanceMethod_Type)
+#define PyInstanceMethod_Check(op) Py_IS_TYPE(op, &PyInstanceMethod_Type)
PyAPI_FUNC(PyObject *) PyInstanceMethod_New(PyObject *);
PyAPI_FUNC(PyObject *) PyInstanceMethod_Function(PyObject *);
diff --git a/x64/include/code.h b/x64/include/code.h
index 3afddd2..b9e23eb 100644
--- a/x64/include/code.h
+++ b/x64/include/code.h
@@ -1,180 +1,20 @@
/* Definitions for bytecode */
-#ifndef Py_LIMITED_API
#ifndef Py_CODE_H
#define Py_CODE_H
#ifdef __cplusplus
extern "C" {
#endif
-typedef uint16_t _Py_CODEUNIT;
-
-#ifdef WORDS_BIGENDIAN
-# define _Py_OPCODE(word) ((word) >> 8)
-# define _Py_OPARG(word) ((word) & 255)
-#else
-# define _Py_OPCODE(word) ((word) & 255)
-# define _Py_OPARG(word) ((word) >> 8)
-#endif
-
-typedef struct _PyOpcache _PyOpcache;
-
-/* Bytecode object */
-typedef struct {
- PyObject_HEAD
- int co_argcount; /* #arguments, except *args */
- int co_posonlyargcount; /* #positional only arguments */
- int co_kwonlyargcount; /* #keyword only arguments */
- int co_nlocals; /* #local variables */
- int co_stacksize; /* #entries needed for evaluation stack */
- int co_flags; /* CO_..., see below */
- int co_firstlineno; /* first source line number */
- PyObject *co_code; /* instruction opcodes */
- PyObject *co_consts; /* list (constants used) */
- PyObject *co_names; /* list of strings (names used) */
- PyObject *co_varnames; /* tuple of strings (local variable names) */
- PyObject *co_freevars; /* tuple of strings (free variable names) */
- PyObject *co_cellvars; /* tuple of strings (cell variable names) */
- /* The rest aren't used in either hash or comparisons, except for co_name,
- used in both. This is done to preserve the name and line number
- for tracebacks and debuggers; otherwise, constant de-duplication
- would collapse identical functions/lambdas defined on different lines.
- */
- Py_ssize_t *co_cell2arg; /* Maps cell vars which are arguments. */
- PyObject *co_filename; /* unicode (where it was loaded from) */
- PyObject *co_name; /* unicode (name, for reference) */
- PyObject *co_lnotab; /* string (encoding addr<->lineno mapping) See
- Objects/lnotab_notes.txt for details. */
- void *co_zombieframe; /* for optimization only (see frameobject.c) */
- PyObject *co_weakreflist; /* to support weakrefs to code objects */
- /* Scratch space for extra data relating to the code object.
- Type is a void* to keep the format private in codeobject.c to force
- people to go through the proper APIs. */
- void *co_extra;
-
- /* Per opcodes just-in-time cache
- *
- * To reduce cache size, we use indirect mapping from opcode index to
- * cache object:
- * cache = co_opcache[co_opcache_map[next_instr - first_instr] - 1]
- */
-
- // co_opcache_map is indexed by (next_instr - first_instr).
- // * 0 means there is no cache for this opcode.
- // * n > 0 means there is cache in co_opcache[n-1].
- unsigned char *co_opcache_map;
- _PyOpcache *co_opcache;
- int co_opcache_flag; // used to determine when create a cache.
- unsigned char co_opcache_size; // length of co_opcache.
-} PyCodeObject;
-
-/* Masks for co_flags above */
-#define CO_OPTIMIZED 0x0001
-#define CO_NEWLOCALS 0x0002
-#define CO_VARARGS 0x0004
-#define CO_VARKEYWORDS 0x0008
-#define CO_NESTED 0x0010
-#define CO_GENERATOR 0x0020
-/* The CO_NOFREE flag is set if there are no free or cell variables.
- This information is redundant, but it allows a single flag test
- to determine whether there is any extra work to be done when the
- call frame it setup.
-*/
-#define CO_NOFREE 0x0040
-
-/* The CO_COROUTINE flag is set for coroutine functions (defined with
- ``async def`` keywords) */
-#define CO_COROUTINE 0x0080
-#define CO_ITERABLE_COROUTINE 0x0100
-#define CO_ASYNC_GENERATOR 0x0200
-
-/* These are no longer used. */
-#if 0
-#define CO_GENERATOR_ALLOWED 0x1000
-#endif
-#define CO_FUTURE_DIVISION 0x2000
-#define CO_FUTURE_ABSOLUTE_IMPORT 0x4000 /* do absolute imports by default */
-#define CO_FUTURE_WITH_STATEMENT 0x8000
-#define CO_FUTURE_PRINT_FUNCTION 0x10000
-#define CO_FUTURE_UNICODE_LITERALS 0x20000
-
-#define CO_FUTURE_BARRY_AS_BDFL 0x40000
-#define CO_FUTURE_GENERATOR_STOP 0x80000
-#define CO_FUTURE_ANNOTATIONS 0x100000
-
-/* This value is found in the co_cell2arg array when the associated cell
- variable does not correspond to an argument. */
-#define CO_CELL_NOT_AN_ARG (-1)
-
-/* This should be defined if a future statement modifies the syntax.
- For example, when a keyword is added.
-*/
-#define PY_PARSER_REQUIRES_FUTURE_KEYWORD
-
-#define CO_MAXBLOCKS 20 /* Max static block nesting within a function */
-
-PyAPI_DATA(PyTypeObject) PyCode_Type;
-
-#define PyCode_Check(op) (Py_TYPE(op) == &PyCode_Type)
-#define PyCode_GetNumFree(op) (PyTuple_GET_SIZE((op)->co_freevars))
-
-/* Public interface */
-PyAPI_FUNC(PyCodeObject *) PyCode_New(
- int, int, int, int, int, PyObject *, PyObject *,
- PyObject *, PyObject *, PyObject *, PyObject *,
- PyObject *, PyObject *, int, PyObject *);
-
-PyAPI_FUNC(PyCodeObject *) PyCode_NewWithPosOnlyArgs(
- int, int, int, int, int, int, PyObject *, PyObject *,
- PyObject *, PyObject *, PyObject *, PyObject *,
- PyObject *, PyObject *, int, PyObject *);
- /* same as struct above */
-
-/* Creates a new empty code object with the specified source location. */
-PyAPI_FUNC(PyCodeObject *)
-PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno);
-
-/* Return the line number associated with the specified bytecode index
- in this code object. If you just need the line number of a frame,
- use PyFrame_GetLineNumber() instead. */
-PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int);
-
-/* for internal use only */
-typedef struct _addr_pair {
- int ap_lower;
- int ap_upper;
-} PyAddrPair;
-
-#ifndef Py_LIMITED_API
-/* Update *bounds to describe the first and one-past-the-last instructions in the
- same line as lasti. Return the number of that line.
-*/
-PyAPI_FUNC(int) _PyCode_CheckLineNumber(PyCodeObject* co,
- int lasti, PyAddrPair *bounds);
-
-/* Create a comparable key used to compare constants taking in account the
- * object type. It is used to make sure types are not coerced (e.g., float and
- * complex) _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms
- *
- * Return (type(obj), obj, ...): a tuple with variable size (at least 2 items)
- * depending on the type and the value. The type is the first item to not
- * compare bytes and str which can raise a BytesWarning exception. */
-PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj);
-#endif
-
-PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts,
- PyObject *names, PyObject *lnotab);
-
+typedef struct PyCodeObject PyCodeObject;
#ifndef Py_LIMITED_API
-PyAPI_FUNC(int) _PyCode_GetExtra(PyObject *code, Py_ssize_t index,
- void **extra);
-PyAPI_FUNC(int) _PyCode_SetExtra(PyObject *code, Py_ssize_t index,
- void *extra);
+# define Py_CPYTHON_CODE_H
+# include "cpython/code.h"
+# undef Py_CPYTHON_CODE_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_CODE_H */
-#endif /* Py_LIMITED_API */
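
Under Py_LIMITED_API, code.h now exposes only the opaque typedef, so struct
fields such as co_name are out of reach. A hedged sketch of reading them
through the generic attribute API instead:

#include <Python.h>

static PyObject *
code_name(PyObject *code)
{
    return PyObject_GetAttrString(code, "co_name");   /* new reference */
}
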
diff --git a/x64/include/compile.h b/x64/include/compile.h
index 1cda955..98adee3 100644
--- a/x64/include/compile.h
+++ b/x64/include/compile.h
@@ -2,7 +2,6 @@
#define Py_COMPILE_H
#ifndef Py_LIMITED_API
-#include "code.h"
#ifdef __cplusplus
extern "C" {
@@ -10,6 +9,9 @@ extern "C" {
/* Public interface */
struct _node; /* Declare the existence of this type */
+#ifndef Py_BUILD_CORE
+Py_DEPRECATED(3.9)
+#endif
PyAPI_FUNC(PyCodeObject *) PyNode_Compile(struct _node *, const char *);
/* XXX (ncoghlan): Unprefixed type name in a public API! */
@@ -18,12 +20,18 @@ PyAPI_FUNC(PyCodeObject *) PyNode_Compile(struct _node *, const char *);
CO_FUTURE_UNICODE_LITERALS | CO_FUTURE_BARRY_AS_BDFL | \
CO_FUTURE_GENERATOR_STOP | CO_FUTURE_ANNOTATIONS)
#define PyCF_MASK_OBSOLETE (CO_NESTED)
+
+/* bpo-39562: CO_FUTURE_ and PyCF_ constants must be kept unique.
+ PyCF_ constants can use bits from 0x0100 to 0x10000.
+ CO_FUTURE_ constants use bits starting at 0x20000. */
#define PyCF_SOURCE_IS_UTF8 0x0100
#define PyCF_DONT_IMPLY_DEDENT 0x0200
#define PyCF_ONLY_AST 0x0400
#define PyCF_IGNORE_COOKIE 0x0800
#define PyCF_TYPE_COMMENTS 0x1000
#define PyCF_ALLOW_TOP_LEVEL_AWAIT 0x2000
+#define PyCF_COMPILE_MASK (PyCF_ONLY_AST | PyCF_ALLOW_TOP_LEVEL_AWAIT | \
+ PyCF_TYPE_COMMENTS | PyCF_DONT_IMPLY_DEDENT)
#ifndef Py_LIMITED_API
typedef struct {
@@ -83,7 +91,12 @@ PyAPI_FUNC(PyObject*) _Py_Mangle(PyObject *p, PyObject *name);
PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg);
PyAPI_FUNC(int) PyCompile_OpcodeStackEffectWithJump(int opcode, int oparg, int jump);
-PyAPI_FUNC(int) _PyAST_Optimize(struct _mod *, PyArena *arena, int optimize);
+typedef struct {
+ int optimize;
+ int ff_features;
+} _PyASTOptimizeState;
+
+PyAPI_FUNC(int) _PyAST_Optimize(struct _mod *, PyArena *arena, _PyASTOptimizeState *state);
#ifdef __cplusplus
}
@@ -97,4 +110,7 @@ PyAPI_FUNC(int) _PyAST_Optimize(struct _mod *, PyArena *arena, int optimize);
#define Py_eval_input 258
#define Py_func_type_input 345
+/* This doesn't need to match anything */
+#define Py_fstring_input 800
+
#endif /* !Py_COMPILE_H */
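
A hedged sketch of one PyCF_COMPILE_MASK bit in use: PyCF_ONLY_AST stops
compilation after parsing and returns an ast object. The PyCompilerFlags
initializer order (cf_flags, then cf_feature_version) is assumed from the
non-limited headers.

#include <Python.h>

static PyObject *
parse_to_ast(const char *source)
{
    PyCompilerFlags flags = {PyCF_ONLY_AST, PY_MINOR_VERSION};
    return Py_CompileStringFlags(source, "<input>", Py_file_input, &flags);
}
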
diff --git a/x64/include/complexobject.h b/x64/include/complexobject.h
index cb8c52c..9221f9c 100644
--- a/x64/include/complexobject.h
+++ b/x64/include/complexobject.h
@@ -39,7 +39,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyComplex_Type;
#define PyComplex_Check(op) PyObject_TypeCheck(op, &PyComplex_Type)
-#define PyComplex_CheckExact(op) (Py_TYPE(op) == &PyComplex_Type)
+#define PyComplex_CheckExact(op) Py_IS_TYPE(op, &PyComplex_Type)
#ifndef Py_LIMITED_API
PyAPI_FUNC(PyObject *) PyComplex_FromCComplex(Py_complex);
diff --git a/x64/include/context.h b/x64/include/context.h
index 9581285..4e50070 100644
--- a/x64/include/context.h
+++ b/x64/include/context.h
@@ -17,9 +17,9 @@ PyAPI_DATA(PyTypeObject) PyContextToken_Type;
typedef struct _pycontexttokenobject PyContextToken;
-#define PyContext_CheckExact(o) (Py_TYPE(o) == &PyContext_Type)
-#define PyContextVar_CheckExact(o) (Py_TYPE(o) == &PyContextVar_Type)
-#define PyContextToken_CheckExact(o) (Py_TYPE(o) == &PyContextToken_Type)
+#define PyContext_CheckExact(o) Py_IS_TYPE(o, &PyContext_Type)
+#define PyContextVar_CheckExact(o) Py_IS_TYPE(o, &PyContextVar_Type)
+#define PyContextToken_CheckExact(o) Py_IS_TYPE(o, &PyContextToken_Type)
PyAPI_FUNC(PyObject *) PyContext_New(void);
@@ -73,9 +73,6 @@ PyAPI_FUNC(int) PyContextVar_Reset(PyObject *var, PyObject *token);
PyAPI_FUNC(PyObject *) _PyContext_NewHamtForTests(void);
-PyAPI_FUNC(int) PyContext_ClearFreeList(void);
-
-
#endif /* !Py_LIMITED_API */
#ifdef __cplusplus
diff --git a/x64/include/cpython/abstract.h b/x64/include/cpython/abstract.h
index 2ea3209..7bc8083 100644
--- a/x64/include/cpython/abstract.h
+++ b/x64/include/cpython/abstract.h
@@ -26,28 +26,10 @@ PyAPI_FUNC(PyObject *) _PyStack_AsDict(
PyObject *const *values,
PyObject *kwnames);
-/* Convert (args, nargs, kwargs: dict) into a (stack, nargs, kwnames: tuple).
-
- Return 0 on success, raise an exception and return -1 on error.
-
- Write the new stack into *p_stack. If *p_stack is differen than args, it
- must be released by PyMem_Free().
-
- The stack uses borrowed references.
-
- The type of keyword keys is not checked, these checks should be done
- later (ex: _PyArg_ParseStackAndKeywords). */
-PyAPI_FUNC(int) _PyStack_UnpackDict(
- PyObject *const *args,
- Py_ssize_t nargs,
- PyObject *kwargs,
- PyObject *const **p_stack,
- PyObject **p_kwnames);
-
/* Suggested size (number of positional arguments) for arrays of PyObject*
   allocated on a C stack to avoid allocating memory on the heap. Such an
   array is used to pass positional arguments to call functions of the
- _PyObject_Vectorcall() family.
+ PyObject_Vectorcall() family.
The size is chosen to not abuse the C stack and so limit the risk of stack
overflow. The size is also chosen to allow using the small stack for most
@@ -55,16 +37,19 @@ PyAPI_FUNC(int) _PyStack_UnpackDict(
40 bytes on the stack. */
#define _PY_FASTCALL_SMALL_STACK 5
-PyAPI_FUNC(PyObject *) _Py_CheckFunctionResult(PyObject *callable,
- PyObject *result,
- const char *where);
+PyAPI_FUNC(PyObject *) _Py_CheckFunctionResult(
+ PyThreadState *tstate,
+ PyObject *callable,
+ PyObject *result,
+ const char *where);
/* === Vectorcall protocol (PEP 590) ============================= */
-/* Call callable using tp_call. Arguments are like _PyObject_Vectorcall()
- or _PyObject_FastCallDict() (both forms are supported),
+/* Call callable using tp_call. Arguments are like PyObject_Vectorcall()
+ or PyObject_FastCallDict() (both forms are supported),
except that nargs is plainly the number of arguments without flags. */
PyAPI_FUNC(PyObject *) _PyObject_MakeTpCall(
+ PyThreadState *tstate,
PyObject *callable,
PyObject *const *args, Py_ssize_t nargs,
PyObject *keywords);
@@ -78,17 +63,21 @@ PyVectorcall_NARGS(size_t n)
}
static inline vectorcallfunc
-_PyVectorcall_Function(PyObject *callable)
+PyVectorcall_Function(PyObject *callable)
{
- PyTypeObject *tp = Py_TYPE(callable);
- Py_ssize_t offset = tp->tp_vectorcall_offset;
+ PyTypeObject *tp;
+ Py_ssize_t offset;
vectorcallfunc *ptr;
- if (!PyType_HasFeature(tp, _Py_TPFLAGS_HAVE_VECTORCALL)) {
+
+ assert(callable != NULL);
+ tp = Py_TYPE(callable);
+ if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) {
return NULL;
}
assert(PyCallable_Check(callable));
+ offset = tp->tp_vectorcall_offset;
assert(offset > 0);
- ptr = (vectorcallfunc*)(((char *)callable) + offset);
+ ptr = (vectorcallfunc *)(((char *)callable) + offset);
return *ptr;
}
@@ -106,31 +95,51 @@ _PyVectorcall_Function(PyObject *callable)
of keyword arguments does not change nargsf). kwnames can also be NULL if
there are no keyword arguments.
- keywords must only contains str strings (no subclass), and all keys must
- be unique.
+ keywords must only contain strings and all keys must be unique.
Return the result on success. Raise an exception and return NULL on
error. */
static inline PyObject *
-_PyObject_Vectorcall(PyObject *callable, PyObject *const *args,
- size_t nargsf, PyObject *kwnames)
+_PyObject_VectorcallTstate(PyThreadState *tstate, PyObject *callable,
+ PyObject *const *args, size_t nargsf,
+ PyObject *kwnames)
{
- PyObject *res;
vectorcallfunc func;
+ PyObject *res;
+
assert(kwnames == NULL || PyTuple_Check(kwnames));
assert(args != NULL || PyVectorcall_NARGS(nargsf) == 0);
- func = _PyVectorcall_Function(callable);
+
+ func = PyVectorcall_Function(callable);
if (func == NULL) {
Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
- return _PyObject_MakeTpCall(callable, args, nargs, kwnames);
+ return _PyObject_MakeTpCall(tstate, callable, args, nargs, kwnames);
}
res = func(callable, args, nargsf, kwnames);
- return _Py_CheckFunctionResult(callable, res, NULL);
+ return _Py_CheckFunctionResult(tstate, callable, res, NULL);
}
-/* Same as _PyObject_Vectorcall except that keyword arguments are passed as
+static inline PyObject *
+PyObject_Vectorcall(PyObject *callable, PyObject *const *args,
+ size_t nargsf, PyObject *kwnames)
+{
+ PyThreadState *tstate = PyThreadState_GET();
+ return _PyObject_VectorcallTstate(tstate, callable,
+ args, nargsf, kwnames);
+}
+
+// Backwards compatibility aliases for API that was provisional in Python 3.8
+#define _PyObject_Vectorcall PyObject_Vectorcall
+#define _PyObject_VectorcallMethod PyObject_VectorcallMethod
+#define _PyObject_FastCallDict PyObject_VectorcallDict
+#define _PyVectorcall_Function PyVectorcall_Function
+#define _PyObject_CallOneArg PyObject_CallOneArg
+#define _PyObject_CallMethodNoArgs PyObject_CallMethodNoArgs
+#define _PyObject_CallMethodOneArg PyObject_CallMethodOneArg
+
+/* Same as PyObject_Vectorcall except that keyword arguments are passed as
dict, which may be NULL if there are no keyword arguments. */
-PyAPI_FUNC(PyObject *) _PyObject_FastCallDict(
+PyAPI_FUNC(PyObject *) PyObject_VectorcallDict(
PyObject *callable,
PyObject *const *args,
size_t nargsf,
@@ -140,30 +149,65 @@ PyAPI_FUNC(PyObject *) _PyObject_FastCallDict(
"tuple" and keyword arguments "dict". "dict" may also be NULL */
PyAPI_FUNC(PyObject *) PyVectorcall_Call(PyObject *callable, PyObject *tuple, PyObject *dict);
-/* Same as _PyObject_Vectorcall except without keyword arguments */
+static inline PyObject *
+_PyObject_FastCallTstate(PyThreadState *tstate, PyObject *func, PyObject *const *args, Py_ssize_t nargs)
+{
+ return _PyObject_VectorcallTstate(tstate, func, args, (size_t)nargs, NULL);
+}
+
+/* Same as PyObject_Vectorcall except without keyword arguments */
static inline PyObject *
_PyObject_FastCall(PyObject *func, PyObject *const *args, Py_ssize_t nargs)
{
- return _PyObject_Vectorcall(func, args, (size_t)nargs, NULL);
+ PyThreadState *tstate = PyThreadState_GET();
+ return _PyObject_FastCallTstate(tstate, func, args, nargs);
}
-/* Call a callable without any arguments */
+/* Call a callable without any arguments
+ Private static inline function variant of public function
+ PyObject_CallNoArgs(). */
static inline PyObject *
_PyObject_CallNoArg(PyObject *func) {
- return _PyObject_Vectorcall(func, NULL, 0, NULL);
+ PyThreadState *tstate = PyThreadState_GET();
+ return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
}
-PyAPI_FUNC(PyObject *) _PyObject_Call_Prepend(
- PyObject *callable,
- PyObject *obj,
- PyObject *args,
- PyObject *kwargs);
+static inline PyObject *
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+ PyObject *_args[2];
+ PyObject **args;
+ PyThreadState *tstate;
+ size_t nargsf;
+
+ assert(arg != NULL);
+ args = _args + 1; // For PY_VECTORCALL_ARGUMENTS_OFFSET
+ args[0] = arg;
+ tstate = PyThreadState_GET();
+ nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
+ return _PyObject_VectorcallTstate(tstate, func, args, nargsf, NULL);
+}
-PyAPI_FUNC(PyObject *) _PyObject_FastCall_Prepend(
- PyObject *callable,
- PyObject *obj,
- PyObject *const *args,
- Py_ssize_t nargs);
+PyAPI_FUNC(PyObject *) PyObject_VectorcallMethod(
+ PyObject *name, PyObject *const *args,
+ size_t nargsf, PyObject *kwnames);
+
+static inline PyObject *
+PyObject_CallMethodNoArgs(PyObject *self, PyObject *name)
+{
+ return PyObject_VectorcallMethod(name, &self,
+ 1 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
+}
+
+static inline PyObject *
+PyObject_CallMethodOneArg(PyObject *self, PyObject *name, PyObject *arg)
+{
+ PyObject *args[2] = {self, arg};
+
+ assert(arg != NULL);
+ return PyObject_VectorcallMethod(name, args,
+ 2 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
+}
/* Like PyObject_CallMethod(), but expect a _Py_Identifier*
as the method name. */
@@ -181,6 +225,35 @@ PyAPI_FUNC(PyObject *) _PyObject_CallMethodIdObjArgs(
struct _Py_Identifier *name,
...);
+static inline PyObject *
+_PyObject_VectorcallMethodId(
+ _Py_Identifier *name, PyObject *const *args,
+ size_t nargsf, PyObject *kwnames)
+{
+ PyObject *oname = _PyUnicode_FromId(name); /* borrowed */
+ if (!oname) {
+ return NULL;
+ }
+ return PyObject_VectorcallMethod(oname, args, nargsf, kwnames);
+}
+
+static inline PyObject *
+_PyObject_CallMethodIdNoArgs(PyObject *self, _Py_Identifier *name)
+{
+ return _PyObject_VectorcallMethodId(name, &self,
+ 1 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
+}
+
+static inline PyObject *
+_PyObject_CallMethodIdOneArg(PyObject *self, _Py_Identifier *name, PyObject *arg)
+{
+ PyObject *args[2] = {self, arg};
+
+ assert(arg != NULL);
+ return _PyObject_VectorcallMethodId(name, args,
+ 2 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
+}
+
PyAPI_FUNC(int) _PyObject_HasLen(PyObject *o);
/* Guess the size of object 'o' using len(o) or o.__length_hint__().
@@ -191,9 +264,7 @@ PyAPI_FUNC(Py_ssize_t) PyObject_LengthHint(PyObject *o, Py_ssize_t);
/* === New Buffer API ============================================ */
/* Return 1 if the getbuffer function is available, otherwise return 0. */
-#define PyObject_CheckBuffer(obj) \
- (((obj)->ob_type->tp_as_buffer != NULL) && \
- ((obj)->ob_type->tp_as_buffer->bf_getbuffer != NULL))
+PyAPI_FUNC(int) PyObject_CheckBuffer(PyObject *obj);
/* This is a C-API version of the getbuffer function call. It checks
to make sure object has the required function pointer and issues the
@@ -209,7 +280,7 @@ PyAPI_FUNC(void *) PyBuffer_GetPointer(Py_buffer *view, Py_ssize_t *indices);
/* Return the implied itemsize of the data-format area from a
struct-style description. */
-PyAPI_FUNC(int) PyBuffer_SizeFromFormat(const char *);
+PyAPI_FUNC(Py_ssize_t) PyBuffer_SizeFromFormat(const char *format);
/* Implementation in memoryobject.c */
PyAPI_FUNC(int) PyBuffer_ToContiguous(void *buf, Py_buffer *view,
@@ -261,14 +332,8 @@ PyAPI_FUNC(void) PyBuffer_Release(Py_buffer *view);
/* ==== Iterators ================================================ */
#define PyIter_Check(obj) \
- ((obj)->ob_type->tp_iternext != NULL && \
- (obj)->ob_type->tp_iternext != &_PyObject_NextNotImplemented)
-
-/* === Number Protocol ================================================== */
-
-#define PyIndex_Check(obj) \
- ((obj)->ob_type->tp_as_number != NULL && \
- (obj)->ob_type->tp_as_number->nb_index != NULL)
+ (Py_TYPE(obj)->tp_iternext != NULL && \
+ Py_TYPE(obj)->tp_iternext != &_PyObject_NextNotImplemented)
/* === Sequence protocol ================================================ */
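
A sketch of the vectorcall API made public in this header (PEP 590):
positional arguments travel in a plain C array, so no argument tuple is
allocated.

#include <Python.h>

static PyObject *
add_two(PyObject *func, PyObject *x, PyObject *y)
{
    PyObject *args[2] = {x, y};
    return PyObject_Vectorcall(func, args, 2, NULL);  /* new ref or NULL */
}
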
diff --git a/x64/include/cpython/bytearrayobject.h b/x64/include/cpython/bytearrayobject.h
new file mode 100644
index 0000000..569b0cd
--- /dev/null
+++ b/x64/include/cpython/bytearrayobject.h
@@ -0,0 +1,20 @@
+#ifndef Py_CPYTHON_BYTEARRAYOBJECT_H
+# error "this header file must not be included directly"
+#endif
+
+/* Object layout */
+typedef struct {
+ PyObject_VAR_HEAD
+ Py_ssize_t ob_alloc; /* How many bytes allocated in ob_bytes */
+ char *ob_bytes; /* Physical backing buffer */
+ char *ob_start; /* Logical start inside ob_bytes */
+ Py_ssize_t ob_exports; /* How many buffer exports */
+} PyByteArrayObject;
+
+/* Macros, trading safety for speed */
+#define PyByteArray_AS_STRING(self) \
+ (assert(PyByteArray_Check(self)), \
+ Py_SIZE(self) ? ((PyByteArrayObject *)(self))->ob_start : _PyByteArray_empty_string)
+#define PyByteArray_GET_SIZE(self) (assert(PyByteArray_Check(self)), Py_SIZE(self))
+
+PyAPI_DATA(char) _PyByteArray_empty_string[];
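
A hedged sketch of the fast accessors above; they assert rather than
type-check, so the caller must already hold a bytearray.

#include <Python.h>
#include <string.h>

static void
fill_bytearray(PyObject *ba, char c)
{
    memset(PyByteArray_AS_STRING(ba), c, (size_t)PyByteArray_GET_SIZE(ba));
}
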
diff --git a/x64/include/cpython/bytesobject.h b/x64/include/cpython/bytesobject.h
new file mode 100644
index 0000000..f284c58
--- /dev/null
+++ b/x64/include/cpython/bytesobject.h
@@ -0,0 +1,118 @@
+#ifndef Py_CPYTHON_BYTESOBJECT_H
+# error "this header file must not be included directly"
+#endif
+
+typedef struct {
+ PyObject_VAR_HEAD
+ Py_hash_t ob_shash;
+ char ob_sval[1];
+
+ /* Invariants:
+ * ob_sval contains space for 'ob_size+1' elements.
+ * ob_sval[ob_size] == 0.
+ * ob_shash is the hash of the string or -1 if not computed yet.
+ */
+} PyBytesObject;
+
+PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t);
+PyAPI_FUNC(PyObject*) _PyBytes_FormatEx(
+ const char *format,
+ Py_ssize_t format_len,
+ PyObject *args,
+ int use_bytearray);
+PyAPI_FUNC(PyObject*) _PyBytes_FromHex(
+ PyObject *string,
+ int use_bytearray);
+
+/* Helper for PyBytes_DecodeEscape that detects invalid escape chars. */
+PyAPI_FUNC(PyObject *) _PyBytes_DecodeEscape(const char *, Py_ssize_t,
+ const char *, const char **);
+
+/* Macro, trading safety for speed */
+#define PyBytes_AS_STRING(op) (assert(PyBytes_Check(op)), \
+ (((PyBytesObject *)(op))->ob_sval))
+#define PyBytes_GET_SIZE(op) (assert(PyBytes_Check(op)),Py_SIZE(op))
+
+/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*,
+ x must be an iterable object. */
+PyAPI_FUNC(PyObject *) _PyBytes_Join(PyObject *sep, PyObject *x);
+
+
+/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer".
+ A _PyBytesWriter variable must be declared at the end of variables in a
+ function to optimize the memory allocation on the stack. */
+typedef struct {
+ /* bytes, bytearray or NULL (when the small buffer is used) */
+ PyObject *buffer;
+
+    /* Number of allocated bytes. */
+ Py_ssize_t allocated;
+
+ /* Minimum number of allocated bytes,
+ incremented by _PyBytesWriter_Prepare() */
+ Py_ssize_t min_size;
+
+ /* If non-zero, use a bytearray instead of a bytes object for buffer. */
+ int use_bytearray;
+
+ /* If non-zero, overallocate the buffer (default: 0).
+ This flag must be zero if use_bytearray is non-zero. */
+ int overallocate;
+
+ /* Stack buffer */
+ int use_small_buffer;
+ char small_buffer[512];
+} _PyBytesWriter;
+
+/* Initialize a bytes writer
+
+ By default, the overallocation is disabled. Set the overallocate attribute
+ to control the allocation of the buffer. */
+PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer);
+
+/* Get the buffer content and reset the writer.
+ Return a bytes object, or a bytearray object if use_bytearray is non-zero.
+ Raise an exception and return NULL on error. */
+PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer,
+ void *str);
+
+/* Deallocate memory of a writer (clear its internal buffer). */
+PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer);
+
+/* Allocate the buffer to write size bytes.
+ Return the pointer to the beginning of buffer data.
+ Raise an exception and return NULL on error. */
+PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer,
+ Py_ssize_t size);
+
+/* Ensure that the buffer is large enough to write *size* bytes.
+ Add size to the writer minimum size (min_size attribute).
+
+ str is the current pointer inside the buffer.
+ Return the updated current pointer inside the buffer.
+ Raise an exception and return NULL on error. */
+PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer,
+ void *str,
+ Py_ssize_t size);
+
+/* Resize the buffer to make it larger.
+ The new buffer may be larger than size bytes because of overallocation.
+ Return the updated current pointer inside the buffer.
+ Raise an exception and return NULL on error.
+
+ Note: size must be greater than the number of allocated bytes in the writer.
+
+ This function doesn't use the writer minimum size (min_size attribute).
+
+ See also _PyBytesWriter_Prepare().
+ */
+PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer,
+ void *str,
+ Py_ssize_t size);
+
+/* Write bytes.
+ Raise an exception and return NULL on error. */
+PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer,
+ void *str,
+ const void *bytes,
+ Py_ssize_t size);
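
A hedged usage sketch of the private writer API above: allocate, fill, then
call Finish() with the current write pointer.

#include <Python.h>
#include <string.h>

static PyObject *
repeat_byte(char c, Py_ssize_t n)
{
    _PyBytesWriter writer;
    char *p;

    _PyBytesWriter_Init(&writer);
    p = _PyBytesWriter_Alloc(&writer, n);   /* points at start of buffer */
    if (p == NULL) {
        return NULL;
    }
    memset(p, c, (size_t)n);
    return _PyBytesWriter_Finish(&writer, p + n);
}
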
diff --git a/x64/include/cpython/ceval.h b/x64/include/cpython/ceval.h
new file mode 100644
index 0000000..e1922a6
--- /dev/null
+++ b/x64/include/cpython/ceval.h
@@ -0,0 +1,38 @@
+#ifndef Py_CPYTHON_CEVAL_H
+# error "this header file must not be included directly"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *);
+PyAPI_FUNC(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
+PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *);
+PyAPI_FUNC(int) _PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);
+PyAPI_FUNC(int) _PyEval_GetCoroutineOriginTrackingDepth(void);
+PyAPI_FUNC(int) _PyEval_SetAsyncGenFirstiter(PyObject *);
+PyAPI_FUNC(PyObject *) _PyEval_GetAsyncGenFirstiter(void);
+PyAPI_FUNC(int) _PyEval_SetAsyncGenFinalizer(PyObject *);
+PyAPI_FUNC(PyObject *) _PyEval_GetAsyncGenFinalizer(void);
+
+/* Helper to look up a builtin object */
+PyAPI_FUNC(PyObject *) _PyEval_GetBuiltinId(_Py_Identifier *);
+/* Look at the current frame's (if any) code's co_flags, and turn on
+ the corresponding compiler flags in cf->cf_flags. Return 1 if any
+ flag was set, else return 0. */
+PyAPI_FUNC(int) PyEval_MergeCompilerFlags(PyCompilerFlags *cf);
+
+PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(PyThreadState *tstate, PyFrameObject *f, int exc);
+
+PyAPI_FUNC(void) _PyEval_SetSwitchInterval(unsigned long microseconds);
+PyAPI_FUNC(unsigned long) _PyEval_GetSwitchInterval(void);
+
+PyAPI_FUNC(Py_ssize_t) _PyEval_RequestCodeExtraIndex(freefunc);
+
+PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
+PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);
+
+#ifdef __cplusplus
+}
+#endif
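
A hedged sketch wiring up PyEval_SetProfile() from the header above; the
callback must match Py_tracefunc.

#include <Python.h>

static int
count_calls(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg)
{
    (void)obj; (void)frame; (void)arg;
    if (what == PyTrace_CALL) {
        /* a real hook would record the call here */
    }
    return 0;
}

static void
install_profiler(void)
{
    PyEval_SetProfile(count_calls, NULL);
}
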
diff --git a/x64/include/cpython/code.h b/x64/include/cpython/code.h
new file mode 100644
index 0000000..cda28ac
--- /dev/null
+++ b/x64/include/cpython/code.h
@@ -0,0 +1,165 @@
+#ifndef Py_CPYTHON_CODE_H
+# error "this header file must not be included directly"
+#endif
+
+typedef uint16_t _Py_CODEUNIT;
+
+#ifdef WORDS_BIGENDIAN
+# define _Py_OPCODE(word) ((word) >> 8)
+# define _Py_OPARG(word) ((word) & 255)
+#else
+# define _Py_OPCODE(word) ((word) & 255)
+# define _Py_OPARG(word) ((word) >> 8)
+#endif
+
+typedef struct _PyOpcache _PyOpcache;
+
+/* Bytecode object */
+struct PyCodeObject {
+ PyObject_HEAD
+ int co_argcount; /* #arguments, except *args */
+ int co_posonlyargcount; /* #positional only arguments */
+ int co_kwonlyargcount; /* #keyword only arguments */
+ int co_nlocals; /* #local variables */
+ int co_stacksize; /* #entries needed for evaluation stack */
+ int co_flags; /* CO_..., see below */
+ int co_firstlineno; /* first source line number */
+ PyObject *co_code; /* instruction opcodes */
+ PyObject *co_consts; /* list (constants used) */
+ PyObject *co_names; /* list of strings (names used) */
+ PyObject *co_varnames; /* tuple of strings (local variable names) */
+ PyObject *co_freevars; /* tuple of strings (free variable names) */
+ PyObject *co_cellvars; /* tuple of strings (cell variable names) */
+ /* The rest aren't used in either hash or comparisons, except for co_name,
+ used in both. This is done to preserve the name and line number
+ for tracebacks and debuggers; otherwise, constant de-duplication
+ would collapse identical functions/lambdas defined on different lines.
+ */
+ Py_ssize_t *co_cell2arg; /* Maps cell vars which are arguments. */
+ PyObject *co_filename; /* unicode (where it was loaded from) */
+ PyObject *co_name; /* unicode (name, for reference) */
+ PyObject *co_lnotab; /* string (encoding addr<->lineno mapping) See
+ Objects/lnotab_notes.txt for details. */
+ void *co_zombieframe; /* for optimization only (see frameobject.c) */
+ PyObject *co_weakreflist; /* to support weakrefs to code objects */
+ /* Scratch space for extra data relating to the code object.
+ Type is a void* to keep the format private in codeobject.c to force
+ people to go through the proper APIs. */
+ void *co_extra;
+
+ /* Per opcodes just-in-time cache
+ *
+ * To reduce cache size, we use indirect mapping from opcode index to
+ * cache object:
+ * cache = co_opcache[co_opcache_map[next_instr - first_instr] - 1]
+ */
+
+ // co_opcache_map is indexed by (next_instr - first_instr).
+ // * 0 means there is no cache for this opcode.
+ // * n > 0 means there is cache in co_opcache[n-1].
+ unsigned char *co_opcache_map;
+ _PyOpcache *co_opcache;
+    int co_opcache_flag;  // used to determine when to create a cache.
+ unsigned char co_opcache_size; // length of co_opcache.
+};
+
+/* Masks for co_flags above */
+#define CO_OPTIMIZED 0x0001
+#define CO_NEWLOCALS 0x0002
+#define CO_VARARGS 0x0004
+#define CO_VARKEYWORDS 0x0008
+#define CO_NESTED 0x0010
+#define CO_GENERATOR 0x0020
+/* The CO_NOFREE flag is set if there are no free or cell variables.
+ This information is redundant, but it allows a single flag test
+ to determine whether there is any extra work to be done when the
+   call frame is set up.
+*/
+#define CO_NOFREE 0x0040
+
+/* The CO_COROUTINE flag is set for coroutine functions (defined with
+ ``async def`` keywords) */
+#define CO_COROUTINE 0x0080
+#define CO_ITERABLE_COROUTINE 0x0100
+#define CO_ASYNC_GENERATOR 0x0200
+
+/* bpo-39562: These constant values are changed in Python 3.9
+ to prevent collision with compiler flags. CO_FUTURE_ and PyCF_
+ constants must be kept unique. PyCF_ constants can use bits from
+ 0x0100 to 0x10000. CO_FUTURE_ constants use bits starting at 0x20000. */
+#define CO_FUTURE_DIVISION 0x20000
+#define CO_FUTURE_ABSOLUTE_IMPORT 0x40000 /* do absolute imports by default */
+#define CO_FUTURE_WITH_STATEMENT 0x80000
+#define CO_FUTURE_PRINT_FUNCTION 0x100000
+#define CO_FUTURE_UNICODE_LITERALS 0x200000
+
+#define CO_FUTURE_BARRY_AS_BDFL 0x400000
+#define CO_FUTURE_GENERATOR_STOP 0x800000
+#define CO_FUTURE_ANNOTATIONS 0x1000000
+
+/* This value is found in the co_cell2arg array when the associated cell
+ variable does not correspond to an argument. */
+#define CO_CELL_NOT_AN_ARG (-1)
+
+/* This should be defined if a future statement modifies the syntax.
+ For example, when a keyword is added.
+*/
+#define PY_PARSER_REQUIRES_FUTURE_KEYWORD
+
+#define CO_MAXBLOCKS 20 /* Max static block nesting within a function */
+
+PyAPI_DATA(PyTypeObject) PyCode_Type;
+
+#define PyCode_Check(op) Py_IS_TYPE(op, &PyCode_Type)
+#define PyCode_GetNumFree(op) (PyTuple_GET_SIZE((op)->co_freevars))
+
+/* Public interface */
+PyAPI_FUNC(PyCodeObject *) PyCode_New(
+ int, int, int, int, int, PyObject *, PyObject *,
+ PyObject *, PyObject *, PyObject *, PyObject *,
+ PyObject *, PyObject *, int, PyObject *);
+
+PyAPI_FUNC(PyCodeObject *) PyCode_NewWithPosOnlyArgs(
+ int, int, int, int, int, int, PyObject *, PyObject *,
+ PyObject *, PyObject *, PyObject *, PyObject *,
+ PyObject *, PyObject *, int, PyObject *);
+ /* same as struct above */
+
+/* Creates a new empty code object with the specified source location. */
+PyAPI_FUNC(PyCodeObject *)
+PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno);
+
+/* Return the line number associated with the specified bytecode index
+ in this code object. If you just need the line number of a frame,
+ use PyFrame_GetLineNumber() instead. */
+PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int);
+
+/* for internal use only */
+typedef struct _addr_pair {
+ int ap_lower;
+ int ap_upper;
+} PyAddrPair;
+
+/* Update *bounds to describe the first and one-past-the-last instructions in the
+ same line as lasti. Return the number of that line.
+*/
+PyAPI_FUNC(int) _PyCode_CheckLineNumber(PyCodeObject* co,
+ int lasti, PyAddrPair *bounds);
+
+/* Create a comparable key used to compare constants taking in account the
+ * object type. It is used to make sure types are not coerced (e.g., float and
+ * complex) _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms
+ *
+ * Return (type(obj), obj, ...): a tuple with variable size (at least 2 items)
+ * depending on the type and the value. The type is the first item to not
+ * compare bytes and str which can raise a BytesWarning exception. */
+PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj);
+
+PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts,
+ PyObject *names, PyObject *lnotab);
+
+
+PyAPI_FUNC(int) _PyCode_GetExtra(PyObject *code, Py_ssize_t index,
+ void **extra);
+PyAPI_FUNC(int) _PyCode_SetExtra(PyObject *code, Py_ssize_t index,
+ void *extra);
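
A hedged sketch of the co_extra scratch-space API above, paired with an index
from _PyEval_RequestCodeExtraIndex() in cpython/ceval.h. Storing a bare
counter in the void* slot is an assumption that the index was registered with
a destructor that tolerates non-pointer values.

#include <Python.h>

static void
bump_code_counter(PyObject *code, Py_ssize_t index)
{
    void *extra = NULL;
    if (_PyCode_GetExtra(code, index, &extra) == 0) {
        /* the slot holds a small integer, not a real pointer */
        _PyCode_SetExtra(code, index, (void *)((Py_uintptr_t)extra + 1));
    }
}
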
diff --git a/x64/include/cpython/dictobject.h b/x64/include/cpython/dictobject.h
index 64c012a..e33a0d1 100644
--- a/x64/include/cpython/dictobject.h
+++ b/x64/include/cpython/dictobject.h
@@ -62,8 +62,6 @@ PyObject *_PyDict_Pop_KnownHash(PyObject *, PyObject *, Py_hash_t, PyObject *);
PyObject *_PyDict_FromKeys(PyObject *, PyObject *, PyObject *);
#define _PyDict_HasSplitTable(d) ((d)->ma_values != NULL)
-PyAPI_FUNC(int) PyDict_ClearFreeList(void);
-
/* Like PyDict_Merge, but override can be 0, 1 or 2. If override is 0,
the first occurrence of a key wins, if override is 1, the last occurrence
of a key wins, if override is 2, a KeyError with conflicting key as
diff --git a/x64/include/cpython/fileobject.h b/x64/include/cpython/fileobject.h
index 57eac13..3005ce1 100644
--- a/x64/include/cpython/fileobject.h
+++ b/x64/include/cpython/fileobject.h
@@ -8,14 +8,6 @@ extern "C" {
PyAPI_FUNC(char *) Py_UniversalNewlineFgets(char *, int, FILE*, PyObject *);
-#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000
-PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors;
-#endif
-
-#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
-PyAPI_DATA(int) Py_UTF8Mode;
-#endif
-
/* The std printer acts as a preliminary sys.stderr until the new io
infrastructure is in place. */
PyAPI_FUNC(PyObject *) PyFile_NewStdPrinter(int);
diff --git a/x64/include/cpython/fileutils.h b/x64/include/cpython/fileutils.h
new file mode 100644
index 0000000..e79d03e
--- /dev/null
+++ b/x64/include/cpython/fileutils.h
@@ -0,0 +1,165 @@
+#ifndef Py_CPYTHON_FILEUTILS_H
+# error "this header file must not be included directly"
+#endif
+
+typedef enum {
+ _Py_ERROR_UNKNOWN=0,
+ _Py_ERROR_STRICT,
+ _Py_ERROR_SURROGATEESCAPE,
+ _Py_ERROR_REPLACE,
+ _Py_ERROR_IGNORE,
+ _Py_ERROR_BACKSLASHREPLACE,
+ _Py_ERROR_SURROGATEPASS,
+ _Py_ERROR_XMLCHARREFREPLACE,
+ _Py_ERROR_OTHER
+} _Py_error_handler;
+
+PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors);
+
+PyAPI_FUNC(int) _Py_DecodeLocaleEx(
+ const char *arg,
+ wchar_t **wstr,
+ size_t *wlen,
+ const char **reason,
+ int current_locale,
+ _Py_error_handler errors);
+
+PyAPI_FUNC(int) _Py_EncodeLocaleEx(
+ const wchar_t *text,
+ char **str,
+ size_t *error_pos,
+ const char **reason,
+ int current_locale,
+ _Py_error_handler errors);
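These are private helpers, but they mirror what the interpreter itself does when decoding argv and environment variables. A hedged sketch of decoding one byte string with the surrogateescape handler; ownership of the result passes to the caller:

    #include <Python.h>

    /* Decode a locale-encoded byte string; the caller frees the result
       with PyMem_RawFree(). Private API: behaviour may change. */
    static wchar_t *
    decode_arg(const char *arg)
    {
        wchar_t *wstr = NULL;
        size_t wlen = 0;
        const char *reason = NULL;
        int res = _Py_DecodeLocaleEx(arg, &wstr, &wlen, &reason,
                                     1,  /* use the current locale */
                                     _Py_ERROR_SURROGATEESCAPE);
        if (res != 0) {
            fprintf(stderr, "decode failed: %s\n", reason ? reason : "?");
            return NULL;
        }
        return wstr;
    }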
+
+
+PyAPI_FUNC(PyObject *) _Py_device_encoding(int);
+
+#if defined(MS_WINDOWS) || defined(__APPLE__)
+ /* On Windows, the count parameter of read() is an int (bpo-9015, bpo-9611).
+ On macOS 10.13, read() and write() with more than INT_MAX bytes
+ fail with EINVAL (bpo-24658). */
+# define _PY_READ_MAX INT_MAX
+# define _PY_WRITE_MAX INT_MAX
+#else
+    /* write() should truncate the input to PY_SSIZE_T_MAX bytes,
+       but it's safer to do it ourselves to get portable behaviour */
+# define _PY_READ_MAX PY_SSIZE_T_MAX
+# define _PY_WRITE_MAX PY_SSIZE_T_MAX
+#endif
+
+#ifdef MS_WINDOWS
+struct _Py_stat_struct {
+ unsigned long st_dev;
+ uint64_t st_ino;
+ unsigned short st_mode;
+ int st_nlink;
+ int st_uid;
+ int st_gid;
+ unsigned long st_rdev;
+ __int64 st_size;
+ time_t st_atime;
+ int st_atime_nsec;
+ time_t st_mtime;
+ int st_mtime_nsec;
+ time_t st_ctime;
+ int st_ctime_nsec;
+ unsigned long st_file_attributes;
+ unsigned long st_reparse_tag;
+};
+#else
+# define _Py_stat_struct stat
+#endif
+
+PyAPI_FUNC(int) _Py_fstat(
+ int fd,
+ struct _Py_stat_struct *status);
+
+PyAPI_FUNC(int) _Py_fstat_noraise(
+ int fd,
+ struct _Py_stat_struct *status);
+
+PyAPI_FUNC(int) _Py_stat(
+ PyObject *path,
+ struct stat *status);
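The point of _Py_stat_struct is to give POSIX and Windows one shape for stat results (64-bit st_ino and st_size on Windows). A hedged sketch querying a file size through the noraise variant:

    #include <Python.h>

    /* Return the size of an open file, or -1 with errno set. The _noraise
       variant never raises a Python exception. */
    static long long
    file_size(int fd)
    {
        struct _Py_stat_struct st;
        if (_Py_fstat_noraise(fd, &st) < 0) {
            return -1;
        }
        return (long long)st.st_size;
    }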
+
+PyAPI_FUNC(int) _Py_open(
+ const char *pathname,
+ int flags);
+
+PyAPI_FUNC(int) _Py_open_noraise(
+ const char *pathname,
+ int flags);
+
+PyAPI_FUNC(FILE *) _Py_wfopen(
+ const wchar_t *path,
+ const wchar_t *mode);
+
+PyAPI_FUNC(FILE*) _Py_fopen(
+ const char *pathname,
+ const char *mode);
+
+PyAPI_FUNC(FILE*) _Py_fopen_obj(
+ PyObject *path,
+ const char *mode);
+
+PyAPI_FUNC(Py_ssize_t) _Py_read(
+ int fd,
+ void *buf,
+ size_t count);
+
+PyAPI_FUNC(Py_ssize_t) _Py_write(
+ int fd,
+ const void *buf,
+ size_t count);
+
+PyAPI_FUNC(Py_ssize_t) _Py_write_noraise(
+ int fd,
+ const void *buf,
+ size_t count);
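_Py_read() and _Py_write() retry on EINTR and clamp each call to _PY_READ_MAX/_PY_WRITE_MAX, so callers only need the usual EOF and short-write loops. A hedged sketch (assumes the GIL is held, since the raising variants set Python exceptions):

    #include <Python.h>

    /* Copy src to dst until EOF. Returns 0 on success, -1 with a Python
       exception set on error. */
    static int
    copy_fd(int src, int dst)
    {
        char buf[8192];
        for (;;) {
            Py_ssize_t n = _Py_read(src, buf, sizeof(buf));
            if (n < 0) {
                return -1;
            }
            if (n == 0) {
                return 0;               /* EOF */
            }
            Py_ssize_t off = 0;
            while (off < n) {           /* handle short writes */
                Py_ssize_t w = _Py_write(dst, buf + off, (size_t)(n - off));
                if (w < 0) {
                    return -1;
                }
                off += w;
            }
        }
    }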
+
+#ifdef HAVE_READLINK
+PyAPI_FUNC(int) _Py_wreadlink(
+ const wchar_t *path,
+ wchar_t *buf,
+ /* Number of characters of 'buf' buffer
+ including the trailing NUL character */
+ size_t buflen);
+#endif
+
+#ifdef HAVE_REALPATH
+PyAPI_FUNC(wchar_t*) _Py_wrealpath(
+ const wchar_t *path,
+ wchar_t *resolved_path,
+ /* Number of characters of 'resolved_path' buffer
+ including the trailing NUL character */
+ size_t resolved_path_len);
+#endif
+
+#ifndef MS_WINDOWS
+PyAPI_FUNC(int) _Py_isabs(const wchar_t *path);
+#endif
+
+PyAPI_FUNC(int) _Py_abspath(const wchar_t *path, wchar_t **abspath_p);
+
+PyAPI_FUNC(wchar_t*) _Py_wgetcwd(
+ wchar_t *buf,
+ /* Number of characters of 'buf' buffer
+ including the trailing NUL character */
+ size_t buflen);
+
+PyAPI_FUNC(int) _Py_get_inheritable(int fd);
+
+PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable,
+ int *atomic_flag_works);
+
+PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable,
+ int *atomic_flag_works);
+
+PyAPI_FUNC(int) _Py_dup(int fd);
+
+#ifndef MS_WINDOWS
+PyAPI_FUNC(int) _Py_get_blocking(int fd);
+
+PyAPI_FUNC(int) _Py_set_blocking(int fd, int blocking);
+#endif /* !MS_WINDOWS */
diff --git a/x64/include/cpython/frameobject.h b/x64/include/cpython/frameobject.h
new file mode 100644
index 0000000..36a51ba
--- /dev/null
+++ b/x64/include/cpython/frameobject.h
@@ -0,0 +1,84 @@
+/* Frame object interface */
+
+#ifndef Py_CPYTHON_FRAMEOBJECT_H
+# error "this header file must not be included directly"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ int b_type; /* what kind of block this is */
+ int b_handler; /* where to jump to find handler */
+ int b_level; /* value stack level to pop to */
+} PyTryBlock;
+
+struct _frame {
+ PyObject_VAR_HEAD
+ struct _frame *f_back; /* previous frame, or NULL */
+ PyCodeObject *f_code; /* code segment */
+ PyObject *f_builtins; /* builtin symbol table (PyDictObject) */
+ PyObject *f_globals; /* global symbol table (PyDictObject) */
+ PyObject *f_locals; /* local symbol table (any mapping) */
+ PyObject **f_valuestack; /* points after the last local */
+    /* Next free slot in f_valuestack. Frame creation sets it to f_valuestack.
+ Frame evaluation usually NULLs it, but a frame that yields sets it
+ to the current stack top. */
+ PyObject **f_stacktop;
+ PyObject *f_trace; /* Trace function */
+ char f_trace_lines; /* Emit per-line trace events? */
+ char f_trace_opcodes; /* Emit per-opcode trace events? */
+
+ /* Borrowed reference to a generator, or NULL */
+ PyObject *f_gen;
+
+    int f_lasti;                /* Index of the last attempted instruction in the bytecode, or -1 */
+ /* Call PyFrame_GetLineNumber() instead of reading this field
+ directly. As of 2.3 f_lineno is only valid when tracing is
+ active (i.e. when f_trace is set). At other times we use
+ PyCode_Addr2Line to calculate the line from the current
+ bytecode index. */
+ int f_lineno; /* Current line number */
+ int f_iblock; /* index in f_blockstack */
+ char f_executing; /* whether the frame is still executing */
+ PyTryBlock f_blockstack[CO_MAXBLOCKS]; /* for try and loop blocks */
+ PyObject *f_localsplus[1]; /* locals+stack, dynamically sized */
+};
+
+
+/* Standard object interface */
+
+PyAPI_DATA(PyTypeObject) PyFrame_Type;
+
+#define PyFrame_Check(op) Py_IS_TYPE(op, &PyFrame_Type)
+
+PyAPI_FUNC(PyFrameObject *) PyFrame_New(PyThreadState *, PyCodeObject *,
+ PyObject *, PyObject *);
+
+/* only internal use */
+PyFrameObject* _PyFrame_New_NoTrack(PyThreadState *, PyCodeObject *,
+ PyObject *, PyObject *);
+
+
+/* The rest of the interface is specific for frame objects */
+
+/* Block management functions */
+
+PyAPI_FUNC(void) PyFrame_BlockSetup(PyFrameObject *, int, int, int);
+PyAPI_FUNC(PyTryBlock *) PyFrame_BlockPop(PyFrameObject *);
+
+/* Conversions between "fast locals" and locals in dictionary */
+
+PyAPI_FUNC(void) PyFrame_LocalsToFast(PyFrameObject *, int);
+
+PyAPI_FUNC(int) PyFrame_FastToLocalsWithError(PyFrameObject *f);
+PyAPI_FUNC(void) PyFrame_FastToLocals(PyFrameObject *);
+
+PyAPI_FUNC(void) _PyFrame_DebugMallocStats(FILE *out);
+
+PyAPI_FUNC(PyFrameObject *) PyFrame_GetBack(PyFrameObject *frame);
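PyFrame_GetBack() is new in 3.9 and returns a strong reference, letting extensions walk the call stack without touching f_back directly. A hedged sketch printing the line numbers of a backtrace:

    #include <Python.h>

    /* Walk the frame chain from 'frame' outwards. PyFrame_GetLineNumber()
       is declared in pyframe.h. */
    static void
    print_backtrace(PyFrameObject *frame)
    {
        Py_XINCREF(frame);
        while (frame != NULL) {
            printf("line %d\n", PyFrame_GetLineNumber(frame));
            PyFrameObject *back = PyFrame_GetBack(frame);  /* new reference */
            Py_DECREF(frame);
            frame = back;
        }
    }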
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/x64/include/cpython/import.h b/x64/include/cpython/import.h
new file mode 100644
index 0000000..c1b4712
--- /dev/null
+++ b/x64/include/cpython/import.h
@@ -0,0 +1,50 @@
+#ifndef Py_CPYTHON_IMPORT_H
+# error "this header file must not be included directly"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+PyMODINIT_FUNC PyInit__imp(void);
+
+PyAPI_FUNC(int) _PyImport_IsInitialized(PyInterpreterState *);
+
+PyAPI_FUNC(PyObject *) _PyImport_GetModuleId(struct _Py_Identifier *name);
+PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module);
+PyAPI_FUNC(int) _PyImport_SetModuleString(const char *name, PyObject* module);
+
+PyAPI_FUNC(void) _PyImport_AcquireLock(void);
+PyAPI_FUNC(int) _PyImport_ReleaseLock(void);
+
+PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObject(PyObject *, PyObject *);
+
+PyAPI_FUNC(int) _PyImport_FixupBuiltin(
+ PyObject *mod,
+ const char *name, /* UTF-8 encoded string */
+ PyObject *modules
+ );
+PyAPI_FUNC(int) _PyImport_FixupExtensionObject(PyObject*, PyObject *,
+ PyObject *, PyObject *);
+
+struct _inittab {
+ const char *name; /* ASCII encoded string */
+ PyObject* (*initfunc)(void);
+};
+PyAPI_DATA(struct _inittab *) PyImport_Inittab;
+PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab);
+
+struct _frozen {
+ const char *name; /* ASCII encoded string */
+ const unsigned char *code;
+ int size;
+};
+
+/* Embedding apps may change this pointer to point to their favorite
+ collection of frozen modules: */
+
+PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules;
+
+#ifdef __cplusplus
+}
+#endif
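A hedged sketch of the intended embedding pattern: register a built-in extension module before the interpreter initializes. "spam" and PyInit_spam are placeholders for the embedder's own module.

    #include <Python.h>

    extern PyObject *PyInit_spam(void);   /* hypothetical module init */

    static struct _inittab extra_modules[] = {
        {"spam", PyInit_spam},
        {NULL, NULL}                      /* sentinel */
    };

    /* Must run before Py_Initialize(); returns -1 on memory error. */
    static int
    register_builtins(void)
    {
        return PyImport_ExtendInittab(extra_modules);
    }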
diff --git a/x64/include/cpython/initconfig.h b/x64/include/cpython/initconfig.h
index 4b5ceaf..0a256d4 100644
--- a/x64/include/cpython/initconfig.h
+++ b/x64/include/cpython/initconfig.h
@@ -113,7 +113,11 @@ typedef struct {
"POSIX", otherwise it is set to 0. Inherit Py_UTF8Mode value value. */
int utf8_mode;
- int dev_mode; /* Development mode. PYTHONDEVMODE, -X dev */
+ /* If non-zero, enable the Python Development Mode.
+
+ Set to 1 by the -X dev command line option. Set by the PYTHONDEVMODE
+ environment variable. */
+ int dev_mode;
/* Memory allocator: PYTHONMALLOC env var.
See PyMemAllocatorName for valid values. */
@@ -131,7 +135,7 @@ typedef struct {
int isolated; /* Isolated mode? see PyPreConfig.isolated */
int use_environment; /* Use environment variables? see PyPreConfig.use_environment */
- int dev_mode; /* Development mode? See PyPreConfig.dev_mode */
+ int dev_mode; /* Python Development Mode? See PyPreConfig.dev_mode */
/* Install signal handlers? Yes by default. */
int install_signal_handlers;
@@ -143,13 +147,16 @@ typedef struct {
Set to 1 by -X faulthandler and PYTHONFAULTHANDLER. -1 means unset. */
int faulthandler;
+ /* Enable PEG parser?
+ 1 by default, set to 0 by -X oldparser and PYTHONOLDPARSER */
+ int _use_peg_parser;
+
/* Enable tracemalloc?
Set by -X tracemalloc=N and PYTHONTRACEMALLOC. -1 means unset */
int tracemalloc;
int import_time; /* PYTHONPROFILEIMPORTTIME, -X importtime */
int show_ref_count; /* -X showrefcount */
- int show_alloc_count; /* -X showalloccount */
int dump_refs; /* PYTHONDUMPREFS */
int malloc_stats; /* PYTHONMALLOCSTATS */
@@ -381,6 +388,7 @@ typedef struct {
wchar_t *base_prefix; /* sys.base_prefix */
wchar_t *exec_prefix; /* sys.exec_prefix */
wchar_t *base_exec_prefix; /* sys.base_exec_prefix */
+ wchar_t *platlibdir; /* sys.platlibdir */
/* --- Parameter only used by Py_Main() ---------- */
@@ -402,6 +410,18 @@ typedef struct {
/* If equal to 0, stop Python initialization before the "main" phase */
int _init_main;
+
+ /* If non-zero, disallow threads, subprocesses, and fork.
+ Default: 0. */
+ int _isolated_interpreter;
+
+ /* Original command line arguments. If _orig_argv is empty and _argv is
+ not equal to [''], PyConfig_Read() copies the configuration 'argv' list
+ into '_orig_argv' list before modifying 'argv' list (if parse_argv
+ is non-zero).
+
+ _PyConfig_Write() initializes Py_GetArgcArgv() to this list. */
+ PyWideStringList _orig_argv;
} PyConfig;
PyAPI_FUNC(void) PyConfig_InitPythonConfig(PyConfig *config);
@@ -427,6 +447,14 @@ PyAPI_FUNC(PyStatus) PyConfig_SetWideStringList(PyConfig *config,
PyWideStringList *list,
Py_ssize_t length, wchar_t **items);
+
+/* --- Helper functions --------------------------------------- */
+
+/* Get the original command line arguments, before Python modified them.
+
+ See also PyConfig._orig_argv. */
+PyAPI_FUNC(void) Py_GetArgcArgv(int *argc, wchar_t ***argv);
+
#ifdef __cplusplus
}
#endif
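A hedged sketch of the new helper: after initialization, recover the command line exactly as the process received it, before Python stripped its own options. The returned pointers are borrowed from the runtime and must not be freed.

    #include <Python.h>

    static void
    show_orig_argv(void)
    {
        int argc;
        wchar_t **argv;
        Py_GetArgcArgv(&argc, &argv);
        for (int i = 0; i < argc; i++) {
            printf("argv[%d] = %ls\n", i, argv[i]);
        }
    }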
diff --git a/x64/include/cpython/listobject.h b/x64/include/cpython/listobject.h
new file mode 100644
index 0000000..74fe330
--- /dev/null
+++ b/x64/include/cpython/listobject.h
@@ -0,0 +1,43 @@
+#ifndef Py_CPYTHON_LISTOBJECT_H
+# error "this header file must not be included directly"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ PyObject_VAR_HEAD
+ /* Vector of pointers to list elements. list[0] is ob_item[0], etc. */
+ PyObject **ob_item;
+
+ /* ob_item contains space for 'allocated' elements. The number
+ * currently in use is ob_size.
+ * Invariants:
+ * 0 <= ob_size <= allocated
+ * len(list) == ob_size
+ * ob_item == NULL implies ob_size == allocated == 0
+ * list.sort() temporarily sets allocated to -1 to detect mutations.
+ *
+ * Items must normally not be NULL, except during construction when
+ * the list is not yet visible outside the function that builds it.
+ */
+ Py_ssize_t allocated;
+} PyListObject;
+
+PyAPI_FUNC(PyObject *) _PyList_Extend(PyListObject *, PyObject *);
+PyAPI_FUNC(void) _PyList_DebugMallocStats(FILE *out);
+
+/* Macro, trading safety for speed */
+
+/* Cast argument to PyListObject* type. */
+#define _PyList_CAST(op) (assert(PyList_Check(op)), (PyListObject *)(op))
+
+#define PyList_GET_ITEM(op, i) (_PyList_CAST(op)->ob_item[i])
+#define PyList_SET_ITEM(op, i, v) (_PyList_CAST(op)->ob_item[i] = (v))
+#define PyList_GET_SIZE(op) Py_SIZE(_PyList_CAST(op))
+#define _PyList_ITEMS(op) (_PyList_CAST(op)->ob_item)
+
+#ifdef __cplusplus
+}
+#endif
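Since PyList_SET_ITEM steals a reference and skips all checks, it is only safe on slots that do not yet hold an object. A hedged sketch of the canonical fill-after-PyList_New pattern:

    #include <Python.h>

    /* Build [0, 1, 4, 9, ...] of length n, or return NULL on error. */
    static PyObject *
    make_squares(Py_ssize_t n)
    {
        PyObject *list = PyList_New(n);
        if (list == NULL) {
            return NULL;
        }
        for (Py_ssize_t i = 0; i < n; i++) {
            PyObject *v = PyLong_FromSsize_t(i * i);
            if (v == NULL) {
                Py_DECREF(list);
                return NULL;
            }
            PyList_SET_ITEM(list, i, v);  /* steals the reference to v */
        }
        return list;
    }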
diff --git a/x64/include/cpython/methodobject.h b/x64/include/cpython/methodobject.h
new file mode 100644
index 0000000..7ecbfe3
--- /dev/null
+++ b/x64/include/cpython/methodobject.h
@@ -0,0 +1,35 @@
+#ifndef Py_CPYTHON_METHODOBJECT_H
+# error "this header file must not be included directly"
+#endif
+
+PyAPI_DATA(PyTypeObject) PyCMethod_Type;
+
+#define PyCMethod_CheckExact(op) Py_IS_TYPE(op, &PyCMethod_Type)
+#define PyCMethod_Check(op) PyObject_TypeCheck(op, &PyCMethod_Type)
+
+/* Macros for direct access to these values. Type checks are *not*
+ done, so use with care. */
+#define PyCFunction_GET_FUNCTION(func) \
+ (((PyCFunctionObject *)func) -> m_ml -> ml_meth)
+#define PyCFunction_GET_SELF(func) \
+ (((PyCFunctionObject *)func) -> m_ml -> ml_flags & METH_STATIC ? \
+ NULL : ((PyCFunctionObject *)func) -> m_self)
+#define PyCFunction_GET_FLAGS(func) \
+ (((PyCFunctionObject *)func) -> m_ml -> ml_flags)
+#define PyCFunction_GET_CLASS(func) \
+ (((PyCFunctionObject *)func) -> m_ml -> ml_flags & METH_METHOD ? \
+ ((PyCMethodObject *)func) -> mm_class : NULL)
+
+typedef struct {
+ PyObject_HEAD
+ PyMethodDef *m_ml; /* Description of the C function to call */
+ PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */
+ PyObject *m_module; /* The __module__ attribute, can be anything */
+ PyObject *m_weakreflist; /* List of weak references */
+ vectorcallfunc vectorcall;
+} PyCFunctionObject;
+
+typedef struct {
+ PyCFunctionObject func;
+ PyTypeObject *mm_class; /* Class that defines this method */
+} PyCMethodObject;
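PyCMethodObject backs the new METH_METHOD calling convention from PEP 573, which passes the defining class explicitly so heap-type methods can reach their module state. A hedged sketch; demo_method and its docstring are placeholders:

    #include <Python.h>

    static PyObject *
    demo_method(PyObject *self, PyTypeObject *defining_class,
                PyObject *const *args, size_t nargs, PyObject *kwnames)
    {
        (void)self; (void)args; (void)kwnames;
        /* e.g. void *state = PyType_GetModuleState(defining_class); */
        (void)defining_class;
        return PyLong_FromSize_t(nargs);
    }

    static PyMethodDef demo_def = {
        "demo", (PyCFunction)(void (*)(void))demo_method,
        METH_METHOD | METH_FASTCALL | METH_KEYWORDS,
        "placeholder docstring"
    };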
diff --git a/x64/include/cpython/object.h b/x64/include/cpython/object.h
index 5a0ac4a..444f832 100644
--- a/x64/include/cpython/object.h
+++ b/x64/include/cpython/object.h
@@ -6,6 +6,22 @@
extern "C" {
#endif
+PyAPI_FUNC(void) _Py_NewReference(PyObject *op);
+
+#ifdef Py_TRACE_REFS
+/* Py_TRACE_REFS is such major surgery that we call external routines. */
+PyAPI_FUNC(void) _Py_ForgetReference(PyObject *);
+#endif
+
+/* Update the Python traceback of an object. This function must be called
+ when a memory block is reused from a free list. */
+PyAPI_FUNC(int) _PyTraceMalloc_NewReference(PyObject *op);
+
+#ifdef Py_REF_DEBUG
+PyAPI_FUNC(Py_ssize_t) _Py_GetRefTotal(void);
+#endif
+
+
/********************* String Literals ****************************************/
/* This structure helps managing static strings. The basic usage goes like this:
Instead of doing
@@ -20,7 +36,7 @@ extern "C" {
PyId_foo is a static variable, either on block level or file level. On first
usage, the string "foo" is interned, and the structures are linked. On interpreter
- shutdown, all strings are released (through _PyUnicode_ClearStaticStrings).
+ shutdown, all strings are released.
Alternatively, _Py_static_string allows choosing the variable name.
_PyUnicode_FromId returns a borrowed reference to the interned string.
@@ -174,7 +190,7 @@ typedef struct {
* backwards-compatibility */
typedef Py_ssize_t printfunc;
-typedef struct _typeobject {
+struct _typeobject {
PyObject_VAR_HEAD
const char *tp_name; /* For printing, in format "<module>.<name>" */
Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */
@@ -255,19 +271,7 @@ typedef struct _typeobject {
destructor tp_finalize;
vectorcallfunc tp_vectorcall;
-
- /* bpo-37250: kept for backwards compatibility in CPython 3.8 only */
- Py_DEPRECATED(3.8) int (*tp_print)(PyObject *, FILE *, int);
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- Py_ssize_t tp_allocs;
- Py_ssize_t tp_frees;
- Py_ssize_t tp_maxalloc;
- struct _typeobject *tp_prev;
- struct _typeobject *tp_next;
-#endif
-} PyTypeObject;
+};
/* The *real* layout of a type object when allocated on the heap */
typedef struct _heaptypeobject {
@@ -285,6 +289,7 @@ typedef struct _heaptypeobject {
PyBufferProcs as_buffer;
PyObject *ht_name, *ht_slots, *ht_qualname;
struct _dictkeysobject *ht_cached_keys;
+ PyObject *ht_module;
/* here are optional user slots, followed by the members. */
} PyHeapTypeObject;
@@ -321,6 +326,9 @@ PyAPI_FUNC(int) _PyObject_HasAttrId(PyObject *, struct _Py_Identifier *);
*/
PyAPI_FUNC(int) _PyObject_LookupAttr(PyObject *, PyObject *, PyObject **);
PyAPI_FUNC(int) _PyObject_LookupAttrId(PyObject *, struct _Py_Identifier *, PyObject **);
+
+PyAPI_FUNC(int) _PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
+
PyAPI_FUNC(PyObject **) _PyObject_GetDictPtr(PyObject *);
PyAPI_FUNC(PyObject *) _PyObject_NextNotImplemented(PyObject *);
PyAPI_FUNC(void) PyObject_CallFinalizer(PyObject *);
@@ -334,20 +342,7 @@ PyAPI_FUNC(int)
_PyObject_GenericSetAttrWithDict(PyObject *, PyObject *,
PyObject *, PyObject *);
-#define PyType_HasFeature(t,f) (((t)->tp_flags & (f)) != 0)
-
-static inline void _Py_Dealloc_inline(PyObject *op)
-{
- destructor dealloc = Py_TYPE(op)->tp_dealloc;
-#ifdef Py_TRACE_REFS
- _Py_ForgetReference(op);
-#else
- _Py_INC_TPFREES(op);
-#endif
- (*dealloc)(op);
-}
-#define _Py_Dealloc(op) _Py_Dealloc_inline(op)
-
+PyAPI_FUNC(PyObject *) _PyObject_FunctionStr(PyObject *);
/* Safely decref `op` and set `op` to `op2`.
*
@@ -391,11 +386,6 @@ PyAPI_DATA(PyTypeObject) _PyNotImplemented_Type;
*/
PyAPI_DATA(int) _Py_SwappedOp[];
-/* This is the old private API, invoked by the macros before 3.2.4.
- Kept for binary compatibility of extensions using the stable ABI. */
-PyAPI_FUNC(void) _PyTrash_deposit_object(PyObject*);
-PyAPI_FUNC(void) _PyTrash_destroy_chain(void);
-
PyAPI_FUNC(void)
_PyDebugAllocatorStats(FILE *out, const char *block_name, int num_blocks,
size_t sizeof_block);
@@ -442,7 +432,7 @@ _PyObject_DebugTypeStats(FILE *out);
NDEBUG against a Python built with NDEBUG defined.
msg, expr and function can be NULL. */
-PyAPI_FUNC(void) _PyObject_AssertFailed(
+PyAPI_FUNC(void) _Py_NO_RETURN _PyObject_AssertFailed(
PyObject *obj,
const char *expr,
const char *msg,
@@ -465,6 +455,100 @@ PyAPI_FUNC(int) _PyObject_CheckConsistency(
PyObject *op,
int check_content);
+
+/* Trashcan mechanism, thanks to Christian Tismer.
+
+When deallocating a container object, it's possible to trigger an unbounded
+chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
+next" object in the chain to 0. This can easily lead to stack overflows,
+especially in threads (which typically have less stack space to work with).
+
+A container object can avoid this by bracketing the body of its tp_dealloc
+function with a pair of macros:
+
+static void
+mytype_dealloc(mytype *p)
+{
+ ... declarations go here ...
+
+ PyObject_GC_UnTrack(p); // must untrack first
+ Py_TRASHCAN_BEGIN(p, mytype_dealloc)
+ ... The body of the deallocator goes here, including all calls ...
+ ... to Py_DECREF on contained objects. ...
+ Py_TRASHCAN_END // there should be no code after this
+}
+
+CAUTION: Never return from the middle of the body! If the body needs to
+"get out early", put a label immediately before the Py_TRASHCAN_END
+call, and goto it. Else the call-depth counter (see below) will stay
+above 0 forever, and the trashcan will never get emptied.
+
+How it works: The BEGIN macro increments a call-depth counter. So long
+as this counter is small, the body of the deallocator is run directly without
+further ado. But if the counter gets large, it instead adds p to a list of
+objects to be deallocated later, skips the body of the deallocator, and
+resumes execution after the END macro. The tp_dealloc routine then returns
+without deallocating anything (and so unbounded call-stack depth is avoided).
+
+When the call stack finishes unwinding again, code generated by the END macro
+notices this, and calls another routine to deallocate all the objects that
+may have been added to the list of deferred deallocations. In effect, a
+chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces,
+with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
+
+Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
+class, we need to ensure that the trashcan is only triggered on the tp_dealloc
+of the actual class being deallocated. Otherwise we might end up with a
+partially-deallocated object. To check this, the tp_dealloc function must be
+passed as the second argument to Py_TRASHCAN_BEGIN().
+*/
+
+/* This is the old private API, invoked by the macros before 3.2.4.
+ Kept for binary compatibility of extensions using the stable ABI. */
+PyAPI_FUNC(void) _PyTrash_deposit_object(PyObject*);
+PyAPI_FUNC(void) _PyTrash_destroy_chain(void);
+
+/* This is the old private API, invoked by the macros before 3.9.
+ Kept for binary compatibility of extensions using the stable ABI. */
+PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*);
+PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
+
+/* Forward declarations for PyThreadState */
+struct _ts;
+
+/* Python 3.9 private API, invoked by the macros below. */
+PyAPI_FUNC(int) _PyTrash_begin(struct _ts *tstate, PyObject *op);
+PyAPI_FUNC(void) _PyTrash_end(struct _ts *tstate);
+
+#define PyTrash_UNWIND_LEVEL 50
+
+#define Py_TRASHCAN_BEGIN_CONDITION(op, cond) \
+ do { \
+ PyThreadState *_tstate = NULL; \
+ /* If "cond" is false, then _tstate remains NULL and the deallocator \
+ * is run normally without involving the trashcan */ \
+ if (cond) { \
+ _tstate = PyThreadState_GET(); \
+ if (_PyTrash_begin(_tstate, _PyObject_CAST(op))) { \
+ break; \
+ } \
+ }
+ /* The body of the deallocator is here. */
+#define Py_TRASHCAN_END \
+ if (_tstate) { \
+ _PyTrash_end(_tstate); \
+ } \
+ } while (0);
+
+#define Py_TRASHCAN_BEGIN(op, dealloc) \
+ Py_TRASHCAN_BEGIN_CONDITION(op, \
+ Py_TYPE(op)->tp_dealloc == (destructor)(dealloc))
+
+/* For backwards compatibility, these macros enable the trashcan
+ * unconditionally */
+#define Py_TRASHCAN_SAFE_BEGIN(op) Py_TRASHCAN_BEGIN_CONDITION(op, 1)
+#define Py_TRASHCAN_SAFE_END(op) Py_TRASHCAN_END
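The dealloc example in the comment above is pseudocode; here is a hedged, compilable variant for a hypothetical one-slot container type:

    #include <Python.h>

    typedef struct {
        PyObject_HEAD
        PyObject *w_item;                 /* the contained object, or NULL */
    } wrapperobject;

    static void
    wrapper_dealloc(wrapperobject *self)
    {
        PyObject_GC_UnTrack(self);        /* must untrack first */
        Py_TRASHCAN_BEGIN(self, wrapper_dealloc)
        Py_CLEAR(self->w_item);           /* may trigger nested deallocs */
        Py_TYPE(self)->tp_free((PyObject *)self);
        Py_TRASHCAN_END
    }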
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/cpython/objimpl.h b/x64/include/cpython/objimpl.h
index f121922..b835936 100644
--- a/x64/include/cpython/objimpl.h
+++ b/x64/include/cpython/objimpl.h
@@ -6,6 +6,89 @@
extern "C" {
#endif
+#define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize )
+
+/* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a
+   variable-size object with nitems items, exclusive of gc overhead (if any). The
+ value is rounded up to the closest multiple of sizeof(void *), in order to
+ ensure that pointer fields at the end of the object are correctly aligned
+ for the platform (this is of special importance for subclasses of, e.g.,
+ str or int, so that pointers can be stored after the embedded data).
+
+ Note that there's no memory wastage in doing this, as malloc has to
+ return (at worst) pointer-aligned memory anyway.
+*/
+#if ((SIZEOF_VOID_P - 1) & SIZEOF_VOID_P) != 0
+# error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2"
+#endif
+
+#define _PyObject_VAR_SIZE(typeobj, nitems) \
+ _Py_SIZE_ROUND_UP((typeobj)->tp_basicsize + \
+ (nitems)*(typeobj)->tp_itemsize, \
+ SIZEOF_VOID_P)
+
+
+/* This example code implements an object constructor with a custom
+ allocator, where PyObject_New is inlined, and shows the important
+ distinction between two steps (at least):
+ 1) the actual allocation of the object storage;
+ 2) the initialization of the Python specific fields
+ in this storage with PyObject_{Init, InitVar}.
+
+ PyObject *
+ YourObject_New(...)
+ {
+ PyObject *op;
+
+ op = (PyObject *) Your_Allocator(_PyObject_SIZE(YourTypeStruct));
+ if (op == NULL)
+ return PyErr_NoMemory();
+
+ PyObject_Init(op, &YourTypeStruct);
+
+ op->ob_field = value;
+ ...
+ return op;
+ }
+
+ Note that in C++, the use of the new operator usually implies that
+ the 1st step is performed automatically for you, so in a C++ class
+ constructor you would start directly with PyObject_Init/InitVar. */
+
+
+/* Inline functions trading binary compatibility for speed:
+ PyObject_INIT() is the fast version of PyObject_Init(), and
+ PyObject_INIT_VAR() is the fast version of PyObject_InitVar().
+
+ These inline functions must not be called with op=NULL. */
+static inline PyObject*
+_PyObject_INIT(PyObject *op, PyTypeObject *typeobj)
+{
+ assert(op != NULL);
+ Py_SET_TYPE(op, typeobj);
+ if (PyType_GetFlags(typeobj) & Py_TPFLAGS_HEAPTYPE) {
+ Py_INCREF(typeobj);
+ }
+ _Py_NewReference(op);
+ return op;
+}
+
+#define PyObject_INIT(op, typeobj) \
+ _PyObject_INIT(_PyObject_CAST(op), (typeobj))
+
+static inline PyVarObject*
+_PyObject_INIT_VAR(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
+{
+ assert(op != NULL);
+ Py_SET_SIZE(op, size);
+ PyObject_INIT((PyObject *)op, typeobj);
+ return op;
+}
+
+#define PyObject_INIT_VAR(op, typeobj, size) \
+ _PyObject_INIT_VAR(_PyVarObject_CAST(op), (typeobj), (size))
+
+
/* This function returns the number of allocated memory blocks, regardless of size */
PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void);
@@ -37,66 +120,16 @@ PyAPI_FUNC(Py_ssize_t) _PyGC_CollectNoFail(void);
PyAPI_FUNC(Py_ssize_t) _PyGC_CollectIfEnabled(void);
-/* Test if an object has a GC head */
-#define PyObject_IS_GC(o) \
- (PyType_IS_GC(Py_TYPE(o)) \
- && (Py_TYPE(o)->tp_is_gc == NULL || Py_TYPE(o)->tp_is_gc(o)))
+/* Test if an object implements the garbage collector protocol */
+PyAPI_FUNC(int) PyObject_IS_GC(PyObject *obj);
-/* GC information is stored BEFORE the object structure. */
-typedef struct {
- // Pointer to next object in the list.
- // 0 means the object is not tracked
- uintptr_t _gc_next;
-
- // Pointer to previous object in the list.
- // Lowest two bits are used for flags documented later.
- uintptr_t _gc_prev;
-} PyGC_Head;
-
-#define _Py_AS_GC(o) ((PyGC_Head *)(o)-1)
-
-/* True if the object is currently tracked by the GC. */
-#define _PyObject_GC_IS_TRACKED(o) (_Py_AS_GC(o)->_gc_next != 0)
-
-/* True if the object may be tracked by the GC in the future, or already is.
- This can be useful to implement some optimizations. */
-#define _PyObject_GC_MAY_BE_TRACKED(obj) \
- (PyObject_IS_GC(obj) && \
- (!PyTuple_CheckExact(obj) || _PyObject_GC_IS_TRACKED(obj)))
-
-
-/* Bit flags for _gc_prev */
-/* Bit 0 is set when tp_finalize is called */
-#define _PyGC_PREV_MASK_FINALIZED (1)
-/* Bit 1 is set when the object is in generation which is GCed currently. */
-#define _PyGC_PREV_MASK_COLLECTING (2)
-/* The (N-2) most significant bits contain the real address. */
-#define _PyGC_PREV_SHIFT (2)
-#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
-
-// Lowest bit of _gc_next is used for flags only in GC.
-// But it is always 0 for normal code.
-#define _PyGCHead_NEXT(g) ((PyGC_Head*)(g)->_gc_next)
-#define _PyGCHead_SET_NEXT(g, p) ((g)->_gc_next = (uintptr_t)(p))
-
-// Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags.
-#define _PyGCHead_PREV(g) ((PyGC_Head*)((g)->_gc_prev & _PyGC_PREV_MASK))
-#define _PyGCHead_SET_PREV(g, p) do { \
- assert(((uintptr_t)p & ~_PyGC_PREV_MASK) == 0); \
- (g)->_gc_prev = ((g)->_gc_prev & ~_PyGC_PREV_MASK) \
- | ((uintptr_t)(p)); \
- } while (0)
-
-#define _PyGCHead_FINALIZED(g) \
- (((g)->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0)
-#define _PyGCHead_SET_FINALIZED(g) \
- ((g)->_gc_prev |= _PyGC_PREV_MASK_FINALIZED)
-
-#define _PyGC_FINALIZED(o) \
- _PyGCHead_FINALIZED(_Py_AS_GC(o))
-#define _PyGC_SET_FINALIZED(o) \
- _PyGCHead_SET_FINALIZED(_Py_AS_GC(o))
+/* Code built with Py_BUILD_CORE must include pycore_gc.h instead which
+ defines a different _PyGC_FINALIZED() macro. */
+#ifndef Py_BUILD_CORE
+ // Kept for backward compatibility with Python 3.8
+# define _PyGC_FINALIZED(o) PyObject_GC_IsFinalized(o)
+#endif
PyAPI_FUNC(PyObject *) _PyObject_GC_Malloc(size_t size);
PyAPI_FUNC(PyObject *) _PyObject_GC_Calloc(size_t size);
@@ -105,8 +138,7 @@ PyAPI_FUNC(PyObject *) _PyObject_GC_Calloc(size_t size);
/* Test if a type supports weak references */
#define PyType_SUPPORTS_WEAKREFS(t) ((t)->tp_weaklistoffset > 0)
-#define PyObject_GET_WEAKREFS_LISTPTR(o) \
- ((PyObject **) (((char *) (o)) + Py_TYPE(o)->tp_weaklistoffset))
+PyAPI_FUNC(PyObject **) PyObject_GET_WEAKREFS_LISTPTR(PyObject *op);
#ifdef __cplusplus
}
diff --git a/x64/include/cpython/pyerrors.h b/x64/include/cpython/pyerrors.h
index e3098b3..9c87b53 100644
--- a/x64/include/cpython/pyerrors.h
+++ b/x64/include/cpython/pyerrors.h
@@ -75,7 +75,8 @@ typedef PyOSErrorObject PyWindowsErrorObject;
/* Error handling definitions */
PyAPI_FUNC(void) _PyErr_SetKeyError(PyObject *);
-_PyErr_StackItem *_PyErr_GetTopmostException(PyThreadState *tstate);
+PyAPI_FUNC(_PyErr_StackItem*) _PyErr_GetTopmostException(PyThreadState *tstate);
+PyAPI_FUNC(void) _PyErr_GetExcInfo(PyThreadState *, PyObject **, PyObject **, PyObject **);
/* Context manipulation (PEP 3134) */
@@ -148,7 +149,10 @@ PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject(
PyObject *filename,
int lineno);
-/* Create a UnicodeEncodeError object */
+/* Create a UnicodeEncodeError object.
+ *
+ * TODO: This API will be removed in Python 3.11.
+ */
Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject *) PyUnicodeEncodeError_Create(
const char *encoding, /* UTF-8 encoded string */
const Py_UNICODE *object,
@@ -158,7 +162,10 @@ Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject *) PyUnicodeEncodeError_Create(
const char *reason /* UTF-8 encoded string */
);
-/* Create a UnicodeTranslateError object */
+/* Create a UnicodeTranslateError object.
+ *
+ * TODO: This API will be removed in Python 3.11.
+ */
Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject *) PyUnicodeTranslateError_Create(
const Py_UNICODE *object,
Py_ssize_t length,
@@ -177,6 +184,17 @@ PyAPI_FUNC(void) _PyErr_WriteUnraisableMsg(
const char *err_msg,
PyObject *obj);
+PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFunc(
+ const char *func,
+ const char *message);
+
+PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFormat(
+ const char *func,
+ const char *format,
+ ...);
+
+#define Py_FatalError(message) _Py_FatalErrorFunc(__func__, message)
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/cpython/pylifecycle.h b/x64/include/cpython/pylifecycle.h
index 2f3a0db..eb523b8 100644
--- a/x64/include/cpython/pylifecycle.h
+++ b/x64/include/cpython/pylifecycle.h
@@ -32,14 +32,6 @@ PyAPI_FUNC(int) _Py_IsCoreInitialized(void);
PyAPI_FUNC(PyStatus) Py_InitializeFromConfig(
const PyConfig *config);
-PyAPI_FUNC(PyStatus) _Py_InitializeFromArgs(
- const PyConfig *config,
- Py_ssize_t argc,
- char * const *argv);
-PyAPI_FUNC(PyStatus) _Py_InitializeFromWideArgs(
- const PyConfig *config,
- Py_ssize_t argc,
- wchar_t * const *argv);
PyAPI_FUNC(PyStatus) _Py_InitializeMain(void);
PyAPI_FUNC(int) Py_RunMain(void);
@@ -73,6 +65,8 @@ PyAPI_FUNC(int) _Py_CoerceLegacyLocale(int warn);
PyAPI_FUNC(int) _Py_LegacyLocaleDetected(int warn);
PyAPI_FUNC(char *) _Py_SetLocaleFromEnv(int category);
+PyAPI_FUNC(PyThreadState *) _Py_NewInterpreter(int isolated_subinterpreter);
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/cpython/pystate.h b/x64/include/cpython/pystate.h
index 94b0809..f292da1 100644
--- a/x64/include/cpython/pystate.h
+++ b/x64/include/cpython/pystate.h
@@ -16,7 +16,7 @@ PyAPI_FUNC(PyObject *) _PyInterpreterState_GetMainModule(PyInterpreterState *);
/* State unique per thread */
/* Py_tracefunc return -1 when raising an exception, or 0 for success. */
-typedef int (*Py_tracefunc)(PyObject *, struct _frame *, int, PyObject *);
+typedef int (*Py_tracefunc)(PyObject *, PyFrameObject *, int, PyObject *);
/* The following values are used for 'what' for tracefunc functions
*
@@ -55,7 +55,8 @@ struct _ts {
struct _ts *next;
PyInterpreterState *interp;
- struct _frame *frame;
+ /* Borrowed reference to the current frame (it can be NULL) */
+ PyFrameObject *frame;
int recursion_depth;
char overflowed; /* The stack has overflowed. Allow 50 more calls
to handle the runtime error. */
@@ -139,22 +140,17 @@ struct _ts {
};
-/* Get the current interpreter state.
+// Alias for backward compatibility with Python 3.8
+#define _PyInterpreterState_Get PyInterpreterState_Get
- Issue a fatal error if there no current Python thread state or no current
- interpreter. It cannot return NULL.
-
- The caller must hold the GIL.*/
-PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_Get(void);
-
-PyAPI_FUNC(int) _PyState_AddModule(PyObject*, struct PyModuleDef*);
-PyAPI_FUNC(void) _PyState_ClearModules(void);
PyAPI_FUNC(PyThreadState *) _PyThreadState_Prealloc(PyInterpreterState *);
/* Similar to PyThreadState_Get(), but don't issue a fatal error
* if it is NULL. */
PyAPI_FUNC(PyThreadState *) _PyThreadState_UncheckedGet(void);
+PyAPI_FUNC(PyObject *) _PyThreadState_GetDict(PyThreadState *tstate);
+
/* PyGILState */
/* Helper/diagnostic function - return 1 if the current thread
@@ -169,7 +165,7 @@ PyAPI_FUNC(int) PyGILState_Check(void);
This function doesn't check for error. Return NULL before _PyGILState_Init()
is called and after _PyGILState_Fini() is called.
- See also _PyInterpreterState_Get() and _PyInterpreterState_GET_UNSAFE(). */
+ See also _PyInterpreterState_Get() and _PyInterpreterState_GET(). */
PyAPI_FUNC(PyInterpreterState *) _PyGILState_GetInterpreterStateUnsafe(void);
/* The implementation of sys._current_frames() Returns a dict mapping
@@ -184,8 +180,24 @@ PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Head(void);
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Next(PyInterpreterState *);
PyAPI_FUNC(PyThreadState *) PyInterpreterState_ThreadHead(PyInterpreterState *);
PyAPI_FUNC(PyThreadState *) PyThreadState_Next(PyThreadState *);
+PyAPI_FUNC(void) PyThreadState_DeleteCurrent(void);
+
+/* Frame evaluation API */
+
+typedef PyObject* (*_PyFrameEvalFunction)(PyThreadState *tstate, PyFrameObject *, int);
+
+PyAPI_FUNC(_PyFrameEvalFunction) _PyInterpreterState_GetEvalFrameFunc(
+ PyInterpreterState *interp);
+PyAPI_FUNC(void) _PyInterpreterState_SetEvalFrameFunc(
+ PyInterpreterState *interp,
+ _PyFrameEvalFunction eval_frame);
+
+PyAPI_FUNC(const PyConfig*) _PyInterpreterState_GetConfig(PyInterpreterState *interp);
+
+// Get the configuration of the current interpreter.
+// The caller must hold the GIL.
+PyAPI_FUNC(const PyConfig*) _Py_GetConfig(void);
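These two functions re-expose the PEP 523 frame-evaluation hook that 3.9 moved off PyInterpreterState's public fields. A hedged sketch that wraps the default evaluator with a simple counter, the kind of shim a profiler or JIT would install:

    #include <Python.h>

    static Py_ssize_t frame_count = 0;

    static PyObject *
    counting_eval(PyThreadState *tstate, PyFrameObject *frame, int throwflag)
    {
        frame_count++;
        return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
    }

    /* Install the hook on the current interpreter; GIL must be held. */
    static void
    install_hook(void)
    {
        PyInterpreterState *interp = PyInterpreterState_Get();
        _PyInterpreterState_SetEvalFrameFunc(interp, counting_eval);
    }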
-typedef struct _frame *(*PyThreadFrameGetter)(PyThreadState *self_);
/* cross-interpreter data */
diff --git a/x64/include/cpython/sysmodule.h b/x64/include/cpython/sysmodule.h
index 72d8ffe..1802b5b 100644
--- a/x64/include/cpython/sysmodule.h
+++ b/x64/include/cpython/sysmodule.h
@@ -13,7 +13,10 @@ PyAPI_FUNC(size_t) _PySys_GetSizeOf(PyObject *);
typedef int(*Py_AuditHookFunction)(const char *, PyObject *, void *);
-PyAPI_FUNC(int) PySys_Audit(const char*, const char *, ...);
+PyAPI_FUNC(int) PySys_Audit(
+ const char *event,
+ const char *argFormat,
+ ...);
PyAPI_FUNC(int) PySys_AddAuditHook(Py_AuditHookFunction, void*);
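A hedged sketch of the PEP 578 pair in action: install a hook, then raise a custom event. "mytool.connect" is a placeholder event name; a hook returns 0 to allow the operation and -1 (with an exception set) to block it.

    #include <Python.h>
    #include <string.h>

    static int
    my_hook(const char *event, PyObject *args, void *userData)
    {
        (void)args; (void)userData;
        if (strcmp(event, "mytool.connect") == 0) {
            /* inspect the args tuple here */
        }
        return 0;
    }

    static int
    setup_and_fire(void)
    {
        if (PySys_AddAuditHook(my_hook, NULL) < 0) {
            return -1;
        }
        return PySys_Audit("mytool.connect", "ss", "example.org", "https");
    }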
#ifdef __cplusplus
diff --git a/x64/include/cpython/traceback.h b/x64/include/cpython/traceback.h
index 746097d..837470c 100644
--- a/x64/include/cpython/traceback.h
+++ b/x64/include/cpython/traceback.h
@@ -9,7 +9,7 @@ extern "C" {
typedef struct _traceback {
PyObject_HEAD
struct _traceback *tb_next;
- struct _frame *tb_frame;
+ PyFrameObject *tb_frame;
int tb_lasti;
int tb_lineno;
} PyTracebackObject;
diff --git a/x64/include/cpython/unicodeobject.h b/x64/include/cpython/unicodeobject.h
index 54a13e3..1fc732a 100644
--- a/x64/include/cpython/unicodeobject.h
+++ b/x64/include/cpython/unicodeobject.h
@@ -50,13 +50,18 @@ extern "C" {
Py_UNICODE_ISDIGIT(ch) || \
Py_UNICODE_ISNUMERIC(ch))
-#define Py_UNICODE_COPY(target, source, length) \
- memcpy((target), (source), (length)*sizeof(Py_UNICODE))
+Py_DEPRECATED(3.3) static inline void
+Py_UNICODE_COPY(Py_UNICODE *target, const Py_UNICODE *source, Py_ssize_t length) {
+ memcpy(target, source, (size_t)(length) * sizeof(Py_UNICODE));
+}
-#define Py_UNICODE_FILL(target, value, length) \
- do {Py_ssize_t i_; Py_UNICODE *t_ = (target); Py_UNICODE v_ = (value);\
- for (i_ = 0; i_ < (length); i_++) t_[i_] = v_;\
- } while (0)
+Py_DEPRECATED(3.3) static inline void
+Py_UNICODE_FILL(Py_UNICODE *target, Py_UNICODE value, Py_ssize_t length) {
+ Py_ssize_t i;
+ for (i = 0; i < length; i++) {
+ target[i] = value;
+ }
+}
/* macros to work with surrogates */
#define Py_UNICODE_IS_SURROGATE(ch) (0xD800 <= (ch) && (ch) <= 0xDFFF)
@@ -71,14 +76,6 @@ extern "C" {
/* low surrogate = bottom 10 bits added to DC00 */
#define Py_UNICODE_LOW_SURROGATE(ch) (0xDC00 + ((ch) & 0x3FF))
-/* Check if substring matches at given offset. The offset must be
- valid, and the substring must not be empty. */
-
-#define Py_UNICODE_MATCH(string, offset, substring) \
- ((*((string)->wstr + (offset)) == *((substring)->wstr)) && \
- ((*((string)->wstr + (offset) + (substring)->wstr_length-1) == *((substring)->wstr + (substring)->wstr_length-1))) && \
- !memcmp((string)->wstr + (offset), (substring)->wstr, (substring)->wstr_length*sizeof(Py_UNICODE)))
-
/* --- Unicode Type ------------------------------------------------------- */
/* ASCII-only strings created through PyUnicode_New use the PyASCIIObject
@@ -251,10 +248,6 @@ PyAPI_FUNC(int) _PyUnicode_CheckConsistency(
int check_content);
/* Fast access macros */
-#define PyUnicode_WSTR_LENGTH(op) \
- (PyUnicode_IS_COMPACT_ASCII(op) ? \
- ((PyASCIIObject*)op)->length : \
- ((PyCompactUnicodeObject*)op)->wstr_length)
/* Returns the deprecated Py_UNICODE representation's size in code units
(this includes surrogate pairs as 2 units).
@@ -449,6 +442,14 @@ enum PyUnicode_Kind {
(0xffffU) : \
(0x10ffffU)))))
+Py_DEPRECATED(3.3)
+static inline Py_ssize_t _PyUnicode_get_wstr_length(PyObject *op) {
+ return PyUnicode_IS_COMPACT_ASCII(op) ?
+ ((PyASCIIObject*)op)->length :
+ ((PyCompactUnicodeObject*)op)->wstr_length;
+}
+#define PyUnicode_WSTR_LENGTH(op) _PyUnicode_get_wstr_length((PyObject*)op)
+
/* === Public API ========================================================= */
/* --- Plain Py_UNICODE --------------------------------------------------- */
@@ -547,7 +548,7 @@ PyAPI_FUNC(void) _PyUnicode_FastFill(
only allowed if u was set to NULL.
The buffer is copied into the new object. */
-/* Py_DEPRECATED(3.3) */ PyAPI_FUNC(PyObject*) PyUnicode_FromUnicode(
+Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject*) PyUnicode_FromUnicode(
const Py_UNICODE *u, /* Unicode buffer */
Py_ssize_t size /* size of buffer */
);
@@ -576,13 +577,13 @@ PyAPI_FUNC(Py_UCS4) _PyUnicode_FindMaxChar (
Py_UNICODE buffer.
If the wchar_t/Py_UNICODE representation is not yet available, this
function will calculate it. */
-/* Py_DEPRECATED(3.3) */ PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode(
+Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicode(
PyObject *unicode /* Unicode object */
);
/* Similar to PyUnicode_AsUnicode(), but raises a ValueError if the string
contains null characters. */
-PyAPI_FUNC(const Py_UNICODE *) _PyUnicode_AsUnicode(
+Py_DEPRECATED(3.3) PyAPI_FUNC(const Py_UNICODE *) _PyUnicode_AsUnicode(
PyObject *unicode /* Unicode object */
);
@@ -591,7 +592,7 @@ PyAPI_FUNC(const Py_UNICODE *) _PyUnicode_AsUnicode(
If the wchar_t/Py_UNICODE representation is not yet available, this
function will calculate it. */
-/* Py_DEPRECATED(3.3) */ PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicodeAndSize(
+Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE *) PyUnicode_AsUnicodeAndSize(
PyObject *unicode, /* Unicode object */
Py_ssize_t *size /* location where to save the length */
);
@@ -726,12 +727,6 @@ PyAPI_FUNC(int) _PyUnicode_FormatAdvancedWriter(
Py_ssize_t start,
Py_ssize_t end);
-/* --- wchar_t support for platforms which support it --------------------- */
-
-#ifdef HAVE_WCHAR_H
-PyAPI_FUNC(void*) _PyUnicode_AsKind(PyObject *s, unsigned int kind);
-#endif
-
/* --- Manage the default encoding ---------------------------------------- */
/* Returns a pointer to the default encoding (UTF-8) of the
@@ -746,12 +741,6 @@ PyAPI_FUNC(void*) _PyUnicode_AsKind(PyObject *s, unsigned int kind);
_PyUnicode_AsStringAndSize is a #define for PyUnicode_AsUTF8AndSize to
support the previous internal function with the same behaviour.
-
- *** This API is for interpreter INTERNAL USE ONLY and will likely
- *** be removed or changed in the future.
-
- *** If you need to access the Unicode object as UTF-8 bytes string,
- *** please use PyUnicode_AsUTF8String() instead.
*/
PyAPI_FUNC(const char *) PyUnicode_AsUTF8AndSize(
@@ -990,7 +979,7 @@ Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject*) PyUnicode_EncodeMBCS(
*/
-/* Py_DEPRECATED(3.3) */ PyAPI_FUNC(int) PyUnicode_EncodeDecimal(
+Py_DEPRECATED(3.3) PyAPI_FUNC(int) PyUnicode_EncodeDecimal(
Py_UNICODE *s, /* Unicode buffer */
Py_ssize_t length, /* Number of Py_UNICODE chars to encode */
char *output, /* Output buffer; must have size >= length */
@@ -1003,7 +992,7 @@ Py_DEPRECATED(3.3) PyAPI_FUNC(PyObject*) PyUnicode_EncodeMBCS(
Returns a new Unicode string on success, NULL on failure.
*/
-/* Py_DEPRECATED(3.3) */
+Py_DEPRECATED(3.3)
PyAPI_FUNC(PyObject*) PyUnicode_TransformDecimalToASCII(
Py_UNICODE *s, /* Unicode buffer */
Py_ssize_t length /* Number of Py_UNICODE chars to transform */
@@ -1227,13 +1216,13 @@ Py_DEPRECATED(3.3) PyAPI_FUNC(Py_UNICODE*) PyUnicode_AsUnicodeCopy(
/* Return an interned Unicode object for an Identifier; may fail if there is no memory.*/
PyAPI_FUNC(PyObject*) _PyUnicode_FromId(_Py_Identifier*);
-/* Clear all static strings. */
-PyAPI_FUNC(void) _PyUnicode_ClearStaticStrings(void);
/* Fast equality check when the inputs are known to be exact unicode types
and where the hash values are equal (i.e. a very probable match) */
PyAPI_FUNC(int) _PyUnicode_EQ(PyObject *, PyObject *);
+PyAPI_FUNC(Py_ssize_t) _PyUnicode_ScanIdentifier(PyObject *);
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/datetime.h b/x64/include/datetime.h
index 00507cb..5d9f255 100644
--- a/x64/include/datetime.h
+++ b/x64/include/datetime.h
@@ -196,19 +196,19 @@ static PyDateTime_CAPI *PyDateTimeAPI = NULL;
/* Macros for type checking when not building the Python core. */
#define PyDate_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->DateType)
-#define PyDate_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->DateType)
+#define PyDate_CheckExact(op) Py_IS_TYPE(op, PyDateTimeAPI->DateType)
#define PyDateTime_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->DateTimeType)
-#define PyDateTime_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->DateTimeType)
+#define PyDateTime_CheckExact(op) Py_IS_TYPE(op, PyDateTimeAPI->DateTimeType)
#define PyTime_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->TimeType)
-#define PyTime_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->TimeType)
+#define PyTime_CheckExact(op) Py_IS_TYPE(op, PyDateTimeAPI->TimeType)
#define PyDelta_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->DeltaType)
-#define PyDelta_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->DeltaType)
+#define PyDelta_CheckExact(op) Py_IS_TYPE(op, PyDateTimeAPI->DeltaType)
#define PyTZInfo_Check(op) PyObject_TypeCheck(op, PyDateTimeAPI->TZInfoType)
-#define PyTZInfo_CheckExact(op) (Py_TYPE(op) == PyDateTimeAPI->TZInfoType)
+#define PyTZInfo_CheckExact(op) Py_IS_TYPE(op, PyDateTimeAPI->TZInfoType)
/* Macros for accessing constructors in a simplified fashion. */
diff --git a/x64/include/dictobject.h b/x64/include/dictobject.h
index b37573a..c88b0aa 100644
--- a/x64/include/dictobject.h
+++ b/x64/include/dictobject.h
@@ -16,7 +16,7 @@ PyAPI_DATA(PyTypeObject) PyDict_Type;
#define PyDict_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_DICT_SUBCLASS)
-#define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
+#define PyDict_CheckExact(op) Py_IS_TYPE(op, &PyDict_Type)
PyAPI_FUNC(PyObject *) PyDict_New(void);
PyAPI_FUNC(PyObject *) PyDict_GetItem(PyObject *mp, PyObject *key);
diff --git a/x64/include/errcode.h b/x64/include/errcode.h
index b37cd26..790518b 100644
--- a/x64/include/errcode.h
+++ b/x64/include/errcode.h
@@ -29,7 +29,6 @@ extern "C" {
#define E_EOFS 23 /* EOF in triple-quoted string */
#define E_EOLS 24 /* EOL in single-quoted string */
#define E_LINECONT 25 /* Unexpected characters after a line continuation */
-#define E_IDENTIFIER 26 /* Invalid characters in identifier */
#define E_BADSINGLE 27 /* Ill-formed single statement input */
#ifdef __cplusplus
diff --git a/x64/include/exports.h b/x64/include/exports.h
new file mode 100644
index 0000000..fc1a5c5
--- /dev/null
+++ b/x64/include/exports.h
@@ -0,0 +1,30 @@
+#ifndef Py_EXPORTS_H
+#define Py_EXPORTS_H
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+ #define Py_IMPORTED_SYMBOL __declspec(dllimport)
+ #define Py_EXPORTED_SYMBOL __declspec(dllexport)
+ #define Py_LOCAL_SYMBOL
+#else
+/*
+ * If we only ever used gcc >= 5, we could use __has_attribute(visibility)
+ * as a cross-platform way to determine if visibility is supported. However,
+ * we may still need to support gcc >= 4, as some Ubuntu LTS and CentOS versions
+ * have 4 < gcc < 5.
+ */
+ #ifndef __has_attribute
+ #define __has_attribute(x) 0 // Compatibility with non-clang compilers.
+ #endif
+ #if (defined(__GNUC__) && (__GNUC__ >= 4)) ||\
+ (defined(__clang__) && __has_attribute(visibility))
+ #define Py_IMPORTED_SYMBOL __attribute__ ((visibility ("default")))
+ #define Py_EXPORTED_SYMBOL __attribute__ ((visibility ("default")))
+ #define Py_LOCAL_SYMBOL __attribute__ ((visibility ("hidden")))
+ #else
+ #define Py_IMPORTED_SYMBOL
+ #define Py_EXPORTED_SYMBOL
+ #define Py_LOCAL_SYMBOL
+ #endif
+#endif
+
+#endif /* Py_EXPORTS_H */
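A hedged sketch of how a header would consume these macros so one declaration works both when building a shared library and when linking against it; MYLIB_BUILD is a placeholder build-system define:

    #include "exports.h"

    #ifdef MYLIB_BUILD
    #  define MYLIB_API Py_EXPORTED_SYMBOL
    #else
    #  define MYLIB_API Py_IMPORTED_SYMBOL
    #endif

    MYLIB_API int mylib_frobnicate(int value);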
diff --git a/x64/include/fileobject.h b/x64/include/fileobject.h
index 456887e..6ec2994 100644
--- a/x64/include/fileobject.h
+++ b/x64/include/fileobject.h
@@ -20,8 +20,15 @@ PyAPI_FUNC(int) PyObject_AsFileDescriptor(PyObject *);
If non-NULL, this is different than the default encoding for strings
*/
PyAPI_DATA(const char *) Py_FileSystemDefaultEncoding;
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000
+PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors;
+#endif
PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding;
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
+PyAPI_DATA(int) Py_UTF8Mode;
+#endif
+
/* A routine to check if a file descriptor can be select()-ed. */
#ifdef _MSC_VER
/* On Windows, any socket fd can be select()-ed, no matter how high */
diff --git a/x64/include/fileutils.h b/x64/include/fileutils.h
index 359dd0a..12bd071 100644
--- a/x64/include/fileutils.h
+++ b/x64/include/fileutils.h
@@ -18,167 +18,12 @@ PyAPI_FUNC(char*) _Py_EncodeLocaleRaw(
size_t *error_pos);
#endif
-
-#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03080000
-typedef enum {
- _Py_ERROR_UNKNOWN=0,
- _Py_ERROR_STRICT,
- _Py_ERROR_SURROGATEESCAPE,
- _Py_ERROR_REPLACE,
- _Py_ERROR_IGNORE,
- _Py_ERROR_BACKSLASHREPLACE,
- _Py_ERROR_SURROGATEPASS,
- _Py_ERROR_XMLCHARREFREPLACE,
- _Py_ERROR_OTHER
-} _Py_error_handler;
-
-PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors);
-
-PyAPI_FUNC(int) _Py_DecodeLocaleEx(
- const char *arg,
- wchar_t **wstr,
- size_t *wlen,
- const char **reason,
- int current_locale,
- _Py_error_handler errors);
-
-PyAPI_FUNC(int) _Py_EncodeLocaleEx(
- const wchar_t *text,
- char **str,
- size_t *error_pos,
- const char **reason,
- int current_locale,
- _Py_error_handler errors);
-#endif
-
#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _Py_device_encoding(int);
-
-#if defined(MS_WINDOWS) || defined(__APPLE__)
- /* On Windows, the count parameter of read() is an int (bpo-9015, bpo-9611).
- On macOS 10.13, read() and write() with more than INT_MAX bytes
- fail with EINVAL (bpo-24658). */
-# define _PY_READ_MAX INT_MAX
-# define _PY_WRITE_MAX INT_MAX
-#else
- /* write() should truncate the input to PY_SSIZE_T_MAX bytes,
- but it's safer to do it ourself to have a portable behaviour */
-# define _PY_READ_MAX PY_SSIZE_T_MAX
-# define _PY_WRITE_MAX PY_SSIZE_T_MAX
-#endif
-
-#ifdef MS_WINDOWS
-struct _Py_stat_struct {
- unsigned long st_dev;
- uint64_t st_ino;
- unsigned short st_mode;
- int st_nlink;
- int st_uid;
- int st_gid;
- unsigned long st_rdev;
- __int64 st_size;
- time_t st_atime;
- int st_atime_nsec;
- time_t st_mtime;
- int st_mtime_nsec;
- time_t st_ctime;
- int st_ctime_nsec;
- unsigned long st_file_attributes;
- unsigned long st_reparse_tag;
-};
-#else
-# define _Py_stat_struct stat
-#endif
-
-PyAPI_FUNC(int) _Py_fstat(
- int fd,
- struct _Py_stat_struct *status);
-
-PyAPI_FUNC(int) _Py_fstat_noraise(
- int fd,
- struct _Py_stat_struct *status);
-
-PyAPI_FUNC(int) _Py_stat(
- PyObject *path,
- struct stat *status);
-
-PyAPI_FUNC(int) _Py_open(
- const char *pathname,
- int flags);
-
-PyAPI_FUNC(int) _Py_open_noraise(
- const char *pathname,
- int flags);
-
-PyAPI_FUNC(FILE *) _Py_wfopen(
- const wchar_t *path,
- const wchar_t *mode);
-
-PyAPI_FUNC(FILE*) _Py_fopen(
- const char *pathname,
- const char *mode);
-
-PyAPI_FUNC(FILE*) _Py_fopen_obj(
- PyObject *path,
- const char *mode);
-
-PyAPI_FUNC(Py_ssize_t) _Py_read(
- int fd,
- void *buf,
- size_t count);
-
-PyAPI_FUNC(Py_ssize_t) _Py_write(
- int fd,
- const void *buf,
- size_t count);
-
-PyAPI_FUNC(Py_ssize_t) _Py_write_noraise(
- int fd,
- const void *buf,
- size_t count);
-
-#ifdef HAVE_READLINK
-PyAPI_FUNC(int) _Py_wreadlink(
- const wchar_t *path,
- wchar_t *buf,
- /* Number of characters of 'buf' buffer
- including the trailing NUL character */
- size_t buflen);
-#endif
-
-#ifdef HAVE_REALPATH
-PyAPI_FUNC(wchar_t*) _Py_wrealpath(
- const wchar_t *path,
- wchar_t *resolved_path,
- /* Number of characters of 'resolved_path' buffer
- including the trailing NUL character */
- size_t resolved_path_len);
+# define Py_CPYTHON_FILEUTILS_H
+# include "cpython/fileutils.h"
+# undef Py_CPYTHON_FILEUTILS_H
#endif
-PyAPI_FUNC(wchar_t*) _Py_wgetcwd(
- wchar_t *buf,
- /* Number of characters of 'buf' buffer
- including the trailing NUL character */
- size_t buflen);
-
-PyAPI_FUNC(int) _Py_get_inheritable(int fd);
-
-PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable,
- int *atomic_flag_works);
-
-PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable,
- int *atomic_flag_works);
-
-PyAPI_FUNC(int) _Py_dup(int fd);
-
-#ifndef MS_WINDOWS
-PyAPI_FUNC(int) _Py_get_blocking(int fd);
-
-PyAPI_FUNC(int) _Py_set_blocking(int fd, int blocking);
-#endif /* !MS_WINDOWS */
-
-#endif /* Py_LIMITED_API */
-
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/floatobject.h b/x64/include/floatobject.h
index f1044d6..e994aa8 100644
--- a/x64/include/floatobject.h
+++ b/x64/include/floatobject.h
@@ -21,7 +21,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyFloat_Type;
#define PyFloat_Check(op) PyObject_TypeCheck(op, &PyFloat_Type)
-#define PyFloat_CheckExact(op) (Py_TYPE(op) == &PyFloat_Type)
+#define PyFloat_CheckExact(op) Py_IS_TYPE(op, &PyFloat_Type)
#ifdef Py_NAN
#define Py_RETURN_NAN return PyFloat_FromDouble(Py_NAN)
@@ -88,15 +88,6 @@ PyAPI_FUNC(int) _PyFloat_Pack2(double x, unsigned char *p, int le);
PyAPI_FUNC(int) _PyFloat_Pack4(double x, unsigned char *p, int le);
PyAPI_FUNC(int) _PyFloat_Pack8(double x, unsigned char *p, int le);
-/* Needed for the old way for marshal to store a floating point number.
- Returns the string length copied into p, -1 on error.
- */
-PyAPI_FUNC(int) _PyFloat_Repr(double x, char *p, size_t len);
-
-/* Used to get the important decimal digits of a double */
-PyAPI_FUNC(int) _PyFloat_Digits(char *buf, double v, int *signum);
-PyAPI_FUNC(void) _PyFloat_DigitsInit(void);
-
/* The unpack routines read 2, 4 or 8 bytes, starting at p. le is a bool
* argument, true if the string is in little-endian format (exponent
* last, at p+1, p+3 or p+7), false if big-endian (exponent first, at p).
@@ -109,9 +100,6 @@ PyAPI_FUNC(double) _PyFloat_Unpack2(const unsigned char *p, int le);
PyAPI_FUNC(double) _PyFloat_Unpack4(const unsigned char *p, int le);
PyAPI_FUNC(double) _PyFloat_Unpack8(const unsigned char *p, int le);
-/* free list api */
-PyAPI_FUNC(int) PyFloat_ClearFreeList(void);
-
PyAPI_FUNC(void) _PyFloat_DebugMallocStats(FILE* out);
/* Format the object based on the format_spec, as defined in PEP 3101
diff --git a/x64/include/frameobject.h b/x64/include/frameobject.h
index 3bad86a..c118af1 100644
--- a/x64/include/frameobject.h
+++ b/x64/include/frameobject.h
@@ -1,92 +1,20 @@
/* Frame object interface */
-#ifndef Py_LIMITED_API
#ifndef Py_FRAMEOBJECT_H
#define Py_FRAMEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
-typedef struct {
- int b_type; /* what kind of block this is */
- int b_handler; /* where to jump to find handler */
- int b_level; /* value stack level to pop to */
-} PyTryBlock;
-
-typedef struct _frame {
- PyObject_VAR_HEAD
- struct _frame *f_back; /* previous frame, or NULL */
- PyCodeObject *f_code; /* code segment */
- PyObject *f_builtins; /* builtin symbol table (PyDictObject) */
- PyObject *f_globals; /* global symbol table (PyDictObject) */
- PyObject *f_locals; /* local symbol table (any mapping) */
- PyObject **f_valuestack; /* points after the last local */
- /* Next free slot in f_valuestack. Frame creation sets to f_valuestack.
- Frame evaluation usually NULLs it, but a frame that yields sets it
- to the current stack top. */
- PyObject **f_stacktop;
- PyObject *f_trace; /* Trace function */
- char f_trace_lines; /* Emit per-line trace events? */
- char f_trace_opcodes; /* Emit per-opcode trace events? */
-
- /* Borrowed reference to a generator, or NULL */
- PyObject *f_gen;
-
- int f_lasti; /* Last instruction if called */
- /* Call PyFrame_GetLineNumber() instead of reading this field
- directly. As of 2.3 f_lineno is only valid when tracing is
- active (i.e. when f_trace is set). At other times we use
- PyCode_Addr2Line to calculate the line from the current
- bytecode index. */
- int f_lineno; /* Current line number */
- int f_iblock; /* index in f_blockstack */
- char f_executing; /* whether the frame is still executing */
- PyTryBlock f_blockstack[CO_MAXBLOCKS]; /* for try and loop blocks */
- PyObject *f_localsplus[1]; /* locals+stack, dynamically sized */
-} PyFrameObject;
-
-
-/* Standard object interface */
-
-PyAPI_DATA(PyTypeObject) PyFrame_Type;
-
-#define PyFrame_Check(op) (Py_TYPE(op) == &PyFrame_Type)
-
-PyAPI_FUNC(PyFrameObject *) PyFrame_New(PyThreadState *, PyCodeObject *,
- PyObject *, PyObject *);
-
-/* only internal use */
-PyFrameObject* _PyFrame_New_NoTrack(PyThreadState *, PyCodeObject *,
- PyObject *, PyObject *);
-
-
-/* The rest of the interface is specific for frame objects */
+#include "pyframe.h"
-/* Block management functions */
-
-PyAPI_FUNC(void) PyFrame_BlockSetup(PyFrameObject *, int, int, int);
-PyAPI_FUNC(PyTryBlock *) PyFrame_BlockPop(PyFrameObject *);
-
-/* Extend the value stack */
-
-PyAPI_FUNC(PyObject **) PyFrame_ExtendStack(PyFrameObject *, int, int);
-
-/* Conversions between "fast locals" and locals in dictionary */
-
-PyAPI_FUNC(void) PyFrame_LocalsToFast(PyFrameObject *, int);
-
-PyAPI_FUNC(int) PyFrame_FastToLocalsWithError(PyFrameObject *f);
-PyAPI_FUNC(void) PyFrame_FastToLocals(PyFrameObject *);
-
-PyAPI_FUNC(int) PyFrame_ClearFreeList(void);
-
-PyAPI_FUNC(void) _PyFrame_DebugMallocStats(FILE *out);
-
-/* Return the line of code the frame is currently executing. */
-PyAPI_FUNC(int) PyFrame_GetLineNumber(PyFrameObject *);
+#ifndef Py_LIMITED_API
+# define Py_CPYTHON_FRAMEOBJECT_H
+# include "cpython/frameobject.h"
+# undef Py_CPYTHON_FRAMEOBJECT_H
+#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_FRAMEOBJECT_H */
-#endif /* Py_LIMITED_API */
diff --git a/x64/include/funcobject.h b/x64/include/funcobject.h
index e563a74..c5cc9d2 100644
--- a/x64/include/funcobject.h
+++ b/x64/include/funcobject.h
@@ -43,7 +43,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyFunction_Type;
-#define PyFunction_Check(op) (Py_TYPE(op) == &PyFunction_Type)
+#define PyFunction_Check(op) Py_IS_TYPE(op, &PyFunction_Type)
PyAPI_FUNC(PyObject *) PyFunction_New(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_NewWithQualName(PyObject *, PyObject *, PyObject *);
@@ -60,12 +60,6 @@ PyAPI_FUNC(PyObject *) PyFunction_GetAnnotations(PyObject *);
PyAPI_FUNC(int) PyFunction_SetAnnotations(PyObject *, PyObject *);
#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _PyFunction_FastCallDict(
- PyObject *func,
- PyObject *const *args,
- Py_ssize_t nargs,
- PyObject *kwargs);
-
PyAPI_FUNC(PyObject *) _PyFunction_Vectorcall(
PyObject *func,
PyObject *const *stack,
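Py_IS_TYPE() is new in 3.9 and replaces direct Py_TYPE() comparisons
throughout this patch; a sketch of the pattern (my example, not from the
patch):

#include <Python.h>

/* Equivalent to the removed (Py_TYPE(op) == &PyFunction_Type) check,
   written with the 3.9 helper. */
static int is_function(PyObject *op)
{
    return Py_IS_TYPE(op, &PyFunction_Type);
}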
diff --git a/x64/include/genericaliasobject.h b/x64/include/genericaliasobject.h
new file mode 100644
index 0000000..cf00297
--- /dev/null
+++ b/x64/include/genericaliasobject.h
@@ -0,0 +1,14 @@
+// Implementation of PEP 585: support list[int] etc.
+#ifndef Py_GENERICALIASOBJECT_H
+#define Py_GENERICALIASOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+PyAPI_FUNC(PyObject *) Py_GenericAlias(PyObject *, PyObject *);
+PyAPI_DATA(PyTypeObject) Py_GenericAliasType;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_GENERICALIASOBJECT_H */
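A usage sketch (mine, not from the patch): Py_GenericAlias(origin, args)
builds the object that list[int] evaluates to under PEP 585; args may be a
single type or a tuple of types.

#include <Python.h>

/* Returns the equivalent of evaluating list[int] in Python 3.9. */
static PyObject *make_list_of_int(void)
{
    return Py_GenericAlias((PyObject *)&PyList_Type,
                           (PyObject *)&PyLong_Type);
}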
diff --git a/x64/include/genobject.h b/x64/include/genobject.h
index 59ede28..8ffd156 100644
--- a/x64/include/genobject.h
+++ b/x64/include/genobject.h
@@ -10,14 +10,12 @@ extern "C" {
#include "pystate.h" /* _PyErr_StackItem */
-struct _frame; /* Avoid including frameobject.h */
-
/* _PyGenObject_HEAD defines the initial segment of generator
and coroutine objects. */
#define _PyGenObject_HEAD(prefix) \
PyObject_HEAD \
/* Note: gi_frame can be NULL if the generator is "finished" */ \
- struct _frame *prefix##_frame; \
+ PyFrameObject *prefix##_frame; \
/* True if generator is being executed. */ \
char prefix##_running; \
/* The code object backing the generator */ \
@@ -38,12 +36,11 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyGen_Type;
#define PyGen_Check(op) PyObject_TypeCheck(op, &PyGen_Type)
-#define PyGen_CheckExact(op) (Py_TYPE(op) == &PyGen_Type)
+#define PyGen_CheckExact(op) Py_IS_TYPE(op, &PyGen_Type)
-PyAPI_FUNC(PyObject *) PyGen_New(struct _frame *);
-PyAPI_FUNC(PyObject *) PyGen_NewWithQualName(struct _frame *,
+PyAPI_FUNC(PyObject *) PyGen_New(PyFrameObject *);
+PyAPI_FUNC(PyObject *) PyGen_NewWithQualName(PyFrameObject *,
PyObject *name, PyObject *qualname);
-PyAPI_FUNC(int) PyGen_NeedsFinalizing(PyGenObject *);
PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *);
PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **);
PyAPI_FUNC(PyObject *) _PyGen_Send(PyGenObject *, PyObject *);
@@ -59,11 +56,9 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyCoro_Type;
PyAPI_DATA(PyTypeObject) _PyCoroWrapper_Type;
-PyAPI_DATA(PyTypeObject) _PyAIterWrapper_Type;
-
-#define PyCoro_CheckExact(op) (Py_TYPE(op) == &PyCoro_Type)
+#define PyCoro_CheckExact(op) Py_IS_TYPE(op, &PyCoro_Type)
PyObject *_PyCoro_GetAwaitableIter(PyObject *o);
-PyAPI_FUNC(PyObject *) PyCoro_New(struct _frame *,
+PyAPI_FUNC(PyObject *) PyCoro_New(PyFrameObject *,
PyObject *name, PyObject *qualname);
/* Asynchronous Generators */
@@ -89,15 +84,13 @@ PyAPI_DATA(PyTypeObject) _PyAsyncGenASend_Type;
PyAPI_DATA(PyTypeObject) _PyAsyncGenWrappedValue_Type;
PyAPI_DATA(PyTypeObject) _PyAsyncGenAThrow_Type;
-PyAPI_FUNC(PyObject *) PyAsyncGen_New(struct _frame *,
+PyAPI_FUNC(PyObject *) PyAsyncGen_New(PyFrameObject *,
PyObject *name, PyObject *qualname);
-#define PyAsyncGen_CheckExact(op) (Py_TYPE(op) == &PyAsyncGen_Type)
+#define PyAsyncGen_CheckExact(op) Py_IS_TYPE(op, &PyAsyncGen_Type)
PyObject *_PyAsyncGenValueWrapperNew(PyObject *);
-int PyAsyncGen_ClearFreeLists(void);
-
#endif
#undef _PyGenObject_HEAD
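One behavioral note worth a sketch (mine, based on the documented C API and
unchanged by the typedef swap above): PyGen_New() steals the reference to
the frame it is given.

#include <Python.h>

static PyObject *wrap_frame_in_generator(PyFrameObject *f)
{
    /* PyGen_New() steals the reference to f: the caller must own it and
       must not Py_DECREF(f) afterwards. */
    return PyGen_New(f);
}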
diff --git a/x64/include/import.h b/x64/include/import.h
index 13c6149..aeef3ef 100644
--- a/x64/include/import.h
+++ b/x64/include/import.h
@@ -1,4 +1,3 @@
-
/* Module definition and import interface */
#ifndef Py_IMPORT_H
@@ -7,9 +6,6 @@
extern "C" {
#endif
-#ifndef Py_LIMITED_API
-PyMODINIT_FUNC PyInit__imp(void);
-#endif /* !Py_LIMITED_API */
PyAPI_FUNC(long) PyImport_GetMagicNumber(void);
PyAPI_FUNC(const char *) PyImport_GetMagicTag(void);
PyAPI_FUNC(PyObject *) PyImport_ExecCodeModule(
@@ -39,14 +35,6 @@ PyAPI_FUNC(PyObject *) PyImport_GetModuleDict(void);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
PyAPI_FUNC(PyObject *) PyImport_GetModule(PyObject *name);
#endif
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(int) _PyImport_IsInitialized(PyInterpreterState *);
-PyAPI_FUNC(PyObject *) _PyImport_GetModuleId(struct _Py_Identifier *name);
-PyAPI_FUNC(PyObject *) _PyImport_AddModuleObject(PyObject *name,
- PyObject *modules);
-PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module);
-PyAPI_FUNC(int) _PyImport_SetModuleString(const char *name, PyObject* module);
-#endif
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(PyObject *) PyImport_AddModuleObject(
PyObject *name
@@ -84,7 +72,6 @@ PyAPI_FUNC(PyObject *) PyImport_ImportModuleLevelObject(
PyAPI_FUNC(PyObject *) PyImport_GetImporter(PyObject *path);
PyAPI_FUNC(PyObject *) PyImport_Import(PyObject *name);
PyAPI_FUNC(PyObject *) PyImport_ReloadModule(PyObject *m);
-PyAPI_FUNC(void) PyImport_Cleanup(void);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(int) PyImport_ImportFrozenModuleObject(
PyObject *name
@@ -94,53 +81,15 @@ PyAPI_FUNC(int) PyImport_ImportFrozenModule(
const char *name /* UTF-8 encoded string */
);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(void) _PyImport_AcquireLock(void);
-PyAPI_FUNC(int) _PyImport_ReleaseLock(void);
-
-PyAPI_FUNC(void) _PyImport_ReInitLock(void);
-
-PyAPI_FUNC(PyObject *) _PyImport_FindBuiltin(
- const char *name, /* UTF-8 encoded string */
- PyObject *modules
- );
-PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObject(PyObject *, PyObject *);
-PyAPI_FUNC(PyObject *) _PyImport_FindExtensionObjectEx(PyObject *, PyObject *,
- PyObject *);
-PyAPI_FUNC(int) _PyImport_FixupBuiltin(
- PyObject *mod,
- const char *name, /* UTF-8 encoded string */
- PyObject *modules
- );
-PyAPI_FUNC(int) _PyImport_FixupExtensionObject(PyObject*, PyObject *,
- PyObject *, PyObject *);
-
-struct _inittab {
- const char *name; /* ASCII encoded string */
- PyObject* (*initfunc)(void);
-};
-PyAPI_DATA(struct _inittab *) PyImport_Inittab;
-PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab);
-#endif /* Py_LIMITED_API */
-
-PyAPI_DATA(PyTypeObject) PyNullImporter_Type;
-
PyAPI_FUNC(int) PyImport_AppendInittab(
const char *name, /* ASCII encoded string */
PyObject* (*initfunc)(void)
);
#ifndef Py_LIMITED_API
-struct _frozen {
- const char *name; /* ASCII encoded string */
- const unsigned char *code;
- int size;
-};
-
-/* Embedding apps may change this pointer to point to their favorite
- collection of frozen modules: */
-
-PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules;
+# define Py_CPYTHON_IMPORT_H
+# include "cpython/import.h"
+# undef Py_CPYTHON_IMPORT_H
#endif
#ifdef __cplusplus
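Note that struct _inittab and PyImport_ExtendInittab() moved behind
cpython/import.h, while PyImport_AppendInittab() remains here. A
self-contained embedding sketch (my example; the "demo" module name is made
up) registering a built-in module before Py_Initialize():

#include <Python.h>

static struct PyModuleDef demo_def = {
    PyModuleDef_HEAD_INIT, "demo", "a made-up demo module", -1, NULL
};

static PyObject *PyInit_demo(void)
{
    return PyModule_Create(&demo_def);
}

int main(void)
{
    /* Inittab entries must be added before Py_Initialize(). */
    PyImport_AppendInittab("demo", PyInit_demo);
    Py_Initialize();
    PyRun_SimpleString("import demo; print(demo.__doc__)");
    if (Py_FinalizeEx() < 0) {
        return 120;
    }
    return 0;
}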
diff --git a/x64/include/internal/pegen_interface.h b/x64/include/internal/pegen_interface.h
new file mode 100644
index 0000000..ee4c77e
--- /dev/null
+++ b/x64/include/internal/pegen_interface.h
@@ -0,0 +1,46 @@
+#ifndef Py_PEGENINTERFACE
+#define Py_PEGENINTERFACE
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "Python.h"
+#include "Python-ast.h"
+
+PyAPI_FUNC(mod_ty) PyPegen_ASTFromString(
+ const char *str,
+ const char *filename,
+ int mode,
+ PyCompilerFlags *flags,
+ PyArena *arena);
+PyAPI_FUNC(mod_ty) PyPegen_ASTFromStringObject(
+ const char *str,
+ PyObject* filename,
+ int mode,
+ PyCompilerFlags *flags,
+ PyArena *arena);
+PyAPI_FUNC(mod_ty) PyPegen_ASTFromFileObject(
+ FILE *fp,
+ PyObject *filename_ob,
+ int mode,
+ const char *enc,
+ const char *ps1,
+ const char *ps2,
+ PyCompilerFlags *flags,
+ int *errcode,
+ PyArena *arena);
+PyAPI_FUNC(mod_ty) PyPegen_ASTFromFilename(
+ const char *filename,
+ int mode,
+ PyCompilerFlags *flags,
+ PyArena *arena);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_PEGENINTERFACE*/
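These PyPegen_* entry points are core-internal (note the Py_BUILD_CORE
guard above). From outside the core, the nearest public route to the new
PEG parser is Py_CompileString(); a minimal sketch (my example, not part of
this patch):

#include <Python.h>

int main(void)
{
    Py_Initialize();
    /* In 3.9 this compiles through the PEG parser by default. */
    PyObject *code = Py_CompileString("x = 1 + 2", "<demo>", Py_file_input);
    if (code == NULL) {
        PyErr_Print();
    }
    Py_XDECREF(code);
    if (Py_FinalizeEx() < 0) {
        return 120;
    }
    return 0;
}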
diff --git a/x64/include/internal/pycore_abstract.h b/x64/include/internal/pycore_abstract.h
new file mode 100644
index 0000000..b791bf2
--- /dev/null
+++ b/x64/include/internal/pycore_abstract.h
@@ -0,0 +1,22 @@
+#ifndef Py_INTERNAL_ABSTRACT_H
+#define Py_INTERNAL_ABSTRACT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+// Fast inlined version of PyIndex_Check()
+static inline int
+_PyIndex_Check(PyObject *obj)
+{
+ PyNumberMethods *tp_as_number = Py_TYPE(obj)->tp_as_number;
+ return (tp_as_number != NULL && tp_as_number->nb_index != NULL);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_ABSTRACT_H */
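Outside the core the same test is exposed as PyIndex_Check(); a standalone
replica (my sketch) that makes the slot lookup explicit:

#include <Python.h>

/* Mirrors _PyIndex_Check() above: an object is index-like iff its type
   fills the nb_index slot. */
static int index_check(PyObject *obj)
{
    PyNumberMethods *nb = Py_TYPE(obj)->tp_as_number;
    return nb != NULL && nb->nb_index != NULL;
}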
diff --git a/x64/include/internal/pycore_atomic.h b/x64/include/internal/pycore_atomic.h
index 336bc3f..1d5c562 100644
--- a/x64/include/internal/pycore_atomic.h
+++ b/x64/include/internal/pycore_atomic.h
@@ -8,8 +8,7 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "dynamic_annotations.h"
-
+#include "dynamic_annotations.h" /* _Py_ANNOTATE_MEMORY_ORDER */
#include "pyconfig.h"
#if defined(HAVE_STD_ATOMIC)
diff --git a/x64/include/bytes_methods.h b/x64/include/internal/pycore_bytes_methods.h
index 8434a50..11e8ab2 100644
--- a/x64/include/bytes_methods.h
+++ b/x64/include/internal/pycore_bytes_methods.h
@@ -2,6 +2,10 @@
#ifndef Py_BYTES_CTYPE_H
#define Py_BYTES_CTYPE_H
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
/*
* The internal implementation behind PyBytes (bytes) and PyByteArray (bytearray)
* methods of the given names, they operate on ASCII byte strings.
diff --git a/x64/include/internal/pycore_byteswap.h b/x64/include/internal/pycore_byteswap.h
new file mode 100644
index 0000000..2b20fc6
--- /dev/null
+++ b/x64/include/internal/pycore_byteswap.h
@@ -0,0 +1,88 @@
+/* Bytes swap functions, reverse order of bytes:
+
+ - _Py_bswap16(uint16_t)
+ - _Py_bswap32(uint32_t)
+ - _Py_bswap64(uint64_t)
+*/
+
+#ifndef Py_INTERNAL_BSWAP_H
+#define Py_INTERNAL_BSWAP_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#if defined(__GNUC__) \
+ && ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
+ /* __builtin_bswap16() is available since GCC 4.8,
+ __builtin_bswap32() is available since GCC 4.3,
+ __builtin_bswap64() is available since GCC 4.3. */
+# define _PY_HAVE_BUILTIN_BSWAP
+#endif
+
+#ifdef _MSC_VER
+ /* Get _byteswap_ushort(), _byteswap_ulong(), _byteswap_uint64() */
+# include <intrin.h>
+#endif
+
+static inline uint16_t
+_Py_bswap16(uint16_t word)
+{
+#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
+ return __builtin_bswap16(word);
+#elif defined(_MSC_VER)
+ Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
+ return _byteswap_ushort(word);
+#else
+ // Portable implementation which doesn't rely on circular bit shift
+ return ( ((word & UINT16_C(0x00FF)) << 8)
+ | ((word & UINT16_C(0xFF00)) >> 8));
+#endif
+}
+
+static inline uint32_t
+_Py_bswap32(uint32_t word)
+{
+#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
+ return __builtin_bswap32(word);
+#elif defined(_MSC_VER)
+ Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
+ return _byteswap_ulong(word);
+#else
+ // Portable implementation which doesn't rely on circular bit shift
+ return ( ((word & UINT32_C(0x000000FF)) << 24)
+ | ((word & UINT32_C(0x0000FF00)) << 8)
+ | ((word & UINT32_C(0x00FF0000)) >> 8)
+ | ((word & UINT32_C(0xFF000000)) >> 24));
+#endif
+}
+
+static inline uint64_t
+_Py_bswap64(uint64_t word)
+{
+#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
+ return __builtin_bswap64(word);
+#elif defined(_MSC_VER)
+ return _byteswap_uint64(word);
+#else
+ // Portable implementation which doesn't rely on circular bit shift
+ return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
+ | ((word & UINT64_C(0x000000000000FF00)) << 40)
+ | ((word & UINT64_C(0x0000000000FF0000)) << 24)
+ | ((word & UINT64_C(0x00000000FF000000)) << 8)
+ | ((word & UINT64_C(0x000000FF00000000)) >> 8)
+ | ((word & UINT64_C(0x0000FF0000000000)) >> 24)
+ | ((word & UINT64_C(0x00FF000000000000)) >> 40)
+ | ((word & UINT64_C(0xFF00000000000000)) >> 56));
+#endif
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_BSWAP_H */
+
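A self-contained check of the portable fallback path (my test program; it
needs only <stdint.h>, whereas the header prefers the GCC builtins or the
MSVC intrinsics when available):

#include <assert.h>
#include <stdint.h>

/* Same expression as the #else branch of _Py_bswap32() above. */
static uint32_t bswap32_portable(uint32_t w)
{
    return ((w & UINT32_C(0x000000FF)) << 24)
         | ((w & UINT32_C(0x0000FF00)) << 8)
         | ((w & UINT32_C(0x00FF0000)) >> 8)
         | ((w & UINT32_C(0xFF000000)) >> 24);
}

int main(void)
{
    assert(bswap32_portable(UINT32_C(0x12345678)) == UINT32_C(0x78563412));
    /* Swapping twice is the identity. */
    assert(bswap32_portable(bswap32_portable(UINT32_C(0xDEADBEEF)))
           == UINT32_C(0xDEADBEEF));
    return 0;
}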
diff --git a/x64/include/internal/pycore_call.h b/x64/include/internal/pycore_call.h
new file mode 100644
index 0000000..f7d856a
--- /dev/null
+++ b/x64/include/internal/pycore_call.h
@@ -0,0 +1,39 @@
+#ifndef Py_INTERNAL_CALL_H
+#define Py_INTERNAL_CALL_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+PyAPI_FUNC(PyObject *) _PyObject_Call_Prepend(
+ PyThreadState *tstate,
+ PyObject *callable,
+ PyObject *obj,
+ PyObject *args,
+ PyObject *kwargs);
+
+PyAPI_FUNC(PyObject *) _PyObject_FastCallDictTstate(
+ PyThreadState *tstate,
+ PyObject *callable,
+ PyObject *const *args,
+ size_t nargsf,
+ PyObject *kwargs);
+
+PyAPI_FUNC(PyObject *) _PyObject_Call(
+ PyThreadState *tstate,
+ PyObject *callable,
+ PyObject *args,
+ PyObject *kwargs);
+
+static inline PyObject *
+_PyObject_CallNoArgTstate(PyThreadState *tstate, PyObject *func) {
+ return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_CALL_H */
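The helpers above thread an explicit tstate through every call; the public
3.9 counterparts are PyObject_CallNoArgs() and PyObject_Vectorcall(). A
hedged sketch (mine) of calling func(arg) through the vectorcall protocol:

#include <Python.h>

static PyObject *call_one(PyObject *func, PyObject *arg)
{
    /* One positional argument, no keyword names. */
    PyObject *args[1] = {arg};
    return PyObject_Vectorcall(func, args, 1, NULL);
}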
diff --git a/x64/include/internal/pycore_ceval.h b/x64/include/internal/pycore_ceval.h
index 4c1c0e2..18c8f02 100644
--- a/x64/include/internal/pycore_ceval.h
+++ b/x64/include/internal/pycore_ceval.h
@@ -8,29 +8,116 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_atomic.h"
-#include "pycore_pystate.h"
-#include "pythread.h"
-
-PyAPI_FUNC(void) _Py_FinishPendingCalls(_PyRuntimeState *runtime);
-PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);
-PyAPI_FUNC(void) _PyEval_FiniThreads(
- struct _ceval_runtime_state *ceval);
-PyAPI_FUNC(void) _PyEval_SignalReceived(
- struct _ceval_runtime_state *ceval);
+/* Forward declarations */
+struct pyruntimestate;
+struct _ceval_runtime_state;
+
+#include "pycore_interp.h" /* PyInterpreterState.eval_frame */
+
+extern void _Py_FinishPendingCalls(PyThreadState *tstate);
+extern void _PyEval_InitRuntimeState(struct _ceval_runtime_state *);
+extern int _PyEval_InitState(struct _ceval_state *ceval);
+extern void _PyEval_FiniState(struct _ceval_state *ceval);
+PyAPI_FUNC(void) _PyEval_SignalReceived(PyInterpreterState *interp);
PyAPI_FUNC(int) _PyEval_AddPendingCall(
- PyThreadState *tstate,
- struct _ceval_runtime_state *ceval,
+ PyInterpreterState *interp,
int (*func)(void *),
void *arg);
-PyAPI_FUNC(void) _PyEval_SignalAsyncExc(
- struct _ceval_runtime_state *ceval);
-PyAPI_FUNC(void) _PyEval_ReInitThreads(
- _PyRuntimeState *runtime);
+PyAPI_FUNC(void) _PyEval_SignalAsyncExc(PyThreadState *tstate);
+#ifdef HAVE_FORK
+extern void _PyEval_ReInitThreads(struct pyruntimestate *runtime);
+#endif
+PyAPI_FUNC(void) _PyEval_SetCoroutineOriginTrackingDepth(
+ PyThreadState *tstate,
+ int new_depth);
/* Private function */
void _PyEval_Fini(void);
+static inline PyObject*
+_PyEval_EvalFrame(PyThreadState *tstate, PyFrameObject *f, int throwflag)
+{
+ return tstate->interp->eval_frame(tstate, f, throwflag);
+}
+
+extern PyObject *_PyEval_EvalCode(
+ PyThreadState *tstate,
+ PyObject *_co, PyObject *globals, PyObject *locals,
+ PyObject *const *args, Py_ssize_t argcount,
+ PyObject *const *kwnames, PyObject *const *kwargs,
+ Py_ssize_t kwcount, int kwstep,
+ PyObject *const *defs, Py_ssize_t defcount,
+ PyObject *kwdefs, PyObject *closure,
+ PyObject *name, PyObject *qualname);
+
+extern int _PyEval_ThreadsInitialized(struct pyruntimestate *runtime);
+extern PyStatus _PyEval_InitGIL(PyThreadState *tstate);
+extern void _PyEval_FiniGIL(PyThreadState *tstate);
+
+extern void _PyEval_ReleaseLock(PyThreadState *tstate);
+
+
+/* --- _Py_EnterRecursiveCall() ----------------------------------------- */
+
+PyAPI_DATA(int) _Py_CheckRecursionLimit;
+
+#ifdef USE_STACKCHECK
+/* With the USE_STACKCHECK macro defined, trigger stack checks in
+ _Py_CheckRecursiveCall() on every 64th call to Py_EnterRecursiveCall. */
+static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
+ return (++tstate->recursion_depth > tstate->interp->ceval.recursion_limit
+ || ++tstate->stackcheck_counter > 64);
+}
+#else
+static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
+ return (++tstate->recursion_depth > tstate->interp->ceval.recursion_limit);
+}
+#endif
+
+PyAPI_FUNC(int) _Py_CheckRecursiveCall(
+ PyThreadState *tstate,
+ const char *where);
+
+static inline int _Py_EnterRecursiveCall(PyThreadState *tstate,
+ const char *where) {
+ return (_Py_MakeRecCheck(tstate) && _Py_CheckRecursiveCall(tstate, where));
+}
+
+static inline int _Py_EnterRecursiveCall_inline(const char *where) {
+ PyThreadState *tstate = PyThreadState_GET();
+ return _Py_EnterRecursiveCall(tstate, where);
+}
+
+#define Py_EnterRecursiveCall(where) _Py_EnterRecursiveCall_inline(where)
+
+/* Compute the "lower-water mark" for a recursion limit. When
+ * Py_LeaveRecursiveCall() is called with a recursion depth below this mark,
+ * the overflowed flag is reset to 0. */
+static inline int _Py_RecursionLimitLowerWaterMark(int limit) {
+ if (limit > 200) {
+ return (limit - 50);
+ }
+ else {
+ return (3 * (limit >> 2));
+ }
+}
+
+static inline void _Py_LeaveRecursiveCall(PyThreadState *tstate) {
+ tstate->recursion_depth--;
+ int limit = tstate->interp->ceval.recursion_limit;
+ if (tstate->recursion_depth < _Py_RecursionLimitLowerWaterMark(limit)) {
+ tstate->overflowed = 0;
+ }
+}
+
+static inline void _Py_LeaveRecursiveCall_inline(void) {
+ PyThreadState *tstate = PyThreadState_GET();
+ _Py_LeaveRecursiveCall(tstate);
+}
+
+#define Py_LeaveRecursiveCall() _Py_LeaveRecursiveCall_inline()
+
+
#ifdef __cplusplus
}
#endif
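The water-mark logic above is easy to sanity-check in isolation; a
standalone replica (my sketch) showing the hysteresis it creates for the
overflowed flag:

#include <stdio.h>

/* Mirrors _Py_RecursionLimitLowerWaterMark(): once a thread has
   overflowed, the flag is only reset after the recursion depth drops
   well below the limit, not as soon as it dips under it. */
static int lower_water_mark(int limit)
{
    return limit > 200 ? limit - 50 : 3 * (limit >> 2);
}

int main(void)
{
    printf("%d\n", lower_water_mark(1000));  /* default limit -> 950 */
    printf("%d\n", lower_water_mark(100));   /* small limit   -> 75  */
    return 0;
}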
diff --git a/x64/include/internal/pycore_context.h b/x64/include/internal/pycore_context.h
index 5e1ba0d..f665ad5 100644
--- a/x64/include/internal/pycore_context.h
+++ b/x64/include/internal/pycore_context.h
@@ -5,7 +5,7 @@
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_hamt.h"
+#include "pycore_hamt.h" /* PyHamtObject */
struct _pycontextobject {
PyObject_HEAD
diff --git a/x64/include/dtoa.h b/x64/include/internal/pycore_dtoa.h
index 9bfb625..3faf8cf 100644
--- a/x64/include/dtoa.h
+++ b/x64/include/internal/pycore_dtoa.h
@@ -1,9 +1,15 @@
-#ifndef Py_LIMITED_API
#ifndef PY_NO_SHORT_FLOAT_REPR
#ifdef __cplusplus
extern "C" {
#endif
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+/* These functions are used by modules compiled as C extensions (like math):
+ they must be exported. */
+
PyAPI_FUNC(double) _Py_dg_strtod(const char *str, char **ptr);
PyAPI_FUNC(char *) _Py_dg_dtoa(double d, int mode, int ndigits,
int *decpt, int *sign, char **rve);
@@ -11,9 +17,7 @@ PyAPI_FUNC(void) _Py_dg_freedtoa(char *s);
PyAPI_FUNC(double) _Py_dg_stdnan(int sign);
PyAPI_FUNC(double) _Py_dg_infinity(int sign);
-
#ifdef __cplusplus
}
#endif
-#endif
-#endif
+#endif /* !PY_NO_SHORT_FLOAT_REPR */
diff --git a/x64/include/internal/pycore_gc.h b/x64/include/internal/pycore_gc.h
new file mode 100644
index 0000000..0511eea
--- /dev/null
+++ b/x64/include/internal/pycore_gc.h
@@ -0,0 +1,179 @@
+#ifndef Py_INTERNAL_GC_H
+#define Py_INTERNAL_GC_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+/* GC information is stored BEFORE the object structure. */
+typedef struct {
+ // Pointer to next object in the list.
+ // 0 means the object is not tracked
+ uintptr_t _gc_next;
+
+ // Pointer to previous object in the list.
+ // Lowest two bits are used for flags documented later.
+ uintptr_t _gc_prev;
+} PyGC_Head;
+
+#define _Py_AS_GC(o) ((PyGC_Head *)(o)-1)
+
+/* True if the object is currently tracked by the GC. */
+#define _PyObject_GC_IS_TRACKED(o) (_Py_AS_GC(o)->_gc_next != 0)
+
+/* True if the object may be tracked by the GC in the future, or already is.
+ This can be useful to implement some optimizations. */
+#define _PyObject_GC_MAY_BE_TRACKED(obj) \
+ (PyObject_IS_GC(obj) && \
+ (!PyTuple_CheckExact(obj) || _PyObject_GC_IS_TRACKED(obj)))
+
+
+/* Bit flags for _gc_prev */
+/* Bit 0 is set when tp_finalize is called */
+#define _PyGC_PREV_MASK_FINALIZED (1)
+/* Bit 1 is set when the object is in the generation currently being collected. */
+#define _PyGC_PREV_MASK_COLLECTING (2)
+/* The (N-2) most significant bits contain the real address. */
+#define _PyGC_PREV_SHIFT (2)
+#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
+
+// The lowest bit of _gc_next is used for flags only inside the GC;
+// it is always 0 for normal code.
+#define _PyGCHead_NEXT(g) ((PyGC_Head*)(g)->_gc_next)
+#define _PyGCHead_SET_NEXT(g, p) ((g)->_gc_next = (uintptr_t)(p))
+
+// Lowest two bits of _gc_prev are used for _PyGC_PREV_MASK_* flags.
+#define _PyGCHead_PREV(g) ((PyGC_Head*)((g)->_gc_prev & _PyGC_PREV_MASK))
+#define _PyGCHead_SET_PREV(g, p) do { \
+ assert(((uintptr_t)p & ~_PyGC_PREV_MASK) == 0); \
+ (g)->_gc_prev = ((g)->_gc_prev & ~_PyGC_PREV_MASK) \
+ | ((uintptr_t)(p)); \
+ } while (0)
+
+#define _PyGCHead_FINALIZED(g) \
+ (((g)->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0)
+#define _PyGCHead_SET_FINALIZED(g) \
+ ((g)->_gc_prev |= _PyGC_PREV_MASK_FINALIZED)
+
+#define _PyGC_FINALIZED(o) \
+ _PyGCHead_FINALIZED(_Py_AS_GC(o))
+#define _PyGC_SET_FINALIZED(o) \
+ _PyGCHead_SET_FINALIZED(_Py_AS_GC(o))
+
+
+/* GC runtime state */
+
+/* If we change this, we need to change the default value in the
+ signature of gc.collect. */
+#define NUM_GENERATIONS 3
+/*
+ NOTE: about untracking of mutable objects.
+
+ Certain types of container cannot participate in a reference cycle, and
+ so do not need to be tracked by the garbage collector. Untracking these
+ objects reduces the cost of garbage collections. However, determining
+ which objects may be untracked is not free, and the costs must be
+ weighed against the benefits for garbage collection.
+
+ There are two possible strategies for when to untrack a container:
+
+ i) When the container is created.
+ ii) When the container is examined by the garbage collector.
+
+ Tuples containing only immutable objects (integers, strings etc, and
+ recursively, tuples of immutable objects) do not need to be tracked.
+ The interpreter creates a large number of tuples, many of which will
+ not survive until garbage collection. It is therefore not worthwhile
+ to untrack eligible tuples at creation time.
+
+ Instead, all tuples except the empty tuple are tracked when created.
+ During garbage collection it is determined whether any surviving tuples
+ can be untracked. A tuple can be untracked if all of its contents are
+ already not tracked. Tuples are examined for untracking in all garbage
+ collection cycles. It may take more than one cycle to untrack a tuple.
+
+ Dictionaries containing only immutable objects also do not need to be
+ tracked. Dictionaries are untracked when created. If a tracked item is
+ inserted into a dictionary (either as a key or value), the dictionary
+ becomes tracked. During a full garbage collection (all generations),
+ the collector will untrack any dictionaries whose contents are not
+ tracked.
+
+ The module provides the Python function is_tracked(obj), which returns
+ the CURRENT tracking status of the object. Subsequent garbage
+ collections may change the tracking status of the object.
+
+ Untracking of certain containers was introduced in issue #4688, and
+ the algorithm was refined in response to issue #14775.
+*/
+
+struct gc_generation {
+ PyGC_Head head;
+ int threshold; /* collection threshold */
+ int count; /* count of allocations or collections of younger
+ generations */
+};
+
+/* Running stats per generation */
+struct gc_generation_stats {
+ /* total number of collections */
+ Py_ssize_t collections;
+ /* total number of collected objects */
+ Py_ssize_t collected;
+ /* total number of uncollectable objects (put into gc.garbage) */
+ Py_ssize_t uncollectable;
+};
+
+struct _gc_runtime_state {
+ /* List of objects that still need to be cleaned up, singly linked
+ * via their gc headers' gc_prev pointers. */
+ PyObject *trash_delete_later;
+ /* Current call-stack depth of tp_dealloc calls. */
+ int trash_delete_nesting;
+
+ int enabled;
+ int debug;
+ /* linked lists of container objects */
+ struct gc_generation generations[NUM_GENERATIONS];
+ PyGC_Head *generation0;
+ /* a permanent generation which won't be collected */
+ struct gc_generation permanent_generation;
+ struct gc_generation_stats generation_stats[NUM_GENERATIONS];
+ /* true if we are currently running the collector */
+ int collecting;
+ /* list of uncollectable objects */
+ PyObject *garbage;
+ /* a list of callbacks to be invoked when collection is performed */
+ PyObject *callbacks;
+ /* This is the number of objects that survived the last full
+ collection. It approximates the number of long lived objects
+ tracked by the GC.
+
+ (by "full collection", we mean a collection of the oldest
+ generation). */
+ Py_ssize_t long_lived_total;
+ /* This is the number of objects that survived all "non-full"
+ collections, and are waiting to undergo a full collection for
+ the first time. */
+ Py_ssize_t long_lived_pending;
+};
+
+PyAPI_FUNC(void) _PyGC_InitState(struct _gc_runtime_state *);
+
+
+// Functions to clear types free lists
+extern void _PyFrame_ClearFreeList(void);
+extern void _PyTuple_ClearFreeList(void);
+extern void _PyFloat_ClearFreeList(void);
+extern void _PyList_ClearFreeList(void);
+extern void _PyDict_ClearFreeList(void);
+extern void _PyAsyncGen_ClearFreeLists(void);
+extern void _PyContext_ClearFreeList(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_GC_H */
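The key trick above is that the PyGC_Head sits immediately before the
object it tracks, so _Py_AS_GC() is plain pointer arithmetic. A standalone
mock-up (my sketch; these are stand-in types, not CPython's):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { uintptr_t _gc_next, _gc_prev; } DemoGCHead;
typedef struct { int payload; } DemoObject;

/* Same shape as _Py_AS_GC(o): step back one header from the object. */
#define DEMO_AS_GC(o) ((DemoGCHead *)(o) - 1)

int main(void)
{
    /* Allocate header and object contiguously, header first. */
    DemoGCHead *head = malloc(sizeof(DemoGCHead) + sizeof(DemoObject));
    assert(head != NULL);
    DemoObject *op = (DemoObject *)(head + 1);
    assert(DEMO_AS_GC(op) == head);
    free(head);
    return 0;
}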
diff --git a/x64/include/internal/pycore_gil.h b/x64/include/internal/pycore_gil.h
index 7de3163..8ebad37 100644
--- a/x64/include/internal/pycore_gil.h
+++ b/x64/include/internal/pycore_gil.h
@@ -8,8 +8,8 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_condvar.h"
-#include "pycore_atomic.h"
+#include "pycore_atomic.h" /* _Py_atomic_address */
+#include "pycore_condvar.h" /* PyCOND_T */
#ifndef Py_HAVE_CONDVAR
# error You need either a POSIX-compatible or a Windows system!
diff --git a/x64/include/internal/pycore_hamt.h b/x64/include/internal/pycore_hamt.h
index e65aef5..aaf6559 100644
--- a/x64/include/internal/pycore_hamt.h
+++ b/x64/include/internal/pycore_hamt.h
@@ -8,7 +8,7 @@
#define _Py_HAMT_MAX_TREE_DEPTH 7
-#define PyHamt_Check(o) (Py_TYPE(o) == &_PyHamt_Type)
+#define PyHamt_Check(o) Py_IS_TYPE(o, &_PyHamt_Type)
/* Abstract tree node. */
diff --git a/x64/include/internal/pycore_hashtable.h b/x64/include/internal/pycore_hashtable.h
new file mode 100644
index 0000000..18757ab
--- /dev/null
+++ b/x64/include/internal/pycore_hashtable.h
@@ -0,0 +1,148 @@
+#ifndef Py_INTERNAL_HASHTABLE_H
+#define Py_INTERNAL_HASHTABLE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+/* Singly linked list */
+
+typedef struct _Py_slist_item_s {
+ struct _Py_slist_item_s *next;
+} _Py_slist_item_t;
+
+typedef struct {
+ _Py_slist_item_t *head;
+} _Py_slist_t;
+
+#define _Py_SLIST_ITEM_NEXT(ITEM) (((_Py_slist_item_t *)ITEM)->next)
+
+#define _Py_SLIST_HEAD(SLIST) (((_Py_slist_t *)SLIST)->head)
+
+
+/* _Py_hashtable: table entry */
+
+typedef struct {
+ /* used by _Py_hashtable_t.buckets to link entries */
+ _Py_slist_item_t _Py_slist_item;
+
+ Py_uhash_t key_hash;
+ void *key;
+ void *value;
+} _Py_hashtable_entry_t;
+
+
+/* _Py_hashtable: prototypes */
+
+/* Forward declaration */
+struct _Py_hashtable_t;
+typedef struct _Py_hashtable_t _Py_hashtable_t;
+
+typedef Py_uhash_t (*_Py_hashtable_hash_func) (const void *key);
+typedef int (*_Py_hashtable_compare_func) (const void *key1, const void *key2);
+typedef void (*_Py_hashtable_destroy_func) (void *key);
+typedef _Py_hashtable_entry_t* (*_Py_hashtable_get_entry_func)(_Py_hashtable_t *ht,
+ const void *key);
+
+typedef struct {
+ // Allocate a memory block
+ void* (*malloc) (size_t size);
+
+ // Release a memory block
+ void (*free) (void *ptr);
+} _Py_hashtable_allocator_t;
+
+
+/* _Py_hashtable: table */
+struct _Py_hashtable_t {
+ size_t nentries; // Total number of entries in the table
+ size_t nbuckets;
+ _Py_slist_t *buckets;
+
+ _Py_hashtable_get_entry_func get_entry_func;
+ _Py_hashtable_hash_func hash_func;
+ _Py_hashtable_compare_func compare_func;
+ _Py_hashtable_destroy_func key_destroy_func;
+ _Py_hashtable_destroy_func value_destroy_func;
+ _Py_hashtable_allocator_t alloc;
+};
+
+/* Hash a pointer (void*) */
+PyAPI_FUNC(Py_uhash_t) _Py_hashtable_hash_ptr(const void *key);
+
+/* Comparison using memcmp() */
+PyAPI_FUNC(int) _Py_hashtable_compare_direct(
+ const void *key1,
+ const void *key2);
+
+PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new(
+ _Py_hashtable_hash_func hash_func,
+ _Py_hashtable_compare_func compare_func);
+
+PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new_full(
+ _Py_hashtable_hash_func hash_func,
+ _Py_hashtable_compare_func compare_func,
+ _Py_hashtable_destroy_func key_destroy_func,
+ _Py_hashtable_destroy_func value_destroy_func,
+ _Py_hashtable_allocator_t *allocator);
+
+PyAPI_FUNC(void) _Py_hashtable_destroy(_Py_hashtable_t *ht);
+
+PyAPI_FUNC(void) _Py_hashtable_clear(_Py_hashtable_t *ht);
+
+typedef int (*_Py_hashtable_foreach_func) (_Py_hashtable_t *ht,
+ const void *key, const void *value,
+ void *user_data);
+
+/* Call func() on each entry of the hashtable.
+ Iteration stops if func() returns a non-zero value; in that case, that
+ value becomes the result of the call. Otherwise, the function returns 0. */
+PyAPI_FUNC(int) _Py_hashtable_foreach(
+ _Py_hashtable_t *ht,
+ _Py_hashtable_foreach_func func,
+ void *user_data);
+
+PyAPI_FUNC(size_t) _Py_hashtable_size(const _Py_hashtable_t *ht);
+
+/* Add a new entry to the hash. The key must not be present in the hash table.
+ Return 0 on success, -1 on memory error. */
+PyAPI_FUNC(int) _Py_hashtable_set(
+ _Py_hashtable_t *ht,
+ const void *key,
+ void *value);
+
+
+/* Get an entry.
+ Return NULL if the key does not exist. */
+static inline _Py_hashtable_entry_t *
+_Py_hashtable_get_entry(_Py_hashtable_t *ht, const void *key)
+{
+ return ht->get_entry_func(ht, key);
+}
+
+
+/* Get the value from an entry.
+ Return NULL if the entry is not found.
+
+ Use _Py_hashtable_get_entry() to distinguish an entry whose value is NULL
+ from a missing entry. */
+PyAPI_FUNC(void*) _Py_hashtable_get(_Py_hashtable_t *ht, const void *key);
+
+
+/* Remove a key and its associated value without calling key and value destroy
+ functions.
+
+ Return the removed value if the key was found.
+ Return NULL if the key was not found. */
+PyAPI_FUNC(void*) _Py_hashtable_steal(
+ _Py_hashtable_t *ht,
+ const void *key);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_HASHTABLE_H */
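A hedged usage sketch (mine; it compiles only inside a Py_BUILD_CORE build,
and uses only the declarations above) mapping a pointer key to a pointer
value:

#include "pycore_hashtable.h"

static int hashtable_demo(void)
{
    _Py_hashtable_t *ht = _Py_hashtable_new(_Py_hashtable_hash_ptr,
                                            _Py_hashtable_compare_direct);
    if (ht == NULL) {
        return -1;   /* memory error */
    }
    static int key, value = 42;
    /* The key must not already be present. */
    if (_Py_hashtable_set(ht, &key, &value) < 0) {
        _Py_hashtable_destroy(ht);
        return -1;
    }
    int *got = _Py_hashtable_get(ht, &key);
    int ok = (got == &value);
    _Py_hashtable_destroy(ht);
    return ok ? 0 : -1;
}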
diff --git a/x64/include/internal/pycore_import.h b/x64/include/internal/pycore_import.h
new file mode 100644
index 0000000..b011ea4
--- /dev/null
+++ b/x64/include/internal/pycore_import.h
@@ -0,0 +1,22 @@
+#ifndef Py_LIMITED_API
+#ifndef Py_INTERNAL_IMPORT_H
+#define Py_INTERNAL_IMPORT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+PyAPI_FUNC(PyObject *) _PyImport_FindBuiltin(
+ PyThreadState *tstate,
+ const char *name /* UTF-8 encoded string */
+ );
+
+#ifdef HAVE_FORK
+extern void _PyImport_ReInitLock(void);
+#endif
+extern void _PyImport_Cleanup(PyThreadState *tstate);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_IMPORT_H */
+#endif /* !Py_LIMITED_API */
diff --git a/x64/include/internal/pycore_initconfig.h b/x64/include/internal/pycore_initconfig.h
index 40831c4..457a005 100644
--- a/x64/include/internal/pycore_initconfig.h
+++ b/x64/include/internal/pycore_initconfig.h
@@ -8,7 +8,8 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_pystate.h" /* _PyRuntimeState */
+/* Forward declaration */
+struct pyruntimestate;
/* --- PyStatus ----------------------------------------------- */
@@ -60,7 +61,7 @@ PyAPI_FUNC(PyObject*) _PyWideStringList_AsList(const PyWideStringList *list);
/* --- _PyArgv ---------------------------------------------------- */
-typedef struct {
+typedef struct _PyArgv {
Py_ssize_t argc;
int use_bytes_argv;
char * const *bytes_argv;
@@ -149,8 +150,8 @@ extern PyStatus _PyConfig_Copy(
PyConfig *config,
const PyConfig *config2);
extern PyStatus _PyConfig_InitPathConfig(PyConfig *config);
-extern void _PyConfig_Write(const PyConfig *config,
- _PyRuntimeState *runtime);
+extern PyStatus _PyConfig_Write(const PyConfig *config,
+ struct pyruntimestate *runtime);
extern PyStatus _PyConfig_SetPyArgv(
PyConfig *config,
const _PyArgv *args);
diff --git a/x64/include/internal/pycore_interp.h b/x64/include/internal/pycore_interp.h
new file mode 100644
index 0000000..551ad83
--- /dev/null
+++ b/x64/include/internal/pycore_interp.h
@@ -0,0 +1,192 @@
+#ifndef Py_INTERNAL_INTERP_H
+#define Py_INTERNAL_INTERP_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_atomic.h" /* _Py_atomic_address */
+#include "pycore_gil.h" /* struct _gil_runtime_state */
+#include "pycore_gc.h" /* struct _gc_runtime_state */
+#include "pycore_warnings.h" /* struct _warnings_runtime_state */
+
+/* ceval state */
+
+struct _pending_calls {
+ PyThread_type_lock lock;
+ /* Request for running pending calls. */
+ _Py_atomic_int calls_to_do;
+ /* Request for looking at the `async_exc` field of the current
+ thread state.
+ Guarded by the GIL. */
+ int async_exc;
+#define NPENDINGCALLS 32
+ struct {
+ int (*func)(void *);
+ void *arg;
+ } calls[NPENDINGCALLS];
+ int first;
+ int last;
+};
+
+struct _ceval_state {
+ int recursion_limit;
+ /* Records whether tracing is on for any thread. Counts the number
+ of threads for which tstate->c_tracefunc is non-NULL, so if the
+ value is 0, we know we don't have to check this thread's
+ c_tracefunc. This speeds up the if statement in
+ _PyEval_EvalFrameDefault() after fast_next_opcode. */
+ int tracing_possible;
+ /* This single variable consolidates all requests to break out of
+ the fast path in the eval loop. */
+ _Py_atomic_int eval_breaker;
+ /* Request for dropping the GIL */
+ _Py_atomic_int gil_drop_request;
+ struct _pending_calls pending;
+};
+
+/* fs_codec.encoding is initialized to NULL.
+ Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
+struct _Py_unicode_fs_codec {
+ char *encoding; // Filesystem encoding (encoded to UTF-8)
+ int utf8; // encoding=="utf-8"?
+ char *errors; // Filesystem errors (encoded to UTF-8)
+ _Py_error_handler error_handler;
+};
+
+struct _Py_unicode_state {
+ struct _Py_unicode_fs_codec fs_codec;
+};
+
+
+/* interpreter state */
+
+#define _PY_NSMALLPOSINTS 257
+#define _PY_NSMALLNEGINTS 5
+
+// The PyInterpreterState typedef is in Include/pystate.h.
+struct _is {
+
+ struct _is *next;
+ struct _ts *tstate_head;
+
+ /* Reference to the _PyRuntime global variable. This field exists so
+ that functions need not take runtime in addition to tstate.
+ Get the runtime from tstate: tstate->interp->runtime. */
+ struct pyruntimestate *runtime;
+
+ int64_t id;
+ int64_t id_refcount;
+ int requires_idref;
+ PyThread_type_lock id_mutex;
+
+ int finalizing;
+
+ struct _ceval_state ceval;
+ struct _gc_runtime_state gc;
+
+ PyObject *modules;
+ PyObject *modules_by_index;
+ PyObject *sysdict;
+ PyObject *builtins;
+ PyObject *importlib;
+
+ /* Used in Modules/_threadmodule.c. */
+ long num_threads;
+ /* Support for runtime thread stack size tuning.
+ A value of 0 means using the platform's default stack size
+ or the size specified by the THREAD_STACK_SIZE macro. */
+ /* Used in Python/thread.c. */
+ size_t pythread_stacksize;
+
+ PyObject *codec_search_path;
+ PyObject *codec_search_cache;
+ PyObject *codec_error_registry;
+ int codecs_initialized;
+
+ struct _Py_unicode_state unicode;
+
+ PyConfig config;
+#ifdef HAVE_DLOPEN
+ int dlopenflags;
+#endif
+
+ PyObject *dict; /* Stores per-interpreter state */
+
+ PyObject *builtins_copy;
+ PyObject *import_func;
+ /* Initialized to PyEval_EvalFrameDefault(). */
+ _PyFrameEvalFunction eval_frame;
+
+ Py_ssize_t co_extra_user_count;
+ freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];
+
+#ifdef HAVE_FORK
+ PyObject *before_forkers;
+ PyObject *after_forkers_parent;
+ PyObject *after_forkers_child;
+#endif
+ /* AtExit module */
+ void (*pyexitfunc)(PyObject *);
+ PyObject *pyexitmodule;
+
+ uint64_t tstate_next_unique_id;
+
+ struct _warnings_runtime_state warnings;
+
+ PyObject *audit_hooks;
+
+ struct {
+ struct {
+ int level;
+ int atbol;
+ } listnode;
+ } parser;
+
+#if _PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS > 0
+ /* Small integers are preallocated in this array so that they
+ can be shared.
+ The integers that are preallocated are those in the range
+ -_PY_NSMALLNEGINTS (inclusive) to _PY_NSMALLPOSINTS (not inclusive).
+ */
+ PyLongObject* small_ints[_PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS];
+#endif
+};
+
+/* Used by _PyImport_Cleanup() */
+extern void _PyInterpreterState_ClearModules(PyInterpreterState *interp);
+
+extern PyStatus _PyInterpreterState_SetConfig(
+ PyInterpreterState *interp,
+ const PyConfig *config);
+
+
+
+/* cross-interpreter data registry */
+
+/* For now we use a global registry of shareable classes. An
+ alternative would be to add a tp_* slot for a class's
+ crossinterpdatafunc. It would be simpler and more efficient. */
+
+struct _xidregitem;
+
+struct _xidregitem {
+ PyTypeObject *cls;
+ crossinterpdatafunc getdata;
+ struct _xidregitem *next;
+};
+
+PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(int64_t);
+
+PyAPI_FUNC(int) _PyInterpreterState_IDInitref(struct _is *);
+PyAPI_FUNC(void) _PyInterpreterState_IDIncref(struct _is *);
+PyAPI_FUNC(void) _PyInterpreterState_IDDecref(struct _is *);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_INTERP_H */
+
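The small_ints cache declared above is observable through the public API:
PyLong_FromLong() returns the same object for every value in [-5, 256]. A
self-contained demonstration (my sketch, not part of this patch):

#include <Python.h>

int main(void)
{
    Py_Initialize();
    PyObject *a = PyLong_FromLong(7), *b = PyLong_FromLong(7);
    PyObject *c = PyLong_FromLong(1000), *d = PyLong_FromLong(1000);
    printf("7 shared: %d\n", a == b);      /* 1: served from small_ints */
    printf("1000 shared: %d\n", c == d);   /* typically 0: fresh objects */
    Py_DECREF(a); Py_DECREF(b); Py_DECREF(c); Py_DECREF(d);
    if (Py_FinalizeEx() < 0) {
        return 120;
    }
    return 0;
}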
diff --git a/x64/include/internal/pycore_object.h b/x64/include/internal/pycore_object.h
index 7418c69..32e86d0 100644
--- a/x64/include/internal/pycore_object.h
+++ b/x64/include/internal/pycore_object.h
@@ -8,7 +8,9 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_pystate.h" /* _PyRuntime */
+#include "pycore_gc.h" // _PyObject_GC_IS_TRACKED()
+#include "pycore_interp.h" // PyInterpreterState.gc
+#include "pycore_pystate.h" // _PyThreadState_GET()
PyAPI_FUNC(int) _PyType_CheckConsistency(PyTypeObject *type);
PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
@@ -18,7 +20,7 @@ PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
* NB: While the object is tracked by the collector, it must be safe to call the
* ob_traverse method.
*
- * Internal note: _PyRuntime.gc.generation0->_gc_prev doesn't have any bit flags
+ * Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
* because it's not object header. So we don't use _PyGCHead_PREV() and
* _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
*
@@ -37,11 +39,13 @@ static inline void _PyObject_GC_TRACK_impl(const char *filename, int lineno,
"object is in generation which is garbage collected",
filename, lineno, "_PyObject_GC_TRACK");
- PyGC_Head *last = (PyGC_Head*)(_PyRuntime.gc.generation0->_gc_prev);
+ PyThreadState *tstate = _PyThreadState_GET();
+ PyGC_Head *generation0 = tstate->interp->gc.generation0;
+ PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
_PyGCHead_SET_NEXT(last, gc);
_PyGCHead_SET_PREV(gc, last);
- _PyGCHead_SET_NEXT(gc, _PyRuntime.gc.generation0);
- _PyRuntime.gc.generation0->_gc_prev = (uintptr_t)gc;
+ _PyGCHead_SET_NEXT(gc, generation0);
+ generation0->_gc_prev = (uintptr_t)gc;
}
#define _PyObject_GC_TRACK(op) \
@@ -75,6 +79,41 @@ static inline void _PyObject_GC_UNTRACK_impl(const char *filename, int lineno,
#define _PyObject_GC_UNTRACK(op) \
_PyObject_GC_UNTRACK_impl(__FILE__, __LINE__, _PyObject_CAST(op))
+#ifdef Py_REF_DEBUG
+extern void _PyDebug_PrintTotalRefs(void);
+#endif
+
+#ifdef Py_TRACE_REFS
+extern void _Py_AddToAllObjects(PyObject *op, int force);
+extern void _Py_PrintReferences(FILE *);
+extern void _Py_PrintReferenceAddresses(FILE *);
+#endif
+
+static inline PyObject **
+_PyObject_GET_WEAKREFS_LISTPTR(PyObject *op)
+{
+ Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
+ return (PyObject **)((char *)op + offset);
+}
+
+// Fast inlined version of PyType_HasFeature()
+static inline int
+_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
+ return ((type->tp_flags & feature) != 0);
+}
+
+// Fast inlined version of PyObject_IS_GC()
+static inline int
+_PyObject_IS_GC(PyObject *obj)
+{
+ return (PyType_IS_GC(Py_TYPE(obj))
+ && (Py_TYPE(obj)->tp_is_gc == NULL
+ || Py_TYPE(obj)->tp_is_gc(obj)));
+}
+
+// Fast inlined version of PyType_IS_GC()
+#define _PyType_IS_GC(t) _PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC)
+
#ifdef __cplusplus
}
#endif
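Outside the core, the same checks exist as PyType_HasFeature() and
PyObject_IS_GC(); a one-line sketch (mine):

#include <Python.h>

/* Public counterpart of the inlined _PyType_IS_GC() above. */
static int type_is_gc(PyTypeObject *t)
{
    return PyType_HasFeature(t, Py_TPFLAGS_HAVE_GC);
}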
diff --git a/x64/include/internal/pycore_pathconfig.h b/x64/include/internal/pycore_pathconfig.h
index ce75cce..42d61b1 100644
--- a/x64/include/internal/pycore_pathconfig.h
+++ b/x64/include/internal/pycore_pathconfig.h
@@ -47,8 +47,6 @@ PyAPI_DATA(wchar_t*) _Py_dll_path;
#endif
extern void _PyPathConfig_ClearGlobal(void);
-extern PyStatus _PyPathConfig_SetGlobal(
- const struct _PyPathConfig *pathconfig);
extern PyStatus _PyPathConfig_Calculate(
_PyPathConfig *pathconfig,
@@ -56,11 +54,10 @@ extern PyStatus _PyPathConfig_Calculate(
extern int _PyPathConfig_ComputeSysPath0(
const PyWideStringList *argv,
PyObject **path0);
-extern int _Py_FindEnvConfigValue(
+extern PyStatus _Py_FindEnvConfigValue(
FILE *env_file,
const wchar_t *key,
- wchar_t *value,
- size_t value_size);
+ wchar_t **value_p);
#ifdef MS_WINDOWS
extern wchar_t* _Py_GetDLLPath(void);
diff --git a/x64/include/internal/pycore_pyerrors.h b/x64/include/internal/pycore_pyerrors.h
index 23327ef..2cf1160 100644
--- a/x64/include/internal/pycore_pyerrors.h
+++ b/x64/include/internal/pycore_pyerrors.h
@@ -10,7 +10,22 @@ extern "C" {
static inline PyObject* _PyErr_Occurred(PyThreadState *tstate)
{
- return tstate == NULL ? NULL : tstate->curexc_type;
+ assert(tstate != NULL);
+ return tstate->curexc_type;
+}
+
+static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state)
+{
+ PyObject *t, *v, *tb;
+ t = exc_state->exc_type;
+ v = exc_state->exc_value;
+ tb = exc_state->exc_traceback;
+ exc_state->exc_type = NULL;
+ exc_state->exc_value = NULL;
+ exc_state->exc_traceback = NULL;
+ Py_XDECREF(t);
+ Py_XDECREF(v);
+ Py_XDECREF(tb);
}
@@ -35,10 +50,15 @@ PyAPI_FUNC(void) _PyErr_SetObject(
PyObject *type,
PyObject *value);
+PyAPI_FUNC(void) _PyErr_ChainStackItem(
+ _PyErr_StackItem *exc_info);
+
PyAPI_FUNC(void) _PyErr_Clear(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_SetNone(PyThreadState *tstate, PyObject *exception);
+PyAPI_FUNC(PyObject *) _PyErr_NoMemory(PyThreadState *tstate);
+
PyAPI_FUNC(void) _PyErr_SetString(
PyThreadState *tstate,
PyObject *exception,
@@ -56,6 +76,14 @@ PyAPI_FUNC(void) _PyErr_NormalizeException(
PyObject **val,
PyObject **tb);
+PyAPI_FUNC(PyObject *) _PyErr_FormatFromCauseTstate(
+ PyThreadState *tstate,
+ PyObject *exception,
+ const char *format,
+ ...);
+
+PyAPI_FUNC(int) _PyErr_CheckSignalsTstate(PyThreadState *tstate);
+
#ifdef __cplusplus
}
#endif
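The discipline in _PyErr_ClearExcState() above (NULL every slot before any
Py_XDECREF) matters because a decref can run arbitrary destructor code that
may re-enter and look at the exception state. The public Py_CLEAR() macro
encodes the same rule; a generic sketch (mine):

#include <Python.h>

static void clear_slot_safely(PyObject **slot)
{
    PyObject *tmp = *slot;
    *slot = NULL;      /* restore the invariant first... */
    Py_XDECREF(tmp);   /* ...then run whatever destructors fire */
}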
diff --git a/x64/include/internal/pycore_pylifecycle.h b/x64/include/internal/pycore_pylifecycle.h
index d4f0ae2..50ab645 100644
--- a/x64/include/internal/pycore_pylifecycle.h
+++ b/x64/include/internal/pycore_pylifecycle.h
@@ -8,8 +8,9 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_initconfig.h" /* _PyArgv */
-#include "pycore_pystate.h" /* _PyRuntimeState */
+/* Forward declarations */
+struct _PyArgv;
+struct pyruntimestate;
/* True if the main interpreter thread exited due to an unhandled
* KeyboardInterrupt exception, suggesting the user pressed ^C. */
@@ -32,74 +33,69 @@ PyAPI_FUNC(int) _Py_IsLocaleCoercionTarget(const char *ctype_loc);
extern PyStatus _PyUnicode_Init(void);
extern int _PyStructSequence_Init(void);
-extern int _PyLong_Init(void);
+extern int _PyLong_Init(PyThreadState *tstate);
extern PyStatus _PyFaulthandler_Init(int enable);
extern int _PyTraceMalloc_Init(int enable);
-extern PyObject * _PyBuiltin_Init(void);
+extern PyObject * _PyBuiltin_Init(PyThreadState *tstate);
extern PyStatus _PySys_Create(
- _PyRuntimeState *runtime,
- PyInterpreterState *interp,
+ PyThreadState *tstate,
PyObject **sysmod_p);
-extern PyStatus _PySys_SetPreliminaryStderr(PyObject *sysdict);
extern PyStatus _PySys_ReadPreinitWarnOptions(PyWideStringList *options);
extern PyStatus _PySys_ReadPreinitXOptions(PyConfig *config);
-extern int _PySys_InitMain(
- _PyRuntimeState *runtime,
- PyInterpreterState *interp);
-extern PyStatus _PyImport_Init(PyInterpreterState *interp);
+extern int _PySys_InitMain(PyThreadState *tstate);
extern PyStatus _PyExc_Init(void);
extern PyStatus _PyErr_Init(void);
extern PyStatus _PyBuiltins_AddExceptions(PyObject * bltinmod);
-extern PyStatus _PyImportHooks_Init(void);
+extern PyStatus _PyImportHooks_Init(PyThreadState *tstate);
extern int _PyFloat_Init(void);
extern PyStatus _Py_HashRandomization_Init(const PyConfig *);
extern PyStatus _PyTypes_Init(void);
-extern PyStatus _PyImportZip_Init(PyInterpreterState *interp);
+extern PyStatus _PyTypes_InitSlotDefs(void);
+extern PyStatus _PyImportZip_Init(PyThreadState *tstate);
+extern PyStatus _PyGC_Init(PyThreadState *tstate);
/* Various internal finalizers */
-extern void PyMethod_Fini(void);
-extern void PyFrame_Fini(void);
-extern void PyCFunction_Fini(void);
-extern void PyDict_Fini(void);
-extern void PyTuple_Fini(void);
-extern void PyList_Fini(void);
-extern void PySet_Fini(void);
-extern void PyBytes_Fini(void);
-extern void PyFloat_Fini(void);
+extern void _PyFrame_Fini(void);
+extern void _PyDict_Fini(void);
+extern void _PyTuple_Fini(void);
+extern void _PyList_Fini(void);
+extern void _PySet_Fini(void);
+extern void _PyBytes_Fini(void);
+extern void _PyFloat_Fini(void);
+extern void _PySlice_Fini(void);
+extern void _PyAsyncGen_Fini(void);
+
+extern int _PySignal_Init(int install_signal_handlers);
extern void PyOS_FiniInterrupts(void);
-extern void PySlice_Fini(void);
-extern void PyAsyncGen_Fini(void);
extern void _PyExc_Fini(void);
extern void _PyImport_Fini(void);
extern void _PyImport_Fini2(void);
-extern void _PyGC_Fini(_PyRuntimeState *runtime);
+extern void _PyGC_Fini(PyThreadState *tstate);
extern void _PyType_Fini(void);
extern void _Py_HashRandomization_Fini(void);
-extern void _PyUnicode_Fini(void);
-extern void PyLong_Fini(void);
+extern void _PyUnicode_Fini(PyThreadState *tstate);
+extern void _PyLong_Fini(PyThreadState *tstate);
extern void _PyFaulthandler_Fini(void);
extern void _PyHash_Fini(void);
extern void _PyTraceMalloc_Fini(void);
extern void _PyWarnings_Fini(PyInterpreterState *interp);
+extern void _PyAST_Fini(void);
-extern void _PyGILState_Init(
- _PyRuntimeState *runtime,
- PyInterpreterState *interp,
- PyThreadState *tstate);
-extern void _PyGILState_Fini(_PyRuntimeState *runtime);
+extern PyStatus _PyGILState_Init(PyThreadState *tstate);
+extern void _PyGILState_Fini(PyThreadState *tstate);
-PyAPI_FUNC(void) _PyGC_DumpShutdownStats(_PyRuntimeState *runtime);
+PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyThreadState *tstate);
PyAPI_FUNC(PyStatus) _Py_PreInitializeFromPyArgv(
const PyPreConfig *src_config,
- const _PyArgv *args);
+ const struct _PyArgv *args);
PyAPI_FUNC(PyStatus) _Py_PreInitializeFromConfig(
const PyConfig *config,
- const _PyArgv *args);
+ const struct _PyArgv *args);
PyAPI_FUNC(int) _Py_HandleSystemExit(int *exitcode_p);
@@ -110,6 +106,8 @@ PyAPI_FUNC(void) _PyErr_Print(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_Display(PyObject *file, PyObject *exception,
PyObject *value, PyObject *tb);
+PyAPI_FUNC(void) _PyThreadState_DeleteCurrent(PyThreadState *tstate);
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/internal/pycore_pymem.h b/x64/include/internal/pycore_pymem.h
index 47d092f..3d925e2 100644
--- a/x64/include/internal/pycore_pymem.h
+++ b/x64/include/internal/pycore_pymem.h
@@ -8,144 +8,7 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "objimpl.h"
-#include "pymem.h"
-
-
-/* GC runtime state */
-
-/* If we change this, we need to change the default value in the
- signature of gc.collect. */
-#define NUM_GENERATIONS 3
-
-/*
- NOTE: about the counting of long-lived objects.
-
- To limit the cost of garbage collection, there are two strategies;
- - make each collection faster, e.g. by scanning fewer objects
- - do less collections
- This heuristic is about the latter strategy.
-
- In addition to the various configurable thresholds, we only trigger a
- full collection if the ratio
- long_lived_pending / long_lived_total
- is above a given value (hardwired to 25%).
-
- The reason is that, while "non-full" collections (i.e., collections of
- the young and middle generations) will always examine roughly the same
- number of objects -- determined by the aforementioned thresholds --,
- the cost of a full collection is proportional to the total number of
- long-lived objects, which is virtually unbounded.
-
- Indeed, it has been remarked that doing a full collection every
- <constant number> of object creations entails a dramatic performance
- degradation in workloads which consist in creating and storing lots of
- long-lived objects (e.g. building a large list of GC-tracked objects would
- show quadratic performance, instead of linear as expected: see issue #4074).
-
- Using the above ratio, instead, yields amortized linear performance in
- the total number of objects (the effect of which can be summarized
- thusly: "each full garbage collection is more and more costly as the
- number of objects grows, but we do fewer and fewer of them").
-
- This heuristic was suggested by Martin von Löwis on python-dev in
- June 2008. His original analysis and proposal can be found at:
- http://mail.python.org/pipermail/python-dev/2008-June/080579.html
-*/
-
-/*
- NOTE: about untracking of mutable objects.
-
- Certain types of container cannot participate in a reference cycle, and
- so do not need to be tracked by the garbage collector. Untracking these
- objects reduces the cost of garbage collections. However, determining
- which objects may be untracked is not free, and the costs must be
- weighed against the benefits for garbage collection.
-
- There are two possible strategies for when to untrack a container:
-
- i) When the container is created.
- ii) When the container is examined by the garbage collector.
-
- Tuples containing only immutable objects (integers, strings etc, and
- recursively, tuples of immutable objects) do not need to be tracked.
- The interpreter creates a large number of tuples, many of which will
- not survive until garbage collection. It is therefore not worthwhile
- to untrack eligible tuples at creation time.
-
- Instead, all tuples except the empty tuple are tracked when created.
- During garbage collection it is determined whether any surviving tuples
- can be untracked. A tuple can be untracked if all of its contents are
- already not tracked. Tuples are examined for untracking in all garbage
- collection cycles. It may take more than one cycle to untrack a tuple.
-
- Dictionaries containing only immutable objects also do not need to be
- tracked. Dictionaries are untracked when created. If a tracked item is
- inserted into a dictionary (either as a key or value), the dictionary
- becomes tracked. During a full garbage collection (all generations),
- the collector will untrack any dictionaries whose contents are not
- tracked.
-
- The module provides the python function is_tracked(obj), which returns
- the CURRENT tracking status of the object. Subsequent garbage
- collections may change the tracking status of the object.
-
- Untracking of certain containers was introduced in issue #4688, and
- the algorithm was refined in response to issue #14775.
-*/
-
-struct gc_generation {
- PyGC_Head head;
- int threshold; /* collection threshold */
- int count; /* count of allocations or collections of younger
- generations */
-};
-
-/* Running stats per generation */
-struct gc_generation_stats {
- /* total number of collections */
- Py_ssize_t collections;
- /* total number of collected objects */
- Py_ssize_t collected;
- /* total number of uncollectable objects (put into gc.garbage) */
- Py_ssize_t uncollectable;
-};
-
-struct _gc_runtime_state {
- /* List of objects that still need to be cleaned up, singly linked
- * via their gc headers' gc_prev pointers. */
- PyObject *trash_delete_later;
- /* Current call-stack depth of tp_dealloc calls. */
- int trash_delete_nesting;
-
- int enabled;
- int debug;
- /* linked lists of container objects */
- struct gc_generation generations[NUM_GENERATIONS];
- PyGC_Head *generation0;
- /* a permanent generation which won't be collected */
- struct gc_generation permanent_generation;
- struct gc_generation_stats generation_stats[NUM_GENERATIONS];
- /* true if we are currently running the collector */
- int collecting;
- /* list of uncollectable objects */
- PyObject *garbage;
- /* a list of callbacks to be invoked when collection is performed */
- PyObject *callbacks;
- /* This is the number of objects that survived the last full
- collection. It approximates the number of long lived objects
- tracked by the GC.
-
- (by "full collection", we mean a collection of the oldest
- generation). */
- Py_ssize_t long_lived_total;
- /* This is the number of objects that survived all "non-full"
- collections, and are awaiting to undergo a full collection for
- the first time. */
- Py_ssize_t long_lived_pending;
-};
-
-PyAPI_FUNC(void) _PyGC_Initialize(struct _gc_runtime_state *);
+#include "pymem.h" // PyMemAllocatorName
/* Set the memory allocator of the specified domain to the default.
@@ -206,6 +69,35 @@ PyAPI_FUNC(int) _PyMem_GetAllocatorName(
PYMEM_ALLOCATOR_NOT_SET does nothing. */
PyAPI_FUNC(int) _PyMem_SetupAllocators(PyMemAllocatorName allocator);
+/* bpo-35053: Expose _Py_tracemalloc_config for _Py_NewReference(),
+ which accesses _Py_tracemalloc_config.tracing directly for best
+ performance. */
+struct _PyTraceMalloc_Config {
+ /* Module initialized?
+ Variable protected by the GIL */
+ enum {
+ TRACEMALLOC_NOT_INITIALIZED,
+ TRACEMALLOC_INITIALIZED,
+ TRACEMALLOC_FINALIZED
+ } initialized;
+
+ /* Is tracemalloc tracing memory allocations?
+ Variable protected by the GIL */
+ int tracing;
+
+ /* limit of the number of frames in a traceback, 1 by default.
+ Variable protected by the GIL. */
+ int max_nframe;
+};
+
+#define _PyTraceMalloc_Config_INIT \
+ {.initialized = TRACEMALLOC_NOT_INITIALIZED, \
+ .tracing = 0, \
+ .max_nframe = 1}
+
+PyAPI_DATA(struct _PyTraceMalloc_Config) _Py_tracemalloc_config;
+
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/internal/pycore_pystate.h b/x64/include/internal/pycore_pystate.h
index f90e7e1..835d6e0 100644
--- a/x64/include/internal/pycore_pystate.h
+++ b/x64/include/internal/pycore_pystate.h
@@ -8,270 +8,52 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "cpython/initconfig.h"
-#include "fileobject.h"
-#include "pystate.h"
-#include "pythread.h"
-#include "sysmodule.h"
-
-#include "pycore_gil.h" /* _gil_runtime_state */
-#include "pycore_pathconfig.h"
-#include "pycore_pymem.h"
-#include "pycore_warnings.h"
-
-
-/* ceval state */
-
-struct _pending_calls {
- int finishing;
- PyThread_type_lock lock;
- /* Request for running pending calls. */
- _Py_atomic_int calls_to_do;
- /* Request for looking at the `async_exc` field of the current
- thread state.
- Guarded by the GIL. */
- int async_exc;
-#define NPENDINGCALLS 32
- struct {
- int (*func)(void *);
- void *arg;
- } calls[NPENDINGCALLS];
- int first;
- int last;
-};
-
-struct _ceval_runtime_state {
- int recursion_limit;
- /* Records whether tracing is on for any thread. Counts the number
- of threads for which tstate->c_tracefunc is non-NULL, so if the
- value is 0, we know we don't have to check this thread's
- c_tracefunc. This speeds up the if statement in
- PyEval_EvalFrameEx() after fast_next_opcode. */
- int tracing_possible;
- /* This single variable consolidates all requests to break out of
- the fast path in the eval loop. */
- _Py_atomic_int eval_breaker;
- /* Request for dropping the GIL */
- _Py_atomic_int gil_drop_request;
- struct _pending_calls pending;
- /* Request for checking signals. */
- _Py_atomic_int signals_pending;
- struct _gil_runtime_state gil;
-};
-
-/* interpreter state */
-
-typedef PyObject* (*_PyFrameEvalFunction)(struct _frame *, int);
-
-// The PyInterpreterState typedef is in Include/pystate.h.
-struct _is {
-
- struct _is *next;
- struct _ts *tstate_head;
-
- int64_t id;
- int64_t id_refcount;
- int requires_idref;
- PyThread_type_lock id_mutex;
-
- int finalizing;
-
- PyObject *modules;
- PyObject *modules_by_index;
- PyObject *sysdict;
- PyObject *builtins;
- PyObject *importlib;
-
- /* Used in Python/sysmodule.c. */
- int check_interval;
-
- /* Used in Modules/_threadmodule.c. */
- long num_threads;
- /* Support for runtime thread stack size tuning.
- A value of 0 means using the platform's default stack size
- or the size specified by the THREAD_STACK_SIZE macro. */
- /* Used in Python/thread.c. */
- size_t pythread_stacksize;
-
- PyObject *codec_search_path;
- PyObject *codec_search_cache;
- PyObject *codec_error_registry;
- int codecs_initialized;
-
- /* fs_codec.encoding is initialized to NULL.
- Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
- struct {
- char *encoding; /* Filesystem encoding (encoded to UTF-8) */
- char *errors; /* Filesystem errors (encoded to UTF-8) */
- _Py_error_handler error_handler;
- } fs_codec;
-
- PyConfig config;
-#ifdef HAVE_DLOPEN
- int dlopenflags;
-#endif
-
- PyObject *dict; /* Stores per-interpreter state */
-
- PyObject *builtins_copy;
- PyObject *import_func;
- /* Initialized to PyEval_EvalFrameDefault(). */
- _PyFrameEvalFunction eval_frame;
-
- Py_ssize_t co_extra_user_count;
- freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];
-
-#ifdef HAVE_FORK
- PyObject *before_forkers;
- PyObject *after_forkers_parent;
- PyObject *after_forkers_child;
-#endif
- /* AtExit module */
- void (*pyexitfunc)(PyObject *);
- PyObject *pyexitmodule;
-
- uint64_t tstate_next_unique_id;
-
- struct _warnings_runtime_state warnings;
-
- PyObject *audit_hooks;
-};
-
-PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
-
-PyAPI_FUNC(int) _PyInterpreterState_IDInitref(struct _is *);
-PyAPI_FUNC(void) _PyInterpreterState_IDIncref(struct _is *);
-PyAPI_FUNC(void) _PyInterpreterState_IDDecref(struct _is *);
-
-
-/* cross-interpreter data registry */
-
-/* For now we use a global registry of shareable classes. An
- alternative would be to add a tp_* slot for a class's
- crossinterpdatafunc. It would be simpler and more efficient. */
-
-struct _xidregitem;
-
-struct _xidregitem {
- PyTypeObject *cls;
- crossinterpdatafunc getdata;
- struct _xidregitem *next;
-};
-
-/* runtime audit hook state */
-
-typedef struct _Py_AuditHookEntry {
- struct _Py_AuditHookEntry *next;
- Py_AuditHookFunction hookCFunction;
- void *userData;
-} _Py_AuditHookEntry;
-
-/* GIL state */
-
-struct _gilstate_runtime_state {
- int check_enabled;
- /* Assuming the current thread holds the GIL, this is the
- PyThreadState for the current thread. */
- _Py_atomic_address tstate_current;
- PyThreadFrameGetter getframe;
- /* The single PyInterpreterState used by this process'
- GILState implementation
- */
- /* TODO: Given interp_main, it may be possible to kill this ref */
- PyInterpreterState *autoInterpreterState;
- Py_tss_t autoTSSkey;
-};
-
-/* hook for PyEval_GetFrame(), requested for Psyco */
-#define _PyThreadState_GetFrame _PyRuntime.gilstate.getframe
-
-/* Issue #26558: Flag to disable PyGILState_Check().
- If set to non-zero, PyGILState_Check() always return 1. */
-#define _PyGILState_check_enabled _PyRuntime.gilstate.check_enabled
-
-
-/* Full Python runtime state */
-
-typedef struct pyruntimestate {
- /* Is running Py_PreInitialize()? */
- int preinitializing;
+#include "pycore_runtime.h" /* PyRuntimeState */
- /* Is Python preinitialized? Set to 1 by Py_PreInitialize() */
- int preinitialized;
- /* Is Python core initialized? Set to 1 by _Py_InitializeCore() */
- int core_initialized;
-
- /* Is Python fully initialized? Set to 1 by Py_Initialize() */
- int initialized;
-
- /* Set by Py_FinalizeEx(). Only reset to NULL if Py_Initialize()
- is called again. */
- PyThreadState *finalizing;
-
- struct pyinterpreters {
- PyThread_type_lock mutex;
- PyInterpreterState *head;
- PyInterpreterState *main;
- /* _next_interp_id is an auto-numbered sequence of small
- integers. It gets initialized in _PyInterpreterState_Init(),
- which is called in Py_Initialize(), and used in
- PyInterpreterState_New(). A negative interpreter ID
- indicates an error occurred. The main interpreter will
- always have an ID of 0. Overflow results in a RuntimeError.
- If that becomes a problem later then we can adjust, e.g. by
- using a Python int. */
- int64_t next_id;
- } interpreters;
- // XXX Remove this field once we have a tp_* slot.
- struct _xidregistry {
- PyThread_type_lock mutex;
- struct _xidregitem *head;
- } xidregistry;
-
- unsigned long main_thread;
+/* Check if the current thread is the main thread.
+ Use _Py_IsMainInterpreter() to check if it's the main interpreter. */
+static inline int
+_Py_IsMainThread(void)
+{
+ unsigned long thread = PyThread_get_thread_ident();
+ return (thread == _PyRuntime.main_thread);
+}
-#define NEXITFUNCS 32
- void (*exitfuncs[NEXITFUNCS])(void);
- int nexitfuncs;
- struct _gc_runtime_state gc;
- struct _ceval_runtime_state ceval;
- struct _gilstate_runtime_state gilstate;
+static inline int
+_Py_IsMainInterpreter(PyThreadState* tstate)
+{
+    /* Use _PyRuntime directly rather than tstate->interp->runtime, since
+       this function is used in a performance-critical code path (ceval). */
+ return (tstate->interp == _PyRuntime.interpreters.main);
+}
- PyPreConfig preconfig;
- Py_OpenCodeHookFunction open_code_hook;
- void *open_code_userdata;
- _Py_AuditHookEntry *audit_hook_head;
+/* Only handle signals on the main thread of the main interpreter. */
+static inline int
+_Py_ThreadCanHandleSignals(PyInterpreterState *interp)
+{
+ return (_Py_IsMainThread() && interp == _PyRuntime.interpreters.main);
+}
- // XXX Consolidate globals found via the check-c-globals script.
-} _PyRuntimeState;
-
-#define _PyRuntimeState_INIT \
- {.preinitialized = 0, .core_initialized = 0, .initialized = 0}
-/* Note: _PyRuntimeState_INIT sets other fields to 0/NULL */
-
-PyAPI_DATA(_PyRuntimeState) _PyRuntime;
-PyAPI_FUNC(PyStatus) _PyRuntimeState_Init(_PyRuntimeState *runtime);
-PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *runtime);
-PyAPI_FUNC(void) _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime);
-/* Initialize _PyRuntimeState.
- Return NULL on success, or return an error message on failure. */
-PyAPI_FUNC(PyStatus) _PyRuntime_Initialize(void);
-
-PyAPI_FUNC(void) _PyRuntime_Finalize(void);
-
-#define _Py_CURRENTLY_FINALIZING(runtime, tstate) \
- (runtime->finalizing == tstate)
+/* Only execute pending calls on the main thread. */
+static inline int
+_Py_ThreadCanHandlePendingCalls(void)
+{
+ return _Py_IsMainThread();
+}
/* Variable and macro for in-line access to current thread
and interpreter state */
-#define _PyRuntimeState_GetThreadState(runtime) \
- ((PyThreadState*)_Py_atomic_load_relaxed(&(runtime)->gilstate.tstate_current))
+static inline PyThreadState*
+_PyRuntimeState_GetThreadState(_PyRuntimeState *runtime)
+{
+ return (PyThreadState*)_Py_atomic_load_relaxed(&runtime->gilstate.tstate_current);
+}
/* Get the current Python thread state.
@@ -282,12 +64,31 @@ PyAPI_FUNC(void) _PyRuntime_Finalize(void);
The caller must hold the GIL.
See also PyThreadState_Get() and PyThreadState_GET(). */
-#define _PyThreadState_GET() _PyRuntimeState_GetThreadState(&_PyRuntime)
+static inline PyThreadState*
+_PyThreadState_GET(void)
+{
+ return _PyRuntimeState_GetThreadState(&_PyRuntime);
+}
/* Redefine PyThreadState_GET() as an alias to _PyThreadState_GET() */
#undef PyThreadState_GET
#define PyThreadState_GET() _PyThreadState_GET()
+PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalError_TstateNULL(const char *func);
+
+static inline void
+_Py_EnsureFuncTstateNotNULL(const char *func, PyThreadState *tstate)
+{
+ if (tstate == NULL) {
+ _Py_FatalError_TstateNULL(func);
+ }
+}
+
+// Call Py_FatalError() if tstate is NULL
+#define _Py_EnsureTstateNotNULL(tstate) \
+ _Py_EnsureFuncTstateNotNULL(__func__, tstate)
+
+
/* Get the current interpreter state.
The macro is unsafe: it does not check for error and it can return NULL.
@@ -296,13 +97,18 @@ PyAPI_FUNC(void) _PyRuntime_Finalize(void);
See also _PyInterpreterState_Get()
and _PyGILState_GetInterpreterStateUnsafe(). */
-#define _PyInterpreterState_GET_UNSAFE() (_PyThreadState_GET()->interp)
+static inline PyInterpreterState* _PyInterpreterState_GET(void) {
+ PyThreadState *tstate = _PyThreadState_GET();
+#ifdef Py_DEBUG
+ _Py_EnsureTstateNotNULL(tstate);
+#endif
+ return tstate->interp;
+}
/* Other */
PyAPI_FUNC(void) _PyThreadState_Init(
- _PyRuntimeState *runtime,
PyThreadState *tstate);
PyAPI_FUNC(void) _PyThreadState_DeleteExcept(
_PyRuntimeState *runtime,
@@ -317,6 +123,15 @@ PyAPI_FUNC(void) _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyGILState_Reinit(_PyRuntimeState *runtime);
+
+PyAPI_FUNC(int) _PyState_AddModule(
+ PyThreadState *tstate,
+ PyObject* module,
+ struct PyModuleDef* def);
+
+
+PyAPI_FUNC(int) _PyOS_InterruptOccurred(PyThreadState *tstate);
+
#ifdef __cplusplus
}
#endif
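
Taken together, the new inline helpers in this header replace the old macros
with functions the compiler can type-check. A minimal sketch of how core code
might combine them, assuming a Py_BUILD_CORE translation unit (the header
refuses to compile otherwise):

#include "Python.h"
#include "pycore_pystate.h"

static int
can_deliver_signal(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    /* Fatal error, annotated with __func__, if there is no thread state. */
    _Py_EnsureTstateNotNULL(tstate);

    /* Per the comments above, only the main thread of the main
       interpreter handles signals. */
    return _Py_ThreadCanHandleSignals(tstate->interp);
}
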
diff --git a/x64/include/internal/pycore_runtime.h b/x64/include/internal/pycore_runtime.h
new file mode 100644
index 0000000..34eb492
--- /dev/null
+++ b/x64/include/internal/pycore_runtime.h
@@ -0,0 +1,144 @@
+#ifndef Py_INTERNAL_RUNTIME_H
+#define Py_INTERNAL_RUNTIME_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_atomic.h" /* _Py_atomic_address */
+#include "pycore_gil.h" // struct _gil_runtime_state
+
+/* ceval state */
+
+struct _ceval_runtime_state {
+ /* Request for checking signals. It is shared by all interpreters (see
+ bpo-40513). Any thread of any interpreter can receive a signal, but only
+ the main thread of the main interpreter can handle signals: see
+ _Py_ThreadCanHandleSignals(). */
+ _Py_atomic_int signals_pending;
+ struct _gil_runtime_state gil;
+};
+
+/* GIL state */
+
+struct _gilstate_runtime_state {
+ /* bpo-26558: Flag to disable PyGILState_Check().
+       If set to non-zero, PyGILState_Check() always returns 1. */
+ int check_enabled;
+ /* Assuming the current thread holds the GIL, this is the
+ PyThreadState for the current thread. */
+ _Py_atomic_address tstate_current;
+ /* The single PyInterpreterState used by this process'
+ GILState implementation
+ */
+ /* TODO: Given interp_main, it may be possible to kill this ref */
+ PyInterpreterState *autoInterpreterState;
+ Py_tss_t autoTSSkey;
+};
+
+/* Runtime audit hook state */
+
+typedef struct _Py_AuditHookEntry {
+ struct _Py_AuditHookEntry *next;
+ Py_AuditHookFunction hookCFunction;
+ void *userData;
+} _Py_AuditHookEntry;
+
+/* Full Python runtime state */
+
+typedef struct pyruntimestate {
+ /* Is running Py_PreInitialize()? */
+ int preinitializing;
+
+ /* Is Python preinitialized? Set to 1 by Py_PreInitialize() */
+ int preinitialized;
+
+ /* Is Python core initialized? Set to 1 by _Py_InitializeCore() */
+ int core_initialized;
+
+ /* Is Python fully initialized? Set to 1 by Py_Initialize() */
+ int initialized;
+
+ /* Set by Py_FinalizeEx(). Only reset to NULL if Py_Initialize()
+ is called again.
+
+ Use _PyRuntimeState_GetFinalizing() and _PyRuntimeState_SetFinalizing()
+       to access it; don't access it directly. */
+ _Py_atomic_address _finalizing;
+
+ struct pyinterpreters {
+ PyThread_type_lock mutex;
+ PyInterpreterState *head;
+ PyInterpreterState *main;
+ /* _next_interp_id is an auto-numbered sequence of small
+ integers. It gets initialized in _PyInterpreterState_Init(),
+ which is called in Py_Initialize(), and used in
+ PyInterpreterState_New(). A negative interpreter ID
+ indicates an error occurred. The main interpreter will
+ always have an ID of 0. Overflow results in a RuntimeError.
+ If that becomes a problem later then we can adjust, e.g. by
+ using a Python int. */
+ int64_t next_id;
+ } interpreters;
+ // XXX Remove this field once we have a tp_* slot.
+ struct _xidregistry {
+ PyThread_type_lock mutex;
+ struct _xidregitem *head;
+ } xidregistry;
+
+ unsigned long main_thread;
+
+#define NEXITFUNCS 32
+ void (*exitfuncs[NEXITFUNCS])(void);
+ int nexitfuncs;
+
+ struct _ceval_runtime_state ceval;
+ struct _gilstate_runtime_state gilstate;
+
+ PyPreConfig preconfig;
+
+ Py_OpenCodeHookFunction open_code_hook;
+ void *open_code_userdata;
+ _Py_AuditHookEntry *audit_hook_head;
+
+ // XXX Consolidate globals found via the check-c-globals script.
+} _PyRuntimeState;
+
+#define _PyRuntimeState_INIT \
+ {.preinitialized = 0, .core_initialized = 0, .initialized = 0}
+/* Note: _PyRuntimeState_INIT sets other fields to 0/NULL */
+
+
+PyAPI_DATA(_PyRuntimeState) _PyRuntime;
+
+PyAPI_FUNC(PyStatus) _PyRuntimeState_Init(_PyRuntimeState *runtime);
+PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *runtime);
+
+#ifdef HAVE_FORK
+PyAPI_FUNC(void) _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime);
+#endif
+
+/* Initialize _PyRuntimeState.
+ Return NULL on success, or return an error message on failure. */
+PyAPI_FUNC(PyStatus) _PyRuntime_Initialize(void);
+
+PyAPI_FUNC(void) _PyRuntime_Finalize(void);
+
+
+static inline PyThreadState*
+_PyRuntimeState_GetFinalizing(_PyRuntimeState *runtime) {
+ return (PyThreadState*)_Py_atomic_load_relaxed(&runtime->_finalizing);
+}
+
+static inline void
+_PyRuntimeState_SetFinalizing(_PyRuntimeState *runtime, PyThreadState *tstate) {
+ _Py_atomic_store_relaxed(&runtime->_finalizing, (uintptr_t)tstate);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_RUNTIME_H */
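
The _finalizing field is now an atomic address behind accessor functions,
replacing the plain PyThreadState pointer and the _Py_CURRENTLY_FINALIZING()
macro deleted from pycore_pystate.h above. A sketch of the equivalent check
under the new API (the helper name is hypothetical):

#include "Python.h"
#include "pycore_runtime.h"

static int
currently_finalizing(PyThreadState *tstate)
{
    /* Same test as the removed macro, but through a relaxed atomic load. */
    return _PyRuntimeState_GetFinalizing(&_PyRuntime) == tstate;
}
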
diff --git a/x64/include/internal/pycore_sysmodule.h b/x64/include/internal/pycore_sysmodule.h
new file mode 100644
index 0000000..738a774
--- /dev/null
+++ b/x64/include/internal/pycore_sysmodule.h
@@ -0,0 +1,24 @@
+#ifndef Py_INTERNAL_SYSMODULE_H
+#define Py_INTERNAL_SYSMODULE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+PyAPI_FUNC(int) _PySys_Audit(
+ PyThreadState *tstate,
+ const char *event,
+ const char *argFormat,
+ ...);
+
+/* We want minimal exposure of this function, so use extern rather than
+ PyAPI_FUNC() to not export the symbol. */
+extern void _PySys_ClearAuditHooks(PyThreadState *tstate);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_SYSMODULE_H */
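
A sketch of raising an audit event through the internal entry point declared
above; the format string works like Py_BuildValue(), and the event name here
is invented:

#include "Python.h"
#include "pycore_sysmodule.h"

static int
audit_example(PyThreadState *tstate, PyObject *path)
{
    if (_PySys_Audit(tstate, "example.open", "O", path) < 0) {
        return -1;  /* an audit hook raised or vetoed the operation */
    }
    return 0;
}
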
diff --git a/x64/include/internal/pycore_traceback.h b/x64/include/internal/pycore_traceback.h
index bf4d7fe..1f09241 100644
--- a/x64/include/internal/pycore_traceback.h
+++ b/x64/include/internal/pycore_traceback.h
@@ -8,7 +8,8 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pystate.h" /* PyInterpreterState */
+/* Forward declaration */
+struct _is;
/* Write the Python traceback into the file 'fd'. For example:
@@ -56,7 +57,7 @@ PyAPI_FUNC(void) _Py_DumpTraceback(
PyAPI_FUNC(const char*) _Py_DumpTracebackThreads(
int fd,
- PyInterpreterState *interp,
+ struct _is *interp,
PyThreadState *current_tstate);
/* Write a Unicode object into the file descriptor fd. Encode the string to
@@ -88,7 +89,7 @@ PyAPI_FUNC(void) _Py_DumpHexadecimal(
PyAPI_FUNC(PyObject*) _PyTraceBack_FromFrame(
PyObject *tb_next,
- struct _frame *frame);
+ PyFrameObject *frame);
#ifdef __cplusplus
}
diff --git a/x64/include/internal/pycore_tupleobject.h b/x64/include/internal/pycore_tupleobject.h
index 9fcfc5c..f95f16c 100644
--- a/x64/include/internal/pycore_tupleobject.h
+++ b/x64/include/internal/pycore_tupleobject.h
@@ -8,7 +8,7 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "tupleobject.h"
+#include "tupleobject.h" /* _PyTuple_CAST() */
#define _PyTuple_ITEMS(op) (_PyTuple_CAST(op)->ob_item)
PyAPI_FUNC(PyObject *) _PyTuple_FromArray(PyObject *const *, Py_ssize_t);
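
For context, _PyTuple_FromArray() builds a tuple by copying a C array of
object pointers, taking a new reference to each item. A minimal sketch with a
hypothetical helper:

#include "Python.h"
#include "pycore_tupleobject.h"

static PyObject *
make_pair(PyObject *a, PyObject *b)
{
    PyObject *items[2] = {a, b};
    return _PyTuple_FromArray(items, 2);  /* new refs to a and b on success */
}
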
diff --git a/x64/include/internal/pycore_warnings.h b/x64/include/internal/pycore_warnings.h
index 73e5350..cafe305 100644
--- a/x64/include/internal/pycore_warnings.h
+++ b/x64/include/internal/pycore_warnings.h
@@ -8,8 +8,6 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "object.h"
-
struct _warnings_runtime_state {
/* Both 'filters' and 'onceregistry' can be set in warnings.py;
get_warnings_attr() will reset these variables accordingly. */
@@ -19,6 +17,8 @@ struct _warnings_runtime_state {
long filters_version;
};
+extern PyStatus _PyWarnings_InitState(PyThreadState *tstate);
+
#ifdef __cplusplus
}
#endif
diff --git a/x64/include/iterobject.h b/x64/include/iterobject.h
index f61726f..51139bf 100644
--- a/x64/include/iterobject.h
+++ b/x64/include/iterobject.h
@@ -7,14 +7,13 @@ extern "C" {
PyAPI_DATA(PyTypeObject) PySeqIter_Type;
PyAPI_DATA(PyTypeObject) PyCallIter_Type;
-PyAPI_DATA(PyTypeObject) PyCmpWrapper_Type;
-#define PySeqIter_Check(op) (Py_TYPE(op) == &PySeqIter_Type)
+#define PySeqIter_Check(op) Py_IS_TYPE(op, &PySeqIter_Type)
PyAPI_FUNC(PyObject *) PySeqIter_New(PyObject *);
-#define PyCallIter_Check(op) (Py_TYPE(op) == &PyCallIter_Type)
+#define PyCallIter_Check(op) Py_IS_TYPE(op, &PyCallIter_Type)
PyAPI_FUNC(PyObject *) PyCallIter_New(PyObject *, PyObject *);
diff --git a/x64/include/listobject.h b/x64/include/listobject.h
index 6057279..2a8a255 100644
--- a/x64/include/listobject.h
+++ b/x64/include/listobject.h
@@ -1,16 +1,14 @@
+/* List object interface
-/* List object interface */
+ Another generally useful object type is a list of object pointers.
+ This is a mutable type: the list items can be changed, and items can be
+ added or removed. Out-of-range indices or non-list objects are ignored.
-/*
-Another generally useful object type is a list of object pointers.
-This is a mutable type: the list items can be changed, and items can be
-added or removed. Out-of-range indices or non-list objects are ignored.
-
-*** WARNING *** PyList_SetItem does not increment the new item's reference
-count, but does decrement the reference count of the item it replaces,
-if not nil. It does *decrement* the reference count if it is *not*
-inserted in the list. Similarly, PyList_GetItem does not increment the
-returned item's reference count.
+ WARNING: PyList_SetItem does not increment the new item's reference count,
+ but does decrement the reference count of the item it replaces, if not nil.
+ It does *decrement* the reference count if it is *not* inserted in the list.
+ Similarly, PyList_GetItem does not increment the returned item's reference
+ count.
*/
#ifndef Py_LISTOBJECT_H
@@ -19,60 +17,33 @@ returned item's reference count.
extern "C" {
#endif
-#ifndef Py_LIMITED_API
-typedef struct {
- PyObject_VAR_HEAD
- /* Vector of pointers to list elements. list[0] is ob_item[0], etc. */
- PyObject **ob_item;
-
- /* ob_item contains space for 'allocated' elements. The number
- * currently in use is ob_size.
- * Invariants:
- * 0 <= ob_size <= allocated
- * len(list) == ob_size
- * ob_item == NULL implies ob_size == allocated == 0
- * list.sort() temporarily sets allocated to -1 to detect mutations.
- *
- * Items must normally not be NULL, except during construction when
- * the list is not yet visible outside the function that builds it.
- */
- Py_ssize_t allocated;
-} PyListObject;
-#endif
-
PyAPI_DATA(PyTypeObject) PyList_Type;
PyAPI_DATA(PyTypeObject) PyListIter_Type;
PyAPI_DATA(PyTypeObject) PyListRevIter_Type;
-PyAPI_DATA(PyTypeObject) PySortWrapper_Type;
#define PyList_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LIST_SUBCLASS)
-#define PyList_CheckExact(op) (Py_TYPE(op) == &PyList_Type)
+#define PyList_CheckExact(op) Py_IS_TYPE(op, &PyList_Type)
PyAPI_FUNC(PyObject *) PyList_New(Py_ssize_t size);
PyAPI_FUNC(Py_ssize_t) PyList_Size(PyObject *);
+
PyAPI_FUNC(PyObject *) PyList_GetItem(PyObject *, Py_ssize_t);
PyAPI_FUNC(int) PyList_SetItem(PyObject *, Py_ssize_t, PyObject *);
PyAPI_FUNC(int) PyList_Insert(PyObject *, Py_ssize_t, PyObject *);
PyAPI_FUNC(int) PyList_Append(PyObject *, PyObject *);
+
PyAPI_FUNC(PyObject *) PyList_GetSlice(PyObject *, Py_ssize_t, Py_ssize_t);
PyAPI_FUNC(int) PyList_SetSlice(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);
+
PyAPI_FUNC(int) PyList_Sort(PyObject *);
PyAPI_FUNC(int) PyList_Reverse(PyObject *);
PyAPI_FUNC(PyObject *) PyList_AsTuple(PyObject *);
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _PyList_Extend(PyListObject *, PyObject *);
-
-PyAPI_FUNC(int) PyList_ClearFreeList(void);
-PyAPI_FUNC(void) _PyList_DebugMallocStats(FILE *out);
-#endif
-/* Macro, trading safety for speed */
#ifndef Py_LIMITED_API
-#define PyList_GET_ITEM(op, i) (((PyListObject *)(op))->ob_item[i])
-#define PyList_SET_ITEM(op, i, v) (((PyListObject *)(op))->ob_item[i] = (v))
-#define PyList_GET_SIZE(op) (assert(PyList_Check(op)),Py_SIZE(op))
-#define _PyList_ITEMS(op) (((PyListObject *)(op))->ob_item)
+# define Py_CPYTHON_LISTOBJECT_H
+# include "cpython/listobject.h"
+# undef Py_CPYTHON_LISTOBJECT_H
#endif
#ifdef __cplusplus
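
The reference-count warning in the rewritten header comment is easy to get
wrong, so here is a sketch of correct usage: PyList_SetItem() steals the
reference it is given, even on failure, and PyList_GetItem() returns a
borrowed reference.

#include <Python.h>

static int
store_long(PyObject *list, Py_ssize_t i, long value)
{
    PyObject *num = PyLong_FromLong(value);  /* new reference */
    if (num == NULL) {
        return -1;
    }
    /* The reference to num is consumed whether or not the call succeeds,
       so no Py_DECREF here. */
    return PyList_SetItem(list, i, num);
}
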
diff --git a/x64/include/longobject.h b/x64/include/longobject.h
index 1e7a58d..1b28809 100644
--- a/x64/include/longobject.h
+++ b/x64/include/longobject.h
@@ -13,7 +13,7 @@ PyAPI_DATA(PyTypeObject) PyLong_Type;
#define PyLong_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LONG_SUBCLASS)
-#define PyLong_CheckExact(op) (Py_TYPE(op) == &PyLong_Type)
+#define PyLong_CheckExact(op) Py_IS_TYPE(op, &PyLong_Type)
PyAPI_FUNC(PyObject *) PyLong_FromLong(long);
PyAPI_FUNC(PyObject *) PyLong_FromUnsignedLong(unsigned long);
@@ -74,7 +74,7 @@ PyAPI_FUNC(int) _PyLong_Size_t_Converter(PyObject *, void *);
#endif
/* Used by Python/mystrtoul.c, _PyBytes_FromHex(),
- _PyBytes_DecodeEscapeRecode(), etc. */
+ _PyBytes_DecodeEscape(), etc. */
#ifndef Py_LIMITED_API
PyAPI_DATA(unsigned char) _PyLong_DigitValue[256];
#endif
diff --git a/x64/include/memoryobject.h b/x64/include/memoryobject.h
index 990a716..306028f 100644
--- a/x64/include/memoryobject.h
+++ b/x64/include/memoryobject.h
@@ -11,7 +11,7 @@ PyAPI_DATA(PyTypeObject) _PyManagedBuffer_Type;
#endif
PyAPI_DATA(PyTypeObject) PyMemoryView_Type;
-#define PyMemoryView_Check(op) (Py_TYPE(op) == &PyMemoryView_Type)
+#define PyMemoryView_Check(op) Py_IS_TYPE(op, &PyMemoryView_Type)
#ifndef Py_LIMITED_API
/* Get a pointer to the memoryview's private copy of the exporter's buffer. */
diff --git a/x64/include/methodobject.h b/x64/include/methodobject.h
index ba3b887..12e049b 100644
--- a/x64/include/methodobject.h
+++ b/x64/include/methodobject.h
@@ -13,7 +13,8 @@ extern "C" {
PyAPI_DATA(PyTypeObject) PyCFunction_Type;
-#define PyCFunction_Check(op) (Py_TYPE(op) == &PyCFunction_Type)
+#define PyCFunction_CheckExact(op) Py_IS_TYPE(op, &PyCFunction_Type)
+#define PyCFunction_Check(op) PyObject_TypeCheck(op, &PyCFunction_Type)
typedef PyObject *(*PyCFunction)(PyObject *, PyObject *);
typedef PyObject *(*_PyCFunctionFast) (PyObject *, PyObject *const *, Py_ssize_t);
@@ -22,31 +23,14 @@ typedef PyObject *(*PyCFunctionWithKeywords)(PyObject *, PyObject *,
typedef PyObject *(*_PyCFunctionFastWithKeywords) (PyObject *,
PyObject *const *, Py_ssize_t,
PyObject *);
-typedef PyObject *(*PyNoArgsFunction)(PyObject *);
+typedef PyObject *(*PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *,
+ size_t, PyObject *);
PyAPI_FUNC(PyCFunction) PyCFunction_GetFunction(PyObject *);
PyAPI_FUNC(PyObject *) PyCFunction_GetSelf(PyObject *);
PyAPI_FUNC(int) PyCFunction_GetFlags(PyObject *);
-/* Macros for direct access to these values. Type checks are *not*
- done, so use with care. */
-#ifndef Py_LIMITED_API
-#define PyCFunction_GET_FUNCTION(func) \
- (((PyCFunctionObject *)func) -> m_ml -> ml_meth)
-#define PyCFunction_GET_SELF(func) \
- (((PyCFunctionObject *)func) -> m_ml -> ml_flags & METH_STATIC ? \
- NULL : ((PyCFunctionObject *)func) -> m_self)
-#define PyCFunction_GET_FLAGS(func) \
- (((PyCFunctionObject *)func) -> m_ml -> ml_flags)
-#endif
-PyAPI_FUNC(PyObject *) PyCFunction_Call(PyObject *, PyObject *, PyObject *);
-
-#ifndef Py_LIMITED_API
-PyAPI_FUNC(PyObject *) _PyCFunction_FastCallDict(PyObject *func,
- PyObject *const *args,
- Py_ssize_t nargs,
- PyObject *kwargs);
-#endif
+Py_DEPRECATED(3.9) PyAPI_FUNC(PyObject *) PyCFunction_Call(PyObject *, PyObject *, PyObject *);
struct PyMethodDef {
const char *ml_name; /* The name of the built-in function/method */
@@ -61,6 +45,13 @@ typedef struct PyMethodDef PyMethodDef;
PyAPI_FUNC(PyObject *) PyCFunction_NewEx(PyMethodDef *, PyObject *,
PyObject *);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+#define PyCFunction_NewEx(ML, SELF, MOD) PyCMethod_New((ML), (SELF), (MOD), NULL)
+PyAPI_FUNC(PyObject *) PyCMethod_New(PyMethodDef *, PyObject *,
+ PyObject *, PyTypeObject *);
+#endif
+
+
/* Flag passed to newmethodobject */
/* #define METH_OLDARGS 0x0000 -- unsupported now */
#define METH_VARARGS 0x0001
@@ -93,36 +84,24 @@ PyAPI_FUNC(PyObject *) PyCFunction_NewEx(PyMethodDef *, PyObject *,
#define METH_STACKLESS 0x0000
#endif
-#ifndef Py_LIMITED_API
-typedef struct {
- PyObject_HEAD
- PyMethodDef *m_ml; /* Description of the C function to call */
- PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */
- PyObject *m_module; /* The __module__ attribute, can be anything */
- PyObject *m_weakreflist; /* List of weak references */
- vectorcallfunc vectorcall;
-} PyCFunctionObject;
-
-PyAPI_FUNC(PyObject *) _PyMethodDef_RawFastCallDict(
- PyMethodDef *method,
- PyObject *self,
- PyObject *const *args,
- Py_ssize_t nargs,
- PyObject *kwargs);
-
-PyAPI_FUNC(PyObject *) _PyMethodDef_RawFastCallKeywords(
- PyMethodDef *method,
- PyObject *self,
- PyObject *const *args,
- Py_ssize_t nargs,
- PyObject *kwnames);
+/* METH_METHOD means the function stores an
+ * additional reference to the class that defines it;
+ * both self and class are passed to it.
+ * It uses PyCMethodObject instead of PyCFunctionObject.
+ * May not be combined with METH_NOARGS, METH_O, METH_CLASS or METH_STATIC.
+ */
+
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+#define METH_METHOD 0x0200
#endif
-PyAPI_FUNC(int) PyCFunction_ClearFreeList(void);
#ifndef Py_LIMITED_API
-PyAPI_FUNC(void) _PyCFunction_DebugMallocStats(FILE *out);
-PyAPI_FUNC(void) _PyMethod_DebugMallocStats(FILE *out);
+
+#define Py_CPYTHON_METHODOBJECT_H
+#include "cpython/methodobject.h"
+#undef Py_CPYTHON_METHODOBJECT_H
+
#endif
#ifdef __cplusplus
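
The new PyCMethod type and METH_METHOD flag implement PEP 573 (module state
access from methods). A hedged sketch of a defining-class method: per the
comment above, METH_METHOD is combined with the fastcall conventions, and
PyType_GetModule() (declared in the object.h hunk below) returns a borrowed
reference. The names are placeholders, not a real extension:

#include <Python.h>

static PyObject *
mymethod(PyObject *self, PyTypeObject *defining_class,
         PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    (void)self; (void)args; (void)nargsf; (void)kwnames;
    PyObject *module = PyType_GetModule(defining_class);  /* borrowed */
    if (module == NULL) {
        return NULL;
    }
    /* ... access per-module state via PyModule_GetState(module) ... */
    Py_RETURN_NONE;
}

static PyMethodDef mymethod_def = {
    "mymethod",
    (PyCFunction)(void (*)(void))mymethod,
    METH_METHOD | METH_FASTCALL | METH_KEYWORDS,
    "Example defining-class method (hypothetical)."
};
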
diff --git a/x64/include/modsupport.h b/x64/include/modsupport.h
index f90ede4..4c4aab6 100644
--- a/x64/include/modsupport.h
+++ b/x64/include/modsupport.h
@@ -60,9 +60,12 @@ PyAPI_FUNC(int) _PyArg_UnpackStack(
...);
PyAPI_FUNC(int) _PyArg_NoKeywords(const char *funcname, PyObject *kwargs);
+PyAPI_FUNC(int) _PyArg_NoKwnames(const char *funcname, PyObject *kwnames);
PyAPI_FUNC(int) _PyArg_NoPositional(const char *funcname, PyObject *args);
#define _PyArg_NoKeywords(funcname, kwargs) \
((kwargs) == NULL || _PyArg_NoKeywords((funcname), (kwargs)))
+#define _PyArg_NoKwnames(funcname, kwnames) \
+ ((kwnames) == NULL || _PyArg_NoKwnames((funcname), (kwnames)))
#define _PyArg_NoPositional(funcname, args) \
((args) == NULL || _PyArg_NoPositional((funcname), (args)))
@@ -136,6 +139,10 @@ void _PyArg_Fini(void);
PyAPI_FUNC(int) PyModule_AddObject(PyObject *, const char *, PyObject *);
PyAPI_FUNC(int) PyModule_AddIntConstant(PyObject *, const char *, long);
PyAPI_FUNC(int) PyModule_AddStringConstant(PyObject *, const char *, const char *);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+/* New in 3.9 */
+PyAPI_FUNC(int) PyModule_AddType(PyObject *module, PyTypeObject *type);
+#endif /* Py_LIMITED_API */
#define PyModule_AddIntMacro(m, c) PyModule_AddIntConstant(m, #c, c)
#define PyModule_AddStringMacro(m, c) PyModule_AddStringConstant(m, #c, c)
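
PyModule_AddType(), new in 3.9, readies the type if necessary, takes a new
reference, and registers it under the last component of its tp_name. A sketch
against a hypothetical static type:

#include <Python.h>

extern PyTypeObject MyType;  /* hypothetical extension type */

static int
mymodule_exec(PyObject *module)
{
    /* Replaces the usual PyType_Ready() + Py_INCREF() +
       PyModule_AddObject() sequence. */
    return PyModule_AddType(module, &MyType);
}
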
diff --git a/x64/include/moduleobject.h b/x64/include/moduleobject.h
index e246fd2..cf9ad40 100644
--- a/x64/include/moduleobject.h
+++ b/x64/include/moduleobject.h
@@ -10,7 +10,7 @@ extern "C" {
PyAPI_DATA(PyTypeObject) PyModule_Type;
#define PyModule_Check(op) PyObject_TypeCheck(op, &PyModule_Type)
-#define PyModule_CheckExact(op) (Py_TYPE(op) == &PyModule_Type)
+#define PyModule_CheckExact(op) Py_IS_TYPE(op, &PyModule_Type)
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(PyObject *) PyModule_NewObject(
diff --git a/x64/include/node.h b/x64/include/node.h
index 2b39074..ca24f28 100644
--- a/x64/include/node.h
+++ b/x64/include/node.h
@@ -31,7 +31,6 @@ PyAPI_FUNC(Py_ssize_t) _PyNode_SizeOf(node *n);
#define NCH(n) ((n)->n_nchildren)
#define CHILD(n, i) (&(n)->n_child[i])
-#define RCHILD(n, i) (CHILD(n, NCH(n) + i))
#define TYPE(n) ((n)->n_type)
#define STR(n) ((n)->n_str)
#define LINENO(n) ((n)->n_lineno)
diff --git a/x64/include/object.h b/x64/include/object.h
index cc98d8a..9c1a7f4 100644
--- a/x64/include/object.h
+++ b/x64/include/object.h
@@ -1,8 +1,6 @@
#ifndef Py_OBJECT_H
#define Py_OBJECT_H
-#include "pymem.h" /* _Py_tracemalloc_config */
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -29,7 +27,7 @@ of data it contains. An object's type is fixed when it is created.
Types themselves are represented as objects; an object contains a
pointer to the corresponding type object. The type itself has a type
pointer pointing to the object representing the type 'type', which
-contains a pointer to itself!).
+contains a pointer to itself!
Objects do not float around in memory; once allocated an object keeps
the same size and address. Objects that must hold variable-size data
@@ -63,6 +61,9 @@ whose size is determined when the object is allocated.
#error Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, and Py_REF_DEBUG
#endif
+/* PyTypeObject structure is defined in cpython/object.h.
+ In Py_LIMITED_API, PyTypeObject is an opaque structure. */
+typedef struct _typeobject PyTypeObject;
#ifdef Py_TRACE_REFS
/* Define pointers to support a doubly-linked list of all live heap objects. */
@@ -104,11 +105,12 @@ whose size is determined when the object is allocated.
typedef struct _object {
_PyObject_HEAD_EXTRA
Py_ssize_t ob_refcnt;
- struct _typeobject *ob_type;
+ PyTypeObject *ob_type;
} PyObject;
/* Cast argument to PyObject* type. */
#define _PyObject_CAST(op) ((PyObject*)(op))
+#define _PyObject_CAST_CONST(op) ((const PyObject*)(op))
typedef struct {
PyObject ob_base;
@@ -122,6 +124,27 @@ typedef struct {
#define Py_TYPE(ob) (_PyObject_CAST(ob)->ob_type)
#define Py_SIZE(ob) (_PyVarObject_CAST(ob)->ob_size)
+static inline int _Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+ return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST_CONST(ob), type)
+
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt) {
+ ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt)
+
+static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type) {
+ ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type)
+
+static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) {
+ ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE(_PyVarObject_CAST(ob), size)
+
+
/*
Type objects contain a string containing the type name (to help somewhat
in debugging), the allocation parameters (see PyObject_New() and
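
The new Py_SET_REFCNT(), Py_SET_TYPE() and Py_SET_SIZE() functions added in
the hunk above give extensions a supported way to write these fields, so the
plain Py_TYPE()/Py_SIZE() macros need not stay assignable lvalues forever. A
sketch of the migration, assuming code that previously assigned through the
macros:

#include <Python.h>

static void
init_var_object(PyVarObject *op, PyTypeObject *type, Py_ssize_t size)
{
    Py_SET_TYPE(op, type);    /* was: Py_TYPE(op) = type */
    Py_SET_SIZE(op, size);    /* was: Py_SIZE(op) = size */
    Py_SET_REFCNT(op, 1);     /* was: op->ob_refcnt = 1 */
}
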
@@ -167,15 +190,8 @@ typedef PyObject *(*iternextfunc) (PyObject *);
typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
-typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
-typedef PyObject *(*allocfunc)(struct _typeobject *, Py_ssize_t);
-
-#ifdef Py_LIMITED_API
-/* In Py_LIMITED_API, PyTypeObject is an opaque structure. */
-typedef struct _typeobject PyTypeObject;
-#else
-/* PyTypeObject is defined in cpython/object.h */
-#endif
+typedef PyObject *(*newfunc)(PyTypeObject *, PyObject *, PyObject *);
+typedef PyObject *(*allocfunc)(PyTypeObject *, Py_ssize_t);
typedef struct{
int slot; /* slot id, see below */
@@ -195,30 +211,31 @@ PyAPI_FUNC(PyObject*) PyType_FromSpec(PyType_Spec*);
PyAPI_FUNC(PyObject*) PyType_FromSpecWithBases(PyType_Spec*, PyObject*);
#endif
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03040000
-PyAPI_FUNC(void*) PyType_GetSlot(struct _typeobject*, int);
+PyAPI_FUNC(void*) PyType_GetSlot(PyTypeObject*, int);
+#endif
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+PyAPI_FUNC(PyObject*) PyType_FromModuleAndSpec(PyObject *, PyType_Spec *, PyObject *);
+PyAPI_FUNC(PyObject *) PyType_GetModule(struct _typeobject *);
+PyAPI_FUNC(void *) PyType_GetModuleState(struct _typeobject *);
#endif
/* Generic type check */
-PyAPI_FUNC(int) PyType_IsSubtype(struct _typeobject *, struct _typeobject *);
+PyAPI_FUNC(int) PyType_IsSubtype(PyTypeObject *, PyTypeObject *);
#define PyObject_TypeCheck(ob, tp) \
- (Py_TYPE(ob) == (tp) || PyType_IsSubtype(Py_TYPE(ob), (tp)))
-
-PyAPI_DATA(struct _typeobject) PyType_Type; /* built-in 'type' */
-PyAPI_DATA(struct _typeobject) PyBaseObject_Type; /* built-in 'object' */
-PyAPI_DATA(struct _typeobject) PySuper_Type; /* built-in 'super' */
+ (Py_IS_TYPE(ob, tp) || PyType_IsSubtype(Py_TYPE(ob), (tp)))
-PyAPI_FUNC(unsigned long) PyType_GetFlags(struct _typeobject*);
+PyAPI_DATA(PyTypeObject) PyType_Type; /* built-in 'type' */
+PyAPI_DATA(PyTypeObject) PyBaseObject_Type; /* built-in 'object' */
+PyAPI_DATA(PyTypeObject) PySuper_Type; /* built-in 'super' */
-#define PyType_Check(op) \
- PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_TYPE_SUBCLASS)
-#define PyType_CheckExact(op) (Py_TYPE(op) == &PyType_Type)
+PyAPI_FUNC(unsigned long) PyType_GetFlags(PyTypeObject*);
-PyAPI_FUNC(int) PyType_Ready(struct _typeobject *);
-PyAPI_FUNC(PyObject *) PyType_GenericAlloc(struct _typeobject *, Py_ssize_t);
-PyAPI_FUNC(PyObject *) PyType_GenericNew(struct _typeobject *,
+PyAPI_FUNC(int) PyType_Ready(PyTypeObject *);
+PyAPI_FUNC(PyObject *) PyType_GenericAlloc(PyTypeObject *, Py_ssize_t);
+PyAPI_FUNC(PyObject *) PyType_GenericNew(PyTypeObject *,
PyObject *, PyObject *);
PyAPI_FUNC(unsigned int) PyType_ClearCache(void);
-PyAPI_FUNC(void) PyType_Modified(struct _typeobject *);
+PyAPI_FUNC(void) PyType_Modified(PyTypeObject *);
/* Generic operations on objects */
PyAPI_FUNC(PyObject *) PyObject_Repr(PyObject *);
@@ -235,8 +252,7 @@ PyAPI_FUNC(int) PyObject_SetAttr(PyObject *, PyObject *, PyObject *);
PyAPI_FUNC(int) PyObject_HasAttr(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyObject_SelfIter(PyObject *);
PyAPI_FUNC(PyObject *) PyObject_GenericGetAttr(PyObject *, PyObject *);
-PyAPI_FUNC(int) PyObject_GenericSetAttr(PyObject *,
- PyObject *, PyObject *);
+PyAPI_FUNC(int) PyObject_GenericSetAttr(PyObject *, PyObject *, PyObject *);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(int) PyObject_GenericSetDict(PyObject *, PyObject *, void *);
#endif
@@ -290,7 +306,9 @@ given type object has a specified feature.
/* Set if the type implements the vectorcall protocol (PEP 590) */
#ifndef Py_LIMITED_API
-#define _Py_TPFLAGS_HAVE_VECTORCALL (1UL << 11)
+#define Py_TPFLAGS_HAVE_VECTORCALL (1UL << 11)
+// Backwards compatibility alias for API that was provisional in Python 3.8
+#define _Py_TPFLAGS_HAVE_VECTORCALL Py_TPFLAGS_HAVE_VECTORCALL
#endif
/* Set if the type is 'ready' -- fully initialized */
@@ -337,19 +355,14 @@ given type object has a specified feature.
/* NOTE: The following flags reuse lower bits (removed as part of the
* Python 3.0 transition). */
-/* The following flag is kept for compatibility. Starting with 3.8,
- * binary compatibility of C extensions accross feature releases of
+/* The following flag is kept for compatibility. Starting with 3.8,
+ * binary compatibility of C extensions across feature releases of
* Python is not supported anymore, except when using the stable ABI.
*/
/* Type structure has tp_finalize member (3.4) */
#define Py_TPFLAGS_HAVE_FINALIZE (1UL << 0)
-#ifdef Py_LIMITED_API
-# define PyType_HasFeature(t,f) ((PyType_GetFlags(t) & (f)) != 0)
-#endif
-#define PyType_FastSubclass(t,f) PyType_HasFeature(t,f)
-
/*
The macros Py_INCREF(op) and Py_DECREF(op) are used to increment or decrement
@@ -379,94 +392,33 @@ decision that's up to the implementer of each new type so if you want,
you can count such references to the type object.)
*/
-/* First define a pile of simple helper macros, one set per special
- * build symbol. These either expand to the obvious things, or to
- * nothing at all when the special mode isn't in effect. The main
- * macros can later be defined just once then, yet expand to different
- * things depending on which special build options are and aren't in effect.
- * Trust me <wink>: while painful, this is 20x easier to understand than,
- * e.g, defining _Py_NewReference five different times in a maze of nested
- * #ifdefs (we used to do that -- it was impenetrable).
- */
#ifdef Py_REF_DEBUG
PyAPI_DATA(Py_ssize_t) _Py_RefTotal;
PyAPI_FUNC(void) _Py_NegativeRefcount(const char *filename, int lineno,
PyObject *op);
-PyAPI_FUNC(Py_ssize_t) _Py_GetRefTotal(void);
-#define _Py_INC_REFTOTAL _Py_RefTotal++
-#define _Py_DEC_REFTOTAL _Py_RefTotal--
-
-/* Py_REF_DEBUG also controls the display of refcounts and memory block
- * allocations at the interactive prompt and at interpreter shutdown
- */
-PyAPI_FUNC(void) _PyDebug_PrintTotalRefs(void);
-#else
-#define _Py_INC_REFTOTAL
-#define _Py_DEC_REFTOTAL
#endif /* Py_REF_DEBUG */
-#ifdef COUNT_ALLOCS
-PyAPI_FUNC(void) _Py_inc_count(struct _typeobject *);
-PyAPI_FUNC(void) _Py_dec_count(struct _typeobject *);
-#define _Py_INC_TPALLOCS(OP) _Py_inc_count(Py_TYPE(OP))
-#define _Py_INC_TPFREES(OP) _Py_dec_count(Py_TYPE(OP))
-#define _Py_DEC_TPFREES(OP) Py_TYPE(OP)->tp_frees--
-#define _Py_COUNT_ALLOCS_COMMA ,
-#else
-#define _Py_INC_TPALLOCS(OP)
-#define _Py_INC_TPFREES(OP)
-#define _Py_DEC_TPFREES(OP)
-#define _Py_COUNT_ALLOCS_COMMA
-#endif /* COUNT_ALLOCS */
-
-/* Update the Python traceback of an object. This function must be called
- when a memory block is reused from a free list. */
-PyAPI_FUNC(int) _PyTraceMalloc_NewReference(PyObject *op);
-
-#ifdef Py_TRACE_REFS
-/* Py_TRACE_REFS is such major surgery that we call external routines. */
-PyAPI_FUNC(void) _Py_NewReference(PyObject *);
-PyAPI_FUNC(void) _Py_ForgetReference(PyObject *);
-PyAPI_FUNC(void) _Py_PrintReferences(FILE *);
-PyAPI_FUNC(void) _Py_PrintReferenceAddresses(FILE *);
-PyAPI_FUNC(void) _Py_AddToAllObjects(PyObject *, int force);
-#else
-/* Without Py_TRACE_REFS, there's little enough to do that we expand code
- inline. */
-static inline void _Py_NewReference(PyObject *op)
-{
- if (_Py_tracemalloc_config.tracing) {
- _PyTraceMalloc_NewReference(op);
- }
- _Py_INC_TPALLOCS(op);
- _Py_INC_REFTOTAL;
- Py_REFCNT(op) = 1;
-}
-
-static inline void _Py_ForgetReference(PyObject *op)
-{
- (void)op; /* may be unused, shut up -Wunused-parameter */
- _Py_INC_TPFREES(op);
-}
-#endif /* !Py_TRACE_REFS */
-
-
PyAPI_FUNC(void) _Py_Dealloc(PyObject *);
static inline void _Py_INCREF(PyObject *op)
{
- _Py_INC_REFTOTAL;
+#ifdef Py_REF_DEBUG
+ _Py_RefTotal++;
+#endif
op->ob_refcnt++;
}
#define Py_INCREF(op) _Py_INCREF(_PyObject_CAST(op))
-static inline void _Py_DECREF(const char *filename, int lineno,
- PyObject *op)
+static inline void _Py_DECREF(
+#ifdef Py_REF_DEBUG
+ const char *filename, int lineno,
+#endif
+ PyObject *op)
{
- (void)filename; /* may be unused, shut up -Wunused-parameter */
- (void)lineno; /* may be unused, shut up -Wunused-parameter */
- _Py_DEC_REFTOTAL;
+#ifdef Py_REF_DEBUG
+ _Py_RefTotal--;
+#endif
if (--op->ob_refcnt != 0) {
#ifdef Py_REF_DEBUG
if (op->ob_refcnt < 0) {
@@ -479,7 +431,11 @@ static inline void _Py_DECREF(const char *filename, int lineno,
}
}
-#define Py_DECREF(op) _Py_DECREF(__FILE__, __LINE__, _PyObject_CAST(op))
+#ifdef Py_REF_DEBUG
+# define Py_DECREF(op) _Py_DECREF(__FILE__, __LINE__, _PyObject_CAST(op))
+#else
+# define Py_DECREF(op) _Py_DECREF(_PyObject_CAST(op))
+#endif
/* Safely decref `op` and set `op` to NULL, especially useful in tp_clear
@@ -654,98 +610,37 @@ it carefully, it may save lots of calls to Py_INCREF() and Py_DECREF() at
times.
*/
+#ifndef Py_LIMITED_API
+# define Py_CPYTHON_OBJECT_H
+# include "cpython/object.h"
+# undef Py_CPYTHON_OBJECT_H
+#endif
-/* Trashcan mechanism, thanks to Christian Tismer.
-
-When deallocating a container object, it's possible to trigger an unbounded
-chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
-next" object in the chain to 0. This can easily lead to stack overflows,
-especially in threads (which typically have less stack space to work with).
-
-A container object can avoid this by bracketing the body of its tp_dealloc
-function with a pair of macros:
-static void
-mytype_dealloc(mytype *p)
+static inline int
+PyType_HasFeature(PyTypeObject *type, unsigned long feature)
{
- ... declarations go here ...
-
- PyObject_GC_UnTrack(p); // must untrack first
- Py_TRASHCAN_BEGIN(p, mytype_dealloc)
- ... The body of the deallocator goes here, including all calls ...
- ... to Py_DECREF on contained objects. ...
- Py_TRASHCAN_END // there should be no code after this
+ unsigned long flags;
+#ifdef Py_LIMITED_API
+ // PyTypeObject is opaque in the limited C API
+ flags = PyType_GetFlags(type);
+#else
+ flags = type->tp_flags;
+#endif
+ return ((flags & feature) != 0);
}
-CAUTION: Never return from the middle of the body! If the body needs to
-"get out early", put a label immediately before the Py_TRASHCAN_END
-call, and goto it. Else the call-depth counter (see below) will stay
-above 0 forever, and the trashcan will never get emptied.
-
-How it works: The BEGIN macro increments a call-depth counter. So long
-as this counter is small, the body of the deallocator is run directly without
-further ado. But if the counter gets large, it instead adds p to a list of
-objects to be deallocated later, skips the body of the deallocator, and
-resumes execution after the END macro. The tp_dealloc routine then returns
-without deallocating anything (and so unbounded call-stack depth is avoided).
-
-When the call stack finishes unwinding again, code generated by the END macro
-notices this, and calls another routine to deallocate all the objects that
-may have been added to the list of deferred deallocations. In effect, a
-chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces,
-with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
-
-Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
-class, we need to ensure that the trashcan is only triggered on the tp_dealloc
-of the actual class being deallocated. Otherwise we might end up with a
-partially-deallocated object. To check this, the tp_dealloc function must be
-passed as second argument to Py_TRASHCAN_BEGIN().
-*/
-
-/* The new thread-safe private API, invoked by the macros below. */
-PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*);
-PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
-
-#define PyTrash_UNWIND_LEVEL 50
-
-#define Py_TRASHCAN_BEGIN_CONDITION(op, cond) \
- do { \
- PyThreadState *_tstate = NULL; \
- /* If "cond" is false, then _tstate remains NULL and the deallocator \
- * is run normally without involving the trashcan */ \
- if (cond) { \
- _tstate = PyThreadState_GET(); \
- if (_tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) { \
- /* Store the object (to be deallocated later) and jump past \
- * Py_TRASHCAN_END, skipping the body of the deallocator */ \
- _PyTrash_thread_deposit_object(_PyObject_CAST(op)); \
- break; \
- } \
- ++_tstate->trash_delete_nesting; \
- }
- /* The body of the deallocator is here. */
-#define Py_TRASHCAN_END \
- if (_tstate) { \
- --_tstate->trash_delete_nesting; \
- if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \
- _PyTrash_thread_destroy_chain(); \
- } \
- } while (0);
-
-#define Py_TRASHCAN_BEGIN(op, dealloc) Py_TRASHCAN_BEGIN_CONDITION(op, \
- Py_TYPE(op)->tp_dealloc == (destructor)(dealloc))
-
-/* For backwards compatibility, these macros enable the trashcan
- * unconditionally */
-#define Py_TRASHCAN_SAFE_BEGIN(op) Py_TRASHCAN_BEGIN_CONDITION(op, 1)
-#define Py_TRASHCAN_SAFE_END(op) Py_TRASHCAN_END
+#define PyType_FastSubclass(type, flag) PyType_HasFeature(type, flag)
+static inline int _PyType_Check(PyObject *op) {
+ return PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_TYPE_SUBCLASS);
+}
+#define PyType_Check(op) _PyType_Check(_PyObject_CAST(op))
-#ifndef Py_LIMITED_API
-# define Py_CPYTHON_OBJECT_H
-# include "cpython/object.h"
-# undef Py_CPYTHON_OBJECT_H
-#endif
+static inline int _PyType_CheckExact(PyObject *op) {
+ return Py_IS_TYPE(op, &PyType_Type);
+}
+#define PyType_CheckExact(op) _PyType_CheckExact(_PyObject_CAST(op))
#ifdef __cplusplus
}
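
PyType_HasFeature() is now a single inline function for both build modes: it
reads tp_flags directly, or goes through PyType_GetFlags() when PyTypeObject
is opaque under the limited API, as the hunk above shows. Usage is unchanged;
a trivial sketch:

#include <Python.h>

static int
supports_gc(PyTypeObject *type)
{
    return PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC);
}
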
diff --git a/x64/include/objimpl.h b/x64/include/objimpl.h
index 2337d8a..030d7ee 100644
--- a/x64/include/objimpl.h
+++ b/x64/include/objimpl.h
@@ -122,103 +122,32 @@ PyAPI_FUNC(PyVarObject *) PyObject_InitVar(PyVarObject *,
PyAPI_FUNC(PyObject *) _PyObject_New(PyTypeObject *);
PyAPI_FUNC(PyVarObject *) _PyObject_NewVar(PyTypeObject *, Py_ssize_t);
-#define PyObject_New(type, typeobj) \
- ( (type *) _PyObject_New(typeobj) )
-#define PyObject_NewVar(type, typeobj, n) \
- ( (type *) _PyObject_NewVar((typeobj), (n)) )
-
-/* Inline functions trading binary compatibility for speed:
- PyObject_INIT() is the fast version of PyObject_Init(), and
- PyObject_INIT_VAR() is the fast version of PyObject_InitVar.
- See also pymem.h.
-
- These inline functions expect non-NULL object pointers. */
-static inline PyObject*
-_PyObject_INIT(PyObject *op, PyTypeObject *typeobj)
-{
- assert(op != NULL);
- Py_TYPE(op) = typeobj;
- if (PyType_GetFlags(typeobj) & Py_TPFLAGS_HEAPTYPE) {
- Py_INCREF(typeobj);
- }
- _Py_NewReference(op);
- return op;
-}
-
-#define PyObject_INIT(op, typeobj) \
- _PyObject_INIT(_PyObject_CAST(op), (typeobj))
-
-static inline PyVarObject*
-_PyObject_INIT_VAR(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
-{
- assert(op != NULL);
- Py_SIZE(op) = size;
- PyObject_INIT((PyObject *)op, typeobj);
- return op;
-}
+#define PyObject_New(type, typeobj) ((type *)_PyObject_New(typeobj))
-#define PyObject_INIT_VAR(op, typeobj, size) \
- _PyObject_INIT_VAR(_PyVarObject_CAST(op), (typeobj), (size))
+// Alias to PyObject_New(). In Python 3.8, PyObject_NEW() directly called
+// PyObject_MALLOC() with _PyObject_SIZE().
+#define PyObject_NEW(type, typeobj) PyObject_New(type, typeobj)
-#define _PyObject_SIZE(typeobj) ( (typeobj)->tp_basicsize )
-
-/* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a
- vrbl-size object with nitems items, exclusive of gc overhead (if any). The
- value is rounded up to the closest multiple of sizeof(void *), in order to
- ensure that pointer fields at the end of the object are correctly aligned
- for the platform (this is of special importance for subclasses of, e.g.,
- str or int, so that pointers can be stored after the embedded data).
+#define PyObject_NewVar(type, typeobj, n) \
+ ( (type *) _PyObject_NewVar((typeobj), (n)) )
- Note that there's no memory wastage in doing this, as malloc has to
- return (at worst) pointer-aligned memory anyway.
-*/
-#if ((SIZEOF_VOID_P - 1) & SIZEOF_VOID_P) != 0
-# error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2"
+// Alias to PyObject_NewVar(). In Python 3.8, PyObject_NEW_VAR() directly
+// called PyObject_MALLOC() with _PyObject_VAR_SIZE().
+#define PyObject_NEW_VAR(type, typeobj, n) PyObject_NewVar(type, typeobj, n)
+
+
+#ifdef Py_LIMITED_API
+/* Define PyObject_INIT() and PyObject_INIT_VAR() as aliases to PyObject_Init()
+ and PyObject_InitVar() in the limited C API for compatibility with the
+ CPython C API. */
+# define PyObject_INIT(op, typeobj) \
+ PyObject_Init(_PyObject_CAST(op), (typeobj))
+# define PyObject_INIT_VAR(op, typeobj, size) \
+ PyObject_InitVar(_PyVarObject_CAST(op), (typeobj), (size))
+#else
+/* PyObject_INIT() and PyObject_INIT_VAR() are defined in cpython/objimpl.h */
#endif
-#define _PyObject_VAR_SIZE(typeobj, nitems) \
- _Py_SIZE_ROUND_UP((typeobj)->tp_basicsize + \
- (nitems)*(typeobj)->tp_itemsize, \
- SIZEOF_VOID_P)
-
-#define PyObject_NEW(type, typeobj) \
-( (type *) PyObject_Init( \
- (PyObject *) PyObject_MALLOC( _PyObject_SIZE(typeobj) ), (typeobj)) )
-
-#define PyObject_NEW_VAR(type, typeobj, n) \
-( (type *) PyObject_InitVar( \
- (PyVarObject *) PyObject_MALLOC(_PyObject_VAR_SIZE((typeobj),(n)) ),\
- (typeobj), (n)) )
-
-/* This example code implements an object constructor with a custom
- allocator, where PyObject_New is inlined, and shows the important
- distinction between two steps (at least):
- 1) the actual allocation of the object storage;
- 2) the initialization of the Python specific fields
- in this storage with PyObject_{Init, InitVar}.
-
- PyObject *
- YourObject_New(...)
- {
- PyObject *op;
-
- op = (PyObject *) Your_Allocator(_PyObject_SIZE(YourTypeStruct));
- if (op == NULL)
- return PyErr_NoMemory();
-
- PyObject_Init(op, &YourTypeStruct);
-
- op->ob_field = value;
- ...
- return op;
- }
-
- Note that in C++, the use of the new operator usually implies that
- the 1st step is performed automatically for you, so in a C++ class
- constructor you would start directly with PyObject_Init/InitVar
-*/
-
-
/*
* Garbage Collection Support
@@ -257,6 +186,8 @@ PyAPI_FUNC(void) PyObject_GC_Del(void *);
#define PyObject_GC_NewVar(type, typeobj, n) \
( (type *) _PyObject_GC_NewVar((typeobj), (n)) )
+PyAPI_FUNC(int) PyObject_GC_IsTracked(PyObject *);
+PyAPI_FUNC(int) PyObject_GC_IsFinalized(PyObject *);
/* Utility macro to help write tp_traverse functions.
* To use this macro, the tp_traverse function must name its arguments
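
PyObject_GC_IsTracked() and PyObject_GC_IsFinalized() are new introspection
entry points; they back gc.is_tracked() and gc.is_finalized() on the Python
side. A sketch with a hypothetical debugging helper:

#include <Python.h>
#include <stdio.h>

static void
report_gc_state(PyObject *op)
{
    printf("tracked=%d finalized=%d\n",
           PyObject_GC_IsTracked(op),
           PyObject_GC_IsFinalized(op));
}
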
diff --git a/x64/include/odictobject.h b/x64/include/odictobject.h
index 35aff8a..e070413 100644
--- a/x64/include/odictobject.h
+++ b/x64/include/odictobject.h
@@ -19,7 +19,7 @@ PyAPI_DATA(PyTypeObject) PyODictItems_Type;
PyAPI_DATA(PyTypeObject) PyODictValues_Type;
#define PyODict_Check(op) PyObject_TypeCheck(op, &PyODict_Type)
-#define PyODict_CheckExact(op) (Py_TYPE(op) == &PyODict_Type)
+#define PyODict_CheckExact(op) Py_IS_TYPE(op, &PyODict_Type)
#define PyODict_SIZE(op) PyDict_GET_SIZE((op))
PyAPI_FUNC(PyObject *) PyODict_New(void);
diff --git a/x64/include/opcode.h b/x64/include/opcode.h
index 2a29e97..19944fa 100644
--- a/x64/include/opcode.h
+++ b/x64/include/opcode.h
@@ -30,10 +30,11 @@ extern "C" {
#define BINARY_TRUE_DIVIDE 27
#define INPLACE_FLOOR_DIVIDE 28
#define INPLACE_TRUE_DIVIDE 29
+#define RERAISE 48
+#define WITH_EXCEPT_START 49
#define GET_AITER 50
#define GET_ANEXT 51
#define BEFORE_ASYNC_WITH 52
-#define BEGIN_FINALLY 53
#define END_ASYNC_FOR 54
#define INPLACE_ADD 55
#define INPLACE_SUBTRACT 56
@@ -53,19 +54,18 @@ extern "C" {
#define LOAD_BUILD_CLASS 71
#define YIELD_FROM 72
#define GET_AWAITABLE 73
+#define LOAD_ASSERTION_ERROR 74
#define INPLACE_LSHIFT 75
#define INPLACE_RSHIFT 76
#define INPLACE_AND 77
#define INPLACE_XOR 78
#define INPLACE_OR 79
-#define WITH_CLEANUP_START 81
-#define WITH_CLEANUP_FINISH 82
+#define LIST_TO_TUPLE 82
#define RETURN_VALUE 83
#define IMPORT_STAR 84
#define SETUP_ANNOTATIONS 85
#define YIELD_VALUE 86
#define POP_BLOCK 87
-#define END_FINALLY 88
#define POP_EXCEPT 89
#define HAVE_ARGUMENT 90
#define STORE_NAME 90
@@ -94,6 +94,9 @@ extern "C" {
#define POP_JUMP_IF_FALSE 114
#define POP_JUMP_IF_TRUE 115
#define LOAD_GLOBAL 116
+#define IS_OP 117
+#define CONTAINS_OP 118
+#define JUMP_IF_NOT_EXC_MATCH 121
#define SETUP_FINALLY 122
#define LOAD_FAST 124
#define STORE_FAST 125
@@ -114,20 +117,16 @@ extern "C" {
#define SET_ADD 146
#define MAP_ADD 147
#define LOAD_CLASSDEREF 148
-#define BUILD_LIST_UNPACK 149
-#define BUILD_MAP_UNPACK 150
-#define BUILD_MAP_UNPACK_WITH_CALL 151
-#define BUILD_TUPLE_UNPACK 152
-#define BUILD_SET_UNPACK 153
#define SETUP_ASYNC_WITH 154
#define FORMAT_VALUE 155
#define BUILD_CONST_KEY_MAP 156
#define BUILD_STRING 157
-#define BUILD_TUPLE_UNPACK_WITH_CALL 158
#define LOAD_METHOD 160
#define CALL_METHOD 161
-#define CALL_FINALLY 162
-#define POP_FINALLY 163
+#define LIST_EXTEND 162
+#define SET_UPDATE 163
+#define DICT_MERGE 164
+#define DICT_UPDATE 165
/* EXCEPT_HANDLER is a special, implicit block type which is created when
entering an except handler. It is not an opcode but we define it here
@@ -135,11 +134,6 @@ extern "C" {
remaining private.*/
#define EXCEPT_HANDLER 257
-
-enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE,
- PyCmp_GT=Py_GT, PyCmp_GE=Py_GE, PyCmp_IN, PyCmp_NOT_IN,
- PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD};
-
#define HAS_ARG(op) ((op) >= HAVE_ARGUMENT)
#ifdef __cplusplus
diff --git a/x64/include/patchlevel.h b/x64/include/patchlevel.h
index 3671bb3..0b5d280 100644
--- a/x64/include/patchlevel.h
+++ b/x64/include/patchlevel.h
@@ -17,13 +17,13 @@
/* Version parsed out into numeric values */
/*--start constants--*/
#define PY_MAJOR_VERSION 3
-#define PY_MINOR_VERSION 8
-#define PY_MICRO_VERSION 2
+#define PY_MINOR_VERSION 9
+#define PY_MICRO_VERSION 1
#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL
#define PY_RELEASE_SERIAL 0
/* Version as a string */
-#define PY_VERSION "3.8.2"
+#define PY_VERSION "3.9.1"
/*--end constants--*/
/* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
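
Extension code usually keys off the packed hex form rather than the
individual macros; with the bump above, a 3.9 feature gate looks like this:

#include <Python.h>

#if PY_VERSION_HEX >= 0x03090000   /* 3.9.0 final or newer */
/* e.g. rely on PyModule_AddType(), METH_METHOD, PyObject_GC_IsTracked() */
#endif
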
diff --git a/x64/include/picklebufobject.h b/x64/include/picklebufobject.h
index f07e900..0df2561 100644
--- a/x64/include/picklebufobject.h
+++ b/x64/include/picklebufobject.h
@@ -12,7 +12,7 @@ extern "C" {
PyAPI_DATA(PyTypeObject) PyPickleBuffer_Type;
-#define PyPickleBuffer_Check(op) (Py_TYPE(op) == &PyPickleBuffer_Type)
+#define PyPickleBuffer_Check(op) Py_IS_TYPE(op, &PyPickleBuffer_Type)
/* Create a PickleBuffer redirecting to the given buffer-enabled object */
PyAPI_FUNC(PyObject *) PyPickleBuffer_FromObject(PyObject *);
diff --git a/x64/include/py_curses.h b/x64/include/py_curses.h
index 2702b37..b70252d 100644
--- a/x64/include/py_curses.h
+++ b/x64/include/py_curses.h
@@ -64,7 +64,7 @@ typedef struct {
char *encoding;
} PyCursesWindowObject;
-#define PyCursesWindow_Check(v) (Py_TYPE(v) == &PyCursesWindow_Type)
+#define PyCursesWindow_Check(v) Py_IS_TYPE(v, &PyCursesWindow_Type)
#define PyCurses_CAPSULE_NAME "_curses._C_API"
@@ -97,4 +97,3 @@ static const char catchall_NULL[] = "curses function returned NULL";
#endif /* !defined(Py_CURSES_H) */
-
diff --git a/x64/include/pycapsule.h b/x64/include/pycapsule.h
index d9ecda7..fb5d503 100644
--- a/x64/include/pycapsule.h
+++ b/x64/include/pycapsule.h
@@ -22,7 +22,7 @@ PyAPI_DATA(PyTypeObject) PyCapsule_Type;
typedef void (*PyCapsule_Destructor)(PyObject *);
-#define PyCapsule_CheckExact(op) (Py_TYPE(op) == &PyCapsule_Type)
+#define PyCapsule_CheckExact(op) Py_IS_TYPE(op, &PyCapsule_Type)
PyAPI_FUNC(PyObject *) PyCapsule_New(
diff --git a/x64/include/pyconfig.h b/x64/include/pyconfig.h
index b40e24f..d7d3cf0 100644
--- a/x64/include/pyconfig.h
+++ b/x64/include/pyconfig.h
@@ -135,9 +135,9 @@ WIN32 is still required for the locale module.
#endif /* MS_WIN64 */
/* set the version macros for the windows headers */
-/* Python 3.5+ requires Windows Vista or greater */
-#define Py_WINVER 0x0600 /* _WIN32_WINNT_VISTA */
-#define Py_NTDDI NTDDI_VISTA
+/* Python 3.9+ requires Windows 8 or greater */
+#define Py_WINVER 0x0602 /* _WIN32_WINNT_WIN8 */
+#define Py_NTDDI NTDDI_WIN8
/* We only set these values when building Python - we don't want to force
these values on extensions, as that will affect the prototypes and
@@ -193,12 +193,6 @@ typedef int pid_t;
#define Py_IS_NAN _isnan
#define Py_IS_INFINITY(X) (!_finite(X) && !_isnan(X))
#define Py_IS_FINITE(X) _finite(X)
-#define copysign _copysign
-
-/* Side by Side assemblies supported in VS 2005 and VS 2008 but not 2010*/
-#if _MSC_VER >= 1400 && _MSC_VER < 1600
-#define HAVE_SXS 1
-#endif
/* define some ANSI types that are not defined in earlier Win headers */
#if _MSC_VER >= 1200
@@ -274,11 +268,11 @@ Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */
file in their Makefile (other compilers are
generally taken care of by distutils.) */
# if defined(_DEBUG)
-# pragma comment(lib,"python38_d.lib")
+# pragma comment(lib,"python39_d.lib")
# elif defined(Py_LIMITED_API)
# pragma comment(lib,"python3.lib")
# else
-# pragma comment(lib,"python38.lib")
+# pragma comment(lib,"python39.lib")
# endif /* _DEBUG */
# endif /* _MSC_VER */
# endif /* Py_BUILD_CORE */
@@ -296,7 +290,7 @@ Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */
# define SIZEOF_HKEY 8
# define SIZEOF_SIZE_T 8
/* configure.ac defines HAVE_LARGEFILE_SUPPORT iff
- sizeof(off_t) > sizeof(long), and sizeof(PY_LONG_LONG) >= sizeof(off_t).
+ sizeof(off_t) > sizeof(long), and sizeof(long long) >= sizeof(off_t).
On Win64 the second condition is not true, but if fpos_t replaces off_t
then this is true. The uses of HAVE_LARGEFILE_SUPPORT imply that Win64
should define this. */
@@ -470,6 +464,10 @@ Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */
(which you can't on SCO ODT 3.0). */
/* #undef SYS_SELECT_WITH_SYS_TIME */
+/* Define if you want to build the _decimal module using a coroutine-local
+   rather than a thread-local context */
+#define WITH_DECIMAL_CONTEXTVAR 1
+
/* Define if you want documentation strings in extension modules */
#define WITH_DOC_STRINGS 1
@@ -684,4 +682,6 @@ Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */
/* Define if libssl has X509_VERIFY_PARAM_set1_host and related function */
#define HAVE_X509_VERIFY_PARAM_SET1_HOST 1
+#define PLATLIBDIR "lib"
+
#endif /* !Py_CONFIG_H */
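
With the version bump above, simply including Python.h under MSVC now auto-links python39.lib (or python39_d.lib for debug builds) through the #pragma comment(lib, ...) lines. A minimal embedding sketch under that assumption; no explicit linker flag for the import library should be needed:

    #include <Python.h>   /* pulls in pyconfig.h and its linker pragma */

    int main(void)
    {
        Py_Initialize();
        PyRun_SimpleString("import sys; print(sys.version)");
        return Py_FinalizeEx() < 0 ? 1 : 0;
    }
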
diff --git a/x64/include/pydebug.h b/x64/include/pydebug.h
index bd4aafe..78bcb11 100644
--- a/x64/include/pydebug.h
+++ b/x64/include/pydebug.h
@@ -5,8 +5,6 @@
extern "C" {
#endif
-/* These global variable are defined in pylifecycle.c */
-/* XXX (ncoghlan): move these declarations to pylifecycle.h? */
PyAPI_DATA(int) Py_DebugFlag;
PyAPI_DATA(int) Py_VerboseFlag;
PyAPI_DATA(int) Py_QuietFlag;
diff --git a/x64/include/pyerrors.h b/x64/include/pyerrors.h
index 5125a51..979a26b 100644
--- a/x64/include/pyerrors.h
+++ b/x64/include/pyerrors.h
@@ -4,6 +4,8 @@
extern "C" {
#endif
+#include <stdarg.h> // va_list
+
/* Error handling definitions */
PyAPI_FUNC(void) PyErr_SetNone(PyObject *);
@@ -21,7 +23,11 @@ PyAPI_FUNC(void) PyErr_GetExcInfo(PyObject **, PyObject **, PyObject **);
PyAPI_FUNC(void) PyErr_SetExcInfo(PyObject *, PyObject *, PyObject *);
#endif
-/* Defined in Python/pylifecycle.c */
+/* Defined in Python/pylifecycle.c
+
+   The Py_FatalError() function is replaced with a macro that automatically
+   logs the name of the current function, unless the Py_LIMITED_API macro
+   is defined. */
PyAPI_FUNC(void) _Py_NO_RETURN Py_FatalError(const char *message);
#if defined(Py_DEBUG) || defined(Py_LIMITED_API)
@@ -54,11 +60,11 @@ PyAPI_FUNC(void) PyException_SetContext(PyObject *, PyObject *);
PyType_FastSubclass((PyTypeObject*)(x), Py_TPFLAGS_BASE_EXC_SUBCLASS))
#define PyExceptionInstance_Check(x) \
- PyType_FastSubclass((x)->ob_type, Py_TPFLAGS_BASE_EXC_SUBCLASS)
+ PyType_FastSubclass(Py_TYPE(x), Py_TPFLAGS_BASE_EXC_SUBCLASS)
PyAPI_FUNC(const char *) PyExceptionClass_Name(PyObject *);
-#define PyExceptionInstance_Class(x) ((PyObject*)((x)->ob_type))
+#define PyExceptionInstance_Class(x) ((PyObject*)Py_TYPE(x))
/* Predefined exceptions */
@@ -303,21 +309,6 @@ PyAPI_FUNC(int) PyUnicodeTranslateError_SetReason(
const char *reason /* UTF-8 encoded string */
);
-/* These APIs aren't really part of the error implementation, but
- often needed to format error messages; the native C lib APIs are
- not available on all platforms, which is why we provide emulations
- for those platforms in Python/mysnprintf.c,
- WARNING: The return value of snprintf varies across platforms; do
- not rely on any particular behavior; eventually the C99 defn may
- be reliable.
-*/
-#if defined(MS_WIN32) && !defined(HAVE_SNPRINTF)
-# define HAVE_SNPRINTF
-# define snprintf _snprintf
-# define vsnprintf _vsnprintf
-#endif
-
-#include <stdarg.h>
PyAPI_FUNC(int) PyOS_snprintf(char *str, size_t size, const char *format, ...)
Py_GCC_ATTRIBUTE((format(printf, 3, 4)));
PyAPI_FUNC(int) PyOS_vsnprintf(char *str, size_t size, const char *format, va_list va)
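
The deleted block aliased snprintf to _snprintf for pre-C99 Windows toolchains; current MSVC ships a conforming snprintf, and CPython's portable wrappers remain. A short sketch of PyOS_snprintf, which always NUL-terminates regardless of platform:

    char buf[64];
    /* Output is truncated to fit and buf is always NUL-terminated. */
    PyOS_snprintf(buf, sizeof(buf), "error at index %d: %s", 3, "bad value");
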
diff --git a/x64/include/pyfpe.h b/x64/include/pyfpe.h
index 5a99e39..cc2def6 100644
--- a/x64/include/pyfpe.h
+++ b/x64/include/pyfpe.h
@@ -1,5 +1,7 @@
#ifndef Py_PYFPE_H
#define Py_PYFPE_H
+/* Header excluded from the stable API */
+#ifndef Py_LIMITED_API
/* These macros used to do something when Python was built with --with-fpectl,
* but support for that was dropped in 3.7. We continue to define them though,
@@ -9,4 +11,5 @@
#define PyFPE_START_PROTECT(err_string, leave_stmt)
#define PyFPE_END_PROTECT(v)
+#endif /* !defined(Py_LIMITED_API) */
#endif /* !Py_PYFPE_H */
diff --git a/x64/include/pyframe.h b/x64/include/pyframe.h
new file mode 100644
index 0000000..3816224
--- /dev/null
+++ b/x64/include/pyframe.h
@@ -0,0 +1,22 @@
+/* Limited C API of PyFrame API
+ *
+ * Include "frameobject.h" to get the PyFrameObject structure.
+ */
+
+#ifndef Py_PYFRAME_H
+#define Py_PYFRAME_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _frame PyFrameObject;
+
+/* Return the line of code the frame is currently executing. */
+PyAPI_FUNC(int) PyFrame_GetLineNumber(PyFrameObject *);
+
+PyAPI_FUNC(PyCodeObject *) PyFrame_GetCode(PyFrameObject *frame);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_PYFRAME_H */
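
pyframe.h is new in 3.9: it lets limited-API code handle frames without seeing the PyFrameObject layout. A sketch of inspecting the running frame, assuming the GIL is held; note that PyEval_GetFrame() returns a borrowed reference while PyFrame_GetCode() returns a strong one:

    PyFrameObject *frame = PyEval_GetFrame();         /* borrowed, may be NULL */
    if (frame != NULL) {
        PyCodeObject *code = PyFrame_GetCode(frame);  /* strong reference */
        int line = PyFrame_GetLineNumber(frame);
        /* ... report line, inspect code ... */
        Py_DECREF(code);
    }
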
diff --git a/x64/include/pyhash.h b/x64/include/pyhash.h
index dbcc974..4437b87 100644
--- a/x64/include/pyhash.h
+++ b/x64/include/pyhash.h
@@ -8,7 +8,9 @@ extern "C" {
/* Helpers for hash functions */
#ifndef Py_LIMITED_API
PyAPI_FUNC(Py_hash_t) _Py_HashDouble(double);
-PyAPI_FUNC(Py_hash_t) _Py_HashPointer(void*);
+PyAPI_FUNC(Py_hash_t) _Py_HashPointer(const void*);
+// Similar to _Py_HashPointer(), but don't replace -1 with -2
+PyAPI_FUNC(Py_hash_t) _Py_HashPointerRaw(const void*);
PyAPI_FUNC(Py_hash_t) _Py_HashBytes(const void*, Py_ssize_t);
#endif
diff --git a/x64/include/pymacro.h b/x64/include/pymacro.h
index 495c2c2..202b936 100644
--- a/x64/include/pymacro.h
+++ b/x64/include/pymacro.h
@@ -100,7 +100,33 @@
# define Py_UNUSED(name) _unused_ ## name
#endif
-#define Py_UNREACHABLE() \
+#if defined(RANDALL_WAS_HERE)
+# define Py_UNREACHABLE() \
+ Py_FatalError( \
+ "If you're seeing this, the code is in what I thought was\n" \
+ "an unreachable state.\n\n" \
+ "I could give you advice for what to do, but honestly, why\n" \
+ "should you trust me? I clearly screwed this up. I'm writing\n" \
+ "a message that should never appear, yet I know it will\n" \
+ "probably appear someday.\n\n" \
+ "On a deep level, I know I'm not up to this task.\n" \
+ "I'm so sorry.\n" \
+ "https://xkcd.com/2200")
+#elif defined(Py_DEBUG)
+# define Py_UNREACHABLE() \
+ Py_FatalError( \
+ "We've reached an unreachable state. Anything is possible.\n" \
+ "The limits were in our heads all along. Follow your dreams.\n" \
+ "https://xkcd.com/2200")
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+# define Py_UNREACHABLE() __builtin_unreachable()
+#elif defined(__clang__) || defined(__INTEL_COMPILER)
+# define Py_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define Py_UNREACHABLE() __assume(0)
+#else
+# define Py_UNREACHABLE() \
Py_FatalError("Unreachable C code path reached")
+#endif
#endif /* Py_PYMACRO_H */
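
Py_UNREACHABLE() now expands to the compiler's unreachable hint in release builds and to a fatal error (with an xkcd 2200 message) in debug builds. A sketch of the intended use, after a switch that handles every valid case; kind_name is a hypothetical helper:

    static const char *
    kind_name(int kind)              /* kind is known to be 0 or 1 */
    {
        switch (kind) {
        case 0: return "reader";
        case 1: return "writer";
        default: Py_UNREACHABLE();   /* all valid values handled above */
        }
    }
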
diff --git a/x64/include/pymath.h b/x64/include/pymath.h
index 6cf69f9..63ca972 100644
--- a/x64/include/pymath.h
+++ b/x64/include/pymath.h
@@ -125,7 +125,7 @@ PyAPI_FUNC(void) _Py_set_387controlword(unsigned short);
/* Py_IS_FINITE(X)
* Return 1 if float or double arg is neither infinite nor NAN, else 0.
- * Some compilers (e.g. VisualStudio) have intrisics for this, so a special
+ * Some compilers (e.g. VisualStudio) have intrinsics for this, so a special
* macro for this particular test is useful
* Note: PC/pyconfig.h defines Py_IS_FINITE as _finite
*/
@@ -227,4 +227,12 @@ PyAPI_FUNC(void) _Py_set_387controlword(unsigned short);
* behavior. */
#define _Py_InIntegralTypeRange(type, v) (_Py_IntegralTypeMin(type) <= v && v <= _Py_IntegralTypeMax(type))
+/* Return the smallest integer k such that n < 2**k, or 0 if n == 0.
+ * Equivalent to floor(log2(n))+1 for n > 0. Also equivalent to:
+ * bitwidth_of_type - count_leading_zero_bits(n)
+ */
+#ifndef Py_LIMITED_API
+PyAPI_FUNC(unsigned int) _Py_bit_length(unsigned long d);
+#endif
+
#endif /* Py_PYMATH_H */
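
A quick sanity check of the new private helper, following directly from the comment's definition (smallest k with n < 2**k); uses <assert.h>:

    assert(_Py_bit_length(0) == 0);
    assert(_Py_bit_length(1) == 1);    /* 1 < 2**1 */
    assert(_Py_bit_length(37) == 6);   /* 0b100101, so 37 < 2**6 */
    assert(_Py_bit_length(255) == 8);
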
diff --git a/x64/include/pymem.h b/x64/include/pymem.h
index 07b380a..607feb9 100644
--- a/x64/include/pymem.h
+++ b/x64/include/pymem.h
@@ -101,41 +101,6 @@ PyAPI_FUNC(void) PyMem_Free(void *ptr);
#define PyMem_Del PyMem_Free
#define PyMem_DEL PyMem_FREE
-/* bpo-35053: expose _Py_tracemalloc_config for performance:
- _Py_NewReference() needs an efficient check to test if tracemalloc is
- tracing.
-
- It has to be defined in pymem.h, before object.h is included. */
-struct _PyTraceMalloc_Config {
- /* Module initialized?
- Variable protected by the GIL */
- enum {
- TRACEMALLOC_NOT_INITIALIZED,
- TRACEMALLOC_INITIALIZED,
- TRACEMALLOC_FINALIZED
- } initialized;
-
- /* Is tracemalloc tracing memory allocations?
- Variable protected by the GIL */
- int tracing;
-
- /* limit of the number of frames in a traceback, 1 by default.
- Variable protected by the GIL. */
- int max_nframe;
-
- /* use domain in trace key?
- Variable protected by the GIL. */
- int use_domain;
-};
-
-PyAPI_DATA(struct _PyTraceMalloc_Config) _Py_tracemalloc_config;
-
-#define _PyTraceMalloc_Config_INIT \
- {.initialized = TRACEMALLOC_NOT_INITIALIZED, \
- .tracing = 0, \
- .max_nframe = 1, \
- .use_domain = 0}
-
#ifndef Py_LIMITED_API
# define Py_CPYTHON_PYMEM_H
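
The tracemalloc configuration struct moves out of public pymem.h (into the internal headers); the allocator API itself is untouched. For reference, the canonical usage these declarations support, with n a hypothetical element count:

    double *values = PyMem_Malloc(n * sizeof(double));
    if (values == NULL) {
        return PyErr_NoMemory();
    }
    /* ... fill and use values[0..n-1] ... */
    PyMem_Free(values);
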
diff --git a/x64/include/pyport.h b/x64/include/pyport.h
index 71f5794..4bd4eb4 100644
--- a/x64/include/pyport.h
+++ b/x64/include/pyport.h
@@ -133,8 +133,9 @@ typedef int Py_ssize_clean_t;
/* PY_FORMAT_SIZE_T is a platform-specific modifier for use in a printf
* format to convert an argument with the width of a size_t or Py_ssize_t.
- * C99 introduced "z" for this purpose, but not all platforms support that;
- * e.g., MS compilers use "I" instead.
+ * C99 introduced "z" for this purpose, but old MSVCs had not supported it.
+ * Since MSVC supports "z" since (at least) 2015, we can just use "z"
+ * for new code.
*
* These "high level" Python format functions interpret "z" correctly on
* all platforms (Python interprets the format string itself, and does whatever
@@ -152,19 +153,11 @@ typedef int Py_ssize_clean_t;
* Py_ssize_t index;
* fprintf(stderr, "index %" PY_FORMAT_SIZE_T "d sucks\n", index);
*
- * That will expand to %ld, or %Id, or to something else correct for a
- * Py_ssize_t on the platform.
+ * That will expand to %zd or to something else correct for a Py_ssize_t on
+ * the platform.
*/
#ifndef PY_FORMAT_SIZE_T
-# if SIZEOF_SIZE_T == SIZEOF_INT && !defined(__APPLE__)
-# define PY_FORMAT_SIZE_T ""
-# elif SIZEOF_SIZE_T == SIZEOF_LONG
-# define PY_FORMAT_SIZE_T "l"
-# elif defined(MS_WINDOWS)
-# define PY_FORMAT_SIZE_T "I"
-# else
-# error "This platform's pyconfig.h needs to define PY_FORMAT_SIZE_T"
-# endif
+# define PY_FORMAT_SIZE_T "z"
#endif
/* Py_LOCAL can be used instead of static to get the fastest possible calling
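
Collapsing PY_FORMAT_SIZE_T to "z" keeps existing callers source-compatible; the macro still splices into format strings the same way, as in the header's own example:

    Py_ssize_t index = 42;
    /* Now expands to "index %zd sucks\n" on every supported platform. */
    fprintf(stderr, "index %" PY_FORMAT_SIZE_T "d sucks\n", index);
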
@@ -520,6 +513,26 @@ extern "C" {
#define Py_DEPRECATED(VERSION_UNUSED)
#endif
+#if defined(__clang__)
+#define _Py_COMP_DIAG_PUSH _Pragma("clang diagnostic push")
+#define _Py_COMP_DIAG_IGNORE_DEPR_DECLS \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+#define _Py_COMP_DIAG_POP _Pragma("clang diagnostic pop")
+#elif defined(__GNUC__) \
+ && ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 6))
+#define _Py_COMP_DIAG_PUSH _Pragma("GCC diagnostic push")
+#define _Py_COMP_DIAG_IGNORE_DEPR_DECLS \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define _Py_COMP_DIAG_POP _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define _Py_COMP_DIAG_PUSH __pragma(warning(push))
+#define _Py_COMP_DIAG_IGNORE_DEPR_DECLS __pragma(warning(disable: 4996))
+#define _Py_COMP_DIAG_POP __pragma(warning(pop))
+#else
+#define _Py_COMP_DIAG_PUSH
+#define _Py_COMP_DIAG_IGNORE_DEPR_DECLS
+#define _Py_COMP_DIAG_POP
+#endif
/* _Py_HOT_FUNCTION
* The hot attribute on a function is used to inform the compiler that the
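
The new _Py_COMP_DIAG_* macros provide one portable spelling for locally silencing deprecation warnings under clang, GCC, and MSVC. A sketch of the intended pattern around an API deprecated in 3.9 (PyEval_InitThreads, for example):

    _Py_COMP_DIAG_PUSH
    _Py_COMP_DIAG_IGNORE_DEPR_DECLS
    PyEval_InitThreads();   /* deprecated since 3.9; warning suppressed here */
    _Py_COMP_DIAG_POP
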
@@ -645,16 +658,18 @@ extern char * _getpty(int *, int, mode_t, int);
# define HAVE_DECLSPEC_DLL
#endif
+#include "exports.h"
+
/* only get special linkage if built as shared or platform is Cygwin */
#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__)
# if defined(HAVE_DECLSPEC_DLL)
# if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
-# define PyAPI_FUNC(RTYPE) __declspec(dllexport) RTYPE
-# define PyAPI_DATA(RTYPE) extern __declspec(dllexport) RTYPE
+# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
+# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
/* module init functions inside the core need no external linkage */
/* except for Cygwin to handle embedding */
# if defined(__CYGWIN__)
-# define PyMODINIT_FUNC __declspec(dllexport) PyObject*
+# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
# else /* __CYGWIN__ */
# define PyMODINIT_FUNC PyObject*
# endif /* __CYGWIN__ */
@@ -665,14 +680,14 @@ extern char * _getpty(int *, int, mode_t, int);
/* failures similar to those described at the bottom of 4.1: */
/* http://docs.python.org/extending/windows.html#a-cookbook-approach */
# if !defined(__CYGWIN__)
-# define PyAPI_FUNC(RTYPE) __declspec(dllimport) RTYPE
+# define PyAPI_FUNC(RTYPE) Py_IMPORTED_SYMBOL RTYPE
# endif /* !__CYGWIN__ */
-# define PyAPI_DATA(RTYPE) extern __declspec(dllimport) RTYPE
+# define PyAPI_DATA(RTYPE) extern Py_IMPORTED_SYMBOL RTYPE
/* module init functions outside the core must be exported */
# if defined(__cplusplus)
-# define PyMODINIT_FUNC extern "C" __declspec(dllexport) PyObject*
+# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
# else /* __cplusplus */
-# define PyMODINIT_FUNC __declspec(dllexport) PyObject*
+# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
# endif /* __cplusplus */
# endif /* Py_BUILD_CORE */
# endif /* HAVE_DECLSPEC_DLL */
@@ -680,16 +695,16 @@ extern char * _getpty(int *, int, mode_t, int);
/* If no external linkage macros defined by now, create defaults */
#ifndef PyAPI_FUNC
-# define PyAPI_FUNC(RTYPE) RTYPE
+# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
#endif
#ifndef PyAPI_DATA
-# define PyAPI_DATA(RTYPE) extern RTYPE
+# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
#endif
#ifndef PyMODINIT_FUNC
# if defined(__cplusplus)
-# define PyMODINIT_FUNC extern "C" PyObject*
+# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
# else /* __cplusplus */
-# define PyMODINIT_FUNC PyObject*
+# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
# endif /* __cplusplus */
#endif
@@ -773,11 +788,11 @@ extern char * _getpty(int *, int, mode_t, int);
*/
#ifdef WORDS_BIGENDIAN
-#define PY_BIG_ENDIAN 1
-#define PY_LITTLE_ENDIAN 0
+# define PY_BIG_ENDIAN 1
+# define PY_LITTLE_ENDIAN 0
#else
-#define PY_BIG_ENDIAN 0
-#define PY_LITTLE_ENDIAN 1
+# define PY_BIG_ENDIAN 0
+# define PY_LITTLE_ENDIAN 1
#endif
#ifdef Py_BUILD_CORE
@@ -834,8 +849,9 @@ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
#endif
/* Mark a function which cannot return. Example:
-   PyAPI_FUNC(void) _Py_NO_RETURN PyThread_exit_thread(void); */
+   PyAPI_FUNC(void) _Py_NO_RETURN PyThread_exit_thread(void);
+
+   XLC support is intentionally omitted due to bpo-40244 */
#if defined(__clang__) || \
(defined(__GNUC__) && \
((__GNUC__ >= 3) || \
@@ -847,4 +863,16 @@ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
# define _Py_NO_RETURN
#endif
+
+// Preprocessor check for compiler builtins. Always returns 0 if the
+// __has_builtin() macro is not defined.
+//
+// __has_builtin() is available on clang and GCC 10.
+#ifdef __has_builtin
+# define _Py__has_builtin(x) __has_builtin(x)
+#else
+# define _Py__has_builtin(x) 0
+#endif
+
+
#endif /* Py_PYPORT_H */
diff --git a/x64/include/pystate.h b/x64/include/pystate.h
index 4c25e3f..bae4407 100644
--- a/x64/include/pystate.h
+++ b/x64/include/pystate.h
@@ -7,27 +7,35 @@
extern "C" {
#endif
-#include "pythread.h"
-
/* This limitation is for performance and simplicity. If needed it can be
removed (with effort). */
#define MAX_CO_EXTRA_USERS 255
/* Forward declarations for PyFrameObject, PyThreadState
and PyInterpreterState */
-struct _frame;
struct _ts;
struct _is;
/* struct _ts is defined in cpython/pystate.h */
typedef struct _ts PyThreadState;
-/* struct _is is defined in internal/pycore_pystate.h */
+/* struct _is is defined in internal/pycore_interp.h */
typedef struct _is PyInterpreterState;
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_New(void);
PyAPI_FUNC(void) PyInterpreterState_Clear(PyInterpreterState *);
PyAPI_FUNC(void) PyInterpreterState_Delete(PyInterpreterState *);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+/* New in 3.9 */
+/* Get the current interpreter state.
+
+   Issue a fatal error if there is no current Python thread state or no
+   current interpreter. It cannot return NULL.
+
+ The caller must hold the GIL. */
+PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Get(void);
+#endif
+
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03080000
/* New in 3.8 */
PyAPI_FUNC(PyObject *) PyInterpreterState_GetDict(PyInterpreterState *);
@@ -50,7 +58,6 @@ PyAPI_FUNC(PyObject*) PyState_FindModule(struct PyModuleDef*);
PyAPI_FUNC(PyThreadState *) PyThreadState_New(PyInterpreterState *);
PyAPI_FUNC(void) PyThreadState_Clear(PyThreadState *);
PyAPI_FUNC(void) PyThreadState_Delete(PyThreadState *);
-PyAPI_FUNC(void) PyThreadState_DeleteCurrent(void);
/* Get the current thread state.
@@ -77,6 +84,13 @@ PyAPI_FUNC(PyThreadState *) PyThreadState_Swap(PyThreadState *);
PyAPI_FUNC(PyObject *) PyThreadState_GetDict(void);
PyAPI_FUNC(int) PyThreadState_SetAsyncExc(unsigned long, PyObject *);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
+/* New in 3.9 */
+PyAPI_FUNC(PyInterpreterState*) PyThreadState_GetInterpreter(PyThreadState *tstate);
+PyAPI_FUNC(PyFrameObject*) PyThreadState_GetFrame(PyThreadState *tstate);
+PyAPI_FUNC(uint64_t) PyThreadState_GetID(PyThreadState *tstate);
+#endif
+
typedef
enum {PyGILState_LOCKED, PyGILState_UNLOCKED}
PyGILState_STATE;
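
A sketch combining the three thread-state accessors added in 3.9. All require the GIL; PyThreadState_GetFrame() returns a strong reference (or NULL if no frame is executing):

    PyThreadState *tstate = PyThreadState_Get();   /* never NULL, needs GIL */
    PyInterpreterState *interp = PyThreadState_GetInterpreter(tstate);
    uint64_t id = PyThreadState_GetID(tstate);
    PyFrameObject *frame = PyThreadState_GetFrame(tstate);   /* strong ref */
    if (frame != NULL) {
        Py_DECREF(frame);
    }
    /* PyInterpreterState_Get() returns the same interpreter as interp. */
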
diff --git a/x64/include/pythonrun.h b/x64/include/pythonrun.h
index 46091e0..5752907 100644
--- a/x64/include/pythonrun.h
+++ b/x64/include/pythonrun.h
@@ -72,16 +72,23 @@ PyAPI_FUNC(struct _mod *) PyParser_ASTFromFileObject(
#define PyParser_SimpleParseFile(FP, S, B) \
PyParser_SimpleParseFileFlags(FP, S, B, 0)
#endif
-PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlags(const char *, int,
- int);
+
+#ifndef Py_BUILD_CORE
+Py_DEPRECATED(3.9)
+#endif
+PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlags(const char *, int, int);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
+#ifndef Py_BUILD_CORE
+Py_DEPRECATED(3.9)
+#endif
PyAPI_FUNC(struct _node *) PyParser_SimpleParseStringFlagsFilename(const char *,
const char *,
int, int);
#endif
-PyAPI_FUNC(struct _node *) PyParser_SimpleParseFileFlags(FILE *, const char *,
- int, int);
-
+#ifndef Py_BUILD_CORE
+Py_DEPRECATED(3.9)
+#endif
+PyAPI_FUNC(struct _node *) PyParser_SimpleParseFileFlags(FILE *, const char *, int, int);
#ifndef Py_LIMITED_API
PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *,
PyObject *, PyCompilerFlags *);
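
The PyParser_* entry points are deprecated here in 3.9 (the old parser was replaced by the PEG parser, and these go away in 3.10). For callers that only parse and compile source, a sketch of the supported route:

    PyObject *code = Py_CompileString("x = 1 + 1", "<example>", Py_file_input);
    if (code == NULL) {
        /* a SyntaxError (or MemoryError) is already set */
        return NULL;
    }
    Py_DECREF(code);
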
diff --git a/x64/include/pythread.h b/x64/include/pythread.h
index f22e8c4..bb9d864 100644
--- a/x64/include/pythread.h
+++ b/x64/include/pythread.h
@@ -3,7 +3,6 @@
#define Py_PYTHREAD_H
typedef void *PyThread_type_lock;
-typedef void *PyThread_type_sema;
#ifdef __cplusplus
extern "C" {
@@ -37,6 +36,15 @@ PyAPI_FUNC(int) PyThread_acquire_lock(PyThread_type_lock, int);
#define WAIT_LOCK 1
#define NOWAIT_LOCK 0
+#ifndef Py_LIMITED_API
+#ifdef HAVE_FORK
+/* Private function to reinitialize a lock at fork in the child process.
+ Reset the lock to the unlocked state.
+ Return 0 on success, return -1 on error. */
+PyAPI_FUNC(int) _PyThread_at_fork_reinit(PyThread_type_lock *lock);
+#endif /* HAVE_FORK */
+#endif /* !Py_LIMITED_API */
+
/* PY_TIMEOUT_T is the integral type used to specify timeouts when waiting
on a lock (see PyThread_acquire_lock_timed() below).
PY_TIMEOUT_MAX is the highest usable value (in microseconds) of that
@@ -51,16 +59,16 @@ PyAPI_FUNC(int) PyThread_acquire_lock(PyThread_type_lock, int);
#if defined(_POSIX_THREADS)
/* PyThread_acquire_lock_timed() uses _PyTime_FromNanoseconds(us * 1000),
convert microseconds to nanoseconds. */
-# define PY_TIMEOUT_MAX (PY_LLONG_MAX / 1000)
+# define PY_TIMEOUT_MAX (LLONG_MAX / 1000)
#elif defined (NT_THREADS)
/* In the NT API, the timeout is a DWORD and is expressed in milliseconds */
-# if 0xFFFFFFFFLL * 1000 < PY_LLONG_MAX
+# if 0xFFFFFFFFLL * 1000 < LLONG_MAX
# define PY_TIMEOUT_MAX (0xFFFFFFFFLL * 1000)
# else
-# define PY_TIMEOUT_MAX PY_LLONG_MAX
+# define PY_TIMEOUT_MAX LLONG_MAX
# endif
#else
-# define PY_TIMEOUT_MAX PY_LLONG_MAX
+# define PY_TIMEOUT_MAX LLONG_MAX
#endif
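
PY_TIMEOUT_MAX is now defined from the standard LLONG_MAX rather than the removed PY_LLONG_MAX; callers are unaffected. A sketch of a bounded acquisition with a hypothetical one-second timeout:

    PyThread_type_lock lock = PyThread_allocate_lock();
    PY_TIMEOUT_T usec = 1000000;   /* 1s, comfortably below PY_TIMEOUT_MAX */
    if (PyThread_acquire_lock_timed(lock, usec, 0) == PY_LOCK_ACQUIRED) {
        /* ... critical section ... */
        PyThread_release_lock(lock);
    }
    PyThread_free_lock(lock);
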
diff --git a/x64/include/rangeobject.h b/x64/include/rangeobject.h
index 7e4dc28..d6af847 100644
--- a/x64/include/rangeobject.h
+++ b/x64/include/rangeobject.h
@@ -19,7 +19,7 @@ PyAPI_DATA(PyTypeObject) PyRange_Type;
PyAPI_DATA(PyTypeObject) PyRangeIter_Type;
PyAPI_DATA(PyTypeObject) PyLongRangeIter_Type;
-#define PyRange_Check(op) (Py_TYPE(op) == &PyRange_Type)
+#define PyRange_Check(op) Py_IS_TYPE(op, &PyRange_Type)
#ifdef __cplusplus
}
diff --git a/x64/include/setobject.h b/x64/include/setobject.h
index fc0ea83..119619e 100644
--- a/x64/include/setobject.h
+++ b/x64/include/setobject.h
@@ -70,7 +70,6 @@ PyAPI_DATA(PyObject *) _PySet_Dummy;
PyAPI_FUNC(int) _PySet_NextEntry(PyObject *set, Py_ssize_t *pos, PyObject **key, Py_hash_t *hash);
PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable);
-PyAPI_FUNC(int) PySet_ClearFreeList(void);
#endif /* Section excluded by Py_LIMITED_API */
@@ -88,18 +87,18 @@ PyAPI_FUNC(int) PySet_Discard(PyObject *set, PyObject *key);
PyAPI_FUNC(PyObject *) PySet_Pop(PyObject *set);
PyAPI_FUNC(Py_ssize_t) PySet_Size(PyObject *anyset);
-#define PyFrozenSet_CheckExact(ob) (Py_TYPE(ob) == &PyFrozenSet_Type)
+#define PyFrozenSet_CheckExact(ob) Py_IS_TYPE(ob, &PyFrozenSet_Type)
#define PyAnySet_CheckExact(ob) \
- (Py_TYPE(ob) == &PySet_Type || Py_TYPE(ob) == &PyFrozenSet_Type)
+ (Py_IS_TYPE(ob, &PySet_Type) || Py_IS_TYPE(ob, &PyFrozenSet_Type))
#define PyAnySet_Check(ob) \
- (Py_TYPE(ob) == &PySet_Type || Py_TYPE(ob) == &PyFrozenSet_Type || \
+ (Py_IS_TYPE(ob, &PySet_Type) || Py_IS_TYPE(ob, &PyFrozenSet_Type) || \
PyType_IsSubtype(Py_TYPE(ob), &PySet_Type) || \
PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))
#define PySet_Check(ob) \
- (Py_TYPE(ob) == &PySet_Type || \
+ (Py_IS_TYPE(ob, &PySet_Type) || \
PyType_IsSubtype(Py_TYPE(ob), &PySet_Type))
#define PyFrozenSet_Check(ob) \
- (Py_TYPE(ob) == &PyFrozenSet_Type || \
+ (Py_IS_TYPE(ob, &PyFrozenSet_Type) || \
PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))
#ifdef __cplusplus
diff --git a/x64/include/sliceobject.h b/x64/include/sliceobject.h
index aae6f3c..2c88950 100644
--- a/x64/include/sliceobject.h
+++ b/x64/include/sliceobject.h
@@ -28,7 +28,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PySlice_Type;
PyAPI_DATA(PyTypeObject) PyEllipsis_Type;
-#define PySlice_Check(op) (Py_TYPE(op) == &PySlice_Type)
+#define PySlice_Check(op) Py_IS_TYPE(op, &PySlice_Type)
PyAPI_FUNC(PyObject *) PySlice_New(PyObject* start, PyObject* stop,
PyObject* step);
diff --git a/x64/include/structseq.h b/x64/include/structseq.h
index e5e5d5c..8f51c89 100644
--- a/x64/include/structseq.h
+++ b/x64/include/structseq.h
@@ -19,7 +19,7 @@ typedef struct PyStructSequence_Desc {
int n_in_sequence;
} PyStructSequence_Desc;
-extern char* PyStructSequence_UnnamedField;
+extern const char * const PyStructSequence_UnnamedField;
#ifndef Py_LIMITED_API
PyAPI_FUNC(void) PyStructSequence_InitType(PyTypeObject *type,
diff --git a/x64/include/symtable.h b/x64/include/symtable.h
index 5dcfa7e..abd19a7 100644
--- a/x64/include/symtable.h
+++ b/x64/include/symtable.h
@@ -69,7 +69,7 @@ typedef struct _symtable_entry {
PyAPI_DATA(PyTypeObject) PySTEntry_Type;
-#define PySTEntry_Check(op) (Py_TYPE(op) == &PySTEntry_Type)
+#define PySTEntry_Check(op) Py_IS_TYPE(op, &PySTEntry_Type)
PyAPI_FUNC(int) PyST_GetScope(PySTEntryObject *, PyObject *);
diff --git a/x64/include/token.h b/x64/include/token.h
index e08708b..9b8a3aa 100644
--- a/x64/include/token.h
+++ b/x64/include/token.h
@@ -78,6 +78,10 @@ extern "C" {
#define ISTERMINAL(x) ((x) < NT_OFFSET)
#define ISNONTERMINAL(x) ((x) >= NT_OFFSET)
#define ISEOF(x) ((x) == ENDMARKER)
+#define ISWHITESPACE(x) ((x) == ENDMARKER || \
+ (x) == NEWLINE || \
+ (x) == INDENT || \
+ (x) == DEDENT)
PyAPI_DATA(const char * const) _PyParser_TokenNames[]; /* Token names */
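
ISWHITESPACE() is new and groups the tokens that carry layout rather than content. A sketch of the filtering it enables in a tokenizer consumer, with tok_type a hypothetical token value:

    if (ISWHITESPACE(tok_type)) {
        continue;   /* ENDMARKER / NEWLINE / INDENT / DEDENT only */
    }
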
diff --git a/x64/include/traceback.h b/x64/include/traceback.h
index b451927..781e5a6 100644
--- a/x64/include/traceback.h
+++ b/x64/include/traceback.h
@@ -4,16 +4,14 @@
extern "C" {
#endif
-struct _frame;
-
/* Traceback interface */
-PyAPI_FUNC(int) PyTraceBack_Here(struct _frame *);
+PyAPI_FUNC(int) PyTraceBack_Here(PyFrameObject *);
PyAPI_FUNC(int) PyTraceBack_Print(PyObject *, PyObject *);
/* Reveal traceback type so we can typecheck traceback objects */
PyAPI_DATA(PyTypeObject) PyTraceBack_Type;
-#define PyTraceBack_Check(v) (Py_TYPE(v) == &PyTraceBack_Type)
+#define PyTraceBack_Check(v) Py_IS_TYPE(v, &PyTraceBack_Type)
#ifndef Py_LIMITED_API
diff --git a/x64/include/tupleobject.h b/x64/include/tupleobject.h
index 590902d..e796a32 100644
--- a/x64/include/tupleobject.h
+++ b/x64/include/tupleobject.h
@@ -25,7 +25,7 @@ PyAPI_DATA(PyTypeObject) PyTupleIter_Type;
#define PyTuple_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_TUPLE_SUBCLASS)
-#define PyTuple_CheckExact(op) (Py_TYPE(op) == &PyTuple_Type)
+#define PyTuple_CheckExact(op) Py_IS_TYPE(op, &PyTuple_Type)
PyAPI_FUNC(PyObject *) PyTuple_New(Py_ssize_t size);
PyAPI_FUNC(Py_ssize_t) PyTuple_Size(PyObject *);
@@ -34,8 +34,6 @@ PyAPI_FUNC(int) PyTuple_SetItem(PyObject *, Py_ssize_t, PyObject *);
PyAPI_FUNC(PyObject *) PyTuple_GetSlice(PyObject *, Py_ssize_t, Py_ssize_t);
PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...);
-PyAPI_FUNC(int) PyTuple_ClearFreeList(void);
-
#ifndef Py_LIMITED_API
# define Py_CPYTHON_TUPLEOBJECT_H
# include "cpython/tupleobject.h"
diff --git a/x64/include/typeslots.h b/x64/include/typeslots.h
index 0ce6a37..64f6fff 100644
--- a/x64/include/typeslots.h
+++ b/x64/include/typeslots.h
@@ -1,7 +1,12 @@
/* Do not renumber the file; these numbers are part of the stable ABI. */
+#if defined(Py_LIMITED_API)
/* Disabled, see #10181 */
#undef Py_bf_getbuffer
#undef Py_bf_releasebuffer
+#else
+#define Py_bf_getbuffer 1
+#define Py_bf_releasebuffer 2
+#endif
#define Py_mp_ass_subscript 3
#define Py_mp_length 4
#define Py_mp_subscript 5
diff --git a/x64/include/unicodeobject.h b/x64/include/unicodeobject.h
index 97d8cd1..500ce24 100644
--- a/x64/include/unicodeobject.h
+++ b/x64/include/unicodeobject.h
@@ -113,7 +113,7 @@ PyAPI_DATA(PyTypeObject) PyUnicodeIter_Type;
#define PyUnicode_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_UNICODE_SUBCLASS)
-#define PyUnicode_CheckExact(op) (Py_TYPE(op) == &PyUnicode_Type)
+#define PyUnicode_CheckExact(op) Py_IS_TYPE(op, &PyUnicode_Type)
/* --- Constants ---------------------------------------------------------- */
@@ -328,17 +328,6 @@ PyAPI_FUNC(wchar_t*) PyUnicode_AsWideCharString(
PyAPI_FUNC(PyObject*) PyUnicode_FromOrdinal(int ordinal);
-/* --- Free-list management ----------------------------------------------- */
-
-/* Clear the free list used by the Unicode implementation.
-
- This can be used to release memory used for objects on the free
- list back to the Python memory allocator.
-
-*/
-
-PyAPI_FUNC(int) PyUnicode_ClearFreeList(void);
-
/* === Builtin Codecs =====================================================
Many of these APIs take two arguments encoding and errors. These
diff --git a/x64/include/weakrefobject.h b/x64/include/weakrefobject.h
index 1705156..ac4b482 100644
--- a/x64/include/weakrefobject.h
+++ b/x64/include/weakrefobject.h
@@ -46,10 +46,10 @@ PyAPI_DATA(PyTypeObject) _PyWeakref_CallableProxyType;
#define PyWeakref_CheckRef(op) PyObject_TypeCheck(op, &_PyWeakref_RefType)
#define PyWeakref_CheckRefExact(op) \
- (Py_TYPE(op) == &_PyWeakref_RefType)
+ Py_IS_TYPE(op, &_PyWeakref_RefType)
#define PyWeakref_CheckProxy(op) \
- ((Py_TYPE(op) == &_PyWeakref_ProxyType) || \
- (Py_TYPE(op) == &_PyWeakref_CallableProxyType))
+ (Py_IS_TYPE(op, &_PyWeakref_ProxyType) || \
+ Py_IS_TYPE(op, &_PyWeakref_CallableProxyType))
#define PyWeakref_Check(op) \
(PyWeakref_CheckRef(op) || PyWeakref_CheckProxy(op))
diff --git a/x64/libs/_uuid.lib b/x64/libs/_uuid.lib
new file mode 100644
index 0000000..e062def
--- /dev/null
+++ b/x64/libs/_uuid.lib
Binary files differ
diff --git a/x64/libs/_zoneinfo.lib b/x64/libs/_zoneinfo.lib
new file mode 100644
index 0000000..355e70b
--- /dev/null
+++ b/x64/libs/_zoneinfo.lib
Binary files differ
diff --git a/x64/libs/python38.lib b/x64/libs/python38.lib
deleted file mode 100644
index df6f508..0000000
--- a/x64/libs/python38.lib
+++ /dev/null
Binary files differ
diff --git a/x64/libs/python39.lib b/x64/libs/python39.lib
new file mode 100644
index 0000000..208b57c
--- /dev/null
+++ b/x64/libs/python39.lib
Binary files differ
diff --git a/x64/libs/sqlite3.lib b/x64/libs/sqlite3.lib
index d13f850..9e20e15 100644
--- a/x64/libs/sqlite3.lib
+++ b/x64/libs/sqlite3.lib
Binary files differ
diff --git a/x64/python.exe b/x64/python.exe
index ca3d55c..77874c0 100644
--- a/x64/python.exe
+++ b/x64/python.exe
Binary files differ
diff --git a/x64/python38.dll b/x64/python38.dll
deleted file mode 100644
index 99f7d63..0000000
--- a/x64/python38.dll
+++ /dev/null
Binary files differ
diff --git a/x64/python39.dll b/x64/python39.dll
new file mode 100644
index 0000000..2f11616
--- /dev/null
+++ b/x64/python39.dll
Binary files differ
diff --git a/x64/pythonw.exe b/x64/pythonw.exe
index d9c8cba..5fa53f6 100644
--- a/x64/pythonw.exe
+++ b/x64/pythonw.exe
Binary files differ
diff --git a/x64/vcruntime140.dll b/x64/vcruntime140.dll
new file mode 100644
index 0000000..264a0ce
--- /dev/null
+++ b/x64/vcruntime140.dll
Binary files differ